summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--mk/kernel-ver.mk6
-rw-r--r--target/arm/solidrun-imx6/patches/3.14.36/0001-solidrun-openelec.patch429704
-rw-r--r--target/config/Config.in.kernelversion.choice9
-rw-r--r--target/config/Config.in.kernelversion.default1
4 files changed, 429720 insertions, 0 deletions
diff --git a/mk/kernel-ver.mk b/mk/kernel-ver.mk
index df4bd85de..7a06e4cf9 100644
--- a/mk/kernel-ver.mk
+++ b/mk/kernel-ver.mk
@@ -22,6 +22,12 @@ KERNEL_MOD_VERSION:= $(KERNEL_VERSION)
KERNEL_RELEASE:= 1
KERNEL_HASH:= 8c745b47b3ae0631b2e59423dc255dccbc64d599f9183b390b442dd500e5cb49
endif
+ifeq ($(ADK_KERNEL_VERSION_3_14_36),y)
+KERNEL_VERSION:= 3.14.36
+KERNEL_MOD_VERSION:= $(KERNEL_VERSION)
+KERNEL_RELEASE:= 1
+KERNEL_HASH:= 19d0e157ae36fb6f0789fe4cd7b0b6c67856cfc1995605076b74eff10718f40e
+endif
ifeq ($(ADK_KERNEL_VERSION_3_12_44),y)
KERNEL_VERSION:= 3.12.44
KERNEL_MOD_VERSION:= $(KERNEL_VERSION)
diff --git a/target/arm/solidrun-imx6/patches/3.14.36/0001-solidrun-openelec.patch b/target/arm/solidrun-imx6/patches/3.14.36/0001-solidrun-openelec.patch
new file mode 100644
index 000000000..ef2c8ed0c
--- /dev/null
+++ b/target/arm/solidrun-imx6/patches/3.14.36/0001-solidrun-openelec.patch
@@ -0,0 +1,429704 @@
+diff -Nur linux-3.14.36/arch/arm/boot/dts/clcd-panels.dtsi linux-openelec/arch/arm/boot/dts/clcd-panels.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/clcd-panels.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/clcd-panels.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,52 @@
++/*
++ * ARM Ltd. Versatile Express
++ *
++ */
++
++/ {
++ panels {
++ panel@0 {
++ compatible = "panel";
++ mode = "VGA";
++ refresh = <60>;
++ xres = <640>;
++ yres = <480>;
++ pixclock = <39721>;
++ left_margin = <40>;
++ right_margin = <24>;
++ upper_margin = <32>;
++ lower_margin = <11>;
++ hsync_len = <96>;
++ vsync_len = <2>;
++ sync = <0>;
++ vmode = "FB_VMODE_NONINTERLACED";
++
++ tim2 = "TIM2_BCD", "TIM2_IPC";
++ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
++ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
++ bpp = <16>;
++ };
++
++ panel@1 {
++ compatible = "panel";
++ mode = "XVGA";
++ refresh = <60>;
++ xres = <1024>;
++ yres = <768>;
++ pixclock = <15748>;
++ left_margin = <152>;
++ right_margin = <48>;
++ upper_margin = <23>;
++ lower_margin = <3>;
++ hsync_len = <104>;
++ vsync_len = <4>;
++ sync = <0>;
++ vmode = "FB_VMODE_NONINTERLACED";
++
++ tim2 = "TIM2_BCD", "TIM2_IPC";
++ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
++ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
++ bpp = <16>;
++ };
++ };
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/efm32gg-dk3750.dts linux-openelec/arch/arm/boot/dts/efm32gg-dk3750.dts
+--- linux-3.14.36/arch/arm/boot/dts/efm32gg-dk3750.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/efm32gg-dk3750.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -26,7 +26,7 @@
+ };
+
+ i2c@4000a000 {
+- location = <3>;
++ efm32,location = <3>;
+ status = "ok";
+
+ temp@48 {
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx23.dtsi linux-openelec/arch/arm/boot/dts/imx23.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx23.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx23.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -363,7 +363,8 @@
+ compatible = "fsl,imx23-lcdif";
+ reg = <0x80030000 2000>;
+ interrupts = <46 45>;
+- clocks = <&clks 38>;
++ clocks = <&clks 38>, <&clks 38>;
++ clock-names = "pix", "axi";
+ status = "disabled";
+ };
+
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx25.dtsi linux-openelec/arch/arm/boot/dts/imx25.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx25.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx25.dtsi 2015-07-24 18:03:29.476842002 -0500
+@@ -13,6 +13,7 @@
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -56,6 +57,7 @@
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx25.dtsi.orig linux-openelec/arch/arm/boot/dts/imx25.dtsi.orig
+--- linux-3.14.36/arch/arm/boot/dts/imx25.dtsi.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx25.dtsi.orig 2015-07-24 18:03:29.376842002 -0500
+@@ -0,0 +1,543 @@
++/*
++ * Copyright 2012 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include "skeleton.dtsi"
++
++/ {
++ aliases {
++ ethernet0 = &fec;
++ gpio0 = &gpio1;
++ gpio1 = &gpio2;
++ gpio2 = &gpio3;
++ gpio3 = &gpio4;
++ i2c0 = &i2c1;
++ i2c1 = &i2c2;
++ i2c2 = &i2c3;
++ serial0 = &uart1;
++ serial1 = &uart2;
++ serial2 = &uart3;
++ serial3 = &uart4;
++ serial4 = &uart5;
++ spi0 = &spi1;
++ spi1 = &spi2;
++ spi2 = &spi3;
++ usb0 = &usbotg;
++ usb1 = &usbhost1;
++ ethernet0 = &fec;
++ };
++
++ cpus {
++ #address-cells = <0>;
++ #size-cells = <0>;
++
++ cpu {
++ compatible = "arm,arm926ej-s";
++ device_type = "cpu";
++ };
++ };
++
++ asic: asic-interrupt-controller@68000000 {
++ compatible = "fsl,imx25-asic", "fsl,avic";
++ interrupt-controller;
++ #interrupt-cells = <1>;
++ reg = <0x68000000 0x8000000>;
++ };
++
++ clocks {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ osc {
++ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ };
++ };
++
++ soc {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "simple-bus";
++ interrupt-parent = <&asic>;
++ ranges;
++
++ aips@43f00000 { /* AIPS1 */
++ compatible = "fsl,aips-bus", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x43f00000 0x100000>;
++ ranges;
++
++ i2c1: i2c@43f80000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,imx25-i2c", "fsl,imx21-i2c";
++ reg = <0x43f80000 0x4000>;
++ clocks = <&clks 48>;
++ clock-names = "";
++ interrupts = <3>;
++ status = "disabled";
++ };
++
++ i2c3: i2c@43f84000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,imx25-i2c", "fsl,imx21-i2c";
++ reg = <0x43f84000 0x4000>;
++ clocks = <&clks 48>;
++ clock-names = "";
++ interrupts = <10>;
++ status = "disabled";
++ };
++
++ can1: can@43f88000 {
++ compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
++ reg = <0x43f88000 0x4000>;
++ interrupts = <43>;
++ clocks = <&clks 75>, <&clks 75>;
++ clock-names = "ipg", "per";
++ status = "disabled";
++ };
++
++ can2: can@43f8c000 {
++ compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
++ reg = <0x43f8c000 0x4000>;
++ interrupts = <44>;
++ clocks = <&clks 76>, <&clks 76>;
++ clock-names = "ipg", "per";
++ status = "disabled";
++ };
++
++ uart1: serial@43f90000 {
++ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
++ reg = <0x43f90000 0x4000>;
++ interrupts = <45>;
++ clocks = <&clks 120>, <&clks 57>;
++ clock-names = "ipg", "per";
++ status = "disabled";
++ };
++
++ uart2: serial@43f94000 {
++ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
++ reg = <0x43f94000 0x4000>;
++ interrupts = <32>;
++ clocks = <&clks 121>, <&clks 57>;
++ clock-names = "ipg", "per";
++ status = "disabled";
++ };
++
++ i2c2: i2c@43f98000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,imx25-i2c", "fsl,imx21-i2c";
++ reg = <0x43f98000 0x4000>;
++ clocks = <&clks 48>;
++ clock-names = "";
++ interrupts = <4>;
++ status = "disabled";
++ };
++
++ owire@43f9c000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x43f9c000 0x4000>;
++ clocks = <&clks 51>;
++ clock-names = "";
++ interrupts = <2>;
++ status = "disabled";
++ };
++
++ spi1: cspi@43fa4000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
++ reg = <0x43fa4000 0x4000>;
++ clocks = <&clks 78>, <&clks 78>;
++ clock-names = "ipg", "per";
++ interrupts = <14>;
++ status = "disabled";
++ };
++
++ kpp@43fa8000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x43fa8000 0x4000>;
++ clocks = <&clks 102>;
++ clock-names = "";
++ interrupts = <24>;
++ status = "disabled";
++ };
++
++ iomuxc@43fac000{
++ compatible = "fsl,imx25-iomuxc";
++ reg = <0x43fac000 0x4000>;
++ };
++
++ audmux@43fb0000 {
++ compatible = "fsl,imx25-audmux", "fsl,imx31-audmux";
++ reg = <0x43fb0000 0x4000>;
++ status = "disabled";
++ };
++ };
++
++ spba@50000000 {
++ compatible = "fsl,spba-bus", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x50000000 0x40000>;
++ ranges;
++
++ spi3: cspi@50004000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
++ reg = <0x50004000 0x4000>;
++ interrupts = <0>;
++ clocks = <&clks 80>, <&clks 80>;
++ clock-names = "ipg", "per";
++ status = "disabled";
++ };
++
++ uart4: serial@50008000 {
++ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
++ reg = <0x50008000 0x4000>;
++ interrupts = <5>;
++ clocks = <&clks 123>, <&clks 57>;
++ clock-names = "ipg", "per";
++ status = "disabled";
++ };
++
++ uart3: serial@5000c000 {
++ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
++ reg = <0x5000c000 0x4000>;
++ interrupts = <18>;
++ clocks = <&clks 122>, <&clks 57>;
++ clock-names = "ipg", "per";
++ status = "disabled";
++ };
++
++ spi2: cspi@50010000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
++ reg = <0x50010000 0x4000>;
++ clocks = <&clks 79>, <&clks 79>;
++ clock-names = "ipg", "per";
++ interrupts = <13>;
++ status = "disabled";
++ };
++
++ ssi2: ssi@50014000 {
++ compatible = "fsl,imx25-ssi", "fsl,imx21-ssi";
++ reg = <0x50014000 0x4000>;
++ interrupts = <11>;
++ status = "disabled";
++ };
++
++ esai@50018000 {
++ reg = <0x50018000 0x4000>;
++ interrupts = <7>;
++ };
++
++ uart5: serial@5002c000 {
++ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
++ reg = <0x5002c000 0x4000>;
++ interrupts = <40>;
++ clocks = <&clks 124>, <&clks 57>;
++ clock-names = "ipg", "per";
++ status = "disabled";
++ };
++
++ tsc: tsc@50030000 {
++ compatible = "fsl,imx25-adc", "fsl,imx21-tsc";
++ reg = <0x50030000 0x4000>;
++ interrupts = <46>;
++ clocks = <&clks 119>;
++ clock-names = "ipg";
++ status = "disabled";
++ };
++
++ ssi1: ssi@50034000 {
++ compatible = "fsl,imx25-ssi", "fsl,imx21-ssi";
++ reg = <0x50034000 0x4000>;
++ interrupts = <12>;
++ status = "disabled";
++ };
++
++ fec: ethernet@50038000 {
++ compatible = "fsl,imx25-fec";
++ reg = <0x50038000 0x4000>;
++ interrupts = <57>;
++ clocks = <&clks 88>, <&clks 65>;
++ clock-names = "ipg", "ahb";
++ status = "disabled";
++ };
++ };
++
++ aips@53f00000 { /* AIPS2 */
++ compatible = "fsl,aips-bus", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x53f00000 0x100000>;
++ ranges;
++
++ clks: ccm@53f80000 {
++ compatible = "fsl,imx25-ccm";
++ reg = <0x53f80000 0x4000>;
++ interrupts = <31>;
++ #clock-cells = <1>;
++ };
++
++ gpt4: timer@53f84000 {
++ compatible = "fsl,imx25-gpt", "fsl,imx31-gpt";
++ reg = <0x53f84000 0x4000>;
++ clocks = <&clks 9>, <&clks 45>;
++ clock-names = "ipg", "per";
++ interrupts = <1>;
++ };
++
++ gpt3: timer@53f88000 {
++ compatible = "fsl,imx25-gpt", "fsl,imx31-gpt";
++ reg = <0x53f88000 0x4000>;
++ clocks = <&clks 9>, <&clks 47>;
++ clock-names = "ipg", "per";
++ interrupts = <29>;
++ };
++
++ gpt2: timer@53f8c000 {
++ compatible = "fsl,imx25-gpt", "fsl,imx31-gpt";
++ reg = <0x53f8c000 0x4000>;
++ clocks = <&clks 9>, <&clks 47>;
++ clock-names = "ipg", "per";
++ interrupts = <53>;
++ };
++
++ gpt1: timer@53f90000 {
++ compatible = "fsl,imx25-gpt", "fsl,imx31-gpt";
++ reg = <0x53f90000 0x4000>;
++ clocks = <&clks 9>, <&clks 47>;
++ clock-names = "ipg", "per";
++ interrupts = <54>;
++ };
++
++ epit1: timer@53f94000 {
++ compatible = "fsl,imx25-epit";
++ reg = <0x53f94000 0x4000>;
++ interrupts = <28>;
++ };
++
++ epit2: timer@53f98000 {
++ compatible = "fsl,imx25-epit";
++ reg = <0x53f98000 0x4000>;
++ interrupts = <27>;
++ };
++
++ gpio4: gpio@53f9c000 {
++ compatible = "fsl,imx25-gpio", "fsl,imx35-gpio";
++ reg = <0x53f9c000 0x4000>;
++ interrupts = <23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ pwm2: pwm@53fa0000 {
++ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
++ #pwm-cells = <2>;
++ reg = <0x53fa0000 0x4000>;
++ clocks = <&clks 106>, <&clks 36>;
++ clock-names = "ipg", "per";
++ interrupts = <36>;
++ };
++
++ gpio3: gpio@53fa4000 {
++ compatible = "fsl,imx25-gpio", "fsl,imx35-gpio";
++ reg = <0x53fa4000 0x4000>;
++ interrupts = <16>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ pwm3: pwm@53fa8000 {
++ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
++ #pwm-cells = <2>;
++ reg = <0x53fa8000 0x4000>;
++ clocks = <&clks 107>, <&clks 36>;
++ clock-names = "ipg", "per";
++ interrupts = <41>;
++ };
++
++ esdhc1: esdhc@53fb4000 {
++ compatible = "fsl,imx25-esdhc";
++ reg = <0x53fb4000 0x4000>;
++ interrupts = <9>;
++ clocks = <&clks 86>, <&clks 63>, <&clks 45>;
++ clock-names = "ipg", "ahb", "per";
++ status = "disabled";
++ };
++
++ esdhc2: esdhc@53fb8000 {
++ compatible = "fsl,imx25-esdhc";
++ reg = <0x53fb8000 0x4000>;
++ interrupts = <8>;
++ clocks = <&clks 87>, <&clks 64>, <&clks 46>;
++ clock-names = "ipg", "ahb", "per";
++ status = "disabled";
++ };
++
++ lcdc: lcdc@53fbc000 {
++ compatible = "fsl,imx25-fb", "fsl,imx21-fb";
++ reg = <0x53fbc000 0x4000>;
++ interrupts = <39>;
++ clocks = <&clks 103>, <&clks 66>, <&clks 49>;
++ clock-names = "ipg", "ahb", "per";
++ status = "disabled";
++ };
++
++ slcdc@53fc0000 {
++ reg = <0x53fc0000 0x4000>;
++ interrupts = <38>;
++ status = "disabled";
++ };
++
++ pwm4: pwm@53fc8000 {
++ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
++ reg = <0x53fc8000 0x4000>;
++ clocks = <&clks 108>, <&clks 36>;
++ clock-names = "ipg", "per";
++ interrupts = <42>;
++ };
++
++ gpio1: gpio@53fcc000 {
++ compatible = "fsl,imx25-gpio", "fsl,imx35-gpio";
++ reg = <0x53fcc000 0x4000>;
++ interrupts = <52>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio2: gpio@53fd0000 {
++ compatible = "fsl,imx25-gpio", "fsl,imx35-gpio";
++ reg = <0x53fd0000 0x4000>;
++ interrupts = <51>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ sdma@53fd4000 {
++ compatible = "fsl,imx25-sdma", "fsl,imx35-sdma";
++ reg = <0x53fd4000 0x4000>;
++ clocks = <&clks 112>, <&clks 68>;
++ clock-names = "ipg", "ahb";
++ #dma-cells = <3>;
++ interrupts = <34>;
++ };
++
++ wdog@53fdc000 {
++ compatible = "fsl,imx25-wdt", "fsl,imx21-wdt";
++ reg = <0x53fdc000 0x4000>;
++ clocks = <&clks 126>;
++ clock-names = "";
++ interrupts = <55>;
++ };
++
++ pwm1: pwm@53fe0000 {
++ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
++ #pwm-cells = <2>;
++ reg = <0x53fe0000 0x4000>;
++ clocks = <&clks 105>, <&clks 36>;
++ clock-names = "ipg", "per";
++ interrupts = <26>;
++ };
++
++ iim: iim@53ff0000 {
++ compatible = "fsl,imx25-iim", "fsl,imx27-iim";
++ reg = <0x53ff0000 0x4000>;
++ interrupts = <19>;
++ clocks = <&clks 99>;
++ };
++
++ usbphy1: usbphy@1 {
++ compatible = "nop-usbphy";
++ status = "disabled";
++ };
++
++ usbphy2: usbphy@2 {
++ compatible = "nop-usbphy";
++ status = "disabled";
++ };
++
++ usbotg: usb@53ff4000 {
++ compatible = "fsl,imx25-usb", "fsl,imx27-usb";
++ reg = <0x53ff4000 0x0200>;
++ interrupts = <37>;
++ clocks = <&clks 9>, <&clks 70>, <&clks 8>;
++ clock-names = "ipg", "ahb", "per";
++ fsl,usbmisc = <&usbmisc 0>;
++ status = "disabled";
++ };
++
++ usbhost1: usb@53ff4400 {
++ compatible = "fsl,imx25-usb", "fsl,imx27-usb";
++ reg = <0x53ff4400 0x0200>;
++ interrupts = <35>;
++ clocks = <&clks 9>, <&clks 70>, <&clks 8>;
++ clock-names = "ipg", "ahb", "per";
++ fsl,usbmisc = <&usbmisc 1>;
++ status = "disabled";
++ };
++
++ usbmisc: usbmisc@53ff4600 {
++ #index-cells = <1>;
++ compatible = "fsl,imx25-usbmisc";
++ clocks = <&clks 9>, <&clks 70>, <&clks 8>;
++ clock-names = "ipg", "ahb", "per";
++ reg = <0x53ff4600 0x00f>;
++ status = "disabled";
++ };
++
++ dryice@53ffc000 {
++ compatible = "fsl,imx25-dryice", "fsl,imx25-rtc";
++ reg = <0x53ffc000 0x4000>;
++ clocks = <&clks 81>;
++ clock-names = "ipg";
++ interrupts = <25>;
++ };
++ };
++
++ emi@80000000 {
++ compatible = "fsl,emi-bus", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x80000000 0x3b002000>;
++ ranges;
++
++ nfc: nand@bb000000 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ compatible = "fsl,imx25-nand";
++ reg = <0xbb000000 0x2000>;
++ clocks = <&clks 50>;
++ clock-names = "";
++ interrupts = <33>;
++ status = "disabled";
++ };
++ };
++ };
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx25-karo-tx25.dts linux-openelec/arch/arm/boot/dts/imx25-karo-tx25.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx25-karo-tx25.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx25-karo-tx25.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -16,6 +16,10 @@
+ model = "Ka-Ro TX25";
+ compatible = "karo,imx25-tx25", "fsl,imx25";
+
++ chosen {
++ stdout-path = &uart1;
++ };
++
+ memory {
+ reg = <0x80000000 0x02000000 0x90000000 0x02000000>;
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx27-apf27.dts linux-openelec/arch/arm/boot/dts/imx27-apf27.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx27-apf27.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx27-apf27.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -29,6 +29,7 @@
+
+ osc26m {
+ compatible = "fsl,imx-osc26m", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx27.dtsi linux-openelec/arch/arm/boot/dts/imx27.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx27.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx27.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -13,6 +13,7 @@
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -46,6 +47,7 @@
+
+ osc26m {
+ compatible = "fsl,imx-osc26m", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts linux-openelec/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -15,6 +15,10 @@
+ model = "Phytec pca100 rapid development kit";
+ compatible = "phytec,imx27-pca100-rdk", "phytec,imx27-pca100", "fsl,imx27";
+
++ chosen {
++ stdout-path = &uart1;
++ };
++
+ display: display {
+ model = "Primeview-PD050VL1";
+ native-mode = <&timing0>;
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx28.dtsi linux-openelec/arch/arm/boot/dts/imx28.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx28.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx28.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -840,7 +840,8 @@
+ compatible = "fsl,imx28-lcdif";
+ reg = <0x80030000 0x2000>;
+ interrupts = <38>;
+- clocks = <&clks 55>;
++ clocks = <&clks 55>, <&clks 55>;
++ clock-names = "pix", "axi";
+ dmas = <&dma_apbh 13>;
+ dma-names = "rx";
+ status = "disabled";
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx51-babbage.dts linux-openelec/arch/arm/boot/dts/imx51-babbage.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx51-babbage.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx51-babbage.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -17,6 +17,10 @@
+ model = "Freescale i.MX51 Babbage Board";
+ compatible = "fsl,imx51-babbage", "fsl,imx51";
+
++ chosen {
++ stdout-path = &uart1;
++ };
++
+ memory {
+ reg = <0x90000000 0x20000000>;
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx51.dtsi linux-openelec/arch/arm/boot/dts/imx51.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx51.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx51.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -15,6 +15,7 @@
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -43,21 +44,25 @@
+
+ ckil {
+ compatible = "fsl,imx-ckil", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ ckih1 {
+ compatible = "fsl,imx-ckih1", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ ckih2 {
+ compatible = "fsl,imx-ckih2", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx53.dtsi linux-openelec/arch/arm/boot/dts/imx53.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx53.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx53.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -15,6 +15,7 @@
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -59,21 +60,25 @@
+
+ ckil {
+ compatible = "fsl,imx-ckil", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ ckih1 {
+ compatible = "fsl,imx-ckih1", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <22579200>;
+ };
+
+ ckih2 {
+ compatible = "fsl,imx-ckih2", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx53-mba53.dts linux-openelec/arch/arm/boot/dts/imx53-mba53.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx53-mba53.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx53-mba53.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -25,6 +25,10 @@
+ enable-active-low;
+ };
+
++ chosen {
++ stdout-path = &uart2;
++ };
++
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm2 0 50000>;
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-dfi-fs700-m60.dts linux-openelec/arch/arm/boot/dts/imx6dl-dfi-fs700-m60.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-dfi-fs700-m60.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-dfi-fs700-m60.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Sascha Hauer <s.hauer@pengutronix.de>
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef __DTS_V1__
++#define __DTS_V1__
++/dts-v1/;
++#endif
++
++#include "imx6dl.dtsi"
++#include "imx6qdl-dfi-fs700-m60.dtsi"
++
++/ {
++ model = "DFI FS700-M60-6DL i.MX6dl Q7 Board";
++ compatible = "dfi,fs700-m60-6dl", "dfi,fs700e-m60", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl.dtsi linux-openelec/arch/arm/boot/dts/imx6dl.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6dl.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -8,6 +8,7 @@
+ *
+ */
+
++#include <dt-bindings/interrupt-controller/irq.h>
+ #include "imx6dl-pinfunc.h"
+ #include "imx6qdl.dtsi"
+
+@@ -21,6 +22,26 @@
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2>;
++ operating-points = <
++ /* kHz uV */
++ 996000 1275000
++ 792000 1175000
++ 396000 1075000
++ >;
++ fsl,soc-operating-points = <
++ /* ARM kHz SOC-PU uV */
++ 996000 1175000
++ 792000 1175000
++ 396000 1175000
++ >;
++ clock-latency = <61036>; /* two CLK32 periods */
++ clocks = <&clks 104>, <&clks 6>, <&clks 16>,
++ <&clks 17>, <&clks 170>;
++ clock-names = "arm", "pll2_pfd2_396m", "step",
++ "pll1_sw", "pll1_sys";
++ arm-supply = <&reg_arm>;
++ pu-supply = <&reg_pu>;
++ soc-supply = <&reg_soc>;
+ };
+
+ cpu@1 {
+@@ -32,40 +53,124 @@
+ };
+
+ soc {
++
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks 171>, <&clks 6>, <&clks 11>, <&clks 104>, <&clks 172>, <&clks 58>,
++ <&clks 18>, <&clks 60>, <&clks 20>, <&clks 3>, <&clks 22> , <&clks 8>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc", "axi_sel", "pll3_pfd1_540m";
++ interrupts = <0 107 0x04>, <0 112 0x4>;
++ interrupt-names = "irq_busfreq_0", "irq_busfreq_1";
++ fsl,max_ddr_freq = <400000000>;
++ };
++
++ gpu@00130000 {
++ compatible = "fsl,imx6dl-gpu", "fsl,imx6q-gpu";
++ reg = <0x00130000 0x4000>, <0x00134000 0x4000>,
++ <0x0 0x0>;
++ reg-names = "iobase_3d", "iobase_2d",
++ "phys_baseaddr";
++ interrupts = <0 9 0x04>, <0 10 0x04>;
++ interrupt-names = "irq_3d", "irq_2d";
++ clocks = <&clks 143>, <&clks 27>,
++ <&clks 121>, <&clks 122>,
++ <&clks 0>;
++ clock-names = "gpu2d_axi_clk", "gpu3d_axi_clk",
++ "gpu2d_clk", "gpu3d_clk",
++ "gpu3d_shader_clk";
++ resets = <&src 0>, <&src 3>;
++ reset-names = "gpu3d", "gpu2d";
++ pu-supply = <&reg_pu>;
++ };
++
+ ocram: sram@00900000 {
+ compatible = "mmio-sram";
+ reg = <0x00900000 0x20000>;
+ clocks = <&clks 142>;
+ };
+
++ hdmi_core: hdmi_core@00120000 {
++ compatible = "fsl,imx6dl-hdmi-core";
++ reg = <0x00120000 0x9000>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_video: hdmi_video@020e0000 {
++ compatible = "fsl,imx6dl-hdmi-video";
++ reg = <0x020e0000 0x1000>;
++ reg-names = "hdmi_gpr";
++ interrupts = <0 115 0x04>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_audio: hdmi_audio@00120000 {
++ compatible = "fsl,imx6dl-hdmi-audio";
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ dmas = <&sdma 2 23 0>;
++ dma-names = "tx";
++ status = "disabled";
++ };
++
++ hdmi_cec: hdmi_cec@00120000 {
++ compatible = "fsl,imx6dl-hdmi-cec";
++ interrupts = <0 115 0x04>;
++ status = "disabled";
++ };
++
+ aips1: aips-bus@02000000 {
++ vpu@02040000 {
++ iramsize = <0>;
++ status = "okay";
++ };
++
+ iomuxc: iomuxc@020e0000 {
+ compatible = "fsl,imx6dl-iomuxc";
+ };
+
+ pxp: pxp@020f0000 {
++ compatible = "fsl,imx6dl-pxp-dma";
+ reg = <0x020f0000 0x4000>;
+- interrupts = <0 98 0x04>;
++ interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 133>;
++ clock-names = "pxp-axi";
++ status = "disabled";
+ };
+
+ epdc: epdc@020f4000 {
+ reg = <0x020f4000 0x4000>;
+- interrupts = <0 97 0x04>;
++ interrupts = <0 97 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ lcdif: lcdif@020f8000 {
+ reg = <0x020f8000 0x4000>;
+- interrupts = <0 39 0x04>;
++ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ aips2: aips-bus@02100000 {
++ mipi_dsi: mipi@021e0000 {
++ compatible = "fsl,imx6dl-mipi-dsi";
++ reg = <0x021e0000 0x4000>;
++ interrupts = <0 102 0x04>;
++ gpr = <&gpr>;
++ clocks = <&clks 138>, <&clks 209>;
++ clock-names = "mipi_pllref_clk", "mipi_cfg_clk";
++ status = "disabled";
++ };
++
+ i2c4: i2c@021f8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "fsl,imx1-i2c";
++ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021f8000 0x4000>;
+- interrupts = <0 35 0x04>;
++ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 116>;
+ status = "disabled";
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-gw51xx.dts linux-openelec/arch/arm/boot/dts/imx6dl-gw51xx.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-gw51xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-gw51xx.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-gw51xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 DualLite GW51XX";
++ compatible = "gw,imx6dl-gw51xx", "gw,ventana", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-gw52xx.dts linux-openelec/arch/arm/boot/dts/imx6dl-gw52xx.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-gw52xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-gw52xx.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-gw52xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 DualLite GW52XX";
++ compatible = "gw,imx6dl-gw52xx", "gw,ventana", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-gw53xx.dts linux-openelec/arch/arm/boot/dts/imx6dl-gw53xx.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-gw53xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-gw53xx.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-gw53xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 DualLite GW53XX";
++ compatible = "gw,imx6dl-gw53xx", "gw,ventana", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-gw54xx.dts linux-openelec/arch/arm/boot/dts/imx6dl-gw54xx.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-gw54xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-gw54xx.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-gw54xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 DualLite GW54XX";
++ compatible = "gw,imx6dl-gw54xx", "gw,ventana", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-hummingboard.dts linux-openelec/arch/arm/boot/dts/imx6dl-hummingboard.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-hummingboard.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6dl-hummingboard.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -1,163 +1,13 @@
+ /*
+- * Copyright (C) 2013,2014 Russell King
++ * Copyright (C) 2014 Rabeeh Khoury (rabeeh@solid-run.com)
++ * Based on work by Russell King
+ */
+ /dts-v1/;
+
+ #include "imx6dl.dtsi"
+-#include "imx6qdl-microsom.dtsi"
+-#include "imx6qdl-microsom-ar8035.dtsi"
++#include "imx6qdl-hummingboard.dtsi"
+
+ / {
+- model = "SolidRun HummingBoard DL/Solo";
+- compatible = "solidrun,hummingboard", "fsl,imx6dl";
+-
+- ir_recv: ir-receiver {
+- compatible = "gpio-ir-receiver";
+- gpios = <&gpio1 2 1>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_gpio1_2>;
+- };
+-
+- regulators {
+- compatible = "simple-bus";
+-
+- reg_3p3v: 3p3v {
+- compatible = "regulator-fixed";
+- regulator-name = "3P3V";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- reg_usbh1_vbus: usb-h1-vbus {
+- compatible = "regulator-fixed";
+- enable-active-high;
+- gpio = <&gpio1 0 0>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_usbh1_vbus>;
+- regulator-name = "usb_h1_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- };
+-
+- reg_usbotg_vbus: usb-otg-vbus {
+- compatible = "regulator-fixed";
+- enable-active-high;
+- gpio = <&gpio3 22 0>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_usbotg_vbus>;
+- regulator-name = "usb_otg_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- };
+- };
+-
+- sound-spdif {
+- compatible = "fsl,imx-audio-spdif";
+- model = "imx-spdif";
+- /* IMX6 doesn't implement this yet */
+- spdif-controller = <&spdif>;
+- spdif-out;
+- };
+-};
+-
+-&can1 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_flexcan1>;
+- status = "okay";
+-};
+-
+-&i2c1 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
+-
+- /*
+- * Not fitted on Carrier-1 board... yet
+- status = "okay";
+-
+- rtc: pcf8523@68 {
+- compatible = "nxp,pcf8523";
+- reg = <0x68>;
+- };
+- */
+-};
+-
+-&iomuxc {
+- hummingboard {
+- pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CLK__FLEXCAN1_RX 0x80000000
+- MX6QDL_PAD_SD3_CMD__FLEXCAN1_TX 0x80000000
+- >;
+- };
+-
+- pinctrl_hummingboard_gpio1_2: hummingboard-gpio1_2 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000
+- >;
+- };
+-
+- pinctrl_hummingboard_i2c1: hummingboard-i2c1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_hummingboard_spdif: hummingboard-spdif {
+- fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
+- };
+-
+- pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
+- fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
+- };
+-
+- pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
+- fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
+- };
+-
+- pinctrl_hummingboard_usdhc2_aux: hummingboard-usdhc2-aux {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1f071
+- >;
+- };
+-
+- pinctrl_hummingboard_usdhc2: hummingboard-usdhc2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+- MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+- MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+- MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+- MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+- MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+- >;
+- };
+- };
+-};
+-
+-&spdif {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_spdif>;
+- status = "okay";
+-};
+-
+-&usbh1 {
+- vbus-supply = <&reg_usbh1_vbus>;
+- status = "okay";
+-};
+-
+-&usbotg {
+- vbus-supply = <&reg_usbotg_vbus>;
+- status = "okay";
+-};
+-
+-&usdhc2 {
+- pinctrl-names = "default";
+- pinctrl-0 = <
+- &pinctrl_hummingboard_usdhc2_aux
+- &pinctrl_hummingboard_usdhc2
+- >;
+- vmmc-supply = <&reg_3p3v>;
+- cd-gpios = <&gpio1 4 0>;
+- status = "okay";
++ model = "SolidRun HummingBoard Solo/DualLite";
++ compatible = "solidrun,hummingboard/dl", "fsl,imx6dl";
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-nitrogen6x.dts linux-openelec/arch/arm/boot/dts/imx6dl-nitrogen6x.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-nitrogen6x.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-nitrogen6x.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,21 @@
++/*
++ * Copyright 2013 Boundary Devices, Inc.
++ * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-nitrogen6x.dtsi"
++
++/ {
++ model = "Freescale i.MX6 DualLite Nitrogen6x Board";
++ compatible = "fsl,imx6dl-nitrogen6x", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-phytec-pbab01.dts linux-openelec/arch/arm/boot/dts/imx6dl-phytec-pbab01.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-phytec-pbab01.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-phytec-pbab01.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Christian Hemp, Phytec Messtechnik GmbH
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl-phytec-pfla02.dtsi"
++#include "imx6qdl-phytec-pbab01.dtsi"
++
++/ {
++ model = "Phytec phyFLEX-i.MX6 DualLite/Solo Carrier-Board";
++ compatible = "phytec,imx6dl-pbab01", "phytec,imx6dl-pfla02", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-phytec-pfla02.dtsi linux-openelec/arch/arm/boot/dts/imx6dl-phytec-pfla02.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-phytec-pfla02.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-phytec-pfla02.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,22 @@
++/*
++ * Copyright 2013 Christian Hemp, Phytec Messtechnik GmbH
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include "imx6dl.dtsi"
++#include "imx6qdl-phytec-pfla02.dtsi"
++
++/ {
++ model = "Phytec phyFLEX-i.MX6 DualLite/Solo";
++ compatible = "phytec,imx6dl-pfla02", "fsl,imx6dl";
++
++ memory {
++ reg = <0x10000000 0x20000000>;
++ };
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-pinfunc.h linux-openelec/arch/arm/boot/dts/imx6dl-pinfunc.h
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-pinfunc.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6dl-pinfunc.h 2015-05-06 12:05:43.000000000 -0500
+@@ -755,6 +755,7 @@
+ #define MX6QDL_PAD_GPIO_5__I2C3_SCL 0x230 0x600 0x878 0x6 0x2
+ #define MX6QDL_PAD_GPIO_5__ARM_EVENTI 0x230 0x600 0x000 0x7 0x0
+ #define MX6QDL_PAD_GPIO_6__ESAI_TX_CLK 0x234 0x604 0x840 0x0 0x1
++#define MX6QDL_PAD_GPIO_6__ENET_IRQ 0x234 0x604 0x03c 0x11 0xff000609
+ #define MX6QDL_PAD_GPIO_6__I2C3_SDA 0x234 0x604 0x87c 0x2 0x2
+ #define MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x234 0x604 0x000 0x5 0x0
+ #define MX6QDL_PAD_GPIO_6__SD2_LCTL 0x234 0x604 0x000 0x6 0x0
+@@ -950,6 +951,7 @@
+ #define MX6QDL_PAD_RGMII_TXC__GPIO6_IO19 0x2d8 0x6c0 0x000 0x5 0x0
+ #define MX6QDL_PAD_RGMII_TXC__XTALOSC_REF_CLK_24M 0x2d8 0x6c0 0x000 0x7 0x0
+ #define MX6QDL_PAD_SD1_CLK__SD1_CLK 0x2dc 0x6c4 0x928 0x0 0x1
++#define MX6QDL_PAD_SD1_CLK__OSC32K_32K_OUT 0x2dc 0x6c4 0x000 0x2 0x0
+ #define MX6QDL_PAD_SD1_CLK__GPT_CLKIN 0x2dc 0x6c4 0x000 0x3 0x0
+ #define MX6QDL_PAD_SD1_CLK__GPIO1_IO20 0x2dc 0x6c4 0x000 0x5 0x0
+ #define MX6QDL_PAD_SD1_CMD__SD1_CMD 0x2e0 0x6c8 0x000 0x0 0x0
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-sabreauto.dts linux-openelec/arch/arm/boot/dts/imx6dl-sabreauto.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-sabreauto.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6dl-sabreauto.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -15,3 +15,16 @@
+ model = "Freescale i.MX6 DualLite/Solo SABRE Automotive Board";
+ compatible = "fsl,imx6dl-sabreauto", "fsl,imx6dl";
+ };
++
++&ldb {
++ ipu_id = <0>;
++ sec_ipu_id = <0>;
++};
++
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-sabrelite.dts linux-openelec/arch/arm/boot/dts/imx6dl-sabrelite.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-sabrelite.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-sabrelite.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,20 @@
++/*
++ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-sabrelite.dtsi"
++
++/ {
++ model = "Freescale i.MX6 DualLite SABRE Lite Board";
++ compatible = "fsl,imx6dl-sabrelite", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-sabresd.dts linux-openelec/arch/arm/boot/dts/imx6dl-sabresd.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-sabresd.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6dl-sabresd.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -15,3 +15,20 @@
+ model = "Freescale i.MX6 DualLite SABRE Smart Device Board";
+ compatible = "fsl,imx6dl-sabresd", "fsl,imx6dl";
+ };
++
++&ldb {
++ ipu_id = <0>;
++ sec_ipu_id = <0>;
++};
++
++&pxp {
++ status = "okay";
++};
++
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6dl-sabresd-hdcp.dts linux-openelec/arch/arm/boot/dts/imx6dl-sabresd-hdcp.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6dl-sabresd-hdcp.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6dl-sabresd-hdcp.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "imx6dl-sabresd.dts"
++
++&hdmi_video {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_hdcp>;
++ fsl,hdcp;
++};
++
++&i2c2 {
++ status = "disable";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-arm2.dts linux-openelec/arch/arm/boot/dts/imx6q-arm2.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-arm2.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-arm2.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -23,14 +23,27 @@
+
+ regulators {
+ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- reg_3p3v: 3p3v {
++ reg_3p3v: regulator@0 {
+ compatible = "regulator-fixed";
++ reg = <0>;
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
++
++ reg_usb_otg_vbus: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
+ };
+
+ leds {
+@@ -46,7 +59,7 @@
+
+ &gpmi {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_gpmi_nand_1>;
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
+ status = "disabled"; /* gpmi nand conflicts with SD */
+ };
+
+@@ -54,28 +67,131 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6q-arm2 {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x80000000
+ >;
+ };
+- };
+
+- arm2 {
+- pinctrl_usdhc3_arm2: usdhc3grp-arm2 {
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_KEY_COL2__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_RX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D28__UART2_DTE_CTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3_cdwp: usdhc3cdwp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x80000000
+ MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x80000000
+ >;
+ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
+ };
+ };
+
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_2>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
+ status = "okay";
+ };
+
+@@ -84,8 +200,8 @@
+ wp-gpios = <&gpio6 14 0>;
+ vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_1
+- &pinctrl_usdhc3_arm2>;
++ pinctrl-0 = <&pinctrl_usdhc3
++ &pinctrl_usdhc3_cdwp>;
+ status = "okay";
+ };
+
+@@ -93,13 +209,13 @@
+ non-removable;
+ vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc4_1>;
++ pinctrl-0 = <&pinctrl_usdhc4>;
+ status = "okay";
+ };
+
+ &uart2 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart2_2>;
++ pinctrl-0 = <&pinctrl_uart2>;
+ fsl,dte-mode;
+ fsl,uart-has-rtscts;
+ status = "okay";
+@@ -107,6 +223,6 @@
+
+ &uart4 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart4_1>;
++ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-arm2-hsic.dts linux-openelec/arch/arm/boot/dts/imx6q-arm2-hsic.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-arm2-hsic.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-arm2-hsic.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2013 Freescale Semiconductor, Inc.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include "imx6q-arm2.dts"
++
++&fec {
++ status = "disabled";
++};
++
++&usbh2 {
++ pinctrl-names = "idle", "active";
++ pinctrl-0 = <&pinctrl_usbh2_1>;
++ pinctrl-1 = <&pinctrl_usbh2_2>;
++ osc-clkgate-delay = <0x3>;
++ status = "okay";
++};
++
++&usbh3 {
++ pinctrl-names = "idle", "active";
++ pinctrl-0 = <&pinctrl_usbh3_1>;
++ pinctrl-1 = <&pinctrl_usbh3_2>;
++ osc-clkgate-delay = <0x3>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-cm-fx6.dts linux-openelec/arch/arm/boot/dts/imx6q-cm-fx6.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-cm-fx6.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-cm-fx6.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,107 @@
++/*
++ * Copyright 2013 CompuLab Ltd.
++ *
++ * Author: Valentin Raevsky <valentin@compulab.co.il>
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++
++/ {
++ model = "CompuLab CM-FX6";
++ compatible = "compulab,cm-fx6", "fsl,imx6q";
++
++ memory {
++ reg = <0x10000000 0x80000000>;
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ heartbeat-led {
++ label = "Heartbeat";
++ gpios = <&gpio2 31 0>;
++ linux,default-trigger = "heartbeat";
++ };
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&iomuxc {
++ imx6q-cm-fx6 {
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++ };
++};
++
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart4>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-cubox-i.dts linux-openelec/arch/arm/boot/dts/imx6q-cubox-i.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-cubox-i.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-cubox-i.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -13,4 +13,8 @@
+
+ &sata {
+ status = "okay";
++ fsl,transmit-level-mV = <1104>;
++ fsl,transmit-boost-mdB = <0>;
++ fsl,transmit-atten-16ths = <9>;
++ fsl,no-spread-spectrum;
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-dfi-fs700-m60.dts linux-openelec/arch/arm/boot/dts/imx6q-dfi-fs700-m60.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-dfi-fs700-m60.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-dfi-fs700-m60.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Sascha Hauer <s.hauer@pengutronix.de>
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef __DTS_V1__
++#define __DTS_V1__
++/dts-v1/;
++#endif
++
++#include "imx6q.dtsi"
++#include "imx6qdl-dfi-fs700-m60.dtsi"
++
++/ {
++ model = "DFI FS700-M60-6QD i.MX6qd Q7 Board";
++ compatible = "dfi,fs700-m60-6qd", "dfi,fs700e-m60", "fsl,imx6q";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2015-07-24 18:03:30.208842002 -0500
+@@ -5,11 +5,34 @@
+ #include "imx6qdl-microsom-ar8035.dtsi"
+
+ / {
++ chosen {
++ bootargs = "quiet console=ttymxc0,115200 root=/dev/mmcblk0p2 rw";
++ };
++
++ aliases {
++ mxcfb0 = &mxcfb1;
++ };
++
+ ir_recv: ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio3 9 1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_ir>;
++ linux,rc-map-name = "rc-rc6-mce";
++ };
++
++ pwmleds {
++ compatible = "pwm-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
++
++ front {
++ active-low;
++ label = "imx6:red:front";
++ max-brightness = <248>;
++ pwms = <&pwm1 0 50000>;
++ linux,default-trigger = "heartbeat";
++ };
+ };
+
+ regulators {
+@@ -49,10 +72,62 @@
+ sound-spdif {
+ compatible = "fsl,imx-audio-spdif";
+ model = "imx-spdif";
+- /* IMX6 doesn't implement this yet */
+ spdif-controller = <&spdif>;
+ spdif-out;
+ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <32>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_hdmi>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_i2c2>;
++
++ status = "okay";
++
++ ddc: imx6_hdmi_i2c@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
+ };
+
+ &i2c3 {
+@@ -69,6 +144,19 @@
+
+ &iomuxc {
+ cubox_i {
++ pinctrl_cubox_i_hdmi: cubox-i-hdmi {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_cubox_i_i2c2: cubox-i-i2c2 {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
+ pinctrl_cubox_i_i2c3: cubox-i-i2c3 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
+@@ -82,16 +170,35 @@
+ >;
+ };
+
++ pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
++ fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
++ };
++
+ pinctrl_cubox_i_spdif: cubox-i-spdif {
+ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
+ };
+
++ pinctrl_cubox_i_usbh1: cubox-i-usbh1 {
++ fsl,pins = <MX6QDL_PAD_GPIO_3__USB_H1_OC 0x1b0b0>;
++ };
++
+ pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
+- fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
++ fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
++ };
++
++ pinctrl_cubox_i_usbotg: cubox-i-usbotg {
++ /*
++ * The Cubox-i pulls ID low, but as it's pointless
++ * leaving it as a pull-up, even if it is just 10uA.
++ */
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059
++ MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
++ >;
+ };
+
+ pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
+- fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
++ fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
+ };
+
+ pinctrl_cubox_i_usdhc2_aux: cubox-i-usdhc2-aux {
+@@ -111,29 +218,76 @@
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+ >;
+ };
++
++ pinctrl_cubox_i_usdhc2_100mhz: cubox-i-usdhc2-100mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130b9
++ >;
++ };
++
++ pinctrl_cubox_i_usdhc2_200mhz: cubox-i-usdhc2-200mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130f9
++ >;
++ };
+ };
+ };
+
+ &spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_spdif>;
++ clocks = <&clks 197>, <&clks 0>,
++ <&clks 197>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>;
++ clock-names = "core", "rxtx0",
++ "rxtx1", "rxtx2",
++ "rxtx3", "rxtx4",
++ "rxtx5", "rxtx6",
++ "rxtx7";
+ status = "okay";
+ };
+
+ &usbh1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_usbh1>;
+ vbus-supply = <&reg_usbh1_vbus>;
+ status = "okay";
+ };
+
+ &usbotg {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_usbotg>;
+ vbus-supply = <&reg_usbotg_vbus>;
+ status = "okay";
+ };
+
+ &usdhc2 {
+- pinctrl-names = "default";
++ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
++ pinctrl-1 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_100mhz>;
++ pinctrl-2 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_200mhz>;
+ vmmc-supply = <&reg_3p3v>;
+ cd-gpios = <&gpio1 4 0>;
++ no-1-8-v;
+ status = "okay";
+ };
++
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,199 @@
++/ {
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dummy_reg: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "dummy-supply";
++ };
++
++ reg_usb_otg_vbus: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ chosen {
++ stdout-path = &uart1;
++ };
++};
++
++&ecspi3 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio4 24 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi3>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "sst,sst25vf040b", "m25p80";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ status = "okay";
++ phy-mode = "rgmii";
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-dfi-fs700-m60 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x80000000
++ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x80000000 /* PMIC irq */
++ MX6QDL_PAD_EIM_D26__GPIO3_IO26 0x80000000 /* MAX11801 irq */
++ MX6QDL_PAD_NANDF_D5__GPIO2_IO05 0x000030b0 /* Backlight enable */
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D16__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x80000000 /* card detect */
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_ecspi3: ecspi3grp {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO 0x100b1
++ MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI 0x100b1
++ MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1
++ MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* SPI NOR chipselect */
++ >;
++ };
++ };
++};
++
++&i2c2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ dr_mode = "host";
++ status = "okay";
++};
++
++&usdhc2 { /* module slot */
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc2>;
++ cd-gpios = <&gpio2 2 0>;
++ status = "okay";
++};
++
++&usdhc3 { /* baseboard slot */
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++};
++
++&usdhc4 { /* eMMC */
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ bus-width = <8>;
++ non-removable;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6qdl.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -10,10 +10,16 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++
+ #include "skeleton.dtsi"
++#include <dt-bindings/gpio/gpio.h>
+
+ / {
+ aliases {
++ ethernet0 = &fec;
++ can0 = &can1;
++ can1 = &can2;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -24,6 +30,11 @@
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
++ ipu0 = &ipu1;
++ mmc0 = &usdhc1;
++ mmc1 = &usdhc2;
++ mmc2 = &usdhc3;
++ mmc3 = &usdhc4;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+@@ -33,13 +44,13 @@
+ spi1 = &ecspi2;
+ spi2 = &ecspi3;
+ spi3 = &ecspi4;
++ usbphy0 = &usbphy1;
++ usbphy1 = &usbphy2;
+ };
+
+ intc: interrupt-controller@00a01000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+- #address-cells = <1>;
+- #size-cells = <1>;
+ interrupt-controller;
+ reg = <0x00a01000 0x1000>,
+ <0x00a00100 0x100>;
+@@ -51,20 +62,27 @@
+
+ ckil {
+ compatible = "fsl,imx-ckil", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ ckih1 {
+ compatible = "fsl,imx-ckih1", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+
++ pu_dummy: pudummy_reg {
++ compatible = "fsl,imx6-dummy-pureg"; /* only used in ldo-bypass */
++ };
++
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -75,7 +93,10 @@
+ dma_apbh: dma-apbh@00110000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x00110000 0x2000>;
+- interrupts = <0 13 0x04>, <0 13 0x04>, <0 13 0x04>, <0 13 0x04>;
++ interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
++ <0 13 IRQ_TYPE_LEVEL_HIGH>,
++ <0 13 IRQ_TYPE_LEVEL_HIGH>,
++ <0 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3";
+ #dma-cells = <1>;
+ dma-channels = <4>;
+@@ -88,7 +109,7 @@
+ #size-cells = <1>;
+ reg = <0x00112000 0x2000>, <0x00114000 0x2000>;
+ reg-names = "gpmi-nand", "bch";
+- interrupts = <0 15 0x04>;
++ interrupts = <0 15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "bch";
+ clocks = <&clks 152>, <&clks 153>, <&clks 151>,
+ <&clks 150>, <&clks 149>;
+@@ -109,11 +130,13 @@
+ L2: l2-cache@00a02000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x00a02000 0x1000>;
+- interrupts = <0 92 0x04>;
++ interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
+ cache-unified;
+ cache-level = <2>;
+ arm,tag-latency = <4 2 3>;
+ arm,data-latency = <4 2 3>;
++ arm,dynamic-clk-gating;
++ arm,standby-mode;
+ };
+
+ pcie: pcie@0x01000000 {
+@@ -126,15 +149,22 @@
+ 0x81000000 0 0 0x01f80000 0 0x00010000 /* downstream I/O */
+ 0x82000000 0 0x01000000 0x01000000 0 0x00f00000>; /* non-prefetchable memory */
+ num-lanes = <1>;
+- interrupts = <0 123 0x04>;
+- clocks = <&clks 189>, <&clks 187>, <&clks 206>, <&clks 144>;
+- clock-names = "pcie_ref_125m", "sata_ref_100m", "lvds_gate", "pcie_axi";
++ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "pme";
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 0x7>;
++ interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 144>, <&clks 221>, <&clks 189>, <&clks 187>;
++ clock-names = "pcie_axi", "lvds_gate", "pcie_ref_125m", "sata_ref_100m";
+ status = "disabled";
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+- interrupts = <0 94 0x04>;
++ interrupts = <0 94 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ aips-bus@02000000 { /* AIPS1 */
+@@ -154,7 +184,7 @@
+ spdif: spdif@02004000 {
+ compatible = "fsl,imx35-spdif";
+ reg = <0x02004000 0x4000>;
+- interrupts = <0 52 0x04>;
++ interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 14 18 0>,
+ <&sdma 15 18 0>;
+ dma-names = "rx", "tx";
+@@ -176,9 +206,11 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02008000 0x4000>;
+- interrupts = <0 31 0x04>;
++ interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 112>, <&clks 112>;
+ clock-names = "ipg", "per";
++ dmas = <&sdma 3 7 1>, <&sdma 4 7 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+@@ -187,9 +219,11 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x0200c000 0x4000>;
+- interrupts = <0 32 0x04>;
++ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 113>, <&clks 113>;
+ clock-names = "ipg", "per";
++ dmas = <&sdma 5 7 1>, <&sdma 6 7 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+@@ -198,9 +232,11 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02010000 0x4000>;
+- interrupts = <0 33 0x04>;
++ interrupts = <0 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 114>, <&clks 114>;
+ clock-names = "ipg", "per";
++ dmas = <&sdma 7 7 1>, <&sdma 8 7 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+@@ -209,16 +245,18 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02014000 0x4000>;
+- interrupts = <0 34 0x04>;
++ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 115>, <&clks 115>;
+ clock-names = "ipg", "per";
++ dmas = <&sdma 9 7 1>, <&sdma 10 7 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart1: serial@02020000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02020000 0x4000>;
+- interrupts = <0 26 0x04>;
++ interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 25 4 0>, <&sdma 26 4 0>;
+@@ -227,15 +265,23 @@
+ };
+
+ esai: esai@02024000 {
++ compatible = "fsl,imx6q-esai";
+ reg = <0x02024000 0x4000>;
+- interrupts = <0 51 0x04>;
++ interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 118>;
++ fsl,esai-dma-events = <24 23>;
++ fsl,flags = <1>;
++ status = "disabled";
+ };
+
+ ssi1: ssi@02028000 {
+- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6q-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x02028000 0x4000>;
+- interrupts = <0 46 0x04>;
+- clocks = <&clks 178>;
++ interrupts = <0 46 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 178>, <&clks 157>;
++ clock-names = "ipg", "baud";
+ dmas = <&sdma 37 1 0>,
+ <&sdma 38 1 0>;
+ dma-names = "rx", "tx";
+@@ -245,10 +291,13 @@
+ };
+
+ ssi2: ssi@0202c000 {
+- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6q-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x0202c000 0x4000>;
+- interrupts = <0 47 0x04>;
+- clocks = <&clks 179>;
++ interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 179>, <&clks 158>;
++ clock-names = "ipg", "baud";
+ dmas = <&sdma 41 1 0>,
+ <&sdma 42 1 0>;
+ dma-names = "rx", "tx";
+@@ -258,10 +307,13 @@
+ };
+
+ ssi3: ssi@02030000 {
+- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6q-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x02030000 0x4000>;
+- interrupts = <0 48 0x04>;
+- clocks = <&clks 180>;
++ interrupts = <0 48 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 180>, <&clks 159>;
++ clock-names = "ipg", "baud";
+ dmas = <&sdma 45 1 0>,
+ <&sdma 46 1 0>;
+ dma-names = "rx", "tx";
+@@ -271,8 +323,25 @@
+ };
+
+ asrc: asrc@02034000 {
++ compatible = "fsl,imx53-asrc";
+ reg = <0x02034000 0x4000>;
+- interrupts = <0 50 0x04>;
++ interrupts = <0 50 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 107>, <&clks 156>;
++ clock-names = "core", "dma";
++ dmas = <&sdma 17 20 1>, <&sdma 18 20 1>, <&sdma 19 20 1>,
++ <&sdma 20 20 1>, <&sdma 21 20 1>, <&sdma 22 20 1>;
++ dma-names = "rxa", "rxb", "rxc",
++ "txa", "txb", "txc";
++ status = "okay";
++ };
++
++ asrc_p2p: asrc_p2p {
++ compatible = "fsl,imx6q-asrc-p2p";
++ fsl,output-rate = <48000>;
++ fsl,output-width = <16>;
++ fsl,asrc-dma-rx-events = <17 18 19>;
++ fsl,asrc-dma-tx-events = <20 21 22>;
++ status = "okay";
+ };
+
+ spba@0203c000 {
+@@ -281,8 +350,19 @@
+ };
+
+ vpu: vpu@02040000 {
++ compatible = "fsl,imx6-vpu";
+ reg = <0x02040000 0x3c000>;
+- interrupts = <0 3 0x04 0 12 0x04>;
++ reg-names = "vpu_regs";
++ interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>,
++ <0 12 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "vpu_jpu_irq", "vpu_ipi_irq";
++ clocks = <&clks 168>, <&clks 140>, <&clks 142>;
++ clock-names = "vpu_clk", "mmdc_ch0_axi", "ocram";
++ iramsize = <0x21000>;
++ iram = <&ocram>;
++ resets = <&src 1>;
++ pu-supply = <&reg_pu>;
++ status = "disabled";
+ };
+
+ aipstz@0207c000 { /* AIPSTZ1 */
+@@ -293,7 +373,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02080000 0x4000>;
+- interrupts = <0 83 0x04>;
++ interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 62>, <&clks 145>;
+ clock-names = "ipg", "per";
+ };
+@@ -302,7 +382,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02084000 0x4000>;
+- interrupts = <0 84 0x04>;
++ interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 62>, <&clks 146>;
+ clock-names = "ipg", "per";
+ };
+@@ -311,7 +391,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02088000 0x4000>;
+- interrupts = <0 85 0x04>;
++ interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 62>, <&clks 147>;
+ clock-names = "ipg", "per";
+ };
+@@ -320,7 +400,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x0208c000 0x4000>;
+- interrupts = <0 86 0x04>;
++ interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 62>, <&clks 148>;
+ clock-names = "ipg", "per";
+ };
+@@ -328,23 +408,25 @@
+ can1: flexcan@02090000 {
+ compatible = "fsl,imx6q-flexcan";
+ reg = <0x02090000 0x4000>;
+- interrupts = <0 110 0x04>;
++ interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 108>, <&clks 109>;
+ clock-names = "ipg", "per";
++ status = "disabled";
+ };
+
+ can2: flexcan@02094000 {
+ compatible = "fsl,imx6q-flexcan";
+ reg = <0x02094000 0x4000>;
+- interrupts = <0 111 0x04>;
++ interrupts = <0 111 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 110>, <&clks 111>;
+ clock-names = "ipg", "per";
++ status = "disabled";
+ };
+
+ gpt: gpt@02098000 {
+ compatible = "fsl,imx6q-gpt", "fsl,imx31-gpt";
+ reg = <0x02098000 0x4000>;
+- interrupts = <0 55 0x04>;
++ interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 119>, <&clks 120>;
+ clock-names = "ipg", "per";
+ };
+@@ -352,7 +434,8 @@
+ gpio1: gpio@0209c000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x0209c000 0x4000>;
+- interrupts = <0 66 0x04 0 67 0x04>;
++ interrupts = <0 66 IRQ_TYPE_LEVEL_HIGH>,
++ <0 67 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -362,7 +445,8 @@
+ gpio2: gpio@020a0000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a0000 0x4000>;
+- interrupts = <0 68 0x04 0 69 0x04>;
++ interrupts = <0 68 IRQ_TYPE_LEVEL_HIGH>,
++ <0 69 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -372,7 +456,8 @@
+ gpio3: gpio@020a4000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a4000 0x4000>;
+- interrupts = <0 70 0x04 0 71 0x04>;
++ interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>,
++ <0 71 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -382,7 +467,8 @@
+ gpio4: gpio@020a8000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a8000 0x4000>;
+- interrupts = <0 72 0x04 0 73 0x04>;
++ interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>,
++ <0 73 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -392,7 +478,8 @@
+ gpio5: gpio@020ac000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020ac000 0x4000>;
+- interrupts = <0 74 0x04 0 75 0x04>;
++ interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>,
++ <0 75 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -402,7 +489,8 @@
+ gpio6: gpio@020b0000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020b0000 0x4000>;
+- interrupts = <0 76 0x04 0 77 0x04>;
++ interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>,
++ <0 77 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -412,7 +500,8 @@
+ gpio7: gpio@020b4000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020b4000 0x4000>;
+- interrupts = <0 78 0x04 0 79 0x04>;
++ interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>,
++ <0 79 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -421,20 +510,20 @@
+
+ kpp: kpp@020b8000 {
+ reg = <0x020b8000 0x4000>;
+- interrupts = <0 82 0x04>;
++ interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ wdog1: wdog@020bc000 {
+ compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
+ reg = <0x020bc000 0x4000>;
+- interrupts = <0 80 0x04>;
++ interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 0>;
+ };
+
+ wdog2: wdog@020c0000 {
+ compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
+ reg = <0x020c0000 0x4000>;
+- interrupts = <0 81 0x04>;
++ interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 0>;
+ status = "disabled";
+ };
+@@ -442,14 +531,17 @@
+ clks: ccm@020c4000 {
+ compatible = "fsl,imx6q-ccm";
+ reg = <0x020c4000 0x4000>;
+- interrupts = <0 87 0x04 0 88 0x04>;
++ interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>,
++ <0 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ };
+
+ anatop: anatop@020c8000 {
+ compatible = "fsl,imx6q-anatop", "syscon", "simple-bus";
+ reg = <0x020c8000 0x1000>;
+- interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
++ interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>,
++ <0 54 IRQ_TYPE_LEVEL_HIGH>,
++ <0 127 IRQ_TYPE_LEVEL_HIGH>;
+
+ regulator-1p1@110 {
+ compatible = "fsl,anatop-regulator";
+@@ -495,7 +587,7 @@
+
+ reg_arm: regulator-vddcore@140 {
+ compatible = "fsl,anatop-regulator";
+- regulator-name = "cpu";
++ regulator-name = "vddarm";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+@@ -515,7 +607,6 @@
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+- regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+@@ -547,23 +638,38 @@
+
+ tempmon: tempmon {
+ compatible = "fsl,imx6q-tempmon";
+- interrupts = <0 49 0x04>;
++ interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,tempmon = <&anatop>;
+ fsl,tempmon-data = <&ocotp>;
++ clocks = <&clks 172>;
+ };
+
+ usbphy1: usbphy@020c9000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020c9000 0x1000>;
+- interrupts = <0 44 0x04>;
++ interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 182>;
++ fsl,anatop = <&anatop>;
+ };
+
+ usbphy2: usbphy@020ca000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020ca000 0x1000>;
+- interrupts = <0 45 0x04>;
++ interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 183>;
++ fsl,anatop = <&anatop>;
++ };
++
++ usbphy_nop1: usbphy_nop1 {
++ compatible = "usb-nop-xceiv";
++ clocks = <&clks 182>;
++ clock-names = "main_clk";
++ };
++
++ usbphy_nop2: usbphy_nop2 {
++ compatible = "usb-nop-xceiv";
++ clocks = <&clks 182>;
++ clock-names = "main_clk";
+ };
+
+ snvs@020cc000 {
+@@ -575,31 +681,39 @@
+ snvs-rtc-lp@34 {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ reg = <0x34 0x58>;
+- interrupts = <0 19 0x04 0 20 0x04>;
++ interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>,
++ <0 20 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ epit1: epit@020d0000 { /* EPIT1 */
+ reg = <0x020d0000 0x4000>;
+- interrupts = <0 56 0x04>;
++ interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ epit2: epit@020d4000 { /* EPIT2 */
+ reg = <0x020d4000 0x4000>;
+- interrupts = <0 57 0x04>;
++ interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ src: src@020d8000 {
+ compatible = "fsl,imx6q-src", "fsl,imx51-src";
+ reg = <0x020d8000 0x4000>;
+- interrupts = <0 91 0x04 0 96 0x04>;
++ interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>,
++ <0 96 IRQ_TYPE_LEVEL_HIGH>;
+ #reset-cells = <1>;
+ };
+
+ gpc: gpc@020dc000 {
+ compatible = "fsl,imx6q-gpc";
+ reg = <0x020dc000 0x4000>;
+- interrupts = <0 89 0x04 0 90 0x04>;
++ interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
++ <0 90 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 122>, <&clks 74>, <&clks 121>,
++ <&clks 26>, <&clks 143>, <&clks 168>, <&clks 62>;
++ clock-names = "gpu3d_core", "gpu3d_shader", "gpu2d_core",
++ "gpu2d_axi", "openvg_axi", "vpu_axi", "ipg";
++ pu-supply = <&reg_pu>;
+ };
+
+ gpr: iomuxc-gpr@020e0000 {
+@@ -610,778 +724,40 @@
+ iomuxc: iomuxc@020e0000 {
+ compatible = "fsl,imx6dl-iomuxc", "fsl,imx6q-iomuxc";
+ reg = <0x020e0000 0x4000>;
+-
+- audmux {
+- pinctrl_audmux_1: audmux-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x80000000
+- MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x80000000
+- MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x80000000
+- MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x80000000
+- >;
+- };
+-
+- pinctrl_audmux_2: audmux-2 {
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x80000000
+- MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x80000000
+- MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x80000000
+- MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x80000000
+- >;
+- };
+-
+- pinctrl_audmux_3: audmux-3 {
+- fsl,pins = <
+- MX6QDL_PAD_DISP0_DAT16__AUD5_TXC 0x80000000
+- MX6QDL_PAD_DISP0_DAT18__AUD5_TXFS 0x80000000
+- MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x80000000
+- >;
+- };
+- };
+-
+- ecspi1 {
+- pinctrl_ecspi1_1: ecspi1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+- MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+- MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+- >;
+- };
+-
+- pinctrl_ecspi1_2: ecspi1grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL1__ECSPI1_MISO 0x100b1
+- MX6QDL_PAD_KEY_ROW0__ECSPI1_MOSI 0x100b1
+- MX6QDL_PAD_KEY_COL0__ECSPI1_SCLK 0x100b1
+- >;
+- };
+- };
+-
+- ecspi3 {
+- pinctrl_ecspi3_1: ecspi3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO 0x100b1
+- MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI 0x100b1
+- MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1
+- >;
+- };
+- };
+-
+- enet {
+- pinctrl_enet_1: enetgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+- MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
+- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+- MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
+- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
+- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+- MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
+- >;
+- };
+-
+- pinctrl_enet_2: enetgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL1__ENET_MDIO 0x1b0b0
+- MX6QDL_PAD_KEY_COL2__ENET_MDC 0x1b0b0
+- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
+- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+- MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
+- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
+- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+- >;
+- };
+-
+- pinctrl_enet_3: enetgrp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+- MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
+- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+- MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
+- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
+- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+- MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+- >;
+- };
+- };
+-
+- esai {
+- pinctrl_esai_1: esaigrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_RXD0__ESAI_TX_HF_CLK 0x1b030
+- MX6QDL_PAD_ENET_CRS_DV__ESAI_TX_CLK 0x1b030
+- MX6QDL_PAD_ENET_RXD1__ESAI_TX_FS 0x1b030
+- MX6QDL_PAD_ENET_TX_EN__ESAI_TX3_RX2 0x1b030
+- MX6QDL_PAD_ENET_TXD1__ESAI_TX2_RX3 0x1b030
+- MX6QDL_PAD_ENET_TXD0__ESAI_TX4_RX1 0x1b030
+- MX6QDL_PAD_ENET_MDC__ESAI_TX5_RX0 0x1b030
+- MX6QDL_PAD_NANDF_CS2__ESAI_TX0 0x1b030
+- MX6QDL_PAD_NANDF_CS3__ESAI_TX1 0x1b030
+- >;
+- };
+-
+- pinctrl_esai_2: esaigrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_CRS_DV__ESAI_TX_CLK 0x1b030
+- MX6QDL_PAD_ENET_RXD1__ESAI_TX_FS 0x1b030
+- MX6QDL_PAD_ENET_TX_EN__ESAI_TX3_RX2 0x1b030
+- MX6QDL_PAD_GPIO_5__ESAI_TX2_RX3 0x1b030
+- MX6QDL_PAD_ENET_TXD0__ESAI_TX4_RX1 0x1b030
+- MX6QDL_PAD_ENET_MDC__ESAI_TX5_RX0 0x1b030
+- MX6QDL_PAD_GPIO_17__ESAI_TX0 0x1b030
+- MX6QDL_PAD_NANDF_CS3__ESAI_TX1 0x1b030
+- MX6QDL_PAD_ENET_MDIO__ESAI_RX_CLK 0x1b030
+- MX6QDL_PAD_GPIO_9__ESAI_RX_FS 0x1b030
+- >;
+- };
+- };
+-
+- flexcan1 {
+- pinctrl_flexcan1_1: flexcan1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x80000000
+- MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x80000000
+- >;
+- };
+-
+- pinctrl_flexcan1_2: flexcan1grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_7__FLEXCAN1_TX 0x80000000
+- MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x80000000
+- >;
+- };
+- };
+-
+- flexcan2 {
+- pinctrl_flexcan2_1: flexcan2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x80000000
+- MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x80000000
+- >;
+- };
+- };
+-
+- gpmi-nand {
+- pinctrl_gpmi_nand_1: gpmi-nand-1 {
+- fsl,pins = <
+- MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
+- MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
+- MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
+- MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
+- MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
+- MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
+- MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
+- MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
+- MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
+- MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
+- MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
+- MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
+- MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
+- MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
+- MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
+- MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
+- MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
+- >;
+- };
+- };
+-
+- hdmi_hdcp {
+- pinctrl_hdmi_hdcp_1: hdmihdcpgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL3__HDMI_TX_DDC_SCL 0x4001b8b1
+- MX6QDL_PAD_KEY_ROW3__HDMI_TX_DDC_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_hdmi_hdcp_2: hdmihdcpgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_EB2__HDMI_TX_DDC_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D16__HDMI_TX_DDC_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_hdmi_hdcp_3: hdmihdcpgrp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_EB2__HDMI_TX_DDC_SCL 0x4001b8b1
+- MX6QDL_PAD_KEY_ROW3__HDMI_TX_DDC_SDA 0x4001b8b1
+- >;
+- };
+- };
+-
+- hdmi_cec {
+- pinctrl_hdmi_cec_1: hdmicecgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_A25__HDMI_TX_CEC_LINE 0x1f8b0
+- >;
+- };
+-
+- pinctrl_hdmi_cec_2: hdmicecgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
+- >;
+- };
+- };
+-
+- i2c1 {
+- pinctrl_i2c1_1: i2c1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c1_2: i2c1grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
+- MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
+- >;
+- };
+- };
+-
+- i2c2 {
+- pinctrl_i2c2_1: i2c2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D16__I2C2_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c2_2: i2c2grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+- MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c2_3: i2c2grp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
+- MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+- >;
+- };
+- };
+-
+- i2c3 {
+- pinctrl_i2c3_1: i2c3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c3_2: i2c3grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+- MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c3_3: i2c3grp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
+- MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c3_4: i2c3grp-4 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+- >;
+- };
+- };
+-
+- ipu1 {
+- pinctrl_ipu1_1: ipu1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
+- MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
+- MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10
+- MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10
+- MX6QDL_PAD_DI0_PIN4__IPU1_DI0_PIN04 0x80000000
+- MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10
+- MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10
+- MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10
+- MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10
+- MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10
+- MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10
+- MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10
+- MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10
+- MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10
+- MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10
+- MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10
+- MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10
+- MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10
+- MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10
+- MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10
+- MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10
+- MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10
+- MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10
+- MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10
+- MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10
+- MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10
+- MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10
+- MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10
+- MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10
+- >;
+- };
+-
+- pinctrl_ipu1_2: ipu1grp-2 { /* parallel camera */
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x80000000
+- MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x80000000
+- MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x80000000
+- MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x80000000
+- MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x80000000
+- MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x80000000
+- MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x80000000
+- MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x80000000
+- MX6QDL_PAD_CSI0_DATA_EN__IPU1_CSI0_DATA_EN 0x80000000
+- MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x80000000
+- MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x80000000
+- MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x80000000
+- >;
+- };
+-
+- pinctrl_ipu1_3: ipu1grp-3 { /* parallel port 16-bit */
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT4__IPU1_CSI0_DATA04 0x80000000
+- MX6QDL_PAD_CSI0_DAT5__IPU1_CSI0_DATA05 0x80000000
+- MX6QDL_PAD_CSI0_DAT6__IPU1_CSI0_DATA06 0x80000000
+- MX6QDL_PAD_CSI0_DAT7__IPU1_CSI0_DATA07 0x80000000
+- MX6QDL_PAD_CSI0_DAT8__IPU1_CSI0_DATA08 0x80000000
+- MX6QDL_PAD_CSI0_DAT9__IPU1_CSI0_DATA09 0x80000000
+- MX6QDL_PAD_CSI0_DAT10__IPU1_CSI0_DATA10 0x80000000
+- MX6QDL_PAD_CSI0_DAT11__IPU1_CSI0_DATA11 0x80000000
+- MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x80000000
+- MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x80000000
+- MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x80000000
+- MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x80000000
+- MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x80000000
+- MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x80000000
+- MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x80000000
+- MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x80000000
+- MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x80000000
+- MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x80000000
+- MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x80000000
+- >;
+- };
+- };
+-
+- mlb {
+- pinctrl_mlb_1: mlbgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_3__MLB_CLK 0x71
+- MX6QDL_PAD_GPIO_6__MLB_SIG 0x71
+- MX6QDL_PAD_GPIO_2__MLB_DATA 0x71
+- >;
+- };
+-
+- pinctrl_mlb_2: mlbgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_TXD1__MLB_CLK 0x71
+- MX6QDL_PAD_GPIO_6__MLB_SIG 0x71
+- MX6QDL_PAD_GPIO_2__MLB_DATA 0x71
+- >;
+- };
+- };
+-
+- pwm0 {
+- pinctrl_pwm0_1: pwm0grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
+- >;
+- };
+- };
+-
+- pwm3 {
+- pinctrl_pwm3_1: pwm3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
+- >;
+- };
+- };
+-
+- spdif {
+- pinctrl_spdif_1: spdifgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL3__SPDIF_IN 0x1b0b0
+- >;
+- };
+-
+- pinctrl_spdif_2: spdifgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_16__SPDIF_IN 0x1b0b0
+- MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0
+- >;
+- };
+-
+- pinctrl_spdif_3: spdifgrp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_RXD0__SPDIF_OUT 0x1b0b0
+- >;
+- };
+- };
+-
+- uart1 {
+- pinctrl_uart1_1: uart1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+- MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+- >;
+- };
+- };
+-
+- uart2 {
+- pinctrl_uart2_1: uart2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
+- >;
+- };
+-
+- pinctrl_uart2_2: uart2grp-2 { /* DTE mode */
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D26__UART2_RX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D27__UART2_TX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D28__UART2_DTE_CTS_B 0x1b0b1
+- MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x1b0b1
+- >;
+- };
+- };
+-
+- uart3 {
+- pinctrl_uart3_1: uart3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD4_CLK__UART3_RX_DATA 0x1b0b1
+- MX6QDL_PAD_SD4_CMD__UART3_TX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D30__UART3_CTS_B 0x1b0b1
+- MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
+- >;
+- };
+-
+- pinctrl_uart3_2: uart3grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1
+- MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
+- >;
+- };
+- };
+-
+- uart4 {
+- pinctrl_uart4_1: uart4grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+- MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+- >;
+- };
+- };
+-
+- usbotg {
+- pinctrl_usbotg_1: usbotggrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg_2: usbotggrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+- >;
+- };
+- };
+-
+- usbh2 {
+- pinctrl_usbh2_1: usbh2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_RGMII_TXC__USB_H2_DATA 0x40013030
+- MX6QDL_PAD_RGMII_TX_CTL__USB_H2_STROBE 0x40013030
+- >;
+- };
+-
+- pinctrl_usbh2_2: usbh2grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_RGMII_TX_CTL__USB_H2_STROBE 0x40017030
+- >;
+- };
+- };
+-
+- usbh3 {
+- pinctrl_usbh3_1: usbh3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_RGMII_RX_CTL__USB_H3_DATA 0x40013030
+- MX6QDL_PAD_RGMII_RXC__USB_H3_STROBE 0x40013030
+- >;
+- };
+-
+- pinctrl_usbh3_2: usbh3grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_RGMII_RXC__USB_H3_STROBE 0x40017030
+- >;
+- };
+- };
+-
+- usdhc1 {
+- pinctrl_usdhc1_1: usdhc1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
+- MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
+- MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+- MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+- MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+- MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+- MX6QDL_PAD_NANDF_D0__SD1_DATA4 0x17059
+- MX6QDL_PAD_NANDF_D1__SD1_DATA5 0x17059
+- MX6QDL_PAD_NANDF_D2__SD1_DATA6 0x17059
+- MX6QDL_PAD_NANDF_D3__SD1_DATA7 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc1_2: usdhc1grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
+- MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
+- MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+- MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+- MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+- MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+- >;
+- };
+- };
+-
+- usdhc2 {
+- pinctrl_usdhc2_1: usdhc2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+- MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+- MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+- MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+- MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+- MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+- MX6QDL_PAD_NANDF_D4__SD2_DATA4 0x17059
+- MX6QDL_PAD_NANDF_D5__SD2_DATA5 0x17059
+- MX6QDL_PAD_NANDF_D6__SD2_DATA6 0x17059
+- MX6QDL_PAD_NANDF_D7__SD2_DATA7 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc2_2: usdhc2grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+- MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+- MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+- MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+- MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+- MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+- >;
+- };
+- };
+-
+- usdhc3 {
+- pinctrl_usdhc3_1: usdhc3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+- MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+- MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+- MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+- MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+- MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+- MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
+- MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
+- MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
+- MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc3_1_100mhz: usdhc3grp-1-100mhz { /* 100Mhz */
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
+- MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
+- MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+- MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+- MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+- MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+- MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170b9
+- MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170b9
+- MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170b9
+- MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9
+- >;
+- };
+-
+- pinctrl_usdhc3_1_200mhz: usdhc3grp-1-200mhz { /* 200Mhz */
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
+- MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
+- MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+- MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+- MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+- MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+- MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170f9
+- MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170f9
+- MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170f9
+- MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170f9
+- >;
+- };
+-
+- pinctrl_usdhc3_2: usdhc3grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+- MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+- MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+- MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+- MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+- MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+- >;
+- };
+- };
+-
+- usdhc4 {
+- pinctrl_usdhc4_1: usdhc4grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
+- MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
+- MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
+- MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
+- MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
+- MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
+- MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
+- MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
+- MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
+- MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc4_2: usdhc4grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
+- MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
+- MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
+- MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
+- MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
+- MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
+- >;
+- };
+- };
+-
+- weim {
+- pinctrl_weim_cs0_1: weim_cs0grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_CS0__EIM_CS0_B 0xb0b1
+- >;
+- };
+-
+- pinctrl_weim_nor_1: weim_norgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_OE__EIM_OE_B 0xb0b1
+- MX6QDL_PAD_EIM_RW__EIM_RW 0xb0b1
+- MX6QDL_PAD_EIM_WAIT__EIM_WAIT_B 0xb060
+- /* data */
+- MX6QDL_PAD_EIM_D16__EIM_DATA16 0x1b0b0
+- MX6QDL_PAD_EIM_D17__EIM_DATA17 0x1b0b0
+- MX6QDL_PAD_EIM_D18__EIM_DATA18 0x1b0b0
+- MX6QDL_PAD_EIM_D19__EIM_DATA19 0x1b0b0
+- MX6QDL_PAD_EIM_D20__EIM_DATA20 0x1b0b0
+- MX6QDL_PAD_EIM_D21__EIM_DATA21 0x1b0b0
+- MX6QDL_PAD_EIM_D22__EIM_DATA22 0x1b0b0
+- MX6QDL_PAD_EIM_D23__EIM_DATA23 0x1b0b0
+- MX6QDL_PAD_EIM_D24__EIM_DATA24 0x1b0b0
+- MX6QDL_PAD_EIM_D25__EIM_DATA25 0x1b0b0
+- MX6QDL_PAD_EIM_D26__EIM_DATA26 0x1b0b0
+- MX6QDL_PAD_EIM_D27__EIM_DATA27 0x1b0b0
+- MX6QDL_PAD_EIM_D28__EIM_DATA28 0x1b0b0
+- MX6QDL_PAD_EIM_D29__EIM_DATA29 0x1b0b0
+- MX6QDL_PAD_EIM_D30__EIM_DATA30 0x1b0b0
+- MX6QDL_PAD_EIM_D31__EIM_DATA31 0x1b0b0
+- /* address */
+- MX6QDL_PAD_EIM_A23__EIM_ADDR23 0xb0b1
+- MX6QDL_PAD_EIM_A22__EIM_ADDR22 0xb0b1
+- MX6QDL_PAD_EIM_A21__EIM_ADDR21 0xb0b1
+- MX6QDL_PAD_EIM_A20__EIM_ADDR20 0xb0b1
+- MX6QDL_PAD_EIM_A19__EIM_ADDR19 0xb0b1
+- MX6QDL_PAD_EIM_A18__EIM_ADDR18 0xb0b1
+- MX6QDL_PAD_EIM_A17__EIM_ADDR17 0xb0b1
+- MX6QDL_PAD_EIM_A16__EIM_ADDR16 0xb0b1
+- MX6QDL_PAD_EIM_DA15__EIM_AD15 0xb0b1
+- MX6QDL_PAD_EIM_DA14__EIM_AD14 0xb0b1
+- MX6QDL_PAD_EIM_DA13__EIM_AD13 0xb0b1
+- MX6QDL_PAD_EIM_DA12__EIM_AD12 0xb0b1
+- MX6QDL_PAD_EIM_DA11__EIM_AD11 0xb0b1
+- MX6QDL_PAD_EIM_DA10__EIM_AD10 0xb0b1
+- MX6QDL_PAD_EIM_DA9__EIM_AD09 0xb0b1
+- MX6QDL_PAD_EIM_DA8__EIM_AD08 0xb0b1
+- MX6QDL_PAD_EIM_DA7__EIM_AD07 0xb0b1
+- MX6QDL_PAD_EIM_DA6__EIM_AD06 0xb0b1
+- MX6QDL_PAD_EIM_DA5__EIM_AD05 0xb0b1
+- MX6QDL_PAD_EIM_DA4__EIM_AD04 0xb0b1
+- MX6QDL_PAD_EIM_DA3__EIM_AD03 0xb0b1
+- MX6QDL_PAD_EIM_DA2__EIM_AD02 0xb0b1
+- MX6QDL_PAD_EIM_DA1__EIM_AD01 0xb0b1
+- MX6QDL_PAD_EIM_DA0__EIM_AD00 0xb0b1
+- >;
+- };
+- };
+ };
+
+ ldb: ldb@020e0008 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+ compatible = "fsl,imx6q-ldb", "fsl,imx53-ldb";
+- gpr = <&gpr>;
++ reg = <0x020e0000 0x4000>;
++ clocks = <&clks 135>, <&clks 136>,
++ <&clks 39>, <&clks 40>,
++ <&clks 41>, <&clks 42>,
++ <&clks 184>, <&clks 185>,
++ <&clks 210>, <&clks 211>,
++ <&clks 212>, <&clks 213>;
++ clock-names = "ldb_di0", "ldb_di1",
++ "ipu1_di0_sel", "ipu1_di1_sel",
++ "ipu2_di0_sel", "ipu2_di1_sel",
++ "di0_div_3_5", "di1_div_3_5",
++ "di0_div_7", "di1_div_7",
++ "di0_div_sel", "di1_div_sel";
+ status = "disabled";
+-
+- lvds-channel@0 {
+- reg = <0>;
+- status = "disabled";
+- };
+-
+- lvds-channel@1 {
+- reg = <1>;
+- status = "disabled";
+- };
+ };
+
+ dcic1: dcic@020e4000 {
+ reg = <0x020e4000 0x4000>;
+- interrupts = <0 124 0x04>;
++ interrupts = <0 124 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dcic2: dcic@020e8000 {
+ reg = <0x020e8000 0x4000>;
+- interrupts = <0 125 0x04>;
++ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sdma: sdma@020ec000 {
+ compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
+ reg = <0x020ec000 0x4000>;
+- interrupts = <0 2 0x04>;
++ interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 155>, <&clks 155>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
+@@ -1396,9 +772,29 @@
+ reg = <0x02100000 0x100000>;
+ ranges;
+
+- caam@02100000 {
+- reg = <0x02100000 0x40000>;
+- interrupts = <0 105 0x04 0 106 0x04>;
++ crypto: caam@02100000 {
++ compatible = "fsl,sec-v4.0";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x2100000 0x40000>;
++ ranges = <0 0x2100000 0x40000>;
++ interrupt-parent = <&intc>; /* interrupts = <0 92 0x4>; */
++ clocks = <&clks 214>, <&clks 215>, <&clks 216>, <&clks 196>;
++ clock-names = "caam_mem", "caam_aclk", "caam_ipg", "caam_emi_slow";
++
++ sec_jr0: jr0@1000 {
++ compatible = "fsl,sec-v4.0-job-ring";
++ reg = <0x1000 0x1000>;
++ interrupt-parent = <&intc>;
++ interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr1@2000 {
++ compatible = "fsl,sec-v4.0-job-ring";
++ reg = <0x2000 0x1000>;
++ interrupt-parent = <&intc>;
++ interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
++ };
+ };
+
+ aipstz@0217c000 { /* AIPSTZ2 */
+@@ -1408,7 +804,7 @@
+ usbotg: usb@02184000 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184000 0x200>;
+- interrupts = <0 43 0x04>;
++ interrupts = <0 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 162>;
+ fsl,usbphy = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc 0>;
+@@ -1418,7 +814,7 @@
+ usbh1: usb@02184200 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184200 0x200>;
+- interrupts = <0 40 0x04>;
++ interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 162>;
+ fsl,usbphy = <&usbphy2>;
+ fsl,usbmisc = <&usbmisc 1>;
+@@ -1428,18 +824,24 @@
+ usbh2: usb@02184400 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184400 0x200>;
+- interrupts = <0 41 0x04>;
++ interrupts = <0 41 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 162>;
+ fsl,usbmisc = <&usbmisc 2>;
++ phy_type = "hsic";
++ fsl,usbphy = <&usbphy_nop1>;
++ fsl,anatop = <&anatop>;
+ status = "disabled";
+ };
+
+ usbh3: usb@02184600 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184600 0x200>;
+- interrupts = <0 42 0x04>;
++ interrupts = <0 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 162>;
+ fsl,usbmisc = <&usbmisc 3>;
++ phy_type = "hsic";
++ fsl,usbphy = <&usbphy_nop2>;
++ fsl,anatop = <&anatop>;
+ status = "disabled";
+ };
+
+@@ -1453,7 +855,9 @@
+ fec: ethernet@02188000 {
+ compatible = "fsl,imx6q-fec";
+ reg = <0x02188000 0x4000>;
+- interrupts = <0 118 0x04 0 119 0x04>;
++ interrupts-extended =
++ <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 117>, <&clks 117>, <&clks 190>;
+ clock-names = "ipg", "ahb", "ptp";
+ status = "disabled";
+@@ -1461,13 +865,15 @@
+
+ mlb@0218c000 {
+ reg = <0x0218c000 0x4000>;
+- interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
++ interrupts = <0 53 IRQ_TYPE_LEVEL_HIGH>,
++ <0 117 IRQ_TYPE_LEVEL_HIGH>,
++ <0 126 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ usdhc1: usdhc@02190000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02190000 0x4000>;
+- interrupts = <0 22 0x04>;
++ interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 163>, <&clks 163>, <&clks 163>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+@@ -1477,7 +883,7 @@
+ usdhc2: usdhc@02194000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02194000 0x4000>;
+- interrupts = <0 23 0x04>;
++ interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 164>, <&clks 164>, <&clks 164>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+@@ -1487,7 +893,7 @@
+ usdhc3: usdhc@02198000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02198000 0x4000>;
+- interrupts = <0 24 0x04>;
++ interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 165>, <&clks 165>, <&clks 165>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+@@ -1497,7 +903,7 @@
+ usdhc4: usdhc@0219c000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x0219c000 0x4000>;
+- interrupts = <0 25 0x04>;
++ interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 166>, <&clks 166>, <&clks 166>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+@@ -1509,7 +915,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a0000 0x4000>;
+- interrupts = <0 36 0x04>;
++ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 125>;
+ status = "disabled";
+ };
+@@ -1519,7 +925,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a4000 0x4000>;
+- interrupts = <0 37 0x04>;
++ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 126>;
+ status = "disabled";
+ };
+@@ -1529,7 +935,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a8000 0x4000>;
+- interrupts = <0 38 0x04>;
++ interrupts = <0 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 127>;
+ status = "disabled";
+ };
+@@ -1538,6 +944,11 @@
+ reg = <0x021ac000 0x4000>;
+ };
+
++ mmdc0-1@021b0000 { /* MMDC0-1 */
++ compatible = "fsl,imx6q-mmdc-combine";
++ reg = <0x021b0000 0x8000>;
++ };
++
+ mmdc0: mmdc@021b0000 { /* MMDC0 */
+ compatible = "fsl,imx6q-mmdc";
+ reg = <0x021b0000 0x4000>;
+@@ -1550,23 +961,29 @@
+ weim: weim@021b8000 {
+ compatible = "fsl,imx6q-weim";
+ reg = <0x021b8000 0x4000>;
+- interrupts = <0 14 0x04>;
++ interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 196>;
+ };
+
+- ocotp: ocotp@021bc000 {
+- compatible = "fsl,imx6q-ocotp", "syscon";
++ ocotp: ocotp-ctrl@021bc000 {
++ compatible = "syscon";
+ reg = <0x021bc000 0x4000>;
+ };
+
++ ocotp-fuse@021bc000 {
++ compatible = "fsl,imx6q-ocotp";
++ reg = <0x021bc000 0x4000>;
++ clocks = <&clks 128>;
++ };
++
+ tzasc@021d0000 { /* TZASC1 */
+ reg = <0x021d0000 0x4000>;
+- interrupts = <0 108 0x04>;
++ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ tzasc@021d4000 { /* TZASC2 */
+ reg = <0x021d4000 0x4000>;
+- interrupts = <0 109 0x04>;
++ interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ audmux: audmux@021d8000 {
+@@ -1575,23 +992,32 @@
+ status = "disabled";
+ };
+
+- mipi@021dc000 { /* MIPI-CSI */
++ mipi_csi: mipi_csi@021dc000 {
++ compatible = "fsl,imx6q-mipi-csi2";
+ reg = <0x021dc000 0x4000>;
+- };
+-
+- mipi@021e0000 { /* MIPI-DSI */
+- reg = <0x021e0000 0x4000>;
++ interrupts = <0 100 0x04>, <0 101 0x04>;
++ clocks = <&clks 138>, <&clks 53>, <&clks 204>;
++ /* Note: clks 138 is hsi_tx, however, the dphy_c
++ * hsi_tx and pll_refclk use the same clk gate.
++ * In current clk driver, open/close clk gate do
++ * use hsi_tx for a temporary debug purpose.
++ */
++ clock-names = "dphy_clk", "pixel_clk", "cfg_clk";
++ status = "disabled";
+ };
+
+ vdoa@021e4000 {
++ compatible = "fsl,imx6q-vdoa";
+ reg = <0x021e4000 0x4000>;
+- interrupts = <0 18 0x04>;
++ interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 202>;
++ iram = <&ocram>;
+ };
+
+ uart2: serial@021e8000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021e8000 0x4000>;
+- interrupts = <0 27 0x04>;
++ interrupts = <0 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 27 4 0>, <&sdma 28 4 0>;
+@@ -1602,7 +1028,7 @@
+ uart3: serial@021ec000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021ec000 0x4000>;
+- interrupts = <0 28 0x04>;
++ interrupts = <0 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 29 4 0>, <&sdma 30 4 0>;
+@@ -1613,7 +1039,7 @@
+ uart4: serial@021f0000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021f0000 0x4000>;
+- interrupts = <0 29 0x04>;
++ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 31 4 0>, <&sdma 32 4 0>;
+@@ -1624,7 +1050,7 @@
+ uart5: serial@021f4000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021f4000 0x4000>;
+- interrupts = <0 30 0x04>;
++ interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 33 4 0>, <&sdma 34 4 0>;
+@@ -1634,13 +1060,18 @@
+ };
+
+ ipu1: ipu@02400000 {
+- #crtc-cells = <1>;
+ compatible = "fsl,imx6q-ipu";
+ reg = <0x02400000 0x400000>;
+- interrupts = <0 6 0x4 0 5 0x4>;
+- clocks = <&clks 130>, <&clks 131>, <&clks 132>;
+- clock-names = "bus", "di0", "di1";
++ interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>,
++ <0 5 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 130>, <&clks 131>, <&clks 132>,
++ <&clks 39>, <&clks 40>,
++ <&clks 135>, <&clks 136>;
++ clock-names = "bus", "di0", "di1",
++ "di0_sel", "di1_sel",
++ "ldb_di0", "ldb_di1";
+ resets = <&src 2>;
++ bypass_reset = <0>;
+ };
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,374 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++ /* these are used by bootloader for disabling nodes */
++ aliases {
++ can0 = &can1;
++ ethernet0 = &fec;
++ led0 = &led0;
++ led1 = &led1;
++ nand = &gpmi;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x20000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 26 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_3p3v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_5p0v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "5P0V";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pmic: ltc3676@3c {
++ compatible = "ltc,ltc3676";
++ reg = <0x3c>;
++
++ regulators {
++ sw1_reg: ltc3676__sw1 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw2_reg: ltc3676__sw2 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3_reg: ltc3676__sw3 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: ltc3676__sw4 {
++ regulator-min-microvolt = <1500000>;
++ regulator-max-microvolt = <1500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo2_reg: ltc3676__ldo2 {
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo4_reg: ltc3676__ldo4 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ };
++ };
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-gw51xx {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* MEZZ_DIO0 */
++ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* MEZZ_DIO1 */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* PHY Reset */
++ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x80000000 /* PCIE_RST# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart3: uart3grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++ };
++};
++
++&pcie {
++ reset-gpio = <&gpio1 0 0>;
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart3>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,527 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++ /* these are used by bootloader for disabling nodes */
++ aliases {
++ ethernet0 = &fec;
++ led0 = &led0;
++ led1 = &led1;
++ led2 = &led2;
++ nand = &gpmi;
++ ssi0 = &ssi1;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ usdhc2 = &usdhc3;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++
++ led2: user3 {
++ label = "user3";
++ gpios = <&gpio4 15 1>; /* 111 - MX6_LOCLED# */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x20000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 26 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_1p0v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "1P0V";
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
++ };
++
++ /* remove this fixed regulator once ltc3676__sw2 driver available */
++ reg_1p8v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "1P8V";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_5p0v: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "5P0V";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@4 {
++ compatible = "regulator-fixed";
++ reg = <4>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pciswitch: pex8609@3f {
++ compatible = "plx,pex8609";
++ reg = <0x3f>;
++ };
++
++ pmic: ltc3676@3c {
++ compatible = "ltc,ltc3676";
++ reg = <0x3c>;
++
++ regulators {
++ sw1_reg: ltc3676__sw1 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw2_reg: ltc3676__sw2 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3_reg: ltc3676__sw3 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: ltc3676__sw4 {
++ regulator-min-microvolt = <1500000>;
++ regulator-max-microvolt = <1500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo2_reg: ltc3676__ldo2 {
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo3_reg: ltc3676__ldo3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo4_reg: ltc3676__ldo4 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ };
++ };
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ accelerometer: fxos8700@1e {
++ compatible = "fsl,fxos8700";
++ reg = <0x13>;
++ };
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 169>;
++ VDDA-supply = <&reg_1p8v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++
++ touchscreen: egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio7>;
++ interrupts = <12 2>; /* gpio7_12 active low */
++ wakeup-gpios = <&gpio7 12 0>;
++ };
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-gw52xx {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* MEZZ_DIO0 */
++ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* MEZZ_DIO1 */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x80000000 /* VIDDEC_PDN# */
++ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* PHY Reset */
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE_RST# */
++ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000 /* GPS_PWDN */
++ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* USB_SEL_PCI */
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
++ MX6QDL_PAD_SD2_CMD__GPIO1_IO11 0x80000000 /* LVDS_TCH# */
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000 /* SD3_CD# */
++ MX6QDL_PAD_SD4_DAT3__GPIO2_IO11 0x80000000 /* UART2_EN# */
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@0 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ reset-gpio = <&gpio1 29 0>;
++ status = "okay";
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,572 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++ /* these are used by bootloader for disabling nodes */
++ aliases {
++ can0 = &can1;
++ ethernet0 = &fec;
++ ethernet1 = &eth1;
++ led0 = &led0;
++ led1 = &led1;
++ led2 = &led2;
++ nand = &gpmi;
++ sky2 = &eth1;
++ ssi0 = &ssi1;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ usdhc2 = &usdhc3;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++
++ led2: user3 {
++ label = "user3";
++ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 26 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_1p0v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "1P0V";
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
++ };
++
++ /* remove when pmic 1p8 regulator available */
++ reg_1p8v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "1P8V";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_h1_vbus: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@4 {
++ compatible = "regulator-fixed";
++ reg = <4>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&can1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_flexcan1>;
++ status = "okay";
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pciclkgen: si53156@6b {
++ compatible = "sil,si53156";
++ reg = <0x6b>;
++ };
++
++ pciswitch: pex8606@3f {
++ compatible = "plx,pex8606";
++ reg = <0x3f>;
++ };
++
++ pmic: ltc3676@3c {
++ compatible = "ltc,ltc3676";
++ reg = <0x3c>;
++
++ regulators {
++ /* VDD_SOC */
++ sw1_reg: ltc3676__sw1 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_1P8 */
++ sw2_reg: ltc3676__sw2 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_ARM */
++ sw3_reg: ltc3676__sw3 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_DDR */
++ sw4_reg: ltc3676__sw4 {
++ regulator-min-microvolt = <1500000>;
++ regulator-max-microvolt = <1500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_2P5 */
++ ldo2_reg: ltc3676__ldo2 {
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_1P8 */
++ ldo3_reg: ltc3676__ldo3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_HIGH */
++ ldo4_reg: ltc3676__ldo4 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ };
++ };
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ accelerometer: fxos8700@1e {
++ compatible = "fsl,fxos8700";
++ reg = <0x1e>;
++ };
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&reg_1p8v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++
++ hdmiin: adv7611@4c {
++ compatible = "adi,adv7611";
++ reg = <0x4c>;
++ };
++
++ touchscreen: egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio1>;
++ interrupts = <11 2>; /* gpio1_11 active low */
++ wakeup-gpios = <&gpio1 11 0>;
++ };
++
++ videoout: adv7393@2a {
++ compatible = "adi,adv7393";
++ reg = <0x2a>;
++ };
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-gw53xx {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* PCIE6EXP_DIO0 */
++ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* PCIE6EXP_DIO1 */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000 /* GPS_SHDN */
++ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* CAN_STBY */
++ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x80000000 /* PMIC_IRQ# */
++ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x80000000 /* HUB_RST# */
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* PCIE_WDIS# */
++ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x80000000 /* ACCEL_IRQ# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x80000000 /* USBOTG_OC# */
++ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
++ MX6QDL_PAD_SD2_CMD__GPIO1_IO11 0x80000000 /* TOUCH_IRQ# */
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000 /* SD3_DET# */
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_flexcan1: flexcan1grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x80000000
++ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x80000000
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@1 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ reset-gpio = <&gpio1 29 0>;
++ status = "okay";
++
++ eth1: sky2@8 { /* MAC/PHY on bus 8 */
++ compatible = "marvell,sky2";
++ };
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,599 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++ /* these are used by bootloader for disabling nodes */
++ aliases {
++ can0 = &can1;
++ ethernet0 = &fec;
++ ethernet1 = &eth1;
++ led0 = &led0;
++ led1 = &led1;
++ led2 = &led2;
++ nand = &gpmi;
++ sky2 = &eth1;
++ ssi0 = &ssi1;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ usdhc2 = &usdhc3;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++
++ led2: user3 {
++ label = "user3";
++ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 26 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_1p0v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "1P0V";
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_h1_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>; /* AUD4<->sgtl5000 */
++ status = "okay";
++};
++
++&can1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_flexcan1>;
++ status = "okay";
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3950000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ pciswitch: pex8609@3f {
++ compatible = "plx,pex8609";
++ reg = <0x3f>;
++ };
++
++ pciclkgen: si52147@6b {
++ compatible = "sil,si52147";
++ reg = <0x6b>;
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ accelerometer: fxos8700@1e {
++ compatible = "fsl,fxos8700";
++ reg = <0x1e>;
++ };
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&sw4_reg>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++
++ hdmiin: adv7611@4c {
++ compatible = "adi,adv7611";
++ reg = <0x4c>;
++ };
++
++ touchscreen: egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio7>;
++ interrupts = <12 2>; /* gpio7_12 active low */
++ wakeup-gpios = <&gpio7 12 0>;
++ };
++
++ videoout: adv7393@2a {
++ compatible = "adi,adv7393";
++ reg = <0x2a>;
++ };
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-gw54xx {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000 /* SPINOR_CS0# */
++ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* CAN_STBY */
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
++ MX6QDL_PAD_SD1_DAT0__GPIO1_IO16 0x80000000 /* USBHUB_RST# */
++ MX6QDL_PAD_SD1_DAT3__GPIO1_IO21 0x80000000 /* MIPI_DIO */
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_flexcan1: flexcan1grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x80000000
++ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x80000000
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@1 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ reset-gpio = <&gpio1 29 0>;
++ status = "okay";
++
++ eth1: sky2@8 { /* MAC/PHY on bus 8 */
++ compatible = "marvell,sky2";
++ };
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&ssi2 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,367 @@
++/*
++ * Copyright (C) 2013,2014 Russell King
++ */
++#include "imx6qdl-microsom.dtsi"
++#include "imx6qdl-microsom-ar8035.dtsi"
++
++/ {
++ chosen {
++ bootargs = "quiet console=ttymxc0,115200 root=/dev/mmcblk0p2 rw";
++ };
++
++ aliases {
++ mxcfb0 = &mxcfb1;
++ };
++
++ ir_recv: ir-receiver {
++ compatible = "gpio-ir-receiver";
++ gpios = <&gpio3 5 1>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_gpio3_5>;
++ linux,rc-map-name = "rc-rc6-mce";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++
++ reg_3p3v: 3p3v {
++ compatible = "regulator-fixed";
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usbh1_vbus: usb-h1-vbus {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio1 0 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_usbh1_vbus>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ };
++
++ reg_usbotg_vbus: usb-otg-vbus {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio3 22 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_usbotg_vbus>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ };
++ };
++
++ sound-sgtl5000 {
++ audio-codec = <&sgtl5000>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ compatible = "fsl,imx-audio-sgtl5000";
++ model = "On-board Codec";
++ mux-ext-port = <5>;
++ mux-int-port = <1>;
++ ssi-controller = <&ssi1>;
++ };
++
++ sound-spdif {
++ compatible = "fsl,imx-audio-spdif";
++ model = "imx-spdif";
++ spdif-controller = <&spdif>;
++ spdif-out;
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <32>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_hdmi>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_i2c2>;
++ status = "okay";
++
++ ddc: imx6_hdmi_i2c@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++};
++
++&audmux {
++ status = "okay";
++};
++
++&can1 {
++ pinctrl-names = "default";
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
++ status = "okay";
++
++ /* Pro model */
++ rtc: pcf8523@68 {
++ compatible = "nxp,pcf8523";
++ reg = <0x68>;
++ };
++
++ /* Pro model */
++ sgtl5000: sgtl5000@0a {
++ clocks = <&clks 201>;
++ compatible = "fsl,sgtl5000";
++ pinctrl-0 = <&pinctrl_hummingboard_sgtl5000>;
++ pinctrl-names = "default";
++ reg = <0x0a>;
++ VDDA-supply = <&reg_3p3v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++ hummingboard {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /*
++ * 26 pin header GPIO description. The pins
++ * numbering as following -
++ * GPIO number | GPIO (bank,num) | PIN number
++ * ------------+-----------------+------------
++ * gpio1 | (1,1) | IO7
++ * gpio73 | (3,9) | IO11
++ * gpio72 | (3,8) | IO12
++ * gpio71 | (3,7) | IO13
++ * gpio70 | (3,6) | IO15
++ * gpio194 | (7,2) | IO16
++ * gpio195 | (7,3) | IO18
++ * gpio67 | (3,3) | IO22
++ *
++			 * Notice the gpioX and GPIO (Y,Z) mapping formula:
++ * X = (Y-1) * 32 + Z
++ */
++ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x400130b1
++ MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0x400130b1
++ MX6QDL_PAD_EIM_DA8__GPIO3_IO08 0x400130b1
++ MX6QDL_PAD_EIM_DA7__GPIO3_IO07 0x400130b1
++ MX6QDL_PAD_EIM_DA6__GPIO3_IO06 0x400130b1
++ MX6QDL_PAD_SD3_CMD__GPIO7_IO02 0x400130b1
++ MX6QDL_PAD_SD3_CLK__GPIO7_IO03 0x400130b1
++ MX6QDL_PAD_EIM_DA3__GPIO3_IO03 0x400130b1
++ >;
++ };
++
++ pinctrl_hummingboard_gpio3_5: hummingboard-gpio3_5 {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_DA5__GPIO3_IO05 0x80000000
++ >;
++ };
++
++ pinctrl_hummingboard_hdmi: hummingboard-hdmi {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_hummingboard_i2c1: hummingboard-i2c1 {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_hummingboard_i2c2: hummingboard-i2c2 {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_hummingboard_sgtl5000: hummingboard-sgtl5000 {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x130b0 /*brk*/
++ MX6QDL_PAD_KEY_COL0__AUD5_TXC 0x130b0 /*ok*/
++ MX6QDL_PAD_KEY_ROW0__AUD5_TXD 0x110b0 /*brk*/
++ MX6QDL_PAD_KEY_COL1__AUD5_TXFS 0x130b0 /*ok*/
++ MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
++ >;
++ };
++
++ pinctrl_hummingboard_spdif: hummingboard-spdif {
++ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
++ };
++
++ pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
++ fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
++ };
++
++ pinctrl_hummingboard_usbotg_id: hummingboard-usbotg-id {
++ /*
++ * Similar to pinctrl_usbotg_2, but we want it
++ * pulled down for a fixed host connection.
++ */
++ fsl,pins = <MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x13059>;
++ };
++
++ pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
++ fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
++ };
++
++ pinctrl_hummingboard_usdhc2_aux: hummingboard-usdhc2-aux {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1f071
++ >;
++ };
++
++ pinctrl_hummingboard_usdhc2: hummingboard-usdhc2 {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
++ >;
++ };
++
++ pinctrl_hummingboard_pcie_reset: hummingboard-pcie-reset {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x80000000
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ };
++};
++
++&spdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_spdif>;
++ clocks = <&clks 197>, <&clks 0>,
++ <&clks 197>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>;
++ clock-names = "core", "rxtx0",
++ "rxtx1", "rxtx2",
++ "rxtx3", "rxtx4",
++ "rxtx5", "rxtx6",
++ "rxtx7";
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&usbh1 {
++ disable-over-current;
++ vbus-supply = <&reg_usbh1_vbus>;
++ status = "okay";
++};
++
++&usbotg {
++ disable-over-current;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>;
++ vbus-supply = <&reg_usbotg_vbus>;
++ status = "okay";
++};
++
++&usdhc2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <
++ &pinctrl_hummingboard_usdhc2_aux
++ &pinctrl_hummingboard_usdhc2
++ >;
++ vmmc-supply = <&reg_3p3v>;
++ cd-gpios = <&gpio1 4 0>;
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
++
++&pcie {
++ pinctrl-names = "default";
++ pinctrl-0 = <
++ &pinctrl_hummingboard_pcie_reset
++ >;
++ reset-gpio = <&gpio3 4 0>;
++ status = "okay";
++ no-msi;
++};
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&pwm2 {
++ pinctrl-names = "default";
++ status = "okay";
++};
++
++&pwm3 {
++ status = "disabled";
++};
++
++&pwm4 {
++ status = "disabled";
++};
++
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -17,7 +17,7 @@
+ enet {
+ pinctrl_microsom_enet_ar8035: microsom-enet-ar8035 {
+ fsl,pins = <
+- MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b8b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ /* AR8035 reset */
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x130b0
+@@ -26,25 +26,25 @@
+ /* GPIO16 -> AR8035 25MHz */
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0xc0000000
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x80000000
+- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
+ /* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x0a0b1
+ /* AR8035 pin strapping: IO voltage: pull up */
+- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ /* AR8035 pin strapping: PHYADDR#0: pull down */
+- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x130b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x13030
+ /* AR8035 pin strapping: PHYADDR#1: pull down */
+- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x130b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x13030
+ /* AR8035 pin strapping: MODE#1: pull up */
+- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ /* AR8035 pin strapping: MODE#3: pull up */
+- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ /* AR8035 pin strapping: MODE#0: pull down */
+- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x130b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x13030
+
+ /*
+ * As the RMII pins are also connected to RGMII
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-microsom.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-microsom.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -1,9 +1,69 @@
+ /*
+ * Copyright (C) 2013,2014 Russell King
+ */
++#include <dt-bindings/gpio/gpio.h>
++/ {
++ regulators {
++ compatible = "simple-bus";
++
++ reg_brcm_osc: brcm-osc-reg {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio5 5 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_osc_reg>;
++ regulator-name = "brcm_osc_reg";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ regulator-boot-on;
++ };
++
++ reg_brcm: brcm-reg {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_reg>;
++ regulator-name = "brcm_reg";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ startup-delay-us = <200000>;
++ };
++ };
++};
+
+ &iomuxc {
+ microsom {
++ pinctrl_microsom_brcm_bt: microsom-brcm-bt {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT14__GPIO6_IO00 0x40013070
++ MX6QDL_PAD_CSI0_DAT15__GPIO6_IO01 0x40013070
++ MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_osc_reg: microsom-brcm-osc-reg {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_reg: microsom-brcm-reg {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_wifi: microsom-brcm-wifi {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_8__XTALOSC_REF_CLK_32K 0x1b0b0
++ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x40013070
++ MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x40013070
++ MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x40013070
++ >;
++ };
++
+ pinctrl_microsom_uart1: microsom-uart1 {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+@@ -11,12 +71,24 @@
+ >;
+ };
+
+- pinctrl_microsom_usbotg: microsom-usbotg {
+- /*
+- * Similar to pinctrl_usbotg_2, but we want it
+- * pulled down for a fixed host connection.
+- */
+- fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
++ pinctrl_microsom_uart4_1: microsom-uart4 {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_microsom_usdhc1: microsom-usdhc1 {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
++ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
++ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
++ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
++ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
++ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
++ >;
+ };
+ };
+ };
+@@ -27,7 +99,23 @@
+ status = "okay";
+ };
+
+-&usbotg {
++/* UART4 - Connected to optional BRCM Wifi/BT/FM */
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_bt &pinctrl_microsom_uart4_1>;
++ fsl,uart-has-rtscts;
++ status = "okay";
++};
++
++/* USDHC1 - Connected to optional BRCM Wifi/BT/FM */
++&usdhc1 {
++ card-external-vcc-supply = <&reg_brcm>;
++ card-reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>, <&gpio6 0 GPIO_ACTIVE_LOW>;
++ keep-power-in-suspend;
++ non-removable;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_microsom_usbotg>;
++ pinctrl-0 = <&pinctrl_microsom_brcm_wifi &pinctrl_microsom_usdhc1>;
++ vmmc-supply = <&reg_brcm>;
++ status = "okay";
+ };
++
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,426 @@
++/*
++ * Copyright 2013 Boundary Devices, Inc.
++ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
++
++/ {
++ chosen {
++ stdout-path = &uart2;
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_2p5v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "2P5V";
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ gpio-keys {
++ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_keys>;
++
++ power {
++ label = "Power Button";
++ gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_POWER>;
++ gpio-key,wakeup;
++ };
++
++ menu {
++ label = "Menu";
++ gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_MENU>;
++ };
++
++ home {
++ label = "Home";
++ gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_HOME>;
++ };
++
++ back {
++ label = "Back";
++ gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_BACK>;
++ };
++
++ volume-up {
++ label = "Volume Up";
++ gpios = <&gpio7 13 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_VOLUMEUP>;
++ };
++
++ volume-down {
++ label = "Volume Down";
++ gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_VOLUMEDOWN>;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-nitrogen6x-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-nitrogen6x-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <3>;
++ };
++
++ backlight_lcd {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ power-supply = <&reg_3p3v>;
++ status = "okay";
++ };
++
++ backlight_lvds {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ power-supply = <&reg_3p3v>;
++ status = "okay";
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&ecspi1 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi1>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ compatible = "sst,sst25vf016b";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 27 0>;
++ txen-skew-ps = <0>;
++ txc-skew-ps = <3000>;
++ rxdv-skew-ps = <0>;
++ rxc-skew-ps = <3000>;
++ rxd0-skew-ps = <0>;
++ rxd1-skew-ps = <0>;
++ rxd2-skew-ps = <0>;
++ rxd3-skew-ps = <0>;
++ txd0-skew-ps = <0>;
++ txd1-skew-ps = <0>;
++ txd2-skew-ps = <0>;
++ txd3-skew-ps = <0>;
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&reg_2p5v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-nitrogen6x {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /* SGTL5000 sys_mclk */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x030b0
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
++ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
++ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
++ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x000b1 /* CS */
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ /* Phy reset */
++ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x000b0
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_gpio_keys: gpio_keysgrp {
++ fsl,pins = <
++ /* Power Button */
++ MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x1b0b0
++ /* Menu Button */
++ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0
++ /* Home Button */
++ MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x1b0b0
++ /* Back Button */
++ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x1b0b0
++ /* Volume Up Button */
++ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x1b0b0
++ /* Volume Down Button */
++ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b0b0
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_pwm3: pwm3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT1__PWM3_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
++ /* power enable, high active */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x000b0
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 /* CD */
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x1b0b0 /* CD */
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@0 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ status = "okay";
++};
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&pwm3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm3>;
++ status = "okay";
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ cd-gpios = <&gpio2 6 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,98 @@
++/*
++ * Copyright 2013 Christian Hemp, Phytec Messtechnik GmbH
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++ chosen {
++ linux,stdout-path = &uart4;
++ };
++};
++
++&fec {
++ status = "okay";
++};
++
++&gpmi {
++ status = "okay";
++};
++
++&i2c2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ clock-frequency = <100000>;
++ status = "okay";
++
++ tlv320@18 {
++ compatible = "ti,tlv320aic3x";
++ reg = <0x18>;
++ };
++
++ stmpe@41 {
++ compatible = "st,stmpe811";
++ reg = <0x41>;
++ };
++
++ rtc@51 {
++ compatible = "nxp,rtc8564";
++ reg = <0x51>;
++ };
++
++ adc@64 {
++ compatible = "maxim,max1037";
++ reg = <0x64>;
++ };
++};
++
++&i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ clock-frequency = <100000>;
++ status = "okay";
++};
++
++&uart3 {
++ status = "okay";
++};
++
++&uart4 {
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usbotg {
++ status = "okay";
++};
++
++&usdhc2 {
++ status = "okay";
++};
++
++&usdhc3 {
++ status = "okay";
++};
++
++&iomuxc {
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D16__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
++ >;
++ };
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,356 @@
++/*
++ * Copyright 2013 Christian Hemp, Phytec Messtechnik GmbH
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <dt-bindings/gpio/gpio.h>
++
++/ {
++ model = "Phytec phyFLEX-i.MX6 Ouad";
++ compatible = "phytec,imx6q-pfla02", "fsl,imx6q";
++
++ memory {
++ reg = <0x10000000 0x80000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_usb_otg_vbus: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio4 15 0>;
++ };
++
++ reg_usb_h1_vbus: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio1 0 0>;
++ };
++ };
++
++ gpio_leds: leds {
++ compatible = "gpio-leds";
++
++ green {
++ label = "phyflex:green";
++ gpios = <&gpio1 30 0>;
++ };
++
++ red {
++ label = "phyflex:red";
++ gpios = <&gpio2 31 0>;
++ };
++ };
++};
++
++&ecspi3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi3>;
++ status = "okay";
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio4 24 0>;
++
++ flash@0 {
++ compatible = "m25p80";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&i2c1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom@50 {
++ compatible = "atmel,24c32";
++ reg = <0x50>;
++ };
++
++ pmic@58 {
++ compatible = "dialog,da9063";
++ reg = <0x58>;
++ interrupt-parent = <&gpio4>;
++ interrupts = <17 0x8>; /* active-low GPIO4_17 */
++
++ regulators {
++ vddcore_reg: bcore1 {
++ regulator-min-microvolt = <730000>;
++ regulator-max-microvolt = <1380000>;
++ regulator-always-on;
++ };
++
++ vddsoc_reg: bcore2 {
++ regulator-min-microvolt = <730000>;
++ regulator-max-microvolt = <1380000>;
++ regulator-always-on;
++ };
++
++ vdd_ddr3_reg: bpro {
++ regulator-min-microvolt = <1500000>;
++ regulator-max-microvolt = <1500000>;
++ regulator-always-on;
++ };
++
++ vdd_3v3_reg: bperi {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vdd_buckmem_reg: bmem {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vdd_eth_reg: bio {
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <1200000>;
++ regulator-always-on;
++ };
++
++ vdd_eth_io_reg: ldo4 {
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-always-on;
++ };
++
++ vdd_mx6_snvs_reg: ldo5 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-always-on;
++ };
++
++ vdd_3v3_pmic_io_reg: ldo6 {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vdd_sd0_reg: ldo9 {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vdd_sd1_reg: ldo10 {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vdd_mx6_high_reg: ldo11 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-always-on;
++ };
++ };
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-phytec-pfla02 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
++ MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* SPI NOR chipselect */
++ MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x80000000 /* PMIC interrupt */
++ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* Green LED */
++ MX6QDL_PAD_EIM_EB3__GPIO2_IO31 0x80000000 /* Red LED */
++ >;
++ };
++
++ pinctrl_ecspi3: ecspi3grp {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO 0x100b1
++ MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI 0x100b1
++ MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart3: uart3grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D30__UART3_RTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_D31__UART3_CTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbh1: usbh1grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_0__USB_H1_PWR 0x80000000
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3_cdwp: usdhc3cdwp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000
++ >;
++ };
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
++ status = "disabled";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ nand-on-flash-bbt;
++ status = "disabled";
++};
++
++&uart3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart3>;
++ status = "disabled";
++};
++
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart4>;
++ status = "disabled";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbh1>;
++ status = "disabled";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "disabled";
++};
++
++&usdhc2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc2>;
++ cd-gpios = <&gpio1 4 0>;
++ wp-gpios = <&gpio1 2 0>;
++ status = "disabled";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3
++ &pinctrl_usdhc3_cdwp>;
++ cd-gpios = <&gpio1 27 0>;
++ wp-gpios = <&gpio1 29 0>;
++ status = "disabled";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -10,17 +10,146 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <dt-bindings/gpio/gpio.h>
++
+ / {
++ aliases {
++ mxcfb0 = &mxcfb1;
++ mxcfb1 = &mxcfb2;
++ mxcfb2 = &mxcfb3;
++ mxcfb3 = &mxcfb4;
++ };
++
+ memory {
+ reg = <0x10000000 0x80000000>;
+ };
++
++ leds {
++ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_leds>;
++
++ user {
++ label = "debug";
++ gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
++ };
++ };
++
++ sound-spdif {
++ compatible = "fsl,imx-audio-spdif",
++ "fsl,imx-sabreauto-spdif";
++ model = "imx-spdif";
++ spdif-controller = <&spdif>;
++ spdif-in;
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm3 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ status = "okay";
++ };
++
++ max7310_reset: max7310-reset {
++ compatible = "gpio-reset";
++ reset-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
++ reset-delay-us = <1>;
++ #reset-cells = <0>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb2: fb@1 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <24>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb3: fb@2 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "lcd";
++ interface_pix_fmt = "RGB565";
++ mode_str ="CLAA-WVGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb4: fb@3 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm3 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ reg_audio: cs42888_supply {
++ compatible = "regulator-fixed";
++ regulator-name = "cs42888_supply";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++
++ sound-cs42888 {
++ compatible = "fsl,imx6-sabreauto-cs42888",
++ "fsl,imx-audio-cs42888";
++ model = "imx-cs42888";
++ esai-controller = <&esai>;
++ asrc-controller = <&asrc_p2p>;
++ audio-codec = <&codec>;
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ clocks {
++ codec_osc: anaclk2 {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24576000>;
++ };
++ };
+ };
+
+ &ecspi1 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio3 19 0>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi1_1 &pinctrl_ecspi1_sabreauto>;
++ pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1_cs>;
+ status = "disabled"; /* pin conflict with WEIM NOR */
+
+ flash: m25p80@0 {
+@@ -34,51 +163,481 @@
+
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_2>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ status = "okay";
+ };
+
+ &gpmi {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_gpmi_nand_1>;
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio2>;
++ interrupts = <28 2>;
++ wakeup-gpios = <&gpio2 28 0>;
++ };
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ codec: cs42888@048 {
++ compatible = "cirrus,cs42888";
++ reg = <0x048>;
++ clocks = <&codec_osc 0>;
++ clock-names = "codec_osc";
++ VA-supply = <&reg_audio>;
++ VD-supply = <&reg_audio>;
++ VLS-supply = <&reg_audio>;
++ VLC-supply = <&reg_audio>;
++ };
++
++ hdmi: edid@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++};
++
++&i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ pinctrl-assert-gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+ status = "okay";
++
++ max7310_a: gpio@30 {
++ compatible = "maxim,max7310";
++ reg = <0x30>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ resets = <&max7310_reset>;
++ };
++
++ max7310_b: gpio@32 {
++ compatible = "maxim,max7310";
++ reg = <0x32>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ max7310_c: gpio@34 {
++ compatible = "maxim,max7310";
++ reg = <0x34>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
+ };
+
+ &iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6qdl-sabreauto {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x80000000
+ MX6QDL_PAD_SD2_DAT2__GPIO1_IO13 0x80000000
++ MX6QDL_PAD_EIM_A24__GPIO5_IO04 0x80000000
++ MX6QDL_PAD_SD2_DAT0__GPIO1_IO15 0x80000000
+ MX6QDL_PAD_GPIO_18__SD3_VSELECT 0x17059
+ >;
+ };
+- };
+
+- ecspi1 {
+- pinctrl_ecspi1_sabreauto: ecspi1-sabreauto {
++ pinctrl_esai1: esai1grp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_CRS_DV__ESAI_TX_CLK 0x1b030
++ MX6QDL_PAD_ENET_RXD1__ESAI_TX_FS 0x1b030
++ MX6QDL_PAD_ENET_TX_EN__ESAI_TX3_RX2 0x1b030
++ MX6QDL_PAD_GPIO_5__ESAI_TX2_RX3 0x1b030
++ MX6QDL_PAD_ENET_TXD0__ESAI_TX4_RX1 0x1b030
++ MX6QDL_PAD_ENET_MDC__ESAI_TX5_RX0 0x1b030
++ MX6QDL_PAD_GPIO_17__ESAI_TX0 0x1b030
++ MX6QDL_PAD_NANDF_CS3__ESAI_TX1 0x1b030
++ MX6QDL_PAD_ENET_MDIO__ESAI_RX_CLK 0x1b030
++ MX6QDL_PAD_GPIO_9__ESAI_RX_FS 0x1b030
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_ecspi1_cs: ecspi1cs {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000
+ >;
+ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_KEY_COL2__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_gpio_leds: gpioledsgrp {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15 0x80000000
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
++ >;
++ };
++
++ pinctrl_hdmi_cec_2: hdmicecgrp-2 {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_spdif: spdifgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__SPDIF_IN 0x1b0b0
++ >;
++ };
++
++ pinctrl_uart3: uart3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CLK__UART3_RX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_CMD__UART3_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D30__UART3_CTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170b9
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170b9
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170b9
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9
++ >;
++ };
++
++ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170f9
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170f9
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170f9
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170f9
++ >;
++ };
++
++ pinctrl_weim_cs0: weimcs0grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_CS0__EIM_CS0_B 0xb0b1
++ >;
++ };
++
++ pinctrl_weim_nor: weimnorgrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_OE__EIM_OE_B 0xb0b1
++ MX6QDL_PAD_EIM_RW__EIM_RW 0xb0b1
++ MX6QDL_PAD_EIM_WAIT__EIM_WAIT_B 0xb060
++ MX6QDL_PAD_EIM_D16__EIM_DATA16 0x1b0b0
++ MX6QDL_PAD_EIM_D17__EIM_DATA17 0x1b0b0
++ MX6QDL_PAD_EIM_D18__EIM_DATA18 0x1b0b0
++ MX6QDL_PAD_EIM_D19__EIM_DATA19 0x1b0b0
++ MX6QDL_PAD_EIM_D20__EIM_DATA20 0x1b0b0
++ MX6QDL_PAD_EIM_D21__EIM_DATA21 0x1b0b0
++ MX6QDL_PAD_EIM_D22__EIM_DATA22 0x1b0b0
++ MX6QDL_PAD_EIM_D23__EIM_DATA23 0x1b0b0
++ MX6QDL_PAD_EIM_D24__EIM_DATA24 0x1b0b0
++ MX6QDL_PAD_EIM_D25__EIM_DATA25 0x1b0b0
++ MX6QDL_PAD_EIM_D26__EIM_DATA26 0x1b0b0
++ MX6QDL_PAD_EIM_D27__EIM_DATA27 0x1b0b0
++ MX6QDL_PAD_EIM_D28__EIM_DATA28 0x1b0b0
++ MX6QDL_PAD_EIM_D29__EIM_DATA29 0x1b0b0
++ MX6QDL_PAD_EIM_D30__EIM_DATA30 0x1b0b0
++ MX6QDL_PAD_EIM_D31__EIM_DATA31 0x1b0b0
++ MX6QDL_PAD_EIM_A23__EIM_ADDR23 0xb0b1
++ MX6QDL_PAD_EIM_A22__EIM_ADDR22 0xb0b1
++ MX6QDL_PAD_EIM_A21__EIM_ADDR21 0xb0b1
++ MX6QDL_PAD_EIM_A20__EIM_ADDR20 0xb0b1
++ MX6QDL_PAD_EIM_A19__EIM_ADDR19 0xb0b1
++ MX6QDL_PAD_EIM_A18__EIM_ADDR18 0xb0b1
++ MX6QDL_PAD_EIM_A17__EIM_ADDR17 0xb0b1
++ MX6QDL_PAD_EIM_A16__EIM_ADDR16 0xb0b1
++ MX6QDL_PAD_EIM_DA15__EIM_AD15 0xb0b1
++ MX6QDL_PAD_EIM_DA14__EIM_AD14 0xb0b1
++ MX6QDL_PAD_EIM_DA13__EIM_AD13 0xb0b1
++ MX6QDL_PAD_EIM_DA12__EIM_AD12 0xb0b1
++ MX6QDL_PAD_EIM_DA11__EIM_AD11 0xb0b1
++ MX6QDL_PAD_EIM_DA10__EIM_AD10 0xb0b1
++ MX6QDL_PAD_EIM_DA9__EIM_AD09 0xb0b1
++ MX6QDL_PAD_EIM_DA8__EIM_AD08 0xb0b1
++ MX6QDL_PAD_EIM_DA7__EIM_AD07 0xb0b1
++ MX6QDL_PAD_EIM_DA6__EIM_AD06 0xb0b1
++ MX6QDL_PAD_EIM_DA5__EIM_AD05 0xb0b1
++ MX6QDL_PAD_EIM_DA4__EIM_AD04 0xb0b1
++ MX6QDL_PAD_EIM_DA3__EIM_AD03 0xb0b1
++ MX6QDL_PAD_EIM_DA2__EIM_AD02 0xb0b1
++ MX6QDL_PAD_EIM_DA1__EIM_AD01 0xb0b1
++ MX6QDL_PAD_EIM_DA0__EIM_AD00 0xb0b1
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@0 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
+ };
+ };
+
++&pcie {
++ status = "okay";
++};
++
++&pwm3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&spdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_spdif>;
++ status = "okay";
++};
++
++&uart3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart3>;
++ pinctrl-assert-gpios = <&max7310_b 4 GPIO_ACTIVE_HIGH>, /* CTS */
++ <&max7310_c 3 GPIO_ACTIVE_HIGH>; /* RXD and TXD */
++ fsl,uart-has-rtscts;
++ status = "okay";
++};
++
+ &uart4 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart4_1>;
++ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+ };
+
+ &usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+- pinctrl-0 = <&pinctrl_usdhc3_1>;
+- pinctrl-1 = <&pinctrl_usdhc3_1_100mhz>;
+- pinctrl-2 = <&pinctrl_usdhc3_1_200mhz>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
++ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ cd-gpios = <&gpio6 15 0>;
+ wp-gpios = <&gpio1 13 0>;
+ status = "okay";
+@@ -86,7 +645,7 @@
+
+ &weim {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_weim_nor_1 &pinctrl_weim_cs0_1>;
++ pinctrl-0 = <&pinctrl_weim_nor &pinctrl_weim_cs0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x08000000 0x08000000>;
+@@ -102,3 +661,48 @@
+ 0x0000c000 0x1404a38e 0x00000000>;
+ };
+ };
++
++&ldb {
++ ipu_id = <1>;
++ disp_id = <0>;
++ ext_ref = <1>;
++ mode = "sep0";
++ sec_ipu_id = <1>;
++ sec_disp_id = <1>;
++ status = "okay";
++};
++
++&esai {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_esai1>;
++ status = "okay";
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <1>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_cec_2>;
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,427 @@
++/*
++ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
++
++/ {
++ chosen {
++ stdout-path = &uart2;
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_2p5v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "2P5V";
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ gpio-keys {
++ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_keys>;
++
++ power {
++ label = "Power Button";
++ gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_POWER>;
++ gpio-key,wakeup;
++ };
++
++ menu {
++ label = "Menu";
++ gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_MENU>;
++ };
++
++ home {
++ label = "Home";
++ gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_HOME>;
++ };
++
++ back {
++ label = "Back";
++ gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_BACK>;
++ };
++
++ volume-up {
++ label = "Volume Up";
++ gpios = <&gpio7 13 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_VOLUMEUP>;
++ };
++
++ volume-down {
++ label = "Volume Down";
++ gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_VOLUMEDOWN>;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++
++ backlight_lcd {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ power-supply = <&reg_3p3v>;
++ status = "okay";
++ };
++
++ backlight_lvds {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ power-supply = <&reg_3p3v>;
++ status = "okay";
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&ecspi1 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi1>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ compatible = "sst,sst25vf016b";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
++ txen-skew-ps = <0>;
++ txc-skew-ps = <3000>;
++ rxdv-skew-ps = <0>;
++ rxc-skew-ps = <3000>;
++ rxd0-skew-ps = <0>;
++ rxd1-skew-ps = <0>;
++ rxd2-skew-ps = <0>;
++ rxd3-skew-ps = <0>;
++ txd0-skew-ps = <0>;
++ txd1-skew-ps = <0>;
++ txd2-skew-ps = <0>;
++ txd3-skew-ps = <0>;
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&reg_2p5v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-sabrelite {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /* SGTL5000 sys_mclk */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x030b0
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x000b1 /* CS */
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ /* Phy reset */
++ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x000b0
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_gpio_keys: gpio_keysgrp {
++ fsl,pins = <
++ /* Power Button */
++ MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x1b0b0
++ /* Menu Button */
++ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0
++ /* Home Button */
++ MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x1b0b0
++ /* Back Button */
++ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x1b0b0
++ /* Volume Up Button */
++ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x1b0b0
++ /* Volume Down Button */
++ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b0b0
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_pwm3: pwm3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT1__PWM3_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
++ /* power enable, high active */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x000b0
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 /* CD */
++ MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x1f0b0 /* WP */
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x1b0b0 /* CD */
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@0 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ status = "okay";
++};
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&pwm3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm3>;
++ status = "okay";
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ wp-gpios = <&gpio7 1 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ cd-gpios = <&gpio2 6 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-sabresd.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-sabresd.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-sabresd.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -10,16 +10,33 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
++
+ / {
++ aliases {
++ mxcfb0 = &mxcfb1;
++ mxcfb1 = &mxcfb2;
++ mxcfb2 = &mxcfb3;
++ mxcfb3 = &mxcfb4;
++ };
++
++ chosen {
++ stdout-path = &uart1;
++ };
++
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ regulators {
+ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- reg_usb_otg_vbus: usb_otg_vbus {
++ reg_usb_otg_vbus: regulator@0 {
+ compatible = "regulator-fixed";
++ reg = <0>;
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+@@ -27,8 +44,9 @@
+ enable-active-high;
+ };
+
+- reg_usb_h1_vbus: usb_h1_vbus {
++ reg_usb_h1_vbus: regulator@1 {
+ compatible = "regulator-fixed";
++ reg = <1>;
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+@@ -36,29 +54,46 @@
+ enable-active-high;
+ };
+
+- reg_audio: wm8962_supply {
++ reg_audio: regulator@2 {
+ compatible = "regulator-fixed";
++ reg = <2>;
+ regulator-name = "wm8962-supply";
+ gpio = <&gpio4 10 0>;
+ enable-active-high;
+ };
++
++ reg_mipi_dsi_pwr_on: mipi_dsi_pwr_on {
++ compatible = "regulator-fixed";
++ regulator-name = "mipi_dsi_pwr_on";
++ gpio = <&gpio6 14 0>;
++ enable-active-high;
++ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_keys>;
++
++ power {
++ label = "Power Button";
++ gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
++ gpio-key,wakeup;
++ linux,code = <KEY_POWER>;
++ };
+
+ volume-up {
+ label = "Volume Up";
+- gpios = <&gpio1 4 0>;
++ gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ gpio-key,wakeup;
+- linux,code = <115>; /* KEY_VOLUMEUP */
++ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ volume-down {
+ label = "Volume Down";
+- gpios = <&gpio1 5 0>;
++ gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
+ gpio-key,wakeup;
+- linux,code = <114>; /* KEY_VOLUMEDOWN */
++ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+
+@@ -88,11 +123,107 @@
+ default-brightness-level = <7>;
+ status = "okay";
+ };
++
++ leds {
++ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_leds>;
++
++ red {
++ gpios = <&gpio1 2 0>;
++ default-state = "on";
++ };
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb2: fb@1 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <24>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb3: fb@2 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "lcd";
++ interface_pix_fmt = "RGB565";
++ mode_str ="CLAA-WVGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb4: fb@3 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ lcd@0 {
++ compatible = "fsl,lcd";
++ ipu_id = <0>;
++ disp_id = <0>;
++ default_ifmt = "RGB565";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ipu1>;
++ status = "okay";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ v4l2_out {
++ compatible = "fsl,mxc_v4l2_output";
++ status = "okay";
++ };
++
++ lvds_cabc_ctrl {
++ lvds0-gpios = <&gpio6 15 0>;
++ lvds1-gpios = <&gpio6 16 0>;
++ };
++
++ mipi_dsi_reset: mipi-dsi-reset {
++ compatible = "gpio-reset";
++ reset-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
++ reset-delay-us = <50>;
++ #reset-cells = <0>;
++ };
+ };
+
+ &audmux {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_audmux_2>;
++ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+ };
+
+@@ -100,7 +231,7 @@
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio4 9 0>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi1_2>;
++ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash: m25p80@0 {
+@@ -114,7 +245,7 @@
+
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_1>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio1 25 0>;
+ status = "okay";
+@@ -123,7 +254,7 @@
+ &i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c1_2>;
++ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ codec: wm8962@1a {
+@@ -149,10 +280,121 @@
+ };
+ };
+
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ hdmi: edid@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++};
++
+ &i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c3_2>;
++ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ egalax_ts@04 {
+@@ -168,11 +410,9 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6qdl-sabresd {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+- MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x80000000
+- MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x80000000
+ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x80000000
+ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x80000000
+ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x80000000
+@@ -182,6 +422,202 @@
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000
+ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x80000000
++ MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x80000000
++ MX6QDL_PAD_NANDF_CS3__GPIO6_IO16 0x80000000
++ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x80000000
++ MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x80000000
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
++ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
++ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
++ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_KEY_ROW0__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_KEY_COL0__ECSPI1_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpio_keys: gpio_keysgrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x80000000
++ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x80000000
++ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x80000000
++ >;
++ };
++
++ pinctrl_hdmi_cec: hdmi_cecgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_hdmi_hdcp: hdmi_hdcpgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__HDMI_TX_DDC_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__HDMI_TX_DDC_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
++ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_ipu1: ipu1grp {
++ fsl,pins = <
++ MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
++ MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
++ MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10
++ MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10
++ MX6QDL_PAD_DI0_PIN4__IPU1_DI0_PIN04 0x80000000
++ MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10
++ MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10
++ MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10
++ MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10
++ MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10
++ MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10
++ MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10
++ MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10
++ MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10
++ MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10
++ MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10
++ MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10
++ MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10
++ MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10
++ MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10
++ MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10
++ MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10
++ MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10
++ MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10
++ MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10
++ MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10
++ MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10
++ MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10
++ MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10
++ >;
++ };
++
++ pinctrl_pcie: pciegrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D4__SD2_DATA4 0x17059
++ MX6QDL_PAD_NANDF_D5__SD2_DATA5 0x17059
++ MX6QDL_PAD_NANDF_D6__SD2_DATA6 0x17059
++ MX6QDL_PAD_NANDF_D7__SD2_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
++ };
++
++ gpio_leds {
++ pinctrl_gpio_leds: gpioledsgrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000
+ >;
+ };
+ };
+@@ -212,9 +648,33 @@
+ };
+ };
+
++&pcie {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pcie>;
++ reset-gpio = <&gpio7 12 0>;
++ status = "okay";
++};
++
++&pcie {
++ power-on-gpio = <&gpio3 19 0>;
++ reset-gpio = <&gpio7 12 0>;
++ status = "okay";
++};
++
++
+ &pwm1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_pwm0_1>;
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&ldb {
++ ipu_id = <1>;
++ disp_id = <1>;
++ ext_ref = <1>;
++ mode = "sep1";
++ sec_ipu_id = <1>;
++ sec_disp_id = <0>;
+ status = "okay";
+ };
+
+@@ -225,7 +685,16 @@
+
+ &uart1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart1_1>;
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&mipi_dsi {
++ dev_id = <0>;
++ disp_id = <0>;
++ lcd_panel = "TRULY-WVGA";
++ disp-power-on-supply = <&reg_mipi_dsi_pwr_on>;
++ resets = <&mipi_dsi_reset>;
+ status = "okay";
+ };
+
+@@ -237,14 +706,14 @@
+ &usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg_2>;
++ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+ };
+
+ &usdhc2 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc2_1>;
++ pinctrl-0 = <&pinctrl_usdhc2>;
+ bus-width = <8>;
+ cd-gpios = <&gpio2 2 0>;
+ wp-gpios = <&gpio2 3 0>;
+@@ -253,9 +722,47 @@
+
+ &usdhc3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_1>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ cd-gpios = <&gpio2 0 0>;
+ wp-gpios = <&gpio2 1 0>;
+ status = "okay";
+ };
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ bus-width = <8>;
++ non-removable;
++ no-1-8-v;
++ status = "okay";
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_cec>;
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-tbs2910.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-tbs2910.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-tbs2910.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-tbs2910.dtsi 2015-07-24 18:03:30.204842002 -0500
+@@ -0,0 +1,800 @@
++/*
++ * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
++
++/ {
++ aliases {
++ mxcfb0 = &mxcfb1;
++ mxcfb1 = &mxcfb2;
++ mxcfb2 = &mxcfb3;
++ mxcfb3 = &mxcfb4;
++ };
++
++ ir_recv: ir-receiver {
++ compatible = "gpio-ir-receiver";
++ gpios = <&gpio3 18 1>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sabresd_ir>;
++ linux,rc-map-name = "rc-rc6-mce";
++ };
++
++ chosen {
++ stdout-path = &uart1;
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_usb_otg_vbus: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++
++ reg_usb_h1_vbus: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio1 29 0>;
++ enable-active-high;
++ };
++
++ reg_audio: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "sgtl5000-supply";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_mipi_dsi_pwr_on: mipi_dsi_pwr_on {
++ compatible = "regulator-fixed";
++ regulator-name = "mipi_dsi_pwr_on";
++ gpio = <&gpio6 14 0>;
++ enable-active-high;
++ };
++ };
++
++ gpio-keys {
++ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_keys>;
++
++ power {
++ label = "Power Button";
++ gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
++ gpio-key,wakeup;
++ linux,code = <KEY_POWER>;
++ };
++
++ volume-up {
++ label = "Volume Up";
++ gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
++ gpio-key,wakeup;
++ linux,code = <KEY_VOLUMEUP>;
++ };
++
++ volume-down {
++ label = "Volume Down";
++ gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
++ gpio-key,wakeup;
++ linux,code = <KEY_VOLUMEDOWN>;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx-audio-sgtl5000";
++ model = "imx-sgtl5000";
++ ssi-controller = <&ssi2>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <2>;
++ mux-ext-port = <3>;
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ status = "okay";
++ };
++
++ leds {
++ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_leds>;
++
++ red {
++ gpios = <&gpio1 2 0>;
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ fan {
++ gpios = <&gpio3 28 0>;
++ default-state = "off";
++ };
++ };
++
++ sound-spdif {
++ compatible = "fsl,imx-audio-spdif";
++ model = "imx-spdif";
++ spdif-controller = <&spdif>;
++ spdif-out;
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb2: fb@1 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <24>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb3: fb@2 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "lcd";
++ interface_pix_fmt = "RGB565";
++ mode_str ="CLAA-WVGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb4: fb@3 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ lcd@0 {
++ compatible = "fsl,lcd";
++ ipu_id = <0>;
++ disp_id = <0>;
++ default_ifmt = "RGB565";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ipu1>;
++ status = "okay";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ v4l2_out {
++ compatible = "fsl,mxc_v4l2_output";
++ status = "okay";
++ };
++
++ lvds_cabc_ctrl {
++ lvds0-gpios = <&gpio6 15 0>;
++ lvds1-gpios = <&gpio6 16 0>;
++ };
++
++ mipi_dsi_reset: mipi-dsi-reset {
++ compatible = "gpio-reset";
++ reset-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
++ reset-delay-us = <50>;
++ #reset-cells = <0>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&ecspi1 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio4 9 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi1>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p32";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 25 0>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ codec: sgtl5000@0a {
++ clocks = <&clks 201>;
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sgtl5000>;
++ VDDA-supply = <&reg_audio>;
++ VDDIO-supply = <&reg_audio>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ hdmi: edid@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++};
++
++&i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ rtc: rtc@68 {
++ compatible = "dallas,ds1307";
++ reg = <0x68>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-sabresd {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x80000000
++ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x80000000
++ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x80000000
++ MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x80000000
++ MX6QDL_PAD_NANDF_CLE__GPIO6_IO07 0x80000000
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000
++ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x80000000
++ MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x80000000
++ MX6QDL_PAD_NANDF_CS3__GPIO6_IO16 0x80000000
++ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x80000000
++ MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x80000000
++ >;
++ };
++
++ pinctrl_sgtl5000: sgtl5000grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
++ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
++ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
++ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_KEY_ROW0__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_KEY_COL0__ECSPI1_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpio_keys: gpio_keysgrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x80000000
++ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x80000000
++ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x80000000
++ >;
++ };
++
++ pinctrl_sabresd_ir: sabresd-ir {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D18__GPIO3_IO18 0x80000000
++ >;
++ };
++
++ pinctrl_sabresd_spdif: sabresd-spdif {
++ fsl,pins = <MX6QDL_PAD_GPIO_19__SPDIF_OUT 0x13091>;
++ };
++
++ pinctrl_hdmi_cec: hdmi_cecgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_hdmi_hdcp: hdmi_hdcpgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__HDMI_TX_DDC_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__HDMI_TX_DDC_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
++ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_ipu1: ipu1grp {
++ fsl,pins = <
++ MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
++ MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
++ MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10
++ MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10
++ MX6QDL_PAD_DI0_PIN4__IPU1_DI0_PIN04 0x80000000
++ MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10
++ MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10
++ MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10
++ MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10
++ MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10
++ MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10
++ MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10
++ MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10
++ MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10
++ MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10
++ MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10
++ MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10
++ MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10
++ MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10
++ MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10
++ MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10
++ MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10
++ MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10
++ MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10
++ MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10
++ MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10
++ MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10
++ MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10
++ MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10
++ >;
++ };
++
++ pinctrl_pcie: pciegrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D4__SD2_DATA4 0x17059
++ MX6QDL_PAD_NANDF_D5__SD2_DATA5 0x17059
++ MX6QDL_PAD_NANDF_D6__SD2_DATA6 0x17059
++ MX6QDL_PAD_NANDF_D7__SD2_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
++ };
++
++ gpio_leds {
++ pinctrl_gpio_leds: gpioledsgrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000
++ MX6QDL_PAD_EIM_D28__GPIO3_IO28 0x80000000
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@1 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pcie>;
++ reset-gpio = <&gpio7 12 0>;
++ status = "okay";
++};
++
++&pcie {
++ power-on-gpio = <&gpio3 19 0>;
++ reset-gpio = <&gpio7 12 0>;
++ status = "okay";
++};
++
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&ldb {
++ ipu_id = <1>;
++ disp_id = <1>;
++ ext_ref = <1>;
++ mode = "sep1";
++ sec_ipu_id = <1>;
++ sec_disp_id = <0>;
++ status = "okay";
++};
++
++&ssi2 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&mipi_dsi {
++ dev_id = <0>;
++ disp_id = <0>;
++ lcd_panel = "TRULY-WVGA";
++ disp-power-on-supply = <&reg_mipi_dsi_pwr_on>;
++ resets = <&mipi_dsi_reset>;
++ status = "okay";
++};
++
++&spdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sabresd_spdif>;
++ clocks = <&clks 197>, <&clks 0>,
++ <&clks 197>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>;
++ clock-names = "core", "rxtx0",
++ "rxtx1", "rxtx2",
++ "rxtx3", "rxtx4",
++ "rxtx5", "rxtx6",
++ "rxtx7";
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usdhc2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc2>;
++ bus-width = <8>;
++ cd-gpios = <&gpio2 2 0>;
++ wp-gpios = <&gpio2 3 0>;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ bus-width = <8>;
++ cd-gpios = <&gpio2 0 0>;
++ wp-gpios = <&gpio2 1 0>;
++ status = "okay";
++};
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ bus-width = <8>;
++ non-removable;
++ no-1-8-v;
++ status = "okay";
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_cec>;
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6qdl-wandboard.dtsi linux-openelec/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6qdl-wandboard.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6qdl-wandboard.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -12,17 +12,21 @@
+ / {
+ regulators {
+ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- reg_2p5v: 2p5v {
++ reg_2p5v: regulator@0 {
+ compatible = "regulator-fixed";
++ reg = <0>;
+ regulator-name = "2P5V";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+- reg_3p3v: 3p3v {
++ reg_3p3v: regulator@1 {
+ compatible = "regulator-fixed";
++ reg = <1>;
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+@@ -54,14 +58,14 @@
+
+ &audmux {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_audmux_2>;
++ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+ };
+
+ &i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c2_2>;
++ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ codec: sgtl5000@0a {
+@@ -77,7 +81,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6qdl-wandboard {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0
+@@ -91,20 +95,121 @@
+ MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x80000000
+ >;
+ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
++ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
++ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
++ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_spdif: spdifgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RXD0__SPDIF_OUT 0x1b0b0
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart3: uart3grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc1: usdhc1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
++ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
++ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
++ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
++ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
++ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
+ };
+ };
+
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_1>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio3 29 0>;
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ status = "okay";
+ };
+
+ &spdif {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_spdif_3>;
++ pinctrl-0 = <&pinctrl_spdif>;
+ status = "okay";
+ };
+
+@@ -115,13 +220,13 @@
+
+ &uart1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart1_1>;
++ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+ };
+
+ &uart3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart3_2>;
++ pinctrl-0 = <&pinctrl_uart3>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+ };
+@@ -132,7 +237,7 @@
+
+ &usbotg {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg_1>;
++ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ dr_mode = "peripheral";
+ status = "okay";
+@@ -140,21 +245,21 @@
+
+ &usdhc1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc1_2>;
++ pinctrl-0 = <&pinctrl_usdhc1>;
+ cd-gpios = <&gpio1 2 0>;
+ status = "okay";
+ };
+
+ &usdhc2 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc2_2>;
++ pinctrl-0 = <&pinctrl_usdhc2>;
+ non-removable;
+ status = "okay";
+ };
+
+ &usdhc3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
+ cd-gpios = <&gpio3 9 0>;
+ status = "okay";
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts linux-openelec/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,432 @@
++/*
++ * Copyright 2013 Data Modul AG
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++
++#include <dt-bindings/gpio/gpio.h>
++#include "imx6q.dtsi"
++
++/ {
++ model = "Data Modul eDM-QMX6 Board";
++ compatible = "dmo,imx6q-edmqmx6", "fsl,imx6q";
++
++ chosen {
++ stdout-path = &uart2;
++ };
++
++ aliases {
++ gpio7 = &stmpe_gpio1;
++ gpio8 = &stmpe_gpio2;
++ stmpe-i2c0 = &stmpe1;
++ stmpe-i2c1 = &stmpe2;
++ };
++
++ memory {
++ reg = <0x10000000 0x80000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_3p3v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_switch: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_otg_switch";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio7 12 0>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ reg_usb_host1: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_host1_en";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ gpio = <&gpio3 31 0>;
++ enable-active-high;
++ };
++ };
++
++ gpio-leds {
++ compatible = "gpio-leds";
++
++ led-blue {
++ label = "blue";
++ gpios = <&stmpe_gpio1 8 GPIO_ACTIVE_HIGH>;
++ linux,default-trigger = "heartbeat";
++ };
++
++ led-green {
++ label = "green";
++ gpios = <&stmpe_gpio1 9 GPIO_ACTIVE_HIGH>;
++ };
++
++ led-pink {
++ label = "pink";
++ gpios = <&stmpe_gpio1 10 GPIO_ACTIVE_HIGH>;
++ };
++
++ led-red {
++ label = "red";
++ gpios = <&stmpe_gpio1 11 GPIO_ACTIVE_HIGH>;
++ };
++ };
++};
++
++&ecspi5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi5>;
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio1 12 0>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ compatible = "m25p80";
++ spi-max-frequency = <40000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio3 23 0>;
++ phy-supply = <&vgen2_1v2_eth>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2
++ &pinctrl_stmpe1
++ &pinctrl_stmpe2
++ &pinctrl_pfuze>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++ interrupt-parent = <&gpio3>;
++ interrupts = <20 8>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-always-on;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ regulator-always-on;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_1v2_eth: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vdd_high_in: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ stmpe1: stmpe1601@40 {
++ compatible = "st,stmpe1601";
++ reg = <0x40>;
++ interrupts = <30 0>;
++ interrupt-parent = <&gpio3>;
++ vcc-supply = <&sw2_reg>;
++ vio-supply = <&sw2_reg>;
++
++ stmpe_gpio1: stmpe_gpio {
++ #gpio-cells = <2>;
++ compatible = "st,stmpe-gpio";
++ };
++ };
++
++ stmpe2: stmpe1601@44 {
++ compatible = "st,stmpe1601";
++ reg = <0x44>;
++ interrupts = <2 0>;
++ interrupt-parent = <&gpio5>;
++ vcc-supply = <&sw2_reg>;
++ vio-supply = <&sw2_reg>;
++
++ stmpe_gpio2: stmpe_gpio {
++ #gpio-cells = <2>;
++ compatible = "st,stmpe-gpio";
++ };
++ };
++
++ temp1: ad7414@4c {
++ compatible = "ad,ad7414";
++ reg = <0x4c>;
++ };
++
++ temp2: ad7414@4d {
++ compatible = "ad,ad7414";
++ reg = <0x4d>;
++ };
++
++ rtc: m41t62@68 {
++ compatible = "stm,m41t62";
++ reg = <0x68>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-dmo-edmqmx6 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A16__GPIO2_IO22 0x80000000
++ MX6QDL_PAD_EIM_A17__GPIO2_IO21 0x80000000
++ >;
++ };
++
++ pinctrl_ecspi5: ecspi5rp-1 {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT0__ECSPI5_MISO 0x80000000
++ MX6QDL_PAD_SD1_CMD__ECSPI5_MOSI 0x80000000
++ MX6QDL_PAD_SD1_CLK__ECSPI5_SCLK 0x80000000
++ MX6QDL_PAD_SD2_DAT3__GPIO1_IO12 0x80000000
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pfuze: pfuze100grp1 {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x80000000
++ >;
++ };
++
++ pinctrl_stmpe1: stmpe1grp {
++ fsl,pins = <MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x80000000>;
++ };
++
++ pinctrl_stmpe2: stmpe2grp {
++ fsl,pins = <MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x80000000>;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
++ };
++};
++
++&sata {
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_host1>;
++ disable-over-current;
++ dr_mode = "host";
++ status = "okay";
++};
++
++&usbotg {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ vmmc-supply = <&reg_3p3v>;
++ non-removable;
++ bus-width = <8>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q.dtsi linux-openelec/arch/arm/boot/dts/imx6q.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6q.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -8,10 +8,16 @@
+ *
+ */
+
++#include <dt-bindings/interrupt-controller/irq.h>
+ #include "imx6q-pinfunc.h"
+ #include "imx6qdl.dtsi"
+
+ / {
++ aliases {
++ ipu1 = &ipu2;
++ spi4 = &ecspi5;
++ };
++
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -25,8 +31,17 @@
+ /* kHz uV */
+ 1200000 1275000
+ 996000 1250000
++ 852000 1250000
+ 792000 1150000
+- 396000 950000
++ 396000 975000
++ >;
++ fsl,soc-operating-points = <
++ /* ARM kHz SOC-PU uV */
++ 1200000 1275000
++ 996000 1250000
++ 852000 1250000
++ 792000 1175000
++ 396000 1175000
+ >;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clks 104>, <&clks 6>, <&clks 16>,
+@@ -61,12 +76,77 @@
+ };
+
+ soc {
++
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks 171>, <&clks 6>, <&clks 11>, <&clks 104>, <&clks 172>, <&clks 58>,
++ <&clks 18>, <&clks 60>, <&clks 20>, <&clks 3>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc";
++ interrupts = <0 107 0x04>, <0 112 0x4>, <0 113 0x4>, <0 114 0x4>;
++ interrupt-names = "irq_busfreq_0", "irq_busfreq_1", "irq_busfreq_2", "irq_busfreq_3";
++ fsl,max_ddr_freq = <528000000>;
++ };
++
++ gpu@00130000 {
++ compatible = "fsl,imx6q-gpu";
++ reg = <0x00130000 0x4000>, <0x00134000 0x4000>,
++ <0x02204000 0x4000>, <0x0 0x0>;
++ reg-names = "iobase_3d", "iobase_2d",
++ "iobase_vg", "phys_baseaddr";
++ interrupts = <0 9 0x04>, <0 10 0x04>,<0 11 0x04>;
++ interrupt-names = "irq_3d", "irq_2d", "irq_vg";
++ clocks = <&clks 26>, <&clks 143>,
++ <&clks 27>, <&clks 121>,
++ <&clks 122>, <&clks 74>;
++ clock-names = "gpu2d_axi_clk", "openvg_axi_clk",
++ "gpu3d_axi_clk", "gpu2d_clk",
++ "gpu3d_clk", "gpu3d_shader_clk";
++ resets = <&src 0>, <&src 3>, <&src 3>;
++ reset-names = "gpu3d", "gpu2d", "gpuvg";
++ pu-supply = <&reg_pu>;
++ };
++
+ ocram: sram@00900000 {
+ compatible = "mmio-sram";
+ reg = <0x00900000 0x40000>;
+ clocks = <&clks 142>;
+ };
+
++ hdmi_core: hdmi_core@00120000 {
++ compatible = "fsl,imx6q-hdmi-core";
++ reg = <0x00120000 0x9000>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_video: hdmi_video@020e0000 {
++ compatible = "fsl,imx6q-hdmi-video";
++ reg = <0x020e0000 0x1000>;
++ reg-names = "hdmi_gpr";
++ interrupts = <0 115 0x04>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_audio: hdmi_audio@00120000 {
++ compatible = "fsl,imx6q-hdmi-audio";
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ dmas = <&sdma 2 23 0>;
++ dma-names = "tx";
++ status = "disabled";
++ };
++
++ hdmi_cec: hdmi_cec@00120000 {
++ compatible = "fsl,imx6q-hdmi-cec";
++ interrupts = <0 115 0x04>;
++ status = "disabled";
++ };
++
++
+ aips-bus@02000000 { /* AIPS1 */
+ spba-bus@02000000 {
+ ecspi5: ecspi@02018000 {
+@@ -74,13 +154,17 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02018000 0x4000>;
+- interrupts = <0 35 0x04>;
++ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 116>, <&clks 116>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+ };
+
++ vpu@02040000 {
++ status = "okay";
++ };
++
+ iomuxc: iomuxc@020e0000 {
+ compatible = "fsl,imx6q-iomuxc";
+
+@@ -122,40 +206,40 @@
+ };
+ };
+
++ aips-bus@02100000 { /* AIPS2 */
++ mipi_dsi: mipi@021e0000 {
++ compatible = "fsl,imx6q-mipi-dsi";
++ reg = <0x021e0000 0x4000>;
++ interrupts = <0 102 0x04>;
++ gpr = <&gpr>;
++ clocks = <&clks 138>, <&clks 209>;
++ clock-names = "mipi_pllref_clk", "mipi_cfg_clk";
++ status = "disabled";
++ };
++ };
++
+ sata: sata@02200000 {
+ compatible = "fsl,imx6q-ahci";
+ reg = <0x02200000 0x4000>;
+- interrupts = <0 39 0x04>;
++ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 154>, <&clks 187>, <&clks 105>;
+ clock-names = "sata", "sata_ref", "ahb";
+ status = "disabled";
+ };
+
+ ipu2: ipu@02800000 {
+- #crtc-cells = <1>;
+ compatible = "fsl,imx6q-ipu";
+ reg = <0x02800000 0x400000>;
+- interrupts = <0 8 0x4 0 7 0x4>;
+- clocks = <&clks 133>, <&clks 134>, <&clks 137>;
+- clock-names = "bus", "di0", "di1";
++ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
++ <0 7 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 133>, <&clks 134>, <&clks 137>,
++ <&clks 41>, <&clks 42>,
++ <&clks 135>, <&clks 136>;
++ clock-names = "bus", "di0", "di1",
++ "di0_sel", "di1_sel",
++ "ldb_di0", "ldb_di1";
+ resets = <&src 4>;
++ bypass_reset = <0>;
+ };
+ };
+ };
+-
+-&ldb {
+- clocks = <&clks 33>, <&clks 34>,
+- <&clks 39>, <&clks 40>, <&clks 41>, <&clks 42>,
+- <&clks 135>, <&clks 136>;
+- clock-names = "di0_pll", "di1_pll",
+- "di0_sel", "di1_sel", "di2_sel", "di3_sel",
+- "di0", "di1";
+-
+- lvds-channel@0 {
+- crtcs = <&ipu1 0>, <&ipu1 1>, <&ipu2 0>, <&ipu2 1>;
+- };
+-
+- lvds-channel@1 {
+- crtcs = <&ipu1 0>, <&ipu1 1>, <&ipu2 0>, <&ipu2 1>;
+- };
+-};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-gk802.dts linux-openelec/arch/arm/boot/dts/imx6q-gk802.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-gk802.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-gk802.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,229 @@
++/*
++ * Copyright (C) 2013 Philipp Zabel
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++
++/ {
++ model = "Zealz GK802";
++ compatible = "zealz,imx6q-gk802", "fsl,imx6q";
++
++ aliases {
++ mxcfb0 = &mxcfb1;
++ };
++
++ chosen {
++ stdout-path = &uart4;
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_3p3v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_h1_vbus: usb_h1_vbus {
++ compatible = "regulator-fixed";
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio2 0 0>;
++ };
++ };
++
++ gpio-keys {
++ compatible = "gpio-keys";
++
++ recovery-button {
++ label = "recovery";
++ gpios = <&gpio3 16 1>;
++ linux,code = <0x198>; /* KEY_RESTART */
++ gpio-key,wakeup;
++ };
++
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <32>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++
++/* Internal I2C */
++&i2c2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ clock-frequency = <100000>;
++ status = "okay";
++
++ /* SDMC DM2016 1024 bit EEPROM + 128 bit OTP */
++ eeprom: dm2016@51 {
++ compatible = "sdmc,dm2016";
++ reg = <0x51>;
++ };
++};
++
++/* External I2C via HDMI */
++&i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ clock-frequency = <100000>;
++ status = "okay";
++
++ ddc: imx6_hdmi_i2c@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-gk802 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /* Recovery button, active-low */
++ MX6QDL_PAD_EIM_D16__GPIO3_IO16 0x100b1
++ /* RTL8192CU enable GPIO, active-low */
++ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x1b0b0
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&uart2 {
++ status = "okay";
++};
++
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart4>;
++ status = "okay";
++};
++
++/* External USB-A port (USBOTG) */
++&usbotg {
++ phy_type = "utmi";
++ dr_mode = "host";
++ disable-over-current;
++ status = "okay";
++};
++
++/* Internal USB port (USBH1), connected to RTL8192CU */
++&usbh1 {
++ phy_type = "utmi";
++ dr_mode = "host";
++ vbus-supply = <&reg_usb_h1_vbus>;
++ disable-over-current;
++ status = "okay";
++};
++
++/* External microSD */
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ bus-width = <4>;
++ cd-gpios = <&gpio6 11 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
++
++/* Internal microSD */
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ bus-width = <4>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-gw51xx.dts linux-openelec/arch/arm/boot/dts/imx6q-gw51xx.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-gw51xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-gw51xx.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-gw54xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 Quad GW51XX";
++ compatible = "gw,imx6q-gw51xx", "gw,ventana", "fsl,imx6q";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-gw52xx.dts linux-openelec/arch/arm/boot/dts/imx6q-gw52xx.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-gw52xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-gw52xx.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-gw52xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 Quad GW52XX";
++ compatible = "gw,imx6q-gw52xx", "gw,ventana", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-gw53xx.dts linux-openelec/arch/arm/boot/dts/imx6q-gw53xx.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-gw53xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-gw53xx.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-gw53xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 Quad GW53XX";
++ compatible = "gw,imx6q-gw53xx", "gw,ventana", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-gw5400-a.dts linux-openelec/arch/arm/boot/dts/imx6q-gw5400-a.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-gw5400-a.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-gw5400-a.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,543 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++
++/ {
++ model = "Gateworks Ventana GW5400-A";
++ compatible = "gw,imx6q-gw5400-a", "gw,ventana", "fsl,imx6q";
++
++ /* these are used by bootloader for disabling nodes */
++ aliases {
++ ethernet0 = &fec;
++ ethernet1 = &eth1;
++ i2c0 = &i2c1;
++ i2c1 = &i2c2;
++ i2c2 = &i2c3;
++ led0 = &led0;
++ led1 = &led1;
++ led2 = &led2;
++ sky2 = &eth1;
++ ssi0 = &ssi1;
++ spi0 = &ecspi1;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ usdhc2 = &usdhc3;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 10 0>; /* 106 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++
++ led2: user3 {
++ label = "user3";
++ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 5 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_1p0v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "1P0V";
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_h1_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&ecspi1 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi1>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ compatible = "sst,w25q256";
++ spi-max-frequency = <30000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3950000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ pciswitch: pex8609@3f {
++ compatible = "plx,pex8609";
++ reg = <0x3f>;
++ };
++
++ pciclkgen: si52147@6b {
++ compatible = "sil,si52147";
++ reg = <0x6b>;
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ accelerometer: mma8450@1c {
++ compatible = "fsl,mma8450";
++ reg = <0x1c>;
++ };
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&sw4_reg>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++
++ hdmiin: adv7611@4c {
++ compatible = "adi,adv7611";
++ reg = <0x4c>;
++ };
++
++ touchscreen: egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio7>;
++ interrupts = <12 2>; /* gpio7_12 active low */
++ wakeup-gpios = <&gpio7 12 0>;
++ };
++
++ videoout: adv7393@2a {
++ compatible = "adi,adv7393";
++ reg = <0x2a>;
++ };
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-gw5400-a {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000 /* SPINOR_CS0# */
++ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
++ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_COL2__GPIO4_IO10 0x80000000 /* user2 led */
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
++ MX6QDL_PAD_SD1_DAT0__GPIO1_IO16 0x80000000 /* USBHUB_RST# */
++ MX6QDL_PAD_SD1_DAT3__GPIO1_IO21 0x80000000 /* MIPI_DIO */
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++};
++
++&pcie {
++ reset-gpio = <&gpio1 29 0>;
++ status = "okay";
++
++ eth1: sky2@8 { /* MAC/PHY on bus 8 */
++ compatible = "marvell,sky2";
++ };
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-gw54xx.dts linux-openelec/arch/arm/boot/dts/imx6q-gw54xx.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-gw54xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-gw54xx.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-gw54xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 Quad GW54XX";
++ compatible = "gw,imx6q-gw54xx", "gw,ventana", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-hummingboard.dts linux-openelec/arch/arm/boot/dts/imx6q-hummingboard.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-hummingboard.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-hummingboard.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,21 @@
++/*
++ * Copyright (C) 2014 Rabeeh Khoury (rabeeh@solid-run.com)
++ * Based on work by Russell King
++ */
++/dts-v1/;
++
++#include "imx6q.dtsi"
++#include "imx6qdl-hummingboard.dtsi"
++
++/ {
++ model = "SolidRun HummingBoard Dual/Quad";
++ compatible = "solidrun,hummingboard/q", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++ fsl,transmit-level-mV = <1104>;
++ fsl,transmit-boost-mdB = <0>;
++ fsl,transmit-atten-16ths = <9>;
++ fsl,no-spread-spectrum;
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-nitrogen6x.dts linux-openelec/arch/arm/boot/dts/imx6q-nitrogen6x.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-nitrogen6x.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-nitrogen6x.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,25 @@
++/*
++ * Copyright 2013 Boundary Devices, Inc.
++ * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-nitrogen6x.dtsi"
++
++/ {
++ model = "Freescale i.MX6 Quad Nitrogen6x Board";
++ compatible = "fsl,imx6q-nitrogen6x", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-phytec-pbab01.dts linux-openelec/arch/arm/boot/dts/imx6q-phytec-pbab01.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-phytec-pbab01.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-phytec-pbab01.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -11,24 +11,17 @@
+
+ /dts-v1/;
+ #include "imx6q-phytec-pfla02.dtsi"
++#include "imx6qdl-phytec-pbab01.dtsi"
+
+ / {
+ model = "Phytec phyFLEX-i.MX6 Quad Carrier-Board";
+ compatible = "phytec,imx6q-pbab01", "phytec,imx6q-pfla02", "fsl,imx6q";
+-};
+-
+-&fec {
+- status = "okay";
+-};
+-
+-&uart4 {
+- status = "okay";
+-};
+
+-&usdhc2 {
+- status = "okay";
++ chosen {
++ stdout-path = &uart4;
++ };
+ };
+
+-&usdhc3 {
+- status = "okay";
++&sata {
++ status = "okay";
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-phytec-pfla02.dtsi linux-openelec/arch/arm/boot/dts/imx6q-phytec-pfla02.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-phytec-pfla02.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-phytec-pfla02.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -10,171 +10,13 @@
+ */
+
+ #include "imx6q.dtsi"
++#include "imx6qdl-phytec-pfla02.dtsi"
+
+ / {
+- model = "Phytec phyFLEX-i.MX6 Ouad";
++ model = "Phytec phyFLEX-i.MX6 Quad";
+ compatible = "phytec,imx6q-pfla02", "fsl,imx6q";
+
+ memory {
+ reg = <0x10000000 0x80000000>;
+ };
+ };
+-
+-&ecspi3 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi3_1>;
+- status = "okay";
+- fsl,spi-num-chipselects = <1>;
+- cs-gpios = <&gpio4 24 0>;
+-
+- flash@0 {
+- compatible = "m25p80";
+- spi-max-frequency = <20000000>;
+- reg = <0>;
+- };
+-};
+-
+-&i2c1 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c1_1>;
+- status = "okay";
+-
+- eeprom@50 {
+- compatible = "atmel,24c32";
+- reg = <0x50>;
+- };
+-
+- pmic@58 {
+- compatible = "dialog,da9063";
+- reg = <0x58>;
+- interrupt-parent = <&gpio4>;
+- interrupts = <17 0x8>; /* active-low GPIO4_17 */
+-
+- regulators {
+- vddcore_reg: bcore1 {
+- regulator-min-microvolt = <730000>;
+- regulator-max-microvolt = <1380000>;
+- regulator-always-on;
+- };
+-
+- vddsoc_reg: bcore2 {
+- regulator-min-microvolt = <730000>;
+- regulator-max-microvolt = <1380000>;
+- regulator-always-on;
+- };
+-
+- vdd_ddr3_reg: bpro {
+- regulator-min-microvolt = <1500000>;
+- regulator-max-microvolt = <1500000>;
+- regulator-always-on;
+- };
+-
+- vdd_3v3_reg: bperi {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- vdd_buckmem_reg: bmem {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- vdd_eth_reg: bio {
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <1200000>;
+- regulator-always-on;
+- };
+-
+- vdd_eth_io_reg: ldo4 {
+- regulator-min-microvolt = <2500000>;
+- regulator-max-microvolt = <2500000>;
+- regulator-always-on;
+- };
+-
+- vdd_mx6_snvs_reg: ldo5 {
+- regulator-min-microvolt = <3000000>;
+- regulator-max-microvolt = <3000000>;
+- regulator-always-on;
+- };
+-
+- vdd_3v3_pmic_io_reg: ldo6 {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- vdd_sd0_reg: ldo9 {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- };
+-
+- vdd_sd1_reg: ldo10 {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- };
+-
+- vdd_mx6_high_reg: ldo11 {
+- regulator-min-microvolt = <3000000>;
+- regulator-max-microvolt = <3000000>;
+- regulator-always-on;
+- };
+- };
+- };
+-};
+-
+-&iomuxc {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hog>;
+-
+- hog {
+- pinctrl_hog: hoggrp {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
+- MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* SPI NOR chipselect */
+- MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x80000000 /* PMIC interrupt */
+- >;
+- };
+- };
+-
+- pfla02 {
+- pinctrl_usdhc3_pfla02: usdhc3grp-pfla02 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000
+- MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000
+- >;
+- };
+- };
+-};
+-
+-&fec {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_3>;
+- phy-mode = "rgmii";
+- phy-reset-gpios = <&gpio3 23 0>;
+- status = "disabled";
+-};
+-
+-&uart4 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart4_1>;
+- status = "disabled";
+-};
+-
+-&usdhc2 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc2_2>;
+- cd-gpios = <&gpio1 4 0>;
+- wp-gpios = <&gpio1 2 0>;
+- status = "disabled";
+-};
+-
+-&usdhc3 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2
+- &pinctrl_usdhc3_pfla02>;
+- cd-gpios = <&gpio1 27 0>;
+- wp-gpios = <&gpio1 29 0>;
+- status = "disabled";
+-};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-pinfunc.h linux-openelec/arch/arm/boot/dts/imx6q-pinfunc.h
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-pinfunc.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-pinfunc.h 2015-05-06 12:05:43.000000000 -0500
+@@ -673,6 +673,7 @@
+ #define MX6QDL_PAD_GPIO_3__USB_H1_OC 0x22c 0x5fc 0x948 0x6 0x1
+ #define MX6QDL_PAD_GPIO_3__MLB_CLK 0x22c 0x5fc 0x900 0x7 0x1
+ #define MX6QDL_PAD_GPIO_6__ESAI_TX_CLK 0x230 0x600 0x870 0x0 0x1
++#define MX6QDL_PAD_GPIO_6__ENET_IRQ 0x230 0x600 0x03c 0x11 0xff000609
+ #define MX6QDL_PAD_GPIO_6__I2C3_SDA 0x230 0x600 0x8ac 0x2 0x1
+ #define MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x230 0x600 0x000 0x5 0x0
+ #define MX6QDL_PAD_GPIO_6__SD2_LCTL 0x230 0x600 0x000 0x6 0x0
+@@ -1024,6 +1025,7 @@
+ #define MX6QDL_PAD_SD1_DAT2__WDOG1_RESET_B_DEB 0x34c 0x734 0x000 0x6 0x0
+ #define MX6QDL_PAD_SD1_CLK__SD1_CLK 0x350 0x738 0x000 0x0 0x0
+ #define MX6QDL_PAD_SD1_CLK__ECSPI5_SCLK 0x350 0x738 0x828 0x1 0x0
++#define MX6QDL_PAD_SD1_CLK__OSC32K_32K_OUT 0x350 0x738 0x000 0x2 0x0
+ #define MX6QDL_PAD_SD1_CLK__GPT_CLKIN 0x350 0x738 0x000 0x3 0x0
+ #define MX6QDL_PAD_SD1_CLK__GPIO1_IO20 0x350 0x738 0x000 0x5 0x0
+ #define MX6QDL_PAD_SD2_CLK__SD2_CLK 0x354 0x73c 0x000 0x0 0x0
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-sabreauto.dts linux-openelec/arch/arm/boot/dts/imx6q-sabreauto.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-sabreauto.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-sabreauto.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -20,6 +20,22 @@
+ compatible = "fsl,imx6q-sabreauto", "fsl,imx6q";
+ };
+
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
++
++&mxcfb3 {
++ status = "okay";
++};
++
++&mxcfb4 {
++ status = "okay";
++};
++
+ &sata {
+ status = "okay";
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-sabrelite.dts linux-openelec/arch/arm/boot/dts/imx6q-sabrelite.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-sabrelite.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-sabrelite.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -12,189 +12,13 @@
+
+ /dts-v1/;
+ #include "imx6q.dtsi"
++#include "imx6qdl-sabrelite.dtsi"
+
+ / {
+ model = "Freescale i.MX6 Quad SABRE Lite Board";
+ compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
+-
+- memory {
+- reg = <0x10000000 0x40000000>;
+- };
+-
+- regulators {
+- compatible = "simple-bus";
+-
+- reg_2p5v: 2p5v {
+- compatible = "regulator-fixed";
+- regulator-name = "2P5V";
+- regulator-min-microvolt = <2500000>;
+- regulator-max-microvolt = <2500000>;
+- regulator-always-on;
+- };
+-
+- reg_3p3v: 3p3v {
+- compatible = "regulator-fixed";
+- regulator-name = "3P3V";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- reg_usb_otg_vbus: usb_otg_vbus {
+- compatible = "regulator-fixed";
+- regulator-name = "usb_otg_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- gpio = <&gpio3 22 0>;
+- enable-active-high;
+- };
+- };
+-
+- sound {
+- compatible = "fsl,imx6q-sabrelite-sgtl5000",
+- "fsl,imx-audio-sgtl5000";
+- model = "imx6q-sabrelite-sgtl5000";
+- ssi-controller = <&ssi1>;
+- audio-codec = <&codec>;
+- audio-routing =
+- "MIC_IN", "Mic Jack",
+- "Mic Jack", "Mic Bias",
+- "Headphone Jack", "HP_OUT";
+- mux-int-port = <1>;
+- mux-ext-port = <4>;
+- };
+-};
+-
+-&audmux {
+- status = "okay";
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_audmux_1>;
+-};
+-
+-&ecspi1 {
+- fsl,spi-num-chipselects = <1>;
+- cs-gpios = <&gpio3 19 0>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi1_1>;
+- status = "okay";
+-
+- flash: m25p80@0 {
+- compatible = "sst,sst25vf016b";
+- spi-max-frequency = <20000000>;
+- reg = <0>;
+- };
+-};
+-
+-&fec {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_1>;
+- phy-mode = "rgmii";
+- phy-reset-gpios = <&gpio3 23 0>;
+- status = "okay";
+-};
+-
+-&i2c1 {
+- status = "okay";
+- clock-frequency = <100000>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c1_1>;
+-
+- codec: sgtl5000@0a {
+- compatible = "fsl,sgtl5000";
+- reg = <0x0a>;
+- clocks = <&clks 201>;
+- VDDA-supply = <&reg_2p5v>;
+- VDDIO-supply = <&reg_3p3v>;
+- };
+-};
+-
+-&iomuxc {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hog>;
+-
+- hog {
+- pinctrl_hog: hoggrp {
+- fsl,pins = <
+- MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x80000000
+- MX6QDL_PAD_NANDF_D7__GPIO2_IO07 0x80000000
+- MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000
+- MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000
+- MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
+- MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000
+- MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x1f0b0
+- MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x80000000
+- MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
+- >;
+- };
+- };
+-};
+-
+-&ldb {
+- status = "okay";
+-
+- lvds-channel@0 {
+- fsl,data-mapping = "spwg";
+- fsl,data-width = <18>;
+- status = "okay";
+-
+- display-timings {
+- native-mode = <&timing0>;
+- timing0: hsd100pxn1 {
+- clock-frequency = <65000000>;
+- hactive = <1024>;
+- vactive = <768>;
+- hback-porch = <220>;
+- hfront-porch = <40>;
+- vback-porch = <21>;
+- vfront-porch = <7>;
+- hsync-len = <60>;
+- vsync-len = <10>;
+- };
+- };
+- };
+ };
+
+ &sata {
+ status = "okay";
+ };
+-
+-&ssi1 {
+- fsl,mode = "i2s-slave";
+- status = "okay";
+-};
+-
+-&uart2 {
+- status = "okay";
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart2_1>;
+-};
+-
+-&usbh1 {
+- status = "okay";
+-};
+-
+-&usbotg {
+- vbus-supply = <&reg_usb_otg_vbus>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg_1>;
+- disable-over-current;
+- status = "okay";
+-};
+-
+-&usdhc3 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2>;
+- cd-gpios = <&gpio7 0 0>;
+- wp-gpios = <&gpio7 1 0>;
+- vmmc-supply = <&reg_3p3v>;
+- status = "okay";
+-};
+-
+-&usdhc4 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc4_2>;
+- cd-gpios = <&gpio2 6 0>;
+- wp-gpios = <&gpio2 7 0>;
+- vmmc-supply = <&reg_3p3v>;
+- status = "okay";
+-};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-sabresd.dts linux-openelec/arch/arm/boot/dts/imx6q-sabresd.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-sabresd.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-sabresd.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -23,3 +23,19 @@
+ &sata {
+ status = "okay";
+ };
++
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
++
++&mxcfb3 {
++ status = "okay";
++};
++
++&mxcfb4 {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-sabresd-hdcp.dts linux-openelec/arch/arm/boot/dts/imx6q-sabresd-hdcp.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-sabresd-hdcp.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-sabresd-hdcp.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include "imx6q-sabresd.dts"
++
++&hdmi_video {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_hdcp>;
++ fsl,hdcp;
++};
++
++&i2c2 {
++ status = "disable";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-sbc6x.dts linux-openelec/arch/arm/boot/dts/imx6q-sbc6x.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-sbc6x.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-sbc6x.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -17,28 +17,78 @@
+ };
+ };
+
++
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_1>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
+ status = "okay";
+ };
+
++&iomuxc {
++ imx6q-sbc6x {
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
+ &uart1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart1_1>;
++ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+ };
+
+ &usbotg {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg_1>;
++ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+ };
+
+ &usdhc3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
+ status = "okay";
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-tbs2910.dts linux-openelec/arch/arm/boot/dts/imx6q-tbs2910.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-tbs2910.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6q-tbs2910.dts 2015-07-24 18:03:30.200842002 -0500
+@@ -0,0 +1,41 @@
++/*
++ * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++
++#include "imx6q.dtsi"
++#include "imx6qdl-tbs2910.dtsi"
++
++/ {
++ model = "TBS Matrix";
++ compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
++
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
++
++&mxcfb3 {
++ status = "okay";
++};
++
++&mxcfb4 {
++ status = "okay";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6q-udoo.dts linux-openelec/arch/arm/boot/dts/imx6q-udoo.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6q-udoo.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6q-udoo.dts 2015-07-24 18:03:30.248842002 -0500
+@@ -2,6 +2,10 @@
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Fabio Estevam <fabio.estevam@freescale.com>
++ *
++ * Copyright (C) 2014 Jasbir
++ * Copyright (C) 2014 udoo team
++ * Copyright (C) 2014 vpeter
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -16,9 +20,395 @@
+ model = "Udoo i.MX6 Quad Board";
+ compatible = "udoo,imx6q-udoo", "fsl,imx6q";
+
++ chosen {
++ stdout-path = &uart1;
++ };
++
++ aliases {
++ mxcfb0 = &mxcfb1;
++ mxcfb1 = &mxcfb2;
++ mxcfb2 = &mxcfb3;
++ mxcfb3 = &mxcfb4;
++ ssi0 = &ssi1;
++ };
++
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
++
++ regulators {
++ compatible = "simple-bus";
++
++ reg_2p5v: 2p5v {
++ compatible = "regulator-fixed";
++ regulator-name = "2P5V";
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: 3p3v {
++ compatible = "regulator-fixed";
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ aux_5v: aux5v {
++ compatible = "regulator-fixed";
++ regulator-name = "AUX_5V";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio6 10 1>;
++ regulator-boot-on;
++ enable-active-high;
++ };
++
++ reg_sensor: sensor_supply {
++ compatible = "regulator-fixed";
++ regulator-name = "sensor-SUPPLY";
++ enable-active-high;
++ };
++
++ reg_usb_otg_vbus: usb_otg_vbus {
++ compatible = "regulator-fixed";
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ enable-active-high;
++ };
++
++ reg_usb_h1_vbus: usb_h1_vbus {
++ compatible = "regulator-fixed";
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ enable-active-high;
++ startup-delay-us = <2>; /* USB2415 requires a POR of 1 us minimum */
++ gpio = <&gpio7 12 0>;
++ };
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <24>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++
++ mxcfb2: fb@1 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ default_bpp = <24>;
++ interface_pix_fmt = "RGB24";
++ mode_str ="";
++ int_clk = <0>;
++ late_init = <1>;
++ status = "okay";
++ };
++
++ mxcfb3: fb@2 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <24>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb4: fb@3 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <24>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ codec: vt1613 {
++ compatible = "via,vt1613";
++ };
++
++ sound {
++ compatible = "udoo,imx-vt1613-audio";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ mux-int-port = <1>;
++ mux-ext-port = <6>;
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ sound-spdif {
++ compatible = "fsl,imx-audio-spdif",
++ "fsl,imx-sabreauto-spdif";
++ model = "imx-spdif";
++ spdif-controller = <&spdif>;
++ spdif-in;
++ status = "disabled";
++ };
++
++ v4l2_out {
++ compatible = "fsl,mxc_v4l2_output";
++ status = "okay";
++ };
++
++ poweroff {
++ compatible = "udoo,poweroff";
++ sam3x_rst_gpio = <&gpio1 0 GPIO_ACTIVE_LOW>;
++ pwr_5v_gpio = <&gpio2 4 GPIO_ACTIVE_HIGH>;
++ arduino_mode = <0>;
++ };
++};
++
++&ldb {
++ ipu_id = <1>;
++ disp_id = <0>;
++ ext_ref = <1>;
++ mode = "sep0";
++ sec_ipu_id = <1>;
++ sec_disp_id = <1>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1_2>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2_2>;
++ status = "okay";
++
++ hdmi: edid@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++};
++
++&i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3_5>;
++ status = "okay";
++
++ touchscreen: st1232@55 {
++ compatible = "sitronix,st1232";
++ reg = <0x55>;
++ interrupt-parent = <&gpio1>;
++ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
++ gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
++ /* udoo poweroff driver */
++ lcd_panel_on_gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
++ lcd_backlight_gpio = <&gpio1 4 GPIO_ACTIVE_HIGH>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ status = "okay";
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-udoo {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x80000000 /* 5v enable */
++ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x80000000 /* Vtt suspend */
++ MX6QDL_PAD_SD2_DAT0__GPIO1_IO15 0x80000000 /* touch reset */
++ MX6QDL_PAD_EIM_EB3__GPIO2_IO31 0x80000000 /* ethernet power */
++
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* usb hub reset */
++ MX6QDL_PAD_NANDF_CS2__CCM_CLKO2 0x130b0 /* clk usb hub */
++ MX6QDL_PAD_EIM_WAIT__GPIO5_IO00 0xb0b1 /* usb otg select */
++
++ MX6QDL_PAD_NANDF_D5__GPIO2_IO05 0x80000000 /* sdcard power */
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000 /* sd card detect */
++ MX6QDL_PAD_DISP0_DAT5__GPIO4_IO26 0x80000000 /* select dbg uart*/
++ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x80000000 /* SAM3X reset */
++ MX6QDL_PAD_DISP0_DAT0__GPIO4_IO21 0x30b1 /* SAM3X erase */
++ MX6QDL_PAD_GPIO_16__GPIO7_IO11 0xb0b1 /* SAM3X vbus_en */
++ MX6QDL_PAD_SD4_DAT7__GPIO2_IO15 0x80000000 /* SAM3X usb host */
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* panel on */
++ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x80000000 /* backlight on */
++ MX6QDL_PAD_CSI0_DAT19__GPIO6_IO05 0x80000000 /* camera reset */
++ MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x80000000 /* camera enable */
++ MX6QDL_PAD_CSI0_PIXCLK__GPIO5_IO18 0x80000000 /* input mon serial*/
++ MX6QDL_PAD_CSI0_DAT17__GPIO6_IO03 0x80000000 /* input mon serial*/
++ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* writeprotect spi*/
++ MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x30b1 /* arduino pinout */
++ >;
++ };
++
++ pinctrl_i2c1_2: i2c1grp-2 {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
++ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2_2: i2c2grp-2 {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3_5: i2c3grp-5 {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000 /* reset */
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_i2c3_1: i2c3grp-1 {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_spdif_1: spdifgrp-1 {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__SPDIF_IN 0x1b0b0
++ >;
++ };
++
++ /*pinctrl_hdmi_cec_1: hdmicecgrp-1 {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A25__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };*/
++
++ ac97link_running: ac97link_runninggrp {
++ fsl,pins = <
++ MX6QDL_PAD_DI0_PIN2__AUD6_TXD 0x80000000
++ MX6QDL_PAD_DI0_PIN3__AUD6_TXFS 0x80000000
++ MX6QDL_PAD_DI0_PIN4__AUD6_RXD 0x80000000
++ MX6QDL_PAD_DI0_PIN15__AUD6_TXC 0x80000000
++ >;
++ };
++
++ ac97link_reset: ac97link_resetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x80000000
++ MX6QDL_PAD_DI0_PIN3__GPIO4_IO19 0x80000000
++ MX6QDL_PAD_DI0_PIN2__GPIO4_IO18 0x80000000
++ >;
++ };
++
++ ac97link_warm_reset: ac97link_warm_resetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_DI0_PIN3__GPIO4_IO19 0x80000000
++ >;
++ };
++ };
++};
++
++&audmux {
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "ac97-slave";
++ pinctrl-names = "default", "ac97-running", "ac97-reset", "ac97-warm-reset";
++ pinctrl-0 = <&ac97link_running>;
++ pinctrl-1 = <&ac97link_running>;
++ pinctrl-2 = <&ac97link_reset>;
++ pinctrl-3 = <&ac97link_warm_reset>;
++ /* sync, sdata (output), reset */
++ ac97-gpios = <&gpio4 19 0 &gpio4 18 0 &gpio2 30 0>;
++ status = "okay";
++};
++
++&spdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_spdif_1>;
++ status = "disabled";
+ };
+
+ &sata {
+@@ -27,13 +417,37 @@
+
+ &uart2 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart2_1>;
++ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+ };
+
++&uart4 { /* sam3x port */
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart4>;
++ status = "okay";
++};
++
+ &usdhc3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
+ non-removable;
++ keep-power-in-suspend;
+ status = "okay";
+ };
++
++&usbotg {
++ status = "disabled";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ clocks = <&clks 201>;
++ clock-names = "phy";
++ status = "okay";
++};
++
++&hdmi_cec {
++ /*pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_cec_1>;*/
++ status = "disabled";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6sl.dtsi linux-openelec/arch/arm/boot/dts/imx6sl.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/imx6sl.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6sl.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -7,12 +7,14 @@
+ *
+ */
+
++#include <dt-bindings/interrupt-controller/irq.h>
+ #include "skeleton.dtsi"
+ #include "imx6sl-pinfunc.h"
+ #include <dt-bindings/clock/imx6sl-clock.h>
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -27,25 +29,46 @@
+ spi1 = &ecspi2;
+ spi2 = &ecspi3;
+ spi3 = &ecspi4;
++ usbphy0 = &usbphy1;
++ usbphy1 = &usbphy2;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- cpu@0 {
++ cpu0: cpu@0 {
+ compatible = "arm,cortex-a9";
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
++ operating-points = <
++ /* kHz uV */
++ 996000 1275000
++ 792000 1175000
++ 396000 975000
++ >;
++ fsl,soc-operating-points = <
++ /* ARM kHz SOC-PU uV */
++ 996000 1225000
++ 792000 1175000
++ 396000 1175000
++ >;
++ clock-latency = <61036>; /* two CLK32 periods */
++ clocks = <&clks IMX6SL_CLK_ARM>, <&clks IMX6SL_CLK_PLL2_PFD2>,
++ <&clks IMX6SL_CLK_STEP>, <&clks IMX6SL_CLK_PLL1_SW>,
++ <&clks IMX6SL_CLK_PLL1_SYS>;
++ clock-names = "arm", "pll2_pfd2_396m", "step",
++ "pll1_sw", "pll1_sys";
++ arm-supply = <&reg_arm>;
++ pu-supply = <&reg_pu>;
++ soc-supply = <&reg_soc>;
+ };
+ };
+
+ intc: interrupt-controller@00a01000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+- #address-cells = <1>;
+- #size-cells = <1>;
+ interrupt-controller;
+ reg = <0x00a01000 0x1000>,
+ <0x00a00100 0x100>;
+@@ -57,15 +80,21 @@
+
+ ckil {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ osc {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+
++ pu_dummy: pudummy_reg {
++ compatible = "fsl,imx6-dummy-pureg"; /* only used in ldo-bypass */
++ };
++
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -73,19 +102,45 @@
+ interrupt-parent = <&intc>;
+ ranges;
+
++ ocram: sram@00900000 {
++ compatible = "mmio-sram";
++ reg = <0x00900000 0x20000>;
++ clocks = <&clks IMX6SL_CLK_OCRAM>;
++ };
++
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks IMX6SL_CLK_PLL2_BUS>, <&clks IMX6SL_CLK_PLL2_PFD2>,
++ <&clks IMX6SL_CLK_PLL2_198M>, <&clks IMX6SL_CLK_ARM>,
++ <&clks IMX6SL_CLK_PLL3_USB_OTG>, <&clks IMX6SL_CLK_PERIPH>,
++ <&clks IMX6SL_CLK_PRE_PERIPH_SEL>, <&clks IMX6SL_CLK_PERIPH_CLK2>,
++ <&clks IMX6SL_CLK_PERIPH_CLK2_SEL>, <&clks IMX6SL_CLK_OSC>,
++ <&clks IMX6SL_CLK_PLL1_SYS>, <&clks IMX6SL_CLK_PERIPH2>,
++ <&clks IMX6SL_CLK_AHB>, <&clks IMX6SL_CLK_OCRAM>,
++ <&clks IMX6SL_CLK_PLL1_SW>, <&clks IMX6SL_CLK_PRE_PERIPH2_SEL>,
++ <&clks IMX6SL_CLK_PERIPH2_CLK2_SEL>, <&clks IMX6SL_CLK_PERIPH2_CLK2>,
++ <&clks IMX6SL_CLK_STEP>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc", "pll1_sys", "periph2", "ahb", "ocram", "pll1_sw",
++ "periph2_pre", "periph2_clk2_sel", "periph2_clk2", "step";
++ fsl,max_ddr_freq = <400000000>;
++ };
++
+ L2: l2-cache@00a02000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x00a02000 0x1000>;
+- interrupts = <0 92 0x04>;
++ interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
+ cache-unified;
+ cache-level = <2>;
+ arm,tag-latency = <4 2 3>;
+ arm,data-latency = <4 2 3>;
++ arm,dynamic-clk-gating;
++ arm,standby-mode;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+- interrupts = <0 94 0x04>;
++ interrupts = <0 94 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ aips1: aips-bus@02000000 {
+@@ -104,7 +159,7 @@
+
+ spdif: spdif@02004000 {
+ reg = <0x02004000 0x4000>;
+- interrupts = <0 52 0x04>;
++ interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ ecspi1: ecspi@02008000 {
+@@ -112,7 +167,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02008000 0x4000>;
+- interrupts = <0 31 0x04>;
++ interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ECSPI1>,
+ <&clks IMX6SL_CLK_ECSPI1>;
+ clock-names = "ipg", "per";
+@@ -124,7 +179,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-ecspi", "fsl,imx51-ecspi";
+ reg = <0x0200c000 0x4000>;
+- interrupts = <0 32 0x04>;
++ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ECSPI2>,
+ <&clks IMX6SL_CLK_ECSPI2>;
+ clock-names = "ipg", "per";
+@@ -136,7 +191,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02010000 0x4000>;
+- interrupts = <0 33 0x04>;
++ interrupts = <0 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ECSPI3>,
+ <&clks IMX6SL_CLK_ECSPI3>;
+ clock-names = "ipg", "per";
+@@ -148,7 +203,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02014000 0x4000>;
+- interrupts = <0 34 0x04>;
++ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ECSPI4>,
+ <&clks IMX6SL_CLK_ECSPI4>;
+ clock-names = "ipg", "per";
+@@ -159,7 +214,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02018000 0x4000>;
+- interrupts = <0 30 0x04>;
++ interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -172,7 +227,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02020000 0x4000>;
+- interrupts = <0 26 0x04>;
++ interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -185,7 +240,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02024000 0x4000>;
+- interrupts = <0 27 0x04>;
++ interrupts = <0 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -195,9 +250,11 @@
+ };
+
+ ssi1: ssi@02028000 {
+- compatible = "fsl,imx6sl-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6sl-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x02028000 0x4000>;
+- interrupts = <0 46 0x04>;
++ interrupts = <0 46 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_SSI1>;
+ dmas = <&sdma 37 1 0>,
+ <&sdma 38 1 0>;
+@@ -207,9 +264,11 @@
+ };
+
+ ssi2: ssi@0202c000 {
+- compatible = "fsl,imx6sl-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6sl-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x0202c000 0x4000>;
+- interrupts = <0 47 0x04>;
++ interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_SSI2>;
+ dmas = <&sdma 41 1 0>,
+ <&sdma 42 1 0>;
+@@ -219,9 +278,11 @@
+ };
+
+ ssi3: ssi@02030000 {
+- compatible = "fsl,imx6sl-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6sl-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x02030000 0x4000>;
+- interrupts = <0 48 0x04>;
++ interrupts = <0 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_SSI3>;
+ dmas = <&sdma 45 1 0>,
+ <&sdma 46 1 0>;
+@@ -234,7 +295,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02034000 0x4000>;
+- interrupts = <0 28 0x04>;
++ interrupts = <0 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -247,7 +308,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02038000 0x4000>;
+- interrupts = <0 29 0x04>;
++ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -261,7 +322,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
+ reg = <0x02080000 0x4000>;
+- interrupts = <0 83 0x04>;
++ interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_PWM1>,
+ <&clks IMX6SL_CLK_PWM1>;
+ clock-names = "ipg", "per";
+@@ -271,7 +332,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
+ reg = <0x02084000 0x4000>;
+- interrupts = <0 84 0x04>;
++ interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_PWM2>,
+ <&clks IMX6SL_CLK_PWM2>;
+ clock-names = "ipg", "per";
+@@ -281,7 +342,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
+ reg = <0x02088000 0x4000>;
+- interrupts = <0 85 0x04>;
++ interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_PWM3>,
+ <&clks IMX6SL_CLK_PWM3>;
+ clock-names = "ipg", "per";
+@@ -291,7 +352,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
+ reg = <0x0208c000 0x4000>;
+- interrupts = <0 86 0x04>;
++ interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_PWM4>,
+ <&clks IMX6SL_CLK_PWM4>;
+ clock-names = "ipg", "per";
+@@ -300,7 +361,7 @@
+ gpt: gpt@02098000 {
+ compatible = "fsl,imx6sl-gpt";
+ reg = <0x02098000 0x4000>;
+- interrupts = <0 55 0x04>;
++ interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_GPT>,
+ <&clks IMX6SL_CLK_GPT_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -309,7 +370,8 @@
+ gpio1: gpio@0209c000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x0209c000 0x4000>;
+- interrupts = <0 66 0x04 0 67 0x04>;
++ interrupts = <0 66 IRQ_TYPE_LEVEL_HIGH>,
++ <0 67 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -319,7 +381,8 @@
+ gpio2: gpio@020a0000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x020a0000 0x4000>;
+- interrupts = <0 68 0x04 0 69 0x04>;
++ interrupts = <0 68 IRQ_TYPE_LEVEL_HIGH>,
++ <0 69 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -329,7 +392,8 @@
+ gpio3: gpio@020a4000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x020a4000 0x4000>;
+- interrupts = <0 70 0x04 0 71 0x04>;
++ interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>,
++ <0 71 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -339,7 +403,8 @@
+ gpio4: gpio@020a8000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x020a8000 0x4000>;
+- interrupts = <0 72 0x04 0 73 0x04>;
++ interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>,
++ <0 73 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -349,7 +414,8 @@
+ gpio5: gpio@020ac000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x020ac000 0x4000>;
+- interrupts = <0 74 0x04 0 75 0x04>;
++ interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>,
++ <0 75 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -357,21 +423,23 @@
+ };
+
+ kpp: kpp@020b8000 {
++ compatible = "fsl,imx6sl-kpp", "fsl,imx21-kpp";
+ reg = <0x020b8000 0x4000>;
+- interrupts = <0 82 0x04>;
++ interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6SL_CLK_DUMMY>;
+ };
+
+ wdog1: wdog@020bc000 {
+ compatible = "fsl,imx6sl-wdt", "fsl,imx21-wdt";
+ reg = <0x020bc000 0x4000>;
+- interrupts = <0 80 0x04>;
++ interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_DUMMY>;
+ };
+
+ wdog2: wdog@020c0000 {
+ compatible = "fsl,imx6sl-wdt", "fsl,imx21-wdt";
+ reg = <0x020c0000 0x4000>;
+- interrupts = <0 81 0x04>;
++ interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_DUMMY>;
+ status = "disabled";
+ };
+@@ -379,7 +447,8 @@
+ clks: ccm@020c4000 {
+ compatible = "fsl,imx6sl-ccm";
+ reg = <0x020c4000 0x4000>;
+- interrupts = <0 87 0x04 0 88 0x04>;
++ interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>,
++ <0 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ };
+
+@@ -388,7 +457,9 @@
+ "fsl,imx6q-anatop",
+ "syscon", "simple-bus";
+ reg = <0x020c8000 0x1000>;
+- interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
++ interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>,
++ <0 54 IRQ_TYPE_LEVEL_HIGH>,
++ <0 127 IRQ_TYPE_LEVEL_HIGH>;
+
+ regulator-1p1@110 {
+ compatible = "fsl,anatop-regulator";
+@@ -434,7 +505,7 @@
+
+ reg_arm: regulator-vddcore@140 {
+ compatible = "fsl,anatop-regulator";
+- regulator-name = "cpu";
++ regulator-name = "vddarm";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+@@ -454,7 +525,6 @@
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+- regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+@@ -484,18 +554,34 @@
+ };
+ };
+
++ tempmon: tempmon {
++ compatible = "fsl,imx6sl-tempmon", "fsl,imx6q-tempmon";
++ interrupts = <0 49 0x04>;
++ fsl,tempmon = <&anatop>;
++ fsl,tempmon-data = <&ocotp>;
++ clocks = <&clks IMX6SL_CLK_PLL3_USB_OTG>;
++ };
++
+ usbphy1: usbphy@020c9000 {
+ compatible = "fsl,imx6sl-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020c9000 0x1000>;
+- interrupts = <0 44 0x04>;
++ interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBPHY1>;
++ fsl,anatop = <&anatop>;
+ };
+
+ usbphy2: usbphy@020ca000 {
+ compatible = "fsl,imx6sl-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020ca000 0x1000>;
+- interrupts = <0 45 0x04>;
++ interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBPHY2>;
++ fsl,anatop = <&anatop>;
++ };
++
++ usbphy_nop1: usbphy_nop1 {
++ compatible = "usb-nop-xceiv";
++ clocks = <&clks IMX6SL_CLK_USBPHY1>;
++ clock-names = "main_clk";
+ };
+
+ snvs@020cc000 {
+@@ -507,271 +593,165 @@
+ snvs-rtc-lp@34 {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ reg = <0x34 0x58>;
+- interrupts = <0 19 0x04 0 20 0x04>;
++ interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>,
++ <0 20 IRQ_TYPE_LEVEL_HIGH>;
+ };
+- };
+-
+- epit1: epit@020d0000 {
+- reg = <0x020d0000 0x4000>;
+- interrupts = <0 56 0x04>;
+- };
+
+- epit2: epit@020d4000 {
+- reg = <0x020d4000 0x4000>;
+- interrupts = <0 57 0x04>;
+- };
+-
+- src: src@020d8000 {
+- compatible = "fsl,imx6sl-src", "fsl,imx51-src";
+- reg = <0x020d8000 0x4000>;
+- interrupts = <0 91 0x04 0 96 0x04>;
+- #reset-cells = <1>;
+- };
+-
+- gpc: gpc@020dc000 {
+- compatible = "fsl,imx6sl-gpc", "fsl,imx6q-gpc";
+- reg = <0x020dc000 0x4000>;
+- interrupts = <0 89 0x04>;
+- };
+-
+- gpr: iomuxc-gpr@020e0000 {
+- compatible = "fsl,imx6sl-iomuxc-gpr",
+- "fsl,imx6q-iomuxc-gpr", "syscon";
+- reg = <0x020e0000 0x38>;
+- };
+-
+- iomuxc: iomuxc@020e0000 {
+- compatible = "fsl,imx6sl-iomuxc";
+- reg = <0x020e0000 0x4000>;
+-
+- ecspi1 {
+- pinctrl_ecspi1_1: ecspi1grp-1 {
++ csi {
++ pinctrl_csi_0: csigrp-0 {
+ fsl,pins = <
+- MX6SL_PAD_ECSPI1_MISO__ECSPI1_MISO 0x100b1
+- MX6SL_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x100b1
+- MX6SL_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x100b1
++ MX6SL_PAD_EPDC_GDRL__CSI_MCLK 0x110b0
++ MX6SL_PAD_EPDC_GDCLK__CSI_PIXCLK 0x110b0
++ MX6SL_PAD_EPDC_GDSP__CSI_VSYNC 0x110b0
++ MX6SL_PAD_EPDC_GDOE__CSI_HSYNC 0x110b0
++ MX6SL_PAD_EPDC_SDLE__CSI_DATA09 0x110b0
++ MX6SL_PAD_EPDC_SDCLK__CSI_DATA08 0x110b0
++ MX6SL_PAD_EPDC_D7__CSI_DATA07 0x110b0
++ MX6SL_PAD_EPDC_D6__CSI_DATA06 0x110b0
++ MX6SL_PAD_EPDC_D5__CSI_DATA05 0x110b0
++ MX6SL_PAD_EPDC_D4__CSI_DATA04 0x110b0
++ MX6SL_PAD_EPDC_D3__CSI_DATA03 0x110b0
++ MX6SL_PAD_EPDC_D2__CSI_DATA02 0x110b0
++ MX6SL_PAD_EPDC_D1__CSI_DATA01 0x110b0
++ MX6SL_PAD_EPDC_D0__CSI_DATA00 0x110b0
++ MX6SL_PAD_EPDC_SDSHR__GPIO1_IO26 0x80000000
++ MX6SL_PAD_EPDC_SDOE__GPIO1_IO25 0x80000000
+ >;
+ };
+ };
+
+- fec {
+- pinctrl_fec_1: fecgrp-1 {
++ i2c1 {
++ pinctrl_i2c1_1: i2c1grp-1 {
+ fsl,pins = <
+- MX6SL_PAD_FEC_MDC__FEC_MDC 0x1b0b0
+- MX6SL_PAD_FEC_MDIO__FEC_MDIO 0x1b0b0
+- MX6SL_PAD_FEC_CRS_DV__FEC_RX_DV 0x1b0b0
+- MX6SL_PAD_FEC_RXD0__FEC_RX_DATA0 0x1b0b0
+- MX6SL_PAD_FEC_RXD1__FEC_RX_DATA1 0x1b0b0
+- MX6SL_PAD_FEC_TX_EN__FEC_TX_EN 0x1b0b0
+- MX6SL_PAD_FEC_TXD0__FEC_TX_DATA0 0x1b0b0
+- MX6SL_PAD_FEC_TXD1__FEC_TX_DATA1 0x1b0b0
+- MX6SL_PAD_FEC_REF_CLK__FEC_REF_OUT 0x4001b0a8
++ MX6SL_PAD_I2C1_SCL__I2C1_SCL 0x4001b8b1
++ MX6SL_PAD_I2C1_SDA__I2C1_SDA 0x4001b8b1
+ >;
+ };
+ };
+
+- uart1 {
+- pinctrl_uart1_1: uart1grp-1 {
++ i2c2 {
++ pinctrl_i2c2_1: i2c2grp-1 {
+ fsl,pins = <
+- MX6SL_PAD_UART1_RXD__UART1_RX_DATA 0x1b0b1
+- MX6SL_PAD_UART1_TXD__UART1_TX_DATA 0x1b0b1
++ MX6SL_PAD_I2C2_SCL__I2C2_SCL 0x4001b8b1
++ MX6SL_PAD_I2C2_SDA__I2C2_SDA 0x4001b8b1
+ >;
+ };
+ };
+
+- usbotg1 {
+- pinctrl_usbotg1_1: usbotg1grp-1 {
+- fsl,pins = <
+- MX6SL_PAD_EPDC_PWRCOM__USB_OTG1_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg1_2: usbotg1grp-2 {
++ i2c3 {
++ pinctrl_i2c3_1: i2c3grp-1 {
+ fsl,pins = <
+- MX6SL_PAD_FEC_RXD0__USB_OTG1_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg1_3: usbotg1grp-3 {
+- fsl,pins = <
+- MX6SL_PAD_LCD_DAT1__USB_OTG1_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg1_4: usbotg1grp-4 {
+- fsl,pins = <
+- MX6SL_PAD_REF_CLK_32K__USB_OTG1_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg1_5: usbotg1grp-5 {
+- fsl,pins = <
+- MX6SL_PAD_SD3_DAT0__USB_OTG1_ID 0x17059
++ MX6SL_PAD_EPDC_SDCE2__I2C3_SCL 0x4001b8b1
++ MX6SL_PAD_EPDC_SDCE3__I2C3_SDA 0x4001b8b1
+ >;
+ };
+ };
+
+- usbotg2 {
+- pinctrl_usbotg2_1: usbotg2grp-1 {
+- fsl,pins = <
+- MX6SL_PAD_ECSPI1_SCLK__USB_OTG2_OC 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg2_2: usbotg2grp-2 {
++ lcdif {
++ pinctrl_lcdif_dat_0: lcdifdatgrp-0 {
+ fsl,pins = <
+- MX6SL_PAD_ECSPI2_SCLK__USB_OTG2_OC 0x17059
++ MX6SL_PAD_LCD_DAT0__LCD_DATA00 0x1b0b0
++ MX6SL_PAD_LCD_DAT1__LCD_DATA01 0x1b0b0
++ MX6SL_PAD_LCD_DAT2__LCD_DATA02 0x1b0b0
++ MX6SL_PAD_LCD_DAT3__LCD_DATA03 0x1b0b0
++ MX6SL_PAD_LCD_DAT4__LCD_DATA04 0x1b0b0
++ MX6SL_PAD_LCD_DAT5__LCD_DATA05 0x1b0b0
++ MX6SL_PAD_LCD_DAT6__LCD_DATA06 0x1b0b0
++ MX6SL_PAD_LCD_DAT7__LCD_DATA07 0x1b0b0
++ MX6SL_PAD_LCD_DAT8__LCD_DATA08 0x1b0b0
++ MX6SL_PAD_LCD_DAT9__LCD_DATA09 0x1b0b0
++ MX6SL_PAD_LCD_DAT10__LCD_DATA10 0x1b0b0
++ MX6SL_PAD_LCD_DAT11__LCD_DATA11 0x1b0b0
++ MX6SL_PAD_LCD_DAT12__LCD_DATA12 0x1b0b0
++ MX6SL_PAD_LCD_DAT13__LCD_DATA13 0x1b0b0
++ MX6SL_PAD_LCD_DAT14__LCD_DATA14 0x1b0b0
++ MX6SL_PAD_LCD_DAT15__LCD_DATA15 0x1b0b0
++ MX6SL_PAD_LCD_DAT16__LCD_DATA16 0x1b0b0
++ MX6SL_PAD_LCD_DAT17__LCD_DATA17 0x1b0b0
++ MX6SL_PAD_LCD_DAT18__LCD_DATA18 0x1b0b0
++ MX6SL_PAD_LCD_DAT19__LCD_DATA19 0x1b0b0
++ MX6SL_PAD_LCD_DAT20__LCD_DATA20 0x1b0b0
++ MX6SL_PAD_LCD_DAT21__LCD_DATA21 0x1b0b0
++ MX6SL_PAD_LCD_DAT22__LCD_DATA22 0x1b0b0
++ MX6SL_PAD_LCD_DAT23__LCD_DATA23 0x1b0b0
+ >;
+ };
+
+- pinctrl_usbotg2_3: usbotg2grp-3 {
++ pinctrl_lcdif_ctrl_0: lcdifctrlgrp-0 {
+ fsl,pins = <
+- MX6SL_PAD_KEY_ROW5__USB_OTG2_OC 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg2_4: usbotg2grp-4 {
+- fsl,pins = <
+- MX6SL_PAD_SD3_DAT2__USB_OTG2_OC 0x17059
++ MX6SL_PAD_LCD_CLK__LCD_CLK 0x1b0b0
++ MX6SL_PAD_LCD_ENABLE__LCD_ENABLE 0x1b0b0
++ MX6SL_PAD_LCD_HSYNC__LCD_HSYNC 0x1b0b0
++ MX6SL_PAD_LCD_VSYNC__LCD_VSYNC 0x1b0b0
++ MX6SL_PAD_LCD_RESET__LCD_RESET 0x1b0b0
+ >;
+ };
+ };
+
+- usdhc1 {
+- pinctrl_usdhc1_1: usdhc1grp-1 {
++ pwm1 {
++ pinctrl_pwm1_0: pwm1grp-0 {
+ fsl,pins = <
+- MX6SL_PAD_SD1_CMD__SD1_CMD 0x17059
+- MX6SL_PAD_SD1_CLK__SD1_CLK 0x10059
+- MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+- MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+- MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+- MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+- MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x17059
+- MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x17059
+- MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x17059
+- MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x17059
++ MX6SL_PAD_PWM1__PWM1_OUT 0x110b0
+ >;
+ };
+-
+- pinctrl_usdhc1_1_100mhz: usdhc1grp-1-100mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD1_CMD__SD1_CMD 0x170b9
+- MX6SL_PAD_SD1_CLK__SD1_CLK 0x100b9
+- MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170b9
+- MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170b9
+- MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170b9
+- MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170b9
+- MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170b9
+- MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170b9
+- MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170b9
+- MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170b9
+- >;
+- };
+-
+- pinctrl_usdhc1_1_200mhz: usdhc1grp-1-200mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD1_CMD__SD1_CMD 0x170f9
+- MX6SL_PAD_SD1_CLK__SD1_CLK 0x100f9
+- MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+- MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+- MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+- MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+- MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170f9
+- MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170f9
+- MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170f9
+- MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170f9
+- >;
+- };
+-
+-
+ };
++ };
+
+- usdhc2 {
+- pinctrl_usdhc2_1: usdhc2grp-1 {
+- fsl,pins = <
+- MX6SL_PAD_SD2_CMD__SD2_CMD 0x17059
+- MX6SL_PAD_SD2_CLK__SD2_CLK 0x10059
+- MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+- MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+- MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+- MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc2_1_100mhz: usdhc2grp-1-100mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD2_CMD__SD2_CMD 0x170b9
+- MX6SL_PAD_SD2_CLK__SD2_CLK 0x100b9
+- MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
+- MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
+- MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
+- MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
+- >;
+- };
++ epit1: epit@020d0000 {
++ reg = <0x020d0000 0x4000>;
++ interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
++ };
+
+- pinctrl_usdhc2_1_200mhz: usdhc2grp-1-200mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD2_CMD__SD2_CMD 0x170f9
+- MX6SL_PAD_SD2_CLK__SD2_CLK 0x100f9
+- MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
+- MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
+- MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
+- MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170f9
+- >;
+- };
++ epit2: epit@020d4000 {
++ reg = <0x020d4000 0x4000>;
++ interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
++ };
+
+- };
++ src: src@020d8000 {
++ compatible = "fsl,imx6sl-src", "fsl,imx51-src";
++ reg = <0x020d8000 0x4000>;
++ interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>,
++ <0 96 IRQ_TYPE_LEVEL_HIGH>;
++ #reset-cells = <1>;
++ };
+
+- usdhc3 {
+- pinctrl_usdhc3_1: usdhc3grp-1 {
+- fsl,pins = <
+- MX6SL_PAD_SD3_CMD__SD3_CMD 0x17059
+- MX6SL_PAD_SD3_CLK__SD3_CLK 0x10059
+- MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+- MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+- MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+- MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+- >;
+- };
++ gpc: gpc@020dc000 {
++ compatible = "fsl,imx6sl-gpc", "fsl,imx6q-gpc";
++ reg = <0x020dc000 0x4000>;
++ interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6SL_CLK_GPU2D_PODF>, <&clks IMX6SL_CLK_GPU2D_OVG>,
++ <&clks IMX6SL_CLK_IPG>;
++ clock-names = "gpu2d_podf", "gpu2d_ovg", "ipg";
++ pu-supply = <&reg_pu>;
++ };
+
+- pinctrl_usdhc3_1_100mhz: usdhc3grp-1-100mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD3_CMD__SD3_CMD 0x170b9
+- MX6SL_PAD_SD3_CLK__SD3_CLK 0x100b9
+- MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+- MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+- MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+- MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+- >;
+- };
++ gpr: iomuxc-gpr@020e0000 {
++ compatible = "fsl,imx6sl-iomuxc-gpr",
++ "fsl,imx6q-iomuxc-gpr", "syscon";
++ reg = <0x020e0000 0x38>;
++ };
+
+- pinctrl_usdhc3_1_200mhz: usdhc3grp-1-200mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD3_CMD__SD3_CMD 0x170f9
+- MX6SL_PAD_SD3_CLK__SD3_CLK 0x100f9
+- MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+- MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+- MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+- MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+- >;
+- };
+- };
++ iomuxc: iomuxc@020e0000 {
++ compatible = "fsl,imx6sl-iomuxc";
++ reg = <0x020e0000 0x4000>;
+ };
+
+ csi: csi@020e4000 {
++ compatible = "fsl,imx6sl-csi";
+ reg = <0x020e4000 0x4000>;
+- interrupts = <0 7 0x04>;
++ interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
+ };
+
+ spdc: spdc@020e8000 {
+ reg = <0x020e8000 0x4000>;
+- interrupts = <0 6 0x04>;
++ interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sdma: sdma@020ec000 {
+ compatible = "fsl,imx6sl-sdma", "fsl,imx35-sdma";
+ reg = <0x020ec000 0x4000>;
+- interrupts = <0 2 0x04>;
++ interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_SDMA>,
+ <&clks IMX6SL_CLK_SDMA>;
+ clock-names = "ipg", "ahb";
+@@ -781,23 +761,32 @@
+ };
+
+ pxp: pxp@020f0000 {
++ compatible = "fsl,imx6sl-pxp-dma", "fsl,imx6dl-pxp-dma";
+ reg = <0x020f0000 0x4000>;
+- interrupts = <0 98 0x04>;
++ interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 111>;
++ clock-names = "pxp-axi";
++ status = "disabled";
+ };
+
+ epdc: epdc@020f4000 {
+ reg = <0x020f4000 0x4000>;
+- interrupts = <0 97 0x04>;
++ interrupts = <0 97 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ lcdif: lcdif@020f8000 {
++ compatible = "fsl,imx6sl-lcdif", "fsl,imx28-lcdif";
+ reg = <0x020f8000 0x4000>;
+- interrupts = <0 39 0x04>;
++ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6SL_CLK_LCDIF_PIX>,
++ <&clks IMX6SL_CLK_LCDIF_AXI>;
++ clock-names = "pix", "axi";
++ status = "disabled";
+ };
+
+ dcp: dcp@020fc000 {
+ reg = <0x020fc000 0x4000>;
+- interrupts = <0 99 0x04>;
++ interrupts = <0 99 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+@@ -811,7 +800,7 @@
+ usbotg1: usb@02184000 {
+ compatible = "fsl,imx6sl-usb", "fsl,imx27-usb";
+ reg = <0x02184000 0x200>;
+- interrupts = <0 43 0x04>;
++ interrupts = <0 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBOH3>;
+ fsl,usbphy = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc 0>;
+@@ -821,7 +810,7 @@
+ usbotg2: usb@02184200 {
+ compatible = "fsl,imx6sl-usb", "fsl,imx27-usb";
+ reg = <0x02184200 0x200>;
+- interrupts = <0 42 0x04>;
++ interrupts = <0 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBOH3>;
+ fsl,usbphy = <&usbphy2>;
+ fsl,usbmisc = <&usbmisc 1>;
+@@ -831,9 +820,12 @@
+ usbh: usb@02184400 {
+ compatible = "fsl,imx6sl-usb", "fsl,imx27-usb";
+ reg = <0x02184400 0x200>;
+- interrupts = <0 40 0x04>;
++ interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBOH3>;
+ fsl,usbmisc = <&usbmisc 2>;
++ phy_type = "hsic";
++ fsl,usbphy = <&usbphy_nop1>;
++ fsl,anatop = <&anatop>;
+ status = "disabled";
+ };
+
+@@ -847,7 +839,7 @@
+ fec: ethernet@02188000 {
+ compatible = "fsl,imx6sl-fec", "fsl,imx25-fec";
+ reg = <0x02188000 0x4000>;
+- interrupts = <0 114 0x04>;
++ interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ENET_REF>,
+ <&clks IMX6SL_CLK_ENET_REF>;
+ clock-names = "ipg", "ahb";
+@@ -857,7 +849,7 @@
+ usdhc1: usdhc@02190000 {
+ compatible = "fsl,imx6sl-usdhc", "fsl,imx6q-usdhc";
+ reg = <0x02190000 0x4000>;
+- interrupts = <0 22 0x04>;
++ interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USDHC1>,
+ <&clks IMX6SL_CLK_USDHC1>,
+ <&clks IMX6SL_CLK_USDHC1>;
+@@ -869,7 +861,7 @@
+ usdhc2: usdhc@02194000 {
+ compatible = "fsl,imx6sl-usdhc", "fsl,imx6q-usdhc";
+ reg = <0x02194000 0x4000>;
+- interrupts = <0 23 0x04>;
++ interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USDHC2>,
+ <&clks IMX6SL_CLK_USDHC2>,
+ <&clks IMX6SL_CLK_USDHC2>;
+@@ -881,7 +873,7 @@
+ usdhc3: usdhc@02198000 {
+ compatible = "fsl,imx6sl-usdhc", "fsl,imx6q-usdhc";
+ reg = <0x02198000 0x4000>;
+- interrupts = <0 24 0x04>;
++ interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USDHC3>,
+ <&clks IMX6SL_CLK_USDHC3>,
+ <&clks IMX6SL_CLK_USDHC3>;
+@@ -893,7 +885,7 @@
+ usdhc4: usdhc@0219c000 {
+ compatible = "fsl,imx6sl-usdhc", "fsl,imx6q-usdhc";
+ reg = <0x0219c000 0x4000>;
+- interrupts = <0 25 0x04>;
++ interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USDHC4>,
+ <&clks IMX6SL_CLK_USDHC4>,
+ <&clks IMX6SL_CLK_USDHC4>;
+@@ -907,7 +899,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-i2c", "fsl,imx21-i2c";
+ reg = <0x021a0000 0x4000>;
+- interrupts = <0 36 0x04>;
++ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_I2C1>;
+ status = "disabled";
+ };
+@@ -917,7 +909,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-i2c", "fsl,imx21-i2c";
+ reg = <0x021a4000 0x4000>;
+- interrupts = <0 37 0x04>;
++ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_I2C2>;
+ status = "disabled";
+ };
+@@ -927,7 +919,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-i2c", "fsl,imx21-i2c";
+ reg = <0x021a8000 0x4000>;
+- interrupts = <0 38 0x04>;
++ interrupts = <0 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_I2C3>;
+ status = "disabled";
+ };
+@@ -939,17 +931,23 @@
+
+ rngb: rngb@021b4000 {
+ reg = <0x021b4000 0x4000>;
+- interrupts = <0 5 0x04>;
++ interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ weim: weim@021b8000 {
+ reg = <0x021b8000 0x4000>;
+- interrupts = <0 14 0x04>;
++ interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ ocotp: ocotp-ctrl@021bc000 {
++ compatible = "syscon";
++ reg = <0x021bc000 0x4000>;
+ };
+
+- ocotp: ocotp@021bc000 {
+- compatible = "fsl,imx6sl-ocotp";
++ ocotp-fuse@021bc000 {
++ compatible = "fsl,imx6sl-ocotp", "fsl,imx6q-ocotp";
+ reg = <0x021bc000 0x4000>;
++ clocks = <&clks IMX6SL_CLK_OCOTP>;
+ };
+
+ audmux: audmux@021d8000 {
+@@ -957,6 +955,25 @@
+ reg = <0x021d8000 0x4000>;
+ status = "disabled";
+ };
++
++ gpu: gpu@02200000 {
++ compatible = "fsl,imx6sl-gpu", "fsl,imx6q-gpu";
++ reg = <0x02200000 0x4000>, <0x02204000 0x4000>,
++ <0x80000000 0x0>;
++ reg-names = "iobase_2d", "iobase_vg",
++ "phys_baseaddr";
++ interrupts = <0 10 0x04>, <0 11 0x04>;
++ interrupt-names = "irq_2d", "irq_vg";
++ clocks = <&clks IMX6SL_CLK_MMDC_ROOT>,
++ <&clks IMX6SL_CLK_MMDC_ROOT>,
++ <&clks IMX6SL_CLK_GPU2D_OVG>;
++ clock-names = "gpu2d_axi_clk", "openvg_axi_clk",
++ "gpu2d_clk";
++ resets = <&src 3>, <&src 3>;
++ reset-names = "gpu2d", "gpuvg";
++ pu-supply = <&reg_pu>;
++ };
++
+ };
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6sl-evk-csi.dts linux-openelec/arch/arm/boot/dts/imx6sl-evk-csi.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6sl-evk-csi.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/imx6sl-evk-csi.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,27 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "imx6sl-evk.dts"
++
++/ {
++ csi_v4l2_cap {
++ status = "okay";
++ };
++};
++
++&csi {
++ status = "okay";
++};
++
++&i2c3 {
++ status = "okay";
++};
++
++&epdc {
++ status = "disabled";
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/imx6sl-evk.dts linux-openelec/arch/arm/boot/dts/imx6sl-evk.dts
+--- linux-3.14.36/arch/arm/boot/dts/imx6sl-evk.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/imx6sl-evk.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -8,6 +8,8 @@
+
+ /dts-v1/;
+
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ #include "imx6sl.dtsi"
+
+ / {
+@@ -18,11 +20,26 @@
+ reg = <0x80000000 0x40000000>;
+ };
+
++ leds {
++ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_led>;
++
++ user {
++ label = "debug";
++ gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
++ linux,default-trigger = "heartbeat";
++ };
++ };
++
+ regulators {
+ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- reg_usb_otg1_vbus: usb_otg1_vbus {
++ reg_usb_otg1_vbus: regulator@0 {
+ compatible = "regulator-fixed";
++ reg = <0>;
+ regulator-name = "usb_otg1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+@@ -30,22 +47,63 @@
+ enable-active-high;
+ };
+
+- reg_usb_otg2_vbus: usb_otg2_vbus {
++ reg_usb_otg2_vbus: regulator@1 {
+ compatible = "regulator-fixed";
++ reg = <1>;
+ regulator-name = "usb_otg2_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 2 0>;
+ enable-active-high;
+ };
++
++ reg_aud3v: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "wm8962-supply-3v15";
++ regulator-min-microvolt = <3150000>;
++ regulator-max-microvolt = <3150000>;
++ regulator-boot-on;
++ };
++
++ reg_aud4v: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "wm8962-supply-4v2";
++ regulator-min-microvolt = <4325000>;
++ regulator-max-microvolt = <4325000>;
++ regulator-boot-on;
++ };
+ };
++
++ sound {
++ compatible = "fsl,imx6sl-evk-wm8962", "fsl,imx-audio-wm8962";
++ model = "wm8962-audio";
++ ssi-controller = <&ssi2>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "Headphone Jack", "HPOUTL",
++ "Headphone Jack", "HPOUTR",
++ "Ext Spk", "SPKOUTL",
++ "Ext Spk", "SPKOUTR",
++ "AMIC", "MICBIAS",
++ "IN3R", "AMIC";
++ mux-int-port = <2>;
++ mux-ext-port = <3>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux3>;
++ status = "okay";
+ };
+
+ &ecspi1 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio4 11 0>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi1_1>;
++ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash: m25p80@0 {
+@@ -57,18 +115,326 @@
+ };
+ };
+
++&csi {
++ status = "okay";
++};
++
++&cpu0 {
++ arm-supply = <&sw1a_reg>;
++ soc-supply = <&sw1c_reg>;
++ pu-supply = <&pu_dummy>; /* use pu_dummy if VDDSOC share with VDDPU */
++};
++
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_fec_1>;
++ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rmii";
+ status = "okay";
+ };
+
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ regulator-always-on;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ regulators {
++ compatible = "simple-bus";
++
++ reg_lcd_3v3: lcd-3v3 {
++ compatible = "regulator-fixed";
++ regulator-name = "lcd-3v3";
++ gpio = <&gpio4 3 0>;
++ enable-active-high;
++ };
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <6>;
++ };
++
++ csi_v4l2_cap {
++ compatible = "fsl,imx6sl-csi-v4l2";
++ status = "okay";
++ };
++
++ pxp_v4l2_out {
++ compatible = "fsl,imx6sl-pxp-v4l2";
++ status = "okay";
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ codec: wm8962@1a {
++ compatible = "wlf,wm8962";
++ reg = <0x1a>;
++ clocks = <&clks IMX6SL_CLK_EXTERN_AUDIO>;
++ DCVDD-supply = <&vgen3_reg>;
++ DBVDD-supply = <&reg_aud3v>;
++ AVDD-supply = <&vgen3_reg>;
++ CPVDD-supply = <&vgen3_reg>;
++ MICVDD-supply = <&reg_aud3v>;
++ PLLVDD-supply = <&vgen3_reg>;
++ SPKVDD1-supply = <&reg_aud4v>;
++ SPKVDD2-supply = <&reg_aud4v>;
++ };
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1_1>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ mma8450@1c {
++ compatible = "fsl,mma8450";
++ reg = <0x1c>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2_1>;
++ status = "okay";
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3_1>;
++ status = "okay";
++
++ ov564x: ov564x@3c {
++ compatible = "ovti,ov564x";
++ reg = <0x3c>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_csi_0>;
++ clocks = <&clks IMX6SL_CLK_CSI>;
++ clock-names = "csi_mclk";
++ AVDD-supply = <&vgen6_reg>; /* 2.8v */
++ DVDD-supply = <&vgen2_reg>; /* 1.5v*/
++ pwn-gpios = <&gpio1 25 1>;
++ rst-gpios = <&gpio1 26 0>;
++ csi_id = <0>;
++ mclk = <24000000>;
++ mclk_source = <0>;
++ };
++};
++
+ &iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6sl-evk {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6SL_PAD_KEY_ROW7__GPIO4_IO07 0x17059
+@@ -78,21 +444,270 @@
+ MX6SL_PAD_REF_CLK_32K__GPIO3_IO22 0x17059
+ MX6SL_PAD_KEY_COL4__GPIO4_IO00 0x80000000
+ MX6SL_PAD_KEY_COL5__GPIO4_IO02 0x80000000
++ MX6SL_PAD_AUD_MCLK__AUDIO_CLK_OUT 0x4130b0
++ >;
++ };
++
++ pinctrl_audmux3: audmux3grp {
++ fsl,pins = <
++ MX6SL_PAD_AUD_RXD__AUD3_RXD 0x4130b0
++ MX6SL_PAD_AUD_TXC__AUD3_TXC 0x4130b0
++ MX6SL_PAD_AUD_TXD__AUD3_TXD 0x4110b0
++ MX6SL_PAD_AUD_TXFS__AUD3_TXFS 0x4130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6SL_PAD_ECSPI1_MISO__ECSPI1_MISO 0x100b1
++ MX6SL_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x100b1
++ MX6SL_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x100b1
++ MX6SL_PAD_ECSPI1_SS0__GPIO4_IO11 0x80000000
++ >;
++ };
++
++ pinctrl_fec: fecgrp {
++ fsl,pins = <
++ MX6SL_PAD_FEC_MDC__FEC_MDC 0x1b0b0
++ MX6SL_PAD_FEC_MDIO__FEC_MDIO 0x1b0b0
++ MX6SL_PAD_FEC_CRS_DV__FEC_RX_DV 0x1b0b0
++ MX6SL_PAD_FEC_RXD0__FEC_RX_DATA0 0x1b0b0
++ MX6SL_PAD_FEC_RXD1__FEC_RX_DATA1 0x1b0b0
++ MX6SL_PAD_FEC_TX_EN__FEC_TX_EN 0x1b0b0
++ MX6SL_PAD_FEC_TXD0__FEC_TX_DATA0 0x1b0b0
++ MX6SL_PAD_FEC_TXD1__FEC_TX_DATA1 0x1b0b0
++ MX6SL_PAD_FEC_REF_CLK__FEC_REF_OUT 0x4001b0a8
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6SL_PAD_I2C1_SCL__I2C1_SCL 0x4001b8b1
++ MX6SL_PAD_I2C1_SDA__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6SL_PAD_I2C2_SCL__I2C2_SCL 0x4001b8b1
++ MX6SL_PAD_I2C2_SDA__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_led: ledgrp {
++ fsl,pins = <
++ MX6SL_PAD_HSIC_STROBE__GPIO3_IO20 0x17059
++ >;
++ };
++
++ pinctrl_kpp: kppgrp {
++ fsl,pins = <
++ MX6SL_PAD_KEY_ROW0__KEY_ROW0 0x1b010
++ MX6SL_PAD_KEY_ROW1__KEY_ROW1 0x1b010
++ MX6SL_PAD_KEY_ROW2__KEY_ROW2 0x1b0b0
++ MX6SL_PAD_KEY_COL0__KEY_COL0 0x110b0
++ MX6SL_PAD_KEY_COL1__KEY_COL1 0x110b0
++ MX6SL_PAD_KEY_COL2__KEY_COL2 0x110b0
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6SL_PAD_UART1_RXD__UART1_RX_DATA 0x1b0b1
++ MX6SL_PAD_UART1_TXD__UART1_TX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg1: usbotg1grp {
++ fsl,pins = <
++ MX6SL_PAD_EPDC_PWRCOM__USB_OTG1_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc1: usdhc1grp {
++ fsl,pins = <
++ MX6SL_PAD_SD1_CMD__SD1_CMD 0x17059
++ MX6SL_PAD_SD1_CLK__SD1_CLK 0x10059
++ MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x17059
++ MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x17059
++ MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x17059
++ MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x17059
++ MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x17059
++ MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x17059
++ MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x17059
++ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD1_CMD__SD1_CMD 0x170b9
++ MX6SL_PAD_SD1_CLK__SD1_CLK 0x100b9
++ MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170b9
++ MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170b9
++ MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170b9
++ MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170b9
++ MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170b9
++ MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170b9
++ MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170b9
++ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170b9
++ >;
++ };
++
++ pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD1_CMD__SD1_CMD 0x170f9
++ MX6SL_PAD_SD1_CLK__SD1_CLK 0x100f9
++ MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
++ MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
++ MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
++ MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
++ MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170f9
++ MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170f9
++ MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170f9
++ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170f9
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6SL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6SL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD2_CMD__SD2_CMD 0x170b9
++ MX6SL_PAD_SD2_CLK__SD2_CLK 0x100b9
++ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
++ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
++ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
++ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
++ >;
++ };
++
++ pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD2_CMD__SD2_CMD 0x170f9
++ MX6SL_PAD_SD2_CLK__SD2_CLK 0x100f9
++ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
++ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
++ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
++ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170f9
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6SL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6SL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD3_CMD__SD3_CMD 0x170b9
++ MX6SL_PAD_SD3_CLK__SD3_CLK 0x100b9
++ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
++ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
++ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
++ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
++ >;
++ };
++
++ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD3_CMD__SD3_CMD 0x170f9
++ MX6SL_PAD_SD3_CLK__SD3_CLK 0x100f9
++ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
++ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
++ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
++ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+ >;
+ };
+ };
+ };
+
++&kpp {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_kpp>;
++ linux,keymap = <
++ MATRIX_KEY(0x0, 0x0, KEY_UP) /* ROW0, COL0 */
++ MATRIX_KEY(0x0, 0x1, KEY_DOWN) /* ROW0, COL1 */
++ MATRIX_KEY(0x0, 0x2, KEY_ENTER) /* ROW0, COL2 */
++ MATRIX_KEY(0x1, 0x0, KEY_HOME) /* ROW1, COL0 */
++ MATRIX_KEY(0x1, 0x1, KEY_RIGHT) /* ROW1, COL1 */
++ MATRIX_KEY(0x1, 0x2, KEY_LEFT) /* ROW1, COL2 */
++ MATRIX_KEY(0x2, 0x0, KEY_VOLUMEDOWN) /* ROW2, COL0 */
++ MATRIX_KEY(0x2, 0x1, KEY_VOLUMEUP) /* ROW2, COL1 */
++ >;
++ status = "okay";
++};
++
++&ssi2 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&lcdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_lcdif_dat_0
++ &pinctrl_lcdif_ctrl_0>;
++ lcd-supply = <&reg_lcd_3v3>;
++ display = <&display>;
++ status = "okay";
++
++ display: display {
++ bits-per-pixel = <16>;
++ bus-width = <24>;
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: timing0 {
++ clock-frequency = <33500000>;
++ hactive = <800>;
++ vactive = <480>;
++ hback-porch = <89>;
++ hfront-porch = <164>;
++ vback-porch = <23>;
++ vfront-porch = <10>;
++ hsync-len = <10>;
++ vsync-len = <10>;
++ hsync-active = <0>;
++ vsync-active = <0>;
++ de-active = <1>;
++ pixelclk-active = <0>;
++ };
++ };
++ };
++};
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1_0>;
++ status = "okay";
++};
++
+ &uart1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart1_1>;
++ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+ };
+
+ &usbotg1 {
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg1_1>;
++ pinctrl-0 = <&pinctrl_usbotg1>;
+ disable-over-current;
+ status = "okay";
+ };
+@@ -106,9 +721,9 @@
+
+ &usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+- pinctrl-0 = <&pinctrl_usdhc1_1>;
+- pinctrl-1 = <&pinctrl_usdhc1_1_100mhz>;
+- pinctrl-2 = <&pinctrl_usdhc1_1_200mhz>;
++ pinctrl-0 = <&pinctrl_usdhc1>;
++ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
++ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ bus-width = <8>;
+ cd-gpios = <&gpio4 7 0>;
+ wp-gpios = <&gpio4 6 0>;
+@@ -117,9 +732,9 @@
+
+ &usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+- pinctrl-0 = <&pinctrl_usdhc2_1>;
+- pinctrl-1 = <&pinctrl_usdhc2_1_100mhz>;
+- pinctrl-2 = <&pinctrl_usdhc2_1_200mhz>;
++ pinctrl-0 = <&pinctrl_usdhc2>;
++ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
++ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ cd-gpios = <&gpio5 0 0>;
+ wp-gpios = <&gpio4 29 0>;
+ status = "okay";
+@@ -127,9 +742,26 @@
+
+ &usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+- pinctrl-0 = <&pinctrl_usdhc3_1>;
+- pinctrl-1 = <&pinctrl_usdhc3_1_100mhz>;
+- pinctrl-2 = <&pinctrl_usdhc3_1_200mhz>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
++ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ cd-gpios = <&gpio3 22 0>;
+ status = "okay";
+ };
++
++&pxp {
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++ fsl,ldo-bypass; /* use ldo-bypass, u-boot will check it and configure */
++ pu-supply = <&pu_dummy>; /* ldo-bypass:use pu_dummy if VDDSOC share with VDDPU */
++};
++
++&gpu {
++ pu-supply = <&pu_dummy>; /* ldo-bypass:use pu_dummy if VDDSOC share with VDDPU */
++};
+diff -Nur linux-3.14.36/arch/arm/boot/dts/Makefile linux-openelec/arch/arm/boot/dts/Makefile
+--- linux-3.14.36/arch/arm/boot/dts/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/Makefile 2015-07-24 18:03:30.200842002 -0500
+@@ -154,16 +154,38 @@
+ imx53-qsb.dtb \
+ imx53-smd.dtb \
+ imx6dl-cubox-i.dtb \
++ imx6dl-dfi-fs700-m60.dtb \
++ imx6dl-gw51xx.dtb \
++ imx6dl-gw52xx.dtb \
++ imx6dl-gw53xx.dtb \
++ imx6dl-gw54xx.dtb \
+ imx6dl-hummingboard.dtb \
++ imx6dl-nitrogen6x.dtb \
++ imx6dl-phytec-pbab01.dtb \
+ imx6dl-sabreauto.dtb \
++ imx6dl-sabrelite.dtb \
+ imx6dl-sabresd.dtb \
++ imx6dl-sabresd-hdcp.dtb \
+ imx6dl-wandboard.dtb \
+ imx6q-arm2.dtb \
++ imx6q-cm-fx6.dtb \
+ imx6q-cubox-i.dtb \
++ imx6q-hummingboard.dtb \
++ imx6q-dfi-fs700-m60.dtb \
++ imx6q-dmo-edmqmx6.dtb \
++ imx6q-gk802.dtb \
++ imx6q-gw51xx.dtb \
++ imx6q-gw52xx.dtb \
++ imx6q-gw53xx.dtb \
++ imx6q-gw5400-a.dtb \
++ imx6q-gw54xx.dtb \
++ imx6q-nitrogen6x.dtb \
+ imx6q-phytec-pbab01.dtb \
+ imx6q-sabreauto.dtb \
+ imx6q-sabrelite.dtb \
+ imx6q-sabresd.dtb \
++ imx6q-sabresd-hdcp.dtb \
++ imx6q-tbs2910.dtb \
+ imx6q-sbc6x.dtb \
+ imx6q-udoo.dtb \
+ imx6q-wandboard.dtb \
+@@ -312,7 +334,14 @@
+ dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2p-ca5s.dtb \
+ vexpress-v2p-ca9.dtb \
+ vexpress-v2p-ca15-tc1.dtb \
+- vexpress-v2p-ca15_a7.dtb
++ vexpress-v2p-ca15_a7.dtb \
++ rtsm_ve-cortex_a9x2.dtb \
++ rtsm_ve-cortex_a9x4.dtb \
++ rtsm_ve-cortex_a15x1.dtb \
++ rtsm_ve-cortex_a15x2.dtb \
++ rtsm_ve-cortex_a15x4.dtb \
++ rtsm_ve-v2p-ca15x1-ca7x1.dtb \
++ rtsm_ve-v2p-ca15x4-ca7x4.dtb
+ dtb-$(CONFIG_ARCH_VIRT) += xenvm-4.2.dtb
+ dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
+ wm8505-ref.dtb \
+diff -Nur linux-3.14.36/arch/arm/boot/dts/marco.dtsi linux-openelec/arch/arm/boot/dts/marco.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/marco.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/marco.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -36,7 +36,7 @@
+ ranges = <0x40000000 0x40000000 0xa0000000>;
+
+ l2-cache-controller@c0030000 {
+- compatible = "sirf,marco-pl310-cache", "arm,pl310-cache";
++ compatible = "arm,pl310-cache";
+ reg = <0xc0030000 0x1000>;
+ interrupts = <0 59 0>;
+ arm,tag-latency = <1 1 1>;
+diff -Nur linux-3.14.36/arch/arm/boot/dts/prima2.dtsi linux-openelec/arch/arm/boot/dts/prima2.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/prima2.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/prima2.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -48,7 +48,7 @@
+ ranges = <0x40000000 0x40000000 0x80000000>;
+
+ l2-cache-controller@80040000 {
+- compatible = "arm,pl310-cache", "sirf,prima2-pl310-cache";
++ compatible = "arm,pl310-cache";
+ reg = <0x80040000 0x1000>;
+ interrupts = <59>;
+ arm,tag-latency = <1 1 1>;
+diff -Nur linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts
+--- linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,159 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x1CT
++ *
++ * RTSM_VE_Cortex_A15x1.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA15x1";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x1", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts
+--- linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,165 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x2CT
++ *
++ * RTSM_VE_Cortex_A15x2.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA15x2";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x2", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <1>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts
+--- linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,177 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x4CT
++ *
++ * RTSM_VE_Cortex_A15x4.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA15x4";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x4", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <1>;
++ };
++
++ cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <2>;
++ };
++
++ cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <3>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts
+--- linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,171 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA9MPx2CT
++ *
++ * RTSM_VE_Cortex_A9x2.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA9x2";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a9x2", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <0>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <1>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x80000000 0x80000000>;
++ };
++
++ scu@2c000000 {
++ compatible = "arm,cortex-a9-scu";
++ reg = <0x2c000000 0x58>;
++ };
++
++ timer@2c000600 {
++ compatible = "arm,cortex-a9-twd-timer";
++ reg = <0x2c000600 0x20>;
++ interrupts = <1 13 0xf04>;
++ };
++
++ watchdog@2c000620 {
++ compatible = "arm,cortex-a9-twd-wdt";
++ reg = <0x2c000620 0x20>;
++ interrupts = <1 14 0xf04>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0x2c001000 0x1000>,
++ <0x2c000100 0x100>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0x08000000 0x04000000>,
++ <1 0 0x14000000 0x04000000>,
++ <2 0 0x18000000 0x04000000>,
++ <3 0 0x1c000000 0x04000000>,
++ <4 0 0x0c000000 0x04000000>,
++ <5 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts
+--- linux-3.14.36/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,183 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA9MPx4CT
++ *
++ * RTSM_VE_Cortex_A9x4.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA9x4";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a9x4", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <0>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <1>;
++ };
++
++ cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <2>;
++ };
++
++ cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <3>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x80000000 0x80000000>;
++ };
++
++ scu@2c000000 {
++ compatible = "arm,cortex-a9-scu";
++ reg = <0x2c000000 0x58>;
++ };
++
++ timer@2c000600 {
++ compatible = "arm,cortex-a9-twd-timer";
++ reg = <0x2c000600 0x20>;
++ interrupts = <1 13 0xf04>;
++ };
++
++ watchdog@2c000620 {
++ compatible = "arm,cortex-a9-twd-wdt";
++ reg = <0x2c000620 0x20>;
++ interrupts = <1 14 0xf04>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0x2c001000 0x1000>,
++ <0x2c000100 0x100>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0x08000000 0x04000000>,
++ <1 0 0x14000000 0x04000000>,
++ <2 0 0x18000000 0x04000000>,
++ <3 0 0x1c000000 0x04000000>,
++ <4 0 0x0c000000 0x04000000>,
++ <5 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi linux-openelec/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,231 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * Motherboard component
++ *
++ * VEMotherBoard.lisa
++ */
++
++ motherboard {
++ compatible = "arm,vexpress,v2m-p1", "simple-bus";
++ arm,hbi = <0x190>;
++ arm,vexpress,site = <0>;
++ arm,v2m-memory-map = "rs1";
++ #address-cells = <2>; /* SMB chipselect number and offset */
++ #size-cells = <1>;
++ #interrupt-cells = <1>;
++ ranges;
++
++ flash@0,00000000 {
++ compatible = "arm,vexpress-flash", "cfi-flash";
++ reg = <0 0x00000000 0x04000000>,
++ <4 0x00000000 0x04000000>;
++ bank-width = <4>;
++ };
++
++ vram@2,00000000 {
++ compatible = "arm,vexpress-vram";
++ reg = <2 0x00000000 0x00800000>;
++ };
++
++ ethernet@2,02000000 {
++ compatible = "smsc,lan91c111";
++ reg = <2 0x02000000 0x10000>;
++ interrupts = <15>;
++ };
++
++ iofpga@3,00000000 {
++ compatible = "arm,amba-bus", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0 3 0 0x200000>;
++
++ v2m_sysreg: sysreg@010000 {
++ compatible = "arm,vexpress-sysreg";
++ reg = <0x010000 0x1000>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ v2m_sysctl: sysctl@020000 {
++ compatible = "arm,sp810", "arm,primecell";
++ reg = <0x020000 0x1000>;
++ clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
++ clock-names = "refclk", "timclk", "apb_pclk";
++ #clock-cells = <1>;
++ clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
++ };
++
++ aaci@040000 {
++ compatible = "arm,pl041", "arm,primecell";
++ reg = <0x040000 0x1000>;
++ interrupts = <11>;
++ clocks = <&smbclk>;
++ clock-names = "apb_pclk";
++ };
++
++ mmci@050000 {
++ compatible = "arm,pl180", "arm,primecell";
++ reg = <0x050000 0x1000>;
++ interrupts = <9 10>;
++ cd-gpios = <&v2m_sysreg 0 0>;
++ wp-gpios = <&v2m_sysreg 1 0>;
++ max-frequency = <12000000>;
++ vmmc-supply = <&v2m_fixed_3v3>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "mclk", "apb_pclk";
++ };
++
++ kmi@060000 {
++ compatible = "arm,pl050", "arm,primecell";
++ reg = <0x060000 0x1000>;
++ interrupts = <12>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "KMIREFCLK", "apb_pclk";
++ };
++
++ kmi@070000 {
++ compatible = "arm,pl050", "arm,primecell";
++ reg = <0x070000 0x1000>;
++ interrupts = <13>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "KMIREFCLK", "apb_pclk";
++ };
++
++ v2m_serial0: uart@090000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x090000 0x1000>;
++ interrupts = <5>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "uartclk", "apb_pclk";
++ };
++
++ v2m_serial1: uart@0a0000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0a0000 0x1000>;
++ interrupts = <6>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "uartclk", "apb_pclk";
++ };
++
++ v2m_serial2: uart@0b0000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0b0000 0x1000>;
++ interrupts = <7>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "uartclk", "apb_pclk";
++ };
++
++ v2m_serial3: uart@0c0000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0c0000 0x1000>;
++ interrupts = <8>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "uartclk", "apb_pclk";
++ };
++
++ wdt@0f0000 {
++ compatible = "arm,sp805", "arm,primecell";
++ reg = <0x0f0000 0x1000>;
++ interrupts = <0>;
++ clocks = <&v2m_refclk32khz>, <&smbclk>;
++ clock-names = "wdogclk", "apb_pclk";
++ };
++
++ v2m_timer01: timer@110000 {
++ compatible = "arm,sp804", "arm,primecell";
++ reg = <0x110000 0x1000>;
++ interrupts = <2>;
++ clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&smbclk>;
++ clock-names = "timclken1", "timclken2", "apb_pclk";
++ };
++
++ v2m_timer23: timer@120000 {
++ compatible = "arm,sp804", "arm,primecell";
++ reg = <0x120000 0x1000>;
++ interrupts = <3>;
++ clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&smbclk>;
++ clock-names = "timclken1", "timclken2", "apb_pclk";
++ };
++
++ rtc@170000 {
++ compatible = "arm,pl031", "arm,primecell";
++ reg = <0x170000 0x1000>;
++ interrupts = <4>;
++ clocks = <&smbclk>;
++ clock-names = "apb_pclk";
++ };
++
++ clcd@1f0000 {
++ compatible = "arm,pl111", "arm,primecell";
++ reg = <0x1f0000 0x1000>;
++ interrupts = <14>;
++ clocks = <&v2m_oscclk1>, <&smbclk>;
++ clock-names = "v2m:oscclk1", "apb_pclk";
++ mode = "VGA";
++ use_dma = <0>;
++ framebuffer = <0x18000000 0x00180000>;
++ };
++
++ virtio_block@0130000 {
++ compatible = "virtio,mmio";
++ reg = <0x130000 0x200>;
++ interrupts = <42>;
++ };
++
++ };
++
++ v2m_fixed_3v3: fixedregulator@0 {
++ compatible = "regulator-fixed";
++ regulator-name = "3V3";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ v2m_clk24mhz: clk24mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ clock-output-names = "v2m:clk24mhz";
++ };
++
++ v2m_refclk1mhz: refclk1mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <1000000>;
++ clock-output-names = "v2m:refclk1mhz";
++ };
++
++ v2m_refclk32khz: refclk32khz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <32768>;
++ clock-output-names = "v2m:refclk32khz";
++ };
++
++ mcc {
++ compatible = "simple-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ v2m_oscclk1: osc@1 {
++ /* CLCD clock */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <23750000 63500000>;
++ #clock-cells = <0>;
++ clock-output-names = "v2m:oscclk1";
++ };
++
++ muxfpga@0 {
++ compatible = "arm,vexpress-muxfpga";
++ arm,vexpress-sysreg,func = <7 0>;
++ };
++
++ shutdown@0 {
++ compatible = "arm,vexpress-shutdown";
++ arm,vexpress-sysreg,func = <8 0>;
++ };
++ };
++ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts linux-openelec/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts
+--- linux-3.14.36/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,233 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x4CT
++ * ARMCortexA7x4CT
++ * RTSM_VE_Cortex_A15x1_A7x1.lisa
++ */
++
++/dts-v1/;
++
++/memreserve/ 0xff000000 0x01000000;
++
++/ {
++ model = "RTSM_VE_CortexA15x1-A7x1";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x1_a7x1", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ clusters {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cluster0: cluster@0 {
++ reg = <0>;
++// freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core0: core@0 {
++ reg = <0>;
++ };
++
++ };
++ };
++
++ cluster1: cluster@1 {
++ reg = <1>;
++// freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core1: core@0 {
++ reg = <0>;
++ };
++
++ };
++ };
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ cluster = <&cluster0>;
++ core = <&core0>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x100>;
++ cluster = <&cluster1>;
++ core = <&core1>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ cci@2c090000 {
++ compatible = "arm,cci-400", "arm,cci";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0 0x2c090000 0 0x1000>;
++ ranges = <0x0 0x0 0x2c090000 0x10000>;
++
++ cci_control1: slave-if@4000 {
++ compatible = "arm,cci-400-ctrl-if";
++ interface-type = "ace";
++ reg = <0x4000 0x1000>;
++ };
++
++ cci_control2: slave-if@5000 {
++ compatible = "arm,cci-400-ctrl-if";
++ interface-type = "ace";
++ reg = <0x5000 0x1000>;
++ };
++ };
++
++ dcscb@60000000 {
++ compatible = "arm,rtsm,dcscb";
++ reg = <0 0x60000000 0 0x1000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts linux-openelec/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts
+--- linux-3.14.36/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,317 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x4CT
++ * ARMCortexA7x4CT
++ * RTSM_VE_Cortex_A15x4_A7x4.lisa
++ */
++
++/dts-v1/;
++
++/memreserve/ 0xff000000 0x01000000;
++
++/ {
++ model = "RTSM_VE_CortexA15x4-A7x4";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x4_a7x4", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ clusters {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cluster0: cluster@0 {
++ reg = <0>;
++// freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core0: core@0 {
++ reg = <0>;
++ };
++
++ core1: core@1 {
++ reg = <1>;
++ };
++
++ core2: core@2 {
++ reg = <2>;
++ };
++
++ core3: core@3 {
++ reg = <3>;
++ };
++
++ };
++ };
++
++ cluster1: cluster@1 {
++ reg = <1>;
++// freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core4: core@0 {
++ reg = <0>;
++ };
++
++ core5: core@1 {
++ reg = <1>;
++ };
++
++ core6: core@2 {
++ reg = <2>;
++ };
++
++ core7: core@3 {
++ reg = <3>;
++ };
++
++ };
++ };
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ cluster = <&cluster0>;
++ core = <&core0>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <1>;
++ cluster = <&cluster0>;
++ core = <&core1>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu2: cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <2>;
++ cluster = <&cluster0>;
++ core = <&core2>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu3: cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <3>;
++ cluster = <&cluster0>;
++ core = <&core3>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu4: cpu@4 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x100>;
++ cluster = <&cluster1>;
++ core = <&core4>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++
++ cpu5: cpu@5 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x101>;
++ cluster = <&cluster1>;
++ core = <&core5>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++
++ cpu6: cpu@6 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x102>;
++ cluster = <&cluster1>;
++ core = <&core6>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++
++ cpu7: cpu@7 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x103>;
++ cluster = <&cluster1>;
++ core = <&core7>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ cci@2c090000 {
++ compatible = "arm,cci-400", "arm,cci";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0 0x2c090000 0 0x1000>;
++ ranges = <0x0 0x0 0x2c090000 0x10000>;
++
++ cci_control1: slave-if@4000 {
++ compatible = "arm,cci-400-ctrl-if";
++ interface-type = "ace";
++ reg = <0x4000 0x1000>;
++ };
++
++ cci_control2: slave-if@5000 {
++ compatible = "arm,cci-400-ctrl-if";
++ interface-type = "ace";
++ reg = <0x5000 0x1000>;
++ };
++ };
++
++ dcscb@60000000 {
++ compatible = "arm,rtsm,dcscb";
++ reg = <0 0x60000000 0 0x1000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm/boot/dts/vexpress-v2m.dtsi linux-openelec/arch/arm/boot/dts/vexpress-v2m.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/vexpress-v2m.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/vexpress-v2m.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -227,6 +227,7 @@
+ };
+
+ clcd@1f000 {
++ status = "disabled";
+ compatible = "arm,pl111", "arm,primecell";
+ reg = <0x1f000 0x1000>;
+ interrupts = <14>;
+diff -Nur linux-3.14.36/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi linux-openelec/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -228,6 +228,7 @@
+ };
+
+ clcd@1f0000 {
++ status = "disabled";
+ compatible = "arm,pl111", "arm,primecell";
+ reg = <0x1f0000 0x1000>;
+ interrupts = <14>;
+diff -Nur linux-3.14.36/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts linux-openelec/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+--- linux-3.14.36/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -9,6 +9,8 @@
+
+ /dts-v1/;
+
++/memreserve/ 0xff000000 0x01000000;
++
+ / {
+ model = "V2P-CA15_CA7";
+ arm,hbi = <0x249>;
+@@ -29,29 +31,60 @@
+ i2c1 = &v2m_i2c_pcie;
+ };
+
+- cpus {
++ clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- cpu0: cpu@0 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a15";
++ cluster0: cluster@0 {
+ reg = <0>;
+- cci-control-port = <&cci_control1>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core0: core@0 {
++ reg = <0>;
++ };
++
++ core1: core@1 {
++ reg = <1>;
++ };
++
++ };
+ };
+
+- cpu1: cpu@1 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a15";
++ cluster1: cluster@1 {
+ reg = <1>;
+- cci-control-port = <&cci_control1>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core2: core@0 {
++ reg = <0>;
++ };
++
++ core3: core@1 {
++ reg = <1>;
++ };
++
++ core4: core@2 {
++ reg = <2>;
++ };
++ };
+ };
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ cci-control-port = <&cci_control2>;
++ cluster = <&cluster1>;
++ core = <&core2>;
++ clock-frequency = <800000000>;
+ };
+
+ cpu3: cpu@3 {
+@@ -59,6 +92,9 @@
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ cci-control-port = <&cci_control2>;
++ cluster = <&cluster1>;
++ core = <&core3>;
++ clock-frequency = <800000000>;
+ };
+
+ cpu4: cpu@4 {
+@@ -66,12 +102,35 @@
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ cci-control-port = <&cci_control2>;
++ cluster = <&cluster1>;
++ core = <&core4>;
++ clock-frequency = <800000000>;
++ };
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ cci-control-port = <&cci_control1>;
++ cluster = <&cluster0>;
++ core = <&core0>;
++ clock-frequency = <1000000000>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <1>;
++ cci-control-port = <&cci_control1>;
++ cluster = <&cluster0>;
++ core = <&core1>;
++ clock-frequency = <1000000000>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+- reg = <0 0x80000000 0 0x40000000>;
++ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ wdt@2a490000 {
+@@ -86,6 +145,8 @@
+ compatible = "arm,hdlcd";
+ reg = <0 0x2b000000 0 0x1000>;
+ interrupts = <0 85 4>;
++ mode = "1024x768-16@60";
++ framebuffer = <0 0xff000000 0 0x01000000>;
+ clocks = <&oscclk5>;
+ clock-names = "pxlclk";
+ };
+@@ -127,6 +188,16 @@
+ interface-type = "ace";
+ reg = <0x5000 0x1000>;
+ };
++
++ pmu@9000 {
++ compatible = "arm,cci-400-pmu";
++ reg = <0x9000 0x5000>;
++ interrupts = <0 101 4>,
++ <0 102 4>,
++ <0 103 4>,
++ <0 104 4>,
++ <0 105 4>;
++ };
+ };
+
+ memory-controller@7ffd0000 {
+@@ -164,12 +235,21 @@
+ <1 10 0xf08>;
+ };
+
+- pmu {
++ pmu_a15 {
+ compatible = "arm,cortex-a15-pmu";
++ cluster = <&cluster0>;
+ interrupts = <0 68 4>,
+ <0 69 4>;
+ };
+
++ pmu_a7 {
++ compatible = "arm,cortex-a7-pmu";
++ cluster = <&cluster1>;
++ interrupts = <0 128 4>,
++ <0 129 4>,
++ <0 130 4>;
++ };
++
+ oscclk6a: oscclk6a {
+ /* Reference 24MHz clock */
+ compatible = "fixed-clock";
+@@ -178,6 +258,19 @@
+ clock-output-names = "oscclk6a";
+ };
+
++/* PSCI requires support from firmware and is not present in the normal TC2
++ * distribution, so this node is commented out by default...
++
++ psci {
++ compatible = "arm,psci";
++ method = "smc";
++ cpu_suspend = <0x80100001>;
++ cpu_off = <0x80100002>;
++ cpu_on = <0x80100003>;
++ migrate = <0x80100004>;
++ };
++*/
++
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+diff -Nur linux-3.14.36/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts linux-openelec/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+--- linux-3.14.36/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -9,6 +9,8 @@
+
+ /dts-v1/;
+
++/memreserve/ 0xbf000000 0x01000000;
++
+ / {
+ model = "V2P-CA15";
+ arm,hbi = <0x237>;
+@@ -57,6 +59,8 @@
+ interrupts = <0 85 4>;
+ clocks = <&oscclk5>;
+ clock-names = "pxlclk";
++ mode = "1024x768-16@60";
++ framebuffer = <0 0xbf000000 0 0x01000000>;
+ };
+
+ memory-controller@2b0a0000 {
+diff -Nur linux-3.14.36/arch/arm/boot/dts/vexpress-v2p-ca5s.dts linux-openelec/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+--- linux-3.14.36/arch/arm/boot/dts/vexpress-v2p-ca5s.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/vexpress-v2p-ca5s.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -9,6 +9,8 @@
+
+ /dts-v1/;
+
++/memreserve/ 0xbf000000 0x01000000;
++
+ / {
+ model = "V2P-CA5s";
+ arm,hbi = <0x225>;
+@@ -59,6 +61,8 @@
+ interrupts = <0 85 4>;
+ clocks = <&oscclk3>;
+ clock-names = "pxlclk";
++ mode = "640x480-16@60";
++ framebuffer = <0xbf000000 0x01000000>;
+ };
+
+ memory-controller@2a150000 {
+diff -Nur linux-3.14.36/arch/arm/boot/dts/vexpress-v2p-ca9.dts linux-openelec/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+--- linux-3.14.36/arch/arm/boot/dts/vexpress-v2p-ca9.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/vexpress-v2p-ca9.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -9,6 +9,8 @@
+
+ /dts-v1/;
+
++/include/ "clcd-panels.dtsi"
++
+ / {
+ model = "V2P-CA9";
+ arm,hbi = <0x191>;
+@@ -73,6 +75,8 @@
+ interrupts = <0 44 4>;
+ clocks = <&oscclk1>, <&oscclk2>;
+ clock-names = "clcdclk", "apb_pclk";
++ mode = "XVGA";
++ use_dma = <1>;
+ };
+
+ memory-controller@100e0000 {
+diff -Nur linux-3.14.36/arch/arm/boot/dts/vf610.dtsi linux-openelec/arch/arm/boot/dts/vf610.dtsi
+--- linux-3.14.36/arch/arm/boot/dts/vf610.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/vf610.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -44,11 +44,13 @@
+
+ sxosc {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ fxosc {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/boot/dts/vf610-twr.dts linux-openelec/arch/arm/boot/dts/vf610-twr.dts
+--- linux-3.14.36/arch/arm/boot/dts/vf610-twr.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/boot/dts/vf610-twr.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -25,11 +25,13 @@
+ clocks {
+ audio_ext {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+
+ enet_ext {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm/common/Makefile linux-openelec/arch/arm/common/Makefile
+--- linux-3.14.36/arch/arm/common/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/common/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -13,6 +13,7 @@
+ obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
+ obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+ obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
++CFLAGS_REMOVE_mcpm_entry.o = -pg
+ AFLAGS_mcpm_head.o := -march=armv7-a
+ AFLAGS_vlock.o := -march=armv7-a
+ obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
+diff -Nur linux-3.14.36/arch/arm/configs/imx_v6_v7_defconfig linux-openelec/arch/arm/configs/imx_v6_v7_defconfig
+--- linux-3.14.36/arch/arm/configs/imx_v6_v7_defconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/configs/imx_v6_v7_defconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -45,6 +45,9 @@
+ CONFIG_AEABI=y
+ CONFIG_HIGHMEM=y
+ CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_ARM_IMX6Q_CPUFREQ=y
++CONFIG_CPU_IDLE=y
+ CONFIG_VFP=y
+ CONFIG_NEON=y
+ CONFIG_BINFMT_MISC=m
+@@ -70,6 +73,8 @@
+ CONFIG_DEVTMPFS=y
+ CONFIG_DEVTMPFS_MOUNT=y
+ # CONFIG_STANDALONE is not set
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=256
+ CONFIG_IMX_WEIM=y
+ CONFIG_CONNECTOR=y
+ CONFIG_MTD=y
+@@ -154,7 +159,12 @@
+ CONFIG_SPI_IMX=y
+ CONFIG_GPIO_SYSFS=y
+ CONFIG_GPIO_MC9S08DZ60=y
++CONFIG_GPIO_PCA953X=y
+ # CONFIG_HWMON is not set
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
+ CONFIG_WATCHDOG=y
+ CONFIG_IMX2_WDT=y
+ CONFIG_MFD_DA9052_I2C=y
+@@ -170,32 +180,44 @@
+ CONFIG_REGULATOR_PFUZE100=y
+ CONFIG_MEDIA_SUPPORT=y
+ CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_USB_SUPPORT=y
++CONFIG_USB_VIDEO_CLASS=m
+ CONFIG_MEDIA_RC_SUPPORT=y
+ CONFIG_RC_DEVICES=y
+ CONFIG_IR_GPIO_CIR=y
+ CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
+ CONFIG_SOC_CAMERA=y
+ CONFIG_VIDEO_MX3=y
+ CONFIG_V4L_MEM2MEM_DRIVERS=y
+ CONFIG_VIDEO_CODA=y
+ CONFIG_SOC_CAMERA_OV2640=y
+ CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
+ CONFIG_BACKLIGHT_LCD_SUPPORT=y
+ CONFIG_LCD_CLASS_DEVICE=y
+ CONFIG_LCD_L4F00242T03=y
+ CONFIG_LCD_PLATFORM=y
+ CONFIG_BACKLIGHT_CLASS_DEVICE=y
+ CONFIG_BACKLIGHT_PWM=y
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FB_MXC_MIPI_DSI=y
++CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL=y
+ CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_LOGO=y
+ CONFIG_SOUND=y
+ CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
+ CONFIG_SND_SOC=y
+ CONFIG_SND_IMX_SOC=y
+ CONFIG_SND_SOC_PHYCORE_AC97=y
+ CONFIG_SND_SOC_EUKREA_TLV320=y
+ CONFIG_SND_SOC_IMX_WM8962=y
+ CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_CS42888=y
+ CONFIG_SND_SOC_IMX_SPDIF=y
+ CONFIG_SND_SOC_IMX_MC13783=y
+ CONFIG_USB=y
+@@ -208,12 +230,18 @@
+ CONFIG_NOP_USB_XCEIV=y
+ CONFIG_USB_MXS_PHY=y
+ CONFIG_USB_GADGET=y
++CONFIG_USB_ZERO=m
+ CONFIG_USB_ETH=m
+ CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
+ CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
+ CONFIG_MMC_SDHCI=y
+ CONFIG_MMC_SDHCI_PLTFM=y
+ CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
+ CONFIG_NEW_LEDS=y
+ CONFIG_LEDS_CLASS=y
+ CONFIG_LEDS_GPIO=y
+@@ -229,16 +257,10 @@
+ CONFIG_RTC_DRV_MXC=y
+ CONFIG_RTC_DRV_SNVS=y
+ CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
+ CONFIG_IMX_SDMA=y
+ CONFIG_MXS_DMA=y
+ CONFIG_STAGING=y
+-CONFIG_DRM_IMX=y
+-CONFIG_DRM_IMX_FB_HELPER=y
+-CONFIG_DRM_IMX_PARALLEL_DISPLAY=y
+-CONFIG_DRM_IMX_TVE=y
+-CONFIG_DRM_IMX_LDB=y
+-CONFIG_DRM_IMX_IPUV3_CORE=y
+-CONFIG_DRM_IMX_IPUV3=y
+ CONFIG_COMMON_CLK_DEBUG=y
+ # CONFIG_IOMMU_SUPPORT is not set
+ CONFIG_PWM=y
+diff -Nur linux-3.14.36/arch/arm/configs/imx_v7_cbi_hb_base_defconfig linux-openelec/arch/arm/configs/imx_v7_cbi_hb_base_defconfig
+--- linux-3.14.36/arch/arm/configs/imx_v7_cbi_hb_base_defconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/configs/imx_v7_cbi_hb_base_defconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,367 @@
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_KERNEL_LZO=y
++CONFIG_SYSVIPC=y
++CONFIG_FHANDLE=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_CGROUPS=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++CONFIG_CLEANCACHE=y
++CONFIG_FRONTSWAP=y
++CONFIG_ZSWAP=y
++CONFIG_ZSMALLOC=y
++# CONFIG_SLUB_DEBUG is not set
++# CONFIG_COMPAT_BRK is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_GPIO_PCA953X=y
++CONFIG_ARCH_MXC=y
++CONFIG_MXC_DEBUG_BOARD=y
++CONFIG_SOC_IMX6Q=y
++CONFIG_SOC_IMX6SL=y
++# CONFIG_SWP_EMULATE is not set
++CONFIG_PCI=y
++CONFIG_PCIE_DW=y
++CONFIG_PCI_IMX6=y
++CONFIG_SMP=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_HIGHMEM=y
++CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++CONFIG_ARM_IMX6_CPUFREQ=y
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_VFPv3=y
++CONFIG_NEON=y
++CONFIG_KERNEL_MODE_NEON=y
++CONFIG_BINFMT_MISC=m
++CONFIG_PM_RUNTIME=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_TEST_SUSPEND=y
++CONFIG_IOSCHED_BFQ=y
++CONFIG_CGROUP_BFQIO=y
++CONFIG_DEFAULT_BFQ=y
++CONFIG_DEFAULT_IOSCHED="bfq"
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_IPV6=y
++CONFIG_NETFILTER=y
++CONFIG_VLAN_8021Q=y
++CONFIG_WIRELESS=y
++CONFIG_WIRELESS_EXT=y
++CONFIG_WEXT_CORE=y
++CONFIG_WEXT_PROC=y
++CONFIG_WEXT_SPY=y
++CONFIG_WEXT_PRIV=y
++CONFIG_CFG80211=y
++CONFIG_ETHERNET=y
++# CONFIG_NET_VENDOR_BROADCOM is not set
++# CONFIG_NET_VENDOR_CIRRUS is not set
++# CONFIG_NET_VENDOR_FARADAY
++# CONFIG_NET_VENDOR_INTEL
++# CONFIG_NET_VENDOR_I825XX
++# CONFIG_NET_VENDOR_MARVELL
++# CONFIG_NET_VENDOR_MICROCHIP
++# CONFIG_NET_VENDOR_MICROCHIP=y
++# CONFIG_ENC28J60 is not set
++# CONFIG_NET_VENDOR_NATSEMI=y
++# CONFIG_NET_VENDOR_8390=y
++# CONFIG_AX88796 is not set
++# CONFIG_ETHOC is not set
++# CONFIG_SH_ETH is not set
++# CONFIG_NET_VENDOR_SEEQ=y
++# CONFIG_NET_VENDOR_SMSC=y
++# CONFIG_SMC91X is not set
++# CONFIG_SMC911X is not set
++# CONFIG_SMSC911X is not set
++# CONFIG_NET_VENDOR_STMICRO=y
++# CONFIG_STMMAC_ETH is not set
++# CONFIG_NET_VENDOR_VIA=y
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_NET_VENDOR_WIZNET=y
++CONFIG_NET_VENDOR_FREESCALE=y
++CONFIG_FEC=y
++CONFIG_PHYLIB=y
++CONFIG_AT803X_PHY=y
++CONFIG_WLAN=y
++CONFIG_BRCMUTIL=m
++CONFIG_BRCMFMAC=m
++CONFIG_BRCMFMAC_SDIO=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_DMA_CMA=y
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=256
++CONFIG_CONNECTOR=y
++# CONFIG_MTD is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_IMX=y
++CONFIG_NETDEVICES=y
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_IMX=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++# CONFIG_KEYBOARD_ATKBD is not set
++# CONFIG_MOUSE_PS2 is not set
++CONFIG_INPUT_MISC=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_IMX=y
++CONFIG_SERIAL_IMX_CONSOLE=y
++CONFIG_SERIAL_FSL_LPUART=y
++CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
++CONFIG_FSL_OTP=y
++CONFIG_GPIO_MXC=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_HELPER_AUTO is not set
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++CONFIG_I2C_IMX=y
++CONFIG_SPI=y
++CONFIG_SPI_IMX=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
++CONFIG_WATCHDOG=y
++CONFIG_IMX2_WDT=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_MC13XXX_SPI=y
++CONFIG_MFD_MC13XXX_I2C=y
++CONFIG_MFD_SI476X_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_ANATOP=y
++CONFIG_REGULATOR_PFUZE100=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++# CONFIG_MEDIA_RADIO_SUPPORT is not set
++CONFIG_VIDEO_V4L2_INT_DEVICE=y
++# CONFIG_MEDIA_USB_SUPPORT isnot set
++# CONFIG_USB_VIDEO_CLASS is not set
++# CONFIG_RADIO_ADAPTERS is not set
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_CAPTURE=m
++CONFIG_VIDEO_MXC_CSI_CAMERA=m
++CONFIG_MXC_CAMERA_OV5640=m
++CONFIG_MXC_CAMERA_OV5642=m
++CONFIG_MXC_CAMERA_OV5640_MIPI=m
++CONFIG_MXC_TVIN_ADV7180=m
++CONFIG_MXC_IPU_DEVICE_QUEUE_SDC=m
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
++CONFIG_VIDEO_MXC_PXP_V4L2=y
++CONFIG_SOC_CAMERA=y
++CONFIG_SOC_CAMERA_OV2640=y
++CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
++CONFIG_FB=y
++# CONFIG_FB_MX3 is not set
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=y
++CONFIG_SND_IMX_SOC=y
++CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_SPDIF=y
++CONFIG_SND_SOC_IMX_HDMI=y
++CONFIG_USB=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_CHIPIDEA=y
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_MXS_PHY=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
++CONFIG_MXC_HDMI_CEC=y
++CONFIG_MXC_MIPI_CSI2=y
++CONFIG_MXC_MLB150=m
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_GPIO=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_MXC=y
++CONFIG_RTC_DRV_SNVS=y
++CONFIG_RTC_DRV_PCF8523=y
++CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
++CONFIG_IMX_SDMA=y
++CONFIG_MXS_DMA=y
++CONFIG_SRAM=y
++CONFIG_STAGING=y
++CONFIG_COMMON_CLK_DEBUG=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_PWM=y
++CONFIG_PWM_SYSFS=y
++CONFIG_PWM_IMX=y
++CONFIG_IRQCHIP=y
++CONFIG_ARM_GIC=y
++# CONFIG_IPACK_BUS is not set
++CONFIG_ARCH_HAS_RESET_CONTROLLER=y
++CONFIG_RESET_CONTROLLER=y
++CONFIG_RESET_GPIO=y
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_USE_FOR_EXT23=y
++CONFIG_EXT4_FS_XATTR=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=y
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_UTF8=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_FTRACE is not set
++CONFIG_SECURITYFS=y
++CONFIG_CRYPTO_USER=y
++CONFIG_CRYPTO_TEST=m
++CONFIG_CRYPTO_CCM=y
++CONFIG_CRYPTO_GCM=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=y
++CONFIG_CRYPTO_XTS=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++CONFIG_CRYPTO_RMD128=y
++CONFIG_CRYPTO_RMD160=y
++CONFIG_CRYPTO_RMD256=y
++CONFIG_CRYPTO_RMD320=y
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++CONFIG_CRYPTO_TGR192=y
++CONFIG_CRYPTO_WP512=y
++CONFIG_CRYPTO_BLOWFISH=y
++CONFIG_CRYPTO_CAMELLIA=y
++CONFIG_CRYPTO_DES=y
++CONFIG_CRYPTO_TWOFISH=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DEV_FSL_CAAM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO=y
++CONFIG_CRYPTO_AES_ARM_BS=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
++# CONFIG_MXC_MMA8451 is not set
++CONFIG_RC_CORE=m
++CONFIG_RC_DECODERS=y
++CONFIG_LIRC=m
++CONFIG_RC_LOOPBACK=m
++CONFIG_RC_MAP=m
++CONFIG_RC_DEVICES=y
++CONFIG_RC_ATI_REMOTE=m
++CONFIG_IR_NEC_DECODER=m
++CONFIG_IR_RC5_DECODER=m
++CONFIG_IR_RC6_DECODER=m
++CONFIG_IR_JVC_DECODER=m
++CONFIG_IR_SONY_DECODER=m
++CONFIG_IR_RC5_SZ_DECODER=m
++CONFIG_IR_SANYO_DECODER=m
++CONFIG_IR_MCE_KBD_DECODER=m
++CONFIG_IR_LIRC_CODEC=m
++CONFIG_IR_IMON=m
++CONFIG_IR_MCEUSB=m
++CONFIG_IR_ITE_CIR=m
++CONFIG_IR_NUVOTON=m
++CONFIG_IR_FINTEK=m
++CONFIG_IR_REDRAT3=m
++CONFIG_IR_ENE=m
++CONFIG_IR_STREAMZAP=m
++CONFIG_IR_WINBOND_CIR=m
++CONFIG_IR_IGUANA=m
++CONFIG_IR_TTUSBIR=m
++CONFIG_IR_GPIO_CIR=m
+diff -Nur linux-3.14.36/arch/arm/configs/imx_v7_cbi_hb_defconfig linux-openelec/arch/arm/configs/imx_v7_cbi_hb_defconfig
+--- linux-3.14.36/arch/arm/configs/imx_v7_cbi_hb_defconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/configs/imx_v7_cbi_hb_defconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,5138 @@
++#
++# Automatically generated make config: don't edit
++#
++CONFIG_MMU=y
++CONFIG_HOTPLUG_CPU=y
++# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
++# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
++CONFIG_LOCALVERSION=""
++CONFIG_CROSS_COMPILE=""
++CONFIG_DEFAULT_HOSTNAME="(none)"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_HOTPLUG=y
++CONFIG_UEVENT_HELPER_PATH=""
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++
++CONFIG_BUILD_DOCSRC=y
++
++#
++# General setup
++#
++CONFIG_KERNEL_LZO=y
++# CONFIG_KERNEL_BZIP2 is not set
++# CONFIG_KERNEL_LZMA is not set
++CONFIG_SWAP=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++# CONFIG_COMPILE_TEST is not set
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++CONFIG_SYSCTL=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_CFQ_GROUP_IOSCHED=y
++CONFIG_IOSCHED_BFQ=y
++CONFIG_CGROUP_BFQIO=y
++CONFIG_DEFAULT_BFQ=y
++CONFIG_DEFAULT_IOSCHED="bfq"
++# CONFIG_CHECKPOINT_RESTORE is not set
++CONFIG_NAMESPACES=y
++CONFIG_PID_NS=y
++CONFIG_UTS_NS=y
++CONFIG_IPC_NS=y
++CONFIG_NET_NS=y
++CONFIG_USER_NS=y
++# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
++CONFIG_SYSVIPC=y
++CONFIG_FHANDLE=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_CGROUPS=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++# CONFIG_SLUB_DEBUG is not set
++# CONFIG_COMPAT_BRK is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++
++CONFIG_POSIX_MQUEUE=y
++CONFIG_PREEMPT_VOLUNTARY=y
++
++CONFIG_SLUB=y
++CONFIG_SLUB_CPU_PARTIAL=y
++# CONFIG_SLUB_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++
++# CONFIG_AD525X_DPOT is not set
++# CONFIG_ATMEL_PWM is not set
++# CONFIG_IWMC3200TOP is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++# MX6 specific kernel configuration
++CONFIG_GPIO_PCA953X=y
++CONFIG_ARCH_MXC=y
++CONFIG_MXC_DEBUG_BOARD=y
++CONFIG_SOC_IMX6Q=y
++CONFIG_SOC_IMX6SL=y
++# CONFIG_SWP_EMULATE is not set
++CONFIG_PCI=y
++CONFIG_PCIE_DW=y
++CONFIG_PCI_IMX6=y
++CONFIG_SMP=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_HIGHMEM=y
++CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++CONFIG_ARM_IMX6_CPUFREQ=y
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_VFPv3=y
++CONFIG_NEON=y
++CONFIG_KERNEL_MODE_NEON=y
++CONFIG_BINFMT_MISC=m
++CONFIG_PM_RUNTIME=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_TEST_SUSPEND=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_IPV6=y
++CONFIG_NETFILTER=y
++CONFIG_VLAN_8021Q=y
++CONFIG_WIRELESS=y
++CONFIG_WIRELESS_EXT=y
++CONFIG_WEXT_CORE=y
++CONFIG_WEXT_PROC=y
++CONFIG_WEXT_SPY=y
++CONFIG_WEXT_PRIV=y
++CONFIG_CFG80211=y
++CONFIG_ETHERNET=y
++# CONFIG_NET_VENDOR_BROADCOM is not set
++# CONFIG_NET_VENDOR_CIRRUS is not set
++# CONFIG_NET_VENDOR_FARADAY
++# CONFIG_NET_VENDOR_INTEL
++# CONFIG_NET_VENDOR_I825XX
++# CONFIG_NET_VENDOR_MARVELL
++# CONFIG_NET_VENDOR_MICROCHIP
++# CONFIG_NET_VENDOR_MICROCHIP=y
++# CONFIG_ENC28J60 is not set
++# CONFIG_NET_VENDOR_NATSEMI=y
++# CONFIG_NET_VENDOR_8390=y
++# CONFIG_AX88796 is not set
++# CONFIG_ETHOC is not set
++# CONFIG_SH_ETH is not set
++# CONFIG_NET_VENDOR_SEEQ=y
++# CONFIG_NET_VENDOR_SMSC=y
++# CONFIG_SMC91X is not set
++# CONFIG_SMC911X is not set
++# CONFIG_SMSC911X is not set
++# CONFIG_NET_VENDOR_STMICRO=y
++# CONFIG_STMMAC_ETH is not set
++# CONFIG_NET_VENDOR_VIA=y
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_NET_VENDOR_WIZNET=y
++CONFIG_NET_VENDOR_FREESCALE=y
++CONFIG_FEC=y
++CONFIG_PHYLIB=y
++CONFIG_AT803X_PHY=y
++CONFIG_WLAN=y
++CONFIG_BRCMUTIL=m
++CONFIG_BRCMFMAC=m
++CONFIG_BRCMFMAC_SDIO=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_DMA_CMA=y
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=256
++CONFIG_CONNECTOR=y
++# CONFIG_MTD is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_IMX=y
++CONFIG_NETDEVICES=y
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_IMX=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++# CONFIG_KEYBOARD_ATKBD is not set
++# CONFIG_MOUSE_PS2 is not set
++CONFIG_INPUT_MISC=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_IMX=y
++CONFIG_SERIAL_IMX_CONSOLE=y
++CONFIG_SERIAL_FSL_LPUART=y
++CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
++CONFIG_FSL_OTP=y
++CONFIG_GPIO_MXC=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_HELPER_AUTO is not set
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++CONFIG_I2C_IMX=y
++CONFIG_SPI=y
++CONFIG_SPI_IMX=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
++CONFIG_WATCHDOG=y
++CONFIG_IMX2_WDT=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_MC13XXX_SPI=y
++CONFIG_MFD_MC13XXX_I2C=y
++CONFIG_MFD_SI476X_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_ANATOP=y
++CONFIG_REGULATOR_PFUZE100=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++# CONFIG_MEDIA_RADIO_SUPPORT is not set
++CONFIG_VIDEO_V4L2_INT_DEVICE=y
++CONFIG_MEDIA_USB_SUPPORT=y
++CONFIG_USB_VIDEO_CLASS=m
++# CONFIG_RADIO_ADAPTERS is not set
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_CAPTURE=m
++CONFIG_VIDEO_MXC_CSI_CAMERA=m
++CONFIG_MXC_CAMERA_OV5640=m
++CONFIG_MXC_CAMERA_OV5642=m
++CONFIG_MXC_CAMERA_OV5640_MIPI=m
++CONFIG_MXC_TVIN_ADV7180=m
++CONFIG_MXC_IPU_DEVICE_QUEUE_SDC=m
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
++CONFIG_VIDEO_MXC_PXP_V4L2=y
++CONFIG_SOC_CAMERA=y
++CONFIG_SOC_CAMERA_OV2640=y
++CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
++CONFIG_FB=y
++# CONFIG_FB_MX3 is not set
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=y
++CONFIG_SND_IMX_SOC=y
++CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_SPDIF=y
++CONFIG_SND_SOC_IMX_HDMI=y
++CONFIG_USB=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_CHIPIDEA=y
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_MXS_PHY=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
++CONFIG_MXC_HDMI_CEC=y
++CONFIG_MXC_MIPI_CSI2=y
++CONFIG_MXC_MLB150=m
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_GPIO=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_MXC=y
++CONFIG_RTC_DRV_SNVS=y
++CONFIG_RTC_DRV_PCF8523=y
++CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
++CONFIG_IMX_SDMA=y
++CONFIG_MXS_DMA=y
++CONFIG_SRAM=y
++CONFIG_STAGING=y
++CONFIG_COMMON_CLK_DEBUG=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_PWM=y
++CONFIG_PWM_SYSFS=y
++CONFIG_PWM_IMX=y
++CONFIG_IRQCHIP=y
++CONFIG_ARM_GIC=y
++CONFIG_ARCH_HAS_RESET_CONTROLLER=y
++CONFIG_RESET_CONTROLLER=y
++CONFIG_RESET_GPIO=y
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_USE_FOR_EXT23=y
++CONFIG_EXT4_FS_XATTR=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=y
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_UTF8=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_FTRACE is not set
++CONFIG_SECURITYFS=y
++CONFIG_CRYPTO_USER=y
++CONFIG_CRYPTO_TEST=m
++CONFIG_CRYPTO_CCM=y
++CONFIG_CRYPTO_GCM=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=y
++CONFIG_CRYPTO_XTS=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++CONFIG_CRYPTO_RMD128=y
++CONFIG_CRYPTO_RMD160=y
++CONFIG_CRYPTO_RMD256=y
++CONFIG_CRYPTO_RMD320=y
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++CONFIG_CRYPTO_TGR192=y
++CONFIG_CRYPTO_WP512=y
++CONFIG_CRYPTO_BLOWFISH=y
++CONFIG_CRYPTO_CAMELLIA=y
++CONFIG_CRYPTO_DES=y
++CONFIG_CRYPTO_TWOFISH=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DEV_FSL_CAAM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO=y
++CONFIG_CRYPTO_AES_ARM_BS=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
++# CONFIG_MXC_MMA8451 is not set
++
++#
++# Loadable module support
++#
++# CONFIG_MODULE_FORCE_LOAD is not set
++# -- MODULE_FORCE_UNLOAD is controlled by config-debug/nodebug
++
++# CONFIG_PCI_DEBUG is not set
++CONFIG_PCI_STUB=y
++CONFIG_PCI_IOV=y
++CONFIG_PCI_PRI=y
++CONFIG_PCI_PASID=y
++CONFIG_HT_IRQ=y
++CONFIG_PCI_MSI=y
++# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
++CONFIG_PCIEPORTBUS=y
++CONFIG_PCIEAER=y
++CONFIG_PCIEASPM=y
++# CONFIG_PCIEASPM_DEBUG is not set
++CONFIG_PCIE_ECRC=y
++CONFIG_PCIEAER_INJECT=m
++CONFIG_HOTPLUG_PCI_PCIE=y
++CONFIG_HOTPLUG_PCI_FAKE=m
++
++# CONFIG_SGI_IOC4 is not set
++
++# CONFIG_ISA is not set
++# CONFIG_SCx200 is not set
++
++#
++# PCMCIA/CardBus support
++# FIXME: Deprecate Cardbus ?
++#
++CONFIG_PCMCIA=y
++CONFIG_PCMCIA_LOAD_CIS=y
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_YENTA=m
++CONFIG_CARDBUS=y
++CONFIG_I82092=m
++CONFIG_PD6729=m
++
++CONFIG_PCCARD=y
++CONFIG_SDIO_UART=m
++# CONFIG_MMC_TEST is not set
++# CONFIG_MMC_DEBUG is not set
++# https://lists.fedoraproject.org/pipermail/kernel/2014-February/004889.html
++# CONFIG_MMC_CLKGATE is not set
++CONFIG_MMC_BLOCK=y
++CONFIG_MMC_BLOCK_MINORS=8
++CONFIG_MMC_BLOCK_BOUNCE=y
++CONFIG_MMC_SDHCI_PCI=m
++CONFIG_MMC_SDHCI_ACPI=m
++CONFIG_MMC_SDRICOH_CS=m
++CONFIG_MMC_TIFM_SD=m
++CONFIG_MMC_WBSD=m
++CONFIG_MMC_VIA_SDMMC=m
++CONFIG_MMC_CB710=m
++CONFIG_MMC_RICOH_MMC=y
++CONFIG_MMC_USHC=m
++CONFIG_MMC_REALTEK_PCI=m
++CONFIG_MMC_VUB300=m
++# CONFIG_MMC_SDHCI_PXAV2 is not set
++# CONFIG_MMC_SDHCI_PXAV3 is not set
++# CONFIG_MMC_SDHCI_OF_ARASAN is not set
++
++
++CONFIG_CB710_CORE=m
++# CONFIG_CB710_DEBUG is not set
++
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_MTHCA=m
++# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
++CONFIG_INFINIBAND_IPOIB=m
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
++CONFIG_INFINIBAND_IPOIB_CM=y
++CONFIG_INFINIBAND_SRP=m
++CONFIG_INFINIBAND_SRPT=m
++CONFIG_INFINIBAND_USER_MAD=m
++CONFIG_INFINIBAND_USER_ACCESS=m
++# CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING is not set #staging
++CONFIG_INFINIBAND_IPATH=m
++CONFIG_INFINIBAND_ISER=m
++CONFIG_INFINIBAND_ISERT=m
++CONFIG_INFINIBAND_AMSO1100=m
++# CONFIG_INFINIBAND_AMSO1100_DEBUG is not set
++CONFIG_INFINIBAND_CXGB3=m
++CONFIG_INFINIBAND_CXGB4=m
++CONFIG_SCSI_CXGB3_ISCSI=m
++CONFIG_SCSI_CXGB4_ISCSI=m
++# CONFIG_INFINIBAND_CXGB3_DEBUG is not set
++CONFIG_MLX4_INFINIBAND=m
++CONFIG_MLX5_INFINIBAND=m
++CONFIG_INFINIBAND_NES=m
++# CONFIG_INFINIBAND_NES_DEBUG is not set
++CONFIG_INFINIBAND_QIB=m
++CONFIG_INFINIBAND_QIB_DCA=y
++# CONFIG_INFINIBAND_OCRDMA is not set
++# CONFIG_INFINIBAND_USNIC is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
++# CONFIG_BINFMT_AOUT is not set
++CONFIG_BINFMT_SCRIPT=y
++
++#
++# Device Drivers
++#
++
++# CONFIG_COMMON_CLK_SI5351 is not set
++
++#
++# Generic Driver Options
++#
++CONFIG_FW_LOADER=y
++# CONFIG_FIRMWARE_IN_KERNEL is not set
++CONFIG_EXTRA_FIRMWARE=""
++
++# Give this a try in rawhide for now
++# CONFIG_FW_LOADER_USER_HELPER is not set
++
++
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD_TESTS is not set
++# CONFIG_MTD_REDBOOT_PARTS is not set
++# CONFIG_MTD_AR7_PARTS is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++# CONFIG_MTD_CHAR is not set
++# CONFIG_MTD_BLKDEVS is not set
++# CONFIG_MTD_BLOCK is not set
++# CONFIG_MTD_BLOCK_RO is not set
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_SM_FTL is not set
++# CONFIG_MTD_OOPS is not set
++# CONFIG_MTD_SWAP is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++# CONFIG_MTD_CFI is not set
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_TS5500 is not set
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++# Self-contained MTD device drivers
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOCG3 is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++# CONFIG_MTD_NAND_ECC_BCH is not set
++# CONFIG_MTD_NAND_MUSEUM_IDS is not set
++# CONFIG_MTD_NAND_DISKONCHIP is not set
++# CONFIG_MTD_LPDDR is not set
++CONFIG_MTD_UBI=m
++CONFIG_MTD_UBI_WL_THRESHOLD=4096
++CONFIG_MTD_UBI_BEB_LIMIT=20
++# CONFIG_MTD_UBI_FASTMAP is not set
++# CONFIG_MTD_UBI_GLUEBI is not set
++
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++# CONFIG_PARPORT_PC_FIFO is not set
++# CONFIG_PARPORT_PC_SUPERIO is not set
++CONFIG_PARPORT_PC_PCMCIA=m
++CONFIG_PARPORT_1284=y
++# CONFIG_PARPORT_AX88796 is not set
++
++CONFIG_ACPI_PCI_SLOT=y
++CONFIG_HOTPLUG_PCI_ACPI=y
++CONFIG_HOTPLUG_PCI_ACPI_IBM=m
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV=y
++CONFIG_BLK_DEV_NULL_BLK=m
++CONFIG_BLK_DEV_FD=m
++# CONFIG_PARIDE is not set
++CONFIG_ZRAM=m
++# CONFIG_ZRAM_DEBUG is not set
++CONFIG_ENHANCEIO=m
++
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
++CONFIG_BLK_DEV_DRBD=m
++CONFIG_BLK_DEV_UMEM=m
++CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
++# Fedora 18 util-linux is the last release that supports cryptoloop devices
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_NVME=m
++CONFIG_BLK_DEV_SKD=m # 64-bit only but easier to put here
++CONFIG_BLK_DEV_OSD=m
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_IO_TRACE=y
++
++CONFIG_BLK_DEV_BSGLIB=y
++CONFIG_BLK_DEV_INTEGRITY=y
++CONFIG_BLK_DEV_THROTTLING=y
++# CONFIG_BLK_CMDLINE_PARSER is not set
++
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++# CONFIG_BLK_DEV_HD is not set
++# CONFIG_BLK_DEV_RSXX is not set
++
++CONFIG_SCSI_VIRTIO=m
++CONFIG_VIRTIO_BLK=m
++CONFIG_VIRTIO_PCI=m
++CONFIG_VIRTIO_BALLOON=m
++CONFIG_VIRTIO_MMIO=m
++# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
++CONFIG_VIRTIO_NET=m
++CONFIG_HW_RANDOM_VIRTIO=m
++CONFIG_VIRTIO_CONSOLE=m
++CONFIG_VHOST_NET=m
++CONFIG_TCM_VHOST=m
++CONFIG_VHOST_SCSI=m
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=y
++
++CONFIG_SCSI_ENCLOSURE=m
++CONFIG_SCSI_SRP=m
++CONFIG_SCSI_SRP_ATTRS=m
++CONFIG_SCSI_TGT=m
++CONFIG_SCSI_ISCI=m
++CONFIG_SCSI_CHELSIO_FCOE=m
++
++CONFIG_SCSI_DH=y
++CONFIG_SCSI_DH_RDAC=m
++CONFIG_SCSI_DH_HP_SW=m
++CONFIG_SCSI_DH_EMC=m
++CONFIG_SCSI_DH_ALUA=m
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++CONFIG_CHR_DEV_SCH=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_FC_TGT_ATTRS=y
++CONFIG_SCSI_ISCSI_ATTRS=m
++CONFIG_SCSI_SAS_ATTRS=m
++CONFIG_SCSI_SRP_TGT_ATTRS=y
++CONFIG_SCSI_SAS_LIBSAS=m
++CONFIG_SCSI_SAS_ATA=y
++CONFIG_SCSI_SAS_HOST_SMP=y
++CONFIG_RAID_ATTRS=m
++
++CONFIG_ISCSI_TCP=m
++CONFIG_ISCSI_BOOT_SYSFS=m
++
++#
++# SCSI low-level drivers
++#
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++# http://lists.fedoraproject.org/pipermail/kernel/2013-February/004102.html
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
++CONFIG_AIC7XXX_DEBUG_MASK=0
++# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=4
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
++# CONFIG_AIC79XX_DEBUG_ENABLE is not set
++CONFIG_AIC79XX_DEBUG_MASK=0
++# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
++CONFIG_SCSI_AIC94XX=m
++# CONFIG_AIC94XX_DEBUG is not set
++# CONFIG_SCSI_ADVANSYS is not set
++CONFIG_SCSI_BFA_FC=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_MEGARAID_LEGACY=m
++CONFIG_MEGARAID_SAS=m
++CONFIG_SCSI_ESAS2R=m
++CONFIG_SCSI_MVSAS=m
++# CONFIG_SCSI_MVSAS_DEBUG is not set
++CONFIG_SCSI_MVSAS_TASKLET=y
++CONFIG_SCSI_MPT2SAS=m
++CONFIG_SCSI_MPT2SAS_MAX_SGE=128
++CONFIG_SCSI_MPT2SAS_LOGGING=y
++CONFIG_SCSI_MPT3SAS=m
++CONFIG_SCSI_MPT3SAS_MAX_SGE=128
++CONFIG_SCSI_MPT3SAS_LOGGING=y
++
++CONFIG_SCSI_UFSHCD=m
++CONFIG_SCSI_UFSHCD_PCI=m
++# CONFIG_SCSI_UFSHCD_PLATFORM is not set
++
++CONFIG_SCSI_MVUMI=m
++
++CONFIG_SCSI_OSD_INITIATOR=m
++CONFIG_SCSI_OSD_ULD=m
++CONFIG_SCSI_OSD_DPRINT_SENSE=1
++# CONFIG_SCSI_OSD_DEBUG is not set
++
++CONFIG_SCSI_BNX2_ISCSI=m
++CONFIG_SCSI_BNX2X_FCOE=m
++CONFIG_BE2ISCSI=m
++CONFIG_SCSI_PMCRAID=m
++
++CONFIG_SCSI_HPSA=m
++CONFIG_SCSI_3W_SAS=m
++CONFIG_SCSI_PM8001=m
++CONFIG_VMWARE_PVSCSI=m
++CONFIG_VMWARE_BALLOON=m
++
++CONFIG_SCSI_ARCMSR=m
++CONFIG_SCSI_BUSLOGIC=m
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_FLASHPOINT=y
++CONFIG_SCSI_DMX3191D=m
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_EATA_PIO is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_HPTIOP=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_INIA100=m
++# CONFIG_SCSI_PPA is not set
++# CONFIG_SCSI_IMM is not set
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_STEX=m
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_DC395x=m
++# CONFIG_SCSI_NSP32 is not set
++CONFIG_SCSI_DEBUG=m
++CONFIG_SCSI_DC390T=m
++CONFIG_SCSI_QLA_FC=m
++CONFIG_TCM_QLA2XXX=m
++CONFIG_SCSI_QLA_ISCSI=m
++CONFIG_SCSI_IPR=m
++CONFIG_SCSI_IPR_TRACE=y
++CONFIG_SCSI_IPR_DUMP=y
++# CONFIG_SCSI_DPT_I2O is not set
++CONFIG_SCSI_LPFC=m
++# CONFIG_SCSI_LPFC_DEBUG_FS is not set
++
++# PCMCIA SCSI adapter support
++# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
++
++CONFIG_ATA_BMDMA=y
++CONFIG_ATA_VERBOSE_ERROR=y
++CONFIG_ATA_SFF=y
++CONFIG_ATA_PIIX=y
++# CONFIG_SATA_HIGHBANK is not set
++CONFIG_ATA_ACPI=y
++CONFIG_BLK_DEV_SX8=m
++CONFIG_PDC_ADMA=m
++CONFIG_SATA_AHCI=y
++CONFIG_SATA_INIC162X=m
++CONFIG_SATA_MV=m
++CONFIG_SATA_NV=m
++CONFIG_SATA_PMP=y
++CONFIG_SATA_PROMISE=m
++CONFIG_SATA_QSTOR=m
++CONFIG_SATA_RCAR=m
++CONFIG_SATA_SIL=m
++CONFIG_SATA_SIL24=m
++CONFIG_SATA_SIS=m
++CONFIG_SATA_SVW=m
++CONFIG_SATA_SX4=m
++CONFIG_SATA_ULI=m
++CONFIG_SATA_VIA=m
++CONFIG_SATA_VITESSE=m
++# CONFIG_SATA_ZPODD is not set
++CONFIG_SATA_ACARD_AHCI=m
++
++# CONFIG_PATA_LEGACY is not set
++CONFIG_PATA_ACPI=m
++CONFIG_PATA_ALI=m
++CONFIG_PATA_AMD=m
++CONFIG_PATA_ARASAN_CF=m
++CONFIG_PATA_ARTOP=m
++CONFIG_PATA_ATIIXP=m
++CONFIG_PATA_CMD640_PCI=m
++CONFIG_PATA_CMD64X=m
++CONFIG_PATA_CS5520=m
++CONFIG_PATA_CS5530=m
++CONFIG_PATA_CS5535=m
++CONFIG_PATA_CS5536=m
++CONFIG_PATA_CYPRESS=m
++CONFIG_PATA_EFAR=m
++CONFIG_ATA_GENERIC=m
++CONFIG_PATA_HPT366=m
++CONFIG_PATA_HPT37X=m
++CONFIG_PATA_HPT3X2N=m
++CONFIG_PATA_HPT3X3=m
++# CONFIG_PATA_HPT3X3_DMA is not set
++CONFIG_PATA_IT821X=m
++CONFIG_PATA_IT8213=m
++CONFIG_PATA_JMICRON=m
++CONFIG_PATA_NINJA32=m
++CONFIG_PATA_MARVELL=m
++CONFIG_PATA_MPIIX=m
++CONFIG_PATA_NETCELL=m
++CONFIG_PATA_NS87410=m
++CONFIG_PATA_NS87415=m
++CONFIG_PATA_OLDPIIX=m
++CONFIG_PATA_OPTI=m
++CONFIG_PATA_OPTIDMA=m
++CONFIG_PATA_PCMCIA=m
++CONFIG_PATA_PDC_OLD=m
++# CONFIG_PATA_RADISYS is not set
++CONFIG_PATA_RDC=m
++# CONFIG_PATA_RZ1000 is not set
++# CONFIG_PATA_SC1200 is not set
++CONFIG_PATA_SERVERWORKS=m
++CONFIG_PATA_PDC2027X=m
++CONFIG_PATA_SCH=m
++CONFIG_PATA_SIL680=m
++CONFIG_PATA_SIS=m
++CONFIG_PATA_TOSHIBA=m
++CONFIG_PATA_TRIFLEX=m
++CONFIG_PATA_VIA=m
++CONFIG_PATA_WINBOND=m
++CONFIG_PATA_ATP867X=m
++
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_AUTODETECT=y
++CONFIG_MD_FAULTY=m
++CONFIG_MD_LINEAR=m
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID456=m
++
++CONFIG_BCACHE=m
++# CONFIG_BCACHE_DEBUG is not set
++# CONFIG_BCACHE_EDEBUG is not set
++# CONFIG_BCACHE_CLOSURES_DEBUG is not set
++
++# CONFIG_MULTICORE_RAID456 is not set
++CONFIG_ASYNC_RAID6_TEST=m
++CONFIG_BLK_DEV_DM=y
++CONFIG_DM_CRYPT=m
++CONFIG_DM_DEBUG=y
++CONFIG_DM_DELAY=m
++CONFIG_DM_MIRROR=y
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_THIN_PROVISIONING=m
++CONFIG_DM_CACHE=m
++CONFIG_DM_CACHE_MQ=m
++CONFIG_DM_CACHE_CLEANER=m
++# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set
++# CONFIG_DM_DEBUG_SPACE_MAPS is not set
++CONFIG_DM_UEVENT=y
++CONFIG_DM_ZERO=y
++CONFIG_DM_LOG_USERSPACE=m
++CONFIG_DM_MULTIPATH_QL=m
++CONFIG_DM_MULTIPATH_ST=m
++CONFIG_DM_RAID=m
++CONFIG_DM_FLAKEY=m
++CONFIG_DM_VERITY=m
++CONFIG_DM_SWITCH=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=m
++CONFIG_FUSION_FC=m
++CONFIG_FUSION_MAX_SGE=40
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++CONFIG_FUSION_SAS=m
++CONFIG_FUSION_LOGGING=y
++
++#
++# IEEE 1394 (FireWire) support (JUJU alternative stack)
++#
++CONFIG_FIREWIRE=m
++CONFIG_FIREWIRE_OHCI=m
++CONFIG_FIREWIRE_SBP2=m
++CONFIG_FIREWIRE_NET=m
++CONFIG_FIREWIRE_OHCI_DEBUG=y
++CONFIG_FIREWIRE_NOSY=m
++# CONFIG_FIREWIRE_SERIAL is not set
++# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set
++
++#
++# Virtualization support drivers
++#
++# CONFIG_VIRT_DRIVERS is not set
++
++# Networking support
++#
++
++CONFIG_NET_DMA=y
++
++CONFIG_NETLINK_MMAP=y
++CONFIG_NETLINK_DIAG=m
++
++CONFIG_TCP_CONG_ADVANCED=y
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_TCP_CONG_HTCP=m
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_ILLINOIS=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_VEGAS=m
++CONFIG_TCP_CONG_VENO=m
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_YEAH=m
++
++CONFIG_TCP_MD5SIG=y
++
++#
++# Networking options
++#
++CONFIG_PACKET_DIAG=m
++CONFIG_UNIX_DIAG=m
++CONFIG_NET_KEY=m
++CONFIG_NET_KEY_MIGRATE=y
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_DIAG=m
++CONFIG_INET_UDP_DIAG=m
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_FIB_TRIE_STATS=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_NF_SECURITY=m
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE_DEMUX=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_ARPD=y
++CONFIG_SYN_COOKIES=y
++CONFIG_NET_IPVTI=m
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_NETCONSOLE=m
++CONFIG_NETCONSOLE_DYNAMIC=y
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++CONFIG_IP_VS_PROTO_SCTP=y
++CONFIG_IP_VS_IPV6=y
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS SH scheduler
++#
++CONFIG_IP_VS_SH_TAB_BITS=8
++
++CONFIG_IP_VS_FTP=m
++CONFIG_IP_VS_PE_SIP=m
++
++CONFIG_IPV6_PRIVACY=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++CONFIG_IPV6_OPTIMISTIC_DAD=y
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_IPV6_MIP6=y
++CONFIG_IPV6_VTI=m
++CONFIG_IPV6_SIT=m
++CONFIG_IPV6_SIT_6RD=y
++CONFIG_IPV6_TUNNEL=m
++# CONFIG_IPV6_GRE is not set
++CONFIG_IPV6_SUBTREES=y
++CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IPV6_PIMSM_V2=y
++
++CONFIG_RDS=m
++# CONFIG_RDS_DEBUG is not set
++CONFIG_RDS_RDMA=m
++CONFIG_RDS_TCP=m
++
++CONFIG_NET_9P=m
++CONFIG_NET_9P_VIRTIO=m
++# CONFIG_NET_9P_DEBUG is not set
++CONFIG_NET_9P_RDMA=m
++
++# CONFIG_DECNET is not set
++CONFIG_BRIDGE=m
++CONFIG_BRIDGE_IGMP_SNOOPING=y
++CONFIG_BRIDGE_VLAN_FILTERING=y
++
++# PHY timestamping adds overhead
++CONFIG_NETWORK_PHY_TIMESTAMPING=y
++
++CONFIG_NETFILTER_ADVANCED=y
++CONFIG_NF_CONNTRACK=m
++CONFIG_NETFILTER_NETLINK=m
++CONFIG_NETFILTER_NETLINK_ACCT=m
++CONFIG_NETFILTER_NETLINK_QUEUE=m
++CONFIG_NETFILTER_NETLINK_QUEUE_CT=y
++CONFIG_NETFILTER_NETLINK_LOG=m
++CONFIG_NETFILTER_TPROXY=m
++CONFIG_NETFILTER_XTABLES=y
++CONFIG_NETFILTER_XT_SET=m
++CONFIG_NETFILTER_XT_MARK=m
++CONFIG_NETFILTER_XT_CONNMARK=m
++
++CONFIG_NETFILTER_XT_TARGET_AUDIT=m
++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
++CONFIG_NETFILTER_XT_TARGET_CT=m
++CONFIG_NETFILTER_XT_TARGET_DSCP=m
++CONFIG_NETFILTER_XT_TARGET_HMARK=m
++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
++CONFIG_NETFILTER_XT_TARGET_LED=m
++CONFIG_NETFILTER_XT_TARGET_LOG=m
++CONFIG_NETFILTER_XT_TARGET_MARK=m
++CONFIG_NETFILTER_XT_TARGET_NFLOG=m
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
++CONFIG_NETFILTER_XT_TARGET_RATEEST=m
++CONFIG_NETFILTER_XT_TARGET_SECMARK=m
++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
++CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
++CONFIG_NETFILTER_XT_TARGET_TRACE=m
++CONFIG_NETFILTER_XT_TARGET_TEE=m
++CONFIG_NETFILTER_XT_TARGET_TPROXY=m
++
++CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
++CONFIG_NETFILTER_XT_MATCH_BPF=m
++CONFIG_NETFILTER_XT_MATCH_CGROUP=m
++CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
++CONFIG_NETFILTER_XT_MATCH_COMMENT=m
++CONFIG_NETFILTER_XT_MATCH_CPU=m
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
++CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
++CONFIG_NETFILTER_XT_MATCH_DCCP=m
++CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
++CONFIG_NETFILTER_XT_MATCH_DSCP=m
++CONFIG_NETFILTER_XT_MATCH_ECN=m
++CONFIG_NETFILTER_XT_MATCH_ESP=m
++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
++CONFIG_NETFILTER_XT_MATCH_HELPER=m
++CONFIG_NETFILTER_XT_MATCH_HL=m
++CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
++CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
++CONFIG_NETFILTER_XT_MATCH_IPVS=m
++CONFIG_NETFILTER_XT_MATCH_L2TP=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NETFILTER_XT_MATCH_LIMIT=m
++CONFIG_NETFILTER_XT_MATCH_MAC=m
++CONFIG_NETFILTER_XT_MATCH_MARK=m
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
++CONFIG_NETFILTER_XT_MATCH_NFACCT=m
++CONFIG_NETFILTER_XT_MATCH_OSF=m
++CONFIG_NETFILTER_XT_MATCH_OWNER=m
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
++CONFIG_NETFILTER_XT_MATCH_POLICY=m
++CONFIG_NETFILTER_XT_MATCH_QUOTA=m
++CONFIG_NETFILTER_XT_MATCH_RATEEST=m
++CONFIG_NETFILTER_XT_MATCH_REALM=m
++CONFIG_NETFILTER_XT_MATCH_RECENT=m
++CONFIG_NETFILTER_XT_MATCH_SCTP=m
++CONFIG_NETFILTER_XT_MATCH_SOCKET=m
++CONFIG_NETFILTER_XT_MATCH_STATE=y
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
++CONFIG_NETFILTER_XT_MATCH_STRING=m
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
++CONFIG_NETFILTER_XT_MATCH_TIME=m
++CONFIG_NETFILTER_XT_MATCH_U32=m
++
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++
++CONFIG_NF_CONNTRACK_MARK=y
++CONFIG_NF_CONNTRACK_SECMARK=y
++CONFIG_NF_CONNTRACK_EVENTS=y
++CONFIG_NF_CONNTRACK_ZONES=y
++CONFIG_NF_CONNTRACK_PROCFS=y # check if conntrack(8) in f17 supports netlink
++# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
++CONFIG_NF_CONNTRACK_AMANDA=m
++CONFIG_NF_CONNTRACK_FTP=m
++CONFIG_NF_CONNTRACK_H323=m
++CONFIG_NF_CONNTRACK_IRC=m
++CONFIG_NF_CONNTRACK_NETBIOS_NS=m
++CONFIG_NF_CONNTRACK_PPTP=m
++CONFIG_NF_CONNTRACK_SANE=m
++CONFIG_NF_CONNTRACK_SIP=m
++CONFIG_NF_CONNTRACK_TFTP=m
++CONFIG_NF_CONNTRACK_IPV4=y
++CONFIG_NF_CONNTRACK_IPV6=y
++# CONFIG_NF_CONNTRACK_TIMEOUT is not set
++CONFIG_NF_CONNTRACK_TIMESTAMP=y
++CONFIG_NF_CONNTRACK_SNMP=m
++CONFIG_NF_NAT=m
++CONFIG_NF_NAT_SNMP_BASIC=m
++CONFIG_NF_CT_PROTO_DCCP=m
++CONFIG_NF_CT_PROTO_SCTP=m
++CONFIG_NF_CT_NETLINK=m
++# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
++CONFIG_NF_CT_NETLINK_HELPER=m
++CONFIG_NF_CT_PROTO_UDPLITE=m
++
++CONFIG_IP_NF_MATCH_AH=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_RPFILTER=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_REJECT=y
++CONFIG_IP_NF_TARGET_SYNPROXY=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_NF_NAT_IPV4=m
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_RAW=m
++
++CONFIG_IP_NF_IPTABLES=y
++CONFIG_IP_NF_FILTER=y
++
++#
++# IPv6: Netfilter Configuration
++#
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_MATCH_AH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_MH=m
++CONFIG_IP6_NF_MATCH_RPFILTER=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_RAW=m
++CONFIG_IP6_NF_SECURITY=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_TARGET_SYNPROXY=m
++CONFIG_IP6_NF_TARGET_HL=m
++CONFIG_NF_NAT_IPV6=m
++CONFIG_IP6_NF_TARGET_MASQUERADE=m
++# CONFIG_IP6_NF_TARGET_NPT is not set
++
++# nf_tables support
++CONFIG_NF_TABLES=m
++CONFIG_NF_TABLES_INET=m
++CONFIG_NFT_EXTHDR=m
++CONFIG_NFT_META=m
++CONFIG_NFT_CT=m
++CONFIG_NFT_RBTREE=m
++CONFIG_NFT_HASH=m
++CONFIG_NFT_COUNTER=m
++CONFIG_NFT_LOG=m
++CONFIG_NFT_LIMIT=m
++CONFIG_NFT_NAT=m
++CONFIG_NFT_QUEUE=m
++CONFIG_NFT_REJECT=m
++CONFIG_NFT_COMPAT=m
++
++CONFIG_NF_TABLES_IPV4=m
++CONFIG_NFT_REJECT_IPV4=m
++CONFIG_NFT_CHAIN_ROUTE_IPV4=m
++CONFIG_NFT_CHAIN_NAT_IPV4=m
++CONFIG_NF_TABLES_ARP=m
++
++CONFIG_NF_TABLES_IPV6=m
++CONFIG_NFT_CHAIN_ROUTE_IPV6=m
++CONFIG_NFT_CHAIN_NAT_IPV6=m
++
++CONFIG_NF_TABLES_BRIDGE=m
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_IP6=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_NFLOG=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_ULOG=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_XFRM=y
++CONFIG_XFRM_MIGRATE=y
++CONFIG_XFRM_SUB_POLICY=y
++CONFIG_XFRM_STATISTICS=y
++CONFIG_XFRM_USER=y
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
++CONFIG_INET6_XFRM_MODE_BEET=m
++
++CONFIG_IP_SET=m
++CONFIG_IP_SET_MAX=256
++CONFIG_IP_SET_BITMAP_IP=m
++CONFIG_IP_SET_BITMAP_IPMAC=m
++CONFIG_IP_SET_BITMAP_PORT=m
++CONFIG_IP_SET_HASH_IP=m
++CONFIG_IP_SET_HASH_IPPORT=m
++CONFIG_IP_SET_HASH_IPPORTIP=m
++CONFIG_IP_SET_HASH_IPPORTNET=m
++CONFIG_IP_SET_HASH_NETPORTNET=m
++CONFIG_IP_SET_HASH_NET=m
++CONFIG_IP_SET_HASH_NETNET=m
++CONFIG_IP_SET_HASH_NETPORT=m
++CONFIG_IP_SET_HASH_NETIFACE=m
++CONFIG_IP_SET_LIST_SET=m
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++CONFIG_NET_SCTPPROBE=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
++# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
++# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
++CONFIG_SCTP_COOKIE_HMAC_MD5=y
++CONFIG_SCTP_COOKIE_HMAC_SHA1=y
++CONFIG_ATM=m
++CONFIG_VLAN_8021Q_GVRP=y
++CONFIG_VLAN_8021Q_MVRP=y
++CONFIG_LLC=m
++# CONFIG_LLC2 is not set
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=m
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++CONFIG_WAN_ROUTER=m
++CONFIG_IP_DCCP=m
++CONFIG_IP_DCCP_CCID2=m
++# CONFIG_IP_DCCP_CCID2_DEBUG is not set
++CONFIG_IP_DCCP_CCID3=y
++# CONFIG_IP_DCCP_CCID3_DEBUG is not set
++# CONFIG_IP_DCCP_DEBUG is not set
++# CONFIG_NET_DCCPPROBE is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++CONFIG_TIPC=m
++CONFIG_TIPC_PORTS=8192
++# CONFIG_TIPC_MEDIA_IB is not set
++# CONFIG_TIPC_ADVANCED is not set
++# CONFIG_TIPC_DEBUG is not set
++
++CONFIG_NETLABEL=y
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_DRR=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_SFB=m
++CONFIG_NET_SCH_MQPRIO=m
++CONFIG_NET_SCH_MULTIQ=m
++CONFIG_NET_SCH_CHOKE=m
++CONFIG_NET_SCH_QFQ=m
++CONFIG_NET_SCH_CODEL=m
++CONFIG_NET_SCH_FQ_CODEL=m
++CONFIG_NET_SCH_FQ=m
++CONFIG_NET_SCH_HHF=m
++CONFIG_NET_SCH_PIE=m
++CONFIG_NET_SCH_PLUG=m
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_ACT=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_CGROUP=y
++CONFIG_NET_CLS_BPF=m
++CONFIG_NET_CLS_FLOW=m
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_IND=y
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_U32=m
++CONFIG_CLS_U32_MARK=y
++CONFIG_CLS_U32_PERF=y
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_TEXT=m
++CONFIG_NET_EMATCH_IPSET=m
++CONFIG_NET_EMATCH_U32=m
++
++CONFIG_NET_ACT_CSUM=m
++CONFIG_NET_ACT_GACT=m
++CONFIG_GACT_PROB=y
++CONFIG_NET_ACT_IPT=m
++CONFIG_NET_ACT_MIRRED=m
++CONFIG_NET_ACT_NAT=m
++CONFIG_NET_ACT_PEDIT=m
++CONFIG_NET_ACT_POLICE=m
++CONFIG_NET_ACT_SIMP=m
++CONFIG_NET_ACT_SKBEDIT=m
++
++CONFIG_DCB=y
++CONFIG_DNS_RESOLVER=m
++CONFIG_BATMAN_ADV=m
++CONFIG_BATMAN_ADV_BLA=y
++CONFIG_BATMAN_ADV_DAT=y
++CONFIG_BATMAN_ADV_NC=y
++
++# CONFIG_BATMAN_ADV_DEBUG is not set
++CONFIG_OPENVSWITCH=m
++CONFIG_OPENVSWITCH_GRE=y
++CONFIG_OPENVSWITCH_VXLAN=y
++CONFIG_VSOCKETS=m
++
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++# CONFIG_NET_TCPPROBE is not set
++CONFIG_NET_DROP_MONITOR=y
++
++# disable later --kyle
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++CONFIG_IFB=m
++CONFIG_NET_TEAM=m
++CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
++CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
++CONFIG_NET_TEAM_MODE_LOADBALANCE=m
++CONFIG_NET_TEAM_MODE_BROADCAST=m
++CONFIG_NET_TEAM_MODE_RANDOM=m
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_MACVLAN=m
++CONFIG_MACVTAP=m
++CONFIG_VXLAN=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_VETH=m
++CONFIG_NLMON=m
++
++#
++# ATM
++#
++CONFIG_ATM_DRIVERS=y
++# CONFIG_ATM_DUMMY is not set
++CONFIG_ATM_CLIP=m
++CONFIG_ATM_LANE=m
++CONFIG_ATM_BR2684=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_ATM_TCP=m
++# CONFIG_ATM_LANAI is not set
++CONFIG_ATM_ENI=m
++CONFIG_ATM_FIRESTREAM=m
++# CONFIG_ATM_ZATM is not set
++# CONFIG_ATM_IDT77252 is not set
++# CONFIG_ATM_AMBASSADOR is not set
++# CONFIG_ATM_HORIZON is not set
++# CONFIG_ATM_FORE200E is not set
++# CONFIG_ATM_FORE200E_USE_TASKLET is not set
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++
++CONFIG_ATM_HE=m
++CONFIG_PPTP=m
++CONFIG_PPPOATM=m
++CONFIG_PPPOL2TP=m
++CONFIG_ATM_NICSTAR=m
++# CONFIG_ATM_IA is not set
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++# CONFIG_ATM_MPOA is not set
++# CONFIG_ATM_BR2684_IPFILTER is not set
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++# CONFIG_ATM_ZATM_DEBUG is not set
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++# CONFIG_ATM_HORIZON_DEBUG is not set
++# CONFIG_ATM_HE_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_SOLOS=m
++
++CONFIG_L2TP=m
++CONFIG_L2TP_V3=y
++CONFIG_L2TP_IP=m
++CONFIG_L2TP_ETH=m
++
++# CONFIG_CAIF is not set
++
++CONFIG_RFKILL=m
++CONFIG_RFKILL_GPIO=m
++CONFIG_RFKILL_INPUT=y
++
++
++#
++# Ethernet (10 or 100Mbit)
++#
++
++CONFIG_NET_VENDOR_ADAPTEC=y
++CONFIG_ADAPTEC_STARFIRE=m
++
++CONFIG_NET_VENDOR_ALTEON=y
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++
++CONFIG_NET_VENDOR_AMD=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++CONFIG_PCMCIA_NMCLAN=m
++
++CONFIG_NET_VENDOR_ARC=y
++CONFIG_ARC_EMAC=m
++
++CONFIG_NET_VENDOR_ATHEROS=y
++CONFIG_ALX=m
++CONFIG_ATL2=m
++CONFIG_ATL1=m
++CONFIG_ATL1C=m
++CONFIG_ATL1E=m
++CONFIG_NET_CADENCE=y
++CONFIG_ARM_AT91_ETHER=m
++CONFIG_MACB=m
++
++CONFIG_NET_VENDOR_BROCADE=y
++CONFIG_BNA=m
++CONFIG_NET_CALXEDA_XGMAC=m
++
++CONFIG_NET_VENDOR_CHELSIO=y
++CONFIG_CHELSIO_T1=m
++CONFIG_CHELSIO_T1_1G=y
++CONFIG_CHELSIO_T3=m
++CONFIG_CHELSIO_T4=m
++CONFIG_CHELSIO_T4VF=m
++
++CONFIG_NET_VENDOR_CISCO=y
++CONFIG_ENIC=m
++
++CONFIG_NET_VENDOR_DEC=y
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_DE2104X_DSL=0
++CONFIG_TULIP=m
++# CONFIG_TULIP_NAPI is not set
++# CONFIG_TULIP_MWI is not set
++CONFIG_TULIP_MMIO=y
++# CONFIG_NI5010 is not set
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_PCMCIA_XIRCOM=m
++CONFIG_ULI526X=m
++
++CONFIG_NET_VENDOR_DLINK=y
++CONFIG_DE600=m
++CONFIG_DE620=m
++CONFIG_DL2K=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++
++CONFIG_NET_VENDOR_EMULEX=y
++CONFIG_BE2NET=m
++
++CONFIG_NET_VENDOR_EXAR=y
++CONFIG_S2IO=m
++CONFIG_VXGE=m
++# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
++
++# CONFIG_NET_VENDOR_FARADAY is not set
++# CONFIG_NET_VENDOR_FUJITSU is not set
++# CONFIG_NET_VENDOR_HP is not set
++CONFIG_NET_VENDOR_INTEL=y
++CONFIG_E100=m
++CONFIG_E1000=m
++CONFIG_E1000E=m
++CONFIG_IGB=m
++CONFIG_IGB_HWMON=y
++CONFIG_IGB_DCA=y
++CONFIG_IGB_PTP=y
++CONFIG_IGBVF=m
++CONFIG_IXGB=m
++CONFIG_IXGBEVF=m
++CONFIG_IXGBE=m
++CONFIG_IXGBE_DCA=y
++CONFIG_IXGBE_DCB=y
++CONFIG_IXGBE_HWMON=y
++CONFIG_IXGBE_PTP=y
++CONFIG_I40E=m
++# CONFIG_I40E_VXLAN is not set
++# CONFIG_I40E_DCB is not set
++# CONFIG_I40EVF is not set
++
++
++# CONFIG_NET_VENDOR_I825XX is not set
++CONFIG_NET_VENDOR_MARVELL=y
++CONFIG_MVMDIO=m
++CONFIG_SKGE=m
++# CONFIG_SKGE_DEBUG is not set
++CONFIG_SKGE_GENESIS=y
++CONFIG_SKY2=m
++# CONFIG_SKY2_DEBUG is not set
++
++CONFIG_NET_VENDOR_MICREL=y
++CONFIG_KSZ884X_PCI=m
++# CONFIG_KS8842 is not set
++# CONFIG_KS8851_MLL is not set
++
++CONFIG_NET_VENDOR_MYRI=y
++CONFIG_MYRI10GE=m
++CONFIG_MYRI10GE_DCA=y
++
++CONFIG_NATSEMI=m
++CONFIG_NS83820=m
++
++CONFIG_PCMCIA_AXNET=m
++CONFIG_NE2K_PCI=m
++CONFIG_NE3210=m
++CONFIG_PCMCIA_PCNET=m
++
++CONFIG_NET_VENDOR_NVIDIA=y
++CONFIG_FORCEDETH=m
++
++CONFIG_NET_VENDOR_OKI=y
++# CONFIG_PCH_GBE is not set
++# CONFIG_PCH_PTP is not set
++
++CONFIG_NET_PACKET_ENGINE=y
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++
++CONFIG_NET_VENDOR_QLOGIC=y
++CONFIG_QLA3XXX=m
++CONFIG_QLCNIC=m
++CONFIG_QLCNIC_SRIOV=y
++CONFIG_QLCNIC_DCB=y
++CONFIG_QLGE=m
++CONFIG_NETXEN_NIC=m
++
++CONFIG_NET_VENDOR_REALTEK=y
++CONFIG_ATP=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++# CONFIG_8139TOO_PIO is not set
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_R8169=m
++
++
++CONFIG_NET_VENDOR_RDC=y
++CONFIG_R6040=m
++
++
++CONFIG_NET_VENDOR_SILAN=y
++CONFIG_SC92031=m
++
++CONFIG_NET_VENDOR_SIS=y
++CONFIG_SIS900=m
++CONFIG_SIS190=m
++
++CONFIG_PCMCIA_SMC91C92=m
++CONFIG_EPIC100=m
++CONFIG_SMSC9420=m
++
++# CONFIG_STMMAC_PLATFORM is not set
++# CONFIG_STMMAC_PCI is not set
++# CONFIG_STMMAC_DA is not set
++# CONFIG_STMMAC_DUAL_MAC is not set
++# CONFIG_STMMAC_TIMER is not set
++# CONFIG_STMMAC_DEBUG_FS is not set
++
++CONFIG_NET_VENDOR_SUN=y
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_CASSINI=m
++CONFIG_NIU=m
++
++CONFIG_NET_VENDOR_TEHUTI=y
++CONFIG_TEHUTI=m
++
++CONFIG_NET_VENDOR_TI=y
++CONFIG_TLAN=m
++
++CONFIG_VIA_RHINE=m
++CONFIG_VIA_RHINE_MMIO=y
++
++CONFIG_WIZNET_W5100=m
++CONFIG_WIZNET_W5300=m
++CONFIG_NET_VENDOR_XIRCOM=y
++CONFIG_PCMCIA_XIRC2PS=m
++
++CONFIG_AMD_PHY=m
++CONFIG_BROADCOM_PHY=m
++CONFIG_BCM87XX_PHY=m
++CONFIG_CICADA_PHY=m
++CONFIG_DAVICOM_PHY=m
++CONFIG_DP83640_PHY=m
++CONFIG_FIXED_PHY=y
++CONFIG_MDIO_BITBANG=m
++CONFIG_NATIONAL_PHY=m
++CONFIG_ICPLUS_PHY=m
++CONFIG_BCM63XX_PHY=m
++CONFIG_LSI_ET1011C_PHY=m
++CONFIG_LXT_PHY=m
++CONFIG_MARVELL_PHY=m
++CONFIG_QSEMI_PHY=m
++CONFIG_REALTEK_PHY=m
++CONFIG_SMSC_PHY=m
++CONFIG_STE10XP=m
++CONFIG_VITESSE_PHY=m
++CONFIG_MICREL_PHY=m
++
++CONFIG_MII=m
++CONFIG_NET_CORE=y
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++CONFIG_DNET=m
++
++
++CONFIG_LNE390=m
++CONFIG_ES3210=m
++CONFIG_NET_PCI=y
++CONFIG_B44=m
++CONFIG_B44_PCI=y
++CONFIG_BNX2=m
++CONFIG_BNX2X=m
++CONFIG_BNX2X_SRIOV=y
++CONFIG_CNIC=m
++CONFIG_FEALNX=m
++CONFIG_NET_POCKET=y
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_TIGON3=m
++CONFIG_JME=m
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_IP1000 is not set
++# CONFIG_MLX4_EN is not set
++# CONFIG_SFC is not set
++
++# CONFIG_FDDI is not set
++# CONFIG_DEFXX is not set
++# CONFIG_SKFP is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PLIP is not set
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_IPPP_FILTER=y
++CONFIG_PPP_BSDCOMP=y
++CONFIG_PPPOE=m
++CONFIG_PPP_MPPE=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++# CONFIG_SLIP_MODE_SLIP6 is not set
++
++#
++# Wireless LAN
++#
++#
++# CONFIG_STRIP is not set
++# CONFIG_PCMCIA_RAYCS is not set
++
++CONFIG_CFG80211_WEXT=y
++# CONFIG_CFG80211_REG_DEBUG is not set
++# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
++CONFIG_CFG80211_DEFAULT_PS=y
++CONFIG_NL80211=y
++# CONFIG_NL80211_TESTMODE is not set
++# CONFIG_WIRELESS_EXT_SYSFS is not set
++CONFIG_LIB80211=m
++CONFIG_LIB80211_CRYPT_WEP=m
++CONFIG_LIB80211_CRYPT_CCMP=m
++CONFIG_LIB80211_CRYPT_TKIP=m
++# CONFIG_LIB80211_DEBUG is not set
++
++CONFIG_MAC80211=m
++CONFIG_MAC80211_RC_MINSTREL=y
++# CONFIG_MAC80211_RC_DEFAULT_PID is not set
++CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
++CONFIG_MAC80211_RC_DEFAULT="minstrel"
++CONFIG_MAC80211_MESH=y
++CONFIG_MAC80211_LEDS=y
++# CONFIG_MAC80211_DEBUG_MENU is not set
++
++# CONFIG_WIMAX is not set
++
++# CONFIG_ADM8211 is not set
++CONFIG_ATH_COMMON=m
++CONFIG_ATH_CARDS=m
++CONFIG_ATH5K=m
++CONFIG_ATH5K_DEBUG=y
++# CONFIG_ATH5K_TRACER is not set
++CONFIG_ATH6KL=m
++CONFIG_ATH6KL_DEBUG=y
++CONFIG_ATH6KL_SDIO=m
++CONFIG_ATH6KL_USB=m
++# CONFIG_ATH6KL_TRACING is not set
++CONFIG_AR5523=m
++CONFIG_ATH9K=m
++CONFIG_ATH9K_PCI=y
++CONFIG_ATH9K_AHB=y
++# CONFIG_ATH9K_DEBUG is not set
++# CONFIG_ATH9K_MAC_DEBUG is not set
++CONFIG_ATH9K_HTC=m
++CONFIG_ATH9K_BTCOEX_SUPPORT=y
++# CONFIG_ATH9K_LEGACY_RATE_CONTROL is not set
++# CONFIG_ATH9K_WOW is not set
++#
++CONFIG_ATH10K=m
++CONFIG_ATH10K_PCI=m
++# CONFIG_ATH10K_DEBUG is not set
++# CONFIG_ATH10K_TRACING is not set
++CONFIG_ATH10K_DEBUGFS=y
++CONFIG_WCN36XX=m
++# CONFIG_WCN36XX_DEBUGFS is not set
++CONFIG_WIL6210=m
++CONFIG_WIL6210_ISR_COR=y
++# CONFIG_WIL6210_TRACING is not set
++CONFIG_CARL9170=m
++CONFIG_CARL9170_LEDS=y
++# CONFIG_CARL9170_HWRNG is not set
++CONFIG_AT76C50X_USB=m
++# CONFIG_AIRO is not set
++# CONFIG_AIRO_CS is not set
++# CONFIG_ATMEL is not set
++CONFIG_B43=m
++CONFIG_B43_PCMCIA=y
++CONFIG_B43_SDIO=y
++CONFIG_B43_BCMA=y
++# CONFIG_B43_BCMA_EXTRA is not set
++CONFIG_B43_BCMA_PIO=y
++# CONFIG_B43_DEBUG is not set
++CONFIG_B43_PHY_LP=y
++CONFIG_B43_PHY_N=y
++CONFIG_B43_PHY_HT=y
++# CONFIG_B43_FORCE_PIO is not set
++CONFIG_B43LEGACY=m
++# CONFIG_B43LEGACY_DEBUG is not set
++CONFIG_B43LEGACY_DMA=y
++CONFIG_B43LEGACY_PIO=y
++CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
++# CONFIG_B43LEGACY_DMA_MODE is not set
++# CONFIG_B43LEGACY_PIO_MODE is not set
++CONFIG_BRCMSMAC=m
++# CONFIG_BRCMFMAC_SDIO_OOB is not set
++CONFIG_BRCMFMAC_USB=y
++# CONFIG_BRCM_TRACING is not set
++# CONFIG_BRCMISCAN is not set
++# CONFIG_BRCMDBG is not set
++CONFIG_HERMES=m
++CONFIG_HERMES_CACHE_FW_ON_INIT=y
++# CONFIG_HERMES_PRISM is not set
++CONFIG_NORTEL_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_PCMCIA_HERMES=m
++CONFIG_ORINOCO_USB=m
++# CONFIG_TMD_HERMES is not set
++# CONFIG_PCMCIA_SPECTRUM is not set
++CONFIG_CW1200=m
++CONFIG_CW1200_WLAN_SDIO=m
++CONFIG_CW1200_WLAN_SPI=m
++# CONFIG_HOSTAP is not set
++# CONFIG_IPW2100 is not set
++# CONFIG_IPW2200 is not set
++# CONFIG_IPW2100_DEBUG is not set
++# CONFIG_IPW2200_DEBUG is not set
++# CONFIG_LIBIPW_DEBUG is not set
++CONFIG_LIBERTAS=m
++CONFIG_LIBERTAS_USB=m
++CONFIG_LIBERTAS_CS=m
++CONFIG_LIBERTAS_SDIO=m
++# CONFIG_LIBERTAS_DEBUG is not set
++# CONFIG_LIBERTAS_THINFIRM is not set
++CONFIG_LIBERTAS_MESH=y
++CONFIG_IWLWIFI=m
++CONFIG_IWLDVM=m
++CONFIG_IWLMVM=m
++CONFIG_IWLWIFI_DEBUG=y
++CONFIG_IWLWIFI_DEVICE_SVTOOL=y
++# CONFIG_IWLWIFI_EXPERIMENTAL_MFP is not set
++CONFIG_IWLWIFI_UCODE16=y
++# CONFIG_IWLWIFI_P2P is not set
++CONFIG_IWLEGACY=m
++CONFIG_IWLEGACY_DEBUG=y
++# CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING is not set
++CONFIG_IWL4965=y
++CONFIG_IWL3945=m
++# CONFIG_IWM is not set
++# CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is not set
++CONFIG_MAC80211_HWSIM=m
++CONFIG_P54_COMMON=m
++CONFIG_P54_USB=m
++CONFIG_P54_PCI=m
++CONFIG_MWL8K=m
++# CONFIG_PRISM54 is not set
++# CONFIG_PCMCIA_WL3501 is not set
++CONFIG_RT2X00=m
++# CONFIG_RT2X00_DEBUG is not set
++CONFIG_RT2400PCI=m
++CONFIG_RT2500PCI=m
++CONFIG_RT61PCI=m
++CONFIG_RT2500USB=m
++CONFIG_RT2800USB=m
++CONFIG_RT2800USB_RT33XX=y
++CONFIG_RT2800USB_RT35XX=y
++CONFIG_RT2800USB_RT3573=y
++CONFIG_RT2800USB_RT53XX=y
++CONFIG_RT2800USB_RT55XX=y
++CONFIG_RT2800USB_UNKNOWN=y
++CONFIG_RT2800PCI=m
++CONFIG_RT2800PCI_RT3290=y
++CONFIG_RT2800PCI_RT33XX=y
++CONFIG_RT2800PCI_RT35XX=y
++CONFIG_RT2800PCI_RT53XX=y
++CONFIG_RT73USB=m
++CONFIG_RTL8180=m
++CONFIG_RTL8187=m
++# CONFIG_USB_ZD1201 is not set
++# CONFIG_USB_NET_SR9800 is not set
++CONFIG_USB_NET_RNDIS_WLAN=m
++CONFIG_USB_NET_KALMIA=m
++CONFIG_USB_NET_QMI_WWAN=m
++CONFIG_USB_NET_SMSC75XX=m
++# CONFIG_WL_TI is not set
++CONFIG_ZD1211RW=m
++# CONFIG_ZD1211RW_DEBUG is not set
++
++CONFIG_WL12XX=m
++CONFIG_WL12XX_SPI=m
++CONFIG_WL12XX_SDIO=m
++
++CONFIG_WL1251=m
++CONFIG_WL1251_SPI=m
++CONFIG_WL1251_SDIO=m
++
++CONFIG_RTL_CARDS=m
++CONFIG_RTLWIFI=m
++CONFIG_RTL8192CE=m
++CONFIG_RTL8192SE=m
++CONFIG_RTL8192CU=m
++CONFIG_RTL8192DE=m
++CONFIG_RTL8723AE=m
++CONFIG_RTL8188EE=m
++
++CONFIG_MWIFIEX=m
++CONFIG_MWIFIEX_SDIO=m
++CONFIG_MWIFIEX_PCIE=m
++CONFIG_MWIFIEX_USB=m
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++CONFIG_NET_FC=y
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++
++#
++# PCMCIA network device support
++#
++CONFIG_NET_PCMCIA=y
++CONFIG_PCMCIA_3C589=m
++CONFIG_PCMCIA_3C574=m
++CONFIG_PCMCIA_FMVJ18X=m
++
++#
++# Amateur Radio support
++#
++CONFIG_HAMRADIO=y
++CONFIG_AX25=m
++CONFIG_AX25_DAMA_SLAVE=y
++
++# CONFIG_CAN is not set
++
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_BAYCOM_EPP=m
++CONFIG_YAM=m
++
++CONFIG_NFC=m
++CONFIG_NFC_DIGITAL=m
++CONFIG_NFC_NCI=m
++CONFIG_NFC_HCI=m
++CONFIG_NFC_SHDLC=y
++CONFIG_NFC_LLCP=y
++CONFIG_NFC_SIM=m
++CONFIG_NFC_MRVL=m
++CONFIG_NFC_MRVL_USB=m
++
++#
++# Near Field Communication (NFC) devices
++#
++CONFIG_NFC_PORT100=m
++CONFIG_NFC_PN544=m
++CONFIG_NFC_PN544_I2C=m
++CONFIG_NFC_PN533=m
++CONFIG_NFC_MICROREAD=m
++CONFIG_NFC_MICROREAD_I2C=m
++
++#
++# IrDA (infrared) support
++#
++CONFIG_IRDA=m
++# CONFIG_IRDA_DEBUG is not set
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++CONFIG_IRTTY_SIR=m
++CONFIG_DONGLE=y
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++CONFIG_ESI_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_KINGSUN_DONGLE=m
++CONFIG_KSDAZZLE_DONGLE=m
++CONFIG_KS959_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_TOIM3232_DONGLE=m
++
++CONFIG_ALI_FIR=m
++CONFIG_MCS_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++# CONFIG_TOSHIBA_FIR is not set
++CONFIG_USB_IRDA=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_WINBOND_FIR=m
++
++#
++# Bluetooth support
++#
++CONFIG_BT=m
++CONFIG_BT_L2CAP=y
++CONFIG_BT_SCO=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIBTUSB=m
++# Disable the BT_HCIUSB driver.
++# It sucks more power than BT_HCIBTUSB which has the same functionality.
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIUART_ATH3K=y
++CONFIG_BT_HCIUART_3WIRE=y
++CONFIG_BT_HCIDTL1=m
++CONFIG_BT_HCIBT3C=m
++CONFIG_BT_HCIBLUECARD=m
++CONFIG_BT_HCIBTUART=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBTSDIO=m
++CONFIG_BT_HCIUART_LL=y
++CONFIG_BT_MRVL=m
++CONFIG_BT_MRVL_SDIO=m
++CONFIG_BT_ATH3K=m
++CONFIG_BT_WILINK=m
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=y
++CONFIG_MISDN=m
++CONFIG_MISDN_DSP=m
++CONFIG_MISDN_L1OIP=m
++CONFIG_MISDN_AVMFRITZ=m
++CONFIG_MISDN_SPEEDFAX=m
++CONFIG_MISDN_INFINEON=m
++CONFIG_MISDN_W6692=m
++CONFIG_MISDN_NETJET=m
++
++#
++# mISDN hardware drivers
++#
++CONFIG_MISDN_HFCPCI=m
++CONFIG_MISDN_HFCMULTI=m
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++
++CONFIG_MISDN_HFCUSB=m
++
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++# CONFIG_ISDN_PPP_BSDCOMP is not set
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_DE_AOC=y
++
++CONFIG_ISDN_AUDIO=y
++
++CONFIG_ISDN_DRV_HISAX=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
++
++CONFIG_ISDN_CAPI_CAPIDRV=m
++CONFIG_ISDN_DIVERSION=m
++
++CONFIG_HISAX_EURO=y
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++CONFIG_HISAX_AVM_A1_CS=m
++CONFIG_HISAX_ST5481=m
++# CONFIG_HISAX_HFCUSB is not set
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_NO_SENDCOMPLETE=y
++CONFIG_HISAX_NO_LLC=y
++CONFIG_HISAX_NO_KEYPAD=y
++CONFIG_HISAX_SEDLBAUER_CS=m
++CONFIG_HISAX_ELSA_CS=m
++CONFIG_HISAX_TELES_CS=m
++CONFIG_HISAX_HFC4S8S=m
++
++CONFIG_ISDN_DRV_LOOP=m
++CONFIG_HYSDN=m
++CONFIG_HYSDN_CAPI=y
++
++
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++# CONFIG_CAPI_TRACE is not set
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++
++#
++# CAPI hardware drivers
++#
++
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++
++#
++# Active Eicon DIVA Server cards
++#
++# CONFIG_CAPI_EICON is not set
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
++
++CONFIG_ISDN_DRV_GIGASET=m
++CONFIG_GIGASET_CAPI=y
++CONFIG_GIGASET_BASE=m
++CONFIG_GIGASET_M101=m
++CONFIG_GIGASET_M105=m
++# CONFIG_GIGASET_DEBUG is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++CONFIG_INPUT_FF_MEMLESS=m
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++# CONFIG_INPUT_MATRIXKMAP is not set
++
++CONFIG_INPUT_TABLET=y
++CONFIG_TABLET_USB_ACECAD=m
++CONFIG_TABLET_USB_AIPTEK=m
++CONFIG_TABLET_USB_GTCO=m
++CONFIG_TABLET_USB_HANWANG=m
++CONFIG_TABLET_USB_KBTAB=m
++CONFIG_TABLET_USB_WACOM=m
++
++CONFIG_INPUT_POWERMATE=m
++CONFIG_INPUT_YEALINK=m
++CONFIG_INPUT_CM109=m
++CONFIG_INPUT_POLLDEV=m
++CONFIG_INPUT_SPARSEKMAP=m
++# CONFIG_INPUT_ADXL34X is not set
++# CONFIG_INPUT_BMA150 is not set
++# CONFIG_INPUT_IMS_PCU is not set
++CONFIG_INPUT_CMA3000=m
++CONFIG_INPUT_CMA3000_I2C=m
++CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
++
++#
++# Input I/O drivers
++#
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_FM801=m
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_RAW=m
++CONFIG_SERIO_ALTERA_PS2=m
++# CONFIG_SERIO_PS2MULT is not set
++CONFIG_SERIO_ARC_PS2=m
++# CONFIG_SERIO_APBPS2 is not set
++
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_OLPC_APSP is not set
++# CONFIG_SERIO_PARKBD is not set
++# CONFIG_SERIO_PCIPS2 is not set
++# CONFIG_SERIO_LIBPS2 is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_SH_KEYSC is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_MATRIX is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_LM8323 is not set
++# CONFIG_KEYBOARD_LM8333 is not set
++# CONFIG_KEYBOARD_MAX7359 is not set
++# CONFIG_KEYBOARD_ADP5589 is not set
++# CONFIG_KEYBOARD_MPR121 is not set
++# CONFIG_KEYBOARD_QT1070 is not set
++# CONFIG_KEYBOARD_MCS is not set
++# CONFIG_KEYBOARD_OPENCORES is not set
++# CONFIG_KEYBOARD_SAMSUNG is not set
++# CONFIG_KEYBOARD_QT2160 is not set
++# CONFIG_KEYBOARD_TCA6416 is not set
++# CONFIG_KEYBOARD_TCA8418 is not set
++# CONFIG_KEYBOARD_OMAP4 is not set
++CONFIG_INPUT_MOUSE=y
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++CONFIG_MOUSE_PS2_ELANTECH=y
++CONFIG_MOUSE_PS2_SENTELIC=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_MOUSE_APPLETOUCH=m
++CONFIG_MOUSE_BCM5974=m
++CONFIG_MOUSE_SYNAPTICS_I2C=m
++CONFIG_MOUSE_SYNAPTICS_USB=m
++CONFIG_MOUSE_CYAPA=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_WALKERA0701=m
++CONFIG_JOYSTICK_XPAD=m
++CONFIG_JOYSTICK_XPAD_FF=y
++CONFIG_JOYSTICK_XPAD_LEDS=y
++CONFIG_JOYSTICK_ZHENHUA=m
++# CONFIG_JOYSTICK_AS5011 is not set
++
++CONFIG_INPUT_TOUCHSCREEN=y
++# CONFIG_TOUCHSCREEN_AD7879 is not set
++CONFIG_TOUCHSCREEN_AD7879_I2C=m
++# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
++# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
++# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set
++CONFIG_TOUCHSCREEN_DYNAPRO=m
++CONFIG_TOUCHSCREEN_EDT_FT5X06=m
++CONFIG_TOUCHSCREEN_EETI=m
++CONFIG_TOUCHSCREEN_EGALAX=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_FUJITSU=m
++CONFIG_TOUCHSCREEN_GUNZE=m
++# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
++CONFIG_TOUCHSCREEN_INEXIO=m
++CONFIG_TOUCHSCREEN_ILI210X=m
++CONFIG_TOUCHSCREEN_MMS114=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MCS5000=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_TOUCHSCREEN_PENMOUNT=m
++# CONFIG_TOUCHSCREEN_SUR40 is not set
++# CONFIG_TOUCHSCREEN_TPS6507X is not set
++CONFIG_TOUCHSCREEN_TSC_SERIO=m
++CONFIG_TOUCHSCREEN_TSC2007=m
++CONFIG_TOUCHSCREEN_TOUCHIT213=m
++CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
++CONFIG_TOUCHSCREEN_TOUCHWIN=m
++CONFIG_TOUCHSCREEN_PIXCIR=m
++CONFIG_TOUCHSCREEN_UCB1400=m
++CONFIG_TOUCHSCREEN_WACOM_W8001=m
++CONFIG_TOUCHSCREEN_WACOM_I2C=m
++CONFIG_TOUCHSCREEN_USB_E2I=y
++CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
++# CONFIG_TOUCHSCREEN_WM97XX is not set
++CONFIG_TOUCHSCREEN_W90X900=m
++# CONFIG_TOUCHSCREEN_BU21013 is not set
++CONFIG_TOUCHSCREEN_ST1232=m
++CONFIG_TOUCHSCREEN_ATMEL_MXT=m
++# CONFIG_TOUCHSCREEN_MAX11801 is not set
++CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
++CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
++CONFIG_TOUCHSCREEN_ZFORCE=m
++
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_RETU_PWRBUTTON=m
++CONFIG_INPUT_UINPUT=m
++CONFIG_INPUT_WISTRON_BTNS=m
++CONFIG_INPUT_ATLAS_BTNS=m
++
++CONFIG_INPUT_ATI_REMOTE2=m
++CONFIG_INPUT_KEYSPAN_REMOTE=m
++
++CONFIG_MAC_EMUMOUSEBTN=y
++
++CONFIG_INPUT_WM831X_ON=m
++
++
++# CONFIG_INPUT_AD714X is not set
++# CONFIG_INPUT_PCF8574 is not set
++CONFIG_INPUT_MMA8450=m
++CONFIG_INPUT_MPU3050=m
++CONFIG_INPUT_KXTJ9=m
++# CONFIG_INPUT_KXTJ9_POLLED_MODE is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_SERIAL_NONSTANDARD=y
++CONFIG_ROCKETPORT=m
++CONFIG_SYNCLINK=m
++CONFIG_SYNCLINKMP=m
++CONFIG_SYNCLINK_GT=m
++CONFIG_N_HDLC=m
++CONFIG_N_GSM=m
++# CONFIG_TRACE_SINK is not set
++# CONFIG_STALDRV is not set
++# CONFIG_DUMMY_IRQ is not set
++# CONFIG_IBM_ASM is not set
++CONFIG_TIFM_CORE=m
++CONFIG_TIFM_7XX1=m
++CONFIG_TCG_TPM=m
++CONFIG_TCG_TIS=m
++# CONFIG_TCG_TIS_I2C_INFINEON is not set
++# CONFIG_TCG_TIS_I2C_ATMEL is not set
++# CONFIG_TCG_TIS_I2C_NUVOTON is not set
++CONFIG_TCG_NSC=m
++CONFIG_TCG_ATMEL=m
++# CONFIG_TCG_INFINEON is not set
++# CONFIG_TCG_ST33_I2C is not set
++# CONFIG_TCG_XEN is not set
++CONFIG_TELCLOCK=m
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_CS=m
++CONFIG_SERIAL_8250_NR_UARTS=32
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++CONFIG_SERIAL_8250_RSA=y
++# CONFIG_SERIAL_8250_DW is not set
++CONFIG_CYCLADES=m
++# CONFIG_CYZ_INTR is not set
++# CONFIG_MOXA_INTELLIO is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_ISI is not set
++# CONFIG_RIO is not set
++CONFIG_SERIAL_JSM=m
++# CONFIG_SERIAL_SCCNXP is not set
++# CONFIG_SERIAL_MFD_HSU is not set
++
++# CONFIG_SERIAL_ALTERA_JTAGUART is not set
++# CONFIG_SERIAL_ALTERA_UART is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_XILINX_PS_UART is not set
++# CONFIG_SERIAL_TIMBERDALE is not set
++CONFIG_SERIAL_ARC=m
++CONFIG_SERIAL_ARC_NR_PORTS=1
++# CONFIG_SERIAL_RP2 is not set
++# CONFIG_SERIAL_ST_ASC is not set
++# CONFIG_SERIAL_PCH_UART is not set
++
++CONFIG_UNIX98_PTYS=y
++CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
++CONFIG_PRINTER=m
++CONFIG_LP_CONSOLE=y
++CONFIG_PPDEV=m
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++# CONFIG_I2C_MUX is not set
++# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
++# CONFIG_I2C_MUX_PCA954x is not set
++# CONFIG_I2C_MUX_GPIO is not set
++# CONFIG_I2C_MUX_PCA9541 is not set
++# CONFIG_I2C_MUX_PINCTRL is not set
++#
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_DEBUG_ALGO is not set
++CONFIG_I2C_ALGOBIT=m
++
++#
++# I2C Hardware Bus support
++#
++
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD756_S4882 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_ISCH is not set
++# CONFIG_I2C_NFORCE2_S4985 is not set
++# CONFIG_I2C_INTEL_MID is not set
++# CONFIG_I2C_EG20T is not set
++# CONFIG_I2C_CBUS_GPIO is not set
++CONFIG_I2C_VIPERBOARD=m
++
++CONFIG_EEPROM_AT24=m
++CONFIG_EEPROM_LEGACY=m
++CONFIG_EEPROM_93CX6=m
++CONFIG_EEPROM_MAX6875=m
++
++CONFIG_I2C_NFORCE2=m
++# CONFIG_I2C_OCORES is not set
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
++CONFIG_I2C_PASEMI=m
++CONFIG_I2C_PCA_PLATFORM=m
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_SCx200_ACB is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++CONFIG_I2C_SIMTEC=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_TINY_USB=m
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_DESIGNWARE is not set
++# CONFIG_I2C_XILINX is not set
++
++CONFIG_I2C_DIOLAN_U2C=m
++
++#
++# I2C Hardware Sensors Chip support
++#
++CONFIG_SENSORS_ATK0110=m
++CONFIG_SENSORS_ABITUGURU=m
++CONFIG_SENSORS_ABITUGURU3=m
++CONFIG_SENSORS_AD7414=m
++CONFIG_SENSORS_AD7418=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1029=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ADM9240=m
++CONFIG_SENSORS_ADT7310=m
++CONFIG_SENSORS_ADT7410=m
++CONFIG_SENSORS_ADS7828=m
++CONFIG_SENSORS_ADT7462=m
++CONFIG_SENSORS_ADT7470=m
++CONFIG_SENSORS_ADT7475=m
++CONFIG_SENSORS_APPLESMC=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_ATXP1=m
++CONFIG_SENSORS_CORETEMP=m
++CONFIG_SENSORS_DME1737=m
++CONFIG_SENSORS_DS1621=m
++# CONFIG_DS1682 is not set
++CONFIG_SENSORS_F71805F=m
++CONFIG_SENSORS_F71882FG=m
++CONFIG_SENSORS_F75375S=m
++CONFIG_SENSORS_FSCHMD=m
++CONFIG_SENSORS_G760A=m
++CONFIG_SENSORS_G762=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_HDAPS=m
++# CONFIG_SENSORS_HIH6130 is not set
++# CONFIG_SENSORS_HTU21 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# FIXME: IBMAEM x86 only?
++CONFIG_SENSORS_IBMAEM=m
++CONFIG_SENSORS_IBMPEX=m
++# CONFIG_SENSORS_IIO_HWMON is not set
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_K8TEMP=m
++CONFIG_SENSORS_K10TEMP=m
++CONFIG_SENSORS_LIS3LV02D=m
++CONFIG_SENSORS_LIS3_SPI=m
++CONFIG_SENSORS_LIS3_I2C=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_LM93=m
++CONFIG_SENSORS_LM95234=m
++CONFIG_SENSORS_LTC4245=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_MAX6650=m
++CONFIG_SENSORS_MAX6697=m
++CONFIG_SENSORS_MCP3021=m
++CONFIG_SENSORS_NCT6775=m
++CONFIG_SENSORS_NTC_THERMISTOR=m
++CONFIG_SENSORS_PC87360=m
++CONFIG_SENSORS_PC87427=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_SHT15=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_CHARGER_SMB347=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_SMSC47M192=m
++CONFIG_SENSORS_SMSC47B397=m
++CONFIG_SENSORS_THMC50=m
++CONFIG_SENSORS_TMP401=m
++CONFIG_APDS9802ALS=m
++CONFIG_ISL29020=m
++CONFIG_ISL29003=m
++CONFIG_SENSORS_BH1770=m
++CONFIG_SENSORS_APDS990X=m
++CONFIG_SENSORS_TSL2550=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_VIA_CPUTEMP=m
++CONFIG_SENSORS_VT1211=m
++CONFIG_SENSORS_VT8231=m
++CONFIG_SENSORS_W83627HF=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83L786NG=m
++CONFIG_SENSORS_W83627EHF=m
++CONFIG_SENSORS_W83791D=m
++CONFIG_SENSORS_W83792D=m
++CONFIG_SENSORS_W83793=m
++CONFIG_SENSORS_LTC4215=m
++CONFIG_SENSORS_LM95241=m
++CONFIG_SENSORS_LM95245=m
++CONFIG_SENSORS_TMP421=m
++CONFIG_SENSORS_WM8350=m
++CONFIG_SENSORS_WM831X=m
++CONFIG_SENSORS_LM73=m
++CONFIG_SENSORS_AMC6821=m
++CONFIG_SENSORS_INA2XX=m
++CONFIG_SENSORS_INA209=m
++CONFIG_SENSORS_ADT7411=m
++CONFIG_SENSORS_ASC7621=m
++CONFIG_SENSORS_EMC1403=m
++CONFIG_SENSORS_TMP102=m
++CONFIG_SENSORS_LTC4261=m
++# CONFIG_SENSORS_BH1780 is not set
++# CONFIG_SENSORS_JC42 is not set
++# CONFIG_SENSORS_SMM665 is not set
++# CONFIG_SENSORS_EMC2103 is not set
++# CONFIG_SENSORS_GPIO_FAN is not set
++CONFIG_SENSORS_W83795=m
++# CONFIG_SENSORS_W83795_FANCTRL is not set
++CONFIG_SENSORS_DS620=m
++CONFIG_SENSORS_SHT21=m
++CONFIG_SENSORS_LINEAGE=m
++CONFIG_SENSORS_LTC4151=m
++CONFIG_SENSORS_MAX6639=m
++CONFIG_SENSORS_SCH5627=m
++CONFIG_SENSORS_SCH5636=m
++CONFIG_SENSORS_ADS1015=m
++CONFIG_SENSORS_MAX16065=m
++CONFIG_SENSORS_MAX6642=m
++CONFIG_SENSORS_ADM1275=m
++CONFIG_SENSORS_UCD9000=m
++CONFIG_SENSORS_UCD9200=m
++CONFIG_SENSORS_ZL6100=m
++CONFIG_SENSORS_EMC6W201=m
++
++CONFIG_PMBUS=m
++CONFIG_SENSORS_PMBUS=m
++CONFIG_SENSORS_MAX16064=m
++CONFIG_SENSORS_LM25066=m
++CONFIG_SENSORS_LTC2978=m
++CONFIG_SENSORS_MAX34440=m
++CONFIG_SENSORS_MAX8688=m
++CONFIG_SENSORS_MAX1668=m
++CONFIG_SENSORS_MAX197=m
++
++# Industrial I/O subsystem configuration
++CONFIG_IIO=m
++CONFIG_IIO_BUFFER=y
++CONFIG_IIO_BUFFER_CB=y
++# CONFIG_IIO_KFIFO_BUF is not set
++CONFIG_IIO_TRIGGERED_BUFFER=m
++CONFIG_IIO_TRIGGER=y
++CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
++CONFIG_IIO_INTERRUPT_TRIGGER=m
++CONFIG_HID_SENSOR_IIO_COMMON=m
++CONFIG_HID_SENSOR_IIO_TRIGGER=m
++CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS=y
++# CONFIG_IIO_SYSFS_TRIGGER is not set
++# CONFIG_AD5446 is not set
++# CONFIG_AD5380 is not set
++# CONFIG_AD5064 is not set
++# CONFIG_BMA180 is not set
++# CONFIG_MAX1363 is not set
++# CONFIG_MAX517 is not set
++# CONFIG_MCP4725 is not set
++# CONFIG_ITG3200 is not set
++# CONFIG_APDS9300 is not set
++# CONFIG_CM32181 is not set
++# CONFIG_CM36651 is not set
++# CONFIG_GP2AP020A00F is not set
++# CONFIG_TSL2583 is not set
++# CONFIG_TSL2x7x is not set
++# CONFIG_TCS3472 is not set
++# CONFIG_TSL4531 is not set
++# CONFIG_NAU7802 is not set
++# CONFIG_TI_ADC081C is not set
++# CONFIG_EXYNOS_ADC is not set
++# CONFIG_VIPERBOARD_ADC is not set
++# CONFIG_INV_MPU6050_IIO is not set
++CONFIG_IIO_ST_GYRO_3AXIS=m
++CONFIG_IIO_ST_MAGN_3AXIS=m
++CONFIG_IIO_ST_ACCEL_3AXIS=m
++CONFIG_HID_SENSOR_INCLINOMETER_3D=m
++# CONFIG_ADJD_S311 is not set
++# CONFIG_SENSORS_TSL2563 is not set
++# CONFIG_VCNL4000 is not set
++# CONFIG_AK8975 is not set
++# CONFIG_MAG3110 is not set
++# CONFIG_TMP006 is not set
++# CONFIG_IIO_ST_PRESS is not set
++# CONFIG_KXSD9 is not set
++# CONFIG_AD7266 is not set
++# CONFIG_AD7298 is not set
++# CONFIG_AD7476 is not set
++# CONFIG_AD7791 is not set
++# CONFIG_AD7793 is not set
++# CONFIG_AD7887 is not set
++# CONFIG_AD7923 is not set
++# CONFIG_MCP320X is not set
++# CONFIG_MCP3422 is not set
++# CONFIG_AD8366 is not set
++# CONFIG_AD5360 is not set
++# CONFIG_AD5421 is not set
++# CONFIG_AD5449 is not set
++# CONFIG_AD5504 is not set
++# CONFIG_AD5624R_SPI is not set
++# CONFIG_AD5686 is not set
++# CONFIG_AD5755 is not set
++# CONFIG_AD5764 is not set
++# CONFIG_AD5791 is not set
++# CONFIG_AD7303 is not set
++# CONFIG_AD9523 is not set
++# CONFIG_ADF4350 is not set
++# CONFIG_ADIS16080 is not set
++# CONFIG_ADIS16130 is not set
++# CONFIG_ADIS16136 is not set
++# CONFIG_ADIS16260 is not set
++# CONFIG_ADXRS450 is not set
++# CONFIG_ADIS16400 is not set
++# CONFIG_ADIS16480 is not set
++# CONFIG_DHT11 is not set
++# CONFIG_MPL3115 is not set
++
++# staging IIO drivers
++# CONFIG_AD7291 is not set
++# CONFIG_AD7606 is not set
++# CONFIG_AD799X is not set
++# CONFIG_ADT7316 is not set
++# CONFIG_AD7150 is not set
++# CONFIG_AD7152 is not set
++# CONFIG_AD7746 is not set
++# CONFIG_AD5933 is not set
++# CONFIG_ADE7854 is not set
++# CONFIG_SENSORS_ISL29018 is not set
++# CONFIG_SENSORS_ISL29028 is not set
++# CONFIG_SENSORS_HMC5843 is not set
++# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
++# CONFIG_IIO_SIMPLE_DUMMY is not set
++# CONFIG_ADIS16201 is not set
++# CONFIG_ADIS16203 is not set
++# CONFIG_ADIS16204 is not set
++# CONFIG_ADIS16209 is not set
++# CONFIG_ADIS16220 is not set
++# CONFIG_ADIS16240 is not set
++# CONFIG_LIS3L02DQ is not set
++# CONFIG_SCA3000 is not set
++# CONFIG_AD7780 is not set
++# CONFIG_AD7816 is not set
++# CONFIG_AD7192 is not set
++# CONFIG_AD7280 is not set
++# CONFIG_AD5930 is not set
++# CONFIG_AD9832 is not set
++# CONFIG_AD9834 is not set
++# CONFIG_AD9850 is not set
++# CONFIG_AD9852 is not set
++# CONFIG_AD9910 is not set
++# CONFIG_AD9951 is not set
++# CONFIG_ADIS16060 is not set
++# CONFIG_ADE7753 is not set
++# CONFIG_ADE7754 is not set
++# CONFIG_ADE7758 is not set
++# CONFIG_ADE7759 is not set
++# CONFIG_AD2S90 is not set
++# CONFIG_AD2S1200 is not set
++# CONFIG_AD2S1210 is not set
++
++
++
++# CONFIG_HMC6352 is not set
++# CONFIG_BMP085 is not set
++# CONFIG_BMP085_I2C is not set
++# CONFIG_PCH_PHUB is not set
++# CONFIG_USB_SWITCH_FSA9480 is not set
++
++CONFIG_W1=m
++CONFIG_W1_CON=y
++# CONFIG_W1_MASTER_MATROX is not set
++CONFIG_W1_MASTER_DS2490=m
++CONFIG_W1_MASTER_DS2482=m
++CONFIG_W1_MASTER_DS1WM=m
++CONFIG_W1_MASTER_GPIO=m
++# CONFIG_HDQ_MASTER_OMAP is not set
++CONFIG_W1_SLAVE_THERM=m
++CONFIG_W1_SLAVE_SMEM=m
++CONFIG_W1_SLAVE_DS2408=m
++# CONFIG_W1_SLAVE_DS2408_READBACK is not set
++CONFIG_W1_SLAVE_DS2413=m
++CONFIG_W1_SLAVE_DS2423=m
++CONFIG_W1_SLAVE_DS2431=m
++CONFIG_W1_SLAVE_DS2433=m
++CONFIG_W1_SLAVE_DS2433_CRC=y
++CONFIG_W1_SLAVE_DS2760=m
++CONFIG_W1_SLAVE_DS2780=m
++CONFIG_W1_SLAVE_DS2781=m
++CONFIG_W1_SLAVE_DS28E04=m
++CONFIG_W1_SLAVE_BQ27000=m
++
++#
++# Mice
++#
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG_CORE=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_WDTPCI=m
++# CONFIG_ACQUIRE_WDT is not set
++# CONFIG_ADVANTECH_WDT is not set
++# CONFIG_EUROTECH_WDT is not set
++CONFIG_IB700_WDT=m
++# CONFIG_SCx200_WDT is not set
++# CONFIG_60XX_WDT is not set
++CONFIG_W83877F_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_MACHZ_WDT=m
++# CONFIG_SC520_WDT is not set
++CONFIG_ALIM7101_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_IT87_WDT=m
++CONFIG_ITCO_WDT=m
++CONFIG_ITCO_VENDOR_SUPPORT=y
++# CONFIG_SC1200_WDT is not set
++# CONFIG_PC87413_WDT is not set
++# CONFIG_WAFER_WDT is not set
++# CONFIG_CPU5_WDT is not set
++CONFIG_I6300ESB_WDT=m
++CONFIG_IT8712F_WDT=m
++# CONFIG_SBC8360_WDT is not set
++# CONFIG_SBC7240_WDT is not set
++CONFIG_SMSC_SCH311X_WDT=m
++CONFIG_W83977F_WDT=m
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_USBPCWATCHDOG=m
++# CONFIG_SBC_EPX_C3_WATCHDOG is not set
++CONFIG_WM8350_WATCHDOG=m
++CONFIG_WM831X_WATCHDOG=m
++# CONFIG_MAX63XX_WATCHDOG is not set
++# CONFIG_DW_WATCHDOG is not set
++CONFIG_W83697UG_WDT=m
++# CONFIG_MEN_A21_WDT is not set
++# CONFIG_GPIO_WATCHDOG is not set
++
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_TIMERIOMEM=m
++CONFIG_HW_RANDOM_TPM=m
++# CONFIG_HW_RANDOM_ATMEL is not set
++# CONFIG_HW_RANDOM_EXYNOS is not set
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_RTC_DEBUG is not set
++# CONFIG_GEN_RTC is not set
++CONFIG_RTC_HCTOSYS=y
++# CONFIG_RTC_SYSTOHC is not set
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++CONFIG_RTC_DRV_CMOS=y
++CONFIG_RTC_DRV_DS1307=m
++CONFIG_RTC_DRV_DS1511=m
++CONFIG_RTC_DRV_DS1553=m
++CONFIG_RTC_DRV_DS1672=m
++CONFIG_RTC_DRV_DS1742=m
++CONFIG_RTC_DRV_DS1374=m
++# CONFIG_RTC_DRV_EP93XX is not set
++CONFIG_RTC_DRV_FM3130=m
++CONFIG_RTC_DRV_ISL1208=m
++CONFIG_RTC_DRV_M41T80=m
++CONFIG_RTC_DRV_M41T80_WDT=y
++CONFIG_RTC_DRV_M48T59=m
++CONFIG_RTC_DRV_MAX6900=m
++# CONFIG_RTC_DRV_M48T86 is not set
++CONFIG_RTC_DRV_PCF2127=m
++CONFIG_RTC_DRV_PCF8563=m
++CONFIG_RTC_DRV_PCF8583=m
++CONFIG_RTC_DRV_RS5C372=m
++# CONFIG_RTC_DRV_SA1100 is not set
++# CONFIG_RTC_DRV_TEST is not set
++CONFIG_RTC_DRV_X1205=m
++CONFIG_RTC_DRV_V3020=m
++CONFIG_RTC_DRV_DS2404=m
++CONFIG_RTC_DRV_STK17TA8=m
++# CONFIG_RTC_DRV_S35390A is not set
++CONFIG_RTC_DRV_RX8581=m
++CONFIG_RTC_DRV_RX8025=m
++CONFIG_RTC_DRV_DS1286=m
++CONFIG_RTC_DRV_M48T35=m
++CONFIG_RTC_DRV_BQ4802=m
++CONFIG_RTC_DRV_WM8350=m
++# CONFIG_RTC_DRV_AB3100 is not set
++CONFIG_RTC_DRV_WM831X=m
++CONFIG_RTC_DRV_BQ32K=m
++CONFIG_RTC_DRV_MSM6242=m
++CONFIG_RTC_DRV_RP5C01=m
++CONFIG_RTC_DRV_EM3027=m
++CONFIG_RTC_DRV_RV3029C2=m
++CONFIG_RTC_DRV_PCF50633=m
++CONFIG_RTC_DRV_DS3232=m
++CONFIG_RTC_DRV_ISL12022=m
++# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
++# CONFIG_RTC_DRV_MOXART is not set
++# CONFIG_RTC_DRV_ISL12057 is not set
++
++CONFIG_R3964=m
++# CONFIG_APPLICOM is not set
++# CONFIG_SONYPI is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++CONFIG_AGP_ALI=y
++CONFIG_AGP_ATI=y
++CONFIG_AGP_AMD=y
++CONFIG_AGP_AMD64=y
++CONFIG_AGP_INTEL=y
++CONFIG_AGP_NVIDIA=y
++CONFIG_AGP_SIS=y
++CONFIG_AGP_SWORKS=y
++CONFIG_AGP_VIA=y
++CONFIG_AGP_EFFICEON=y
++
++CONFIG_VGA_ARB=y
++CONFIG_VGA_ARB_MAX_GPUS=16
++
++# CONFIG_STUB_POULSBO is not set
++
++#
++# PCMCIA character devices
++#
++# CONFIG_SYNCLINK_CS is not set
++
++CONFIG_CARDMAN_4000=m
++CONFIG_CARDMAN_4040=m
++
++CONFIG_MWAVE=m
++CONFIG_RAW_DRIVER=y
++CONFIG_MAX_RAW_DEVS=8192
++CONFIG_HANGCHECK_TIMER=m
++
++CONFIG_MEDIA_PCI_SUPPORT=y
++#
++# Multimedia devices
++#
++CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
++CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
++CONFIG_MEDIA_RC_SUPPORT=y
++CONFIG_MEDIA_CONTROLLER=y
++CONFIG_VIDEO_DEV=m
++# CONFIG_VIDEO_ADV_DEBUG is not set
++CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
++CONFIG_VIDEO_V4L2=y
++CONFIG_VIDEO_V4L2_SUBDEV_API=y
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_USB_SI4713 is not set
++# CONFIG_PLATFORM_SI4713 is not set
++# CONFIG_I2C_SI4713 is not set
++# CONFIG_USB_RAREMONO is not set
++
++#
++# Video For Linux
++#
++
++#
++# Video Adapters
++#
++CONFIG_V4L_USB_DRIVERS=y
++CONFIG_VIDEO_CAPTURE_DRIVERS=y
++CONFIG_V4L_PCI_DRIVERS=y
++CONFIG_VIDEO_AU0828=m
++CONFIG_VIDEO_AU0828_V4L2=y
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_BT848_DVB=y
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_SR030PC30=m
++CONFIG_VIDEO_NOON010PC30=m
++CONFIG_VIDEO_CAFE_CCIC=m
++# CONFIG_VIDEO_CPIA is not set
++CONFIG_VIDEO_CPIA2=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_CX23885=m
++CONFIG_MEDIA_ALTERA_CI=m
++CONFIG_VIDEO_CX18=m
++CONFIG_VIDEO_CX18_ALSA=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_CX88_ALSA=m
++CONFIG_VIDEO_CX88_BLACKBIRD=m
++CONFIG_VIDEO_CX88_ENABLE_VP3054=y
++CONFIG_VIDEO_CX88_VP3054=m
++CONFIG_VIDEO_EM28XX=m
++CONFIG_VIDEO_EM28XX_V4L2=m
++CONFIG_VIDEO_EM28XX_ALSA=m
++CONFIG_VIDEO_EM28XX_DVB=m
++CONFIG_VIDEO_EM28XX_RC=y
++CONFIG_VIDEO_CX231XX=m
++CONFIG_VIDEO_CX231XX_ALSA=m
++CONFIG_VIDEO_CX231XX_DVB=m
++CONFIG_VIDEO_CX231XX_RC=y
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_IVTV=m
++# CONFIG_VIDEO_IVTV_ALSA is not set
++CONFIG_VIDEO_MEYE=m
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_PVRUSB2_DVB=y
++# CONFIG_VIDEO_PMS is not set
++CONFIG_VIDEO_HDPVR=m
++CONFIG_VIDEO_SAA6588=m
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_ALSA=m
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_SAA7134_RC=y
++CONFIG_VIDEO_USBVISION=m
++CONFIG_VIDEO_STK1160_COMMON=m
++CONFIG_VIDEO_STK1160=m
++CONFIG_VIDEO_STK1160_AC97=y
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_AVS6EYES=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++CONFIG_VIDEO_ZORAN_ZR36060=m
++# CONFIG_V4L_ISA_PARPORT_DRIVERS is not set
++CONFIG_VIDEO_FB_IVTV=m
++CONFIG_VIDEO_SAA7164=m
++CONFIG_VIDEO_TM6000=m
++CONFIG_VIDEO_TM6000_ALSA=m
++CONFIG_VIDEO_TM6000_DVB=m
++CONFIG_VIDEO_TLG2300=m
++CONFIG_VIDEO_USBTV=m
++
++CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
++
++#
++# Radio Adapters
++#
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_SHARK=m
++CONFIG_RADIO_SHARK2=m
++CONFIG_RADIO_WL1273=m
++
++CONFIG_MEDIA_ATTACH=y
++
++#
++# V4L/DVB tuners
++# Selected automatically by not setting CONFIG_MEDIA_TUNER_CUSTOMISE
++#
++# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB_CAPTURE_DRIVERS=y
++CONFIG_DVB_CORE=m
++CONFIG_DVB_NET=y
++CONFIG_DVB_MAX_ADAPTERS=8
++CONFIG_DVB_DYNAMIC_MINORS=y
++
++#
++# DVB frontends
++# Selected automatically by not setting CONFIG_DVB_FE_CUSTOMISE
++#
++# CONFIG_DVB_FE_CUSTOMISE is not set
++
++#
++# Supported DVB bridge Modules
++#
++CONFIG_DVB_BT8XX=m
++CONFIG_DVB_BUDGET_CORE=m
++CONFIG_DVB_PLUTO2=m
++CONFIG_SMS_SIANO_MDTV=m
++CONFIG_SMS_SIANO_RC=y
++# CONFIG_SMS_SIANO_DEBUGFS is not set
++CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
++CONFIG_SMS_USB_DRV=m
++CONFIG_SMS_SDIO_DRV=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_USB_DTV5100=m
++CONFIG_DVB_USB_AF9015=m
++CONFIG_DVB_USB_ANYSEE=m
++CONFIG_DVB_USB_DW2102=m
++CONFIG_DVB_USB_FRIIO=m
++CONFIG_DVB_USB_EC168=m
++CONFIG_DVB_USB_PCTV452E=m
++CONFIG_DVB_USB_IT913X=m
++CONFIG_DVB_USB_MXL111SF=m
++CONFIG_DVB_DM1105=m
++CONFIG_DVB_FIREDTV=m
++CONFIG_DVB_NGENE=m
++CONFIG_DVB_DDBRIDGE=m
++CONFIG_DVB_USB_TECHNISAT_USB2=m
++CONFIG_DVB_USB_V2=m
++
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++
++CONFIG_DVB_TTUSB_BUDGET=m
++
++CONFIG_DVB_USB_CINERGY_T2=m
++CONFIG_DVB_B2C2_FLEXCOP=m
++# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
++
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
++CONFIG_DVB_USB=m
++# CONFIG_DVB_USB_DEBUG is not set
++CONFIG_DVB_USB_A800=m
++CONFIG_DVB_USB_AF9005=m
++CONFIG_DVB_USB_AF9005_REMOTE=m
++CONFIG_DVB_USB_AU6610=m
++CONFIG_DVB_USB_CXUSB=m
++CONFIG_DVB_USB_DIBUSB_MB=m
++# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
++CONFIG_DVB_USB_DIBUSB_MC=m
++CONFIG_DVB_USB_DIB0700=m
++CONFIG_DVB_USB_DIGITV=m
++CONFIG_DVB_USB_DTT200U=m
++CONFIG_DVB_USB_GL861=m
++CONFIG_DVB_USB_GP8PSK=m
++CONFIG_DVB_USB_M920X=m
++CONFIG_DVB_USB_NOVA_T_USB2=m
++CONFIG_DVB_USB_CE6230=m
++CONFIG_DVB_USB_OPERA1=m
++CONFIG_DVB_USB_TTUSB2=m
++CONFIG_DVB_USB_UMT_010=m
++CONFIG_DVB_USB_VP702X=m
++CONFIG_DVB_USB_VP7045=m
++CONFIG_DVB_USB_AZ6027=m
++CONFIG_DVB_USB_AZ6007=m
++CONFIG_DVB_USB_LME2510=m
++CONFIG_DVB_USB_RTL28XXU=m
++CONFIG_DVB_USB_AF9035=m
++
++CONFIG_DVB_PT1=m
++
++CONFIG_MANTIS_CORE=m
++CONFIG_DVB_MANTIS=m
++CONFIG_DVB_HOPPER=m
++
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_TVP5150=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_PVRUSB2=m
++CONFIG_VIDEO_PVRUSB2_SYSFS=y
++# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
++
++CONFIG_RC_CORE=m
++CONFIG_RC_DECODERS=y
++CONFIG_LIRC=m
++CONFIG_RC_LOOPBACK=m
++CONFIG_RC_MAP=m
++CONFIG_RC_DEVICES=y
++CONFIG_RC_ATI_REMOTE=m
++CONFIG_IR_NEC_DECODER=m
++CONFIG_IR_RC5_DECODER=m
++CONFIG_IR_RC6_DECODER=m
++CONFIG_IR_JVC_DECODER=m
++CONFIG_IR_SONY_DECODER=m
++CONFIG_IR_RC5_SZ_DECODER=m
++CONFIG_IR_SANYO_DECODER=m
++CONFIG_IR_MCE_KBD_DECODER=m
++CONFIG_IR_LIRC_CODEC=m
++CONFIG_IR_IMON=m
++CONFIG_IR_MCEUSB=m
++CONFIG_IR_ITE_CIR=m
++CONFIG_IR_NUVOTON=m
++CONFIG_IR_FINTEK=m
++CONFIG_IR_REDRAT3=m
++CONFIG_IR_ENE=m
++CONFIG_IR_STREAMZAP=m
++CONFIG_IR_WINBOND_CIR=m
++CONFIG_IR_IGUANA=m
++CONFIG_IR_TTUSBIR=m
++CONFIG_IR_GPIO_CIR=m
++
++CONFIG_V4L_MEM2MEM_DRIVERS=y
++# CONFIG_VIDEO_MEM2MEM_DEINTERLACE is not set
++# CONFIG_VIDEO_SH_VEU is not set
++# CONFIG_VIDEO_RENESAS_VSP1 is not set
++# CONFIG_V4L_TEST_DRIVERS is not set
++
++# CONFIG_VIDEO_MEM2MEM_TESTDEV is not set
++
++#
++# Broadcom Crystal HD video decoder driver
++#
++CONFIG_CRYSTALHD=m
++
++#
++# Graphics support
++#
++
++CONFIG_DISPLAY_SUPPORT=m
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_VGACON_SOFT_SCROLLBACK=y
++CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++
++#
++# Logo configuration
++#
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++
++#
++# Sound
++#
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SOUND_OSS_CORE_PRECLAIM=y
++# CONFIG_SND_DEBUG_VERBOSE is not set
++CONFIG_SND_VERBOSE_PROCFS=y
++CONFIG_SND_SEQUENCER=y
++CONFIG_SND_HRTIMER=y
++CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_RTCTIMER=y
++CONFIG_SND_DYNAMIC_MINORS=y
++CONFIG_SND_MAX_CARDS=32
++# CONFIG_SND_SUPPORT_OLD_API is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_DUMMY=m
++CONFIG_SND_ALOOP=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_MTS64=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++CONFIG_SND_PORTMAN2X4=m
++CONFIG_SND_AC97_POWER_SAVE=y
++CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
++
++CONFIG_SND_DRIVERS=y
++
++#
++# ISA devices
++#
++CONFIG_SND_AD1889=m
++
++#
++# PCI devices
++#
++CONFIG_SND_PCI=y
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ALS300=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++# CONFIG_SND_AW2 is not set
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CA0106=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CS4281=m
++CONFIG_SND_CS5530=m
++CONFIG_SND_CS5535AUDIO=m
++CONFIG_SND_EMU10K1=m
++CONFIG_SND_EMU10K1X=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_ES1968_INPUT=y
++CONFIG_SND_ES1968_RADIO=y
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X_BOOL=y
++CONFIG_SND_CTXFI=m
++CONFIG_SND_LX6464ES=m
++CONFIG_SND_HDA_INTEL=y
++CONFIG_SND_HDA_INPUT_BEEP=y
++CONFIG_SND_HDA_INPUT_BEEP_MODE=0
++CONFIG_SND_HDA_INPUT_JACK=y
++CONFIG_SND_HDA_PATCH_LOADER=y
++CONFIG_SND_HDA_HWDEP=y
++CONFIG_SND_HDA_CODEC_REALTEK=y
++CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS=y
++CONFIG_SND_HDA_CODEC_CA0110=y
++CONFIG_SND_HDA_CODEC_ANALOG=y
++CONFIG_SND_HDA_CODEC_SIGMATEL=y
++CONFIG_SND_HDA_CODEC_VIA=y
++CONFIG_SND_HDA_CODEC_CIRRUS=y
++CONFIG_SND_HDA_CODEC_CONEXANT=y
++CONFIG_SND_HDA_CODEC_CMEDIA=y
++CONFIG_SND_HDA_CODEC_SI3054=y
++CONFIG_SND_HDA_CODEC_HDMI=y
++CONFIG_SND_HDA_I915=y
++CONFIG_SND_HDA_CODEC_CA0132=y
++CONFIG_SND_HDA_CODEC_CA0132_DSP=y
++CONFIG_SND_HDA_GENERIC=y
++CONFIG_SND_HDA_POWER_SAVE=y
++CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
++CONFIG_SND_HDA_RECONFIG=y
++CONFIG_SND_HDA_PREALLOC_SIZE=4096
++CONFIG_SND_HDSPM=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=y
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_MAESTRO3_INPUT=y
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_OXYGEN=m
++CONFIG_SND_RME32=m
++CONFIG_SND_PCSP=m
++CONFIG_SND_PCXHR=m
++CONFIG_SND_RIPTIDE=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_SIS7019=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_VIA82XX=m
++CONFIG_SND_VIA82XX_MODEM=m
++CONFIG_SND_VIRTUOSO=m
++CONFIG_SND_VX222=m
++CONFIG_SND_YMFPCI=m
++CONFIG_SND_ASIHPI=m
++CONFIG_SND_LOLA=m
++
++#
++# ALSA USB devices
++#
++CONFIG_SND_USB=y
++CONFIG_SND_USB_CAIAQ=m
++CONFIG_SND_USB_CAIAQ_INPUT=y
++CONFIG_SND_USB_USX2Y=m
++CONFIG_SND_USB_US122L=m
++CONFIG_SND_USB_UA101=m
++CONFIG_SND_USB_6FIRE=m
++CONFIG_SND_USB_HIFACE=m
++
++#
++# PCMCIA devices
++#
++# CONFIG_SND_PCMCIA is not set
++
++CONFIG_SND_FIREWIRE=y
++CONFIG_SND_FIREWIRE_SPEAKERS=m
++CONFIG_SND_ISIGHT=m
++CONFIG_SND_SCS1X=m
++CONFIG_SND_DICE=m
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++
++#
++# USB support
++#
++CONFIG_USB_SUPPORT=y
++# CONFIG_USB_DEBUG is not set
++
++# DEPRECATED: See bug 362221. Fix udev.
++# CONFIG_USB_DEVICE_CLASS is not set
++
++
++#
++# Miscellaneous USB options
++#
++
++# Deprecated.
++# CONFIG_USB_DEVICEFS is not set
++
++CONFIG_USB_DEFAULT_PERSIST=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++CONFIG_USB_SUSPEND=y
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++# CONFIG_USB_EHCI_MV is not set
++# CONFIG_USB_EHCI_HCD_PLATFORM is not set
++# CONFIG_USB_ISP116X_HCD is not set
++# CONFIG_USB_ISP1760_HCD is not set
++CONFIG_USB_ISP1362_HCD=m
++CONFIG_USB_FUSBH200_HCD=m
++# CONFIG_USB_FOTG210_HCD is not set
++# CONFIG_USB_GR_UDC is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PCI=y
++# CONFIG_USB_OHCI_HCD_SSB is not set
++# CONFIG_USB_HCD_TEST_MODE is not set
++# CONFIG_USB_OHCI_HCD_PLATFORM is not set
++CONFIG_USB_UHCI_HCD=y
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_HCD_ISO=y
++# CONFIG_USB_SL811_CS is not set
++# CONFIG_USB_R8A66597_HCD is not set
++CONFIG_USB_XHCI_HCD=y
++# CONFIG_USB_XHCI_HCD_DEBUGGING is not set
++
++#
++# USB Device Class drivers
++#
++
++#
++# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
++#
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++CONFIG_USB_WDM=m
++CONFIG_USB_TMC=m
++# CONFIG_BLK_DEV_UB is not set
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_CYPRESS_ATACB=m
++CONFIG_USB_STORAGE_DATAFAB=m
++CONFIG_USB_STORAGE_FREECOM=m
++CONFIG_USB_STORAGE_ISD200=m
++CONFIG_USB_STORAGE_SDDR09=m
++CONFIG_USB_STORAGE_SDDR55=m
++CONFIG_USB_STORAGE_JUMPSHOT=m
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_ONETOUCH=m
++CONFIG_USB_STORAGE_ALAUDA=m
++CONFIG_USB_STORAGE_KARMA=m
++CONFIG_USB_STORAGE_REALTEK=m
++CONFIG_REALTEK_AUTOPM=y
++CONFIG_USB_STORAGE_ENE_UB6250=m
++# CONFIG_USB_LIBUSUAL is not set
++# CONFIG_USB_UAS is not set
++
++
++#
++# USB Human Interface Devices (HID)
++#
++CONFIG_USB_HID=y
++
++CONFIG_HID_SUPPORT=y
++
++CONFIG_HID=y
++CONFIG_I2C_HID=m
++CONFIG_HID_BATTERY_STRENGTH=y
++# debugging default is y upstream now
++CONFIG_HIDRAW=y
++CONFIG_UHID=m
++CONFIG_HID_PID=y
++CONFIG_LOGITECH_FF=y
++CONFIG_HID_LOGITECH_DJ=m
++CONFIG_LOGIWII_FF=y
++CONFIG_LOGIRUMBLEPAD2_FF=y
++CONFIG_PANTHERLORD_FF=y
++CONFIG_THRUSTMASTER_FF=y
++CONFIG_HID_WACOM=m
++CONFIG_HID_WACOM_POWER_SUPPLY=y
++CONFIG_ZEROPLUS_FF=y
++CONFIG_USB_HIDDEV=y
++CONFIG_USB_IDMOUSE=m
++CONFIG_DRAGONRISE_FF=y
++CONFIG_GREENASIA_FF=y
++CONFIG_SMARTJOYPLUS_FF=y
++CONFIG_LOGIG940_FF=y
++CONFIG_LOGIWHEELS_FF=y
++CONFIG_HID_MAGICMOUSE=y
++CONFIG_HID_MULTITOUCH=m
++CONFIG_HID_NTRIG=y
++CONFIG_HID_QUANTA=y
++CONFIG_HID_PRIMAX=m
++CONFIG_HID_PS3REMOTE=m
++CONFIG_HID_PRODIKEYS=m
++CONFIG_HID_DRAGONRISE=m
++CONFIG_HID_GYRATION=m
++CONFIG_HID_ICADE=m
++CONFIG_HID_TWINHAN=m
++CONFIG_HID_ORTEK=m
++CONFIG_HID_PANTHERLORD=m
++CONFIG_HID_PETALYNX=m
++CONFIG_HID_PICOLCD=m
++CONFIG_HID_RMI=m
++CONFIG_HID_ROCCAT=m
++CONFIG_HID_ROCCAT_KONE=m
++CONFIG_HID_SAMSUNG=m
++CONFIG_HID_SONY=m
++CONFIG_SONY_FF=y
++CONFIG_HID_SUNPLUS=m
++CONFIG_HID_STEELSERIES=m
++CONFIG_HID_GREENASIA=m
++CONFIG_HID_SMARTJOYPLUS=m
++CONFIG_HID_TOPSEED=m
++CONFIG_HID_THINGM=m
++CONFIG_HID_THRUSTMASTER=m
++CONFIG_HID_XINMO=m
++CONFIG_HID_ZEROPLUS=m
++CONFIG_HID_ZYDACRON=m
++CONFIG_HID_SENSOR_HUB=m
++CONFIG_HID_SENSOR_GYRO_3D=m
++CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
++CONFIG_HID_SENSOR_ALS=m
++CONFIG_HID_SENSOR_ACCEL_3D=m
++CONFIG_HID_EMS_FF=m
++CONFIG_HID_ELECOM=m
++CONFIG_HID_ELO=m
++CONFIG_HID_UCLOGIC=m
++CONFIG_HID_WALTOP=m
++CONFIG_HID_ROCCAT_PYRA=m
++CONFIG_HID_ROCCAT_KONEPLUS=m
++CONFIG_HID_ACRUX=m
++CONFIG_HID_ACRUX_FF=y
++CONFIG_HID_KEYTOUCH=m
++CONFIG_HID_LCPOWER=m
++CONFIG_HID_LENOVO_TPKBD=m
++CONFIG_HID_ROCCAT_ARVO=m
++CONFIG_HID_ROCCAT_ISKU=m
++CONFIG_HID_ROCCAT_KOVAPLUS=m
++CONFIG_HID_HOLTEK=m
++CONFIG_HOLTEK_FF=y
++CONFIG_HID_HUION=m
++CONFIG_HID_SPEEDLINK=m
++CONFIG_HID_WIIMOTE=m
++CONFIG_HID_WIIMOTE_EXT=y
++CONFIG_HID_KYE=m
++CONFIG_HID_SAITEK=m
++CONFIG_HID_TIVO=m
++CONFIG_HID_GENERIC=y
++CONFIG_HID_AUREAL=m
++CONFIG_HID_APPLEIR=m
++
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++
++#
++# USB Multimedia devices
++#
++
++CONFIG_USB_DSBR=m
++# CONFIG_USB_ET61X251 is not set
++CONFIG_USB_M5602=m
++CONFIG_USB_STV06XX=m
++CONFIG_USB_GSPCA=m
++CONFIG_USB_GSPCA_MR97310A=m
++CONFIG_USB_GSPCA_BENQ=m
++CONFIG_USB_GSPCA_CONEX=m
++CONFIG_USB_GSPCA_CPIA1=m
++CONFIG_USB_GSPCA_ETOMS=m
++CONFIG_USB_GSPCA_FINEPIX=m
++CONFIG_USB_GSPCA_MARS=m
++CONFIG_USB_GSPCA_OV519=m
++CONFIG_USB_GSPCA_OV534=m
++CONFIG_USB_GSPCA_OV534_9=m
++CONFIG_USB_GSPCA_PAC207=m
++CONFIG_USB_GSPCA_PAC7311=m
++CONFIG_USB_GSPCA_SN9C2028=m
++CONFIG_USB_GSPCA_SN9C20X=m
++CONFIG_USB_GSPCA_SONIXB=m
++CONFIG_USB_GSPCA_SONIXJ=m
++CONFIG_USB_GSPCA_SPCA500=m
++CONFIG_USB_GSPCA_SPCA501=m
++CONFIG_USB_GSPCA_SPCA505=m
++CONFIG_USB_GSPCA_SPCA506=m
++CONFIG_USB_GSPCA_SPCA508=m
++CONFIG_USB_GSPCA_SPCA561=m
++CONFIG_USB_GSPCA_STK014=m
++CONFIG_USB_GSPCA_STK1135=m
++CONFIG_USB_GSPCA_SUNPLUS=m
++CONFIG_USB_GSPCA_T613=m
++CONFIG_USB_GSPCA_TOPRO=m
++CONFIG_USB_GSPCA_TV8532=m
++CONFIG_USB_GSPCA_VC032X=m
++CONFIG_USB_GSPCA_ZC3XX=m
++CONFIG_USB_GSPCA_SQ905=m
++CONFIG_USB_GSPCA_SQ905C=m
++CONFIG_USB_GSPCA_PAC7302=m
++CONFIG_USB_GSPCA_STV0680=m
++CONFIG_USB_GL860=m
++CONFIG_USB_GSPCA_JEILINJ=m
++CONFIG_USB_GSPCA_JL2005BCD=m
++CONFIG_USB_GSPCA_KONICA=m
++CONFIG_USB_GSPCA_XIRLINK_CIT=m
++CONFIG_USB_GSPCA_SPCA1528=m
++CONFIG_USB_GSPCA_SQ930X=m
++CONFIG_USB_GSPCA_NW80X=m
++CONFIG_USB_GSPCA_VICAM=m
++CONFIG_USB_GSPCA_KINECT=m
++CONFIG_USB_GSPCA_SE401=m
++
++CONFIG_USB_S2255=m
++# CONFIG_VIDEO_SH_MOBILE_CEU is not set
++# CONFIG_VIDEO_SH_MOBILE_CSI2 is not set
++# CONFIG_USB_SN9C102 is not set
++CONFIG_USB_ZR364XX=m
++
++#
++# USB Network adaptors
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_HSO=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_RTL8152=m
++CONFIG_USB_USBNET=m
++CONFIG_USB_SPEEDTOUCH=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_AX88179_178A=m
++CONFIG_USB_NET_DM9601=m
++CONFIG_USB_NET_SR9700=m
++CONFIG_USB_NET_SMSC95XX=m
++CONFIG_USB_NET_GL620A=m
++CONFIG_USB_NET_NET1080=m
++CONFIG_USB_NET_PLUSB=m
++CONFIG_USB_NET_MCS7830=m
++CONFIG_USB_NET_RNDIS_HOST=m
++CONFIG_USB_NET_CDC_SUBSET=m
++CONFIG_USB_NET_CDC_EEM=m
++CONFIG_USB_NET_CDC_NCM=m
++CONFIG_USB_NET_HUAWEI_CDC_NCM=m
++CONFIG_USB_NET_CDC_MBIM=m
++CONFIG_USB_NET_ZAURUS=m
++CONFIG_USB_NET_CX82310_ETH=m
++CONFIG_USB_NET_INT51X1=m
++CONFIG_USB_CDC_PHONET=m
++CONFIG_USB_IPHETH=m
++CONFIG_USB_SIERRA_NET=m
++CONFIG_USB_VL600=m
++
++#
++# USB Host-to-Host Cables
++#
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++
++#
++# Intelligent USB Devices/Gadgets
++#
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_KC2190=y
++
++# CONFIG_USB_MUSB_HDRC is not set
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=y
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_SIMPLE=m
++CONFIG_USB_SERIAL_AIRCABLE=m
++CONFIG_USB_SERIAL_ARK3116=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_CH341=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP210X=m
++CONFIG_USB_SERIAL_QUALCOMM=m
++CONFIG_USB_SERIAL_SYMBOL=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_EMPEG=m
++# CONFIG_USB_SERIAL_F81232 is not set
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_FUNSOFT=m
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_IUU=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KEYSPAN_MPR=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19=y
++CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++# CONFIG_USB_SERIAL_METRO is not set
++CONFIG_USB_SERIAL_MOS7720=m
++CONFIG_USB_SERIAL_MOS7715_PARPORT=y
++# CONFIG_USB_SERIAL_ZIO is not set
++# CONFIG_USB_SERIAL_WISHBONE is not set
++# CONFIG_USB_SERIAL_ZTE is not set
++CONFIG_USB_SERIAL_MOS7840=m
++CONFIG_USB_SERIAL_MOTOROLA=m
++# CONFIG_USB_SERIAL_MXUPORT is not set
++CONFIG_USB_SERIAL_NAVMAN=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OTI6858=m
++CONFIG_USB_SERIAL_OPTICON=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_SERIAL_PL2303=m
++# CONFIG_USB_SERIAL_QUATECH2 is not set
++CONFIG_USB_SERIAL_SAFE=m
++CONFIG_USB_SERIAL_SAFE_PADDED=y
++CONFIG_USB_SERIAL_SIERRAWIRELESS=m
++CONFIG_USB_SERIAL_SIEMENS_MPI=m
++CONFIG_USB_SERIAL_SPCP8X5=m
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_QCAUX=m
++CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m
++CONFIG_USB_SERIAL_XSENS_MT=m
++CONFIG_USB_SERIAL_DEBUG=m
++CONFIG_USB_SERIAL_SSU100=m
++CONFIG_USB_SERIAL_QT2=m
++CONFIG_USB_SERIAL_FLASHLOADER=m
++CONFIG_USB_SERIAL_SUUNTO=m
++CONFIG_USB_SERIAL_CONSOLE=y
++
++CONFIG_USB_EZUSB=y
++CONFIG_USB_EMI62=m
++CONFIG_USB_LED=m
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++
++#
++# USB Miscellaneous drivers
++#
++
++CONFIG_USB_ADUTUX=m
++CONFIG_USB_SEVSEG=m
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_APPLEDISPLAY=m
++
++# Physical Layer USB driver
++# CONFIG_USB_OTG_FSM is not set
++
++# CONFIG_GENERIC_PHY is not set
++# CONFIG_PHY_EXYNOS_MIPI_VIDEO is not set
++# CONFIG_PHY_EXYNOS_DP_VIDEO is not set
++# CONFIG_OMAP_USB2 is not set
++# CONFIG_OMAP_USB3 is not set
++# CONFIG_OMAP_CONTROL_USB is not set
++# CONFIG_AM335X_PHY_USB is not set
++# CONFIG_SAMSUNG_USBPHY is not set
++# CONFIG_SAMSUNG_USB2PHY is not set
++# CONFIG_SAMSUNG_USB3PHY is not set
++# CONFIG_BCM_KONA_USB2_PHY is not set
++CONFIG_USB_RCAR_PHY=m
++CONFIG_USB_ATM=m
++CONFIG_USB_CXACRU=m
++# CONFIG_USB_C67X00_HCD is not set
++# CONFIG_USB_CYTHERM is not set
++CONFIG_USB_EMI26=m
++CONFIG_USB_FTDI_ELAN=m
++CONFIG_USB_FILE_STORAGE=m
++# CONFIG_USB_FILE_STORAGE_TEST is not set
++# CONFIG_USB_DWC3 is not set
++# CONFIG_USB_GADGETFS is not set
++# CONFIG_USB_OXU210HP_HCD is not set
++CONFIG_USB_IOWARRIOR=m
++CONFIG_USB_ISIGHTFW=m
++CONFIG_USB_YUREX=m
++CONFIG_USB_EZUSB_FX2=m
++CONFIG_USB_HSIC_USB3503=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LD=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_MON=y
++CONFIG_USB_PWC=m
++CONFIG_USB_PWC_INPUT_EVDEV=y
++# CONFIG_USB_PWC_DEBUG is not set
++# CONFIG_USB_RIO500 is not set
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_SISUSBVGA_CON=y
++CONFIG_RADIO_SI470X=y
++CONFIG_USB_KEENE=m
++CONFIG_USB_MA901=m
++CONFIG_USB_SI470X=m
++CONFIG_I2C_SI470X=m
++CONFIG_RADIO_SI4713=m
++# CONFIG_RADIO_TEF6862 is not set
++CONFIG_USB_MR800=m
++CONFIG_USB_STKWEBCAM=m
++# CONFIG_USB_TEST is not set
++# CONFIG_USB_EHSET_TEST_FIXTURE is not set
++CONFIG_USB_TRANCEVIBRATOR=m
++CONFIG_USB_U132_HCD=m
++CONFIG_USB_UEAGLEATM=m
++CONFIG_USB_XUSBATM=m
++
++# CONFIG_USB_DWC2 is not set
++
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++
++# CONFIG_USB_ISP1301 is not set
++
++# CONFIG_USB_OTG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB=m
++CONFIG_SSB_PCIHOST=y
++CONFIG_SSB_SDIOHOST=y
++CONFIG_SSB_PCMCIAHOST=y
++# CONFIG_SSB_SILENT is not set
++# CONFIG_SSB_DEBUG is not set
++CONFIG_SSB_DRIVER_PCICORE=y
++CONFIG_SSB_DRIVER_GPIO=y
++
++# Multifunction USB devices
++# CONFIG_MFD_PCF50633 is not set
++CONFIG_PCF50633_ADC=m
++CONFIG_PCF50633_GPIO=m
++# CONFIG_AB3100_CORE is not set
++CONFIG_INPUT_PCF50633_PMU=m
++CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
++
++CONFIG_MFD_SUPPORT=y
++CONFIG_MFD_VX855=m
++CONFIG_MFD_SM501=m
++CONFIG_MFD_SM501_GPIO=y
++CONFIG_MFD_RTSX_PCI=m
++# CONFIG_MFD_TI_AM335X_TSCADC is not set
++CONFIG_MFD_VIPERBOARD=m
++# CONFIG_MFD_RETU is not set
++# CONFIG_MFD_TC6393XB is not set
++# CONFIG_MFD_WM8400 is not set
++# CONFIG_MFD_WM8350_I2C is not set
++# CONFIG_MFD_WM8350 is not set
++# CONFIG_MFD_WM831X is not set
++# CONFIG_AB3100_OTP is not set
++# CONFIG_MFD_TIMBERDALE is not set
++# CONFIG_MFD_WM8994 is not set
++# CONFIG_MFD_88PM860X is not set
++# CONFIG_LPC_SCH is not set
++# CONFIG_LPC_ICH is not set
++# CONFIG_HTC_I2CPLD is not set
++# CONFIG_MFD_MAX8925 is not set
++# CONFIG_MFD_ASIC3 is not set
++# CONFIG_MFD_AS3722 is not set
++# CONFIG_HTC_EGPIO is not set
++# CONFIG_TPS6507X is not set
++# CONFIG_ABX500_CORE is not set
++# CONFIG_MFD_RDC321X is not set
++# CONFIG_MFD_JANZ_CMODIO is not set
++# CONFIG_MFD_KEMPLD is not set
++# CONFIG_MFD_WM831X_I2C is not set
++# CONFIG_MFD_CS5535 is not set
++# CONFIG_MFD_STMPE is not set
++# CONFIG_MFD_MAX8998 is not set
++# CONFIG_MFD_TPS6586X is not set
++# CONFIG_MFD_TC3589X is not set
++# CONFIG_MFD_WL1273_CORE is not set
++# CONFIG_MFD_TPS65217 is not set
++# CONFIG_MFD_LM3533 is not set
++# CONFIG_MFD_ARIZONA is not set
++# CONFIG_MFD_ARIZONA_I2C is not set
++# CONFIG_MFD_CROS_EC is not set
++# CONFIG_MFD_TPS65912 is not set
++# CONFIG_MFD_SYSCON is not set
++# CONFIG_MFD_DA9063 is not set
++# CONFIG_MFD_LP3943 is not set
++
++#
++# File systems
++#
++CONFIG_MISC_FILESYSTEMS=y
++
++# ext4 is used for ext2 and ext3 filesystems
++CONFIG_JBD2=y
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++CONFIG_REISERFS_PROC_INFO=y
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++# CONFIG_JFS_DEBUG is not set
++# CONFIG_JFS_STATISTICS is not set
++CONFIG_JFS_POSIX_ACL=y
++CONFIG_JFS_SECURITY=y
++CONFIG_XFS_FS=m
++# CONFIG_XFS_DEBUG is not set
++# CONFIG_XFS_RT is not set
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++# CONFIG_QFMT_V1 is not set
++CONFIG_QFMT_V2=y
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++# Autofsv3 is obsolete.
++# systemd is dependent upon AUTOFS, so build it in.
++# CONFIG_EXOFS_FS is not set
++# CONFIG_EXOFS_DEBUG is not set
++CONFIG_NILFS2_FS=m
++# CONFIG_LOGFS is not set
++CONFIG_CEPH_FS=m
++CONFIG_CEPH_FSCACHE=y
++CONFIG_BLK_DEV_RBD=m
++CONFIG_CEPH_LIB=m
++CONFIG_CEPH_FS_POSIX_ACL=y
++# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set
++
++CONFIG_FSCACHE=m
++CONFIG_FSCACHE_STATS=y
++# CONFIG_FSCACHE_HISTOGRAM is not set
++# CONFIG_FSCACHE_DEBUG is not set
++CONFIG_FSCACHE_OBJECT_LIST=y
++
++CONFIG_CACHEFILES=m
++# CONFIG_CACHEFILES_DEBUG is not set
++# CONFIG_CACHEFILES_HISTOGRAM is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_VMCORE=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++# CONFIG_DEBUG_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++CONFIG_AFFS_FS=m
++CONFIG_ECRYPT_FS=m
++# CONFIG_ECRYPT_FS_MESSAGING is not set
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++# CONFIG_HFSPLUS_FS_POSIX_ACL is not set
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++
++CONFIG_CRAMFS=m
++CONFIG_SQUASHFS=m
++CONFIG_SQUASHFS_XATTR=y
++CONFIG_SQUASHFS_LZO=y
++CONFIG_SQUASHFS_XZ=y
++CONFIG_SQUASHFS_ZLIB=y
++# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
++# CONFIG_SQUASHFS_EMBEDDED is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_QNX6FS_FS is not set
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
++CONFIG_9P_FS=m
++CONFIG_9P_FSCACHE=y
++CONFIG_9P_FS_POSIX_ACL=y
++CONFIG_9P_FS_SECURITY=y
++# CONFIG_OMFS_FS is not set
++CONFIG_CUSE=m
++# CONFIG_F2FS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NETWORK_FILESYSTEMS=y
++# CONFIG_NFS_V2 is not set
++CONFIG_NFS_V3=y
++CONFIG_NFS_SWAP=y
++CONFIG_NFS_V4_1=y
++CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
++# CONFIG_NFS_V4_1_MIGRATION is not set
++CONFIG_NFS_V4_2=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_V4_SECURITY_LABEL=y
++CONFIG_NFS_FSCACHE=y
++# CONFIG_NFS_USE_LEGACY_DNS is not set
++CONFIG_PNFS_OBJLAYOUT=m
++CONFIG_PNFS_BLOCK=m
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_SUNRPC_XPRT_RDMA=m
++CONFIG_SUNRPC_DEBUG=y
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_CIFS=m
++CONFIG_CIFS_STATS=y
++# CONFIG_CIFS_STATS2 is not set
++CONFIG_CIFS_SMB2=y
++CONFIG_CIFS_UPCALL=y
++CONFIG_CIFS_XATTR=y
++CONFIG_CIFS_POSIX=y
++CONFIG_CIFS_FSCACHE=y
++CONFIG_CIFS_ACL=y
++CONFIG_CIFS_WEAK_PW_HASH=y
++CONFIG_CIFS_DEBUG=y
++# CONFIG_CIFS_DEBUG2 is not set
++CONFIG_CIFS_DFS_UPCALL=y
++CONFIG_CIFS_NFSD_EXPORT=y
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++CONFIG_NCPFS_SMALLDOS=y
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_AFS_FS is not set
++# CONFIG_AF_RXRPC is not set
++
++CONFIG_OCFS2_FS=m
++# CONFIG_OCFS2_DEBUG_FS is not set
++# CONFIG_OCFS2_DEBUG_MASKLOG is not set
++CONFIG_OCFS2_FS_O2CB=m
++CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
++# CONFIG_OCFS2_FS_STATS is not set
++
++CONFIG_BTRFS_FS=m
++CONFIG_BTRFS_FS_POSIX_ACL=y
++# Maybe see if we want this on for debug kernels?
++# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
++# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
++# CONFIG_BTRFS_DEBUG is not set
++# CONFIG_BTRFS_ASSERT is not set
++
++CONFIG_CONFIGFS_FS=y
++
++CONFIG_DLM=m
++CONFIG_DLM_DEBUG=y
++CONFIG_GFS2_FS=m
++CONFIG_GFS2_FS_LOCKING_DLM=y
++
++
++CONFIG_UBIFS_FS_XATTR=y
++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
++# CONFIG_UBIFS_FS_DEBUG is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_AIX_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_BSD_DISKLABEL=y
++CONFIG_EFI_PARTITION=y
++CONFIG_KARMA_PARTITION=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_OSF_PARTITION=y
++CONFIG_SGI_PARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_SUN_PARTITION=y
++# CONFIG_SYSV68_PARTITION is not set
++CONFIG_UNIXWARE_DISKLABEL=y
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_CMDLINE_PARTITION is not set
++
++CONFIG_NLS=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_MAC_ROMAN=m
++CONFIG_NLS_MAC_CELTIC=m
++CONFIG_NLS_MAC_CENTEURO=m
++CONFIG_NLS_MAC_CROATIAN=m
++CONFIG_NLS_MAC_CYRILLIC=m
++CONFIG_NLS_MAC_GAELIC=m
++CONFIG_NLS_MAC_GREEK=m
++CONFIG_NLS_MAC_ICELAND=m
++CONFIG_NLS_MAC_INUIT=m
++CONFIG_NLS_MAC_ROMANIAN=m
++CONFIG_NLS_MAC_TURKISH=m
++
++#
++# Profiling support
++#
++CONFIG_PROFILING=y
++CONFIG_OPROFILE=m
++CONFIG_OPROFILE_EVENT_MULTIPLEX=y
++
++#
++# Kernel hacking
++#
++CONFIG_DEBUG_KERNEL=y
++CONFIG_FRAME_WARN=1024
++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0
++# CONFIG_DEBUG_INFO is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_HEADERS_CHECK=y
++# CONFIG_LKDTM is not set
++# CONFIG_NOTIFIER_ERROR_INJECTION is not set
++# CONFIG_READABLE_ASM is not set
++
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_LOCKDEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++
++# DEBUG options that don't get enabled/disabled with 'make debug/release'
++
++# This generates a huge amount of dmesg spew
++# CONFIG_DEBUG_KOBJECT is not set
++#
++# This breaks booting until the module patches are in-tree
++# CONFIG_DEBUG_KOBJECT_RELEASE is not set
++#
++#
++# These debug options are deliberatly left on (even in 'make release' kernels).
++# They aren't that much of a performance impact, and the value
++# from getting useful bug-reports makes it worth leaving them on.
++# CONFIG_DEBUG_HIGHMEM is not set
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_BOOT_PRINTK_DELAY=y
++CONFIG_DEBUG_DEVRES=y
++CONFIG_DEBUG_RODATA_TEST=y
++CONFIG_DEBUG_NX_TEST=m
++CONFIG_DEBUG_SET_MODULE_RONX=y
++CONFIG_DEBUG_BOOT_PARAMS=y
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
++CONFIG_LOCKUP_DETECTOR=y
++# CONFIG_DEBUG_INFO_REDUCED is not set
++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
++# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
++# CONFIG_PANIC_ON_OOPS is not set
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_ATOMIC64_SELFTEST=y
++CONFIG_MEMORY_FAILURE=y
++CONFIG_HWPOISON_INJECT=m
++CONFIG_CROSS_MEMORY_ATTACH=y
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++# CONFIG_BACKTRACE_SELF_TEST is not set
++CONFIG_RESOURCE_COUNTERS=y
++# CONFIG_DEBUG_VIRTUAL is not set
++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
++CONFIG_EARLY_PRINTK_DBGP=y
++# CONFIG_PAGE_POISONING is not set
++# CONFIG_CRASH_DUMP is not set
++# CONFIG_CRASH is not set
++# CONFIG_GCOV_KERNEL is not set
++# CONFIG_RAMOOPS is not set
++
++
++#
++# Security options
++#
++CONFIG_SECURITY=y
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++CONFIG_SECURITY_NETWORK=y
++CONFIG_SECURITY_NETWORK_XFRM=y
++# CONFIG_SECURITY_PATH is not set
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
++# CONFIG_SECURITY_SMACK is not set
++# CONFIG_SECURITY_TOMOYO is not set
++# CONFIG_SECURITY_APPARMOR is not set
++# CONFIG_SECURITY_YAMA is not set
++CONFIG_AUDIT=y
++CONFIG_AUDITSYSCALL=y
++# http://lists.fedoraproject.org/pipermail/kernel/2013-February/004125.html
++CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
++
++CONFIG_SECCOMP=y
++
++# CONFIG_SSBI is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_FIPS=y
++CONFIG_CRYPTO_USER_API_HASH=y
++CONFIG_CRYPTO_USER_API_SKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# Note, CONFIG_CRYPTO_MANAGER_DISABLE_TESTS needs to be unset, or FIPS will be disabled.
++# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
++CONFIG_CRYPTO_HW=y
++CONFIG_CRYPTO_BLKCIPHER=y
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_AES=y
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_AUTHENC=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_CRC32C=y
++CONFIG_CRYPTO_CRC32=m
++CONFIG_CRYPTO_CTR=y
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_FCRYPT=m
++CONFIG_CRYPTO_GF128MUL=m
++CONFIG_CRYPTO_CMAC=m
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_LZO=m
++CONFIG_CRYPTO_LZ4=m
++CONFIG_CRYPTO_LZ4HC=m
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_PCBC=m
++CONFIG_CRYPTO_SALSA20=m
++CONFIG_CRYPTO_SALSA20_586=m
++CONFIG_CRYPTO_SEED=m
++CONFIG_CRYPTO_SEQIV=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_XCBC=m
++CONFIG_CRYPTO_VMAC=m
++CONFIG_CRYPTO_CRC32C_INTEL=m
++CONFIG_CRYPTO_GHASH=m
++CONFIG_CRYPTO_DEV_HIFN_795X=m
++CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
++CONFIG_CRYPTO_PCRYPT=m
++
++
++
++# Random number generation
++
++#
++# Library routines
++#
++CONFIG_CRC16=y
++CONFIG_CRC32=m
++# CONFIG_CRC32_SELFTEST is not set
++CONFIG_CRC_ITU_T=m
++CONFIG_CRC8=m
++# CONFIG_RANDOM32_SELFTEST is not set
++CONFIG_CORDIC=m
++# CONFIG_DDR is not set
++
++CONFIG_CRYPTO_ZLIB=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_KEYS=y
++CONFIG_PERSISTENT_KEYRINGS=y
++CONFIG_BIG_KEYS=y
++CONFIG_TRUSTED_KEYS=m
++CONFIG_ENCRYPTED_KEYS=m
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++
++CONFIG_ATA_OVER_ETH=m
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=m
++# CONFIG_BACKLIGHT_GENERIC is not set
++CONFIG_BACKLIGHT_PROGEAR=m
++
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_PLATFORM=m
++
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_CFS_BANDWIDTH=y
++CONFIG_SCHED_OMIT_FRAME_POINTER=y
++CONFIG_RT_GROUP_SCHED=y
++CONFIG_SCHED_AUTOGROUP=y
++
++CONFIG_CPUSETS=y
++CONFIG_PROC_PID_CPUSET=y
++
++# CONFIG_CGROUP_DEBUG is not set
++CONFIG_CGROUP_CPUACCT=y
++CONFIG_CGROUP_DEVICE=y
++CONFIG_CGROUP_FREEZER=y
++CONFIG_CGROUP_SCHED=y
++CONFIG_MEMCG=y
++CONFIG_MEMCG_SWAP=y
++CONFIG_MEMCG_SWAP_ENABLED=y
++CONFIG_MEMCG_KMEM=y
++# CONFIG_CGROUP_HUGETLB is not set
++CONFIG_CGROUP_PERF=y
++CONFIG_CGROUP_NET_PRIO=m
++# CONFIG_CGROUP_NET_CLASSID is not set
++CONFIG_BLK_CGROUP=y
++
++# CONFIG_SYSFS_DEPRECATED is not set
++# CONFIG_SYSFS_DEPRECATED_V2 is not set
++
++CONFIG_PRINTK_TIME=y
++
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_ENABLE_WARN_DEPRECATED is not set
++
++CONFIG_KEXEC=y
++
++CONFIG_HWMON=y
++# CONFIG_HWMON_DEBUG_CHIP is not set
++CONFIG_THERMAL_HWMON=y
++# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
++CONFIG_THERMAL_GOV_FAIR_SHARE=y
++# CONFIG_THERMAL_GOV_USER_SPACE is not set
++CONFIG_THERMAL_GOV_STEP_WISE=y
++# CONFIG_THERMAL_EMULATION is not set
++
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++
++#
++# Bus devices
++#
++# CONFIG_OMAP_OCP2SCP is not set
++CONFIG_PROC_EVENTS=y
++
++CONFIG_IBMASR=m
++
++CONFIG_PM=y
++CONFIG_PM_STD_PARTITION=""
++# CONFIG_DPM_WATCHDOG is not set # revisit this in debug
++CONFIG_PM_TRACE=y
++CONFIG_PM_TRACE_RTC=y
++# CONFIG_PM_OPP is not set
++# CONFIG_PM_AUTOSLEEP is not set
++# CONFIG_PM_WAKELOCKS is not set
++CONFIG_HIBERNATION=y
++# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
++CONFIG_SUSPEND=y
++
++CONFIG_CPU_FREQ_TABLE=y
++CONFIG_CPU_FREQ_STAT=m
++CONFIG_CPU_FREQ_STAT_DETAILS=y
++
++
++CONFIG_NET_VENDOR_SMC=y
++# CONFIG_IBMTR is not set
++# CONFIG_SKISA is not set
++# CONFIG_PROTEON is not set
++# CONFIG_SMCTR is not set
++
++# CONFIG_MOUSE_ATIXL is not set
++
++# CONFIG_MEDIA_PARPORT_SUPPORT is not set
++
++CONFIG_RADIO_TEA5764=m
++CONFIG_RADIO_SAA7706H=m
++CONFIG_RADIO_CADET=m
++CONFIG_RADIO_RTRACK=m
++CONFIG_RADIO_RTRACK2=m
++CONFIG_RADIO_AZTECH=m
++CONFIG_RADIO_GEMTEK=m
++CONFIG_RADIO_SF16FMI=m
++CONFIG_RADIO_SF16FMR2=m
++CONFIG_RADIO_TERRATEC=m
++CONFIG_RADIO_TRUST=m
++CONFIG_RADIO_TYPHOON=m
++CONFIG_RADIO_ZOLTRIX=m
++
++CONFIG_SND_DARLA20=m
++CONFIG_SND_GINA20=m
++CONFIG_SND_LAYLA20=m
++CONFIG_SND_DARLA24=m
++CONFIG_SND_GINA24=m
++CONFIG_SND_LAYLA24=m
++CONFIG_SND_MONA=m
++CONFIG_SND_MIA=m
++CONFIG_SND_ECHO3G=m
++CONFIG_SND_INDIGO=m
++CONFIG_SND_INDIGOIO=m
++CONFIG_SND_INDIGODJ=m
++CONFIG_SND_INDIGOIOX=m
++CONFIG_SND_INDIGODJX=m
++
++CONFIG_BALLOON_COMPACTION=y
++CONFIG_COMPACTION=y
++CONFIG_MIGRATION=y
++CONFIG_BOUNCE=y
++# CONFIG_LEDS_AMS_DELTA is not set
++# CONFIG_LEDS_LOCOMO is not set
++# CONFIG_LEDS_NET48XX is not set
++# CONFIG_LEDS_NET5501 is not set
++# CONFIG_LEDS_PCA9532 is not set
++# CONFIG_LEDS_PCA955X is not set
++# CONFIG_LEDS_BD2802 is not set
++# CONFIG_LEDS_S3C24XX is not set
++# CONFIG_LEDS_PCA9633 is not set
++CONFIG_LEDS_DELL_NETBOOKS=m
++# CONFIG_LEDS_TCA6507 is not set
++# CONFIG_LEDS_LM355x is not set
++# CONFIG_LEDS_OT200 is not set
++# CONFIG_LEDS_PWM is not set
++# CONFIG_LEDS_LP8501 is not set
++# CONFIG_LEDS_PCA963X is not set
++# CONFIG_LEDS_PCA9685 is not set
++CONFIG_LEDS_TRIGGER_TIMER=m
++CONFIG_LEDS_TRIGGER_ONESHOT=m
++CONFIG_LEDS_TRIGGER_IDE_DISK=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=m
++CONFIG_LEDS_TRIGGER_BACKLIGHT=m
++# CONFIG_LEDS_TRIGGER_CPU is not set
++CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
++CONFIG_LEDS_TRIGGER_TRANSIENT=m
++CONFIG_LEDS_TRIGGER_CAMERA=m
++CONFIG_LEDS_ALIX2=m
++CONFIG_LEDS_CLEVO_MAIL=m
++CONFIG_LEDS_INTEL_SS4200=m
++CONFIG_LEDS_LM3530=m
++# CONFIG_LEDS_LM3642 is not set
++CONFIG_LEDS_LM3556=m
++CONFIG_LEDS_BLINKM=m
++CONFIG_LEDS_LP3944=m
++CONFIG_LEDS_LP5521=m
++CONFIG_LEDS_LP5523=m
++CONFIG_LEDS_LP5562=m
++CONFIG_LEDS_LT3593=m
++CONFIG_LEDS_REGULATOR=m
++CONFIG_LEDS_WM8350=m
++CONFIG_LEDS_WM831X_STATUS=m
++
++CONFIG_DMA_ENGINE=y
++CONFIG_DW_DMAC_CORE=m
++CONFIG_DW_DMAC=m
++CONFIG_DW_DMAC_PCI=m
++# CONFIG_DW_DMAC_BIG_ENDIAN_IO is not set
++# CONFIG_TIMB_DMA is not set
++# CONFIG_DMATEST is not set
++CONFIG_ASYNC_TX_DMA=y
++
++CONFIG_UNUSED_SYMBOLS=y
++
++CONFIG_UPROBE_EVENT=y
++
++CONFIG_DYNAMIC_FTRACE=y
++# CONFIG_IRQSOFF_TRACER is not set
++CONFIG_SCHED_TRACER=y
++CONFIG_CONTEXT_SWITCH_TRACER=y
++CONFIG_TRACER_SNAPSHOT=y
++# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
++CONFIG_FTRACE_SYSCALLS=y
++CONFIG_FTRACE_MCOUNT_RECORD=y
++# CONFIG_FTRACE_STARTUP_TEST is not set
++# CONFIG_TRACE_BRANCH_PROFILING is not set
++CONFIG_FUNCTION_PROFILER=y
++CONFIG_RING_BUFFER_BENCHMARK=m
++# CONFIG_RING_BUFFER_STARTUP_TEST is not set
++# CONFIG_RBTREE_TEST is not set
++# CONFIG_INTERVAL_TREE_TEST is not set
++CONFIG_FUNCTION_TRACER=y
++CONFIG_STACK_TRACER=y
++# CONFIG_FUNCTION_GRAPH_TRACER is not set
++
++CONFIG_KPROBES=y
++CONFIG_KPROBE_EVENT=y
++# CONFIG_KPROBES_SANITY_TEST is not set
++# CONFIG_JUMP_LABEL is not set
++CONFIG_OPTPROBES=y
++
++CONFIG_HZ_1000=y
++
++CONFIG_TIMER_STATS=y
++CONFIG_PERF_COUNTERS=y
++
++# Auxillary displays
++CONFIG_KS0108=m
++CONFIG_KS0108_PORT=0x378
++CONFIG_KS0108_DELAY=2
++CONFIG_CFAG12864B=y
++CONFIG_CFAG12864B_RATE=20
++
++# CONFIG_PHANTOM is not set
++
++# CONFIG_POWER_SUPPLY_DEBUG is not set
++
++# CONFIG_TEST_POWER is not set
++CONFIG_APM_POWER=m
++# CONFIG_GENERIC_ADC_BATTERY is not set
++# CONFIG_WM831X_POWER is not set
++
++# CONFIG_BATTERY_DS2760 is not set
++# CONFIG_BATTERY_DS2781 is not set
++# CONFIG_BATTERY_DS2782 is not set
++# CONFIG_BATTERY_SBS is not set
++# CONFIG_BATTERY_BQ20Z75 is not set
++# CONFIG_BATTERY_DS2780 is not set
++# CONFIG_BATTERY_BQ27x00 is not set
++# CONFIG_BATTERY_MAX17040 is not set
++# CONFIG_BATTERY_MAX17042 is not set
++# CONFIG_BATTERY_GOLDFISH is not set
++
++# CONFIG_CHARGER_ISP1704 is not set
++# CONFIG_CHARGER_MAX8903 is not set
++# CONFIG_CHARGER_LP8727 is not set
++# CONFIG_CHARGER_GPIO is not set
++# CONFIG_CHARGER_PCF50633 is not set
++# CONFIG_CHARGER_BQ2415X is not set
++# CONFIG_CHARGER_BQ24190 is not set
++# CONFIG_CHARGER_BQ24735 is not set
++CONFIG_POWER_RESET=y
++
++# CONFIG_PDA_POWER is not set
++
++CONFIG_AUXDISPLAY=y
++
++CONFIG_UIO=m
++CONFIG_UIO_CIF=m
++# CONFIG_UIO_PDRV is not set
++# CONFIG_UIO_PDRV_GENIRQ is not set
++# CONFIG_UIO_DMEM_GENIRQ is not set
++CONFIG_UIO_AEC=m
++CONFIG_UIO_SERCOS3=m
++CONFIG_UIO_PCI_GENERIC=m
++# CONFIG_UIO_NETX is not set
++# CONFIG_UIO_MF624 is not set
++
++CONFIG_VFIO=m
++CONFIG_VFIO_IOMMU_TYPE1=m
++CONFIG_VFIO_PCI=m
++
++
++# LIRC
++CONFIG_LIRC_STAGING=y
++CONFIG_LIRC_BT829=m
++CONFIG_LIRC_IGORPLUGUSB=m
++CONFIG_LIRC_IMON=m
++CONFIG_LIRC_ZILOG=m
++CONFIG_LIRC_PARALLEL=m
++CONFIG_LIRC_SERIAL=m
++CONFIG_LIRC_SERIAL_TRANSMITTER=y
++CONFIG_LIRC_SASEM=m
++CONFIG_LIRC_SIR=m
++CONFIG_LIRC_TTUSBIR=m
++
++# CONFIG_SAMPLES is not set
++
++
++CONFIG_NOZOMI=m
++# CONFIG_TPS65010 is not set
++
++CONFIG_INPUT_APANEL=m
++CONFIG_INPUT_GP2A=m
++# CONFIG_INPUT_GPIO_TILT_POLLED is not set
++# CONFIG_INPUT_GPIO_BEEPER is not set
++
++# CONFIG_INTEL_MENLOW is not set
++CONFIG_ENCLOSURE_SERVICES=m
++CONFIG_IPWIRELESS=m
++
++# CONFIG_BLK_DEV_XIP is not set
++CONFIG_MEMSTICK=m
++# CONFIG_MEMSTICK_DEBUG is not set
++# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
++CONFIG_MSPRO_BLOCK=m
++# CONFIG_MS_BLOCK is not set
++CONFIG_MEMSTICK_TIFM_MS=m
++CONFIG_MEMSTICK_JMICRON_38X=m
++CONFIG_MEMSTICK_R592=m
++CONFIG_MEMSTICK_REALTEK_PCI=m
++
++CONFIG_ACCESSIBILITY=y
++CONFIG_A11Y_BRAILLE_CONSOLE=y
++
++# CONFIG_HTC_PASIC3 is not set
++
++# CONFIG_MT9V022_PCA9536_SWITCH is not set
++
++CONFIG_OPTIMIZE_INLINING=y
++
++# FIXME: This should be x86/ia64 only
++# CONFIG_HP_ILO is not set
++
++CONFIG_GPIOLIB=y
++# CONFIG_PINCTRL is not set
++# CONFIG_DEBUG_PINCTRL is not set
++# CONFIG_PINMUX is not set
++# CONFIG_PINCONF is not set
++
++CONFIG_NET_DSA=m
++CONFIG_NET_DSA_MV88E6060=m
++CONFIG_NET_DSA_MV88E6131=m
++CONFIG_NET_DSA_MV88E6123_61_65=m
++
++# Used by Maemo, we don't care.
++# CONFIG_PHONET is not set
++
++# CONFIG_ICS932S401 is not set
++# CONFIG_ATMEL_SSC is not set
++
++# CONFIG_C2PORT is not set
++
++# CONFIG_REGULATOR_DEBUG is not set
++
++CONFIG_WM8350_POWER=m
++
++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
++
++CONFIG_USB_WUSB=m
++CONFIG_USB_WUSB_CBAF=m
++# CONFIG_USB_WUSB_CBAF_DEBUG is not set
++CONFIG_USB_WHCI_HCD=m
++CONFIG_USB_HWA_HCD=m
++# CONFIG_USB_HCD_BCMA is not set
++# CONFIG_USB_HCD_SSB is not set
++
++CONFIG_UWB=m
++CONFIG_UWB_HWA=m
++CONFIG_UWB_WHCI=m
++CONFIG_UWB_I1480U=m
++
++# CONFIG_ANDROID is not set
++CONFIG_STAGING_MEDIA=y
++# CONFIG_DVB_AS102 is not set
++# CONFIG_ET131X is not set
++# CONFIG_SLICOSS is not set
++# CONFIG_WLAGS49_H2 is not set
++# CONFIG_WLAGS49_H25 is not set
++# CONFIG_VIDEO_DT3155 is not set
++# CONFIG_TI_ST is not set
++# CONFIG_FB_XGI is not set
++# CONFIG_VIDEO_GO7007 is not set
++# CONFIG_I2C_BCM2048 is not set
++# CONFIG_VIDEO_TCM825X is not set
++# CONFIG_VIDEO_OMAP4 is not set
++# CONFIG_USB_MSI3101 is not set
++# CONFIG_DT3155 is not set
++# CONFIG_W35UND is not set
++# CONFIG_PRISM2_USB is not set
++# CONFIG_ECHO is not set
++CONFIG_USB_ATMEL=m
++# CONFIG_COMEDI is not set
++# CONFIG_ASUS_OLED is not set
++# CONFIG_PANEL is not set
++# CONFIG_TRANZPORT is not set
++# CONFIG_POHMELFS is not set
++# CONFIG_IDE_PHISON is not set
++# CONFIG_LINE6_USB is not set
++# CONFIG_VME_BUS is not set
++# CONFIG_RAR_REGISTER is not set
++# CONFIG_VT6656 is not set
++# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
++# Larry Finger maintains these (rhbz 913753)
++CONFIG_RTLLIB=m
++CONFIG_RTLLIB_CRYPTO_CCMP=m
++CONFIG_RTLLIB_CRYPTO_TKIP=m
++CONFIG_RTLLIB_CRYPTO_WEP=m
++CONFIG_RTL8192E=m
++# CONFIG_INPUT_GPIO is not set
++# CONFIG_VIDEO_CX25821 is not set
++# CONFIG_R8187SE is not set
++# CONFIG_R8188EU is not set
++# CONFIG_R8821AE is not set
++# CONFIG_RTL8192U is not set
++# CONFIG_FB_SM7XX is not set
++# CONFIG_SPECTRA is not set
++# CONFIG_EASYCAP is not set
++# CONFIG_SOLO6X10 is not set
++# CONFIG_ACPI_QUICKSTART is not set
++# CONFIG_LTE_GDM724X is not set
++CONFIG_R8712U=m # Larry Finger maintains this (rhbz 699618)
++# CONFIG_R8712_AP is not set
++# CONFIG_ATH6K_LEGACY is not set
++# CONFIG_USB_ENESTORAGE is not set
++# CONFIG_BCM_WIMAX is not set
++# CONFIG_USB_BTMTK is not set
++# CONFIG_FT1000 is not set
++# CONFIG_SPEAKUP is not set
++# CONFIG_DX_SEP is not set
++# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
++# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
++# CONFIG_RTS_PSTOR is not set
++CONFIG_ALTERA_STAPL=m
++# CONFIG_DVB_CXD2099 is not set
++# CONFIG_USBIP_CORE is not set
++# CONFIG_INTEL_MEI is not set
++# CONFIG_ZCACHE is not set
++# CONFIG_RTS5139 is not set
++# CONFIG_NVEC_LEDS is not set
++# CONFIG_VT6655 is not set
++# CONFIG_RAMSTER is not set
++# CONFIG_USB_WPAN_HCD is not set
++# CONFIG_WIMAX_GDM72XX is not set
++# CONFIG_IPACK_BUS is not set
++# CONFIG_CSR_WIFI is not set
++# CONFIG_ZCACHE2 is not set
++# CONFIG_NET_VENDOR_SILICOM is not set
++# CONFIG_SBYPASS is not set
++# CONFIG_BPCTL is not set
++# CONFIG_CED1401 is not set
++# CONFIG_DGRP is not set
++# CONFIG_SB105X is not set
++# CONFIG_LUSTRE_FS is not set
++# CONFIG_XILLYBUS is not set
++# CONFIG_DGAP is not set
++# CONFIG_DGNC is not set
++# CONFIG_RTS5208 is not set
++# END OF STAGING
++
++#
++# Remoteproc drivers (EXPERIMENTAL)
++#
++# CONFIG_STE_MODEM_RPROC is not set
++
++CONFIG_LIBFC=m
++CONFIG_LIBFCOE=m
++CONFIG_FCOE=m
++CONFIG_FCOE_FNIC=m
++
++
++# CONFIG_IMA is not set
++CONFIG_IMA_MEASURE_PCR_IDX=10
++CONFIG_IMA_AUDIT=y
++CONFIG_IMA_LSM_RULES=y
++
++# CONFIG_EVM is not set
++# CONFIG_PWM_PCA9685 is not set
++
++CONFIG_LSM_MMAP_MIN_ADDR=65536
++
++CONFIG_STRIP_ASM_SYMS=y
++
++# CONFIG_RCU_FANOUT_EXACT is not set
++# FIXME: Revisit FAST_NO_HZ after it's fixed
++# CONFIG_RCU_FAST_NO_HZ is not set
++# CONFIG_RCU_NOCB_CPU is not set
++CONFIG_RCU_CPU_STALL_TIMEOUT=60
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_RCU_TRACE is not set
++# CONFIG_RCU_CPU_STALL_INFO is not set
++# CONFIG_RCU_USER_QS is not set
++
++CONFIG_KSM=y
++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
++
++CONFIG_FSNOTIFY=y
++CONFIG_FANOTIFY=y
++CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
++
++CONFIG_IEEE802154=m
++CONFIG_IEEE802154_6LOWPAN=m
++CONFIG_IEEE802154_DRIVERS=m
++CONFIG_IEEE802154_FAKEHARD=m
++CONFIG_IEEE802154_FAKELB=m
++
++CONFIG_MAC802154=m
++CONFIG_NET_MPLS_GSO=m
++
++# CONFIG_HSR is not set
++
++# CONFIG_EXTCON is not set
++# CONFIG_EXTCON_ADC_JACK is not set
++# CONFIG_MEMORY is not set
++
++CONFIG_PPS=m
++# CONFIG_PPS_CLIENT_KTIMER is not set
++CONFIG_PPS_CLIENT_LDISC=m
++# CONFIG_PPS_DEBUG is not set
++CONFIG_PPS_CLIENT_PARPORT=m
++CONFIG_PPS_GENERATOR_PARPORT=m
++CONFIG_PPS_CLIENT_GPIO=m
++CONFIG_NTP_PPS=y
++
++CONFIG_PTP_1588_CLOCK=m
++CONFIG_PTP_1588_CLOCK_PCH=m
++
++CONFIG_CLEANCACHE=y
++CONFIG_FRONTSWAP=y
++CONFIG_ZSWAP=y
++CONFIG_ZSMALLOC=y
++# CONFIG_PGTABLE_MAPPING is not set
++
++# CONFIG_MDIO_GPIO is not set
++# CONFIG_KEYBOARD_GPIO_POLLED is not set
++# CONFIG_MOUSE_GPIO is not set
++# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
++# CONFIG_I2C_DESIGNWARE_PCI is not set
++# CONFIG_I2C_GPIO is not set
++# CONFIG_DEBUG_GPIO is not set
++# CONFIG_GPIO_GENERIC_PLATFORM is not set
++# CONFIG_GPIO_CS5535 is not set
++# CONFIG_GPIO_IT8761E is not set
++# CONFIG_SB105X is not set
++# CONFIG_GPIO_TS5500 is not set
++CONFIG_GPIO_VIPERBOARD=m
++# CONFIG_UCB1400_CORE is not set
++# CONFIG_TPS6105X is not set
++# CONFIG_RADIO_MIROPCM20 is not set
++# CONFIG_USB_GPIO_VBUS is not set
++# CONFIG_GPIO_SCH is not set
++# CONFIG_GPIO_LANGWELL is not set
++# CONFIG_GPIO_RDC321X is not set
++# CONFIG_GPIO_VX855 is not set
++# CONFIG_GPIO_PCH is not set
++# CONFIG_GPIO_ML_IOH is not set
++# CONFIG_GPIO_AMD8111 is not set
++# CONFIG_GPIO_BT8XX is not set
++# CONFIG_GPIO_GRGPIO is not set
++# CONFIG_GPIO_PL061 is not set
++# CONFIG_GPIO_BCM_KONA is not set
++# CONFIG_GPIO_SCH311X is not set
++CONFIG_GPIO_MAX730X=m
++CONFIG_GPIO_MAX7300=m
++CONFIG_GPIO_MAX732X=m
++CONFIG_GPIO_PCF857X=m
++CONFIG_GPIO_SX150X=y
++CONFIG_GPIO_ADP5588=m
++CONFIG_GPIO_ADNP=m
++CONFIG_GPIO_MAX7301=m
++CONFIG_GPIO_MCP23S08=m
++CONFIG_GPIO_MC33880=m
++CONFIG_GPIO_74X164=m
++
++# FIXME: Why?
++CONFIG_EVENT_POWER_TRACING_DEPRECATED=y
++
++CONFIG_TEST_KSTRTOX=y
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++# CONFIG_XZ_DEC_IA64 is not set
++CONFIG_XZ_DEC_ARM=y
++# CONFIG_XZ_DEC_ARMTHUMB is not set
++# CONFIG_XZ_DEC_SPARC is not set
++# CONFIG_XZ_DEC_TEST is not set
++
++# CONFIG_POWER_AVS is not set
++
++CONFIG_TARGET_CORE=m
++CONFIG_ISCSI_TARGET=m
++CONFIG_LOOPBACK_TARGET=m
++CONFIG_SBP_TARGET=m
++CONFIG_TCM_IBLOCK=m
++CONFIG_TCM_FILEIO=m
++CONFIG_TCM_PSCSI=m
++CONFIG_TCM_FC=m
++
++CONFIG_HWSPINLOCK=m
++
++CONFIG_PSTORE=y
++CONFIG_PSTORE_RAM=m
++# CONFIG_PSTORE_CONSOLE is not set
++# CONFIG_PSTORE_FTRACE is not set
++
++# CONFIG_TEST_MODULE is not set
++# CONFIG_TEST_USER_COPY is not set
++
++# CONFIG_AVERAGE is not set
++# CONFIG_VMXNET3 is not set
++
++# CONFIG_SIGMA is not set
++
++CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
++
++CONFIG_BCMA=m
++CONFIG_BCMA_BLOCKIO=y
++CONFIG_BCMA_HOST_PCI_POSSIBLE=y
++CONFIG_BCMA_HOST_PCI=y
++# CONFIG_BCMA_HOST_SOC is not set
++CONFIG_BCMA_DRIVER_GMAC_CMN=y
++CONFIG_BCMA_DRIVER_GPIO=y
++# CONFIG_BCMA_DEBUG is not set
++
++# CONFIG_GOOGLE_FIRMWARE is not set
++# CONFIG_INTEL_MID_PTI is not set
++
++# CONFIG_MAILBOX is not set
++
++CONFIG_FMC=m
++CONFIG_FMC_FAKEDEV=m
++CONFIG_FMC_TRIVIAL=m
++CONFIG_FMC_WRITE_EEPROM=m
++CONFIG_FMC_CHARDEV=m
++
++# CONFIG_GENWQE is not set
++
++# CONFIG_POWERCAP is not set
++
++# CONFIG_HSI is not set
++
++
++# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set
++
++# CONFIG_PM_DEVFREQ is not set
++# CONFIG_MODULE_SIG is not set
++# CONFIG_SYSTEM_TRUSTED_KEYRING is not set
++# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set
++# CONFIG_MODULE_VERIFY_ELF is not set
++# CONFIG_CRYPTO_KEY_TYPE is not set
++# CONFIG_PGP_LIBRARY is not set
++# CONFIG_PGP_PRELOAD is not set
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_PROC_DEVICETREE=y
++
+diff -Nur linux-3.14.36/arch/arm/configs/imx_v7_defconfig linux-openelec/arch/arm/configs/imx_v7_defconfig
+--- linux-3.14.36/arch/arm/configs/imx_v7_defconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/configs/imx_v7_defconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,343 @@
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_KERNEL_LZO=y
++CONFIG_SYSVIPC=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_CGROUPS=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++# CONFIG_SLUB_DEBUG is not set
++# CONFIG_COMPAT_BRK is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_GPIO_PCA953X=y
++CONFIG_ARCH_MXC=y
++CONFIG_MXC_DEBUG_BOARD=y
++CONFIG_MACH_IMX51_DT=y
++CONFIG_MACH_EUKREA_CPUIMX51SD=y
++CONFIG_SOC_IMX53=y
++CONFIG_SOC_IMX6Q=y
++CONFIG_SOC_IMX6SL=y
++CONFIG_SOC_VF610=y
++# CONFIG_SWP_EMULATE is not set
++CONFIG_SMP=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_PREEMPT=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_HIGHMEM=y
++CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++CONFIG_ARM_IMX6_CPUFREQ=y
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_BINFMT_MISC=m
++CONFIG_PM_RUNTIME=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_TEST_SUSPEND=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_IPV6=y
++CONFIG_NETFILTER=y
++CONFIG_VLAN_8021Q=y
++# CONFIG_WIRELESS is not set
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=320
++CONFIG_IMX_WEIM=y
++CONFIG_CONNECTOR=y
++CONFIG_MTD=y
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_CFI=y
++CONFIG_MTD_JEDECPROBE=y
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_CFI_AMDSTD=y
++CONFIG_MTD_CFI_STAA=y
++CONFIG_MTD_PHYSMAP_OF=y
++CONFIG_MTD_DATAFLASH=y
++CONFIG_MTD_M25P80=y
++CONFIG_MTD_SST25L=y
++CONFIG_MTD_NAND=y
++CONFIG_MTD_NAND_GPMI_NAND=y
++CONFIG_MTD_NAND_MXC=y
++CONFIG_MTD_UBI=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_EEPROM_AT24=y
++CONFIG_EEPROM_AT25=y
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_IMX=y
++CONFIG_PATA_IMX=y
++CONFIG_NETDEVICES=y
++# CONFIG_NET_VENDOR_BROADCOM is not set
++CONFIG_CS89x0=y
++CONFIG_CS89x0_PLATFORM=y
++# CONFIG_NET_VENDOR_FARADAY is not set
++# CONFIG_NET_VENDOR_INTEL is not set
++# CONFIG_NET_VENDOR_MARVELL is not set
++# CONFIG_NET_VENDOR_MICREL is not set
++# CONFIG_NET_VENDOR_MICROCHIP is not set
++# CONFIG_NET_VENDOR_NATSEMI is not set
++# CONFIG_NET_VENDOR_SEEQ is not set
++CONFIG_SMC91X=y
++CONFIG_SMC911X=y
++CONFIG_SMSC911X=y
++# CONFIG_NET_VENDOR_STMICRO is not set
++# CONFIG_WLAN is not set
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_EVDEV=y
++CONFIG_INPUT_EVBUG=m
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_IMX=y
++CONFIG_MOUSE_PS2=m
++CONFIG_MOUSE_PS2_ELANTECH=y
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_EGALAX=y
++CONFIG_TOUCHSCREEN_EGALAX_SINGLE_TOUCH=y
++CONFIG_TOUCHSCREEN_MAX11801=y
++CONFIG_TOUCHSCREEN_MC13783=y
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_MMA8450=y
++CONFIG_INPUT_ISL29023=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_IMX=y
++CONFIG_SERIAL_IMX_CONSOLE=y
++CONFIG_SERIAL_FSL_LPUART=y
++CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
++CONFIG_FSL_OTP=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_HELPER_AUTO is not set
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++CONFIG_I2C_IMX=y
++CONFIG_SPI=y
++CONFIG_SPI_IMX=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_SABRESD_MAX8903=y
++CONFIG_IMX6_USB_CHARGER=y
++CONFIG_SENSORS_MAG3110=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
++CONFIG_WATCHDOG=y
++CONFIG_IMX2_WDT=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_MC13XXX_SPI=y
++CONFIG_MFD_MC13XXX_I2C=y
++CONFIG_MFD_SI476X_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_DA9052=y
++CONFIG_REGULATOR_ANATOP=y
++CONFIG_REGULATOR_MC13783=y
++CONFIG_REGULATOR_MC13892=y
++CONFIG_REGULATOR_PFUZE100=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_RADIO_SUPPORT=y
++CONFIG_VIDEO_V4L2_INT_DEVICE=y
++CONFIG_MEDIA_USB_SUPPORT=y
++CONFIG_USB_VIDEO_CLASS=m
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_CAPTURE=m
++CONFIG_VIDEO_MXC_CSI_CAMERA=m
++CONFIG_MXC_CAMERA_OV5640=m
++CONFIG_MXC_CAMERA_OV5642=m
++CONFIG_MXC_CAMERA_OV5640_MIPI=m
++CONFIG_MXC_TVIN_ADV7180=m
++CONFIG_MXC_IPU_DEVICE_QUEUE_SDC=m
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
++CONFIG_VIDEO_MXC_PXP_V4L2=y
++CONFIG_SOC_CAMERA=y
++CONFIG_VIDEO_MX3=y
++CONFIG_RADIO_SI476X=y
++CONFIG_SOC_CAMERA_OV2640=y
++CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
++CONFIG_FB=y
++CONFIG_FB_MXS=y
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=y
++CONFIG_LCD_L4F00242T03=y
++CONFIG_LCD_PLATFORM=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_PWM=y
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_MIPI_DSI=y
++CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=y
++CONFIG_SND_IMX_SOC=y
++CONFIG_SND_SOC_EUKREA_TLV320=y
++CONFIG_SND_SOC_IMX_CS42888=y
++CONFIG_SND_SOC_IMX_WM8962=y
++CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_SPDIF=y
++CONFIG_SND_SOC_IMX_MC13783=y
++CONFIG_SND_SOC_IMX_HDMI=y
++CONFIG_SND_SOC_IMX_SI476X=y
++CONFIG_USB=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_CHIPIDEA=y
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_MXS_PHY=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
++CONFIG_MXC_MIPI_CSI2=y
++CONFIG_MXC_MLB150=m
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_GPIO=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
++CONFIG_RTC_DRV_MC13XXX=y
++CONFIG_RTC_DRV_MXC=y
++CONFIG_RTC_DRV_SNVS=y
++CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
++CONFIG_IMX_SDMA=y
++CONFIG_MXS_DMA=y
++CONFIG_STAGING=y
++CONFIG_COMMON_CLK_DEBUG=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_PWM=y
++CONFIG_PWM_IMX=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=y
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_UTF8=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_FTRACE is not set
++CONFIG_SECURITYFS=y
++CONFIG_CRYPTO_USER=y
++CONFIG_CRYPTO_TEST=m
++CONFIG_CRYPTO_CCM=y
++CONFIG_CRYPTO_GCM=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=y
++CONFIG_CRYPTO_XTS=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++CONFIG_CRYPTO_RMD128=y
++CONFIG_CRYPTO_RMD160=y
++CONFIG_CRYPTO_RMD256=y
++CONFIG_CRYPTO_RMD320=y
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++CONFIG_CRYPTO_TGR192=y
++CONFIG_CRYPTO_WP512=y
++CONFIG_CRYPTO_BLOWFISH=y
++CONFIG_CRYPTO_CAMELLIA=y
++CONFIG_CRYPTO_DES=y
++CONFIG_CRYPTO_TWOFISH=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DEV_FSL_CAAM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
+diff -Nur linux-3.14.36/arch/arm/configs/imx_v7_mfg_defconfig linux-openelec/arch/arm/configs/imx_v7_mfg_defconfig
+--- linux-3.14.36/arch/arm/configs/imx_v7_mfg_defconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/configs/imx_v7_mfg_defconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,341 @@
++CONFIG_KERNEL_LZO=y
++CONFIG_SYSVIPC=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_CGROUPS=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++# CONFIG_SLUB_DEBUG is not set
++# CONFIG_COMPAT_BRK is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_GPIO_PCA953X=y
++CONFIG_ARCH_MXC=y
++CONFIG_MXC_DEBUG_BOARD=y
++CONFIG_MACH_IMX51_DT=y
++CONFIG_MACH_EUKREA_CPUIMX51SD=y
++CONFIG_SOC_IMX53=y
++CONFIG_SOC_IMX6Q=y
++CONFIG_SOC_IMX6SL=y
++CONFIG_SOC_VF610=y
++# CONFIG_SWP_EMULATE is not set
++CONFIG_SMP=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++CONFIG_ARM_IMX6_CPUFREQ=y
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_BINFMT_MISC=m
++CONFIG_PM_RUNTIME=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_TEST_SUSPEND=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_IPV6=y
++CONFIG_NETFILTER=y
++CONFIG_VLAN_8021Q=y
++CONFIG_CFG80211=y
++CONFIG_CFG80211_WEXT=y
++CONFIG_MAC80211=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=320
++CONFIG_IMX_WEIM=y
++CONFIG_CONNECTOR=y
++CONFIG_MTD=y
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_CFI=y
++CONFIG_MTD_JEDECPROBE=y
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_CFI_AMDSTD=y
++CONFIG_MTD_CFI_STAA=y
++CONFIG_MTD_PHYSMAP_OF=y
++CONFIG_MTD_DATAFLASH=y
++CONFIG_MTD_M25P80=y
++CONFIG_MTD_SST25L=y
++CONFIG_MTD_NAND=y
++CONFIG_MTD_NAND_GPMI_NAND=y
++CONFIG_MTD_NAND_MXC=y
++CONFIG_MTD_UBI=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_EEPROM_AT24=y
++CONFIG_EEPROM_AT25=y
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_IMX=y
++CONFIG_PATA_IMX=y
++CONFIG_NETDEVICES=y
++# CONFIG_NET_VENDOR_BROADCOM is not set
++CONFIG_CS89x0=y
++CONFIG_CS89x0_PLATFORM=y
++# CONFIG_NET_VENDOR_FARADAY is not set
++# CONFIG_NET_VENDOR_INTEL is not set
++# CONFIG_NET_VENDOR_MARVELL is not set
++# CONFIG_NET_VENDOR_MICREL is not set
++# CONFIG_NET_VENDOR_MICROCHIP is not set
++# CONFIG_NET_VENDOR_NATSEMI is not set
++# CONFIG_NET_VENDOR_SEEQ is not set
++CONFIG_SMC91X=y
++CONFIG_SMC911X=y
++CONFIG_SMSC911X=y
++# CONFIG_NET_VENDOR_STMICRO is not set
++CONFIG_ATH_CARDS=y
++CONFIG_ATH6KL=m
++CONFIG_ATH6KL_SDIO=m
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_EVDEV=y
++CONFIG_INPUT_EVBUG=m
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_IMX=y
++CONFIG_MOUSE_PS2=m
++CONFIG_MOUSE_PS2_ELANTECH=y
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_EGALAX=y
++CONFIG_TOUCHSCREEN_ELAN=y
++CONFIG_TOUCHSCREEN_MAX11801=y
++CONFIG_TOUCHSCREEN_MC13783=y
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_MMA8450=y
++CONFIG_INPUT_ISL29023=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_IMX=y
++CONFIG_SERIAL_IMX_CONSOLE=y
++CONFIG_SERIAL_FSL_LPUART=y
++CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
++CONFIG_FSL_OTP=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_HELPER_AUTO is not set
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++CONFIG_I2C_IMX=y
++CONFIG_SPI=y
++CONFIG_SPI_IMX=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_SABRESD_MAX8903=y
++CONFIG_SENSORS_MAX17135=y
++CONFIG_SENSORS_MAG3110=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
++CONFIG_WATCHDOG=y
++CONFIG_IMX2_WDT=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_MC13XXX_SPI=y
++CONFIG_MFD_MC13XXX_I2C=y
++CONFIG_MFD_MAX17135=y
++CONFIG_MFD_SI476X_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_DA9052=y
++CONFIG_REGULATOR_ANATOP=y
++CONFIG_REGULATOR_MC13783=y
++CONFIG_REGULATOR_MC13892=y
++CONFIG_REGULATOR_MAX17135=y
++CONFIG_REGULATOR_PFUZE100=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_RADIO_SUPPORT=y
++CONFIG_VIDEO_V4L2_INT_DEVICE=y
++CONFIG_MEDIA_USB_SUPPORT=y
++CONFIG_USB_VIDEO_CLASS=m
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_CAPTURE=m
++CONFIG_VIDEO_MXC_CSI_CAMERA=m
++CONFIG_MXC_CAMERA_OV5640=m
++CONFIG_MXC_CAMERA_OV5642=m
++CONFIG_MXC_CAMERA_OV5640_MIPI=m
++CONFIG_MXC_TVIN_ADV7180=m
++CONFIG_MXC_IPU_DEVICE_QUEUE_SDC=m
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
++CONFIG_VIDEO_MXC_PXP_V4L2=y
++CONFIG_SOC_CAMERA=y
++CONFIG_VIDEO_MX3=y
++CONFIG_RADIO_SI476X=y
++CONFIG_SOC_CAMERA_OV2640=y
++CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
++CONFIG_FB=y
++CONFIG_FB_MXS=y
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=y
++CONFIG_LCD_L4F00242T03=y
++CONFIG_LCD_PLATFORM=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_PWM=y
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_MIPI_DSI=y
++CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FB_MXC_EINK_PANEL=y
++CONFIG_FB_MXS_SII902X=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=y
++CONFIG_SND_IMX_SOC=y
++CONFIG_SND_SOC_EUKREA_TLV320=y
++CONFIG_SND_SOC_IMX_CS42888=y
++CONFIG_SND_SOC_IMX_WM8962=y
++CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_SPDIF=y
++CONFIG_SND_SOC_IMX_MC13783=y
++CONFIG_SND_SOC_IMX_HDMI=y
++CONFIG_SND_SOC_IMX_SI476X=y
++CONFIG_USB=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_CHIPIDEA=y
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_PHY=y
++CONFIG_USB_MXS_PHY=y
++CONFIG_USB_GADGET=y
++# CONFIG_USB_ZERO is not set
++# CONFIG_USB_AUDIO is not set
++# CONFIG_USB_ETH is not set
++# CONFIG_USB_G_NCM is not set
++# CONFIG_USB_GADGETFS is not set
++# CONFIG_USB_FUNCTIONFS is not set
++CONFIG_USB_MASS_STORAGE=y
++CONFIG_FSL_UTP=y
++# CONFIG_USB_G_SERIAL is not set
++# CONFIG_USB_MIDI_GADGET is not set
++# CONFIG_USB_G_PRINTER is not set
++# CONFIG_USB_CDC_COMPOSITE is not set
++# CONFIG_USB_G_ACM_MS is not set
++# CONFIG_USB_G_MULTI is not set
++# CONFIG_USB_G_HID is not set
++# CONFIG_USB_G_DBGP is not set
++# CONFIG_USB_G_WEBCAM is not set
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
++CONFIG_MXC_MIPI_CSI2=y
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
++CONFIG_RTC_DRV_MC13XXX=y
++CONFIG_RTC_DRV_MXC=y
++CONFIG_RTC_DRV_SNVS=y
++CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
++CONFIG_IMX_SDMA=y
++CONFIG_MXS_DMA=y
++CONFIG_STAGING=y
++CONFIG_COMMON_CLK_DEBUG=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_PWM=y
++CONFIG_PWM_IMX=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=y
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_UTF8=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_FTRACE is not set
++CONFIG_SECURITYFS=y
++CONFIG_CRYPTO_USER=y
++CONFIG_CRYPTO_CCM=y
++CONFIG_CRYPTO_GCM=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DEV_FSL_CAAM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
+diff -Nur linux-3.14.36/arch/arm/include/asm/arch_timer.h linux-openelec/arch/arm/include/asm/arch_timer.h
+--- linux-3.14.36/arch/arm/include/asm/arch_timer.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/arch_timer.h 2015-05-06 12:05:43.000000000 -0500
+@@ -107,7 +107,6 @@
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+- | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_VCT_ACCESS_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+ arch_timer_set_cntkctl(cntkctl);
+diff -Nur linux-3.14.36/arch/arm/include/asm/atomic.h linux-openelec/arch/arm/include/asm/atomic.h
+--- linux-3.14.36/arch/arm/include/asm/atomic.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/atomic.h 2015-05-06 12:05:43.000000000 -0500
+@@ -60,6 +60,7 @@
+ int result;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic_add_return\n"
+ "1: ldrex %0, [%3]\n"
+@@ -99,6 +100,7 @@
+ int result;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic_sub_return\n"
+ "1: ldrex %0, [%3]\n"
+@@ -121,6 +123,7 @@
+ unsigned long res;
+
+ smp_mb();
++ prefetchw(&ptr->counter);
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg\n"
+@@ -138,6 +141,33 @@
+ return oldval;
+ }
+
++static inline int __atomic_add_unless(atomic_t *v, int a, int u)
++{
++ int oldval, newval;
++ unsigned long tmp;
++
++ smp_mb();
++ prefetchw(&v->counter);
++
++ __asm__ __volatile__ ("@ atomic_add_unless\n"
++"1: ldrex %0, [%4]\n"
++" teq %0, %5\n"
++" beq 2f\n"
++" add %1, %0, %6\n"
++" strex %2, %1, [%4]\n"
++" teq %2, #0\n"
++" bne 1b\n"
++"2:"
++ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (u), "r" (a)
++ : "cc");
++
++ if (oldval != u)
++ smp_mb();
++
++ return oldval;
++}
++
+ #else /* ARM_ARCH_6 */
+
+ #ifdef CONFIG_SMP
+@@ -186,10 +216,6 @@
+ return ret;
+ }
+
+-#endif /* __LINUX_ARM_ARCH__ */
+-
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+-
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+@@ -200,6 +226,10 @@
+ return c;
+ }
+
++#endif /* __LINUX_ARM_ARCH__ */
++
++#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++
+ #define atomic_inc(v) atomic_add(1, v)
+ #define atomic_dec(v) atomic_sub(1, v)
+
+@@ -299,6 +329,7 @@
+ unsigned long tmp;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_add_return\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+@@ -340,6 +371,7 @@
+ unsigned long tmp;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_sub_return\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+@@ -364,6 +396,7 @@
+ unsigned long res;
+
+ smp_mb();
++ prefetchw(&ptr->counter);
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg\n"
+@@ -388,6 +421,7 @@
+ unsigned long tmp;
+
+ smp_mb();
++ prefetchw(&ptr->counter);
+
+ __asm__ __volatile__("@ atomic64_xchg\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+@@ -409,6 +443,7 @@
+ unsigned long tmp;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+@@ -436,6 +471,7 @@
+ int ret = 1;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_add_unless\n"
+ "1: ldrexd %0, %H0, [%4]\n"
+diff -Nur linux-3.14.36/arch/arm/include/asm/cmpxchg.h linux-openelec/arch/arm/include/asm/cmpxchg.h
+--- linux-3.14.36/arch/arm/include/asm/cmpxchg.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/cmpxchg.h 2015-05-06 12:05:43.000000000 -0500
+@@ -2,6 +2,7 @@
+ #define __ASM_ARM_CMPXCHG_H
+
+ #include <linux/irqflags.h>
++#include <linux/prefetch.h>
+ #include <asm/barrier.h>
+
+ #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
+@@ -35,6 +36,7 @@
+ #endif
+
+ smp_mb();
++ prefetchw((const void *)ptr);
+
+ switch (size) {
+ #if __LINUX_ARM_ARCH__ >= 6
+@@ -138,6 +140,8 @@
+ {
+ unsigned long oldval, res;
+
++ prefetchw((const void *)ptr);
++
+ switch (size) {
+ #ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
+ case 1:
+@@ -230,6 +234,8 @@
+ unsigned long long oldval;
+ unsigned long res;
+
++ prefetchw(ptr);
++
+ __asm__ __volatile__(
+ "1: ldrexd %1, %H1, [%3]\n"
+ " teq %1, %4\n"
+diff -Nur linux-3.14.36/arch/arm/include/asm/ftrace.h linux-openelec/arch/arm/include/asm/ftrace.h
+--- linux-3.14.36/arch/arm/include/asm/ftrace.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/ftrace.h 2015-05-06 12:05:43.000000000 -0500
+@@ -52,15 +52,7 @@
+
+ #endif
+
+-#define HAVE_ARCH_CALLER_ADDR
+-
+-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-#define CALLER_ADDR1 ((unsigned long)return_address(1))
+-#define CALLER_ADDR2 ((unsigned long)return_address(2))
+-#define CALLER_ADDR3 ((unsigned long)return_address(3))
+-#define CALLER_ADDR4 ((unsigned long)return_address(4))
+-#define CALLER_ADDR5 ((unsigned long)return_address(5))
+-#define CALLER_ADDR6 ((unsigned long)return_address(6))
++#define ftrace_return_address(n) return_address(n)
+
+ #endif /* ifndef __ASSEMBLY__ */
+
+diff -Nur linux-3.14.36/arch/arm/include/asm/futex.h linux-openelec/arch/arm/include/asm/futex.h
+--- linux-3.14.36/arch/arm/include/asm/futex.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/futex.h 2015-05-06 12:05:43.000000000 -0500
+@@ -23,6 +23,7 @@
+
+ #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+ smp_mb(); \
++ prefetchw(uaddr); \
+ __asm__ __volatile__( \
+ "1: ldrex %1, [%3]\n" \
+ " " insn "\n" \
+@@ -46,6 +47,8 @@
+ return -EFAULT;
+
+ smp_mb();
++ /* Prefetching cannot fault */
++ prefetchw(uaddr);
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldrex %1, [%4]\n"
+ " teq %1, %2\n"
+diff -Nur linux-3.14.36/arch/arm/include/asm/glue-cache.h linux-openelec/arch/arm/include/asm/glue-cache.h
+--- linux-3.14.36/arch/arm/include/asm/glue-cache.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/glue-cache.h 2015-05-06 12:05:43.000000000 -0500
+@@ -102,19 +102,19 @@
+ #endif
+
+ #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+-# ifdef _CACHE
++//# ifdef _CACHE
+ # define MULTI_CACHE 1
+-# else
+-# define _CACHE v6
+-# endif
++//# else
++//# define _CACHE v6
++//# endif
+ #endif
+
+ #if defined(CONFIG_CPU_V7)
+-# ifdef _CACHE
++//# ifdef _CACHE
+ # define MULTI_CACHE 1
+-# else
+-# define _CACHE v7
+-# endif
++//# else
++//# define _CACHE v7
++//# endif
+ #endif
+
+ #if defined(CONFIG_CPU_V7M)
+diff -Nur linux-3.14.36/arch/arm/include/asm/hardware/cache-l2x0.h linux-openelec/arch/arm/include/asm/hardware/cache-l2x0.h
+--- linux-3.14.36/arch/arm/include/asm/hardware/cache-l2x0.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/hardware/cache-l2x0.h 2015-05-06 12:05:43.000000000 -0500
+@@ -26,8 +26,8 @@
+ #define L2X0_CACHE_TYPE 0x004
+ #define L2X0_CTRL 0x100
+ #define L2X0_AUX_CTRL 0x104
+-#define L2X0_TAG_LATENCY_CTRL 0x108
+-#define L2X0_DATA_LATENCY_CTRL 0x10C
++#define L310_TAG_LATENCY_CTRL 0x108
++#define L310_DATA_LATENCY_CTRL 0x10C
+ #define L2X0_EVENT_CNT_CTRL 0x200
+ #define L2X0_EVENT_CNT1_CFG 0x204
+ #define L2X0_EVENT_CNT0_CFG 0x208
+@@ -54,53 +54,93 @@
+ #define L2X0_LOCKDOWN_WAY_D_BASE 0x900
+ #define L2X0_LOCKDOWN_WAY_I_BASE 0x904
+ #define L2X0_LOCKDOWN_STRIDE 0x08
+-#define L2X0_ADDR_FILTER_START 0xC00
+-#define L2X0_ADDR_FILTER_END 0xC04
++#define L310_ADDR_FILTER_START 0xC00
++#define L310_ADDR_FILTER_END 0xC04
+ #define L2X0_TEST_OPERATION 0xF00
+ #define L2X0_LINE_DATA 0xF10
+ #define L2X0_LINE_TAG 0xF30
+ #define L2X0_DEBUG_CTRL 0xF40
+-#define L2X0_PREFETCH_CTRL 0xF60
+-#define L2X0_POWER_CTRL 0xF80
+-#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
+-#define L2X0_STNDBY_MODE_EN (1 << 0)
++#define L310_PREFETCH_CTRL 0xF60
++#define L310_POWER_CTRL 0xF80
++#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
++#define L310_STNDBY_MODE_EN (1 << 0)
+
+ /* Registers shifts and masks */
+ #define L2X0_CACHE_ID_PART_MASK (0xf << 6)
+ #define L2X0_CACHE_ID_PART_L210 (1 << 6)
++#define L2X0_CACHE_ID_PART_L220 (2 << 6)
+ #define L2X0_CACHE_ID_PART_L310 (3 << 6)
+ #define L2X0_CACHE_ID_RTL_MASK 0x3f
+-#define L2X0_CACHE_ID_RTL_R0P0 0x0
+-#define L2X0_CACHE_ID_RTL_R1P0 0x2
+-#define L2X0_CACHE_ID_RTL_R2P0 0x4
+-#define L2X0_CACHE_ID_RTL_R3P0 0x5
+-#define L2X0_CACHE_ID_RTL_R3P1 0x6
+-#define L2X0_CACHE_ID_RTL_R3P2 0x8
+-
+-#define L2X0_AUX_CTRL_MASK 0xc0000fff
++#define L210_CACHE_ID_RTL_R0P2_02 0x00
++#define L210_CACHE_ID_RTL_R0P1 0x01
++#define L210_CACHE_ID_RTL_R0P2_01 0x02
++#define L210_CACHE_ID_RTL_R0P3 0x03
++#define L210_CACHE_ID_RTL_R0P4 0x0b
++#define L210_CACHE_ID_RTL_R0P5 0x0f
++#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
++#define L310_CACHE_ID_RTL_R0P0 0x00
++#define L310_CACHE_ID_RTL_R1P0 0x02
++#define L310_CACHE_ID_RTL_R2P0 0x04
++#define L310_CACHE_ID_RTL_R3P0 0x05
++#define L310_CACHE_ID_RTL_R3P1 0x06
++#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
++#define L310_CACHE_ID_RTL_R3P2 0x08
++#define L310_CACHE_ID_RTL_R3P3 0x09
++
++/* L2C auxiliary control register - bits common to L2C-210/220/310 */
++#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
++#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
++#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
++#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
++#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
++#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
++/* L2C-210/220 common bits */
+ #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
+-#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
++#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
+ #define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
+-#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
++#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
+ #define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
+-#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
++#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
+ #define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
+-#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
+-#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
+-#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
+-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
+-#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
+-#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
+-#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
+-#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
+-#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
+-#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+-
+-#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
+-#define L2X0_LATENCY_CTRL_RD_SHIFT 4
+-#define L2X0_LATENCY_CTRL_WR_SHIFT 8
+-
+-#define L2X0_ADDR_FILTER_EN 1
++#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
++#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
++#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
++/* L2C-210 specific bits */
++#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
++#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
++#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
++/* L2C-220 specific bits */
++#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
++#define L220_AUX_CTRL_FWA_SHIFT 23
++#define L220_AUX_CTRL_FWA_MASK (3 << 23)
++#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
++#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
++/* L2C-310 specific bits */
++#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
++#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
++#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
++#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
++#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
++#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
++#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
++#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
++#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
++#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
++#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
++
++#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
++#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
++#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
++
++#define L310_ADDR_FILTER_EN 1
++
++#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
++#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
++#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
++#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
++#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
++#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
++#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
+
+ #define L2X0_CTRL_EN 1
+
+diff -Nur linux-3.14.36/arch/arm/include/asm/outercache.h linux-openelec/arch/arm/include/asm/outercache.h
+--- linux-3.14.36/arch/arm/include/asm/outercache.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/outercache.h 2015-05-06 12:05:43.000000000 -0500
+@@ -21,6 +21,7 @@
+ #ifndef __ASM_OUTERCACHE_H
+ #define __ASM_OUTERCACHE_H
+
++#include <linux/bug.h>
+ #include <linux/types.h>
+
+ struct outer_cache_fns {
+@@ -28,53 +29,84 @@
+ void (*clean_range)(unsigned long, unsigned long);
+ void (*flush_range)(unsigned long, unsigned long);
+ void (*flush_all)(void);
+- void (*inv_all)(void);
+ void (*disable)(void);
+ #ifdef CONFIG_OUTER_CACHE_SYNC
+ void (*sync)(void);
+ #endif
+- void (*set_debug)(unsigned long);
+ void (*resume)(void);
++
++ /* This is an ARM L2C thing */
++ void (*write_sec)(unsigned long, unsigned);
+ };
+
+ extern struct outer_cache_fns outer_cache;
+
+ #ifdef CONFIG_OUTER_CACHE
+-
++/**
++ * outer_inv_range - invalidate range of outer cache lines
++ * @start: starting physical address, inclusive
++ * @end: end physical address, exclusive
++ */
+ static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
+ {
+ if (outer_cache.inv_range)
+ outer_cache.inv_range(start, end);
+ }
++
++/**
++ * outer_clean_range - clean dirty outer cache lines
++ * @start: starting physical address, inclusive
++ * @end: end physical address, exclusive
++ */
+ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
+ {
+ if (outer_cache.clean_range)
+ outer_cache.clean_range(start, end);
+ }
++
++/**
++ * outer_flush_range - clean and invalidate outer cache lines
++ * @start: starting physical address, inclusive
++ * @end: end physical address, exclusive
++ */
+ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
+ {
+ if (outer_cache.flush_range)
+ outer_cache.flush_range(start, end);
+ }
+
++/**
++ * outer_flush_all - clean and invalidate all cache lines in the outer cache
++ *
++ * Note: depending on implementation, this may not be atomic - it must
++ * only be called with interrupts disabled and no other active outer
++ * cache masters.
++ *
++ * It is intended that this function is only used by implementations
++ * needing to override the outer_cache.disable() method due to security.
++ * (Some implementations perform this as a clean followed by an invalidate.)
++ */
+ static inline void outer_flush_all(void)
+ {
+ if (outer_cache.flush_all)
+ outer_cache.flush_all();
+ }
+
+-static inline void outer_inv_all(void)
+-{
+- if (outer_cache.inv_all)
+- outer_cache.inv_all();
+-}
+-
+-static inline void outer_disable(void)
+-{
+- if (outer_cache.disable)
+- outer_cache.disable();
+-}
+-
++/**
++ * outer_disable - clean, invalidate and disable the outer cache
++ *
++ * Disable the outer cache, ensuring that any data contained in the outer
++ * cache is pushed out to lower levels of system memory. The note and
++ * conditions above concerning outer_flush_all() applies here.
++ */
++extern void outer_disable(void);
++
++/**
++ * outer_resume - restore the cache configuration and re-enable outer cache
++ *
++ * Restore any configuration that the cache had when previously enabled,
++ * and re-enable the outer cache.
++ */
+ static inline void outer_resume(void)
+ {
+ if (outer_cache.resume)
+@@ -90,13 +122,18 @@
+ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
+ { }
+ static inline void outer_flush_all(void) { }
+-static inline void outer_inv_all(void) { }
+ static inline void outer_disable(void) { }
+ static inline void outer_resume(void) { }
+
+ #endif
+
+ #ifdef CONFIG_OUTER_CACHE_SYNC
++/**
++ * outer_sync - perform a sync point for outer cache
++ *
++ * Ensure that all outer cache operations are complete and any store
++ * buffers are drained.
++ */
+ static inline void outer_sync(void)
+ {
+ if (outer_cache.sync)
+diff -Nur linux-3.14.36/arch/arm/include/asm/pmu.h linux-openelec/arch/arm/include/asm/pmu.h
+--- linux-3.14.36/arch/arm/include/asm/pmu.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/pmu.h 2015-05-06 12:05:43.000000000 -0500
+@@ -62,9 +62,19 @@
+ raw_spinlock_t pmu_lock;
+ };
+
++struct cpupmu_regs {
++ u32 pmc;
++ u32 pmcntenset;
++ u32 pmuseren;
++ u32 pmintenset;
++ u32 pmxevttype[8];
++ u32 pmxevtcnt[8];
++};
++
+ struct arm_pmu {
+ struct pmu pmu;
+ cpumask_t active_irqs;
++ cpumask_t valid_cpus;
+ char *name;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct perf_event *event);
+@@ -81,6 +91,8 @@
+ int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+ void (*free_irq)(struct arm_pmu *);
+ int (*map_event)(struct perf_event *event);
++ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
++ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+diff -Nur linux-3.14.36/arch/arm/include/asm/psci.h linux-openelec/arch/arm/include/asm/psci.h
+--- linux-3.14.36/arch/arm/include/asm/psci.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/psci.h 2015-05-06 12:05:43.000000000 -0500
+@@ -16,6 +16,10 @@
+
+ #define PSCI_POWER_STATE_TYPE_STANDBY 0
+ #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
++#define PSCI_POWER_STATE_AFFINITY_LEVEL0 0
++#define PSCI_POWER_STATE_AFFINITY_LEVEL1 1
++#define PSCI_POWER_STATE_AFFINITY_LEVEL2 2
++#define PSCI_POWER_STATE_AFFINITY_LEVEL3 3
+
+ struct psci_power_state {
+ u16 id;
+@@ -42,4 +46,12 @@
+ static inline bool psci_smp_available(void) { return false; }
+ #endif
+
++#ifdef CONFIG_ARM_PSCI
++extern int psci_probe(void);
++#else
++static inline int psci_probe(void)
++{
++ return -ENODEV;
++}
++#endif
+ #endif /* __ASM_ARM_PSCI_H */
+diff -Nur linux-3.14.36/arch/arm/include/asm/topology.h linux-openelec/arch/arm/include/asm/topology.h
+--- linux-3.14.36/arch/arm/include/asm/topology.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/include/asm/topology.h 2015-05-06 12:05:43.000000000 -0500
+@@ -26,11 +26,14 @@
+ void init_cpu_topology(void);
+ void store_cpu_topology(unsigned int cpuid);
+ const struct cpumask *cpu_coregroup_mask(int cpu);
++int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+
+ #else
+
+ static inline void init_cpu_topology(void) { }
+ static inline void store_cpu_topology(unsigned int cpuid) { }
++static inline int cluster_to_logical_mask(unsigned int socket_id,
++ cpumask_t *cluster_mask) { return -EINVAL; }
+
+ #endif
+
+diff -Nur linux-3.14.36/arch/arm/Kconfig linux-openelec/arch/arm/Kconfig
+--- linux-3.14.36/arch/arm/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/Kconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -1216,19 +1216,6 @@
+ register of the Cortex-A9 which reduces the linefill issuing
+ capabilities of the processor.
+
+-config PL310_ERRATA_588369
+- bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
+- depends on CACHE_L2X0
+- help
+- The PL310 L2 cache controller implements three types of Clean &
+- Invalidate maintenance operations: by Physical Address
+- (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
+- They are architecturally defined to behave as the execution of a
+- clean operation followed immediately by an invalidate operation,
+- both performing to the same memory location. This functionality
+- is not correctly implemented in PL310 as clean lines are not
+- invalidated as a result of these operations.
+-
+ config ARM_ERRATA_643719
+ bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+ depends on CPU_V7 && SMP
+@@ -1251,17 +1238,6 @@
+ tables. The workaround changes the TLB flushing routines to invalidate
+ entries regardless of the ASID.
+
+-config PL310_ERRATA_727915
+- bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
+- depends on CACHE_L2X0
+- help
+- PL310 implements the Clean & Invalidate by Way L2 cache maintenance
+- operation (offset 0x7FC). This operation runs in background so that
+- PL310 can handle normal accesses while it is in progress. Under very
+- rare circumstances, due to this erratum, write data can be lost when
+- PL310 treats a cacheable write transaction during a Clean &
+- Invalidate by Way operation.
+-
+ config ARM_ERRATA_743622
+ bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
+ depends on CPU_V7
+@@ -1287,21 +1263,6 @@
+ operation is received by a CPU before the ICIALLUIS has completed,
+ potentially leading to corrupted entries in the cache or TLB.
+
+-config PL310_ERRATA_753970
+- bool "PL310 errata: cache sync operation may be faulty"
+- depends on CACHE_PL310
+- help
+- This option enables the workaround for the 753970 PL310 (r3p0) erratum.
+-
+- Under some condition the effect of cache sync operation on
+- the store buffer still remains when the operation completes.
+- This means that the store buffer is always asked to drain and
+- this prevents it from merging any further writes. The workaround
+- is to replace the normal offset of cache sync operation (0x730)
+- by another offset targeting an unmapped PL310 register 0x740.
+- This has the same effect as the cache sync operation: store buffer
+- drain and waiting for all buffers empty.
+-
+ config ARM_ERRATA_754322
+ bool "ARM errata: possible faulty MMU translations following an ASID switch"
+ depends on CPU_V7
+@@ -1350,18 +1311,6 @@
+ relevant cache maintenance functions and sets a specific bit
+ in the diagnostic control register of the SCU.
+
+-config PL310_ERRATA_769419
+- bool "PL310 errata: no automatic Store Buffer drain"
+- depends on CACHE_L2X0
+- help
+- On revisions of the PL310 prior to r3p2, the Store Buffer does
+- not automatically drain. This can cause normal, non-cacheable
+- writes to be retained when the memory system is idle, leading
+- to suboptimal I/O performance for drivers using coherent DMA.
+- This option adds a write barrier to the cpu_idle loop so that,
+- on systems with an outer cache, the store buffer is drained
+- explicitly.
+-
+ config ARM_ERRATA_775420
+ bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
+ depends on CPU_V7
+@@ -1391,6 +1340,29 @@
+ loop buffer may deliver incorrect instructions. This
+ workaround disables the loop buffer to avoid the erratum.
+
++config ARM_ERRATA_794072
++ bool "ARM errata: A short loop including a DMB instruction might cause a denial of service"
++ depends on CPU_V7 && SMP
++ help
++ This option enables the workaround for the 794072 Cortex-A9
++ (all revisions). A processor which continuously executes a short
++ loop containing a DMB instruction might prevent a CP15 operation
++ broadcast by another processor making further progress, causing
++ a denial of service. This erratum can be worked around by setting
++ bit[4] of the undocumented Diagnostic Control Register to 1.
++
++config ARM_ERRATA_761320
++ bool "Full cache line writes to the same memory region from at least two processors might deadlock processor"
++ depends on CPU_V7 && SMP
++ help
++ This option enables the workaround for the 761320 Cortex-A9 (r0..r3).
++ Under very rare circumstances, full cache line writes
++ from (at least) 2 processors on cache lines in hazard with
++ other requests may cause arbitration issues in the SCU,
++ leading to processor deadlock. This erratum can be
++ worked around by setting bit[21] of the undocumented
++ Diagnostic Control Register to 1.
++
+ endmenu
+
+ source "arch/arm/common/Kconfig"
+@@ -1835,6 +1807,7 @@
+ range 11 64 if ARCH_SHMOBILE_LEGACY
+ default "12" if SOC_AM33XX
+ default "9" if SA1111 || ARCH_EFM32
++ default "14" if ARCH_MXC
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+diff -Nur linux-3.14.36/arch/arm/kernel/perf_event.c linux-openelec/arch/arm/kernel/perf_event.c
+--- linux-3.14.36/arch/arm/kernel/perf_event.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/kernel/perf_event.c 2015-05-06 12:05:43.000000000 -0500
+@@ -12,6 +12,7 @@
+ */
+ #define pr_fmt(fmt) "hw perfevents: " fmt
+
++#include <linux/cpumask.h>
+ #include <linux/kernel.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+@@ -86,6 +87,9 @@
+ return armpmu_map_cache_event(cache_map, config);
+ case PERF_TYPE_RAW:
+ return armpmu_map_raw_event(raw_event_mask, config);
++ default:
++ if (event->attr.type >= PERF_TYPE_MAX)
++ return armpmu_map_raw_event(raw_event_mask, config);
+ }
+
+ return -ENOENT;
+@@ -159,6 +163,8 @@
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
++ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
++ return;
+ /*
+ * ARM pmu always has to update the counter, so ignore
+ * PERF_EF_UPDATE, see comments in armpmu_start().
+@@ -175,6 +181,8 @@
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
++ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
++ return;
+ /*
+ * ARM pmu always has to reprogram the period, so ignore
+ * PERF_EF_RELOAD, see the comment below.
+@@ -202,6 +210,9 @@
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
++ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
++ return;
++
+ armpmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+@@ -218,6 +229,10 @@
+ int idx;
+ int err = 0;
+
++ /* An event following a process won't be stopped earlier */
++ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
++ return 0;
++
+ perf_pmu_disable(event->pmu);
+
+ /* If we don't have a space for the counter then finish early. */
+@@ -419,6 +434,10 @@
+ int err = 0;
+ atomic_t *active_events = &armpmu->active_events;
+
++ if (event->cpu != -1 &&
++ !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
++ return -ENOENT;
++
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+diff -Nur linux-3.14.36/arch/arm/kernel/perf_event_cpu.c linux-openelec/arch/arm/kernel/perf_event_cpu.c
+--- linux-3.14.36/arch/arm/kernel/perf_event_cpu.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/kernel/perf_event_cpu.c 2015-05-06 12:05:43.000000000 -0500
+@@ -19,6 +19,7 @@
+ #define pr_fmt(fmt) "CPU PMU: " fmt
+
+ #include <linux/bitmap.h>
++#include <linux/cpu_pm.h>
+ #include <linux/export.h>
+ #include <linux/kernel.h>
+ #include <linux/of.h>
+@@ -31,33 +32,36 @@
+ #include <asm/pmu.h>
+
+ /* Set at runtime when we know what CPU type we are. */
+-static struct arm_pmu *cpu_pmu;
++static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
+
+ static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+ static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+
++static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
++
+ /*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+ const char *perf_pmu_name(void)
+ {
+- if (!cpu_pmu)
++ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
++ if (!pmu)
+ return NULL;
+
+- return cpu_pmu->name;
++ return pmu->name;
+ }
+ EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+ int perf_num_counters(void)
+ {
+- int max_events = 0;
++ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+
+- if (cpu_pmu != NULL)
+- max_events = cpu_pmu->num_events;
++ if (!pmu)
++ return 0;
+
+- return max_events;
++ return pmu->num_events;
+ }
+ EXPORT_SYMBOL_GPL(perf_num_counters);
+
+@@ -75,11 +79,13 @@
+ {
+ int i, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
++ int cpu = -1;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+ for (i = 0; i < irqs; ++i) {
+- if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
++ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
++ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+@@ -91,6 +97,7 @@
+ {
+ int i, err, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
++ int cpu = -1;
+
+ if (!pmu_device)
+ return -ENODEV;
+@@ -103,6 +110,7 @@
+
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
++ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ irq = platform_get_irq(pmu_device, i);
+ if (irq < 0)
+ continue;
+@@ -112,7 +120,7 @@
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
++ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+@@ -127,7 +135,7 @@
+ return err;
+ }
+
+- cpumask_set_cpu(i, &cpu_pmu->active_irqs);
++ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+ }
+
+ return 0;
+@@ -136,7 +144,7 @@
+ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ int cpu;
+- for_each_possible_cpu(cpu) {
++ for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
+ struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+ events->events = per_cpu(hw_events, cpu);
+ events->used_mask = per_cpu(used_mask, cpu);
+@@ -149,7 +157,7 @@
+
+ /* Ensure the PMU has sane values out of reset. */
+ if (cpu_pmu->reset)
+- on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
++ on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
+ }
+
+ /*
+@@ -161,21 +169,46 @@
+ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+ {
++ struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
++
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+- if (cpu_pmu && cpu_pmu->reset)
+- cpu_pmu->reset(cpu_pmu);
++ if (pmu && pmu->reset)
++ pmu->reset(pmu);
+ else
+ return NOTIFY_DONE;
+
+ return NOTIFY_OK;
+ }
+
++static int cpu_pmu_pm_notify(struct notifier_block *b,
++ unsigned long action, void *hcpu)
++{
++ int cpu = smp_processor_id();
++ struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
++ struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
++
++ if (!pmu)
++ return NOTIFY_DONE;
++
++ if (action == CPU_PM_ENTER && pmu->save_regs) {
++ pmu->save_regs(pmu, pmuregs);
++ } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
++ pmu->restore_regs(pmu, pmuregs);
++ }
++
++ return NOTIFY_OK;
++}
++
+ static struct notifier_block cpu_pmu_hotplug_notifier = {
+ .notifier_call = cpu_pmu_notify,
+ };
+
++static struct notifier_block cpu_pmu_pm_notifier = {
++ .notifier_call = cpu_pmu_pm_notify,
++};
++
+ /*
+ * PMU platform driver and devicetree bindings.
+ */
+@@ -247,6 +280,9 @@
+ }
+ }
+
++ /* assume PMU support all the CPUs in this case */
++ cpumask_setall(&pmu->valid_cpus);
++
+ put_cpu();
+ return ret;
+ }
+@@ -254,15 +290,10 @@
+ static int cpu_pmu_device_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *of_id;
+- const int (*init_fn)(struct arm_pmu *);
+ struct device_node *node = pdev->dev.of_node;
+ struct arm_pmu *pmu;
+- int ret = -ENODEV;
+-
+- if (cpu_pmu) {
+- pr_info("attempt to register multiple PMU devices!");
+- return -ENOSPC;
+- }
++ int ret = 0;
++ int cpu;
+
+ pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+ if (!pmu) {
+@@ -271,8 +302,28 @@
+ }
+
+ if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
+- init_fn = of_id->data;
+- ret = init_fn(pmu);
++ smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
++ struct device_node *ncluster;
++ int cluster = -1;
++ cpumask_t sibling_mask;
++
++ ncluster = of_parse_phandle(node, "cluster", 0);
++ if (ncluster) {
++ int len;
++ const u32 *hwid;
++ hwid = of_get_property(ncluster, "reg", &len);
++ if (hwid && len == 4)
++ cluster = be32_to_cpup(hwid);
++ }
++ /* set sibling mask to all cpu mask if socket is not specified */
++ if (cluster == -1 ||
++ cluster_to_logical_mask(cluster, &sibling_mask))
++ cpumask_setall(&sibling_mask);
++
++ smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
++
++ /* now set the valid_cpus after init */
++ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
+ } else {
+ ret = probe_current_pmu(pmu);
+ }
+@@ -282,10 +333,12 @@
+ goto out_free;
+ }
+
+- cpu_pmu = pmu;
+- cpu_pmu->plat_device = pdev;
+- cpu_pmu_init(cpu_pmu);
+- ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
++ for_each_cpu_mask(cpu, pmu->valid_cpus)
++ per_cpu(cpu_pmu, cpu) = pmu;
++
++ pmu->plat_device = pdev;
++ cpu_pmu_init(pmu);
++ ret = armpmu_register(pmu, -1);
+
+ if (!ret)
+ return 0;
+@@ -314,9 +367,17 @@
+ if (err)
+ return err;
+
++ err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
++ if (err) {
++ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
++ return err;
++ }
++
+ err = platform_driver_register(&cpu_pmu_driver);
+- if (err)
++ if (err) {
++ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
++ }
+
+ return err;
+ }
+diff -Nur linux-3.14.36/arch/arm/kernel/perf_event_v7.c linux-openelec/arch/arm/kernel/perf_event_v7.c
+--- linux-3.14.36/arch/arm/kernel/perf_event_v7.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/kernel/perf_event_v7.c 2015-05-06 12:05:43.000000000 -0500
+@@ -950,6 +950,51 @@
+ }
+ #endif
+
++static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
++ struct cpupmu_regs *regs)
++{
++ unsigned int cnt;
++ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
++ if (!(regs->pmc & ARMV7_PMNC_E))
++ return;
++
++ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
++ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
++ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
++ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
++ for (cnt = ARMV7_IDX_COUNTER0;
++ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
++ armv7_pmnc_select_counter(cnt);
++ asm volatile("mrc p15, 0, %0, c9, c13, 1"
++ : "=r"(regs->pmxevttype[cnt]));
++ asm volatile("mrc p15, 0, %0, c9, c13, 2"
++ : "=r"(regs->pmxevtcnt[cnt]));
++ }
++ return;
++}
++
++static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
++ struct cpupmu_regs *regs)
++{
++ unsigned int cnt;
++ if (!(regs->pmc & ARMV7_PMNC_E))
++ return;
++
++ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
++ asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
++ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
++ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
++ for (cnt = ARMV7_IDX_COUNTER0;
++ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
++ armv7_pmnc_select_counter(cnt);
++ asm volatile("mcr p15, 0, %0, c9, c13, 1"
++ : : "r"(regs->pmxevttype[cnt]));
++ asm volatile("mcr p15, 0, %0, c9, c13, 2"
++ : : "r"(regs->pmxevtcnt[cnt]));
++ }
++ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
++}
++
+ static void armv7pmu_enable_event(struct perf_event *event)
+ {
+ unsigned long flags;
+@@ -1223,6 +1268,8 @@
+ cpu_pmu->start = armv7pmu_start;
+ cpu_pmu->stop = armv7pmu_stop;
+ cpu_pmu->reset = armv7pmu_reset;
++ cpu_pmu->save_regs = armv7pmu_save_regs;
++ cpu_pmu->restore_regs = armv7pmu_restore_regs;
+ cpu_pmu->max_period = (1LLU << 32) - 1;
+ };
+
+@@ -1240,7 +1287,7 @@
+ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A8";
++ cpu_pmu->name = "ARMv7_Cortex_A8";
+ cpu_pmu->map_event = armv7_a8_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
+@@ -1249,7 +1296,7 @@
+ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A9";
++ cpu_pmu->name = "ARMv7_Cortex_A9";
+ cpu_pmu->map_event = armv7_a9_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
+@@ -1258,7 +1305,7 @@
+ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A5";
++ cpu_pmu->name = "ARMv7_Cortex_A5";
+ cpu_pmu->map_event = armv7_a5_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
+@@ -1267,7 +1314,7 @@
+ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A15";
++ cpu_pmu->name = "ARMv7_Cortex_A15";
+ cpu_pmu->map_event = armv7_a15_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+@@ -1277,7 +1324,7 @@
+ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A7";
++ cpu_pmu->name = "ARMv7_Cortex_A7";
+ cpu_pmu->map_event = armv7_a7_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+diff -Nur linux-3.14.36/arch/arm/kernel/process.c linux-openelec/arch/arm/kernel/process.c
+--- linux-3.14.36/arch/arm/kernel/process.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/kernel/process.c 2015-07-24 18:03:28.436842002 -0500
+@@ -172,8 +172,10 @@
+ */
+ void arch_cpu_idle(void)
+ {
++ idle_notifier_call_chain(IDLE_START);
+ if (cpuidle_idle_call())
+ default_idle();
++ idle_notifier_call_chain(IDLE_END);
+ }
+
+ /*
+diff -Nur linux-3.14.36/arch/arm/kernel/process.c.orig linux-openelec/arch/arm/kernel/process.c.orig
+--- linux-3.14.36/arch/arm/kernel/process.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/kernel/process.c.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,514 @@
++/*
++ * linux/arch/arm/kernel/process.c
++ *
++ * Copyright (C) 1996-2000 Russell King - Converted to ARM.
++ * Original Copyright (C) 1995 Linus Torvalds
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include <stdarg.h>
++
++#include <linux/export.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/stddef.h>
++#include <linux/unistd.h>
++#include <linux/user.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/interrupt.h>
++#include <linux/kallsyms.h>
++#include <linux/init.h>
++#include <linux/cpu.h>
++#include <linux/elfcore.h>
++#include <linux/pm.h>
++#include <linux/tick.h>
++#include <linux/utsname.h>
++#include <linux/uaccess.h>
++#include <linux/random.h>
++#include <linux/hw_breakpoint.h>
++#include <linux/cpuidle.h>
++#include <linux/leds.h>
++#include <linux/reboot.h>
++
++#include <asm/cacheflush.h>
++#include <asm/idmap.h>
++#include <asm/processor.h>
++#include <asm/thread_notify.h>
++#include <asm/stacktrace.h>
++#include <asm/mach/time.h>
++#include <asm/tls.h>
++
++#ifdef CONFIG_CC_STACKPROTECTOR
++#include <linux/stackprotector.h>
++unsigned long __stack_chk_guard __read_mostly;
++EXPORT_SYMBOL(__stack_chk_guard);
++#endif
++
++static const char *processor_modes[] = {
++ "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
++ "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
++ "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
++ "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
++};
++
++static const char *isa_modes[] = {
++ "ARM" , "Thumb" , "Jazelle", "ThumbEE"
++};
++
++extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
++typedef void (*phys_reset_t)(unsigned long);
++
++/*
++ * A temporary stack to use for CPU reset. This is static so that we
++ * don't clobber it with the identity mapping. When running with this
++ * stack, any references to the current task *will not work* so you
++ * should really do as little as possible before jumping to your reset
++ * code.
++ */
++static u64 soft_restart_stack[16];
++
++static void __soft_restart(void *addr)
++{
++ phys_reset_t phys_reset;
++
++ /* Take out a flat memory mapping. */
++ setup_mm_for_reboot();
++
++ /* Clean and invalidate caches */
++ flush_cache_all();
++
++ /* Turn off caching */
++ cpu_proc_fin();
++
++ /* Push out any further dirty data, and ensure cache is empty */
++ flush_cache_all();
++
++ /* Switch to the identity mapping. */
++ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
++ phys_reset((unsigned long)addr);
++
++ /* Should never get here. */
++ BUG();
++}
++
++void soft_restart(unsigned long addr)
++{
++ u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
++
++ /* Disable interrupts first */
++ local_irq_disable();
++ local_fiq_disable();
++
++ /* Disable the L2 if we're the last man standing. */
++ if (num_online_cpus() == 1)
++ outer_disable();
++
++ /* Change to the new stack and continue with the reset. */
++ call_with_stack(__soft_restart, (void *)addr, (void *)stack);
++
++ /* Should never get here. */
++ BUG();
++}
++
++static void null_restart(enum reboot_mode reboot_mode, const char *cmd)
++{
++}
++
++/*
++ * Function pointers to optional machine specific functions
++ */
++void (*pm_power_off)(void);
++EXPORT_SYMBOL(pm_power_off);
++
++void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart;
++EXPORT_SYMBOL_GPL(arm_pm_restart);
++
++/*
++ * This is our default idle handler.
++ */
++
++void (*arm_pm_idle)(void);
++
++static void default_idle(void)
++{
++ if (arm_pm_idle)
++ arm_pm_idle();
++ else
++ cpu_do_idle();
++ local_irq_enable();
++}
++
++void arch_cpu_idle_prepare(void)
++{
++ local_fiq_enable();
++}
++
++void arch_cpu_idle_enter(void)
++{
++ ledtrig_cpu(CPU_LED_IDLE_START);
++#ifdef CONFIG_PL310_ERRATA_769419
++ wmb();
++#endif
++}
++
++void arch_cpu_idle_exit(void)
++{
++ ledtrig_cpu(CPU_LED_IDLE_END);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void arch_cpu_idle_dead(void)
++{
++ cpu_die();
++}
++#endif
++
++/*
++ * Called from the core idle loop.
++ */
++void arch_cpu_idle(void)
++{
++ idle_notifier_call_chain(IDLE_START);
++ if (cpuidle_idle_call())
++ default_idle();
++ idle_notifier_call_chain(IDLE_END);
++}
++
++/*
++ * Called by kexec, immediately prior to machine_kexec().
++ *
++ * This must completely disable all secondary CPUs; simply causing those CPUs
++ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
++ * kexec'd kernel to use any and all RAM as it sees fit, without having to
++ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
++ * functionality embodied in disable_nonboot_cpus() to achieve this.
++ */
++void machine_shutdown(void)
++{
++ disable_nonboot_cpus();
++}
++
++/*
++ * Halting simply requires that the secondary CPUs stop performing any
++ * activity (executing tasks, handling interrupts). smp_send_stop()
++ * achieves this.
++ */
++void machine_halt(void)
++{
++ local_irq_disable();
++ smp_send_stop();
++
++ local_irq_disable();
++ while (1);
++}
++
++/*
++ * Power-off simply requires that the secondary CPUs stop performing any
++ * activity (executing tasks, handling interrupts). smp_send_stop()
++ * achieves this. When the system power is turned off, it will take all CPUs
++ * with it.
++ */
++void machine_power_off(void)
++{
++ local_irq_disable();
++ smp_send_stop();
++
++ if (pm_power_off)
++ pm_power_off();
++}
++
++/*
++ * Restart requires that the secondary CPUs stop performing any activity
++ * while the primary CPU resets the system. Systems with a single CPU can
++ * use soft_restart() as their machine descriptor's .restart hook, since that
++ * will cause the only available CPU to reset. Systems with multiple CPUs must
++ * provide a HW restart implementation, to ensure that all CPUs reset at once.
++ * This is required so that any code running after reset on the primary CPU
++ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
++ * executing pre-reset code, and using RAM that the primary CPU's code wishes
++ * to use. Implementing such co-ordination would be essentially impossible.
++ */
++void machine_restart(char *cmd)
++{
++ local_irq_disable();
++ smp_send_stop();
++
++ arm_pm_restart(reboot_mode, cmd);
++
++ /* Give a grace period for failure to restart of 1s */
++ mdelay(1000);
++
++ /* Whoops - the platform was unable to reboot. Tell the user! */
++ printk("Reboot failed -- System halted\n");
++ local_irq_disable();
++ while (1);
++}
++
++void __show_regs(struct pt_regs *regs)
++{
++ unsigned long flags;
++ char buf[64];
++
++ show_regs_print_info(KERN_DEFAULT);
++
++ print_symbol("PC is at %s\n", instruction_pointer(regs));
++ print_symbol("LR is at %s\n", regs->ARM_lr);
++ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
++ "sp : %08lx ip : %08lx fp : %08lx\n",
++ regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
++ regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
++ printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
++ regs->ARM_r10, regs->ARM_r9,
++ regs->ARM_r8);
++ printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
++ regs->ARM_r7, regs->ARM_r6,
++ regs->ARM_r5, regs->ARM_r4);
++ printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
++ regs->ARM_r3, regs->ARM_r2,
++ regs->ARM_r1, regs->ARM_r0);
++
++ flags = regs->ARM_cpsr;
++ buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
++ buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
++ buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
++ buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
++ buf[4] = '\0';
++
++ printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
++ buf, interrupts_enabled(regs) ? "n" : "ff",
++ fast_interrupts_enabled(regs) ? "n" : "ff",
++ processor_modes[processor_mode(regs)],
++ isa_modes[isa_mode(regs)],
++ get_fs() == get_ds() ? "kernel" : "user");
++#ifdef CONFIG_CPU_CP15
++ {
++ unsigned int ctrl;
++
++ buf[0] = '\0';
++#ifdef CONFIG_CPU_CP15_MMU
++ {
++ unsigned int transbase, dac;
++ asm("mrc p15, 0, %0, c2, c0\n\t"
++ "mrc p15, 0, %1, c3, c0\n"
++ : "=r" (transbase), "=r" (dac));
++ snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
++ transbase, dac);
++ }
++#endif
++ asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
++
++ printk("Control: %08x%s\n", ctrl, buf);
++ }
++#endif
++}
++
++void show_regs(struct pt_regs * regs)
++{
++ printk("\n");
++ __show_regs(regs);
++ dump_stack();
++}
++
++ATOMIC_NOTIFIER_HEAD(thread_notify_head);
++
++EXPORT_SYMBOL_GPL(thread_notify_head);
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++ thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
++}
++
++void flush_thread(void)
++{
++ struct thread_info *thread = current_thread_info();
++ struct task_struct *tsk = current;
++
++ flush_ptrace_hw_breakpoint(tsk);
++
++ memset(thread->used_cp, 0, sizeof(thread->used_cp));
++ memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
++ memset(&thread->fpstate, 0, sizeof(union fp_state));
++
++ thread_notify(THREAD_NOTIFY_FLUSH, thread);
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++}
++
++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
++
++int
++copy_thread(unsigned long clone_flags, unsigned long stack_start,
++ unsigned long stk_sz, struct task_struct *p)
++{
++ struct thread_info *thread = task_thread_info(p);
++ struct pt_regs *childregs = task_pt_regs(p);
++
++ memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
++
++ if (likely(!(p->flags & PF_KTHREAD))) {
++ *childregs = *current_pt_regs();
++ childregs->ARM_r0 = 0;
++ if (stack_start)
++ childregs->ARM_sp = stack_start;
++ } else {
++ memset(childregs, 0, sizeof(struct pt_regs));
++ thread->cpu_context.r4 = stk_sz;
++ thread->cpu_context.r5 = stack_start;
++ childregs->ARM_cpsr = SVC_MODE;
++ }
++ thread->cpu_context.pc = (unsigned long)ret_from_fork;
++ thread->cpu_context.sp = (unsigned long)childregs;
++
++ clear_ptrace_hw_breakpoint(p);
++
++ if (clone_flags & CLONE_SETTLS)
++ thread->tp_value[0] = childregs->ARM_r3;
++ thread->tp_value[1] = get_tpuser();
++
++ thread_notify(THREAD_NOTIFY_COPY, thread);
++
++ return 0;
++}
++
++/*
++ * Fill in the task's elfregs structure for a core dump.
++ */
++int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
++{
++ elf_core_copy_regs(elfregs, task_pt_regs(t));
++ return 1;
++}
++
++/*
++ * fill in the fpe structure for a core dump...
++ */
++int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
++{
++ struct thread_info *thread = current_thread_info();
++ int used_math = thread->used_cp[1] | thread->used_cp[2];
++
++ if (used_math)
++ memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
++
++ return used_math != 0;
++}
++EXPORT_SYMBOL(dump_fpu);
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ struct stackframe frame;
++ unsigned long stack_page;
++ int count = 0;
++ if (!p || p == current || p->state == TASK_RUNNING)
++ return 0;
++
++ frame.fp = thread_saved_fp(p);
++ frame.sp = thread_saved_sp(p);
++ frame.lr = 0; /* recovered from the stack */
++ frame.pc = thread_saved_pc(p);
++ stack_page = (unsigned long)task_stack_page(p);
++ do {
++ if (frame.sp < stack_page ||
++ frame.sp >= stack_page + THREAD_SIZE ||
++ unwind_frame(&frame) < 0)
++ return 0;
++ if (!in_sched_functions(frame.pc))
++ return frame.pc;
++ } while (count ++ < 16);
++ return 0;
++}
++
++unsigned long arch_randomize_brk(struct mm_struct *mm)
++{
++ unsigned long range_end = mm->brk + 0x02000000;
++ return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
++}
++
++#ifdef CONFIG_MMU
++#ifdef CONFIG_KUSER_HELPERS
++/*
++ * The vectors page is always readable from user space for the
++ * atomic helpers. Insert it into the gate_vma so that it is visible
++ * through ptrace and /proc/<pid>/mem.
++ */
++static struct vm_area_struct gate_vma = {
++ .vm_start = 0xffff0000,
++ .vm_end = 0xffff0000 + PAGE_SIZE,
++ .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
++};
++
++static int __init gate_vma_init(void)
++{
++ gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
++ return 0;
++}
++arch_initcall(gate_vma_init);
++
++struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
++{
++ return &gate_vma;
++}
++
++int in_gate_area(struct mm_struct *mm, unsigned long addr)
++{
++ return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
++}
++
++int in_gate_area_no_mm(unsigned long addr)
++{
++ return in_gate_area(NULL, addr);
++}
++#define is_gate_vma(vma) ((vma) == &gate_vma)
++#else
++#define is_gate_vma(vma) 0
++#endif
++
++const char *arch_vma_name(struct vm_area_struct *vma)
++{
++ return is_gate_vma(vma) ? "[vectors]" :
++ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
++ "[sigpage]" : NULL;
++}
++
++static struct page *signal_page;
++extern struct page *get_signal_page(void);
++
++int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
++{
++ struct mm_struct *mm = current->mm;
++ unsigned long addr;
++ int ret;
++
++ if (!signal_page)
++ signal_page = get_signal_page();
++ if (!signal_page)
++ return -ENOMEM;
++
++ down_write(&mm->mmap_sem);
++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++ if (IS_ERR_VALUE(addr)) {
++ ret = addr;
++ goto up_fail;
++ }
++
++ ret = install_special_mapping(mm, addr, PAGE_SIZE,
++ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
++ &signal_page);
++
++ if (ret == 0)
++ mm->context.sigpage = addr;
++
++ up_fail:
++ up_write(&mm->mmap_sem);
++ return ret;
++}
++#endif
+diff -Nur linux-3.14.36/arch/arm/kernel/psci.c linux-openelec/arch/arm/kernel/psci.c
+--- linux-3.14.36/arch/arm/kernel/psci.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/kernel/psci.c 2015-05-06 12:05:43.000000000 -0500
+@@ -42,6 +42,7 @@
+ #define PSCI_RET_EOPNOTSUPP -1
+ #define PSCI_RET_EINVAL -2
+ #define PSCI_RET_EPERM -3
++#define PSCI_RET_EALREADYON -4
+
+ static int psci_to_linux_errno(int errno)
+ {
+@@ -54,6 +55,8 @@
+ return -EINVAL;
+ case PSCI_RET_EPERM:
+ return -EPERM;
++ case PSCI_RET_EALREADYON:
++ return -EAGAIN;
+ };
+
+ return -EINVAL;
+@@ -153,7 +156,7 @@
+ return psci_to_linux_errno(err);
+ }
+
+-static const struct of_device_id psci_of_match[] __initconst = {
++static const struct of_device_id psci_of_match[] = {
+ { .compatible = "arm,psci", },
+ {},
+ };
+@@ -208,3 +211,16 @@
+ of_node_put(np);
+ return;
+ }
++
++int psci_probe(void)
++{
++ struct device_node *np;
++ int ret = -ENODEV;
++
++ np = of_find_matching_node(NULL, psci_of_match);
++ if (np)
++ ret = 0;
++
++ of_node_put(np);
++ return ret;
++}
+diff -Nur linux-3.14.36/arch/arm/kernel/setup.c linux-openelec/arch/arm/kernel/setup.c
+--- linux-3.14.36/arch/arm/kernel/setup.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/kernel/setup.c 2015-07-24 18:03:29.292842002 -0500
+@@ -273,6 +273,19 @@
+ int aliasing_icache;
+ unsigned int id_reg, num_sets, line_size;
+
++#ifdef CONFIG_BIG_LITTLE
++ /*
++ * We expect a combination of Cortex-A15 and Cortex-A7 cores.
++ * A7 = VIPT aliasing I-cache
++ * A15 = PIPT (non-aliasing) I-cache
++ * To cater for this discrepancy, let's assume aliasing I-cache
++ * all the time. This means unneeded extra work on the A15 but
++ * only ptrace is affected which is not performance critical.
++ */
++ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
++ return 1;
++#endif
++
+ /* PIPT caches never alias. */
+ if (icache_is_pipt())
+ return 0;
+diff -Nur linux-3.14.36/arch/arm/kernel/setup.c.orig linux-openelec/arch/arm/kernel/setup.c.orig
+--- linux-3.14.36/arch/arm/kernel/setup.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/kernel/setup.c.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,1095 @@
++/*
++ * linux/arch/arm/kernel/setup.c
++ *
++ * Copyright (C) 1995-2001 Russell King
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include <linux/export.h>
++#include <linux/kernel.h>
++#include <linux/stddef.h>
++#include <linux/ioport.h>
++#include <linux/delay.h>
++#include <linux/utsname.h>
++#include <linux/initrd.h>
++#include <linux/console.h>
++#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/screen_info.h>
++#include <linux/of_platform.h>
++#include <linux/init.h>
++#include <linux/kexec.h>
++#include <linux/of_fdt.h>
++#include <linux/cpu.h>
++#include <linux/interrupt.h>
++#include <linux/smp.h>
++#include <linux/proc_fs.h>
++#include <linux/memblock.h>
++#include <linux/bug.h>
++#include <linux/compiler.h>
++#include <linux/sort.h>
++
++#include <asm/unified.h>
++#include <asm/cp15.h>
++#include <asm/cpu.h>
++#include <asm/cputype.h>
++#include <asm/elf.h>
++#include <asm/procinfo.h>
++#include <asm/psci.h>
++#include <asm/sections.h>
++#include <asm/setup.h>
++#include <asm/smp_plat.h>
++#include <asm/mach-types.h>
++#include <asm/cacheflush.h>
++#include <asm/cachetype.h>
++#include <asm/tlbflush.h>
++
++#include <asm/prom.h>
++#include <asm/mach/arch.h>
++#include <asm/mach/irq.h>
++#include <asm/mach/time.h>
++#include <asm/system_info.h>
++#include <asm/system_misc.h>
++#include <asm/traps.h>
++#include <asm/unwind.h>
++#include <asm/memblock.h>
++#include <asm/virt.h>
++
++#include "atags.h"
++
++
++#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
++char fpe_type[8];
++
++static int __init fpe_setup(char *line)
++{
++ memcpy(fpe_type, line, 8);
++ return 1;
++}
++
++__setup("fpe=", fpe_setup);
++#endif
++
++extern void paging_init(const struct machine_desc *desc);
++extern void early_paging_init(const struct machine_desc *,
++ struct proc_info_list *);
++extern void sanity_check_meminfo(void);
++extern enum reboot_mode reboot_mode;
++extern void setup_dma_zone(const struct machine_desc *desc);
++
++unsigned int processor_id;
++EXPORT_SYMBOL(processor_id);
++unsigned int __machine_arch_type __read_mostly;
++EXPORT_SYMBOL(__machine_arch_type);
++unsigned int cacheid __read_mostly;
++EXPORT_SYMBOL(cacheid);
++
++unsigned int __atags_pointer __initdata;
++
++unsigned int system_rev;
++EXPORT_SYMBOL(system_rev);
++
++unsigned int system_serial_low;
++EXPORT_SYMBOL(system_serial_low);
++
++unsigned int system_serial_high;
++EXPORT_SYMBOL(system_serial_high);
++
++unsigned int elf_hwcap __read_mostly;
++EXPORT_SYMBOL(elf_hwcap);
++
++
++#ifdef MULTI_CPU
++struct processor processor __read_mostly;
++#endif
++#ifdef MULTI_TLB
++struct cpu_tlb_fns cpu_tlb __read_mostly;
++#endif
++#ifdef MULTI_USER
++struct cpu_user_fns cpu_user __read_mostly;
++#endif
++#ifdef MULTI_CACHE
++struct cpu_cache_fns cpu_cache __read_mostly;
++#endif
++#ifdef CONFIG_OUTER_CACHE
++struct outer_cache_fns outer_cache __read_mostly;
++EXPORT_SYMBOL(outer_cache);
++#endif
++
++/*
++ * Cached cpu_architecture() result for use by assembler code.
++ * C code should use the cpu_architecture() function instead of accessing this
++ * variable directly.
++ */
++int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
++
++struct stack {
++ u32 irq[3];
++ u32 abt[3];
++ u32 und[3];
++} ____cacheline_aligned;
++
++#ifndef CONFIG_CPU_V7M
++static struct stack stacks[NR_CPUS];
++#endif
++
++char elf_platform[ELF_PLATFORM_SIZE];
++EXPORT_SYMBOL(elf_platform);
++
++static const char *cpu_name;
++static const char *machine_name;
++static char __initdata cmd_line[COMMAND_LINE_SIZE];
++const struct machine_desc *machine_desc __initdata;
++
++static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
++#define ENDIANNESS ((char)endian_test.l)
++
++DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
++
++/*
++ * Standard memory resources
++ */
++static struct resource mem_res[] = {
++ {
++ .name = "Video RAM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_MEM
++ },
++ {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_MEM
++ },
++ {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_MEM
++ }
++};
++
++#define video_ram mem_res[0]
++#define kernel_code mem_res[1]
++#define kernel_data mem_res[2]
++
++static struct resource io_res[] = {
++ {
++ .name = "reserved",
++ .start = 0x3bc,
++ .end = 0x3be,
++ .flags = IORESOURCE_IO | IORESOURCE_BUSY
++ },
++ {
++ .name = "reserved",
++ .start = 0x378,
++ .end = 0x37f,
++ .flags = IORESOURCE_IO | IORESOURCE_BUSY
++ },
++ {
++ .name = "reserved",
++ .start = 0x278,
++ .end = 0x27f,
++ .flags = IORESOURCE_IO | IORESOURCE_BUSY
++ }
++};
++
++#define lp0 io_res[0]
++#define lp1 io_res[1]
++#define lp2 io_res[2]
++
++static const char *proc_arch[] = {
++ "undefined/unknown",
++ "3",
++ "4",
++ "4T",
++ "5",
++ "5T",
++ "5TE",
++ "5TEJ",
++ "6TEJ",
++ "7",
++ "7M",
++ "?(12)",
++ "?(13)",
++ "?(14)",
++ "?(15)",
++ "?(16)",
++ "?(17)",
++};
++
++#ifdef CONFIG_CPU_V7M
++static int __get_cpu_architecture(void)
++{
++ return CPU_ARCH_ARMv7M;
++}
++#else
++static int __get_cpu_architecture(void)
++{
++ int cpu_arch;
++
++ if ((read_cpuid_id() & 0x0008f000) == 0) {
++ cpu_arch = CPU_ARCH_UNKNOWN;
++ } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
++ cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
++ } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
++ cpu_arch = (read_cpuid_id() >> 16) & 7;
++ if (cpu_arch)
++ cpu_arch += CPU_ARCH_ARMv3;
++ } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
++ unsigned int mmfr0;
++
++ /* Revised CPUID format. Read the Memory Model Feature
++ * Register 0 and check for VMSAv7 or PMSAv7 */
++ asm("mrc p15, 0, %0, c0, c1, 4"
++ : "=r" (mmfr0));
++ if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
++ (mmfr0 & 0x000000f0) >= 0x00000030)
++ cpu_arch = CPU_ARCH_ARMv7;
++ else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
++ (mmfr0 & 0x000000f0) == 0x00000020)
++ cpu_arch = CPU_ARCH_ARMv6;
++ else
++ cpu_arch = CPU_ARCH_UNKNOWN;
++ } else
++ cpu_arch = CPU_ARCH_UNKNOWN;
++
++ return cpu_arch;
++}
++#endif
++
++int __pure cpu_architecture(void)
++{
++ BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
++
++ return __cpu_architecture;
++}
++
++static int cpu_has_aliasing_icache(unsigned int arch)
++{
++ int aliasing_icache;
++ unsigned int id_reg, num_sets, line_size;
++
++#ifdef CONFIG_BIG_LITTLE
++ /*
++ * We expect a combination of Cortex-A15 and Cortex-A7 cores.
++ * A7 = VIPT aliasing I-cache
++ * A15 = PIPT (non-aliasing) I-cache
++ * To cater for this discrepancy, let's assume aliasing I-cache
++ * all the time. This means unneeded extra work on the A15 but
++ * only ptrace is affected which is not performance critical.
++ */
++ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
++ return 1;
++#endif
++
++ /* PIPT caches never alias. */
++ if (icache_is_pipt())
++ return 0;
++
++ /* arch specifies the register format */
++ switch (arch) {
++ case CPU_ARCH_ARMv7:
++ asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
++ : /* No output operands */
++ : "r" (1));
++ isb();
++ asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
++ : "=r" (id_reg));
++ line_size = 4 << ((id_reg & 0x7) + 2);
++ num_sets = ((id_reg >> 13) & 0x7fff) + 1;
++ aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
++ break;
++ case CPU_ARCH_ARMv6:
++ aliasing_icache = read_cpuid_cachetype() & (1 << 11);
++ break;
++ default:
++ /* I-cache aliases will be handled by D-cache aliasing code */
++ aliasing_icache = 0;
++ }
++
++ return aliasing_icache;
++}
++
++static void __init cacheid_init(void)
++{
++ unsigned int arch = cpu_architecture();
++
++ if (arch == CPU_ARCH_ARMv7M) {
++ cacheid = 0;
++ } else if (arch >= CPU_ARCH_ARMv6) {
++ unsigned int cachetype = read_cpuid_cachetype();
++ if ((cachetype & (7 << 29)) == 4 << 29) {
++ /* ARMv7 register format */
++ arch = CPU_ARCH_ARMv7;
++ cacheid = CACHEID_VIPT_NONALIASING;
++ switch (cachetype & (3 << 14)) {
++ case (1 << 14):
++ cacheid |= CACHEID_ASID_TAGGED;
++ break;
++ case (3 << 14):
++ cacheid |= CACHEID_PIPT;
++ break;
++ }
++ } else {
++ arch = CPU_ARCH_ARMv6;
++ if (cachetype & (1 << 23))
++ cacheid = CACHEID_VIPT_ALIASING;
++ else
++ cacheid = CACHEID_VIPT_NONALIASING;
++ }
++ if (cpu_has_aliasing_icache(arch))
++ cacheid |= CACHEID_VIPT_I_ALIASING;
++ } else {
++ cacheid = CACHEID_VIVT;
++ }
++
++ pr_info("CPU: %s data cache, %s instruction cache\n",
++ cache_is_vivt() ? "VIVT" :
++ cache_is_vipt_aliasing() ? "VIPT aliasing" :
++ cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
++ cache_is_vivt() ? "VIVT" :
++ icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
++ icache_is_vipt_aliasing() ? "VIPT aliasing" :
++ icache_is_pipt() ? "PIPT" :
++ cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
++}
++
++/*
++ * These functions re-use the assembly code in head.S, which
++ * already provide the required functionality.
++ */
++extern struct proc_info_list *lookup_processor_type(unsigned int);
++
++void __init early_print(const char *str, ...)
++{
++ extern void printascii(const char *);
++ char buf[256];
++ va_list ap;
++
++ va_start(ap, str);
++ vsnprintf(buf, sizeof(buf), str, ap);
++ va_end(ap);
++
++#ifdef CONFIG_DEBUG_LL
++ printascii(buf);
++#endif
++ printk("%s", buf);
++}
++
++static void __init cpuid_init_hwcaps(void)
++{
++ unsigned int divide_instrs, vmsa;
++
++ if (cpu_architecture() < CPU_ARCH_ARMv7)
++ return;
++
++ divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
++
++ switch (divide_instrs) {
++ case 2:
++ elf_hwcap |= HWCAP_IDIVA;
++ case 1:
++ elf_hwcap |= HWCAP_IDIVT;
++ }
++
++ /* LPAE implies atomic ldrd/strd instructions */
++ vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
++ if (vmsa >= 5)
++ elf_hwcap |= HWCAP_LPAE;
++}
++
++static void __init feat_v6_fixup(void)
++{
++ int id = read_cpuid_id();
++
++ if ((id & 0xff0f0000) != 0x41070000)
++ return;
++
++ /*
++ * HWCAP_TLS is available only on 1136 r1p0 and later,
++ * see also kuser_get_tls_init.
++ */
++ if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
++ elf_hwcap &= ~HWCAP_TLS;
++}
++
++/*
++ * cpu_init - initialise one CPU.
++ *
++ * cpu_init sets up the per-CPU stacks.
++ */
++void notrace cpu_init(void)
++{
++#ifndef CONFIG_CPU_V7M
++ unsigned int cpu = smp_processor_id();
++ struct stack *stk = &stacks[cpu];
++
++ if (cpu >= NR_CPUS) {
++ pr_crit("CPU%u: bad primary CPU number\n", cpu);
++ BUG();
++ }
++
++ /*
++ * This only works on resume and secondary cores. For booting on the
++ * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
++ */
++ set_my_cpu_offset(per_cpu_offset(cpu));
++
++ cpu_proc_init();
++
++ /*
++ * Define the placement constraint for the inline asm directive below.
++ * In Thumb-2, msr with an immediate value is not allowed.
++ */
++#ifdef CONFIG_THUMB2_KERNEL
++#define PLC "r"
++#else
++#define PLC "I"
++#endif
++
++ /*
++ * setup stacks for re-entrant exception handlers
++ */
++ __asm__ (
++ "msr cpsr_c, %1\n\t"
++ "add r14, %0, %2\n\t"
++ "mov sp, r14\n\t"
++ "msr cpsr_c, %3\n\t"
++ "add r14, %0, %4\n\t"
++ "mov sp, r14\n\t"
++ "msr cpsr_c, %5\n\t"
++ "add r14, %0, %6\n\t"
++ "mov sp, r14\n\t"
++ "msr cpsr_c, %7"
++ :
++ : "r" (stk),
++ PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
++ "I" (offsetof(struct stack, irq[0])),
++ PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
++ "I" (offsetof(struct stack, abt[0])),
++ PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
++ "I" (offsetof(struct stack, und[0])),
++ PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
++ : "r14");
++#endif
++}
++
++u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
++
++void __init smp_setup_processor_id(void)
++{
++ int i;
++ u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
++ u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
++
++ cpu_logical_map(0) = cpu;
++ for (i = 1; i < nr_cpu_ids; ++i)
++ cpu_logical_map(i) = i == cpu ? 0 : i;
++
++ /*
++ * clear __my_cpu_offset on boot CPU to avoid hang caused by
++ * using percpu variable early, for example, lockdep will
++ * access percpu variable inside lock_release
++ */
++ set_my_cpu_offset(0);
++
++ pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
++}
++
++struct mpidr_hash mpidr_hash;
++#ifdef CONFIG_SMP
++/**
++ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
++ * level in order to build a linear index from an
++ * MPIDR value. Resulting algorithm is a collision
++ * free hash carried out through shifting and ORing
++ */
++static void __init smp_build_mpidr_hash(void)
++{
++ u32 i, affinity;
++ u32 fs[3], bits[3], ls, mask = 0;
++ /*
++ * Pre-scan the list of MPIDRS and filter out bits that do
++ * not contribute to affinity levels, ie they never toggle.
++ */
++ for_each_possible_cpu(i)
++ mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
++ pr_debug("mask of set bits 0x%x\n", mask);
++ /*
++ * Find and stash the last and first bit set at all affinity levels to
++ * check how many bits are required to represent them.
++ */
++ for (i = 0; i < 3; i++) {
++ affinity = MPIDR_AFFINITY_LEVEL(mask, i);
++ /*
++ * Find the MSB bit and LSB bits position
++ * to determine how many bits are required
++ * to express the affinity level.
++ */
++ ls = fls(affinity);
++ fs[i] = affinity ? ffs(affinity) - 1 : 0;
++ bits[i] = ls - fs[i];
++ }
++ /*
++ * An index can be created from the MPIDR by isolating the
++ * significant bits at each affinity level and by shifting
++ * them in order to compress the 24 bits values space to a
++ * compressed set of values. This is equivalent to hashing
++ * the MPIDR through shifting and ORing. It is a collision free
++ * hash though not minimal since some levels might contain a number
++ * of CPUs that is not an exact power of 2 and their bit
++ * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
++ */
++ mpidr_hash.shift_aff[0] = fs[0];
++ mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
++ mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
++ (bits[1] + bits[0]);
++ mpidr_hash.mask = mask;
++ mpidr_hash.bits = bits[2] + bits[1] + bits[0];
++ pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
++ mpidr_hash.shift_aff[0],
++ mpidr_hash.shift_aff[1],
++ mpidr_hash.shift_aff[2],
++ mpidr_hash.mask,
++ mpidr_hash.bits);
++ /*
++ * 4x is an arbitrary value used to warn on a hash table much bigger
++ * than expected on most systems.
++ */
++ if (mpidr_hash_size() > 4 * num_possible_cpus())
++ pr_warn("Large number of MPIDR hash buckets detected\n");
++ sync_cache_w(&mpidr_hash);
++}
++#endif
++
++static void __init setup_processor(void)
++{
++ struct proc_info_list *list;
++
++ /*
++ * locate processor in the list of supported processor
++ * types. The linker builds this table for us from the
++ * entries in arch/arm/mm/proc-*.S
++ */
++ list = lookup_processor_type(read_cpuid_id());
++ if (!list) {
++ pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
++ read_cpuid_id());
++ while (1);
++ }
++
++ cpu_name = list->cpu_name;
++ __cpu_architecture = __get_cpu_architecture();
++
++#ifdef MULTI_CPU
++ processor = *list->proc;
++#endif
++#ifdef MULTI_TLB
++ cpu_tlb = *list->tlb;
++#endif
++#ifdef MULTI_USER
++ cpu_user = *list->user;
++#endif
++#ifdef MULTI_CACHE
++ cpu_cache = *list->cache;
++#endif
++
++ pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
++ cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
++ proc_arch[cpu_architecture()], cr_alignment);
++
++ snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
++ list->arch_name, ENDIANNESS);
++ snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
++ list->elf_name, ENDIANNESS);
++ elf_hwcap = list->elf_hwcap;
++
++ cpuid_init_hwcaps();
++
++#ifndef CONFIG_ARM_THUMB
++ elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
++#endif
++
++ erratum_a15_798181_init();
++
++ feat_v6_fixup();
++
++ cacheid_init();
++ cpu_init();
++}
++
++void __init dump_machine_table(void)
++{
++ const struct machine_desc *p;
++
++ early_print("Available machine support:\n\nID (hex)\tNAME\n");
++ for_each_machine_desc(p)
++ early_print("%08x\t%s\n", p->nr, p->name);
++
++ early_print("\nPlease check your kernel config and/or bootloader.\n");
++
++ while (true)
++ /* can't use cpu_relax() here as it may require MMU setup */;
++}
++
++int __init arm_add_memory(u64 start, u64 size)
++{
++ struct membank *bank = &meminfo.bank[meminfo.nr_banks];
++ u64 aligned_start;
++
++ if (meminfo.nr_banks >= NR_BANKS) {
++ pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
++ (long long)start);
++ return -EINVAL;
++ }
++
++ /*
++ * Ensure that start/size are aligned to a page boundary.
++ * Size is appropriately rounded down, start is rounded up.
++ */
++ size -= start & ~PAGE_MASK;
++ aligned_start = PAGE_ALIGN(start);
++
++#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
++ if (aligned_start > ULONG_MAX) {
++ pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
++ (long long)start);
++ return -EINVAL;
++ }
++
++ if (aligned_start + size > ULONG_MAX) {
++ pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
++ (long long)start);
++ /*
++ * To ensure bank->start + bank->size is representable in
++ * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
++ * This means we lose a page after masking.
++ */
++ size = ULONG_MAX - aligned_start;
++ }
++#endif
++
++ if (aligned_start < PHYS_OFFSET) {
++ if (aligned_start + size <= PHYS_OFFSET) {
++ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
++ aligned_start, aligned_start + size);
++ return -EINVAL;
++ }
++
++ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
++ aligned_start, (u64)PHYS_OFFSET);
++
++ size -= PHYS_OFFSET - aligned_start;
++ aligned_start = PHYS_OFFSET;
++ }
++
++ bank->start = aligned_start;
++ bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
++
++ /*
++ * Check whether this memory region has non-zero size or
++ * invalid node number.
++ */
++ if (bank->size == 0)
++ return -EINVAL;
++
++ meminfo.nr_banks++;
++ return 0;
++}
++
++/*
++ * Pick out the memory size. We look for mem=size@start,
++ * where start and size are "size[KkMm]"
++ */
++static int __init early_mem(char *p)
++{
++ static int usermem __initdata = 0;
++ u64 size;
++ u64 start;
++ char *endp;
++
++ /*
++ * If the user specifies memory size, we
++ * blow away any automatically generated
++ * size.
++ */
++ if (usermem == 0) {
++ usermem = 1;
++ meminfo.nr_banks = 0;
++ }
++
++ start = PHYS_OFFSET;
++ size = memparse(p, &endp);
++ if (*endp == '@')
++ start = memparse(endp + 1, NULL);
++
++ arm_add_memory(start, size);
++
++ return 0;
++}
++early_param("mem", early_mem);
++
++static void __init request_standard_resources(const struct machine_desc *mdesc)
++{
++ struct memblock_region *region;
++ struct resource *res;
++
++ kernel_code.start = virt_to_phys(_text);
++ kernel_code.end = virt_to_phys(_etext - 1);
++ kernel_data.start = virt_to_phys(_sdata);
++ kernel_data.end = virt_to_phys(_end - 1);
++
++ for_each_memblock(memory, region) {
++ res = memblock_virt_alloc(sizeof(*res), 0);
++ res->name = "System RAM";
++ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
++ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++
++ request_resource(&iomem_resource, res);
++
++ if (kernel_code.start >= res->start &&
++ kernel_code.end <= res->end)
++ request_resource(res, &kernel_code);
++ if (kernel_data.start >= res->start &&
++ kernel_data.end <= res->end)
++ request_resource(res, &kernel_data);
++ }
++
++ if (mdesc->video_start) {
++ video_ram.start = mdesc->video_start;
++ video_ram.end = mdesc->video_end;
++ request_resource(&iomem_resource, &video_ram);
++ }
++
++ /*
++ * Some machines don't have the possibility of ever
++ * possessing lp0, lp1 or lp2
++ */
++ if (mdesc->reserve_lp0)
++ request_resource(&ioport_resource, &lp0);
++ if (mdesc->reserve_lp1)
++ request_resource(&ioport_resource, &lp1);
++ if (mdesc->reserve_lp2)
++ request_resource(&ioport_resource, &lp2);
++}
++
++#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
++struct screen_info screen_info = {
++ .orig_video_lines = 30,
++ .orig_video_cols = 80,
++ .orig_video_mode = 0,
++ .orig_video_ega_bx = 0,
++ .orig_video_isVGA = 1,
++ .orig_video_points = 8
++};
++#endif
++
++static int __init customize_machine(void)
++{
++ /*
++ * customizes platform devices, or adds new ones
++ * On DT based machines, we fall back to populating the
++ * machine from the device tree, if no callback is provided,
++ * otherwise we would always need an init_machine callback.
++ */
++ if (machine_desc->init_machine)
++ machine_desc->init_machine();
++#ifdef CONFIG_OF
++ else
++ of_platform_populate(NULL, of_default_bus_match_table,
++ NULL, NULL);
++#endif
++ return 0;
++}
++arch_initcall(customize_machine);
++
++static int __init init_machine_late(void)
++{
++ if (machine_desc->init_late)
++ machine_desc->init_late();
++ return 0;
++}
++late_initcall(init_machine_late);
++
++#ifdef CONFIG_KEXEC
++static inline unsigned long long get_total_mem(void)
++{
++ unsigned long total;
++
++ total = max_low_pfn - min_low_pfn;
++ return total << PAGE_SHIFT;
++}
++
++/**
++ * reserve_crashkernel() - reserves memory are for crash kernel
++ *
++ * This function reserves memory area given in "crashkernel=" kernel command
++ * line parameter. The memory reserved is used by a dump capture kernel when
++ * primary kernel is crashing.
++ */
++static void __init reserve_crashkernel(void)
++{
++ unsigned long long crash_size, crash_base;
++ unsigned long long total_mem;
++ int ret;
++
++ total_mem = get_total_mem();
++ ret = parse_crashkernel(boot_command_line, total_mem,
++ &crash_size, &crash_base);
++ if (ret)
++ return;
++
++ ret = memblock_reserve(crash_base, crash_size);
++ if (ret < 0) {
++ pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
++ (unsigned long)crash_base);
++ return;
++ }
++
++ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
++ (unsigned long)(crash_size >> 20),
++ (unsigned long)(crash_base >> 20),
++ (unsigned long)(total_mem >> 20));
++
++ crashk_res.start = crash_base;
++ crashk_res.end = crash_base + crash_size - 1;
++ insert_resource(&iomem_resource, &crashk_res);
++}
++#else
++static inline void reserve_crashkernel(void) {}
++#endif /* CONFIG_KEXEC */
++
++static int __init meminfo_cmp(const void *_a, const void *_b)
++{
++ const struct membank *a = _a, *b = _b;
++ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
++ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
++}
++
++void __init hyp_mode_check(void)
++{
++#ifdef CONFIG_ARM_VIRT_EXT
++ sync_boot_mode();
++
++ if (is_hyp_mode_available()) {
++ pr_info("CPU: All CPU(s) started in HYP mode.\n");
++ pr_info("CPU: Virtualization extensions available.\n");
++ } else if (is_hyp_mode_mismatched()) {
++ pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
++ __boot_cpu_mode & MODE_MASK);
++ pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
++ } else
++ pr_info("CPU: All CPU(s) started in SVC mode.\n");
++#endif
++}
++
++void __init setup_arch(char **cmdline_p)
++{
++ const struct machine_desc *mdesc;
++
++ setup_processor();
++ mdesc = setup_machine_fdt(__atags_pointer);
++ if (!mdesc)
++ mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
++ machine_desc = mdesc;
++ machine_name = mdesc->name;
++
++ if (mdesc->reboot_mode != REBOOT_HARD)
++ reboot_mode = mdesc->reboot_mode;
++
++ init_mm.start_code = (unsigned long) _text;
++ init_mm.end_code = (unsigned long) _etext;
++ init_mm.end_data = (unsigned long) _edata;
++ init_mm.brk = (unsigned long) _end;
++
++ /* populate cmd_line too for later use, preserving boot_command_line */
++ strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
++ *cmdline_p = cmd_line;
++
++ parse_early_param();
++
++ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
++
++ early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
++ setup_dma_zone(mdesc);
++ sanity_check_meminfo();
++ arm_memblock_init(&meminfo, mdesc);
++
++ paging_init(mdesc);
++ request_standard_resources(mdesc);
++
++ if (mdesc->restart)
++ arm_pm_restart = mdesc->restart;
++
++ unflatten_device_tree();
++
++ arm_dt_init_cpu_maps();
++ psci_init();
++#ifdef CONFIG_SMP
++ if (is_smp()) {
++ if (!mdesc->smp_init || !mdesc->smp_init()) {
++ if (psci_smp_available())
++ smp_set_ops(&psci_smp_ops);
++ else if (mdesc->smp)
++ smp_set_ops(mdesc->smp);
++ }
++ smp_init_cpus();
++ smp_build_mpidr_hash();
++ }
++#endif
++
++ if (!is_smp())
++ hyp_mode_check();
++
++ reserve_crashkernel();
++
++#ifdef CONFIG_MULTI_IRQ_HANDLER
++ handle_arch_irq = mdesc->handle_irq;
++#endif
++
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++
++ if (mdesc->init_early)
++ mdesc->init_early();
++}
++
++
++static int __init topology_init(void)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
++ cpuinfo->cpu.hotpluggable = 1;
++ register_cpu(&cpuinfo->cpu, cpu);
++ }
++
++ return 0;
++}
++subsys_initcall(topology_init);
++
++#ifdef CONFIG_HAVE_PROC_CPU
++static int __init proc_cpu_init(void)
++{
++ struct proc_dir_entry *res;
++
++ res = proc_mkdir("cpu", NULL);
++ if (!res)
++ return -ENOMEM;
++ return 0;
++}
++fs_initcall(proc_cpu_init);
++#endif
++
++static const char *hwcap_str[] = {
++ "swp",
++ "half",
++ "thumb",
++ "26bit",
++ "fastmult",
++ "fpa",
++ "vfp",
++ "edsp",
++ "java",
++ "iwmmxt",
++ "crunch",
++ "thumbee",
++ "neon",
++ "vfpv3",
++ "vfpv3d16",
++ "tls",
++ "vfpv4",
++ "idiva",
++ "idivt",
++ "vfpd32",
++ "lpae",
++ "evtstrm",
++ NULL
++};
++
++static int c_show(struct seq_file *m, void *v)
++{
++ int i, j;
++ u32 cpuid;
++
++ for_each_online_cpu(i) {
++ /*
++ * glibc reads /proc/cpuinfo to determine the number of
++ * online processors, looking for lines beginning with
++ * "processor". Give glibc what it expects.
++ */
++ seq_printf(m, "processor\t: %d\n", i);
++ cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
++ seq_printf(m, "model name\t: %s rev %d (%s)\n",
++ cpu_name, cpuid & 15, elf_platform);
++
++ /* dump out the processor features */
++ seq_puts(m, "Features\t: ");
++
++ for (j = 0; hwcap_str[j]; j++)
++ if (elf_hwcap & (1 << j))
++ seq_printf(m, "%s ", hwcap_str[j]);
++
++ seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
++ seq_printf(m, "CPU architecture: %s\n",
++ proc_arch[cpu_architecture()]);
++
++ if ((cpuid & 0x0008f000) == 0x00000000) {
++ /* pre-ARM7 */
++ seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
++ } else {
++ if ((cpuid & 0x0008f000) == 0x00007000) {
++ /* ARM7 */
++ seq_printf(m, "CPU variant\t: 0x%02x\n",
++ (cpuid >> 16) & 127);
++ } else {
++ /* post-ARM7 */
++ seq_printf(m, "CPU variant\t: 0x%x\n",
++ (cpuid >> 20) & 15);
++ }
++ seq_printf(m, "CPU part\t: 0x%03x\n",
++ (cpuid >> 4) & 0xfff);
++ }
++ seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
++ }
++
++ seq_printf(m, "Hardware\t: %s\n", machine_name);
++ seq_printf(m, "Revision\t: %04x\n", system_rev);
++ seq_printf(m, "Serial\t\t: %08x%08x\n",
++ system_serial_high, system_serial_low);
++
++ return 0;
++}
++
++static void *c_start(struct seq_file *m, loff_t *pos)
++{
++ return *pos < 1 ? (void *)1 : NULL;
++}
++
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
++{
++ ++*pos;
++ return NULL;
++}
++
++static void c_stop(struct seq_file *m, void *v)
++{
++}
++
++const struct seq_operations cpuinfo_op = {
++ .start = c_start,
++ .next = c_next,
++ .stop = c_stop,
++ .show = c_show
++};
+diff -Nur linux-3.14.36/arch/arm/kernel/topology.c linux-openelec/arch/arm/kernel/topology.c
+--- linux-3.14.36/arch/arm/kernel/topology.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/kernel/topology.c 2015-05-06 12:05:43.000000000 -0500
+@@ -267,6 +267,33 @@
+ }
+
+ /*
++ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
++ * @socket_id: cluster HW identifier
++ * @cluster_mask: the cpumask location to be initialized, modified by the
++ * function only if return value == 0
++ *
++ * Return:
++ *
++ * 0 on success
++ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
++ */
++int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
++{
++ int cpu;
++
++ if (!cluster_mask)
++ return -EINVAL;
++
++ for_each_online_cpu(cpu)
++ if (socket_id == topology_physical_package_id(cpu)) {
++ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevent simultaneous write access to cpu_topology array
+ */
+diff -Nur linux-3.14.36/arch/arm/lib/bitops.h linux-openelec/arch/arm/lib/bitops.h
+--- linux-3.14.36/arch/arm/lib/bitops.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/lib/bitops.h 2015-05-06 12:05:43.000000000 -0500
+@@ -37,6 +37,11 @@
+ add r1, r1, r0, lsl #2 @ Get word offset
+ mov r3, r2, lsl r3 @ create mask
+ smp_dmb
++#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
++ .arch_extension mp
++ ALT_SMP(W(pldw) [r1])
++ ALT_UP(W(nop))
++#endif
+ 1: ldrex r2, [r1]
+ ands r0, r2, r3 @ save old value of bit
+ \instr r2, r2, r3 @ toggle bit
+diff -Nur linux-3.14.36/arch/arm/mach-berlin/berlin.c linux-openelec/arch/arm/mach-berlin/berlin.c
+--- linux-3.14.36/arch/arm/mach-berlin/berlin.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-berlin/berlin.c 2015-05-06 12:05:43.000000000 -0500
+@@ -24,7 +24,7 @@
+ * with DT probing for L2CCs, berlin_init_machine can be removed.
+ * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
+ */
+- l2x0_of_init(0x70c00000, 0xfeffffff);
++ l2x0_of_init(0x30c00000, 0xfeffffff);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+
+diff -Nur linux-3.14.36/arch/arm/mach-cns3xxx/core.c linux-openelec/arch/arm/mach-cns3xxx/core.c
+--- linux-3.14.36/arch/arm/mach-cns3xxx/core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-cns3xxx/core.c 2015-05-06 12:05:43.000000000 -0500
+@@ -240,9 +240,9 @@
+ *
+ * 1 cycle of latency for setup, read and write accesses
+ */
+- val = readl(base + L2X0_TAG_LATENCY_CTRL);
++ val = readl(base + L310_TAG_LATENCY_CTRL);
+ val &= 0xfffff888;
+- writel(val, base + L2X0_TAG_LATENCY_CTRL);
++ writel(val, base + L310_TAG_LATENCY_CTRL);
+
+ /*
+ * Data RAM Control register
+@@ -253,12 +253,12 @@
+ *
+ * 1 cycle of latency for setup, read and write accesses
+ */
+- val = readl(base + L2X0_DATA_LATENCY_CTRL);
++ val = readl(base + L310_DATA_LATENCY_CTRL);
+ val &= 0xfffff888;
+- writel(val, base + L2X0_DATA_LATENCY_CTRL);
++ writel(val, base + L310_DATA_LATENCY_CTRL);
+
+ /* 32 KiB, 8-way, parity disable */
+- l2x0_init(base, 0x00540000, 0xfe000fff);
++ l2x0_init(base, 0x00500000, 0xfe0f0fff);
+ }
+
+ #endif /* CONFIG_CACHE_L2X0 */
+diff -Nur linux-3.14.36/arch/arm/mach-exynos/common.c linux-openelec/arch/arm/mach-exynos/common.c
+--- linux-3.14.36/arch/arm/mach-exynos/common.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-exynos/common.c 2015-05-06 12:05:43.000000000 -0500
+@@ -45,9 +45,6 @@
+ #include "common.h"
+ #include "regs-pmu.h"
+
+-#define L2_AUX_VAL 0x7C470001
+-#define L2_AUX_MASK 0xC200ffff
+-
+ static const char name_exynos4210[] = "EXYNOS4210";
+ static const char name_exynos4212[] = "EXYNOS4212";
+ static const char name_exynos4412[] = "EXYNOS4412";
+@@ -400,7 +397,7 @@
+ {
+ int ret;
+
+- ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
++ ret = l2x0_of_init(0x3c400001, 0xc20fffff);
+ if (ret)
+ return ret;
+
+diff -Nur linux-3.14.36/arch/arm/mach-highbank/highbank.c linux-openelec/arch/arm/mach-highbank/highbank.c
+--- linux-3.14.36/arch/arm/mach-highbank/highbank.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-highbank/highbank.c 2015-05-06 12:05:43.000000000 -0500
+@@ -20,7 +20,7 @@
+ #include <linux/input.h>
+ #include <linux/io.h>
+ #include <linux/irqchip.h>
+-#include <linux/mailbox.h>
++#include <linux/pl320-ipc.h>
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+@@ -51,11 +51,13 @@
+ }
+
+
+-static void highbank_l2x0_disable(void)
++static void highbank_l2c310_write_sec(unsigned long val, unsigned reg)
+ {
+- outer_flush_all();
+- /* Disable PL310 L2 Cache controller */
+- highbank_smc1(0x102, 0x0);
++ if (reg == L2X0_CTRL)
++ highbank_smc1(0x102, val);
++ else
++ WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n",
++ reg);
+ }
+
+ static void __init highbank_init_irq(void)
+@@ -66,11 +68,9 @@
+ highbank_scu_map_io();
+
+ /* Enable PL310 L2 Cache controller */
+- if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
+- of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
+- highbank_smc1(0x102, 0x1);
+- l2x0_of_init(0, ~0UL);
+- outer_cache.disable = highbank_l2x0_disable;
++ if (IS_ENABLED(CONFIG_CACHE_L2X0)) {
++ outer_cache.write_sec = highbank_l2c310_write_sec;
++ l2x0_of_init(0, ~0);
+ }
+ }
+
+diff -Nur linux-3.14.36/arch/arm/mach-imx/anatop.c linux-openelec/arch/arm/mach-imx/anatop.c
+--- linux-3.14.36/arch/arm/mach-imx/anatop.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/anatop.c 2015-05-06 12:05:43.000000000 -0500
+@@ -9,6 +9,7 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+@@ -35,6 +36,10 @@
+ #define BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B 0x80000
+ #define BM_ANADIG_USB_CHRG_DETECT_EN_B 0x100000
+
++#define ANADIG_REG_TARG_MASK 0x1f
++#define ANADIG_REG1_TARG_SHIFT 9 /* VDDPU */
++#define ANADIG_REG2_TARG_SHIFT 18 /* VDDSOC */
++
+ static struct regmap *anatop;
+
+ static void imx_anatop_enable_weak2p5(bool enable)
+@@ -78,6 +83,28 @@
+ BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B);
+ }
+
++void imx_anatop_pu_enable(bool enable)
++{
++ u32 val;
++
++ regmap_read(anatop, ANADIG_REG_CORE, &val);
++ val &= ANADIG_REG_TARG_MASK << ANADIG_REG2_TARG_SHIFT;
++ /*
++ * set pu regulator only in LDO_BYPASS mode(know by VDDSOC reg 0x1f),
++ * else handled by anatop regulator driver.
++ */
++ if (((val >> (ANADIG_REG2_TARG_SHIFT)) & ANADIG_REG_TARG_MASK)
++ == ANADIG_REG_TARG_MASK) {
++ if (enable) {
++ regmap_write(anatop, ANADIG_REG_CORE + REG_SET,
++ ANADIG_REG_TARG_MASK << ANADIG_REG1_TARG_SHIFT);
++ udelay(70); /* bypass need 70us to be stable */
++ } else {
++ regmap_write(anatop, ANADIG_REG_CORE + REG_CLR,
++ ANADIG_REG_TARG_MASK << ANADIG_REG1_TARG_SHIFT);
++ }
++ }
++}
+ void __init imx_init_revision_from_anatop(void)
+ {
+ struct device_node *np;
+@@ -104,6 +131,15 @@
+ case 2:
+ revision = IMX_CHIP_REVISION_1_2;
+ break;
++ case 3:
++ revision = IMX_CHIP_REVISION_1_3;
++ break;
++ case 4:
++ revision = IMX_CHIP_REVISION_1_4;
++ break;
++ case 5:
++ revision = IMX_CHIP_REVISION_1_5;
++ break;
+ default:
+ revision = IMX_CHIP_REVISION_UNKNOWN;
+ }
+diff -Nur linux-3.14.36/arch/arm/mach-imx/busfreq_ddr3.c linux-openelec/arch/arm/mach-imx/busfreq_ddr3.c
+--- linux-3.14.36/arch/arm/mach-imx/busfreq_ddr3.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/busfreq_ddr3.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,471 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file busfreq_ddr3.c
++ *
++ * @brief iMX6 DDR3 frequency change specific file.
++ *
++ * @ingroup PM
++ */
++#include <asm/cacheflush.h>
++#include <asm/fncpy.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/genalloc.h>
++#include <linux/interrupt.h>
++#include <linux/irqchip/arm-gic.h>
++#include <linux/kernel.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++
++#include "hardware.h"
++
++/* DDR settings */
++static unsigned long (*iram_ddr_settings)[2];
++static unsigned long (*normal_mmdc_settings)[2];
++static unsigned long (*iram_iomux_settings)[2];
++static void __iomem *mmdc_base;
++static void __iomem *iomux_base;
++static void __iomem *ccm_base;
++static void __iomem *l2_base;
++static void __iomem *gic_dist_base;
++static u32 *irqs_used;
++
++static void *ddr_freq_change_iram_base;
++static int ddr_settings_size;
++static int iomux_settings_size;
++static volatile unsigned int cpus_in_wfe;
++static volatile bool wait_for_ddr_freq_update;
++static int curr_ddr_rate;
++
++void (*mx6_change_ddr_freq)(u32 freq, void *ddr_settings,
++ bool dll_mode, void *iomux_offsets) = NULL;
++
++extern unsigned int ddr_med_rate;
++extern unsigned int ddr_normal_rate;
++extern int low_bus_freq_mode;
++extern int audio_bus_freq_mode;
++extern void mx6_ddr3_freq_change(u32 freq, void *ddr_settings,
++ bool dll_mode, void *iomux_offsets);
++
++#define MIN_DLL_ON_FREQ 333000000
++#define MAX_DLL_OFF_FREQ 125000000
++#define DDR_FREQ_CHANGE_SIZE 0x2000
++
++unsigned long ddr3_dll_mx6q[][2] = {
++ {0x0c, 0x0},
++ {0x10, 0x0},
++ {0x1C, 0x04088032},
++ {0x1C, 0x0408803a},
++ {0x1C, 0x08408030},
++ {0x1C, 0x08408038},
++ {0x818, 0x0},
++};
++
++unsigned long ddr3_calibration[][2] = {
++ {0x83c, 0x0},
++ {0x840, 0x0},
++ {0x483c, 0x0},
++ {0x4840, 0x0},
++ {0x848, 0x0},
++ {0x4848, 0x0},
++ {0x850, 0x0},
++ {0x4850, 0x0},
++};
++
++unsigned long ddr3_dll_mx6dl[][2] = {
++ {0x0c, 0x0},
++ {0x10, 0x0},
++ {0x1C, 0x04008032},
++ {0x1C, 0x0400803a},
++ {0x1C, 0x07208030},
++ {0x1C, 0x07208038},
++ {0x818, 0x0},
++};
++
++unsigned long iomux_offsets_mx6q[][2] = {
++ {0x5A8, 0x0},
++ {0x5B0, 0x0},
++ {0x524, 0x0},
++ {0x51C, 0x0},
++ {0x518, 0x0},
++ {0x50C, 0x0},
++ {0x5B8, 0x0},
++ {0x5C0, 0x0},
++};
++
++unsigned long iomux_offsets_mx6dl[][2] = {
++ {0x4BC, 0x0},
++ {0x4C0, 0x0},
++ {0x4C4, 0x0},
++ {0x4C8, 0x0},
++ {0x4CC, 0x0},
++ {0x4D0, 0x0},
++ {0x4D4, 0x0},
++ {0x4D8, 0x0},
++};
++
++unsigned long ddr3_400[][2] = {
++ {0x83c, 0x42490249},
++ {0x840, 0x02470247},
++ {0x483c, 0x42570257},
++ {0x4840, 0x02400240},
++ {0x848, 0x4039363C},
++ {0x4848, 0x3A39333F},
++ {0x850, 0x38414441},
++ {0x4850, 0x472D4833}
++};
++
++int can_change_ddr_freq(void)
++{
++ return 1;
++}
++
++/*
++ * each active core apart from the one changing
++ * the DDR frequency will execute this function.
++ * the rest of the cores have to remain in WFE
++ * state until the frequency is changed.
++ */
++irqreturn_t wait_in_wfe_irq(int irq, void *dev_id)
++{
++ u32 me = smp_processor_id();
++
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0xff;
++
++ while (wait_for_ddr_freq_update)
++ wfe();
++
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0;
++
++ return IRQ_HANDLED;
++}
++
++/* change the DDR frequency. */
++int update_ddr_freq(int ddr_rate)
++{
++ int i, j;
++ unsigned int reg;
++ bool dll_off = false;
++ unsigned int online_cpus = 0;
++ int cpu = 0;
++ int me;
++
++ if (!can_change_ddr_freq())
++ return -1;
++
++ if (ddr_rate == curr_ddr_rate)
++ return 0;
++
++ pr_debug("Bus freq set to %d start...\n", ddr_rate);
++
++ if (low_bus_freq_mode || audio_bus_freq_mode)
++ dll_off = true;
++
++ iram_ddr_settings[0][0] = ddr_settings_size;
++ iram_iomux_settings[0][0] = iomux_settings_size;
++ if (ddr_rate == ddr_med_rate && cpu_is_imx6q()) {
++ for (i = 0; i < ARRAY_SIZE(ddr3_dll_mx6q); i++) {
++ iram_ddr_settings[i + 1][0] =
++ normal_mmdc_settings[i][0];
++ iram_ddr_settings[i + 1][1] =
++ normal_mmdc_settings[i][1];
++ }
++ for (j = 0, i = ARRAY_SIZE(ddr3_dll_mx6q);
++ i < iram_ddr_settings[0][0]; j++, i++) {
++ iram_ddr_settings[i + 1][0] =
++ ddr3_400[j][0];
++ iram_ddr_settings[i + 1][1] =
++ ddr3_400[j][1];
++ }
++ } else if (ddr_rate == ddr_normal_rate) {
++ for (i = 0; i < iram_ddr_settings[0][0]; i++) {
++ iram_ddr_settings[i + 1][0] =
++ normal_mmdc_settings[i][0];
++ iram_ddr_settings[i + 1][1] =
++ normal_mmdc_settings[i][1];
++ }
++ }
++
++ /* ensure that all Cores are in WFE. */
++ local_irq_disable();
++
++ me = smp_processor_id();
++
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0xff;
++ wait_for_ddr_freq_update = true;
++ for_each_online_cpu(cpu) {
++ *((char *)(&online_cpus) + (u8)cpu) = 0xff;
++ if (cpu != me) {
++ /* set the interrupt to be pending in the GIC. */
++ reg = 1 << (irqs_used[cpu] % 32);
++ writel_relaxed(reg, gic_dist_base + GIC_DIST_PENDING_SET
++ + (irqs_used[cpu] / 32) * 4);
++ }
++ }
++ while (cpus_in_wfe != online_cpus)
++ udelay(5);
++
++ /*
++ * Flush the TLB, to ensure no TLB maintenance occurs
++ * when DDR is in self-refresh.
++ */
++ local_flush_tlb_all();
++ /* Now we can change the DDR frequency. */
++ mx6_change_ddr_freq(ddr_rate, iram_ddr_settings,
++ dll_off, iram_iomux_settings);
++
++ curr_ddr_rate = ddr_rate;
++
++ /* DDR frequency change is done . */
++ wait_for_ddr_freq_update = false;
++
++ /* wake up all the cores. */
++ sev();
++
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0;
++
++ local_irq_enable();
++
++ pr_debug("Bus freq set to %d done!\n", ddr_rate);
++
++ return 0;
++}
++
++int init_mmdc_ddr3_settings(struct platform_device *busfreq_pdev)
++{
++ struct device *dev = &busfreq_pdev->dev;
++ struct platform_device *ocram_dev;
++ unsigned int iram_paddr;
++ int i, err;
++ u32 cpu;
++ struct device_node *node;
++ struct gen_pool *iram_pool;
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-mmdc-combine");
++ if (!node) {
++ pr_err("failed to find imx6q-mmdc device tree data!\n");
++ return -EINVAL;
++ }
++ mmdc_base = of_iomap(node, 0);
++ WARN(!mmdc_base, "unable to map mmdc registers\n");
++
++ node = NULL;
++ if (cpu_is_imx6q())
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-iomuxc");
++ if (cpu_is_imx6dl())
++ node = of_find_compatible_node(NULL, NULL,
++ "fsl,imx6dl-iomuxc");
++ if (!node) {
++ pr_err("failed to find imx6q-iomux device tree data!\n");
++ return -EINVAL;
++ }
++ iomux_base = of_iomap(node, 0);
++ WARN(!iomux_base, "unable to map iomux registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
++ if (!node) {
++ pr_err("failed to find imx6q-ccm device tree data!\n");
++ return -EINVAL;
++ }
++ ccm_base = of_iomap(node, 0);
++ WARN(!ccm_base, "unable to map mmdc registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
++ if (!node) {
++ pr_err("failed to find imx6q-pl310-cache device tree data!\n");
++ return -EINVAL;
++ }
++ l2_base = of_iomap(node, 0);
++ WARN(!ccm_base, "unable to map mmdc registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
++ if (!node) {
++ pr_err("failed to find imx6q-a9-gic device tree data!\n");
++ return -EINVAL;
++ }
++ gic_dist_base = of_iomap(node, 0);
++ WARN(!gic_dist_base, "unable to map gic dist registers\n");
++
++ if (cpu_is_imx6q())
++ ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6q) +
++ ARRAY_SIZE(ddr3_calibration);
++ if (cpu_is_imx6dl())
++ ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6dl) +
++ ARRAY_SIZE(ddr3_calibration);
++
++ normal_mmdc_settings = kmalloc((ddr_settings_size * 8), GFP_KERNEL);
++ if (cpu_is_imx6q()) {
++ memcpy(normal_mmdc_settings, ddr3_dll_mx6q,
++ sizeof(ddr3_dll_mx6q));
++ memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6q)),
++ ddr3_calibration, sizeof(ddr3_calibration));
++ }
++ if (cpu_is_imx6dl()) {
++ memcpy(normal_mmdc_settings, ddr3_dll_mx6dl,
++ sizeof(ddr3_dll_mx6dl));
++ memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6dl)),
++ ddr3_calibration, sizeof(ddr3_calibration));
++ }
++ /* store the original DDR settings at boot. */
++ for (i = 0; i < ddr_settings_size; i++) {
++ /*
++ * writes via command mode register cannot be read back.
++ * hence hardcode them in the initial static array.
++ * this may require modification on a per customer basis.
++ */
++ if (normal_mmdc_settings[i][0] != 0x1C)
++ normal_mmdc_settings[i][1] =
++ readl_relaxed(mmdc_base
++ + normal_mmdc_settings[i][0]);
++ }
++
++ irqs_used = devm_kzalloc(dev, sizeof(u32) * num_present_cpus(),
++ GFP_KERNEL);
++
++ for_each_online_cpu(cpu) {
++ int irq;
++
++ /*
++ * set up a reserved interrupt to get all
++ * the active cores into a WFE state
++ * before changing the DDR frequency.
++ */
++ irq = platform_get_irq(busfreq_pdev, cpu);
++ err = request_irq(irq, wait_in_wfe_irq,
++ IRQF_PERCPU, "mmdc_1", NULL);
++ if (err) {
++ dev_err(dev,
++ "Busfreq:request_irq failed %d, err = %d\n",
++ irq, err);
++ return err;
++ }
++ err = irq_set_affinity(irq, cpumask_of(cpu));
++ if (err) {
++ dev_err(dev,
++ "Busfreq: Cannot set irq affinity irq=%d,\n",
++ irq);
++ return err;
++ }
++ irqs_used[cpu] = irq;
++ }
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ dev_err(dev, "%s: failed to find ocram node\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ ocram_dev = of_find_device_by_node(node);
++ if (!ocram_dev) {
++ dev_err(dev, "failed to find ocram device!\n");
++ return -EINVAL;
++ }
++
++ iram_pool = dev_get_gen_pool(&ocram_dev->dev);
++ if (!iram_pool) {
++ dev_err(dev, "iram pool unavailable!\n");
++ return -EINVAL;
++ }
++
++ iomux_settings_size = ARRAY_SIZE(iomux_offsets_mx6q);
++ iram_iomux_settings = gen_pool_alloc(iram_pool,
++ (iomux_settings_size * 8) + 8);
++ if (!iram_iomux_settings) {
++ dev_err(dev, "unable to alloc iram for IOMUX settings!\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Allocate extra space to store the number of entries in the
++ * ddr_settings plus 4 extra register information that needs
++ * to be passed to the frequency change code.
++ * sizeof(iram_ddr_settings) = sizeof(ddr_settings) +
++ * entries in ddr_settings + 16.
++ * The last 4 entries store the addresses of the registers:
++ * CCM_BASE_ADDR
++ * MMDC_BASE_ADDR
++ * IOMUX_BASE_ADDR
++ * L2X0_BASE_ADDR
++ */
++ iram_ddr_settings = gen_pool_alloc(iram_pool,
++ (ddr_settings_size * 8) + 8 + 32);
++ if (!iram_ddr_settings) {
++ dev_err(dev, "unable to alloc iram for ddr settings!\n");
++ return -ENOMEM;
++ }
++ i = ddr_settings_size + 1;
++ iram_ddr_settings[i][0] = (unsigned long)mmdc_base;
++ iram_ddr_settings[i+1][0] = (unsigned long)ccm_base;
++ iram_ddr_settings[i+2][0] = (unsigned long)iomux_base;
++ iram_ddr_settings[i+3][0] = (unsigned long)l2_base;
++
++ if (cpu_is_imx6q()) {
++ /* store the IOMUX settings at boot. */
++ for (i = 0; i < iomux_settings_size; i++) {
++ iomux_offsets_mx6q[i][1] =
++ readl_relaxed(iomux_base +
++ iomux_offsets_mx6q[i][0]);
++ iram_iomux_settings[i+1][0] = iomux_offsets_mx6q[i][0];
++ iram_iomux_settings[i+1][1] = iomux_offsets_mx6q[i][1];
++ }
++ }
++
++ if (cpu_is_imx6dl()) {
++ for (i = 0; i < iomux_settings_size; i++) {
++ iomux_offsets_mx6dl[i][1] =
++ readl_relaxed(iomux_base +
++ iomux_offsets_mx6dl[i][0]);
++ iram_iomux_settings[i+1][0] = iomux_offsets_mx6dl[i][0];
++ iram_iomux_settings[i+1][1] = iomux_offsets_mx6dl[i][1];
++ }
++ }
++
++ ddr_freq_change_iram_base = gen_pool_alloc(iram_pool,
++ DDR_FREQ_CHANGE_SIZE);
++ if (!ddr_freq_change_iram_base) {
++ dev_err(dev, "Cannot alloc iram for ddr freq change code!\n");
++ return -ENOMEM;
++ }
++
++ iram_paddr = gen_pool_virt_to_phys(iram_pool,
++ (unsigned long)ddr_freq_change_iram_base);
++ /*
++ * Need to remap the area here since we want
++ * the memory region to be executable.
++ */
++ ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
++ DDR_FREQ_CHANGE_SIZE,
++ MT_MEMORY_RWX_NONCACHED);
++ mx6_change_ddr_freq = (void *)fncpy(ddr_freq_change_iram_base,
++ &mx6_ddr3_freq_change, DDR_FREQ_CHANGE_SIZE);
++
++ curr_ddr_rate = ddr_normal_rate;
++
++ return 0;
++}
+diff -Nur linux-3.14.36/arch/arm/mach-imx/busfreq-imx6.c linux-openelec/arch/arm/mach-imx/busfreq-imx6.c
+--- linux-3.14.36/arch/arm/mach-imx/busfreq-imx6.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/busfreq-imx6.c 2015-07-24 18:03:30.408842002 -0500
+@@ -0,0 +1,953 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file busfreq-imx6.c
++ *
++ * @brief A common API for the Freescale Semiconductor iMX6 Busfreq API
++ *
++ * The APIs are for setting bus frequency to different values based on the
++ * highest frequency requested.
++ *
++ * @ingroup PM
++ */
++
++#include <asm/cacheflush.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/reboot.h>
++#include <linux/regulator/consumer.h>
++#include <linux/sched.h>
++#include <linux/suspend.h>
++#include "hardware.h"
++
++#define LPAPM_CLK 24000000
++#define DDR3_AUDIO_CLK 50000000
++#define LPDDR2_AUDIO_CLK 100000000
++
++int high_bus_freq_mode;
++int med_bus_freq_mode;
++int audio_bus_freq_mode;
++int low_bus_freq_mode;
++int ultra_low_bus_freq_mode;
++unsigned int ddr_med_rate;
++unsigned int ddr_normal_rate;
++
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++static int bus_freq_scaling_initialized;
++static struct device *busfreq_dev;
++static int busfreq_suspended;
++static u32 org_arm_rate;
++static int bus_freq_scaling_is_active;
++static int high_bus_count, med_bus_count, audio_bus_count, low_bus_count;
++static unsigned int ddr_low_rate;
++
++extern int init_mmdc_lpddr2_settings(struct platform_device *dev);
++extern int init_mmdc_ddr3_settings(struct platform_device *dev);
++extern int update_ddr_freq(int ddr_rate);
++extern int update_lpddr2_freq(int ddr_rate);
++
++DEFINE_MUTEX(bus_freq_mutex);
++static DEFINE_SPINLOCK(freq_lock);
++
++static struct clk *pll2_400;
++static struct clk *periph_clk;
++static struct clk *periph_pre_clk;
++static struct clk *periph_clk2_sel;
++static struct clk *periph_clk2;
++static struct clk *osc_clk;
++static struct clk *cpu_clk;
++static struct clk *pll3;
++static struct clk *pll2;
++static struct clk *pll2_200;
++static struct clk *pll1_sys;
++static struct clk *periph2_clk;
++static struct clk *ocram_clk;
++static struct clk *ahb_clk;
++static struct clk *pll1_sw_clk;
++static struct clk *periph2_pre_clk;
++static struct clk *periph2_clk2_sel;
++static struct clk *periph2_clk2;
++static struct clk *step_clk;
++static struct clk *axi_sel_clk;
++static struct clk *pll3_pfd1_540m;
++
++static u32 pll2_org_rate;
++static struct delayed_work low_bus_freq_handler;
++static struct delayed_work bus_freq_daemon;
++
++static void enter_lpm_imx6sl(void)
++{
++ unsigned long flags;
++
++ if (high_bus_freq_mode) {
++ pll2_org_rate = clk_get_rate(pll2);
++ /* Set periph_clk to be sourced from OSC_CLK */
++ clk_set_parent(periph_clk2_sel, osc_clk);
++ clk_set_parent(periph_clk, periph_clk2);
++ /* Ensure AHB/AXI clks are at 24MHz. */
++ clk_set_rate(ahb_clk, LPAPM_CLK);
++ clk_set_rate(ocram_clk, LPAPM_CLK);
++ }
++ if (audio_bus_count) {
++ /* Set AHB to 8MHz to lower power.*/
++ clk_set_rate(ahb_clk, LPAPM_CLK / 3);
++
++ /* Set up DDR to 100MHz. */
++ spin_lock_irqsave(&freq_lock, flags);
++ update_lpddr2_freq(LPDDR2_AUDIO_CLK);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /* Fix the clock tree in kernel */
++ clk_set_rate(pll2, pll2_org_rate);
++ clk_set_parent(periph2_pre_clk, pll2_200);
++ clk_set_parent(periph2_clk, periph2_pre_clk);
++
++ if (low_bus_freq_mode || ultra_low_bus_freq_mode) {
++ /*
++ * Switch ARM to run off PLL2_PFD2_400MHz
++ * since DDR is anyway at 100MHz.
++ */
++ clk_set_parent(step_clk, pll2_400);
++ clk_set_parent(pll1_sw_clk, step_clk);
++ /*
++ * Ensure that the clock will be
++ * at original speed.
++ */
++ clk_set_rate(cpu_clk, org_arm_rate);
++ }
++ low_bus_freq_mode = 0;
++ ultra_low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 1;
++ } else {
++ u32 arm_div, pll1_rate;
++ org_arm_rate = clk_get_rate(cpu_clk);
++ if (low_bus_freq_mode && low_bus_count == 0) {
++ /*
++ * We are already in DDR @ 24MHz state, but
++ * no one but ARM needs the DDR. In this case,
++ * we can lower the DDR freq to 1MHz when ARM
++ * enters WFI in this state. Keep track of this state.
++ */
++ ultra_low_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++ } else {
++ if (!ultra_low_bus_freq_mode && !low_bus_freq_mode) {
++ /*
++ * Set DDR to 24MHz.
++ * Since we are going to bypass PLL2,
++ * we need to move ARM clk off PLL2_PFD2
++ * to PLL1. Make sure the PLL1 is running
++ * at the lowest possible freq.
++ */
++ clk_set_rate(pll1_sys,
++ clk_round_rate(pll1_sys, org_arm_rate));
++ pll1_rate = clk_get_rate(pll1_sys);
++ arm_div = pll1_rate / org_arm_rate + 1;
++ /*
++ * Ensure ARM CLK is lower before
++ * changing the parent.
++ */
++ clk_set_rate(cpu_clk, org_arm_rate / arm_div);
++ /* Now set the ARM clk parent to PLL1_SYS. */
++ clk_set_parent(pll1_sw_clk, pll1_sys);
++
++ /*
++ * Set STEP_CLK back to OSC to save power and
++ * also to maintain the parent.The WFI iram code
++ * will switch step_clk to osc, but the clock API
++ * is not aware of the change and when a new request
++ * to change the step_clk parent to pll2_pfd2_400M
++ * is requested sometime later, the change is ignored.
++ */
++ clk_set_parent(step_clk, osc_clk);
++ /* Now set DDR to 24MHz. */
++ spin_lock_irqsave(&freq_lock, flags);
++ update_lpddr2_freq(LPAPM_CLK);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /*
++ * Fix the clock tree in kernel.
++ * Make sure PLL2 rate is updated as it gets
++ * bypassed in the DDR freq change code.
++ */
++ clk_set_rate(pll2, LPAPM_CLK);
++ clk_set_parent(periph2_clk2_sel, pll2);
++ clk_set_parent(periph2_clk, periph2_clk2_sel);
++
++ }
++ if (low_bus_count == 0) {
++ ultra_low_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ } else {
++ ultra_low_bus_freq_mode = 0;
++ low_bus_freq_mode = 1;
++ }
++ audio_bus_freq_mode = 0;
++ }
++ }
++}
++
++static void exit_lpm_imx6sl(void)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&freq_lock, flags);
++ /* Change DDR freq in IRAM. */
++ update_lpddr2_freq(ddr_normal_rate);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /*
++ * Fix the clock tree in kernel.
++ * Make sure PLL2 rate is updated as it gets
++ * un-bypassed in the DDR freq change code.
++ */
++ clk_set_rate(pll2, pll2_org_rate);
++ clk_set_parent(periph2_pre_clk, pll2_400);
++ clk_set_parent(periph2_clk, periph2_pre_clk);
++
++ /* Ensure that periph_clk is sourced from PLL2_400. */
++ clk_set_parent(periph_pre_clk, pll2_400);
++ /*
++ * Before switching the periph_clk, ensure that the
++ * AHB/AXI will not be too fast.
++ */
++ clk_set_rate(ahb_clk, LPAPM_CLK / 3);
++ clk_set_rate(ocram_clk, LPAPM_CLK / 2);
++ clk_set_parent(periph_clk, periph_pre_clk);
++
++ if (low_bus_freq_mode || ultra_low_bus_freq_mode) {
++ /* Move ARM from PLL1_SW_CLK to PLL2_400. */
++ clk_set_parent(step_clk, pll2_400);
++ clk_set_parent(pll1_sw_clk, step_clk);
++ clk_set_rate(cpu_clk, org_arm_rate);
++ ultra_low_bus_freq_mode = 0;
++ }
++}
++
++int reduce_bus_freq(void)
++{
++ int ret = 0;
++ clk_prepare_enable(pll3);
++ if (cpu_is_imx6sl())
++ enter_lpm_imx6sl();
++ else {
++ if (cpu_is_imx6dl() && (clk_get_parent(axi_sel_clk)
++ != periph_clk))
++ /* Set axi to periph_clk */
++ clk_set_parent(axi_sel_clk, periph_clk);
++
++ if (audio_bus_count) {
++ /* Need to ensure that PLL2_PFD_400M is kept ON. */
++ clk_prepare_enable(pll2_400);
++ update_ddr_freq(DDR3_AUDIO_CLK);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, pll2_200);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ audio_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ } else {
++ update_ddr_freq(LPAPM_CLK);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, osc_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ /* Set periph_clk parent to OSC via periph_clk2_sel */
++ ret = clk_set_parent(periph_clk, periph_clk2);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ if (audio_bus_freq_mode)
++ clk_disable_unprepare(pll2_400);
++ low_bus_freq_mode = 1;
++ audio_bus_freq_mode = 0;
++ }
++ }
++ clk_disable_unprepare(pll3);
++
++ med_bus_freq_mode = 0;
++ high_bus_freq_mode = 0;
++
++ if (audio_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to audio mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++ if (low_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to low mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++
++ return ret;
++}
++
++static void reduce_bus_freq_handler(struct work_struct *work)
++{
++ mutex_lock(&bus_freq_mutex);
++
++ reduce_bus_freq();
++
++ mutex_unlock(&bus_freq_mutex);
++}
++
++/*
++ * Set the DDR, AHB to 24MHz.
++ * This mode will be activated only when none of the modules that
++ * need a higher DDR or AHB frequency are active.
++ */
++int set_low_bus_freq(void)
++{
++ if (busfreq_suspended)
++ return 0;
++
++ if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
++ return 0;
++
++ /*
++ * Check to see if we need to go from
++ * low bus freq mode to audio bus freq mode.
++ * If so, the change needs to be done immediately.
++ */
++ if (audio_bus_count && (low_bus_freq_mode || ultra_low_bus_freq_mode))
++ reduce_bus_freq();
++ else
++ /*
++ * Don't lower the frequency immediately. Instead
++ * schedule a delayed work and drop the freq if
++ * the conditions still remain the same.
++ */
++ schedule_delayed_work(&low_bus_freq_handler,
++ usecs_to_jiffies(3000000));
++ return 0;
++}
++
++/*
++ * Set the DDR to either 528MHz or 400MHz for iMX6qd
++ * or 400MHz for iMX6dl.
++ */
++int set_high_bus_freq(int high_bus_freq)
++{
++ int ret = 0;
++ struct clk *periph_clk_parent;
++
++ if (bus_freq_scaling_initialized && bus_freq_scaling_is_active)
++ cancel_delayed_work_sync(&low_bus_freq_handler);
++
++ if (busfreq_suspended)
++ return 0;
++
++ if (cpu_is_imx6q())
++ periph_clk_parent = pll2;
++ else
++ periph_clk_parent = pll2_400;
++
++ if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
++ return 0;
++
++ if (high_bus_freq_mode)
++ return 0;
++
++ /* medium bus freq is only supported for MX6DQ */
++ if (med_bus_freq_mode && !high_bus_freq)
++ return 0;
++
++ clk_prepare_enable(pll3);
++ if (cpu_is_imx6sl())
++ exit_lpm_imx6sl();
++ else {
++ if (high_bus_freq) {
++ update_ddr_freq(ddr_normal_rate);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, periph_clk_parent);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ if (cpu_is_imx6dl() && (clk_get_parent(axi_sel_clk)
++ != pll3_pfd1_540m))
++ /* Set axi to pll3_pfd1_540m */
++ clk_set_parent(axi_sel_clk, pll3_pfd1_540m);
++ } else {
++ update_ddr_freq(ddr_med_rate);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, pll2_400);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ }
++ if (audio_bus_freq_mode)
++ clk_disable_unprepare(pll2_400);
++ }
++
++ high_bus_freq_mode = 1;
++ med_bus_freq_mode = 0;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++
++ clk_disable_unprepare(pll3);
++
++ if (high_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to high mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++ if (med_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to med mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++
++ return 0;
++}
++#endif
++
++void request_bus_freq(enum bus_freq_mode mode)
++{
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ mutex_lock(&bus_freq_mutex);
++
++ if (mode == BUS_FREQ_HIGH)
++ high_bus_count++;
++ else if (mode == BUS_FREQ_MED)
++ med_bus_count++;
++ else if (mode == BUS_FREQ_AUDIO)
++ audio_bus_count++;
++ else if (mode == BUS_FREQ_LOW)
++ low_bus_count++;
++
++ if (busfreq_suspended || !bus_freq_scaling_initialized ||
++ !bus_freq_scaling_is_active) {
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ cancel_delayed_work_sync(&low_bus_freq_handler);
++
++ if (cpu_is_imx6dl()) {
++ /* No support for medium setpoint on MX6DL. */
++ if (mode == BUS_FREQ_MED) {
++ high_bus_count++;
++ mode = BUS_FREQ_HIGH;
++ }
++ }
++
++ if ((mode == BUS_FREQ_HIGH) && (!high_bus_freq_mode)) {
++ set_high_bus_freq(1);
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ if ((mode == BUS_FREQ_MED) && (!high_bus_freq_mode) &&
++ (!med_bus_freq_mode)) {
++ set_high_bus_freq(0);
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((mode == BUS_FREQ_AUDIO) && (!high_bus_freq_mode) &&
++ (!med_bus_freq_mode) && (!audio_bus_freq_mode)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ mutex_unlock(&bus_freq_mutex);
++#endif
++ return;
++}
++EXPORT_SYMBOL(request_bus_freq);
++
++void release_bus_freq(enum bus_freq_mode mode)
++{
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ mutex_lock(&bus_freq_mutex);
++
++ if (mode == BUS_FREQ_HIGH) {
++ if (high_bus_count == 0) {
++ dev_err(busfreq_dev, "high bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ high_bus_count--;
++ } else if (mode == BUS_FREQ_MED) {
++ if (med_bus_count == 0) {
++ dev_err(busfreq_dev, "med bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ med_bus_count--;
++ } else if (mode == BUS_FREQ_AUDIO) {
++ if (audio_bus_count == 0) {
++ dev_err(busfreq_dev, "audio bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ audio_bus_count--;
++ } else if (mode == BUS_FREQ_LOW) {
++ if (low_bus_count == 0) {
++ dev_err(busfreq_dev, "low bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ low_bus_count--;
++ }
++
++ if (busfreq_suspended || !bus_freq_scaling_initialized ||
++ !bus_freq_scaling_is_active) {
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ if (cpu_is_imx6dl()) {
++ /* No support for medium setpoint on MX6DL. */
++ if (mode == BUS_FREQ_MED) {
++ high_bus_count--;
++ mode = BUS_FREQ_HIGH;
++ }
++ }
++
++ if ((!audio_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count != 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((!low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0) &&
++ (low_bus_count != 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((!ultra_low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0) &&
++ (low_bus_count == 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ mutex_unlock(&bus_freq_mutex);
++#endif
++ return;
++}
++EXPORT_SYMBOL(release_bus_freq);
++
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++static void bus_freq_daemon_handler(struct work_struct *work)
++{
++ mutex_lock(&bus_freq_mutex);
++ if ((!low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0))
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++}
++
++static ssize_t bus_freq_scaling_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (bus_freq_scaling_is_active)
++ return sprintf(buf, "Bus frequency scaling is enabled\n");
++ else
++ return sprintf(buf, "Bus frequency scaling is disabled\n");
++}
++
++static ssize_t bus_freq_scaling_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t size)
++{
++ if (strncmp(buf, "1", 1) == 0) {
++ bus_freq_scaling_is_active = 1;
++ set_high_bus_freq(1);
++ /*
++ * We set bus freq to highest at the beginning,
++ * so we use this daemon thread to make sure system
++ * can enter low bus mode if
++ * there is no high bus request pending
++ */
++ schedule_delayed_work(&bus_freq_daemon,
++ usecs_to_jiffies(5000000));
++ } else if (strncmp(buf, "0", 1) == 0) {
++ if (bus_freq_scaling_is_active)
++ set_high_bus_freq(1);
++ bus_freq_scaling_is_active = 0;
++ }
++ return size;
++}
++
++static int bus_freq_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ mutex_lock(&bus_freq_mutex);
++
++ if (event == PM_SUSPEND_PREPARE) {
++ high_bus_count++;
++ set_high_bus_freq(1);
++ busfreq_suspended = 1;
++ } else if (event == PM_POST_SUSPEND) {
++ busfreq_suspended = 0;
++ high_bus_count--;
++ schedule_delayed_work(&bus_freq_daemon,
++ usecs_to_jiffies(5000000));
++ }
++
++ mutex_unlock(&bus_freq_mutex);
++
++ return NOTIFY_OK;
++}
++
++static int busfreq_reboot_notifier_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ /* System is rebooting. Set the system into high_bus_freq_mode. */
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ return 0;
++}
++
++static struct notifier_block imx_bus_freq_pm_notifier = {
++ .notifier_call = bus_freq_pm_notify,
++};
++
++static struct notifier_block imx_busfreq_reboot_notifier = {
++ .notifier_call = busfreq_reboot_notifier_event,
++};
++
++
++static DEVICE_ATTR(enable, 0644, bus_freq_scaling_enable_show,
++ bus_freq_scaling_enable_store);
++#endif
++
++/*!
++ * This is the probe routine for the bus frequency driver.
++ *
++ * @param pdev The platform device structure
++ *
++ * @return The function returns 0 on success
++ *
++ */
++
++static int busfreq_probe(struct platform_device *pdev)
++{
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ u32 err;
++
++ busfreq_dev = &pdev->dev;
++
++ pll2_400 = devm_clk_get(&pdev->dev, "pll2_pfd2_396m");
++ if (IS_ERR(pll2_400)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_pfd2_396m\n",
++ __func__);
++ return PTR_ERR(pll2_400);
++ }
++
++ pll2_200 = devm_clk_get(&pdev->dev, "pll2_198m");
++ if (IS_ERR(pll2_200)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_198m\n",
++ __func__);
++ return PTR_ERR(pll2_200);
++ }
++
++ pll2 = devm_clk_get(&pdev->dev, "pll2_bus");
++ if (IS_ERR(pll2)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_bus\n",
++ __func__);
++ return PTR_ERR(pll2);
++ }
++
++ cpu_clk = devm_clk_get(&pdev->dev, "arm");
++ if (IS_ERR(cpu_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get cpu_clk\n",
++ __func__);
++ return PTR_ERR(cpu_clk);
++ }
++
++ pll3 = devm_clk_get(&pdev->dev, "pll3_usb_otg");
++ if (IS_ERR(pll3)) {
++ dev_err(busfreq_dev, "%s: failed to get pll3_usb_otg\n",
++ __func__);
++ return PTR_ERR(pll3);
++ }
++
++ periph_clk = devm_clk_get(&pdev->dev, "periph");
++ if (IS_ERR(periph_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph\n",
++ __func__);
++ return PTR_ERR(periph_clk);
++ }
++
++ periph_pre_clk = devm_clk_get(&pdev->dev, "periph_pre");
++ if (IS_ERR(periph_pre_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_pre\n",
++ __func__);
++ return PTR_ERR(periph_pre_clk);
++ }
++
++ periph_clk2 = devm_clk_get(&pdev->dev, "periph_clk2");
++ if (IS_ERR(periph_clk2)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_clk2\n",
++ __func__);
++ return PTR_ERR(periph_clk2);
++ }
++
++ periph_clk2_sel = devm_clk_get(&pdev->dev, "periph_clk2_sel");
++ if (IS_ERR(periph_clk2_sel)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_clk2_sel\n",
++ __func__);
++ return PTR_ERR(periph_clk2_sel);
++ }
++
++ osc_clk = devm_clk_get(&pdev->dev, "osc");
++ if (IS_ERR(osc_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get osc_clk\n",
++ __func__);
++ return PTR_ERR(osc_clk);
++ }
++
++ if (cpu_is_imx6dl()) {
++ axi_sel_clk = devm_clk_get(&pdev->dev, "axi_sel");
++ if (IS_ERR(axi_sel_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get axi_sel_clk\n",
++ __func__);
++ return PTR_ERR(axi_sel_clk);
++ }
++
++ pll3_pfd1_540m = devm_clk_get(&pdev->dev, "pll3_pfd1_540m");
++ if (IS_ERR(pll3_pfd1_540m)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get pll3_pfd1_540m\n", __func__);
++ return PTR_ERR(pll3_pfd1_540m);
++ }
++ }
++
++ if (cpu_is_imx6sl()) {
++ pll1_sys = devm_clk_get(&pdev->dev, "pll1_sys");
++ if (IS_ERR(pll1_sys)) {
++ dev_err(busfreq_dev, "%s: failed to get pll1_sys\n",
++ __func__);
++ return PTR_ERR(pll1_sys);
++ }
++
++ ahb_clk = devm_clk_get(&pdev->dev, "ahb");
++ if (IS_ERR(ahb_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get ahb_clk\n",
++ __func__);
++ return PTR_ERR(ahb_clk);
++ }
++
++ ocram_clk = devm_clk_get(&pdev->dev, "ocram");
++ if (IS_ERR(ocram_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get ocram_clk\n",
++ __func__);
++ return PTR_ERR(ocram_clk);
++ }
++
++ pll1_sw_clk = devm_clk_get(&pdev->dev, "pll1_sw");
++ if (IS_ERR(pll1_sw_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get pll1_sw_clk\n",
++ __func__);
++ return PTR_ERR(pll1_sw_clk);
++ }
++
++ periph2_clk = devm_clk_get(&pdev->dev, "periph2");
++ if (IS_ERR(periph2_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph2\n",
++ __func__);
++ return PTR_ERR(periph2_clk);
++ }
++
++ periph2_pre_clk = devm_clk_get(&pdev->dev, "periph2_pre");
++ if (IS_ERR(periph2_pre_clk)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_pre_clk\n",
++ __func__);
++ return PTR_ERR(periph2_pre_clk);
++ }
++
++ periph2_clk2 = devm_clk_get(&pdev->dev, "periph2_clk2");
++ if (IS_ERR(periph2_clk2)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_clk2\n",
++ __func__);
++ return PTR_ERR(periph2_clk2);
++ }
++
++ periph2_clk2_sel = devm_clk_get(&pdev->dev, "periph2_clk2_sel");
++ if (IS_ERR(periph2_clk2_sel)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_clk2_sel\n",
++ __func__);
++ return PTR_ERR(periph2_clk2_sel);
++ }
++
++ step_clk = devm_clk_get(&pdev->dev, "step");
++ if (IS_ERR(step_clk)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get step_clk\n",
++ __func__);
++ return PTR_ERR(periph2_clk2_sel);
++ }
++
++ }
++
++ err = sysfs_create_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
++ if (err) {
++ dev_err(busfreq_dev,
++ "Unable to register sysdev entry for BUSFREQ");
++ return err;
++ }
++
++ if (of_property_read_u32(pdev->dev.of_node, "fsl,max_ddr_freq",
++ &ddr_normal_rate)) {
++ dev_err(busfreq_dev, "max_ddr_freq entry missing\n");
++ return -EINVAL;
++ }
++#endif
++
++ high_bus_freq_mode = 1;
++ med_bus_freq_mode = 0;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++ ultra_low_bus_freq_mode = 0;
++
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ bus_freq_scaling_is_active = 1;
++ bus_freq_scaling_initialized = 1;
++
++ ddr_low_rate = LPAPM_CLK;
++ if (cpu_is_imx6q()) {
++ if (of_property_read_u32(pdev->dev.of_node, "fsl,med_ddr_freq",
++ &ddr_med_rate)) {
++ dev_info(busfreq_dev,
++ "DDR medium rate not supported.\n");
++ ddr_med_rate = ddr_normal_rate;
++ }
++ }
++
++ INIT_DELAYED_WORK(&low_bus_freq_handler, reduce_bus_freq_handler);
++ INIT_DELAYED_WORK(&bus_freq_daemon, bus_freq_daemon_handler);
++ register_pm_notifier(&imx_bus_freq_pm_notifier);
++ register_reboot_notifier(&imx_busfreq_reboot_notifier);
++
++ if (cpu_is_imx6sl())
++ err = init_mmdc_lpddr2_settings(pdev);
++ else
++ err = init_mmdc_ddr3_settings(pdev);
++ if (err) {
++ dev_err(busfreq_dev, "Busfreq init of MMDC failed\n");
++ return err;
++ }
++#endif
++ return 0;
++}
++
++static const struct of_device_id imx6_busfreq_ids[] = {
++ { .compatible = "fsl,imx6_busfreq", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver busfreq_driver = {
++ .driver = {
++ .name = "imx6_busfreq",
++ .owner = THIS_MODULE,
++ .of_match_table = imx6_busfreq_ids,
++ },
++ .probe = busfreq_probe,
++};
++
++/*!
++ * Initialise the busfreq_driver.
++ *
++ * @return The function always returns 0.
++ */
++
++static int __init busfreq_init(void)
++{
++#ifndef CONFIG_MX6_VPU_352M
++ if (platform_driver_register(&busfreq_driver) != 0)
++ return -ENODEV;
++
++ printk(KERN_INFO "Bus freq driver module loaded\n");
++#endif
++ return 0;
++}
++
++static void __exit busfreq_cleanup(void)
++{
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ sysfs_remove_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
++
++ bus_freq_scaling_initialized = 0;
++#endif
++ /* Unregister the device structure */
++ platform_driver_unregister(&busfreq_driver);
++}
++
++module_init(busfreq_init);
++module_exit(busfreq_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("BusFreq driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/arch/arm/mach-imx/busfreq_lpddr2.c linux-openelec/arch/arm/mach-imx/busfreq_lpddr2.c
+--- linux-3.14.36/arch/arm/mach-imx/busfreq_lpddr2.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/busfreq_lpddr2.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,183 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file busfreq_lpddr2.c
++ *
++ * @brief iMX6 LPDDR2 frequency change specific file.
++ *
++ * @ingroup PM
++ */
++#include <asm/cacheflush.h>
++#include <asm/fncpy.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/genalloc.h>
++#include <linux/interrupt.h>
++#include <linux/irqchip/arm-gic.h>
++#include <linux/kernel.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++
++#include "hardware.h"
++
++/* DDR settings */
++static void __iomem *mmdc_base;
++static void __iomem *anatop_base;
++static void __iomem *ccm_base;
++static void __iomem *l2_base;
++static struct device *busfreq_dev;
++static void *ddr_freq_change_iram_base;
++static int curr_ddr_rate;
++
++unsigned long reg_addrs[4];
++
++void (*mx6_change_lpddr2_freq)(u32 ddr_freq, int bus_freq_mode,
++ void *iram_addr) = NULL;
++
++extern unsigned int ddr_normal_rate;
++extern int low_bus_freq_mode;
++extern int ultra_low_bus_freq_mode;
++extern void mx6_lpddr2_freq_change(u32 freq, int bus_freq_mode,
++ void *iram_addr);
++
++
++#define LPDDR2_FREQ_CHANGE_SIZE 0x1000
++
++
++/* change the DDR frequency. */
++int update_lpddr2_freq(int ddr_rate)
++{
++ if (ddr_rate == curr_ddr_rate)
++ return 0;
++
++ dev_dbg(busfreq_dev, "\nBus freq set to %d start...\n", ddr_rate);
++
++ /*
++ * Flush the TLB, to ensure no TLB maintenance occurs
++ * when DDR is in self-refresh.
++ */
++ local_flush_tlb_all();
++ /* Now change DDR frequency. */
++ mx6_change_lpddr2_freq(ddr_rate,
++ (low_bus_freq_mode | ultra_low_bus_freq_mode),
++ reg_addrs);
++
++ curr_ddr_rate = ddr_rate;
++
++ dev_dbg(busfreq_dev, "\nBus freq set to %d done...\n", ddr_rate);
++
++ return 0;
++}
++
++int init_mmdc_lpddr2_settings(struct platform_device *busfreq_pdev)
++{
++ struct platform_device *ocram_dev;
++ unsigned int iram_paddr;
++ struct device_node *node;
++ struct gen_pool *iram_pool;
++
++ busfreq_dev = &busfreq_pdev->dev;
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-mmdc");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-mmdc device tree data!\n");
++ return -EINVAL;
++ }
++ mmdc_base = of_iomap(node, 0);
++ WARN(!mmdc_base, "unable to map mmdc registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-ccm");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-ccm device tree data!\n");
++ return -EINVAL;
++ }
++ ccm_base = of_iomap(node, 0);
++ WARN(!ccm_base, "unable to map ccm registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
++ return -EINVAL;
++ }
++ l2_base = of_iomap(node, 0);
++ WARN(!l2_base, "unable to map PL310 registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
++ return -EINVAL;
++ }
++ anatop_base = of_iomap(node, 0);
++ WARN(!anatop_base, "unable to map anatop registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ dev_err(busfreq_dev, "%s: failed to find ocram node\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ ocram_dev = of_find_device_by_node(node);
++ if (!ocram_dev) {
++ dev_err(busfreq_dev, "failed to find ocram device!\n");
++ return -EINVAL;
++ }
++
++ iram_pool = dev_get_gen_pool(&ocram_dev->dev);
++ if (!iram_pool) {
++ dev_err(busfreq_dev, "iram pool unavailable!\n");
++ return -EINVAL;
++ }
++
++ reg_addrs[0] = (unsigned long)anatop_base;
++ reg_addrs[1] = (unsigned long)ccm_base;
++ reg_addrs[2] = (unsigned long)mmdc_base;
++ reg_addrs[3] = (unsigned long)l2_base;
++
++ ddr_freq_change_iram_base = (void *)gen_pool_alloc(iram_pool,
++ LPDDR2_FREQ_CHANGE_SIZE);
++ if (!ddr_freq_change_iram_base) {
++ dev_err(busfreq_dev,
++ "Cannot alloc iram for ddr freq change code!\n");
++ return -ENOMEM;
++ }
++
++ iram_paddr = gen_pool_virt_to_phys(iram_pool,
++ (unsigned long)ddr_freq_change_iram_base);
++ /*
++ * Need to remap the area here since we want
++ * the memory region to be executable.
++ */
++ ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
++ LPDDR2_FREQ_CHANGE_SIZE,
++ MT_MEMORY_RWX_NONCACHED);
++ mx6_change_lpddr2_freq = (void *)fncpy(ddr_freq_change_iram_base,
++ &mx6_lpddr2_freq_change, LPDDR2_FREQ_CHANGE_SIZE);
++
++ curr_ddr_rate = ddr_normal_rate;
++
++ return 0;
++}
+diff -Nur linux-3.14.36/arch/arm/mach-imx/clk.h linux-openelec/arch/arm/mach-imx/clk.h
+--- linux-3.14.36/arch/arm/mach-imx/clk.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/clk.h 2015-05-06 12:05:43.000000000 -0500
+@@ -23,7 +23,8 @@
+ };
+
+ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+- const char *parent_name, void __iomem *base, u32 div_mask);
++ const char *parent_name, void __iomem *base,
++ u32 div_mask, bool always_on);
+
+ struct clk *clk_register_gate2(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+diff -Nur linux-3.14.36/arch/arm/mach-imx/clk-imx6q.c linux-openelec/arch/arm/mach-imx/clk-imx6q.c
+--- linux-3.14.36/arch/arm/mach-imx/clk-imx6q.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/clk-imx6q.c 2015-07-24 18:03:30.408842002 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2011-2013 Freescale Semiconductor, Inc.
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -24,6 +24,8 @@
+ #include "common.h"
+ #include "hardware.h"
+
++#define CCM_CCGR_OFFSET(index) (index * 2)
++
+ static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
+ static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+ static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+@@ -39,6 +41,8 @@
+ static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
+ static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+ static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
++static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
++static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
+ static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+ static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+ static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+@@ -72,6 +76,10 @@
+ "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
+ "pcie_ref", "sata_ref",
+ };
++static const char *pll_av_sels[] = { "osc", "lvds1_in", "lvds2_in", "dummy", };
++static void __iomem *anatop_base;
++static void __iomem *ccm_base;
++
+
+ enum mx6q_clks {
+ dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
+@@ -88,11 +96,11 @@
+ periph_clk2, periph2_clk2, ipg, ipg_per, esai_pred, esai_podf,
+ asrc_pred, asrc_podf, spdif_pred, spdif_podf, can_root, ecspi_root,
+ gpu2d_core_podf, gpu3d_core_podf, gpu3d_shader, ipu1_podf, ipu2_podf,
+- ldb_di0_podf, ldb_di1_podf, ipu1_di0_pre, ipu1_di1_pre, ipu2_di0_pre,
+- ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf, ssi2_pred, ssi2_podf,
+- ssi3_pred, ssi3_podf, uart_serial_podf, usdhc1_podf, usdhc2_podf,
+- usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf, emi_podf,
+- emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
++ ldb_di0_podf_unused, ldb_di1_podf_unused, ipu1_di0_pre, ipu1_di1_pre,
++ ipu2_di0_pre, ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf,
++ ssi2_pred, ssi2_podf, ssi3_pred, ssi3_podf, uart_serial_podf,
++ usdhc1_podf, usdhc2_podf, usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf,
++ emi_podf, emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
+ mmdc_ch1_axi_podf, arm, ahb, apbh_dma, asrc, can1_ipg, can1_serial,
+ can2_ipg, can2_serial, ecspi1, ecspi2, ecspi3, ecspi4, ecspi5, enet,
+ esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
+@@ -107,7 +115,10 @@
+ sata_ref, sata_ref_100m, pcie_ref, pcie_ref_125m, enet_ref, usbphy1_gate,
+ usbphy2_gate, pll4_post_div, pll5_post_div, pll5_video_div, eim_slow,
+ spdif, cko2_sel, cko2_podf, cko2, cko, vdoa, pll4_audio_div,
+- lvds1_sel, lvds2_sel, lvds1_gate, lvds2_gate, clk_max
++ lvds1_sel, lvds2_sel, lvds1_gate, lvds2_gate, gpt_3m, video_27m,
++ ldb_di0_div_7, ldb_di1_div_7, ldb_di0_div_sel, ldb_di1_div_sel,
++ caam_mem, caam_aclk, caam_ipg, epit1, epit2, tzasc2, lvds1_in, lvds1_out,
++ pll4_sel, lvds2_in, lvds2_out, anaclk1, anaclk2, clk_max
+ };
+
+ static struct clk *clk[clk_max];
+@@ -140,20 +151,131 @@
+ { /* sentinel */ }
+ };
+
++static void init_ldb_clks(enum mx6q_clks new_parent)
++{
++ u32 reg;
++
++ /*
++ * Need to follow a strict procedure when changing the LDB
++ * clock, else we can introduce a glitch. Things to keep in
++ * mind:
++ * 1. The current and new parent clocks must be disabled.
++ * 2. The default clock for ldb_dio_clk is mmdc_ch1 which has
++ * no CG bit.
++ * 3. In the RTL implementation of the LDB_DI_CLK_SEL mux
++ * the top four options are in one mux and the PLL3 option along
++ * with another option is in the second mux. There is third mux
++ * used to decide between the first and second mux.
++ * The code below switches the parent to the bottom mux first
++ * and then manipulates the top mux. This ensures that no glitch
++ * will enter the divider.
++ *
++ * Need to disable MMDC_CH1 clock manually as there is no CG bit
++ * for this clock. The only way to disable this clock is to move
++ * it topll3_sw_clk and then to disable pll3_sw_clk
++ * Make sure periph2_clk2_sel is set to pll3_sw_clk
++ */
++ reg = readl_relaxed(ccm_base + 0x18);
++ reg &= ~(1 << 20);
++ writel_relaxed(reg, ccm_base + 0x18);
++
++ /*
++ * Set MMDC_CH1 mask bit.
++ */
++ reg = readl_relaxed(ccm_base + 0x4);
++ reg |= 1 << 16;
++ writel_relaxed(reg, ccm_base + 0x4);
++
++ /*
++ * Set the periph2_clk_sel to the top mux so that
++ * mmdc_ch1 is from pll3_sw_clk.
++ */
++ reg = readl_relaxed(ccm_base + 0x14);
++ reg |= 1 << 26;
++ writel_relaxed(reg, ccm_base + 0x14);
++
++ /*
++ * Wait for the clock switch.
++ */
++ while (readl_relaxed(ccm_base + 0x48))
++ ;
++
++ /*
++ * Disable pll3_sw_clk by selecting the bypass clock source.
++ */
++ reg = readl_relaxed(ccm_base + 0xc);
++ reg |= 1 << 0;
++ writel_relaxed(reg, ccm_base + 0xc);
++
++ /*
++ * Set the ldb_di0_clk and ldb_di1_clk to 111b.
++ */
++ reg = readl_relaxed(ccm_base + 0x2c);
++ reg |= ((7 << 9) | (7 << 12));
++ writel_relaxed(reg, ccm_base + 0x2c);
++
++ /*
++ * Set the ldb_di0_clk and ldb_di1_clk to 100b.
++ */
++ reg = readl_relaxed(ccm_base + 0x2c);
++ reg &= ~((7 << 9) | (7 << 12));
++ reg |= ((4 << 9) | (4 << 12));
++ writel_relaxed(reg, ccm_base + 0x2c);
++
++ /*
++ * Perform the LDB parent clock switch.
++ */
++ clk_set_parent(clk[ldb_di0_sel], clk[new_parent]);
++ clk_set_parent(clk[ldb_di1_sel], clk[new_parent]);
++
++ /*
++ * Unbypass pll3_sw_clk.
++ */
++ reg = readl_relaxed(ccm_base + 0xc);
++ reg &= ~(1 << 0);
++ writel_relaxed(reg, ccm_base + 0xc);
++
++ /*
++ * Set the periph2_clk_sel back to the bottom mux so that
++ * mmdc_ch1 is from its original parent.
++ */
++ reg = readl_relaxed(ccm_base + 0x14);
++ reg &= ~(1 << 26);
++ writel_relaxed(reg, ccm_base + 0x14);
++
++ /*
++ * Wait for the clock switch.
++ */
++ while (readl_relaxed(ccm_base + 0x48))
++ ;
++
++ /*
++ * Clear MMDC_CH1 mask bit.
++ */
++ reg = readl_relaxed(ccm_base + 0x4);
++ reg &= ~(1 << 16);
++ writel_relaxed(reg, ccm_base + 0x4);
++
++}
++
+ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ {
+ struct device_node *np;
+ void __iomem *base;
+ int i, irq;
+ int ret;
++ u32 reg;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[ckil] = imx_obtain_fixed_clock("ckil", 0);
+ clk[ckih] = imx_obtain_fixed_clock("ckih1", 0);
+ clk[osc] = imx_obtain_fixed_clock("osc", 0);
++ /* Clock source from external clock via ANACLK1/2 PADs */
++ clk[anaclk1] = imx_obtain_fixed_clock("anaclk1", 0);
++ clk[anaclk2] = imx_obtain_fixed_clock("anaclk2", 0);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+- base = of_iomap(np, 0);
++ anatop_base = base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
+@@ -165,13 +287,18 @@
+ }
+
+ /* type name parent_name base div_mask */
+- clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f);
+- clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1);
+- clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3);
+- clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f);
+- clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f);
+- clk[pll6_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3);
+- clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host","osc", base + 0x20, 0x3);
++ clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f, false);
++ clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1, false);
++ clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3, false);
++ clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "pll4_sel", base + 0x70, 0x7f, false);
++ clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f, false);
++ clk[pll6_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3, false);
++ clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3, false);
++
++ /* name reg shift width parent_names num_parents */
++ clk[lvds1_sel] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
++ clk[lvds2_sel] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
++ clk[pll4_sel] = imx_clk_mux("pll4_sel", base + 0x70, 14, 2, pll_av_sels, ARRAY_SIZE(pll_av_sels));
+
+ /*
+ * Bit 20 is the reserved and read-only bit, we do this only for:
+@@ -191,6 +318,11 @@
+
+ clk[sata_ref] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5);
+ clk[pcie_ref] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4);
++ /* NOTICE: The gate of the lvds1/2 in/out is used to select the clk direction */
++ clk[lvds1_in] = imx_clk_gate("lvds1_in", "anaclk1", base + 0x160, 12);
++ clk[lvds2_in] = imx_clk_gate("lvds2_in", "anaclk2", base + 0x160, 13);
++ clk[lvds1_out] = imx_clk_gate("lvds1_out", "lvds1_sel", base + 0x160, 10);
++ clk[lvds2_out] = imx_clk_gate("lvds2_out", "lvds2_sel", base + 0x160, 11);
+
+ clk[sata_ref_100m] = imx_clk_gate("sata_ref_100m", "sata_ref", base + 0xe0, 20);
+ clk[pcie_ref_125m] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19);
+@@ -199,18 +331,6 @@
+ base + 0xe0, 0, 2, 0, clk_enet_ref_table,
+ &imx_ccm_lock);
+
+- clk[lvds1_sel] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
+- clk[lvds2_sel] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
+-
+- /*
+- * lvds1_gate and lvds2_gate are pseudo-gates. Both can be
+- * independently configured as clock inputs or outputs. We treat
+- * the "output_enable" bit as a gate, even though it's really just
+- * enabling clock output.
+- */
+- clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "dummy", base + 0x160, 10);
+- clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "dummy", base + 0x160, 11);
+-
+ /* name parent_name reg idx */
+ clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
+ clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
+@@ -226,6 +346,8 @@
+ clk[pll3_80m] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clk[pll3_60m] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+ clk[twd] = imx_clk_fixed_factor("twd", "arm", 1, 2);
++ clk[gpt_3m] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
++ clk[video_27m] = imx_clk_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 20);
+
+ clk[pll4_post_div] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clk[pll4_audio_div] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+@@ -233,7 +355,7 @@
+ clk[pll5_video_div] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
+
+ np = ccm_node;
+- base = of_iomap(np, 0);
++ ccm_base = base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ imx6q_pm_set_ccm_base(base);
+@@ -258,14 +380,16 @@
+ clk[ipu2_sel] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
+ clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
+ clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
+- clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+- clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+- clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+- clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+- clk[ipu1_di0_sel] = imx_clk_mux("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels));
+- clk[ipu1_di1_sel] = imx_clk_mux("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels));
+- clk[ipu2_di0_sel] = imx_clk_mux("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels));
+- clk[ipu2_di1_sel] = imx_clk_mux("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels));
++ clk[ldb_di0_div_sel] = imx_clk_mux_flags("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels), CLK_SET_RATE_PARENT);
++ clk[ldb_di1_div_sel] = imx_clk_mux_flags("ldb_di1_div_sel", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di0_pre_sel] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di1_pre_sel] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di0_pre_sel] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di1_pre_sel] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di0_sel] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di1_sel] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di0_sel] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di1_sel] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
+ clk[hsi_tx_sel] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels));
+ clk[pcie_axi_sel] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
+ clk[ssi1_sel] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
+@@ -307,9 +431,9 @@
+ clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
+ clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
+ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+- clk[ldb_di0_podf] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0);
++ clk[ldb_di0_div_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
+ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+- clk[ldb_di1_podf] = imx_clk_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0);
++ clk[ldb_di1_div_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7);
+ clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
+ clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
+ clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
+@@ -344,6 +468,9 @@
+ /* name parent_name reg shift */
+ clk[apbh_dma] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4);
+ clk[asrc] = imx_clk_gate2("asrc", "asrc_podf", base + 0x68, 6);
++ clk[caam_mem] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
++ clk[caam_aclk] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
++ clk[caam_ipg] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
+ clk[can1_ipg] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
+ clk[can1_serial] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
+ clk[can2_ipg] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
+@@ -354,6 +481,8 @@
+ clk[ecspi4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6);
+ clk[ecspi5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8);
+ clk[enet] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10);
++ clk[epit1] = imx_clk_gate2("epit1", "ipg", base + 0x6c, 12);
++ clk[epit2] = imx_clk_gate2("epit2", "ipg", base + 0x6c, 14);
+ clk[esai] = imx_clk_gate2("esai", "esai_podf", base + 0x6c, 16);
+ clk[gpt_ipg] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20);
+ clk[gpt_ipg_per] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22);
+@@ -373,15 +502,16 @@
+ clk[i2c3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
+ clk[iim] = imx_clk_gate2("iim", "ipg", base + 0x70, 12);
+ clk[enfc] = imx_clk_gate2("enfc", "enfc_podf", base + 0x70, 14);
++ clk[tzasc2] = imx_clk_gate2("tzasc2", "mmdc_ch0_axi_podf", base + 0x70, 24);
+ clk[vdoa] = imx_clk_gate2("vdoa", "vdo_axi", base + 0x70, 26);
+ clk[ipu1] = imx_clk_gate2("ipu1", "ipu1_podf", base + 0x74, 0);
+ clk[ipu1_di0] = imx_clk_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2);
+ clk[ipu1_di1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4);
+ clk[ipu2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6);
+ clk[ipu2_di0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8);
+- clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
+- clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
++ clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12);
++ clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_div_sel", base + 0x74, 14);
+ clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16);
+ if (cpu_is_imx6dl())
+ /*
+@@ -413,6 +543,9 @@
+ clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18);
+ clk[ssi2_ipg] = imx_clk_gate2("ssi2_ipg", "ipg", base + 0x7c, 20);
+ clk[ssi3_ipg] = imx_clk_gate2("ssi3_ipg", "ipg", base + 0x7c, 22);
++ clk[ssi1] = imx_clk_gate2("ssi1", "ssi1_podf", base + 0x7c, 18);
++ clk[ssi2] = imx_clk_gate2("ssi2", "ssi2_podf", base + 0x7c, 20);
++ clk[ssi3] = imx_clk_gate2("ssi3", "ssi3_podf", base + 0x7c, 22);
+ clk[uart_ipg] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24);
+ clk[uart_serial] = imx_clk_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26);
+ clk[usboh3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
+@@ -431,25 +564,79 @@
+ pr_err("i.MX6q clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
++ /* Initialize clock gate status */
++ writel_relaxed(1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(1) |
++ 3 << CCM_CCGR_OFFSET(0), base + 0x68);
++ if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0)
++ writel_relaxed(3 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10), base + 0x6c);
++ else
++ writel_relaxed(3 << CCM_CCGR_OFFSET(10), base + 0x6c);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(12) |
++ 3 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10) |
++ 3 << CCM_CCGR_OFFSET(9) |
++ 3 << CCM_CCGR_OFFSET(8), base + 0x70);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(14) |
++ 1 << CCM_CCGR_OFFSET(13) |
++ 3 << CCM_CCGR_OFFSET(12) |
++ 1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10), base + 0x74);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(7) |
++ 3 << CCM_CCGR_OFFSET(6) |
++ 3 << CCM_CCGR_OFFSET(4), base + 0x78);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(0), base + 0x7c);
++ writel_relaxed(0, base + 0x80);
++
++ /* Make sure PFDs are disabled at boot. */
++ reg = readl_relaxed(anatop_base + 0x100);
++ /* Cannot disable pll2_pfd2_396M, as it is the MMDC clock in iMX6DL */
++ if (cpu_is_imx6dl())
++ reg |= 0x80008080;
++ else
++ reg |= 0x80808080;
++ writel_relaxed(reg, anatop_base + 0x100);
++
++ /* Disable PLL3 PFDs. */
++ reg = readl_relaxed(anatop_base + 0xF0);
++ reg |= 0x80808080;
++ writel_relaxed(reg, anatop_base + 0xF0);
++
++ /* Make sure PLLs is disabled */
++ reg = readl_relaxed(anatop_base + 0xA0);
++ reg &= ~(1 << 13);
++ writel_relaxed(reg, anatop_base + 0xA0);
++
+ clk_data.clks = clk;
+ clk_data.clk_num = ARRAY_SIZE(clk);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
++ clk_register_clkdev(clk[gpt_3m], "gpt_3m", "imx-gpt.0");
+ clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
+ clk_register_clkdev(clk[ahb], "ahb", NULL);
+ clk_register_clkdev(clk[cko1], "cko1", NULL);
+ clk_register_clkdev(clk[arm], NULL, "cpu0");
+- clk_register_clkdev(clk[pll4_post_div], "pll4_post_div", NULL);
+- clk_register_clkdev(clk[pll4_audio], "pll4_audio", NULL);
++ clk_register_clkdev(clk[pll4_audio_div], "pll4_audio_div", NULL);
++ clk_register_clkdev(clk[pll4_sel], "pll4_sel", NULL);
++ clk_register_clkdev(clk[lvds2_in], "lvds2_in", NULL);
++ clk_register_clkdev(clk[esai], "esai", NULL);
+
+- if ((imx_get_soc_revision() != IMX_CHIP_REVISION_1_0) ||
+- cpu_is_imx6dl()) {
+- clk_set_parent(clk[ldb_di0_sel], clk[pll5_video_div]);
+- clk_set_parent(clk[ldb_di1_sel], clk[pll5_video_div]);
++ if (cpu_is_imx6dl()) {
++ clk_set_parent(clk[ipu1_sel], clk[pll3_pfd1_540m]);
+ }
+
++ clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
++ clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
++ clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
++ clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
++
+ /*
+ * The gpmi needs 100MHz frequency in the EDO/Sync mode,
+ * We can not get the 100MHz from the pll2_pfd0_352m.
+@@ -457,6 +644,19 @@
+ */
+ clk_set_parent(clk[enfc_sel], clk[pll2_pfd2_396m]);
+
++ /* Set the parent clks of PCIe lvds1 and pcie_axi to be sata ref, axi */
++ if (clk_set_parent(clk[lvds1_sel], clk[sata_ref]))
++ pr_err("Failed to set PCIe bus parent clk.\n");
++ if (clk_set_parent(clk[pcie_axi_sel], clk[axi]))
++ pr_err("Failed to set PCIe parent clk.\n");
++
++ /* gpu clock initilazation */
++ clk_set_parent(clk[gpu3d_shader_sel], clk[pll2_pfd1_594m]);
++ clk_set_rate(clk[gpu3d_shader], 594000000);
++ clk_set_parent(clk[gpu3d_core_sel], clk[mmdc_ch0_axi]);
++ clk_set_rate(clk[gpu3d_core], 528000000);
++ clk_set_parent(clk[gpu2d_core_sel], clk[pll3_usb_otg]);
++
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clk[clks_init_on[i]]);
+
+@@ -465,6 +665,25 @@
+ clk_prepare_enable(clk[usbphy2_gate]);
+ }
+
++ /* ipu clock initialization */
++ init_ldb_clks(pll2_pfd0_352m);
++ clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
++ clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
++ clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
++ clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
++ if (cpu_is_imx6dl()) {
++ clk_set_rate(clk[pll3_pfd1_540m], 540000000);
++ clk_set_parent(clk[ipu1_sel], clk[pll3_pfd1_540m]);
++ clk_set_parent(clk[axi_sel], clk[pll3_pfd1_540m]);
++ } else if (cpu_is_imx6q()) {
++ clk_set_parent(clk[ipu1_sel], clk[mmdc_ch0_axi]);
++ clk_set_parent(clk[ipu2_sel], clk[mmdc_ch0_axi]);
++ }
++
+ /*
+ * Let's initially set up CLKO with OSC24M, since this configuration
+ * is widely used by imx6q board designs to clock audio codec.
+@@ -482,6 +701,34 @@
+ if (IS_ENABLED(CONFIG_PCI_IMX6))
+ clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+
++ /* Audio clocks */
++ clk_set_parent(clk[ssi1_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[ssi2_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[ssi3_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[esai_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[spdif_sel], clk[pll3_pfd3_454m]);
++ clk_set_parent(clk[asrc_sel], clk[pll3_usb_otg]);
++ clk_set_rate(clk[asrc_sel], 7500000);
++
++ /* Set pll4_audio to a value that can derive 5K-88.2KHz and 8K-96KHz */
++ clk_set_rate(clk[pll4_audio_div], 541900800);
++
++#ifdef CONFIG_MX6_VPU_352M
++ /*
++ * If VPU 352M is enabled, then PLL2_PDF2 need to be
++ * set to 352M, cpufreq will be disabled as VDDSOC/PU
++ * need to be at highest voltage, scaling cpu freq is
++ * not saving any power, and busfreq will be also disabled
++ * as the PLL2_PFD2 is not at default freq, in a word,
++ * all modules that sourceing clk from PLL2_PFD2 will
++ * be impacted.
++ */
++ clk_set_rate(clk[pll2_pfd2_396m], 352000000);
++ clk_set_parent(clk[vpu_axi_sel], clk[pll2_pfd2_396m]);
++ pr_info("VPU 352M is enabled!\n");
++#endif
++
++
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
+diff -Nur linux-3.14.36/arch/arm/mach-imx/clk-imx6q.c.orig linux-openelec/arch/arm/mach-imx/clk-imx6q.c.orig
+--- linux-3.14.36/arch/arm/mach-imx/clk-imx6q.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/clk-imx6q.c.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,725 @@
++/*
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/clk.h>
++#include <linux/clkdev.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++
++#include "clk.h"
++#include "common.h"
++#include "hardware.h"
++
++#define CCM_CCGR_OFFSET(index) (index * 2)
++
++static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
++static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
++static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
++static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy", };
++static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
++static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
++static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
++static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
++static const char *audio_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
++static const char *gpu_axi_sels[] = { "axi", "ahb", };
++static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
++static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
++static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
++static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
++static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
++static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
++static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
++static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
++static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
++static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
++static const char *ipu2_di0_sels[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
++static const char *ipu2_di1_sels[] = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
++static const char *hsi_tx_sels[] = { "pll3_120m", "pll2_pfd2_396m", };
++static const char *pcie_axi_sels[] = { "axi", "ahb", };
++static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", };
++static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
++static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
++static const char *emi_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
++static const char *emi_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
++static const char *vdo_axi_sels[] = { "axi", "ahb", };
++static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
++static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
++ "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
++ "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio_div", };
++static const char *cko2_sels[] = {
++ "mmdc_ch0_axi", "mmdc_ch1_axi", "usdhc4", "usdhc1",
++ "gpu2d_axi", "dummy", "ecspi_root", "gpu3d_axi",
++ "usdhc3", "dummy", "arm", "ipu1",
++ "ipu2", "vdo_axi", "osc", "gpu2d_core",
++ "gpu3d_core", "usdhc2", "ssi1", "ssi2",
++ "ssi3", "gpu3d_shader", "vpu_axi", "can_root",
++ "ldb_di0", "ldb_di1", "esai", "eim_slow",
++ "uart_serial", "spdif", "asrc", "hsi_tx",
++};
++static const char *cko_sels[] = { "cko1", "cko2", };
++static const char *lvds_sels[] = {
++ "dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
++ "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
++ "pcie_ref", "sata_ref",
++};
++static const char *pll_av_sels[] = { "osc", "lvds1_in", "lvds2_in", "dummy", };
++static void __iomem *anatop_base;
++static void __iomem *ccm_base;
++
++
++enum mx6q_clks {
++ dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
++ pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
++ pll2_198m, pll3_120m, pll3_80m, pll3_60m, twd, step, pll1_sw,
++ periph_pre, periph2_pre, periph_clk2_sel, periph2_clk2_sel, axi_sel,
++ esai_sel, asrc_sel, spdif_sel, gpu2d_axi, gpu3d_axi, gpu2d_core_sel,
++ gpu3d_core_sel, gpu3d_shader_sel, ipu1_sel, ipu2_sel, ldb_di0_sel,
++ ldb_di1_sel, ipu1_di0_pre_sel, ipu1_di1_pre_sel, ipu2_di0_pre_sel,
++ ipu2_di1_pre_sel, ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel,
++ ipu2_di1_sel, hsi_tx_sel, pcie_axi_sel, ssi1_sel, ssi2_sel, ssi3_sel,
++ usdhc1_sel, usdhc2_sel, usdhc3_sel, usdhc4_sel, enfc_sel, emi_sel,
++ emi_slow_sel, vdo_axi_sel, vpu_axi_sel, cko1_sel, periph, periph2,
++ periph_clk2, periph2_clk2, ipg, ipg_per, esai_pred, esai_podf,
++ asrc_pred, asrc_podf, spdif_pred, spdif_podf, can_root, ecspi_root,
++ gpu2d_core_podf, gpu3d_core_podf, gpu3d_shader, ipu1_podf, ipu2_podf,
++ ldb_di0_podf_unused, ldb_di1_podf_unused, ipu1_di0_pre, ipu1_di1_pre,
++ ipu2_di0_pre, ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf,
++ ssi2_pred, ssi2_podf, ssi3_pred, ssi3_podf, uart_serial_podf,
++ usdhc1_podf, usdhc2_podf, usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf,
++ emi_podf, emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
++ mmdc_ch1_axi_podf, arm, ahb, apbh_dma, asrc, can1_ipg, can1_serial,
++ can2_ipg, can2_serial, ecspi1, ecspi2, ecspi3, ecspi4, ecspi5, enet,
++ esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
++ hdmi_isfr, i2c1, i2c2, i2c3, iim, enfc, ipu1, ipu1_di0, ipu1_di1, ipu2,
++ ipu2_di0, ldb_di0, ldb_di1, ipu2_di1, hsi_tx, mlb, mmdc_ch0_axi,
++ mmdc_ch1_axi, ocram, openvg_axi, pcie_axi, pwm1, pwm2, pwm3, pwm4, per1_bch,
++ gpmi_bch_apb, gpmi_bch, gpmi_io, gpmi_apb, sata, sdma, spba, ssi1,
++ ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
++ usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
++ pll4_audio, pll5_video, pll8_mlb, pll7_usb_host, pll6_enet, ssi1_ipg,
++ ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
++ sata_ref, sata_ref_100m, pcie_ref, pcie_ref_125m, enet_ref, usbphy1_gate,
++ usbphy2_gate, pll4_post_div, pll5_post_div, pll5_video_div, eim_slow,
++ spdif, cko2_sel, cko2_podf, cko2, cko, vdoa, pll4_audio_div,
++ lvds1_sel, lvds2_sel, lvds1_gate, lvds2_gate, gpt_3m, video_27m,
++ ldb_di0_div_7, ldb_di1_div_7, ldb_di0_div_sel, ldb_di1_div_sel,
++ caam_mem, caam_aclk, caam_ipg, epit1, epit2, tzasc2, lvds1_in, lvds1_out,
++ pll4_sel, lvds2_in, lvds2_out, anaclk1, anaclk2, clk_max
++};
++
++static struct clk *clk[clk_max];
++static struct clk_onecell_data clk_data;
++
++static enum mx6q_clks const clks_init_on[] __initconst = {
++ mmdc_ch0_axi, rom, arm,
++};
++
++static struct clk_div_table clk_enet_ref_table[] = {
++ { .val = 0, .div = 20, },
++ { .val = 1, .div = 10, },
++ { .val = 2, .div = 5, },
++ { .val = 3, .div = 4, },
++ { /* sentinel */ }
++};
++
++static struct clk_div_table post_div_table[] = {
++ { .val = 2, .div = 1, },
++ { .val = 1, .div = 2, },
++ { .val = 0, .div = 4, },
++ { /* sentinel */ }
++};
++
++static struct clk_div_table video_div_table[] = {
++ { .val = 0, .div = 1, },
++ { .val = 1, .div = 2, },
++ { .val = 2, .div = 1, },
++ { .val = 3, .div = 4, },
++ { /* sentinel */ }
++};
++
++static void init_ldb_clks(enum mx6q_clks new_parent)
++{
++ u32 reg;
++
++ /*
++ * Need to follow a strict procedure when changing the LDB
++ * clock, else we can introduce a glitch. Things to keep in
++ * mind:
++ * 1. The current and new parent clocks must be disabled.
++ * 2. The default clock for ldb_dio_clk is mmdc_ch1 which has
++ * no CG bit.
++ * 3. In the RTL implementation of the LDB_DI_CLK_SEL mux
++ * the top four options are in one mux and the PLL3 option along
++ * with another option is in the second mux. There is third mux
++ * used to decide between the first and second mux.
++ * The code below switches the parent to the bottom mux first
++ * and then manipulates the top mux. This ensures that no glitch
++ * will enter the divider.
++ *
++ * Need to disable MMDC_CH1 clock manually as there is no CG bit
++ * for this clock. The only way to disable this clock is to move
++ * it topll3_sw_clk and then to disable pll3_sw_clk
++ * Make sure periph2_clk2_sel is set to pll3_sw_clk
++ */
++ reg = readl_relaxed(ccm_base + 0x18);
++ reg &= ~(1 << 20);
++ writel_relaxed(reg, ccm_base + 0x18);
++
++ /*
++ * Set MMDC_CH1 mask bit.
++ */
++ reg = readl_relaxed(ccm_base + 0x4);
++ reg |= 1 << 16;
++ writel_relaxed(reg, ccm_base + 0x4);
++
++ /*
++ * Set the periph2_clk_sel to the top mux so that
++ * mmdc_ch1 is from pll3_sw_clk.
++ */
++ reg = readl_relaxed(ccm_base + 0x14);
++ reg |= 1 << 26;
++ writel_relaxed(reg, ccm_base + 0x14);
++
++ /*
++ * Wait for the clock switch.
++ */
++ while (readl_relaxed(ccm_base + 0x48))
++ ;
++
++ /*
++ * Disable pll3_sw_clk by selecting the bypass clock source.
++ */
++ reg = readl_relaxed(ccm_base + 0xc);
++ reg |= 1 << 0;
++ writel_relaxed(reg, ccm_base + 0xc);
++
++ /*
++ * Set the ldb_di0_clk and ldb_di1_clk to 111b.
++ */
++ reg = readl_relaxed(ccm_base + 0x2c);
++ reg |= ((7 << 9) | (7 << 12));
++ writel_relaxed(reg, ccm_base + 0x2c);
++
++ /*
++ * Set the ldb_di0_clk and ldb_di1_clk to 100b.
++ */
++ reg = readl_relaxed(ccm_base + 0x2c);
++ reg &= ~((7 << 9) | (7 << 12));
++ reg |= ((4 << 9) | (4 << 12));
++ writel_relaxed(reg, ccm_base + 0x2c);
++
++ /*
++ * Perform the LDB parent clock switch.
++ */
++ clk_set_parent(clk[ldb_di0_sel], clk[new_parent]);
++ clk_set_parent(clk[ldb_di1_sel], clk[new_parent]);
++
++ /*
++ * Unbypass pll3_sw_clk.
++ */
++ reg = readl_relaxed(ccm_base + 0xc);
++ reg &= ~(1 << 0);
++ writel_relaxed(reg, ccm_base + 0xc);
++
++ /*
++ * Set the periph2_clk_sel back to the bottom mux so that
++ * mmdc_ch1 is from its original parent.
++ */
++ reg = readl_relaxed(ccm_base + 0x14);
++ reg &= ~(1 << 26);
++ writel_relaxed(reg, ccm_base + 0x14);
++
++ /*
++ * Wait for the clock switch.
++ */
++ while (readl_relaxed(ccm_base + 0x48))
++ ;
++
++ /*
++ * Clear MMDC_CH1 mask bit.
++ */
++ reg = readl_relaxed(ccm_base + 0x4);
++ reg &= ~(1 << 16);
++ writel_relaxed(reg, ccm_base + 0x4);
++
++}
++
++static void __init imx6q_clocks_init(struct device_node *ccm_node)
++{
++ struct device_node *np;
++ void __iomem *base;
++ int i, irq;
++ int ret;
++ u32 reg;
++
++ clk[dummy] = imx_clk_fixed("dummy", 0);
++ clk[ckil] = imx_obtain_fixed_clock("ckil", 0);
++ clk[ckih] = imx_obtain_fixed_clock("ckih1", 0);
++ clk[osc] = imx_obtain_fixed_clock("osc", 0);
++ /* Clock source from external clock via ANACLK1/2 PADs */
++ clk[anaclk1] = imx_obtain_fixed_clock("anaclk1", 0);
++ clk[anaclk2] = imx_obtain_fixed_clock("anaclk2", 0);
++
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
++ anatop_base = base = of_iomap(np, 0);
++ WARN_ON(!base);
++
++ /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
++ if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
++ post_div_table[1].div = 1;
++ post_div_table[2].div = 1;
++ video_div_table[1].div = 1;
++ video_div_table[2].div = 1;
++ };
++
++ /* type name parent_name base div_mask */
++ clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f, false);
++ clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1, false);
++ clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3, false);
++ clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "pll4_sel", base + 0x70, 0x7f, false);
++ clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f, false);
++ clk[pll6_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3, false);
++ clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3, false);
++
++ /* name reg shift width parent_names num_parents */
++ clk[lvds1_sel] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
++ clk[lvds2_sel] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
++ clk[pll4_sel] = imx_clk_mux("pll4_sel", base + 0x70, 14, 2, pll_av_sels, ARRAY_SIZE(pll_av_sels));
++
++ /*
++ * Bit 20 is the reserved and read-only bit, we do this only for:
++ * - Do nothing for usbphy clk_enable/disable
++ * - Keep refcount when do usbphy clk_enable/disable, in that case,
++ * the clk framework may need to enable/disable usbphy's parent
++ */
++ clk[usbphy1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20);
++ clk[usbphy2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
++
++ /*
++ * usbphy*_gate needs to be on after system boots up, and software
++ * never needs to control it anymore.
++ */
++ clk[usbphy1_gate] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6);
++ clk[usbphy2_gate] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6);
++
++ clk[sata_ref] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5);
++ clk[pcie_ref] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4);
++ /* NOTICE: The gate of the lvds1/2 in/out is used to select the clk direction */
++ clk[lvds1_in] = imx_clk_gate("lvds1_in", "anaclk1", base + 0x160, 12);
++ clk[lvds2_in] = imx_clk_gate("lvds2_in", "anaclk2", base + 0x160, 13);
++ clk[lvds1_out] = imx_clk_gate("lvds1_out", "lvds1_sel", base + 0x160, 10);
++ clk[lvds2_out] = imx_clk_gate("lvds2_out", "lvds2_sel", base + 0x160, 11);
++
++ clk[sata_ref_100m] = imx_clk_gate("sata_ref_100m", "sata_ref", base + 0xe0, 20);
++ clk[pcie_ref_125m] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19);
++
++ clk[enet_ref] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0,
++ base + 0xe0, 0, 2, 0, clk_enet_ref_table,
++ &imx_ccm_lock);
++
++ /* name parent_name reg idx */
++ clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
++ clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
++ clk[pll2_pfd2_396m] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
++ clk[pll3_pfd0_720m] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
++ clk[pll3_pfd1_540m] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
++ clk[pll3_pfd2_508m] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
++ clk[pll3_pfd3_454m] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
++
++ /* name parent_name mult div */
++ clk[pll2_198m] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
++ clk[pll3_120m] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
++ clk[pll3_80m] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
++ clk[pll3_60m] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
++ clk[twd] = imx_clk_fixed_factor("twd", "arm", 1, 2);
++ clk[gpt_3m] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
++ clk[video_27m] = imx_clk_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 20);
++
++ clk[pll4_post_div] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
++ clk[pll4_audio_div] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
++ clk[pll5_post_div] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
++ clk[pll5_video_div] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
++
++ np = ccm_node;
++ ccm_base = base = of_iomap(np, 0);
++ WARN_ON(!base);
++
++ imx6q_pm_set_ccm_base(base);
++
++ /* name reg shift width parent_names num_parents */
++ clk[step] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels));
++ clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels));
++ clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
++ clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
++ clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
++ clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
++ clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels));
++ clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
++ clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
++ clk[spdif_sel] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
++ clk[gpu2d_axi] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
++ clk[gpu3d_axi] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
++ clk[gpu2d_core_sel] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
++ clk[gpu3d_core_sel] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels));
++ clk[gpu3d_shader_sel] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
++ clk[ipu1_sel] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
++ clk[ipu2_sel] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
++ clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
++ clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
++ clk[ldb_di0_div_sel] = imx_clk_mux_flags("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels), CLK_SET_RATE_PARENT);
++ clk[ldb_di1_div_sel] = imx_clk_mux_flags("ldb_di1_div_sel", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di0_pre_sel] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di1_pre_sel] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di0_pre_sel] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di1_pre_sel] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di0_sel] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di1_sel] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di0_sel] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di1_sel] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
++ clk[hsi_tx_sel] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels));
++ clk[pcie_axi_sel] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
++ clk[ssi1_sel] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
++ clk[ssi2_sel] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
++ clk[ssi3_sel] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
++ clk[usdhc1_sel] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
++ clk[usdhc2_sel] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
++ clk[usdhc3_sel] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
++ clk[usdhc4_sel] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
++ clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
++ clk[emi_sel] = imx_clk_fixup_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels), imx_cscmr1_fixup);
++ clk[emi_slow_sel] = imx_clk_fixup_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels), imx_cscmr1_fixup);
++ clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
++ clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
++ clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
++ clk[cko2_sel] = imx_clk_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels));
++ clk[cko] = imx_clk_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels));
++
++ /* name reg shift width busy: reg, shift parent_names num_parents */
++ clk[periph] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
++ clk[periph2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
++
++ /* name parent_name reg shift width */
++ clk[periph_clk2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
++ clk[periph2_clk2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
++ clk[ipg] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
++ clk[ipg_per] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
++ clk[esai_pred] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3);
++ clk[esai_podf] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3);
++ clk[asrc_pred] = imx_clk_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3);
++ clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3);
++ clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
++ clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
++ clk[can_root] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
++ clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
++ clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
++ clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
++ clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
++ clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
++ clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
++ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
++ clk[ldb_di0_div_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
++ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
++ clk[ldb_di1_div_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7);
++ clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
++ clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
++ clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
++ clk[ipu2_di1_pre] = imx_clk_divider("ipu2_di1_pre", "ipu2_di1_pre_sel", base + 0x38, 12, 3);
++ clk[hsi_tx_podf] = imx_clk_divider("hsi_tx_podf", "hsi_tx_sel", base + 0x30, 29, 3);
++ clk[ssi1_pred] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3);
++ clk[ssi1_podf] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6);
++ clk[ssi2_pred] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3);
++ clk[ssi2_podf] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
++ clk[ssi3_pred] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
++ clk[ssi3_podf] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
++ clk[uart_serial_podf] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
++ clk[usdhc1_podf] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
++ clk[usdhc2_podf] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
++ clk[usdhc3_podf] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
++ clk[usdhc4_podf] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
++ clk[enfc_pred] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
++ clk[enfc_podf] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
++ clk[emi_podf] = imx_clk_fixup_divider("emi_podf", "emi_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup);
++ clk[emi_slow_podf] = imx_clk_fixup_divider("emi_slow_podf", "emi_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup);
++ clk[vpu_axi_podf] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3);
++ clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3);
++ clk[cko2_podf] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3);
++
++ /* name parent_name reg shift width busy: reg, shift */
++ clk[axi] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
++ clk[mmdc_ch0_axi_podf] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4);
++ clk[mmdc_ch1_axi_podf] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
++ clk[arm] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
++ clk[ahb] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
++
++ /* name parent_name reg shift */
++ clk[apbh_dma] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4);
++ clk[asrc] = imx_clk_gate2("asrc", "asrc_podf", base + 0x68, 6);
++ clk[caam_mem] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
++ clk[caam_aclk] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
++ clk[caam_ipg] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
++ clk[can1_ipg] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
++ clk[can1_serial] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
++ clk[can2_ipg] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
++ clk[can2_serial] = imx_clk_gate2("can2_serial", "can_root", base + 0x68, 20);
++ clk[ecspi1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0);
++ clk[ecspi2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2);
++ clk[ecspi3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4);
++ clk[ecspi4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6);
++ clk[ecspi5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8);
++ clk[enet] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10);
++ clk[epit1] = imx_clk_gate2("epit1", "ipg", base + 0x6c, 12);
++ clk[epit2] = imx_clk_gate2("epit2", "ipg", base + 0x6c, 14);
++ clk[esai] = imx_clk_gate2("esai", "esai_podf", base + 0x6c, 16);
++ clk[gpt_ipg] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20);
++ clk[gpt_ipg_per] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22);
++ if (cpu_is_imx6dl())
++ /*
++ * The multiplexer and divider of imx6q clock gpu3d_shader get
++ * redefined/reused as gpu2d_core_sel and gpu2d_core_podf on imx6dl.
++ */
++ clk[gpu2d_core] = imx_clk_gate2("gpu2d_core", "gpu3d_shader", base + 0x6c, 24);
++ else
++ clk[gpu2d_core] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
++ clk[gpu3d_core] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
++ clk[hdmi_iahb] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
++ clk[hdmi_isfr] = imx_clk_gate2("hdmi_isfr", "pll3_pfd1_540m", base + 0x70, 4);
++ clk[i2c1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
++ clk[i2c2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
++ clk[i2c3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
++ clk[iim] = imx_clk_gate2("iim", "ipg", base + 0x70, 12);
++ clk[enfc] = imx_clk_gate2("enfc", "enfc_podf", base + 0x70, 14);
++ clk[tzasc2] = imx_clk_gate2("tzasc2", "mmdc_ch0_axi_podf", base + 0x70, 24);
++ clk[vdoa] = imx_clk_gate2("vdoa", "vdo_axi", base + 0x70, 26);
++ clk[ipu1] = imx_clk_gate2("ipu1", "ipu1_podf", base + 0x74, 0);
++ clk[ipu1_di0] = imx_clk_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2);
++ clk[ipu1_di1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4);
++ clk[ipu2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6);
++ clk[ipu2_di0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8);
++ clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
++ clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12);
++ clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_div_sel", base + 0x74, 14);
++ clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16);
++ if (cpu_is_imx6dl())
++ /*
++ * The multiplexer and divider of the imx6q clock gpu2d get
++ * redefined/reused as mlb_sys_sel and mlb_sys_clk_podf on imx6dl.
++ */
++ clk[mlb] = imx_clk_gate2("mlb", "gpu2d_core_podf", base + 0x74, 18);
++ else
++ clk[mlb] = imx_clk_gate2("mlb", "axi", base + 0x74, 18);
++ clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
++ clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
++ clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
++ clk[openvg_axi] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30);
++ clk[pcie_axi] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0);
++ clk[per1_bch] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12);
++ clk[pwm1] = imx_clk_gate2("pwm1", "ipg_per", base + 0x78, 16);
++ clk[pwm2] = imx_clk_gate2("pwm2", "ipg_per", base + 0x78, 18);
++ clk[pwm3] = imx_clk_gate2("pwm3", "ipg_per", base + 0x78, 20);
++ clk[pwm4] = imx_clk_gate2("pwm4", "ipg_per", base + 0x78, 22);
++ clk[gpmi_bch_apb] = imx_clk_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24);
++ clk[gpmi_bch] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
++ clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
++ clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
++ clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
++ clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
++ clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
++ clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
++ clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
++ clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18);
++ clk[ssi2_ipg] = imx_clk_gate2("ssi2_ipg", "ipg", base + 0x7c, 20);
++ clk[ssi3_ipg] = imx_clk_gate2("ssi3_ipg", "ipg", base + 0x7c, 22);
++ clk[ssi1] = imx_clk_gate2("ssi1", "ssi1_podf", base + 0x7c, 18);
++ clk[ssi2] = imx_clk_gate2("ssi2", "ssi2_podf", base + 0x7c, 20);
++ clk[ssi3] = imx_clk_gate2("ssi3", "ssi3_podf", base + 0x7c, 22);
++ clk[uart_ipg] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24);
++ clk[uart_serial] = imx_clk_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26);
++ clk[usboh3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
++ clk[usdhc1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
++ clk[usdhc2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
++ clk[usdhc3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
++ clk[usdhc4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
++ clk[eim_slow] = imx_clk_gate2("eim_slow", "emi_slow_podf", base + 0x80, 10);
++ clk[vdo_axi] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12);
++ clk[vpu_axi] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14);
++ clk[cko1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7);
++ clk[cko2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24);
++
++ for (i = 0; i < ARRAY_SIZE(clk); i++)
++ if (IS_ERR(clk[i]))
++ pr_err("i.MX6q clk %d: register failed with %ld\n",
++ i, PTR_ERR(clk[i]));
++
++ /* Initialize clock gate status */
++ writel_relaxed(1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(1) |
++ 3 << CCM_CCGR_OFFSET(0), base + 0x68);
++ if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0)
++ writel_relaxed(3 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10), base + 0x6c);
++ else
++ writel_relaxed(3 << CCM_CCGR_OFFSET(10), base + 0x6c);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(12) |
++ 3 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10) |
++ 3 << CCM_CCGR_OFFSET(9) |
++ 3 << CCM_CCGR_OFFSET(8), base + 0x70);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(14) |
++ 1 << CCM_CCGR_OFFSET(13) |
++ 3 << CCM_CCGR_OFFSET(12) |
++ 1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10), base + 0x74);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(7) |
++ 3 << CCM_CCGR_OFFSET(6) |
++ 3 << CCM_CCGR_OFFSET(4), base + 0x78);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(0), base + 0x7c);
++ writel_relaxed(0, base + 0x80);
++
++ /* Make sure PFDs are disabled at boot. */
++ reg = readl_relaxed(anatop_base + 0x100);
++ /* Cannot disable pll2_pfd2_396M, as it is the MMDC clock in iMX6DL */
++ if (cpu_is_imx6dl())
++ reg |= 0x80008080;
++ else
++ reg |= 0x80808080;
++ writel_relaxed(reg, anatop_base + 0x100);
++
++ /* Disable PLL3 PFDs. */
++ reg = readl_relaxed(anatop_base + 0xF0);
++ reg |= 0x80808080;
++ writel_relaxed(reg, anatop_base + 0xF0);
++
++ /* Make sure PLLs is disabled */
++ reg = readl_relaxed(anatop_base + 0xA0);
++ reg &= ~(1 << 13);
++ writel_relaxed(reg, anatop_base + 0xA0);
++
++ clk_data.clks = clk;
++ clk_data.clk_num = ARRAY_SIZE(clk);
++ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
++
++ clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
++ clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
++ clk_register_clkdev(clk[gpt_3m], "gpt_3m", "imx-gpt.0");
++ clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
++ clk_register_clkdev(clk[ahb], "ahb", NULL);
++ clk_register_clkdev(clk[cko1], "cko1", NULL);
++ clk_register_clkdev(clk[arm], NULL, "cpu0");
++ clk_register_clkdev(clk[pll4_audio_div], "pll4_audio_div", NULL);
++ clk_register_clkdev(clk[pll4_sel], "pll4_sel", NULL);
++ clk_register_clkdev(clk[lvds2_in], "lvds2_in", NULL);
++ clk_register_clkdev(clk[esai], "esai", NULL);
++
++ if (cpu_is_imx6dl()) {
++ clk_set_parent(clk[ipu1_sel], clk[pll3_pfd1_540m]);
++ }
++
++ clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
++ clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
++ clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
++ clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
++
++ /*
++ * The gpmi needs 100MHz frequency in the EDO/Sync mode,
++ * We can not get the 100MHz from the pll2_pfd0_352m.
++ * So choose pll2_pfd2_396m as enfc_sel's parent.
++ */
++ clk_set_parent(clk[enfc_sel], clk[pll2_pfd2_396m]);
++
++ /* Set the parent clks of PCIe lvds1 and pcie_axi to be sata ref, axi */
++ if (clk_set_parent(clk[lvds1_sel], clk[sata_ref]))
++ pr_err("Failed to set PCIe bus parent clk.\n");
++ if (clk_set_parent(clk[pcie_axi_sel], clk[axi]))
++ pr_err("Failed to set PCIe parent clk.\n");
++
++ /* gpu clock initilazation */
++ clk_set_parent(clk[gpu3d_shader_sel], clk[pll2_pfd1_594m]);
++ clk_set_rate(clk[gpu3d_shader], 594000000);
++ clk_set_parent(clk[gpu3d_core_sel], clk[mmdc_ch0_axi]);
++ clk_set_rate(clk[gpu3d_core], 528000000);
++ clk_set_parent(clk[gpu2d_core_sel], clk[pll3_usb_otg]);
++
++ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
++ clk_prepare_enable(clk[clks_init_on[i]]);
++
++ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
++ clk_prepare_enable(clk[usbphy1_gate]);
++ clk_prepare_enable(clk[usbphy2_gate]);
++ }
++
++ /* ipu clock initialization */
++ init_ldb_clks(pll2_pfd0_352m);
++ clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
++ clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
++ clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
++ clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
++ if (cpu_is_imx6dl()) {
++ clk_set_rate(clk[pll3_pfd1_540m], 540000000);
++ clk_set_parent(clk[ipu1_sel], clk[pll3_pfd1_540m]);
++ clk_set_parent(clk[axi_sel], clk[pll3_pfd1_540m]);
++ } else if (cpu_is_imx6q()) {
++ clk_set_parent(clk[ipu1_sel], clk[mmdc_ch0_axi]);
++ clk_set_parent(clk[ipu2_sel], clk[mmdc_ch0_axi]);
++ }
++
++ /*
++ * Let's initially set up CLKO with OSC24M, since this configuration
++ * is widely used by imx6q board designs to clock audio codec.
++ */
++ ret = clk_set_parent(clk[cko2_sel], clk[osc]);
++ if (!ret)
++ ret = clk_set_parent(clk[cko], clk[cko2]);
++ if (ret)
++ pr_warn("failed to set up CLKO: %d\n", ret);
++
++ /* Audio-related clocks configuration */
++ clk_set_parent(clk[spdif_sel], clk[pll3_pfd3_454m]);
++
++ /* All existing boards with PCIe use LVDS1 */
++ if (IS_ENABLED(CONFIG_PCI_IMX6))
++ clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
++
++ /* Audio clocks */
++ clk_set_parent(clk[ssi1_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[ssi2_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[ssi3_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[esai_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[spdif_sel], clk[pll3_pfd3_454m]);
++ clk_set_parent(clk[asrc_sel], clk[pll3_usb_otg]);
++ clk_set_rate(clk[asrc_sel], 7500000);
++
++ /* Set pll4_audio to a value that can derive 5K-88.2KHz and 8K-96KHz */
++ clk_set_rate(clk[pll4_audio_div], 541900800);
++
++ /* Set initial power mode */
++ imx6q_set_lpm(WAIT_CLOCKED);
++
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
++ base = of_iomap(np, 0);
++ WARN_ON(!base);
++ irq = irq_of_parse_and_map(np, 0);
++ mxc_timer_init(base, irq);
++}
++CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
+diff -Nur linux-3.14.36/arch/arm/mach-imx/clk-imx6sl.c linux-openelec/arch/arm/mach-imx/clk-imx6sl.c
+--- linux-3.14.36/arch/arm/mach-imx/clk-imx6sl.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/clk-imx6sl.c 2015-05-06 12:05:43.000000000 -0500
+@@ -7,9 +7,29 @@
+ *
+ */
+
++#define CCM_CCDR_OFFSET 0x4
++#define ANATOP_PLL_USB1 0x10
++#define ANATOP_PLL_USB2 0x20
++#define ANATOP_PLL_ENET 0xE0
++#define ANATOP_PLL_BYPASS_OFFSET (1 << 16)
++#define ANATOP_PLL_ENABLE_OFFSET (1 << 13)
++#define ANATOP_PLL_POWER_OFFSET (1 << 12)
++#define ANATOP_PFD_480n_OFFSET 0xf0
++#define ANATOP_PFD_528n_OFFSET 0x100
++#define PFD0_CLKGATE (1 << 7)
++#define PFD1_CLK_GATE (1 << 15)
++#define PFD2_CLK_GATE (1 << 23)
++#define PFD3_CLK_GATE (1 << 31)
++#define CCDR_CH0_HS_BYP 17
++#define OSC_RATE 24000000
++
++#define CCM_CCGR_OFFSET(index) (index * 2)
++
+ #include <linux/clk.h>
+ #include <linux/clkdev.h>
+ #include <linux/err.h>
++#include <linux/init.h>
++#include <linux/io.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+@@ -18,6 +38,7 @@
+ #include "clk.h"
+ #include "common.h"
+
++static bool uart_from_osc;
+ static const char const *step_sels[] = { "osc", "pll2_pfd2", };
+ static const char const *pll1_sw_sels[] = { "pll1_sys", "step", };
+ static const char const *ocram_alt_sels[] = { "pll2_pfd2", "pll3_pfd1", };
+@@ -25,8 +46,8 @@
+ static const char const *pre_periph_sels[] = { "pll2_bus", "pll2_pfd2", "pll2_pfd0", "pll2_198m", };
+ static const char const *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy", };
+ static const char const *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
+-static const char const *periph_sels[] = { "pre_periph_sel", "periph_clk2_podf", };
+-static const char const *periph2_sels[] = { "pre_periph2_sel", "periph2_clk2_podf", };
++static const char const *periph_sels[] = { "pre_periph_sel", "periph_clk2", };
++static const char const *periph2_sels[] = { "pre_periph2_sel", "periph2_clk2", };
+ static const char const *csi_lcdif_sels[] = { "mmdc", "pll2_pfd2", "pll3_120m", "pll3_pfd1", };
+ static const char const *usdhc_sels[] = { "pll2_pfd2", "pll2_pfd0", };
+ static const char const *ssi_sels[] = { "pll3_pfd2", "pll3_pfd3", "pll4_audio_div", "dummy", };
+@@ -38,7 +59,7 @@
+ static const char const *epdc_pix_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0", "pll2_pfd1", "pll3_pfd1", };
+ static const char const *audio_sels[] = { "pll4_audio_div", "pll3_pfd2", "pll3_pfd3", "pll3_usb_otg", };
+ static const char const *ecspi_sels[] = { "pll3_60m", "osc", };
+-static const char const *uart_sels[] = { "pll3_80m", "osc", };
++static const char const *uart_sels[] = { "pll3_80m", "uart_osc_4M", };
+
+ static struct clk_div_table clk_enet_ref_table[] = {
+ { .val = 0, .div = 20, },
+@@ -65,6 +86,80 @@
+
+ static struct clk *clks[IMX6SL_CLK_END];
+ static struct clk_onecell_data clk_data;
++static u32 cur_arm_podf;
++static u32 pll1_org_rate;
++
++extern int low_bus_freq_mode;
++extern int audio_bus_freq_mode;
++
++/*
++ * On MX6SL, need to ensure that the ARM:IPG clock ratio is maintained
++ * within 12:5 when the clocks to ARM are gated when the SOC enters
++ * WAIT mode. This is necessary to avoid WAIT mode issue (an early
++ * interrupt waking up the ARM).
++ * This function will set the ARM clk to max value within the 12:5 limit.
++ */
++void imx6sl_set_wait_clk(bool enter)
++{
++ u32 parent_rate;
++
++ if (enter) {
++ u32 wait_podf;
++ u32 new_parent_rate = OSC_RATE;
++ u32 ipg_rate = clk_get_rate(clks[IMX6SL_CLK_IPG]);
++ u32 max_arm_wait_clk = (12 * ipg_rate) / 5;
++ parent_rate = clk_get_rate(clks[IMX6SL_CLK_PLL1_SW]);
++ cur_arm_podf = parent_rate / clk_get_rate(clks[IMX6SL_CLK_ARM]);
++ if (low_bus_freq_mode) {
++ /*
++ * IPG clk is at 12MHz at this point, we can only run
++ * ARM at a max of 28.8MHz. So we need to set ARM
++ * to run from the 24MHz OSC, as there is no way to
++ * get 28.8MHz when ARM is sourced from PLL1.
++ */
++ clk_set_parent(clks[IMX6SL_CLK_STEP],
++ clks[IMX6SL_CLK_OSC]);
++ clk_set_parent(clks[IMX6SL_CLK_PLL1_SW],
++ clks[IMX6SL_CLK_STEP]);
++ } else if (audio_bus_freq_mode) {
++ /*
++ * In this mode ARM is from PLL2_PFD2 (396MHz),
++ * but IPG is at 12MHz. Need to switch ARM to run
++ * from the bypassed PLL1 clocks so that we can run
++ * ARM at 24MHz.
++ */
++ pll1_org_rate = clk_get_rate(clks[IMX6SL_CLK_PLL1_SYS]);
++ /* Ensure PLL1 is at 24MHz. */
++ clk_set_rate(clks[IMX6SL_CLK_PLL1_SYS], OSC_RATE);
++ clk_set_parent(clks[IMX6SL_CLK_PLL1_SW], clks[IMX6SL_CLK_PLL1_SYS]);
++ } else
++ new_parent_rate = clk_get_rate(clks[IMX6SL_CLK_PLL1_SW]);
++ wait_podf = (new_parent_rate + max_arm_wait_clk - 1) /
++ max_arm_wait_clk;
++
++ clk_set_rate(clks[IMX6SL_CLK_ARM], new_parent_rate / wait_podf);
++ } else {
++ if (low_bus_freq_mode)
++ /* Move ARM back to PLL1. */
++ clk_set_parent(clks[IMX6SL_CLK_PLL1_SW],
++ clks[IMX6SL_CLK_PLL1_SYS]);
++ else if (audio_bus_freq_mode) {
++ /* Move ARM back to PLL2_PFD2 via STEP_CLK. */
++ clk_set_parent(clks[IMX6SL_CLK_PLL1_SW], clks[IMX6SL_CLK_STEP]);
++ clk_set_rate(clks[IMX6SL_CLK_PLL1_SYS], pll1_org_rate);
++ }
++ parent_rate = clk_get_rate(clks[IMX6SL_CLK_PLL1_SW]);
++ clk_set_rate(clks[IMX6SL_CLK_ARM], parent_rate / cur_arm_podf);
++ }
++}
++
++static int __init setup_uart_clk(char *uart_rate)
++{
++ uart_from_osc = true;
++ return 1;
++}
++
++__setup("uart_at_4M", setup_uart_clk);
+
+ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
+ {
+@@ -72,6 +167,8 @@
+ void __iomem *base;
+ int irq;
+ int i;
++ int ret;
++ u32 reg;
+
+ clks[IMX6SL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+ clks[IMX6SL_CLK_CKIL] = imx_obtain_fixed_clock("ckil", 0);
+@@ -82,13 +179,18 @@
+ WARN_ON(!base);
+
+ /* type name parent base div_mask */
+- clks[IMX6SL_CLK_PLL1_SYS] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f);
+- clks[IMX6SL_CLK_PLL2_BUS] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1);
+- clks[IMX6SL_CLK_PLL3_USB_OTG] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3);
+- clks[IMX6SL_CLK_PLL4_AUDIO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f);
+- clks[IMX6SL_CLK_PLL5_VIDEO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f);
+- clks[IMX6SL_CLK_PLL6_ENET] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3);
+- clks[IMX6SL_CLK_PLL7_USB_HOST] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3);
++ clks[IMX6SL_CLK_PLL1_SYS] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f, true);
++ clks[IMX6SL_CLK_PLL2_BUS] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1, true);
++ clks[IMX6SL_CLK_PLL3_USB_OTG] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3, false);
++ clks[IMX6SL_CLK_PLL4_AUDIO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f, false);
++ clks[IMX6SL_CLK_PLL5_VIDEO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f, false);
++ clks[IMX6SL_CLK_PLL6_ENET] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3, false);
++ clks[IMX6SL_CLK_PLL7_USB_HOST] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3, false);
++
++ /* Ensure the AHB clk is at 132MHz. */
++ ret = clk_set_rate(clks[IMX6SL_CLK_AHB], 132000000);
++ if (ret)
++ pr_warn("%s: failed to set AHB clock rate %d\n", __func__, ret);
+
+ /*
+ * usbphy1 and usbphy2 are implemented as dummy gates using reserve
+@@ -118,11 +220,36 @@
+ clks[IMX6SL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", base + 0xf0, 2);
+ clks[IMX6SL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", base + 0xf0, 3);
+
+- /* name parent_name mult div */
+- clks[IMX6SL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2", 1, 2);
+- clks[IMX6SL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
+- clks[IMX6SL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+- clks[IMX6SL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
++ /* name parent_name mult div */
++ clks[IMX6SL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2", 1, 2);
++ clks[IMX6SL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
++ clks[IMX6SL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
++ clks[IMX6SL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
++ clks[IMX6SL_CLK_UART_OSC_4M] = imx_clk_fixed_factor("uart_osc_4M", "osc", 1, 6);
++
++ /* Ensure all PFDs but PLL2_PFD2 are disabled. */
++ reg = readl_relaxed(base + ANATOP_PFD_480n_OFFSET);
++ reg |= (PFD0_CLKGATE | PFD1_CLK_GATE | PFD2_CLK_GATE | PFD3_CLK_GATE);
++ writel_relaxed(reg, base + ANATOP_PFD_480n_OFFSET);
++ reg = readl_relaxed(base + ANATOP_PFD_528n_OFFSET);
++ reg |= (PFD0_CLKGATE | PFD1_CLK_GATE);
++ writel_relaxed(reg, base + ANATOP_PFD_528n_OFFSET);
++
++ /* Ensure Unused PLLs are disabled. */
++ reg = readl_relaxed(base + ANATOP_PLL_USB1);
++ reg |= ANATOP_PLL_BYPASS_OFFSET;
++ reg &= ~(ANATOP_PLL_ENABLE_OFFSET | ANATOP_PLL_POWER_OFFSET);
++ writel_relaxed(reg, base + ANATOP_PLL_USB1);
++
++ reg = readl_relaxed(base + ANATOP_PLL_USB2);
++ reg |= ANATOP_PLL_BYPASS_OFFSET;
++ reg &= ~(ANATOP_PLL_ENABLE_OFFSET | ANATOP_PLL_POWER_OFFSET);
++ writel_relaxed(reg, base + ANATOP_PLL_USB2);
++
++ reg = readl_relaxed(base + ANATOP_PLL_ENET);
++ reg |= (ANATOP_PLL_BYPASS_OFFSET | ANATOP_PLL_POWER_OFFSET);
++ reg &= ~ANATOP_PLL_ENABLE_OFFSET;
++ writel_relaxed(reg, base + ANATOP_PLL_ENET);
+
+ np = ccm_node;
+ base = of_iomap(np, 0);
+@@ -158,7 +285,7 @@
+ clks[IMX6SL_CLK_EPDC_PIX_SEL] = imx_clk_mux("epdc_pix_sel", base + 0x38, 15, 3, epdc_pix_sels, ARRAY_SIZE(epdc_pix_sels));
+ clks[IMX6SL_CLK_SPDIF0_SEL] = imx_clk_mux("spdif0_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
+ clks[IMX6SL_CLK_SPDIF1_SEL] = imx_clk_mux("spdif1_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
+- clks[IMX6SL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux("extern_audio_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
++ clks[IMX6SL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux_flags("extern_audio_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
+ clks[IMX6SL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
+
+@@ -168,8 +295,8 @@
+
+ /* name parent_name reg shift width */
+ clks[IMX6SL_CLK_OCRAM_PODF] = imx_clk_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3);
+- clks[IMX6SL_CLK_PERIPH_CLK2_PODF] = imx_clk_divider("periph_clk2_podf", "periph_clk2_sel", base + 0x14, 27, 3);
+- clks[IMX6SL_CLK_PERIPH2_CLK2_PODF] = imx_clk_divider("periph2_clk2_podf", "periph2_clk2_sel", base + 0x14, 0, 3);
++ clks[IMX6SL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
++ clks[IMX6SL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clks[IMX6SL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
+ clks[IMX6SL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
+ clks[IMX6SL_CLK_LCDIF_AXI_PODF] = imx_clk_divider("lcdif_axi_podf", "lcdif_axi_sel", base + 0x3c, 16, 3);
+@@ -251,6 +378,25 @@
+ pr_err("i.MX6SL clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+
++ /* Initialize clock gate status */
++ writel_relaxed(1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(1) |
++ 3 << CCM_CCGR_OFFSET(0), base + 0x68);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(10), base + 0x6c);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10) |
++ 3 << CCM_CCGR_OFFSET(9) |
++ 3 << CCM_CCGR_OFFSET(8), base + 0x70);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(14) |
++ 3 << CCM_CCGR_OFFSET(13) |
++ 3 << CCM_CCGR_OFFSET(12) |
++ 3 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10), base + 0x74);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(7) |
++ 3 << CCM_CCGR_OFFSET(4), base + 0x78);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(0), base + 0x7c);
++ writel_relaxed(0, base + 0x80);
++
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+@@ -258,17 +404,58 @@
+ clk_register_clkdev(clks[IMX6SL_CLK_GPT], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clks[IMX6SL_CLK_GPT_SERIAL], "per", "imx-gpt.0");
+
++ /*
++ * Make sure the ARM clk is enabled to maintain the correct usecount
++ * and enabling/disabling of parent PLLs.
++ */
++ ret = clk_prepare_enable(clks[IMX6SL_CLK_ARM]);
++ if (ret)
++ pr_warn("%s: failed to enable ARM core clock %d\n",
++ __func__, ret);
++
++ /*
++ * Make sure the MMDC clk is enabled to maintain the correct usecount
++ * and enabling/disabling of parent PLLs.
++ */
++ ret = clk_prepare_enable(clks[IMX6SL_CLK_MMDC_ROOT]);
++ if (ret)
++ pr_warn("%s: failed to enable MMDC clock %d\n",
++ __func__, ret);
++
+ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+ clk_prepare_enable(clks[IMX6SL_CLK_USBPHY1_GATE]);
+ clk_prepare_enable(clks[IMX6SL_CLK_USBPHY2_GATE]);
+ }
+
++ clk_set_parent(clks[IMX6SL_CLK_GPU2D_OVG_SEL],
++ clks[IMX6SL_CLK_PLL2_BUS]);
++ clk_set_parent(clks[IMX6SL_CLK_GPU2D_SEL], clks[IMX6SL_CLK_PLL2_BUS]);
++
+ /* Audio-related clocks configuration */
+ clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]);
+
++ /* set extern_audio to be sourced from PLL4/audio PLL */
++ clk_set_parent(clks[IMX6SL_CLK_EXTERN_AUDIO_SEL], clks[IMX6SL_CLK_PLL4_AUDIO_DIV]);
++ /* set extern_audio to 24MHz */
++ clk_set_rate(clks[IMX6SL_CLK_PLL4_AUDIO], 24000000);
++ clk_set_rate(clks[IMX6SL_CLK_EXTERN_AUDIO], 24000000);
++
++ /* set SSI2 parent to PLL4 */
++ clk_set_parent(clks[IMX6SL_CLK_SSI2_SEL], clks[IMX6SL_CLK_PLL4_AUDIO_DIV]);
++ clk_set_rate(clks[IMX6SL_CLK_SSI2], 24000000);
++
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
++ /* Ensure that CH0 handshake is bypassed. */
++ reg = readl_relaxed(base + CCM_CCDR_OFFSET);
++ reg |= 1 << CCDR_CH0_HS_BYP;
++ writel_relaxed(reg, base + CCM_CCDR_OFFSET);
++
++ /* Set the UART parent if needed. */
++ if (uart_from_osc)
++ ret = clk_set_parent(clks[IMX6SL_CLK_UART_SEL], clks[IMX6SL_CLK_UART_OSC_4M]);
++
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+diff -Nur linux-3.14.36/arch/arm/mach-imx/clk-pfd.c linux-openelec/arch/arm/mach-imx/clk-pfd.c
+--- linux-3.14.36/arch/arm/mach-imx/clk-pfd.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/clk-pfd.c 2015-05-06 12:05:43.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -17,6 +17,8 @@
+ #include <linux/err.h>
+ #include "clk.h"
+
++#define BYPASS_RATE 24000000
++
+ /**
+ * struct clk_pfd - IMX PFD clock
+ * @clk_hw: clock source
+@@ -62,9 +64,14 @@
+ u64 tmp = parent_rate;
+ u8 frac = (readl_relaxed(pfd->reg) >> (pfd->idx * 8)) & 0x3f;
+
+- tmp *= 18;
+- do_div(tmp, frac);
+-
++ /*
++ * If the parent PLL is in bypass state, the PFDs
++ * are also in bypass state.
++ */
++ if (tmp != BYPASS_RATE) {
++ tmp *= 18;
++ do_div(tmp, frac);
++ }
+ return tmp;
+ }
+
+@@ -74,17 +81,22 @@
+ u64 tmp = *prate;
+ u8 frac;
+
+- tmp = tmp * 18 + rate / 2;
+- do_div(tmp, rate);
+- frac = tmp;
+- if (frac < 12)
+- frac = 12;
+- else if (frac > 35)
+- frac = 35;
+- tmp = *prate;
+- tmp *= 18;
+- do_div(tmp, frac);
+-
++ /*
++ * If the parent PLL is in bypass state, the PFDs
++ * are also in bypass state.
++ */
++ if (tmp != BYPASS_RATE) {
++ tmp = tmp * 18 + rate / 2;
++ do_div(tmp, rate);
++ frac = tmp;
++ if (frac < 12)
++ frac = 12;
++ else if (frac > 35)
++ frac = 35;
++ tmp = *prate;
++ tmp *= 18;
++ do_div(tmp, frac);
++ }
+ return tmp;
+ }
+
+@@ -95,6 +107,9 @@
+ u64 tmp = parent_rate;
+ u8 frac;
+
++ if (tmp == BYPASS_RATE)
++ return 0;
++
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+diff -Nur linux-3.14.36/arch/arm/mach-imx/clk-pllv3.c linux-openelec/arch/arm/mach-imx/clk-pllv3.c
+--- linux-3.14.36/arch/arm/mach-imx/clk-pllv3.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/clk-pllv3.c 2015-05-06 12:05:43.000000000 -0500
+@@ -26,12 +26,15 @@
+ #define BM_PLL_ENABLE (0x1 << 13)
+ #define BM_PLL_BYPASS (0x1 << 16)
+ #define BM_PLL_LOCK (0x1 << 31)
++#define BYPASS_RATE 24000000
++#define BYPASS_MASK 0x10000
+
+ /**
+ * struct clk_pllv3 - IMX PLL clock version 3
+ * @clk_hw: clock source
+ * @base: base address of PLL registers
+ * @powerup_set: set POWER bit to power up the PLL
++ * @always_on : Leave the PLL powered up all the time.
+ * @div_mask: mask of divider bits
+ *
+ * IMX PLL clock version 3, found on i.MX6 series. Divider for pllv3
+@@ -41,7 +44,9 @@
+ struct clk_hw hw;
+ void __iomem *base;
+ bool powerup_set;
++ bool always_on;
+ u32 div_mask;
++ u32 rate_req;
+ };
+
+ #define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
+@@ -61,54 +66,53 @@
+ break;
+ if (time_after(jiffies, timeout))
+ break;
+- usleep_range(50, 500);
++ udelay(100);
+ } while (1);
+
+ return readl_relaxed(pll->base) & BM_PLL_LOCK ? 0 : -ETIMEDOUT;
+ }
+
+-static int clk_pllv3_prepare(struct clk_hw *hw)
++static int clk_pllv3_power_up_down(struct clk_hw *hw, bool enable)
+ {
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+- u32 val;
+- int ret;
+-
+- val = readl_relaxed(pll->base);
+- if (pll->powerup_set)
+- val |= BM_PLL_POWER;
+- else
+- val &= ~BM_PLL_POWER;
+- writel_relaxed(val, pll->base);
+-
+- ret = clk_pllv3_wait_lock(pll);
+- if (ret)
+- return ret;
++ u32 val, ret = 0;
+
+- val = readl_relaxed(pll->base);
+- val &= ~BM_PLL_BYPASS;
+- writel_relaxed(val, pll->base);
+-
+- return 0;
+-}
++ if (enable) {
++ val = readl_relaxed(pll->base);
++ val &= ~BM_PLL_BYPASS;
++ if (pll->powerup_set)
++ val |= BM_PLL_POWER;
++ else
++ val &= ~BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++
++ ret = clk_pllv3_wait_lock(pll);
++ } else {
++ val = readl_relaxed(pll->base);
++ val |= BM_PLL_BYPASS;
++ if (pll->powerup_set)
++ val &= ~BM_PLL_POWER;
++ else
++ val |= BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++ }
+
+-static void clk_pllv3_unprepare(struct clk_hw *hw)
+-{
+- struct clk_pllv3 *pll = to_clk_pllv3(hw);
+- u32 val;
++ if (!ret) {
++ val = readl_relaxed(pll->base);
++ val &= ~BM_PLL_BYPASS;
++ writel_relaxed(val, pll->base);
++ }
+
+- val = readl_relaxed(pll->base);
+- val |= BM_PLL_BYPASS;
+- if (pll->powerup_set)
+- val &= ~BM_PLL_POWER;
+- else
+- val |= BM_PLL_POWER;
+- writel_relaxed(val, pll->base);
++ return ret;
+ }
+
+ static int clk_pllv3_enable(struct clk_hw *hw)
+ {
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val;
++
++ if (pll->rate_req != BYPASS_RATE)
++ clk_pllv3_power_up_down(hw, true);
+
+ val = readl_relaxed(pll->base);
+ val |= BM_PLL_ENABLE;
+@@ -123,8 +127,12 @@
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+- val &= ~BM_PLL_ENABLE;
++ if (!pll->always_on)
++ val &= ~BM_PLL_ENABLE;
+ writel_relaxed(val, pll->base);
++
++ if (pll->rate_req != BYPASS_RATE)
++ clk_pllv3_power_up_down(hw, false);
+ }
+
+ static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
+@@ -132,8 +140,15 @@
+ {
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
++ u32 bypass = readl_relaxed(pll->base) & BYPASS_MASK;
++ u32 rate;
++
++ if (pll->rate_req == BYPASS_RATE && bypass)
++ rate = BYPASS_RATE;
++ else
++ rate = (div == 1) ? parent_rate * 22 : parent_rate * 20;
+
+- return (div == 1) ? parent_rate * 22 : parent_rate * 20;
++ return rate;
+ }
+
+ static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
+@@ -141,6 +156,10 @@
+ {
+ unsigned long parent_rate = *prate;
+
++ /* If the PLL is bypassed, its rate is 24MHz. */
++ if (rate == BYPASS_RATE)
++ return BYPASS_RATE;
++
+ return (rate >= parent_rate * 22) ? parent_rate * 22 :
+ parent_rate * 20;
+ }
+@@ -151,6 +170,22 @@
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val, div;
+
++ pll->rate_req = rate;
++ val = readl_relaxed(pll->base);
++
++ /* If the PLL is bypassed, its rate is 24MHz. */
++ if (rate == BYPASS_RATE) {
++ /* Set the bypass bit. */
++ val |= BM_PLL_BYPASS;
++ /* Power down the PLL. */
++ if (pll->powerup_set)
++ val &= ~BM_PLL_POWER;
++ else
++ val |= BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++
++ return 0;
++ }
+ if (rate == parent_rate * 22)
+ div = 1;
+ else if (rate == parent_rate * 20)
+@@ -167,8 +202,6 @@
+ }
+
+ static const struct clk_ops clk_pllv3_ops = {
+- .prepare = clk_pllv3_prepare,
+- .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_recalc_rate,
+@@ -181,6 +214,10 @@
+ {
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
++ u32 bypass = readl_relaxed(pll->base) & BYPASS_MASK;
++
++ if (pll->rate_req == BYPASS_RATE && bypass)
++ return BYPASS_RATE;
+
+ return parent_rate * div / 2;
+ }
+@@ -193,6 +230,9 @@
+ unsigned long max_rate = parent_rate * 108 / 2;
+ u32 div;
+
++ if (rate == BYPASS_RATE)
++ return BYPASS_RATE;
++
+ if (rate > max_rate)
+ rate = max_rate;
+ else if (rate < min_rate)
+@@ -210,9 +250,26 @@
+ unsigned long max_rate = parent_rate * 108 / 2;
+ u32 val, div;
+
+- if (rate < min_rate || rate > max_rate)
++ if (rate != BYPASS_RATE && (rate < min_rate || rate > max_rate))
+ return -EINVAL;
+
++ pll->rate_req = rate;
++ val = readl_relaxed(pll->base);
++
++ if (rate == BYPASS_RATE) {
++ /*
++ * Set the PLL in bypass mode if rate requested is
++ * BYPASS_RATE.
++ */
++ val |= BM_PLL_BYPASS;
++ /* Power down the PLL. */
++ if (pll->powerup_set)
++ val &= ~BM_PLL_POWER;
++ else
++ val |= BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++ return 0;
++ }
+ div = rate * 2 / parent_rate;
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+@@ -223,8 +280,6 @@
+ }
+
+ static const struct clk_ops clk_pllv3_sys_ops = {
+- .prepare = clk_pllv3_prepare,
+- .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_sys_recalc_rate,
+@@ -239,6 +294,10 @@
+ u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
+ u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
++ u32 bypass = readl_relaxed(pll->base) & BYPASS_MASK;
++
++ if (pll->rate_req == BYPASS_RATE && bypass)
++ return BYPASS_RATE;
+
+ return (parent_rate * div) + ((parent_rate / mfd) * mfn);
+ }
+@@ -253,6 +312,9 @@
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
+
++ if (rate == BYPASS_RATE)
++ return BYPASS_RATE;
++
+ if (rate > max_rate)
+ rate = max_rate;
+ else if (rate < min_rate)
+@@ -273,13 +335,36 @@
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long min_rate = parent_rate * 27;
+ unsigned long max_rate = parent_rate * 54;
+- u32 val, div;
++ u32 val, newval, div;
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
++ int ret;
+
+- if (rate < min_rate || rate > max_rate)
++ if (rate != BYPASS_RATE && (rate < min_rate || rate > max_rate))
+ return -EINVAL;
+
++ pll->rate_req = rate;
++ val = readl_relaxed(pll->base);
++
++ if (rate == BYPASS_RATE) {
++ /*
++ * Set the PLL in bypass mode if rate requested is
++ * BYPASS_RATE.
++ */
++ /* Bypass the PLL */
++ val |= BM_PLL_BYPASS;
++ /* Power down the PLL. */
++ if (pll->powerup_set)
++ val &= ~BM_PLL_POWER;
++ else
++ val |= BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++ return 0;
++ }
++ /* Else clear the bypass bit. */
++ val &= ~BM_PLL_BYPASS;
++ writel_relaxed(val, pll->base);
++
+ div = rate / parent_rate;
+ temp64 = (u64) (rate - div * parent_rate);
+ temp64 *= mfd;
+@@ -287,18 +372,30 @@
+ mfn = temp64;
+
+ val = readl_relaxed(pll->base);
+- val &= ~pll->div_mask;
+- val |= div;
+- writel_relaxed(val, pll->base);
++
++ /* set the PLL into bypass mode */
++ newval = val | BM_PLL_BYPASS;
++ writel_relaxed(newval, pll->base);
++
++ /* configure the new frequency */
++ newval &= ~pll->div_mask;
++ newval |= div;
++ writel_relaxed(newval, pll->base);
+ writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+- writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
++ writel(mfd, pll->base + PLL_DENOM_OFFSET);
+
+- return clk_pllv3_wait_lock(pll);
++ ret = clk_pllv3_wait_lock(pll);
++ if (ret == 0 && val & BM_PLL_POWER) {
++ /* only if it locked can we switch back to the PLL */
++ newval &= ~BM_PLL_BYPASS;
++ newval |= val & BM_PLL_BYPASS;
++ writel(newval, pll->base);
++ }
++
++ return ret;
+ }
+
+ static const struct clk_ops clk_pllv3_av_ops = {
+- .prepare = clk_pllv3_prepare,
+- .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_av_recalc_rate,
+@@ -313,8 +410,6 @@
+ }
+
+ static const struct clk_ops clk_pllv3_enet_ops = {
+- .prepare = clk_pllv3_prepare,
+- .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_enet_recalc_rate,
+@@ -322,7 +417,7 @@
+
+ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+ const char *parent_name, void __iomem *base,
+- u32 div_mask)
++ u32 div_mask, bool always_on)
+ {
+ struct clk_pllv3 *pll;
+ const struct clk_ops *ops;
+@@ -352,6 +447,7 @@
+ }
+ pll->base = base;
+ pll->div_mask = div_mask;
++ pll->always_on = always_on;
+
+ init.name = name;
+ init.ops = ops;
+diff -Nur linux-3.14.36/arch/arm/mach-imx/common.h linux-openelec/arch/arm/mach-imx/common.h
+--- linux-3.14.36/arch/arm/mach-imx/common.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/common.h 2015-05-06 12:05:43.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+ /*
+@@ -116,7 +116,6 @@
+ void imx_set_cpu_jump(int cpu, void *jump_addr);
+ u32 imx_get_cpu_arg(int cpu);
+ void imx_set_cpu_arg(int cpu, u32 arg);
+-void v7_cpu_resume(void);
+ #ifdef CONFIG_SMP
+ void v7_secondary_startup(void);
+ void imx_scu_map_io(void);
+@@ -129,7 +128,7 @@
+ #endif
+ void imx_src_init(void);
+ void imx_gpc_init(void);
+-void imx_gpc_pre_suspend(void);
++void imx_gpc_pre_suspend(bool arm_power_off);
+ void imx_gpc_post_resume(void);
+ void imx_gpc_mask_all(void);
+ void imx_gpc_restore_all(void);
+@@ -138,14 +137,28 @@
+ void imx_anatop_init(void);
+ void imx_anatop_pre_suspend(void);
+ void imx_anatop_post_resume(void);
++void imx_anatop_pu_enable(bool enable);
+ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
+-void imx6q_set_chicken_bit(void);
++void imx6q_set_cache_lpm_in_wait(bool enable);
++void imx6sl_set_wait_clk(bool enter);
++void imx6_enet_mac_init(const char *compatible);
+
+ void imx_cpu_die(unsigned int cpu);
+ int imx_cpu_kill(unsigned int cpu);
+
++#ifdef CONFIG_SUSPEND
++void v7_cpu_resume(void);
++void imx6_suspend(void __iomem *ocram_vbase);
++#else
++static inline void v7_cpu_resume(void) {}
++static inline void imx6_suspend(void __iomem *ocram_vbase) {}
++#endif
++
+ void imx6q_pm_init(void);
++void imx6dl_pm_init(void);
++void imx6sl_pm_init(void);
+ void imx6q_pm_set_ccm_base(void __iomem *base);
++
+ #ifdef CONFIG_PM
+ void imx5_pm_init(void);
+ #else
+diff -Nur linux-3.14.36/arch/arm/mach-imx/cpuidle.h linux-openelec/arch/arm/mach-imx/cpuidle.h
+--- linux-3.14.36/arch/arm/mach-imx/cpuidle.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/cpuidle.h 2015-05-06 12:05:43.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -13,6 +13,7 @@
+ #ifdef CONFIG_CPU_IDLE
+ extern int imx5_cpuidle_init(void);
+ extern int imx6q_cpuidle_init(void);
++extern int imx6sl_cpuidle_init(void);
+ #else
+ static inline int imx5_cpuidle_init(void)
+ {
+@@ -22,4 +23,8 @@
+ {
+ return 0;
+ }
++static inline int imx6sl_cpuidle_init(void)
++{
++ return 0;
++}
+ #endif
+diff -Nur linux-3.14.36/arch/arm/mach-imx/cpuidle-imx6q.c linux-openelec/arch/arm/mach-imx/cpuidle-imx6q.c
+--- linux-3.14.36/arch/arm/mach-imx/cpuidle-imx6q.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/cpuidle-imx6q.c 2015-05-06 12:05:43.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -68,8 +68,8 @@
+ /* Need to enable SCU standby for entering WAIT modes */
+ imx_scu_standby_enable();
+
+- /* Set chicken bit to get a reliable WAIT mode support */
+- imx6q_set_chicken_bit();
++ /* Set cache lpm bit for reliable WAIT mode support */
++ imx6q_set_cache_lpm_in_wait(true);
+
+ return cpuidle_register(&imx6q_cpuidle_driver, NULL);
+ }
+diff -Nur linux-3.14.36/arch/arm/mach-imx/cpuidle-imx6sl.c linux-openelec/arch/arm/mach-imx/cpuidle-imx6sl.c
+--- linux-3.14.36/arch/arm/mach-imx/cpuidle-imx6sl.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/cpuidle-imx6sl.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,149 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/cpuidle.h>
++#include <linux/genalloc.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <asm/cpuidle.h>
++#include <asm/fncpy.h>
++#include <asm/mach/map.h>
++#include <asm/proc-fns.h>
++#include <asm/tlb.h>
++
++#include "common.h"
++#include "cpuidle.h"
++
++extern u32 audio_bus_freq_mode;
++extern u32 ultra_low_bus_freq_mode;
++extern unsigned long reg_addrs[];
++extern void imx6sl_low_power_wfi(void);
++
++static void __iomem *iomux_base;
++static void *wfi_iram_base;
++
++void (*imx6sl_wfi_in_iram_fn)(void *wfi_iram_base,
++ void *iomux_addr, void *regs_addr, u32 audio_mode) = NULL;
++
++#define WFI_IN_IRAM_SIZE 0x1000
++
++static int imx6sl_enter_wait(struct cpuidle_device *dev,
++ struct cpuidle_driver *drv, int index)
++{
++ imx6q_set_lpm(WAIT_UNCLOCKED);
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ if (ultra_low_bus_freq_mode || audio_bus_freq_mode) {
++ /*
++ * Flush the TLB, to ensure no TLB maintenance occurs
++ * when DDR is in self-refresh.
++ */
++ local_flush_tlb_all();
++ /*
++ * Run WFI code from IRAM.
++ * Drop the DDR freq to 1MHz and AHB to 3MHz
++ * Also float DDR IO pads.
++ */
++ imx6sl_wfi_in_iram_fn(wfi_iram_base, iomux_base, reg_addrs, audio_bus_freq_mode);
++ }
++ else
++#endif
++ {
++ imx6sl_set_wait_clk(true);
++ cpu_do_idle();
++ imx6sl_set_wait_clk(false);
++ }
++ imx6q_set_lpm(WAIT_CLOCKED);
++
++ return index;
++}
++
++static struct cpuidle_driver imx6sl_cpuidle_driver = {
++ .name = "imx6sl_cpuidle",
++ .owner = THIS_MODULE,
++ .states = {
++ /* WFI */
++ ARM_CPUIDLE_WFI_STATE,
++ /* WAIT */
++ {
++ .exit_latency = 50,
++ .target_residency = 75,
++ .flags = CPUIDLE_FLAG_TIME_VALID |
++ CPUIDLE_FLAG_TIMER_STOP,
++ .enter = imx6sl_enter_wait,
++ .name = "WAIT",
++ .desc = "Clock off",
++ },
++ },
++ .state_count = 2,
++ .safe_state_index = 0,
++};
++
++int __init imx6sl_cpuidle_init(void)
++{
++ struct platform_device *ocram_dev;
++ unsigned int iram_paddr;
++ struct device_node *node;
++ struct gen_pool *iram_pool;
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-iomuxc");
++ if (!node) {
++ pr_err("failed to find imx6sl-iomuxc device tree data!\n");
++ return -EINVAL;
++ }
++ iomux_base = of_iomap(node, 0);
++ WARN(!iomux_base, "unable to map iomux registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ pr_err("%s: failed to find ocram node\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ ocram_dev = of_find_device_by_node(node);
++ if (!ocram_dev) {
++ pr_err("failed to find ocram device!\n");
++ return -EINVAL;
++ }
++
++ iram_pool = dev_get_gen_pool(&ocram_dev->dev);
++ if (!iram_pool) {
++ pr_err("iram pool unavailable!\n");
++ return -EINVAL;
++ }
++ /*
++ * Allocate IRAM memory when ARM executes WFI in
++ * ultra_low_power_mode.
++ */
++ wfi_iram_base = (void *)gen_pool_alloc(iram_pool,
++ WFI_IN_IRAM_SIZE);
++ if (!wfi_iram_base) {
++ pr_err("Cannot alloc iram for wfi code!\n");
++ return -ENOMEM;
++ }
++
++ iram_paddr = gen_pool_virt_to_phys(iram_pool,
++ (unsigned long)wfi_iram_base);
++ /*
++ * Need to remap the area here since we want
++ * the memory region to be executable.
++ */
++ wfi_iram_base = __arm_ioremap(iram_paddr,
++ WFI_IN_IRAM_SIZE,
++ MT_MEMORY_RWX_NONCACHED);
++ if (!wfi_iram_base)
++ pr_err("wfi_ram_base NOT remapped\n");
++
++ imx6sl_wfi_in_iram_fn = (void *)fncpy(wfi_iram_base,
++ &imx6sl_low_power_wfi, WFI_IN_IRAM_SIZE);
++
++ return cpuidle_register(&imx6sl_cpuidle_driver, NULL);
++}
+diff -Nur linux-3.14.36/arch/arm/mach-imx/ddr3_freq_imx6.S linux-openelec/arch/arm/mach-imx/ddr3_freq_imx6.S
+--- linux-3.14.36/arch/arm/mach-imx/ddr3_freq_imx6.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/ddr3_freq_imx6.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,893 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/linkage.h>
++
++#define MMDC0_MDPDC 0x4
++#define MMDC0_MDCF0 0x0c
++#define MMDC0_MDCF1 0x10
++#define MMDC0_MDMISC 0x18
++#define MMDC0_MDSCR 0x1c
++#define MMDC0_MAPSR 0x404
++#define MMDC0_MADPCR0 0x410
++#define MMDC0_MPZQHWCTRL 0x800
++#define MMDC1_MPZQHWCTRL 0x4800
++#define MMDC0_MPODTCTRL 0x818
++#define MMDC1_MPODTCTRL 0x4818
++#define MMDC0_MPDGCTRL0 0x83c
++#define MMDC1_MPDGCTRL0 0x483c
++#define MMDC0_MPMUR0 0x8b8
++#define MMDC1_MPMUR0 0x48b8
++
++#define CCM_CBCDR 0x14
++#define CCM_CBCMR 0x18
++#define CCM_CSCMR1 0x1c
++#define CCM_CDHIPR 0x48
++
++#define L2_CACHE_SYNC 0x730
++
++ .align 3
++
++ .macro switch_to_528MHz
++
++ /* check if periph_clk_sel is already set */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq set_ahb_podf_before_switch
++
++ /* change periph_clk to be sourced from pll3_clk. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 20)
++ str r0, [r6, #CCM_CBCDR]
++
++ /*
++ * set the AHB dividers before the switch,
++ * don't change AXI clock divider,
++ * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #0xd00
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update528:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update528
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch3:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch3
++
++ b switch_pre_periph_clk_528
++
++set_ahb_podf_before_switch:
++ /*
++ * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #0xd00
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update528_1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update528_1
++
++switch_pre_periph_clk_528:
++
++ /* now switch pre_periph_clk to PLL2_528MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0xc << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch4:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch4
++
++ .endm
++
++ .macro switch_to_400MHz
++
++ /* check if periph_clk_sel is already set. */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq set_ahb_podf_before_switch1
++
++ /* change periph_clk to be sourced from pll3_clk. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch5:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch5
++
++ b switch_pre_periph_clk_400
++
++set_ahb_podf_before_switch1:
++ /*
++ * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x9 << 8)
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update400_1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update400_1
++
++switch_pre_periph_clk_400:
++
++ /* now switch pre_periph_clk to PFD_400MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0xc << 16)
++ orr r0, r0, #(0x4 << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch6:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch6
++
++ /*
++ * change AHB divider so that we are at 400/3=133MHz.
++ * don't change AXI clock divider.
++ * set the MMDC_DIV=1, AXI_DIV=2, AHB_DIV=3,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x9 << 8)
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update400_2:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update400_2
++
++ .endm
++
++ .macro switch_to_50MHz
++
++ /* check if periph_clk_sel is already set. */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq switch_pre_periph_clk_50
++
++ /*
++ * set the periph_clk to be sourced from PLL2_PFD_200M
++ * change periph_clk to be sourced from pll3_clk.
++ * ensure PLL3 is the source and set the divider to 1.
++ */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0x3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch_50:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch_50
++
++switch_pre_periph_clk_50:
++
++ /* now switch pre_periph_clk to PFD_200MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ orr r0, r0, #(0xc << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /*
++ * set the MMDC_DIV=4, AXI_DIV = 4, AHB_DIV=8,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x18 << 16)
++ orr r0, r0, #(0x3 << 16)
++
++ /*
++ * if changing AHB divider remember to change
++ * the IPGPER divider too below.
++ */
++ orr r0, r0, #0x1d00
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update_50:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update_50
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch2:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch2
++
++ .endm
++
++ .macro switch_to_24MHz
++ /*
++ * change the freq now try setting DDR to 24MHz.
++ * source it from the periph_clk2 ensure the
++ * periph_clk2 is sourced from 24MHz and the
++ * divider is 1.
++ */
++
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0x3 << 12)
++ orr r0, r0, #(1 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to 24MHz. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch1
++
++ /* change all the dividers to 1. */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(1 << 8)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* Wait for the divider to change. */
++wait_div_update:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update
++
++ .endm
++
++/*
++ * mx6_ddr3_freq_change
++ *
++ * idle the processor (eg, wait for interrupt).
++ * make sure DDR is in self-refresh.
++ * IRQs are already disabled.
++ */
++ENTRY(mx6_ddr3_freq_change)
++
++ stmfd sp!, {r4-r12}
++
++ /*
++ * r5 -> mmdc_base
++ * r6 -> ccm_base
++ * r7 -> iomux_base
++ * r12 -> l2_base
++ */
++ mov r4, r0
++ mov r8, r1
++ mov r9, r2
++ mov r11, r3
++
++ /*
++ * Get the addresses of the registers.
++ * They are last few entries in the
++ * ddr_settings parameter.
++ * The first entry contains the count,
++ * and each entry is 2 words.
++ */
++ ldr r0, [r1]
++ add r0, r0, #1
++ lsl r0, r0, #3
++ add r1, r0, r1
++ /* mmdc_base. */
++ ldr r5, [r1]
++ add r1, #8
++ /* ccm_base */
++ ldr r6, [r1]
++ add r1, #8
++ /*iomux_base */
++ ldr r7, [r1]
++ add r1, #8
++ /*l2_base */
++ ldr r12, [r1]
++
++ddr_freq_change:
++ /*
++ * make sure no TLB miss will occur when
++ * the DDR is in self refresh. invalidate
++ * TLB single entry to ensure that the
++ * address is not already in the TLB.
++ */
++
++ adr r10, ddr_freq_change
++
++ ldr r2, [r6]
++ ldr r2, [r5]
++ ldr r2, [r7]
++ ldr r2, [r8]
++ ldr r2, [r10]
++ ldr r2, [r11]
++ ldr r2, [r12]
++
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Make sure the L2 buffers are drained.
++ * Sync operation on L2 drains the buffers.
++ */
++ mov r1, #0x0
++ str r1, [r12, #L2_CACHE_SYNC]
++#endif
++
++ /* disable automatic power saving. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* disable MMDC power down timer. */
++ ldr r0, [r5, #MMDC0_MDPDC]
++ bic r0, r0, #(0xff << 8)
++ str r0, [r5, #MMDC0_MDPDC]
++
++ /* delay for a while */
++ ldr r1, =4
++delay1:
++ ldr r2, =0
++cont1:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont1
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay1
++
++ /* set CON_REG */
++ ldr r0, =0x8000
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_set_1:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ bne poll_conreq_set_1
++
++ ldr r0, =0x00008010
++ str r0, [r5, #MMDC0_MDSCR]
++ ldr r0, =0x00008018
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /*
++ * if requested frequency is greater than
++ * 300MHz go to DLL on mode.
++ */
++ ldr r1, =300000000
++ cmp r4, r1
++ bge dll_on_mode
++
++dll_off_mode:
++
++ /* if DLL is currently on, turn it off. */
++ cmp r9, #1
++ beq continue_dll_off_1
++
++ ldr r0, =0x00018031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00018039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r1, =10
++delay1a:
++ ldr r2, =0
++cont1a:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont1a
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay1a
++
++continue_dll_off_1:
++ /* set DVFS - enter self refresh mode */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* de-assert con_req */
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++
++poll_dvfs_set_1:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ bne poll_dvfs_set_1
++
++ ldr r1, =24000000
++ cmp r4, r1
++ beq switch_freq_24
++
++ switch_to_50MHz
++ b continue_dll_off_2
++
++switch_freq_24:
++ switch_to_24MHz
++
++continue_dll_off_2:
++
++ /* set SBS - block ddr accesses */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ orr r0, r0, #(1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ /* clear DVFS - exit from self refresh mode */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++poll_dvfs_clear_1:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq poll_dvfs_clear_1
++
++ /* if DLL was previously on, continue DLL off routine. */
++ cmp r9, #1
++ beq continue_dll_off_3
++
++ ldr r0, =0x00018031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00018039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x08208030
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x08208038
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00088032
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x0008803A
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for a while. */
++ ldr r1, =4
++delay_1:
++ ldr r2, =0
++cont_1:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont_1
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay_1
++
++ ldr r0, [r5, #MMDC0_MDCF0]
++ bic r0, r0, #0xf
++ orr r0, r0, #0x3
++ str r0, [r5, #MMDC0_MDCF0]
++
++ ldr r0, [r5, #MMDC0_MDCF1]
++ bic r0, r0, #0x7
++ orr r0, r0, #0x4
++ str r0, [r5, #MMDC0_MDCF1]
++
++ ldr r0, =0x00091680
++ str r0, [r5, #MMDC0_MDMISC]
++
++ /* enable dqs pull down in the IOMUX. */
++ ldr r1, [r11]
++ add r11, r11, #8
++ ldr r2, =0x3028
++update_iomux:
++ ldr r0, [r11, #0x0]
++ ldr r3, [r7, r0]
++ bic r3, r3, r2
++ orr r3, r3, #(0x3 << 12)
++ orr r3, r3, #0x28
++ str r3, [r7, r0]
++ add r11, r11, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_iomux
++
++ /* ODT disabled. */
++ ldr r0, =0x0
++ ldr r2, =MMDC0_MPODTCTRL
++ str r0, [r5, r2]
++ ldr r2, =MMDC1_MPODTCTRL
++ str r0, [r5, r2]
++
++ /* DQS gating disabled. */
++ ldr r2, =MMDC0_MPDGCTRL0
++ ldr r0, [r5, r2]
++ orr r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ ldr r2, =MMDC1_MPDGCTRL0
++ ldr r0, [r5, r2]
++ orr r0, r0, #(0x1 << 29)
++ str r0, [r5, r2]
++
++ /* MMDC0_MAPSR adopt power down enable. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* frc_msr + mu bypass */
++ ldr r0, =0x00000060
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++ ldr r0, =0x00000460
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++ ldr r0, =0x00000c60
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++continue_dll_off_3:
++ /* clear SBS - unblock accesses to DDR. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ bic r0, r0, #(0x1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_clear_1:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ beq poll_conreq_clear_1
++
++ b done
++
++dll_on_mode:
++ /* assert DVFS - enter self refresh mode. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* de-assert CON_REQ. */
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* poll DVFS ack. */
++poll_dvfs_set_2:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ bne poll_dvfs_set_2
++
++ ldr r1, =528000000
++ cmp r4, r1
++ beq switch_freq_528
++
++ switch_to_400MHz
++
++ b continue_dll_on
++
++switch_freq_528:
++ switch_to_528MHz
++
++continue_dll_on:
++
++ /* set SBS step-by-step mode. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ orr r0, r0, #( 1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ /* clear DVFS - exit self refresh mode. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++poll_dvfs_clear_2:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq poll_dvfs_clear_2
++
++ /* if DLL is currently off, turn it back on. */
++ cmp r9, #0
++ beq update_calibration_only
++
++ ldr r0, =0xa5390003
++ str r0, [r5, #MMDC0_MPZQHWCTRL]
++ ldr r2, =MMDC1_MPZQHWCTRL
++ str r0, [r5, r2]
++
++ /* enable DQS gating. */
++ ldr r2, =MMDC0_MPDGCTRL0
++ ldr r0, [r5, r2]
++ bic r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ ldr r2, =MMDC1_MPDGCTRL0
++ ldr r0, [r5, r2]
++ bic r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ /* force measure. */
++ ldr r0, =0x00000800
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++ /* delay for while. */
++ ldr r1, =4
++delay5:
++ ldr r2, =0
++cont5:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont5
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay5
++
++ /* disable dqs pull down in the IOMUX. */
++ ldr r1, [r11]
++ add r11, r11, #8
++update_iomux1:
++ ldr r0, [r11, #0x0]
++ ldr r3, [r11, #0x4]
++ str r3, [r7, r0]
++ add r11, r11, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_iomux1
++
++ /* config MMDC timings to 528MHz. */
++ ldr r9, [r8]
++ add r8, r8, #8
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ /* update MISC register: WALAT, RALAT */
++ ldr r0, =0x00081740
++ str r0, [r5, #MMDC0_MDMISC]
++
++ /* configure ddr devices to dll on, odt. */
++ ldr r0, =0x00028031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00028039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for while. */
++ ldr r1, =4
++delay7:
++ ldr r2, =0
++cont7:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont7
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay7
++
++ /* reset dll. */
++ ldr r0, =0x09208030
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x09208038
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for while. */
++ ldr r1, =100
++delay8:
++ ldr r2, =0
++cont8:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, =0x00428031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00428039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ /* issue a zq command. */
++ ldr r0, =0x04008040
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x04008048
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* MMDC ODT enable. */
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r2, =0x4818
++ str r0, [r5, r2]
++
++ /* delay for while. */
++ ldr r1, =40
++delay15:
++ ldr r2, =0
++cont15:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont15
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay15
++
++ /* MMDC0_MAPSR adopt power down enable. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* enable MMDC power down timer. */
++ ldr r0, [r5, #MMDC0_MDPDC]
++ orr r0, r0, #(0x55 << 8)
++ str r0, [r5, #MMDC0_MDPDC]
++
++ b update_calibration
++
++update_calibration_only:
++ ldr r1, [r8]
++ sub r1, r1, #7
++ add r8, r8, #64
++ b update_calib
++
++update_calibration:
++ /* write the new calibration values. */
++ mov r1, r9
++ sub r1, r1, #7
++
++update_calib:
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_calib
++
++ /* perform a force measurement. */
++ ldr r0, =0x800
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++ /* clear SBS - unblock DDR accesses. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ bic r0, r0, #(1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_clear_2:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ beq poll_conreq_clear_2
++
++done:
++ /* restore registers */
++
++ ldmfd sp!, {r4-r12}
++ mov pc, lr
++
++ .type mx6_do_ddr3_freq_change, #object
++ENTRY(mx6_do_ddr_freq_change)
++ .word mx6_ddr3_freq_change
++ .size mx6_ddr3_freq_change, . - mx6_ddr3_freq_change
+diff -Nur linux-3.14.36/arch/arm/mach-imx/gpc.c linux-openelec/arch/arm/mach-imx/gpc.c
+--- linux-3.14.36/arch/arm/mach-imx/gpc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/gpc.c 2015-05-06 12:05:43.000000000 -0500
+@@ -10,30 +10,69 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <linux/clk.h>
++#include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/irq.h>
++#include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/platform_device.h>
+ #include <linux/irqchip/arm-gic.h>
++#include <linux/regulator/consumer.h>
++#include <linux/regulator/driver.h>
++#include <linux/regulator/machine.h>
+ #include "common.h"
++#include "hardware.h"
+
+ #define GPC_IMR1 0x008
+ #define GPC_PGC_CPU_PDN 0x2a0
++#define GPC_PGC_GPU_PDN 0x260
++#define GPC_PGC_GPU_PUPSCR 0x264
++#define GPC_PGC_GPU_PDNSCR 0x268
++#define GPC_PGC_GPU_SW_SHIFT 0
++#define GPC_PGC_GPU_SW_MASK 0x3f
++#define GPC_PGC_GPU_SW2ISO_SHIFT 8
++#define GPC_PGC_GPU_SW2ISO_MASK 0x3f
++#define GPC_PGC_CPU_PUPSCR 0x2a4
++#define GPC_PGC_CPU_PDNSCR 0x2a8
++#define GPC_PGC_CPU_SW_SHIFT 0
++#define GPC_PGC_CPU_SW_MASK 0x3f
++#define GPC_PGC_CPU_SW2ISO_SHIFT 8
++#define GPC_PGC_CPU_SW2ISO_MASK 0x3f
++#define GPC_CNTR 0x0
++#define GPC_CNTR_PU_UP_REQ_SHIFT 0x1
++#define GPC_CNTR_PU_DOWN_REQ_SHIFT 0x0
+
+ #define IMR_NUM 4
+
+ static void __iomem *gpc_base;
+ static u32 gpc_wake_irqs[IMR_NUM];
+ static u32 gpc_saved_imrs[IMR_NUM];
++static struct clk *gpu3d_clk, *gpu3d_shader_clk, *gpu2d_clk, *gpu2d_axi_clk;
++static struct clk *openvg_axi_clk, *vpu_clk, *ipg_clk;
++static struct device *gpc_dev;
++struct regulator *pu_reg;
++struct notifier_block nb;
++static struct regulator_dev *pu_dummy_regulator_rdev;
++static struct regulator_init_data pu_dummy_initdata = {
++ .constraints = {
++ .max_uV = 1450000, /* allign with real max of anatop */
++ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
++ REGULATOR_CHANGE_VOLTAGE,
++ },
++};
++static int pu_dummy_enable;
+
+-void imx_gpc_pre_suspend(void)
++void imx_gpc_pre_suspend(bool arm_power_off)
+ {
+ void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+ int i;
+
+- /* Tell GPC to power off ARM core when suspend */
+- writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_PDN);
++ if (arm_power_off)
++ /* Tell GPC to power off ARM core when suspend */
++ writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_PDN);
+
+ for (i = 0; i < IMR_NUM; i++) {
+ gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
+@@ -120,10 +159,119 @@
+ writel_relaxed(val, reg);
+ }
+
++static void imx_pu_clk(bool enable)
++{
++ if (enable) {
++ if (cpu_is_imx6sl()) {
++ clk_prepare_enable(gpu2d_clk);
++ clk_prepare_enable(openvg_axi_clk);
++ } else {
++ clk_prepare_enable(vpu_clk);
++ clk_prepare_enable(gpu3d_clk);
++ clk_prepare_enable(gpu3d_shader_clk);
++ clk_prepare_enable(gpu2d_clk);
++ clk_prepare_enable(gpu2d_axi_clk);
++ clk_prepare_enable(openvg_axi_clk);
++ }
++ } else {
++ if (cpu_is_imx6sl()) {
++ clk_disable_unprepare(gpu2d_clk);
++ clk_disable_unprepare(openvg_axi_clk);
++ } else {
++ clk_disable_unprepare(openvg_axi_clk);
++ clk_disable_unprepare(gpu2d_axi_clk);
++ clk_disable_unprepare(gpu2d_clk);
++ clk_disable_unprepare(gpu3d_shader_clk);
++ clk_disable_unprepare(gpu3d_clk);
++ clk_disable_unprepare(vpu_clk);
++ }
++ }
++}
++
++static void imx_gpc_pu_enable(bool enable)
++{
++ u32 rate, delay_us;
++ u32 gpu_pupscr_sw2iso, gpu_pdnscr_iso2sw;
++ u32 gpu_pupscr_sw, gpu_pdnscr_iso;
++
++ /* get ipg clk rate for PGC delay */
++ rate = clk_get_rate(ipg_clk);
++
++ if (enable) {
++ imx_anatop_pu_enable(true);
++ /*
++ * need to add necessary delay between powering up PU LDO and
++ * disabling PU isolation in PGC, the counter of PU isolation
++ * is based on ipg clk.
++ */
++ gpu_pupscr_sw2iso = (readl_relaxed(gpc_base +
++ GPC_PGC_GPU_PUPSCR) >> GPC_PGC_GPU_SW2ISO_SHIFT)
++ & GPC_PGC_GPU_SW2ISO_MASK;
++ gpu_pupscr_sw = (readl_relaxed(gpc_base +
++ GPC_PGC_GPU_PUPSCR) >> GPC_PGC_GPU_SW_SHIFT)
++ & GPC_PGC_GPU_SW_MASK;
++ delay_us = (gpu_pupscr_sw2iso + gpu_pupscr_sw) * 1000000
++ / rate + 1;
++ udelay(delay_us);
++
++ imx_pu_clk(true);
++ writel_relaxed(1, gpc_base + GPC_PGC_GPU_PDN);
++ writel_relaxed(1 << GPC_CNTR_PU_UP_REQ_SHIFT,
++ gpc_base + GPC_CNTR);
++ while (readl_relaxed(gpc_base + GPC_CNTR) &
++ (1 << GPC_CNTR_PU_UP_REQ_SHIFT))
++ ;
++ imx_pu_clk(false);
++ } else {
++ writel_relaxed(1, gpc_base + GPC_PGC_GPU_PDN);
++ writel_relaxed(1 << GPC_CNTR_PU_DOWN_REQ_SHIFT,
++ gpc_base + GPC_CNTR);
++ while (readl_relaxed(gpc_base + GPC_CNTR) &
++ (1 << GPC_CNTR_PU_DOWN_REQ_SHIFT))
++ ;
++ /*
++ * need to add necessary delay between enabling PU isolation
++ * in PGC and powering down PU LDO , the counter of PU isolation
++ * is based on ipg clk.
++ */
++ gpu_pdnscr_iso2sw = (readl_relaxed(gpc_base +
++ GPC_PGC_GPU_PDNSCR) >> GPC_PGC_GPU_SW2ISO_SHIFT)
++ & GPC_PGC_GPU_SW2ISO_MASK;
++ gpu_pdnscr_iso = (readl_relaxed(gpc_base +
++ GPC_PGC_GPU_PDNSCR) >> GPC_PGC_GPU_SW_SHIFT)
++ & GPC_PGC_GPU_SW_MASK;
++ delay_us = (gpu_pdnscr_iso2sw + gpu_pdnscr_iso) * 1000000
++ / rate + 1;
++ udelay(delay_us);
++ imx_anatop_pu_enable(false);
++ }
++}
++
++static int imx_gpc_regulator_notify(struct notifier_block *nb,
++ unsigned long event,
++ void *ignored)
++{
++ switch (event) {
++ case REGULATOR_EVENT_PRE_DISABLE:
++ imx_gpc_pu_enable(false);
++ break;
++ case REGULATOR_EVENT_ENABLE:
++ imx_gpc_pu_enable(true);
++ break;
++ default:
++ break;
++ }
++
++ return NOTIFY_OK;
++}
++
+ void __init imx_gpc_init(void)
+ {
+ struct device_node *np;
+ int i;
++ u32 val;
++ u32 cpu_pupscr_sw2iso, cpu_pupscr_sw;
++ u32 cpu_pdnscr_iso2sw, cpu_pdnscr_iso;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
+ gpc_base = of_iomap(np, 0);
+@@ -137,4 +285,190 @@
+ gic_arch_extn.irq_mask = imx_gpc_irq_mask;
+ gic_arch_extn.irq_unmask = imx_gpc_irq_unmask;
+ gic_arch_extn.irq_set_wake = imx_gpc_irq_set_wake;
++
++ /*
++ * If there are CPU isolation timing settings in dts,
++ * update them according to dts, otherwise, keep them
++ * with default value in registers.
++ */
++ cpu_pupscr_sw2iso = cpu_pupscr_sw =
++ cpu_pdnscr_iso2sw = cpu_pdnscr_iso = 0;
++
++ /* Read CPU isolation setting for GPC */
++ of_property_read_u32(np, "fsl,cpu_pupscr_sw2iso", &cpu_pupscr_sw2iso);
++ of_property_read_u32(np, "fsl,cpu_pupscr_sw", &cpu_pupscr_sw);
++ of_property_read_u32(np, "fsl,cpu_pdnscr_iso2sw", &cpu_pdnscr_iso2sw);
++ of_property_read_u32(np, "fsl,cpu_pdnscr_iso", &cpu_pdnscr_iso);
++
++ /* Update CPU PUPSCR timing if it is defined in dts */
++ val = readl_relaxed(gpc_base + GPC_PGC_CPU_PUPSCR);
++ if (cpu_pupscr_sw2iso)
++ val &= ~(GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
++ if (cpu_pupscr_sw)
++ val &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT);
++ val |= cpu_pupscr_sw2iso << GPC_PGC_CPU_SW2ISO_SHIFT;
++ val |= cpu_pupscr_sw << GPC_PGC_CPU_SW_SHIFT;
++ writel_relaxed(val, gpc_base + GPC_PGC_CPU_PUPSCR);
++
++ /* Update CPU PDNSCR timing if it is defined in dts */
++ val = readl_relaxed(gpc_base + GPC_PGC_CPU_PDNSCR);
++ if (cpu_pdnscr_iso2sw)
++ val &= ~(GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
++ if (cpu_pdnscr_iso)
++ val &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT);
++ val |= cpu_pdnscr_iso2sw << GPC_PGC_CPU_SW2ISO_SHIFT;
++ val |= cpu_pdnscr_iso << GPC_PGC_CPU_SW_SHIFT;
++ writel_relaxed(val, gpc_base + GPC_PGC_CPU_PDNSCR);
++}
++
++static int imx_pureg_set_voltage(struct regulator_dev *reg, int min_uV,
++ int max_uV, unsigned *selector)
++{
++ return 0;
++}
++
++static int imx_pureg_enable(struct regulator_dev *rdev)
++{
++ pu_dummy_enable = 1;
++
++ return 0;
++}
++
++static int imx_pureg_disable(struct regulator_dev *rdev)
++{
++ pu_dummy_enable = 0;
++
++ return 0;
+ }
++
++static int imx_pureg_is_enable(struct regulator_dev *rdev)
++{
++ return pu_dummy_enable;
++}
++
++static int imx_pureg_list_voltage(struct regulator_dev *rdev,
++ unsigned int selector)
++{
++ return 0;
++}
++
++static struct regulator_ops pu_dummy_ops = {
++ .set_voltage = imx_pureg_set_voltage,
++ .enable = imx_pureg_enable,
++ .disable = imx_pureg_disable,
++ .is_enabled = imx_pureg_is_enable,
++ .list_voltage = imx_pureg_list_voltage,
++};
++
++static struct regulator_desc pu_dummy_desc = {
++ .name = "pureg-dummy",
++ .id = -1,
++ .type = REGULATOR_VOLTAGE,
++ .owner = THIS_MODULE,
++ .ops = &pu_dummy_ops,
++};
++
++static int pu_dummy_probe(struct platform_device *pdev)
++{
++ struct regulator_config config = { };
++ int ret;
++
++ config.dev = &pdev->dev;
++ config.init_data = &pu_dummy_initdata;
++ config.of_node = pdev->dev.of_node;
++
++ pu_dummy_regulator_rdev = regulator_register(&pu_dummy_desc, &config);
++ if (IS_ERR(pu_dummy_regulator_rdev)) {
++ ret = PTR_ERR(pu_dummy_regulator_rdev);
++ dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static const struct of_device_id imx_pudummy_ids[] = {
++ { .compatible = "fsl,imx6-dummy-pureg" },
++};
++MODULE_DEVICE_TABLE(of, imx_pudummy_ids);
++
++static struct platform_driver pu_dummy_driver = {
++ .probe = pu_dummy_probe,
++ .driver = {
++ .name = "pu-dummy",
++ .owner = THIS_MODULE,
++ .of_match_table = imx_pudummy_ids,
++ },
++};
++
++static int imx_gpc_probe(struct platform_device *pdev)
++{
++ int ret;
++
++ gpc_dev = &pdev->dev;
++
++ pu_reg = devm_regulator_get(gpc_dev, "pu");
++ if (IS_ERR(pu_reg)) {
++ ret = PTR_ERR(pu_reg);
++ dev_info(gpc_dev, "pu regulator not ready.\n");
++ return ret;
++ }
++ nb.notifier_call = &imx_gpc_regulator_notify;
++
++ /* Get gpu&vpu clk for power up PU by GPC */
++ if (cpu_is_imx6sl()) {
++ gpu2d_clk = devm_clk_get(gpc_dev, "gpu2d_podf");
++ openvg_axi_clk = devm_clk_get(gpc_dev, "gpu2d_ovg");
++ ipg_clk = devm_clk_get(gpc_dev, "ipg");
++ if (IS_ERR(gpu2d_clk) || IS_ERR(openvg_axi_clk)
++ || IS_ERR(ipg_clk)) {
++ dev_err(gpc_dev, "failed to get clk!\n");
++ return -ENOENT;
++ }
++ } else {
++ gpu3d_clk = devm_clk_get(gpc_dev, "gpu3d_core");
++ gpu3d_shader_clk = devm_clk_get(gpc_dev, "gpu3d_shader");
++ gpu2d_clk = devm_clk_get(gpc_dev, "gpu2d_core");
++ gpu2d_axi_clk = devm_clk_get(gpc_dev, "gpu2d_axi");
++ openvg_axi_clk = devm_clk_get(gpc_dev, "openvg_axi");
++ vpu_clk = devm_clk_get(gpc_dev, "vpu_axi");
++ ipg_clk = devm_clk_get(gpc_dev, "ipg");
++ if (IS_ERR(gpu3d_clk) || IS_ERR(gpu3d_shader_clk)
++ || IS_ERR(gpu2d_clk) || IS_ERR(gpu2d_axi_clk)
++ || IS_ERR(openvg_axi_clk) || IS_ERR(vpu_clk)
++ || IS_ERR(ipg_clk)) {
++ dev_err(gpc_dev, "failed to get clk!\n");
++ return -ENOENT;
++ }
++ }
++
++ ret = regulator_register_notifier(pu_reg, &nb);
++ if (ret) {
++ dev_err(gpc_dev,
++ "regulator notifier request failed\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static const struct of_device_id imx_gpc_ids[] = {
++ { .compatible = "fsl,imx6q-gpc" },
++};
++MODULE_DEVICE_TABLE(of, imx_gpc_ids);
++
++static struct platform_driver imx_gpc_platdrv = {
++ .driver = {
++ .name = "imx-gpc",
++ .owner = THIS_MODULE,
++ .of_match_table = imx_gpc_ids,
++ },
++ .probe = imx_gpc_probe,
++};
++module_platform_driver(imx_gpc_platdrv);
++
++module_platform_driver(pu_dummy_driver);
++
++MODULE_AUTHOR("Anson Huang <b20788@freescale.com>");
++MODULE_DESCRIPTION("Freescale i.MX GPC driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/arch/arm/mach-imx/hardware.h linux-openelec/arch/arm/mach-imx/hardware.h
+--- linux-3.14.36/arch/arm/mach-imx/hardware.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/hardware.h 2015-05-06 12:05:43.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright 2004-2007, 2014 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+@@ -20,7 +20,9 @@
+ #ifndef __ASM_ARCH_MXC_HARDWARE_H__
+ #define __ASM_ARCH_MXC_HARDWARE_H__
+
++#ifndef __ASSEMBLY__
+ #include <asm/io.h>
++#endif
+ #include <asm/sizes.h>
+
+ #define addr_in_module(addr, mod) \
+diff -Nur linux-3.14.36/arch/arm/mach-imx/headsmp.S linux-openelec/arch/arm/mach-imx/headsmp.S
+--- linux-3.14.36/arch/arm/mach-imx/headsmp.S 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/headsmp.S 2015-05-06 12:05:43.000000000 -0500
+@@ -12,8 +12,6 @@
+
+ #include <linux/linkage.h>
+ #include <linux/init.h>
+-#include <asm/asm-offsets.h>
+-#include <asm/hardware/cache-l2x0.h>
+
+ .section ".text.head", "ax"
+
+@@ -35,37 +33,3 @@
+ b secondary_startup
+ ENDPROC(v7_secondary_startup)
+ #endif
+-
+-#ifdef CONFIG_ARM_CPU_SUSPEND
+-/*
+- * The following code must assume it is running from physical address
+- * where absolute virtual addresses to the data section have to be
+- * turned into relative ones.
+- */
+-
+-#ifdef CONFIG_CACHE_L2X0
+- .macro pl310_resume
+- adr r0, l2x0_saved_regs_offset
+- ldr r2, [r0]
+- add r2, r2, r0
+- ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
+- ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
+- str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
+- mov r1, #0x1
+- str r1, [r0, #L2X0_CTRL] @ re-enable L2
+- .endm
+-
+-l2x0_saved_regs_offset:
+- .word l2x0_saved_regs - .
+-
+-#else
+- .macro pl310_resume
+- .endm
+-#endif
+-
+-ENTRY(v7_cpu_resume)
+- bl v7_invalidate_l1
+- pl310_resume
+- b cpu_resume
+-ENDPROC(v7_cpu_resume)
+-#endif
+diff -Nur linux-3.14.36/arch/arm/mach-imx/imx6sl_wfi.S linux-openelec/arch/arm/mach-imx/imx6sl_wfi.S
+--- linux-3.14.36/arch/arm/mach-imx/imx6sl_wfi.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/imx6sl_wfi.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,639 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/linkage.h>
++#define IRAM_WAIT_SIZE (1 << 11)
++
++ .macro sl_ddr_io_save
++
++ ldr r4, [r1, #0x30c] /* DRAM_DQM0 */
++ ldr r5, [r1, #0x310] /* DRAM_DQM1 */
++ ldr r6, [r1, #0x314] /* DRAM_DQM2 */
++ ldr r7, [r1, #0x318] /* DRAM_DQM3 */
++ stmfd r9!, {r4-r7}
++
++ ldr r4, [r1, #0x5c4] /* GPR_B0DS */
++ ldr r5, [r1, #0x5cc] /* GPR_B1DS */
++ ldr r6, [r1, #0x5d4] /* GPR_B2DS */
++ ldr r7, [r1, #0x5d8] /* GPR_B3DS */
++ stmfd r9!, {r4-r7}
++
++ ldr r4, [r1, #0x300] /* DRAM_CAS */
++ ldr r5, [r1, #0x31c] /* DRAM_RAS */
++ ldr r6, [r1, #0x338] /* DRAM_SDCLK_0 */
++ ldr r7, [r1, #0x5ac] /* GPR_ADDS*/
++ stmfd r9!, {r4-r7}
++
++ ldr r4, [r1, #0x5b0] /* DDRMODE_CTL */
++ ldr r5, [r1, #0x5c0] /* DDRMODE */
++ ldr r6, [r1, #0x33c] /* DRAM_SODT0*/
++ ldr r7, [r1, #0x340] /* DRAM_SODT1*/
++ stmfd r9!, {r4-r7}
++
++ ldr r4, [r1, #0x330] /* DRAM_SDCKE0 */
++ ldr r5, [r1, #0x334] /* DRAM_SDCKE1 */
++ ldr r6, [r1, #0x320] /* DRAM_RESET */
++ stmfd r9!, {r4-r6}
++
++ .endm
++
++ .macro sl_ddr_io_restore
++
++ /*
++ * r9 points to IRAM stack.
++ * r1 points to IOMUX base address.
++ * r8 points to MMDC base address.
++ */
++ ldmea r9!, {r4-r7}
++ str r4, [r1, #0x30c] /* DRAM_DQM0 */
++ str r5, [r1, #0x310] /* DRAM_DQM1 */
++ str r6, [r1, #0x314] /* DRAM_DQM2 */
++ str r7, [r1, #0x318] /* DRAM_DQM3 */
++
++ ldmea r9!, {r4-r7}
++ str r4, [r1, #0x5c4] /* GPR_B0DS */
++ str r5, [r1, #0x5cc] /* GPR_B1DS */
++ str r6, [r1, #0x5d4] /* GPR_B2DS */
++ str r7, [r1, #0x5d8] /* GPR_B3DS */
++
++ ldmea r9!, {r4-r7}
++ str r4, [r1, #0x300] /* DRAM_CAS */
++ str r5, [r1, #0x31c] /* DRAM_RAS */
++ str r6, [r1, #0x338] /* DRAM_SDCLK_0 */
++ str r7, [r1, #0x5ac] /* GPR_ADDS*/
++
++ ldmea r9!, {r4-r7}
++ str r4, [r1, #0x5b0] /* DDRMODE_CTL */
++ str r5, [r1, #0x5c0] /* DDRMODE */
++ str r6, [r1, #0x33c] /* DRAM_SODT0*/
++ str r7, [r1, #0x340] /* DRAM_SODT1*/
++
++ ldmea r9!, {r4-r6}
++ str r4, [r1, #0x330] /* DRAM_SDCKE0 */
++ str r5, [r1, #0x334] /* DRAM_SDCKE1 */
++ str r6, [r1, #0x320] /* DRAM_RESET */
++
++ /*
++ * Need to reset the FIFO to avoid MMDC lockup
++ * caused because of floating/changing the
++ * configuration of many DDR IO pads.
++ */
++ ldr r7, =0x83c
++ ldr r6, [r8, r7]
++ orr r6, r6, #0x80000000
++ str r6, [r8, r7]
++fifo_reset1_wait:
++ ldr r6, [r8, r7]
++ and r6, r6, #0x80000000
++ cmp r6, #0
++ bne fifo_reset1_wait
++
++ /* reset FIFO a second time */
++ ldr r6, [r8, r7]
++ orr r6, r6, #0x80000000
++ str r6, [r8, r7]
++fifo_reset2_wait:
++ ldr r6, [r8, r7]
++ and r6, r6, #0x80000000
++ cmp r6, #0
++ bne fifo_reset2_wait
++
++ .endm
++
++ .macro sl_ddr_io_set_lpm
++
++ mov r4, #0
++ str r4, [r1, #0x30c] /* DRAM_DQM0 */
++ str r4, [r1, #0x310] /* DRAM_DQM1 */
++ str r4, [r1, #0x314] /* DRAM_DQM2 */
++ str r4, [r1, #0x318] /* DRAM_DQM3 */
++
++ str r4, [r1, #0x5c4] /* GPR_B0DS */
++ str r4, [r1, #0x5cc] /* GPR_B1DS */
++ str r4, [r1, #0x5d4] /* GPR_B2DS */
++ str r4, [r1, #0x5d8] /* GPR_B3DS */
++
++ str r4, [r1, #0x300] /* DRAM_CAS */
++ str r4, [r1, #0x31c] /* DRAM_RAS */
++ str r4, [r1, #0x338] /* DRAM_SDCLK_0 */
++ str r4, [r1, #0x5ac] /* GPR_ADDS*/
++
++ str r4, [r1, #0x5b0] /* DDRMODE_CTL */
++ str r4, [r1, #0x5c0] /* DDRMODE */
++ str r4, [r1, #0x33c] /* DRAM_SODT0*/
++ str r4, [r1, #0x340] /* DRAM_SODT1*/
++
++ mov r4, #0x80000
++ str r4, [r1, #0x320] /* DRAM_RESET */
++ mov r4, #0x1000
++ str r4, [r1, #0x330] /* DRAM_SDCKE0 */
++ str r4, [r1, #0x334] /* DRAM_SDCKE1 */
++
++ .endm
++
++/*
++ * imx6sl_low_power_wfi
++ *
++ * Idle the processor (eg, wait for interrupt).
++ * Make sure DDR is in self-refresh.
++ * IRQs are already disabled.
++ * r0: WFI IRAMcode base address.
++ * r1: IOMUX base address
++ * r2: Base address of CCM, ANATOP and MMDC
++ * r3: 1 if in audio_bus_freq_mode
++ */
++ .align 3
++ENTRY(imx6sl_low_power_wfi)
++
++ push {r4-r11}
++
++mx6sl_lpm_wfi:
++ /* Store audio_bus_freq_mode */
++ mov r11, r3
++
++ mov r4,r2
++ /* Get the IRAM data storage address. */
++ mov r10, r0
++ mov r9, r0 /* get suspend_iram_base */
++ add r9, r9, #IRAM_WAIT_SIZE
++
++ /* Anatop Base address in r3. */
++ ldr r3, [r4]
++ /* CCM Base Address in r2 */
++ ldr r2, [r4, #0x4]
++ /* MMDC Base Address in r8 */
++ ldr r8, [r4, #0x8]
++ /* L2 Base Address in r7 */
++ ldr r7, [r4, #0xC]
++
++ ldr r6, [r8]
++ ldr r6, [r3]
++ ldr r6, [r2]
++ ldr r6, [r1]
++
++ /* Store the original ARM PODF. */
++ ldr r0, [r2, #0x10]
++
++ /* Drain all the L1 buffers. */
++ dsb
++
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Need to make sure the buffers in L2 are drained.
++ * Performing a sync operation does this.
++ */
++ mov r6, #0x0
++ str r6, [r7, #0x730]
++#endif
++
++ /*
++ * The second dsb might be needed to keep cache sync (device write)
++ * ordering with the memory accesses before it.
++ */
++ dsb
++ isb
++
++ /* Save the DDR IO state. */
++ sl_ddr_io_save
++
++ /* Disable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ /* Make the DDR explicitly enter self-refresh. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_set_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ bne poll_dvfs_set_1
++
++ /* set SBS step-by-step mode */
++ ldr r6, [r8, #0x410]
++ orr r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++ cmp r11, #1
++ beq audio_mode
++ /*
++ * Now set DDR rate to 1MHz.
++ * DDR is from bypassed PLL2 on periph2_clk2 path.
++ * Set the periph2_clk2_podf to divide by 8.
++ */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x07
++ str r6, [r2, #0x14]
++
++ /* Now set MMDC PODF to divide by 3. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ orr r6, r6, #0x10
++ str r6, [r2, #0x14]
++ b mmdc_podf
++
++audio_mode:
++ /* MMDC is from PLL2_200M.
++ * Set the mmdc_podf to div by 8.
++ */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x38
++ str r6, [r2, #0x14]
++
++ /* Loop till podf is accepted. */
++mmdc_podf:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne mmdc_podf
++
++ /* Set the DDR IO in LPM state. */
++ sl_ddr_io_set_lpm
++
++ cmp r11, #1
++ beq do_audio_arm_clk
++
++ /*
++ * Check if none of the PLLs are
++ * locked, except PLL1 which will get
++ * bypassed below.
++ * We should not be here if PLL2 is not
++ * bypassed.
++ */
++ ldr r7, =1
++ /* USB1 PLL3 */
++ ldr r6, [r3, #0x10]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ /* USB2 PLL7 */
++ ldr r6, [r3, #0x20]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ /* Audio PLL4 */
++ ldr r6, [r3, #0x70]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ /* Video PLL5 */
++ ldr r6, [r3, #0xA0]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ /* ENET PLL8 */
++ ldr r6, [r3, #0xE0]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ b cont
++
++no_analog_saving:
++ ldr r7, =0
++
++cont:
++ /* Set the AHB to 3MHz. AXI to 3MHz. */
++ ldr r9, [r2, #0x14]
++ mov r6, r9
++ orr r6, r6, #0x1c00
++ orr r6, r6, #0x70000
++ str r6, [r2, #0x14]
++
++ /* Loop till podf is accepted. */
++ahb_podf:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop
++
++ /*
++ * Now set ARM to 24MHz.
++ * Move ARM to be sourced from STEP_CLK
++ * after setting STEP_CLK to 24MHz.
++ */
++ ldr r6, [r2, #0xc]
++ bic r6, r6, #0x100
++ str r6, [r2, #0x0c]
++ /* Now PLL1_SW_CLK to step_clk. */
++ ldr r6, [r2, #0x0c]
++ orr r6, r6, #0x4
++ str r6, [r2, #0x0c]
++
++ /* Bypass PLL1 and power it down. */
++ ldr r6, =(1 << 16)
++ orr r6, r6, #0x1000
++ str r6, [r3, #0x04]
++
++ /*
++ * Set the ARM PODF to divide by 8.
++ * IPG is at 1.5MHz here, we need ARM to
++ * run at the 12:5 ratio (WAIT mode issue).
++ */
++ ldr r6, =0x7
++ str r6, [r2, #0x10]
++
++ /* Loop till podf is accepted. */
++podf_loop:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop
++
++ /*
++ * Check if we can save some
++ * power in the Analog section.
++ */
++ cmp r7, #0x1
++ bne do_wfi
++
++ /* Disable 1p1 brown out. */
++ ldr r6, [r3, #0x110]
++ bic r6, r6, #0x2
++ str r6, [r3, #0x110]
++
++ /* Enable the weak 2P5 */
++ ldr r6, [r3, #0x130]
++ orr r6, r6, #0x40000
++ str r6, [r3, #0x130]
++
++ /* Disable main 2p5. */
++ ldr r6, [r3, #0x130]
++ bic r6, r6, #0x1
++ str r6, [r3, #0x130]
++
++ /*
++ * Set the OSC bias current to -37.5%
++ * to drop the power on VDDHIGH.
++ */
++ ldr r6, [r3, #0x150]
++ orr r6, r6, #0xC000
++ str r6, [r3, #0x150]
++
++ /* Enable low power bandgap */
++ ldr r6, [r3, #0x260]
++ orr r6, r6, #0x20
++ str r6, [r3, #0x260]
++
++ /*
++ * Turn off the bias current
++ * from the regular bandgap.
++ */
++ ldr r6, [r3, #0x260]
++ orr r6, r6, #0x80
++ str r6, [r3, #0x260]
++
++ /*
++ * Clear the REFTOP_SELFBIASOFF,
++ * self-bias circuit of the band gap.
++ * Per RM, should be cleared when
++ * band gap is powered down.
++ */
++ ldr r6, [r3, #0x150]
++ bic r6, r6, #0x8
++ str r6, [r3, #0x150]
++
++ /* Power down the regular bandgap. */
++ ldr r6, [r3, #0x150]
++ orr r6, r6, #0x1
++ str r6, [r3, #0x150]
++
++ b do_wfi
++
++do_audio_arm_clk:
++ /*
++ * ARM is from PLL2_PFD2_400M here.
++ * Switch ARM to bypassed PLL1.
++ */
++ ldr r6, [r2, #0xC]
++ bic r6, r6, #0x4
++ str r6, [r2, #0xC]
++
++ /*
++ * Set the ARM_PODF to divide by 2
++ * as IPG is at 4MHz, we cannot run
++ * ARM_CLK above 9.6MHz when
++ * system enters WAIT mode.
++ */
++ ldr r6, =0x2
++ str r6, [r2, #0x10]
++
++ /* Loop till podf is accepted. */
++podf_loop_audio:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop_audio
++
++do_wfi:
++ /* Now do WFI. */
++ wfi
++
++ /* Set original ARM PODF back. */
++ str r0, [r2, #0x10]
++
++ /* Loop till podf is accepted. */
++podf_loop1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop1
++
++ cmp r11, #1
++ beq audio_arm_clk_restore
++
++ /*
++ * Check if powered down
++ * analog components.
++ */
++ cmp r7, #0x1
++ bne skip_analog_restore
++
++ /* Power up the regular bandgap. */
++ ldr r6, [r3, #0x150]
++ bic r6, r6, #0x1
++ str r6, [r3, #0x150]
++
++ /*
++ * Turn on the bias current
++ * from the regular bandgap.
++ */
++ ldr r6, [r3, #0x260]
++ bic r6, r6, #0x80
++ str r6, [r3, #0x260]
++
++ /* Disable the low power bandgap */
++ ldr r6, [r3, #0x260]
++ bic r6, r6, #0x20
++ str r6, [r3, #0x260]
++
++ /*
++ * Set the OSC bias current to max
++ * value for normal operation.
++ */
++ ldr r6, [r3, #0x150]
++ bic r6, r6, #0xC000
++ str r6, [r3, #0x150]
++
++ /* Enable main 2p5. */
++ ldr r6, [r3, #0x130]
++ orr r6, r6, #0x1
++ str r6, [r3, #0x130]
++
++ /* Ensure the 2P5 is up. */
++loop_2p5:
++ ldr r6, [r3, #0x130]
++ and r6, r6, #0x20000
++ cmp r6, #0x20000
++ bne loop_2p5
++
++ /* Disable the weak 2P5 */
++ ldr r6, [r3, #0x130]
++ bic r6, r6, #0x40000
++ str r6, [r3, #0x130]
++
++ /* Enable 1p1 brown out. */
++ ldr r6, [r3, #0x110]
++ orr r6, r6, #0x2
++ str r6, [r3, #0x110]
++
++skip_analog_restore:
++
++ /* Power up PLL1 and un-bypass it. */
++ ldr r6, =(1 << 12)
++ str r6, [r3, #0x08]
++
++ /* Wait for PLL1 to relock. */
++wait_for_pll_lock:
++ ldr r6, [r3, #0x0]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ bne wait_for_pll_lock
++
++ ldr r6, =(1 << 16)
++ str r6, [r3, #0x08]
++
++ /* Set PLL1_sw_clk back to PLL1. */
++ ldr r6, [r2, #0x0c]
++ bic r6, r6, #0x4
++ str r6, [r2, #0xc]
++
++ /* Restore AHB/AXI back. */
++ str r9, [r2, #0x14]
++
++ /* Loop till podf is accepted. */
++ahb_podf1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop1
++
++ b wfi_restore
++
++ audio_arm_clk_restore:
++ /* Move ARM back to PLL2_PFD2_400M */
++ ldr r6, [r2, #0xC]
++ orr r6, r6, #0x4
++ str r6, [r2, #0xC]
++
++wfi_restore:
++ /* get suspend_iram_base */
++ mov r9, r10
++ add r9, r9, #IRAM_WAIT_SIZE
++
++ /* Restore the DDR IO before exiting self-refresh. */
++ sl_ddr_io_restore
++
++ /*
++ * Set MMDC back to 24MHz.
++ * Set periph2_clk2_podf to divide by 1
++ * Now set MMDC PODF to divide by 1.
++ */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x3f
++ str r6, [r2, #0x14]
++
++mmdc_podf1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne mmdc_podf1
++
++ /* clear DVFS - exit from self refresh mode */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_clear_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ beq poll_dvfs_clear_1
++
++ /*
++ * Add these nops so that the
++ * prefetcher will not try to get
++ * any instructions from DDR.
++ * The prefetch depth is about 23
++ * on A9, so adding 25 nops.
++ */
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ /* Enable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ /* clear SBS - unblock DDR accesses */
++ ldr r6, [r8, #0x410]
++ bic r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++
++ pop {r4-r11}
++
++ /* Restore registers */
++ mov pc, lr
+diff -Nur linux-3.14.36/arch/arm/mach-imx/Kconfig linux-openelec/arch/arm/mach-imx/Kconfig
+--- linux-3.14.36/arch/arm/mach-imx/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/Kconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -1,5 +1,6 @@
+ config ARCH_MXC
+ bool "Freescale i.MX family" if ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7
++ select ARCH_HAS_RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select ARM_CPU_SUSPEND if PM
+ select ARM_PATCH_PHYS_VIRT
+@@ -13,6 +14,7 @@
+ select PINCTRL
+ select SOC_BUS
+ select SPARSE_IRQ
++ select SRAM
+ select USE_OF
+ help
+ Support for Freescale MXC/iMX-based family of processors
+@@ -63,7 +65,6 @@
+
+ config HAVE_IMX_SRC
+ def_bool y if SMP
+- select ARCH_HAS_RESET_CONTROLLER
+
+ config IMX_HAVE_IOMUX_V1
+ bool
+@@ -791,6 +792,8 @@
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_764369 if SMP
+ select ARM_ERRATA_775420
++ select ARM_ERRATA_794072 if SMP
++ select ARM_ERRATA_761320 if SMP
+ select ARM_GIC
+ select CPU_V7
+ select HAVE_ARM_SCU if SMP
+@@ -803,11 +806,13 @@
+ select MFD_SYSCON
+ select MIGHT_HAVE_PCI
+ select PCI_DOMAINS if PCI
++ select ARCH_SUPPORTS_MSI
+ select PINCTRL_IMX6Q
+ select PL310_ERRATA_588369 if CACHE_PL310
+ select PL310_ERRATA_727915 if CACHE_PL310
+ select PL310_ERRATA_769419 if CACHE_PL310
+ select PM_OPP if PM
++ select ZONE_DMA
+
+ help
+ This enables support for Freescale i.MX6 Quad processor.
+diff -Nur linux-3.14.36/arch/arm/mach-imx/lpddr2_freq_imx6.S linux-openelec/arch/arm/mach-imx/lpddr2_freq_imx6.S
+--- linux-3.14.36/arch/arm/mach-imx/lpddr2_freq_imx6.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/lpddr2_freq_imx6.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,484 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/linkage.h>
++
++ .macro mx6sl_switch_to_24MHz
++
++ /*
++ * Set MMDC clock to be sourced from PLL3.
++ * Ensure first periph2_clk2 is sourced from PLL3.
++ * Set the PERIPH2_CLK2_PODF to divide by 2.
++ */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x7
++ orr r6, r6, #0x1
++ str r6, [r2, #0x14]
++
++ /* Select PLL3 to source MMDC. */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* Swtich periph2_clk_sel to run from PLL3. */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch1
++
++ /*
++ * Need to clock gate the 528 PFDs before
++ * powering down PLL2.
++ * Only the PLL2_PFD2_400M should be ON
++ * at this time, so only clock gate that one.
++ */
++ ldr r6, [r3, #0x100]
++ orr r6, r6, #0x800000
++ str r6, [r3, #0x100]
++
++ /*
++ * Set PLL2 to bypass state. We should be here
++ * only if MMDC is not sourced from PLL2.
++ */
++ ldr r6, [r3, #0x30]
++ orr r6, r6, #0x10000
++ str r6, [r3, #0x30]
++
++ ldr r6, [r3, #0x30]
++ orr r6, r6, #0x1000
++ str r6, [r3, #0x30]
++
++ /* Ensure pre_periph2_clk_mux is set to pll2 */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x600000
++ str r6, [r2, #0x18]
++
++ /* Set MMDC clock to be sourced from the bypassed PLL2. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch2:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch2
++
++ /*
++ * Now move MMDC back to periph2_clk2 source.
++ * after selecting PLL2 as the option.
++ * Select PLL2 as the source.
++ */
++ ldr r6, [r2, #0x18]
++ orr r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* set periph2_clk2_podf to divide by 1. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x7
++ str r6, [r2, #0x14]
++
++ /* Now move periph2_clk to periph2_clk2 source */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch3:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch3
++
++ /* Now set the MMDC PODF back to 1.*/
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ str r6, [r2, #0x14]
++
++mmdc_podf0:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf0
++
++ .endm
++
++ .macro ddr_switch_400MHz
++
++ /* Set MMDC divider first, in case PLL3 is at 480MHz. */
++ ldr r6, [r3, #0x10]
++ and r6, r6, #0x10000
++ cmp r6, #0x10000
++ beq pll3_in_bypass
++
++ /* Set MMDC divder to divide by 2. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ orr r6, r6, #0x8
++ str r6, [r2, #0x14]
++
++mmdc_podf:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf
++
++pll3_in_bypass:
++ /*
++ * Check if we are switching between
++ * 400Mhz <-> 100MHz.If so, we should
++ * try to source MMDC from PLL2_200M.
++ */
++ cmp r1, #0
++ beq not_low_bus_freq
++
++ /* Ensure that MMDC is sourced from PLL2 mux first. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch4:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch4
++
++not_low_bus_freq:
++ /* Now ensure periph2_clk2_sel mux is set to PLL3 */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* Now switch MMDC to PLL3. */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch5:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch5
++
++ /*
++ * Check if PLL2 is already unlocked.
++ * If so do nothing with PLL2.
++ */
++ cmp r1, #0
++ beq pll2_already_on
++
++ /* Now power up PLL2 and unbypass it. */
++ ldr r6, [r3, #0x30]
++ bic r6, r6, #0x1000
++ str r6, [r3, #0x30]
++
++ /* Make sure PLL2 has locked.*/
++wait_for_pll_lock:
++ ldr r6, [r3, #0x30]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ bne wait_for_pll_lock
++
++ ldr r6, [r3, #0x30]
++ bic r6, r6, #0x10000
++ str r6, [r3, #0x30]
++
++ /*
++ * Need to enable the 528 PFDs after
++ * powering up PLL2.
++ * Only the PLL2_PFD2_400M should be ON
++ * as it feeds the MMDC. Rest should have
++ * been managed by clock code.
++ */
++ ldr r6, [r3, #0x100]
++ bic r6, r6, #0x800000
++ str r6, [r3, #0x100]
++
++pll2_already_on:
++ /*
++ * Now switch MMDC clk back to pll2_mux option.
++ * Ensure pre_periph2_clk2 is set to pll2_pfd_400M.
++ * If switching to audio DDR freq, set the
++ * pre_periph2_clk2 to PLL2_PFD_200M
++ */
++ ldr r6, =400000000
++ cmp r6, r0
++ bne use_pll2_pfd_200M
++
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x600000
++ orr r6, r6, #0x200000
++ str r6, [r2, #0x18]
++ ldr r6, =400000000
++ b cont2
++
++use_pll2_pfd_200M:
++ ldr r6, [r2, #0x18]
++ orr r6, r6, #0x600000
++ str r6, [r2, #0x18]
++ ldr r6, =200000000
++
++cont2:
++ ldr r4, [r2, #0x14]
++ bic r4, r4, #0x4000000
++ str r4, [r2, #0x14]
++
++periph2_clk_switch6:
++ ldr r4, [r2, #0x48]
++ cmp r4, #0
++ bne periph2_clk_switch6
++
++change_divider_only:
++ /*
++ * Calculate the MMDC divider
++ * based on the requested freq.
++ */
++ ldr r4, =0
++Loop2:
++ sub r6, r6, r0
++ cmp r6, r0
++ blt Div_Found
++ add r4, r4, #1
++ bgt Loop2
++
++ /* Shift divider into correct offset. */
++ lsl r4, r4, #3
++Div_Found:
++ /* Set the MMDC PODF. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ orr r6, r6, r4
++ str r6, [r2, #0x14]
++
++mmdc_podf1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf1
++
++ .endm
++
++ .macro mmdc_clk_lower_100MHz
++
++ /*
++ * Prior to reducing the DDR frequency (at 528/400 MHz),
++ * read the Measure unit count bits (MU_UNIT_DEL_NUM)
++ */
++ ldr r5, =0x8B8
++ ldr r6, [r8, r5]
++ /* Original MU unit count */
++ mov r6, r6, LSR #16
++ ldr r4, =0x3FF
++ and r6, r6, r4
++ /* Original MU unit count * 2 */
++ mov r7, r6, LSL #1
++ /*
++ * Bypass the automatic measure unit when below 100 MHz
++ * by setting the Measure unit bypass enable bit (MU_BYP_EN)
++ */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x400
++ str r6, [r8, r5]
++ /*
++ * Double the measure count value read in step 1 and program it in the
++ * measurement bypass bits (MU_BYP_VAL) of the MMDC PHY Measure Unit
++ * Register for the reduced frequency operation below 100 MHz
++ */
++ ldr r6, [r8, r5]
++ ldr r4, =0x3FF
++ bic r6, r6, r4
++ orr r6, r6, r7
++ str r6, [r8, r5]
++ /* Now perform a Force Measurement. */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x800
++ str r6, [r8, r5]
++ /* Wait for FRC_MSR to clear. */
++force_measure:
++ ldr r6, [r8, r5]
++ and r6, r6, #0x800
++ cmp r6, #0x0
++ bne force_measure
++
++ .endm
++
++ .macro mmdc_clk_above_100MHz
++
++ /* Make sure that the PHY measurement unit is NOT in bypass mode */
++ ldr r5, =0x8B8
++ ldr r6, [r8, r5]
++ bic r6, r6, #0x400
++ str r6, [r8, r5]
++ /* Now perform a Force Measurement. */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x800
++ str r6, [r8, r5]
++ /* Wait for FRC_MSR to clear. */
++force_measure1:
++ ldr r6, [r8, r5]
++ and r6, r6, #0x800
++ cmp r6, #0x0
++ bne force_measure1
++ .endm
++
++/*
++ * mx6_lpddr2_freq_change
++ *
++ * Make sure DDR is in self-refresh.
++ * IRQs are already disabled.
++ * r0 : DDR freq.
++ * r1: low_bus_freq_mode flag
++ * r2: Pointer to array containing addresses of registers.
++ */
++ .align 3
++ENTRY(mx6_lpddr2_freq_change)
++
++ push {r4-r10}
++
++ mov r4, r2
++ ldr r3, [r4] @ANATOP_BASE_ADDR
++ ldr r2, [r4, #0x4] @CCM_BASE_ADDR
++ ldr r8, [r4, #0x8] @MMDC_P0_BASE_ADDR
++ ldr r7, [r4, #0xC] @L2_BASE_ADDR
++
++lpddr2_freq_change:
++ adr r9, lpddr2_freq_change
++
++ /* Prime all TLB entries. */
++ ldr r6, [r9]
++ ldr r6, [r8]
++ ldr r6, [r3]
++ ldr r6, [r2]
++
++ /* Drain all the L1 buffers. */
++ dsb
++
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Need to make sure the buffers in L2 are drained.
++ * Performing a sync operation does this.
++ */
++ mov r6, #0x0
++ str r6, [r7, #0x730]
++#endif
++
++ /*
++ * The second dsb might be needed to keep cache sync (device write)
++ * ordering with the memory accesses before it.
++ */
++ dsb
++ isb
++
++ /* Disable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ /* MMDC0_MDPDC disable power down timer */
++ ldr r6, [r8, #0x4]
++ bic r6, r6, #0xff00
++ str r6, [r8, #0x4]
++
++ /* Delay for a while */
++ ldr r10, =10
++delay1:
++ ldr r7, =0
++cont1:
++ ldr r6, [r8, r7]
++ add r7, r7, #4
++ cmp r7, #16
++ bne cont1
++ sub r10, r10, #1
++ cmp r10, #0
++ bgt delay1
++
++ /* Make the DDR explicitly enter self-refresh. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_set_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ bne poll_dvfs_set_1
++
++ /* set SBS step-by-step mode */
++ ldr r6, [r8, #0x410]
++ orr r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++ ldr r10, =100000000
++ cmp r0, r10
++ bgt set_ddr_mu_above_100
++ mmdc_clk_lower_100MHz
++
++set_ddr_mu_above_100:
++ ldr r10, =24000000
++ cmp r0, r10
++ beq set_to_24MHz
++
++ ddr_switch_400MHz
++
++ ldr r10,=100000000
++ cmp r0, r10
++ blt done
++ mmdc_clk_above_100MHz
++
++ b done
++
++set_to_24MHz:
++ mx6sl_switch_to_24MHz
++
++done:
++ /* clear DVFS - exit from self refresh mode */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_clear_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ beq poll_dvfs_clear_1
++
++ /* Enable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ ldr r10, =24000000
++ cmp r0, r10
++ beq skip_power_down
++
++ /* Enable MMDC power down timer. */
++ ldr r6, [r8, #0x4]
++ orr r6, r6, #0x5500
++ str r6, [r8, #0x4]
++
++skip_power_down:
++ /* clear SBS - unblock DDR accesses */
++ ldr r6, [r8, #0x410]
++ bic r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++ pop {r4-r10}
++
++ /* Restore registers */
++ mov pc, lr
++
++ .type mx6_lpddr2_do_iram, #object
++ENTRY(mx6_lpddr2_do_iram)
++ .word mx6_lpddr2_freq_change
++ .size mx6_lpddr2_freq_change, . - mx6_lpddr2_freq_change
+diff -Nur linux-3.14.36/arch/arm/mach-imx/mach-imx6q.c linux-openelec/arch/arm/mach-imx/mach-imx6q.c
+--- linux-3.14.36/arch/arm/mach-imx/mach-imx6q.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/mach-imx6q.c 2015-07-24 18:03:30.408842002 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2011-2013 Freescale Semiconductor, Inc.
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -15,6 +15,7 @@
+ #include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
++#include <linux/gpio.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/irq.h>
+@@ -22,15 +23,19 @@
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/of_gpio.h>
+ #include <linux/of_platform.h>
+ #include <linux/pm_opp.h>
+ #include <linux/pci.h>
+ #include <linux/phy.h>
++#include <linux/pm_opp.h>
+ #include <linux/reboot.h>
+ #include <linux/regmap.h>
+ #include <linux/micrel_phy.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/of_net.h>
++#include <linux/fsl_otp.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/system_misc.h>
+@@ -194,6 +199,87 @@
+
+ }
+
++static void __init imx6q_csi_mux_init(void)
++{
++ /*
++ * MX6Q SabreSD board:
++ * IPU1 CSI0 connects to parallel interface.
++ * Set GPR1 bit 19 to 0x1.
++ *
++ * MX6DL SabreSD board:
++ * IPU1 CSI0 connects to parallel interface.
++ * Set GPR13 bit 0-2 to 0x4.
++ * IPU1 CSI1 connects to MIPI CSI2 virtual channel 1.
++ * Set GPR13 bit 3-5 to 0x1.
++ */
++ struct regmap *gpr;
++
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr)) {
++ if (of_machine_is_compatible("fsl,imx6q-sabresd") ||
++ of_machine_is_compatible("fsl,imx6q-sabreauto"))
++ regmap_update_bits(gpr, IOMUXC_GPR1, 1 << 19, 1 << 19);
++ else if (of_machine_is_compatible("fsl,imx6dl-sabresd") ||
++ of_machine_is_compatible("fsl,imx6dl-sabreauto"))
++ regmap_update_bits(gpr, IOMUXC_GPR13, 0x3F, 0x0C);
++ } else {
++ pr_err("%s(): failed to find fsl,imx6q-iomux-gpr regmap\n",
++ __func__);
++ }
++}
++
++#define OCOTP_MACn(n) (0x00000620 + (n) * 0x10)
++void __init imx6_enet_mac_init(const char *compatible)
++{
++ struct device_node *enet_np;
++ struct property *newmac;
++ u32 macaddr_low, macaddr_high;
++ u8 *macaddr;
++ int ret;
++
++ enet_np = of_find_compatible_node(NULL, NULL, compatible);
++ if (!enet_np)
++ return;
++
++ if (of_get_mac_address(enet_np))
++ goto put_enet_node;
++
++ ret = fsl_otp_readl(OCOTP_MACn(0), &macaddr_high);
++ ret = fsl_otp_readl(OCOTP_MACn(1), &macaddr_low);
++
++ newmac = kzalloc(sizeof(*newmac) + 6, GFP_KERNEL);
++ if (!newmac)
++ goto put_enet_node;
++
++ newmac->value = newmac + 1;
++ newmac->length = 6;
++ newmac->name = kstrdup("local-mac-address", GFP_KERNEL);
++ if (!newmac->name) {
++ kfree(newmac);
++ goto put_enet_node;
++ }
++
++ macaddr = newmac->value;
++ macaddr[5] = macaddr_high & 0xff;
++ macaddr[4] = (macaddr_high >> 8) & 0xff;
++ macaddr[3] = (macaddr_high >> 16) & 0xff;
++ macaddr[2] = (macaddr_high >> 24) & 0xff;
++ macaddr[1] = macaddr_low & 0xff;
++ macaddr[0] = (macaddr_low >> 8) & 0xff;
++
++ of_update_property(enet_np, newmac);
++
++put_enet_node:
++ of_node_put(enet_np);
++}
++
++static inline void imx6q_enet_init(void)
++{
++ imx6_enet_mac_init("fsl,imx6q-fec");
++ imx6q_enet_phy_init();
++ imx6q_1588_init();
++}
++
+ static void __init imx6q_init_machine(void)
+ {
+ struct device *parent;
+@@ -207,45 +293,60 @@
+ if (parent == NULL)
+ pr_warn("failed to initialize soc device\n");
+
+- imx6q_enet_phy_init();
+-
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
+
++ imx6q_enet_init();
+ imx_anatop_init();
+- imx6q_pm_init();
+- imx6q_1588_init();
++ cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init();
++ imx6q_csi_mux_init();
+ }
+
+ #define OCOTP_CFG3 0x440
+ #define OCOTP_CFG3_SPEED_SHIFT 16
+ #define OCOTP_CFG3_SPEED_1P2GHZ 0x3
++#define OCOTP_CFG3_SPEED_1GHZ 0x2
++#define OCOTP_CFG3_SPEED_850MHZ 0x1
++#define OCOTP_CFG3_SPEED_800MHZ 0x0
+
+-static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev)
++static void __init imx6q_opp_check_speed_grading(struct device *cpu_dev)
+ {
+- struct device_node *np;
+- void __iomem *base;
+ u32 val;
++ int ret;
+
+- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+- if (!np) {
+- pr_warn("failed to find ocotp node\n");
+- return;
++ ret = fsl_otp_readl(OCOTP_CFG3, &val);
++ if (ret) {
++ pr_warn("failed to read ocotp\n");
++ return;
+ }
+
+- base = of_iomap(np, 0);
+- if (!base) {
+- pr_warn("failed to map ocotp\n");
+- goto put_node;
+- }
++ /*
++ * SPEED_GRADING[1:0] defines the max speed of ARM:
++ * 2b'11: 1200000000Hz; -- i.MX6Q only.
++ * 2b'10: 1000000000Hz;
++ * 2b'01: 850000000Hz; -- i.MX6Q Only, exclusive with 1GHz.
++ * 2b'00: 800000000Hz;
++ * We need to set the max speed of ARM according to fuse map.
++ */
+
+- val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+- if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
+- if (dev_pm_opp_disable(cpu_dev, 1200000000))
+- pr_warn("failed to disable 1.2 GHz OPP\n");
+-
+-put_node:
+- of_node_put(np);
++ if (cpu_is_imx6q()) {
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_1P2GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 1200000000))
++ pr_warn("failed to disable 1.2 GHz OPP\n");
++ }
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_1GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 996000000))
++ pr_warn("failed to disable 1 GHz OPP\n");
++ if (cpu_is_imx6q()) {
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_850MHZ ||
++ (val & 0x3) == OCOTP_CFG3_SPEED_1GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 852000000))
++ pr_warn("failed to disable 850 MHz OPP\n");
++ }
++ if (IS_ENABLED(CONFIG_MX6_VPU_352M)) {
++ if (dev_pm_opp_disable(cpu_dev, 396000000))
++ pr_warn("failed to disable 396MHz OPP\n");
++ }
+ }
+
+ static void __init imx6q_opp_init(void)
+@@ -268,29 +369,70 @@
+ goto put_node;
+ }
+
+- imx6q_opp_check_1p2ghz(cpu_dev);
++ imx6q_opp_check_speed_grading(cpu_dev);
+
+ put_node:
+ of_node_put(np);
+ }
+
++#define ESAI_AUDIO_MCLK 24576000
++
++static void __init imx6q_audio_lvds2_init(void)
++{
++ struct clk *pll4_sel, *lvds2_in, *pll4_audio_div, *esai;
++
++ pll4_audio_div = clk_get_sys(NULL, "pll4_audio_div");
++ pll4_sel = clk_get_sys(NULL, "pll4_sel");
++ lvds2_in = clk_get_sys(NULL, "lvds2_in");
++ esai = clk_get_sys(NULL, "esai");
++ if (IS_ERR(pll4_audio_div) || IS_ERR(pll4_sel) ||
++ IS_ERR(lvds2_in) || IS_ERR(esai))
++ return;
++
++ if (clk_get_rate(lvds2_in) != ESAI_AUDIO_MCLK)
++ return;
++
++ clk_set_parent(pll4_sel, lvds2_in);
++ clk_set_rate(pll4_audio_div, 786432000);
++ clk_set_rate(esai, ESAI_AUDIO_MCLK);
++}
++
+ static struct platform_device imx6q_cpufreq_pdev = {
+- .name = "imx6q-cpufreq",
++ .name = "imx6-cpufreq",
+ };
+
+ static void __init imx6q_init_late(void)
+ {
++ struct regmap *gpr;
++
++ /*
++ * Need to force IOMUXC irq pending to meet CCM low power mode
++ * restriction, this is recommended by hardware team.
++ */
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr))
++ regmap_update_bits(gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_GINT_MASK,
++ IMX6Q_GPR1_GINT_ASSERT);
++
+ /*
+ * WAIT mode is broken on TO 1.0 and 1.1, so there is no point
+ * to run cpuidle on them.
+ */
+- if (imx_get_soc_revision() > IMX_CHIP_REVISION_1_1)
++ if ((cpu_is_imx6q() && imx_get_soc_revision() > IMX_CHIP_REVISION_1_1)
++ || (cpu_is_imx6dl() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_0))
+ imx6q_cpuidle_init();
+
+- if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
++ if (IS_ENABLED(CONFIG_ARM_IMX6_CPUFREQ)) {
+ imx6q_opp_init();
+ platform_device_register(&imx6q_cpufreq_pdev);
+ }
++
++ if (of_machine_is_compatible("fsl,imx6q-sabreauto")
++ || of_machine_is_compatible("fsl,imx6dl-sabreauto")) {
++ imx6q_audio_lvds2_init();
++ }
+ }
+
+ static void __init imx6q_map_io(void)
+@@ -315,6 +457,12 @@
+ };
+
+ DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad/DualLite (Device Tree)")
++ /*
++ * i.MX6Q/DL maps system memory at 0x10000000 (offset 256MiB), and
++ * GPU has a limit on physical address that it accesses, which must
++ * be below 2GiB.
++ */
++ .dma_zone_size = (SZ_2G - SZ_256M),
+ .smp = smp_ops(imx_smp_ops),
+ .map_io = imx6q_map_io,
+ .init_irq = imx6q_init_irq,
+diff -Nur linux-3.14.36/arch/arm/mach-imx/mach-imx6q.c.orig linux-openelec/arch/arm/mach-imx/mach-imx6q.c.orig
+--- linux-3.14.36/arch/arm/mach-imx/mach-imx6q.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/mach-imx6q.c.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,469 @@
++/*
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/clk.h>
++#include <linux/clkdev.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
++#include <linux/export.h>
++#include <linux/gpio.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/irqchip.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of_gpio.h>
++#include <linux/of_platform.h>
++#include <linux/pm_opp.h>
++#include <linux/pci.h>
++#include <linux/phy.h>
++#include <linux/pm_opp.h>
++#include <linux/reboot.h>
++#include <linux/regmap.h>
++#include <linux/micrel_phy.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/of_net.h>
++#include <linux/fsl_otp.h>
++#include <asm/mach/arch.h>
++#include <asm/mach/map.h>
++#include <asm/system_misc.h>
++
++#include "common.h"
++#include "cpuidle.h"
++#include "hardware.h"
++
++/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
++static int ksz9021rn_phy_fixup(struct phy_device *phydev)
++{
++ if (IS_BUILTIN(CONFIG_PHYLIB)) {
++ /* min rx data delay */
++ phy_write(phydev, MICREL_KSZ9021_EXTREG_CTRL,
++ 0x8000 | MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW);
++ phy_write(phydev, MICREL_KSZ9021_EXTREG_DATA_WRITE, 0x0000);
++
++ /* max rx/tx clock delay, min rx/tx control delay */
++ phy_write(phydev, MICREL_KSZ9021_EXTREG_CTRL,
++ 0x8000 | MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW);
++ phy_write(phydev, MICREL_KSZ9021_EXTREG_DATA_WRITE, 0xf0f0);
++ phy_write(phydev, MICREL_KSZ9021_EXTREG_CTRL,
++ MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW);
++ }
++
++ return 0;
++}
++
++static void mmd_write_reg(struct phy_device *dev, int device, int reg, int val)
++{
++ phy_write(dev, 0x0d, device);
++ phy_write(dev, 0x0e, reg);
++ phy_write(dev, 0x0d, (1 << 14) | device);
++ phy_write(dev, 0x0e, val);
++}
++
++static int ksz9031rn_phy_fixup(struct phy_device *dev)
++{
++ /*
++ * min rx data delay, max rx/tx clock delay,
++ * min rx/tx control delay
++ */
++ mmd_write_reg(dev, 2, 4, 0);
++ mmd_write_reg(dev, 2, 5, 0);
++ mmd_write_reg(dev, 2, 8, 0x003ff);
++
++ return 0;
++}
++
++/*
++ * fixup for PLX PEX8909 bridge to configure GPIO1-7 as output High
++ * as they are used for slots1-7 PERST#
++ */
++static void ventana_pciesw_early_fixup(struct pci_dev *dev)
++{
++ u32 dw;
++
++ if (!of_machine_is_compatible("gw,ventana"))
++ return;
++
++ if (dev->devfn != 0)
++ return;
++
++ pci_read_config_dword(dev, 0x62c, &dw);
++ dw |= 0xaaa8; // GPIO1-7 outputs
++ pci_write_config_dword(dev, 0x62c, dw);
++
++ pci_read_config_dword(dev, 0x644, &dw);
++ dw |= 0xfe; // GPIO1-7 output high
++ pci_write_config_dword(dev, 0x644, dw);
++
++ msleep(100);
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8609, ventana_pciesw_early_fixup);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8606, ventana_pciesw_early_fixup);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8604, ventana_pciesw_early_fixup);
++
++static int ar8031_phy_fixup(struct phy_device *dev)
++{
++ u16 val;
++
++ /* To enable AR8031 output a 125MHz clk from CLK_25M */
++ phy_write(dev, 0xd, 0x7);
++ phy_write(dev, 0xe, 0x8016);
++ phy_write(dev, 0xd, 0x4007);
++
++ val = phy_read(dev, 0xe);
++ val &= 0xffe3;
++ val |= 0x18;
++ phy_write(dev, 0xe, val);
++
++ /* introduce tx clock delay */
++ phy_write(dev, 0x1d, 0x5);
++ val = phy_read(dev, 0x1e);
++ val |= 0x0100;
++ phy_write(dev, 0x1e, val);
++
++ return 0;
++}
++
++#define PHY_ID_AR8031 0x004dd074
++
++static int ar8035_phy_fixup(struct phy_device *dev)
++{
++ u16 val;
++
++ /* Ar803x phy SmartEEE feature cause link status generates glitch,
++ * which cause ethernet link down/up issue, so disable SmartEEE
++ */
++ phy_write(dev, 0xd, 0x3);
++ phy_write(dev, 0xe, 0x805d);
++ phy_write(dev, 0xd, 0x4003);
++
++ val = phy_read(dev, 0xe);
++ phy_write(dev, 0xe, val & ~(1 << 8));
++
++ /*
++ * Enable 125MHz clock from CLK_25M on the AR8031. This
++ * is fed in to the IMX6 on the ENET_REF_CLK (V22) pad.
++ * Also, introduce a tx clock delay.
++ *
++ * This is the same as is the AR8031 fixup.
++ */
++ ar8031_phy_fixup(dev);
++
++ /*check phy power*/
++ val = phy_read(dev, 0x0);
++ if (val & BMCR_PDOWN)
++ phy_write(dev, 0x0, val & ~BMCR_PDOWN);
++
++ return 0;
++}
++
++#define PHY_ID_AR8035 0x004dd072
++
++static void __init imx6q_enet_phy_init(void)
++{
++ if (IS_BUILTIN(CONFIG_PHYLIB)) {
++ phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
++ ksz9021rn_phy_fixup);
++ phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK,
++ ksz9031rn_phy_fixup);
++ phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
++ ar8031_phy_fixup);
++ phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
++ ar8035_phy_fixup);
++ }
++}
++
++static void __init imx6q_1588_init(void)
++{
++ struct regmap *gpr;
++
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr))
++ regmap_update_bits(gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_ENET_CLK_SEL_MASK,
++ IMX6Q_GPR1_ENET_CLK_SEL_ANATOP);
++ else
++ pr_err("failed to find fsl,imx6q-iomux-gpr regmap\n");
++
++}
++
++static void __init imx6q_csi_mux_init(void)
++{
++ /*
++ * MX6Q SabreSD board:
++ * IPU1 CSI0 connects to parallel interface.
++ * Set GPR1 bit 19 to 0x1.
++ *
++ * MX6DL SabreSD board:
++ * IPU1 CSI0 connects to parallel interface.
++ * Set GPR13 bit 0-2 to 0x4.
++ * IPU1 CSI1 connects to MIPI CSI2 virtual channel 1.
++ * Set GPR13 bit 3-5 to 0x1.
++ */
++ struct regmap *gpr;
++
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr)) {
++ if (of_machine_is_compatible("fsl,imx6q-sabresd") ||
++ of_machine_is_compatible("fsl,imx6q-sabreauto"))
++ regmap_update_bits(gpr, IOMUXC_GPR1, 1 << 19, 1 << 19);
++ else if (of_machine_is_compatible("fsl,imx6dl-sabresd") ||
++ of_machine_is_compatible("fsl,imx6dl-sabreauto"))
++ regmap_update_bits(gpr, IOMUXC_GPR13, 0x3F, 0x0C);
++ } else {
++ pr_err("%s(): failed to find fsl,imx6q-iomux-gpr regmap\n",
++ __func__);
++ }
++}
++
++#define OCOTP_MACn(n) (0x00000620 + (n) * 0x10)
++void __init imx6_enet_mac_init(const char *compatible)
++{
++ struct device_node *enet_np;
++ struct property *newmac;
++ u32 macaddr_low, macaddr_high;
++ u8 *macaddr;
++ int ret;
++
++ enet_np = of_find_compatible_node(NULL, NULL, compatible);
++ if (!enet_np)
++ return;
++
++ if (of_get_mac_address(enet_np))
++ goto put_enet_node;
++
++ ret = fsl_otp_readl(OCOTP_MACn(0), &macaddr_high);
++ ret = fsl_otp_readl(OCOTP_MACn(1), &macaddr_low);
++
++ newmac = kzalloc(sizeof(*newmac) + 6, GFP_KERNEL);
++ if (!newmac)
++ goto put_enet_node;
++
++ newmac->value = newmac + 1;
++ newmac->length = 6;
++ newmac->name = kstrdup("local-mac-address", GFP_KERNEL);
++ if (!newmac->name) {
++ kfree(newmac);
++ goto put_enet_node;
++ }
++
++ macaddr = newmac->value;
++ macaddr[5] = macaddr_high & 0xff;
++ macaddr[4] = (macaddr_high >> 8) & 0xff;
++ macaddr[3] = (macaddr_high >> 16) & 0xff;
++ macaddr[2] = (macaddr_high >> 24) & 0xff;
++ macaddr[1] = macaddr_low & 0xff;
++ macaddr[0] = (macaddr_low >> 8) & 0xff;
++
++ of_update_property(enet_np, newmac);
++
++put_enet_node:
++ of_node_put(enet_np);
++}
++
++static inline void imx6q_enet_init(void)
++{
++ imx6_enet_mac_init("fsl,imx6q-fec");
++ imx6q_enet_phy_init();
++ imx6q_1588_init();
++}
++
++static void __init imx6q_init_machine(void)
++{
++ struct device *parent;
++
++ imx_print_silicon_rev(cpu_is_imx6dl() ? "i.MX6DL" : "i.MX6Q",
++ imx_get_soc_revision());
++
++ mxc_arch_reset_init_dt();
++
++ parent = imx_soc_device_init();
++ if (parent == NULL)
++ pr_warn("failed to initialize soc device\n");
++
++ of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
++
++ imx6q_enet_init();
++ imx_anatop_init();
++ cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init();
++ imx6q_csi_mux_init();
++}
++
++#define OCOTP_CFG3 0x440
++#define OCOTP_CFG3_SPEED_SHIFT 16
++#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
++#define OCOTP_CFG3_SPEED_1GHZ 0x2
++#define OCOTP_CFG3_SPEED_850MHZ 0x1
++#define OCOTP_CFG3_SPEED_800MHZ 0x0
++
++static void __init imx6q_opp_check_speed_grading(struct device *cpu_dev)
++{
++ u32 val;
++ int ret;
++
++ ret = fsl_otp_readl(OCOTP_CFG3, &val);
++ if (ret) {
++ pr_warn("failed to read ocotp\n");
++ return;
++ }
++
++ /*
++ * SPEED_GRADING[1:0] defines the max speed of ARM:
++ * 2b'11: 1200000000Hz; -- i.MX6Q only.
++ * 2b'10: 1000000000Hz;
++ * 2b'01: 850000000Hz; -- i.MX6Q Only, exclusive with 1GHz.
++ * 2b'00: 800000000Hz;
++ * We need to set the max speed of ARM according to fuse map.
++ */
++
++ val >>= OCOTP_CFG3_SPEED_SHIFT;
++ if (cpu_is_imx6q()) {
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_1P2GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 1200000000))
++ pr_warn("failed to disable 1.2 GHz OPP\n");
++ }
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_1GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 996000000))
++ pr_warn("failed to disable 1 GHz OPP\n");
++ if (cpu_is_imx6q()) {
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_850MHZ ||
++ (val & 0x3) == OCOTP_CFG3_SPEED_1GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 852000000))
++ pr_warn("failed to disable 850 MHz OPP\n");
++ }
++}
++
++static void __init imx6q_opp_init(void)
++{
++ struct device_node *np;
++ struct device *cpu_dev = get_cpu_device(0);
++
++ if (!cpu_dev) {
++ pr_warn("failed to get cpu0 device\n");
++ return;
++ }
++ np = of_node_get(cpu_dev->of_node);
++ if (!np) {
++ pr_warn("failed to find cpu0 node\n");
++ return;
++ }
++
++ if (of_init_opp_table(cpu_dev)) {
++ pr_warn("failed to init OPP table\n");
++ goto put_node;
++ }
++
++ imx6q_opp_check_speed_grading(cpu_dev);
++
++put_node:
++ of_node_put(np);
++}
++
++#define ESAI_AUDIO_MCLK 24576000
++
++static void __init imx6q_audio_lvds2_init(void)
++{
++ struct clk *pll4_sel, *lvds2_in, *pll4_audio_div, *esai;
++
++ pll4_audio_div = clk_get_sys(NULL, "pll4_audio_div");
++ pll4_sel = clk_get_sys(NULL, "pll4_sel");
++ lvds2_in = clk_get_sys(NULL, "lvds2_in");
++ esai = clk_get_sys(NULL, "esai");
++ if (IS_ERR(pll4_audio_div) || IS_ERR(pll4_sel) ||
++ IS_ERR(lvds2_in) || IS_ERR(esai))
++ return;
++
++ if (clk_get_rate(lvds2_in) != ESAI_AUDIO_MCLK)
++ return;
++
++ clk_set_parent(pll4_sel, lvds2_in);
++ clk_set_rate(pll4_audio_div, 786432000);
++ clk_set_rate(esai, ESAI_AUDIO_MCLK);
++}
++
++static struct platform_device imx6q_cpufreq_pdev = {
++ .name = "imx6-cpufreq",
++};
++
++static void __init imx6q_init_late(void)
++{
++ struct regmap *gpr;
++
++ /*
++ * Need to force IOMUXC irq pending to meet CCM low power mode
++ * restriction, this is recommended by hardware team.
++ */
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr))
++ regmap_update_bits(gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_GINT_MASK,
++ IMX6Q_GPR1_GINT_ASSERT);
++
++ /*
++ * WAIT mode is broken on TO 1.0 and 1.1, so there is no point
++ * to run cpuidle on them.
++ */
++ if ((cpu_is_imx6q() && imx_get_soc_revision() > IMX_CHIP_REVISION_1_1)
++ || (cpu_is_imx6dl() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_0))
++ imx6q_cpuidle_init();
++
++ if (IS_ENABLED(CONFIG_ARM_IMX6_CPUFREQ)) {
++ imx6q_opp_init();
++ platform_device_register(&imx6q_cpufreq_pdev);
++ }
++
++ if (of_machine_is_compatible("fsl,imx6q-sabreauto")
++ || of_machine_is_compatible("fsl,imx6dl-sabreauto")) {
++ imx6q_audio_lvds2_init();
++ }
++}
++
++static void __init imx6q_map_io(void)
++{
++ debug_ll_io_init();
++ imx_scu_map_io();
++}
++
++static void __init imx6q_init_irq(void)
++{
++ imx_init_revision_from_anatop();
++ imx_init_l2cache();
++ imx_src_init();
++ imx_gpc_init();
++ irqchip_init();
++}
++
++static const char *imx6q_dt_compat[] __initconst = {
++ "fsl,imx6dl",
++ "fsl,imx6q",
++ NULL,
++};
++
++DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad/DualLite (Device Tree)")
++ /*
++ * i.MX6Q/DL maps system memory at 0x10000000 (offset 256MiB), and
++ * GPU has a limit on physical address that it accesses, which must
++ * be below 2GiB.
++ */
++ .dma_zone_size = (SZ_2G - SZ_256M),
++ .smp = smp_ops(imx_smp_ops),
++ .map_io = imx6q_map_io,
++ .init_irq = imx6q_init_irq,
++ .init_machine = imx6q_init_machine,
++ .init_late = imx6q_init_late,
++ .dt_compat = imx6q_dt_compat,
++ .restart = mxc_restart,
++MACHINE_END
+diff -Nur linux-3.14.36/arch/arm/mach-imx/mach-imx6sl.c linux-openelec/arch/arm/mach-imx/mach-imx6sl.c
+--- linux-3.14.36/arch/arm/mach-imx/mach-imx6sl.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/mach-imx6sl.c 2015-05-06 12:05:43.000000000 -0500
+@@ -17,8 +17,9 @@
+ #include <asm/mach/map.h>
+
+ #include "common.h"
++#include "cpuidle.h"
+
+-static void __init imx6sl_fec_init(void)
++static void __init imx6sl_fec_clk_init(void)
+ {
+ struct regmap *gpr;
+
+@@ -34,8 +35,17 @@
+ }
+ }
+
++static inline void imx6sl_fec_init(void)
++{
++ imx6sl_fec_clk_init();
++ imx6_enet_mac_init("fsl,imx6sl-fec");
++}
++
+ static void __init imx6sl_init_late(void)
+ {
++ /* Init CPUIDLE */
++ imx6sl_cpuidle_init();
++
+ /* imx6sl reuses imx6q cpufreq driver */
+ if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ))
+ platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0);
+@@ -55,8 +65,7 @@
+
+ imx6sl_fec_init();
+ imx_anatop_init();
+- /* Reuse imx6q pm code */
+- imx6q_pm_init();
++ imx6sl_pm_init();
+ }
+
+ static void __init imx6sl_init_irq(void)
+diff -Nur linux-3.14.36/arch/arm/mach-imx/mach-vf610.c linux-openelec/arch/arm/mach-imx/mach-vf610.c
+--- linux-3.14.36/arch/arm/mach-imx/mach-vf610.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/mach-vf610.c 2015-05-06 12:05:43.000000000 -0500
+@@ -22,7 +22,7 @@
+
+ static void __init vf610_init_irq(void)
+ {
+- l2x0_of_init(0, ~0UL);
++ l2x0_of_init(0, ~0);
+ irqchip_init();
+ }
+
+diff -Nur linux-3.14.36/arch/arm/mach-imx/Makefile linux-openelec/arch/arm/mach-imx/Makefile
+--- linux-3.14.36/arch/arm/mach-imx/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -30,6 +30,7 @@
+ ifeq ($(CONFIG_CPU_IDLE),y)
+ obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o
+ obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
++obj-$(CONFIG_SOC_IMX6SL) += cpuidle-imx6sl.o
+ endif
+
+ ifdef CONFIG_SND_IMX_SOC
+@@ -101,9 +102,18 @@
+ obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
+ obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
+
+-obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
+-# i.MX6SL reuses i.MX6Q code
+-obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o
++AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
++obj-$(CONFIG_PM) += suspend-imx6.o pm-imx6.o headsmp.o
++
++obj-y += busfreq-imx6.o
++ifeq ($(CONFIG_ARM_IMX6_CPUFREQ),y)
++obj-$(CONFIG_SOC_IMX6Q) += ddr3_freq_imx6.o busfreq_ddr3.o
++obj-$(CONFIG_SOC_IMX6SL) += lpddr2_freq_imx6.o busfreq_lpddr2.o
++endif
++ifeq ($(CONFIG_CPU_IDLE), y)
++obj-$(CONFIG_SOC_IMX6SL) += imx6sl_wfi.o
++endif
++
+
+ # i.MX5 based machines
+ obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
+diff -Nur linux-3.14.36/arch/arm/mach-imx/mx6.h linux-openelec/arch/arm/mach-imx/mx6.h
+--- linux-3.14.36/arch/arm/mach-imx/mx6.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/mx6.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,35 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_ARCH_MXC_IOMAP_H__
++#define __ASM_ARCH_MXC_IOMAP_H__
++
++#define MX6Q_IO_P2V(x) IMX_IO_P2V(x)
++#define MX6Q_IO_ADDRESS(x) IOMEM(MX6Q_IO_P2V(x))
++
++#define MX6Q_L2_BASE_ADDR 0x00a02000
++#define MX6Q_L2_SIZE 0x1000
++#define MX6Q_IOMUXC_BASE_ADDR 0x020e0000
++#define MX6Q_IOMUXC_SIZE 0x4000
++#define MX6Q_SRC_BASE_ADDR 0x020d8000
++#define MX6Q_SRC_SIZE 0x4000
++#define MX6Q_CCM_BASE_ADDR 0x020c4000
++#define MX6Q_CCM_SIZE 0x4000
++#define MX6Q_ANATOP_BASE_ADDR 0x020c8000
++#define MX6Q_ANATOP_SIZE 0x1000
++#define MX6Q_GPC_BASE_ADDR 0x020dc000
++#define MX6Q_GPC_SIZE 0x4000
++#define MX6Q_MMDC_P0_BASE_ADDR 0x021b0000
++#define MX6Q_MMDC_P0_SIZE 0x4000
++#define MX6Q_MMDC_P1_BASE_ADDR 0x021b4000
++#define MX6Q_MMDC_P1_SIZE 0x4000
++
++#define MX6_SUSPEND_IRAM_SIZE 0x1000
++#endif
+diff -Nur linux-3.14.36/arch/arm/mach-imx/mxc.h linux-openelec/arch/arm/mach-imx/mxc.h
+--- linux-3.14.36/arch/arm/mach-imx/mxc.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/mxc.h 2015-05-06 12:05:43.000000000 -0500
+@@ -42,6 +42,8 @@
+ #define IMX_CHIP_REVISION_1_1 0x11
+ #define IMX_CHIP_REVISION_1_2 0x12
+ #define IMX_CHIP_REVISION_1_3 0x13
++#define IMX_CHIP_REVISION_1_4 0x14
++#define IMX_CHIP_REVISION_1_5 0x15
+ #define IMX_CHIP_REVISION_2_0 0x20
+ #define IMX_CHIP_REVISION_2_1 0x21
+ #define IMX_CHIP_REVISION_2_2 0x22
+@@ -177,6 +179,7 @@
+ extern struct cpu_op *(*get_cpu_op)(int *op);
+ #endif
+
++#define cpu_is_imx6() (cpu_is_imx6q() || cpu_is_imx6dl() || cpu_is_imx6sl())
+ #define cpu_is_mx3() (cpu_is_mx31() || cpu_is_mx35())
+ #define cpu_is_mx2() (cpu_is_mx21() || cpu_is_mx27())
+
+diff -Nur linux-3.14.36/arch/arm/mach-imx/pm-imx6.c linux-openelec/arch/arm/mach-imx/pm-imx6.c
+--- linux-3.14.36/arch/arm/mach-imx/pm-imx6.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/pm-imx6.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,580 @@
++/*
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/genalloc.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_platform.h>
++#include <linux/regmap.h>
++#include <linux/suspend.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <asm/cacheflush.h>
++#include <asm/fncpy.h>
++#include <asm/proc-fns.h>
++#include <asm/suspend.h>
++#include <asm/tlb.h>
++
++#include "common.h"
++#include "hardware.h"
++
++#define CCR 0x0
++#define BM_CCR_WB_COUNT (0x7 << 16)
++#define BM_CCR_RBC_BYPASS_COUNT (0x3f << 21)
++#define BM_CCR_RBC_EN (0x1 << 27)
++
++#define CLPCR 0x54
++#define BP_CLPCR_LPM 0
++#define BM_CLPCR_LPM (0x3 << 0)
++#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
++#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
++#define BM_CLPCR_SBYOS (0x1 << 6)
++#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
++#define BM_CLPCR_VSTBY (0x1 << 8)
++#define BP_CLPCR_STBY_COUNT 9
++#define BM_CLPCR_STBY_COUNT (0x3 << 9)
++#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
++#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
++#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
++#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
++#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
++#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
++#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
++#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
++#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
++#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
++#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
++
++#define CGPR 0x64
++#define BM_CGPR_INT_MEM_CLK_LPM (0x1 << 17)
++
++#define MX6Q_SUSPEND_OCRAM_SIZE 0x1000
++#define MX6_MAX_MMDC_IO_NUM 33
++
++static void __iomem *ccm_base;
++static void __iomem *suspend_ocram_base;
++static void (*imx6_suspend_in_ocram_fn)(void __iomem *ocram_vbase);
++
++/*
++ * suspend ocram space layout:
++ * ======================== high address ======================
++ * .
++ * .
++ * .
++ * ^
++ * ^
++ * ^
++ * imx6_suspend code
++ * PM_INFO structure(imx6_cpu_pm_info)
++ * ======================== low address =======================
++ */
++
++struct imx6_pm_base {
++ phys_addr_t pbase;
++ void __iomem *vbase;
++};
++
++struct imx6_pm_socdata {
++ u32 cpu_type;
++ const char *mmdc_compat;
++ const char *src_compat;
++ const char *iomuxc_compat;
++ const char *gpc_compat;
++ const u32 mmdc_io_num;
++ const u32 *mmdc_io_offset;
++};
++
++static const u32 imx6q_mmdc_io_offset[] __initconst = {
++ 0x5ac, 0x5b4, 0x528, 0x520, /* DQM0 ~ DQM3 */
++ 0x514, 0x510, 0x5bc, 0x5c4, /* DQM4 ~ DQM7 */
++ 0x56c, 0x578, 0x588, 0x594, /* CAS, RAS, SDCLK_0, SDCLK_1 */
++ 0x5a8, 0x5b0, 0x524, 0x51c, /* SDQS0 ~ SDQS3 */
++ 0x518, 0x50c, 0x5b8, 0x5c0, /* SDQS4 ~ SDQS7 */
++ 0x784, 0x788, 0x794, 0x79c, /* GPR_B0DS ~ GPR_B3DS */
++ 0x7a0, 0x7a4, 0x7a8, 0x748, /* GPR_B4DS ~ GPR_B7DS */
++ 0x59c, 0x5a0, 0x750, 0x774, /* SODT0, SODT1, MODE_CTL, MODE */
++ 0x74c, /* GPR_ADDS */
++};
++
++static const struct imx6_pm_socdata imx6q_pm_data __initconst = {
++ .cpu_type = MXC_CPU_IMX6Q,
++ .mmdc_compat = "fsl,imx6q-mmdc",
++ .src_compat = "fsl,imx6q-src",
++ .iomuxc_compat = "fsl,imx6q-iomuxc",
++ .gpc_compat = "fsl,imx6q-gpc",
++ .mmdc_io_num = ARRAY_SIZE(imx6q_mmdc_io_offset),
++ .mmdc_io_offset = imx6q_mmdc_io_offset,
++};
++
++/*
++ * This structure is for passing necessary data for low level ocram
++ * suspend code(arch/arm/mach-imx/suspend-imx6.S), if this struct
++ * definition is changed, the offset definition in
++ * arch/arm/mach-imx/suspend-imx6.S must be also changed accordingly,
++ * otherwise, the suspend to ocram function will be broken!
++ */
++struct imx6_cpu_pm_info {
++ phys_addr_t pbase; /* The physical address of pm_info. */
++ phys_addr_t resume_addr; /* The physical resume address for asm code */
++ u32 cpu_type;
++ u32 pm_info_size; /* Size of pm_info. */
++ struct imx6_pm_base mmdc_base;
++ struct imx6_pm_base src_base;
++ struct imx6_pm_base iomuxc_base;
++ struct imx6_pm_base ccm_base;
++ struct imx6_pm_base gpc_base;
++ struct imx6_pm_base l2_base;
++ u32 mmdc_io_num; /* Number of MMDC IOs which need saved/restored. */
++ u32 mmdc_io_val[MX6_MAX_MMDC_IO_NUM][2]; /* To save offset and value */
++} __aligned(8);
++
++void imx6q_set_cache_lpm_in_wait(bool enable)
++{
++ if ((cpu_is_imx6q() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_1) ||
++ (cpu_is_imx6dl() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_0)) {
++ u32 val;
++
++ val = readl_relaxed(ccm_base + CGPR);
++ if (enable)
++ val |= BM_CGPR_INT_MEM_CLK_LPM;
++ else
++ val &= ~BM_CGPR_INT_MEM_CLK_LPM;
++ writel_relaxed(val, ccm_base + CGPR);
++ }
++}
++
++static void imx6q_enable_rbc(bool enable)
++{
++ u32 val;
++
++ /*
++ * need to mask all interrupts in GPC before
++ * operating RBC configurations
++ */
++ imx_gpc_mask_all();
++
++ /* configure RBC enable bit */
++ val = readl_relaxed(ccm_base + CCR);
++ val &= ~BM_CCR_RBC_EN;
++ val |= enable ? BM_CCR_RBC_EN : 0;
++ writel_relaxed(val, ccm_base + CCR);
++
++ /* configure RBC count */
++ val = readl_relaxed(ccm_base + CCR);
++ val &= ~BM_CCR_RBC_BYPASS_COUNT;
++ val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0;
++ writel(val, ccm_base + CCR);
++
++ /*
++ * need to delay at least 2 cycles of CKIL(32K)
++ * due to hardware design requirement, which is
++ * ~61us, here we use 65us for safe
++ */
++ udelay(65);
++
++ /* restore GPC interrupt mask settings */
++ imx_gpc_restore_all();
++}
++
++static void imx6q_enable_wb(bool enable)
++{
++ u32 val;
++
++ /* configure well bias enable bit */
++ val = readl_relaxed(ccm_base + CLPCR);
++ val &= ~BM_CLPCR_WB_PER_AT_LPM;
++ val |= enable ? BM_CLPCR_WB_PER_AT_LPM : 0;
++ writel_relaxed(val, ccm_base + CLPCR);
++
++ /* configure well bias count */
++ val = readl_relaxed(ccm_base + CCR);
++ val &= ~BM_CCR_WB_COUNT;
++ val |= enable ? BM_CCR_WB_COUNT : 0;
++ writel_relaxed(val, ccm_base + CCR);
++}
++
++int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
++{
++ struct irq_desc *iomuxc_irq_desc;
++ u32 val = readl_relaxed(ccm_base + CLPCR);
++
++ val &= ~BM_CLPCR_LPM;
++ switch (mode) {
++ case WAIT_CLOCKED:
++ break;
++ case WAIT_UNCLOCKED:
++ val |= 0x1 << BP_CLPCR_LPM;
++ val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM;
++ val &= ~BM_CLPCR_VSTBY;
++ val &= ~BM_CLPCR_SBYOS;
++ if (cpu_is_imx6sl())
++ val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
++ else
++ val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
++ break;
++ case STOP_POWER_ON:
++ val |= 0x2 << BP_CLPCR_LPM;
++ val &= ~BM_CLPCR_VSTBY;
++ val &= ~BM_CLPCR_SBYOS;
++ val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
++ break;
++ case WAIT_UNCLOCKED_POWER_OFF:
++ val |= 0x1 << BP_CLPCR_LPM;
++ val &= ~BM_CLPCR_VSTBY;
++ val &= ~BM_CLPCR_SBYOS;
++ break;
++ case STOP_POWER_OFF:
++ val |= 0x2 << BP_CLPCR_LPM;
++ val |= 0x3 << BP_CLPCR_STBY_COUNT;
++ val |= BM_CLPCR_VSTBY;
++ val |= BM_CLPCR_SBYOS;
++ if (cpu_is_imx6sl()) {
++ val |= BM_CLPCR_BYPASS_PMIC_READY;
++ val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
++ } else {
++ val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
++ }
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /*
++ * ERR007265: CCM: When improper low-power sequence is used,
++ * the SoC enters low power mode before the ARM core executes WFI.
++ *
++ * Software workaround:
++ * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
++ * by setting IOMUX_GPR1_GINT.
++ * 2) Software should then unmask IRQ #32 in GPC before setting CCM
++ * Low-Power mode.
++ * 3) Software should mask IRQ #32 right after CCM Low-Power mode
++ * is set (set bits 0-1 of CCM_CLPCR).
++ */
++ iomuxc_irq_desc = irq_to_desc(32);
++ imx_gpc_irq_unmask(&iomuxc_irq_desc->irq_data);
++ writel_relaxed(val, ccm_base + CLPCR);
++ imx_gpc_irq_mask(&iomuxc_irq_desc->irq_data);
++
++ return 0;
++}
++
++static int imx6q_suspend_finish(unsigned long val)
++{
++ if (!imx6_suspend_in_ocram_fn) {
++ cpu_do_idle();
++ } else {
++ /*
++ * call low level suspend function in ocram,
++ * as we need to float DDR IO.
++ */
++ local_flush_tlb_all();
++ imx6_suspend_in_ocram_fn(suspend_ocram_base);
++ }
++
++ return 0;
++}
++
++static int imx6q_pm_enter(suspend_state_t state)
++{
++ struct regmap *g;
++
++ /*
++ * L2 can exit by 'reset' or Inband beacon (from remote EP)
++ * toggling phy_powerdown has same effect as 'inband beacon'
++ * So, toggle bit18 of GPR1, used as a workaround of errata
++ * "PCIe PCIe does not support L2 Power Down"
++ */
++ if (IS_ENABLED(CONFIG_PCI_IMX6)) {
++ g = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (IS_ERR(g)) {
++ pr_err("failed to find fsl,imx6q-iomux-gpr regmap\n");
++ return PTR_ERR(g);
++ }
++ regmap_update_bits(g, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD,
++ IMX6Q_GPR1_PCIE_TEST_PD);
++ }
++
++ switch (state) {
++ case PM_SUSPEND_STANDBY:
++ imx6q_set_lpm(STOP_POWER_ON);
++ imx6q_set_cache_lpm_in_wait(true);
++ imx_gpc_pre_suspend(false);
++ if (cpu_is_imx6sl())
++ imx6sl_set_wait_clk(true);
++ /* Zzz ... */
++ cpu_do_idle();
++ if (cpu_is_imx6sl())
++ imx6sl_set_wait_clk(false);
++ imx_gpc_post_resume();
++ imx6q_set_lpm(WAIT_CLOCKED);
++ break;
++ case PM_SUSPEND_MEM:
++ imx6q_set_cache_lpm_in_wait(false);
++ imx6q_set_lpm(STOP_POWER_OFF);
++ imx6q_enable_wb(true);
++ /*
++ * For suspend into ocram, asm code already take care of
++ * RBC setting, so we do NOT need to do that here.
++ */
++ if (!imx6_suspend_in_ocram_fn)
++ imx6q_enable_rbc(true);
++ imx_gpc_pre_suspend(true);
++ imx_anatop_pre_suspend();
++ imx_set_cpu_jump(0, v7_cpu_resume);
++ /* Zzz ... */
++ cpu_suspend(0, imx6q_suspend_finish);
++ if (cpu_is_imx6q() || cpu_is_imx6dl())
++ imx_smp_prepare();
++ imx_anatop_post_resume();
++ imx_gpc_post_resume();
++ imx6q_enable_wb(false);
++ imx6q_set_lpm(WAIT_CLOCKED);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /*
++ * L2 can exit by 'reset' or Inband beacon (from remote EP)
++ * toggling phy_powerdown has same effect as 'inband beacon'
++ * So, toggle bit18 of GPR1, used as a workaround of errata
++ * "PCIe PCIe does not support L2 Power Down"
++ */
++ if (IS_ENABLED(CONFIG_PCI_IMX6)) {
++ regmap_update_bits(g, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD,
++ !IMX6Q_GPR1_PCIE_TEST_PD);
++ }
++
++ return 0;
++}
++
++static int imx6q_pm_valid(suspend_state_t state)
++{
++ return (state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM);
++}
++
++static const struct platform_suspend_ops imx6q_pm_ops = {
++ .enter = imx6q_pm_enter,
++ .valid = imx6q_pm_valid,
++};
++
++void __init imx6q_pm_set_ccm_base(void __iomem *base)
++{
++ ccm_base = base;
++}
++
++static int __init imx6_pm_get_base(struct imx6_pm_base *base,
++ const char *compat)
++{
++ struct device_node *node;
++ struct resource res;
++ int ret = 0;
++
++ node = of_find_compatible_node(NULL, NULL, compat);
++ if (!node) {
++ ret = -ENODEV;
++ goto out;
++ }
++
++ ret = of_address_to_resource(node, 0, &res);
++ if (ret)
++ goto put_node;
++
++ base->pbase = res.start;
++ base->vbase = ioremap(res.start, resource_size(&res));
++ if (!base->vbase)
++ ret = -ENOMEM;
++
++put_node:
++ of_node_put(node);
++out:
++ return ret;
++}
++
++static int __init imx6q_ocram_suspend_init(const struct imx6_pm_socdata
++ *socdata)
++{
++ phys_addr_t ocram_pbase;
++ struct device_node *node;
++ struct platform_device *pdev;
++ struct imx6_cpu_pm_info *pm_info;
++ struct gen_pool *ocram_pool;
++ unsigned long ocram_base;
++ int i, ret = 0;
++ const u32 *mmdc_offset_array;
++
++ if (!socdata) {
++ pr_warn("%s: invalid argument!\n", __func__);
++ return -EINVAL;
++ }
++
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ pr_warn("%s: failed to find ocram node!\n", __func__);
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(node);
++ if (!pdev) {
++ pr_warn("%s: failed to find ocram device!\n", __func__);
++ ret = -ENODEV;
++ goto put_node;
++ }
++
++ ocram_pool = dev_get_gen_pool(&pdev->dev);
++ if (!ocram_pool) {
++ pr_warn("%s: ocram pool unavailable!\n", __func__);
++ ret = -ENODEV;
++ goto put_node;
++ }
++
++ ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
++ if (!ocram_base) {
++ pr_warn("%s: unable to alloc ocram!\n", __func__);
++ ret = -ENOMEM;
++ goto put_node;
++ }
++
++ ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
++
++ suspend_ocram_base = __arm_ioremap_exec(ocram_pbase,
++ MX6Q_SUSPEND_OCRAM_SIZE, false);
++
++ pm_info = suspend_ocram_base;
++ pm_info->pbase = ocram_pbase;
++ pm_info->resume_addr = virt_to_phys(v7_cpu_resume);
++ pm_info->pm_info_size = sizeof(*pm_info);
++
++ /*
++ * ccm physical address is not used by asm code currently,
++ * so get ccm virtual address directly, as we already have
++ * it from ccm driver.
++ */
++ pm_info->ccm_base.vbase = ccm_base;
++
++ ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
++ if (ret) {
++ pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
++ goto put_node;
++ }
++
++ ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
++ if (ret) {
++ pr_warn("%s: failed to get src base %d!\n", __func__, ret);
++ goto src_map_failed;
++ }
++
++ ret = imx6_pm_get_base(&pm_info->iomuxc_base, socdata->iomuxc_compat);
++ if (ret) {
++ pr_warn("%s: failed to get iomuxc base %d!\n", __func__, ret);
++ goto iomuxc_map_failed;
++ }
++
++ ret = imx6_pm_get_base(&pm_info->gpc_base, socdata->gpc_compat);
++ if (ret) {
++ pr_warn("%s: failed to get gpc base %d!\n", __func__, ret);
++ goto gpc_map_failed;
++ }
++
++ ret = imx6_pm_get_base(&pm_info->l2_base, "arm,pl310-cache");
++ if (ret) {
++ pr_warn("%s: failed to get pl310-cache base %d!\n",
++ __func__, ret);
++ goto pl310_cache_map_failed;
++ }
++
++ pm_info->cpu_type = socdata->cpu_type;
++ pm_info->mmdc_io_num = socdata->mmdc_io_num;
++ mmdc_offset_array = socdata->mmdc_io_offset;
++
++ for (i = 0; i < pm_info->mmdc_io_num; i++) {
++ pm_info->mmdc_io_val[i][0] =
++ mmdc_offset_array[i];
++ pm_info->mmdc_io_val[i][1] =
++ readl_relaxed(pm_info->iomuxc_base.vbase +
++ mmdc_offset_array[i]);
++ }
++
++ imx6_suspend_in_ocram_fn = fncpy(
++ suspend_ocram_base + sizeof(*pm_info),
++ &imx6_suspend,
++ MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));
++
++ goto put_node;
++
++pl310_cache_map_failed:
++ iounmap(&pm_info->gpc_base.vbase);
++gpc_map_failed:
++ iounmap(&pm_info->iomuxc_base.vbase);
++iomuxc_map_failed:
++ iounmap(&pm_info->src_base.vbase);
++src_map_failed:
++ iounmap(&pm_info->mmdc_base.vbase);
++put_node:
++ of_node_put(node);
++
++ return ret;
++}
++
++static void __init imx6_pm_common_init(const struct imx6_pm_socdata
++ *socdata)
++{
++ struct regmap *gpr;
++ int ret;
++
++ WARN_ON(!ccm_base);
++
++ ret = imx6q_ocram_suspend_init(socdata);
++ if (ret)
++ pr_warn("%s: failed to initialize ocram suspend %d!\n",
++ __func__, ret);
++
++ /*
++ * This is for SW workaround step #1 of ERR007265, see comments
++ * in imx6q_set_lpm for details of this errata.
++ * Force IOMUXC irq pending, so that the interrupt to GPC can be
++ * used to deassert dsm_request signal when the signal gets
++ * asserted unexpectedly.
++ */
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr))
++ regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT_MASK,
++ IMX6Q_GPR1_GINT_MASK);
++
++
++ suspend_set_ops(&imx6q_pm_ops);
++}
++
++void __init imx6q_pm_init(void)
++{
++ imx6_pm_common_init(&imx6q_pm_data);
++}
++
++void __init imx6dl_pm_init(void)
++{
++ imx6_pm_common_init(NULL);
++}
++
++void __init imx6sl_pm_init(void)
++{
++ imx6_pm_common_init(NULL);
++}
+diff -Nur linux-3.14.36/arch/arm/mach-imx/pm-imx6q.c linux-openelec/arch/arm/mach-imx/pm-imx6q.c
+--- linux-3.14.36/arch/arm/mach-imx/pm-imx6q.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/pm-imx6q.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,241 +0,0 @@
+-/*
+- * Copyright 2011-2013 Freescale Semiconductor, Inc.
+- * Copyright 2011 Linaro Ltd.
+- *
+- * The code contained herein is licensed under the GNU General Public
+- * License. You may obtain a copy of the GNU General Public License
+- * Version 2 or later at the following locations:
+- *
+- * http://www.opensource.org/licenses/gpl-license.html
+- * http://www.gnu.org/copyleft/gpl.html
+- */
+-
+-#include <linux/delay.h>
+-#include <linux/init.h>
+-#include <linux/io.h>
+-#include <linux/irq.h>
+-#include <linux/mfd/syscon.h>
+-#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+-#include <linux/of.h>
+-#include <linux/of_address.h>
+-#include <linux/regmap.h>
+-#include <linux/suspend.h>
+-#include <asm/cacheflush.h>
+-#include <asm/proc-fns.h>
+-#include <asm/suspend.h>
+-#include <asm/hardware/cache-l2x0.h>
+-
+-#include "common.h"
+-#include "hardware.h"
+-
+-#define CCR 0x0
+-#define BM_CCR_WB_COUNT (0x7 << 16)
+-#define BM_CCR_RBC_BYPASS_COUNT (0x3f << 21)
+-#define BM_CCR_RBC_EN (0x1 << 27)
+-
+-#define CLPCR 0x54
+-#define BP_CLPCR_LPM 0
+-#define BM_CLPCR_LPM (0x3 << 0)
+-#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
+-#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
+-#define BM_CLPCR_SBYOS (0x1 << 6)
+-#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
+-#define BM_CLPCR_VSTBY (0x1 << 8)
+-#define BP_CLPCR_STBY_COUNT 9
+-#define BM_CLPCR_STBY_COUNT (0x3 << 9)
+-#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
+-#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
+-#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
+-#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
+-#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
+-#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
+-#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
+-#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
+-#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
+-#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
+-#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
+-
+-#define CGPR 0x64
+-#define BM_CGPR_CHICKEN_BIT (0x1 << 17)
+-
+-static void __iomem *ccm_base;
+-
+-void imx6q_set_chicken_bit(void)
+-{
+- u32 val = readl_relaxed(ccm_base + CGPR);
+-
+- val |= BM_CGPR_CHICKEN_BIT;
+- writel_relaxed(val, ccm_base + CGPR);
+-}
+-
+-static void imx6q_enable_rbc(bool enable)
+-{
+- u32 val;
+-
+- /*
+- * need to mask all interrupts in GPC before
+- * operating RBC configurations
+- */
+- imx_gpc_mask_all();
+-
+- /* configure RBC enable bit */
+- val = readl_relaxed(ccm_base + CCR);
+- val &= ~BM_CCR_RBC_EN;
+- val |= enable ? BM_CCR_RBC_EN : 0;
+- writel_relaxed(val, ccm_base + CCR);
+-
+- /* configure RBC count */
+- val = readl_relaxed(ccm_base + CCR);
+- val &= ~BM_CCR_RBC_BYPASS_COUNT;
+- val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0;
+- writel(val, ccm_base + CCR);
+-
+- /*
+- * need to delay at least 2 cycles of CKIL(32K)
+- * due to hardware design requirement, which is
+- * ~61us, here we use 65us for safe
+- */
+- udelay(65);
+-
+- /* restore GPC interrupt mask settings */
+- imx_gpc_restore_all();
+-}
+-
+-static void imx6q_enable_wb(bool enable)
+-{
+- u32 val;
+-
+- /* configure well bias enable bit */
+- val = readl_relaxed(ccm_base + CLPCR);
+- val &= ~BM_CLPCR_WB_PER_AT_LPM;
+- val |= enable ? BM_CLPCR_WB_PER_AT_LPM : 0;
+- writel_relaxed(val, ccm_base + CLPCR);
+-
+- /* configure well bias count */
+- val = readl_relaxed(ccm_base + CCR);
+- val &= ~BM_CCR_WB_COUNT;
+- val |= enable ? BM_CCR_WB_COUNT : 0;
+- writel_relaxed(val, ccm_base + CCR);
+-}
+-
+-int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
+-{
+- struct irq_desc *iomuxc_irq_desc;
+- u32 val = readl_relaxed(ccm_base + CLPCR);
+-
+- val &= ~BM_CLPCR_LPM;
+- switch (mode) {
+- case WAIT_CLOCKED:
+- break;
+- case WAIT_UNCLOCKED:
+- val |= 0x1 << BP_CLPCR_LPM;
+- val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM;
+- break;
+- case STOP_POWER_ON:
+- val |= 0x2 << BP_CLPCR_LPM;
+- break;
+- case WAIT_UNCLOCKED_POWER_OFF:
+- val |= 0x1 << BP_CLPCR_LPM;
+- val &= ~BM_CLPCR_VSTBY;
+- val &= ~BM_CLPCR_SBYOS;
+- break;
+- case STOP_POWER_OFF:
+- val |= 0x2 << BP_CLPCR_LPM;
+- val |= 0x3 << BP_CLPCR_STBY_COUNT;
+- val |= BM_CLPCR_VSTBY;
+- val |= BM_CLPCR_SBYOS;
+- if (cpu_is_imx6sl()) {
+- val |= BM_CLPCR_BYPASS_PMIC_READY;
+- val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
+- } else {
+- val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
+- }
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- /*
+- * ERR007265: CCM: When improper low-power sequence is used,
+- * the SoC enters low power mode before the ARM core executes WFI.
+- *
+- * Software workaround:
+- * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
+- * by setting IOMUX_GPR1_GINT.
+- * 2) Software should then unmask IRQ #32 in GPC before setting CCM
+- * Low-Power mode.
+- * 3) Software should mask IRQ #32 right after CCM Low-Power mode
+- * is set (set bits 0-1 of CCM_CLPCR).
+- */
+- iomuxc_irq_desc = irq_to_desc(32);
+- imx_gpc_irq_unmask(&iomuxc_irq_desc->irq_data);
+- writel_relaxed(val, ccm_base + CLPCR);
+- imx_gpc_irq_mask(&iomuxc_irq_desc->irq_data);
+-
+- return 0;
+-}
+-
+-static int imx6q_suspend_finish(unsigned long val)
+-{
+- cpu_do_idle();
+- return 0;
+-}
+-
+-static int imx6q_pm_enter(suspend_state_t state)
+-{
+- switch (state) {
+- case PM_SUSPEND_MEM:
+- imx6q_set_lpm(STOP_POWER_OFF);
+- imx6q_enable_wb(true);
+- imx6q_enable_rbc(true);
+- imx_gpc_pre_suspend();
+- imx_anatop_pre_suspend();
+- imx_set_cpu_jump(0, v7_cpu_resume);
+- /* Zzz ... */
+- cpu_suspend(0, imx6q_suspend_finish);
+- if (cpu_is_imx6q() || cpu_is_imx6dl())
+- imx_smp_prepare();
+- imx_anatop_post_resume();
+- imx_gpc_post_resume();
+- imx6q_enable_rbc(false);
+- imx6q_enable_wb(false);
+- imx6q_set_lpm(WAIT_CLOCKED);
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- return 0;
+-}
+-
+-static const struct platform_suspend_ops imx6q_pm_ops = {
+- .enter = imx6q_pm_enter,
+- .valid = suspend_valid_only_mem,
+-};
+-
+-void __init imx6q_pm_set_ccm_base(void __iomem *base)
+-{
+- ccm_base = base;
+-}
+-
+-void __init imx6q_pm_init(void)
+-{
+- struct regmap *gpr;
+-
+- WARN_ON(!ccm_base);
+-
+- /*
+- * This is for SW workaround step #1 of ERR007265, see comments
+- * in imx6q_set_lpm for details of this errata.
+- * Force IOMUXC irq pending, so that the interrupt to GPC can be
+- * used to deassert dsm_request signal when the signal gets
+- * asserted unexpectedly.
+- */
+- gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+- if (!IS_ERR(gpr))
+- regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
+- IMX6Q_GPR1_GINT);
+-
+-
+- suspend_set_ops(&imx6q_pm_ops);
+-}
+diff -Nur linux-3.14.36/arch/arm/mach-imx/suspend-imx6.S linux-openelec/arch/arm/mach-imx/suspend-imx6.S
+--- linux-3.14.36/arch/arm/mach-imx/suspend-imx6.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-imx/suspend-imx6.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,306 @@
++/*
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/linkage.h>
++#include <asm/asm-offsets.h>
++#include <asm/hardware/cache-l2x0.h>
++#include "hardware.h"
++
++/*
++ * ==================== low level suspend ====================
++ *
++ * Better to follow below rules to use ARM registers:
++ * r0: pm_info structure address;
++ * r1 ~ r4: for saving pm_info members;
++ * r5 ~ r10: free registers;
++ * r11: io base address.
++ *
++ * suspend ocram space layout:
++ * ======================== high address ======================
++ * .
++ * .
++ * .
++ * ^
++ * ^
++ * ^
++ * imx6_suspend code
++ * PM_INFO structure(imx6_cpu_pm_info)
++ * ======================== low address =======================
++ */
++
++/*
++ * Below offsets are based on struct imx6_cpu_pm_info
++ * which defined in arch/arm/mach-imx/pm-imx6q.c, this
++ * structure contains necessary pm info for low level
++ * suspend related code.
++ */
++#define PM_INFO_PBASE_OFFSET 0x0
++#define PM_INFO_RESUME_ADDR_OFFSET 0x4
++#define PM_INFO_CPU_TYPE_OFFSET 0x8
++#define PM_INFO_PM_INFO_SIZE_OFFSET 0xC
++#define PM_INFO_MX6Q_MMDC_P_OFFSET 0x10
++#define PM_INFO_MX6Q_MMDC_V_OFFSET 0x14
++#define PM_INFO_MX6Q_SRC_P_OFFSET 0x18
++#define PM_INFO_MX6Q_SRC_V_OFFSET 0x1C
++#define PM_INFO_MX6Q_IOMUXC_P_OFFSET 0x20
++#define PM_INFO_MX6Q_IOMUXC_V_OFFSET 0x24
++#define PM_INFO_MX6Q_CCM_P_OFFSET 0x28
++#define PM_INFO_MX6Q_CCM_V_OFFSET 0x2C
++#define PM_INFO_MX6Q_GPC_P_OFFSET 0x30
++#define PM_INFO_MX6Q_GPC_V_OFFSET 0x34
++#define PM_INFO_MX6Q_L2_P_OFFSET 0x38
++#define PM_INFO_MX6Q_L2_V_OFFSET 0x3C
++#define PM_INFO_MMDC_IO_NUM_OFFSET 0x40
++#define PM_INFO_MMDC_IO_VAL_OFFSET 0x44
++
++#define MX6Q_SRC_GPR1 0x20
++#define MX6Q_SRC_GPR2 0x24
++#define MX6Q_MMDC_MAPSR 0x404
++#define MX6Q_GPC_IMR1 0x08
++#define MX6Q_GPC_IMR2 0x0c
++#define MX6Q_GPC_IMR3 0x10
++#define MX6Q_GPC_IMR4 0x14
++#define MX6Q_CCM_CCR 0x0
++
++ .align 3
++
++ .macro sync_l2_cache
++
++ /* sync L2 cache to drain L2's buffers to DRAM. */
++#ifdef CONFIG_CACHE_L2X0
++ ldr r11, [r0, #PM_INFO_MX6Q_L2_V_OFFSET]
++ mov r6, #0x0
++ str r6, [r11, #L2X0_CACHE_SYNC]
++1:
++ ldr r6, [r11, #L2X0_CACHE_SYNC]
++ ands r6, r6, #0x1
++ bne 1b
++#endif
++
++ .endm
++
++ .macro resume_mmdc
++
++ /* restore MMDC IO */
++ cmp r5, #0x0
++ ldreq r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
++ ldrne r11, [r0, #PM_INFO_MX6Q_IOMUXC_P_OFFSET]
++
++ ldr r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
++ ldr r7, =PM_INFO_MMDC_IO_VAL_OFFSET
++ add r7, r7, r0
++1:
++ ldr r8, [r7], #0x4
++ ldr r9, [r7], #0x4
++ str r9, [r11, r8]
++ subs r6, r6, #0x1
++ bne 1b
++
++ cmp r5, #0x0
++ ldreq r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
++ ldrne r11, [r0, #PM_INFO_MX6Q_MMDC_P_OFFSET]
++
++ /* let DDR out of self-refresh */
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ bic r7, r7, #(1 << 21)
++ str r7, [r11, #MX6Q_MMDC_MAPSR]
++2:
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ ands r7, r7, #(1 << 25)
++ bne 2b
++
++ /* enable DDR auto power saving */
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ bic r7, r7, #0x1
++ str r7, [r11, #MX6Q_MMDC_MAPSR]
++
++ .endm
++
++ENTRY(imx6_suspend)
++ ldr r1, [r0, #PM_INFO_PBASE_OFFSET]
++ ldr r2, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
++ ldr r3, [r0, #PM_INFO_CPU_TYPE_OFFSET]
++ ldr r4, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET]
++
++ /*
++ * counting the resume address in iram
++ * to set it in SRC register.
++ */
++ ldr r6, =imx6_suspend
++ ldr r7, =resume
++ sub r7, r7, r6
++ add r8, r1, r4
++ add r9, r8, r7
++
++ /*
++ * make sure TLB contain the addr we want,
++ * as we will access them after MMDC IO floated.
++ */
++
++ ldr r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
++ ldr r6, [r11, #0x0]
++ ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
++ ldr r6, [r11, #0x0]
++ ldr r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
++ ldr r6, [r11, #0x0]
++
++ /* use r11 to store the IO address */
++ ldr r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET]
++ /* store physical resume addr and pm_info address. */
++ str r9, [r11, #MX6Q_SRC_GPR1]
++ str r1, [r11, #MX6Q_SRC_GPR2]
++
++ /* need to sync L2 cache before DSM. */
++ sync_l2_cache
++
++ ldr r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
++ /*
++ * put DDR explicitly into self-refresh and
++ * disable automatic power savings.
++ */
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ orr r7, r7, #0x1
++ str r7, [r11, #MX6Q_MMDC_MAPSR]
++
++ /* make the DDR explicitly enter self-refresh. */
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ orr r7, r7, #(1 << 21)
++ str r7, [r11, #MX6Q_MMDC_MAPSR]
++
++poll_dvfs_set:
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ ands r7, r7, #(1 << 25)
++ beq poll_dvfs_set
++
++ ldr r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
++ ldr r6, =0x0
++ ldr r7, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
++ ldr r8, =PM_INFO_MMDC_IO_VAL_OFFSET
++ add r8, r8, r0
++set_mmdc_io_lpm:
++ ldr r9, [r8], #0x8
++ str r6, [r11, r9]
++ subs r7, r7, #0x1
++ bne set_mmdc_io_lpm
++
++ /*
++ * mask all GPC interrupts before
++ * enabling the RBC counters to
++ * avoid the counter starting too
++ * early if an interupt is already
++ * pending.
++ */
++ ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
++ ldr r6, [r11, #MX6Q_GPC_IMR1]
++ ldr r7, [r11, #MX6Q_GPC_IMR2]
++ ldr r8, [r11, #MX6Q_GPC_IMR3]
++ ldr r9, [r11, #MX6Q_GPC_IMR4]
++
++ ldr r10, =0xffffffff
++ str r10, [r11, #MX6Q_GPC_IMR1]
++ str r10, [r11, #MX6Q_GPC_IMR2]
++ str r10, [r11, #MX6Q_GPC_IMR3]
++ str r10, [r11, #MX6Q_GPC_IMR4]
++
++ /*
++ * enable the RBC bypass counter here
++ * to hold off the interrupts. RBC counter
++ * = 32 (1ms), Minimum RBC delay should be
++ * 400us for the analog LDOs to power down.
++ */
++ ldr r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
++ ldr r10, [r11, #MX6Q_CCM_CCR]
++ bic r10, r10, #(0x3f << 21)
++ orr r10, r10, #(0x20 << 21)
++ str r10, [r11, #MX6Q_CCM_CCR]
++
++ /* enable the counter. */
++ ldr r10, [r11, #MX6Q_CCM_CCR]
++ orr r10, r10, #(0x1 << 27)
++ str r10, [r11, #MX6Q_CCM_CCR]
++
++ /* unmask all the GPC interrupts. */
++ ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
++ str r6, [r11, #MX6Q_GPC_IMR1]
++ str r7, [r11, #MX6Q_GPC_IMR2]
++ str r8, [r11, #MX6Q_GPC_IMR3]
++ str r9, [r11, #MX6Q_GPC_IMR4]
++
++ /*
++ * now delay for a short while (3usec)
++ * ARM is at 1GHz at this point
++ * so a short loop should be enough.
++ * this delay is required to ensure that
++ * the RBC counter can start counting in
++ * case an interrupt is already pending
++ * or in case an interrupt arrives just
++ * as ARM is about to assert DSM_request.
++ */
++ ldr r6, =2000
++rbc_loop:
++ subs r6, r6, #0x1
++ bne rbc_loop
++
++ /* Zzz, enter stop mode */
++ wfi
++ nop
++ nop
++ nop
++ nop
++
++ /*
++ * run to here means there is pending
++ * wakeup source, system should auto
++ * resume, we need to restore MMDC IO first
++ */
++ mov r5, #0x0
++ resume_mmdc
++
++ /* return to suspend finish */
++ mov pc, lr
++
++resume:
++ /* invalidate L1 I-cache first */
++ mov r6, #0x0
++ mcr p15, 0, r6, c7, c5, 0
++ mcr p15, 0, r6, c7, c5, 6
++ /* enable the Icache and branch prediction */
++ mov r6, #0x1800
++ mcr p15, 0, r6, c1, c0, 0
++ isb
++
++ /* get physical resume address from pm_info. */
++ ldr lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
++ /* clear core0's entry and parameter */
++ ldr r11, [r0, #PM_INFO_MX6Q_SRC_P_OFFSET]
++ mov r7, #0x0
++ str r7, [r11, #MX6Q_SRC_GPR1]
++ str r7, [r11, #MX6Q_SRC_GPR2]
++
++ mov r5, #0x1
++ resume_mmdc
++
++ mov pc, lr
++ENDPROC(imx6_suspend)
++
++/*
++ * The following code must assume it is running from physical address
++ * where absolute virtual addresses to the data section have to be
++ * turned into relative ones.
++ */
++
++ENTRY(v7_cpu_resume)
++ bl v7_invalidate_l1
++#ifdef CONFIG_CACHE_L2X0
++ bl l2c310_early_resume
++#endif
++ b cpu_resume
++ENDPROC(v7_cpu_resume)
+diff -Nur linux-3.14.36/arch/arm/mach-imx/system.c linux-openelec/arch/arm/mach-imx/system.c
+--- linux-3.14.36/arch/arm/mach-imx/system.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/system.c 2015-05-06 12:05:43.000000000 -0500
+@@ -34,6 +34,7 @@
+
+ static void __iomem *wdog_base;
+ static struct clk *wdog_clk;
++static u32 wdog_source = 1; /* use WDOG1 default */
+
+ /*
+ * Reset the system. It is called by machine_restart().
+@@ -47,6 +48,15 @@
+
+ if (cpu_is_mx1())
+ wcr_enable = (1 << 0);
++ /*
++ * Some i.MX6 boards use WDOG2 to reset external pmic in bypass mode,
++ * so do WDOG2 reset here. Do not set SRS, since we will
++ * trigger external POR later. Use WDOG1 to reset in ldo-enable
++ * mode. You can set it by "fsl,wdog-reset" in dts.
++ */
++ else if (wdog_source == 2 && (cpu_is_imx6q() || cpu_is_imx6dl() ||
++ cpu_is_imx6sl()))
++ wcr_enable = 0x14;
+ else
+ wcr_enable = (1 << 2);
+
+@@ -90,12 +100,29 @@
+
+ void __init mxc_arch_reset_init_dt(void)
+ {
+- struct device_node *np;
++ struct device_node *np = NULL;
++
++ if (cpu_is_imx6q() || cpu_is_imx6dl())
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
++ else if (cpu_is_imx6sl())
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpc");
++
++ if (np)
++ of_property_read_u32(np, "fsl,wdog-reset", &wdog_source);
++ pr_info("Use WDOG%d as reset source\n", wdog_source);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx21-wdt");
+ wdog_base = of_iomap(np, 0);
+ WARN_ON(!wdog_base);
+
++ /* Some i.MX6 boards use WDOG2 to reset board in ldo-bypass mode */
++ if (wdog_source == 2 && (cpu_is_imx6q() || cpu_is_imx6dl() ||
++ cpu_is_imx6sl())) {
++ np = of_find_compatible_node(np, NULL, "fsl,imx21-wdt");
++ wdog_base = of_iomap(np, 0);
++ WARN_ON(!wdog_base);
++ }
++
+ wdog_clk = of_clk_get(np, 0);
+ if (IS_ERR(wdog_clk)) {
+ pr_warn("%s: failed to get wdog clock\n", __func__);
+@@ -124,7 +151,7 @@
+ }
+
+ /* Configure the L2 PREFETCH and POWER registers */
+- val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
++ val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
+ val |= 0x70800000;
+ /*
+ * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
+@@ -137,14 +164,12 @@
+ */
+ if (cpu_is_imx6q())
+ val &= ~(1 << 30 | 1 << 23);
+- writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
+- val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
+- writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
++ writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
+
+ iounmap(l2x0_base);
+ of_node_put(np);
+
+ out:
+- l2x0_of_init(0, ~0UL);
++ l2x0_of_init(0, ~0);
+ }
+ #endif
+diff -Nur linux-3.14.36/arch/arm/mach-imx/time.c linux-openelec/arch/arm/mach-imx/time.c
+--- linux-3.14.36/arch/arm/mach-imx/time.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-imx/time.c 2015-05-06 12:05:43.000000000 -0500
+@@ -60,7 +60,11 @@
+ #define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */
+ #define V2_TCTL_CLK_IPG (1 << 6)
+ #define V2_TCTL_CLK_PER (2 << 6)
++#define V2_TCTL_CLK_OSC_DIV8 (5 << 6)
++#define V2_TCTL_CLK_OSC (7 << 6)
++#define V2_TCTL_24MEN (1 << 10)
+ #define V2_TCTL_FRR (1 << 9)
++#define V2_TPRER_PRE24M 12
+ #define V2_IR 0x0c
+ #define V2_TSTAT 0x08
+ #define V2_TSTAT_OF1 (1 << 0)
+@@ -277,11 +281,20 @@
+
+ void __init mxc_timer_init(void __iomem *base, int irq)
+ {
+- uint32_t tctl_val;
++ uint32_t tctl_val, tprer_val;
+ struct clk *timer_clk;
+ struct clk *timer_ipg_clk;
+
+- timer_clk = clk_get_sys("imx-gpt.0", "per");
++ /*
++ * gpt clk source from 24M OSC on imx6q > TO1.0 and
++ * imx6dl, others from per clk.
++ */
++ if ((cpu_is_imx6q() && imx_get_soc_revision() > IMX_CHIP_REVISION_1_0)
++ || cpu_is_imx6dl())
++ timer_clk = clk_get_sys("imx-gpt.0", "gpt_3m");
++ else
++ timer_clk = clk_get_sys("imx-gpt.0", "per");
++
+ if (IS_ERR(timer_clk)) {
+ pr_err("i.MX timer: unable to get clk\n");
+ return;
+@@ -302,10 +315,24 @@
+ __raw_writel(0, timer_base + MXC_TCTL);
+ __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
+
+- if (timer_is_v2())
+- tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
+- else
++ if (timer_is_v2()) {
++ if ((cpu_is_imx6q() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_0) || cpu_is_imx6dl()) {
++ tctl_val = V2_TCTL_CLK_OSC_DIV8 | V2_TCTL_FRR |
++ V2_TCTL_WAITEN | MXC_TCTL_TEN;
++ if (cpu_is_imx6dl()) {
++ /* 24 / 8 = 3 MHz */
++ tprer_val = 7 << V2_TPRER_PRE24M;
++ __raw_writel(tprer_val, timer_base + MXC_TPRER);
++ tctl_val |= V2_TCTL_24MEN;
++ }
++ } else {
++ tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR |
++ V2_TCTL_WAITEN | MXC_TCTL_TEN;
++ }
++ } else {
+ tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
++ }
+
+ __raw_writel(tctl_val, timer_base + MXC_TCTL);
+
+diff -Nur linux-3.14.36/arch/arm/mach-nomadik/cpu-8815.c linux-openelec/arch/arm/mach-nomadik/cpu-8815.c
+--- linux-3.14.36/arch/arm/mach-nomadik/cpu-8815.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-nomadik/cpu-8815.c 2015-05-06 12:05:43.000000000 -0500
+@@ -147,7 +147,7 @@
+ {
+ #ifdef CONFIG_CACHE_L2X0
+ /* At full speed latency must be >=2, so 0x249 in low bits */
+- l2x0_of_init(0x00730249, 0xfe000fff);
++ l2x0_of_init(0x00700249, 0xfe0fefff);
+ #endif
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+diff -Nur linux-3.14.36/arch/arm/mach-omap2/common.h linux-openelec/arch/arm/mach-omap2/common.h
+--- linux-3.14.36/arch/arm/mach-omap2/common.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-omap2/common.h 2015-05-06 12:05:43.000000000 -0500
+@@ -91,6 +91,7 @@
+ extern void omap3_secure_sync32k_timer_init(void);
+ extern void omap3_gptimer_timer_init(void);
+ extern void omap4_local_timer_init(void);
++int omap_l2_cache_init(void);
+ extern void omap5_realtime_timer_init(void);
+
+ void omap2420_init_early(void);
+diff -Nur linux-3.14.36/arch/arm/mach-omap2/io.c linux-openelec/arch/arm/mach-omap2/io.c
+--- linux-3.14.36/arch/arm/mach-omap2/io.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-omap2/io.c 2015-05-06 12:05:43.000000000 -0500
+@@ -608,6 +608,7 @@
+ am43xx_clockdomains_init();
+ am43xx_hwmod_init();
+ omap_hwmod_init_postsetup();
++ omap_l2_cache_init();
+ omap_clk_soc_init = am43xx_dt_clk_init;
+ }
+
+@@ -639,6 +640,7 @@
+ omap44xx_clockdomains_init();
+ omap44xx_hwmod_init();
+ omap_hwmod_init_postsetup();
++ omap_l2_cache_init();
+ omap_clk_soc_init = omap4xxx_dt_clk_init;
+ }
+
+diff -Nur linux-3.14.36/arch/arm/mach-omap2/Kconfig linux-openelec/arch/arm/mach-omap2/Kconfig
+--- linux-3.14.36/arch/arm/mach-omap2/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-omap2/Kconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -78,6 +78,7 @@
+ select MULTI_IRQ_HANDLER
+ select ARM_GIC
+ select MACH_OMAP_GENERIC
++ select MIGHT_HAVE_CACHE_L2X0
+
+ config SOC_DRA7XX
+ bool "TI DRA7XX"
+diff -Nur linux-3.14.36/arch/arm/mach-omap2/omap4-common.c linux-openelec/arch/arm/mach-omap2/omap4-common.c
+--- linux-3.14.36/arch/arm/mach-omap2/omap4-common.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-omap2/omap4-common.c 2015-05-06 12:05:43.000000000 -0500
+@@ -166,75 +166,57 @@
+ return l2cache_base;
+ }
+
+-static void omap4_l2x0_disable(void)
++static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
+ {
+- outer_flush_all();
+- /* Disable PL310 L2 Cache controller */
+- omap_smc1(0x102, 0x0);
+-}
++ unsigned smc_op;
+
+-static void omap4_l2x0_set_debug(unsigned long val)
+-{
+- /* Program PL310 L2 Cache controller debug register */
+- omap_smc1(0x100, val);
++ switch (reg) {
++ case L2X0_CTRL:
++ smc_op = OMAP4_MON_L2X0_CTRL_INDEX;
++ break;
++
++ case L2X0_AUX_CTRL:
++ smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX;
++ break;
++
++ case L2X0_DEBUG_CTRL:
++ smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX;
++ break;
++
++ case L310_PREFETCH_CTRL:
++ smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
++ break;
++
++ default:
++ WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
++ return;
++ }
++
++ omap_smc1(smc_op, val);
+ }
+
+-static int __init omap_l2_cache_init(void)
++int __init omap_l2_cache_init(void)
+ {
+- u32 aux_ctrl = 0;
+-
+- /*
+- * To avoid code running on other OMAPs in
+- * multi-omap builds
+- */
+- if (!cpu_is_omap44xx())
+- return -ENODEV;
++ u32 aux_ctrl;
+
+ /* Static mapping, never released */
+ l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
+ if (WARN_ON(!l2cache_base))
+ return -ENOMEM;
+
+- /*
+- * 16-way associativity, parity disabled
+- * Way size - 32KB (es1.0)
+- * Way size - 64KB (es2.0 +)
+- */
+- aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
+- (0x1 << 25) |
+- (0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
+- (0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
+-
+- if (omap_rev() == OMAP4430_REV_ES1_0) {
+- aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
+- } else {
+- aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
+- (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
+- (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
+- (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
+- (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
+- }
+- if (omap_rev() != OMAP4430_REV_ES1_0)
+- omap_smc1(0x109, aux_ctrl);
+-
+- /* Enable PL310 L2 Cache controller */
+- omap_smc1(0x102, 0x1);
++ /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */
++ aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE |
++ L310_AUX_CTRL_DATA_PREFETCH |
++ L310_AUX_CTRL_INSTR_PREFETCH;
+
++ outer_cache.write_sec = omap4_l2c310_write_sec;
+ if (of_have_populated_dt())
+- l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
++ l2x0_of_init(aux_ctrl, 0xcf9fffff);
+ else
+- l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);
+-
+- /*
+- * Override default outer_cache.disable with a OMAP4
+- * specific one
+- */
+- outer_cache.disable = omap4_l2x0_disable;
+- outer_cache.set_debug = omap4_l2x0_set_debug;
++ l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff);
+
+ return 0;
+ }
+-omap_early_initcall(omap_l2_cache_init);
+ #endif
+
+ void __iomem *omap4_get_sar_ram_base(void)
+diff -Nur linux-3.14.36/arch/arm/mach-omap2/omap-mpuss-lowpower.c linux-openelec/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+--- linux-3.14.36/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2015-05-06 12:05:43.000000000 -0500
+@@ -187,19 +187,15 @@
+ * in every restore MPUSS OFF path.
+ */
+ #ifdef CONFIG_CACHE_L2X0
+-static void save_l2x0_context(void)
++static void __init save_l2x0_context(void)
+ {
+- u32 val;
+- void __iomem *l2x0_base = omap4_get_l2cache_base();
+- if (l2x0_base) {
+- val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
+- __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
+- val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
+- __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
+- }
++ __raw_writel(l2x0_saved_regs.aux_ctrl,
++ sar_base + L2X0_AUXCTRL_OFFSET);
++ __raw_writel(l2x0_saved_regs.prefetch_ctrl,
++ sar_base + L2X0_PREFETCH_CTRL_OFFSET);
+ }
+ #else
+-static void save_l2x0_context(void)
++static void __init save_l2x0_context(void)
+ {}
+ #endif
+
+diff -Nur linux-3.14.36/arch/arm/mach-prima2/l2x0.c linux-openelec/arch/arm/mach-prima2/l2x0.c
+--- linux-3.14.36/arch/arm/mach-prima2/l2x0.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-prima2/l2x0.c 2015-05-06 12:05:43.000000000 -0500
+@@ -8,43 +8,10 @@
+
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/of.h>
+ #include <asm/hardware/cache-l2x0.h>
+
+-struct l2x0_aux
+-{
+- u32 val;
+- u32 mask;
+-};
+-
+-static struct l2x0_aux prima2_l2x0_aux __initconst = {
+- .val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT,
+- .mask = 0,
+-};
+-
+-static struct l2x0_aux marco_l2x0_aux __initconst = {
+- .val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
+- (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT),
+- .mask = L2X0_AUX_CTRL_MASK,
+-};
+-
+-static struct of_device_id sirf_l2x0_ids[] __initconst = {
+- { .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, },
+- { .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, },
+- {},
+-};
+-
+ static int __init sirfsoc_l2x0_init(void)
+ {
+- struct device_node *np;
+- const struct l2x0_aux *aux;
+-
+- np = of_find_matching_node(NULL, sirf_l2x0_ids);
+- if (np) {
+- aux = of_match_node(sirf_l2x0_ids, np)->data;
+- return l2x0_of_init(aux->val, aux->mask);
+- }
+-
+- return 0;
++ return l2x0_of_init(0, ~0);
+ }
+ early_initcall(sirfsoc_l2x0_init);
+diff -Nur linux-3.14.36/arch/arm/mach-prima2/pm.c linux-openelec/arch/arm/mach-prima2/pm.c
+--- linux-3.14.36/arch/arm/mach-prima2/pm.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-prima2/pm.c 2015-05-06 12:05:43.000000000 -0500
+@@ -71,7 +71,6 @@
+ case PM_SUSPEND_MEM:
+ sirfsoc_pre_suspend_power_off();
+
+- outer_flush_all();
+ outer_disable();
+ /* go zzz */
+ cpu_suspend(0, sirfsoc_finish_suspend);
+diff -Nur linux-3.14.36/arch/arm/mach-realview/realview_eb.c linux-openelec/arch/arm/mach-realview/realview_eb.c
+--- linux-3.14.36/arch/arm/mach-realview/realview_eb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-realview/realview_eb.c 2015-05-06 12:05:43.000000000 -0500
+@@ -442,8 +442,13 @@
+ realview_eb11mp_fixup();
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
+- * Bits: .... ...0 0111 1001 0000 .... .... .... */
++ /*
++ * The PL220 needs to be manually configured as the hardware
++ * doesn't report the correct sizes.
++ * 1MB (128KB/way), 8-way associativity, event monitor and
++ * parity enabled, ignore share bit, no force write allocate
++ * Bits: .... ...0 0111 1001 0000 .... .... ....
++ */
+ l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
+ #endif
+ platform_device_register(&pmu_device);
+diff -Nur linux-3.14.36/arch/arm/mach-realview/realview_pb1176.c linux-openelec/arch/arm/mach-realview/realview_pb1176.c
+--- linux-3.14.36/arch/arm/mach-realview/realview_pb1176.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-realview/realview_pb1176.c 2015-05-06 12:05:43.000000000 -0500
+@@ -355,7 +355,13 @@
+ int i;
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */
++ /*
++ * The PL220 needs to be manually configured as the hardware
++ * doesn't report the correct sizes.
++ * 128kB (16kB/way), 8-way associativity, event monitor and
++ * parity enabled, ignore share bit, no force write allocate
++ * Bits: .... ...0 0111 0011 0000 .... .... ....
++ */
+ l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff);
+ #endif
+
+diff -Nur linux-3.14.36/arch/arm/mach-realview/realview_pb11mp.c linux-openelec/arch/arm/mach-realview/realview_pb11mp.c
+--- linux-3.14.36/arch/arm/mach-realview/realview_pb11mp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-realview/realview_pb11mp.c 2015-05-06 12:05:43.000000000 -0500
+@@ -337,8 +337,13 @@
+ int i;
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
+- * Bits: .... ...0 0111 1001 0000 .... .... .... */
++ /*
++ * The PL220 needs to be manually configured as the hardware
++ * doesn't report the correct sizes.
++ * 1MB (128KB/way), 8-way associativity, event monitor and
++ * parity enabled, ignore share bit, no force write allocate
++ * Bits: .... ...0 0111 1001 0000 .... .... ....
++ */
+ l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff);
+ #endif
+
+diff -Nur linux-3.14.36/arch/arm/mach-realview/realview_pbx.c linux-openelec/arch/arm/mach-realview/realview_pbx.c
+--- linux-3.14.36/arch/arm/mach-realview/realview_pbx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-realview/realview_pbx.c 2015-05-06 12:05:43.000000000 -0500
+@@ -370,8 +370,8 @@
+ __io_address(REALVIEW_PBX_TILE_L220_BASE);
+
+ /* set RAM latencies to 1 cycle for eASIC */
+- writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+- writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
++ writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
++ writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
+
+ /* 16KB way size, 8-way associativity, parity disabled
+ * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
+diff -Nur linux-3.14.36/arch/arm/mach-rockchip/rockchip.c linux-openelec/arch/arm/mach-rockchip/rockchip.c
+--- linux-3.14.36/arch/arm/mach-rockchip/rockchip.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-rockchip/rockchip.c 2015-05-06 12:05:43.000000000 -0500
+@@ -25,7 +25,7 @@
+
+ static void __init rockchip_dt_init(void)
+ {
+- l2x0_of_init(0, ~0UL);
++ l2x0_of_init(0, ~0);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+
+diff -Nur linux-3.14.36/arch/arm/mach-shmobile/board-armadillo800eva.c linux-openelec/arch/arm/mach-shmobile/board-armadillo800eva.c
+--- linux-3.14.36/arch/arm/mach-shmobile/board-armadillo800eva.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-shmobile/board-armadillo800eva.c 2015-05-06 12:05:43.000000000 -0500
+@@ -1270,8 +1270,8 @@
+
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 32K*8way */
+- l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
++ /* Shared attribute override enable, 32K*8way */
++ l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
+ #endif
+
+ i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
+diff -Nur linux-3.14.36/arch/arm/mach-shmobile/board-armadillo800eva-reference.c linux-openelec/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
+--- linux-3.14.36/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2015-05-06 12:05:43.000000000 -0500
+@@ -164,8 +164,8 @@
+ r8a7740_meram_workaround();
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 32K*8way */
+- l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
++ /* Shared attribute override enable, 32K*8way */
++ l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
+ #endif
+
+ r8a7740_add_standard_devices_dt();
+diff -Nur linux-3.14.36/arch/arm/mach-shmobile/board-kzm9g.c linux-openelec/arch/arm/mach-shmobile/board-kzm9g.c
+--- linux-3.14.36/arch/arm/mach-shmobile/board-kzm9g.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-shmobile/board-kzm9g.c 2015-05-06 12:05:43.000000000 -0500
+@@ -878,8 +878,8 @@
+ gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 64K*8way */
+- l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
++ /* Shared attribute override enable, 64K*8way */
++ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
+ #endif
+
+ i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
+diff -Nur linux-3.14.36/arch/arm/mach-shmobile/board-kzm9g-reference.c linux-openelec/arch/arm/mach-shmobile/board-kzm9g-reference.c
+--- linux-3.14.36/arch/arm/mach-shmobile/board-kzm9g-reference.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-shmobile/board-kzm9g-reference.c 2015-05-06 12:05:43.000000000 -0500
+@@ -36,8 +36,8 @@
+ sh73a0_add_standard_devices_dt();
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 64K*8way */
+- l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
++ /* Shared attribute override enable, 64K*8way */
++ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
+ #endif
+ }
+
+diff -Nur linux-3.14.36/arch/arm/mach-shmobile/setup-r8a7778.c linux-openelec/arch/arm/mach-shmobile/setup-r8a7778.c
+--- linux-3.14.36/arch/arm/mach-shmobile/setup-r8a7778.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-shmobile/setup-r8a7778.c 2015-05-06 12:05:43.000000000 -0500
+@@ -298,10 +298,10 @@
+ void __iomem *base = ioremap_nocache(0xf0100000, 0x1000);
+ if (base) {
+ /*
+- * Early BRESP enable, Shared attribute override enable, 64K*16way
++ * Shared attribute override enable, 64K*16way
+ * don't call iounmap(base)
+ */
+- l2x0_init(base, 0x40470000, 0x82000fff);
++ l2x0_init(base, 0x00400000, 0xc20f0fff);
+ }
+ #endif
+
+diff -Nur linux-3.14.36/arch/arm/mach-shmobile/setup-r8a7779.c linux-openelec/arch/arm/mach-shmobile/setup-r8a7779.c
+--- linux-3.14.36/arch/arm/mach-shmobile/setup-r8a7779.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-shmobile/setup-r8a7779.c 2015-05-06 12:05:43.000000000 -0500
+@@ -700,8 +700,8 @@
+ void __init r8a7779_add_standard_devices(void)
+ {
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 64K*16way */
+- l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff);
++ /* Shared attribute override enable, 64K*16way */
++ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
+ #endif
+ r8a7779_pm_init();
+
+diff -Nur linux-3.14.36/arch/arm/mach-socfpga/socfpga.c linux-openelec/arch/arm/mach-socfpga/socfpga.c
+--- linux-3.14.36/arch/arm/mach-socfpga/socfpga.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-socfpga/socfpga.c 2015-05-06 12:05:43.000000000 -0500
+@@ -104,7 +104,7 @@
+
+ static void __init socfpga_cyclone5_init(void)
+ {
+- l2x0_of_init(0, ~0UL);
++ l2x0_of_init(0, ~0);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ socfpga_init_clocks();
+ }
+diff -Nur linux-3.14.36/arch/arm/mach-spear/platsmp.c linux-openelec/arch/arm/mach-spear/platsmp.c
+--- linux-3.14.36/arch/arm/mach-spear/platsmp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-spear/platsmp.c 2015-05-06 12:05:43.000000000 -0500
+@@ -20,6 +20,18 @@
+ #include <mach/spear.h>
+ #include "generic.h"
+
++/*
++ * Write pen_release in a way that is guaranteed to be visible to all
++ * observers, irrespective of whether they're taking part in coherency
++ * or not. This is necessary for the hotplug code to work reliably.
++ */
++static void write_pen_release(int val)
++{
++ pen_release = val;
++ smp_wmb();
++ sync_cache_w(&pen_release);
++}
++
+ static DEFINE_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+@@ -30,8 +42,7 @@
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+- pen_release = -1;
+- smp_wmb();
++ write_pen_release(-1);
+
+ /*
+ * Synchronise with the boot thread.
+@@ -58,9 +69,7 @@
+ * Note that "pen_release" is the hardware CPU ID, whereas
+ * "cpu" is Linux's internal ID.
+ */
+- pen_release = cpu;
+- flush_cache_all();
+- outer_flush_all();
++ write_pen_release(cpu);
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+diff -Nur linux-3.14.36/arch/arm/mach-spear/spear13xx.c linux-openelec/arch/arm/mach-spear/spear13xx.c
+--- linux-3.14.36/arch/arm/mach-spear/spear13xx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-spear/spear13xx.c 2015-05-06 12:05:43.000000000 -0500
+@@ -38,15 +38,15 @@
+ if (!IS_ENABLED(CONFIG_CACHE_L2X0))
+ return;
+
+- writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
++ writel_relaxed(0x06, VA_L2CC_BASE + L310_PREFETCH_CTRL);
+
+ /*
+ * Program following latencies in order to make
+ * SPEAr1340 work at 600 MHz
+ */
+- writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
+- writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
+- l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
++ writel_relaxed(0x221, VA_L2CC_BASE + L310_TAG_LATENCY_CTRL);
++ writel_relaxed(0x441, VA_L2CC_BASE + L310_DATA_LATENCY_CTRL);
++ l2x0_init(VA_L2CC_BASE, 0x30a00001, 0xfe0fffff);
+ }
+
+ /*
+diff -Nur linux-3.14.36/arch/arm/mach-sti/board-dt.c linux-openelec/arch/arm/mach-sti/board-dt.c
+--- linux-3.14.36/arch/arm/mach-sti/board-dt.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-sti/board-dt.c 2015-05-06 12:05:43.000000000 -0500
+@@ -16,15 +16,9 @@
+
+ void __init stih41x_l2x0_init(void)
+ {
+- u32 way_size = 0x4;
+- u32 aux_ctrl;
+- /* may be this can be encoded in macros like BIT*() */
+- aux_ctrl = (0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
+- (0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
+- (0x1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
+- (way_size << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
+-
+- l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
++ l2x0_of_init(L2C_AUX_CTRL_SHARED_OVERRIDE |
++ L310_AUX_CTRL_DATA_PREFETCH |
++ L310_AUX_CTRL_INSTR_PREFETCH, 0xc00f0fff);
+ }
+
+ static void __init stih41x_machine_init(void)
+diff -Nur linux-3.14.36/arch/arm/mach-tegra/pm.h linux-openelec/arch/arm/mach-tegra/pm.h
+--- linux-3.14.36/arch/arm/mach-tegra/pm.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-tegra/pm.h 2015-05-06 12:05:43.000000000 -0500
+@@ -35,8 +35,6 @@
+ void tegra30_lp1_iram_hook(void);
+ void tegra30_sleep_core_init(void);
+
+-extern unsigned long l2x0_saved_regs_addr;
+-
+ void tegra_clear_cpu_in_lp2(void);
+ bool tegra_set_cpu_in_lp2(void);
+
+diff -Nur linux-3.14.36/arch/arm/mach-tegra/reset-handler.S linux-openelec/arch/arm/mach-tegra/reset-handler.S
+--- linux-3.14.36/arch/arm/mach-tegra/reset-handler.S 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-tegra/reset-handler.S 2015-07-24 18:03:29.256842002 -0500
+@@ -19,7 +19,6 @@
+
+ #include <asm/cache.h>
+ #include <asm/asm-offsets.h>
+-#include <asm/hardware/cache-l2x0.h>
+
+ #include "flowctrl.h"
+ #include "fuse.h"
+@@ -79,8 +78,10 @@
+ str r1, [r0]
+ #endif
+
++#ifdef CONFIG_CACHE_L2X0
+ /* L2 cache resume & re-enable */
+- l2_cache_resume r0, r1, r2, l2x0_saved_regs_addr
++ bl l2c310_early_resume
++#endif
+ end_ca9_scu_l2_resume:
+ mov32 r9, 0xc0f
+ cmp r8, r9
+@@ -90,12 +91,6 @@
+ ENDPROC(tegra_resume)
+ #endif
+
+-#ifdef CONFIG_CACHE_L2X0
+- .globl l2x0_saved_regs_addr
+-l2x0_saved_regs_addr:
+- .long 0
+-#endif
+-
+ .align L1_CACHE_SHIFT
+ ENTRY(__tegra_cpu_reset_handler_start)
+
+diff -Nur linux-3.14.36/arch/arm/mach-tegra/reset-handler.S.orig linux-openelec/arch/arm/mach-tegra/reset-handler.S.orig
+--- linux-3.14.36/arch/arm/mach-tegra/reset-handler.S.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-tegra/reset-handler.S.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,284 @@
++/*
++ * Copyright (c) 2012, NVIDIA Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/linkage.h>
++#include <linux/init.h>
++
++#include <asm/cache.h>
++#include <asm/asm-offsets.h>
++
++#include "flowctrl.h"
++#include "fuse.h"
++#include "iomap.h"
++#include "reset.h"
++#include "sleep.h"
++
++#define PMC_SCRATCH41 0x140
++
++#define RESET_DATA(x) ((TEGRA_RESET_##x)*4)
++
++#ifdef CONFIG_PM_SLEEP
++/*
++ * tegra_resume
++ *
++ * CPU boot vector when restarting the a CPU following
++ * an LP2 transition. Also branched to by LP0 and LP1 resume after
++ * re-enabling sdram.
++ *
++ * r6: SoC ID
++ * r8: CPU part number
++ */
++ENTRY(tegra_resume)
++ check_cpu_part_num 0xc09, r8, r9
++ bleq v7_invalidate_l1
++
++ cpu_id r0
++ cmp r0, #0 @ CPU0?
++ THUMB( it ne )
++ bne cpu_resume @ no
++
++ /* Are we on Tegra20? */
++ cmp r6, #TEGRA20
++ beq 1f @ Yes
++ /* Clear the flow controller flags for this CPU. */
++ cpu_to_csr_reg r1, r0
++ mov32 r2, TEGRA_FLOW_CTRL_BASE
++ ldr r1, [r2, r1]
++ /* Clear event & intr flag */
++ orr r1, r1, \
++ #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
++ movw r0, #0x3FFD @ enable, cluster_switch, immed, bitmaps
++ @ & ext flags for CPU power mgnt
++ bic r1, r1, r0
++ str r1, [r2]
++1:
++
++ mov32 r9, 0xc09
++ cmp r8, r9
++ bne end_ca9_scu_l2_resume
++#ifdef CONFIG_HAVE_ARM_SCU
++ /* enable SCU */
++ mov32 r0, TEGRA_ARM_PERIF_BASE
++ ldr r1, [r0]
++ orr r1, r1, #1
++ str r1, [r0]
++#endif
++
++#ifdef CONFIG_CACHE_L2X0
++ /* L2 cache resume & re-enable */
++ bl l2c310_early_resume
++#endif
++end_ca9_scu_l2_resume:
++ mov32 r9, 0xc0f
++ cmp r8, r9
++ bleq tegra_init_l2_for_a15
++
++ b cpu_resume
++ENDPROC(tegra_resume)
++#endif
++
++ .align L1_CACHE_SHIFT
++ENTRY(__tegra_cpu_reset_handler_start)
++
++/*
++ * __tegra_cpu_reset_handler:
++ *
++ * Common handler for all CPU reset events.
++ *
++ * Register usage within the reset handler:
++ *
++ * Others: scratch
++ * R6 = SoC ID
++ * R7 = CPU present (to the OS) mask
++ * R8 = CPU in LP1 state mask
++ * R9 = CPU in LP2 state mask
++ * R10 = CPU number
++ * R11 = CPU mask
++ * R12 = pointer to reset handler data
++ *
++ * NOTE: This code is copied to IRAM. All code and data accesses
++ * must be position-independent.
++ */
++
++ .align L1_CACHE_SHIFT
++ENTRY(__tegra_cpu_reset_handler)
++
++ cpsid aif, 0x13 @ SVC mode, interrupts disabled
++
++ tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
++#ifdef CONFIG_ARCH_TEGRA_2x_SOC
++t20_check:
++ cmp r6, #TEGRA20
++ bne after_t20_check
++t20_errata:
++ # Tegra20 is a Cortex-A9 r1p1
++ mrc p15, 0, r0, c1, c0, 0 @ read system control register
++ orr r0, r0, #1 << 14 @ erratum 716044
++ mcr p15, 0, r0, c1, c0, 0 @ write system control register
++ mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register
++ orr r0, r0, #1 << 4 @ erratum 742230
++ orr r0, r0, #1 << 11 @ erratum 751472
++ mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register
++ b after_errata
++after_t20_check:
++#endif
++#ifdef CONFIG_ARCH_TEGRA_3x_SOC
++t30_check:
++ cmp r6, #TEGRA30
++ bne after_t30_check
++t30_errata:
++ # Tegra30 is a Cortex-A9 r2p9
++ mrc p15, 0, r0, c15, c0, 1 @ read diagnostic register
++ orr r0, r0, #1 << 6 @ erratum 743622
++ orr r0, r0, #1 << 11 @ erratum 751472
++ mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register
++ b after_errata
++after_t30_check:
++#endif
++after_errata:
++ mrc p15, 0, r10, c0, c0, 5 @ MPIDR
++ and r10, r10, #0x3 @ R10 = CPU number
++ mov r11, #1
++ mov r11, r11, lsl r10 @ R11 = CPU mask
++ adr r12, __tegra_cpu_reset_handler_data
++
++#ifdef CONFIG_SMP
++ /* Does the OS know about this CPU? */
++ ldr r7, [r12, #RESET_DATA(MASK_PRESENT)]
++ tst r7, r11 @ if !present
++ bleq __die @ CPU not present (to OS)
++#endif
++
++#ifdef CONFIG_ARCH_TEGRA_2x_SOC
++ /* Are we on Tegra20? */
++ cmp r6, #TEGRA20
++ bne 1f
++ /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
++ mov32 r5, TEGRA_PMC_BASE
++ mov r0, #0
++ cmp r10, #0
++ strne r0, [r5, #PMC_SCRATCH41]
++1:
++#endif
++
++ /* Waking up from LP1? */
++ ldr r8, [r12, #RESET_DATA(MASK_LP1)]
++ tst r8, r11 @ if in_lp1
++ beq __is_not_lp1
++ cmp r10, #0
++ bne __die @ only CPU0 can be here
++ ldr lr, [r12, #RESET_DATA(STARTUP_LP1)]
++ cmp lr, #0
++ bleq __die @ no LP1 startup handler
++ THUMB( add lr, lr, #1 ) @ switch to Thumb mode
++ bx lr
++__is_not_lp1:
++
++ /* Waking up from LP2? */
++ ldr r9, [r12, #RESET_DATA(MASK_LP2)]
++ tst r9, r11 @ if in_lp2
++ beq __is_not_lp2
++ ldr lr, [r12, #RESET_DATA(STARTUP_LP2)]
++ cmp lr, #0
++ bleq __die @ no LP2 startup handler
++ bx lr
++
++__is_not_lp2:
++
++#ifdef CONFIG_SMP
++ /*
++ * Can only be secondary boot (initial or hotplug)
++ * CPU0 can't be here for Tegra20/30
++ */
++ cmp r6, #TEGRA114
++ beq __no_cpu0_chk
++ cmp r10, #0
++ bleq __die @ CPU0 cannot be here
++__no_cpu0_chk:
++ ldr lr, [r12, #RESET_DATA(STARTUP_SECONDARY)]
++ cmp lr, #0
++ bleq __die @ no secondary startup handler
++ bx lr
++#endif
++
++/*
++ * We don't know why the CPU reset. Just kill it.
++ * The LR register will contain the address we died at + 4.
++ */
++
++__die:
++ sub lr, lr, #4
++ mov32 r7, TEGRA_PMC_BASE
++ str lr, [r7, #PMC_SCRATCH41]
++
++ mov32 r7, TEGRA_CLK_RESET_BASE
++
++ /* Are we on Tegra20? */
++ cmp r6, #TEGRA20
++ bne 1f
++
++#ifdef CONFIG_ARCH_TEGRA_2x_SOC
++ mov32 r0, 0x1111
++ mov r1, r0, lsl r10
++ str r1, [r7, #0x340] @ CLK_RST_CPU_CMPLX_SET
++#endif
++1:
++#ifdef CONFIG_ARCH_TEGRA_3x_SOC
++ mov32 r6, TEGRA_FLOW_CTRL_BASE
++
++ cmp r10, #0
++ moveq r1, #FLOW_CTRL_HALT_CPU0_EVENTS
++ moveq r2, #FLOW_CTRL_CPU0_CSR
++ movne r1, r10, lsl #3
++ addne r2, r1, #(FLOW_CTRL_CPU1_CSR-8)
++ addne r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8)
++
++ /* Clear CPU "event" and "interrupt" flags and power gate
++ it when halting but not before it is in the "WFI" state. */
++ ldr r0, [r6, +r2]
++ orr r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
++ orr r0, r0, #FLOW_CTRL_CSR_ENABLE
++ str r0, [r6, +r2]
++
++ /* Unconditionally halt this CPU */
++ mov r0, #FLOW_CTRL_WAITEVENT
++ str r0, [r6, +r1]
++ ldr r0, [r6, +r1] @ memory barrier
++
++ dsb
++ isb
++ wfi @ CPU should be power gated here
++
++ /* If the CPU didn't power gate above just kill it's clock. */
++
++ mov r0, r11, lsl #8
++ str r0, [r7, #348] @ CLK_CPU_CMPLX_SET
++#endif
++
++ /* If the CPU still isn't dead, just spin here. */
++ b .
++ENDPROC(__tegra_cpu_reset_handler)
++
++ .align L1_CACHE_SHIFT
++ .type __tegra_cpu_reset_handler_data, %object
++ .globl __tegra_cpu_reset_handler_data
++__tegra_cpu_reset_handler_data:
++ .rept TEGRA_RESET_DATA_SIZE
++ .long 0
++ .endr
++ .align L1_CACHE_SHIFT
++
++ENTRY(__tegra_cpu_reset_handler_end)
+diff -Nur linux-3.14.36/arch/arm/mach-tegra/sleep.h linux-openelec/arch/arm/mach-tegra/sleep.h
+--- linux-3.14.36/arch/arm/mach-tegra/sleep.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-tegra/sleep.h 2015-05-06 12:05:43.000000000 -0500
+@@ -120,37 +120,6 @@
+ mov \tmp1, \tmp1, lsr #8
+ .endm
+
+-/* Macro to resume & re-enable L2 cache */
+-#ifndef L2X0_CTRL_EN
+-#define L2X0_CTRL_EN 1
+-#endif
+-
+-#ifdef CONFIG_CACHE_L2X0
+-.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
+- W(adr) \tmp1, \phys_l2x0_saved_regs
+- ldr \tmp1, [\tmp1]
+- ldr \tmp2, [\tmp1, #L2X0_R_PHY_BASE]
+- ldr \tmp3, [\tmp2, #L2X0_CTRL]
+- tst \tmp3, #L2X0_CTRL_EN
+- bne exit_l2_resume
+- ldr \tmp3, [\tmp1, #L2X0_R_TAG_LATENCY]
+- str \tmp3, [\tmp2, #L2X0_TAG_LATENCY_CTRL]
+- ldr \tmp3, [\tmp1, #L2X0_R_DATA_LATENCY]
+- str \tmp3, [\tmp2, #L2X0_DATA_LATENCY_CTRL]
+- ldr \tmp3, [\tmp1, #L2X0_R_PREFETCH_CTRL]
+- str \tmp3, [\tmp2, #L2X0_PREFETCH_CTRL]
+- ldr \tmp3, [\tmp1, #L2X0_R_PWR_CTRL]
+- str \tmp3, [\tmp2, #L2X0_POWER_CTRL]
+- ldr \tmp3, [\tmp1, #L2X0_R_AUX_CTRL]
+- str \tmp3, [\tmp2, #L2X0_AUX_CTRL]
+- mov \tmp3, #L2X0_CTRL_EN
+- str \tmp3, [\tmp2, #L2X0_CTRL]
+-exit_l2_resume:
+-.endm
+-#else /* CONFIG_CACHE_L2X0 */
+-.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
+-.endm
+-#endif /* CONFIG_CACHE_L2X0 */
+ #else
+ void tegra_pen_lock(void);
+ void tegra_pen_unlock(void);
+diff -Nur linux-3.14.36/arch/arm/mach-tegra/tegra.c linux-openelec/arch/arm/mach-tegra/tegra.c
+--- linux-3.14.36/arch/arm/mach-tegra/tegra.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-tegra/tegra.c 2015-05-06 12:05:43.000000000 -0500
+@@ -73,27 +73,7 @@
+ static void __init tegra_init_cache(void)
+ {
+ #ifdef CONFIG_CACHE_L2X0
+- static const struct of_device_id pl310_ids[] __initconst = {
+- { .compatible = "arm,pl310-cache", },
+- {}
+- };
+-
+- struct device_node *np;
+- int ret;
+- void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+- u32 aux_ctrl, cache_type;
+-
+- np = of_find_matching_node(NULL, pl310_ids);
+- if (!np)
+- return;
+-
+- cache_type = readl(p + L2X0_CACHE_TYPE);
+- aux_ctrl = (cache_type & 0x700) << (17-8);
+- aux_ctrl |= 0x7C400001;
+-
+- ret = l2x0_of_init(aux_ctrl, 0x8200c3fe);
+- if (!ret)
+- l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs);
++ l2x0_of_init(0x3c400001, 0xc20fc3fe);
+ #endif
+ }
+
+diff -Nur linux-3.14.36/arch/arm/mach-ux500/board-mop500-audio.c linux-openelec/arch/arm/mach-ux500/board-mop500-audio.c
+--- linux-3.14.36/arch/arm/mach-ux500/board-mop500-audio.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-ux500/board-mop500-audio.c 2015-05-06 12:05:43.000000000 -0500
+@@ -9,7 +9,6 @@
+ #include <linux/gpio.h>
+ #include <linux/platform_data/dma-ste-dma40.h>
+
+-#include "irqs.h"
+ #include <linux/platform_data/asoc-ux500-msp.h>
+
+ #include "ste-dma40-db8500.h"
+diff -Nur linux-3.14.36/arch/arm/mach-ux500/cache-l2x0.c linux-openelec/arch/arm/mach-ux500/cache-l2x0.c
+--- linux-3.14.36/arch/arm/mach-ux500/cache-l2x0.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-ux500/cache-l2x0.c 2015-05-06 12:05:43.000000000 -0500
+@@ -35,10 +35,16 @@
+ return 0;
+ }
+
+-static int __init ux500_l2x0_init(void)
++static void ux500_l2c310_write_sec(unsigned long val, unsigned reg)
+ {
+- u32 aux_val = 0x3e000000;
++ /*
++ * We can't write to secure registers as we are in non-secure
++ * mode, until we have some SMI service available.
++ */
++}
+
++static int __init ux500_l2x0_init(void)
++{
+ if (cpu_is_u8500_family() || cpu_is_ux540_family())
+ l2x0_base = __io_address(U8500_L2CC_BASE);
+ else
+@@ -48,28 +54,12 @@
+ /* Unlock before init */
+ ux500_l2x0_unlock();
+
+- /* DBx540's L2 has 128KB way size */
+- if (cpu_is_ux540_family())
+- /* 128KB way size */
+- aux_val |= (0x4 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
+- else
+- /* 64KB way size */
+- aux_val |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
++ outer_cache.write_sec = ux500_l2c310_write_sec;
+
+- /* 64KB way size, 8 way associativity, force WA */
+ if (of_have_populated_dt())
+- l2x0_of_init(aux_val, 0xc0000fff);
++ l2x0_of_init(0, ~0);
+ else
+- l2x0_init(l2x0_base, aux_val, 0xc0000fff);
+-
+- /*
+- * We can't disable l2 as we are in non secure mode, currently
+- * this seems be called only during kexec path. So let's
+- * override outer.disable with nasty assignment until we have
+- * some SMI service available.
+- */
+- outer_cache.disable = NULL;
+- outer_cache.set_debug = NULL;
++ l2x0_init(l2x0_base, 0, ~0);
+
+ return 0;
+ }
+diff -Nur linux-3.14.36/arch/arm/mach-ux500/cpu-db8500.c linux-openelec/arch/arm/mach-ux500/cpu-db8500.c
+--- linux-3.14.36/arch/arm/mach-ux500/cpu-db8500.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-ux500/cpu-db8500.c 2015-05-06 12:05:43.000000000 -0500
+@@ -27,7 +27,6 @@
+ #include <asm/mach/map.h>
+
+ #include "setup.h"
+-#include "irqs.h"
+
+ #include "board-mop500-regulators.h"
+ #include "board-mop500.h"
+@@ -35,14 +34,11 @@
+ #include "id.h"
+
+ struct ab8500_platform_data ab8500_platdata = {
+- .irq_base = MOP500_AB8500_IRQ_BASE,
+ .regulator = &ab8500_regulator_plat_data,
+ };
+
+ struct prcmu_pdata db8500_prcmu_pdata = {
+ .ab_platdata = &ab8500_platdata,
+- .ab_irq = IRQ_DB8500_AB8500,
+- .irq_base = IRQ_PRCMU_BASE,
+ .version_offset = DB8500_PRCMU_FW_VERSION_OFFSET,
+ .legacy_offset = DB8500_PRCMU_LEGACY_OFFSET,
+ };
+diff -Nur linux-3.14.36/arch/arm/mach-ux500/irqs-board-mop500.h linux-openelec/arch/arm/mach-ux500/irqs-board-mop500.h
+--- linux-3.14.36/arch/arm/mach-ux500/irqs-board-mop500.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-ux500/irqs-board-mop500.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,55 +0,0 @@
+-/*
+- * Copyright (C) ST-Ericsson SA 2010
+- *
+- * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+- * License terms: GNU General Public License (GPL) version 2
+- */
+-
+-#ifndef __MACH_IRQS_BOARD_MOP500_H
+-#define __MACH_IRQS_BOARD_MOP500_H
+-
+-/* Number of AB8500 irqs is taken from header file */
+-#include <linux/mfd/abx500/ab8500.h>
+-
+-#define MOP500_AB8500_IRQ_BASE IRQ_BOARD_START
+-#define MOP500_AB8500_IRQ_END (MOP500_AB8500_IRQ_BASE \
+- + AB8500_MAX_NR_IRQS)
+-
+-/* TC35892 */
+-#define TC35892_NR_INTERNAL_IRQS 8
+-#define TC35892_INT_GPIO(x) (TC35892_NR_INTERNAL_IRQS + (x))
+-#define TC35892_NR_GPIOS 24
+-#define TC35892_NR_IRQS TC35892_INT_GPIO(TC35892_NR_GPIOS)
+-
+-#define MOP500_EGPIO_NR_IRQS TC35892_NR_IRQS
+-
+-#define MOP500_EGPIO_IRQ_BASE MOP500_AB8500_IRQ_END
+-#define MOP500_EGPIO_IRQ_END (MOP500_EGPIO_IRQ_BASE \
+- + MOP500_EGPIO_NR_IRQS)
+-/* STMPE1601 irqs */
+-#define STMPE_NR_INTERNAL_IRQS 9
+-#define STMPE_INT_GPIO(x) (STMPE_NR_INTERNAL_IRQS + (x))
+-#define STMPE_NR_GPIOS 24
+-#define STMPE_NR_IRQS STMPE_INT_GPIO(STMPE_NR_GPIOS)
+-
+-#define MOP500_STMPE1601_IRQBASE MOP500_EGPIO_IRQ_END
+-#define MOP500_STMPE1601_IRQ(x) (MOP500_STMPE1601_IRQBASE + (x))
+-
+-#define MOP500_STMPE1601_IRQ_END \
+- MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS)
+-
+-#define MOP500_NR_IRQS MOP500_STMPE1601_IRQ_END
+-
+-#define MOP500_IRQ_END MOP500_NR_IRQS
+-
+-/*
+- * We may have several boards, but only one will run at a
+- * time, so the one with most IRQs will bump this ahead,
+- * but the IRQ_BOARD_START remains the same for either board.
+- */
+-#if MOP500_IRQ_END > IRQ_BOARD_END
+-#undef IRQ_BOARD_END
+-#define IRQ_BOARD_END MOP500_IRQ_END
+-#endif
+-
+-#endif
+diff -Nur linux-3.14.36/arch/arm/mach-ux500/irqs-db8500.h linux-openelec/arch/arm/mach-ux500/irqs-db8500.h
+--- linux-3.14.36/arch/arm/mach-ux500/irqs-db8500.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-ux500/irqs-db8500.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,125 +0,0 @@
+-/*
+- * Copyright (C) ST-Ericsson SA 2010
+- *
+- * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+- * License terms: GNU General Public License (GPL) version 2
+- */
+-
+-#ifndef __MACH_IRQS_DB8500_H
+-#define __MACH_IRQS_DB8500_H
+-
+-#define IRQ_DB8500_MTU0 (IRQ_SHPI_START + 4)
+-#define IRQ_DB8500_SPI2 (IRQ_SHPI_START + 6)
+-#define IRQ_DB8500_PMU (IRQ_SHPI_START + 7)
+-#define IRQ_DB8500_SPI0 (IRQ_SHPI_START + 8)
+-#define IRQ_DB8500_RTT (IRQ_SHPI_START + 9)
+-#define IRQ_DB8500_PKA (IRQ_SHPI_START + 10)
+-#define IRQ_DB8500_UART0 (IRQ_SHPI_START + 11)
+-#define IRQ_DB8500_I2C3 (IRQ_SHPI_START + 12)
+-#define IRQ_DB8500_L2CC (IRQ_SHPI_START + 13)
+-#define IRQ_DB8500_SSP0 (IRQ_SHPI_START + 14)
+-#define IRQ_DB8500_CRYP1 (IRQ_SHPI_START + 15)
+-#define IRQ_DB8500_MSP1_RX (IRQ_SHPI_START + 16)
+-#define IRQ_DB8500_MTU1 (IRQ_SHPI_START + 17)
+-#define IRQ_DB8500_RTC (IRQ_SHPI_START + 18)
+-#define IRQ_DB8500_UART1 (IRQ_SHPI_START + 19)
+-#define IRQ_DB8500_USB_WAKEUP (IRQ_SHPI_START + 20)
+-#define IRQ_DB8500_I2C0 (IRQ_SHPI_START + 21)
+-#define IRQ_DB8500_I2C1 (IRQ_SHPI_START + 22)
+-#define IRQ_DB8500_USBOTG (IRQ_SHPI_START + 23)
+-#define IRQ_DB8500_DMA_SECURE (IRQ_SHPI_START + 24)
+-#define IRQ_DB8500_DMA (IRQ_SHPI_START + 25)
+-#define IRQ_DB8500_UART2 (IRQ_SHPI_START + 26)
+-#define IRQ_DB8500_ICN_PMU1 (IRQ_SHPI_START + 27)
+-#define IRQ_DB8500_ICN_PMU2 (IRQ_SHPI_START + 28)
+-#define IRQ_DB8500_HSIR_EXCEP (IRQ_SHPI_START + 29)
+-#define IRQ_DB8500_MSP0 (IRQ_SHPI_START + 31)
+-#define IRQ_DB8500_HSIR_CH0_OVRRUN (IRQ_SHPI_START + 32)
+-#define IRQ_DB8500_HSIR_CH1_OVRRUN (IRQ_SHPI_START + 33)
+-#define IRQ_DB8500_HSIR_CH2_OVRRUN (IRQ_SHPI_START + 34)
+-#define IRQ_DB8500_HSIR_CH3_OVRRUN (IRQ_SHPI_START + 35)
+-#define IRQ_DB8500_HSIR_CH4_OVRRUN (IRQ_SHPI_START + 36)
+-#define IRQ_DB8500_HSIR_CH5_OVRRUN (IRQ_SHPI_START + 37)
+-#define IRQ_DB8500_HSIR_CH6_OVRRUN (IRQ_SHPI_START + 38)
+-#define IRQ_DB8500_HSIR_CH7_OVRRUN (IRQ_SHPI_START + 39)
+-#define IRQ_DB8500_AB8500 (IRQ_SHPI_START + 40)
+-#define IRQ_DB8500_SDMMC2 (IRQ_SHPI_START + 41)
+-#define IRQ_DB8500_SIA (IRQ_SHPI_START + 42)
+-#define IRQ_DB8500_SIA2 (IRQ_SHPI_START + 43)
+-#define IRQ_DB8500_SVA (IRQ_SHPI_START + 44)
+-#define IRQ_DB8500_SVA2 (IRQ_SHPI_START + 45)
+-#define IRQ_DB8500_PRCMU0 (IRQ_SHPI_START + 46)
+-#define IRQ_DB8500_PRCMU1 (IRQ_SHPI_START + 47)
+-#define IRQ_DB8500_DISP (IRQ_SHPI_START + 48)
+-#define IRQ_DB8500_SPI3 (IRQ_SHPI_START + 49)
+-#define IRQ_DB8500_SDMMC1 (IRQ_SHPI_START + 50)
+-#define IRQ_DB8500_I2C4 (IRQ_SHPI_START + 51)
+-#define IRQ_DB8500_SSP1 (IRQ_SHPI_START + 52)
+-#define IRQ_DB8500_SKE (IRQ_SHPI_START + 53)
+-#define IRQ_DB8500_KB (IRQ_SHPI_START + 54)
+-#define IRQ_DB8500_I2C2 (IRQ_SHPI_START + 55)
+-#define IRQ_DB8500_B2R2 (IRQ_SHPI_START + 56)
+-#define IRQ_DB8500_CRYP0 (IRQ_SHPI_START + 57)
+-#define IRQ_DB8500_SDMMC3 (IRQ_SHPI_START + 59)
+-#define IRQ_DB8500_SDMMC0 (IRQ_SHPI_START + 60)
+-#define IRQ_DB8500_HSEM (IRQ_SHPI_START + 61)
+-#define IRQ_DB8500_MSP1 (IRQ_SHPI_START + 62)
+-#define IRQ_DB8500_SBAG (IRQ_SHPI_START + 63)
+-#define IRQ_DB8500_SPI1 (IRQ_SHPI_START + 96)
+-#define IRQ_DB8500_SRPTIMER (IRQ_SHPI_START + 97)
+-#define IRQ_DB8500_MSP2 (IRQ_SHPI_START + 98)
+-#define IRQ_DB8500_SDMMC4 (IRQ_SHPI_START + 99)
+-#define IRQ_DB8500_SDMMC5 (IRQ_SHPI_START + 100)
+-#define IRQ_DB8500_HSIRD0 (IRQ_SHPI_START + 104)
+-#define IRQ_DB8500_HSIRD1 (IRQ_SHPI_START + 105)
+-#define IRQ_DB8500_HSITD0 (IRQ_SHPI_START + 106)
+-#define IRQ_DB8500_HSITD1 (IRQ_SHPI_START + 107)
+-#define IRQ_DB8500_CTI0 (IRQ_SHPI_START + 108)
+-#define IRQ_DB8500_CTI1 (IRQ_SHPI_START + 109)
+-#define IRQ_DB8500_ICN_ERR (IRQ_SHPI_START + 110)
+-#define IRQ_DB8500_MALI_PPMMU (IRQ_SHPI_START + 112)
+-#define IRQ_DB8500_MALI_PP (IRQ_SHPI_START + 113)
+-#define IRQ_DB8500_MALI_GPMMU (IRQ_SHPI_START + 114)
+-#define IRQ_DB8500_MALI_GP (IRQ_SHPI_START + 115)
+-#define IRQ_DB8500_MALI (IRQ_SHPI_START + 116)
+-#define IRQ_DB8500_PRCMU_SEM (IRQ_SHPI_START + 118)
+-#define IRQ_DB8500_GPIO0 (IRQ_SHPI_START + 119)
+-#define IRQ_DB8500_GPIO1 (IRQ_SHPI_START + 120)
+-#define IRQ_DB8500_GPIO2 (IRQ_SHPI_START + 121)
+-#define IRQ_DB8500_GPIO3 (IRQ_SHPI_START + 122)
+-#define IRQ_DB8500_GPIO4 (IRQ_SHPI_START + 123)
+-#define IRQ_DB8500_GPIO5 (IRQ_SHPI_START + 124)
+-#define IRQ_DB8500_GPIO6 (IRQ_SHPI_START + 125)
+-#define IRQ_DB8500_GPIO7 (IRQ_SHPI_START + 126)
+-#define IRQ_DB8500_GPIO8 (IRQ_SHPI_START + 127)
+-
+-#define IRQ_CA_WAKE_REQ_ED (IRQ_SHPI_START + 71)
+-#define IRQ_AC_READ_NOTIFICATION_0_ED (IRQ_SHPI_START + 66)
+-#define IRQ_AC_READ_NOTIFICATION_1_ED (IRQ_SHPI_START + 64)
+-#define IRQ_CA_MSG_PEND_NOTIFICATION_0_ED (IRQ_SHPI_START + 67)
+-#define IRQ_CA_MSG_PEND_NOTIFICATION_1_ED (IRQ_SHPI_START + 65)
+-
+-#define IRQ_CA_WAKE_REQ_V1 (IRQ_SHPI_START + 83)
+-#define IRQ_AC_READ_NOTIFICATION_0_V1 (IRQ_SHPI_START + 78)
+-#define IRQ_AC_READ_NOTIFICATION_1_V1 (IRQ_SHPI_START + 76)
+-#define IRQ_CA_MSG_PEND_NOTIFICATION_0_V1 (IRQ_SHPI_START + 79)
+-#define IRQ_CA_MSG_PEND_NOTIFICATION_1_V1 (IRQ_SHPI_START + 77)
+-
+-#ifdef CONFIG_UX500_SOC_DB8500
+-
+-/* Virtual interrupts corresponding to the PRCMU wakeups. */
+-#define IRQ_PRCMU_BASE IRQ_SOC_START
+-#define IRQ_PRCMU_END (IRQ_PRCMU_BASE + 23)
+-
+-/*
+- * We may have several SoCs, but only one will run at a
+- * time, so the one with most IRQs will bump this ahead,
+- * but the IRQ_SOC_START remains the same for either SoC.
+- */
+-#if IRQ_SOC_END < IRQ_PRCMU_END
+-#undef IRQ_SOC_END
+-#define IRQ_SOC_END IRQ_PRCMU_END
+-#endif
+-
+-#endif /* CONFIG_UX500_SOC_DB8500 */
+-#endif
+diff -Nur linux-3.14.36/arch/arm/mach-ux500/irqs.h linux-openelec/arch/arm/mach-ux500/irqs.h
+--- linux-3.14.36/arch/arm/mach-ux500/irqs.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-ux500/irqs.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,49 +0,0 @@
+-/*
+- * Copyright (C) 2008 STMicroelectronics
+- * Copyright (C) 2009 ST-Ericsson.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
+-#ifndef ASM_ARCH_IRQS_H
+-#define ASM_ARCH_IRQS_H
+-
+-#define IRQ_LOCALTIMER 29
+-#define IRQ_LOCALWDOG 30
+-
+-/* Shared Peripheral Interrupt (SHPI) */
+-#define IRQ_SHPI_START 32
+-
+-/*
+- * MTU0 preserved for now until plat-nomadik is taught not to use it. Don't
+- * add any other IRQs here, use the irqs-dbx500.h files.
+- */
+-#define IRQ_MTU0 (IRQ_SHPI_START + 4)
+-
+-#define DBX500_NR_INTERNAL_IRQS 166
+-
+-/* After chip-specific IRQ numbers we have the GPIO ones */
+-#define NOMADIK_NR_GPIO 288
+-#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + DBX500_NR_INTERNAL_IRQS)
+-#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - DBX500_NR_INTERNAL_IRQS)
+-#define IRQ_GPIO_END NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
+-
+-#define IRQ_SOC_START IRQ_GPIO_END
+-/* This will be overridden by SoC-specific irq headers */
+-#define IRQ_SOC_END IRQ_SOC_START
+-
+-#include "irqs-db8500.h"
+-
+-#define IRQ_BOARD_START IRQ_SOC_END
+-/* This will be overridden by board-specific irq headers */
+-#define IRQ_BOARD_END IRQ_BOARD_START
+-
+-#ifdef CONFIG_MACH_MOP500
+-#include "irqs-board-mop500.h"
+-#endif
+-
+-#define UX500_NR_IRQS IRQ_BOARD_END
+-
+-#endif /* ASM_ARCH_IRQS_H */
+diff -Nur linux-3.14.36/arch/arm/mach-vexpress/ct-ca9x4.c linux-openelec/arch/arm/mach-vexpress/ct-ca9x4.c
+--- linux-3.14.36/arch/arm/mach-vexpress/ct-ca9x4.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-vexpress/ct-ca9x4.c 2015-05-06 12:05:43.000000000 -0500
+@@ -45,6 +45,23 @@
+ iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
+ }
+
++static void __init ca9x4_l2_init(void)
++{
++#ifdef CONFIG_CACHE_L2X0
++ void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
++
++ if (l2x0_base) {
++ /* set RAM latencies to 1 cycle for this core tile. */
++ writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
++ writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
++
++ l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
++ } else {
++ pr_err("L2C: unable to map L2 cache controller\n");
++ }
++#endif
++}
++
+ #ifdef CONFIG_HAVE_ARM_TWD
+ static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);
+
+@@ -63,6 +80,7 @@
+ gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
+ ioremap(A9_MPCORE_GIC_CPU, SZ_256));
+ ca9x4_twd_init();
++ ca9x4_l2_init();
+ }
+
+ static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
+@@ -141,16 +159,6 @@
+ {
+ int i;
+
+-#ifdef CONFIG_CACHE_L2X0
+- void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
+-
+- /* set RAM latencies to 1 cycle for this core tile. */
+- writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+- writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+-
+- l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
+-#endif
+-
+ for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
+ amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
+
+diff -Nur linux-3.14.36/arch/arm/mach-vexpress/dcscb.c linux-openelec/arch/arm/mach-vexpress/dcscb.c
+--- linux-3.14.36/arch/arm/mach-vexpress/dcscb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-vexpress/dcscb.c 2015-05-06 12:05:43.000000000 -0500
+@@ -23,6 +23,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <asm/cp15.h>
++#include <asm/psci.h>
+
+
+ #define RST_HOLD0 0x0
+@@ -193,6 +194,12 @@
+ unsigned int cfg;
+ int ret;
+
++ ret = psci_probe();
++ if (!ret) {
++ pr_debug("psci found. Aborting native init\n");
++ return -ENODEV;
++ }
++
+ if (!cci_probed())
+ return -ENODEV;
+
+diff -Nur linux-3.14.36/arch/arm/mach-vexpress/Kconfig linux-openelec/arch/arm/mach-vexpress/Kconfig
+--- linux-3.14.36/arch/arm/mach-vexpress/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-vexpress/Kconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -55,6 +55,7 @@
+
+ config ARCH_VEXPRESS_CA9X4
+ bool "Versatile Express Cortex-A9x4 tile"
++ select ARM_ERRATA_643719
+
+ config ARCH_VEXPRESS_DCSCB
+ bool "Dual Cluster System Control Block (DCSCB) support"
+diff -Nur linux-3.14.36/arch/arm/mach-vexpress/Makefile linux-openelec/arch/arm/mach-vexpress/Makefile
+--- linux-3.14.36/arch/arm/mach-vexpress/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-vexpress/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -8,8 +8,15 @@
+ obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
+ obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
+ CFLAGS_dcscb.o += -march=armv7-a
++CFLAGS_REMOVE_dcscb.o = -pg
+ obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o
++CFLAGS_REMOVE_spc.o = -pg
+ obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o
+ CFLAGS_tc2_pm.o += -march=armv7-a
++CFLAGS_REMOVE_tc2_pm.o = -pg
++ifeq ($(CONFIG_ARCH_VEXPRESS_TC2_PM),y)
++obj-$(CONFIG_ARM_PSCI) += tc2_pm_psci.o
++CFLAGS_REMOVE_tc2_pm_psci.o = -pg
++endif
+ obj-$(CONFIG_SMP) += platsmp.o
+ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+diff -Nur linux-3.14.36/arch/arm/mach-vexpress/spc.c linux-openelec/arch/arm/mach-vexpress/spc.c
+--- linux-3.14.36/arch/arm/mach-vexpress/spc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-vexpress/spc.c 2015-05-06 12:05:43.000000000 -0500
+@@ -392,7 +392,7 @@
+ * +--------------------------+
+ * | 31 20 | 19 0 |
+ * +--------------------------+
+- * | u_volt | freq(kHz) |
++ * | m_volt | freq(kHz) |
+ * +--------------------------+
+ */
+ #define MULT_FACTOR 20
+@@ -414,7 +414,7 @@
+ ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
+ if (!ret) {
+ opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
+- opps->u_volt = data >> VOLT_SHIFT;
++ opps->u_volt = (data >> VOLT_SHIFT) * 1000;
+ } else {
+ break;
+ }
+diff -Nur linux-3.14.36/arch/arm/mach-vexpress/tc2_pm.c linux-openelec/arch/arm/mach-vexpress/tc2_pm.c
+--- linux-3.14.36/arch/arm/mach-vexpress/tc2_pm.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-vexpress/tc2_pm.c 2015-05-06 12:05:43.000000000 -0500
+@@ -27,6 +27,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <asm/cp15.h>
++#include <asm/psci.h>
+
+ #include <linux/arm-cci.h>
+
+@@ -329,6 +330,12 @@
+ u32 a15_cluster_id, a7_cluster_id, sys_info;
+ struct device_node *np;
+
++ ret = psci_probe();
++ if (!ret) {
++ pr_debug("psci found. Aborting native init\n");
++ return -ENODEV;
++ }
++
+ /*
+ * The power management-related features are hidden behind
+ * SCC registers. We need to extract runtime information like
+diff -Nur linux-3.14.36/arch/arm/mach-vexpress/tc2_pm_psci.c linux-openelec/arch/arm/mach-vexpress/tc2_pm_psci.c
+--- linux-3.14.36/arch/arm/mach-vexpress/tc2_pm_psci.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mach-vexpress/tc2_pm_psci.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,173 @@
++/*
++ * arch/arm/mach-vexpress/tc2_pm_psci.c - TC2 PSCI support
++ *
++ * Created by: Achin Gupta, December 2012
++ * Copyright: (C) 2012 ARM Limited
++ *
++ * Some portions of this file were originally written by Nicolas Pitre
++ * Copyright: (C) 2012 Linaro Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/of.h>
++#include <linux/spinlock.h>
++#include <linux/errno.h>
++
++#include <asm/mcpm.h>
++#include <asm/proc-fns.h>
++#include <asm/cacheflush.h>
++#include <asm/psci.h>
++#include <asm/atomic.h>
++#include <asm/cputype.h>
++#include <asm/cp15.h>
++
++#include <mach/motherboard.h>
++
++#include <linux/vexpress.h>
++
++/*
++ * Platform specific state id understood by the firmware and used to
++ * program the power controller
++ */
++#define PSCI_POWER_STATE_ID 0
++
++#define TC2_CLUSTERS 2
++#define TC2_MAX_CPUS_PER_CLUSTER 3
++
++static atomic_t tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];
++
++static int tc2_pm_psci_power_up(unsigned int cpu, unsigned int cluster)
++{
++ unsigned int mpidr = (cluster << 8) | cpu;
++ int ret = 0;
++
++ BUG_ON(!psci_ops.cpu_on);
++
++ switch (atomic_inc_return(&tc2_pm_use_count[cpu][cluster])) {
++ case 1:
++ /*
++ * This is a request to power up a cpu that linux thinks has
++ * been powered down. Retries are needed if the firmware has
++ * seen the power down request as yet.
++ */
++ do
++ ret = psci_ops.cpu_on(mpidr,
++ virt_to_phys(mcpm_entry_point));
++ while (ret == -EAGAIN);
++
++ return ret;
++ case 2:
++ /* This power up request has overtaken a power down request */
++ return ret;
++ default:
++ /* Any other value is a bug */
++ BUG();
++ }
++}
++
++static void tc2_pm_psci_power_down(void)
++{
++ struct psci_power_state power_state;
++ unsigned int mpidr, cpu, cluster;
++
++ mpidr = read_cpuid_mpidr();
++ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
++ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
++
++ BUG_ON(!psci_ops.cpu_off);
++
++ switch (atomic_dec_return(&tc2_pm_use_count[cpu][cluster])) {
++ case 1:
++ /*
++ * Overtaken by a power up. Flush caches, exit coherency,
++ * return & fake a reset
++ */
++ set_cr(get_cr() & ~CR_C);
++
++ flush_cache_louis();
++
++ asm volatile ("clrex");
++ set_auxcr(get_auxcr() & ~(1 << 6));
++
++ return;
++ case 0:
++ /* A normal request to possibly power down the cluster */
++ power_state.id = PSCI_POWER_STATE_ID;
++ power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
++ power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;
++
++ psci_ops.cpu_off(power_state);
++
++ /* On success this function never returns */
++ default:
++ /* Any other value is a bug */
++ BUG();
++ }
++}
++
++static void tc2_pm_psci_suspend(u64 unused)
++{
++ struct psci_power_state power_state;
++
++ BUG_ON(!psci_ops.cpu_suspend);
++
++ /* On TC2 always attempt to power down the cluster */
++ power_state.id = PSCI_POWER_STATE_ID;
++ power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
++ power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;
++
++ psci_ops.cpu_suspend(power_state, virt_to_phys(mcpm_entry_point));
++
++ /* On success this function never returns */
++ BUG();
++}
++
++static const struct mcpm_platform_ops tc2_pm_power_ops = {
++ .power_up = tc2_pm_psci_power_up,
++ .power_down = tc2_pm_psci_power_down,
++ .suspend = tc2_pm_psci_suspend,
++};
++
++static void __init tc2_pm_usage_count_init(void)
++{
++ unsigned int mpidr, cpu, cluster;
++
++ mpidr = read_cpuid_mpidr();
++ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
++ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
++
++ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
++ BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
++
++ atomic_set(&tc2_pm_use_count[cpu][cluster], 1);
++}
++
++static int __init tc2_pm_psci_init(void)
++{
++ int ret;
++
++ ret = psci_probe();
++ if (ret) {
++ pr_debug("psci not found. Aborting psci init\n");
++ return -ENODEV;
++ }
++
++ if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7"))
++ return -ENODEV;
++
++ tc2_pm_usage_count_init();
++
++ ret = mcpm_platform_register(&tc2_pm_power_ops);
++ if (!ret)
++ ret = mcpm_sync_init(NULL);
++ if (!ret)
++ pr_info("TC2 power management using PSCI initialized\n");
++ return ret;
++}
++
++early_initcall(tc2_pm_psci_init);
+diff -Nur linux-3.14.36/arch/arm/mach-vexpress/v2m.c linux-openelec/arch/arm/mach-vexpress/v2m.c
+--- linux-3.14.36/arch/arm/mach-vexpress/v2m.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-vexpress/v2m.c 2015-05-06 12:05:43.000000000 -0500
+@@ -7,6 +7,7 @@
+ #include <linux/io.h>
+ #include <linux/smp.h>
+ #include <linux/init.h>
++#include <linux/memblock.h>
+ #include <linux/of_address.h>
+ #include <linux/of_fdt.h>
+ #include <linux/of_irq.h>
+@@ -369,6 +370,31 @@
+ .init_machine = v2m_init,
+ MACHINE_END
+
++static void __init v2m_dt_hdlcd_init(void)
++{
++ struct device_node *node;
++ int len, na, ns;
++ const __be32 *prop;
++ phys_addr_t fb_base, fb_size;
++
++ node = of_find_compatible_node(NULL, NULL, "arm,hdlcd");
++ if (!node)
++ return;
++
++ na = of_n_addr_cells(node);
++ ns = of_n_size_cells(node);
++
++ prop = of_get_property(node, "framebuffer", &len);
++ if (WARN_ON(!prop || len < (na + ns) * sizeof(*prop)))
++ return;
++
++ fb_base = of_read_number(prop, na);
++ fb_size = of_read_number(prop + na, ns);
++
++ if (WARN_ON(memblock_remove(fb_base, fb_size)))
++ return;
++};
++
+ static struct map_desc v2m_rs1_io_desc __initdata = {
+ .virtual = V2M_PERIPH,
+ .pfn = __phys_to_pfn(0x1c000000),
+@@ -421,6 +447,8 @@
+ }
+
+ versatile_sched_clock_init(vexpress_get_24mhz_clock_base(), 24000000);
++
++ v2m_dt_hdlcd_init();
+ }
+
+ static const struct of_device_id v2m_dt_bus_match[] __initconst = {
+diff -Nur linux-3.14.36/arch/arm/mach-zynq/common.c linux-openelec/arch/arm/mach-zynq/common.c
+--- linux-3.14.36/arch/arm/mach-zynq/common.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mach-zynq/common.c 2015-05-06 12:05:43.000000000 -0500
+@@ -67,7 +67,7 @@
+ /*
+ * 64KB way size, 8-way associativity, parity disabled
+ */
+- l2x0_of_init(0x02060000, 0xF0F0FFFF);
++ l2x0_of_init(0x02000000, 0xf0ffffff);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+
+diff -Nur linux-3.14.36/arch/arm/mm/cache-feroceon-l2.c linux-openelec/arch/arm/mm/cache-feroceon-l2.c
+--- linux-3.14.36/arch/arm/mm/cache-feroceon-l2.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mm/cache-feroceon-l2.c 2015-05-06 12:05:43.000000000 -0500
+@@ -343,7 +343,6 @@
+ outer_cache.inv_range = feroceon_l2_inv_range;
+ outer_cache.clean_range = feroceon_l2_clean_range;
+ outer_cache.flush_range = feroceon_l2_flush_range;
+- outer_cache.inv_all = l2_inv_all;
+
+ enable_l2();
+
+diff -Nur linux-3.14.36/arch/arm/mm/cache-l2x0.c linux-openelec/arch/arm/mm/cache-l2x0.c
+--- linux-3.14.36/arch/arm/mm/cache-l2x0.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mm/cache-l2x0.c 2015-05-06 12:05:43.000000000 -0500
+@@ -16,18 +16,33 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
++#include <linux/cpu.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
++#include <linux/smp.h>
+ #include <linux/spinlock.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+
+ #include <asm/cacheflush.h>
++#include <asm/cp15.h>
++#include <asm/cputype.h>
+ #include <asm/hardware/cache-l2x0.h>
+ #include "cache-tauros3.h"
+ #include "cache-aurora-l2.h"
+
++struct l2c_init_data {
++ const char *type;
++ unsigned way_size_0;
++ unsigned num_lock;
++ void (*of_parse)(const struct device_node *, u32 *, u32 *);
++ void (*enable)(void __iomem *, u32, unsigned);
++ void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
++ void (*save)(void __iomem *);
++ struct outer_cache_fns outer_cache;
++};
++
+ #define CACHE_LINE_SIZE 32
+
+ static void __iomem *l2x0_base;
+@@ -36,96 +51,116 @@
+ static u32 l2x0_size;
+ static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
+
+-/* Aurora don't have the cache ID register available, so we have to
+- * pass it though the device tree */
+-static u32 cache_id_part_number_from_dt;
+-
+ struct l2x0_regs l2x0_saved_regs;
+
+-struct l2x0_of_data {
+- void (*setup)(const struct device_node *, u32 *, u32 *);
+- void (*save)(void);
+- struct outer_cache_fns outer_cache;
+-};
+-
+-static bool of_init = false;
+-
+-static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
++/*
++ * Common code for all cache controllers.
++ */
++static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
+ {
+ /* wait for cache operation by line or way to complete */
+ while (readl_relaxed(reg) & mask)
+ cpu_relax();
+ }
+
+-#ifdef CONFIG_CACHE_PL310
+-static inline void cache_wait(void __iomem *reg, unsigned long mask)
++/*
++ * By default, we write directly to secure registers. Platforms must
++ * override this if they are running non-secure.
++ */
++static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
+ {
+- /* cache operations by line are atomic on PL310 */
++ if (val == readl_relaxed(base + reg))
++ return;
++ if (outer_cache.write_sec)
++ outer_cache.write_sec(val, reg);
++ else
++ writel_relaxed(val, base + reg);
+ }
+-#else
+-#define cache_wait cache_wait_way
+-#endif
+
+-static inline void cache_sync(void)
++/*
++ * This should only be called when we have a requirement that the
++ * register be written due to a work-around, as platforms running
++ * in non-secure mode may not be able to access this register.
++ */
++static inline void l2c_set_debug(void __iomem *base, unsigned long val)
+ {
+- void __iomem *base = l2x0_base;
+-
+- writel_relaxed(0, base + sync_reg_offset);
+- cache_wait(base + L2X0_CACHE_SYNC, 1);
++ l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
+ }
+
+-static inline void l2x0_clean_line(unsigned long addr)
++static void __l2c_op_way(void __iomem *reg)
+ {
+- void __iomem *base = l2x0_base;
+- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
++ writel_relaxed(l2x0_way_mask, reg);
++ l2c_wait_mask(reg, l2x0_way_mask);
+ }
+
+-static inline void l2x0_inv_line(unsigned long addr)
++static inline void l2c_unlock(void __iomem *base, unsigned num)
+ {
+- void __iomem *base = l2x0_base;
+- cache_wait(base + L2X0_INV_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_INV_LINE_PA);
++ unsigned i;
++
++ for (i = 0; i < num; i++) {
++ writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
++ i * L2X0_LOCKDOWN_STRIDE);
++ writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
++ i * L2X0_LOCKDOWN_STRIDE);
++ }
+ }
+
+-#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+-static inline void debug_writel(unsigned long val)
++/*
++ * Enable the L2 cache controller. This function must only be
++ * called when the cache controller is known to be disabled.
++ */
++static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
+ {
+- if (outer_cache.set_debug)
+- outer_cache.set_debug(val);
++ unsigned long flags;
++
++ l2c_write_sec(aux, base, L2X0_AUX_CTRL);
++
++ l2c_unlock(base, num_lock);
++
++ local_irq_save(flags);
++ __l2c_op_way(base + L2X0_INV_WAY);
++ writel_relaxed(0, base + sync_reg_offset);
++ l2c_wait_mask(base + sync_reg_offset, 1);
++ local_irq_restore(flags);
++
++ l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
+ }
+
+-static void pl310_set_debug(unsigned long val)
++static void l2c_disable(void)
+ {
+- writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
++ void __iomem *base = l2x0_base;
++
++ outer_cache.flush_all();
++ l2c_write_sec(0, base, L2X0_CTRL);
++ dsb(st);
+ }
+-#else
+-/* Optimised out for non-errata case */
+-static inline void debug_writel(unsigned long val)
++
++#ifdef CONFIG_CACHE_PL310
++static inline void cache_wait(void __iomem *reg, unsigned long mask)
+ {
++ /* cache operations by line are atomic on PL310 */
+ }
+-
+-#define pl310_set_debug NULL
++#else
++#define cache_wait l2c_wait_mask
+ #endif
+
+-#ifdef CONFIG_PL310_ERRATA_588369
+-static inline void l2x0_flush_line(unsigned long addr)
++static inline void cache_sync(void)
+ {
+ void __iomem *base = l2x0_base;
+
+- /* Clean by PA followed by Invalidate by PA */
+- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
+- cache_wait(base + L2X0_INV_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_INV_LINE_PA);
++ writel_relaxed(0, base + sync_reg_offset);
++ cache_wait(base + L2X0_CACHE_SYNC, 1);
+ }
+-#else
+
+-static inline void l2x0_flush_line(unsigned long addr)
++#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
++static inline void debug_writel(unsigned long val)
++{
++ l2c_set_debug(l2x0_base, val);
++}
++#else
++/* Optimised out for non-errata case */
++static inline void debug_writel(unsigned long val)
+ {
+- void __iomem *base = l2x0_base;
+- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
+ }
+ #endif
+
+@@ -141,8 +176,7 @@
+ static void __l2x0_flush_all(void)
+ {
+ debug_writel(0x03);
+- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+- cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
++ __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
+ cache_sync();
+ debug_writel(0x00);
+ }
+@@ -157,274 +191,882 @@
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+
+-static void l2x0_clean_all(void)
++static void l2x0_disable(void)
+ {
+ unsigned long flags;
+
+- /* clean all ways */
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+- cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+- cache_sync();
++ __l2x0_flush_all();
++ l2c_write_sec(0, l2x0_base, L2X0_CTRL);
++ dsb(st);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+
+-static void l2x0_inv_all(void)
++static void l2c_save(void __iomem *base)
+ {
+- unsigned long flags;
++ l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++}
+
+- /* invalidate all ways */
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
+- /* Invalidating when L2 is enabled is a nono */
+- BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
+- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+- cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+- cache_sync();
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++/*
++ * L2C-210 specific code.
++ *
++ * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
++ * ensure that no background operation is running. The way operations
++ * are all background tasks.
++ *
++ * While a background operation is in progress, any new operation is
++ * ignored (unspecified whether this causes an error.) Thankfully, not
++ * used on SMP.
++ *
++ * Never has a different sync register other than L2X0_CACHE_SYNC, but
++ * we use sync_reg_offset here so we can share some of this with L2C-310.
++ */
++static void __l2c210_cache_sync(void __iomem *base)
++{
++ writel_relaxed(0, base + sync_reg_offset);
+ }
+
+-static void l2x0_inv_range(unsigned long start, unsigned long end)
++static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
++ unsigned long end)
++{
++ while (start < end) {
++ writel_relaxed(start, reg);
++ start += CACHE_LINE_SIZE;
++ }
++}
++
++static void l2c210_inv_range(unsigned long start, unsigned long end)
+ {
+ void __iomem *base = l2x0_base;
+- unsigned long flags;
+
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+- debug_writel(0x03);
+- l2x0_flush_line(start);
+- debug_writel(0x00);
++ writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (end & (CACHE_LINE_SIZE - 1)) {
+ end &= ~(CACHE_LINE_SIZE - 1);
+- debug_writel(0x03);
+- l2x0_flush_line(end);
+- debug_writel(0x00);
++ writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
+ }
+
++ __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c210_clean_range(unsigned long start, unsigned long end)
++{
++ void __iomem *base = l2x0_base;
++
++ start &= ~(CACHE_LINE_SIZE - 1);
++ __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c210_flush_range(unsigned long start, unsigned long end)
++{
++ void __iomem *base = l2x0_base;
++
++ start &= ~(CACHE_LINE_SIZE - 1);
++ __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c210_flush_all(void)
++{
++ void __iomem *base = l2x0_base;
++
++ BUG_ON(!irqs_disabled());
++
++ __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c210_sync(void)
++{
++ __l2c210_cache_sync(l2x0_base);
++}
++
++static void l2c210_resume(void)
++{
++ void __iomem *base = l2x0_base;
++
++ if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
++ l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
++}
++
++static const struct l2c_init_data l2c210_data __initconst = {
++ .type = "L2C-210",
++ .way_size_0 = SZ_8K,
++ .num_lock = 1,
++ .enable = l2c_enable,
++ .save = l2c_save,
++ .outer_cache = {
++ .inv_range = l2c210_inv_range,
++ .clean_range = l2c210_clean_range,
++ .flush_range = l2c210_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c_disable,
++ .sync = l2c210_sync,
++ .resume = l2c210_resume,
++ },
++};
++
++/*
++ * L2C-220 specific code.
++ *
++ * All operations are background operations: they have to be waited for.
++ * Conflicting requests generate a slave error (which will cause an
++ * imprecise abort.) Never uses sync_reg_offset, so we hard-code the
++ * sync register here.
++ *
++ * However, we can re-use the l2c210_resume call.
++ */
++static inline void __l2c220_cache_sync(void __iomem *base)
++{
++ writel_relaxed(0, base + L2X0_CACHE_SYNC);
++ l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
++}
++
++static void l2c220_op_way(void __iomem *base, unsigned reg)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ __l2c_op_way(base + reg);
++ __l2c220_cache_sync(base);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++}
++
++static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
++ unsigned long end, unsigned long flags)
++{
++ raw_spinlock_t *lock = &l2x0_lock;
++
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+ while (start < blk_end) {
+- l2x0_inv_line(start);
++ l2c_wait_mask(reg, 1);
++ writel_relaxed(start, reg);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (blk_end < end) {
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ }
+ }
+- cache_wait(base + L2X0_INV_LINE_PA, 1);
+- cache_sync();
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++
++ return flags;
+ }
+
+-static void l2x0_clean_range(unsigned long start, unsigned long end)
++static void l2c220_inv_range(unsigned long start, unsigned long end)
+ {
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+
+- if ((end - start) >= l2x0_size) {
+- l2x0_clean_all();
+- return;
+- }
+-
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+- start &= ~(CACHE_LINE_SIZE - 1);
+- while (start < end) {
+- unsigned long blk_end = start + min(end - start, 4096UL);
+-
+- while (start < blk_end) {
+- l2x0_clean_line(start);
++ if ((start | end) & (CACHE_LINE_SIZE - 1)) {
++ if (start & (CACHE_LINE_SIZE - 1)) {
++ start &= ~(CACHE_LINE_SIZE - 1);
++ writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+- if (blk_end < end) {
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
++ if (end & (CACHE_LINE_SIZE - 1)) {
++ end &= ~(CACHE_LINE_SIZE - 1);
++ l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
++ writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
+ }
+ }
+- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+- cache_sync();
++
++ flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
++ start, end, flags);
++ l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
++ __l2c220_cache_sync(base);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+
+-static void l2x0_flush_range(unsigned long start, unsigned long end)
++static void l2c220_clean_range(unsigned long start, unsigned long end)
+ {
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+
++ start &= ~(CACHE_LINE_SIZE - 1);
+ if ((end - start) >= l2x0_size) {
+- l2x0_flush_all();
++ l2c220_op_way(base, L2X0_CLEAN_WAY);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
++ start, end, flags);
++ l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
++ __l2c220_cache_sync(base);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++}
++
++static void l2c220_flush_range(unsigned long start, unsigned long end)
++{
++ void __iomem *base = l2x0_base;
++ unsigned long flags;
++
+ start &= ~(CACHE_LINE_SIZE - 1);
++ if ((end - start) >= l2x0_size) {
++ l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
++ return;
++ }
++
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
++ start, end, flags);
++ l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
++ __l2c220_cache_sync(base);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++}
++
++static void l2c220_flush_all(void)
++{
++ l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
++}
++
++static void l2c220_sync(void)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ __l2c220_cache_sync(l2x0_base);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++}
++
++static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
++{
++ /*
++ * Always enable non-secure access to the lockdown registers -
++ * we write to them as part of the L2C enable sequence so they
++ * need to be accessible.
++ */
++ aux |= L220_AUX_CTRL_NS_LOCKDOWN;
++
++ l2c_enable(base, aux, num_lock);
++}
++
++static const struct l2c_init_data l2c220_data = {
++ .type = "L2C-220",
++ .way_size_0 = SZ_8K,
++ .num_lock = 1,
++ .enable = l2c220_enable,
++ .save = l2c_save,
++ .outer_cache = {
++ .inv_range = l2c220_inv_range,
++ .clean_range = l2c220_clean_range,
++ .flush_range = l2c220_flush_range,
++ .flush_all = l2c220_flush_all,
++ .disable = l2c_disable,
++ .sync = l2c220_sync,
++ .resume = l2c210_resume,
++ },
++};
++
++/*
++ * L2C-310 specific code.
++ *
++ * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
++ * and the way operations are all background tasks. However, issuing an
++ * operation while a background operation is in progress results in a
++ * SLVERR response. We can reuse:
++ *
++ * __l2c210_cache_sync (using sync_reg_offset)
++ * l2c210_sync
++ * l2c210_inv_range (if 588369 is not applicable)
++ * l2c210_clean_range
++ * l2c210_flush_range (if 588369 is not applicable)
++ * l2c210_flush_all (if 727915 is not applicable)
++ *
++ * Errata:
++ * 588369: PL310 R0P0->R1P0, fixed R2P0.
++ * Affects: all clean+invalidate operations
++ * clean and invalidate skips the invalidate step, so we need to issue
++ * separate operations. We also require the above debug workaround
++ * enclosing this code fragment on affected parts. On unaffected parts,
++ * we must not use this workaround without the debug register writes
++ * to avoid exposing a problem similar to 727915.
++ *
++ * 727915: PL310 R2P0->R3P0, fixed R3P1.
++ * Affects: clean+invalidate by way
++ * clean and invalidate by way runs in the background, and a store can
++ * hit the line between the clean operation and invalidate operation,
++ * resulting in the store being lost.
++ *
++ * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
++ * Affects: 8x64-bit (double fill) line fetches
++ * double fill line fetches can fail to cause dirty data to be evicted
++ * from the cache before the new data overwrites the second line.
++ *
++ * 753970: PL310 R3P0, fixed R3P1.
++ * Affects: sync
++ * prevents merging writes after the sync operation, until another L2C
++ * operation is performed (or a number of other conditions.)
++ *
++ * 769419: PL310 R0P0->R3P1, fixed R3P2.
++ * Affects: store buffer
++ * store buffer is not automatically drained.
++ */
++static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
++{
++ void __iomem *base = l2x0_base;
++
++ if ((start | end) & (CACHE_LINE_SIZE - 1)) {
++ unsigned long flags;
++
++ /* Erratum 588369 for both clean+invalidate operations */
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ l2c_set_debug(base, 0x03);
++
++ if (start & (CACHE_LINE_SIZE - 1)) {
++ start &= ~(CACHE_LINE_SIZE - 1);
++ writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
++ writel_relaxed(start, base + L2X0_INV_LINE_PA);
++ start += CACHE_LINE_SIZE;
++ }
++
++ if (end & (CACHE_LINE_SIZE - 1)) {
++ end &= ~(CACHE_LINE_SIZE - 1);
++ writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
++ writel_relaxed(end, base + L2X0_INV_LINE_PA);
++ }
++
++ l2c_set_debug(base, 0x00);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ }
++
++ __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
++{
++ raw_spinlock_t *lock = &l2x0_lock;
++ unsigned long flags;
++ void __iomem *base = l2x0_base;
++
++ raw_spin_lock_irqsave(lock, flags);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+- debug_writel(0x03);
++ l2c_set_debug(base, 0x03);
+ while (start < blk_end) {
+- l2x0_flush_line(start);
++ writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
++ writel_relaxed(start, base + L2X0_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+- debug_writel(0x00);
++ l2c_set_debug(base, 0x00);
+
+ if (blk_end < end) {
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ }
+ }
+- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+- cache_sync();
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
++ __l2c210_cache_sync(base);
+ }
+
+-static void l2x0_disable(void)
++static void l2c310_flush_all_erratum(void)
+ {
++ void __iomem *base = l2x0_base;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+- __l2x0_flush_all();
+- writel_relaxed(0, l2x0_base + L2X0_CTRL);
+- dsb(st);
++ l2c_set_debug(base, 0x03);
++ __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
++ l2c_set_debug(base, 0x00);
++ __l2c210_cache_sync(base);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+
+-static void l2x0_unlock(u32 cache_id)
++static void __init l2c310_save(void __iomem *base)
+ {
+- int lockregs;
+- int i;
++ unsigned revision;
+
+- switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+- case L2X0_CACHE_ID_PART_L310:
+- lockregs = 8;
+- break;
+- case AURORA_CACHE_ID:
+- lockregs = 4;
++ l2c_save(base);
++
++ l2x0_saved_regs.tag_latency = readl_relaxed(base +
++ L310_TAG_LATENCY_CTRL);
++ l2x0_saved_regs.data_latency = readl_relaxed(base +
++ L310_DATA_LATENCY_CTRL);
++ l2x0_saved_regs.filter_end = readl_relaxed(base +
++ L310_ADDR_FILTER_END);
++ l2x0_saved_regs.filter_start = readl_relaxed(base +
++ L310_ADDR_FILTER_START);
++
++ revision = readl_relaxed(base + L2X0_CACHE_ID) &
++ L2X0_CACHE_ID_RTL_MASK;
++
++ /* From r2p0, there is Prefetch offset/control register */
++ if (revision >= L310_CACHE_ID_RTL_R2P0)
++ l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
++ L310_PREFETCH_CTRL);
++
++ /* From r3p0, there is Power control register */
++ if (revision >= L310_CACHE_ID_RTL_R3P0)
++ l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
++ L310_POWER_CTRL);
++}
++
++static void l2c310_resume(void)
++{
++ void __iomem *base = l2x0_base;
++
++ if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
++ unsigned revision;
++
++ /* restore pl310 setup */
++ writel_relaxed(l2x0_saved_regs.tag_latency,
++ base + L310_TAG_LATENCY_CTRL);
++ writel_relaxed(l2x0_saved_regs.data_latency,
++ base + L310_DATA_LATENCY_CTRL);
++ writel_relaxed(l2x0_saved_regs.filter_end,
++ base + L310_ADDR_FILTER_END);
++ writel_relaxed(l2x0_saved_regs.filter_start,
++ base + L310_ADDR_FILTER_START);
++
++ revision = readl_relaxed(base + L2X0_CACHE_ID) &
++ L2X0_CACHE_ID_RTL_MASK;
++
++ if (revision >= L310_CACHE_ID_RTL_R2P0)
++ l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
++ L310_PREFETCH_CTRL);
++ if (revision >= L310_CACHE_ID_RTL_R3P0)
++ l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
++ L310_POWER_CTRL);
++
++ l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
++
++ /* Re-enable full-line-of-zeros for Cortex-A9 */
++ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
++ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
++ }
++}
++
++static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
++{
++ switch (act & ~CPU_TASKS_FROZEN) {
++ case CPU_STARTING:
++ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
+ break;
+- default:
+- /* L210 and unknown types */
+- lockregs = 1;
++ case CPU_DYING:
++ set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
+ break;
+ }
++ return NOTIFY_OK;
++}
+
+- for (i = 0; i < lockregs; i++) {
+- writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+- i * L2X0_LOCKDOWN_STRIDE);
+- writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+- i * L2X0_LOCKDOWN_STRIDE);
++static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
++{
++ unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
++ bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
++
++ if (rev >= L310_CACHE_ID_RTL_R2P0) {
++ if (cortex_a9) {
++ aux |= L310_AUX_CTRL_EARLY_BRESP;
++ pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
++ } else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
++ pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
++ aux &= ~L310_AUX_CTRL_EARLY_BRESP;
++ }
++ }
++
++ if (cortex_a9) {
++ u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
++ u32 acr = get_auxcr();
++
++ pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
++
++ if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
++ pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
++
++ if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
++ pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
++
++ if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
++ aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
++ pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
++ }
++ } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
++ pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
++ aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
++ }
++
++ if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
++ u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
++
++ pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
++ aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
++ aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
++ 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
++ }
++
++ /* r3p0 or later has power control register */
++ if (rev >= L310_CACHE_ID_RTL_R3P0) {
++ u32 power_ctrl;
++
++ l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
++ base, L310_POWER_CTRL);
++ power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
++ pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
++ power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
++ power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
++ }
++
++ /*
++ * Always enable non-secure access to the lockdown registers -
++ * we write to them as part of the L2C enable sequence so they
++ * need to be accessible.
++ */
++ aux |= L310_AUX_CTRL_NS_LOCKDOWN;
++
++ l2c_enable(base, aux, num_lock);
++
++ if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
++ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
++ cpu_notifier(l2c310_cpu_enable_flz, 0);
+ }
+ }
+
+-void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
++static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
++ struct outer_cache_fns *fns)
+ {
+- u32 aux;
+- u32 cache_id;
+- u32 way_size = 0;
+- int ways;
+- int way_size_shift = L2X0_WAY_SIZE_SHIFT;
+- const char *type;
++ unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
++ const char *errata[8];
++ unsigned n = 0;
+
+- l2x0_base = base;
+- if (cache_id_part_number_from_dt)
+- cache_id = cache_id_part_number_from_dt;
+- else
+- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+- aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++ if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
++ revision < L310_CACHE_ID_RTL_R2P0 &&
++ /* For bcm compatibility */
++ fns->inv_range == l2c210_inv_range) {
++ fns->inv_range = l2c310_inv_range_erratum;
++ fns->flush_range = l2c310_flush_range_erratum;
++ errata[n++] = "588369";
++ }
++
++ if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
++ revision >= L310_CACHE_ID_RTL_R2P0 &&
++ revision < L310_CACHE_ID_RTL_R3P1) {
++ fns->flush_all = l2c310_flush_all_erratum;
++ errata[n++] = "727915";
++ }
++
++ if (revision >= L310_CACHE_ID_RTL_R3P0 &&
++ revision < L310_CACHE_ID_RTL_R3P2) {
++ u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
++ /* I don't think bit23 is required here... but iMX6 does so */
++ if (val & (BIT(30) | BIT(23))) {
++ val &= ~(BIT(30) | BIT(23));
++ l2c_write_sec(val, base, L310_PREFETCH_CTRL);
++ errata[n++] = "752271";
++ }
++ }
++
++ if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
++ revision == L310_CACHE_ID_RTL_R3P0) {
++ sync_reg_offset = L2X0_DUMMY_REG;
++ errata[n++] = "753970";
++ }
++
++ if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
++ errata[n++] = "769419";
++
++ if (n) {
++ unsigned i;
++
++ pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
++ for (i = 0; i < n; i++)
++ pr_cont(" %s", errata[i]);
++ pr_cont(" enabled\n");
++ }
++}
++
++static void l2c310_disable(void)
++{
++ /*
++ * If full-line-of-zeros is enabled, we must first disable it in the
++ * Cortex-A9 auxiliary control register before disabling the L2 cache.
++ */
++ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
++ set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
+
++ l2c_disable();
++}
++
++static const struct l2c_init_data l2c310_init_fns __initconst = {
++ .type = "L2C-310",
++ .way_size_0 = SZ_8K,
++ .num_lock = 8,
++ .enable = l2c310_enable,
++ .fixup = l2c310_fixup,
++ .save = l2c310_save,
++ .outer_cache = {
++ .inv_range = l2c210_inv_range,
++ .clean_range = l2c210_clean_range,
++ .flush_range = l2c210_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c310_disable,
++ .sync = l2c210_sync,
++ .resume = l2c310_resume,
++ },
++};
++
++static void __init __l2c_init(const struct l2c_init_data *data,
++ u32 aux_val, u32 aux_mask, u32 cache_id)
++{
++ struct outer_cache_fns fns;
++ unsigned way_size_bits, ways;
++ u32 aux, old_aux;
++
++ /*
++ * Sanity check the aux values. aux_mask is the bits we preserve
++ * from reading the hardware register, and aux_val is the bits we
++ * set.
++ */
++ if (aux_val & aux_mask)
++ pr_alert("L2C: platform provided aux values permit register corruption.\n");
++
++ old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+ aux &= aux_mask;
+ aux |= aux_val;
+
++ if (old_aux != aux)
++ pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
++ old_aux, aux);
++
+ /* Determine the number of ways */
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ case L2X0_CACHE_ID_PART_L310:
++ if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
++ pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
+ if (aux & (1 << 16))
+ ways = 16;
+ else
+ ways = 8;
+- type = "L310";
+-#ifdef CONFIG_PL310_ERRATA_753970
+- /* Unmapped register. */
+- sync_reg_offset = L2X0_DUMMY_REG;
+-#endif
+- if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
+- outer_cache.set_debug = pl310_set_debug;
+ break;
++
+ case L2X0_CACHE_ID_PART_L210:
++ case L2X0_CACHE_ID_PART_L220:
+ ways = (aux >> 13) & 0xf;
+- type = "L210";
+ break;
+
+ case AURORA_CACHE_ID:
+- sync_reg_offset = AURORA_SYNC_REG;
+ ways = (aux >> 13) & 0xf;
+ ways = 2 << ((ways + 1) >> 2);
+- way_size_shift = AURORA_WAY_SIZE_SHIFT;
+- type = "Aurora";
+ break;
++
+ default:
+ /* Assume unknown chips have 8 ways */
+ ways = 8;
+- type = "L2x0 series";
+ break;
+ }
+
+ l2x0_way_mask = (1 << ways) - 1;
+
+ /*
+- * L2 cache Size = Way size * Number of ways
++ * way_size_0 is the size that a way_size value of zero would be
++ * given the calculation: way_size = way_size_0 << way_size_bits.
++ * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
++ * then way_size_0 would be 8k.
++ *
++ * L2 cache size = number of ways * way size.
++ */
++ way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
++ L2C_AUX_CTRL_WAY_SIZE_SHIFT;
++ l2x0_size = ways * (data->way_size_0 << way_size_bits);
++
++ fns = data->outer_cache;
++ fns.write_sec = outer_cache.write_sec;
++ if (data->fixup)
++ data->fixup(l2x0_base, cache_id, &fns);
++
++ /*
++ * Check if l2x0 controller is already enabled. If we are booting
++ * in non-secure mode accessing the below registers will fault.
+ */
+- way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+- way_size = 1 << (way_size + way_size_shift);
++ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
++ data->enable(l2x0_base, aux, data->num_lock);
+
+- l2x0_size = ways * way_size * SZ_1K;
++ outer_cache = fns;
+
+ /*
+- * Check if l2x0 controller is already enabled.
+- * If you are booting from non-secure mode
+- * accessing the below registers will fault.
++ * It is strange to save the register state before initialisation,
++ * but hey, this is what the DT implementations decided to do.
+ */
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- /* Make sure that I&D is not locked down when starting */
+- l2x0_unlock(cache_id);
++ if (data->save)
++ data->save(l2x0_base);
++
++ /* Re-read it in case some bits are reserved. */
++ aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++
++ pr_info("%s cache controller enabled, %d ways, %d kB\n",
++ data->type, ways, l2x0_size >> 10);
++ pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
++ data->type, cache_id, aux);
++}
+
+- /* l2x0 controller is disabled */
+- writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
++void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
++{
++ const struct l2c_init_data *data;
++ u32 cache_id;
+
+- l2x0_inv_all();
++ l2x0_base = base;
+
+- /* enable L2X0 */
+- writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
++ cache_id = readl_relaxed(base + L2X0_CACHE_ID);
++
++ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
++ default:
++ case L2X0_CACHE_ID_PART_L210:
++ data = &l2c210_data;
++ break;
++
++ case L2X0_CACHE_ID_PART_L220:
++ data = &l2c220_data;
++ break;
++
++ case L2X0_CACHE_ID_PART_L310:
++ data = &l2c310_init_fns;
++ break;
+ }
+
+- /* Re-read it in case some bits are reserved. */
+- aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++ __l2c_init(data, aux_val, aux_mask, cache_id);
++}
++
++#ifdef CONFIG_OF
++static int l2_wt_override;
++
++/* Aurora don't have the cache ID register available, so we have to
++ * pass it though the device tree */
++static u32 cache_id_part_number_from_dt;
++
++static void __init l2x0_of_parse(const struct device_node *np,
++ u32 *aux_val, u32 *aux_mask)
++{
++ u32 data[2] = { 0, 0 };
++ u32 tag = 0;
++ u32 dirty = 0;
++ u32 val = 0, mask = 0;
++
++ of_property_read_u32(np, "arm,tag-latency", &tag);
++ if (tag) {
++ mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
++ val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
++ }
++
++ of_property_read_u32_array(np, "arm,data-latency",
++ data, ARRAY_SIZE(data));
++ if (data[0] && data[1]) {
++ mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
++ L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
++ val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
++ ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
++ }
++
++ of_property_read_u32(np, "arm,dirty-latency", &dirty);
++ if (dirty) {
++ mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
++ val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
++ }
+
+- /* Save the value for resuming. */
+- l2x0_saved_regs.aux_ctrl = aux;
++ *aux_val &= ~mask;
++ *aux_val |= val;
++ *aux_mask &= ~mask;
++}
++
++static const struct l2c_init_data of_l2c210_data __initconst = {
++ .type = "L2C-210",
++ .way_size_0 = SZ_8K,
++ .num_lock = 1,
++ .of_parse = l2x0_of_parse,
++ .enable = l2c_enable,
++ .save = l2c_save,
++ .outer_cache = {
++ .inv_range = l2c210_inv_range,
++ .clean_range = l2c210_clean_range,
++ .flush_range = l2c210_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c_disable,
++ .sync = l2c210_sync,
++ .resume = l2c210_resume,
++ },
++};
++
++static const struct l2c_init_data of_l2c220_data __initconst = {
++ .type = "L2C-220",
++ .way_size_0 = SZ_8K,
++ .num_lock = 1,
++ .of_parse = l2x0_of_parse,
++ .enable = l2c220_enable,
++ .save = l2c_save,
++ .outer_cache = {
++ .inv_range = l2c220_inv_range,
++ .clean_range = l2c220_clean_range,
++ .flush_range = l2c220_flush_range,
++ .flush_all = l2c220_flush_all,
++ .disable = l2c_disable,
++ .sync = l2c220_sync,
++ .resume = l2c210_resume,
++ },
++};
++
++static void __init l2c310_of_parse(const struct device_node *np,
++ u32 *aux_val, u32 *aux_mask)
++{
++ u32 data[3] = { 0, 0, 0 };
++ u32 tag[3] = { 0, 0, 0 };
++ u32 filter[2] = { 0, 0 };
++
++ of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
++ if (tag[0] && tag[1] && tag[2])
++ writel_relaxed(
++ L310_LATENCY_CTRL_RD(tag[0] - 1) |
++ L310_LATENCY_CTRL_WR(tag[1] - 1) |
++ L310_LATENCY_CTRL_SETUP(tag[2] - 1),
++ l2x0_base + L310_TAG_LATENCY_CTRL);
++
++ of_property_read_u32_array(np, "arm,data-latency",
++ data, ARRAY_SIZE(data));
++ if (data[0] && data[1] && data[2])
++ writel_relaxed(
++ L310_LATENCY_CTRL_RD(data[0] - 1) |
++ L310_LATENCY_CTRL_WR(data[1] - 1) |
++ L310_LATENCY_CTRL_SETUP(data[2] - 1),
++ l2x0_base + L310_DATA_LATENCY_CTRL);
+
+- if (!of_init) {
+- outer_cache.inv_range = l2x0_inv_range;
+- outer_cache.clean_range = l2x0_clean_range;
+- outer_cache.flush_range = l2x0_flush_range;
+- outer_cache.sync = l2x0_cache_sync;
+- outer_cache.flush_all = l2x0_flush_all;
+- outer_cache.inv_all = l2x0_inv_all;
+- outer_cache.disable = l2x0_disable;
+- }
+-
+- pr_info("%s cache controller enabled\n", type);
+- pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
+- ways, cache_id, aux, l2x0_size >> 10);
++ of_property_read_u32_array(np, "arm,filter-ranges",
++ filter, ARRAY_SIZE(filter));
++ if (filter[1]) {
++ writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
++ l2x0_base + L310_ADDR_FILTER_END);
++ writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
++ l2x0_base + L310_ADDR_FILTER_START);
++ }
+ }
+
+-#ifdef CONFIG_OF
+-static int l2_wt_override;
++static const struct l2c_init_data of_l2c310_data __initconst = {
++ .type = "L2C-310",
++ .way_size_0 = SZ_8K,
++ .num_lock = 8,
++ .of_parse = l2c310_of_parse,
++ .enable = l2c310_enable,
++ .fixup = l2c310_fixup,
++ .save = l2c310_save,
++ .outer_cache = {
++ .inv_range = l2c210_inv_range,
++ .clean_range = l2c210_clean_range,
++ .flush_range = l2c210_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c310_disable,
++ .sync = l2c210_sync,
++ .resume = l2c310_resume,
++ },
++};
+
+ /*
+ * Note that the end addresses passed to Linux primitives are
+@@ -524,6 +1166,100 @@
+ }
+ }
+
++static void aurora_save(void __iomem *base)
++{
++ l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
++ l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
++}
++
++static void aurora_resume(void)
++{
++ void __iomem *base = l2x0_base;
++
++ if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
++ writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
++ writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
++ }
++}
++
++/*
++ * For Aurora cache in no outer mode, enable via the CP15 coprocessor
++ * broadcasting of cache commands to L2.
++ */
++static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
++ unsigned num_lock)
++{
++ u32 u;
++
++ asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
++ u |= AURORA_CTRL_FW; /* Set the FW bit */
++ asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
++
++ isb();
++
++ l2c_enable(base, aux, num_lock);
++}
++
++static void __init aurora_fixup(void __iomem *base, u32 cache_id,
++ struct outer_cache_fns *fns)
++{
++ sync_reg_offset = AURORA_SYNC_REG;
++}
++
++static void __init aurora_of_parse(const struct device_node *np,
++ u32 *aux_val, u32 *aux_mask)
++{
++ u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
++ u32 mask = AURORA_ACR_REPLACEMENT_MASK;
++
++ of_property_read_u32(np, "cache-id-part",
++ &cache_id_part_number_from_dt);
++
++ /* Determine and save the write policy */
++ l2_wt_override = of_property_read_bool(np, "wt-override");
++
++ if (l2_wt_override) {
++ val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
++ mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
++ }
++
++ *aux_val &= ~mask;
++ *aux_val |= val;
++ *aux_mask &= ~mask;
++}
++
++static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
++ .type = "Aurora",
++ .way_size_0 = SZ_4K,
++ .num_lock = 4,
++ .of_parse = aurora_of_parse,
++ .enable = l2c_enable,
++ .fixup = aurora_fixup,
++ .save = aurora_save,
++ .outer_cache = {
++ .inv_range = aurora_inv_range,
++ .clean_range = aurora_clean_range,
++ .flush_range = aurora_flush_range,
++ .flush_all = l2x0_flush_all,
++ .disable = l2x0_disable,
++ .sync = l2x0_cache_sync,
++ .resume = aurora_resume,
++ },
++};
++
++static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
++ .type = "Aurora",
++ .way_size_0 = SZ_4K,
++ .num_lock = 4,
++ .of_parse = aurora_of_parse,
++ .enable = aurora_enable_no_outer,
++ .fixup = aurora_fixup,
++ .save = aurora_save,
++ .outer_cache = {
++ .resume = aurora_resume,
++ },
++};
++
+ /*
+ * For certain Broadcom SoCs, depending on the address range, different offsets
+ * need to be added to the address before passing it to L2 for
+@@ -588,16 +1324,16 @@
+
+ /* normal case, no cross section between start and end */
+ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+- l2x0_inv_range(new_start, new_end);
++ l2c210_inv_range(new_start, new_end);
+ return;
+ }
+
+ /* They cross sections, so it can only be a cross from section
+ * 2 to section 3
+ */
+- l2x0_inv_range(new_start,
++ l2c210_inv_range(new_start,
+ bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+- l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
++ l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+ new_end);
+ }
+
+@@ -610,26 +1346,21 @@
+ if (unlikely(end <= start))
+ return;
+
+- if ((end - start) >= l2x0_size) {
+- l2x0_clean_all();
+- return;
+- }
+-
+ new_start = bcm_l2_phys_addr(start);
+ new_end = bcm_l2_phys_addr(end);
+
+ /* normal case, no cross section between start and end */
+ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+- l2x0_clean_range(new_start, new_end);
++ l2c210_clean_range(new_start, new_end);
+ return;
+ }
+
+ /* They cross sections, so it can only be a cross from section
+ * 2 to section 3
+ */
+- l2x0_clean_range(new_start,
++ l2c210_clean_range(new_start,
+ bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+- l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
++ l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+ new_end);
+ }
+
+@@ -643,7 +1374,7 @@
+ return;
+
+ if ((end - start) >= l2x0_size) {
+- l2x0_flush_all();
++ outer_cache.flush_all();
+ return;
+ }
+
+@@ -652,283 +1383,67 @@
+
+ /* normal case, no cross section between start and end */
+ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+- l2x0_flush_range(new_start, new_end);
++ l2c210_flush_range(new_start, new_end);
+ return;
+ }
+
+ /* They cross sections, so it can only be a cross from section
+ * 2 to section 3
+ */
+- l2x0_flush_range(new_start,
++ l2c210_flush_range(new_start,
+ bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+- l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
++ l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+ new_end);
+ }
+
+-static void __init l2x0_of_setup(const struct device_node *np,
+- u32 *aux_val, u32 *aux_mask)
+-{
+- u32 data[2] = { 0, 0 };
+- u32 tag = 0;
+- u32 dirty = 0;
+- u32 val = 0, mask = 0;
+-
+- of_property_read_u32(np, "arm,tag-latency", &tag);
+- if (tag) {
+- mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
+- val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
+- }
+-
+- of_property_read_u32_array(np, "arm,data-latency",
+- data, ARRAY_SIZE(data));
+- if (data[0] && data[1]) {
+- mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
+- L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
+- val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
+- ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
+- }
+-
+- of_property_read_u32(np, "arm,dirty-latency", &dirty);
+- if (dirty) {
+- mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
+- val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
+- }
+-
+- *aux_val &= ~mask;
+- *aux_val |= val;
+- *aux_mask &= ~mask;
+-}
+-
+-static void __init pl310_of_setup(const struct device_node *np,
+- u32 *aux_val, u32 *aux_mask)
+-{
+- u32 data[3] = { 0, 0, 0 };
+- u32 tag[3] = { 0, 0, 0 };
+- u32 filter[2] = { 0, 0 };
+-
+- of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
+- if (tag[0] && tag[1] && tag[2])
+- writel_relaxed(
+- ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+- ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+- ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+- l2x0_base + L2X0_TAG_LATENCY_CTRL);
+-
+- of_property_read_u32_array(np, "arm,data-latency",
+- data, ARRAY_SIZE(data));
+- if (data[0] && data[1] && data[2])
+- writel_relaxed(
+- ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+- ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+- ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+- l2x0_base + L2X0_DATA_LATENCY_CTRL);
+-
+- of_property_read_u32_array(np, "arm,filter-ranges",
+- filter, ARRAY_SIZE(filter));
+- if (filter[1]) {
+- writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
+- l2x0_base + L2X0_ADDR_FILTER_END);
+- writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
+- l2x0_base + L2X0_ADDR_FILTER_START);
+- }
+-}
+-
+-static void __init pl310_save(void)
+-{
+- u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+- L2X0_CACHE_ID_RTL_MASK;
+-
+- l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
+- L2X0_TAG_LATENCY_CTRL);
+- l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
+- L2X0_DATA_LATENCY_CTRL);
+- l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
+- L2X0_ADDR_FILTER_END);
+- l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
+- L2X0_ADDR_FILTER_START);
+-
+- if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+- /*
+- * From r2p0, there is Prefetch offset/control register
+- */
+- l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
+- L2X0_PREFETCH_CTRL);
+- /*
+- * From r3p0, there is Power control register
+- */
+- if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+- l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
+- L2X0_POWER_CTRL);
+- }
+-}
++/* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */
++static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
++ .type = "BCM-L2C-310",
++ .way_size_0 = SZ_8K,
++ .num_lock = 8,
++ .of_parse = l2c310_of_parse,
++ .enable = l2c310_enable,
++ .save = l2c310_save,
++ .outer_cache = {
++ .inv_range = bcm_inv_range,
++ .clean_range = bcm_clean_range,
++ .flush_range = bcm_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c310_disable,
++ .sync = l2c210_sync,
++ .resume = l2c310_resume,
++ },
++};
+
+-static void aurora_save(void)
++static void __init tauros3_save(void __iomem *base)
+ {
+- l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
+- l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+-}
++ l2c_save(base);
+
+-static void __init tauros3_save(void)
+-{
+ l2x0_saved_regs.aux2_ctrl =
+- readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
++ readl_relaxed(base + TAUROS3_AUX2_CTRL);
+ l2x0_saved_regs.prefetch_ctrl =
+- readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
+-}
+-
+-static void l2x0_resume(void)
+-{
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- /* restore aux ctrl and enable l2 */
+- l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
+-
+- writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
+- L2X0_AUX_CTRL);
+-
+- l2x0_inv_all();
+-
+- writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
+- }
+-}
+-
+-static void pl310_resume(void)
+-{
+- u32 l2x0_revision;
+-
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- /* restore pl310 setup */
+- writel_relaxed(l2x0_saved_regs.tag_latency,
+- l2x0_base + L2X0_TAG_LATENCY_CTRL);
+- writel_relaxed(l2x0_saved_regs.data_latency,
+- l2x0_base + L2X0_DATA_LATENCY_CTRL);
+- writel_relaxed(l2x0_saved_regs.filter_end,
+- l2x0_base + L2X0_ADDR_FILTER_END);
+- writel_relaxed(l2x0_saved_regs.filter_start,
+- l2x0_base + L2X0_ADDR_FILTER_START);
+-
+- l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+- L2X0_CACHE_ID_RTL_MASK;
+-
+- if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+- writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+- l2x0_base + L2X0_PREFETCH_CTRL);
+- if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+- writel_relaxed(l2x0_saved_regs.pwr_ctrl,
+- l2x0_base + L2X0_POWER_CTRL);
+- }
+- }
+-
+- l2x0_resume();
+-}
+-
+-static void aurora_resume(void)
+-{
+- if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- writel_relaxed(l2x0_saved_regs.aux_ctrl,
+- l2x0_base + L2X0_AUX_CTRL);
+- writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
+- }
++ readl_relaxed(base + L310_PREFETCH_CTRL);
+ }
+
+ static void tauros3_resume(void)
+ {
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
++ void __iomem *base = l2x0_base;
++
++ if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+ writel_relaxed(l2x0_saved_regs.aux2_ctrl,
+- l2x0_base + TAUROS3_AUX2_CTRL);
++ base + TAUROS3_AUX2_CTRL);
+ writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+- l2x0_base + L2X0_PREFETCH_CTRL);
+- }
+-
+- l2x0_resume();
+-}
+-
+-static void __init aurora_broadcast_l2_commands(void)
+-{
+- __u32 u;
+- /* Enable Broadcasting of cache commands to L2*/
+- __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
+- u |= AURORA_CTRL_FW; /* Set the FW bit */
+- __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
+- isb();
+-}
+-
+-static void __init aurora_of_setup(const struct device_node *np,
+- u32 *aux_val, u32 *aux_mask)
+-{
+- u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
+- u32 mask = AURORA_ACR_REPLACEMENT_MASK;
++ base + L310_PREFETCH_CTRL);
+
+- of_property_read_u32(np, "cache-id-part",
+- &cache_id_part_number_from_dt);
+-
+- /* Determine and save the write policy */
+- l2_wt_override = of_property_read_bool(np, "wt-override");
+-
+- if (l2_wt_override) {
+- val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
+- mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
++ l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
+ }
+-
+- *aux_val &= ~mask;
+- *aux_val |= val;
+- *aux_mask &= ~mask;
+ }
+
+-static const struct l2x0_of_data pl310_data = {
+- .setup = pl310_of_setup,
+- .save = pl310_save,
+- .outer_cache = {
+- .resume = pl310_resume,
+- .inv_range = l2x0_inv_range,
+- .clean_range = l2x0_clean_range,
+- .flush_range = l2x0_flush_range,
+- .sync = l2x0_cache_sync,
+- .flush_all = l2x0_flush_all,
+- .inv_all = l2x0_inv_all,
+- .disable = l2x0_disable,
+- },
+-};
+-
+-static const struct l2x0_of_data l2x0_data = {
+- .setup = l2x0_of_setup,
+- .save = NULL,
+- .outer_cache = {
+- .resume = l2x0_resume,
+- .inv_range = l2x0_inv_range,
+- .clean_range = l2x0_clean_range,
+- .flush_range = l2x0_flush_range,
+- .sync = l2x0_cache_sync,
+- .flush_all = l2x0_flush_all,
+- .inv_all = l2x0_inv_all,
+- .disable = l2x0_disable,
+- },
+-};
+-
+-static const struct l2x0_of_data aurora_with_outer_data = {
+- .setup = aurora_of_setup,
+- .save = aurora_save,
+- .outer_cache = {
+- .resume = aurora_resume,
+- .inv_range = aurora_inv_range,
+- .clean_range = aurora_clean_range,
+- .flush_range = aurora_flush_range,
+- .sync = l2x0_cache_sync,
+- .flush_all = l2x0_flush_all,
+- .inv_all = l2x0_inv_all,
+- .disable = l2x0_disable,
+- },
+-};
+-
+-static const struct l2x0_of_data aurora_no_outer_data = {
+- .setup = aurora_of_setup,
+- .save = aurora_save,
+- .outer_cache = {
+- .resume = aurora_resume,
+- },
+-};
+-
+-static const struct l2x0_of_data tauros3_data = {
+- .setup = NULL,
++static const struct l2c_init_data of_tauros3_data __initconst = {
++ .type = "Tauros3",
++ .way_size_0 = SZ_8K,
++ .num_lock = 8,
++ .enable = l2c_enable,
+ .save = tauros3_save,
+ /* Tauros3 broadcasts L1 cache operations to L2 */
+ .outer_cache = {
+@@ -936,43 +1451,26 @@
+ },
+ };
+
+-static const struct l2x0_of_data bcm_l2x0_data = {
+- .setup = pl310_of_setup,
+- .save = pl310_save,
+- .outer_cache = {
+- .resume = pl310_resume,
+- .inv_range = bcm_inv_range,
+- .clean_range = bcm_clean_range,
+- .flush_range = bcm_flush_range,
+- .sync = l2x0_cache_sync,
+- .flush_all = l2x0_flush_all,
+- .inv_all = l2x0_inv_all,
+- .disable = l2x0_disable,
+- },
+-};
+-
++#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
+ static const struct of_device_id l2x0_ids[] __initconst = {
+- { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+- { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+- { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
+- { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
+- .data = (void *)&bcm_l2x0_data},
+- { .compatible = "brcm,bcm11351-a2-pl310-cache",
+- .data = (void *)&bcm_l2x0_data},
+- { .compatible = "marvell,aurora-outer-cache",
+- .data = (void *)&aurora_with_outer_data},
+- { .compatible = "marvell,aurora-system-cache",
+- .data = (void *)&aurora_no_outer_data},
+- { .compatible = "marvell,tauros3-cache",
+- .data = (void *)&tauros3_data },
++ L2C_ID("arm,l210-cache", of_l2c210_data),
++ L2C_ID("arm,l220-cache", of_l2c220_data),
++ L2C_ID("arm,pl310-cache", of_l2c310_data),
++ L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
++ L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
++ L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
++ L2C_ID("marvell,tauros3-cache", of_tauros3_data),
++ /* Deprecated IDs */
++ L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
+ {}
+ };
+
+ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
+ {
++ const struct l2c_init_data *data;
+ struct device_node *np;
+- const struct l2x0_of_data *data;
+ struct resource res;
++ u32 cache_id, old_aux;
+
+ np = of_find_matching_node(NULL, l2x0_ids);
+ if (!np)
+@@ -989,23 +1487,29 @@
+
+ data = of_match_node(l2x0_ids, np)->data;
+
+- /* L2 configuration can only be changed if the cache is disabled */
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- if (data->setup)
+- data->setup(np, &aux_val, &aux_mask);
+-
+- /* For aurora cache in no outer mode select the
+- * correct mode using the coprocessor*/
+- if (data == &aurora_no_outer_data)
+- aurora_broadcast_l2_commands();
++ old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++ if (old_aux != ((old_aux & aux_mask) | aux_val)) {
++ pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
++ old_aux, (old_aux & aux_mask) | aux_val);
++ } else if (aux_mask != ~0U && aux_val != 0) {
++ pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
+ }
+
+- if (data->save)
+- data->save();
++ /* All L2 caches are unified, so this property should be specified */
++ if (!of_property_read_bool(np, "cache-unified"))
++ pr_err("L2C: device tree omits to specify unified cache\n");
++
++ /* L2 configuration can only be changed if the cache is disabled */
++ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
++ if (data->of_parse)
++ data->of_parse(np, &aux_val, &aux_mask);
++
++ if (cache_id_part_number_from_dt)
++ cache_id = cache_id_part_number_from_dt;
++ else
++ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+
+- of_init = true;
+- memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+- l2x0_init(l2x0_base, aux_val, aux_mask);
++ __l2c_init(data, aux_val, aux_mask, cache_id);
+
+ return 0;
+ }
+diff -Nur linux-3.14.36/arch/arm/mm/dma-mapping.c linux-openelec/arch/arm/mm/dma-mapping.c
+--- linux-3.14.36/arch/arm/mm/dma-mapping.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mm/dma-mapping.c 2015-07-24 18:03:29.580842002 -0500
+@@ -26,6 +26,7 @@
+ #include <linux/io.h>
+ #include <linux/vmalloc.h>
+ #include <linux/sizes.h>
++#include <linux/cma.h>
+
+ #include <asm/memory.h>
+ #include <asm/highmem.h>
+diff -Nur linux-3.14.36/arch/arm/mm/dma-mapping.c.orig linux-openelec/arch/arm/mm/dma-mapping.c.orig
+--- linux-3.14.36/arch/arm/mm/dma-mapping.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mm/dma-mapping.c.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,2001 @@
++/*
++ * linux/arch/arm/mm/dma-mapping.c
++ *
++ * Copyright (C) 2000-2004 Russell King
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * DMA uncached mapping support.
++ */
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <linux/gfp.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/init.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/dma-contiguous.h>
++#include <linux/highmem.h>
++#include <linux/memblock.h>
++#include <linux/slab.h>
++#include <linux/iommu.h>
++#include <linux/io.h>
++#include <linux/vmalloc.h>
++#include <linux/sizes.h>
++#include <linux/cma.h>
++
++#include <asm/memory.h>
++#include <asm/highmem.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/mach/arch.h>
++#include <asm/dma-iommu.h>
++#include <asm/mach/map.h>
++#include <asm/system_info.h>
++#include <asm/dma-contiguous.h>
++
++#include "mm.h"
++
++/*
++ * The DMA API is built upon the notion of "buffer ownership". A buffer
++ * is either exclusively owned by the CPU (and therefore may be accessed
++ * by it) or exclusively owned by the DMA device. These helper functions
++ * represent the transitions between these two ownership states.
++ *
++ * Note, however, that on later ARMs, this notion does not work due to
++ * speculative prefetches. We model our approach on the assumption that
++ * the CPU does do speculative prefetches, which means we clean caches
++ * before transfers and delay cache invalidation until transfer completion.
++ *
++ */
++static void __dma_page_cpu_to_dev(struct page *, unsigned long,
++ size_t, enum dma_data_direction);
++static void __dma_page_dev_to_cpu(struct page *, unsigned long,
++ size_t, enum dma_data_direction);
++
++/**
++ * arm_dma_map_page - map a portion of a page for streaming DMA
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @page: page that buffer resides in
++ * @offset: offset into page for start of buffer
++ * @size: size of buffer to map
++ * @dir: DMA transfer direction
++ *
++ * Ensure that any data held in the cache is appropriately discarded
++ * or written back.
++ *
++ * The device owns this memory once this call has completed. The CPU
++ * can regain ownership by calling dma_unmap_page().
++ */
++static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
++}
++
++static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
++}
++
++/**
++ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @handle: DMA address of buffer
++ * @size: size of buffer (same as passed to dma_map_page)
++ * @dir: DMA transfer direction (same as passed to dma_map_page)
++ *
++ * Unmap a page streaming mode DMA translation. The handle and size
++ * must match what was provided in the previous dma_map_page() call.
++ * All other usages are undefined.
++ *
++ * After this call, reads by the CPU to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
++ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
++ handle & ~PAGE_MASK, size, dir);
++}
++
++static void arm_dma_sync_single_for_cpu(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ unsigned int offset = handle & (PAGE_SIZE - 1);
++ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
++ __dma_page_dev_to_cpu(page, offset, size, dir);
++}
++
++static void arm_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ unsigned int offset = handle & (PAGE_SIZE - 1);
++ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++}
++
++struct dma_map_ops arm_dma_ops = {
++ .alloc = arm_dma_alloc,
++ .free = arm_dma_free,
++ .mmap = arm_dma_mmap,
++ .get_sgtable = arm_dma_get_sgtable,
++ .map_page = arm_dma_map_page,
++ .unmap_page = arm_dma_unmap_page,
++ .map_sg = arm_dma_map_sg,
++ .unmap_sg = arm_dma_unmap_sg,
++ .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
++ .sync_single_for_device = arm_dma_sync_single_for_device,
++ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
++ .sync_sg_for_device = arm_dma_sync_sg_for_device,
++ .set_dma_mask = arm_dma_set_mask,
++};
++EXPORT_SYMBOL(arm_dma_ops);
++
++static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
++static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t handle, struct dma_attrs *attrs);
++
++struct dma_map_ops arm_coherent_dma_ops = {
++ .alloc = arm_coherent_dma_alloc,
++ .free = arm_coherent_dma_free,
++ .mmap = arm_dma_mmap,
++ .get_sgtable = arm_dma_get_sgtable,
++ .map_page = arm_coherent_dma_map_page,
++ .map_sg = arm_dma_map_sg,
++ .set_dma_mask = arm_dma_set_mask,
++};
++EXPORT_SYMBOL(arm_coherent_dma_ops);
++
++static int __dma_supported(struct device *dev, u64 mask, bool warn)
++{
++ unsigned long max_dma_pfn;
++
++ /*
++ * If the mask allows for more memory than we can address,
++ * and we actually have that much memory, then we must
++ * indicate that DMA to this device is not supported.
++ */
++ if (sizeof(mask) != sizeof(dma_addr_t) &&
++ mask > (dma_addr_t)~0 &&
++ dma_to_pfn(dev, ~0) < max_pfn) {
++ if (warn) {
++ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
++ mask);
++ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
++ }
++ return 0;
++ }
++
++ max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
++
++ /*
++ * Translate the device's DMA mask to a PFN limit. This
++ * PFN number includes the page which we can DMA to.
++ */
++ if (dma_to_pfn(dev, mask) < max_dma_pfn) {
++ if (warn)
++ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
++ mask,
++ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
++ max_dma_pfn + 1);
++ return 0;
++ }
++
++ return 1;
++}
++
++static u64 get_coherent_dma_mask(struct device *dev)
++{
++ u64 mask = (u64)DMA_BIT_MASK(32);
++
++ if (dev) {
++ mask = dev->coherent_dma_mask;
++
++ /*
++ * Sanity check the DMA mask - it must be non-zero, and
++ * must be able to be satisfied by a DMA allocation.
++ */
++ if (mask == 0) {
++ dev_warn(dev, "coherent DMA mask is unset\n");
++ return 0;
++ }
++
++ if (!__dma_supported(dev, mask, true))
++ return 0;
++ }
++
++ return mask;
++}
++
++static void __dma_clear_buffer(struct page *page, size_t size)
++{
++ /*
++ * Ensure that the allocated pages are zeroed, and that any data
++ * lurking in the kernel direct-mapped region is invalidated.
++ */
++ if (PageHighMem(page)) {
++ phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
++ phys_addr_t end = base + size;
++ while (size > 0) {
++ void *ptr = kmap_atomic(page);
++ memset(ptr, 0, PAGE_SIZE);
++ dmac_flush_range(ptr, ptr + PAGE_SIZE);
++ kunmap_atomic(ptr);
++ page++;
++ size -= PAGE_SIZE;
++ }
++ outer_flush_range(base, end);
++ } else {
++ void *ptr = page_address(page);
++ memset(ptr, 0, size);
++ dmac_flush_range(ptr, ptr + size);
++ outer_flush_range(__pa(ptr), __pa(ptr) + size);
++ }
++}
++
++/*
++ * Allocate a DMA buffer for 'dev' of size 'size' using the
++ * specified gfp mask. Note that 'size' must be page aligned.
++ */
++static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
++{
++ unsigned long order = get_order(size);
++ struct page *page, *p, *e;
++
++ page = alloc_pages(gfp, order);
++ if (!page)
++ return NULL;
++
++ /*
++ * Now split the huge page and free the excess pages
++ */
++ split_page(page, order);
++ for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
++ __free_page(p);
++
++ __dma_clear_buffer(page, size);
++
++ return page;
++}
++
++/*
++ * Free a DMA buffer. 'size' must be page aligned.
++ */
++static void __dma_free_buffer(struct page *page, size_t size)
++{
++ struct page *e = page + (size >> PAGE_SHIFT);
++
++ while (page < e) {
++ __free_page(page);
++ page++;
++ }
++}
++
++#ifdef CONFIG_MMU
++#ifdef CONFIG_HUGETLB_PAGE
++#warning ARM Coherent DMA allocator does not (yet) support huge TLB
++#endif
++
++static void *__alloc_from_contiguous(struct device *dev, size_t size,
++ pgprot_t prot, struct page **ret_page,
++ const void *caller);
++
++static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
++ pgprot_t prot, struct page **ret_page,
++ const void *caller);
++
++static void *
++__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
++ const void *caller)
++{
++ struct vm_struct *area;
++ unsigned long addr;
++
++ /*
++ * DMA allocation can be mapped to user space, so lets
++ * set VM_USERMAP flags too.
++ */
++ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
++ caller);
++ if (!area)
++ return NULL;
++ addr = (unsigned long)area->addr;
++ area->phys_addr = __pfn_to_phys(page_to_pfn(page));
++
++ if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
++ vunmap((void *)addr);
++ return NULL;
++ }
++ return (void *)addr;
++}
++
++static void __dma_free_remap(void *cpu_addr, size_t size)
++{
++ unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
++ struct vm_struct *area = find_vm_area(cpu_addr);
++ if (!area || (area->flags & flags) != flags) {
++ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
++ return;
++ }
++ unmap_kernel_range((unsigned long)cpu_addr, size);
++ vunmap(cpu_addr);
++}
++
++#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
++
++struct dma_pool {
++ size_t size;
++ spinlock_t lock;
++ unsigned long *bitmap;
++ unsigned long nr_pages;
++ void *vaddr;
++ struct page **pages;
++};
++
++static struct dma_pool atomic_pool = {
++ .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
++};
++
++static int __init early_coherent_pool(char *p)
++{
++ atomic_pool.size = memparse(p, &p);
++ return 0;
++}
++early_param("coherent_pool", early_coherent_pool);
++
++void __init init_dma_coherent_pool_size(unsigned long size)
++{
++ /*
++ * Catch any attempt to set the pool size too late.
++ */
++ BUG_ON(atomic_pool.vaddr);
++
++ /*
++ * Set architecture specific coherent pool size only if
++ * it has not been changed by kernel command line parameter.
++ */
++ if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
++ atomic_pool.size = size;
++}
++
++/*
++ * Initialise the coherent pool for atomic allocations.
++ */
++static int __init atomic_pool_init(void)
++{
++ struct dma_pool *pool = &atomic_pool;
++ pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
++ gfp_t gfp = GFP_KERNEL | GFP_DMA;
++ unsigned long nr_pages = pool->size >> PAGE_SHIFT;
++ unsigned long *bitmap;
++ struct page *page;
++ struct page **pages;
++ void *ptr;
++ int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
++
++ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++ if (!bitmap)
++ goto no_bitmap;
++
++ pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
++ if (!pages)
++ goto no_pages;
++
++ if (IS_ENABLED(CONFIG_DMA_CMA))
++ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
++ atomic_pool_init);
++ else
++ ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
++ atomic_pool_init);
++ if (ptr) {
++ int i;
++
++ for (i = 0; i < nr_pages; i++)
++ pages[i] = page + i;
++
++ spin_lock_init(&pool->lock);
++ pool->vaddr = ptr;
++ pool->pages = pages;
++ pool->bitmap = bitmap;
++ pool->nr_pages = nr_pages;
++ pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
++ (unsigned)pool->size / 1024);
++ return 0;
++ }
++
++ kfree(pages);
++no_pages:
++ kfree(bitmap);
++no_bitmap:
++ pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
++ (unsigned)pool->size / 1024);
++ return -ENOMEM;
++}
++/*
++ * CMA is activated by core_initcall, so we must be called after it.
++ */
++postcore_initcall(atomic_pool_init);
++
++struct dma_contig_early_reserve {
++ phys_addr_t base;
++ unsigned long size;
++};
++
++static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
++
++static int dma_mmu_remap_num __initdata;
++
++void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
++{
++ dma_mmu_remap[dma_mmu_remap_num].base = base;
++ dma_mmu_remap[dma_mmu_remap_num].size = size;
++ dma_mmu_remap_num++;
++}
++
++void __init dma_contiguous_remap(void)
++{
++ int i;
++ for (i = 0; i < dma_mmu_remap_num; i++) {
++ phys_addr_t start = dma_mmu_remap[i].base;
++ phys_addr_t end = start + dma_mmu_remap[i].size;
++ struct map_desc map;
++ unsigned long addr;
++
++ if (end > arm_lowmem_limit)
++ end = arm_lowmem_limit;
++ if (start >= end)
++ continue;
++
++ map.pfn = __phys_to_pfn(start);
++ map.virtual = __phys_to_virt(start);
++ map.length = end - start;
++ map.type = MT_MEMORY_DMA_READY;
++
++ /*
++ * Clear previous low-memory mapping
++ */
++ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
++ addr += PMD_SIZE)
++ pmd_clear(pmd_off_k(addr));
++
++ iotable_init(&map, 1);
++ }
++}
++
++static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
++ void *data)
++{
++ struct page *page = virt_to_page(addr);
++ pgprot_t prot = *(pgprot_t *)data;
++
++ set_pte_ext(pte, mk_pte(page, prot), 0);
++ return 0;
++}
++
++static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
++{
++ unsigned long start = (unsigned long) page_address(page);
++ unsigned end = start + size;
++
++ apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
++ flush_tlb_kernel_range(start, end);
++}
++
++static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
++ pgprot_t prot, struct page **ret_page,
++ const void *caller)
++{
++ struct page *page;
++ void *ptr;
++ page = __dma_alloc_buffer(dev, size, gfp);
++ if (!page)
++ return NULL;
++
++ ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
++ if (!ptr) {
++ __dma_free_buffer(page, size);
++ return NULL;
++ }
++
++ *ret_page = page;
++ return ptr;
++}
++
++static void *__alloc_from_pool(size_t size, struct page **ret_page)
++{
++ struct dma_pool *pool = &atomic_pool;
++ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ unsigned int pageno;
++ unsigned long flags;
++ void *ptr = NULL;
++ unsigned long align_mask;
++
++ if (!pool->vaddr) {
++ WARN(1, "coherent pool not initialised!\n");
++ return NULL;
++ }
++
++ /*
++ * Align the region allocation - allocations from pool are rather
++ * small, so align them to their order in pages, minimum is a page
++ * size. This helps reduce fragmentation of the DMA space.
++ */
++ align_mask = (1 << get_order(size)) - 1;
++
++ spin_lock_irqsave(&pool->lock, flags);
++ pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
++ 0, count, align_mask);
++ if (pageno < pool->nr_pages) {
++ bitmap_set(pool->bitmap, pageno, count);
++ ptr = pool->vaddr + PAGE_SIZE * pageno;
++ *ret_page = pool->pages[pageno];
++ } else {
++ pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
++ "Please increase it with coherent_pool= kernel parameter!\n",
++ (unsigned)pool->size / 1024);
++ }
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ return ptr;
++}
++
++static bool __in_atomic_pool(void *start, size_t size)
++{
++ struct dma_pool *pool = &atomic_pool;
++ void *end = start + size;
++ void *pool_start = pool->vaddr;
++ void *pool_end = pool->vaddr + pool->size;
++
++ if (start < pool_start || start >= pool_end)
++ return false;
++
++ if (end <= pool_end)
++ return true;
++
++ WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
++ start, end - 1, pool_start, pool_end - 1);
++
++ return false;
++}
++
++static int __free_from_pool(void *start, size_t size)
++{
++ struct dma_pool *pool = &atomic_pool;
++ unsigned long pageno, count;
++ unsigned long flags;
++
++ if (!__in_atomic_pool(start, size))
++ return 0;
++
++ pageno = (start - pool->vaddr) >> PAGE_SHIFT;
++ count = size >> PAGE_SHIFT;
++
++ spin_lock_irqsave(&pool->lock, flags);
++ bitmap_clear(pool->bitmap, pageno, count);
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ return 1;
++}
++
++static void *__alloc_from_contiguous(struct device *dev, size_t size,
++ pgprot_t prot, struct page **ret_page,
++ const void *caller)
++{
++ unsigned long order = get_order(size);
++ size_t count = size >> PAGE_SHIFT;
++ struct page *page;
++ void *ptr;
++
++ page = dma_alloc_from_contiguous(dev, count, order);
++ if (!page)
++ return NULL;
++
++ __dma_clear_buffer(page, size);
++
++ if (PageHighMem(page)) {
++ ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
++ if (!ptr) {
++ dma_release_from_contiguous(dev, page, count);
++ return NULL;
++ }
++ } else {
++ __dma_remap(page, size, prot);
++ ptr = page_address(page);
++ }
++ *ret_page = page;
++ return ptr;
++}
++
++static void __free_from_contiguous(struct device *dev, struct page *page,
++ void *cpu_addr, size_t size)
++{
++ if (PageHighMem(page))
++ __dma_free_remap(cpu_addr, size);
++ else
++ __dma_remap(page, size, PAGE_KERNEL);
++ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
++}
++
++static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
++{
++ prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
++ pgprot_writecombine(prot) :
++ pgprot_dmacoherent(prot);
++ return prot;
++}
++
++#define nommu() 0
++
++#else /* !CONFIG_MMU */
++
++#define nommu() 1
++
++#define __get_dma_pgprot(attrs, prot) __pgprot(0)
++#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
++#define __alloc_from_pool(size, ret_page) NULL
++#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
++#define __free_from_pool(cpu_addr, size) 0
++#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
++#define __dma_free_remap(cpu_addr, size) do { } while (0)
++
++#endif /* CONFIG_MMU */
++
++static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
++ struct page **ret_page)
++{
++ struct page *page;
++ page = __dma_alloc_buffer(dev, size, gfp);
++ if (!page)
++ return NULL;
++
++ *ret_page = page;
++ return page_address(page);
++}
++
++
++
++static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
++{
++ u64 mask = get_coherent_dma_mask(dev);
++ struct page *page = NULL;
++ void *addr;
++
++#ifdef CONFIG_DMA_API_DEBUG
++ u64 limit = (mask + 1) & ~mask;
++ if (limit && size >= limit) {
++ dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
++ size, mask);
++ return NULL;
++ }
++#endif
++
++ if (!mask)
++ return NULL;
++
++ if (mask < 0xffffffffULL)
++ gfp |= GFP_DMA;
++
++ /*
++ * Following is a work-around (a.k.a. hack) to prevent pages
++ * with __GFP_COMP being passed to split_page() which cannot
++ * handle them. The real problem is that this flag probably
++ * should be 0 on ARM as it is not supported on this
++ * platform; see CONFIG_HUGETLBFS.
++ */
++ gfp &= ~(__GFP_COMP);
++
++ *handle = DMA_ERROR_CODE;
++ size = PAGE_ALIGN(size);
++
++ if (is_coherent || nommu())
++ addr = __alloc_simple_buffer(dev, size, gfp, &page);
++ else if (!(gfp & __GFP_WAIT))
++ addr = __alloc_from_pool(size, &page);
++ else if (!IS_ENABLED(CONFIG_DMA_CMA))
++ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
++ else
++ addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
++
++ if (addr)
++ *handle = pfn_to_dma(dev, page_to_pfn(page));
++
++ return addr;
++}
++
++/*
++ * Allocate DMA-coherent memory space and return both the kernel remapped
++ * virtual and bus address for that space.
++ */
++void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t gfp, struct dma_attrs *attrs)
++{
++ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
++ void *memory;
++
++ if (dma_alloc_from_coherent(dev, size, handle, &memory))
++ return memory;
++
++ return __dma_alloc(dev, size, handle, gfp, prot, false,
++ __builtin_return_address(0));
++}
++
++static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
++{
++ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
++ void *memory;
++
++ if (dma_alloc_from_coherent(dev, size, handle, &memory))
++ return memory;
++
++ return __dma_alloc(dev, size, handle, gfp, prot, true,
++ __builtin_return_address(0));
++}
++
++/*
++ * Create userspace mapping for the DMA-coherent memory.
++ */
++int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
++{
++ int ret = -ENXIO;
++#ifdef CONFIG_MMU
++ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ unsigned long pfn = dma_to_pfn(dev, dma_addr);
++ unsigned long off = vma->vm_pgoff;
++
++ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
++
++ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
++ return ret;
++
++ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
++ ret = remap_pfn_range(vma, vma->vm_start,
++ pfn + off,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++ }
++#endif /* CONFIG_MMU */
++
++ return ret;
++}
++
++/*
++ * Free a buffer as defined by the above mapping.
++ */
++static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t handle, struct dma_attrs *attrs,
++ bool is_coherent)
++{
++ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
++
++ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
++ return;
++
++ size = PAGE_ALIGN(size);
++
++ if (is_coherent || nommu()) {
++ __dma_free_buffer(page, size);
++ } else if (__free_from_pool(cpu_addr, size)) {
++ return;
++ } else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
++ __dma_free_remap(cpu_addr, size);
++ __dma_free_buffer(page, size);
++ } else {
++ /*
++ * Non-atomic allocations cannot be freed with IRQs disabled
++ */
++ WARN_ON(irqs_disabled());
++ __free_from_contiguous(dev, page, cpu_addr, size);
++ }
++}
++
++void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t handle, struct dma_attrs *attrs)
++{
++ __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
++}
++
++static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t handle, struct dma_attrs *attrs)
++{
++ __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
++}
++
++int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
++ void *cpu_addr, dma_addr_t handle, size_t size,
++ struct dma_attrs *attrs)
++{
++ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
++ int ret;
++
++ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
++ if (unlikely(ret))
++ return ret;
++
++ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
++ return 0;
++}
++
++static void dma_cache_maint_page(struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction dir,
++ void (*op)(const void *, size_t, int))
++{
++ unsigned long pfn;
++ size_t left = size;
++
++ pfn = page_to_pfn(page) + offset / PAGE_SIZE;
++ offset %= PAGE_SIZE;
++
++ /*
++ * A single sg entry may refer to multiple physically contiguous
++ * pages. But we still need to process highmem pages individually.
++ * If highmem is not configured then the bulk of this loop gets
++ * optimized out.
++ */
++ do {
++ size_t len = left;
++ void *vaddr;
++
++ page = pfn_to_page(pfn);
++
++ if (PageHighMem(page)) {
++ if (len + offset > PAGE_SIZE)
++ len = PAGE_SIZE - offset;
++
++ if (cache_is_vipt_nonaliasing()) {
++ vaddr = kmap_atomic(page);
++ op(vaddr + offset, len, dir);
++ kunmap_atomic(vaddr);
++ } else {
++ vaddr = kmap_high_get(page);
++ if (vaddr) {
++ op(vaddr + offset, len, dir);
++ kunmap_high(page);
++ }
++ }
++ } else {
++ vaddr = page_address(page) + offset;
++ op(vaddr, len, dir);
++ }
++ offset = 0;
++ pfn++;
++ left -= len;
++ } while (left);
++}
++
++/*
++ * Make an area consistent for devices.
++ * Note: Drivers should NOT use this function directly, as it will break
++ * platforms with CONFIG_DMABOUNCE.
++ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
++ */
++static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
++ size_t size, enum dma_data_direction dir)
++{
++ unsigned long paddr;
++
++ dma_cache_maint_page(page, off, size, dir, dmac_map_area);
++
++ paddr = page_to_phys(page) + off;
++ if (dir == DMA_FROM_DEVICE) {
++ outer_inv_range(paddr, paddr + size);
++ } else {
++ outer_clean_range(paddr, paddr + size);
++ }
++ /* FIXME: non-speculating: flush on bidirectional mappings? */
++}
++
++static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
++ size_t size, enum dma_data_direction dir)
++{
++ unsigned long paddr = page_to_phys(page) + off;
++
++ /* FIXME: non-speculating: not required */
++ /* don't bother invalidating if DMA to device */
++ if (dir != DMA_TO_DEVICE)
++ outer_inv_range(paddr, paddr + size);
++
++ dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
++
++ /*
++ * Mark the D-cache clean for these pages to avoid extra flushing.
++ */
++ if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
++ unsigned long pfn;
++ size_t left = size;
++
++ pfn = page_to_pfn(page) + off / PAGE_SIZE;
++ off %= PAGE_SIZE;
++ if (off) {
++ pfn++;
++ left -= PAGE_SIZE - off;
++ }
++ while (left >= PAGE_SIZE) {
++ page = pfn_to_page(pfn++);
++ set_bit(PG_dcache_clean, &page->flags);
++ left -= PAGE_SIZE;
++ }
++ }
++}
++
++/**
++ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @sg: list of buffers
++ * @nents: number of buffers to map
++ * @dir: DMA transfer direction
++ *
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the dma_map_single interface.
++ * Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}.
++ *
++ * Device ownership issues as mentioned for dma_map_single are the same
++ * here.
++ */
++int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ struct scatterlist *s;
++ int i, j;
++
++ for_each_sg(sg, s, nents, i) {
++#ifdef CONFIG_NEED_SG_DMA_LENGTH
++ s->dma_length = s->length;
++#endif
++ s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
++ s->length, dir, attrs);
++ if (dma_mapping_error(dev, s->dma_address))
++ goto bad_mapping;
++ }
++ return nents;
++
++ bad_mapping:
++ for_each_sg(sg, s, i, j)
++ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
++ return 0;
++}
++
++/**
++ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @sg: list of buffers
++ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ *
++ * Unmap a set of streaming mode DMA translations. Again, CPU access
++ * rules concerning calls here are the same as for dma_unmap_single().
++ */
++void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ struct scatterlist *s;
++
++ int i;
++
++ for_each_sg(sg, s, nents, i)
++ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
++}
++
++/**
++ * arm_dma_sync_sg_for_cpu
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @sg: list of buffers
++ * @nents: number of buffers to map (returned from dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ */
++void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i)
++ ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
++ dir);
++}
++
++/**
++ * arm_dma_sync_sg_for_device
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @sg: list of buffers
++ * @nents: number of buffers to map (returned from dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ */
++void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i)
++ ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
++ dir);
++}
++
++/*
++ * Return whether the given device DMA address mask can be supported
++ * properly. For example, if your device can only drive the low 24-bits
++ * during bus mastering, then you would pass 0x00ffffff as the mask
++ * to this function.
++ */
++int dma_supported(struct device *dev, u64 mask)
++{
++ return __dma_supported(dev, mask, false);
++}
++EXPORT_SYMBOL(dma_supported);
++
++int arm_dma_set_mask(struct device *dev, u64 dma_mask)
++{
++ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
++ return -EIO;
++
++ *dev->dma_mask = dma_mask;
++
++ return 0;
++}
++
++#define PREALLOC_DMA_DEBUG_ENTRIES 4096
++
++static int __init dma_debug_do_init(void)
++{
++ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
++ return 0;
++}
++fs_initcall(dma_debug_do_init);
++
++#ifdef CONFIG_ARM_DMA_USE_IOMMU
++
++/* IOMMU */
++
++static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
++ size_t size)
++{
++ unsigned int order = get_order(size);
++ unsigned int align = 0;
++ unsigned int count, start;
++ unsigned long flags;
++
++ if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
++ order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
++
++ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
++ (1 << mapping->order) - 1) >> mapping->order;
++
++ if (order > mapping->order)
++ align = (1 << (order - mapping->order)) - 1;
++
++ spin_lock_irqsave(&mapping->lock, flags);
++ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
++ count, align);
++ if (start > mapping->bits) {
++ spin_unlock_irqrestore(&mapping->lock, flags);
++ return DMA_ERROR_CODE;
++ }
++
++ bitmap_set(mapping->bitmap, start, count);
++ spin_unlock_irqrestore(&mapping->lock, flags);
++
++ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
++}
++
++static inline void __free_iova(struct dma_iommu_mapping *mapping,
++ dma_addr_t addr, size_t size)
++{
++ unsigned int start = (addr - mapping->base) >>
++ (mapping->order + PAGE_SHIFT);
++ unsigned int count = ((size >> PAGE_SHIFT) +
++ (1 << mapping->order) - 1) >> mapping->order;
++ unsigned long flags;
++
++ spin_lock_irqsave(&mapping->lock, flags);
++ bitmap_clear(mapping->bitmap, start, count);
++ spin_unlock_irqrestore(&mapping->lock, flags);
++}
++
++static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
++ gfp_t gfp, struct dma_attrs *attrs)
++{
++ struct page **pages;
++ int count = size >> PAGE_SHIFT;
++ int array_size = count * sizeof(struct page *);
++ int i = 0;
++
++ if (array_size <= PAGE_SIZE)
++ pages = kzalloc(array_size, gfp);
++ else
++ pages = vzalloc(array_size);
++ if (!pages)
++ return NULL;
++
++ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
++ {
++ unsigned long order = get_order(size);
++ struct page *page;
++
++ page = dma_alloc_from_contiguous(dev, count, order);
++ if (!page)
++ goto error;
++
++ __dma_clear_buffer(page, size);
++
++ for (i = 0; i < count; i++)
++ pages[i] = page + i;
++
++ return pages;
++ }
++
++ /*
++ * IOMMU can map any pages, so himem can also be used here
++ */
++ gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
++
++ while (count) {
++ int j, order = __fls(count);
++
++ pages[i] = alloc_pages(gfp, order);
++ while (!pages[i] && order)
++ pages[i] = alloc_pages(gfp, --order);
++ if (!pages[i])
++ goto error;
++
++ if (order) {
++ split_page(pages[i], order);
++ j = 1 << order;
++ while (--j)
++ pages[i + j] = pages[i] + j;
++ }
++
++ __dma_clear_buffer(pages[i], PAGE_SIZE << order);
++ i += 1 << order;
++ count -= 1 << order;
++ }
++
++ return pages;
++error:
++ while (i--)
++ if (pages[i])
++ __free_pages(pages[i], 0);
++ if (array_size <= PAGE_SIZE)
++ kfree(pages);
++ else
++ vfree(pages);
++ return NULL;
++}
++
++static int __iommu_free_buffer(struct device *dev, struct page **pages,
++ size_t size, struct dma_attrs *attrs)
++{
++ int count = size >> PAGE_SHIFT;
++ int array_size = count * sizeof(struct page *);
++ int i;
++
++ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
++ dma_release_from_contiguous(dev, pages[0], count);
++ } else {
++ for (i = 0; i < count; i++)
++ if (pages[i])
++ __free_pages(pages[i], 0);
++ }
++
++ if (array_size <= PAGE_SIZE)
++ kfree(pages);
++ else
++ vfree(pages);
++ return 0;
++}
++
++/*
++ * Create a CPU mapping for a specified pages
++ */
++static void *
++__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
++ const void *caller)
++{
++ unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ struct vm_struct *area;
++ unsigned long p;
++
++ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
++ caller);
++ if (!area)
++ return NULL;
++
++ area->pages = pages;
++ area->nr_pages = nr_pages;
++ p = (unsigned long)area->addr;
++
++ for (i = 0; i < nr_pages; i++) {
++ phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
++ if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
++ goto err;
++ p += PAGE_SIZE;
++ }
++ return area->addr;
++err:
++ unmap_kernel_range((unsigned long)area->addr, size);
++ vunmap(area->addr);
++ return NULL;
++}
++
++/*
++ * Create a mapping in device IO address space for specified pages
++ */
++static dma_addr_t
++__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ dma_addr_t dma_addr, iova;
++ int i, ret = DMA_ERROR_CODE;
++
++ dma_addr = __alloc_iova(mapping, size);
++ if (dma_addr == DMA_ERROR_CODE)
++ return dma_addr;
++
++ iova = dma_addr;
++ for (i = 0; i < count; ) {
++ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
++ phys_addr_t phys = page_to_phys(pages[i]);
++ unsigned int len, j;
++
++ for (j = i + 1; j < count; j++, next_pfn++)
++ if (page_to_pfn(pages[j]) != next_pfn)
++ break;
++
++ len = (j - i) << PAGE_SHIFT;
++ ret = iommu_map(mapping->domain, iova, phys, len,
++ IOMMU_READ|IOMMU_WRITE);
++ if (ret < 0)
++ goto fail;
++ iova += len;
++ i = j;
++ }
++ return dma_addr;
++fail:
++ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
++ __free_iova(mapping, dma_addr, size);
++ return DMA_ERROR_CODE;
++}
++
++static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++
++ /*
++ * add optional in-page offset from iova to size and align
++ * result to page size
++ */
++ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
++ iova &= PAGE_MASK;
++
++ iommu_unmap(mapping->domain, iova, size);
++ __free_iova(mapping, iova, size);
++ return 0;
++}
++
++static struct page **__atomic_get_pages(void *addr)
++{
++ struct dma_pool *pool = &atomic_pool;
++ struct page **pages = pool->pages;
++ int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
++
++ return pages + offs;
++}
++
++static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
++{
++ struct vm_struct *area;
++
++ if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
++ return __atomic_get_pages(cpu_addr);
++
++ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
++ return cpu_addr;
++
++ area = find_vm_area(cpu_addr);
++ if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
++ return area->pages;
++ return NULL;
++}
++
++static void *__iommu_alloc_atomic(struct device *dev, size_t size,
++ dma_addr_t *handle)
++{
++ struct page *page;
++ void *addr;
++
++ addr = __alloc_from_pool(size, &page);
++ if (!addr)
++ return NULL;
++
++ *handle = __iommu_create_mapping(dev, &page, size);
++ if (*handle == DMA_ERROR_CODE)
++ goto err_mapping;
++
++ return addr;
++
++err_mapping:
++ __free_from_pool(addr, size);
++ return NULL;
++}
++
++static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
++ dma_addr_t handle, size_t size)
++{
++ __iommu_remove_mapping(dev, handle, size);
++ __free_from_pool(cpu_addr, size);
++}
++
++static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
++ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
++{
++ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
++ struct page **pages;
++ void *addr = NULL;
++
++ *handle = DMA_ERROR_CODE;
++ size = PAGE_ALIGN(size);
++
++ if (!(gfp & __GFP_WAIT))
++ return __iommu_alloc_atomic(dev, size, handle);
++
++ /*
++ * Following is a work-around (a.k.a. hack) to prevent pages
++ * with __GFP_COMP being passed to split_page() which cannot
++ * handle them. The real problem is that this flag probably
++ * should be 0 on ARM as it is not supported on this
++ * platform; see CONFIG_HUGETLBFS.
++ */
++ gfp &= ~(__GFP_COMP);
++
++ pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
++ if (!pages)
++ return NULL;
++
++ *handle = __iommu_create_mapping(dev, pages, size);
++ if (*handle == DMA_ERROR_CODE)
++ goto err_buffer;
++
++ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
++ return pages;
++
++ addr = __iommu_alloc_remap(pages, size, gfp, prot,
++ __builtin_return_address(0));
++ if (!addr)
++ goto err_mapping;
++
++ return addr;
++
++err_mapping:
++ __iommu_remove_mapping(dev, *handle, size);
++err_buffer:
++ __iommu_free_buffer(dev, pages, size, attrs);
++ return NULL;
++}
++
++static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
++{
++ unsigned long uaddr = vma->vm_start;
++ unsigned long usize = vma->vm_end - vma->vm_start;
++ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
++
++ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
++
++ if (!pages)
++ return -ENXIO;
++
++ do {
++ int ret = vm_insert_page(vma, uaddr, *pages++);
++ if (ret) {
++ pr_err("Remapping memory failed: %d\n", ret);
++ return ret;
++ }
++ uaddr += PAGE_SIZE;
++ usize -= PAGE_SIZE;
++ } while (usize > 0);
++
++ return 0;
++}
++
++/*
++ * free a page as defined by the above mapping.
++ * Must not be called with IRQs disabled.
++ */
++void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t handle, struct dma_attrs *attrs)
++{
++ struct page **pages;
++ size = PAGE_ALIGN(size);
++
++ if (__in_atomic_pool(cpu_addr, size)) {
++ __iommu_free_atomic(dev, cpu_addr, handle, size);
++ return;
++ }
++
++ pages = __iommu_get_pages(cpu_addr, attrs);
++ if (!pages) {
++ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
++ return;
++ }
++
++ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
++ unmap_kernel_range((unsigned long)cpu_addr, size);
++ vunmap(cpu_addr);
++ }
++
++ __iommu_remove_mapping(dev, handle, size);
++ __iommu_free_buffer(dev, pages, size, attrs);
++}
++
++static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
++ void *cpu_addr, dma_addr_t dma_addr,
++ size_t size, struct dma_attrs *attrs)
++{
++ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
++
++ if (!pages)
++ return -ENXIO;
++
++ return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
++ GFP_KERNEL);
++}
++
++static int __dma_direction_to_prot(enum dma_data_direction dir)
++{
++ int prot;
++
++ switch (dir) {
++ case DMA_BIDIRECTIONAL:
++ prot = IOMMU_READ | IOMMU_WRITE;
++ break;
++ case DMA_TO_DEVICE:
++ prot = IOMMU_READ;
++ break;
++ case DMA_FROM_DEVICE:
++ prot = IOMMU_WRITE;
++ break;
++ default:
++ prot = 0;
++ }
++
++ return prot;
++}
++
++/*
++ * Map a part of the scatter-gather list into contiguous io address space
++ */
++static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
++ size_t size, dma_addr_t *handle,
++ enum dma_data_direction dir, struct dma_attrs *attrs,
++ bool is_coherent)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova, iova_base;
++ int ret = 0;
++ unsigned int count;
++ struct scatterlist *s;
++ int prot;
++
++ size = PAGE_ALIGN(size);
++ *handle = DMA_ERROR_CODE;
++
++ iova_base = iova = __alloc_iova(mapping, size);
++ if (iova == DMA_ERROR_CODE)
++ return -ENOMEM;
++
++ for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
++ phys_addr_t phys = page_to_phys(sg_page(s));
++ unsigned int len = PAGE_ALIGN(s->offset + s->length);
++
++ if (!is_coherent &&
++ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
++ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
++
++ prot = __dma_direction_to_prot(dir);
++
++ ret = iommu_map(mapping->domain, iova, phys, len, prot);
++ if (ret < 0)
++ goto fail;
++ count += len >> PAGE_SHIFT;
++ iova += len;
++ }
++ *handle = iova_base;
++
++ return 0;
++fail:
++ iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
++ __free_iova(mapping, iova_base, size);
++ return ret;
++}
++
++static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
++ enum dma_data_direction dir, struct dma_attrs *attrs,
++ bool is_coherent)
++{
++ struct scatterlist *s = sg, *dma = sg, *start = sg;
++ int i, count = 0;
++ unsigned int offset = s->offset;
++ unsigned int size = s->offset + s->length;
++ unsigned int max = dma_get_max_seg_size(dev);
++
++ for (i = 1; i < nents; i++) {
++ s = sg_next(s);
++
++ s->dma_address = DMA_ERROR_CODE;
++ s->dma_length = 0;
++
++ if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
++ if (__map_sg_chunk(dev, start, size, &dma->dma_address,
++ dir, attrs, is_coherent) < 0)
++ goto bad_mapping;
++
++ dma->dma_address += offset;
++ dma->dma_length = size - offset;
++
++ size = offset = s->offset;
++ start = s;
++ dma = sg_next(dma);
++ count += 1;
++ }
++ size += s->length;
++ }
++ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
++ is_coherent) < 0)
++ goto bad_mapping;
++
++ dma->dma_address += offset;
++ dma->dma_length = size - offset;
++
++ return count+1;
++
++bad_mapping:
++ for_each_sg(sg, s, count, i)
++ __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
++ return 0;
++}
++
++/**
++ * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to map
++ * @dir: DMA transfer direction
++ *
++ * Map a set of i/o coherent buffers described by scatterlist in streaming
++ * mode for DMA. The scatter gather list elements are merged together (if
++ * possible) and tagged with the appropriate dma address and length. They are
++ * obtained via sg_dma_{address,length}.
++ */
++int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
++}
++
++/**
++ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to map
++ * @dir: DMA transfer direction
++ *
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * The scatter gather list elements are merged together (if possible) and
++ * tagged with the appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}.
++ */
++int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
++}
++
++static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
++ bool is_coherent)
++{
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i) {
++ if (sg_dma_len(s))
++ __iommu_remove_mapping(dev, sg_dma_address(s),
++ sg_dma_len(s));
++ if (!is_coherent &&
++ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
++ __dma_page_dev_to_cpu(sg_page(s), s->offset,
++ s->length, dir);
++ }
++}
++
++/**
++ * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ *
++ * Unmap a set of streaming mode DMA translations. Again, CPU access
++ * rules concerning calls here are the same as for dma_unmap_single().
++ */
++void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
++}
++
++/**
++ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ *
++ * Unmap a set of streaming mode DMA translations. Again, CPU access
++ * rules concerning calls here are the same as for dma_unmap_single().
++ */
++void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
++}
++
++/**
++ * arm_iommu_sync_sg_for_cpu
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to map (returned from dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ */
++void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir)
++{
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i)
++ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
++
++}
++
++/**
++ * arm_iommu_sync_sg_for_device
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to map (returned from dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ */
++void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir)
++{
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i)
++ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
++}
++
++
++/**
++ * arm_coherent_iommu_map_page
++ * @dev: valid struct device pointer
++ * @page: page that buffer resides in
++ * @offset: offset into page for start of buffer
++ * @size: size of buffer to map
++ * @dir: DMA transfer direction
++ *
++ * Coherent IOMMU aware version of arm_dma_map_page()
++ */
++static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t dma_addr;
++ int ret, prot, len = PAGE_ALIGN(size + offset);
++
++ dma_addr = __alloc_iova(mapping, len);
++ if (dma_addr == DMA_ERROR_CODE)
++ return dma_addr;
++
++ prot = __dma_direction_to_prot(dir);
++
++ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
++ if (ret < 0)
++ goto fail;
++
++ return dma_addr + offset;
++fail:
++ __free_iova(mapping, dma_addr, len);
++ return DMA_ERROR_CODE;
++}
++
++/**
++ * arm_iommu_map_page
++ * @dev: valid struct device pointer
++ * @page: page that buffer resides in
++ * @offset: offset into page for start of buffer
++ * @size: size of buffer to map
++ * @dir: DMA transfer direction
++ *
++ * IOMMU aware version of arm_dma_map_page()
++ */
++static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++
++ return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
++}
++
++/**
++ * arm_coherent_iommu_unmap_page
++ * @dev: valid struct device pointer
++ * @handle: DMA address of buffer
++ * @size: size of buffer (same as passed to dma_map_page)
++ * @dir: DMA transfer direction (same as passed to dma_map_page)
++ *
++ * Coherent IOMMU aware version of arm_dma_unmap_page()
++ */
++static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova = handle & PAGE_MASK;
++ int offset = handle & ~PAGE_MASK;
++ int len = PAGE_ALIGN(size + offset);
++
++ if (!iova)
++ return;
++
++ iommu_unmap(mapping->domain, iova, len);
++ __free_iova(mapping, iova, len);
++}
++
++/**
++ * arm_iommu_unmap_page
++ * @dev: valid struct device pointer
++ * @handle: DMA address of buffer
++ * @size: size of buffer (same as passed to dma_map_page)
++ * @dir: DMA transfer direction (same as passed to dma_map_page)
++ *
++ * IOMMU aware version of arm_dma_unmap_page()
++ */
++static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova = handle & PAGE_MASK;
++ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
++ int offset = handle & ~PAGE_MASK;
++ int len = PAGE_ALIGN(size + offset);
++
++ if (!iova)
++ return;
++
++ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
++ __dma_page_dev_to_cpu(page, offset, size, dir);
++
++ iommu_unmap(mapping->domain, iova, len);
++ __free_iova(mapping, iova, len);
++}
++
++static void arm_iommu_sync_single_for_cpu(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova = handle & PAGE_MASK;
++ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
++ unsigned int offset = handle & ~PAGE_MASK;
++
++ if (!iova)
++ return;
++
++ __dma_page_dev_to_cpu(page, offset, size, dir);
++}
++
++static void arm_iommu_sync_single_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova = handle & PAGE_MASK;
++ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
++ unsigned int offset = handle & ~PAGE_MASK;
++
++ if (!iova)
++ return;
++
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++}
++
++struct dma_map_ops iommu_ops = {
++ .alloc = arm_iommu_alloc_attrs,
++ .free = arm_iommu_free_attrs,
++ .mmap = arm_iommu_mmap_attrs,
++ .get_sgtable = arm_iommu_get_sgtable,
++
++ .map_page = arm_iommu_map_page,
++ .unmap_page = arm_iommu_unmap_page,
++ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
++ .sync_single_for_device = arm_iommu_sync_single_for_device,
++
++ .map_sg = arm_iommu_map_sg,
++ .unmap_sg = arm_iommu_unmap_sg,
++ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
++ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
++
++ .set_dma_mask = arm_dma_set_mask,
++};
++
++struct dma_map_ops iommu_coherent_ops = {
++ .alloc = arm_iommu_alloc_attrs,
++ .free = arm_iommu_free_attrs,
++ .mmap = arm_iommu_mmap_attrs,
++ .get_sgtable = arm_iommu_get_sgtable,
++
++ .map_page = arm_coherent_iommu_map_page,
++ .unmap_page = arm_coherent_iommu_unmap_page,
++
++ .map_sg = arm_coherent_iommu_map_sg,
++ .unmap_sg = arm_coherent_iommu_unmap_sg,
++
++ .set_dma_mask = arm_dma_set_mask,
++};
++
++/**
++ * arm_iommu_create_mapping
++ * @bus: pointer to the bus holding the client device (for IOMMU calls)
++ * @base: start address of the valid IO address space
++ * @size: size of the valid IO address space
++ * @order: accuracy of the IO addresses allocations
++ *
++ * Creates a mapping structure which holds information about used/unused
++ * IO address ranges, which is required to perform memory allocation and
++ * mapping with IOMMU aware functions.
++ *
++ * The client device need to be attached to the mapping with
++ * arm_iommu_attach_device function.
++ */
++struct dma_iommu_mapping *
++arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
++ int order)
++{
++ unsigned int count = size >> (PAGE_SHIFT + order);
++ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
++ struct dma_iommu_mapping *mapping;
++ int err = -ENOMEM;
++
++ if (!count)
++ return ERR_PTR(-EINVAL);
++
++ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
++ if (!mapping)
++ goto err;
++
++ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++ if (!mapping->bitmap)
++ goto err2;
++
++ mapping->base = base;
++ mapping->bits = BITS_PER_BYTE * bitmap_size;
++ mapping->order = order;
++ spin_lock_init(&mapping->lock);
++
++ mapping->domain = iommu_domain_alloc(bus);
++ if (!mapping->domain)
++ goto err3;
++
++ kref_init(&mapping->kref);
++ return mapping;
++err3:
++ kfree(mapping->bitmap);
++err2:
++ kfree(mapping);
++err:
++ return ERR_PTR(err);
++}
++EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
++
++static void release_iommu_mapping(struct kref *kref)
++{
++ struct dma_iommu_mapping *mapping =
++ container_of(kref, struct dma_iommu_mapping, kref);
++
++ iommu_domain_free(mapping->domain);
++ kfree(mapping->bitmap);
++ kfree(mapping);
++}
++
++void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
++{
++ if (mapping)
++ kref_put(&mapping->kref, release_iommu_mapping);
++}
++EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
++
++/**
++ * arm_iommu_attach_device
++ * @dev: valid struct device pointer
++ * @mapping: io address space mapping structure (returned from
++ * arm_iommu_create_mapping)
++ *
++ * Attaches specified io address space mapping to the provided device,
++ * this replaces the dma operations (dma_map_ops pointer) with the
++ * IOMMU aware version. More than one client might be attached to
++ * the same io address space mapping.
++ */
++int arm_iommu_attach_device(struct device *dev,
++ struct dma_iommu_mapping *mapping)
++{
++ int err;
++
++ err = iommu_attach_device(mapping->domain, dev);
++ if (err)
++ return err;
++
++ kref_get(&mapping->kref);
++ dev->archdata.mapping = mapping;
++ set_dma_ops(dev, &iommu_ops);
++
++ pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
++ return 0;
++}
++EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
++
++/**
++ * arm_iommu_detach_device
++ * @dev: valid struct device pointer
++ *
++ * Detaches the provided device from a previously attached map.
++ * This voids the dma operations (dma_map_ops pointer)
++ */
++void arm_iommu_detach_device(struct device *dev)
++{
++ struct dma_iommu_mapping *mapping;
++
++ mapping = to_dma_iommu_mapping(dev);
++ if (!mapping) {
++ dev_warn(dev, "Not attached\n");
++ return;
++ }
++
++ iommu_detach_device(mapping->domain, dev);
++ kref_put(&mapping->kref, release_iommu_mapping);
++ dev->archdata.mapping = NULL;
++ set_dma_ops(dev, NULL);
++
++ pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
++}
++EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
++
++#endif
+diff -Nur linux-3.14.36/arch/arm/mm/fault.c linux-openelec/arch/arm/mm/fault.c
+--- linux-3.14.36/arch/arm/mm/fault.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mm/fault.c 2015-05-06 12:05:43.000000000 -0500
+@@ -449,8 +449,16 @@
+
+ if (pud_none(*pud_k))
+ goto bad_area;
+- if (!pud_present(*pud))
++ if (!pud_present(*pud)) {
+ set_pud(pud, *pud_k);
++ /*
++ * There is a small window during free_pgtables() where the
++ * user *pud entry is 0 but the TLB has not been invalidated
++ * and we get a level 2 (pmd) translation fault caused by the
++ * intermediate TLB caching of the old level 1 (pud) entry.
++ */
++ flush_tlb_kernel_page(addr);
++ }
+
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+@@ -473,8 +481,9 @@
+ #endif
+ if (pmd_none(pmd_k[index]))
+ goto bad_area;
++ if (!pmd_present(pmd[index]))
++ copy_pmd(pmd, pmd_k);
+
+- copy_pmd(pmd, pmd_k);
+ return 0;
+
+ bad_area:
+diff -Nur linux-3.14.36/arch/arm/mm/init.c linux-openelec/arch/arm/mm/init.c
+--- linux-3.14.36/arch/arm/mm/init.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mm/init.c 2015-05-06 12:05:43.000000000 -0500
+@@ -327,7 +327,7 @@
+ * reserve memory for DMA contigouos allocations,
+ * must come from DMA area inside low memory
+ */
+- dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
++ dma_contiguous_reserve(arm_dma_limit);
+
+ arm_memblock_steal_permitted = false;
+ memblock_dump_all();
+diff -Nur linux-3.14.36/arch/arm/mm/Kconfig linux-openelec/arch/arm/mm/Kconfig
+--- linux-3.14.36/arch/arm/mm/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mm/Kconfig 2015-07-24 18:03:29.012842002 -0500
+@@ -898,6 +898,57 @@
+ This option enables optimisations for the PL310 cache
+ controller.
+
++config PL310_ERRATA_588369
++ bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
++ depends on CACHE_L2X0
++ help
++ The PL310 L2 cache controller implements three types of Clean &
++ Invalidate maintenance operations: by Physical Address
++ (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
++ They are architecturally defined to behave as the execution of a
++ clean operation followed immediately by an invalidate operation,
++ both performing to the same memory location. This functionality
++ is not correctly implemented in PL310 as clean lines are not
++ invalidated as a result of these operations.
++
++config PL310_ERRATA_727915
++ bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
++ depends on CACHE_L2X0
++ help
++ PL310 implements the Clean & Invalidate by Way L2 cache maintenance
++ operation (offset 0x7FC). This operation runs in background so that
++ PL310 can handle normal accesses while it is in progress. Under very
++ rare circumstances, due to this erratum, write data can be lost when
++ PL310 treats a cacheable write transaction during a Clean &
++ Invalidate by Way operation.
++
++config PL310_ERRATA_753970
++ bool "PL310 errata: cache sync operation may be faulty"
++ depends on CACHE_PL310
++ help
++ This option enables the workaround for the 753970 PL310 (r3p0) erratum.
++
++ Under some condition the effect of cache sync operation on
++ the store buffer still remains when the operation completes.
++ This means that the store buffer is always asked to drain and
++ this prevents it from merging any further writes. The workaround
++ is to replace the normal offset of cache sync operation (0x730)
++ by another offset targeting an unmapped PL310 register 0x740.
++ This has the same effect as the cache sync operation: store buffer
++ drain and waiting for all buffers empty.
++
++config PL310_ERRATA_769419
++ bool "PL310 errata: no automatic Store Buffer drain"
++ depends on CACHE_L2X0
++ help
++ On revisions of the PL310 prior to r3p2, the Store Buffer does
++ not automatically drain. This can cause normal, non-cacheable
++ writes to be retained when the memory system is idle, leading
++ to suboptimal I/O performance for drivers using coherent DMA.
++ This option adds a write barrier to the cpu_idle loop so that,
++ on systems with an outer cache, the store buffer is drained
++ explicitly.
++
+ config CACHE_TAUROS2
+ bool "Enable the Tauros2 L2 cache controller"
+ depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
+diff -Nur linux-3.14.36/arch/arm/mm/l2c-common.c linux-openelec/arch/arm/mm/l2c-common.c
+--- linux-3.14.36/arch/arm/mm/l2c-common.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mm/l2c-common.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,20 @@
++/*
++ * Copyright (C) 2010 ARM Ltd.
++ * Written by Catalin Marinas <catalin.marinas@arm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include <linux/bug.h>
++#include <linux/smp.h>
++#include <asm/outercache.h>
++
++void outer_disable(void)
++{
++ WARN_ON(!irqs_disabled());
++ WARN_ON(num_online_cpus() > 1);
++
++ if (outer_cache.disable)
++ outer_cache.disable();
++}
+diff -Nur linux-3.14.36/arch/arm/mm/l2c-l2x0-resume.S linux-openelec/arch/arm/mm/l2c-l2x0-resume.S
+--- linux-3.14.36/arch/arm/mm/l2c-l2x0-resume.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm/mm/l2c-l2x0-resume.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,58 @@
++/*
++ * L2C-310 early resume code. This can be used by platforms to restore
++ * the settings of their L2 cache controller before restoring the
++ * processor state.
++ *
++ * This code can only be used to if you are running in the secure world.
++ */
++#include <linux/linkage.h>
++#include <asm/hardware/cache-l2x0.h>
++
++ .text
++
++ENTRY(l2c310_early_resume)
++ adr r0, 1f
++ ldr r2, [r0]
++ add r0, r2, r0
++
++ ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8}
++ @ r1 = phys address of L2C-310 controller
++ @ r2 = aux_ctrl
++ @ r3 = tag_latency
++ @ r4 = data_latency
++ @ r5 = filter_start
++ @ r6 = filter_end
++ @ r7 = prefetch_ctrl
++ @ r8 = pwr_ctrl
++
++ @ Check that the address has been initialised
++ teq r1, #0
++ moveq pc, lr
++
++ @ The prefetch and power control registers are revision dependent
++ @ and can be written whether or not the L2 cache is enabled
++ ldr r0, [r1, #L2X0_CACHE_ID]
++ and r0, r0, #L2X0_CACHE_ID_RTL_MASK
++ cmp r0, #L310_CACHE_ID_RTL_R2P0
++ strcs r7, [r1, #L310_PREFETCH_CTRL]
++ cmp r0, #L310_CACHE_ID_RTL_R3P0
++ strcs r8, [r1, #L310_POWER_CTRL]
++
++ @ Don't setup the L2 cache if it is already enabled
++ ldr r0, [r1, #L2X0_CTRL]
++ tst r0, #L2X0_CTRL_EN
++ movne pc, lr
++
++ str r3, [r1, #L310_TAG_LATENCY_CTRL]
++ str r4, [r1, #L310_DATA_LATENCY_CTRL]
++ str r6, [r1, #L310_ADDR_FILTER_END]
++ str r5, [r1, #L310_ADDR_FILTER_START]
++
++ str r2, [r1, #L2X0_AUX_CTRL]
++ mov r9, #L2X0_CTRL_EN
++ str r9, [r1, #L2X0_CTRL]
++ mov pc, lr
++ENDPROC(l2c310_early_resume)
++
++ .align
++1: .long l2x0_saved_regs - .
+diff -Nur linux-3.14.36/arch/arm/mm/Makefile linux-openelec/arch/arm/mm/Makefile
+--- linux-3.14.36/arch/arm/mm/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mm/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -95,7 +95,8 @@
+ AFLAGS_proc-v6.o :=-Wa,-march=armv6
+ AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
+
++obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
+ obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
+-obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
++obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
+ obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
+ obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
+diff -Nur linux-3.14.36/arch/arm/mm/proc-v7.S linux-openelec/arch/arm/mm/proc-v7.S
+--- linux-3.14.36/arch/arm/mm/proc-v7.S 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm/mm/proc-v7.S 2015-07-24 18:03:29.072842002 -0500
+@@ -334,6 +334,17 @@
+ mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
+ 1:
+ #endif
++#ifdef CONFIG_ARM_ERRATA_794072
++ mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
++ orr r10, r10, #1 << 4 @ set bit #4
++ mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
++#endif
++#ifdef CONFIG_ARM_ERRATA_761320
++ cmp r6, #0x40 @ present prior to r4p0
++ mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
++ orrlt r10, r10, #1 << 21 @ set bit #21
++ mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
++#endif
+
+ /* Cortex-A15 Errata */
+ 3: ldr r10, =0x00000c0f @ Cortex-A15 primary part number
+diff -Nur linux-3.14.36/arch/arm64/boot/dts/apm-mustang.dts linux-openelec/arch/arm64/boot/dts/apm-mustang.dts
+--- linux-3.14.36/arch/arm64/boot/dts/apm-mustang.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/boot/dts/apm-mustang.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -24,3 +24,7 @@
+ reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
+ };
+ };
++
++&serial0 {
++ status = "ok";
++};
+diff -Nur linux-3.14.36/arch/arm64/boot/dts/apm-storm.dtsi linux-openelec/arch/arm64/boot/dts/apm-storm.dtsi
+--- linux-3.14.36/arch/arm64/boot/dts/apm-storm.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/boot/dts/apm-storm.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -176,16 +176,226 @@
+ reg-names = "csr-reg";
+ clock-output-names = "eth8clk";
+ };
++
++ sataphy1clk: sataphy1clk@1f21c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f21c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sataphy1clk";
++ status = "disabled";
++ csr-offset = <0x4>;
++ csr-mask = <0x00>;
++ enable-offset = <0x0>;
++ enable-mask = <0x06>;
++ };
++
++ sataphy2clk: sataphy1clk@1f22c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f22c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sataphy2clk";
++ status = "ok";
++ csr-offset = <0x4>;
++ csr-mask = <0x3a>;
++ enable-offset = <0x0>;
++ enable-mask = <0x06>;
++ };
++
++ sataphy3clk: sataphy1clk@1f23c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f23c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sataphy3clk";
++ status = "ok";
++ csr-offset = <0x4>;
++ csr-mask = <0x3a>;
++ enable-offset = <0x0>;
++ enable-mask = <0x06>;
++ };
++
++ sata01clk: sata01clk@1f21c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f21c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sata01clk";
++ csr-offset = <0x4>;
++ csr-mask = <0x05>;
++ enable-offset = <0x0>;
++ enable-mask = <0x39>;
++ };
++
++ sata23clk: sata23clk@1f22c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f22c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sata23clk";
++ csr-offset = <0x4>;
++ csr-mask = <0x05>;
++ enable-offset = <0x0>;
++ enable-mask = <0x39>;
++ };
++
++ sata45clk: sata45clk@1f23c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f23c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sata45clk";
++ csr-offset = <0x4>;
++ csr-mask = <0x05>;
++ enable-offset = <0x0>;
++ enable-mask = <0x39>;
++ };
++
++ rtcclk: rtcclk@17000000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x17000000 0x0 0x2000>;
++ reg-names = "csr-reg";
++ csr-offset = <0xc>;
++ csr-mask = <0x2>;
++ enable-offset = <0x10>;
++ enable-mask = <0x2>;
++ clock-output-names = "rtcclk";
++ };
+ };
+
+ serial0: serial@1c020000 {
++ status = "disabled";
+ device_type = "serial";
+- compatible = "ns16550";
++ compatible = "ns16550a";
+ reg = <0 0x1c020000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4c 0x4>;
+ };
++
++ serial1: serial@1c021000 {
++ status = "disabled";
++ device_type = "serial";
++ compatible = "ns16550a";
++ reg = <0 0x1c021000 0x0 0x1000>;
++ reg-shift = <2>;
++ clock-frequency = <10000000>; /* Updated by bootloader */
++ interrupt-parent = <&gic>;
++ interrupts = <0x0 0x4d 0x4>;
++ };
++
++ serial2: serial@1c022000 {
++ status = "disabled";
++ device_type = "serial";
++ compatible = "ns16550a";
++ reg = <0 0x1c022000 0x0 0x1000>;
++ reg-shift = <2>;
++ clock-frequency = <10000000>; /* Updated by bootloader */
++ interrupt-parent = <&gic>;
++ interrupts = <0x0 0x4e 0x4>;
++ };
++
++ serial3: serial@1c023000 {
++ status = "disabled";
++ device_type = "serial";
++ compatible = "ns16550a";
++ reg = <0 0x1c023000 0x0 0x1000>;
++ reg-shift = <2>;
++ clock-frequency = <10000000>; /* Updated by bootloader */
++ interrupt-parent = <&gic>;
++ interrupts = <0x0 0x4f 0x4>;
++ };
++
++ phy1: phy@1f21a000 {
++ compatible = "apm,xgene-phy";
++ reg = <0x0 0x1f21a000 0x0 0x100>;
++ #phy-cells = <1>;
++ clocks = <&sataphy1clk 0>;
++ status = "disabled";
++ apm,tx-boost-gain = <30 30 30 30 30 30>;
++ apm,tx-eye-tuning = <2 10 10 2 10 10>;
++ };
++
++ phy2: phy@1f22a000 {
++ compatible = "apm,xgene-phy";
++ reg = <0x0 0x1f22a000 0x0 0x100>;
++ #phy-cells = <1>;
++ clocks = <&sataphy2clk 0>;
++ status = "ok";
++ apm,tx-boost-gain = <30 30 30 30 30 30>;
++ apm,tx-eye-tuning = <1 10 10 2 10 10>;
++ };
++
++ phy3: phy@1f23a000 {
++ compatible = "apm,xgene-phy";
++ reg = <0x0 0x1f23a000 0x0 0x100>;
++ #phy-cells = <1>;
++ clocks = <&sataphy3clk 0>;
++ status = "ok";
++ apm,tx-boost-gain = <31 31 31 31 31 31>;
++ apm,tx-eye-tuning = <2 10 10 2 10 10>;
++ };
++
++ sata1: sata@1a000000 {
++ compatible = "apm,xgene-ahci";
++ reg = <0x0 0x1a000000 0x0 0x1000>,
++ <0x0 0x1f210000 0x0 0x1000>,
++ <0x0 0x1f21d000 0x0 0x1000>,
++ <0x0 0x1f21e000 0x0 0x1000>,
++ <0x0 0x1f217000 0x0 0x1000>;
++ interrupts = <0x0 0x86 0x4>;
++ dma-coherent;
++ status = "disabled";
++ clocks = <&sata01clk 0>;
++ phys = <&phy1 0>;
++ phy-names = "sata-phy";
++ };
++
++ sata2: sata@1a400000 {
++ compatible = "apm,xgene-ahci";
++ reg = <0x0 0x1a400000 0x0 0x1000>,
++ <0x0 0x1f220000 0x0 0x1000>,
++ <0x0 0x1f22d000 0x0 0x1000>,
++ <0x0 0x1f22e000 0x0 0x1000>,
++ <0x0 0x1f227000 0x0 0x1000>;
++ interrupts = <0x0 0x87 0x4>;
++ dma-coherent;
++ status = "ok";
++ clocks = <&sata23clk 0>;
++ phys = <&phy2 0>;
++ phy-names = "sata-phy";
++ };
++
++ sata3: sata@1a800000 {
++ compatible = "apm,xgene-ahci";
++ reg = <0x0 0x1a800000 0x0 0x1000>,
++ <0x0 0x1f230000 0x0 0x1000>,
++ <0x0 0x1f23d000 0x0 0x1000>,
++ <0x0 0x1f23e000 0x0 0x1000>;
++ interrupts = <0x0 0x88 0x4>;
++ dma-coherent;
++ status = "ok";
++ clocks = <&sata45clk 0>;
++ phys = <&phy3 0>;
++ phy-names = "sata-phy";
++ };
++
++ rtc: rtc@10510000 {
++ compatible = "apm,xgene-rtc";
++ reg = <0x0 0x10510000 0x0 0x400>;
++ interrupts = <0x0 0x46 0x4>;
++ #clock-cells = <1>;
++ clocks = <&rtcclk 0>;
++ };
+ };
+ };
+diff -Nur linux-3.14.36/arch/arm64/boot/dts/clcd-panels.dtsi linux-openelec/arch/arm64/boot/dts/clcd-panels.dtsi
+--- linux-3.14.36/arch/arm64/boot/dts/clcd-panels.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/boot/dts/clcd-panels.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,52 @@
++/*
++ * ARM Ltd. Versatile Express
++ *
++ */
++
++/ {
++ panels {
++ panel@0 {
++ compatible = "panel";
++ mode = "VGA";
++ refresh = <60>;
++ xres = <640>;
++ yres = <480>;
++ pixclock = <39721>;
++ left_margin = <40>;
++ right_margin = <24>;
++ upper_margin = <32>;
++ lower_margin = <11>;
++ hsync_len = <96>;
++ vsync_len = <2>;
++ sync = <0>;
++ vmode = "FB_VMODE_NONINTERLACED";
++
++ tim2 = "TIM2_BCD", "TIM2_IPC";
++ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
++ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
++ bpp = <16>;
++ };
++
++ panel@1 {
++ compatible = "panel";
++ mode = "XVGA";
++ refresh = <60>;
++ xres = <1024>;
++ yres = <768>;
++ pixclock = <15748>;
++ left_margin = <152>;
++ right_margin = <48>;
++ upper_margin = <23>;
++ lower_margin = <3>;
++ hsync_len = <104>;
++ vsync_len = <4>;
++ sync = <0>;
++ vmode = "FB_VMODE_NONINTERLACED";
++
++ tim2 = "TIM2_BCD", "TIM2_IPC";
++ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
++ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
++ bpp = <16>;
++ };
++ };
++};
+diff -Nur linux-3.14.36/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts linux-openelec/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts
+--- linux-3.14.36/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,266 @@
++/*
++ * Copyright (c) 2013, ARM Limited. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * Redistributions of source code must retain the above copyright notice, this
++ * list of conditions and the following disclaimer.
++ *
++ * Redistributions in binary form must reproduce the above copyright notice,
++ * this list of conditions and the following disclaimer in the documentation
++ * and/or other materials provided with the distribution.
++ *
++ * Neither the name of ARM nor the names of its contributors may be used
++ * to endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/dts-v1/;
++
++/memreserve/ 0x80000000 0x00010000;
++
++/ {
++};
++
++/ {
++ model = "FVP Base";
++ compatible = "arm,vfp-base", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ psci {
++ compatible = "arm,psci";
++ method = "smc";
++ cpu_suspend = <0xc4000001>;
++ cpu_off = <0x84000002>;
++ cpu_on = <0xc4000003>;
++ };
++
++ cpus {
++ #address-cells = <2>;
++ #size-cells = <0>;
++
++ big0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57", "arm,armv8";
++ reg = <0x0 0x0>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ big1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57", "arm,armv8";
++ reg = <0x0 0x1>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ big2: cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57", "arm,armv8";
++ reg = <0x0 0x2>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ big3: cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57", "arm,armv8";
++ reg = <0x0 0x3>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ little0: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53", "arm,armv8";
++ reg = <0x0 0x100>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ little1: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53", "arm,armv8";
++ reg = <0x0 0x101>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ little2: cpu@102 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53", "arm,armv8";
++ reg = <0x0 0x102>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ little3: cpu@103 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53", "arm,armv8";
++ reg = <0x0 0x103>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++
++ cpu-map {
++ cluster0 {
++ core0 {
++ cpu = <&big0>;
++ };
++ core1 {
++ cpu = <&big1>;
++ };
++ core2 {
++ cpu = <&big2>;
++ };
++ core3 {
++ cpu = <&big3>;
++ };
++ };
++ cluster1 {
++ core0 {
++ cpu = <&little0>;
++ };
++ core1 {
++ cpu = <&little1>;
++ };
++ core2 {
++ cpu = <&little2>;
++ };
++ core3 {
++ cpu = <&little3>;
++ };
++ };
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0 0x80000000>,
++ <0x00000008 0x80000000 0 0x80000000>;
++ };
++
++ gic: interrupt-controller@2f000000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0x0 0x2f000000 0 0x10000>,
++ <0x0 0x2c000000 0 0x2000>,
++ <0x0 0x2c010000 0 0x2000>,
++ <0x0 0x2c02F000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 0xff01>,
++ <1 14 0xff01>,
++ <1 11 0xff01>,
++ <1 10 0xff01>;
++ clock-frequency = <100000000>;
++ };
++
++ timer@2a810000 {
++ compatible = "arm,armv7-timer-mem";
++ reg = <0x0 0x2a810000 0x0 0x10000>;
++ clock-frequency = <100000000>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ frame@2a820000 {
++ frame-number = <0>;
++ interrupts = <0 25 4>;
++ reg = <0x0 0x2a820000 0x0 0x10000>;
++ };
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <0 60 4>,
++ <0 61 4>,
++ <0 62 4>,
++ <0 63 4>;
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm64/boot/dts/juno.dts linux-openelec/arch/arm64/boot/dts/juno.dts
+--- linux-3.14.36/arch/arm64/boot/dts/juno.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/boot/dts/juno.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,498 @@
++/*
++ * ARM Ltd. Juno Plaform
++ *
++ * Fast Models FVP v2 support
++ */
++
++/dts-v1/;
++
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++
++/ {
++ model = "Juno";
++ compatible = "arm,juno", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ aliases {
++ serial0 = &soc_uart0;
++ };
++
++ cpus {
++ #address-cells = <2>;
++ #size-cells = <0>;
++
++ cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53","arm,armv8";
++ reg = <0x0 0x100>;
++ enable-method = "psci";
++ };
++
++ cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53","arm,armv8";
++ reg = <0x0 0x101>;
++ enable-method = "psci";
++ };
++
++ cpu@102 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53","arm,armv8";
++ reg = <0x0 0x102>;
++ enable-method = "psci";
++ };
++
++ cpu@103 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53","arm,armv8";
++ reg = <0x0 0x103>;
++ enable-method = "psci";
++ };
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57","arm,armv8";
++ reg = <0x0 0x0>;
++ enable-method = "psci";
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57","arm,armv8";
++ reg = <0x0 0x1>;
++ enable-method = "psci";
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0x0 0x80000000>,
++ <0x00000008 0x80000000 0x1 0x80000000>;
++ };
++
++ /* memory@14000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x14000000 0x0 0x02000000>;
++ }; */
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0x0 0x2c010000 0 0x1000>,
++ <0x0 0x2c02f000 0 0x1000>,
++ <0x0 0x2c04f000 0 0x2000>,
++ <0x0 0x2c06f000 0 0x2000>;
++ interrupts = <GIC_PPI 9 0xf04>;
++ };
++
++ msi0: msi@2c1c0000 {
++ compatible = "arm,gic-msi";
++ reg = <0x0 0x2c1c0000 0 0x10000
++ 0x0 0x2c1d0000 0 0x10000
++ 0x0 0x2c1e0000 0 0x10000
++ 0x0 0x2c1f0000 0 0x10000>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <GIC_PPI 13 0xff01>,
++ <GIC_PPI 14 0xff01>,
++ <GIC_PPI 11 0xff01>,
++ <GIC_PPI 10 0xff01>;
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <GIC_SPI 60 4>,
++ <GIC_SPI 61 4>,
++ <GIC_SPI 62 4>,
++ <GIC_SPI 63 4>;
++ };
++
++ psci {
++ compatible = "arm,psci";
++ method = "smc";
++ cpu_suspend = <0xC4000001>;
++ cpu_off = <0x84000002>;
++ cpu_on = <0xC4000003>;
++ migrate = <0xC4000005>;
++ };
++
++ pci0: pci@30000000 {
++ compatible = "arm,pcie-xr3";
++ device_type = "pci";
++ reg = <0 0x7ff30000 0 0x1000
++ 0 0x7ff20000 0 0x10000
++ 0 0x40000000 0 0x10000000>;
++ bus-range = <0 255>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ ranges = <0x01000000 0x0 0x00000000 0x00 0x5ff00000 0x0 0x00100000
++ 0x02000000 0x0 0x00000000 0x40 0x00000000 0x0 0x80000000
++ 0x42000000 0x0 0x80000000 0x40 0x80000000 0x0 0x80000000>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0 0 0 1 &gic 0 136 4
++ 0 0 0 2 &gic 0 137 4
++ 0 0 0 3 &gic 0 138 4
++ 0 0 0 4 &gic 0 139 4>;
++ };
++
++ scpi: scpi@2b1f0000 {
++ compatible = "arm,scpi-mhu";
++ reg = <0x0 0x2b1f0000 0x0 0x10000>, /* MHU registers */
++ <0x0 0x2e000000 0x0 0x10000>; /* Payload area */
++ interrupts = <0 36 4>, /* low priority interrupt */
++ <0 35 4>, /* high priority interrupt */
++ <0 37 4>; /* secure channel interrupt */
++ #clock-cells = <1>;
++ clock-output-names = "a57", "a53", "gpu", "hdlcd0", "hdlcd1";
++ };
++
++ hdlcd0_osc: scpi_osc@3 {
++ compatible = "arm,scpi-osc";
++ #clock-cells = <0>;
++ clocks = <&scpi 3>;
++ frequency-range = <23000000 210000000>;
++ clock-output-names = "pxlclk0";
++ };
++
++ hdlcd1_osc: scpi_osc@4 {
++ compatible = "arm,scpi-osc";
++ #clock-cells = <0>;
++ clocks = <&scpi 4>;
++ frequency-range = <23000000 210000000>;
++ clock-output-names = "pxlclk1";
++ };
++
++ soc_uartclk: refclk72738khz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <7273800>;
++ clock-output-names = "juno:uartclk";
++ };
++
++ soc_refclk24mhz: clk24mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ clock-output-names = "juno:clk24mhz";
++ };
++
++ mb_eth25mhz: clk25mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <25000000>;
++ clock-output-names = "ethclk25mhz";
++ };
++
++ soc_usb48mhz: clk48mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <48000000>;
++ clock-output-names = "clk48mhz";
++ };
++
++ soc_smc50mhz: clk50mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <50000000>;
++ clock-output-names = "smc_clk";
++ };
++
++ soc_refclk100mhz: refclk100mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "apb_pclk";
++ };
++
++ soc_faxiclk: refclk533mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <533000000>;
++ clock-output-names = "faxi_clk";
++ };
++
++ soc_fixed_3v3: fixedregulator@0 {
++ compatible = "regulator-fixed";
++ regulator-name = "3V3";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ memory-controller@7ffd0000 {
++ compatible = "arm,pl354", "arm,primecell";
++ reg = <0 0x7ffd0000 0 0x1000>;
++ interrupts = <0 86 4>,
++ <0 87 4>;
++ clocks = <&soc_smc50mhz>;
++ clock-names = "apb_pclk";
++ chip5-memwidth = <16>;
++ };
++
++ dma0: dma@0x7ff00000 {
++ compatible = "arm,pl330", "arm,primecell";
++ reg = <0x0 0x7ff00000 0 0x1000>;
++ interrupts = <0 95 4>,
++ <0 88 4>,
++ <0 89 4>,
++ <0 90 4>,
++ <0 91 4>,
++ <0 108 4>,
++ <0 109 4>,
++ <0 110 4>,
++ <0 111 4>;
++ #dma-cells = <1>;
++ #dma-channels = <8>;
++ #dma-requests = <32>;
++ clocks = <&soc_faxiclk>;
++ clock-names = "apb_pclk";
++ };
++
++ soc_uart0: uart@7ff80000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0 0x7ff80000 0x0 0x1000>;
++ interrupts = <0 83 4>;
++ clocks = <&soc_uartclk>, <&soc_refclk100mhz>;
++ clock-names = "uartclk", "apb_pclk";
++ dmas = <&dma0 1
++ &dma0 2>;
++ dma-names = "rx", "tx";
++ };
++
++ /* this UART is reserved for secure software.
++ soc_uart1: uart@7ff70000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0 0x7ff70000 0x0 0x1000>;
++ interrupts = <0 84 4>;
++ clocks = <&soc_uartclk>, <&soc_refclk100mhz>;
++ clock-names = "uartclk", "apb_pclk";
++ }; */
++
++ ulpi_phy: phy@0 {
++ compatible = "phy-ulpi-generic";
++ reg = <0x0 0x94 0x0 0x4>;
++ phy-id = <0>;
++ };
++
++ ehci@7ffc0000 {
++ compatible = "snps,ehci-h20ahb";
++ /* compatible = "arm,h20ahb-ehci"; */
++ reg = <0x0 0x7ffc0000 0x0 0x10000>;
++ interrupts = <0 117 4>;
++ clocks = <&soc_usb48mhz>;
++ clock-names = "otg";
++ phys = <&ulpi_phy>;
++ };
++
++ ohci@0x7ffb0000 {
++ compatible = "generic-ohci";
++ reg = <0x0 0x7ffb0000 0x0 0x10000>;
++ interrupts = <0 116 4>;
++ clocks = <&soc_usb48mhz>;
++ clock-names = "otg";
++ };
++
++ i2c@0x7ffa0000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "snps,designware-i2c";
++ reg = <0x0 0x7ffa0000 0x0 0x1000>;
++ interrupts = <0 104 4>;
++ clock-frequency = <400000>;
++ i2c-sda-hold-time-ns = <500>;
++ clocks = <&soc_smc50mhz>;
++
++ dvi0: dvi-transmitter@70 {
++ compatible = "nxp,tda998x";
++ reg = <0x70>;
++ };
++
++ dvi1: dvi-transmitter@71 {
++ compatible = "nxp,tda998x";
++ reg = <0x71>;
++ };
++ };
++
++ /* mmci@1c050000 {
++ compatible = "arm,pl180", "arm,primecell";
++ reg = <0x0 0x1c050000 0x0 0x1000>;
++ interrupts = <0 73 4>,
++ <0 74 4>;
++ max-frequency = <12000000>;
++ vmmc-supply = <&soc_fixed_3v3>;
++ clocks = <&soc_refclk24mhz>, <&soc_refclk100mhz>;
++ clock-names = "mclk", "apb_pclk";
++ }; */
++
++ hdlcd@7ff60000 {
++ compatible = "arm,hdlcd";
++ reg = <0 0x7ff60000 0 0x1000>;
++ interrupts = <0 85 4>;
++ clocks = <&hdlcd0_osc>;
++ clock-names = "pxlclk";
++ i2c-slave = <&dvi0>;
++
++ /* display-timings {
++ native-mode = <&timing0>;
++ timing0: timing@0 {
++ /* 1024 x 768 framebufer, standard VGA timings * /
++ clock-frequency = <65000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hfront-porch = <24>;
++ hback-porch = <160>;
++ hsync-len = <136>;
++ vfront-porch = <3>;
++ vback-porch = <29>;
++ vsync-len = <6>;
++ };
++ }; */
++ };
++
++ hdlcd@7ff50000 {
++ compatible = "arm,hdlcd";
++ reg = <0 0x7ff50000 0 0x1000>;
++ interrupts = <0 93 4>;
++ clocks = <&hdlcd1_osc>;
++ clock-names = "pxlclk";
++ i2c-slave = <&dvi1>;
++
++ display-timings {
++ native-mode = <&timing1>;
++ timing1: timing@1 {
++ /* 1024 x 768 framebufer, standard VGA timings */
++ clock-frequency = <65000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hfront-porch = <24>;
++ hback-porch = <160>;
++ hsync-len = <136>;
++ vfront-porch = <3>;
++ vback-porch = <29>;
++ vsync-len = <6>;
++ };
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 15>;
++ interrupt-map = <0 0 0 &gic 0 68 4>,
++ <0 0 1 &gic 0 69 4>,
++ <0 0 2 &gic 0 70 4>,
++ <0 0 3 &gic 0 160 4>,
++ <0 0 4 &gic 0 161 4>,
++ <0 0 5 &gic 0 162 4>,
++ <0 0 6 &gic 0 163 4>,
++ <0 0 7 &gic 0 164 4>,
++ <0 0 8 &gic 0 165 4>,
++ <0 0 9 &gic 0 166 4>,
++ <0 0 10 &gic 0 167 4>,
++ <0 0 11 &gic 0 168 4>,
++ <0 0 12 &gic 0 169 4>;
++
++ motherboard {
++ model = "V2M-Juno";
++ arm,hbi = <0x252>;
++ arm,vexpress,site = <0>;
++ arm,v2m-memory-map = "rs1";
++ compatible = "arm,vexpress,v2p-p1", "simple-bus";
++ #address-cells = <2>; /* SMB chipselect number and offset */
++ #size-cells = <1>;
++ #interrupt-cells = <1>;
++ ranges;
++
++ usb@5,00000000 {
++ compatible = "nxp,usb-isp1763";
++ reg = <5 0x00000000 0x20000>;
++ bus-width = <16>;
++ interrupts = <4>;
++ };
++
++ ethernet@2,00000000 {
++ compatible = "smsc,lan9118", "smsc,lan9115";
++ reg = <2 0x00000000 0x10000>;
++ interrupts = <3>;
++ phy-mode = "mii";
++ reg-io-width = <4>;
++ smsc,irq-active-high;
++ smsc,irq-push-pull;
++ clocks = <&mb_eth25mhz>;
++ vdd33a-supply = <&soc_fixed_3v3>; /* change this */
++ vddvario-supply = <&soc_fixed_3v3>; /* and this */
++ };
++
++ iofpga@3,00000000 {
++ compatible = "arm,amba-bus", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0 3 0 0x200000>;
++
++ kmi@060000 {
++ compatible = "arm,pl050", "arm,primecell";
++ reg = <0x060000 0x1000>;
++ interrupts = <8>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "KMIREFCLK", "apb_pclk";
++ };
++
++ kmi@070000 {
++ compatible = "arm,pl050", "arm,primecell";
++ reg = <0x070000 0x1000>;
++ interrupts = <8>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "KMIREFCLK", "apb_pclk";
++ };
++
++ wdt@0f0000 {
++ compatible = "arm,sp805", "arm,primecell";
++ reg = <0x0f0000 0x10000>;
++ interrupts = <7>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "wdogclk", "apb_pclk";
++ };
++
++ v2m_timer01: timer@110000 {
++ compatible = "arm,sp804", "arm,primecell";
++ reg = <0x110000 0x10000>;
++ interrupts = <9>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "timclken1", "apb_pclk";
++ };
++
++ v2m_timer23: timer@120000 {
++ compatible = "arm,sp804", "arm,primecell";
++ reg = <0x120000 0x10000>;
++ interrupts = <9>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "timclken1", "apb_pclk";
++ };
++
++ rtc@170000 {
++ compatible = "arm,pl031", "arm,primecell";
++ reg = <0x170000 0x10000>;
++ interrupts = <0>;
++ clocks = <&soc_smc50mhz>;
++ clock-names = "apb_pclk";
++ };
++ };
++ };
++ };
++};
+diff -Nur linux-3.14.36/arch/arm64/boot/dts/Makefile linux-openelec/arch/arm64/boot/dts/Makefile
+--- linux-3.14.36/arch/arm64/boot/dts/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/boot/dts/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -1,5 +1,7 @@
+-dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
++dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb \
++ fvp-base-gicv2-psci.dtb
+ dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
++dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb
+
+ targets += dtbs
+ targets += $(dtb-y)
+diff -Nur linux-3.14.36/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts linux-openelec/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
+--- linux-3.14.36/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts 2015-05-06 12:05:43.000000000 -0500
+@@ -157,3 +157,5 @@
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+ };
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.36/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi linux-openelec/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+--- linux-3.14.36/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi 2015-05-06 12:05:43.000000000 -0500
+@@ -182,6 +182,9 @@
+ interrupts = <14>;
+ clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
+ clock-names = "clcdclk", "apb_pclk";
++ mode = "XVGA";
++ use_dma = <0>;
++ framebuffer = <0x18000000 0x00180000>;
+ };
+
+ virtio_block@0130000 {
+diff -Nur linux-3.14.36/arch/arm64/crypto/aes-ce-ccm-core.S linux-openelec/arch/arm64/crypto/aes-ce-ccm-core.S
+--- linux-3.14.36/arch/arm64/crypto/aes-ce-ccm-core.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/aes-ce-ccm-core.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,222 @@
++/*
++ * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
++ *
++ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++ .text
++ .arch armv8-a+crypto
++
++ /*
++ * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
++ * u32 *macp, u8 const rk[], u32 rounds);
++ */
++ENTRY(ce_aes_ccm_auth_data)
++ ldr w8, [x3] /* leftover from prev round? */
++ ld1 {v0.2d}, [x0] /* load mac */
++ cbz w8, 1f
++ sub w8, w8, #16
++ eor v1.16b, v1.16b, v1.16b
++0: ldrb w7, [x1], #1 /* get 1 byte of input */
++ subs w2, w2, #1
++ add w8, w8, #1
++ ins v1.b[0], w7
++ ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
++ beq 8f /* out of input? */
++ cbnz w8, 0b
++ eor v0.16b, v0.16b, v1.16b
++1: ld1 {v3.2d}, [x4] /* load first round key */
++ prfm pldl1strm, [x1]
++ cmp w5, #12 /* which key size? */
++ add x6, x4, #16
++ sub w7, w5, #2 /* modified # of rounds */
++ bmi 2f
++ bne 5f
++ mov v5.16b, v3.16b
++ b 4f
++2: mov v4.16b, v3.16b
++ ld1 {v5.2d}, [x6], #16 /* load 2nd round key */
++3: aese v0.16b, v4.16b
++ aesmc v0.16b, v0.16b
++4: ld1 {v3.2d}, [x6], #16 /* load next round key */
++ aese v0.16b, v5.16b
++ aesmc v0.16b, v0.16b
++5: ld1 {v4.2d}, [x6], #16 /* load next round key */
++ subs w7, w7, #3
++ aese v0.16b, v3.16b
++ aesmc v0.16b, v0.16b
++ ld1 {v5.2d}, [x6], #16 /* load next round key */
++ bpl 3b
++ aese v0.16b, v4.16b
++ subs w2, w2, #16 /* last data? */
++ eor v0.16b, v0.16b, v5.16b /* final round */
++ bmi 6f
++ ld1 {v1.16b}, [x1], #16 /* load next input block */
++ eor v0.16b, v0.16b, v1.16b /* xor with mac */
++ bne 1b
++6: st1 {v0.2d}, [x0] /* store mac */
++ beq 10f
++ adds w2, w2, #16
++ beq 10f
++ mov w8, w2
++7: ldrb w7, [x1], #1
++ umov w6, v0.b[0]
++ eor w6, w6, w7
++ strb w6, [x0], #1
++ subs w2, w2, #1
++ beq 10f
++ ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
++ b 7b
++8: mov w7, w8
++ add w8, w8, #16
++9: ext v1.16b, v1.16b, v1.16b, #1
++ adds w7, w7, #1
++ bne 9b
++ eor v0.16b, v0.16b, v1.16b
++ st1 {v0.2d}, [x0]
++10: str w8, [x3]
++ ret
++ENDPROC(ce_aes_ccm_auth_data)
++
++ /*
++ * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
++ * u32 rounds);
++ */
++ENTRY(ce_aes_ccm_final)
++ ld1 {v3.2d}, [x2], #16 /* load first round key */
++ ld1 {v0.2d}, [x0] /* load mac */
++ cmp w3, #12 /* which key size? */
++ sub w3, w3, #2 /* modified # of rounds */
++ ld1 {v1.2d}, [x1] /* load 1st ctriv */
++ bmi 0f
++ bne 3f
++ mov v5.16b, v3.16b
++ b 2f
++0: mov v4.16b, v3.16b
++1: ld1 {v5.2d}, [x2], #16 /* load next round key */
++ aese v0.16b, v4.16b
++ aese v1.16b, v4.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++2: ld1 {v3.2d}, [x2], #16 /* load next round key */
++ aese v0.16b, v5.16b
++ aese v1.16b, v5.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++3: ld1 {v4.2d}, [x2], #16 /* load next round key */
++ subs w3, w3, #3
++ aese v0.16b, v3.16b
++ aese v1.16b, v3.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++ bpl 1b
++ aese v0.16b, v4.16b
++ aese v1.16b, v4.16b
++ /* final round key cancels out */
++ eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
++ st1 {v0.2d}, [x0] /* store result */
++ ret
++ENDPROC(ce_aes_ccm_final)
++
++ .macro aes_ccm_do_crypt,enc
++ ldr x8, [x6, #8] /* load lower ctr */
++ ld1 {v0.2d}, [x5] /* load mac */
++ rev x8, x8 /* keep swabbed ctr in reg */
++0: /* outer loop */
++ ld1 {v1.1d}, [x6] /* load upper ctr */
++ prfm pldl1strm, [x1]
++ add x8, x8, #1
++ rev x9, x8
++ cmp w4, #12 /* which key size? */
++ sub w7, w4, #2 /* get modified # of rounds */
++ ins v1.d[1], x9 /* no carry in lower ctr */
++ ld1 {v3.2d}, [x3] /* load first round key */
++ add x10, x3, #16
++ bmi 1f
++ bne 4f
++ mov v5.16b, v3.16b
++ b 3f
++1: mov v4.16b, v3.16b
++ ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
++2: /* inner loop: 3 rounds, 2x interleaved */
++ aese v0.16b, v4.16b
++ aese v1.16b, v4.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++3: ld1 {v3.2d}, [x10], #16 /* load next round key */
++ aese v0.16b, v5.16b
++ aese v1.16b, v5.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++4: ld1 {v4.2d}, [x10], #16 /* load next round key */
++ subs w7, w7, #3
++ aese v0.16b, v3.16b
++ aese v1.16b, v3.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++ ld1 {v5.2d}, [x10], #16 /* load next round key */
++ bpl 2b
++ aese v0.16b, v4.16b
++ aese v1.16b, v4.16b
++ subs w2, w2, #16
++ bmi 6f /* partial block? */
++ ld1 {v2.16b}, [x1], #16 /* load next input block */
++ .if \enc == 1
++ eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
++ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
++ .else
++ eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */
++ eor v1.16b, v2.16b, v5.16b /* final round enc */
++ .endif
++ eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
++ st1 {v1.16b}, [x0], #16 /* write output block */
++ bne 0b
++ rev x8, x8
++ st1 {v0.2d}, [x5] /* store mac */
++ str x8, [x6, #8] /* store lsb end of ctr (BE) */
++5: ret
++
++6: eor v0.16b, v0.16b, v5.16b /* final round mac */
++ eor v1.16b, v1.16b, v5.16b /* final round enc */
++ st1 {v0.2d}, [x5] /* store mac */
++ add w2, w2, #16 /* process partial tail block */
++7: ldrb w9, [x1], #1 /* get 1 byte of input */
++ umov w6, v1.b[0] /* get top crypted ctr byte */
++ umov w7, v0.b[0] /* get top mac byte */
++ .if \enc == 1
++ eor w7, w7, w9
++ eor w9, w9, w6
++ .else
++ eor w9, w9, w6
++ eor w7, w7, w9
++ .endif
++ strb w9, [x0], #1 /* store out byte */
++ strb w7, [x5], #1 /* store mac byte */
++ subs w2, w2, #1
++ beq 5b
++ ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
++ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
++ b 7b
++ .endm
++
++ /*
++ * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
++ * u8 const rk[], u32 rounds, u8 mac[],
++ * u8 ctr[]);
++ * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
++ * u8 const rk[], u32 rounds, u8 mac[],
++ * u8 ctr[]);
++ */
++ENTRY(ce_aes_ccm_encrypt)
++ aes_ccm_do_crypt 1
++ENDPROC(ce_aes_ccm_encrypt)
++
++ENTRY(ce_aes_ccm_decrypt)
++ aes_ccm_do_crypt 0
++ENDPROC(ce_aes_ccm_decrypt)
+diff -Nur linux-3.14.36/arch/arm64/crypto/aes-ce-ccm-glue.c linux-openelec/arch/arm64/crypto/aes-ce-ccm-glue.c
+--- linux-3.14.36/arch/arm64/crypto/aes-ce-ccm-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/aes-ce-ccm-glue.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,297 @@
++/*
++ * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
++ *
++ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/unaligned.h>
++#include <crypto/aes.h>
++#include <crypto/algapi.h>
++#include <crypto/scatterwalk.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++static int num_rounds(struct crypto_aes_ctx *ctx)
++{
++ /*
++ * # of rounds specified by AES:
++ * 128 bit key 10 rounds
++ * 192 bit key 12 rounds
++ * 256 bit key 14 rounds
++ * => n byte key => 6 + (n/4) rounds
++ */
++ return 6 + ctx->key_length / 4;
++}
++
++asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
++ u32 *macp, u32 const rk[], u32 rounds);
++
++asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
++ u32 const rk[], u32 rounds, u8 mac[],
++ u8 ctr[]);
++
++asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
++ u32 const rk[], u32 rounds, u8 mac[],
++ u8 ctr[]);
++
++asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
++ u32 rounds);
++
++static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
++ unsigned int key_len)
++{
++ struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
++ int ret;
++
++ ret = crypto_aes_expand_key(ctx, in_key, key_len);
++ if (!ret)
++ return 0;
++
++ tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
++ return -EINVAL;
++}
++
++static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
++{
++ if ((authsize & 1) || authsize < 4)
++ return -EINVAL;
++ return 0;
++}
++
++static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ __be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
++ u32 l = req->iv[0] + 1;
++
++ /* verify that CCM dimension 'L' is set correctly in the IV */
++ if (l < 2 || l > 8)
++ return -EINVAL;
++
++ /* verify that msglen can in fact be represented in L bytes */
++ if (l < 4 && msglen >> (8 * l))
++ return -EOVERFLOW;
++
++ /*
++ * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
++ * uses a u32 type to represent msglen so the top 4 bytes are always 0.
++ */
++ n[0] = 0;
++ n[1] = cpu_to_be32(msglen);
++
++ memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);
++
++ /*
++ * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
++ * - bits 0..2 : max # of bytes required to represent msglen, minus 1
++ * (already set by caller)
++ * - bits 3..5 : size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
++ * - bit 6 : indicates presence of authenticate-only data
++ */
++ maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
++ if (req->assoclen)
++ maciv[0] |= 0x40;
++
++ memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
++ return 0;
++}
++
++static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
++ struct __packed { __be16 l; __be32 h; u16 len; } ltag;
++ struct scatter_walk walk;
++ u32 len = req->assoclen;
++ u32 macp = 0;
++
++ /* prepend the AAD with a length tag */
++ if (len < 0xff00) {
++ ltag.l = cpu_to_be16(len);
++ ltag.len = 2;
++ } else {
++ ltag.l = cpu_to_be16(0xfffe);
++ put_unaligned_be32(len, &ltag.h);
++ ltag.len = 6;
++ }
++
++ ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
++ num_rounds(ctx));
++ scatterwalk_start(&walk, req->assoc);
++
++ do {
++ u32 n = scatterwalk_clamp(&walk, len);
++ u8 *p;
++
++ if (!n) {
++ scatterwalk_start(&walk, sg_next(walk.sg));
++ n = scatterwalk_clamp(&walk, len);
++ }
++ p = scatterwalk_map(&walk);
++ ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc,
++ num_rounds(ctx));
++ len -= n;
++
++ scatterwalk_unmap(p);
++ scatterwalk_advance(&walk, n);
++ scatterwalk_done(&walk, 0, len);
++ } while (len);
++}
++
++static int ccm_encrypt(struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
++ struct blkcipher_desc desc = { .info = req->iv };
++ struct blkcipher_walk walk;
++ u8 __aligned(8) mac[AES_BLOCK_SIZE];
++ u8 buf[AES_BLOCK_SIZE];
++ u32 len = req->cryptlen;
++ int err;
++
++ err = ccm_init_mac(req, mac, len);
++ if (err)
++ return err;
++
++ kernel_neon_begin_partial(6);
++
++ if (req->assoclen)
++ ccm_calculate_auth_mac(req, mac);
++
++ /* preserve the original iv for the final round */
++ memcpy(buf, req->iv, AES_BLOCK_SIZE);
++
++ blkcipher_walk_init(&walk, req->dst, req->src, len);
++ err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
++ AES_BLOCK_SIZE);
++
++ while (walk.nbytes) {
++ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
++
++ if (walk.nbytes == len)
++ tail = 0;
++
++ ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ walk.nbytes - tail, ctx->key_enc,
++ num_rounds(ctx), mac, walk.iv);
++
++ len -= walk.nbytes - tail;
++ err = blkcipher_walk_done(&desc, &walk, tail);
++ }
++ if (!err)
++ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
++
++ kernel_neon_end();
++
++ if (err)
++ return err;
++
++ /* copy authtag to end of dst */
++ scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
++ crypto_aead_authsize(aead), 1);
++
++ return 0;
++}
++
++static int ccm_decrypt(struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int authsize = crypto_aead_authsize(aead);
++ struct blkcipher_desc desc = { .info = req->iv };
++ struct blkcipher_walk walk;
++ u8 __aligned(8) mac[AES_BLOCK_SIZE];
++ u8 buf[AES_BLOCK_SIZE];
++ u32 len = req->cryptlen - authsize;
++ int err;
++
++ err = ccm_init_mac(req, mac, len);
++ if (err)
++ return err;
++
++ kernel_neon_begin_partial(6);
++
++ if (req->assoclen)
++ ccm_calculate_auth_mac(req, mac);
++
++ /* preserve the original iv for the final round */
++ memcpy(buf, req->iv, AES_BLOCK_SIZE);
++
++ blkcipher_walk_init(&walk, req->dst, req->src, len);
++ err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
++ AES_BLOCK_SIZE);
++
++ while (walk.nbytes) {
++ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
++
++ if (walk.nbytes == len)
++ tail = 0;
++
++ ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ walk.nbytes - tail, ctx->key_enc,
++ num_rounds(ctx), mac, walk.iv);
++
++ len -= walk.nbytes - tail;
++ err = blkcipher_walk_done(&desc, &walk, tail);
++ }
++ if (!err)
++ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
++
++ kernel_neon_end();
++
++ if (err)
++ return err;
++
++ /* compare calculated auth tag with the stored one */
++ scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
++ authsize, 0);
++
++ if (memcmp(mac, buf, authsize))
++ return -EBADMSG;
++ return 0;
++}
++
++static struct crypto_alg ccm_aes_alg = {
++ .cra_name = "ccm(aes)",
++ .cra_driver_name = "ccm-aes-ce",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_AEAD,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_aead_type,
++ .cra_module = THIS_MODULE,
++ .cra_aead = {
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = AES_BLOCK_SIZE,
++ .setkey = ccm_setkey,
++ .setauthsize = ccm_setauthsize,
++ .encrypt = ccm_encrypt,
++ .decrypt = ccm_decrypt,
++ }
++};
++
++static int __init aes_mod_init(void)
++{
++ if (!(elf_hwcap & HWCAP_AES))
++ return -ENODEV;
++ return crypto_register_alg(&ccm_aes_alg);
++}
++
++static void __exit aes_mod_exit(void)
++{
++ crypto_unregister_alg(&ccm_aes_alg);
++}
++
++module_init(aes_mod_init);
++module_exit(aes_mod_exit);
++
++MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("ccm(aes)");
+diff -Nur linux-3.14.36/arch/arm64/crypto/aes-ce-cipher.c linux-openelec/arch/arm64/crypto/aes-ce-cipher.c
+--- linux-3.14.36/arch/arm64/crypto/aes-ce-cipher.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/aes-ce-cipher.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,155 @@
++/*
++ * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
++ *
++ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <crypto/aes.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++struct aes_block {
++ u8 b[AES_BLOCK_SIZE];
++};
++
++static int num_rounds(struct crypto_aes_ctx *ctx)
++{
++ /*
++ * # of rounds specified by AES:
++ * 128 bit key 10 rounds
++ * 192 bit key 12 rounds
++ * 256 bit key 14 rounds
++ * => n byte key => 6 + (n/4) rounds
++ */
++ return 6 + ctx->key_length / 4;
++}
++
++static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
++{
++ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct aes_block *out = (struct aes_block *)dst;
++ struct aes_block const *in = (struct aes_block *)src;
++ void *dummy0;
++ int dummy1;
++
++ kernel_neon_begin_partial(4);
++
++ __asm__(" ld1 {v0.16b}, %[in] ;"
++ " ld1 {v1.2d}, [%[key]], #16 ;"
++ " cmp %w[rounds], #10 ;"
++ " bmi 0f ;"
++ " bne 3f ;"
++ " mov v3.16b, v1.16b ;"
++ " b 2f ;"
++ "0: mov v2.16b, v1.16b ;"
++ " ld1 {v3.2d}, [%[key]], #16 ;"
++ "1: aese v0.16b, v2.16b ;"
++ " aesmc v0.16b, v0.16b ;"
++ "2: ld1 {v1.2d}, [%[key]], #16 ;"
++ " aese v0.16b, v3.16b ;"
++ " aesmc v0.16b, v0.16b ;"
++ "3: ld1 {v2.2d}, [%[key]], #16 ;"
++ " subs %w[rounds], %w[rounds], #3 ;"
++ " aese v0.16b, v1.16b ;"
++ " aesmc v0.16b, v0.16b ;"
++ " ld1 {v3.2d}, [%[key]], #16 ;"
++ " bpl 1b ;"
++ " aese v0.16b, v2.16b ;"
++ " eor v0.16b, v0.16b, v3.16b ;"
++ " st1 {v0.16b}, %[out] ;"
++
++ : [out] "=Q"(*out),
++ [key] "=r"(dummy0),
++ [rounds] "=r"(dummy1)
++ : [in] "Q"(*in),
++ "1"(ctx->key_enc),
++ "2"(num_rounds(ctx) - 2)
++ : "cc");
++
++ kernel_neon_end();
++}
++
++static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
++{
++ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct aes_block *out = (struct aes_block *)dst;
++ struct aes_block const *in = (struct aes_block *)src;
++ void *dummy0;
++ int dummy1;
++
++ kernel_neon_begin_partial(4);
++
++ __asm__(" ld1 {v0.16b}, %[in] ;"
++ " ld1 {v1.2d}, [%[key]], #16 ;"
++ " cmp %w[rounds], #10 ;"
++ " bmi 0f ;"
++ " bne 3f ;"
++ " mov v3.16b, v1.16b ;"
++ " b 2f ;"
++ "0: mov v2.16b, v1.16b ;"
++ " ld1 {v3.2d}, [%[key]], #16 ;"
++ "1: aesd v0.16b, v2.16b ;"
++ " aesimc v0.16b, v0.16b ;"
++ "2: ld1 {v1.2d}, [%[key]], #16 ;"
++ " aesd v0.16b, v3.16b ;"
++ " aesimc v0.16b, v0.16b ;"
++ "3: ld1 {v2.2d}, [%[key]], #16 ;"
++ " subs %w[rounds], %w[rounds], #3 ;"
++ " aesd v0.16b, v1.16b ;"
++ " aesimc v0.16b, v0.16b ;"
++ " ld1 {v3.2d}, [%[key]], #16 ;"
++ " bpl 1b ;"
++ " aesd v0.16b, v2.16b ;"
++ " eor v0.16b, v0.16b, v3.16b ;"
++ " st1 {v0.16b}, %[out] ;"
++
++ : [out] "=Q"(*out),
++ [key] "=r"(dummy0),
++ [rounds] "=r"(dummy1)
++ : [in] "Q"(*in),
++ "1"(ctx->key_dec),
++ "2"(num_rounds(ctx) - 2)
++ : "cc");
++
++ kernel_neon_end();
++}
++
++static struct crypto_alg aes_alg = {
++ .cra_name = "aes",
++ .cra_driver_name = "aes-ce",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_module = THIS_MODULE,
++ .cra_cipher = {
++ .cia_min_keysize = AES_MIN_KEY_SIZE,
++ .cia_max_keysize = AES_MAX_KEY_SIZE,
++ .cia_setkey = crypto_aes_set_key,
++ .cia_encrypt = aes_cipher_encrypt,
++ .cia_decrypt = aes_cipher_decrypt
++ }
++};
++
++static int __init aes_mod_init(void)
++{
++ return crypto_register_alg(&aes_alg);
++}
++
++static void __exit aes_mod_exit(void)
++{
++ crypto_unregister_alg(&aes_alg);
++}
++
++module_cpu_feature_match(AES, aes_mod_init);
++module_exit(aes_mod_exit);
+diff -Nur linux-3.14.36/arch/arm64/crypto/aes-ce.S linux-openelec/arch/arm64/crypto/aes-ce.S
+--- linux-3.14.36/arch/arm64/crypto/aes-ce.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/aes-ce.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,133 @@
++/*
++ * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with
++ * Crypto Extensions
++ *
++ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++#define AES_ENTRY(func) ENTRY(ce_ ## func)
++#define AES_ENDPROC(func) ENDPROC(ce_ ## func)
++
++ .arch armv8-a+crypto
++
++ /* preload all round keys */
++ .macro load_round_keys, rounds, rk
++ cmp \rounds, #12
++ blo 2222f /* 128 bits */
++ beq 1111f /* 192 bits */
++ ld1 {v17.16b-v18.16b}, [\rk], #32
++1111: ld1 {v19.16b-v20.16b}, [\rk], #32
++2222: ld1 {v21.16b-v24.16b}, [\rk], #64
++ ld1 {v25.16b-v28.16b}, [\rk], #64
++ ld1 {v29.16b-v31.16b}, [\rk]
++ .endm
++
++ /* prepare for encryption with key in rk[] */
++ .macro enc_prepare, rounds, rk, ignore
++ load_round_keys \rounds, \rk
++ .endm
++
++ /* prepare for encryption (again) but with new key in rk[] */
++ .macro enc_switch_key, rounds, rk, ignore
++ load_round_keys \rounds, \rk
++ .endm
++
++ /* prepare for decryption with key in rk[] */
++ .macro dec_prepare, rounds, rk, ignore
++ load_round_keys \rounds, \rk
++ .endm
++
++ .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3
++ aes\de \i0\().16b, \k\().16b
++ .ifnb \i1
++ aes\de \i1\().16b, \k\().16b
++ .ifnb \i3
++ aes\de \i2\().16b, \k\().16b
++ aes\de \i3\().16b, \k\().16b
++ .endif
++ .endif
++ aes\mc \i0\().16b, \i0\().16b
++ .ifnb \i1
++ aes\mc \i1\().16b, \i1\().16b
++ .ifnb \i3
++ aes\mc \i2\().16b, \i2\().16b
++ aes\mc \i3\().16b, \i3\().16b
++ .endif
++ .endif
++ .endm
++
++ /* up to 4 interleaved encryption rounds with the same round key */
++ .macro round_Nx, enc, k, i0, i1, i2, i3
++ .ifc \enc, e
++ do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3
++ .else
++ do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3
++ .endif
++ .endm
++
++ /* up to 4 interleaved final rounds */
++ .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3
++ aes\de \i0\().16b, \k\().16b
++ .ifnb \i1
++ aes\de \i1\().16b, \k\().16b
++ .ifnb \i3
++ aes\de \i2\().16b, \k\().16b
++ aes\de \i3\().16b, \k\().16b
++ .endif
++ .endif
++ eor \i0\().16b, \i0\().16b, \k2\().16b
++ .ifnb \i1
++ eor \i1\().16b, \i1\().16b, \k2\().16b
++ .ifnb \i3
++ eor \i2\().16b, \i2\().16b, \k2\().16b
++ eor \i3\().16b, \i3\().16b, \k2\().16b
++ .endif
++ .endif
++ .endm
++
++ /* up to 4 interleaved blocks */
++ .macro do_block_Nx, enc, rounds, i0, i1, i2, i3
++ cmp \rounds, #12
++ blo 2222f /* 128 bits */
++ beq 1111f /* 192 bits */
++ round_Nx \enc, v17, \i0, \i1, \i2, \i3
++ round_Nx \enc, v18, \i0, \i1, \i2, \i3
++1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3
++ round_Nx \enc, v20, \i0, \i1, \i2, \i3
++2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
++ round_Nx \enc, \key, \i0, \i1, \i2, \i3
++ .endr
++ fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3
++ .endm
++
++ .macro encrypt_block, in, rounds, t0, t1, t2
++ do_block_Nx e, \rounds, \in
++ .endm
++
++ .macro encrypt_block2x, i0, i1, rounds, t0, t1, t2
++ do_block_Nx e, \rounds, \i0, \i1
++ .endm
++
++ .macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
++ do_block_Nx e, \rounds, \i0, \i1, \i2, \i3
++ .endm
++
++ .macro decrypt_block, in, rounds, t0, t1, t2
++ do_block_Nx d, \rounds, \in
++ .endm
++
++ .macro decrypt_block2x, i0, i1, rounds, t0, t1, t2
++ do_block_Nx d, \rounds, \i0, \i1
++ .endm
++
++ .macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
++ do_block_Nx d, \rounds, \i0, \i1, \i2, \i3
++ .endm
++
++#include "aes-modes.S"
+diff -Nur linux-3.14.36/arch/arm64/crypto/aes-glue.c linux-openelec/arch/arm64/crypto/aes-glue.c
+--- linux-3.14.36/arch/arm64/crypto/aes-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/aes-glue.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,446 @@
++/*
++ * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
++ *
++ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/hwcap.h>
++#include <crypto/aes.h>
++#include <crypto/ablk_helper.h>
++#include <crypto/algapi.h>
++#include <linux/module.h>
++#include <linux/cpufeature.h>
++
++#ifdef USE_V8_CRYPTO_EXTENSIONS
++#define MODE "ce"
++#define PRIO 300
++#define aes_ecb_encrypt ce_aes_ecb_encrypt
++#define aes_ecb_decrypt ce_aes_ecb_decrypt
++#define aes_cbc_encrypt ce_aes_cbc_encrypt
++#define aes_cbc_decrypt ce_aes_cbc_decrypt
++#define aes_ctr_encrypt ce_aes_ctr_encrypt
++#define aes_xts_encrypt ce_aes_xts_encrypt
++#define aes_xts_decrypt ce_aes_xts_decrypt
++MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
++#else
++#define MODE "neon"
++#define PRIO 200
++#define aes_ecb_encrypt neon_aes_ecb_encrypt
++#define aes_ecb_decrypt neon_aes_ecb_decrypt
++#define aes_cbc_encrypt neon_aes_cbc_encrypt
++#define aes_cbc_decrypt neon_aes_cbc_decrypt
++#define aes_ctr_encrypt neon_aes_ctr_encrypt
++#define aes_xts_encrypt neon_aes_xts_encrypt
++#define aes_xts_decrypt neon_aes_xts_decrypt
++MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
++MODULE_ALIAS("ecb(aes)");
++MODULE_ALIAS("cbc(aes)");
++MODULE_ALIAS("ctr(aes)");
++MODULE_ALIAS("xts(aes)");
++#endif
++
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++/* defined in aes-modes.S */
++asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, int first);
++asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, int first);
++
++asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, u8 iv[], int first);
++asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, u8 iv[], int first);
++
++asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, u8 ctr[], int first);
++
++asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
++ int rounds, int blocks, u8 const rk2[], u8 iv[],
++ int first);
++asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
++ int rounds, int blocks, u8 const rk2[], u8 iv[],
++ int first);
++
++struct crypto_aes_xts_ctx {
++ struct crypto_aes_ctx key1;
++ struct crypto_aes_ctx __aligned(8) key2;
++};
++
++static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
++ unsigned int key_len)
++{
++ struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
++ int ret;
++
++ ret = crypto_aes_expand_key(&ctx->key1, in_key, key_len / 2);
++ if (!ret)
++ ret = crypto_aes_expand_key(&ctx->key2, &in_key[key_len / 2],
++ key_len / 2);
++ if (!ret)
++ return 0;
++
++ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
++ return -EINVAL;
++}
++
++static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_enc, rounds, blocks, first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++ return err;
++}
++
++static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_dec, rounds, blocks, first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++ return err;
++}
++
++static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
++ first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++ return err;
++}
++
++static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
++ first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++ return err;
++}
++
++static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
++
++ first = 1;
++ kernel_neon_begin();
++ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
++ aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
++ first);
++ first = 0;
++ nbytes -= blocks * AES_BLOCK_SIZE;
++ if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
++ break;
++ err = blkcipher_walk_done(desc, &walk,
++ walk.nbytes % AES_BLOCK_SIZE);
++ }
++ if (nbytes) {
++ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
++ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
++ u8 __aligned(8) tail[AES_BLOCK_SIZE];
++
++ /*
++ * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
++ * to tell aes_ctr_encrypt() to only read half a block.
++ */
++ blocks = (nbytes <= 8) ? -1 : 1;
++
++ aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
++ blocks, walk.iv, first);
++ memcpy(tdst, tail, nbytes);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++
++ return err;
++}
++
++static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key1.key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key1.key_enc, rounds, blocks,
++ (u8 *)ctx->key2.key_enc, walk.iv, first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++
++ return err;
++}
++
++static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key1.key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key1.key_dec, rounds, blocks,
++ (u8 *)ctx->key2.key_enc, walk.iv, first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++
++ return err;
++}
++
++static struct crypto_alg aes_algs[] = { {
++ .cra_name = "__ecb-aes-" MODE,
++ .cra_driver_name = "__driver-ecb-aes-" MODE,
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_blkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_blkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = crypto_aes_set_key,
++ .encrypt = ecb_encrypt,
++ .decrypt = ecb_decrypt,
++ },
++}, {
++ .cra_name = "__cbc-aes-" MODE,
++ .cra_driver_name = "__driver-cbc-aes-" MODE,
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_blkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_blkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = crypto_aes_set_key,
++ .encrypt = cbc_encrypt,
++ .decrypt = cbc_decrypt,
++ },
++}, {
++ .cra_name = "__ctr-aes-" MODE,
++ .cra_driver_name = "__driver-ctr-aes-" MODE,
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_blkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_blkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = crypto_aes_set_key,
++ .encrypt = ctr_encrypt,
++ .decrypt = ctr_encrypt,
++ },
++}, {
++ .cra_name = "__xts-aes-" MODE,
++ .cra_driver_name = "__driver-xts-aes-" MODE,
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_blkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_blkcipher = {
++ .min_keysize = 2 * AES_MIN_KEY_SIZE,
++ .max_keysize = 2 * AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = xts_set_key,
++ .encrypt = xts_encrypt,
++ .decrypt = xts_decrypt,
++ },
++}, {
++ .cra_name = "ecb(aes)",
++ .cra_driver_name = "ecb-aes-" MODE,
++ .cra_priority = PRIO,
++ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct async_helper_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_ablkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_init = ablk_init,
++ .cra_exit = ablk_exit,
++ .cra_ablkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablk_set_key,
++ .encrypt = ablk_encrypt,
++ .decrypt = ablk_decrypt,
++ }
++}, {
++ .cra_name = "cbc(aes)",
++ .cra_driver_name = "cbc-aes-" MODE,
++ .cra_priority = PRIO,
++ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct async_helper_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_ablkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_init = ablk_init,
++ .cra_exit = ablk_exit,
++ .cra_ablkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablk_set_key,
++ .encrypt = ablk_encrypt,
++ .decrypt = ablk_decrypt,
++ }
++}, {
++ .cra_name = "ctr(aes)",
++ .cra_driver_name = "ctr-aes-" MODE,
++ .cra_priority = PRIO,
++ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct async_helper_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_ablkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_init = ablk_init,
++ .cra_exit = ablk_exit,
++ .cra_ablkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablk_set_key,
++ .encrypt = ablk_encrypt,
++ .decrypt = ablk_decrypt,
++ }
++}, {
++ .cra_name = "xts(aes)",
++ .cra_driver_name = "xts-aes-" MODE,
++ .cra_priority = PRIO,
++ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct async_helper_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_ablkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_init = ablk_init,
++ .cra_exit = ablk_exit,
++ .cra_ablkcipher = {
++ .min_keysize = 2 * AES_MIN_KEY_SIZE,
++ .max_keysize = 2 * AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablk_set_key,
++ .encrypt = ablk_encrypt,
++ .decrypt = ablk_decrypt,
++ }
++} };
++
++static int __init aes_init(void)
++{
++ return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
++}
++
++static void __exit aes_exit(void)
++{
++ crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
++}
++
++#ifdef USE_V8_CRYPTO_EXTENSIONS
++module_cpu_feature_match(AES, aes_init);
++#else
++module_init(aes_init);
++#endif
++module_exit(aes_exit);
+diff -Nur linux-3.14.36/arch/arm64/crypto/aes-modes.S linux-openelec/arch/arm64/crypto/aes-modes.S
+--- linux-3.14.36/arch/arm64/crypto/aes-modes.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/aes-modes.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,532 @@
++/*
++ * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
++ *
++ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* included by aes-ce.S and aes-neon.S */
++
++ .text
++ .align 4
++
++/*
++ * There are several ways to instantiate this code:
++ * - no interleave, all inline
++ * - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2)
++ * - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE)
++ * - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4)
++ * - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE)
++ *
++ * Macros imported by this code:
++ * - enc_prepare - setup NEON registers for encryption
++ * - dec_prepare - setup NEON registers for decryption
++ * - enc_switch_key - change to new key after having prepared for encryption
++ * - encrypt_block - encrypt a single block
++ * - decrypt block - decrypt a single block
++ * - encrypt_block2x - encrypt 2 blocks in parallel (if INTERLEAVE == 2)
++ * - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2)
++ * - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4)
++ * - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4)
++ */
++
++#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE)
++#define FRAME_PUSH stp x29, x30, [sp,#-16]! ; mov x29, sp
++#define FRAME_POP ldp x29, x30, [sp],#16
++
++#if INTERLEAVE == 2
++
++aes_encrypt_block2x:
++ encrypt_block2x v0, v1, w3, x2, x6, w7
++ ret
++ENDPROC(aes_encrypt_block2x)
++
++aes_decrypt_block2x:
++ decrypt_block2x v0, v1, w3, x2, x6, w7
++ ret
++ENDPROC(aes_decrypt_block2x)
++
++#elif INTERLEAVE == 4
++
++aes_encrypt_block4x:
++ encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
++ ret
++ENDPROC(aes_encrypt_block4x)
++
++aes_decrypt_block4x:
++ decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
++ ret
++ENDPROC(aes_decrypt_block4x)
++
++#else
++#error INTERLEAVE should equal 2 or 4
++#endif
++
++ .macro do_encrypt_block2x
++ bl aes_encrypt_block2x
++ .endm
++
++ .macro do_decrypt_block2x
++ bl aes_decrypt_block2x
++ .endm
++
++ .macro do_encrypt_block4x
++ bl aes_encrypt_block4x
++ .endm
++
++ .macro do_decrypt_block4x
++ bl aes_decrypt_block4x
++ .endm
++
++#else
++#define FRAME_PUSH
++#define FRAME_POP
++
++ .macro do_encrypt_block2x
++ encrypt_block2x v0, v1, w3, x2, x6, w7
++ .endm
++
++ .macro do_decrypt_block2x
++ decrypt_block2x v0, v1, w3, x2, x6, w7
++ .endm
++
++ .macro do_encrypt_block4x
++ encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
++ .endm
++
++ .macro do_decrypt_block4x
++ decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
++ .endm
++
++#endif
++
++ /*
++ * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, int first)
++ * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, int first)
++ */
++
++AES_ENTRY(aes_ecb_encrypt)
++ FRAME_PUSH
++ cbz w5, .LecbencloopNx
++
++ enc_prepare w3, x2, x5
++
++.LecbencloopNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lecbenc1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
++ do_encrypt_block2x
++ st1 {v0.16b-v1.16b}, [x0], #32
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
++ do_encrypt_block4x
++ st1 {v0.16b-v3.16b}, [x0], #64
++#endif
++ b .LecbencloopNx
++.Lecbenc1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lecbencout
++#endif
++.Lecbencloop:
++ ld1 {v0.16b}, [x1], #16 /* get next pt block */
++ encrypt_block v0, w3, x2, x5, w6
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ bne .Lecbencloop
++.Lecbencout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_ecb_encrypt)
++
++
++AES_ENTRY(aes_ecb_decrypt)
++ FRAME_PUSH
++ cbz w5, .LecbdecloopNx
++
++ dec_prepare w3, x2, x5
++
++.LecbdecloopNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lecbdec1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
++ do_decrypt_block2x
++ st1 {v0.16b-v1.16b}, [x0], #32
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
++ do_decrypt_block4x
++ st1 {v0.16b-v3.16b}, [x0], #64
++#endif
++ b .LecbdecloopNx
++.Lecbdec1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lecbdecout
++#endif
++.Lecbdecloop:
++ ld1 {v0.16b}, [x1], #16 /* get next ct block */
++ decrypt_block v0, w3, x2, x5, w6
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ bne .Lecbdecloop
++.Lecbdecout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_ecb_decrypt)
++
++
++ /*
++ * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, u8 iv[], int first)
++ * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, u8 iv[], int first)
++ */
++
++AES_ENTRY(aes_cbc_encrypt)
++ cbz w6, .Lcbcencloop
++
++ ld1 {v0.16b}, [x5] /* get iv */
++ enc_prepare w3, x2, x5
++
++.Lcbcencloop:
++ ld1 {v1.16b}, [x1], #16 /* get next pt block */
++ eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
++ encrypt_block v0, w3, x2, x5, w6
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ bne .Lcbcencloop
++ ret
++AES_ENDPROC(aes_cbc_encrypt)
++
++
++AES_ENTRY(aes_cbc_decrypt)
++ FRAME_PUSH
++ cbz w6, .LcbcdecloopNx
++
++ ld1 {v7.16b}, [x5] /* get iv */
++ dec_prepare w3, x2, x5
++
++.LcbcdecloopNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lcbcdec1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
++ mov v2.16b, v0.16b
++ mov v3.16b, v1.16b
++ do_decrypt_block2x
++ eor v0.16b, v0.16b, v7.16b
++ eor v1.16b, v1.16b, v2.16b
++ mov v7.16b, v3.16b
++ st1 {v0.16b-v1.16b}, [x0], #32
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
++ mov v4.16b, v0.16b
++ mov v5.16b, v1.16b
++ mov v6.16b, v2.16b
++ do_decrypt_block4x
++ sub x1, x1, #16
++ eor v0.16b, v0.16b, v7.16b
++ eor v1.16b, v1.16b, v4.16b
++ ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */
++ eor v2.16b, v2.16b, v5.16b
++ eor v3.16b, v3.16b, v6.16b
++ st1 {v0.16b-v3.16b}, [x0], #64
++#endif
++ b .LcbcdecloopNx
++.Lcbcdec1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lcbcdecout
++#endif
++.Lcbcdecloop:
++ ld1 {v1.16b}, [x1], #16 /* get next ct block */
++ mov v0.16b, v1.16b /* ...and copy to v0 */
++ decrypt_block v0, w3, x2, x5, w6
++ eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
++ mov v7.16b, v1.16b /* ct is next iv */
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ bne .Lcbcdecloop
++.Lcbcdecout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_cbc_decrypt)
++
++
++ /*
++ * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, u8 ctr[], int first)
++ */
++
++AES_ENTRY(aes_ctr_encrypt)
++ FRAME_PUSH
++ cbnz w6, .Lctrfirst /* 1st time around? */
++ umov x5, v4.d[1] /* keep swabbed ctr in reg */
++ rev x5, x5
++#if INTERLEAVE >= 2
++ cmn w5, w4 /* 32 bit overflow? */
++ bcs .Lctrinc
++ add x5, x5, #1 /* increment BE ctr */
++ b .LctrincNx
++#else
++ b .Lctrinc
++#endif
++.Lctrfirst:
++ enc_prepare w3, x2, x6
++ ld1 {v4.16b}, [x5]
++ umov x5, v4.d[1] /* keep swabbed ctr in reg */
++ rev x5, x5
++#if INTERLEAVE >= 2
++ cmn w5, w4 /* 32 bit overflow? */
++ bcs .Lctrloop
++.LctrloopNx:
++ subs w4, w4, #INTERLEAVE
++ bmi .Lctr1x
++#if INTERLEAVE == 2
++ mov v0.8b, v4.8b
++ mov v1.8b, v4.8b
++ rev x7, x5
++ add x5, x5, #1
++ ins v0.d[1], x7
++ rev x7, x5
++ add x5, x5, #1
++ ins v1.d[1], x7
++ ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
++ do_encrypt_block2x
++ eor v0.16b, v0.16b, v2.16b
++ eor v1.16b, v1.16b, v3.16b
++ st1 {v0.16b-v1.16b}, [x0], #32
++#else
++ ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
++ dup v7.4s, w5
++ mov v0.16b, v4.16b
++ add v7.4s, v7.4s, v8.4s
++ mov v1.16b, v4.16b
++ rev32 v8.16b, v7.16b
++ mov v2.16b, v4.16b
++ mov v3.16b, v4.16b
++ mov v1.s[3], v8.s[0]
++ mov v2.s[3], v8.s[1]
++ mov v3.s[3], v8.s[2]
++ ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
++ do_encrypt_block4x
++ eor v0.16b, v5.16b, v0.16b
++ ld1 {v5.16b}, [x1], #16 /* get 1 input block */
++ eor v1.16b, v6.16b, v1.16b
++ eor v2.16b, v7.16b, v2.16b
++ eor v3.16b, v5.16b, v3.16b
++ st1 {v0.16b-v3.16b}, [x0], #64
++ add x5, x5, #INTERLEAVE
++#endif
++ cbz w4, .LctroutNx
++.LctrincNx:
++ rev x7, x5
++ ins v4.d[1], x7
++ b .LctrloopNx
++.LctroutNx:
++ sub x5, x5, #1
++ rev x7, x5
++ ins v4.d[1], x7
++ b .Lctrout
++.Lctr1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lctrout
++#endif
++.Lctrloop:
++ mov v0.16b, v4.16b
++ encrypt_block v0, w3, x2, x6, w7
++ subs w4, w4, #1
++ bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
++ ld1 {v3.16b}, [x1], #16
++ eor v3.16b, v0.16b, v3.16b
++ st1 {v3.16b}, [x0], #16
++ beq .Lctrout
++.Lctrinc:
++ adds x5, x5, #1 /* increment BE ctr */
++ rev x7, x5
++ ins v4.d[1], x7
++ bcc .Lctrloop /* no overflow? */
++ umov x7, v4.d[0] /* load upper word of ctr */
++ rev x7, x7 /* ... to handle the carry */
++ add x7, x7, #1
++ rev x7, x7
++ ins v4.d[0], x7
++ b .Lctrloop
++.Lctrhalfblock:
++ ld1 {v3.8b}, [x1]
++ eor v3.8b, v0.8b, v3.8b
++ st1 {v3.8b}, [x0]
++.Lctrout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_ctr_encrypt)
++ .ltorg
++
++
++ /*
++ * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
++ * int blocks, u8 const rk2[], u8 iv[], int first)
++ * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
++ * int blocks, u8 const rk2[], u8 iv[], int first)
++ */
++
++ .macro next_tweak, out, in, const, tmp
++ sshr \tmp\().2d, \in\().2d, #63
++ and \tmp\().16b, \tmp\().16b, \const\().16b
++ add \out\().2d, \in\().2d, \in\().2d
++ ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
++ eor \out\().16b, \out\().16b, \tmp\().16b
++ .endm
++
++.Lxts_mul_x:
++ .word 1, 0, 0x87, 0
++
++AES_ENTRY(aes_xts_encrypt)
++ FRAME_PUSH
++ cbz w7, .LxtsencloopNx
++
++ ld1 {v4.16b}, [x6]
++ enc_prepare w3, x5, x6
++ encrypt_block v4, w3, x5, x6, w7 /* first tweak */
++ enc_switch_key w3, x2, x6
++ ldr q7, .Lxts_mul_x
++ b .LxtsencNx
++
++.LxtsencloopNx:
++ ldr q7, .Lxts_mul_x
++ next_tweak v4, v4, v7, v8
++.LxtsencNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lxtsenc1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
++ next_tweak v5, v4, v7, v8
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ do_encrypt_block2x
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ st1 {v0.16b-v1.16b}, [x0], #32
++ cbz w4, .LxtsencoutNx
++ next_tweak v4, v5, v7, v8
++ b .LxtsencNx
++.LxtsencoutNx:
++ mov v4.16b, v5.16b
++ b .Lxtsencout
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
++ next_tweak v5, v4, v7, v8
++ eor v0.16b, v0.16b, v4.16b
++ next_tweak v6, v5, v7, v8
++ eor v1.16b, v1.16b, v5.16b
++ eor v2.16b, v2.16b, v6.16b
++ next_tweak v7, v6, v7, v8
++ eor v3.16b, v3.16b, v7.16b
++ do_encrypt_block4x
++ eor v3.16b, v3.16b, v7.16b
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ eor v2.16b, v2.16b, v6.16b
++ st1 {v0.16b-v3.16b}, [x0], #64
++ mov v4.16b, v7.16b
++ cbz w4, .Lxtsencout
++ b .LxtsencloopNx
++#endif
++.Lxtsenc1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lxtsencout
++#endif
++.Lxtsencloop:
++ ld1 {v1.16b}, [x1], #16
++ eor v0.16b, v1.16b, v4.16b
++ encrypt_block v0, w3, x2, x6, w7
++ eor v0.16b, v0.16b, v4.16b
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ beq .Lxtsencout
++ next_tweak v4, v4, v7, v8
++ b .Lxtsencloop
++.Lxtsencout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_xts_encrypt)
++
++
++AES_ENTRY(aes_xts_decrypt)
++ FRAME_PUSH
++ cbz w7, .LxtsdecloopNx
++
++ ld1 {v4.16b}, [x6]
++ enc_prepare w3, x5, x6
++ encrypt_block v4, w3, x5, x6, w7 /* first tweak */
++ dec_prepare w3, x2, x6
++ ldr q7, .Lxts_mul_x
++ b .LxtsdecNx
++
++.LxtsdecloopNx:
++ ldr q7, .Lxts_mul_x
++ next_tweak v4, v4, v7, v8
++.LxtsdecNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lxtsdec1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
++ next_tweak v5, v4, v7, v8
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ do_decrypt_block2x
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ st1 {v0.16b-v1.16b}, [x0], #32
++ cbz w4, .LxtsdecoutNx
++ next_tweak v4, v5, v7, v8
++ b .LxtsdecNx
++.LxtsdecoutNx:
++ mov v4.16b, v5.16b
++ b .Lxtsdecout
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
++ next_tweak v5, v4, v7, v8
++ eor v0.16b, v0.16b, v4.16b
++ next_tweak v6, v5, v7, v8
++ eor v1.16b, v1.16b, v5.16b
++ eor v2.16b, v2.16b, v6.16b
++ next_tweak v7, v6, v7, v8
++ eor v3.16b, v3.16b, v7.16b
++ do_decrypt_block4x
++ eor v3.16b, v3.16b, v7.16b
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ eor v2.16b, v2.16b, v6.16b
++ st1 {v0.16b-v3.16b}, [x0], #64
++ mov v4.16b, v7.16b
++ cbz w4, .Lxtsdecout
++ b .LxtsdecloopNx
++#endif
++.Lxtsdec1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lxtsdecout
++#endif
++.Lxtsdecloop:
++ ld1 {v1.16b}, [x1], #16
++ eor v0.16b, v1.16b, v4.16b
++ decrypt_block v0, w3, x2, x6, w7
++ eor v0.16b, v0.16b, v4.16b
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ beq .Lxtsdecout
++ next_tweak v4, v4, v7, v8
++ b .Lxtsdecloop
++.Lxtsdecout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_xts_decrypt)
+diff -Nur linux-3.14.36/arch/arm64/crypto/aes-neon.S linux-openelec/arch/arm64/crypto/aes-neon.S
+--- linux-3.14.36/arch/arm64/crypto/aes-neon.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/aes-neon.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,382 @@
++/*
++ * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON
++ *
++ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++#define AES_ENTRY(func) ENTRY(neon_ ## func)
++#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
++
++ /* multiply by polynomial 'x' in GF(2^8) */
++ .macro mul_by_x, out, in, temp, const
++ sshr \temp, \in, #7
++ add \out, \in, \in
++ and \temp, \temp, \const
++ eor \out, \out, \temp
++ .endm
++
++ /* preload the entire Sbox */
++ .macro prepare, sbox, shiftrows, temp
++ adr \temp, \sbox
++ movi v12.16b, #0x40
++ ldr q13, \shiftrows
++ movi v14.16b, #0x1b
++ ld1 {v16.16b-v19.16b}, [\temp], #64
++ ld1 {v20.16b-v23.16b}, [\temp], #64
++ ld1 {v24.16b-v27.16b}, [\temp], #64
++ ld1 {v28.16b-v31.16b}, [\temp]
++ .endm
++
++ /* do preload for encryption */
++ .macro enc_prepare, ignore0, ignore1, temp
++ prepare .LForward_Sbox, .LForward_ShiftRows, \temp
++ .endm
++
++ .macro enc_switch_key, ignore0, ignore1, temp
++ /* do nothing */
++ .endm
++
++ /* do preload for decryption */
++ .macro dec_prepare, ignore0, ignore1, temp
++ prepare .LReverse_Sbox, .LReverse_ShiftRows, \temp
++ .endm
++
++ /* apply SubBytes transformation using the the preloaded Sbox */
++ .macro sub_bytes, in
++ sub v9.16b, \in\().16b, v12.16b
++ tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b
++ sub v10.16b, v9.16b, v12.16b
++ tbx \in\().16b, {v20.16b-v23.16b}, v9.16b
++ sub v11.16b, v10.16b, v12.16b
++ tbx \in\().16b, {v24.16b-v27.16b}, v10.16b
++ tbx \in\().16b, {v28.16b-v31.16b}, v11.16b
++ .endm
++
++ /* apply MixColumns transformation */
++ .macro mix_columns, in
++ mul_by_x v10.16b, \in\().16b, v9.16b, v14.16b
++ rev32 v8.8h, \in\().8h
++ eor \in\().16b, v10.16b, \in\().16b
++ shl v9.4s, v8.4s, #24
++ shl v11.4s, \in\().4s, #24
++ sri v9.4s, v8.4s, #8
++ sri v11.4s, \in\().4s, #8
++ eor v9.16b, v9.16b, v8.16b
++ eor v10.16b, v10.16b, v9.16b
++ eor \in\().16b, v10.16b, v11.16b
++ .endm
++
++ /* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */
++ .macro inv_mix_columns, in
++ mul_by_x v11.16b, \in\().16b, v10.16b, v14.16b
++ mul_by_x v11.16b, v11.16b, v10.16b, v14.16b
++ eor \in\().16b, \in\().16b, v11.16b
++ rev32 v11.8h, v11.8h
++ eor \in\().16b, \in\().16b, v11.16b
++ mix_columns \in
++ .endm
++
++ .macro do_block, enc, in, rounds, rk, rkp, i
++ ld1 {v15.16b}, [\rk]
++ add \rkp, \rk, #16
++ mov \i, \rounds
++1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
++ tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
++ sub_bytes \in
++ ld1 {v15.16b}, [\rkp], #16
++ subs \i, \i, #1
++ beq 2222f
++ .if \enc == 1
++ mix_columns \in
++ .else
++ inv_mix_columns \in
++ .endif
++ b 1111b
++2222: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
++ .endm
++
++ .macro encrypt_block, in, rounds, rk, rkp, i
++ do_block 1, \in, \rounds, \rk, \rkp, \i
++ .endm
++
++ .macro decrypt_block, in, rounds, rk, rkp, i
++ do_block 0, \in, \rounds, \rk, \rkp, \i
++ .endm
++
++ /*
++ * Interleaved versions: functionally equivalent to the
++ * ones above, but applied to 2 or 4 AES states in parallel.
++ */
++
++ .macro sub_bytes_2x, in0, in1
++ sub v8.16b, \in0\().16b, v12.16b
++ sub v9.16b, \in1\().16b, v12.16b
++ tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
++ tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
++ sub v10.16b, v8.16b, v12.16b
++ sub v11.16b, v9.16b, v12.16b
++ tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
++ tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
++ sub v8.16b, v10.16b, v12.16b
++ sub v9.16b, v11.16b, v12.16b
++ tbx \in0\().16b, {v24.16b-v27.16b}, v10.16b
++ tbx \in1\().16b, {v24.16b-v27.16b}, v11.16b
++ tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
++ tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
++ .endm
++
++ .macro sub_bytes_4x, in0, in1, in2, in3
++ sub v8.16b, \in0\().16b, v12.16b
++ tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
++ sub v9.16b, \in1\().16b, v12.16b
++ tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
++ sub v10.16b, \in2\().16b, v12.16b
++ tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b
++ sub v11.16b, \in3\().16b, v12.16b
++ tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b
++ tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
++ tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
++ sub v8.16b, v8.16b, v12.16b
++ tbx \in2\().16b, {v20.16b-v23.16b}, v10.16b
++ sub v9.16b, v9.16b, v12.16b
++ tbx \in3\().16b, {v20.16b-v23.16b}, v11.16b
++ sub v10.16b, v10.16b, v12.16b
++ tbx \in0\().16b, {v24.16b-v27.16b}, v8.16b
++ sub v11.16b, v11.16b, v12.16b
++ tbx \in1\().16b, {v24.16b-v27.16b}, v9.16b
++ sub v8.16b, v8.16b, v12.16b
++ tbx \in2\().16b, {v24.16b-v27.16b}, v10.16b
++ sub v9.16b, v9.16b, v12.16b
++ tbx \in3\().16b, {v24.16b-v27.16b}, v11.16b
++ sub v10.16b, v10.16b, v12.16b
++ tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
++ sub v11.16b, v11.16b, v12.16b
++ tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
++ tbx \in2\().16b, {v28.16b-v31.16b}, v10.16b
++ tbx \in3\().16b, {v28.16b-v31.16b}, v11.16b
++ .endm
++
++ .macro mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
++ sshr \tmp0\().16b, \in0\().16b, #7
++ add \out0\().16b, \in0\().16b, \in0\().16b
++ sshr \tmp1\().16b, \in1\().16b, #7
++ and \tmp0\().16b, \tmp0\().16b, \const\().16b
++ add \out1\().16b, \in1\().16b, \in1\().16b
++ and \tmp1\().16b, \tmp1\().16b, \const\().16b
++ eor \out0\().16b, \out0\().16b, \tmp0\().16b
++ eor \out1\().16b, \out1\().16b, \tmp1\().16b
++ .endm
++
++ .macro mix_columns_2x, in0, in1
++ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
++ rev32 v10.8h, \in0\().8h
++ rev32 v11.8h, \in1\().8h
++ eor \in0\().16b, v8.16b, \in0\().16b
++ eor \in1\().16b, v9.16b, \in1\().16b
++ shl v12.4s, v10.4s, #24
++ shl v13.4s, v11.4s, #24
++ eor v8.16b, v8.16b, v10.16b
++ sri v12.4s, v10.4s, #8
++ shl v10.4s, \in0\().4s, #24
++ eor v9.16b, v9.16b, v11.16b
++ sri v13.4s, v11.4s, #8
++ shl v11.4s, \in1\().4s, #24
++ sri v10.4s, \in0\().4s, #8
++ eor \in0\().16b, v8.16b, v12.16b
++ sri v11.4s, \in1\().4s, #8
++ eor \in1\().16b, v9.16b, v13.16b
++ eor \in0\().16b, v10.16b, \in0\().16b
++ eor \in1\().16b, v11.16b, \in1\().16b
++ .endm
++
++ .macro inv_mix_cols_2x, in0, in1
++ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
++ mul_by_x_2x v8, v9, v8, v9, v10, v11, v14
++ eor \in0\().16b, \in0\().16b, v8.16b
++ eor \in1\().16b, \in1\().16b, v9.16b
++ rev32 v8.8h, v8.8h
++ rev32 v9.8h, v9.8h
++ eor \in0\().16b, \in0\().16b, v8.16b
++ eor \in1\().16b, \in1\().16b, v9.16b
++ mix_columns_2x \in0, \in1
++ .endm
++
++ .macro inv_mix_cols_4x, in0, in1, in2, in3
++ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
++ mul_by_x_2x v10, v11, \in2, \in3, v12, v13, v14
++ mul_by_x_2x v8, v9, v8, v9, v12, v13, v14
++ mul_by_x_2x v10, v11, v10, v11, v12, v13, v14
++ eor \in0\().16b, \in0\().16b, v8.16b
++ eor \in1\().16b, \in1\().16b, v9.16b
++ eor \in2\().16b, \in2\().16b, v10.16b
++ eor \in3\().16b, \in3\().16b, v11.16b
++ rev32 v8.8h, v8.8h
++ rev32 v9.8h, v9.8h
++ rev32 v10.8h, v10.8h
++ rev32 v11.8h, v11.8h
++ eor \in0\().16b, \in0\().16b, v8.16b
++ eor \in1\().16b, \in1\().16b, v9.16b
++ eor \in2\().16b, \in2\().16b, v10.16b
++ eor \in3\().16b, \in3\().16b, v11.16b
++ mix_columns_2x \in0, \in1
++ mix_columns_2x \in2, \in3
++ .endm
++
++ .macro do_block_2x, enc, in0, in1 rounds, rk, rkp, i
++ ld1 {v15.16b}, [\rk]
++ add \rkp, \rk, #16
++ mov \i, \rounds
++1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
++ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
++ sub_bytes_2x \in0, \in1
++ tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
++ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
++ ld1 {v15.16b}, [\rkp], #16
++ subs \i, \i, #1
++ beq 2222f
++ .if \enc == 1
++ mix_columns_2x \in0, \in1
++ ldr q13, .LForward_ShiftRows
++ .else
++ inv_mix_cols_2x \in0, \in1
++ ldr q13, .LReverse_ShiftRows
++ .endif
++ movi v12.16b, #0x40
++ b 1111b
++2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
++ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
++ .endm
++
++ .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
++ ld1 {v15.16b}, [\rk]
++ add \rkp, \rk, #16
++ mov \i, \rounds
++1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
++ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
++ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
++ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
++ sub_bytes_4x \in0, \in1, \in2, \in3
++ tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
++ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
++ tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */
++ tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */
++ ld1 {v15.16b}, [\rkp], #16
++ subs \i, \i, #1
++ beq 2222f
++ .if \enc == 1
++ mix_columns_2x \in0, \in1
++ mix_columns_2x \in2, \in3
++ ldr q13, .LForward_ShiftRows
++ .else
++ inv_mix_cols_4x \in0, \in1, \in2, \in3
++ ldr q13, .LReverse_ShiftRows
++ .endif
++ movi v12.16b, #0x40
++ b 1111b
++2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
++ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
++ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
++ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
++ .endm
++
++ .macro encrypt_block2x, in0, in1, rounds, rk, rkp, i
++ do_block_2x 1, \in0, \in1, \rounds, \rk, \rkp, \i
++ .endm
++
++ .macro decrypt_block2x, in0, in1, rounds, rk, rkp, i
++ do_block_2x 0, \in0, \in1, \rounds, \rk, \rkp, \i
++ .endm
++
++ .macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
++ do_block_4x 1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
++ .endm
++
++ .macro decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
++ do_block_4x 0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
++ .endm
++
++#include "aes-modes.S"
++
++ .text
++ .align 4
++.LForward_ShiftRows:
++ .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
++ .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
++
++.LReverse_ShiftRows:
++ .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
++ .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
++
++.LForward_Sbox:
++ .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
++ .byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
++ .byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
++ .byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
++ .byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
++ .byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
++ .byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
++ .byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
++ .byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
++ .byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
++ .byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
++ .byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
++ .byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
++ .byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
++ .byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
++ .byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
++ .byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
++ .byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
++ .byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
++ .byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
++ .byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
++ .byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
++ .byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
++ .byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
++ .byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
++ .byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
++ .byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
++ .byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
++ .byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
++ .byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
++ .byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
++ .byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
++
++.LReverse_Sbox:
++ .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
++ .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
++ .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
++ .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
++ .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
++ .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
++ .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
++ .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
++ .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
++ .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
++ .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
++ .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
++ .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
++ .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
++ .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
++ .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
++ .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
++ .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
++ .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
++ .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
++ .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
++ .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
++ .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
++ .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
++ .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
++ .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
++ .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
++ .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
++ .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
++ .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
++ .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
++ .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+diff -Nur linux-3.14.36/arch/arm64/crypto/ghash-ce-core.S linux-openelec/arch/arm64/crypto/ghash-ce-core.S
+--- linux-3.14.36/arch/arm64/crypto/ghash-ce-core.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/ghash-ce-core.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,79 @@
++/*
++ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
++ *
++ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++
++ SHASH .req v0
++ SHASH2 .req v1
++ T1 .req v2
++ T2 .req v3
++ MASK .req v4
++ XL .req v5
++ XM .req v6
++ XH .req v7
++ IN1 .req v7
++
++ .text
++ .arch armv8-a+crypto
++
++ /*
++ * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
++ * struct ghash_key const *k, const char *head)
++ */
++ENTRY(pmull_ghash_update)
++ ld1 {SHASH.16b}, [x3]
++ ld1 {XL.16b}, [x1]
++ movi MASK.16b, #0xe1
++ ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
++ shl MASK.2d, MASK.2d, #57
++ eor SHASH2.16b, SHASH2.16b, SHASH.16b
++
++ /* do the head block first, if supplied */
++ cbz x4, 0f
++ ld1 {T1.2d}, [x4]
++ b 1f
++
++0: ld1 {T1.2d}, [x2], #16
++ sub w0, w0, #1
++
++1: /* multiply XL by SHASH in GF(2^128) */
++CPU_LE( rev64 T1.16b, T1.16b )
++
++ ext T2.16b, XL.16b, XL.16b, #8
++ ext IN1.16b, T1.16b, T1.16b, #8
++ eor T1.16b, T1.16b, T2.16b
++ eor XL.16b, XL.16b, IN1.16b
++
++ pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
++ eor T1.16b, T1.16b, XL.16b
++ pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
++ pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
++
++ ext T1.16b, XL.16b, XH.16b, #8
++ eor T2.16b, XL.16b, XH.16b
++ eor XM.16b, XM.16b, T1.16b
++ eor XM.16b, XM.16b, T2.16b
++ pmull T2.1q, XL.1d, MASK.1d
++
++ mov XH.d[0], XM.d[1]
++ mov XM.d[1], XL.d[0]
++
++ eor XL.16b, XM.16b, T2.16b
++ ext T2.16b, XL.16b, XL.16b, #8
++ pmull XL.1q, XL.1d, MASK.1d
++ eor T2.16b, T2.16b, XH.16b
++ eor XL.16b, XL.16b, T2.16b
++
++ cbnz w0, 0b
++
++ st1 {XL.16b}, [x1]
++ ret
++ENDPROC(pmull_ghash_update)
+diff -Nur linux-3.14.36/arch/arm64/crypto/ghash-ce-glue.c linux-openelec/arch/arm64/crypto/ghash-ce-glue.c
+--- linux-3.14.36/arch/arm64/crypto/ghash-ce-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/ghash-ce-glue.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,156 @@
++/*
++ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
++ *
++ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/unaligned.h>
++#include <crypto/internal/hash.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++#define GHASH_BLOCK_SIZE 16
++#define GHASH_DIGEST_SIZE 16
++
++struct ghash_key {
++ u64 a;
++ u64 b;
++};
++
++struct ghash_desc_ctx {
++ u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
++ u8 buf[GHASH_BLOCK_SIZE];
++ u32 count;
++};
++
++asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
++ struct ghash_key const *k, const char *head);
++
++static int ghash_init(struct shash_desc *desc)
++{
++ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
++
++ *ctx = (struct ghash_desc_ctx){};
++ return 0;
++}
++
++static int ghash_update(struct shash_desc *desc, const u8 *src,
++ unsigned int len)
++{
++ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
++ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
++
++ ctx->count += len;
++
++ if ((partial + len) >= GHASH_BLOCK_SIZE) {
++ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
++ int blocks;
++
++ if (partial) {
++ int p = GHASH_BLOCK_SIZE - partial;
++
++ memcpy(ctx->buf + partial, src, p);
++ src += p;
++ len -= p;
++ }
++
++ blocks = len / GHASH_BLOCK_SIZE;
++ len %= GHASH_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(8);
++ pmull_ghash_update(blocks, ctx->digest, src, key,
++ partial ? ctx->buf : NULL);
++ kernel_neon_end();
++ src += blocks * GHASH_BLOCK_SIZE;
++ partial = 0;
++ }
++ if (len)
++ memcpy(ctx->buf + partial, src, len);
++ return 0;
++}
++
++static int ghash_final(struct shash_desc *desc, u8 *dst)
++{
++ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
++ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
++
++ if (partial) {
++ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
++
++ memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
++
++ kernel_neon_begin_partial(8);
++ pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
++ kernel_neon_end();
++ }
++ put_unaligned_be64(ctx->digest[1], dst);
++ put_unaligned_be64(ctx->digest[0], dst + 8);
++
++ *ctx = (struct ghash_desc_ctx){};
++ return 0;
++}
++
++static int ghash_setkey(struct crypto_shash *tfm,
++ const u8 *inkey, unsigned int keylen)
++{
++ struct ghash_key *key = crypto_shash_ctx(tfm);
++ u64 a, b;
++
++ if (keylen != GHASH_BLOCK_SIZE) {
++ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++ }
++
++ /* perform multiplication by 'x' in GF(2^128) */
++ b = get_unaligned_be64(inkey);
++ a = get_unaligned_be64(inkey + 8);
++
++ key->a = (a << 1) | (b >> 63);
++ key->b = (b << 1) | (a >> 63);
++
++ if (b >> 63)
++ key->b ^= 0xc200000000000000UL;
++
++ return 0;
++}
++
++static struct shash_alg ghash_alg = {
++ .digestsize = GHASH_DIGEST_SIZE,
++ .init = ghash_init,
++ .update = ghash_update,
++ .final = ghash_final,
++ .setkey = ghash_setkey,
++ .descsize = sizeof(struct ghash_desc_ctx),
++ .base = {
++ .cra_name = "ghash",
++ .cra_driver_name = "ghash-ce",
++ .cra_priority = 200,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
++ .cra_blocksize = GHASH_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct ghash_key),
++ .cra_module = THIS_MODULE,
++ },
++};
++
++static int __init ghash_ce_mod_init(void)
++{
++ return crypto_register_shash(&ghash_alg);
++}
++
++static void __exit ghash_ce_mod_exit(void)
++{
++ crypto_unregister_shash(&ghash_alg);
++}
++
++module_cpu_feature_match(PMULL, ghash_ce_mod_init);
++module_exit(ghash_ce_mod_exit);
+diff -Nur linux-3.14.36/arch/arm64/crypto/Kconfig linux-openelec/arch/arm64/crypto/Kconfig
+--- linux-3.14.36/arch/arm64/crypto/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/Kconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,53 @@
++
++menuconfig ARM64_CRYPTO
++ bool "ARM64 Accelerated Cryptographic Algorithms"
++ depends on ARM64
++ help
++ Say Y here to choose from a selection of cryptographic algorithms
++ implemented using ARM64 specific CPU features or instructions.
++
++if ARM64_CRYPTO
++
++config CRYPTO_SHA1_ARM64_CE
++ tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_HASH
++
++config CRYPTO_SHA2_ARM64_CE
++ tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_HASH
++
++config CRYPTO_GHASH_ARM64_CE
++ tristate "GHASH (for GCM chaining mode) using ARMv8 Crypto Extensions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_HASH
++
++config CRYPTO_AES_ARM64_CE
++ tristate "AES core cipher using ARMv8 Crypto Extensions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_ALGAPI
++ select CRYPTO_AES
++
++config CRYPTO_AES_ARM64_CE_CCM
++ tristate "AES in CCM mode using ARMv8 Crypto Extensions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_ALGAPI
++ select CRYPTO_AES
++ select CRYPTO_AEAD
++
++config CRYPTO_AES_ARM64_CE_BLK
++ tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_BLKCIPHER
++ select CRYPTO_AES
++ select CRYPTO_ABLK_HELPER
++
++config CRYPTO_AES_ARM64_NEON_BLK
++ tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_BLKCIPHER
++ select CRYPTO_AES
++ select CRYPTO_ABLK_HELPER
++
++endif
+diff -Nur linux-3.14.36/arch/arm64/crypto/Makefile linux-openelec/arch/arm64/crypto/Makefile
+--- linux-3.14.36/arch/arm64/crypto/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,38 @@
++#
++# linux/arch/arm64/crypto/Makefile
++#
++# Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License version 2 as
++# published by the Free Software Foundation.
++#
++
++obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
++sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
++
++obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
++sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
++
++obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
++ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
++
++obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
++CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
++
++obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
++aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
++
++obj-$(CONFIG_CRYPTO_AES_ARM64_CE_BLK) += aes-ce-blk.o
++aes-ce-blk-y := aes-glue-ce.o aes-ce.o
++
++obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
++aes-neon-blk-y := aes-glue-neon.o aes-neon.o
++
++AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE
++AFLAGS_aes-neon.o := -DINTERLEAVE=4
++
++CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
++
++$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
++ $(call if_changed_dep,cc_o_c)
+diff -Nur linux-3.14.36/arch/arm64/crypto/sha1-ce-core.S linux-openelec/arch/arm64/crypto/sha1-ce-core.S
+--- linux-3.14.36/arch/arm64/crypto/sha1-ce-core.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/sha1-ce-core.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,153 @@
++/*
++ * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
++ *
++ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++
++ .text
++ .arch armv8-a+crypto
++
++ k0 .req v0
++ k1 .req v1
++ k2 .req v2
++ k3 .req v3
++
++ t0 .req v4
++ t1 .req v5
++
++ dga .req q6
++ dgav .req v6
++ dgb .req s7
++ dgbv .req v7
++
++ dg0q .req q12
++ dg0s .req s12
++ dg0v .req v12
++ dg1s .req s13
++ dg1v .req v13
++ dg2s .req s14
++
++ .macro add_only, op, ev, rc, s0, dg1
++ .ifc \ev, ev
++ add t1.4s, v\s0\().4s, \rc\().4s
++ sha1h dg2s, dg0s
++ .ifnb \dg1
++ sha1\op dg0q, \dg1, t0.4s
++ .else
++ sha1\op dg0q, dg1s, t0.4s
++ .endif
++ .else
++ .ifnb \s0
++ add t0.4s, v\s0\().4s, \rc\().4s
++ .endif
++ sha1h dg1s, dg0s
++ sha1\op dg0q, dg2s, t1.4s
++ .endif
++ .endm
++
++ .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
++ sha1su0 v\s0\().4s, v\s1\().4s, v\s2\().4s
++ add_only \op, \ev, \rc, \s1, \dg1
++ sha1su1 v\s0\().4s, v\s3\().4s
++ .endm
++
++ /*
++ * The SHA1 round constants
++ */
++ .align 4
++.Lsha1_rcon:
++ .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
++
++ /*
++ * void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
++ * u8 *head, long bytes)
++ */
++ENTRY(sha1_ce_transform)
++ /* load round constants */
++ adr x6, .Lsha1_rcon
++ ld1r {k0.4s}, [x6], #4
++ ld1r {k1.4s}, [x6], #4
++ ld1r {k2.4s}, [x6], #4
++ ld1r {k3.4s}, [x6]
++
++ /* load state */
++ ldr dga, [x2]
++ ldr dgb, [x2, #16]
++
++ /* load partial state (if supplied) */
++ cbz x3, 0f
++ ld1 {v8.4s-v11.4s}, [x3]
++ b 1f
++
++ /* load input */
++0: ld1 {v8.4s-v11.4s}, [x1], #64
++ sub w0, w0, #1
++
++1:
++CPU_LE( rev32 v8.16b, v8.16b )
++CPU_LE( rev32 v9.16b, v9.16b )
++CPU_LE( rev32 v10.16b, v10.16b )
++CPU_LE( rev32 v11.16b, v11.16b )
++
++2: add t0.4s, v8.4s, k0.4s
++ mov dg0v.16b, dgav.16b
++
++ add_update c, ev, k0, 8, 9, 10, 11, dgb
++ add_update c, od, k0, 9, 10, 11, 8
++ add_update c, ev, k0, 10, 11, 8, 9
++ add_update c, od, k0, 11, 8, 9, 10
++ add_update c, ev, k1, 8, 9, 10, 11
++
++ add_update p, od, k1, 9, 10, 11, 8
++ add_update p, ev, k1, 10, 11, 8, 9
++ add_update p, od, k1, 11, 8, 9, 10
++ add_update p, ev, k1, 8, 9, 10, 11
++ add_update p, od, k2, 9, 10, 11, 8
++
++ add_update m, ev, k2, 10, 11, 8, 9
++ add_update m, od, k2, 11, 8, 9, 10
++ add_update m, ev, k2, 8, 9, 10, 11
++ add_update m, od, k2, 9, 10, 11, 8
++ add_update m, ev, k3, 10, 11, 8, 9
++
++ add_update p, od, k3, 11, 8, 9, 10
++ add_only p, ev, k3, 9
++ add_only p, od, k3, 10
++ add_only p, ev, k3, 11
++ add_only p, od
++
++ /* update state */
++ add dgbv.2s, dgbv.2s, dg1v.2s
++ add dgav.4s, dgav.4s, dg0v.4s
++
++ cbnz w0, 0b
++
++ /*
++ * Final block: add padding and total bit count.
++ * Skip if we have no total byte count in x4. In that case, the input
++ * size was not a round multiple of the block size, and the padding is
++ * handled by the C code.
++ */
++ cbz x4, 3f
++ movi v9.2d, #0
++ mov x8, #0x80000000
++ movi v10.2d, #0
++ ror x7, x4, #29 // ror(lsl(x4, 3), 32)
++ fmov d8, x8
++ mov x4, #0
++ mov v11.d[0], xzr
++ mov v11.d[1], x7
++ b 2b
++
++ /* store new state */
++3: str dga, [x2]
++ str dgb, [x2, #16]
++ ret
++ENDPROC(sha1_ce_transform)
+diff -Nur linux-3.14.36/arch/arm64/crypto/sha1-ce-glue.c linux-openelec/arch/arm64/crypto/sha1-ce-glue.c
+--- linux-3.14.36/arch/arm64/crypto/sha1-ce-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/sha1-ce-glue.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,174 @@
++/*
++ * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
++ *
++ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/unaligned.h>
++#include <crypto/internal/hash.h>
++#include <crypto/sha.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
++ u8 *head, long bytes);
++
++static int sha1_init(struct shash_desc *desc)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++
++ *sctx = (struct sha1_state){
++ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
++ };
++ return 0;
++}
++
++static int sha1_update(struct shash_desc *desc, const u8 *data,
++ unsigned int len)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
++
++ sctx->count += len;
++
++ if ((partial + len) >= SHA1_BLOCK_SIZE) {
++ int blocks;
++
++ if (partial) {
++ int p = SHA1_BLOCK_SIZE - partial;
++
++ memcpy(sctx->buffer + partial, data, p);
++ data += p;
++ len -= p;
++ }
++
++ blocks = len / SHA1_BLOCK_SIZE;
++ len %= SHA1_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(16);
++ sha1_ce_transform(blocks, data, sctx->state,
++ partial ? sctx->buffer : NULL, 0);
++ kernel_neon_end();
++
++ data += blocks * SHA1_BLOCK_SIZE;
++ partial = 0;
++ }
++ if (len)
++ memcpy(sctx->buffer + partial, data, len);
++ return 0;
++}
++
++static int sha1_final(struct shash_desc *desc, u8 *out)
++{
++ static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
++
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ __be64 bits = cpu_to_be64(sctx->count << 3);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ u32 padlen = SHA1_BLOCK_SIZE
++ - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);
++
++ sha1_update(desc, padding, padlen);
++ sha1_update(desc, (const u8 *)&bits, sizeof(bits));
++
++ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha1_state){};
++ return 0;
++}
++
++static int sha1_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len, u8 *out)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int blocks;
++ int i;
++
++ if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) {
++ sha1_update(desc, data, len);
++ return sha1_final(desc, out);
++ }
++
++ /*
++ * Use a fast path if the input is a multiple of 64 bytes. In
++ * this case, there is no need to copy data around, and we can
++ * perform the entire digest calculation in a single invocation
++ * of sha1_ce_transform()
++ */
++ blocks = len / SHA1_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(16);
++ sha1_ce_transform(blocks, data, sctx->state, NULL, len);
++ kernel_neon_end();
++
++ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha1_state){};
++ return 0;
++}
++
++static int sha1_export(struct shash_desc *desc, void *out)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ struct sha1_state *dst = out;
++
++ *dst = *sctx;
++ return 0;
++}
++
++static int sha1_import(struct shash_desc *desc, const void *in)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ struct sha1_state const *src = in;
++
++ *sctx = *src;
++ return 0;
++}
++
++static struct shash_alg alg = {
++ .init = sha1_init,
++ .update = sha1_update,
++ .final = sha1_final,
++ .finup = sha1_finup,
++ .export = sha1_export,
++ .import = sha1_import,
++ .descsize = sizeof(struct sha1_state),
++ .digestsize = SHA1_DIGEST_SIZE,
++ .statesize = sizeof(struct sha1_state),
++ .base = {
++ .cra_name = "sha1",
++ .cra_driver_name = "sha1-ce",
++ .cra_priority = 200,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
++ .cra_blocksize = SHA1_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++};
++
++static int __init sha1_ce_mod_init(void)
++{
++ return crypto_register_shash(&alg);
++}
++
++static void __exit sha1_ce_mod_fini(void)
++{
++ crypto_unregister_shash(&alg);
++}
++
++module_cpu_feature_match(SHA1, sha1_ce_mod_init);
++module_exit(sha1_ce_mod_fini);
+diff -Nur linux-3.14.36/arch/arm64/crypto/sha2-ce-core.S linux-openelec/arch/arm64/crypto/sha2-ce-core.S
+--- linux-3.14.36/arch/arm64/crypto/sha2-ce-core.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/sha2-ce-core.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,156 @@
++/*
++ * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions
++ *
++ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++
++ .text
++ .arch armv8-a+crypto
++
++ dga .req q20
++ dgav .req v20
++ dgb .req q21
++ dgbv .req v21
++
++ t0 .req v22
++ t1 .req v23
++
++ dg0q .req q24
++ dg0v .req v24
++ dg1q .req q25
++ dg1v .req v25
++ dg2q .req q26
++ dg2v .req v26
++
++ .macro add_only, ev, rc, s0
++ mov dg2v.16b, dg0v.16b
++ .ifeq \ev
++ add t1.4s, v\s0\().4s, \rc\().4s
++ sha256h dg0q, dg1q, t0.4s
++ sha256h2 dg1q, dg2q, t0.4s
++ .else
++ .ifnb \s0
++ add t0.4s, v\s0\().4s, \rc\().4s
++ .endif
++ sha256h dg0q, dg1q, t1.4s
++ sha256h2 dg1q, dg2q, t1.4s
++ .endif
++ .endm
++
++ .macro add_update, ev, rc, s0, s1, s2, s3
++ sha256su0 v\s0\().4s, v\s1\().4s
++ add_only \ev, \rc, \s1
++ sha256su1 v\s0\().4s, v\s2\().4s, v\s3\().4s
++ .endm
++
++ /*
++ * The SHA-256 round constants
++ */
++ .align 4
++.Lsha2_rcon:
++ .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
++ .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
++ .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
++ .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
++ .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
++ .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
++ .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
++ .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
++ .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
++ .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
++ .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
++ .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
++ .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
++ .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
++ .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
++ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
++
++ /*
++ * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
++ * u8 *head, long bytes)
++ */
++ENTRY(sha2_ce_transform)
++ /* load round constants */
++ adr x8, .Lsha2_rcon
++ ld1 { v0.4s- v3.4s}, [x8], #64
++ ld1 { v4.4s- v7.4s}, [x8], #64
++ ld1 { v8.4s-v11.4s}, [x8], #64
++ ld1 {v12.4s-v15.4s}, [x8]
++
++ /* load state */
++ ldp dga, dgb, [x2]
++
++ /* load partial input (if supplied) */
++ cbz x3, 0f
++ ld1 {v16.4s-v19.4s}, [x3]
++ b 1f
++
++ /* load input */
++0: ld1 {v16.4s-v19.4s}, [x1], #64
++ sub w0, w0, #1
++
++1:
++CPU_LE( rev32 v16.16b, v16.16b )
++CPU_LE( rev32 v17.16b, v17.16b )
++CPU_LE( rev32 v18.16b, v18.16b )
++CPU_LE( rev32 v19.16b, v19.16b )
++
++2: add t0.4s, v16.4s, v0.4s
++ mov dg0v.16b, dgav.16b
++ mov dg1v.16b, dgbv.16b
++
++ add_update 0, v1, 16, 17, 18, 19
++ add_update 1, v2, 17, 18, 19, 16
++ add_update 0, v3, 18, 19, 16, 17
++ add_update 1, v4, 19, 16, 17, 18
++
++ add_update 0, v5, 16, 17, 18, 19
++ add_update 1, v6, 17, 18, 19, 16
++ add_update 0, v7, 18, 19, 16, 17
++ add_update 1, v8, 19, 16, 17, 18
++
++ add_update 0, v9, 16, 17, 18, 19
++ add_update 1, v10, 17, 18, 19, 16
++ add_update 0, v11, 18, 19, 16, 17
++ add_update 1, v12, 19, 16, 17, 18
++
++ add_only 0, v13, 17
++ add_only 1, v14, 18
++ add_only 0, v15, 19
++ add_only 1
++
++ /* update state */
++ add dgav.4s, dgav.4s, dg0v.4s
++ add dgbv.4s, dgbv.4s, dg1v.4s
++
++ /* handled all input blocks? */
++ cbnz w0, 0b
++
++ /*
++ * Final block: add padding and total bit count.
++ * Skip if we have no total byte count in x4. In that case, the input
++ * size was not a round multiple of the block size, and the padding is
++ * handled by the C code.
++ */
++ cbz x4, 3f
++ movi v17.2d, #0
++ mov x8, #0x80000000
++ movi v18.2d, #0
++ ror x7, x4, #29 // ror(lsl(x4, 3), 32)
++ fmov d16, x8
++ mov x4, #0
++ mov v19.d[0], xzr
++ mov v19.d[1], x7
++ b 2b
++
++ /* store new state */
++3: stp dga, dgb, [x2]
++ ret
++ENDPROC(sha2_ce_transform)
+diff -Nur linux-3.14.36/arch/arm64/crypto/sha2-ce-glue.c linux-openelec/arch/arm64/crypto/sha2-ce-glue.c
+--- linux-3.14.36/arch/arm64/crypto/sha2-ce-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/crypto/sha2-ce-glue.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,255 @@
++/*
++ * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
++ *
++ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/unaligned.h>
++#include <crypto/internal/hash.h>
++#include <crypto/sha.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
++ u8 *head, long bytes);
++
++static int sha224_init(struct shash_desc *desc)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++
++ *sctx = (struct sha256_state){
++ .state = {
++ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
++ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
++ }
++ };
++ return 0;
++}
++
++static int sha256_init(struct shash_desc *desc)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++
++ *sctx = (struct sha256_state){
++ .state = {
++ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
++ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
++ }
++ };
++ return 0;
++}
++
++static int sha2_update(struct shash_desc *desc, const u8 *data,
++ unsigned int len)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
++
++ sctx->count += len;
++
++ if ((partial + len) >= SHA256_BLOCK_SIZE) {
++ int blocks;
++
++ if (partial) {
++ int p = SHA256_BLOCK_SIZE - partial;
++
++ memcpy(sctx->buf + partial, data, p);
++ data += p;
++ len -= p;
++ }
++
++ blocks = len / SHA256_BLOCK_SIZE;
++ len %= SHA256_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(28);
++ sha2_ce_transform(blocks, data, sctx->state,
++ partial ? sctx->buf : NULL, 0);
++ kernel_neon_end();
++
++ data += blocks * SHA256_BLOCK_SIZE;
++ partial = 0;
++ }
++ if (len)
++ memcpy(sctx->buf + partial, data, len);
++ return 0;
++}
++
++static void sha2_final(struct shash_desc *desc)
++{
++ static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
++
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be64 bits = cpu_to_be64(sctx->count << 3);
++ u32 padlen = SHA256_BLOCK_SIZE
++ - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
++
++ sha2_update(desc, padding, padlen);
++ sha2_update(desc, (const u8 *)&bits, sizeof(bits));
++}
++
++static int sha224_final(struct shash_desc *desc, u8 *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ sha2_final(desc);
++
++ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha256_state){};
++ return 0;
++}
++
++static int sha256_final(struct shash_desc *desc, u8 *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ sha2_final(desc);
++
++ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha256_state){};
++ return 0;
++}
++
++static void sha2_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ int blocks;
++
++ if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
++ sha2_update(desc, data, len);
++ sha2_final(desc);
++ return;
++ }
++
++ /*
++ * Use a fast path if the input is a multiple of 64 bytes. In
++ * this case, there is no need to copy data around, and we can
++ * perform the entire digest calculation in a single invocation
++ * of sha2_ce_transform()
++ */
++ blocks = len / SHA256_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(28);
++ sha2_ce_transform(blocks, data, sctx->state, NULL, len);
++ kernel_neon_end();
++ data += blocks * SHA256_BLOCK_SIZE;
++}
++
++static int sha224_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len, u8 *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ sha2_finup(desc, data, len);
++
++ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha256_state){};
++ return 0;
++}
++
++static int sha256_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len, u8 *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ sha2_finup(desc, data, len);
++
++ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha256_state){};
++ return 0;
++}
++
++static int sha2_export(struct shash_desc *desc, void *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ struct sha256_state *dst = out;
++
++ *dst = *sctx;
++ return 0;
++}
++
++static int sha2_import(struct shash_desc *desc, const void *in)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ struct sha256_state const *src = in;
++
++ *sctx = *src;
++ return 0;
++}
++
++static struct shash_alg algs[] = { {
++ .init = sha224_init,
++ .update = sha2_update,
++ .final = sha224_final,
++ .finup = sha224_finup,
++ .export = sha2_export,
++ .import = sha2_import,
++ .descsize = sizeof(struct sha256_state),
++ .digestsize = SHA224_DIGEST_SIZE,
++ .statesize = sizeof(struct sha256_state),
++ .base = {
++ .cra_name = "sha224",
++ .cra_driver_name = "sha224-ce",
++ .cra_priority = 200,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
++ .cra_blocksize = SHA256_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++}, {
++ .init = sha256_init,
++ .update = sha2_update,
++ .final = sha256_final,
++ .finup = sha256_finup,
++ .export = sha2_export,
++ .import = sha2_import,
++ .descsize = sizeof(struct sha256_state),
++ .digestsize = SHA256_DIGEST_SIZE,
++ .statesize = sizeof(struct sha256_state),
++ .base = {
++ .cra_name = "sha256",
++ .cra_driver_name = "sha256-ce",
++ .cra_priority = 200,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
++ .cra_blocksize = SHA256_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++} };
++
++static int __init sha2_ce_mod_init(void)
++{
++ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
++}
++
++static void __exit sha2_ce_mod_fini(void)
++{
++ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
++}
++
++module_cpu_feature_match(SHA2, sha2_ce_mod_init);
++module_exit(sha2_ce_mod_fini);
+diff -Nur linux-3.14.36/arch/arm64/include/asm/bL_switcher.h linux-openelec/arch/arm64/include/asm/bL_switcher.h
+--- linux-3.14.36/arch/arm64/include/asm/bL_switcher.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/include/asm/bL_switcher.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,54 @@
++/*
++ * Based on the stubs for the ARM implementation which is:
++ *
++ * Created by: Nicolas Pitre, April 2012
++ * Copyright: (C) 2012-2013 Linaro Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef ASM_BL_SWITCHER_H
++#define ASM_BL_SWITCHER_H
++
++#include <linux/notifier.h>
++#include <linux/types.h>
++
++typedef void (*bL_switch_completion_handler)(void *cookie);
++
++static inline int bL_switch_request(unsigned int cpu,
++ unsigned int new_cluster_id)
++{
++ return -ENOTSUPP;
++}
++
++/*
++ * Register here to be notified about runtime enabling/disabling of
++ * the switcher.
++ *
++ * The notifier chain is called with the switcher activation lock held:
++ * the switcher will not be enabled or disabled during callbacks.
++ * Callbacks must not call bL_switcher_{get,put}_enabled().
++ */
++#define BL_NOTIFY_PRE_ENABLE 0
++#define BL_NOTIFY_POST_ENABLE 1
++#define BL_NOTIFY_PRE_DISABLE 2
++#define BL_NOTIFY_POST_DISABLE 3
++
++static inline int bL_switcher_register_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline bool bL_switcher_get_enabled(void) { return false; }
++static inline void bL_switcher_put_enabled(void) { }
++static inline int bL_switcher_trace_trigger(void) { return 0; }
++static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
++
++#endif
+diff -Nur linux-3.14.36/arch/arm64/include/asm/cacheflush.h linux-openelec/arch/arm64/include/asm/cacheflush.h
+--- linux-3.14.36/arch/arm64/include/asm/cacheflush.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/cacheflush.h 2015-05-06 12:05:43.000000000 -0500
+@@ -85,6 +85,13 @@
+ }
+
+ /*
++ * Cache maintenance functions used by the DMA API. No to be used directly.
++ */
++extern void __dma_map_area(const void *, size_t, int);
++extern void __dma_unmap_area(const void *, size_t, int);
++extern void __dma_flush_range(const void *, const void *);
++
++/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space. Really, we want to allow our "user
+ * space" model to handle this.
+diff -Nur linux-3.14.36/arch/arm64/include/asm/compat.h linux-openelec/arch/arm64/include/asm/compat.h
+--- linux-3.14.36/arch/arm64/include/asm/compat.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/compat.h 2015-07-24 18:03:28.688842002 -0500
+@@ -228,7 +228,7 @@
+ return (u32)(unsigned long)uptr;
+ }
+
+-#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
++#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+
+ static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+@@ -305,11 +305,6 @@
+
+ #else /* !CONFIG_COMPAT */
+
+-static inline int is_compat_task(void)
+-{
+- return 0;
+-}
+-
+ static inline int is_compat_thread(struct thread_info *thread)
+ {
+ return 0;
+diff -Nur linux-3.14.36/arch/arm64/include/asm/cpufeature.h linux-openelec/arch/arm64/include/asm/cpufeature.h
+--- linux-3.14.36/arch/arm64/include/asm/cpufeature.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/include/asm/cpufeature.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,29 @@
++/*
++ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_CPUFEATURE_H
++#define __ASM_CPUFEATURE_H
++
++#include <asm/hwcap.h>
++
++/*
++ * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
++ * in the kernel and for user space to keep track of which optional features
++ * are supported by the current system. So let's map feature 'x' to HWCAP_x.
++ * Note that HWCAP_x constants are bit fields so we need to take the log.
++ */
++
++#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
++#define cpu_feature(x) ilog2(HWCAP_ ## x)
++
++static inline bool cpu_have_feature(unsigned int num)
++{
++ return elf_hwcap & (1UL << num);
++}
++
++#endif
+diff -Nur linux-3.14.36/arch/arm64/include/asm/debug-monitors.h linux-openelec/arch/arm64/include/asm/debug-monitors.h
+--- linux-3.14.36/arch/arm64/include/asm/debug-monitors.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/debug-monitors.h 2015-05-06 12:05:43.000000000 -0500
+@@ -26,6 +26,53 @@
+ #define DBG_ESR_EVT_HWWP 0x2
+ #define DBG_ESR_EVT_BRK 0x6
+
++/*
++ * Break point instruction encoding
++ */
++#define BREAK_INSTR_SIZE 4
++
++/*
++ * ESR values expected for dynamic and compile time BRK instruction
++ */
++#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff))
++
++/*
++ * #imm16 values used for BRK instruction generation
++ * Allowed values for kgbd are 0x400 - 0x7ff
++ * 0x400: for dynamic BRK instruction
++ * 0x401: for compile time BRK instruction
++ */
++#define KGDB_DYN_DGB_BRK_IMM 0x400
++#define KDBG_COMPILED_DBG_BRK_IMM 0x401
++
++/*
++ * BRK instruction encoding
++ * The #imm16 value should be placed at bits[20:5] within BRK ins
++ */
++#define AARCH64_BREAK_MON 0xd4200000
++
++/*
++ * Extract byte from BRK instruction
++ */
++#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
++ ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
++
++/*
++ * Extract byte from BRK #imm16
++ */
++#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
++ (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
++
++#define KGDB_DYN_DGB_BRK_BYTE(x) \
++ (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
++
++#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
++#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
++#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
++#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
++
++#define CACHE_FLUSH_IS_SAFE 1
++
+ enum debug_el {
+ DBG_ACTIVE_EL0 = 0,
+ DBG_ACTIVE_EL1,
+@@ -43,23 +90,6 @@
+ #ifndef __ASSEMBLY__
+ struct task_struct;
+
+-#define local_dbg_save(flags) \
+- do { \
+- typecheck(unsigned long, flags); \
+- asm volatile( \
+- "mrs %0, daif // local_dbg_save\n" \
+- "msr daifset, #8" \
+- : "=r" (flags) : : "memory"); \
+- } while (0)
+-
+-#define local_dbg_restore(flags) \
+- do { \
+- typecheck(unsigned long, flags); \
+- asm volatile( \
+- "msr daif, %0 // local_dbg_restore\n" \
+- : : "r" (flags) : "memory"); \
+- } while (0)
+-
+ #define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
+
+ #define DBG_HOOK_HANDLED 0
+diff -Nur linux-3.14.36/arch/arm64/include/asm/dma-mapping.h linux-openelec/arch/arm64/include/asm/dma-mapping.h
+--- linux-3.14.36/arch/arm64/include/asm/dma-mapping.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/dma-mapping.h 2015-05-06 12:05:43.000000000 -0500
+@@ -28,6 +28,8 @@
+
+ #define DMA_ERROR_CODE (~(dma_addr_t)0)
+ extern struct dma_map_ops *dma_ops;
++extern struct dma_map_ops coherent_swiotlb_dma_ops;
++extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
+
+ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+ {
+@@ -45,6 +47,11 @@
+ return __generic_dma_ops(dev);
+ }
+
++static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
++{
++ dev->archdata.dma_ops = ops;
++}
++
+ #include <asm-generic/dma-mapping-common.h>
+
+ static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+diff -Nur linux-3.14.36/arch/arm64/include/asm/ftrace.h linux-openelec/arch/arm64/include/asm/ftrace.h
+--- linux-3.14.36/arch/arm64/include/asm/ftrace.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/include/asm/ftrace.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,59 @@
++/*
++ * arch/arm64/include/asm/ftrace.h
++ *
++ * Copyright (C) 2013 Linaro Limited
++ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef __ASM_FTRACE_H
++#define __ASM_FTRACE_H
++
++#include <asm/insn.h>
++
++#define MCOUNT_ADDR ((unsigned long)_mcount)
++#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
++
++#ifndef __ASSEMBLY__
++#include <linux/compat.h>
++
++extern void _mcount(unsigned long);
++extern void *return_address(unsigned int);
++
++struct dyn_arch_ftrace {
++ /* No extra data needed for arm64 */
++};
++
++extern unsigned long ftrace_graph_call;
++
++static inline unsigned long ftrace_call_adjust(unsigned long addr)
++{
++ /*
++ * addr is the address of the mcount call instruction.
++ * recordmcount does the necessary offset calculation.
++ */
++ return addr;
++}
++
++#define ftrace_return_address(n) return_address(n)
++
++/*
++ * Because AArch32 mode does not share the same syscall table with AArch64,
++ * tracing compat syscalls may result in reporting bogus syscalls or even
++ * hang-up, so just do not trace them.
++ * See kernel/trace/trace_syscalls.c
++ *
++ * x86 code says:
++ * If the user realy wants these, then they should use the
++ * raw syscall tracepoints with filtering.
++ */
++#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
++static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
++{
++ return is_compat_task();
++}
++#endif /* ifndef __ASSEMBLY__ */
++
++#endif /* __ASM_FTRACE_H */
+diff -Nur linux-3.14.36/arch/arm64/include/asm/hwcap.h linux-openelec/arch/arm64/include/asm/hwcap.h
+--- linux-3.14.36/arch/arm64/include/asm/hwcap.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/hwcap.h 2015-07-24 18:03:29.256842002 -0500
+@@ -33,6 +33,12 @@
+ #define COMPAT_HWCAP_LPAE (1 << 20)
+ #define COMPAT_HWCAP_EVTSTRM (1 << 21)
+
++#define COMPAT_HWCAP2_AES (1 << 0)
++#define COMPAT_HWCAP2_PMULL (1 << 1)
++#define COMPAT_HWCAP2_SHA1 (1 << 2)
++#define COMPAT_HWCAP2_SHA2 (1 << 3)
++#define COMPAT_HWCAP2_CRC32 (1 << 4)
++
+ #ifndef __ASSEMBLY__
+ /*
+ * This yields a mask that user programs can use to figure out what
+@@ -42,7 +48,8 @@
+
+ #ifdef CONFIG_COMPAT
+ #define COMPAT_ELF_HWCAP (compat_elf_hwcap)
+-extern unsigned int compat_elf_hwcap;
++#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2)
++extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
+ #endif
+
+ extern unsigned long elf_hwcap;
+diff -Nur linux-3.14.36/arch/arm64/include/asm/hwcap.h.orig linux-openelec/arch/arm64/include/asm/hwcap.h.orig
+--- linux-3.14.36/arch/arm64/include/asm/hwcap.h.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/include/asm/hwcap.h.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,56 @@
++/*
++ * Copyright (C) 2012 ARM Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++#ifndef __ASM_HWCAP_H
++#define __ASM_HWCAP_H
++
++#include <uapi/asm/hwcap.h>
++
++#define COMPAT_HWCAP_HALF (1 << 1)
++#define COMPAT_HWCAP_THUMB (1 << 2)
++#define COMPAT_HWCAP_FAST_MULT (1 << 4)
++#define COMPAT_HWCAP_VFP (1 << 6)
++#define COMPAT_HWCAP_EDSP (1 << 7)
++#define COMPAT_HWCAP_NEON (1 << 12)
++#define COMPAT_HWCAP_VFPv3 (1 << 13)
++#define COMPAT_HWCAP_TLS (1 << 15)
++#define COMPAT_HWCAP_VFPv4 (1 << 16)
++#define COMPAT_HWCAP_IDIVA (1 << 17)
++#define COMPAT_HWCAP_IDIVT (1 << 18)
++#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
++#define COMPAT_HWCAP_EVTSTRM (1 << 21)
++
++#define COMPAT_HWCAP2_AES (1 << 0)
++#define COMPAT_HWCAP2_PMULL (1 << 1)
++#define COMPAT_HWCAP2_SHA1 (1 << 2)
++#define COMPAT_HWCAP2_SHA2 (1 << 3)
++#define COMPAT_HWCAP2_CRC32 (1 << 4)
++
++#ifndef __ASSEMBLY__
++/*
++ * This yields a mask that user programs can use to figure out what
++ * instruction set this cpu supports.
++ */
++#define ELF_HWCAP (elf_hwcap)
++
++#ifdef CONFIG_COMPAT
++#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
++#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2)
++extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
++#endif
++
++extern unsigned long elf_hwcap;
++#endif
++#endif
+diff -Nur linux-3.14.36/arch/arm64/include/asm/insn.h linux-openelec/arch/arm64/include/asm/insn.h
+--- linux-3.14.36/arch/arm64/include/asm/insn.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/insn.h 2015-05-06 12:05:43.000000000 -0500
+@@ -16,11 +16,14 @@
+ */
+ #ifndef __ASM_INSN_H
+ #define __ASM_INSN_H
++
+ #include <linux/types.h>
+
+ /* A64 instructions are always 32 bits. */
+ #define AARCH64_INSN_SIZE 4
+
++#ifndef __ASSEMBLY__
++
+ /*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section C3.1 "A64 instruction index by encoding":
+@@ -105,4 +108,6 @@
+ int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+ int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
++#endif /* __ASSEMBLY__ */
++
+ #endif /* __ASM_INSN_H */
+diff -Nur linux-3.14.36/arch/arm64/include/asm/irqflags.h linux-openelec/arch/arm64/include/asm/irqflags.h
+--- linux-3.14.36/arch/arm64/include/asm/irqflags.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/irqflags.h 2015-05-06 12:05:43.000000000 -0500
+@@ -90,5 +90,28 @@
+ return flags & PSR_I_BIT;
+ }
+
++/*
++ * save and restore debug state
++ */
++#define local_dbg_save(flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ asm volatile( \
++ "mrs %0, daif // local_dbg_save\n" \
++ "msr daifset, #8" \
++ : "=r" (flags) : : "memory"); \
++ } while (0)
++
++#define local_dbg_restore(flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ asm volatile( \
++ "msr daif, %0 // local_dbg_restore\n" \
++ : : "r" (flags) : "memory"); \
++ } while (0)
++
++#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory")
++#define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
++
+ #endif
+ #endif
+diff -Nur linux-3.14.36/arch/arm64/include/asm/Kbuild linux-openelec/arch/arm64/include/asm/Kbuild
+--- linux-3.14.36/arch/arm64/include/asm/Kbuild 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/Kbuild 2015-05-06 12:05:43.000000000 -0500
+@@ -35,6 +35,7 @@
+ generic-y += sembuf.h
+ generic-y += serial.h
+ generic-y += shmbuf.h
++generic-y += simd.h
+ generic-y += sizes.h
+ generic-y += socket.h
+ generic-y += sockios.h
+diff -Nur linux-3.14.36/arch/arm64/include/asm/kgdb.h linux-openelec/arch/arm64/include/asm/kgdb.h
+--- linux-3.14.36/arch/arm64/include/asm/kgdb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/include/asm/kgdb.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,84 @@
++/*
++ * AArch64 KGDB support
++ *
++ * Based on arch/arm/include/kgdb.h
++ *
++ * Copyright (C) 2013 Cavium Inc.
++ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef __ARM_KGDB_H
++#define __ARM_KGDB_H
++
++#include <linux/ptrace.h>
++#include <asm/debug-monitors.h>
++
++#ifndef __ASSEMBLY__
++
++static inline void arch_kgdb_breakpoint(void)
++{
++ asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
++}
++
++extern void kgdb_handle_bus_error(void);
++extern int kgdb_fault_expected;
++
++#endif /* !__ASSEMBLY__ */
++
++/*
++ * gdb is expecting the following registers layout.
++ *
++ * General purpose regs:
++ * r0-r30: 64 bit
++ * sp,pc : 64 bit
++ * pstate : 64 bit
++ * Total: 34
++ * FPU regs:
++ * f0-f31: 128 bit
++ * Total: 32
++ * Extra regs
++ * fpsr & fpcr: 32 bit
++ * Total: 2
++ *
++ */
++
++#define _GP_REGS 34
++#define _FP_REGS 32
++#define _EXTRA_REGS 2
++/*
++ * general purpose registers size in bytes.
++ * pstate is only 4 bytes. subtract 4 bytes
++ */
++#define GP_REG_BYTES (_GP_REGS * 8)
++#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
++
++/*
++ * Size of I/O buffer for gdb packet.
++ * considering to hold all register contents, size is set
++ */
++
++#define BUFMAX 2048
++
++/*
++ * Number of bytes required for gdb_regs buffer.
++ * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each
++ * GDB fails to connect for size beyond this with error
++ * "'g' packet reply is too long"
++ */
++
++#define NUMREGBYTES ((_GP_REGS * 8) + (_FP_REGS * 16) + \
++ (_EXTRA_REGS * 4))
++
++#endif /* __ASM_KGDB_H */
+diff -Nur linux-3.14.36/arch/arm64/include/asm/page.h linux-openelec/arch/arm64/include/asm/page.h
+--- linux-3.14.36/arch/arm64/include/asm/page.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/page.h 2015-05-06 12:05:43.000000000 -0500
+@@ -31,6 +31,15 @@
+ /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
+ #define __HAVE_ARCH_GATE_AREA 1
+
++/*
++ * The idmap and swapper page tables need some space reserved in the kernel
++ * image. The idmap only requires a pgd and a next level table to (section) map
++ * the kernel, while the swapper also maps the FDT and requires an additional
++ * table to map an early UART. See __create_page_tables for more information.
++ */
++#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
++#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
++
+ #ifndef __ASSEMBLY__
+
+ #ifdef CONFIG_ARM64_64K_PAGES
+diff -Nur linux-3.14.36/arch/arm64/include/asm/pgtable.h linux-openelec/arch/arm64/include/asm/pgtable.h
+--- linux-3.14.36/arch/arm64/include/asm/pgtable.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/pgtable.h 2015-05-06 12:05:43.000000000 -0500
+@@ -227,36 +227,36 @@
+
+ #define __HAVE_ARCH_PTE_SPECIAL
+
+-/*
+- * Software PMD bits for THP
+- */
++static inline pte_t pmd_pte(pmd_t pmd)
++{
++ return __pte(pmd_val(pmd));
++}
+
+-#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+-#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57)
++static inline pmd_t pte_pmd(pte_t pte)
++{
++ return __pmd(pte_val(pte));
++}
+
+ /*
+ * THP definitions.
+ */
+-#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
+-
+-#define __HAVE_ARCH_PMD_WRITE
+-#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
++#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
+ #endif
+
+-#define PMD_BIT_FUNC(fn,op) \
+-static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
++#define pmd_young(pmd) pte_young(pmd_pte(pmd))
++#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
++#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
++#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
++#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
++#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
++#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
++#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
+
+-PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
+-PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
+-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+-PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
+-PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
+-PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+-PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
++#define __HAVE_ARCH_PMD_WRITE
++#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+
+ #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+@@ -266,16 +266,7 @@
+
+ #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+-static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+-{
+- const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
+- PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
+- PMD_SECT_VALID;
+- pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+- return pmd;
+-}
+-
+-#define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd)
++#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+
+ static inline int has_transparent_hugepage(void)
+ {
+@@ -383,12 +374,14 @@
+ return pte;
+ }
+
++static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
++{
++ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
++}
++
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+
+-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
+-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
+-
+ /*
+ * Encode and decode a swap entry:
+ * bits 0-1: present (must be zero)
+diff -Nur linux-3.14.36/arch/arm64/include/asm/ptrace.h linux-openelec/arch/arm64/include/asm/ptrace.h
+--- linux-3.14.36/arch/arm64/include/asm/ptrace.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/ptrace.h 2015-05-06 12:05:43.000000000 -0500
+@@ -68,6 +68,7 @@
+
+ /* Architecturally defined mapping between AArch32 and AArch64 registers */
+ #define compat_usr(x) regs[(x)]
++#define compat_fp regs[11]
+ #define compat_sp regs[13]
+ #define compat_lr regs[14]
+ #define compat_sp_hyp regs[15]
+@@ -132,7 +133,12 @@
+ (!((regs)->pstate & PSR_F_BIT))
+
+ #define user_stack_pointer(regs) \
+- ((regs)->sp)
++ (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
++
++static inline unsigned long regs_return_value(struct pt_regs *regs)
++{
++ return regs->regs[0];
++}
+
+ /*
+ * Are the current registers suitable for user mode? (used to maintain
+@@ -164,7 +170,7 @@
+ return 0;
+ }
+
+-#define instruction_pointer(regs) (regs)->pc
++#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
+
+ #ifdef CONFIG_SMP
+ extern unsigned long profile_pc(struct pt_regs *regs);
+diff -Nur linux-3.14.36/arch/arm64/include/asm/syscall.h linux-openelec/arch/arm64/include/asm/syscall.h
+--- linux-3.14.36/arch/arm64/include/asm/syscall.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/syscall.h 2015-05-06 12:05:43.000000000 -0500
+@@ -18,6 +18,7 @@
+
+ #include <linux/err.h>
+
++extern const void *sys_call_table[];
+
+ static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+diff -Nur linux-3.14.36/arch/arm64/include/asm/thread_info.h linux-openelec/arch/arm64/include/asm/thread_info.h
+--- linux-3.14.36/arch/arm64/include/asm/thread_info.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/thread_info.h 2015-05-06 12:05:43.000000000 -0500
+@@ -91,6 +91,9 @@
+ /*
+ * thread information flags:
+ * TIF_SYSCALL_TRACE - syscall trace active
++ * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
++ * TIF_SYSCALL_AUDIT - syscall auditing
++ * TIF_SECCOMP - syscall secure computing
+ * TIF_SIGPENDING - signal pending
+ * TIF_NEED_RESCHED - rescheduling necessary
+ * TIF_NOTIFY_RESUME - callback before returning to user
+@@ -101,6 +104,9 @@
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+ #define TIF_SYSCALL_TRACE 8
++#define TIF_SYSCALL_AUDIT 9
++#define TIF_SYSCALL_TRACEPOINT 10
++#define TIF_SECCOMP 11
+ #define TIF_POLLING_NRFLAG 16
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+ #define TIF_FREEZE 19
+@@ -112,10 +118,17 @@
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
++#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
++#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
++#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+ #define _TIF_32BIT (1 << TIF_32BIT)
+
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME)
+
++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_THREAD_INFO_H */
+diff -Nur linux-3.14.36/arch/arm64/include/asm/topology.h linux-openelec/arch/arm64/include/asm/topology.h
+--- linux-3.14.36/arch/arm64/include/asm/topology.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/include/asm/topology.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,70 @@
++#ifndef __ASM_TOPOLOGY_H
++#define __ASM_TOPOLOGY_H
++
++#ifdef CONFIG_SMP
++
++#include <linux/cpumask.h>
++
++struct cpu_topology {
++ int thread_id;
++ int core_id;
++ int cluster_id;
++ cpumask_t thread_sibling;
++ cpumask_t core_sibling;
++};
++
++extern struct cpu_topology cpu_topology[NR_CPUS];
++
++#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
++#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
++#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
++#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
++
++#define mc_capable() (cpu_topology[0].cluster_id != -1)
++#define smt_capable() (cpu_topology[0].thread_id != -1)
++
++void init_cpu_topology(void);
++void store_cpu_topology(unsigned int cpuid);
++const struct cpumask *cpu_coregroup_mask(int cpu);
++
++#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
++/* Common values for CPUs */
++#ifndef SD_CPU_INIT
++#define SD_CPU_INIT (struct sched_domain) { \
++ .min_interval = 1, \
++ .max_interval = 4, \
++ .busy_factor = 64, \
++ .imbalance_pct = 125, \
++ .cache_nice_tries = 1, \
++ .busy_idx = 2, \
++ .idle_idx = 1, \
++ .newidle_idx = 0, \
++ .wake_idx = 0, \
++ .forkexec_idx = 0, \
++ \
++ .flags = 0*SD_LOAD_BALANCE \
++ | 1*SD_BALANCE_NEWIDLE \
++ | 1*SD_BALANCE_EXEC \
++ | 1*SD_BALANCE_FORK \
++ | 0*SD_BALANCE_WAKE \
++ | 1*SD_WAKE_AFFINE \
++ | 0*SD_SHARE_CPUPOWER \
++ | 0*SD_SHARE_PKG_RESOURCES \
++ | 0*SD_SERIALIZE \
++ , \
++ .last_balance = jiffies, \
++ .balance_interval = 1, \
++}
++#endif
++#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
++
++#else
++
++static inline void init_cpu_topology(void) { }
++static inline void store_cpu_topology(unsigned int cpuid) { }
++
++#endif
++
++#include <asm-generic/topology.h>
++
++#endif /* _ASM_ARM_TOPOLOGY_H */
+diff -Nur linux-3.14.36/arch/arm64/include/asm/unistd.h linux-openelec/arch/arm64/include/asm/unistd.h
+--- linux-3.14.36/arch/arm64/include/asm/unistd.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/asm/unistd.h 2015-05-06 12:05:43.000000000 -0500
+@@ -28,3 +28,5 @@
+ #endif
+ #define __ARCH_WANT_SYS_CLONE
+ #include <uapi/asm/unistd.h>
++
++#define NR_syscalls (__NR_syscalls)
+diff -Nur linux-3.14.36/arch/arm64/include/uapi/asm/Kbuild linux-openelec/arch/arm64/include/uapi/asm/Kbuild
+--- linux-3.14.36/arch/arm64/include/uapi/asm/Kbuild 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/include/uapi/asm/Kbuild 2015-05-06 12:05:43.000000000 -0500
+@@ -9,6 +9,7 @@
+ header-y += fcntl.h
+ header-y += hwcap.h
+ header-y += kvm_para.h
++header-y += perf_regs.h
+ header-y += param.h
+ header-y += ptrace.h
+ header-y += setup.h
+diff -Nur linux-3.14.36/arch/arm64/include/uapi/asm/perf_regs.h linux-openelec/arch/arm64/include/uapi/asm/perf_regs.h
+--- linux-3.14.36/arch/arm64/include/uapi/asm/perf_regs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/include/uapi/asm/perf_regs.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,40 @@
++#ifndef _ASM_ARM64_PERF_REGS_H
++#define _ASM_ARM64_PERF_REGS_H
++
++enum perf_event_arm_regs {
++ PERF_REG_ARM64_X0,
++ PERF_REG_ARM64_X1,
++ PERF_REG_ARM64_X2,
++ PERF_REG_ARM64_X3,
++ PERF_REG_ARM64_X4,
++ PERF_REG_ARM64_X5,
++ PERF_REG_ARM64_X6,
++ PERF_REG_ARM64_X7,
++ PERF_REG_ARM64_X8,
++ PERF_REG_ARM64_X9,
++ PERF_REG_ARM64_X10,
++ PERF_REG_ARM64_X11,
++ PERF_REG_ARM64_X12,
++ PERF_REG_ARM64_X13,
++ PERF_REG_ARM64_X14,
++ PERF_REG_ARM64_X15,
++ PERF_REG_ARM64_X16,
++ PERF_REG_ARM64_X17,
++ PERF_REG_ARM64_X18,
++ PERF_REG_ARM64_X19,
++ PERF_REG_ARM64_X20,
++ PERF_REG_ARM64_X21,
++ PERF_REG_ARM64_X22,
++ PERF_REG_ARM64_X23,
++ PERF_REG_ARM64_X24,
++ PERF_REG_ARM64_X25,
++ PERF_REG_ARM64_X26,
++ PERF_REG_ARM64_X27,
++ PERF_REG_ARM64_X28,
++ PERF_REG_ARM64_X29,
++ PERF_REG_ARM64_LR,
++ PERF_REG_ARM64_SP,
++ PERF_REG_ARM64_PC,
++ PERF_REG_ARM64_MAX,
++};
++#endif /* _ASM_ARM64_PERF_REGS_H */
+diff -Nur linux-3.14.36/arch/arm64/Kconfig linux-openelec/arch/arm64/Kconfig
+--- linux-3.14.36/arch/arm64/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/Kconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -4,6 +4,7 @@
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
++ select ARCH_HAS_OPP
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ select ARCH_WANT_FRAME_POINTERS
+@@ -17,6 +18,7 @@
+ select DCACHE_WORD_ACCESS
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
++ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_IOMAP
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+@@ -27,18 +29,27 @@
+ select GENERIC_TIME_VSYSCALL
+ select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_JUMP_LABEL
++ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
++ select HAVE_C_RECORDMCOUNT
+ select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
++ select HAVE_DYNAMIC_FTRACE
++ select HAVE_FTRACE_MCOUNT_RECORD
++ select HAVE_FUNCTION_TRACER
++ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS
+ select HAVE_MEMBLOCK
+ select HAVE_PATA_PLATFORM
+ select HAVE_PERF_EVENTS
++ select HAVE_PERF_REGS
++ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_SYSCALL_TRACEPOINTS
+ select IRQ_DOMAIN
+ select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
+@@ -86,7 +97,7 @@
+ config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+-config ZONE_DMA32
++config ZONE_DMA
+ def_bool y
+
+ config ARCH_DMA_ADDR_T_64BIT
+@@ -165,6 +176,134 @@
+
+ If you don't know what to do here, say N.
+
++config SCHED_MC
++ bool "Multi-core scheduler support"
++ depends on SMP
++ help
++ Multi-core scheduler support improves the CPU scheduler's decision
++ making when dealing with multi-core CPU chips at a cost of slightly
++ increased overhead in some places. If unsure say N here.
++
++config SCHED_SMT
++ bool "SMT scheduler support"
++ depends on SMP
++ help
++ Improves the CPU scheduler's decision making when dealing with
++ MultiThreading at a cost of slightly increased overhead in some
++ places. If unsure say N here.
++
++config SCHED_MC
++ bool "Multi-core scheduler support"
++ depends on ARM_CPU_TOPOLOGY
++ help
++ Multi-core scheduler support improves the CPU scheduler's decision
++ making when dealing with multi-core CPU chips at a cost of slightly
++ increased overhead in some places. If unsure say N here.
++
++config SCHED_SMT
++ bool "SMT scheduler support"
++ depends on ARM_CPU_TOPOLOGY
++ help
++ Improves the CPU scheduler's decision making when dealing with
++ MultiThreading at a cost of slightly increased overhead in some
++ places. If unsure say N here.
++
++config DISABLE_CPU_SCHED_DOMAIN_BALANCE
++ bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing"
++ help
++ Disables scheduler load-balancing at CPU sched domain level.
++
++config SCHED_HMP
++	bool "(EXPERIMENTAL) Heterogeneous multiprocessor scheduling"
++ depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP
++ help
++ Experimental scheduler optimizations for heterogeneous platforms.
++ Attempts to introspectively select task affinity to optimize power
++ and performance. Basic support for multiple (>2) cpu types is in place,
++ but it has only been tested with two types of cpus.
++ There is currently no support for migration of task groups, hence
++ !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
++ between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
++
++config SCHED_HMP_PRIO_FILTER
++ bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
++ depends on SCHED_HMP
++ help
++ Enables task priority based HMP migration filter. Any task with
++ a NICE value above the threshold will always be on low-power cpus
++ with less compute capacity.
++
++config SCHED_HMP_PRIO_FILTER_VAL
++ int "NICE priority threshold"
++ default 5
++ depends on SCHED_HMP_PRIO_FILTER
++
++config HMP_FAST_CPU_MASK
++ string "HMP scheduler fast CPU mask"
++ depends on SCHED_HMP
++ help
++ Leave empty to use device tree information.
++ Specify the cpuids of the fast CPUs in the system as a list string,
++ e.g. cpuid 0+1 should be specified as 0-1.
++
++config HMP_SLOW_CPU_MASK
++ string "HMP scheduler slow CPU mask"
++ depends on SCHED_HMP
++ help
++ Leave empty to use device tree information.
++ Specify the cpuids of the slow CPUs in the system as a list string,
++ e.g. cpuid 0+1 should be specified as 0-1.
++
++config HMP_VARIABLE_SCALE
++ bool "Allows changing the load tracking scale through sysfs"
++ depends on SCHED_HMP
++ help
++ When turned on, this option exports the thresholds and load average
++ period value for the load tracking patches through sysfs.
++ The values can be modified to change the rate of load accumulation
++ and the thresholds used for HMP migration.
++ The load_avg_period_ms is the time in ms to reach a load average of
++ 0.5 for an idle task of 0 load average ratio that start a busy loop.
++ The up_threshold and down_threshold is the value to go to a faster
++ CPU or to go back to a slower cpu.
++	  The {up,down}_threshold are divided by 1024 before being compared
++ to the load average.
++ For examples, with load_avg_period_ms = 128 and up_threshold = 512,
++ a running task with a load of 0 will be migrated to a bigger CPU after
++ 128ms, because after 128ms its load_avg_ratio is 0.5 and the real
++ up_threshold is 0.5.
++ This patch has the same behavior as changing the Y of the load
++ average computation to
++ (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms)
++	  but it removes intermediate overflows in computation.
++
++config HMP_FREQUENCY_INVARIANT_SCALE
++ bool "(EXPERIMENTAL) Frequency-Invariant Tracked Load for HMP"
++ depends on HMP_VARIABLE_SCALE && CPU_FREQ
++ help
++ Scales the current load contribution in line with the frequency
++ of the CPU that the task was executed on.
++ In this version, we use a simple linear scale derived from the
++ maximum frequency reported by CPUFreq.
++ Restricting tracked load to be scaled by the CPU's frequency
++ represents the consumption of possible compute capacity
++ (rather than consumption of actual instantaneous capacity as
++ normal) and allows the HMP migration's simple threshold
++ migration strategy to interact more predictably with CPUFreq's
++ asynchronous compute capacity changes.
++
++config SCHED_HMP_LITTLE_PACKING
++ bool "Small task packing for HMP"
++ depends on SCHED_HMP
++ default n
++ help
++ Allows the HMP Scheduler to pack small tasks into CPUs in the
++ smallest HMP domain.
++ Controlled by two sysfs files in sys/kernel/hmp.
++ packing_enable: 1 to enable, 0 to disable packing. Default 1.
++ packing_limit: runqueue load ratio where a RQ is considered
++ to be full. Default is NICE_0_LOAD * 9/8.
++
+ config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+@@ -317,5 +456,8 @@
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
++if CRYPTO
++source "arch/arm64/crypto/Kconfig"
++endif
+
+ source "lib/Kconfig"
+diff -Nur linux-3.14.36/arch/arm64/kernel/arm64ksyms.c linux-openelec/arch/arm64/kernel/arm64ksyms.c
+--- linux-3.14.36/arch/arm64/kernel/arm64ksyms.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/arm64ksyms.c 2015-05-06 12:05:43.000000000 -0500
+@@ -56,3 +56,7 @@
+ EXPORT_SYMBOL(test_and_clear_bit);
+ EXPORT_SYMBOL(change_bit);
+ EXPORT_SYMBOL(test_and_change_bit);
++
++#ifdef CONFIG_FUNCTION_TRACER
++EXPORT_SYMBOL(_mcount);
++#endif
+diff -Nur linux-3.14.36/arch/arm64/kernel/debug-monitors.c linux-openelec/arch/arm64/kernel/debug-monitors.c
+--- linux-3.14.36/arch/arm64/kernel/debug-monitors.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/debug-monitors.c 2015-05-06 12:05:43.000000000 -0500
+@@ -138,6 +138,7 @@
+ {
+ asm volatile("msr oslar_el1, %0" : : "r" (0));
+ isb();
++ local_dbg_enable();
+ }
+
+ static int os_lock_notify(struct notifier_block *self,
+@@ -314,9 +315,6 @@
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
+- pr_warn("unexpected brk exception at %lx, esr=0x%x\n",
+- (long)instruction_pointer(regs), esr);
+-
+ if (!user_mode(regs))
+ return -EFAULT;
+
+diff -Nur linux-3.14.36/arch/arm64/kernel/entry-ftrace.S linux-openelec/arch/arm64/kernel/entry-ftrace.S
+--- linux-3.14.36/arch/arm64/kernel/entry-ftrace.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/entry-ftrace.S 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,218 @@
++/*
++ * arch/arm64/kernel/entry-ftrace.S
++ *
++ * Copyright (C) 2013 Linaro Limited
++ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/ftrace.h>
++#include <asm/insn.h>
++
++/*
++ * Gcc with -pg will put the following code in the beginning of each function:
++ * mov x0, x30
++ * bl _mcount
++ * [function's body ...]
++ * "bl _mcount" may be replaced to "bl ftrace_caller" or NOP if dynamic
++ * ftrace is enabled.
++ *
++ * Please note that x0 as an argument will not be used here because we can
++ * get lr(x30) of instrumented function at any time by winding up call stack
++ * as long as the kernel is compiled without -fomit-frame-pointer.
++ * (or CONFIG_FRAME_POINTER, this is forced on arm64)
++ *
++ * stack layout after mcount_enter in _mcount():
++ *
++ * current sp/fp => 0:+-----+
++ * in _mcount() | x29 | -> instrumented function's fp
++ * +-----+
++ * | x30 | -> _mcount()'s lr (= instrumented function's pc)
++ * old sp => +16:+-----+
++ * when instrumented | |
++ * function calls | ... |
++ * _mcount() | |
++ * | |
++ * instrumented => +xx:+-----+
++ * function's fp | x29 | -> parent's fp
++ * +-----+
++ * | x30 | -> instrumented function's lr (= parent's pc)
++ * +-----+
++ * | ... |
++ */
++
++ .macro mcount_enter
++ stp x29, x30, [sp, #-16]!
++ mov x29, sp
++ .endm
++
++ .macro mcount_exit
++ ldp x29, x30, [sp], #16
++ ret
++ .endm
++
++ .macro mcount_adjust_addr rd, rn
++ sub \rd, \rn, #AARCH64_INSN_SIZE
++ .endm
++
++ /* for instrumented function's parent */
++ .macro mcount_get_parent_fp reg
++ ldr \reg, [x29]
++ ldr \reg, [\reg]
++ .endm
++
++ /* for instrumented function */
++ .macro mcount_get_pc0 reg
++ mcount_adjust_addr \reg, x30
++ .endm
++
++ .macro mcount_get_pc reg
++ ldr \reg, [x29, #8]
++ mcount_adjust_addr \reg, \reg
++ .endm
++
++ .macro mcount_get_lr reg
++ ldr \reg, [x29]
++ ldr \reg, [\reg, #8]
++ mcount_adjust_addr \reg, \reg
++ .endm
++
++ .macro mcount_get_lr_addr reg
++ ldr \reg, [x29]
++ add \reg, \reg, #8
++ .endm
++
++#ifndef CONFIG_DYNAMIC_FTRACE
++/*
++ * void _mcount(unsigned long return_address)
++ * @return_address: return address to instrumented function
++ *
++ * This function makes calls, if enabled, to:
++ * - tracer function to probe instrumented function's entry,
++ * - ftrace_graph_caller to set up an exit hook
++ */
++ENTRY(_mcount)
++#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
++ ldr x0, =ftrace_trace_stop
++ ldr x0, [x0] // if ftrace_trace_stop
++ ret // return;
++#endif
++ mcount_enter
++
++ ldr x0, =ftrace_trace_function
++ ldr x2, [x0]
++ adr x0, ftrace_stub
++ cmp x0, x2 // if (ftrace_trace_function
++ b.eq skip_ftrace_call // != ftrace_stub) {
++
++ mcount_get_pc x0 // function's pc
++ mcount_get_lr x1 // function's lr (= parent's pc)
++ blr x2 // (*ftrace_trace_function)(pc, lr);
++
++#ifndef CONFIG_FUNCTION_GRAPH_TRACER
++skip_ftrace_call: // return;
++ mcount_exit // }
++#else
++ mcount_exit // return;
++ // }
++skip_ftrace_call:
++ ldr x1, =ftrace_graph_return
++ ldr x2, [x1] // if ((ftrace_graph_return
++ cmp x0, x2 // != ftrace_stub)
++ b.ne ftrace_graph_caller
++
++ ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry
++ ldr x2, [x1] // != ftrace_graph_entry_stub))
++ ldr x0, =ftrace_graph_entry_stub
++ cmp x0, x2
++ b.ne ftrace_graph_caller // ftrace_graph_caller();
++
++ mcount_exit
++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
++ENDPROC(_mcount)
++
++#else /* CONFIG_DYNAMIC_FTRACE */
++/*
++ * _mcount() is used to build the kernel with -pg option, but all the branch
++ * instructions to _mcount() are replaced to NOP initially at kernel start up,
++ * and later on, NOP to branch to ftrace_caller() when enabled or branch to
++ * NOP when disabled per-function base.
++ */
++ENTRY(_mcount)
++ ret
++ENDPROC(_mcount)
++
++/*
++ * void ftrace_caller(unsigned long return_address)
++ * @return_address: return address to instrumented function
++ *
++ * This function is a counterpart of _mcount() in 'static' ftrace, and
++ * makes calls to:
++ * - tracer function to probe instrumented function's entry,
++ * - ftrace_graph_caller to set up an exit hook
++ */
++ENTRY(ftrace_caller)
++ mcount_enter
++
++ mcount_get_pc0 x0 // function's pc
++ mcount_get_lr x1 // function's lr
++
++ .global ftrace_call
++ftrace_call: // tracer(pc, lr);
++ nop // This will be replaced with "bl xxx"
++ // where xxx can be any kind of tracer.
++
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++ .global ftrace_graph_call
++ftrace_graph_call: // ftrace_graph_caller();
++ nop // If enabled, this will be replaced
++ // "b ftrace_graph_caller"
++#endif
++
++ mcount_exit
++ENDPROC(ftrace_caller)
++#endif /* CONFIG_DYNAMIC_FTRACE */
++
++ENTRY(ftrace_stub)
++ ret
++ENDPROC(ftrace_stub)
++
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++/*
++ * void ftrace_graph_caller(void)
++ *
++ * Called from _mcount() or ftrace_caller() when function_graph tracer is
++ * selected.
++ * This function w/ prepare_ftrace_return() fakes link register's value on
++ * the call stack in order to intercept instrumented function's return path
++ * and run return_to_handler() later on its exit.
++ */
++ENTRY(ftrace_graph_caller)
++ mcount_get_lr_addr x0 // pointer to function's saved lr
++ mcount_get_pc x1 // function's pc
++ mcount_get_parent_fp x2 // parent's fp
++ bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
++
++ mcount_exit
++ENDPROC(ftrace_graph_caller)
++
++/*
++ * void return_to_handler(void)
++ *
++ * Run ftrace_return_to_handler() before going back to parent.
++ * @fp is checked against the value passed by ftrace_graph_caller()
++ * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
++ */
++ENTRY(return_to_handler)
++ str x0, [sp, #-16]!
++ mov x0, x29 // parent's fp
++ bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
++ mov x30, x0 // restore the original return address
++ ldr x0, [sp], #16
++ ret
++END(return_to_handler)
++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+diff -Nur linux-3.14.36/arch/arm64/kernel/entry.S linux-openelec/arch/arm64/kernel/entry.S
+--- linux-3.14.36/arch/arm64/kernel/entry.S 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/entry.S 2015-05-06 12:05:43.000000000 -0500
+@@ -630,8 +630,9 @@
+ enable_irq
+
+ get_thread_info tsk
+- ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing
+- tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
++ ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
++ tst x16, #_TIF_SYSCALL_WORK
++ b.ne __sys_trace
+ adr lr, ret_fast_syscall // return address
+ cmp scno, sc_nr // check upper syscall limit
+ b.hs ni_sys
+@@ -647,9 +648,8 @@
+ * switches, and waiting for our parent to respond.
+ */
+ __sys_trace:
+- mov x1, sp
+- mov w0, #0 // trace entry
+- bl syscall_trace
++ mov x0, sp
++ bl syscall_trace_enter
+ adr lr, __sys_trace_return // return address
+ uxtw scno, w0 // syscall number (possibly new)
+ mov x1, sp // pointer to regs
+@@ -664,9 +664,8 @@
+
+ __sys_trace_return:
+ str x0, [sp] // save returned x0
+- mov x1, sp
+- mov w0, #1 // trace exit
+- bl syscall_trace
++ mov x0, sp
++ bl syscall_trace_exit
+ b ret_to_user
+
+ /*
+diff -Nur linux-3.14.36/arch/arm64/kernel/ftrace.c linux-openelec/arch/arm64/kernel/ftrace.c
+--- linux-3.14.36/arch/arm64/kernel/ftrace.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/ftrace.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,177 @@
++/*
++ * arch/arm64/kernel/ftrace.c
++ *
++ * Copyright (C) 2013 Linaro Limited
++ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/ftrace.h>
++#include <linux/swab.h>
++#include <linux/uaccess.h>
++
++#include <asm/cacheflush.h>
++#include <asm/ftrace.h>
++#include <asm/insn.h>
++
++#ifdef CONFIG_DYNAMIC_FTRACE
++/*
++ * Replace a single instruction, which may be a branch or NOP.
++ * If @validate == true, a replaced instruction is checked against 'old'.
++ */
++static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
++ bool validate)
++{
++ u32 replaced;
++
++ /*
++ * Note:
++ * Due to modules and __init, code can disappear and change,
++ * we need to protect against faulting as well as code changing.
++ * We do this by aarch64_insn_*() which use the probe_kernel_*().
++ *
++ * No lock is held here because all the modifications are run
++ * through stop_machine().
++ */
++ if (validate) {
++ if (aarch64_insn_read((void *)pc, &replaced))
++ return -EFAULT;
++
++ if (replaced != old)
++ return -EINVAL;
++ }
++ if (aarch64_insn_patch_text_nosync((void *)pc, new))
++ return -EPERM;
++
++ return 0;
++}
++
++/*
++ * Replace tracer function in ftrace_caller()
++ */
++int ftrace_update_ftrace_func(ftrace_func_t func)
++{
++ unsigned long pc;
++ u32 new;
++
++ pc = (unsigned long)&ftrace_call;
++ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
++
++ return ftrace_modify_code(pc, 0, new, false);
++}
++
++/*
++ * Turn on the call to ftrace_caller() in instrumented function
++ */
++int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
++{
++ unsigned long pc = rec->ip;
++ u32 old, new;
++
++ old = aarch64_insn_gen_nop();
++ new = aarch64_insn_gen_branch_imm(pc, addr, true);
++
++ return ftrace_modify_code(pc, old, new, true);
++}
++
++/*
++ * Turn off the call to ftrace_caller() in instrumented function
++ */
++int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
++ unsigned long addr)
++{
++ unsigned long pc = rec->ip;
++ u32 old, new;
++
++ old = aarch64_insn_gen_branch_imm(pc, addr, true);
++ new = aarch64_insn_gen_nop();
++
++ return ftrace_modify_code(pc, old, new, true);
++}
++
++int __init ftrace_dyn_arch_init(void *data)
++{
++ *(unsigned long *)data = 0;
++ return 0;
++}
++#endif /* CONFIG_DYNAMIC_FTRACE */
++
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++/*
++ * function_graph tracer expects ftrace_return_to_handler() to be called
++ * on the way back to parent. For this purpose, this function is called
++ * in _mcount() or ftrace_caller() to replace return address (*parent) on
++ * the call stack to return_to_handler.
++ *
++ * Note that @frame_pointer is used only for sanity check later.
++ */
++void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
++ unsigned long frame_pointer)
++{
++ unsigned long return_hooker = (unsigned long)&return_to_handler;
++ unsigned long old;
++ struct ftrace_graph_ent trace;
++ int err;
++
++ if (unlikely(atomic_read(&current->tracing_graph_pause)))
++ return;
++
++ /*
++ * Note:
++ * No protection against faulting at *parent, which may be seen
++ * on other archs. It's unlikely on AArch64.
++ */
++ old = *parent;
++ *parent = return_hooker;
++
++ trace.func = self_addr;
++ trace.depth = current->curr_ret_stack + 1;
++
++ /* Only trace if the calling function expects to */
++ if (!ftrace_graph_entry(&trace)) {
++ *parent = old;
++ return;
++ }
++
++ err = ftrace_push_return_trace(old, self_addr, &trace.depth,
++ frame_pointer);
++ if (err == -EBUSY) {
++ *parent = old;
++ return;
++ }
++}
++
++#ifdef CONFIG_DYNAMIC_FTRACE
++/*
++ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
++ * depending on @enable.
++ */
++static int ftrace_modify_graph_caller(bool enable)
++{
++ unsigned long pc = (unsigned long)&ftrace_graph_call;
++ u32 branch, nop;
++
++ branch = aarch64_insn_gen_branch_imm(pc,
++ (unsigned long)ftrace_graph_caller, false);
++ nop = aarch64_insn_gen_nop();
++
++ if (enable)
++ return ftrace_modify_code(pc, nop, branch, true);
++ else
++ return ftrace_modify_code(pc, branch, nop, true);
++}
++
++int ftrace_enable_ftrace_graph_caller(void)
++{
++ return ftrace_modify_graph_caller(true);
++}
++
++int ftrace_disable_ftrace_graph_caller(void)
++{
++ return ftrace_modify_graph_caller(false);
++}
++#endif /* CONFIG_DYNAMIC_FTRACE */
++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+diff -Nur linux-3.14.36/arch/arm64/kernel/head.S linux-openelec/arch/arm64/kernel/head.S
+--- linux-3.14.36/arch/arm64/kernel/head.S 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/head.S 2015-05-06 12:05:43.000000000 -0500
+@@ -26,6 +26,7 @@
+ #include <asm/assembler.h>
+ #include <asm/ptrace.h>
+ #include <asm/asm-offsets.h>
++#include <asm/cache.h>
+ #include <asm/cputype.h>
+ #include <asm/memory.h>
+ #include <asm/thread_info.h>
+@@ -34,29 +35,17 @@
+ #include <asm/page.h>
+ #include <asm/virt.h>
+
+-/*
+- * swapper_pg_dir is the virtual address of the initial page table. We place
+- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
+- * 2 pages and is placed below swapper_pg_dir.
+- */
+ #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
+
+ #if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
+ #error KERNEL_RAM_VADDR must start at 0xXXX80000
+ #endif
+
+-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
+-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
+-
+- .globl swapper_pg_dir
+- .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
+-
+- .globl idmap_pg_dir
+- .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
+-
+- .macro pgtbl, ttb0, ttb1, phys
+- add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
+- sub \ttb0, \ttb1, #IDMAP_DIR_SIZE
++ .macro pgtbl, ttb0, ttb1, virt_to_phys
++ ldr \ttb1, =swapper_pg_dir
++ ldr \ttb0, =idmap_pg_dir
++ add \ttb1, \ttb1, \virt_to_phys
++ add \ttb0, \ttb0, \virt_to_phys
+ .endm
+
+ #ifdef CONFIG_ARM64_64K_PAGES
+@@ -229,7 +218,11 @@
+ cmp w20, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+-1: str w20, [x1] // This CPU has booted in EL1
++1: dc cvac, x1 // Clean potentially dirty cache line
++ dsb sy
++ str w20, [x1] // This CPU has booted in EL1
++ dc civac, x1 // Clean&invalidate potentially stale cache line
++ dsb sy
+ ret
+ ENDPROC(set_cpu_boot_mode_flag)
+
+@@ -240,8 +233,9 @@
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+- .pushsection .data
++ .pushsection .data..cacheline_aligned
+ ENTRY(__boot_cpu_mode)
++ .align L1_CACHE_SHIFT
+ .long BOOT_CPU_MODE_EL2
+ .long 0
+ .popsection
+@@ -298,7 +292,7 @@
+ mov x23, x0 // x23=current cpu_table
+ cbz x23, __error_p // invalid processor (x23=0)?
+
+- pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1
++ pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
+ ldr x12, [x23, #CPU_INFO_SETUP]
+ add x12, x12, x28 // __virt_to_phys
+ blr x12 // initialise processor
+@@ -340,8 +334,13 @@
+ * x27 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
++ *
++ * We align the entire function to the smallest power of two larger than it to
++ * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
++ * close to the end of a 512MB or 1GB block we might require an additional
++ * table to map the entire function.
+ */
+- .align 6
++ .align 4
+ __turn_mmu_on:
+ msr sctlr_el1, x0
+ isb
+@@ -384,26 +383,18 @@
+ * Preserves: tbl, flags
+ * Corrupts: phys, start, end, pstate
+ */
+- .macro create_block_map, tbl, flags, phys, start, end, idmap=0
++ .macro create_block_map, tbl, flags, phys, start, end
+ lsr \phys, \phys, #BLOCK_SHIFT
+- .if \idmap
+- and \start, \phys, #PTRS_PER_PTE - 1 // table index
+- .else
+ lsr \start, \start, #BLOCK_SHIFT
+ and \start, \start, #PTRS_PER_PTE - 1 // table index
+- .endif
+ orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
+- .ifnc \start,\end
+ lsr \end, \end, #BLOCK_SHIFT
+ and \end, \end, #PTRS_PER_PTE - 1 // table end index
+- .endif
+ 9999: str \phys, [\tbl, \start, lsl #3] // store the entry
+- .ifnc \start,\end
+ add \start, \start, #1 // next entry
+ add \phys, \phys, #BLOCK_SIZE // next block
+ cmp \start, \end
+ b.ls 9999b
+- .endif
+ .endm
+
+ /*
+@@ -415,7 +406,16 @@
+ * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
+ */
+ __create_page_tables:
+- pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
++ pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
++ mov x27, lr
++
++ /*
++ * Invalidate the idmap and swapper page tables to avoid potential
++ * dirty cache lines being evicted.
++ */
++ mov x0, x25
++ add x1, x26, #SWAPPER_DIR_SIZE
++ bl __inval_cache_range
+
+ /*
+ * Clear the idmap and swapper page tables.
+@@ -435,9 +435,13 @@
+ * Create the identity mapping.
+ */
+ add x0, x25, #PAGE_SIZE // section table address
+- adr x3, __turn_mmu_on // virtual/physical address
++ ldr x3, =KERNEL_START
++ add x3, x3, x28 // __pa(KERNEL_START)
+ create_pgd_entry x25, x0, x3, x5, x6
+- create_block_map x0, x7, x3, x5, x5, idmap=1
++ ldr x6, =KERNEL_END
++ mov x5, x3 // __pa(KERNEL_START)
++ add x6, x6, x28 // __pa(KERNEL_END)
++ create_block_map x0, x7, x3, x5, x6
+
+ /*
+ * Map the kernel image (starting with PHYS_OFFSET).
+@@ -445,7 +449,7 @@
+ add x0, x26, #PAGE_SIZE // section table address
+ mov x5, #PAGE_OFFSET
+ create_pgd_entry x26, x0, x5, x3, x6
+- ldr x6, =KERNEL_END - 1
++ ldr x6, =KERNEL_END
+ mov x3, x24 // phys offset
+ create_block_map x0, x7, x3, x5, x6
+
+@@ -474,6 +478,17 @@
+ add x0, x26, #2 * PAGE_SIZE // section table address
+ create_pgd_entry x26, x0, x5, x6, x7
+ #endif
++
++ /*
++ * Since the page tables have been populated with non-cacheable
++ * accesses (MMU disabled), invalidate the idmap and swapper page
++ * tables again to remove any speculatively loaded cache lines.
++ */
++ mov x0, x25
++ add x1, x26, #SWAPPER_DIR_SIZE
++ bl __inval_cache_range
++
++ mov lr, x27
+ ret
+ ENDPROC(__create_page_tables)
+ .ltorg
+@@ -483,7 +498,7 @@
+ __switch_data:
+ .quad __mmap_switched
+ .quad __bss_start // x6
+- .quad _end // x7
++ .quad __bss_stop // x7
+ .quad processor_id // x4
+ .quad __fdt_pointer // x5
+ .quad memstart_addr // x6
+diff -Nur linux-3.14.36/arch/arm64/kernel/hw_breakpoint.c linux-openelec/arch/arm64/kernel/hw_breakpoint.c
+--- linux-3.14.36/arch/arm64/kernel/hw_breakpoint.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/hw_breakpoint.c 2015-05-06 12:05:43.000000000 -0500
+@@ -20,6 +20,7 @@
+
+ #define pr_fmt(fmt) "hw-breakpoint: " fmt
+
++#include <linux/compat.h>
+ #include <linux/cpu_pm.h>
+ #include <linux/errno.h>
+ #include <linux/hw_breakpoint.h>
+@@ -27,7 +28,6 @@
+ #include <linux/ptrace.h>
+ #include <linux/smp.h>
+
+-#include <asm/compat.h>
+ #include <asm/current.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/hw_breakpoint.h>
+diff -Nur linux-3.14.36/arch/arm64/kernel/kgdb.c linux-openelec/arch/arm64/kernel/kgdb.c
+--- linux-3.14.36/arch/arm64/kernel/kgdb.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/kgdb.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,336 @@
++/*
++ * AArch64 KGDB support
++ *
++ * Based on arch/arm/kernel/kgdb.c
++ *
++ * Copyright (C) 2013 Cavium Inc.
++ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/irq.h>
++#include <linux/kdebug.h>
++#include <linux/kgdb.h>
++#include <asm/traps.h>
++
++struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
++ { "x0", 8, offsetof(struct pt_regs, regs[0])},
++ { "x1", 8, offsetof(struct pt_regs, regs[1])},
++ { "x2", 8, offsetof(struct pt_regs, regs[2])},
++ { "x3", 8, offsetof(struct pt_regs, regs[3])},
++ { "x4", 8, offsetof(struct pt_regs, regs[4])},
++ { "x5", 8, offsetof(struct pt_regs, regs[5])},
++ { "x6", 8, offsetof(struct pt_regs, regs[6])},
++ { "x7", 8, offsetof(struct pt_regs, regs[7])},
++ { "x8", 8, offsetof(struct pt_regs, regs[8])},
++ { "x9", 8, offsetof(struct pt_regs, regs[9])},
++ { "x10", 8, offsetof(struct pt_regs, regs[10])},
++ { "x11", 8, offsetof(struct pt_regs, regs[11])},
++ { "x12", 8, offsetof(struct pt_regs, regs[12])},
++ { "x13", 8, offsetof(struct pt_regs, regs[13])},
++ { "x14", 8, offsetof(struct pt_regs, regs[14])},
++ { "x15", 8, offsetof(struct pt_regs, regs[15])},
++ { "x16", 8, offsetof(struct pt_regs, regs[16])},
++ { "x17", 8, offsetof(struct pt_regs, regs[17])},
++ { "x18", 8, offsetof(struct pt_regs, regs[18])},
++ { "x19", 8, offsetof(struct pt_regs, regs[19])},
++ { "x20", 8, offsetof(struct pt_regs, regs[20])},
++ { "x21", 8, offsetof(struct pt_regs, regs[21])},
++ { "x22", 8, offsetof(struct pt_regs, regs[22])},
++ { "x23", 8, offsetof(struct pt_regs, regs[23])},
++ { "x24", 8, offsetof(struct pt_regs, regs[24])},
++ { "x25", 8, offsetof(struct pt_regs, regs[25])},
++ { "x26", 8, offsetof(struct pt_regs, regs[26])},
++ { "x27", 8, offsetof(struct pt_regs, regs[27])},
++ { "x28", 8, offsetof(struct pt_regs, regs[28])},
++ { "x29", 8, offsetof(struct pt_regs, regs[29])},
++ { "x30", 8, offsetof(struct pt_regs, regs[30])},
++ { "sp", 8, offsetof(struct pt_regs, sp)},
++ { "pc", 8, offsetof(struct pt_regs, pc)},
++ { "pstate", 8, offsetof(struct pt_regs, pstate)},
++ { "v0", 16, -1 },
++ { "v1", 16, -1 },
++ { "v2", 16, -1 },
++ { "v3", 16, -1 },
++ { "v4", 16, -1 },
++ { "v5", 16, -1 },
++ { "v6", 16, -1 },
++ { "v7", 16, -1 },
++ { "v8", 16, -1 },
++ { "v9", 16, -1 },
++ { "v10", 16, -1 },
++ { "v11", 16, -1 },
++ { "v12", 16, -1 },
++ { "v13", 16, -1 },
++ { "v14", 16, -1 },
++ { "v15", 16, -1 },
++ { "v16", 16, -1 },
++ { "v17", 16, -1 },
++ { "v18", 16, -1 },
++ { "v19", 16, -1 },
++ { "v20", 16, -1 },
++ { "v21", 16, -1 },
++ { "v22", 16, -1 },
++ { "v23", 16, -1 },
++ { "v24", 16, -1 },
++ { "v25", 16, -1 },
++ { "v26", 16, -1 },
++ { "v27", 16, -1 },
++ { "v28", 16, -1 },
++ { "v29", 16, -1 },
++ { "v30", 16, -1 },
++ { "v31", 16, -1 },
++ { "fpsr", 4, -1 },
++ { "fpcr", 4, -1 },
++};
++
++char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
++{
++ if (regno >= DBG_MAX_REG_NUM || regno < 0)
++ return NULL;
++
++ if (dbg_reg_def[regno].offset != -1)
++ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
++ dbg_reg_def[regno].size);
++ else
++ memset(mem, 0, dbg_reg_def[regno].size);
++ return dbg_reg_def[regno].name;
++}
++
++int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
++{
++ if (regno >= DBG_MAX_REG_NUM || regno < 0)
++ return -EINVAL;
++
++ if (dbg_reg_def[regno].offset != -1)
++ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
++ dbg_reg_def[regno].size);
++ return 0;
++}
++
++void
++sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
++{
++ struct pt_regs *thread_regs;
++
++ /* Initialize to zero */
++ memset((char *)gdb_regs, 0, NUMREGBYTES);
++ thread_regs = task_pt_regs(task);
++ memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
++}
++
++void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
++{
++ regs->pc = pc;
++}
++
++static int compiled_break;
++
++static void kgdb_arch_update_addr(struct pt_regs *regs,
++ char *remcom_in_buffer)
++{
++ unsigned long addr;
++ char *ptr;
++
++ ptr = &remcom_in_buffer[1];
++ if (kgdb_hex2long(&ptr, &addr))
++ kgdb_arch_set_pc(regs, addr);
++ else if (compiled_break == 1)
++ kgdb_arch_set_pc(regs, regs->pc + 4);
++
++ compiled_break = 0;
++}
++
++int kgdb_arch_handle_exception(int exception_vector, int signo,
++ int err_code, char *remcom_in_buffer,
++ char *remcom_out_buffer,
++ struct pt_regs *linux_regs)
++{
++ int err;
++
++ switch (remcom_in_buffer[0]) {
++ case 'D':
++ case 'k':
++ /*
++ * Packet D (Detach), k (kill). No special handling
++ * is required here. Handle same as c packet.
++ */
++ case 'c':
++ /*
++ * Packet c (Continue) to continue executing.
++ * Set pc to required address.
++ * Try to read optional parameter and set pc.
++ * If this was a compiled breakpoint, we need to move
++ * to the next instruction else we will just breakpoint
++ * over and over again.
++ */
++ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
++ atomic_set(&kgdb_cpu_doing_single_step, -1);
++ kgdb_single_step = 0;
++
++ /*
++ * Received continue command, disable single step
++ */
++ if (kernel_active_single_step())
++ kernel_disable_single_step();
++
++ err = 0;
++ break;
++ case 's':
++ /*
++ * Update step address value with address passed
++ * with step packet.
++ * On debug exception return PC is copied to ELR
++ * So just update PC.
++ * If no step address is passed, resume from the address
++ * pointed by PC. Do not update PC
++ */
++ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
++ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
++ kgdb_single_step = 1;
++
++ /*
++ * Enable single step handling
++ */
++ if (!kernel_active_single_step())
++ kernel_enable_single_step(linux_regs);
++ err = 0;
++ break;
++ default:
++ err = -1;
++ }
++ return err;
++}
++
++static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
++{
++ kgdb_handle_exception(1, SIGTRAP, 0, regs);
++ return 0;
++}
++
++static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
++{
++ compiled_break = 1;
++ kgdb_handle_exception(1, SIGTRAP, 0, regs);
++
++ return 0;
++}
++
++static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
++{
++ kgdb_handle_exception(1, SIGTRAP, 0, regs);
++ return 0;
++}
++
++static struct break_hook kgdb_brkpt_hook = {
++ .esr_mask = 0xffffffff,
++ .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
++ .fn = kgdb_brk_fn
++};
++
++static struct break_hook kgdb_compiled_brkpt_hook = {
++ .esr_mask = 0xffffffff,
++ .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
++ .fn = kgdb_compiled_brk_fn
++};
++
++static struct step_hook kgdb_step_hook = {
++ .fn = kgdb_step_brk_fn
++};
++
++static void kgdb_call_nmi_hook(void *ignored)
++{
++ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
++}
++
++void kgdb_roundup_cpus(unsigned long flags)
++{
++ local_irq_enable();
++ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
++ local_irq_disable();
++}
++
++static int __kgdb_notify(struct die_args *args, unsigned long cmd)
++{
++ struct pt_regs *regs = args->regs;
++
++ if (kgdb_handle_exception(1, args->signr, cmd, regs))
++ return NOTIFY_DONE;
++ return NOTIFY_STOP;
++}
++
++static int
++kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
++{
++ unsigned long flags;
++ int ret;
++
++ local_irq_save(flags);
++ ret = __kgdb_notify(ptr, cmd);
++ local_irq_restore(flags);
++
++ return ret;
++}
++
++static struct notifier_block kgdb_notifier = {
++ .notifier_call = kgdb_notify,
++ /*
++ * Want to be lowest priority
++ */
++ .priority = -INT_MAX,
++};
++
++/*
++ * kgdb_arch_init - Perform any architecture specific initalization.
++ * This function will handle the initalization of any architecture
++ * specific callbacks.
++ */
++int kgdb_arch_init(void)
++{
++ int ret = register_die_notifier(&kgdb_notifier);
++
++ if (ret != 0)
++ return ret;
++
++ register_break_hook(&kgdb_brkpt_hook);
++ register_break_hook(&kgdb_compiled_brkpt_hook);
++ register_step_hook(&kgdb_step_hook);
++ return 0;
++}
++
++/*
++ * kgdb_arch_exit - Perform any architecture specific uninitalization.
++ * This function will handle the uninitalization of any architecture
++ * specific callbacks, for dynamic registration and unregistration.
++ */
++void kgdb_arch_exit(void)
++{
++ unregister_break_hook(&kgdb_brkpt_hook);
++ unregister_break_hook(&kgdb_compiled_brkpt_hook);
++ unregister_step_hook(&kgdb_step_hook);
++ unregister_die_notifier(&kgdb_notifier);
++}
++
++/*
++ * ARM instructions are always in LE.
++ * Break instruction is encoded in LE format
++ */
++struct kgdb_arch arch_kgdb_ops = {
++ .gdb_bpt_instr = {
++ KGDB_DYN_BRK_INS_BYTE0,
++ KGDB_DYN_BRK_INS_BYTE1,
++ KGDB_DYN_BRK_INS_BYTE2,
++ KGDB_DYN_BRK_INS_BYTE3,
++ }
++};
+diff -Nur linux-3.14.36/arch/arm64/kernel/Makefile linux-openelec/arch/arm64/kernel/Makefile
+--- linux-3.14.36/arch/arm64/kernel/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -5,21 +5,29 @@
+ CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
+ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+
++CFLAGS_REMOVE_ftrace.o = -pg
++CFLAGS_REMOVE_insn.o = -pg
++CFLAGS_REMOVE_return_address.o = -pg
++
+ # Object file lists.
+ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
+ entry-fpsimd.o process.o ptrace.o setup.o signal.o \
+ sys.o stacktrace.o time.o traps.o io.o vdso.o \
+- hyp-stub.o psci.o cpu_ops.o insn.o
++ hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
+
+ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
+ sys_compat.o
++arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
+ arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
++arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
+ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
++arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
+ arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
++arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+ arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
++arm64-obj-$(CONFIG_KGDB) += kgdb.o
+
+ obj-y += $(arm64-obj-y) vdso/
+ obj-m += $(arm64-obj-m)
+diff -Nur linux-3.14.36/arch/arm64/kernel/perf_event.c linux-openelec/arch/arm64/kernel/perf_event.c
+--- linux-3.14.36/arch/arm64/kernel/perf_event.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/perf_event.c 2015-05-06 12:05:43.000000000 -0500
+@@ -1348,8 +1348,8 @@
+ * Callchain handling code.
+ */
+ struct frame_tail {
+- struct frame_tail __user *fp;
+- unsigned long lr;
++ struct frame_tail __user *fp;
++ unsigned long lr;
+ } __attribute__((packed));
+
+ /*
+@@ -1386,22 +1386,84 @@
+ return buftail.fp;
+ }
+
++#ifdef CONFIG_COMPAT
++/*
++ * The registers we're interested in are at the end of the variable
++ * length saved register structure. The fp points at the end of this
++ * structure so the address of this struct is:
++ * (struct compat_frame_tail *)(xxx->fp)-1
++ *
++ * This code has been adapted from the ARM OProfile support.
++ */
++struct compat_frame_tail {
++ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
++ u32 sp;
++ u32 lr;
++} __attribute__((packed));
++
++static struct compat_frame_tail __user *
++compat_user_backtrace(struct compat_frame_tail __user *tail,
++ struct perf_callchain_entry *entry)
++{
++ struct compat_frame_tail buftail;
++ unsigned long err;
++
++ /* Also check accessibility of one struct frame_tail beyond */
++ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
++ return NULL;
++
++ pagefault_disable();
++ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
++ pagefault_enable();
++
++ if (err)
++ return NULL;
++
++ perf_callchain_store(entry, buftail.lr);
++
++ /*
++ * Frame pointers should strictly progress back up the stack
++ * (towards higher addresses).
++ */
++ if (tail + 1 >= (struct compat_frame_tail __user *)
++ compat_ptr(buftail.fp))
++ return NULL;
++
++ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
++}
++#endif /* CONFIG_COMPAT */
++
+ void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+ {
+- struct frame_tail __user *tail;
+-
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->pc);
+- tail = (struct frame_tail __user *)regs->regs[29];
+
+- while (entry->nr < PERF_MAX_STACK_DEPTH &&
+- tail && !((unsigned long)tail & 0xf))
+- tail = user_backtrace(tail, entry);
++ if (!compat_user_mode(regs)) {
++ /* AARCH64 mode */
++ struct frame_tail __user *tail;
++
++ tail = (struct frame_tail __user *)regs->regs[29];
++
++ while (entry->nr < PERF_MAX_STACK_DEPTH &&
++ tail && !((unsigned long)tail & 0xf))
++ tail = user_backtrace(tail, entry);
++ } else {
++#ifdef CONFIG_COMPAT
++ /* AARCH32 compat mode */
++ struct compat_frame_tail __user *tail;
++
++ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
++
++ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
++ tail && !((unsigned long)tail & 0x3))
++ tail = compat_user_backtrace(tail, entry);
++#endif
++ }
+ }
+
+ /*
+@@ -1429,6 +1491,7 @@
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
++
+ walk_stackframe(&frame, callchain_trace, entry);
+ }
+
+diff -Nur linux-3.14.36/arch/arm64/kernel/perf_regs.c linux-openelec/arch/arm64/kernel/perf_regs.c
+--- linux-3.14.36/arch/arm64/kernel/perf_regs.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/perf_regs.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,46 @@
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/perf_event.h>
++#include <linux/bug.h>
++
++#include <asm/compat.h>
++#include <asm/perf_regs.h>
++#include <asm/ptrace.h>
++
++u64 perf_reg_value(struct pt_regs *regs, int idx)
++{
++ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
++ return 0;
++
++ /*
++ * Compat (i.e. 32 bit) mode:
++ * - PC has been set in the pt_regs struct in kernel_entry,
++ * - Handle SP and LR here.
++ */
++ if (compat_user_mode(regs)) {
++ if ((u32)idx == PERF_REG_ARM64_SP)
++ return regs->compat_sp;
++ if ((u32)idx == PERF_REG_ARM64_LR)
++ return regs->compat_lr;
++ }
++
++ return regs->regs[idx];
++}
++
++#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))
++
++int perf_reg_validate(u64 mask)
++{
++ if (!mask || mask & REG_RESERVED)
++ return -EINVAL;
++
++ return 0;
++}
++
++u64 perf_reg_abi(struct task_struct *task)
++{
++ if (is_compat_thread(task_thread_info(task)))
++ return PERF_SAMPLE_REGS_ABI_32;
++ else
++ return PERF_SAMPLE_REGS_ABI_64;
++}
+diff -Nur linux-3.14.36/arch/arm64/kernel/process.c linux-openelec/arch/arm64/kernel/process.c
+--- linux-3.14.36/arch/arm64/kernel/process.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/process.c 2015-07-24 18:03:28.448842002 -0500
+@@ -20,6 +20,7 @@
+
+ #include <stdarg.h>
+
++#include <linux/compat.h>
+ #include <linux/export.h>
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+diff -Nur linux-3.14.36/arch/arm64/kernel/process.c.orig linux-openelec/arch/arm64/kernel/process.c.orig
+--- linux-3.14.36/arch/arm64/kernel/process.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/process.c.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,350 @@
++/*
++ * Based on arch/arm/kernel/process.c
++ *
++ * Original Copyright (C) 1995 Linus Torvalds
++ * Copyright (C) 1996-2000 Russell King - Converted to ARM.
++ * Copyright (C) 2012 ARM Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <stdarg.h>
++
++#include <linux/compat.h>
++#include <linux/export.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/stddef.h>
++#include <linux/unistd.h>
++#include <linux/user.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/interrupt.h>
++#include <linux/kallsyms.h>
++#include <linux/init.h>
++#include <linux/cpu.h>
++#include <linux/cpuidle.h>
++#include <linux/elfcore.h>
++#include <linux/pm.h>
++#include <linux/tick.h>
++#include <linux/utsname.h>
++#include <linux/uaccess.h>
++#include <linux/random.h>
++#include <linux/hw_breakpoint.h>
++#include <linux/personality.h>
++#include <linux/notifier.h>
++
++#include <asm/compat.h>
++#include <asm/cacheflush.h>
++#include <asm/fpsimd.h>
++#include <asm/mmu_context.h>
++#include <asm/processor.h>
++#include <asm/stacktrace.h>
++
++static void setup_restart(void)
++{
++ /*
++ * Tell the mm system that we are going to reboot -
++ * we may need it to insert some 1:1 mappings so that
++ * soft boot works.
++ */
++ setup_mm_for_reboot();
++
++ /* Clean and invalidate caches */
++ flush_cache_all();
++
++ /* Turn D-cache off */
++ cpu_cache_off();
++
++ /* Push out any further dirty data, and ensure cache is empty */
++ flush_cache_all();
++}
++
++void soft_restart(unsigned long addr)
++{
++ setup_restart();
++ cpu_reset(addr);
++}
++
++/*
++ * Function pointers to optional machine specific functions
++ */
++void (*pm_power_off)(void);
++EXPORT_SYMBOL_GPL(pm_power_off);
++
++void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
++EXPORT_SYMBOL_GPL(arm_pm_restart);
++
++/*
++ * This is our default idle handler.
++ */
++void arch_cpu_idle(void)
++{
++ /*
++ * This should do all the clock switching and wait for interrupt
++ * tricks
++ */
++ if (cpuidle_idle_call()) {
++ cpu_do_idle();
++ local_irq_enable();
++ }
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void arch_cpu_idle_dead(void)
++{
++ cpu_die();
++}
++#endif
++
++void machine_shutdown(void)
++{
++#ifdef CONFIG_SMP
++ smp_send_stop();
++#endif
++}
++
++void machine_halt(void)
++{
++ machine_shutdown();
++ while (1);
++}
++
++void machine_power_off(void)
++{
++ machine_shutdown();
++ if (pm_power_off)
++ pm_power_off();
++}
++
++void machine_restart(char *cmd)
++{
++ machine_shutdown();
++
++ /* Disable interrupts first */
++ local_irq_disable();
++
++ /* Now call the architecture specific reboot code. */
++ if (arm_pm_restart)
++ arm_pm_restart(reboot_mode, cmd);
++
++ /*
++ * Whoops - the architecture was unable to reboot.
++ */
++ printk("Reboot failed -- System halted\n");
++ while (1);
++}
++
++void __show_regs(struct pt_regs *regs)
++{
++ int i, top_reg;
++ u64 lr, sp;
++
++ if (compat_user_mode(regs)) {
++ lr = regs->compat_lr;
++ sp = regs->compat_sp;
++ top_reg = 12;
++ } else {
++ lr = regs->regs[30];
++ sp = regs->sp;
++ top_reg = 29;
++ }
++
++ show_regs_print_info(KERN_DEFAULT);
++ print_symbol("PC is at %s\n", instruction_pointer(regs));
++ print_symbol("LR is at %s\n", lr);
++ printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
++ regs->pc, lr, regs->pstate);
++ printk("sp : %016llx\n", sp);
++ for (i = top_reg; i >= 0; i--) {
++ printk("x%-2d: %016llx ", i, regs->regs[i]);
++ if (i % 2 == 0)
++ printk("\n");
++ }
++ printk("\n");
++}
++
++void show_regs(struct pt_regs * regs)
++{
++ printk("\n");
++ __show_regs(regs);
++}
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++}
++
++void flush_thread(void)
++{
++ fpsimd_flush_thread();
++ flush_ptrace_hw_breakpoint(current);
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++}
++
++int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
++{
++ fpsimd_save_state(&current->thread.fpsimd_state);
++ *dst = *src;
++ return 0;
++}
++
++asmlinkage void ret_from_fork(void) asm("ret_from_fork");
++
++int copy_thread(unsigned long clone_flags, unsigned long stack_start,
++ unsigned long stk_sz, struct task_struct *p)
++{
++ struct pt_regs *childregs = task_pt_regs(p);
++ unsigned long tls = p->thread.tp_value;
++
++ memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
++
++ if (likely(!(p->flags & PF_KTHREAD))) {
++ *childregs = *current_pt_regs();
++ childregs->regs[0] = 0;
++ if (is_compat_thread(task_thread_info(p))) {
++ if (stack_start)
++ childregs->compat_sp = stack_start;
++ } else {
++ /*
++ * Read the current TLS pointer from tpidr_el0 as it may be
++ * out-of-sync with the saved value.
++ */
++ asm("mrs %0, tpidr_el0" : "=r" (tls));
++ if (stack_start) {
++ /* 16-byte aligned stack mandatory on AArch64 */
++ if (stack_start & 15)
++ return -EINVAL;
++ childregs->sp = stack_start;
++ }
++ }
++ /*
++ * If a TLS pointer was passed to clone (4th argument), use it
++ * for the new thread.
++ */
++ if (clone_flags & CLONE_SETTLS)
++ tls = childregs->regs[3];
++ } else {
++ memset(childregs, 0, sizeof(struct pt_regs));
++ childregs->pstate = PSR_MODE_EL1h;
++ p->thread.cpu_context.x19 = stack_start;
++ p->thread.cpu_context.x20 = stk_sz;
++ }
++ p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
++ p->thread.cpu_context.sp = (unsigned long)childregs;
++ p->thread.tp_value = tls;
++
++ ptrace_hw_copy_thread(p);
++
++ return 0;
++}
++
++static void tls_thread_switch(struct task_struct *next)
++{
++ unsigned long tpidr, tpidrro;
++
++ if (!is_compat_task()) {
++ asm("mrs %0, tpidr_el0" : "=r" (tpidr));
++ current->thread.tp_value = tpidr;
++ }
++
++ if (is_compat_thread(task_thread_info(next))) {
++ tpidr = 0;
++ tpidrro = next->thread.tp_value;
++ } else {
++ tpidr = next->thread.tp_value;
++ tpidrro = 0;
++ }
++
++ asm(
++ " msr tpidr_el0, %0\n"
++ " msr tpidrro_el0, %1"
++ : : "r" (tpidr), "r" (tpidrro));
++}
++
++/*
++ * Thread switching.
++ */
++struct task_struct *__switch_to(struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct task_struct *last;
++
++ fpsimd_thread_switch(next);
++ tls_thread_switch(next);
++ hw_breakpoint_thread_switch(next);
++ contextidr_thread_switch(next);
++
++ /*
++ * Complete any pending TLB or cache maintenance on this CPU in case
++ * the thread migrates to a different CPU.
++ */
++ dsb();
++
++ /* the actual thread switch */
++ last = cpu_switch_to(prev, next);
++
++ return last;
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ struct stackframe frame;
++ unsigned long stack_page;
++ int count = 0;
++ if (!p || p == current || p->state == TASK_RUNNING)
++ return 0;
++
++ frame.fp = thread_saved_fp(p);
++ frame.sp = thread_saved_sp(p);
++ frame.pc = thread_saved_pc(p);
++ stack_page = (unsigned long)task_stack_page(p);
++ do {
++ if (frame.sp < stack_page ||
++ frame.sp >= stack_page + THREAD_SIZE ||
++ unwind_frame(&frame))
++ return 0;
++ if (!in_sched_functions(frame.pc))
++ return frame.pc;
++ } while (count ++ < 16);
++ return 0;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
++ sp -= get_random_int() & ~PAGE_MASK;
++ return sp & ~0xf;
++}
++
++static unsigned long randomize_base(unsigned long base)
++{
++ unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;
++ return randomize_range(base, range_end, 0) ? : base;
++}
++
++unsigned long arch_randomize_brk(struct mm_struct *mm)
++{
++ return randomize_base(mm->brk);
++}
++
++unsigned long randomize_et_dyn(unsigned long base)
++{
++ return randomize_base(base);
++}
+diff -Nur linux-3.14.36/arch/arm64/kernel/ptrace.c linux-openelec/arch/arm64/kernel/ptrace.c
+--- linux-3.14.36/arch/arm64/kernel/ptrace.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/ptrace.c 2015-07-24 18:03:28.448842002 -0500
+@@ -19,6 +19,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/compat.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
+@@ -41,6 +42,9 @@
+ #include <asm/traps.h>
+ #include <asm/system_misc.h>
+
++#define CREATE_TRACE_POINTS
++#include <trace/events/syscalls.h>
++
+ /*
+ * TODO: does not yet catch signals sent when the child dies.
+ * in exit.c or in signal.c.
+@@ -1073,35 +1077,49 @@
+ return ptrace_request(child, request, addr, data);
+ }
+
+-asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
++enum ptrace_syscall_dir {
++ PTRACE_SYSCALL_ENTER = 0,
++ PTRACE_SYSCALL_EXIT,
++};
++
++static void tracehook_report_syscall(struct pt_regs *regs,
++ enum ptrace_syscall_dir dir)
+ {
++ int regno;
+ unsigned long saved_reg;
+
+- if (!test_thread_flag(TIF_SYSCALL_TRACE))
+- return regs->syscallno;
+-
+- if (is_compat_task()) {
+- /* AArch32 uses ip (r12) for scratch */
+- saved_reg = regs->regs[12];
+- regs->regs[12] = dir;
+- } else {
+- /*
+- * Save X7. X7 is used to denote syscall entry/exit:
+- * X7 = 0 -> entry, = 1 -> exit
+- */
+- saved_reg = regs->regs[7];
+- regs->regs[7] = dir;
+- }
++ /*
++ * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
++ * used to denote syscall entry/exit:
++ */
++ regno = (is_compat_task() ? 12 : 7);
++ saved_reg = regs->regs[regno];
++ regs->regs[regno] = dir;
+
+- if (dir)
++ if (dir == PTRACE_SYSCALL_EXIT)
+ tracehook_report_syscall_exit(regs, 0);
+ else if (tracehook_report_syscall_entry(regs))
+ regs->syscallno = ~0UL;
+
+- if (is_compat_task())
+- regs->regs[12] = saved_reg;
+- else
+- regs->regs[7] = saved_reg;
++ regs->regs[regno] = saved_reg;
++}
++
++asmlinkage int syscall_trace_enter(struct pt_regs *regs)
++{
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
++
++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
++ trace_sys_enter(regs, regs->syscallno);
+
+ return regs->syscallno;
+ }
++
++asmlinkage void syscall_trace_exit(struct pt_regs *regs)
++{
++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
++ trace_sys_exit(regs, regs_return_value(regs));
++
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
++}
+diff -Nur linux-3.14.36/arch/arm64/kernel/ptrace.c.orig linux-openelec/arch/arm64/kernel/ptrace.c.orig
+--- linux-3.14.36/arch/arm64/kernel/ptrace.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/ptrace.c.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,1124 @@
++/*
++ * Based on arch/arm/kernel/ptrace.c
++ *
++ * By Ross Biro 1/23/92
++ * edited by Linus Torvalds
++ * ARM modifications Copyright (C) 2000 Russell King
++ * Copyright (C) 2012 ARM Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/compat.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/ptrace.h>
++#include <linux/user.h>
++#include <linux/security.h>
++#include <linux/init.h>
++#include <linux/signal.h>
++#include <linux/uaccess.h>
++#include <linux/perf_event.h>
++#include <linux/hw_breakpoint.h>
++#include <linux/regset.h>
++#include <linux/tracehook.h>
++#include <linux/elf.h>
++
++#include <asm/compat.h>
++#include <asm/debug-monitors.h>
++#include <asm/pgtable.h>
++#include <asm/traps.h>
++#include <asm/system_misc.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/syscalls.h>
++
++/*
++ * TODO: does not yet catch signals sent when the child dies.
++ * in exit.c or in signal.c.
++ */
++
++/*
++ * Called by kernel/ptrace.c when detaching..
++ */
++void ptrace_disable(struct task_struct *child)
++{
++}
++
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++/*
++ * Handle hitting a HW-breakpoint.
++ */
++static void ptrace_hbptriggered(struct perf_event *bp,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
++ siginfo_t info = {
++ .si_signo = SIGTRAP,
++ .si_errno = 0,
++ .si_code = TRAP_HWBKPT,
++ .si_addr = (void __user *)(bkpt->trigger),
++ };
++
++#ifdef CONFIG_COMPAT
++ int i;
++
++ if (!is_compat_task())
++ goto send_sig;
++
++ for (i = 0; i < ARM_MAX_BRP; ++i) {
++ if (current->thread.debug.hbp_break[i] == bp) {
++ info.si_errno = (i << 1) + 1;
++ break;
++ }
++ }
++ for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
++ if (current->thread.debug.hbp_watch[i] == bp) {
++ info.si_errno = -((i << 1) + 1);
++ break;
++ }
++ }
++
++send_sig:
++#endif
++ force_sig_info(SIGTRAP, &info, current);
++}
++
++/*
++ * Unregister breakpoints from this task and reset the pointers in
++ * the thread_struct.
++ */
++void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
++{
++ int i;
++ struct thread_struct *t = &tsk->thread;
++
++ for (i = 0; i < ARM_MAX_BRP; i++) {
++ if (t->debug.hbp_break[i]) {
++ unregister_hw_breakpoint(t->debug.hbp_break[i]);
++ t->debug.hbp_break[i] = NULL;
++ }
++ }
++
++ for (i = 0; i < ARM_MAX_WRP; i++) {
++ if (t->debug.hbp_watch[i]) {
++ unregister_hw_breakpoint(t->debug.hbp_watch[i]);
++ t->debug.hbp_watch[i] = NULL;
++ }
++ }
++}
++
++void ptrace_hw_copy_thread(struct task_struct *tsk)
++{
++ memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
++}
++
++static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
++ struct task_struct *tsk,
++ unsigned long idx)
++{
++ struct perf_event *bp = ERR_PTR(-EINVAL);
++
++ switch (note_type) {
++ case NT_ARM_HW_BREAK:
++ if (idx < ARM_MAX_BRP)
++ bp = tsk->thread.debug.hbp_break[idx];
++ break;
++ case NT_ARM_HW_WATCH:
++ if (idx < ARM_MAX_WRP)
++ bp = tsk->thread.debug.hbp_watch[idx];
++ break;
++ }
++
++ return bp;
++}
++
++static int ptrace_hbp_set_event(unsigned int note_type,
++ struct task_struct *tsk,
++ unsigned long idx,
++ struct perf_event *bp)
++{
++ int err = -EINVAL;
++
++ switch (note_type) {
++ case NT_ARM_HW_BREAK:
++ if (idx < ARM_MAX_BRP) {
++ tsk->thread.debug.hbp_break[idx] = bp;
++ err = 0;
++ }
++ break;
++ case NT_ARM_HW_WATCH:
++ if (idx < ARM_MAX_WRP) {
++ tsk->thread.debug.hbp_watch[idx] = bp;
++ err = 0;
++ }
++ break;
++ }
++
++ return err;
++}
++
++static struct perf_event *ptrace_hbp_create(unsigned int note_type,
++ struct task_struct *tsk,
++ unsigned long idx)
++{
++ struct perf_event *bp;
++ struct perf_event_attr attr;
++ int err, type;
++
++ switch (note_type) {
++ case NT_ARM_HW_BREAK:
++ type = HW_BREAKPOINT_X;
++ break;
++ case NT_ARM_HW_WATCH:
++ type = HW_BREAKPOINT_RW;
++ break;
++ default:
++ return ERR_PTR(-EINVAL);
++ }
++
++ ptrace_breakpoint_init(&attr);
++
++ /*
++ * Initialise fields to sane defaults
++ * (i.e. values that will pass validation).
++ */
++ attr.bp_addr = 0;
++ attr.bp_len = HW_BREAKPOINT_LEN_4;
++ attr.bp_type = type;
++ attr.disabled = 1;
++
++ bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
++ if (IS_ERR(bp))
++ return bp;
++
++ err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
++ if (err)
++ return ERR_PTR(err);
++
++ return bp;
++}
++
++static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
++ struct arch_hw_breakpoint_ctrl ctrl,
++ struct perf_event_attr *attr)
++{
++ int err, len, type, disabled = !ctrl.enabled;
++
++ attr->disabled = disabled;
++ if (disabled)
++ return 0;
++
++ err = arch_bp_generic_fields(ctrl, &len, &type);
++ if (err)
++ return err;
++
++ switch (note_type) {
++ case NT_ARM_HW_BREAK:
++ if ((type & HW_BREAKPOINT_X) != type)
++ return -EINVAL;
++ break;
++ case NT_ARM_HW_WATCH:
++ if ((type & HW_BREAKPOINT_RW) != type)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ attr->bp_len = len;
++ attr->bp_type = type;
++
++ return 0;
++}
++
++static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
++{
++ u8 num;
++ u32 reg = 0;
++
++ switch (note_type) {
++ case NT_ARM_HW_BREAK:
++ num = hw_breakpoint_slots(TYPE_INST);
++ break;
++ case NT_ARM_HW_WATCH:
++ num = hw_breakpoint_slots(TYPE_DATA);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ reg |= debug_monitors_arch();
++ reg <<= 8;
++ reg |= num;
++
++ *info = reg;
++ return 0;
++}
++
++static int ptrace_hbp_get_ctrl(unsigned int note_type,
++ struct task_struct *tsk,
++ unsigned long idx,
++ u32 *ctrl)
++{
++ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
++
++ if (IS_ERR(bp))
++ return PTR_ERR(bp);
++
++ *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
++ return 0;
++}
++
++static int ptrace_hbp_get_addr(unsigned int note_type,
++ struct task_struct *tsk,
++ unsigned long idx,
++ u64 *addr)
++{
++ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
++
++ if (IS_ERR(bp))
++ return PTR_ERR(bp);
++
++ *addr = bp ? bp->attr.bp_addr : 0;
++ return 0;
++}
++
++static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
++ struct task_struct *tsk,
++ unsigned long idx)
++{
++ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
++
++ if (!bp)
++ bp = ptrace_hbp_create(note_type, tsk, idx);
++
++ return bp;
++}
++
++static int ptrace_hbp_set_ctrl(unsigned int note_type,
++ struct task_struct *tsk,
++ unsigned long idx,
++ u32 uctrl)
++{
++ int err;
++ struct perf_event *bp;
++ struct perf_event_attr attr;
++ struct arch_hw_breakpoint_ctrl ctrl;
++
++ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
++ if (IS_ERR(bp)) {
++ err = PTR_ERR(bp);
++ return err;
++ }
++
++ attr = bp->attr;
++ decode_ctrl_reg(uctrl, &ctrl);
++ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
++ if (err)
++ return err;
++
++ return modify_user_hw_breakpoint(bp, &attr);
++}
++
++static int ptrace_hbp_set_addr(unsigned int note_type,
++ struct task_struct *tsk,
++ unsigned long idx,
++ u64 addr)
++{
++ int err;
++ struct perf_event *bp;
++ struct perf_event_attr attr;
++
++ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
++ if (IS_ERR(bp)) {
++ err = PTR_ERR(bp);
++ return err;
++ }
++
++ attr = bp->attr;
++ attr.bp_addr = addr;
++ err = modify_user_hw_breakpoint(bp, &attr);
++ return err;
++}
++
++#define PTRACE_HBP_ADDR_SZ sizeof(u64)
++#define PTRACE_HBP_CTRL_SZ sizeof(u32)
++#define PTRACE_HBP_PAD_SZ sizeof(u32)
++
++static int hw_break_get(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ unsigned int note_type = regset->core_note_type;
++ int ret, idx = 0, offset, limit;
++ u32 info, ctrl;
++ u64 addr;
++
++ /* Resource info */
++ ret = ptrace_hbp_get_resource_info(note_type, &info);
++ if (ret)
++ return ret;
++
++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
++ sizeof(info));
++ if (ret)
++ return ret;
++
++ /* Pad */
++ offset = offsetof(struct user_hwdebug_state, pad);
++ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
++ offset + PTRACE_HBP_PAD_SZ);
++ if (ret)
++ return ret;
++
++ /* (address, ctrl) registers */
++ offset = offsetof(struct user_hwdebug_state, dbg_regs);
++ limit = regset->n * regset->size;
++ while (count && offset < limit) {
++ ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
++ if (ret)
++ return ret;
++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
++ offset, offset + PTRACE_HBP_ADDR_SZ);
++ if (ret)
++ return ret;
++ offset += PTRACE_HBP_ADDR_SZ;
++
++ ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
++ if (ret)
++ return ret;
++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
++ offset, offset + PTRACE_HBP_CTRL_SZ);
++ if (ret)
++ return ret;
++ offset += PTRACE_HBP_CTRL_SZ;
++
++ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
++ offset,
++ offset + PTRACE_HBP_PAD_SZ);
++ if (ret)
++ return ret;
++ offset += PTRACE_HBP_PAD_SZ;
++ idx++;
++ }
++
++ return 0;
++}
++
++static int hw_break_set(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ unsigned int note_type = regset->core_note_type;
++ int ret, idx = 0, offset, limit;
++ u32 ctrl;
++ u64 addr;
++
++ /* Resource info and pad */
++ offset = offsetof(struct user_hwdebug_state, dbg_regs);
++ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
++ if (ret)
++ return ret;
++
++ /* (address, ctrl) registers */
++ limit = regset->n * regset->size;
++ while (count && offset < limit) {
++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
++ offset, offset + PTRACE_HBP_ADDR_SZ);
++ if (ret)
++ return ret;
++ ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
++ if (ret)
++ return ret;
++ offset += PTRACE_HBP_ADDR_SZ;
++
++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
++ offset, offset + PTRACE_HBP_CTRL_SZ);
++ if (ret)
++ return ret;
++ ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
++ if (ret)
++ return ret;
++ offset += PTRACE_HBP_CTRL_SZ;
++
++ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
++ offset,
++ offset + PTRACE_HBP_PAD_SZ);
++ if (ret)
++ return ret;
++ offset += PTRACE_HBP_PAD_SZ;
++ idx++;
++ }
++
++ return 0;
++}
++#endif /* CONFIG_HAVE_HW_BREAKPOINT */
++
++static int gpr_get(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
++}
++
++static int gpr_set(struct task_struct *target, const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ int ret;
++ struct user_pt_regs newregs;
++
++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
++ if (ret)
++ return ret;
++
++ if (!valid_user_regs(&newregs))
++ return -EINVAL;
++
++ task_pt_regs(target)->user_regs = newregs;
++ return 0;
++}
++
++/*
++ * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
++ */
++static int fpr_get(struct task_struct *target, const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ struct user_fpsimd_state *uregs;
++ uregs = &target->thread.fpsimd_state.user_fpsimd;
++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
++}
++
++static int fpr_set(struct task_struct *target, const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ int ret;
++ struct user_fpsimd_state newstate;
++
++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
++ if (ret)
++ return ret;
++
++ target->thread.fpsimd_state.user_fpsimd = newstate;
++ return ret;
++}
++
++static int tls_get(struct task_struct *target, const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ unsigned long *tls = &target->thread.tp_value;
++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
++}
++
++static int tls_set(struct task_struct *target, const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ int ret;
++ unsigned long tls;
++
++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
++ if (ret)
++ return ret;
++
++ target->thread.tp_value = tls;
++ return ret;
++}
++
++enum aarch64_regset {
++ REGSET_GPR,
++ REGSET_FPR,
++ REGSET_TLS,
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++ REGSET_HW_BREAK,
++ REGSET_HW_WATCH,
++#endif
++};
++
++static const struct user_regset aarch64_regsets[] = {
++ [REGSET_GPR] = {
++ .core_note_type = NT_PRSTATUS,
++ .n = sizeof(struct user_pt_regs) / sizeof(u64),
++ .size = sizeof(u64),
++ .align = sizeof(u64),
++ .get = gpr_get,
++ .set = gpr_set
++ },
++ [REGSET_FPR] = {
++ .core_note_type = NT_PRFPREG,
++ .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
++ /*
++ * We pretend we have 32-bit registers because the fpsr and
++ * fpcr are 32-bits wide.
++ */
++ .size = sizeof(u32),
++ .align = sizeof(u32),
++ .get = fpr_get,
++ .set = fpr_set
++ },
++ [REGSET_TLS] = {
++ .core_note_type = NT_ARM_TLS,
++ .n = 1,
++ .size = sizeof(void *),
++ .align = sizeof(void *),
++ .get = tls_get,
++ .set = tls_set,
++ },
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++ [REGSET_HW_BREAK] = {
++ .core_note_type = NT_ARM_HW_BREAK,
++ .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
++ .size = sizeof(u32),
++ .align = sizeof(u32),
++ .get = hw_break_get,
++ .set = hw_break_set,
++ },
++ [REGSET_HW_WATCH] = {
++ .core_note_type = NT_ARM_HW_WATCH,
++ .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
++ .size = sizeof(u32),
++ .align = sizeof(u32),
++ .get = hw_break_get,
++ .set = hw_break_set,
++ },
++#endif
++};
++
++static const struct user_regset_view user_aarch64_view = {
++ .name = "aarch64", .e_machine = EM_AARCH64,
++ .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
++};
++
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++enum compat_regset {
++ REGSET_COMPAT_GPR,
++ REGSET_COMPAT_VFP,
++};
++
++static int compat_gpr_get(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ int ret = 0;
++ unsigned int i, start, num_regs;
++
++ /* Calculate the number of AArch32 registers contained in count */
++ num_regs = count / regset->size;
++
++ /* Convert pos into an register number */
++ start = pos / regset->size;
++
++ if (start + num_regs > regset->n)
++ return -EIO;
++
++ for (i = 0; i < num_regs; ++i) {
++ unsigned int idx = start + i;
++ compat_ulong_t reg;
++
++ switch (idx) {
++ case 15:
++ reg = task_pt_regs(target)->pc;
++ break;
++ case 16:
++ reg = task_pt_regs(target)->pstate;
++ break;
++ case 17:
++ reg = task_pt_regs(target)->orig_x0;
++ break;
++ default:
++ reg = task_pt_regs(target)->regs[idx];
++ }
++
++ if (kbuf) {
++ memcpy(kbuf, &reg, sizeof(reg));
++ kbuf += sizeof(reg);
++ } else {
++ ret = copy_to_user(ubuf, &reg, sizeof(reg));
++ if (ret)
++ break;
++
++ ubuf += sizeof(reg);
++ }
++ }
++
++ return ret;
++}
++
++static int compat_gpr_set(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ struct pt_regs newregs;
++ int ret = 0;
++ unsigned int i, start, num_regs;
++
++ /* Calculate the number of AArch32 registers contained in count */
++ num_regs = count / regset->size;
++
++ /* Convert pos into an register number */
++ start = pos / regset->size;
++
++ if (start + num_regs > regset->n)
++ return -EIO;
++
++ newregs = *task_pt_regs(target);
++
++ for (i = 0; i < num_regs; ++i) {
++ unsigned int idx = start + i;
++ compat_ulong_t reg;
++
++ if (kbuf) {
++ memcpy(&reg, kbuf, sizeof(reg));
++ kbuf += sizeof(reg);
++ } else {
++ ret = copy_from_user(&reg, ubuf, sizeof(reg));
++ if (ret)
++ return ret;
++
++ ubuf += sizeof(reg);
++ }
++
++ switch (idx) {
++ case 15:
++ newregs.pc = reg;
++ break;
++ case 16:
++ newregs.pstate = reg;
++ break;
++ case 17:
++ newregs.orig_x0 = reg;
++ break;
++ default:
++ newregs.regs[idx] = reg;
++ }
++
++ }
++
++ if (valid_user_regs(&newregs.user_regs))
++ *task_pt_regs(target) = newregs;
++ else
++ ret = -EINVAL;
++
++ return ret;
++}
++
++static int compat_vfp_get(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ struct user_fpsimd_state *uregs;
++ compat_ulong_t fpscr;
++ int ret;
++
++ uregs = &target->thread.fpsimd_state.user_fpsimd;
++
++ /*
++ * The VFP registers are packed into the fpsimd_state, so they all sit
++ * nicely together for us. We just need to create the fpscr separately.
++ */
++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
++ VFP_STATE_SIZE - sizeof(compat_ulong_t));
++
++ if (count && !ret) {
++ fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
++ (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
++ ret = put_user(fpscr, (compat_ulong_t *)ubuf);
++ }
++
++ return ret;
++}
++
++static int compat_vfp_set(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ struct user_fpsimd_state *uregs;
++ compat_ulong_t fpscr;
++ int ret;
++
++ if (pos + count > VFP_STATE_SIZE)
++ return -EIO;
++
++ uregs = &target->thread.fpsimd_state.user_fpsimd;
++
++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
++ VFP_STATE_SIZE - sizeof(compat_ulong_t));
++
++ if (count && !ret) {
++ ret = get_user(fpscr, (compat_ulong_t *)ubuf);
++ uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
++ uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
++ }
++
++ return ret;
++}
++
++static const struct user_regset aarch32_regsets[] = {
++ [REGSET_COMPAT_GPR] = {
++ .core_note_type = NT_PRSTATUS,
++ .n = COMPAT_ELF_NGREG,
++ .size = sizeof(compat_elf_greg_t),
++ .align = sizeof(compat_elf_greg_t),
++ .get = compat_gpr_get,
++ .set = compat_gpr_set
++ },
++ [REGSET_COMPAT_VFP] = {
++ .core_note_type = NT_ARM_VFP,
++ .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
++ .size = sizeof(compat_ulong_t),
++ .align = sizeof(compat_ulong_t),
++ .get = compat_vfp_get,
++ .set = compat_vfp_set
++ },
++};
++
++static const struct user_regset_view user_aarch32_view = {
++ .name = "aarch32", .e_machine = EM_ARM,
++ .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
++};
++
++static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
++ compat_ulong_t __user *ret)
++{
++ compat_ulong_t tmp;
++
++ if (off & 3)
++ return -EIO;
++
++ if (off == COMPAT_PT_TEXT_ADDR)
++ tmp = tsk->mm->start_code;
++ else if (off == COMPAT_PT_DATA_ADDR)
++ tmp = tsk->mm->start_data;
++ else if (off == COMPAT_PT_TEXT_END_ADDR)
++ tmp = tsk->mm->end_code;
++ else if (off < sizeof(compat_elf_gregset_t))
++ return copy_regset_to_user(tsk, &user_aarch32_view,
++ REGSET_COMPAT_GPR, off,
++ sizeof(compat_ulong_t), ret);
++ else if (off >= COMPAT_USER_SZ)
++ return -EIO;
++ else
++ tmp = 0;
++
++ return put_user(tmp, ret);
++}
++
++static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
++ compat_ulong_t val)
++{
++ int ret;
++ mm_segment_t old_fs = get_fs();
++
++ if (off & 3 || off >= COMPAT_USER_SZ)
++ return -EIO;
++
++ if (off >= sizeof(compat_elf_gregset_t))
++ return 0;
++
++ set_fs(KERNEL_DS);
++ ret = copy_regset_from_user(tsk, &user_aarch32_view,
++ REGSET_COMPAT_GPR, off,
++ sizeof(compat_ulong_t),
++ &val);
++ set_fs(old_fs);
++
++ return ret;
++}
++
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++
++/*
++ * Convert a virtual register number into an index for a thread_info
++ * breakpoint array. Breakpoints are identified using positive numbers
++ * whilst watchpoints are negative. The registers are laid out as pairs
++ * of (address, control), each pair mapping to a unique hw_breakpoint struct.
++ * Register 0 is reserved for describing resource information.
++ */
++static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
++{
++ return (abs(num) - 1) >> 1;
++}
++
++static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
++{
++ u8 num_brps, num_wrps, debug_arch, wp_len;
++ u32 reg = 0;
++
++ num_brps = hw_breakpoint_slots(TYPE_INST);
++ num_wrps = hw_breakpoint_slots(TYPE_DATA);
++
++ debug_arch = debug_monitors_arch();
++ wp_len = 8;
++ reg |= debug_arch;
++ reg <<= 8;
++ reg |= wp_len;
++ reg <<= 8;
++ reg |= num_wrps;
++ reg <<= 8;
++ reg |= num_brps;
++
++ *kdata = reg;
++ return 0;
++}
++
++static int compat_ptrace_hbp_get(unsigned int note_type,
++ struct task_struct *tsk,
++ compat_long_t num,
++ u32 *kdata)
++{
++ u64 addr = 0;
++ u32 ctrl = 0;
++
++	int err, idx = compat_ptrace_hbp_num_to_idx(num);
++
++ if (num & 1) {
++ err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
++ *kdata = (u32)addr;
++ } else {
++ err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
++ *kdata = ctrl;
++ }
++
++ return err;
++}
++
++static int compat_ptrace_hbp_set(unsigned int note_type,
++ struct task_struct *tsk,
++ compat_long_t num,
++ u32 *kdata)
++{
++ u64 addr;
++ u32 ctrl;
++
++ int err, idx = compat_ptrace_hbp_num_to_idx(num);
++
++ if (num & 1) {
++ addr = *kdata;
++ err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
++ } else {
++ ctrl = *kdata;
++ err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
++ }
++
++ return err;
++}
++
++static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
++ compat_ulong_t __user *data)
++{
++ int ret;
++ u32 kdata;
++ mm_segment_t old_fs = get_fs();
++
++ set_fs(KERNEL_DS);
++ /* Watchpoint */
++ if (num < 0) {
++ ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
++ /* Resource info */
++ } else if (num == 0) {
++ ret = compat_ptrace_hbp_get_resource_info(&kdata);
++ /* Breakpoint */
++ } else {
++ ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
++ }
++ set_fs(old_fs);
++
++ if (!ret)
++ ret = put_user(kdata, data);
++
++ return ret;
++}
++
++static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
++ compat_ulong_t __user *data)
++{
++ int ret;
++ u32 kdata = 0;
++ mm_segment_t old_fs = get_fs();
++
++ if (num == 0)
++ return 0;
++
++ ret = get_user(kdata, data);
++ if (ret)
++ return ret;
++
++ set_fs(KERNEL_DS);
++ if (num < 0)
++ ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
++ else
++ ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
++ set_fs(old_fs);
++
++ return ret;
++}
++#endif /* CONFIG_HAVE_HW_BREAKPOINT */
++
++long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
++ compat_ulong_t caddr, compat_ulong_t cdata)
++{
++ unsigned long addr = caddr;
++ unsigned long data = cdata;
++ void __user *datap = compat_ptr(data);
++ int ret;
++
++ switch (request) {
++ case PTRACE_PEEKUSR:
++ ret = compat_ptrace_read_user(child, addr, datap);
++ break;
++
++ case PTRACE_POKEUSR:
++ ret = compat_ptrace_write_user(child, addr, data);
++ break;
++
++ case COMPAT_PTRACE_GETREGS:
++ ret = copy_regset_to_user(child,
++ &user_aarch32_view,
++ REGSET_COMPAT_GPR,
++ 0, sizeof(compat_elf_gregset_t),
++ datap);
++ break;
++
++ case COMPAT_PTRACE_SETREGS:
++ ret = copy_regset_from_user(child,
++ &user_aarch32_view,
++ REGSET_COMPAT_GPR,
++ 0, sizeof(compat_elf_gregset_t),
++ datap);
++ break;
++
++ case COMPAT_PTRACE_GET_THREAD_AREA:
++ ret = put_user((compat_ulong_t)child->thread.tp_value,
++ (compat_ulong_t __user *)datap);
++ break;
++
++ case COMPAT_PTRACE_SET_SYSCALL:
++ task_pt_regs(child)->syscallno = data;
++ ret = 0;
++ break;
++
++ case COMPAT_PTRACE_GETVFPREGS:
++ ret = copy_regset_to_user(child,
++ &user_aarch32_view,
++ REGSET_COMPAT_VFP,
++ 0, VFP_STATE_SIZE,
++ datap);
++ break;
++
++ case COMPAT_PTRACE_SETVFPREGS:
++ ret = copy_regset_from_user(child,
++ &user_aarch32_view,
++ REGSET_COMPAT_VFP,
++ 0, VFP_STATE_SIZE,
++ datap);
++ break;
++
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++ case COMPAT_PTRACE_GETHBPREGS:
++ ret = compat_ptrace_gethbpregs(child, addr, datap);
++ break;
++
++ case COMPAT_PTRACE_SETHBPREGS:
++ ret = compat_ptrace_sethbpregs(child, addr, datap);
++ break;
++#endif
++
++ default:
++ ret = compat_ptrace_request(child, request, addr,
++ data);
++ break;
++ }
++
++ return ret;
++}
++#endif /* CONFIG_COMPAT */
++
++const struct user_regset_view *task_user_regset_view(struct task_struct *task)
++{
++#ifdef CONFIG_COMPAT
++ if (is_compat_thread(task_thread_info(task)))
++ return &user_aarch32_view;
++#endif
++ return &user_aarch64_view;
++}
++
++long arch_ptrace(struct task_struct *child, long request,
++ unsigned long addr, unsigned long data)
++{
++ return ptrace_request(child, request, addr, data);
++}
++
++enum ptrace_syscall_dir {
++ PTRACE_SYSCALL_ENTER = 0,
++ PTRACE_SYSCALL_EXIT,
++};
++
++static void tracehook_report_syscall(struct pt_regs *regs,
++ enum ptrace_syscall_dir dir)
++{
++ int regno;
++ unsigned long saved_reg;
++
++ /*
++ * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
++ * used to denote syscall entry/exit:
++ */
++ regno = (is_compat_task() ? 12 : 7);
++ saved_reg = regs->regs[regno];
++ regs->regs[regno] = dir;
++
++ if (dir == PTRACE_SYSCALL_EXIT)
++ tracehook_report_syscall_exit(regs, 0);
++ else if (tracehook_report_syscall_entry(regs))
++ regs->syscallno = ~0UL;
++
++ regs->regs[regno] = saved_reg;
++}
++
++asmlinkage int syscall_trace_enter(struct pt_regs *regs)
++{
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
++
++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
++ trace_sys_enter(regs, regs->syscallno);
++
++ return regs->syscallno;
++}
++
++asmlinkage void syscall_trace_exit(struct pt_regs *regs)
++{
++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
++ trace_sys_exit(regs, regs_return_value(regs));
++
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
++}
+diff -Nur linux-3.14.36/arch/arm64/kernel/return_address.c linux-openelec/arch/arm64/kernel/return_address.c
+--- linux-3.14.36/arch/arm64/kernel/return_address.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/return_address.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,55 @@
++/*
++ * arch/arm64/kernel/return_address.c
++ *
++ * Copyright (C) 2013 Linaro Limited
++ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/export.h>
++#include <linux/ftrace.h>
++
++#include <asm/stacktrace.h>
++
++struct return_address_data {
++ unsigned int level;
++ void *addr;
++};
++
++static int save_return_addr(struct stackframe *frame, void *d)
++{
++ struct return_address_data *data = d;
++
++ if (!data->level) {
++ data->addr = (void *)frame->pc;
++ return 1;
++ } else {
++ --data->level;
++ return 0;
++ }
++}
++
++void *return_address(unsigned int level)
++{
++ struct return_address_data data;
++ struct stackframe frame;
++ register unsigned long current_sp asm ("sp");
++
++ data.level = level + 2;
++ data.addr = NULL;
++
++ frame.fp = (unsigned long)__builtin_frame_address(0);
++ frame.sp = current_sp;
++ frame.pc = (unsigned long)return_address; /* dummy */
++
++ walk_stackframe(&frame, save_return_addr, &data);
++
++ if (!data.level)
++ return data.addr;
++ else
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(return_address);
+diff -Nur linux-3.14.36/arch/arm64/kernel/setup.c linux-openelec/arch/arm64/kernel/setup.c
+--- linux-3.14.36/arch/arm64/kernel/setup.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/setup.c 2015-07-24 18:03:29.644842002 -0500
+@@ -71,6 +71,7 @@
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_LPAE)
+ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
++unsigned int compat_elf_hwcap2 __read_mostly;
+ #endif
+
+ static const char *cpu_name;
+@@ -258,6 +259,38 @@
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_CRC32;
++
++#ifdef CONFIG_COMPAT
++ /*
++ * ID_ISAR5_EL1 carries similar information as above, but pertaining to
++ * the Aarch32 32-bit execution state.
++ */
++ features = read_cpuid(ID_ISAR5_EL1);
++ block = (features >> 4) & 0xf;
++ if (!(block & 0x8)) {
++ switch (block) {
++ default:
++ case 2:
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
++ case 1:
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
++ case 0:
++ break;
++ }
++ }
++
++ block = (features >> 8) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
++
++ block = (features >> 12) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
++
++ block = (features >> 16) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
++#endif
+ }
+
+ static void __init setup_machine_fdt(phys_addr_t dt_phys)
+@@ -374,7 +407,7 @@
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ return 0;
+ }
+-arch_initcall(arm64_device_init);
++arch_initcall_sync(arm64_device_init);
+
+ static int __init topology_init(void)
+ {
+diff -Nur linux-3.14.36/arch/arm64/kernel/setup.c.orig linux-openelec/arch/arm64/kernel/setup.c.orig
+--- linux-3.14.36/arch/arm64/kernel/setup.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/setup.c.orig 2015-07-24 18:03:29.256842002 -0500
+@@ -0,0 +1,485 @@
++/*
++ * Based on arch/arm/kernel/setup.c
++ *
++ * Copyright (C) 1995-2001 Russell King
++ * Copyright (C) 2012 ARM Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/export.h>
++#include <linux/kernel.h>
++#include <linux/stddef.h>
++#include <linux/ioport.h>
++#include <linux/delay.h>
++#include <linux/utsname.h>
++#include <linux/initrd.h>
++#include <linux/console.h>
++#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/screen_info.h>
++#include <linux/init.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
++#include <linux/root_dev.h>
++#include <linux/clk-provider.h>
++#include <linux/cpu.h>
++#include <linux/interrupt.h>
++#include <linux/smp.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/memblock.h>
++#include <linux/of_fdt.h>
++#include <linux/of_platform.h>
++
++#include <asm/cputype.h>
++#include <asm/elf.h>
++#include <asm/cputable.h>
++#include <asm/cpu_ops.h>
++#include <asm/sections.h>
++#include <asm/setup.h>
++#include <asm/smp_plat.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/traps.h>
++#include <asm/memblock.h>
++#include <asm/psci.h>
++
++unsigned int processor_id;
++EXPORT_SYMBOL(processor_id);
++
++unsigned long elf_hwcap __read_mostly;
++EXPORT_SYMBOL_GPL(elf_hwcap);
++
++#ifdef CONFIG_COMPAT
++#define COMPAT_ELF_HWCAP_DEFAULT \
++ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
++ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
++ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
++ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
++ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
++ COMPAT_HWCAP_LPAE)
++unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
++unsigned int compat_elf_hwcap2 __read_mostly;
++#endif
++
++static const char *cpu_name;
++static const char *machine_name;
++phys_addr_t __fdt_pointer __initdata;
++
++/*
++ * Standard memory resources
++ */
++static struct resource mem_res[] = {
++ {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_MEM
++ },
++ {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_MEM
++ }
++};
++
++#define kernel_code mem_res[0]
++#define kernel_data mem_res[1]
++
++void __init early_print(const char *str, ...)
++{
++ char buf[256];
++ va_list ap;
++
++ va_start(ap, str);
++ vsnprintf(buf, sizeof(buf), str, ap);
++ va_end(ap);
++
++ printk("%s", buf);
++}
++
++void __init smp_setup_processor_id(void)
++{
++ /*
++ * clear __my_cpu_offset on boot CPU to avoid hang caused by
++ * using percpu variable early, for example, lockdep will
++ * access percpu variable inside lock_release
++ */
++ set_my_cpu_offset(0);
++}
++
++bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
++{
++ return phys_id == cpu_logical_map(cpu);
++}
++
++struct mpidr_hash mpidr_hash;
++#ifdef CONFIG_SMP
++/**
++ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
++ * level in order to build a linear index from an
++ * MPIDR value. Resulting algorithm is a collision
++ * free hash carried out through shifting and ORing
++ */
++static void __init smp_build_mpidr_hash(void)
++{
++ u32 i, affinity, fs[4], bits[4], ls;
++ u64 mask = 0;
++ /*
++ * Pre-scan the list of MPIDRS and filter out bits that do
++ * not contribute to affinity levels, ie they never toggle.
++ */
++ for_each_possible_cpu(i)
++ mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
++ pr_debug("mask of set bits %#llx\n", mask);
++ /*
++ * Find and stash the last and first bit set at all affinity levels to
++ * check how many bits are required to represent them.
++ */
++ for (i = 0; i < 4; i++) {
++ affinity = MPIDR_AFFINITY_LEVEL(mask, i);
++ /*
++ * Find the MSB bit and LSB bits position
++ * to determine how many bits are required
++ * to express the affinity level.
++ */
++ ls = fls(affinity);
++ fs[i] = affinity ? ffs(affinity) - 1 : 0;
++ bits[i] = ls - fs[i];
++ }
++ /*
++ * An index can be created from the MPIDR_EL1 by isolating the
++ * significant bits at each affinity level and by shifting
++ * them in order to compress the 32 bits values space to a
++ * compressed set of values. This is equivalent to hashing
++ * the MPIDR_EL1 through shifting and ORing. It is a collision free
++ * hash though not minimal since some levels might contain a number
++ * of CPUs that is not an exact power of 2 and their bit
++ * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
++ */
++ mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
++ mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
++ mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
++ (bits[1] + bits[0]);
++ mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
++ fs[3] - (bits[2] + bits[1] + bits[0]);
++ mpidr_hash.mask = mask;
++ mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
++ pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
++ mpidr_hash.shift_aff[0],
++ mpidr_hash.shift_aff[1],
++ mpidr_hash.shift_aff[2],
++ mpidr_hash.shift_aff[3],
++ mpidr_hash.mask,
++ mpidr_hash.bits);
++ /*
++ * 4x is an arbitrary value used to warn on a hash table much bigger
++ * than expected on most systems.
++ */
++ if (mpidr_hash_size() > 4 * num_possible_cpus())
++ pr_warn("Large number of MPIDR hash buckets detected\n");
++ __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
++}
++#endif
++
++static void __init setup_processor(void)
++{
++ struct cpu_info *cpu_info;
++ u64 features, block;
++
++ cpu_info = lookup_processor_type(read_cpuid_id());
++ if (!cpu_info) {
++ printk("CPU configuration botched (ID %08x), unable to continue.\n",
++ read_cpuid_id());
++ while (1);
++ }
++
++ cpu_name = cpu_info->cpu_name;
++
++ printk("CPU: %s [%08x] revision %d\n",
++ cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
++
++ sprintf(init_utsname()->machine, ELF_PLATFORM);
++ elf_hwcap = 0;
++
++ /*
++ * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
++ * The blocks we test below represent incremental functionality
++ * for non-negative values. Negative values are reserved.
++ */
++ features = read_cpuid(ID_AA64ISAR0_EL1);
++ block = (features >> 4) & 0xf;
++ if (!(block & 0x8)) {
++ switch (block) {
++ default:
++ case 2:
++ elf_hwcap |= HWCAP_PMULL;
++ case 1:
++ elf_hwcap |= HWCAP_AES;
++ case 0:
++ break;
++ }
++ }
++
++ block = (features >> 8) & 0xf;
++ if (block && !(block & 0x8))
++ elf_hwcap |= HWCAP_SHA1;
++
++ block = (features >> 12) & 0xf;
++ if (block && !(block & 0x8))
++ elf_hwcap |= HWCAP_SHA2;
++
++ block = (features >> 16) & 0xf;
++ if (block && !(block & 0x8))
++ elf_hwcap |= HWCAP_CRC32;
++
++#ifdef CONFIG_COMPAT
++ /*
++ * ID_ISAR5_EL1 carries similar information as above, but pertaining to
++ * the Aarch32 32-bit execution state.
++ */
++ features = read_cpuid(ID_ISAR5_EL1);
++ block = (features >> 4) & 0xf;
++ if (!(block & 0x8)) {
++ switch (block) {
++ default:
++ case 2:
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
++ case 1:
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
++ case 0:
++ break;
++ }
++ }
++
++ block = (features >> 8) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
++
++ block = (features >> 12) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
++
++ block = (features >> 16) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
++#endif
++}
++
++static void __init setup_machine_fdt(phys_addr_t dt_phys)
++{
++ if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
++ early_print("\n"
++ "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
++ "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
++ "\nPlease check your bootloader.\n",
++ dt_phys, phys_to_virt(dt_phys));
++
++ while (true)
++ cpu_relax();
++ }
++
++ machine_name = of_flat_dt_get_machine_name();
++}
++
++/*
++ * Limit the memory size that was specified via FDT.
++ */
++static int __init early_mem(char *p)
++{
++ phys_addr_t limit;
++
++ if (!p)
++ return 1;
++
++ limit = memparse(p, &p) & PAGE_MASK;
++ pr_notice("Memory limited to %lldMB\n", limit >> 20);
++
++ memblock_enforce_memory_limit(limit);
++
++ return 0;
++}
++early_param("mem", early_mem);
++
++static void __init request_standard_resources(void)
++{
++ struct memblock_region *region;
++ struct resource *res;
++
++ kernel_code.start = virt_to_phys(_text);
++ kernel_code.end = virt_to_phys(_etext - 1);
++ kernel_data.start = virt_to_phys(_sdata);
++ kernel_data.end = virt_to_phys(_end - 1);
++
++ for_each_memblock(memory, region) {
++ res = alloc_bootmem_low(sizeof(*res));
++ res->name = "System RAM";
++ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
++ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++
++ request_resource(&iomem_resource, res);
++
++ if (kernel_code.start >= res->start &&
++ kernel_code.end <= res->end)
++ request_resource(res, &kernel_code);
++ if (kernel_data.start >= res->start &&
++ kernel_data.end <= res->end)
++ request_resource(res, &kernel_data);
++ }
++}
++
++u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
++
++void __init setup_arch(char **cmdline_p)
++{
++ /*
++ * Unmask asynchronous aborts early to catch possible system errors.
++ */
++ local_async_enable();
++
++ setup_processor();
++
++ setup_machine_fdt(__fdt_pointer);
++
++ init_mm.start_code = (unsigned long) _text;
++ init_mm.end_code = (unsigned long) _etext;
++ init_mm.end_data = (unsigned long) _edata;
++ init_mm.brk = (unsigned long) _end;
++
++ *cmdline_p = boot_command_line;
++
++ parse_early_param();
++
++ arm64_memblock_init();
++
++ paging_init();
++ request_standard_resources();
++
++ unflatten_device_tree();
++
++ psci_init();
++
++ cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
++ cpu_read_bootcpu_ops();
++#ifdef CONFIG_SMP
++ smp_init_cpus();
++ smp_build_mpidr_hash();
++#endif
++
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++}
++
++static int __init arm64_device_init(void)
++{
++ of_clk_init(NULL);
++ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
++ return 0;
++}
++arch_initcall_sync(arm64_device_init);
++
++static DEFINE_PER_CPU(struct cpu, cpu_data);
++
++static int __init topology_init(void)
++{
++ int i;
++
++ for_each_possible_cpu(i) {
++ struct cpu *cpu = &per_cpu(cpu_data, i);
++ cpu->hotpluggable = 1;
++ register_cpu(cpu, i);
++ }
++
++ return 0;
++}
++subsys_initcall(topology_init);
++
++static const char *hwcap_str[] = {
++ "fp",
++ "asimd",
++ "evtstrm",
++ "aes",
++ "pmull",
++ "sha1",
++ "sha2",
++ "crc32",
++ NULL
++};
++
++static int c_show(struct seq_file *m, void *v)
++{
++ int i;
++
++ seq_printf(m, "Processor\t: %s rev %d (%s)\n",
++ cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
++
++ for_each_online_cpu(i) {
++ /*
++ * glibc reads /proc/cpuinfo to determine the number of
++ * online processors, looking for lines beginning with
++ * "processor". Give glibc what it expects.
++ */
++#ifdef CONFIG_SMP
++ seq_printf(m, "processor\t: %d\n", i);
++#endif
++ }
++
++ /* dump out the processor features */
++ seq_puts(m, "Features\t: ");
++
++ for (i = 0; hwcap_str[i]; i++)
++ if (elf_hwcap & (1 << i))
++ seq_printf(m, "%s ", hwcap_str[i]);
++
++ seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
++ seq_printf(m, "CPU architecture: AArch64\n");
++ seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
++ seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
++ seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
++
++ seq_puts(m, "\n");
++
++ seq_printf(m, "Hardware\t: %s\n", machine_name);
++
++ return 0;
++}
++
++static void *c_start(struct seq_file *m, loff_t *pos)
++{
++ return *pos < 1 ? (void *)1 : NULL;
++}
++
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
++{
++ ++*pos;
++ return NULL;
++}
++
++static void c_stop(struct seq_file *m, void *v)
++{
++}
++
++const struct seq_operations cpuinfo_op = {
++ .start = c_start,
++ .next = c_next,
++ .stop = c_stop,
++ .show = c_show
++};
+diff -Nur linux-3.14.36/arch/arm64/kernel/signal.c linux-openelec/arch/arm64/kernel/signal.c
+--- linux-3.14.36/arch/arm64/kernel/signal.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/signal.c 2015-05-06 12:05:43.000000000 -0500
+@@ -17,6 +17,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/compat.h>
+ #include <linux/errno.h>
+ #include <linux/signal.h>
+ #include <linux/personality.h>
+@@ -25,7 +26,6 @@
+ #include <linux/tracehook.h>
+ #include <linux/ratelimit.h>
+
+-#include <asm/compat.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/elf.h>
+ #include <asm/cacheflush.h>
+diff -Nur linux-3.14.36/arch/arm64/kernel/smp.c linux-openelec/arch/arm64/kernel/smp.c
+--- linux-3.14.36/arch/arm64/kernel/smp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/smp.c 2015-07-24 18:03:29.644842002 -0500
+@@ -114,6 +114,11 @@
+ return ret;
+ }
+
++static void smp_store_cpu_info(unsigned int cpuid)
++{
++ store_cpu_topology(cpuid);
++}
++
+ /*
+ * This is the secondary CPU boot entry. We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+@@ -157,6 +162,8 @@
+ */
+ notify_cpu_starting(cpu);
+
++ smp_store_cpu_info(cpu);
++
+ /*
+ * OK, now it's safe to let the boot CPU continue. Wait for
+ * the CPU migration code to notice that the CPU is online
+@@ -395,6 +402,10 @@
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
+
++ init_cpu_topology();
++
++ smp_store_cpu_info(smp_processor_id());
++
+ /*
+ * are we trying to boot more cores than exist?
+ */
+diff -Nur linux-3.14.36/arch/arm64/kernel/smp.c.orig linux-openelec/arch/arm64/kernel/smp.c.orig
+--- linux-3.14.36/arch/arm64/kernel/smp.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/smp.c.orig 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,603 @@
++/*
++ * SMP initialisation and IPI support
++ * Based on arch/arm/kernel/smp.c
++ *
++ * Copyright (C) 2012 ARM Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/cache.h>
++#include <linux/profile.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/err.h>
++#include <linux/cpu.h>
++#include <linux/smp.h>
++#include <linux/seq_file.h>
++#include <linux/irq.h>
++#include <linux/percpu.h>
++#include <linux/clockchips.h>
++#include <linux/completion.h>
++#include <linux/of.h>
++
++#include <asm/atomic.h>
++#include <asm/cacheflush.h>
++#include <asm/cputype.h>
++#include <asm/cpu_ops.h>
++#include <asm/mmu_context.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/processor.h>
++#include <asm/smp_plat.h>
++#include <asm/sections.h>
++#include <asm/tlbflush.h>
++#include <asm/ptrace.h>
++
++/*
++ * as from 2.5, kernels no longer have an init_tasks structure
++ * so we need some other way of telling a new secondary core
++ * where to place its SVC stack
++ */
++struct secondary_data secondary_data;
++
++enum ipi_msg_type {
++ IPI_RESCHEDULE,
++ IPI_CALL_FUNC,
++ IPI_CALL_FUNC_SINGLE,
++ IPI_CPU_STOP,
++ IPI_TIMER,
++};
++
++/*
++ * Boot a secondary CPU, and assign it the specified idle task.
++ * This also gives us the initial stack to use for this CPU.
++ */
++static int boot_secondary(unsigned int cpu, struct task_struct *idle)
++{
++ if (cpu_ops[cpu]->cpu_boot)
++ return cpu_ops[cpu]->cpu_boot(cpu);
++
++ return -EOPNOTSUPP;
++}
++
++static DECLARE_COMPLETION(cpu_running);
++
++int __cpu_up(unsigned int cpu, struct task_struct *idle)
++{
++ int ret;
++
++ /*
++ * We need to tell the secondary core where to find its stack and the
++ * page tables.
++ */
++ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
++ __flush_dcache_area(&secondary_data, sizeof(secondary_data));
++
++ /*
++ * Now bring the CPU into our world.
++ */
++ ret = boot_secondary(cpu, idle);
++ if (ret == 0) {
++ /*
++ * CPU was successfully started, wait for it to come online or
++ * time out.
++ */
++ wait_for_completion_timeout(&cpu_running,
++ msecs_to_jiffies(1000));
++
++ if (!cpu_online(cpu)) {
++ pr_crit("CPU%u: failed to come online\n", cpu);
++ ret = -EIO;
++ }
++ } else {
++ pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
++ }
++
++ secondary_data.stack = NULL;
++
++ return ret;
++}
++
++static void smp_store_cpu_info(unsigned int cpuid)
++{
++ store_cpu_topology(cpuid);
++}
++
++/*
++ * This is the secondary CPU boot entry. We're using this CPUs
++ * idle thread stack, but a set of temporary page tables.
++ */
++asmlinkage void secondary_start_kernel(void)
++{
++ struct mm_struct *mm = &init_mm;
++ unsigned int cpu = smp_processor_id();
++
++ /*
++ * All kernel threads share the same mm context; grab a
++ * reference and switch to it.
++ */
++ atomic_inc(&mm->mm_count);
++ current->active_mm = mm;
++ cpumask_set_cpu(cpu, mm_cpumask(mm));
++
++ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
++ printk("CPU%u: Booted secondary processor\n", cpu);
++
++ /*
++ * TTBR0 is only used for the identity mapping at this stage. Make it
++ * point to zero page to avoid speculatively fetching new entries.
++ */
++ cpu_set_reserved_ttbr0();
++ flush_tlb_all();
++
++ preempt_disable();
++ trace_hardirqs_off();
++
++ if (cpu_ops[cpu]->cpu_postboot)
++ cpu_ops[cpu]->cpu_postboot();
++
++ /*
++ * Enable GIC and timers.
++ */
++ notify_cpu_starting(cpu);
++
++ smp_store_cpu_info(cpu);
++
++ /*
++ * OK, now it's safe to let the boot CPU continue. Wait for
++ * the CPU migration code to notice that the CPU is online
++ * before we continue.
++ */
++ set_cpu_online(cpu, true);
++ complete(&cpu_running);
++
++ local_irq_enable();
++ local_async_enable();
++
++ /*
++ * OK, it's off to the idle thread for us
++ */
++ cpu_startup_entry(CPUHP_ONLINE);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static int op_cpu_disable(unsigned int cpu)
++{
++ /*
++ * If we don't have a cpu_die method, abort before we reach the point
++ * of no return. CPU0 may not have an cpu_ops, so test for it.
++ */
++ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
++ return -EOPNOTSUPP;
++
++ /*
++ * We may need to abort a hot unplug for some other mechanism-specific
++ * reason.
++ */
++ if (cpu_ops[cpu]->cpu_disable)
++ return cpu_ops[cpu]->cpu_disable(cpu);
++
++ return 0;
++}
++
++/*
++ * __cpu_disable runs on the processor to be shutdown.
++ */
++int __cpu_disable(void)
++{
++ unsigned int cpu = smp_processor_id();
++ int ret;
++
++ ret = op_cpu_disable(cpu);
++ if (ret)
++ return ret;
++
++ /*
++ * Take this CPU offline. Once we clear this, we can't return,
++ * and we must not schedule until we're ready to give up the cpu.
++ */
++ set_cpu_online(cpu, false);
++
++ /*
++ * OK - migrate IRQs away from this CPU
++ */
++ migrate_irqs();
++
++ /*
++ * Remove this CPU from the vm mask set of all processes.
++ */
++ clear_tasks_mm_cpumask(cpu);
++
++ return 0;
++}
++
++static DECLARE_COMPLETION(cpu_died);
++
++/*
++ * called on the thread which is asking for a CPU to be shutdown -
++ * waits until shutdown has completed, or it is timed out.
++ */
++void __cpu_die(unsigned int cpu)
++{
++ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
++ pr_crit("CPU%u: cpu didn't die\n", cpu);
++ return;
++ }
++ pr_notice("CPU%u: shutdown\n", cpu);
++}
++
++/*
++ * Called from the idle thread for the CPU which has been shutdown.
++ *
++ * Note that we disable IRQs here, but do not re-enable them
++ * before returning to the caller. This is also the behaviour
++ * of the other hotplug-cpu capable cores, so presumably coming
++ * out of idle fixes this.
++ */
++void cpu_die(void)
++{
++ unsigned int cpu = smp_processor_id();
++
++ idle_task_exit();
++
++ local_irq_disable();
++
++ /* Tell __cpu_die() that this CPU is now safe to dispose of */
++ complete(&cpu_died);
++
++ /*
++ * Actually shutdown the CPU. This must never fail. The specific hotplug
++ * mechanism must perform all required cache maintenance to ensure that
++ * no dirty lines are lost in the process of shutting down the CPU.
++ */
++ cpu_ops[cpu]->cpu_die(cpu);
++
++ BUG();
++}
++#endif
++
++void __init smp_cpus_done(unsigned int max_cpus)
++{
++ pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
++}
++
++void __init smp_prepare_boot_cpu(void)
++{
++ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
++}
++
++static void (*smp_cross_call)(const struct cpumask *, unsigned int);
++
++/*
++ * Enumerate the possible CPU set from the device tree and build the
++ * cpu logical map array containing MPIDR values related to logical
++ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
++ */
++void __init smp_init_cpus(void)
++{
++ struct device_node *dn = NULL;
++ unsigned int i, cpu = 1;
++ bool bootcpu_valid = false;
++
++ while ((dn = of_find_node_by_type(dn, "cpu"))) {
++ const u32 *cell;
++ u64 hwid;
++
++ /*
++ * A cpu node with missing "reg" property is
++ * considered invalid to build a cpu_logical_map
++ * entry.
++ */
++ cell = of_get_property(dn, "reg", NULL);
++ if (!cell) {
++ pr_err("%s: missing reg property\n", dn->full_name);
++ goto next;
++ }
++ hwid = of_read_number(cell, of_n_addr_cells(dn));
++
++ /*
++ * Non affinity bits must be set to 0 in the DT
++ */
++ if (hwid & ~MPIDR_HWID_BITMASK) {
++ pr_err("%s: invalid reg property\n", dn->full_name);
++ goto next;
++ }
++
++ /*
++ * Duplicate MPIDRs are a recipe for disaster. Scan
++ * all initialized entries and check for
++ * duplicates. If any is found just ignore the cpu.
++ * cpu_logical_map was initialized to INVALID_HWID to
++ * avoid matching valid MPIDR values.
++ */
++ for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
++ if (cpu_logical_map(i) == hwid) {
++ pr_err("%s: duplicate cpu reg properties in the DT\n",
++ dn->full_name);
++ goto next;
++ }
++ }
++
++ /*
++ * The numbering scheme requires that the boot CPU
++ * must be assigned logical id 0. Record it so that
++ * the logical map built from DT is validated and can
++ * be used.
++ */
++ if (hwid == cpu_logical_map(0)) {
++ if (bootcpu_valid) {
++ pr_err("%s: duplicate boot cpu reg property in DT\n",
++ dn->full_name);
++ goto next;
++ }
++
++ bootcpu_valid = true;
++
++ /*
++ * cpu_logical_map has already been
++ * initialized and the boot cpu doesn't need
++ * the enable-method so continue without
++ * incrementing cpu.
++ */
++ continue;
++ }
++
++ if (cpu >= NR_CPUS)
++ goto next;
++
++ if (cpu_read_ops(dn, cpu) != 0)
++ goto next;
++
++ if (cpu_ops[cpu]->cpu_init(dn, cpu))
++ goto next;
++
++ pr_debug("cpu logical map 0x%llx\n", hwid);
++ cpu_logical_map(cpu) = hwid;
++next:
++ cpu++;
++ }
++
++ /* sanity check */
++ if (cpu > NR_CPUS)
++ pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
++ cpu, NR_CPUS);
++
++ if (!bootcpu_valid) {
++ pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
++ return;
++ }
++
++ /*
++ * All the cpus that made it to the cpu_logical_map have been
++ * validated so set them as possible cpus.
++ */
++ for (i = 0; i < NR_CPUS; i++)
++ if (cpu_logical_map(i) != INVALID_HWID)
++ set_cpu_possible(i, true);
++}
++
++void __init smp_prepare_cpus(unsigned int max_cpus)
++{
++ int err;
++ unsigned int cpu, ncores = num_possible_cpus();
++
++ init_cpu_topology();
++
++ smp_store_cpu_info(smp_processor_id());
++
++ /*
++ * are we trying to boot more cores than exist?
++ */
++ if (max_cpus > ncores)
++ max_cpus = ncores;
++
++ /* Don't bother if we're effectively UP */
++ if (max_cpus <= 1)
++ return;
++
++ /*
++ * Initialise the present map (which describes the set of CPUs
++ * actually populated at the present time) and release the
++ * secondaries from the bootloader.
++ *
++ * Make sure we online at most (max_cpus - 1) additional CPUs.
++ */
++ max_cpus--;
++ for_each_possible_cpu(cpu) {
++ if (max_cpus == 0)
++ break;
++
++ if (cpu == smp_processor_id())
++ continue;
++
++ if (!cpu_ops[cpu])
++ continue;
++
++ err = cpu_ops[cpu]->cpu_prepare(cpu);
++ if (err)
++ continue;
++
++ set_cpu_present(cpu, true);
++ max_cpus--;
++ }
++}
++
++
++void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
++{
++ smp_cross_call = fn;
++}
++
++void arch_send_call_function_ipi_mask(const struct cpumask *mask)
++{
++ smp_cross_call(mask, IPI_CALL_FUNC);
++}
++
++void arch_send_call_function_single_ipi(int cpu)
++{
++ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
++}
++
++static const char *ipi_types[NR_IPI] = {
++#define S(x,s) [x - IPI_RESCHEDULE] = s
++ S(IPI_RESCHEDULE, "Rescheduling interrupts"),
++ S(IPI_CALL_FUNC, "Function call interrupts"),
++ S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
++ S(IPI_CPU_STOP, "CPU stop interrupts"),
++ S(IPI_TIMER, "Timer broadcast interrupts"),
++};
++
++void show_ipi_list(struct seq_file *p, int prec)
++{
++ unsigned int cpu, i;
++
++ for (i = 0; i < NR_IPI; i++) {
++ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
++ prec >= 4 ? " " : "");
++ for_each_online_cpu(cpu)
++ seq_printf(p, "%10u ",
++ __get_irq_stat(cpu, ipi_irqs[i]));
++ seq_printf(p, " %s\n", ipi_types[i]);
++ }
++}
++
++u64 smp_irq_stat_cpu(unsigned int cpu)
++{
++ u64 sum = 0;
++ int i;
++
++ for (i = 0; i < NR_IPI; i++)
++ sum += __get_irq_stat(cpu, ipi_irqs[i]);
++
++ return sum;
++}
++
++static DEFINE_RAW_SPINLOCK(stop_lock);
++
++/*
++ * ipi_cpu_stop - handle IPI from smp_send_stop()
++ */
++static void ipi_cpu_stop(unsigned int cpu)
++{
++ if (system_state == SYSTEM_BOOTING ||
++ system_state == SYSTEM_RUNNING) {
++ raw_spin_lock(&stop_lock);
++ pr_crit("CPU%u: stopping\n", cpu);
++ dump_stack();
++ raw_spin_unlock(&stop_lock);
++ }
++
++ set_cpu_online(cpu, false);
++
++ local_irq_disable();
++
++ while (1)
++ cpu_relax();
++}
++
++/*
++ * Main handler for inter-processor interrupts
++ */
++void handle_IPI(int ipinr, struct pt_regs *regs)
++{
++ unsigned int cpu = smp_processor_id();
++ struct pt_regs *old_regs = set_irq_regs(regs);
++
++ if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
++ __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
++
++ switch (ipinr) {
++ case IPI_RESCHEDULE:
++ scheduler_ipi();
++ break;
++
++ case IPI_CALL_FUNC:
++ irq_enter();
++ generic_smp_call_function_interrupt();
++ irq_exit();
++ break;
++
++ case IPI_CALL_FUNC_SINGLE:
++ irq_enter();
++ generic_smp_call_function_single_interrupt();
++ irq_exit();
++ break;
++
++ case IPI_CPU_STOP:
++ irq_enter();
++ ipi_cpu_stop(cpu);
++ irq_exit();
++ break;
++
++#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
++ case IPI_TIMER:
++ irq_enter();
++ tick_receive_broadcast();
++ irq_exit();
++ break;
++#endif
++
++ default:
++ pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
++ break;
++ }
++ set_irq_regs(old_regs);
++}
++
++void smp_send_reschedule(int cpu)
++{
++ smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
++}
++
++#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
++void tick_broadcast(const struct cpumask *mask)
++{
++ smp_cross_call(mask, IPI_TIMER);
++}
++#endif
++
++void smp_send_stop(void)
++{
++ unsigned long timeout;
++
++ if (num_online_cpus() > 1) {
++ cpumask_t mask;
++
++ cpumask_copy(&mask, cpu_online_mask);
++ cpu_clear(smp_processor_id(), mask);
++
++ smp_cross_call(&mask, IPI_CPU_STOP);
++ }
++
++ /* Wait up to one second for other CPUs to stop */
++ timeout = USEC_PER_SEC;
++ while (num_online_cpus() > 1 && timeout--)
++ udelay(1);
++
++ if (num_online_cpus() > 1)
++ pr_warning("SMP: failed to stop secondary CPUs\n");
++}
++
++/*
++ * not supported here
++ */
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
+diff -Nur linux-3.14.36/arch/arm64/kernel/stacktrace.c linux-openelec/arch/arm64/kernel/stacktrace.c
+--- linux-3.14.36/arch/arm64/kernel/stacktrace.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/stacktrace.c 2015-05-06 12:05:43.000000000 -0500
+@@ -35,7 +35,7 @@
+ * ldp x29, x30, [sp]
+ * add sp, sp, #0x10
+ */
+-int unwind_frame(struct stackframe *frame)
++int notrace unwind_frame(struct stackframe *frame)
+ {
+ unsigned long high, low;
+ unsigned long fp = frame->fp;
+diff -Nur linux-3.14.36/arch/arm64/kernel/topology.c linux-openelec/arch/arm64/kernel/topology.c
+--- linux-3.14.36/arch/arm64/kernel/topology.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/arch/arm64/kernel/topology.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,558 @@
++/*
++ * arch/arm64/kernel/topology.c
++ *
++ * Copyright (C) 2011,2013,2014 Linaro Limited.
++ *
++ * Based on the arm32 version written by Vincent Guittot in turn based on
++ * arch/sh/kernel/topology.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++
++#include <linux/cpu.h>
++#include <linux/cpumask.h>
++#include <linux/init.h>
++#include <linux/percpu.h>
++#include <linux/node.h>
++#include <linux/nodemask.h>
++#include <linux/of.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++
++#include <asm/topology.h>
++
++/*
++ * cpu power table
++ * This per cpu data structure describes the relative capacity of each core.
++ * On a heteregenous system, cores don't have the same computation capacity
++ * and we reflect that difference in the cpu_power field so the scheduler can
++ * take this difference into account during load balance. A per cpu structure
++ * is preferred because each CPU updates its own cpu_power field during the
++ * load balance except for idle cores. One idle core is selected to run the
++ * rebalance_domains for all idle cores and the cpu_power can be updated
++ * during this sequence.
++ */
++static DEFINE_PER_CPU(unsigned long, cpu_scale);
++
++unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
++{
++ return per_cpu(cpu_scale, cpu);
++}
++
++static void set_power_scale(unsigned int cpu, unsigned long power)
++{
++ per_cpu(cpu_scale, cpu) = power;
++}
++
++static int __init get_cpu_for_node(struct device_node *node)
++{
++ struct device_node *cpu_node;
++ int cpu;
++
++ cpu_node = of_parse_phandle(node, "cpu", 0);
++ if (!cpu_node)
++ return -1;
++
++ for_each_possible_cpu(cpu) {
++ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
++ of_node_put(cpu_node);
++ return cpu;
++ }
++ }
++
++ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
++
++ of_node_put(cpu_node);
++ return -1;
++}
++
++static int __init parse_core(struct device_node *core, int cluster_id,
++ int core_id)
++{
++ char name[10];
++ bool leaf = true;
++ int i = 0;
++ int cpu;
++ struct device_node *t;
++
++ do {
++ snprintf(name, sizeof(name), "thread%d", i);
++ t = of_get_child_by_name(core, name);
++ if (t) {
++ leaf = false;
++ cpu = get_cpu_for_node(t);
++ if (cpu >= 0) {
++ cpu_topology[cpu].cluster_id = cluster_id;
++ cpu_topology[cpu].core_id = core_id;
++ cpu_topology[cpu].thread_id = i;
++ } else {
++ pr_err("%s: Can't get CPU for thread\n",
++ t->full_name);
++ of_node_put(t);
++ return -EINVAL;
++ }
++ of_node_put(t);
++ }
++ i++;
++ } while (t);
++
++ cpu = get_cpu_for_node(core);
++ if (cpu >= 0) {
++ if (!leaf) {
++ pr_err("%s: Core has both threads and CPU\n",
++ core->full_name);
++ return -EINVAL;
++ }
++
++ cpu_topology[cpu].cluster_id = cluster_id;
++ cpu_topology[cpu].core_id = core_id;
++ } else if (leaf) {
++ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int __init parse_cluster(struct device_node *cluster, int depth)
++{
++ char name[10];
++ bool leaf = true;
++ bool has_cores = false;
++ struct device_node *c;
++ static int cluster_id __initdata;
++ int core_id = 0;
++ int i, ret;
++
++ /*
++ * First check for child clusters; we currently ignore any
++ * information about the nesting of clusters and present the
++ * scheduler with a flat list of them.
++ */
++ i = 0;
++ do {
++ snprintf(name, sizeof(name), "cluster%d", i);
++ c = of_get_child_by_name(cluster, name);
++ if (c) {
++ leaf = false;
++ ret = parse_cluster(c, depth + 1);
++ of_node_put(c);
++ if (ret != 0)
++ return ret;
++ }
++ i++;
++ } while (c);
++
++ /* Now check for cores */
++ i = 0;
++ do {
++ snprintf(name, sizeof(name), "core%d", i);
++ c = of_get_child_by_name(cluster, name);
++ if (c) {
++ has_cores = true;
++
++ if (depth == 0) {
++ pr_err("%s: cpu-map children should be clusters\n",
++ c->full_name);
++ of_node_put(c);
++ return -EINVAL;
++ }
++
++ if (leaf) {
++ ret = parse_core(c, cluster_id, core_id++);
++ } else {
++ pr_err("%s: Non-leaf cluster with core %s\n",
++ cluster->full_name, name);
++ ret = -EINVAL;
++ }
++
++ of_node_put(c);
++ if (ret != 0)
++ return ret;
++ }
++ i++;
++ } while (c);
++
++ if (leaf && !has_cores)
++ pr_warn("%s: empty cluster\n", cluster->full_name);
++
++ if (leaf)
++ cluster_id++;
++
++ return 0;
++}
++
++struct cpu_efficiency {
++ const char *compatible;
++ unsigned long efficiency;
++};
++
++/*
++ * Table of relative efficiency of each processors
++ * The efficiency value must fit in 20bit and the final
++ * cpu_scale value must be in the range
++ * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2
++ * in order to return at most 1 when DIV_ROUND_CLOSEST
++ * is used to compute the capacity of a CPU.
++ * Processors that are not defined in the table,
++ * use the default SCHED_POWER_SCALE value for cpu_scale.
++ */
++static const struct cpu_efficiency table_efficiency[] = {
++ { "arm,cortex-a57", 3891 },
++ { "arm,cortex-a53", 2048 },
++ { NULL, },
++};
++
++static unsigned long *__cpu_capacity;
++#define cpu_capacity(cpu) __cpu_capacity[cpu]
++
++static unsigned long middle_capacity = 1;
++
++/*
++ * Iterate all CPUs' descriptor in DT and compute the efficiency
++ * (as per table_efficiency). Also calculate a middle efficiency
++ * as close as possible to (max{eff_i} - min{eff_i}) / 2
++ * This is later used to scale the cpu_power field such that an
++ * 'average' CPU is of middle power. Also see the comments near
++ * table_efficiency[] and update_cpu_power().
++ */
++static int __init parse_dt_topology(void)
++{
++ struct device_node *cn, *map;
++ int ret = 0;
++ int cpu;
++
++ cn = of_find_node_by_path("/cpus");
++ if (!cn) {
++ pr_err("No CPU information found in DT\n");
++ return 0;
++ }
++
++ /*
++ * When topology is provided cpu-map is essentially a root
++ * cluster with restricted subnodes.
++ */
++ map = of_get_child_by_name(cn, "cpu-map");
++ if (!map)
++ goto out;
++
++ ret = parse_cluster(map, 0);
++ if (ret != 0)
++ goto out_map;
++
++ /*
++ * Check that all cores are in the topology; the SMP code will
++ * only mark cores described in the DT as possible.
++ */
++ for_each_possible_cpu(cpu) {
++ if (cpu_topology[cpu].cluster_id == -1) {
++ pr_err("CPU%d: No topology information specified\n",
++ cpu);
++ ret = -EINVAL;
++ }
++ }
++
++out_map:
++ of_node_put(map);
++out:
++ of_node_put(cn);
++ return ret;
++}
++
++static void __init parse_dt_cpu_power(void)
++{
++ const struct cpu_efficiency *cpu_eff;
++ struct device_node *cn;
++ unsigned long min_capacity = ULONG_MAX;
++ unsigned long max_capacity = 0;
++ unsigned long capacity = 0;
++ int cpu;
++
++ __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
++ GFP_NOWAIT);
++
++ for_each_possible_cpu(cpu) {
++ const u32 *rate;
++ int len;
++
++ /* Too early to use cpu->of_node */
++ cn = of_get_cpu_node(cpu, NULL);
++ if (!cn) {
++ pr_err("Missing device node for CPU %d\n", cpu);
++ continue;
++ }
++
++ for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
++ if (of_device_is_compatible(cn, cpu_eff->compatible))
++ break;
++
++ if (cpu_eff->compatible == NULL) {
++ pr_warn("%s: Unknown CPU type\n", cn->full_name);
++ continue;
++ }
++
++ rate = of_get_property(cn, "clock-frequency", &len);
++ if (!rate || len != 4) {
++ pr_err("%s: Missing clock-frequency property\n",
++ cn->full_name);
++ continue;
++ }
++
++ capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
++
++ /* Save min capacity of the system */
++ if (capacity < min_capacity)
++ min_capacity = capacity;
++
++ /* Save max capacity of the system */
++ if (capacity > max_capacity)
++ max_capacity = capacity;
++
++ cpu_capacity(cpu) = capacity;
++ }
++
++ /* If min and max capacities are equal we bypass the update of the
++ * cpu_scale because all CPUs have the same capacity. Otherwise, we
++ * compute a middle_capacity factor that will ensure that the capacity
++ * of an 'average' CPU of the system will be as close as possible to
++ * SCHED_POWER_SCALE, which is the default value, but with the
++ * constraint explained near table_efficiency[].
++ */
++ if (min_capacity == max_capacity)
++ return;
++ else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
++ middle_capacity = (min_capacity + max_capacity)
++ >> (SCHED_POWER_SHIFT+1);
++ else
++ middle_capacity = ((max_capacity / 3)
++ >> (SCHED_POWER_SHIFT-1)) + 1;
++}
++
++/*
++ * Look for a customed capacity of a CPU in the cpu_topo_data table during the
++ * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
++ * function returns directly for SMP system.
++ */
++static void update_cpu_power(unsigned int cpu)
++{
++ if (!cpu_capacity(cpu))
++ return;
++
++ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
++
++ pr_info("CPU%u: update cpu_power %lu\n",
++ cpu, arch_scale_freq_power(NULL, cpu));
++}
++
++/*
++ * cpu topology table
++ */
++struct cpu_topology cpu_topology[NR_CPUS];
++EXPORT_SYMBOL_GPL(cpu_topology);
++
++const struct cpumask *cpu_coregroup_mask(int cpu)
++{
++ return &cpu_topology[cpu].core_sibling;
++}
++
++static void update_siblings_masks(unsigned int cpuid)
++{
++ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
++ int cpu;
++
++ if (cpuid_topo->cluster_id == -1) {
++ /*
++ * DT does not contain topology information for this cpu.
++ */
++ pr_debug("CPU%u: No topology information configured\n", cpuid);
++ return;
++ }
++
++ /* update core and thread sibling masks */
++ for_each_possible_cpu(cpu) {
++ cpu_topo = &cpu_topology[cpu];
++
++ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
++ continue;
++
++ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
++ if (cpu != cpuid)
++ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
++
++ if (cpuid_topo->core_id != cpu_topo->core_id)
++ continue;
++
++ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
++ if (cpu != cpuid)
++ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
++ }
++}
++
++void store_cpu_topology(unsigned int cpuid)
++{
++ update_siblings_masks(cpuid);
++ update_cpu_power(cpuid);
++}
++
++#ifdef CONFIG_SCHED_HMP
++
++/*
++ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
++ * - mpidr: MPIDR[23:0] to be used for the look-up
++ *
++ * Returns the cpu logical index or -EINVAL on look-up error
++ */
++static inline int get_logical_index(u32 mpidr)
++{
++ int cpu;
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
++ if (cpu_logical_map(cpu) == mpidr)
++ return cpu;
++ return -EINVAL;
++}
++
++static const char * const little_cores[] = {
++ "arm,cortex-a53",
++ NULL,
++};
++
++static bool is_little_cpu(struct device_node *cn)
++{
++ const char * const *lc;
++ for (lc = little_cores; *lc; lc++)
++ if (of_device_is_compatible(cn, *lc))
++ return true;
++ return false;
++}
++
++void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
++ struct cpumask *slow)
++{
++ struct device_node *cn = NULL;
++ int cpu;
++
++ cpumask_clear(fast);
++ cpumask_clear(slow);
++
++ /*
++ * Use the config options if they are given. This helps testing
++ * HMP scheduling on systems without a big.LITTLE architecture.
++ */
++ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
++ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
++ WARN(1, "Failed to parse HMP fast cpu mask!\n");
++ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
++ WARN(1, "Failed to parse HMP slow cpu mask!\n");
++ return;
++ }
++
++ /*
++ * Else, parse device tree for little cores.
++ */
++ while ((cn = of_find_node_by_type(cn, "cpu"))) {
++
++ const u32 *mpidr;
++ int len;
++
++ mpidr = of_get_property(cn, "reg", &len);
++ if (!mpidr || len != 8) {
++ pr_err("%s missing reg property\n", cn->full_name);
++ continue;
++ }
++
++ cpu = get_logical_index(be32_to_cpup(mpidr+1));
++ if (cpu == -EINVAL) {
++ pr_err("couldn't get logical index for mpidr %x\n",
++ be32_to_cpup(mpidr+1));
++ break;
++ }
++
++ if (is_little_cpu(cn))
++ cpumask_set_cpu(cpu, slow);
++ else
++ cpumask_set_cpu(cpu, fast);
++ }
++
++ if (!cpumask_empty(fast) && !cpumask_empty(slow))
++ return;
++
++ /*
++ * We didn't find both big and little cores so let's call all cores
++ * fast as this will keep the system running, with all cores being
++ * treated equal.
++ */
++ cpumask_setall(fast);
++ cpumask_clear(slow);
++}
++
++struct cpumask hmp_slow_cpu_mask;
++
++void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
++{
++ struct cpumask hmp_fast_cpu_mask;
++ struct hmp_domain *domain;
++
++ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
++
++ /*
++ * Initialize hmp_domains
++ * Must be ordered with respect to compute capacity.
++ * Fastest domain at head of list.
++ */
++ if(!cpumask_empty(&hmp_slow_cpu_mask)) {
++ domain = (struct hmp_domain *)
++ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
++ cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
++ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
++ list_add(&domain->hmp_domains, hmp_domains_list);
++ }
++ domain = (struct hmp_domain *)
++ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
++ cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
++ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
++ list_add(&domain->hmp_domains, hmp_domains_list);
++}
++#endif /* CONFIG_SCHED_HMP */
++
++static void __init reset_cpu_topology(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
++
++ cpu_topo->thread_id = -1;
++ cpu_topo->core_id = 0;
++ cpu_topo->cluster_id = -1;
++
++ cpumask_clear(&cpu_topo->core_sibling);
++ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
++ cpumask_clear(&cpu_topo->thread_sibling);
++ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
++ }
++}
++
++static void __init reset_cpu_power(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu)
++ set_power_scale(cpu, SCHED_POWER_SCALE);
++}
++
++void __init init_cpu_topology(void)
++{
++ reset_cpu_topology();
++
++ /*
++ * Discard anything that was parsed if we hit an error so we
++ * don't use partial information.
++ */
++ if (parse_dt_topology())
++ reset_cpu_topology();
++
++ reset_cpu_power();
++ parse_dt_cpu_power();
++}
+diff -Nur linux-3.14.36/arch/arm64/kernel/vdso/Makefile linux-openelec/arch/arm64/kernel/vdso/Makefile
+--- linux-3.14.36/arch/arm64/kernel/vdso/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/vdso/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -47,9 +47,9 @@
+ $(call if_changed_dep,vdsoas)
+
+ # Actual build commands
+-quiet_cmd_vdsold = VDSOL $@
++quiet_cmd_vdsold = VDSOL $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+-quiet_cmd_vdsoas = VDSOA $@
++quiet_cmd_vdsoas = VDSOA $@
+ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+
+ # Install commands for the unstripped file
+diff -Nur linux-3.14.36/arch/arm64/kernel/vdso.c linux-openelec/arch/arm64/kernel/vdso.c
+--- linux-3.14.36/arch/arm64/kernel/vdso.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/vdso.c 2015-05-06 12:05:43.000000000 -0500
+@@ -156,11 +156,12 @@
+ int uses_interp)
+ {
+ struct mm_struct *mm = current->mm;
+- unsigned long vdso_base, vdso_mapping_len;
++ unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+ int ret;
+
++ vdso_text_len = vdso_pages << PAGE_SHIFT;
+ /* Be sure to map the data page */
+- vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
++ vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+
+ down_write(&mm->mmap_sem);
+ vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+@@ -170,35 +171,52 @@
+ }
+ mm->context.vdso = (void *)vdso_base;
+
+- ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
++ ret = install_special_mapping(mm, vdso_base, vdso_text_len,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ vdso_pagelist);
+- if (ret) {
+- mm->context.vdso = NULL;
++ if (ret)
++ goto up_fail;
++
++ vdso_base += vdso_text_len;
++ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
++ VM_READ|VM_MAYREAD,
++ vdso_pagelist + vdso_pages);
++ if (ret)
+ goto up_fail;
+- }
+
+-up_fail:
+ up_write(&mm->mmap_sem);
++ return 0;
+
++up_fail:
++ mm->context.vdso = NULL;
++ up_write(&mm->mmap_sem);
+ return ret;
+ }
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
++ unsigned long vdso_text;
++
++ if (!vma->vm_mm)
++ return NULL;
++
++ vdso_text = (unsigned long)vma->vm_mm->context.vdso;
++
+ /*
+ * We can re-use the vdso pointer in mm_context_t for identifying
+ * the vectors page for compat applications. The vDSO will always
+ * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
+ * it conflicting with the vectors base.
+ */
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
++ if (vma->vm_start == vdso_text) {
+ #ifdef CONFIG_COMPAT
+ if (vma->vm_start == AARCH32_VECTORS_BASE)
+ return "[vectors]";
+ #endif
+ return "[vdso]";
++ } else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
++ return "[vvar]";
+ }
+
+ return NULL;
+diff -Nur linux-3.14.36/arch/arm64/kernel/vmlinux.lds.S linux-openelec/arch/arm64/kernel/vmlinux.lds.S
+--- linux-3.14.36/arch/arm64/kernel/vmlinux.lds.S 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/kernel/vmlinux.lds.S 2015-05-06 12:05:43.000000000 -0500
+@@ -104,6 +104,13 @@
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
++
++ . = ALIGN(PAGE_SIZE);
++ idmap_pg_dir = .;
++ . += IDMAP_DIR_SIZE;
++ swapper_pg_dir = .;
++ . += SWAPPER_DIR_SIZE;
++
+ _end = .;
+
+ STABS_DEBUG
+diff -Nur linux-3.14.36/arch/arm64/Makefile linux-openelec/arch/arm64/Makefile
+--- linux-3.14.36/arch/arm64/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -45,6 +45,7 @@
+ core-y += arch/arm64/kernel/ arch/arm64/mm/
+ core-$(CONFIG_KVM) += arch/arm64/kvm/
+ core-$(CONFIG_XEN) += arch/arm64/xen/
++core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
+ libs-y := arch/arm64/lib/ $(libs-y)
+ libs-y += $(LIBGCC)
+
+diff -Nur linux-3.14.36/arch/arm64/mm/cache.S linux-openelec/arch/arm64/mm/cache.S
+--- linux-3.14.36/arch/arm64/mm/cache.S 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/mm/cache.S 2015-05-06 12:05:43.000000000 -0500
+@@ -30,7 +30,7 @@
+ *
+ * Corrupted registers: x0-x7, x9-x11
+ */
+-ENTRY(__flush_dcache_all)
++__flush_dcache_all:
+ dsb sy // ensure ordering with previous memory accesses
+ mrs x0, clidr_el1 // read clidr
+ and x3, x0, #0x7000000 // extract loc from clidr
+@@ -166,3 +166,97 @@
+ dsb sy
+ ret
+ ENDPROC(__flush_dcache_area)
++
++/*
++ * __inval_cache_range(start, end)
++ * - start - start address of region
++ * - end - end address of region
++ */
++ENTRY(__inval_cache_range)
++ /* FALLTHROUGH */
++
++/*
++ * __dma_inv_range(start, end)
++ * - start - virtual start address of region
++ * - end - virtual end address of region
++ */
++__dma_inv_range:
++ dcache_line_size x2, x3
++ sub x3, x2, #1
++ tst x1, x3 // end cache line aligned?
++ bic x1, x1, x3
++ b.eq 1f
++ dc civac, x1 // clean & invalidate D / U line
++1: tst x0, x3 // start cache line aligned?
++ bic x0, x0, x3
++ b.eq 2f
++ dc civac, x0 // clean & invalidate D / U line
++ b 3f
++2: dc ivac, x0 // invalidate D / U line
++3: add x0, x0, x2
++ cmp x0, x1
++ b.lo 2b
++ dsb sy
++ ret
++ENDPROC(__inval_cache_range)
++ENDPROC(__dma_inv_range)
++
++/*
++ * __dma_clean_range(start, end)
++ * - start - virtual start address of region
++ * - end - virtual end address of region
++ */
++__dma_clean_range:
++ dcache_line_size x2, x3
++ sub x3, x2, #1
++ bic x0, x0, x3
++1: dc cvac, x0 // clean D / U line
++ add x0, x0, x2
++ cmp x0, x1
++ b.lo 1b
++ dsb sy
++ ret
++ENDPROC(__dma_clean_range)
++
++/*
++ * __dma_flush_range(start, end)
++ * - start - virtual start address of region
++ * - end - virtual end address of region
++ */
++ENTRY(__dma_flush_range)
++ dcache_line_size x2, x3
++ sub x3, x2, #1
++ bic x0, x0, x3
++1: dc civac, x0 // clean & invalidate D / U line
++ add x0, x0, x2
++ cmp x0, x1
++ b.lo 1b
++ dsb sy
++ ret
++ENDPROC(__dma_flush_range)
++
++/*
++ * __dma_map_area(start, size, dir)
++ * - start - kernel virtual start address
++ * - size - size of region
++ * - dir - DMA direction
++ */
++ENTRY(__dma_map_area)
++ add x1, x1, x0
++ cmp w2, #DMA_FROM_DEVICE
++ b.eq __dma_inv_range
++ b __dma_clean_range
++ENDPROC(__dma_map_area)
++
++/*
++ * __dma_unmap_area(start, size, dir)
++ * - start - kernel virtual start address
++ * - size - size of region
++ * - dir - DMA direction
++ */
++ENTRY(__dma_unmap_area)
++ add x1, x1, x0
++ cmp w2, #DMA_TO_DEVICE
++ b.ne __dma_inv_range
++ ret
++ENDPROC(__dma_unmap_area)
+diff -Nur linux-3.14.36/arch/arm64/mm/copypage.c linux-openelec/arch/arm64/mm/copypage.c
+--- linux-3.14.36/arch/arm64/mm/copypage.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/mm/copypage.c 2015-05-06 12:05:43.000000000 -0500
+@@ -27,8 +27,10 @@
+ copy_page(kto, kfrom);
+ __flush_dcache_area(kto, PAGE_SIZE);
+ }
++EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
+
+ void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
+ {
+ clear_page(kaddr);
+ }
++EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
+diff -Nur linux-3.14.36/arch/arm64/mm/dma-mapping.c linux-openelec/arch/arm64/mm/dma-mapping.c
+--- linux-3.14.36/arch/arm64/mm/dma-mapping.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/mm/dma-mapping.c 2015-05-06 12:05:43.000000000 -0500
+@@ -22,26 +22,39 @@
+ #include <linux/slab.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dma-contiguous.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/vmalloc.h>
+ #include <linux/swiotlb.h>
++#include <linux/amba/bus.h>
+
+ #include <asm/cacheflush.h>
+
+ struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+-static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flags,
+- struct dma_attrs *attrs)
++static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
++ bool coherent)
++{
++ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
++ return pgprot_writecombine(prot);
++ else if (!coherent)
++ return pgprot_dmacoherent(prot);
++ return prot;
++}
++
++static void *__dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flags,
++ struct dma_attrs *attrs)
+ {
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return NULL;
+ }
+
+- if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
++ if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+ dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+- flags |= GFP_DMA32;
++ flags |= GFP_DMA;
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ struct page *page;
+
+@@ -58,9 +71,9 @@
+ }
+ }
+
+-static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle,
+- struct dma_attrs *attrs)
++static void __dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle,
++ struct dma_attrs *attrs)
+ {
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+@@ -78,9 +91,212 @@
+ }
+ }
+
+-static struct dma_map_ops arm64_swiotlb_dma_ops = {
+- .alloc = arm64_swiotlb_alloc_coherent,
+- .free = arm64_swiotlb_free_coherent,
++static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flags,
++ struct dma_attrs *attrs)
++{
++ struct page *page, **map;
++ void *ptr, *coherent_ptr;
++ int order, i;
++
++ size = PAGE_ALIGN(size);
++ order = get_order(size);
++
++ ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
++ if (!ptr)
++ goto no_mem;
++ map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
++ if (!map)
++ goto no_map;
++
++ /* remove any dirty cache lines on the kernel alias */
++ __dma_flush_range(ptr, ptr + size);
++
++ /* create a coherent mapping */
++ page = virt_to_page(ptr);
++ for (i = 0; i < (size >> PAGE_SHIFT); i++)
++ map[i] = page + i;
++ coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
++ __get_dma_pgprot(attrs, pgprot_default, false));
++ kfree(map);
++ if (!coherent_ptr)
++ goto no_map;
++
++ return coherent_ptr;
++
++no_map:
++ __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
++no_mem:
++ *dma_handle = ~0;
++ return NULL;
++}
++
++static void __dma_free_noncoherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle,
++ struct dma_attrs *attrs)
++{
++ void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
++
++ vunmap(vaddr);
++ __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
++}
++
++static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ dma_addr_t dev_addr;
++
++ dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
++ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
++
++ return dev_addr;
++}
++
++
++static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
++ swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
++}
++
++static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
++ int nelems, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i, ret;
++
++ ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
++ for_each_sg(sgl, sg, ret, i)
++ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
++ sg->length, dir);
++
++ return ret;
++}
++
++static void __swiotlb_unmap_sg_attrs(struct device *dev,
++ struct scatterlist *sgl, int nelems,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i;
++
++ for_each_sg(sgl, sg, nelems, i)
++ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
++ sg->length, dir);
++ swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
++}
++
++static void __swiotlb_sync_single_for_cpu(struct device *dev,
++ dma_addr_t dev_addr, size_t size,
++ enum dma_data_direction dir)
++{
++ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
++ swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
++}
++
++static void __swiotlb_sync_single_for_device(struct device *dev,
++ dma_addr_t dev_addr, size_t size,
++ enum dma_data_direction dir)
++{
++ swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
++ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
++}
++
++static void __swiotlb_sync_sg_for_cpu(struct device *dev,
++ struct scatterlist *sgl, int nelems,
++ enum dma_data_direction dir)
++{
++ struct scatterlist *sg;
++ int i;
++
++ for_each_sg(sgl, sg, nelems, i)
++ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
++ sg->length, dir);
++ swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
++}
++
++static void __swiotlb_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sgl, int nelems,
++ enum dma_data_direction dir)
++{
++ struct scatterlist *sg;
++ int i;
++
++ swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
++ for_each_sg(sgl, sg, nelems, i)
++ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
++ sg->length, dir);
++}
++
++/* vma->vm_page_prot must be set appropriately before calling this function */
++static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size)
++{
++ int ret = -ENXIO;
++ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
++ PAGE_SHIFT;
++ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
++ unsigned long off = vma->vm_pgoff;
++
++ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
++ return ret;
++
++ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
++ ret = remap_pfn_range(vma, vma->vm_start,
++ pfn + off,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++ }
++
++ return ret;
++}
++
++static int __swiotlb_mmap_noncoherent(struct device *dev,
++ struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
++{
++ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
++ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++
++static int __swiotlb_mmap_coherent(struct device *dev,
++ struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
++{
++ /* Just use whatever page_prot attributes were specified */
++ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++
++struct dma_map_ops noncoherent_swiotlb_dma_ops = {
++ .alloc = __dma_alloc_noncoherent,
++ .free = __dma_free_noncoherent,
++ .mmap = __swiotlb_mmap_noncoherent,
++ .map_page = __swiotlb_map_page,
++ .unmap_page = __swiotlb_unmap_page,
++ .map_sg = __swiotlb_map_sg_attrs,
++ .unmap_sg = __swiotlb_unmap_sg_attrs,
++ .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
++ .sync_single_for_device = __swiotlb_sync_single_for_device,
++ .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
++ .sync_sg_for_device = __swiotlb_sync_sg_for_device,
++ .dma_supported = swiotlb_dma_supported,
++ .mapping_error = swiotlb_dma_mapping_error,
++};
++EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
++
++struct dma_map_ops coherent_swiotlb_dma_ops = {
++ .alloc = __dma_alloc_coherent,
++ .free = __dma_free_coherent,
++ .mmap = __swiotlb_mmap_coherent,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+@@ -92,12 +308,47 @@
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
+ };
++EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
+
+-void __init arm64_swiotlb_init(void)
++static int dma_bus_notifier(struct notifier_block *nb,
++ unsigned long event, void *_dev)
+ {
+- dma_ops = &arm64_swiotlb_dma_ops;
+- swiotlb_init(1);
++ struct device *dev = _dev;
++
++ if (event != BUS_NOTIFY_ADD_DEVICE)
++ return NOTIFY_DONE;
++
++ if (of_property_read_bool(dev->of_node, "dma-coherent"))
++ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block platform_bus_nb = {
++ .notifier_call = dma_bus_notifier,
++};
++
++static struct notifier_block amba_bus_nb = {
++ .notifier_call = dma_bus_notifier,
++};
++
++extern int swiotlb_late_init_with_default_size(size_t default_size);
++
++static int __init swiotlb_late_init(void)
++{
++ size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
++
++ /*
++ * These must be registered before of_platform_populate().
++ */
++ bus_register_notifier(&platform_bus_type, &platform_bus_nb);
++ bus_register_notifier(&amba_bustype, &amba_bus_nb);
++
++ dma_ops = &noncoherent_swiotlb_dma_ops;
++
++ return swiotlb_late_init_with_default_size(swiotlb_size);
+ }
++arch_initcall(swiotlb_late_init);
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+diff -Nur linux-3.14.36/arch/arm64/mm/init.c linux-openelec/arch/arm64/mm/init.c
+--- linux-3.14.36/arch/arm64/mm/init.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/mm/init.c 2015-05-06 12:05:43.000000000 -0500
+@@ -30,6 +30,7 @@
+ #include <linux/memblock.h>
+ #include <linux/sort.h>
+ #include <linux/of_fdt.h>
++#include <linux/dma-mapping.h>
+ #include <linux/dma-contiguous.h>
+
+ #include <asm/sections.h>
+@@ -59,22 +60,22 @@
+ early_param("initrd", early_initrd);
+ #endif
+
+-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+-
+ static void __init zone_sizes_init(unsigned long min, unsigned long max)
+ {
+ struct memblock_region *reg;
+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+- unsigned long max_dma32 = min;
++ unsigned long max_dma = min;
+
+ memset(zone_size, 0, sizeof(zone_size));
+
+-#ifdef CONFIG_ZONE_DMA32
+ /* 4GB maximum for 32-bit only capable devices */
+- max_dma32 = max(min, min(max, MAX_DMA32_PFN));
+- zone_size[ZONE_DMA32] = max_dma32 - min;
+-#endif
+- zone_size[ZONE_NORMAL] = max - max_dma32;
++ if (IS_ENABLED(CONFIG_ZONE_DMA)) {
++ unsigned long max_dma_phys =
++ (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
++ max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
++ zone_size[ZONE_DMA] = max_dma - min;
++ }
++ zone_size[ZONE_NORMAL] = max - max_dma;
+
+ memcpy(zhole_size, zone_size, sizeof(zhole_size));
+
+@@ -84,15 +85,15 @@
+
+ if (start >= max)
+ continue;
+-#ifdef CONFIG_ZONE_DMA32
+- if (start < max_dma32) {
+- unsigned long dma_end = min(end, max_dma32);
+- zhole_size[ZONE_DMA32] -= dma_end - start;
++
++ if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
++ unsigned long dma_end = min(end, max_dma);
++ zhole_size[ZONE_DMA] -= dma_end - start;
+ }
+-#endif
+- if (end > max_dma32) {
++
++ if (end > max_dma) {
+ unsigned long normal_end = min(end, max);
+- unsigned long normal_start = max(start, max_dma32);
++ unsigned long normal_start = max(start, max_dma);
+ zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+ }
+ }
+@@ -127,20 +128,16 @@
+ {
+ u64 *reserve_map, base, size;
+
+- /* Register the kernel text, kernel data and initrd with memblock */
++ /*
++ * Register the kernel text, kernel data, initrd, and initial
++ * pagetables with memblock.
++ */
+ memblock_reserve(__pa(_text), _end - _text);
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
+ #endif
+
+- /*
+- * Reserve the page tables. These are already in use,
+- * and can only be in node 0.
+- */
+- memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
+- memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
+-
+ /* Reserve the dtb region */
+ memblock_reserve(virt_to_phys(initial_boot_params),
+ be32_to_cpu(initial_boot_params->totalsize));
+@@ -261,8 +258,6 @@
+ */
+ void __init mem_init(void)
+ {
+- arm64_swiotlb_init();
+-
+ max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+
+ #ifndef CONFIG_SPARSEMEM_VMEMMAP
+diff -Nur linux-3.14.36/arch/arm64/mm/proc.S linux-openelec/arch/arm64/mm/proc.S
+--- linux-3.14.36/arch/arm64/mm/proc.S 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/arm64/mm/proc.S 2015-05-06 12:05:43.000000000 -0500
+@@ -173,12 +173,6 @@
+ * value of the SCTLR_EL1 register.
+ */
+ ENTRY(__cpu_setup)
+- /*
+- * Preserve the link register across the function call.
+- */
+- mov x28, lr
+- bl __flush_dcache_all
+- mov lr, x28
+ ic iallu // I+BTB cache invalidate
+ tlbi vmalle1is // invalidate I + D TLBs
+ dsb sy
+diff -Nur linux-3.14.36/arch/avr32/kernel/cpu.c linux-openelec/arch/avr32/kernel/cpu.c
+--- linux-3.14.36/arch/avr32/kernel/cpu.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/avr32/kernel/cpu.c 2015-05-06 12:05:43.000000000 -0500
+@@ -39,10 +39,12 @@
+ size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf || val > 0x3f)
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
++ if (val > 0x3f)
+ return -EINVAL;
+ val = (val << 12) | (sysreg_read(PCCR) & 0xfffc0fff);
+ sysreg_write(PCCR, val);
+@@ -61,11 +63,11 @@
+ const char *buf, size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf)
+- return -EINVAL;
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
+ sysreg_write(PCNT0, val);
+
+ return count;
+@@ -84,10 +86,12 @@
+ size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf || val > 0x3f)
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
++ if (val > 0x3f)
+ return -EINVAL;
+ val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff);
+ sysreg_write(PCCR, val);
+@@ -106,11 +110,11 @@
+ size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf)
+- return -EINVAL;
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
+ sysreg_write(PCNT1, val);
+
+ return count;
+@@ -129,11 +133,11 @@
+ size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf)
+- return -EINVAL;
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
+ sysreg_write(PCCNT, val);
+
+ return count;
+@@ -152,11 +156,11 @@
+ size_t count)
+ {
+ unsigned long pccr, val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf)
+- return -EINVAL;
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
+ if (val)
+ val = 1;
+
+diff -Nur linux-3.14.36/arch/blackfin/include/asm/ftrace.h linux-openelec/arch/blackfin/include/asm/ftrace.h
+--- linux-3.14.36/arch/blackfin/include/asm/ftrace.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/blackfin/include/asm/ftrace.h 2015-05-06 12:05:43.000000000 -0500
+@@ -66,16 +66,7 @@
+
+ #endif /* CONFIG_FRAME_POINTER */
+
+-#define HAVE_ARCH_CALLER_ADDR
+-
+-/* inline function or macro may lead to unexpected result */
+-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-#define CALLER_ADDR1 ((unsigned long)return_address(1))
+-#define CALLER_ADDR2 ((unsigned long)return_address(2))
+-#define CALLER_ADDR3 ((unsigned long)return_address(3))
+-#define CALLER_ADDR4 ((unsigned long)return_address(4))
+-#define CALLER_ADDR5 ((unsigned long)return_address(5))
+-#define CALLER_ADDR6 ((unsigned long)return_address(6))
++#define ftrace_return_address(n) return_address(n)
+
+ #endif /* __ASSEMBLY__ */
+
+diff -Nur linux-3.14.36/arch/hexagon/include/asm/elf.h linux-openelec/arch/hexagon/include/asm/elf.h
+--- linux-3.14.36/arch/hexagon/include/asm/elf.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/hexagon/include/asm/elf.h 2015-05-06 12:05:44.000000000 -0500
+@@ -1,7 +1,7 @@
+ /*
+ * ELF definitions for the Hexagon architecture
+ *
+- * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+diff -Nur linux-3.14.36/arch/parisc/include/asm/ftrace.h linux-openelec/arch/parisc/include/asm/ftrace.h
+--- linux-3.14.36/arch/parisc/include/asm/ftrace.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/parisc/include/asm/ftrace.h 2015-05-06 12:05:43.000000000 -0500
+@@ -24,15 +24,7 @@
+
+ extern unsigned long return_address(unsigned int);
+
+-#define HAVE_ARCH_CALLER_ADDR
+-
+-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-#define CALLER_ADDR1 return_address(1)
+-#define CALLER_ADDR2 return_address(2)
+-#define CALLER_ADDR3 return_address(3)
+-#define CALLER_ADDR4 return_address(4)
+-#define CALLER_ADDR5 return_address(5)
+-#define CALLER_ADDR6 return_address(6)
++#define ftrace_return_address(n) return_address(n)
+
+ #endif /* __ASSEMBLY__ */
+
+diff -Nur linux-3.14.36/arch/s390/include/asm/cio.h linux-openelec/arch/s390/include/asm/cio.h
+--- linux-3.14.36/arch/s390/include/asm/cio.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/s390/include/asm/cio.h 2015-05-06 12:05:43.000000000 -0500
+@@ -199,7 +199,7 @@
+ /**
+ * struct irb - interruption response block
+ * @scsw: subchannel status word
+- * @esw: extened status word
++ * @esw: extended status word
+ * @ecw: extended control word
+ *
+ * The irb that is handed to the device driver when an interrupt occurs. For
+diff -Nur linux-3.14.36/arch/sh/include/asm/ftrace.h linux-openelec/arch/sh/include/asm/ftrace.h
+--- linux-3.14.36/arch/sh/include/asm/ftrace.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/sh/include/asm/ftrace.h 2015-05-06 12:05:44.000000000 -0500
+@@ -40,15 +40,7 @@
+ /* arch/sh/kernel/return_address.c */
+ extern void *return_address(unsigned int);
+
+-#define HAVE_ARCH_CALLER_ADDR
+-
+-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-#define CALLER_ADDR1 ((unsigned long)return_address(1))
+-#define CALLER_ADDR2 ((unsigned long)return_address(2))
+-#define CALLER_ADDR3 ((unsigned long)return_address(3))
+-#define CALLER_ADDR4 ((unsigned long)return_address(4))
+-#define CALLER_ADDR5 ((unsigned long)return_address(5))
+-#define CALLER_ADDR6 ((unsigned long)return_address(6))
++#define ftrace_return_address(n) return_address(n)
+
+ #endif /* __ASSEMBLY__ */
+
+diff -Nur linux-3.14.36/arch/x86/kernel/setup.c linux-openelec/arch/x86/kernel/setup.c
+--- linux-3.14.36/arch/x86/kernel/setup.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/arch/x86/kernel/setup.c 2015-05-06 12:05:44.000000000 -0500
+@@ -1120,7 +1120,7 @@
+ setup_real_mode();
+
+ memblock_set_current_limit(get_max_mapped());
+- dma_contiguous_reserve(0);
++ dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
+
+ /*
+ * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
+diff -Nur linux-3.14.36/block/bfq-cgroup.c linux-openelec/block/bfq-cgroup.c
+--- linux-3.14.36/block/bfq-cgroup.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/block/bfq-cgroup.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,932 @@
++/*
++ * BFQ: CGROUPS support.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++
++static DEFINE_MUTEX(bfqio_mutex);
++
++static bool bfqio_is_removed(struct bfqio_cgroup *bgrp)
++{
++ return bgrp ? !bgrp->online : false;
++}
++
++static struct bfqio_cgroup bfqio_root_cgroup = {
++ .weight = BFQ_DEFAULT_GRP_WEIGHT,
++ .ioprio = BFQ_DEFAULT_GRP_IOPRIO,
++ .ioprio_class = BFQ_DEFAULT_GRP_CLASS,
++};
++
++static inline void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css)
++{
++ return css ? container_of(css, struct bfqio_cgroup, css) : NULL;
++}
++
++/*
++ * Search the bfq_group for bfqd into the hash table (by now only a list)
++ * of bgrp. Must be called under rcu_read_lock().
++ */
++static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
++ struct bfq_data *bfqd)
++{
++ struct bfq_group *bfqg;
++ void *key;
++
++ hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) {
++ key = rcu_dereference(bfqg->bfqd);
++ if (key == bfqd)
++ return bfqg;
++ }
++
++ return NULL;
++}
++
++static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
++ struct bfq_group *bfqg)
++{
++ struct bfq_entity *entity = &bfqg->entity;
++
++ /*
++ * If the weight of the entity has never been set via the sysfs
++ * interface, then bgrp->weight == 0. In this case we initialize
++ * the weight from the current ioprio value. Otherwise, the group
++ * weight, if set, has priority over the ioprio value.
++ */
++ if (bgrp->weight == 0) {
++ entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio);
++ entity->new_ioprio = bgrp->ioprio;
++ } else {
++ entity->new_weight = bgrp->weight;
++ entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight);
++ }
++ entity->orig_weight = entity->weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
++ entity->my_sched_data = &bfqg->sched_data;
++ bfqg->active_entities = 0;
++}
++
++static inline void bfq_group_set_parent(struct bfq_group *bfqg,
++ struct bfq_group *parent)
++{
++ struct bfq_entity *entity;
++
++ BUG_ON(parent == NULL);
++ BUG_ON(bfqg == NULL);
++
++ entity = &bfqg->entity;
++ entity->parent = parent->my_entity;
++ entity->sched_data = &parent->sched_data;
++}
++
++/**
++ * bfq_group_chain_alloc - allocate a chain of groups.
++ * @bfqd: queue descriptor.
++ * @css: the leaf cgroup_subsys_state this chain starts from.
++ *
++ * Allocate a chain of groups starting from the one belonging to
++ * @cgroup up to the root cgroup. Stop if a cgroup on the chain
++ * to the root has already an allocated group on @bfqd.
++ */
++static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp;
++ struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
++
++ for (; css != NULL; css = css->parent) {
++ bgrp = css_to_bfqio(css);
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ if (bfqg != NULL) {
++ /*
++ * All the cgroups in the path from there to the
++ * root must have a bfq_group for bfqd, so we don't
++ * need any more allocations.
++ */
++ break;
++ }
++
++ bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
++ if (bfqg == NULL)
++ goto cleanup;
++
++ bfq_group_init_entity(bgrp, bfqg);
++ bfqg->my_entity = &bfqg->entity;
++
++ if (leaf == NULL) {
++ leaf = bfqg;
++ prev = leaf;
++ } else {
++ bfq_group_set_parent(prev, bfqg);
++ /*
++ * Build a list of allocated nodes using the bfqd
++ * filed, that is still unused and will be
++ * initialized only after the node will be
++ * connected.
++ */
++ prev->bfqd = bfqg;
++ prev = bfqg;
++ }
++ }
++
++ return leaf;
++
++cleanup:
++ while (leaf != NULL) {
++ prev = leaf;
++ leaf = leaf->bfqd;
++ kfree(prev);
++ }
++
++ return NULL;
++}
++
++/**
++ * bfq_group_chain_link - link an allocated group chain to a cgroup
++ * hierarchy.
++ * @bfqd: the queue descriptor.
++ * @css: the leaf cgroup_subsys_state to start from.
++ * @leaf: the leaf group (to be associated to @cgroup).
++ *
++ * Try to link a chain of groups to a cgroup hierarchy, connecting the
++ * nodes bottom-up, so we can be sure that when we find a cgroup in the
++ * hierarchy that already as a group associated to @bfqd all the nodes
++ * in the path to the root cgroup have one too.
++ *
++ * On locking: the queue lock protects the hierarchy (there is a hierarchy
++ * per device) while the bfqio_cgroup lock protects the list of groups
++ * belonging to the same cgroup.
++ */
++static void bfq_group_chain_link(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css,
++ struct bfq_group *leaf)
++{
++ struct bfqio_cgroup *bgrp;
++ struct bfq_group *bfqg, *next, *prev = NULL;
++ unsigned long flags;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ for (; css != NULL && leaf != NULL; css = css->parent) {
++ bgrp = css_to_bfqio(css);
++ next = leaf->bfqd;
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ BUG_ON(bfqg != NULL);
++
++ spin_lock_irqsave(&bgrp->lock, flags);
++
++ rcu_assign_pointer(leaf->bfqd, bfqd);
++ hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
++ hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
++
++ spin_unlock_irqrestore(&bgrp->lock, flags);
++
++ prev = leaf;
++ leaf = next;
++ }
++
++ BUG_ON(css == NULL && leaf != NULL);
++ if (css != NULL && prev != NULL) {
++ bgrp = css_to_bfqio(css);
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ bfq_group_set_parent(prev, bfqg);
++ }
++}
++
++/**
++ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
++ * @bfqd: queue descriptor.
++ * @cgroup: cgroup being searched for.
++ *
++ * Return a group associated to @bfqd in @cgroup, allocating one if
++ * necessary. When a group is returned all the cgroups in the path
++ * to the root have a group associated to @bfqd.
++ *
++ * If the allocation fails, return the root group: this breaks guarantees
++ * but is a safe fallback. If this loss becomes a problem it can be
++ * mitigated using the equivalent weight (given by the product of the
++ * weights of the groups in the path from @group to the root) in the
++ * root scheduler.
++ *
++ * We allocate all the missing nodes in the path from the leaf cgroup
++ * to the root and we connect the nodes only after all the allocations
++ * have been successful.
++ */
++static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++ struct bfq_group *bfqg;
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ if (bfqg != NULL)
++ return bfqg;
++
++ bfqg = bfq_group_chain_alloc(bfqd, css);
++ if (bfqg != NULL)
++ bfq_group_chain_link(bfqd, css, bfqg);
++ else
++ bfqg = bfqd->root_group;
++
++ return bfqg;
++}
++
++/**
++ * bfq_bfqq_move - migrate @bfqq to @bfqg.
++ * @bfqd: queue descriptor.
++ * @bfqq: the queue to move.
++ * @entity: @bfqq's entity.
++ * @bfqg: the group to move to.
++ *
++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
++ * it on the new one. Avoid putting the entity on the old group idle tree.
++ *
++ * Must be called under the queue lock; the cgroup owning @bfqg must
++ * not disappear (by now this just means that we are called under
++ * rcu_read_lock()).
++ */
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_entity *entity, struct bfq_group *bfqg)
++{
++ int busy, resume;
++
++ busy = bfq_bfqq_busy(bfqq);
++ resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
++
++ BUG_ON(resume && !entity->on_st);
++ BUG_ON(busy && !resume && entity->on_st &&
++ bfqq != bfqd->in_service_queue);
++
++ if (busy) {
++ BUG_ON(atomic_read(&bfqq->ref) < 2);
++
++ if (!resume)
++ bfq_del_bfqq_busy(bfqd, bfqq, 0);
++ else
++ bfq_deactivate_bfqq(bfqd, bfqq, 0);
++ } else if (entity->on_st)
++ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
++
++ /*
++ * Here we use a reference to bfqg. We don't need a refcounter
++ * as the cgroup reference will not be dropped, so that its
++ * destroy() callback will not be invoked.
++ */
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++
++ if (busy && resume)
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++}
++
++/**
++ * __bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bfqd: the queue descriptor.
++ * @bic: the bic to move.
++ * @cgroup: the cgroup to move to.
++ *
++ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
++ * has to make sure that the reference to cgroup is valid across the call.
++ *
++ * NOTE: an alternative approach might have been to store the current
++ * cgroup in bfqq and getting a reference to it, reducing the lookup
++ * time here, at the price of slightly more complex code.
++ */
++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct cgroup_subsys_state *css)
++{
++ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
++ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++ struct bfq_entity *entity;
++ struct bfq_group *bfqg;
++ struct bfqio_cgroup *bgrp;
++
++ bgrp = css_to_bfqio(css);
++
++ bfqg = bfq_find_alloc_group(bfqd, css);
++ if (async_bfqq != NULL) {
++ entity = &async_bfqq->entity;
++
++ if (entity->sched_data != &bfqg->sched_data) {
++ bic_set_bfqq(bic, NULL, 0);
++ bfq_log_bfqq(bfqd, async_bfqq,
++ "bic_change_group: %p %d",
++ async_bfqq, atomic_read(&async_bfqq->ref));
++ bfq_put_queue(async_bfqq);
++ }
++ }
++
++ if (sync_bfqq != NULL) {
++ entity = &sync_bfqq->entity;
++ if (entity->sched_data != &bfqg->sched_data)
++ bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
++ }
++
++ return bfqg;
++}
++
++/**
++ * bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bic: the bic being migrated.
++ * @cgroup: the destination cgroup.
++ *
++ * When the task owning @bic is moved to @cgroup, @bic is immediately
++ * moved into its new parent group.
++ */
++static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
++ struct cgroup_subsys_state *css)
++{
++ struct bfq_data *bfqd;
++ unsigned long uninitialized_var(flags);
++
++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++ &flags);
++ if (bfqd != NULL) {
++ __bfq_bic_change_cgroup(bfqd, bic, css);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++}
++
++/**
++ * bfq_bic_update_cgroup - update the cgroup of @bic.
++ * @bic: the @bic to update.
++ *
++ * Make sure that @bic is enqueued in the cgroup of the current task.
++ * We need this in addition to moving bics during the cgroup attach
++ * phase because the task owning @bic could be at its first disk
++ * access or we may end up in the root cgroup as the result of a
++ * memory allocation failure and here we try to move to the right
++ * group.
++ *
++ * Must be called under the queue lock. It is safe to use the returned
++ * value even after the rcu_read_unlock() as the migration/destruction
++ * paths act under the queue lock too. IOW it is impossible to race with
++ * group migration/destruction and end up with an invalid group as:
++ * a) here cgroup has not yet been destroyed, nor its destroy callback
++ * has started execution, as current holds a reference to it,
++ * b) if it is destroyed after rcu_read_unlock() [after current is
++ * migrated to a different cgroup] its attach() callback will have
++ * taken care of remove all the references to the old cgroup data.
++ */
++static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_group *bfqg;
++ struct cgroup_subsys_state *css;
++
++ BUG_ON(bfqd == NULL);
++
++ rcu_read_lock();
++ css = task_css(current, bfqio_subsys_id);
++ bfqg = __bfq_bic_change_cgroup(bfqd, bic, css);
++ rcu_read_unlock();
++
++ return bfqg;
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entity = st->first_idle;
++
++ for (; entity != NULL; entity = st->first_idle)
++ __bfq_deactivate_entity(entity, 0);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(bfqq == NULL);
++ bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
++ return;
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active
++ * entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs queue_lock to be taken and reference to be valid over the call.
++ */
++static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ struct bfq_service_tree *st)
++{
++ struct rb_root *active = &st->active;
++ struct bfq_entity *entity = NULL;
++
++ if (!RB_EMPTY_ROOT(&st->active))
++ entity = bfq_entity_of(rb_first(active));
++
++ for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
++ bfq_reparent_leaf_entity(bfqd, entity);
++
++ if (bfqg->sched_data.in_service_entity != NULL)
++ bfq_reparent_leaf_entity(bfqd,
++ bfqg->sched_data.in_service_entity);
++
++ return;
++}
++
++/**
++ * bfq_destroy_group - destroy @bfqg.
++ * @bgrp: the bfqio_cgroup containing @bfqg.
++ * @bfqg: the group being destroyed.
++ *
++ * Destroy @bfqg, making sure that it is not referenced from its parent.
++ */
++static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
++{
++ struct bfq_data *bfqd;
++ struct bfq_service_tree *st;
++ struct bfq_entity *entity = bfqg->my_entity;
++ unsigned long uninitialized_var(flags);
++ int i;
++
++ hlist_del(&bfqg->group_node);
++
++ /*
++ * Empty all service_trees belonging to this group before
++ * deactivating the group itself.
++ */
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++ st = bfqg->sched_data.service_tree + i;
++
++ /*
++ * The idle tree may still contain bfq_queues belonging
++ * to exited task because they never migrated to a different
++ * cgroup from the one being destroyed now. No one else
++ * can access them so it's safe to act without any lock.
++ */
++ bfq_flush_idle_tree(st);
++
++ /*
++ * It may happen that some queues are still active
++ * (busy) upon group destruction (if the corresponding
++ * processes have been forced to terminate). We move
++ * all the leaf entities corresponding to these queues
++ * to the root_group.
++ * Also, it may happen that the group has an entity
++ * in service, which is disconnected from the active
++ * tree: it must be moved, too.
++ * There is no need to put the sync queues, as the
++ * scheduler has taken no reference.
++ */
++ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ if (bfqd != NULL) {
++ bfq_reparent_active_entities(bfqd, bfqg, st);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++ BUG_ON(!RB_EMPTY_ROOT(&st->active));
++ BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++ }
++ BUG_ON(bfqg->sched_data.next_in_service != NULL);
++ BUG_ON(bfqg->sched_data.in_service_entity != NULL);
++
++ /*
++ * We may race with device destruction, take extra care when
++ * dereferencing bfqg->bfqd.
++ */
++ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ if (bfqd != NULL) {
++ hlist_del(&bfqg->bfqd_node);
++ __bfq_deactivate_entity(entity, 0);
++ bfq_put_async_queues(bfqd, bfqg);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++ BUG_ON(entity->tree != NULL);
++
++ /*
++ * No need to defer the kfree() to the end of the RCU grace
++ * period: we are called from the destroy() callback of our
++ * cgroup, so we can be sure that no one is a) still using
++ * this cgroup or b) doing lookups in it.
++ */
++ kfree(bfqg);
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
++ bfq_end_wr_async_queues(bfqd, bfqg);
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++/**
++ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
++ * @bfqd: the device descriptor being exited.
++ *
++ * When the device exits we just make sure that no lookup can return
++ * the now unused group structures. They will be deallocated on cgroup
++ * destruction.
++ */
++static void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ bfq_log(bfqd, "disconnect_groups beginning");
++ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
++ hlist_del(&bfqg->bfqd_node);
++
++ __bfq_deactivate_entity(bfqg->my_entity, 0);
++
++ /*
++ * Don't remove from the group hash, just set an
++ * invalid key. No lookups can race with the
++ * assignment as bfqd is being destroyed; this
++ * implies also that new elements cannot be added
++ * to the list.
++ */
++ rcu_assign_pointer(bfqg->bfqd, NULL);
++
++ bfq_log(bfqd, "disconnect_groups: put async for group %p",
++ bfqg);
++ bfq_put_async_queues(bfqd, bfqg);
++ }
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++ struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
++ struct bfq_group *bfqg = bfqd->root_group;
++
++ bfq_put_async_queues(bfqd, bfqg);
++
++ spin_lock_irq(&bgrp->lock);
++ hlist_del_rcu(&bfqg->group_node);
++ spin_unlock_irq(&bgrp->lock);
++
++ /*
++ * No need to synchronize_rcu() here: since the device is gone
++ * there cannot be any read-side access to its root_group.
++ */
++ kfree(bfqg);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ struct bfqio_cgroup *bgrp;
++ int i;
++
++ bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node);
++ if (bfqg == NULL)
++ return NULL;
++
++ bfqg->entity.parent = NULL;
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ bgrp = &bfqio_root_cgroup;
++ spin_lock_irq(&bgrp->lock);
++ rcu_assign_pointer(bfqg->bfqd, bfqd);
++ hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
++ spin_unlock_irq(&bgrp->lock);
++
++ return bfqg;
++}
++
++#define SHOW_FUNCTION(__VAR) \
++static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \
++ struct cftype *cftype) \
++{ \
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
++ u64 ret = -ENODEV; \
++ \
++ mutex_lock(&bfqio_mutex); \
++ if (bfqio_is_removed(bgrp)) \
++ goto out_unlock; \
++ \
++ spin_lock_irq(&bgrp->lock); \
++ ret = bgrp->__VAR; \
++ spin_unlock_irq(&bgrp->lock); \
++ \
++out_unlock: \
++ mutex_unlock(&bfqio_mutex); \
++ return ret; \
++}
++
++SHOW_FUNCTION(weight);
++SHOW_FUNCTION(ioprio);
++SHOW_FUNCTION(ioprio_class);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__VAR, __MIN, __MAX) \
++static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\
++ struct cftype *cftype, \
++ u64 val) \
++{ \
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
++ struct bfq_group *bfqg; \
++ int ret = -EINVAL; \
++ \
++ if (val < (__MIN) || val > (__MAX)) \
++ return ret; \
++ \
++ ret = -ENODEV; \
++ mutex_lock(&bfqio_mutex); \
++ if (bfqio_is_removed(bgrp)) \
++ goto out_unlock; \
++ ret = 0; \
++ \
++ spin_lock_irq(&bgrp->lock); \
++ bgrp->__VAR = (unsigned short)val; \
++ hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \
++ /* \
++ * Setting the ioprio_changed flag of the entity \
++ * to 1 with new_##__VAR == ##__VAR would re-set \
++ * the value of the weight to its ioprio mapping. \
++ * Set the flag only if necessary. \
++ */ \
++ if ((unsigned short)val != bfqg->entity.new_##__VAR) { \
++ bfqg->entity.new_##__VAR = (unsigned short)val; \
++ /* \
++ * Make sure that the above new value has been \
++ * stored in bfqg->entity.new_##__VAR before \
++ * setting the ioprio_changed flag. In fact, \
++ * this flag may be read asynchronously (in \
++ * critical sections protected by a different \
++ * lock than that held here), and finding this \
++ * flag set may cause the execution of the code \
++ * for updating parameters whose value may \
++ * depend also on bfqg->entity.new_##__VAR (in \
++ * __bfq_entity_update_weight_prio). \
++ * This barrier makes sure that the new value \
++ * of bfqg->entity.new_##__VAR is correctly \
++ * seen in that code. \
++ */ \
++ smp_wmb(); \
++ bfqg->entity.ioprio_changed = 1; \
++ } \
++ } \
++ spin_unlock_irq(&bgrp->lock); \
++ \
++out_unlock: \
++ mutex_unlock(&bfqio_mutex); \
++ return ret; \
++}
++
++STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
++STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
++STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
++#undef STORE_FUNCTION
++
++static struct cftype bfqio_files[] = {
++ {
++ .name = "weight",
++ .read_u64 = bfqio_cgroup_weight_read,
++ .write_u64 = bfqio_cgroup_weight_write,
++ },
++ {
++ .name = "ioprio",
++ .read_u64 = bfqio_cgroup_ioprio_read,
++ .write_u64 = bfqio_cgroup_ioprio_write,
++ },
++ {
++ .name = "ioprio_class",
++ .read_u64 = bfqio_cgroup_ioprio_class_read,
++ .write_u64 = bfqio_cgroup_ioprio_class_write,
++ },
++ { }, /* terminate */
++};
++
++static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state
++ *parent_css)
++{
++ struct bfqio_cgroup *bgrp;
++
++ if (parent_css != NULL) {
++ bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
++ if (bgrp == NULL)
++ return ERR_PTR(-ENOMEM);
++ } else
++ bgrp = &bfqio_root_cgroup;
++
++ spin_lock_init(&bgrp->lock);
++ INIT_HLIST_HEAD(&bgrp->group_data);
++ bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
++ bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
++
++ return &bgrp->css;
++}
++
++/*
++ * We cannot support shared io contexts, as we have no means to support
++ * two tasks with the same ioc in two different groups without major rework
++ * of the main bic/bfqq data structures. By now we allow a task to change
++ * its cgroup only if it's the only owner of its ioc; the drawback of this
++ * behavior is that a group containing a task that forked using CLONE_IO
++ * will not be destroyed until the tasks sharing the ioc die.
++ */
++static int bfqio_can_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++ struct io_context *ioc;
++ int ret = 0;
++
++ cgroup_taskset_for_each(task, css, tset) {
++ /*
++ * task_lock() is needed to avoid races with
++ * exit_io_context()
++ */
++ task_lock(task);
++ ioc = task->io_context;
++ if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
++ /*
++ * ioc == NULL means that the task is either too
++ * young or exiting: if it has still no ioc the
++ * ioc can't be shared, if the task is exiting the
++ * attach will fail anyway, no matter what we
++ * return here.
++ */
++ ret = -EINVAL;
++ task_unlock(task);
++ if (ret)
++ break;
++ }
++
++ return ret;
++}
++
++static void bfqio_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++ struct io_context *ioc;
++ struct io_cq *icq;
++
++ /*
++ * IMPORTANT NOTE: The move of more than one process at a time to a
++ * new group has not yet been tested.
++ */
++ cgroup_taskset_for_each(task, css, tset) {
++ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
++ if (ioc) {
++ /*
++ * Handle cgroup change here.
++ */
++ rcu_read_lock();
++ hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node)
++ if (!strncmp(
++ icq->q->elevator->type->elevator_name,
++ "bfq", ELV_NAME_MAX))
++ bfq_bic_change_cgroup(icq_to_bic(icq),
++ css);
++ rcu_read_unlock();
++ put_io_context(ioc);
++ }
++ }
++}
++
++static void bfqio_destroy(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ /*
++ * Since we are destroying the cgroup, there are no more tasks
++ * referencing it, and all the RCU grace periods that may have
++ * referenced it are ended (as the destruction of the parent
++ * cgroup is RCU-safe); bgrp->group_data will not be accessed by
++ * anything else and we don't need any synchronization.
++ */
++ hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node)
++ bfq_destroy_group(bgrp, bfqg);
++
++ BUG_ON(!hlist_empty(&bgrp->group_data));
++
++ kfree(bgrp);
++}
++
++static int bfqio_css_online(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++ mutex_lock(&bfqio_mutex);
++ bgrp->online = true;
++ mutex_unlock(&bfqio_mutex);
++
++ return 0;
++}
++
++static void bfqio_css_offline(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++ mutex_lock(&bfqio_mutex);
++ bgrp->online = false;
++ mutex_unlock(&bfqio_mutex);
++}
++
++struct cgroup_subsys bfqio_subsys = {
++ .name = "bfqio",
++ .css_alloc = bfqio_create,
++ .css_online = bfqio_css_online,
++ .css_offline = bfqio_css_offline,
++ .can_attach = bfqio_can_attach,
++ .attach = bfqio_attach,
++ .css_free = bfqio_destroy,
++ .subsys_id = bfqio_subsys_id,
++ .base_cftypes = bfqio_files,
++};
++#else
++static inline void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static inline struct bfq_group *
++bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ return bfqd->root_group;
++}
++
++static inline void bfq_bfqq_move(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++ bfq_put_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++ kfree(bfqd->root_group);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ int i;
++
++ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++ if (bfqg == NULL)
++ return NULL;
++
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ return bfqg;
++}
++#endif
+diff -Nur linux-3.14.36/block/bfq.h linux-openelec/block/bfq.h
+--- linux-3.14.36/block/bfq.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/block/bfq.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,770 @@
++/*
++ * BFQ-v7r5 for 3.14.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/ioprio.h>
++#include <linux/rbtree.h>
++
++#define BFQ_IOPRIO_CLASSES 3
++#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
++
++#define BFQ_MIN_WEIGHT 1
++#define BFQ_MAX_WEIGHT 1000
++
++#define BFQ_DEFAULT_GRP_WEIGHT 10
++#define BFQ_DEFAULT_GRP_IOPRIO 0
++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ * @active: tree for active entities (i.e., those backlogged).
++ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
++ * @first_idle: idle entity with minimum F_i.
++ * @last_idle: idle entity with maximum F_i.
++ * @vtime: scheduler virtual time.
++ * @wsum: scheduler weight sum; active and idle entities contribute to it.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree. All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++ struct rb_root active;
++ struct rb_root idle;
++
++ struct bfq_entity *first_idle;
++ struct bfq_entity *last_idle;
++
++ u64 vtime;
++ unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ * @in_service_entity: entity in service.
++ * @next_in_service: head-of-the-line entity in the scheduler.
++ * @service_tree: array of service trees, one per ioprio_class.
++ *
++ * bfq_sched_data is the basic scheduler queue. It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as
++ * an intermediate queue on a hierarchical setup.
++ * @next_in_service points to the active entity of the sched_data
++ * service trees that will be scheduled next.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among requests of the same
++ * queue requests are served according to B-WF2Q+.
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_sched_data {
++ struct bfq_entity *in_service_entity;
++ struct bfq_entity *next_in_service;
++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ * with a given weight.
++ * @weight: weight of the entities that this counter refers to.
++ * @num_active: number of active entities with this weight.
++ * @weights_node: weights tree member (see bfq_data's @queue_weights_tree
++ * and @group_weights_tree).
++ */
++struct bfq_weight_counter {
++ short int weight;
++ unsigned int num_active;
++ struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ * @rb_node: service_tree member.
++ * @weight_counter: pointer to the weight counter associated with this entity.
++ * @on_st: flag, true if the entity is on a tree (either the active or
++ * the idle one of its service_tree).
++ * @finish: B-WF2Q+ finish timestamp (aka F_i).
++ * @start: B-WF2Q+ start timestamp (aka S_i).
++ * @tree: tree the entity is enqueued into; %NULL if not on a tree.
++ * @min_start: minimum start time of the (active) subtree rooted at
++ * this entity; used for O(log N) lookups into active trees.
++ * @service: service received during the last round of service.
++ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
++ * @weight: weight of the queue
++ * @parent: parent entity, for hierarchical scheduling.
++ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
++ * associated scheduler queue, %NULL on leaf nodes.
++ * @sched_data: the scheduler queue this entity belongs to.
++ * @ioprio: the ioprio in use.
++ * @new_weight: when a weight change is requested, the new weight value.
++ * @orig_weight: original weight, used to implement weight boosting
++ * @new_ioprio: when an ioprio change is requested, the new ioprio value.
++ * @ioprio_class: the ioprio_class in use.
++ * @new_ioprio_class: when an ioprio_class change is requested, the new
++ * ioprio_class value.
++ * @ioprio_changed: flag, true when the user requested a weight, ioprio or
++ * ioprio_class change.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy. Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not exported to userspace by now. Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @ioprio_changed flag. As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ. When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time to consume their budget and have true sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed. All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++ struct rb_node rb_node;
++ struct bfq_weight_counter *weight_counter;
++
++ int on_st;
++
++ u64 finish;
++ u64 start;
++
++ struct rb_root *tree;
++
++ u64 min_start;
++
++ unsigned long service, budget;
++ unsigned short weight, new_weight;
++ unsigned short orig_weight;
++
++ struct bfq_entity *parent;
++
++ struct bfq_sched_data *my_sched_data;
++ struct bfq_sched_data *sched_data;
++
++ unsigned short ioprio, new_ioprio;
++ unsigned short ioprio_class, new_ioprio_class;
++
++ int ioprio_changed;
++};
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ * @ref: reference counter.
++ * @bfqd: parent bfq_data.
++ * @new_bfqq: shared bfq_queue if queue is cooperating with
++ * one or more other queues.
++ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
++ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
++ * @sort_list: sorted list of pending requests.
++ * @next_rq: if fifo isn't expired, next request to serve.
++ * @queued: nr of requests queued in @sort_list.
++ * @allocated: currently allocated requests.
++ * @meta_pending: pending metadata requests.
++ * @fifo: fifo list of requests in sort_list.
++ * @entity: entity representing this queue in the scheduler.
++ * @max_budget: maximum budget allowed from the feedback mechanism.
++ * @budget_timeout: budget expiration (in jiffies).
++ * @dispatched: number of requests on the dispatch list or inside driver.
++ * @flags: status flags.
++ * @bfqq_list: node for active/idle bfqq list inside our bfqd.
++ * @seek_samples: number of seeks sampled
++ * @seek_total: sum of the distances of the seeks sampled
++ * @seek_mean: mean seek distance
++ * @last_request_pos: position of the last request enqueued
++ * @requests_within_timer: number of consecutive pairs of request completion
++ * and arrival, such that the queue becomes idle
++ * after the completion, but the next request arrives
++ * within an idle time slice; used only if the queue's
++ * IO_bound has been cleared.
++ * @pid: pid of the process owning the queue, used for logging purposes.
++ * @last_wr_start_finish: start time of the current weight-raising period if
++ * the @bfq-queue is being weight-raised, otherwise
++ * finish time of the last weight-raising period
++ * @wr_cur_max_time: current max raising time for this queue
++ * @soft_rt_next_start: minimum time instant such that, only if a new
++ * request is enqueued after this time instant in an
++ * idle @bfq_queue with no outstanding requests, then
++ * the task associated with the queue it is deemed as
++ * soft real-time (see the comments to the function
++ * bfq_bfqq_softrt_next_start())
++ * @last_idle_bklogged: time of the last transition of the @bfq_queue from
++ * idle to backlogged
++ * @service_from_backlogged: cumulative service received from the @bfq_queue
++ * since the last transition from idle to
++ * backlogged
++ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
++ * queue is shared
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with an
++ * io_context or more, if it is async or shared between cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++ atomic_t ref;
++ struct bfq_data *bfqd;
++
++ /* fields for cooperating queues handling */
++ struct bfq_queue *new_bfqq;
++ struct rb_node pos_node;
++ struct rb_root *pos_root;
++
++ struct rb_root sort_list;
++ struct request *next_rq;
++ int queued[2];
++ int allocated[2];
++ int meta_pending;
++ struct list_head fifo;
++
++ struct bfq_entity entity;
++
++ unsigned long max_budget;
++ unsigned long budget_timeout;
++
++ int dispatched;
++
++ unsigned int flags;
++
++ struct list_head bfqq_list;
++
++ unsigned int seek_samples;
++ u64 seek_total;
++ sector_t seek_mean;
++ sector_t last_request_pos;
++
++ unsigned int requests_within_timer;
++
++ pid_t pid;
++ struct bfq_io_cq *bic;
++
++ /* weight-raising fields */
++ unsigned long wr_cur_max_time;
++ unsigned long soft_rt_next_start;
++ unsigned long last_wr_start_finish;
++ unsigned int wr_coeff;
++ unsigned long last_idle_bklogged;
++ unsigned long service_from_backlogged;
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ * @ttime_total: total process thinktime
++ * @ttime_samples: number of thinktime samples
++ * @ttime_mean: average process thinktime
++ */
++struct bfq_ttime {
++ unsigned long last_end_request;
++
++ unsigned long ttime_total;
++ unsigned long ttime_samples;
++ unsigned long ttime_mean;
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ * @icq: associated io_cq structure
++ * @bfqq: array of two process queues, the sync and the async
++ * @ttime: associated @bfq_ttime struct
++ * @wr_time_left: snapshot of the time left before weight raising ends
++ * for the sync queue associated to this process; this
++ * snapshot is taken to remember this value while the weight
++ * raising is suspended because the queue is merged with a
++ * shared queue, and is used to set @raising_cur_max_time
++ * when the queue is split from the shared queue and its
++ * weight is raised again
++ * @saved_idle_window: same purpose as the previous field for the idle
++ * window
++ * @saved_IO_bound: same purpose as the previous two fields for the I/O
++ * bound classification of a queue
++ * @cooperations: counter of consecutive successful queue merges underwent
++ * by any of the process' @bfq_queues
++ * @failed_cooperations: counter of consecutive failed queue merges of any
++ * of the process' @bfq_queues
++ */
++struct bfq_io_cq {
++ struct io_cq icq; /* must be the first member */
++ struct bfq_queue *bfqq[2];
++ struct bfq_ttime ttime;
++ int ioprio;
++
++ unsigned int wr_time_left;
++ unsigned int saved_idle_window;
++ unsigned int saved_IO_bound;
++
++ unsigned int cooperations;
++ unsigned int failed_cooperations;
++};
++
++enum bfq_device_speed {
++ BFQ_BFQD_FAST,
++ BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per device data structure.
++ * @queue: request queue for the managed device.
++ * @root_group: root bfq_group for the device.
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ * determining if two or more queues have interleaving
++ * requests (see bfq_close_cooperator()).
++ * @active_numerous_groups: number of bfq_groups containing more than one
++ * active @bfq_entity.
++ * @queue_weights_tree: rbtree of weight counters of @bfq_queues, sorted by
++ * weight. Used to keep track of whether all @bfq_queues
++ * have the same weight. The tree contains one counter
++ * for each distinct weight associated to some active
++ * and not weight-raised @bfq_queue (see the comments to
++ * the functions bfq_weights_tree_[add|remove] for
++ * further details).
++ * @group_weights_tree: rbtree of non-queue @bfq_entity weight counters, sorted
++ * by weight. Used to keep track of whether all
++ * @bfq_groups have the same weight. The tree contains
++ * one counter for each distinct weight associated to
++ * some active @bfq_group (see the comments to the
++ * functions bfq_weights_tree_[add|remove] for further
++ * details).
++ * @busy_queues: number of bfq_queues containing requests (including the
++ * queue in service, even if it is idling).
++ * @busy_in_flight_queues: number of @bfq_queues containing pending or
++ * in-flight requests, plus the @bfq_queue in
++ * service, even if idle but waiting for the
++ * possible arrival of its next sync request. This
++ * field is updated only if the device is rotational,
++ * but used only if the device is also NCQ-capable.
++ * The reason why the field is updated also for non-
++ * NCQ-capable rotational devices is related to the
++ * fact that the value of @hw_tag may be set also
++ * later than when busy_in_flight_queues may need to
++ * be incremented for the first time(s). Taking also
++ * this possibility into account, to avoid unbalanced
++ * increments/decrements, would imply more overhead
++ * than just updating busy_in_flight_queues
++ * regardless of the value of @hw_tag.
++ * @const_seeky_busy_in_flight_queues: number of constantly-seeky @bfq_queues
++ * (that is, seeky queues that expired
++ * for budget timeout at least once)
++ * containing pending or in-flight
++ * requests, including the in-service
++ * @bfq_queue if constantly seeky. This
++ * field is updated only if the device
++ * is rotational, but used only if the
++ * device is also NCQ-capable (see the
++ * comments to @busy_in_flight_queues).
++ * @wr_busy_queues: number of weight-raised busy @bfq_queues.
++ * @queued: number of queued requests.
++ * @rq_in_driver: number of requests dispatched and waiting for completion.
++ * @sync_flight: number of sync requests in the driver.
++ * @max_rq_in_driver: max number of reqs in driver in the last
++ * @hw_tag_samples completed requests.
++ * @hw_tag_samples: nr of samples used to calculate hw_tag.
++ * @hw_tag: flag set to one if the driver is showing a queueing behavior.
++ * @budgets_assigned: number of budgets assigned.
++ * @idle_slice_timer: timer set when idling for the next sequential request
++ * from the queue in service.
++ * @unplug_work: delayed work to restart dispatching on the request queue.
++ * @in_service_queue: bfq_queue in service.
++ * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue.
++ * @last_position: on-disk position of the last served request.
++ * @last_budget_start: beginning of the last budget.
++ * @last_idling_start: beginning of the last idle slice.
++ * @peak_rate: peak transfer rate observed for a budget.
++ * @peak_rate_samples: number of samples used to calculate @peak_rate.
++ * @bfq_max_budget: maximum budget allotted to a bfq_queue before
++ * rescheduling.
++ * @group_list: list of all the bfq_groups active on the device.
++ * @active_list: list of all the bfq_queues active on the device.
++ * @idle_list: list of all the bfq_queues idle on the device.
++ * @bfq_quantum: max number of requests dispatched per dispatch round.
++ * @bfq_fifo_expire: timeout for async/sync requests; when it expires
++ * requests are served in fifo order.
++ * @bfq_back_penalty: weight of backward seeks wrt forward ones.
++ * @bfq_back_max: maximum allowed backward seek.
++ * @bfq_slice_idle: maximum idling time.
++ * @bfq_user_max_budget: user-configured max budget value
++ * (0 for auto-tuning).
++ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
++ * async queues.
++ * @bfq_timeout: timeout for bfq_queues to consume their budget; used to
++ * to prevent seeky queues to impose long latencies to well
++ * behaved ones (this also implies that seeky queues cannot
++ * receive guarantees in the service domain; after a timeout
++ * they are charged for the whole allocated budget, to try
++ * to preserve a behavior reasonably fair among them, but
++ * without service-domain guarantees).
++ * @bfq_coop_thresh: number of queue merges after which a @bfq_queue is
++ * no more granted any weight-raising.
++ * @bfq_failed_cooperations: number of consecutive failed cooperation
++ * chances after which weight-raising is restored
++ * to a queue subject to more than bfq_coop_thresh
++ * queue merges.
++ * @bfq_requests_within_timer: number of consecutive requests that must be
++ * issued within the idle time slice to set
++ * again idling to a queue which was marked as
++ * non-I/O-bound (see the definition of the
++ * IO_bound flag for further details).
++ * @bfq_wr_coeff: Maximum factor by which the weight of a weight-raised
++ * queue is multiplied
++ * @bfq_wr_max_time: maximum duration of a weight-raising period (jiffies)
++ * @bfq_wr_rt_max_time: maximum duration for soft real-time processes
++ * @bfq_wr_min_idle_time: minimum idle period after which weight-raising
++ * may be reactivated for a queue (in jiffies)
++ * @bfq_wr_min_inter_arr_async: minimum period between request arrivals
++ * after which weight-raising may be
++ * reactivated for an already busy queue
++ * (in jiffies)
++ * @bfq_wr_max_softrt_rate: max service-rate for a soft real-time queue,
++ * sectors per seconds
++ * @RT_prod: cached value of the product R*T used for computing the maximum
++ * duration of the weight raising automatically
++ * @device_speed: device-speed class for the low-latency heuristic
++ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++ struct request_queue *queue;
++
++ struct bfq_group *root_group;
++ struct rb_root rq_pos_tree;
++
++#ifdef CONFIG_CGROUP_BFQIO
++ int active_numerous_groups;
++#endif
++
++ struct rb_root queue_weights_tree;
++ struct rb_root group_weights_tree;
++
++ int busy_queues;
++ int busy_in_flight_queues;
++ int const_seeky_busy_in_flight_queues;
++ int wr_busy_queues;
++ int queued;
++ int rq_in_driver;
++ int sync_flight;
++
++ int max_rq_in_driver;
++ int hw_tag_samples;
++ int hw_tag;
++
++ int budgets_assigned;
++
++ struct timer_list idle_slice_timer;
++ struct work_struct unplug_work;
++
++ struct bfq_queue *in_service_queue;
++ struct bfq_io_cq *in_service_bic;
++
++ sector_t last_position;
++
++ ktime_t last_budget_start;
++ ktime_t last_idling_start;
++ int peak_rate_samples;
++ u64 peak_rate;
++ unsigned long bfq_max_budget;
++
++ struct hlist_head group_list;
++ struct list_head active_list;
++ struct list_head idle_list;
++
++ unsigned int bfq_quantum;
++ unsigned int bfq_fifo_expire[2];
++ unsigned int bfq_back_penalty;
++ unsigned int bfq_back_max;
++ unsigned int bfq_slice_idle;
++ u64 bfq_class_idle_last_service;
++
++ unsigned int bfq_user_max_budget;
++ unsigned int bfq_max_budget_async_rq;
++ unsigned int bfq_timeout[2];
++
++ unsigned int bfq_coop_thresh;
++ unsigned int bfq_failed_cooperations;
++ unsigned int bfq_requests_within_timer;
++
++ bool low_latency;
++
++ /* parameters of the low_latency heuristics */
++ unsigned int bfq_wr_coeff;
++ unsigned int bfq_wr_max_time;
++ unsigned int bfq_wr_rt_max_time;
++ unsigned int bfq_wr_min_idle_time;
++ unsigned long bfq_wr_min_inter_arr_async;
++ unsigned int bfq_wr_max_softrt_rate;
++ u64 RT_prod;
++ enum bfq_device_speed device_speed;
++
++ struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++ BFQ_BFQQ_FLAG_busy = 0, /* has requests or is in service */
++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
++ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
++ BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
++ BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */
++ BFQ_BFQQ_FLAG_IO_bound, /*
++ * bfqq has timed-out at least once
++ * having consumed at most 2/10 of
++ * its budget
++ */
++ BFQ_BFQQ_FLAG_constantly_seeky, /*
++ * bfqq has proved to be slow and
++ * seeky until budget timeout
++ */
++ BFQ_BFQQ_FLAG_softrt_update, /*
++ * may need softrt-next-start
++ * update
++ */
++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
++ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */
++ BFQ_BFQQ_FLAG_just_split, /* queue has just been split */
++};
++
++#define BFQ_BFQQ_FNS(name) \
++static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
++{ \
++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
++}
++
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(prio_changed);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(budget_new);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(constantly_seeky);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(just_split);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
++
++/* Logging facilities. */
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
++
++#define bfq_log(bfqd, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++ BFQ_BFQQ_TOO_IDLE = 0, /*
++ * queue has been idling for
++ * too long
++ */
++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
++};
++
++#ifdef CONFIG_CGROUP_BFQIO
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ * both bfq_queues and bfq_groups).
++ * @group_node: node to be inserted into the bfqio_cgroup->group_data
++ * list of the containing cgroup's bfqio_cgroup.
++ * @bfqd_node: node to be inserted into the @bfqd->group_list list
++ * of the groups active on the same device; used for cleanup.
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ * the group, one queue per ioprio value per ioprio_class,
++ * except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ * to avoid too many special cases during group creation/
++ * migration.
++ * @active_entities: number of active entities belonging to the group;
++ * unused for the root group. Used to know whether there
++ * are groups with more than one active @bfq_entity
++ * (see the comments to the function
++ * bfq_bfqq_must_not_expire()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ * o @group_node is protected by the bfqio_cgroup lock, and is accessed
++ * via RCU from its readers.
++ * o @bfqd is protected by the queue lock, RCU is used to access it
++ * from the readers.
++ * o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++ struct bfq_entity entity;
++ struct bfq_sched_data sched_data;
++
++ struct hlist_node group_node;
++ struct hlist_node bfqd_node;
++
++ void *bfqd;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct bfq_entity *my_entity;
++
++ int active_entities;
++};
++
++/**
++ * struct bfqio_cgroup - bfq cgroup data structure.
++ * @css: subsystem state for bfq in the containing cgroup.
++ * @online: flag marked when the subsystem is inserted.
++ * @weight: cgroup weight.
++ * @ioprio: cgroup ioprio.
++ * @ioprio_class: cgroup ioprio_class.
++ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
++ * @group_data: list containing the bfq_group belonging to this cgroup.
++ *
++ * @group_data is accessed using RCU, with @lock protecting the updates,
++ * @ioprio and @ioprio_class are protected by @lock.
++ */
++struct bfqio_cgroup {
++ struct cgroup_subsys_state css;
++ bool online;
++
++ unsigned short weight, ioprio, ioprio_class;
++
++ spinlock_t lock;
++ struct hlist_head group_data;
++};
++#else
++struct bfq_group {
++ struct bfq_sched_data sched_data;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++};
++#endif
++
++static inline struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sched_data = entity->sched_data;
++ unsigned int idx = entity->ioprio_class - 1;
++
++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++ BUG_ON(sched_data == NULL);
++
++ return sched_data->service_tree + idx;
++}
++
++static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic,
++ int is_sync)
++{
++ return bic->bfqq[!!is_sync];
++}
++
++static inline void bic_set_bfqq(struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, int is_sync)
++{
++ bic->bfqq[!!is_sync] = bfqq;
++}
++
++static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++ return bic->icq.q->elevator->elevator_data;
++}
++
++/**
++ * bfq_get_bfqd_locked - get a lock to a bfqd using a RCU protected pointer.
++ * @ptr: a pointer to a bfqd.
++ * @flags: storage for the flags to be saved.
++ *
++ * This function allows bfqg->bfqd to be protected by the
++ * queue lock of the bfqd they reference; the pointer is dereferenced
++ * under RCU, so the storage for bfqd is assured to be safe as long
++ * as the RCU read side critical section does not end. After the
++ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
++ * sure that no other writer accessed it. If we raced with a writer,
++ * the function returns NULL, with the queue unlocked, otherwise it
++ * returns the dereferenced pointer, with the queue locked.
++ */
++static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
++ unsigned long *flags)
++{
++ struct bfq_data *bfqd;
++
++ rcu_read_lock();
++ bfqd = rcu_dereference(*(struct bfq_data **)ptr);
++
++ if (bfqd != NULL) {
++ spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
++ if (*ptr == bfqd)
++ goto out;
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++ }
++
++ bfqd = NULL;
++out:
++ rcu_read_unlock();
++ return bfqd;
++}
++
++static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
++ unsigned long *flags)
++{
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++}
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg, int is_sync,
++ struct bfq_io_cq *bic, gfp_t gfp_mask);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg);
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+diff -Nur linux-3.14.36/block/bfq-ioc.c linux-openelec/block/bfq-ioc.c
+--- linux-3.14.36/block/bfq-ioc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/block/bfq-ioc.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,36 @@
++/*
++ * BFQ: I/O context handling.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++ /* bic->icq is the first member, %NULL will convert to %NULL */
++ return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ *
++ * Queue lock must be held.
++ */
++static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++ struct io_context *ioc)
++{
++ if (ioc)
++ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
++ return NULL;
++}
+diff -Nur linux-3.14.36/block/bfq-iosched.c linux-openelec/block/bfq-iosched.c
+--- linux-3.14.36/block/bfq-iosched.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/block/bfq-iosched.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,3919 @@
++/*
++ * Budget Fair Queueing (BFQ) disk scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based on
++ * the slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
++ * measured in number of sectors, to processes instead of time slices. The
++ * device is not granted to the in-service process for a given time slice,
++ * but until it has exhausted its assigned budget. This change from the time
++ * to the service domain allows BFQ to distribute the device throughput
++ * among processes as desired, without any distortion due to ZBR, workload
++ * fluctuations or other factors. BFQ uses an ad hoc internal scheduler,
++ * called B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated to processes. Thanks to the
++ * accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to
++ * I/O-bound processes issuing sequential requests (to boost the
++ * throughput), and yet guarantee a low latency to interactive and soft
++ * real-time applications.
++ *
++ * BFQ is described in [1], where also a reference to the initial, more
++ * theoretical paper on BFQ can be found. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness
++ * with the BFQ Disk I/O Scheduler'',
++ * Proceedings of the 5th Annual International Systems and Storage
++ * Conference (SYSTOR '12), June 2012.
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ * Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ * First: A Flexible and Accurate Mechanism for Proportional Share
++ * Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "bfq.h"
++#include "blk.h"
++
++/* Max number of dispatches in one round of service. */
++static const int bfq_quantum = 4;
++
++/* Expiration time of sync (0) and async (1) requests, in jiffies. */
++static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = 16 * 1024;
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in jiffies. */
++static int bfq_slice_idle = HZ / 125;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = 16 * 1024;
++static const int bfq_max_budget_async_rq = 4;
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout_sync = HZ / 8;
++static int bfq_timeout_async = HZ / 25;
++
++struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ms), we consider thinktime immediate. */
++#define BFQ_MIN_TT 2
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
++
++/* Min samples used for peak rate estimation (for autotuning). */
++#define BFQ_PEAK_RATE_SAMPLES 32
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ * SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0]
++ * are the reference values for a slow/fast rotational device, whereas
++ * R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for
++ * a slow/fast non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes.
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1536, 10752};
++static int R_fast[2] = {17415, 34791};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
++
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
++ IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
++ IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * We regard a request as SYNC, if either it's a read or has the SYNC bit
++ * set (in which case it could also be a direct WRITE).
++ */
++static inline int bfq_bio_sync(struct bio *bio)
++{
++ if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
++ return 1;
++
++ return 0;
++}
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
++ */
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++ if (bfqd->queued != 0) {
++ bfq_log(bfqd, "schedule dispatch");
++ kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work);
++ }
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 that is best served now.
++ * We choose the request that is closesr to the head right now. Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++ struct request *rq1,
++ struct request *rq2,
++ sector_t last)
++{
++ sector_t s1, s2, d1 = 0, d2 = 0;
++ unsigned long back_max;
++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
++ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
++
++ if (rq1 == NULL || rq1 == rq2)
++ return rq2;
++ if (rq2 == NULL)
++ return rq1;
++
++ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++ return rq1;
++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++ return rq2;
++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++ return rq1;
++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++ return rq2;
++
++ s1 = blk_rq_pos(rq1);
++ s2 = blk_rq_pos(rq2);
++
++ /*
++ * By definition, 1KiB is 2 sectors.
++ */
++ back_max = bfqd->bfq_back_max * 2;
++
++ /*
++ * Strict one way elevator _except_ in the case where we allow
++ * short backward seeks which are biased as twice the cost of a
++ * similar forward seek.
++ */
++ if (s1 >= last)
++ d1 = s1 - last;
++ else if (s1 + back_max >= last)
++ d1 = (last - s1) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ1_WRAP;
++
++ if (s2 >= last)
++ d2 = s2 - last;
++ else if (s2 + back_max >= last)
++ d2 = (last - s2) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ2_WRAP;
++
++ /* Found required data */
++
++ /*
++ * By doing switch() on the bit mask "wrap" we avoid having to
++ * check two variables for all permutations: --> faster!
++ */
++ switch (wrap) {
++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++ if (d1 < d2)
++ return rq1;
++ else if (d2 < d1)
++ return rq2;
++ else {
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++
++ case BFQ_RQ2_WRAP:
++ return rq1;
++ case BFQ_RQ1_WRAP:
++ return rq2;
++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++ default:
++ /*
++ * Since both rqs are wrapped,
++ * start with the one that's further behind head
++ * (--> only *one* back seek required),
++ * since back seek takes more time than forward.
++ */
++ if (s1 <= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++}
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++ sector_t sector, struct rb_node **ret_parent,
++ struct rb_node ***rb_link)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *bfqq = NULL;
++
++ parent = NULL;
++ p = &root->rb_node;
++ while (*p) {
++ struct rb_node **n;
++
++ parent = *p;
++ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++ /*
++ * Sort strictly based on sector. Smallest to the left,
++ * largest to the right.
++ */
++ if (sector > blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_right;
++ else if (sector < blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_left;
++ else
++ break;
++ p = n;
++ bfqq = NULL;
++ }
++
++ *ret_parent = parent;
++ if (rb_link)
++ *rb_link = p;
++
++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ (long long unsigned)sector,
++ bfqq != NULL ? bfqq->pid : 0);
++
++ return bfqq;
++}
++
++static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *__bfqq;
++
++ if (bfqq->pos_root != NULL) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++
++ if (bfq_class_idle(bfqq))
++ return;
++ if (!bfqq->next_rq)
++ return;
++
++ bfqq->pos_root = &bfqd->rq_pos_tree;
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++ blk_rq_pos(bfqq->next_rq), &parent, &p);
++ if (__bfqq == NULL) {
++ rb_link_node(&bfqq->pos_node, parent, p);
++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++ } else
++ bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++ BUG_ON(!bfqd->hw_tag);
++ /*
++ * For weights to differ, at least one of the trees must contain
++ * at least two nodes.
++ */
++ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++ (bfqd->queue_weights_tree.rb_node->rb_left ||
++ bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_CGROUP_BFQIO
++ ) ||
++ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++ (bfqd->group_weights_tree.rb_node->rb_left ||
++ bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++ );
++}
++
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++ /*
++ * Do not insert if:
++ * - the device does not support queueing;
++ * - the entity is already associated with a counter, which happens if:
++ * 1) the entity is associated with a queue, 2) a request arrival
++ * has caused the queue to become both non-weight-raised, and hence
++ * change its weight, and backlogged; in this respect, each
++ * of the two events causes an invocation of this function,
++ * 3) this is the invocation of this function caused by the second
++ * event. This second invocation is actually useless, and we handle
++ * this fact by exiting immediately. More efficient or clearer
++ * solutions might possibly be adopted.
++ */
++ if (!bfqd->hw_tag || entity->weight_counter)
++ return;
++
++ while (*new) {
++ struct bfq_weight_counter *__counter = container_of(*new,
++ struct bfq_weight_counter,
++ weights_node);
++ parent = *new;
++
++ if (entity->weight == __counter->weight) {
++ entity->weight_counter = __counter;
++ goto inc_counter;
++ }
++ if (entity->weight < __counter->weight)
++ new = &((*new)->rb_left);
++ else
++ new = &((*new)->rb_right);
++ }
++
++ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++ GFP_ATOMIC);
++ entity->weight_counter->weight = entity->weight;
++ rb_link_node(&entity->weight_counter->weights_node, parent, new);
++ rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++ entity->weight_counter->num_active++;
++}
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ /*
++ * Check whether the entity is actually associated with a counter.
++ * In fact, the device may not be considered NCQ-capable for a while,
++ * which implies that no insertion in the weight trees is performed,
++ * after which the device may start to be deemed NCQ-capable, and hence
++ * this function may start to be invoked. This may cause the function
++ * to be invoked for entities that are not associated with any counter.
++ */
++ if (!entity->weight_counter)
++ return;
++
++ BUG_ON(RB_EMPTY_ROOT(root));
++ BUG_ON(entity->weight_counter->weight != entity->weight);
++
++ BUG_ON(!entity->weight_counter->num_active);
++ entity->weight_counter->num_active--;
++ if (entity->weight_counter->num_active > 0)
++ goto reset_entity_pointer;
++
++ rb_erase(&entity->weight_counter->weights_node, root);
++ kfree(entity->weight_counter);
++
++reset_entity_pointer:
++ entity->weight_counter = NULL;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct rb_node *rbnext = rb_next(&last->rb_node);
++ struct rb_node *rbprev = rb_prev(&last->rb_node);
++ struct request *next = NULL, *prev = NULL;
++
++ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++ if (rbprev != NULL)
++ prev = rb_entry_rq(rbprev);
++
++ if (rbnext != NULL)
++ next = rb_entry_rq(rbnext);
++ else {
++ rbnext = rb_first(&bfqq->sort_list);
++ if (rbnext && rbnext != &last->rb_node)
++ next = rb_entry_rq(rbnext);
++ }
++
++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static inline unsigned long bfq_serv_to_charge(struct request *rq,
++ struct bfq_queue *bfqq)
++{
++ return blk_rq_sectors(rq) *
++ (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) *
++ bfq_async_charge_factor));
++}
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown). We do this because if the queue has not enough
++ * budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct request *next_rq = bfqq->next_rq;
++ unsigned long new_budget;
++
++ if (next_rq == NULL)
++ return;
++
++ if (bfqq == bfqd->in_service_queue)
++ /*
++ * In order not to break guarantees, budgets cannot be
++ * changed after an entity has been selected.
++ */
++ return;
++
++ BUG_ON(entity->tree != &st->active);
++ BUG_ON(entity == entity->sched_data->in_service_entity);
++
++ new_budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ if (entity->budget != new_budget) {
++ entity->budget = new_budget;
++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ new_budget);
++ bfq_activate_bfqq(bfqd, bfqq);
++ }
++}
++
++static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++ u64 dur;
++
++ if (bfqd->bfq_wr_max_time > 0)
++ return bfqd->bfq_wr_max_time;
++
++ dur = bfqd->RT_prod;
++ do_div(dur, bfqd->peak_rate);
++
++ return dur;
++}
++
++static inline unsigned
++bfq_bfqq_cooperations(struct bfq_queue *bfqq)
++{
++ return bfqq->bic ? bfqq->bic->cooperations : 0;
++}
++
++static inline void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++ if (bic->saved_idle_window)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++ if (bic->saved_IO_bound)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ else
++ bfq_clear_bfqq_IO_bound(bfqq);
++ if (bic->wr_time_left && bfqq->bfqd->low_latency &&
++ bic->cooperations < bfqq->bfqd->bfq_coop_thresh) {
++ /*
++ * Start a weight raising period with the duration given by
++ * the raising_time_left snapshot.
++ */
++ if (bfq_bfqq_busy(bfqq))
++ bfqq->bfqd->wr_busy_queues++;
++ bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bic->wr_time_left;
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->entity.ioprio_changed = 1;
++ }
++ /*
++ * Clear wr_time_left to prevent bfq_bfqq_save_state() from
++ * getting confused about the queue's need of a weight-raising
++ * period.
++ */
++ bic->wr_time_left = 0;
++}
++
++/*
++ * Must be called with the queue_lock held.
++ */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
++
++static void bfq_add_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *next_rq, *prev;
++ unsigned long old_wr_coeff = bfqq->wr_coeff;
++ int idle_for_long_time = 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
++ bfqq->queued[rq_is_sync(rq)]++;
++ bfqd->queued++;
++
++ elv_rb_add(&bfqq->sort_list, rq);
++
++ /*
++ * Check if this request is a better next-serve candidate.
++ */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++ BUG_ON(next_rq == NULL);
++ bfqq->next_rq = next_rq;
++
++ /*
++ * Adjust priority tree position, if next_rq changes.
++ */
++ if (prev != bfqq->next_rq)
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++
++ if (!bfq_bfqq_busy(bfqq)) {
++ int soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++ bfq_bfqq_cooperations(bfqq) < bfqd->bfq_coop_thresh &&
++ time_is_before_jiffies(bfqq->soft_rt_next_start);
++ idle_for_long_time = bfq_bfqq_cooperations(bfqq) <
++ bfqd->bfq_coop_thresh &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ bfqd->bfq_wr_min_idle_time);
++ entity->budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++
++ if (!bfq_bfqq_IO_bound(bfqq)) {
++ if (time_before(jiffies,
++ RQ_BIC(rq)->ttime.last_end_request +
++ bfqd->bfq_slice_idle)) {
++ bfqq->requests_within_timer++;
++ if (bfqq->requests_within_timer >=
++ bfqd->bfq_requests_within_timer)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ } else
++ bfqq->requests_within_timer = 0;
++ }
++
++ if (!bfqd->low_latency)
++ goto add_bfqq_busy;
++
++ if (bfq_bfqq_just_split(bfqq))
++ goto set_ioprio_changed;
++
++ /*
++ * If the queue:
++ * - is not being boosted,
++ * - has been idle for enough time,
++ * - is not a sync queue or is linked to a bfq_io_cq (it is
++ * shared "for its nature" or it is not shared and its
++ * requests have not been redirected to a shared queue)
++ * start a weight-raising period.
++ */
++ if (old_wr_coeff == 1 && (idle_for_long_time || soft_rt) &&
++ (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ if (idle_for_long_time)
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ else
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ } else if (old_wr_coeff > 1) {
++ if (idle_for_long_time)
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ else if (bfq_bfqq_cooperations(bfqq) >=
++ bfqd->bfq_coop_thresh ||
++ (bfqq->wr_cur_max_time ==
++ bfqd->bfq_wr_rt_max_time &&
++ !soft_rt)) {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->
++ wr_cur_max_time));
++ } else if (time_before(
++ bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time,
++ jiffies +
++ bfqd->bfq_wr_rt_max_time) &&
++ soft_rt) {
++ /*
++ *
++ * The remaining weight-raising time is lower
++ * than bfqd->bfq_wr_rt_max_time, which means
++ * that the application is enjoying weight
++ * raising either because deemed soft-rt in
++ * the near past, or because deemed interactive
++ * a long ago.
++ * In both cases, resetting now the current
++ * remaining weight-raising time for the
++ * application to the weight-raising duration
++ * for soft rt applications would not cause any
++ * latency increase for the application (as the
++ * new duration would be higher than the
++ * remaining time).
++ *
++ * In addition, the application is now meeting
++ * the requirements for being deemed soft rt.
++ * In the end we can correctly and safely
++ * (re)charge the weight-raising duration for
++ * the application with the weight-raising
++ * duration for soft rt applications.
++ *
++ * In particular, doing this recharge now, i.e.,
++ * before the weight-raising period for the
++ * application finishes, reduces the probability
++ * of the following negative scenario:
++ * 1) the weight of a soft rt application is
++ * raised at startup (as for any newly
++ * created application),
++ * 2) since the application is not interactive,
++ * at a certain time weight-raising is
++ * stopped for the application,
++ * 3) at that time the application happens to
++ * still have pending requests, and hence
++ * is destined to not have a chance to be
++ * deemed soft rt before these requests are
++ * completed (see the comments to the
++ * function bfq_bfqq_softrt_next_start()
++ * for details on soft rt detection),
++ * 4) these pending requests experience a high
++ * latency because the application is not
++ * weight-raised while they are pending.
++ */
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ }
++ }
++set_ioprio_changed:
++ if (old_wr_coeff != bfqq->wr_coeff)
++ entity->ioprio_changed = 1;
++add_bfqq_busy:
++ bfqq->last_idle_bklogged = jiffies;
++ bfqq->service_from_backlogged = 0;
++ bfq_clear_bfqq_softrt_update(bfqq);
++ bfq_add_bfqq_busy(bfqd, bfqq);
++ } else {
++ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++ time_is_before_jiffies(
++ bfqq->last_wr_start_finish +
++ bfqd->bfq_wr_min_inter_arr_async)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++ bfqd->wr_busy_queues++;
++ entity->ioprio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "non-idle wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++ if (prev != bfqq->next_rq)
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ if (bfqd->low_latency &&
++ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 ||
++ idle_for_long_time))
++ bfqq->last_wr_start_finish = jiffies;
++}
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++ struct bio *bio)
++{
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (bic == NULL)
++ return NULL;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ if (bfqq != NULL)
++ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++ return NULL;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ bfqd->rq_in_driver++;
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
++ (long long unsigned)bfqd->last_position);
++}
++
++static inline void bfq_deactivate_request(struct request_queue *q,
++ struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ BUG_ON(bfqd->rq_in_driver == 0);
++ bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ if (bfqq->next_rq == rq) {
++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ list_del_init(&rq->queuelist);
++ BUG_ON(bfqq->queued[sync] == 0);
++ bfqq->queued[sync]--;
++ bfqd->queued--;
++ elv_rb_del(&bfqq->sort_list, rq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue)
++ bfq_del_bfqq_busy(bfqd, bfqq, 1);
++ /*
++ * Remove queue from request-position tree as it is empty.
++ */
++ if (bfqq->pos_root != NULL) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++ }
++
++ if (rq->cmd_flags & REQ_META) {
++ BUG_ON(bfqq->meta_pending == 0);
++ bfqq->meta_pending--;
++ }
++}
++
++static int bfq_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *__rq;
++
++ __rq = bfq_find_rq_fmerge(bfqd, bio);
++ if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
++ *req = __rq;
++ return ELEVATOR_FRONT_MERGE;
++ }
++
++ return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++ int type)
++{
++ if (type == ELEVATOR_FRONT_MERGE &&
++ rb_prev(&req->rb_node) &&
++ blk_rq_pos(req) <
++ blk_rq_pos(container_of(rb_prev(&req->rb_node),
++ struct request, rb_node))) {
++ struct bfq_queue *bfqq = RQ_BFQQ(req);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *prev, *next_rq;
++
++ /* Reposition request in its sort_list */
++ elv_rb_del(&bfqq->sort_list, req);
++ elv_rb_add(&bfqq->sort_list, req);
++ /* Choose next request to be served for bfqq */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++ bfqd->last_position);
++ BUG_ON(next_rq == NULL);
++ bfqq->next_rq = next_rq;
++ /*
++ * If next_rq changes, update both the queue's budget to
++ * fit the new request and the queue's position in its
++ * rq_pos_tree.
++ */
++ if (prev != bfqq->next_rq) {
++ bfq_updated_next_req(bfqd, bfqq);
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++ }
++ }
++}
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * Reposition in fifo if next is older than rq.
++ */
++ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++ time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
++ list_move(&rq->queuelist, &next->queuelist);
++ rq_set_fifo_time(rq, rq_fifo_time(next));
++ }
++
++ if (bfqq->next_rq == next)
++ bfqq->next_rq = rq;
++
++ bfq_remove_request(next);
++}
++
++/* Must be called with bfqq != NULL */
++static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq == NULL);
++ if (bfq_bfqq_busy(bfqq))
++ bfqq->bfqd->wr_busy_queues--;
++ bfqq->wr_coeff = 1;
++ bfqq->wr_cur_max_time = 0;
++ /* Trigger a weight change on the next activation of the queue */
++ bfqq->entity.ioprio_changed = 1;
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ if (bfqg->async_bfqq[i][j] != NULL)
++ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++ if (bfqg->async_idle_bfqq != NULL)
++ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ bfq_end_wr_async(bfqd);
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
++{
++ if (request)
++ return blk_rq_pos(io_struct);
++ else
++ return ((struct bio *)io_struct)->bi_iter.bi_sector;
++}
++
++static inline sector_t bfq_dist_from(sector_t pos1,
++ sector_t pos2)
++{
++ if (pos1 >= pos2)
++ return pos1 - pos2;
++ else
++ return pos2 - pos1;
++}
++
++static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
++ sector_t sector)
++{
++ return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
++ BFQQ_SEEK_THR;
++}
++
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
++{
++ struct rb_root *root = &bfqd->rq_pos_tree;
++ struct rb_node *parent, *node;
++ struct bfq_queue *__bfqq;
++
++ if (RB_EMPTY_ROOT(root))
++ return NULL;
++
++ /*
++ * First, if we find a request starting at the end of the last
++ * request, choose it.
++ */
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++ if (__bfqq != NULL)
++ return __bfqq;
++
++ /*
++ * If the exact sector wasn't found, the parent of the NULL leaf
++ * will contain the closest sector (rq_pos_tree sorted by
++ * next_request position).
++ */
++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ if (blk_rq_pos(__bfqq->next_rq) < sector)
++ node = rb_next(&__bfqq->pos_node);
++ else
++ node = rb_prev(&__bfqq->pos_node);
++ if (node == NULL)
++ return NULL;
++
++ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ return NULL;
++}
++
++/*
++ * bfqd - obvious
++ * cur_bfqq - passed in so that we don't decide that the current queue
++ * is closely cooperating with itself
++ * sector - used as a reference point to search for a close queue
++ */
++static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
++ struct bfq_queue *cur_bfqq,
++ sector_t sector)
++{
++ struct bfq_queue *bfqq;
++
++ if (bfq_class_idle(cur_bfqq))
++ return NULL;
++ if (!bfq_bfqq_sync(cur_bfqq))
++ return NULL;
++ if (BFQQ_SEEKY(cur_bfqq))
++ return NULL;
++
++ /* If device has only one backlogged bfq_queue, don't search. */
++ if (bfqd->busy_queues == 1)
++ return NULL;
++
++ /*
++ * We should notice if some of the queues are cooperating, e.g.
++ * working closely on the same area of the disk. In that case,
++ * we can group them together and don't waste time idling.
++ */
++ bfqq = bfqq_close(bfqd, sector);
++ if (bfqq == NULL || bfqq == cur_bfqq)
++ return NULL;
++
++ /*
++ * Do not merge queues from different bfq_groups.
++ */
++ if (bfqq->entity.parent != cur_bfqq->entity.parent)
++ return NULL;
++
++ /*
++ * It only makes sense to merge sync queues.
++ */
++ if (!bfq_bfqq_sync(bfqq))
++ return NULL;
++ if (BFQQ_SEEKY(bfqq))
++ return NULL;
++
++ /*
++ * Do not merge queues of different priority classes.
++ */
++ if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
++ return NULL;
++
++ return bfqq;
++}
++
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return NULL;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return NULL;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++
++ /*
++ * Merging is just a redirection: the requests of the process
++ * owning one of the two queues are redirected to the other queue.
++ * The latter queue, in its turn, is set as shared if this is the
++ * first time that the requests of some process are redirected to
++ * it.
++ *
++ * We redirect bfqq to new_bfqq and not the opposite, because we
++ * are in the context of the process owning bfqq, hence we have
++ * the io_cq of this process. So we can immediately configure this
++ * io_cq to redirect the requests of the process to new_bfqq.
++ *
++ * NOTE, even if new_bfqq coincides with the in-service queue, the
++ * io_cq of new_bfqq is not available, because, if the in-service
++ * queue is shared, bfqd->in_service_bic may not point to the
++ * io_cq of the in-service queue.
++ * Redirecting the requests of the process owning bfqq to the
++ * currently in-service queue is in any case the best option, as
++ * we feed the in-service queue with new requests close to the
++ * last request served and, by doing so, hopefully increase the
++ * throughput.
++ */
++ bfqq->new_bfqq = new_bfqq;
++ atomic_add(process_refs, &new_bfqq->ref);
++ return new_bfqq;
++}
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service queue
++ * or with a close queue among the scheduled queues.
++ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ void *io_struct, bool request)
++{
++ struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
++ if (!io_struct)
++ return NULL;
++
++ in_service_bfqq = bfqd->in_service_queue;
++
++ if (in_service_bfqq == NULL || in_service_bfqq == bfqq ||
++ !bfqd->in_service_bic)
++ goto check_scheduled;
++
++ if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq))
++ goto check_scheduled;
++
++ if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq))
++ goto check_scheduled;
++
++ if (in_service_bfqq->entity.parent != bfqq->entity.parent)
++ goto check_scheduled;
++
++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) {
++ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++ if (new_bfqq != NULL)
++ return new_bfqq; /* Merge with in-service queue */
++ }
++
++ /*
++ * Check whether there is a cooperator among currently scheduled
++ * queues. The only thing we need is that the bio/request is not
++ * NULL, as we need it to establish whether a cooperator exists.
++ */
++check_scheduled:
++ new_bfqq = bfq_close_cooperator(bfqd, bfqq,
++ bfq_io_struct_pos(io_struct, request));
++ if (new_bfqq)
++ return bfq_setup_merge(bfqq, new_bfqq);
++
++ return NULL;
++}
++
++static inline void
++bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic == NULL, the queue is already shared or its requests
++ * have already been redirected to a shared queue; both idle window
++ * and weight raising state have already been saved. Do nothing.
++ */
++ if (bfqq->bic == NULL)
++ return;
++ if (bfqq->bic->wr_time_left)
++ /*
++ * This is the queue of a just-started process, and would
++ * deserve weight raising: we set wr_time_left to the full
++ * weight-raising duration to trigger weight-raising when
++ * and if the queue is split and the first request of the
++ * queue is enqueued.
++ */
++ bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd);
++ else if (bfqq->wr_coeff > 1) {
++ unsigned long wr_duration =
++ jiffies - bfqq->last_wr_start_finish;
++ /*
++ * It may happen that a queue's weight raising period lasts
++ * longer than its wr_cur_max_time, as weight raising is
++ * handled only when a request is enqueued or dispatched (it
++ * does not use any timer). If the weight raising period is
++ * about to end, don't save it.
++ */
++ if (bfqq->wr_cur_max_time <= wr_duration)
++ bfqq->bic->wr_time_left = 0;
++ else
++ bfqq->bic->wr_time_left =
++ bfqq->wr_cur_max_time - wr_duration;
++ /*
++ * The bfq_queue is becoming shared or the requests of the
++ * process owning the queue are being redirected to a shared
++ * queue. Stop the weight raising period of the queue, as in
++ * both cases it should not be owned by an interactive or
++ * soft real-time application.
++ */
++ bfq_bfqq_end_wr(bfqq);
++ } else
++ bfqq->bic->wr_time_left = 0;
++ bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++ bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++ bfqq->bic->cooperations++;
++ bfqq->bic->failed_cooperations = 0;
++}
++
++static inline void
++bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic has a non-NULL value, the bic to which it belongs
++ * is about to begin using a shared bfq_queue.
++ */
++ if (bfqq->bic)
++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (long unsigned)new_bfqq->pid);
++ /* Save weight raising and idle window of the merged queues */
++ bfq_bfqq_save_state(bfqq);
++ bfq_bfqq_save_state(new_bfqq);
++ if (bfq_bfqq_IO_bound(bfqq))
++ bfq_mark_bfqq_IO_bound(new_bfqq);
++ bfq_clear_bfqq_IO_bound(bfqq);
++ /*
++ * Grab a reference to the bic, to prevent it from being destroyed
++ * before being possibly touched by a bfq_split_bfqq().
++ */
++ bfq_get_bic_reference(bfqq);
++ bfq_get_bic_reference(new_bfqq);
++ /*
++ * Merge queues (that is, let bic redirect its requests to new_bfqq)
++ */
++ bic_set_bfqq(bic, new_bfqq, 1);
++ bfq_mark_bfqq_coop(new_bfqq);
++ /*
++ * new_bfqq now belongs to at least two bics (it is a shared queue):
++ * set new_bfqq->bic to NULL. bfqq either:
++ * - does not belong to any bic any more, and hence bfqq->bic must
++ * be set to NULL, or
++ * - is a queue whose owning bics have already been redirected to a
++ * different queue, hence the queue is destined to not belong to
++ * any bic soon and bfqq->bic is already NULL (therefore the next
++ * assignment causes no harm).
++ */
++ new_bfqq->bic = NULL;
++ bfqq->bic = NULL;
++ bfq_put_queue(bfqq);
++}
++
++static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq)
++{
++ struct bfq_io_cq *bic = bfqq->bic;
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) {
++ bic->failed_cooperations++;
++ if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations)
++ bic->cooperations = 0;
++ }
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq, *new_bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++ return 0;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (bic == NULL)
++ return 0;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ /*
++ * We take advantage of this function to perform an early merge
++ * of the queues of possible cooperating processes.
++ */
++ if (bfqq != NULL) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq != NULL) {
++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++ /*
++ * If we get here, the bio will be queued in the
++ * shared queue, i.e., new_bfqq, so use new_bfqq
++ * to decide whether bio and rq can be merged.
++ */
++ bfqq = new_bfqq;
++ } else
++ bfq_bfqq_increase_failed_cooperations(bfqq);
++ }
++
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq != NULL) {
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_mark_bfqq_budget_new(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_in_service_queue, cur-budget = %lu",
++ bfqq->entity.budget);
++ }
++
++ bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++ __bfq_set_in_service_queue(bfqd, bfqq);
++ return bfqq;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < 194)
++ return bfq_default_max_budget;
++ else
++ return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < 194)
++ return bfq_default_max_budget / 32;
++ else
++ return bfqd->bfq_max_budget / 32;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ struct bfq_io_cq *bic;
++ unsigned long sl;
++
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Processes have exited, don't wait. */
++ bic = bfqd->in_service_bic;
++ if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
++ return;
++
++ bfq_mark_bfqq_wait_request(bfqq);
++
++ /*
++ * We don't want to idle for seeks, but we do want to allow
++ * fair distribution of slice time for a process doing back-to-back
++ * seeks. So allow a little bit of time for him to submit a new rq.
++ *
++ * To prevent processes with (partly) seeky workloads from
++ * being too ill-treated, grant them a small fraction of the
++ * assigned budget before reducing the waiting time to
++ * BFQ_MIN_TT. This happened to help reduce latency.
++ */
++ sl = bfqd->bfq_slice_idle;
++ /*
++ * Unless the queue is being weight-raised, grant only minimum idle
++ * time if the queue either has been seeky for long enough or has
++ * already proved to be constantly seeky.
++ */
++ if (bfq_sample_valid(bfqq->seek_samples) &&
++ ((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
++ bfq_max_budget(bfqq->bfqd) / 8) ||
++ bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1)
++ sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
++ else if (bfqq->wr_coeff > 1)
++ sl = sl * 3;
++ bfqd->last_idling_start = ktime_get();
++ mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
++ bfq_log(bfqd, "arm idle: %u/%u ms",
++ jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
++}
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the disk
++ * throughput (always guaranteed with a time slice scheme as in CFQ).
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ unsigned int timeout_coeff;
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++ timeout_coeff = 1;
++ else
++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++ bfqd->last_budget_start = ktime_get();
++
++ bfq_clear_bfqq_budget_new(bfqq);
++ bfqq->budget_timeout = jiffies +
++ bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
++
++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
++ timeout_coeff));
++}
++
++/*
++ * Move request from internal lists to the request queue dispatch list.
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * For consistency, the next instruction should have been executed
++ * after removing the request from the queue and dispatching it.
++ * We execute instead this instruction before bfq_remove_request()
++ * (and hence introduce a temporary inconsistency), for efficiency.
++ * In fact, in a forced_dispatch, this prevents two counters related
++ * to bfqq->dispatched to risk to be uselessly decremented if bfqq
++ * is not in service, and then to be incremented again after
++ * incrementing bfqq->dispatched.
++ */
++ bfqq->dispatched++;
++ bfq_remove_request(rq);
++ elv_dispatch_sort(q, rq);
++
++ if (bfq_bfqq_sync(bfqq))
++ bfqd->sync_flight++;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
++{
++ struct request *rq = NULL;
++
++ if (bfq_bfqq_fifo_expire(bfqq))
++ return NULL;
++
++ bfq_mark_bfqq_fifo_expire(bfqq);
++
++ if (list_empty(&bfqq->fifo))
++ return NULL;
++
++ rq = rq_entry_fifo(bfqq->fifo.next);
++
++ if (time_before(jiffies, rq_fifo_time(rq)))
++ return NULL;
++
++ return rq;
++}
++
++static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ return entity->budget - entity->service;
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ __bfq_bfqd_reset_in_service(bfqd);
++
++ /*
++ * If this bfqq is shared between multiple processes, check
++ * to make sure that those processes are still issuing I/Os
++ * within the mean seek distance. If not, it may be time to
++ * break the queues apart again.
++ */
++ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++ bfq_mark_bfqq_split_coop(bfqq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * Overloading budget_timeout field to store the time
++ * at which the queue remains with no backlog; used by
++ * the weight-raising mechanism.
++ */
++ bfqq->budget_timeout = jiffies;
++ bfq_del_bfqq_busy(bfqd, bfqq, 1);
++ } else {
++ bfq_activate_bfqq(bfqd, bfqq);
++ /*
++ * Resort priority tree of potential close cooperators.
++ */
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++ }
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget. See the body for detailed
++ * comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ enum bfqq_expiration reason)
++{
++ struct request *next_rq;
++ unsigned long budget, min_budget;
++
++ budget = bfqq->max_budget;
++ min_budget = bfq_min_budget(bfqd);
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
++ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
++ budget, bfq_min_budget(bfqd));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++ if (bfq_bfqq_sync(bfqq)) {
++ switch (reason) {
++ /*
++ * Caveat: in all the following cases we trade latency
++ * for throughput.
++ */
++ case BFQ_BFQQ_TOO_IDLE:
++ /*
++ * This is the only case where we may reduce
++ * the budget: if there is no request of the
++ * process still waiting for completion, then
++ * we assume (tentatively) that the timer has
++ * expired because the batch of requests of
++ * the process could have been served with a
++ * smaller budget. Hence, betting that
++ * process will behave in the same way when it
++ * becomes backlogged again, we reduce its
++ * next budget. As long as we guess right,
++ * this budget cut reduces the latency
++ * experienced by the process.
++ *
++ * However, if there are still outstanding
++ * requests, then the process may have not yet
++ * issued its next request just because it is
++ * still waiting for the completion of some of
++ * the still outstanding ones. So in this
++ * subcase we do not reduce its budget, on the
++ * contrary we increase it to possibly boost
++ * the throughput, as discussed in the
++ * comments to the BUDGET_TIMEOUT case.
++ */
++ if (bfqq->dispatched > 0) /* still outstanding reqs */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ else {
++ if (budget > 5 * min_budget)
++ budget -= 4 * min_budget;
++ else
++ budget = min_budget;
++ }
++ break;
++ case BFQ_BFQQ_BUDGET_TIMEOUT:
++ /*
++ * We double the budget here because: 1) it
++ * gives the chance to boost the throughput if
++ * this is not a seeky process (which may have
++ * bumped into this timeout because of, e.g.,
++ * ZBR), 2) together with charge_full_budget
++ * it helps give seeky processes higher
++ * timestamps, and hence be served less
++ * frequently.
++ */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_BUDGET_EXHAUSTED:
++ /*
++ * The process still has backlog, and did not
++ * let either the budget timeout or the disk
++ * idling timeout expire. Hence it is not
++ * seeky, has a short thinktime and may be
++ * happy with a higher budget too. So
++ * definitely increase the budget of this good
++ * candidate to boost the disk throughput.
++ */
++ budget = min(budget * 4, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_NO_MORE_REQUESTS:
++ /*
++ * Leave the budget unchanged.
++ */
++ default:
++ return;
++ }
++ } else /* async queue */
++ /* async queues get always the maximum possible budget
++ * (their ability to dispatch is limited by
++ * @bfqd->bfq_max_budget_async_rq).
++ */
++ budget = bfqd->bfq_max_budget;
++
++ bfqq->max_budget = budget;
++
++ if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
++ bfqq->max_budget > bfqd->bfq_max_budget)
++ bfqq->max_budget = bfqd->bfq_max_budget;
++
++ /*
++ * Make sure that we have enough budget for the next request.
++ * Since the finish time of the bfqq must be kept in sync with
++ * the budget, be sure to call __bfq_bfqq_expire() after the
++ * update.
++ */
++ next_rq = bfqq->next_rq;
++ if (next_rq != NULL)
++ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ else
++ bfqq->entity.budget = bfqq->max_budget;
++
++ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
++ next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
++ bfqq->entity.budget);
++}
++
++static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
++{
++ unsigned long max_budget;
++
++ /*
++ * The max_budget calculated when autotuning is equal to the
++ * amount of sectors transfered in timeout_sync at the
++ * estimated peak rate.
++ */
++ max_budget = (unsigned long)(peak_rate * 1000 *
++ timeout >> BFQ_RATE_SHIFT);
++
++ return max_budget;
++}
++
++/*
++ * In addition to updating the peak rate, checks whether the process
++ * is "slow", and returns 1 if so. This slow flag is used, in addition
++ * to the budget timeout, to reduce the amount of service provided to
++ * seeky processes, and hence reduce their chances to lower the
++ * throughput. See the code for more details.
++ */
++static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int compensate, enum bfqq_expiration reason)
++{
++ u64 bw, usecs, expected, timeout;
++ ktime_t delta;
++ int update = 0;
++
++ if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
++ return 0;
++
++ if (compensate)
++ delta = bfqd->last_idling_start;
++ else
++ delta = ktime_get();
++ delta = ktime_sub(delta, bfqd->last_budget_start);
++ usecs = ktime_to_us(delta);
++
++ /* Don't trust short/unrealistic values. */
++ if (usecs < 100 || usecs >= LONG_MAX)
++ return 0;
++
++ /*
++ * Calculate the bandwidth for the last slice. We use a 64 bit
++ * value to store the peak rate, in sectors per usec in fixed
++ * point math. We do so to have enough precision in the estimate
++ * and to avoid overflows.
++ */
++ bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
++ do_div(bw, (unsigned long)usecs);
++
++ timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++ /*
++ * Use only long (> 20ms) intervals to filter out spikes for
++ * the peak rate estimation.
++ */
++ if (usecs > 20000) {
++ if (bw > bfqd->peak_rate ||
++ (!BFQQ_SEEKY(bfqq) &&
++ reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
++ bfq_log(bfqd, "measured bw =%llu", bw);
++ /*
++ * To smooth oscillations use a low-pass filter with
++ * alpha=7/8, i.e.,
++ * new_rate = (7/8) * old_rate + (1/8) * bw
++ */
++ do_div(bw, 8);
++ if (bw == 0)
++ return 0;
++ bfqd->peak_rate *= 7;
++ do_div(bfqd->peak_rate, 8);
++ bfqd->peak_rate += bw;
++ update = 1;
++ bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
++ }
++
++ update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
++
++ if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
++ bfqd->peak_rate_samples++;
++
++ if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
++ update) {
++ int dev_type = blk_queue_nonrot(bfqd->queue);
++ if (bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd->peak_rate,
++ timeout);
++ bfq_log(bfqd, "new max_budget=%lu",
++ bfqd->bfq_max_budget);
++ }
++ if (bfqd->device_speed == BFQ_BFQD_FAST &&
++ bfqd->peak_rate < device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_SLOW;
++ bfqd->RT_prod = R_slow[dev_type] *
++ T_slow[dev_type];
++ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++ bfqd->peak_rate > device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_FAST;
++ bfqd->RT_prod = R_fast[dev_type] *
++ T_fast[dev_type];
++ }
++ }
++ }
++
++ /*
++ * If the process has been served for a too short time
++ * interval to let its possible sequential accesses prevail on
++ * the initial seek time needed to move the disk head on the
++ * first sector it requested, then give the process a chance
++ * and for the moment return false.
++ */
++ if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
++ return 0;
++
++ /*
++ * A process is considered ``slow'' (i.e., seeky, so that we
++ * cannot treat it fairly in the service domain, as it would
++ * slow down too much the other processes) if, when a slice
++ * ends for whatever reason, it has received service at a
++ * rate that would not be high enough to complete the budget
++ * before the budget timeout expiration.
++ */
++ expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
++
++ /*
++ * Caveat: processes doing IO in the slower disk zones will
++ * tend to be slow(er) even if not seeky. And the estimated
++ * peak rate will actually be an average over the disk
++ * surface. Hence, to not be too harsh with unlucky processes,
++ * we keep a budget/3 margin of safety before declaring a
++ * process slow.
++ */
++ return expected > (4 * bfqq->entity.budget) / 3;
++}
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to playback or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application from
++ * being deemed as soft real-time, a further rule is used in the computation of
++ * soft_rt_next_start: soft_rt_next_start must be higher than the current
++ * time plus the maximum time for which the arrival of a request is waited
++ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ * HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ * for a while, then suddenly 'jump' by several units to recover the lost
++ * increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ return max(bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + bfqq->bfqd->bfq_slice_idle + 4);
++}
++
++/*
++ * Return the largest-possible time instant such that, for as long as possible,
++ * the current time will be lower than this time instant according to the macro
++ * time_is_before_jiffies().
++ */
++static inline unsigned long bfq_infinity_from_now(unsigned long now)
++{
++ return now + ULONG_MAX / 2;
++}
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ *
++ * If the process associated to the queue is slow (i.e., seeky), or in
++ * case of budget timeout, or, finally, if it is async, we
++ * artificially charge it an entire budget (independently of the
++ * actual service it received). As a consequence, the queue will get
++ * higher timestamps than the correct ones upon reactivation, and
++ * hence it will be rescheduled as if it had received more service
++ * than what it actually received. In the end, this class of processes
++ * will receive less service in proportion to how slowly they consume
++ * their budgets (and hence how seriously they tend to lower the
++ * throughput).
++ *
++ * In contrast, when a queue expires because it has been idling for
++ * too much or because it exhausted its budget, we do not touch the
++ * amount of service it has received. Hence when the queue will be
++ * reactivated and its timestamps updated, the latter will be in sync
++ * with the actual service received by the queue until expiration.
++ *
++ * Charging a full budget to the first type of queues and the exact
++ * service to the others has the effect of using the WF2Q+ policy to
++ * schedule the former on a timeslice basis, without violating the
++ * service domain guarantees of the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ int compensate,
++ enum bfqq_expiration reason)
++{
++ int slow;
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /* Update disk peak rate for autotuning and check whether the
++ * process is slow (see bfq_update_peak_rate).
++ */
++ slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
++
++ /*
++ * As above explained, 'punish' slow (i.e., seeky), timed-out
++ * and async queues, to favor sequential sync workloads.
++ *
++ * Processes doing I/O in the slower disk zones will tend to be
++ * slow(er) even if not seeky. Hence, since the estimated peak
++ * rate is actually an average over the disk surface, these
++ * processes may timeout just for bad luck. To avoid punishing
++ * them we do not charge a full budget to a process that
++ * succeeded in consuming at least 2/3 of its budget.
++ */
++ if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
++ bfq_bfqq_charge_full_budget(bfqq);
++
++ bfqq->service_from_backlogged += bfqq->entity.service;
++
++ if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ !bfq_bfqq_constantly_seeky(bfqq)) {
++ bfq_mark_bfqq_constantly_seeky(bfqq);
++ if (!blk_queue_nonrot(bfqd->queue))
++ bfqd->const_seeky_busy_in_flight_queues++;
++ }
++
++ if (reason == BFQ_BFQQ_TOO_IDLE &&
++ bfqq->entity.service <= 2 * bfqq->entity.budget / 10 )
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (bfqd->low_latency && bfqq->wr_coeff == 1)
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * If we get here, and there are no outstanding requests,
++ * then the request pattern is isochronous (see the comments
++ * to the function bfq_bfqq_softrt_next_start()). Hence we
++ * can compute soft_rt_next_start. If, instead, the queue
++ * still has outstanding requests, then we have to wait
++ * for the completion of all the outstanding requests to
++ * discover whether the request pattern is actually
++ * isochronous.
++ */
++ if (bfqq->dispatched == 0)
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++ else {
++ /*
++ * The application is still waiting for the
++ * completion of one or more requests:
++ * prevent it from possibly being incorrectly
++ * deemed as soft real-time by setting its
++ * soft_rt_next_start to infinity. In fact,
++ * without this assignment, the application
++ * would be incorrectly deemed as soft
++ * real-time if:
++ * 1) it issued a new request before the
++ * completion of all its in-flight
++ * requests, and
++ * 2) at that time, its soft_rt_next_start
++ * happened to be in the past.
++ */
++ bfqq->soft_rt_next_start =
++ bfq_infinity_from_now(jiffies);
++ /*
++ * Schedule an update of soft_rt_next_start to when
++ * the task may be discovered to be isochronous.
++ */
++ bfq_mark_bfqq_softrt_update(bfqq);
++ }
++ }
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
++ slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
++
++ /*
++ * Increase, decrease or leave budget unchanged according to
++ * reason.
++ */
++ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++ __bfq_bfqq_expire(bfqd, bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_budget_new(bfqq) ||
++ time_before(jiffies, bfqq->budget_timeout))
++ return 0;
++ return 1;
++}
++
++/*
++ * If we expire a queue that is waiting for the arrival of a new
++ * request, we may prevent the fictitious timestamp back-shifting that
++ * allows the guarantees of the queue to be preserved (see [1] for
++ * this tricky aspect). Hence we return true only if this condition
++ * does not hold, or if the queue is slow enough to deserve only to be
++ * kicked off for preserving a high throughput.
++*/
++static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "may_budget_timeout: wait_request %d left %d timeout %d",
++ bfq_bfqq_wait_request(bfqq),
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++ bfq_bfqq_budget_timeout(bfqq));
++
++ return (!bfq_bfqq_wait_request(bfqq) ||
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++ &&
++ bfq_bfqq_budget_timeout(bfqq);
++}
++
++/*
++ * Device idling is allowed only for the queues for which this function
++ * returns true. For this reason, the return value of this function plays a
++ * critical role for both throughput boosting and service guarantees. The
++ * return value is computed through a logical expression. In this rather
++ * long comment, we try to briefly describe all the details and motivations
++ * behind the components of this logical expression.
++ *
++ * First, the expression may be true only for sync queues. Besides, if
++ * bfqq is also being weight-raised, then the expression always evaluates
++ * to true, as device idling is instrumental for preserving low-latency
++ * guarantees (see [1]). Otherwise, the expression evaluates to true only
++ * if bfqq has a non-null idle window and at least one of the following
++ * two conditions holds. The first condition is that the device is not
++ * performing NCQ, because idling the device most certainly boosts the
++ * throughput if this condition holds and bfqq has been granted a non-null
++ * idle window. The second compound condition is made of the logical AND of
++ * two components.
++ *
++ * The first component is true only if there is no weight-raised busy
++ * queue. This guarantees that the device is not idled for a sync non-
++ * weight-raised queue when there are busy weight-raised queues. The former
++ * is then expired immediately if empty. Combined with the timestamping
++ * rules of BFQ (see [1] for details), this causes sync non-weight-raised
++ * queues to get a lower number of requests served, and hence to ask for a
++ * lower number of requests from the request pool, before the busy weight-
++ * raised queues get served again.
++ *
++ * This is beneficial for the processes associated with weight-raised
++ * queues, when the request pool is saturated (e.g., in the presence of
++ * write hogs). In fact, if the processes associated with the other queues
++ * ask for requests at a lower rate, then weight-raised processes have a
++ * higher probability to get a request from the pool immediately (or at
++ * least soon) when they need one. Hence they have a higher probability to
++ * actually get a fraction of the disk throughput proportional to their
++ * high weight. This is especially true with NCQ-capable drives, which
++ * enqueue several requests in advance and further reorder internally-
++ * queued requests.
++ *
++ * In the end, mistreating non-weight-raised queues when there are busy
++ * weight-raised queues seems to mitigate starvation problems in the
++ * presence of heavy write workloads and NCQ, and hence to guarantee a
++ * higher application and system responsiveness in these hostile scenarios.
++ *
++ * If the first component of the compound condition is instead true, i.e.,
++ * there is no weight-raised busy queue, then the second component of the
++ * compound condition takes into account service-guarantee and throughput
++ * issues related to NCQ (recall that the compound condition is evaluated
++ * only if the device is detected as supporting NCQ).
++ *
++ * As for service guarantees, allowing the drive to enqueue more than one
++ * request at a time, and hence delegating de facto final scheduling
++ * decisions to the drive's internal scheduler, causes loss of control on
++ * the actual request service order. In this respect, when the drive is
++ * allowed to enqueue more than one request at a time, the service
++ * distribution enforced by the drive's internal scheduler is likely to
++ * coincide with the desired device-throughput distribution only in the
++ * following, perfectly symmetric, scenario:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ * weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ * number of children.
++ *
++ * Even in such a scenario, sequential I/O may still receive a preferential
++ * treatment, but this is not likely to be a big issue with flash-based
++ * devices, because of their non-dramatic loss of throughput with random
++ * I/O. Things do differ with HDDs, for which additional care is taken, as
++ * explained after completing the discussion for flash-based devices.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore BFQ evaluates instead the following stronger sub-conditions,
++ * for which it is much easier to maintain the needed state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, hence no state needs
++ * to be maintained in this case.
++ *
++ * According to the above considerations, the second component of the
++ * compound condition evaluates to true if any of the above symmetry
++ * sub-condition does not hold, or the device is not flash-based. Therefore,
++ * if also the first component is true, then idling is allowed for a sync
++ * queue. These are the only sub-conditions considered if the device is
++ * flash-based, as, for such a device, it is sensible to force idling only
++ * for service-guarantee issues. In fact, as for throughput, idling
++ * NCQ-capable flash-based devices would not boost the throughput even
++ * with sequential I/O; rather it would lower the throughput in proportion
++ * to how fast the device is. In the end, (only) if all the three
++ * sub-conditions hold and the device is flash-based, the compound
++ * condition evaluates to false and therefore no idling is performed.
++ *
++ * As already said, things change with a rotational device, where idling
++ * boosts the throughput with sequential I/O (even with NCQ). Hence, for
++ * such a device the second component of the compound condition evaluates
++ * to true also if the following additional sub-condition does not hold:
++ * the queue is constantly seeky. Unfortunately, this different behavior
++ * with respect to flash-based devices causes an additional asymmetry: if
++ * some sync queues enjoy idling and some other sync queues do not, then
++ * the latter get a low share of the device throughput, simply because the
++ * former get many requests served after being set as in service, whereas
++ * the latter do not. As a consequence, to guarantee the desired throughput
++ * distribution, on HDDs the compound expression evaluates to true (and
++ * hence device idling is performed) also if the following last symmetry
++ * condition does not hold: no other queue is benefiting from idling. Also
++ * this last condition is actually replaced with a simpler-to-maintain and
++ * stronger condition: there is no busy queue which is not constantly seeky
++ * (and hence may also benefit from idling).
++ *
++ * To sum up, when all the required symmetry and throughput-boosting
++ * sub-conditions hold, the second component of the compound condition
++ * evaluates to false, and hence no idling is performed. This helps to
++ * keep the drives' internal queues full on NCQ-capable devices, and hence
++ * to boost the throughput, without causing 'almost' any loss of service
++ * guarantees. The 'almost' follows from the fact that, if the internal
++ * queue of one such device is filled while all the sub-conditions hold,
++ * but at some point in time some sub-condition stops to hold, then it may
++ * become impossible to let requests be served in the new desired order
++ * until all the requests already queued in the device have been served.
++ */
++static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++#define symmetric_scenario (!bfqd->active_numerous_groups && \
++ !bfq_differentiated_weights(bfqd))
++#else
++#define symmetric_scenario (!bfq_differentiated_weights(bfqd))
++#endif
++#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
++ bfqd->busy_in_flight_queues == \
++ bfqd->const_seeky_busy_in_flight_queues)
++/*
++ * Condition for expiring a non-weight-raised queue (and hence not idling
++ * the device).
++ */
++#define cond_for_expiring_non_wr (bfqd->hw_tag && \
++ (bfqd->wr_busy_queues > 0 || \
++ (symmetric_scenario && \
++ (blk_queue_nonrot(bfqd->queue) || \
++ cond_for_seeky_on_ncq_hdd))))
++
++ return bfq_bfqq_sync(bfqq) &&
++ (bfq_bfqq_IO_bound(bfqq) || bfqq->wr_coeff > 1) &&
++ (bfqq->wr_coeff > 1 ||
++ (bfq_bfqq_idle_window(bfqq) &&
++ !cond_for_expiring_non_wr)
++ );
++}
++
++/*
++ * If the in-service queue is empty but sync, and the function
++ * bfq_bfqq_must_not_expire returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the disk must be idled to wait for the possible arrival of a new
++ * request for the queue.
++ * See the comments to the function bfq_bfqq_must_not_expire for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_must_not_expire itself
++ * returns true.
++ */
++static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
++ bfq_bfqq_must_not_expire(bfqq);
++}
++
++/*
++ * Select a queue for service. If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++ struct request *next_rq;
++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq == NULL)
++ goto new_queue;
++
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++ if (bfq_may_expire_for_budg_timeout(bfqq) &&
++ !timer_pending(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_must_idle(bfqq))
++ goto expire;
++
++ next_rq = bfqq->next_rq;
++ /*
++ * If bfqq has requests queued and it has enough budget left to
++ * serve them, keep the queue, otherwise expire it.
++ */
++ if (next_rq != NULL) {
++ if (bfq_serv_to_charge(next_rq, bfqq) >
++ bfq_bfqq_budget_left(bfqq)) {
++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++ goto expire;
++ } else {
++ /*
++ * The idle timer may be pending because we may
++ * not disable disk idling even when a new request
++ * arrives.
++ */
++ if (timer_pending(&bfqd->idle_slice_timer)) {
++ /*
++ * If we get here: 1) at least a new request
++ * has arrived but we have not disabled the
++ * timer because the request was too small,
++ * 2) then the block layer has unplugged
++ * the device, causing the dispatch to be
++ * invoked.
++ *
++ * Since the device is unplugged, now the
++ * requests are probably large enough to
++ * provide a reasonable throughput.
++ * So we disable idling.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++ }
++ goto keep_queue;
++ }
++ }
++
++ /*
++ * No requests pending. If the in-service queue still has requests
++ * in flight (possibly waiting for a completion) or is idling for a
++ * new request, then keep it.
++ */
++ if (timer_pending(&bfqd->idle_slice_timer) ||
++ (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) {
++ bfqq = NULL;
++ goto keep_queue;
++ }
++
++ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, 0, reason);
++new_queue:
++ bfqq = bfq_set_in_service_queue(bfqd);
++ bfq_log(bfqd, "select_queue: new queue %d returned",
++ bfqq != NULL ? bfqq->pid : 0);
++keep_queue:
++ return bfqq;
++}
++
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++ entity->orig_weight * bfqq->wr_coeff);
++ if (entity->ioprio_changed)
++ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
++ /*
++ * If too much time has elapsed from the beginning
++ * of this weight-raising period, or the queue has
++ * exceeded the acceptable number of cooperations,
++ * stop it.
++ */
++ if (bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh ||
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time)) {
++ bfqq->last_wr_start_finish = jiffies;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ bfqq->last_wr_start_finish,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ bfq_bfqq_end_wr(bfqq);
++ }
++ }
++ /* Update weight both if it must be raised and if it must be lowered */
++ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++ __bfq_entity_update_weight_prio(
++ bfq_entity_service_tree(entity),
++ entity);
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++ struct request *rq;
++ unsigned long service_to_charge;
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Follow expired path, else get first next available. */
++ rq = bfq_check_fifo(bfqq);
++ if (rq == NULL)
++ rq = bfqq->next_rq;
++ service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++ if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
++ /*
++ * This may happen if the next rq is chosen in fifo order
++ * instead of sector order. The budget is properly
++ * dimensioned to be always sufficient to serve the next
++ * request only if it is chosen in sector order. The reason
++ * is that it would be quite inefficient and little useful
++ * to always make sure that the budget is large enough to
++ * serve even the possible next rq in fifo order.
++ * In fact, requests are seldom served in fifo order.
++ *
++ * Expire the queue for budget exhaustion, and make sure
++ * that the next act_budget is enough to serve the next
++ * request, even if it comes from the fifo expired path.
++ */
++ bfqq->next_rq = rq;
++ /*
++ * Since this dispatch is failed, make sure that
++ * a new one will be performed
++ */
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++ goto expire;
++ }
++
++ /* Finally, insert request into driver dispatch list. */
++ bfq_bfqq_served(bfqq, service_to_charge);
++ bfq_dispatch_insert(bfqd->queue, rq);
++
++ bfq_update_wr_data(bfqd, bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %u sec req (%llu), budg left %lu",
++ blk_rq_sectors(rq),
++ (long long unsigned)blk_rq_pos(rq),
++ bfq_bfqq_budget_left(bfqq));
++
++ dispatched++;
++
++ if (bfqd->in_service_bic == NULL) {
++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++ bfqd->in_service_bic = RQ_BIC(rq);
++ }
++
++ if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
++ dispatched >= bfqd->bfq_max_budget_async_rq) ||
++ bfq_class_idle(bfqq)))
++ goto expire;
++
++ return dispatched;
++
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
++ return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++
++ while (bfqq->next_rq != NULL) {
++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++ dispatched++;
++ }
++
++ BUG_ON(!list_empty(&bfqq->fifo));
++ return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *n;
++ struct bfq_service_tree *st;
++ int dispatched = 0;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq != NULL)
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ /*
++ * Loop through classes, and be careful to leave the scheduler
++ * in a consistent state, as feedback mechanisms and vtime
++ * updates cannot be disabled during the process.
++ */
++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++ st = bfq_entity_service_tree(&bfqq->entity);
++
++ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++ bfqq->max_budget = bfq_max_budget(bfqd);
++
++ bfq_forget_idle(st);
++ }
++
++ BUG_ON(bfqd->busy_queues != 0);
++
++ return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq;
++ int max_dispatch;
++
++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++ if (bfqd->busy_queues == 0)
++ return 0;
++
++ if (unlikely(force))
++ return bfq_forced_dispatch(bfqd);
++
++ bfqq = bfq_select_queue(bfqd);
++ if (bfqq == NULL)
++ return 0;
++
++ max_dispatch = bfqd->bfq_quantum;
++ if (bfq_class_idle(bfqq))
++ max_dispatch = 1;
++
++ if (!bfq_bfqq_sync(bfqq))
++ max_dispatch = bfqd->bfq_max_budget_async_rq;
++
++ if (bfqq->dispatched >= max_dispatch) {
++ if (bfqd->busy_queues > 1)
++ return 0;
++ if (bfqq->dispatched >= 4 * max_dispatch)
++ return 0;
++ }
++
++ if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
++ return 0;
++
++ bfq_clear_bfqq_wait_request(bfqq);
++ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++ if (!bfq_dispatch_request(bfqd, bfqq))
++ return 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)",
++ bfqq->pid, max_dispatch);
++
++ return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits. Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ BUG_ON(atomic_read(&bfqq->ref) <= 0);
++
++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
++ atomic_read(&bfqq->ref));
++ if (!atomic_dec_and_test(&bfqq->ref))
++ return;
++
++ BUG_ON(rb_first(&bfqq->sort_list) != NULL);
++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++ BUG_ON(bfqq->entity.tree != NULL);
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqd->in_service_queue == bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++ kmem_cache_free(bfq_pool, bfqq);
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *__bfqq, *next;
++
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++ */
++ __bfqq = bfqq->new_bfqq;
++ while (__bfqq) {
++ if (__bfqq == bfqq)
++ break;
++ next = __bfqq->new_bfqq;
++ bfq_put_queue(__bfqq);
++ __bfqq = next;
++ }
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq == bfqd->in_service_queue) {
++ __bfq_bfqq_expire(bfqd, bfqq);
++ bfq_schedule_dispatch(bfqd);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++}
++
++static inline void bfq_init_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++
++ bic->ttime.last_end_request = jiffies;
++ /*
++ * A newly created bic indicates that the process has just
++ * started doing I/O, and is probably mapping into memory its
++ * executable and libraries: it definitely needs weight raising.
++ * There is however the possibility that the process performs,
++ * for a while, I/O close to some other process. EQM intercepts
++ * this behavior and may merge the queue corresponding to the
++ * process with some other queue, BEFORE the weight of the queue
++ * is raised. Merged queues are not weight-raised (they are assumed
++ * to belong to processes that benefit only from high throughput).
++ * If the merge is basically the consequence of an accident, then
++ * the queue will be split soon and will get back its old weight.
++ * It is then important to write down somewhere that this queue
++ * does need weight raising, even if it did not make it to get its
++ * weight raised before being merged. To this purpose, we overload
++ * the field raising_time_left and assign 1 to it, to mark the queue
++ * as needing weight raising.
++ */
++ bic->wr_time_left = 1;
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++ if (bic->bfqq[BLK_RW_ASYNC]) {
++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
++ bic->bfqq[BLK_RW_ASYNC] = NULL;
++ }
++
++ if (bic->bfqq[BLK_RW_SYNC]) {
++ /*
++ * If the bic is using a shared queue, put the reference
++ * taken on the io_context when the bic started using a
++ * shared bfq_queue.
++ */
++ if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
++ put_io_context(icq->ioc);
++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
++ bic->bfqq[BLK_RW_SYNC] = NULL;
++ }
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++ struct task_struct *tsk = current;
++ int ioprio_class;
++
++ if (!bfq_bfqq_prio_changed(bfqq))
++ return;
++
++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ switch (ioprio_class) {
++ default:
++ dev_err(bfqq->bfqd->queue->backing_dev_info.dev,
++ "bfq: bad prio %x\n", ioprio_class);
++ case IOPRIO_CLASS_NONE:
++ /*
++ * No prio set, inherit CPU scheduling settings.
++ */
++ bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
++ bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
++ break;
++ case IOPRIO_CLASS_RT:
++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
++ break;
++ case IOPRIO_CLASS_BE:
++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
++ break;
++ case IOPRIO_CLASS_IDLE:
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
++ bfqq->entity.new_ioprio = 7;
++ bfq_clear_bfqq_idle_window(bfqq);
++ break;
++ }
++
++ bfqq->entity.ioprio_changed = 1;
++
++ bfq_clear_bfqq_prio_changed(bfqq);
++}
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd;
++ struct bfq_queue *bfqq, *new_bfqq;
++ struct bfq_group *bfqg;
++ unsigned long uninitialized_var(flags);
++ int ioprio = bic->icq.ioc->ioprio;
++
++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++ &flags);
++ /*
++ * This condition may trigger on a newly created bic, be sure to
++ * drop the lock before returning.
++ */
++ if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
++ goto out;
++
++ bfqq = bic->bfqq[BLK_RW_ASYNC];
++ if (bfqq != NULL) {
++ bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
++ sched_data);
++ new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
++ GFP_ATOMIC);
++ if (new_bfqq != NULL) {
++ bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
++ bfq_log_bfqq(bfqd, bfqq,
++ "changed_ioprio: bfqq %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++ }
++
++ bfqq = bic->bfqq[BLK_RW_SYNC];
++ if (bfqq != NULL)
++ bfq_mark_bfqq_prio_changed(bfqq);
++
++ bic->ioprio = ioprio;
++
++out:
++ bfq_put_bfqd_unlock(bfqd, &flags);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ pid_t pid, int is_sync)
++{
++ RB_CLEAR_NODE(&bfqq->entity.rb_node);
++ INIT_LIST_HEAD(&bfqq->fifo);
++
++ atomic_set(&bfqq->ref, 0);
++ bfqq->bfqd = bfqd;
++
++ bfq_mark_bfqq_prio_changed(bfqq);
++
++ if (is_sync) {
++ if (!bfq_class_idle(bfqq))
++ bfq_mark_bfqq_idle_window(bfqq);
++ bfq_mark_bfqq_sync(bfqq);
++ }
++ bfq_mark_bfqq_IO_bound(bfqq);
++
++ /* Tentative initial value to trade off between thr and lat */
++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++ bfqq->pid = pid;
++
++ bfqq->wr_coeff = 1;
++ bfqq->last_wr_start_finish = 0;
++ /*
++ * Set to the value for which bfqq will not be deemed as
++ * soft rt when it becomes backlogged.
++ */
++ bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies);
++}
++
++static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int is_sync,
++ struct bfq_io_cq *bic,
++ gfp_t gfp_mask)
++{
++ struct bfq_queue *bfqq, *new_bfqq = NULL;
++
++retry:
++ /* bic always exists here */
++ bfqq = bic_to_bfqq(bic, is_sync);
++
++ /*
++ * Always try a new alloc if we fall back to the OOM bfqq
++ * originally, since it should just be a temporary situation.
++ */
++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++ bfqq = NULL;
++ if (new_bfqq != NULL) {
++ bfqq = new_bfqq;
++ new_bfqq = NULL;
++ } else if (gfp_mask & __GFP_WAIT) {
++ spin_unlock_irq(bfqd->queue->queue_lock);
++ new_bfqq = kmem_cache_alloc_node(bfq_pool,
++ gfp_mask | __GFP_ZERO,
++ bfqd->queue->node);
++ spin_lock_irq(bfqd->queue->queue_lock);
++ if (new_bfqq != NULL)
++ goto retry;
++ } else {
++ bfqq = kmem_cache_alloc_node(bfq_pool,
++ gfp_mask | __GFP_ZERO,
++ bfqd->queue->node);
++ }
++
++ if (bfqq != NULL) {
++ bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
++ bfq_log_bfqq(bfqd, bfqq, "allocated");
++ } else {
++ bfqq = &bfqd->oom_bfqq;
++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++ }
++
++ bfq_init_prio_data(bfqq, bic);
++ bfq_init_entity(&bfqq->entity, bfqg);
++ }
++
++ if (new_bfqq != NULL)
++ kmem_cache_free(bfq_pool, new_bfqq);
++
++ return bfqq;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int ioprio_class, int ioprio)
++{
++ switch (ioprio_class) {
++ case IOPRIO_CLASS_RT:
++ return &bfqg->async_bfqq[0][ioprio];
++ case IOPRIO_CLASS_NONE:
++ ioprio = IOPRIO_NORM;
++ /* fall through */
++ case IOPRIO_CLASS_BE:
++ return &bfqg->async_bfqq[1][ioprio];
++ case IOPRIO_CLASS_IDLE:
++ return &bfqg->async_idle_bfqq;
++ default:
++ BUG();
++ }
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg, int is_sync,
++ struct bfq_io_cq *bic, gfp_t gfp_mask)
++{
++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ struct bfq_queue **async_bfqq = NULL;
++ struct bfq_queue *bfqq = NULL;
++
++ if (!is_sync) {
++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++ ioprio);
++ bfqq = *async_bfqq;
++ }
++
++ if (bfqq == NULL)
++ bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++
++ /*
++ * Pin the queue now that it's allocated, scheduler exit will
++ * prune it.
++ */
++ if (!is_sync && *async_bfqq == NULL) {
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ *async_bfqq = bfqq;
++ }
++
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++ return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic)
++{
++ unsigned long elapsed = jiffies - bic->ttime.last_end_request;
++ unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
++
++ bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
++ bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
++ bic->ttime.ttime_samples;
++}
++
++static void bfq_update_io_seektime(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ sector_t sdist;
++ u64 total;
++
++ if (bfqq->last_request_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
++ else
++ sdist = bfqq->last_request_pos - blk_rq_pos(rq);
++
++ /*
++ * Don't allow the seek distance to get too large from the
++ * odd fragment, pagein, etc.
++ */
++ if (bfqq->seek_samples == 0) /* first request, not really a seek */
++ sdist = 0;
++ else if (bfqq->seek_samples <= 60) /* second & third seek */
++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
++ else
++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
++
++ bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
++ bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
++ total = bfqq->seek_total + (bfqq->seek_samples/2);
++ do_div(total, bfqq->seek_samples);
++ bfqq->seek_mean = (sector_t)total;
++
++ bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
++ (u64)bfqq->seek_mean);
++}
++
++/*
++ * Disable idle window if the process thinks too long or seeks so much that
++ * it doesn't matter.
++ */
++static void bfq_update_idle_window(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ int enable_idle;
++
++ /* Don't idle for async or idle io prio class. */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++ return;
++
++ /* Idle window just restored, statistics are meaningless. */
++ if (bfq_bfqq_just_split(bfqq))
++ return;
++
++ enable_idle = bfq_bfqq_idle_window(bfqq);
++
++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++ bfqd->bfq_slice_idle == 0 ||
++ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
++ bfqq->wr_coeff == 1))
++ enable_idle = 0;
++ else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
++ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
++ bfqq->wr_coeff == 1)
++ enable_idle = 0;
++ else
++ enable_idle = 1;
++ }
++ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
++ enable_idle);
++
++ if (enable_idle)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq. Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ struct bfq_io_cq *bic = RQ_BIC(rq);
++
++ if (rq->cmd_flags & REQ_META)
++ bfqq->meta_pending++;
++
++ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_io_seektime(bfqd, bfqq, rq);
++ if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) {
++ bfq_clear_bfqq_constantly_seeky(bfqq);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
++ !BFQQ_SEEKY(bfqq))
++ bfq_update_idle_window(bfqd, bfqq, bic);
++ bfq_clear_bfqq_just_split(bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
++ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
++ (long long unsigned)bfqq->seek_mean);
++
++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++ int small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++ blk_rq_sectors(rq) < 32;
++ int budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++ /*
++ * There is just this request queued: if the request
++ * is small and the queue is not to be expired, then
++ * just exit.
++ *
++ * In this way, if the disk is being idled to wait for
++ * a new request from the in-service queue, we avoid
++ * unplugging the device and committing the disk to serve
++ * just a small request. On the contrary, we wait for
++ * the block layer to decide when to unplug the device:
++ * hopefully, new requests will be merged to this one
++ * quickly, then the device will be unplugged and
++ * larger requests will be dispatched.
++ */
++ if (small_req && !budget_timeout)
++ return;
++
++ /*
++ * A large enough request arrived, or the queue is to
++ * be expired: in both cases disk idling is to be
++ * stopped, so clear wait_request flag and reset
++ * timer.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++
++ /*
++ * The queue is not empty, because a new request just
++ * arrived. Hence we can safely expire the queue, in
++ * case of budget timeout, without risking that the
++ * timestamps of the queue are not updated correctly.
++ * See [1] for more details.
++ */
++ if (budget_timeout)
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++
++ /*
++ * Let the request rip immediately, or let a new queue be
++ * selected if bfqq has just been expired.
++ */
++ __blk_run_queue(bfqd->queue);
++ }
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ /*
++ * An unplug may trigger a requeue of a request from the device
++ * driver: make sure we are in process context while trying to
++ * merge two bfq_queues.
++ */
++ if (!in_interrupt()) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++ if (new_bfqq != NULL) {
++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++ /*
++ * Release the request's reference to the old bfqq
++ * and make sure one is taken to the shared queue.
++ */
++ new_bfqq->allocated[rq_data_dir(rq)]++;
++ bfqq->allocated[rq_data_dir(rq)]--;
++ atomic_inc(&new_bfqq->ref);
++ bfq_put_queue(bfqq);
++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++ bfqq, new_bfqq);
++ rq->elv.priv[1] = new_bfqq;
++ bfqq = new_bfqq;
++ } else
++ bfq_bfqq_increase_failed_cooperations(bfqq);
++ }
++
++ bfq_init_prio_data(bfqq, RQ_BIC(rq));
++
++ bfq_add_request(rq);
++
++ /*
++ * Here a newly-created bfq_queue has already started a weight-raising
++ * period: clear raising_time_left to prevent bfq_bfqq_save_state()
++ * from assigning it a full weight-raising period. See the detailed
++ * comments about this field in bfq_init_icq().
++ */
++ if (bfqq->bic != NULL)
++ bfqq->bic->wr_time_left = 0;
++ rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]);
++ list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++ bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++ bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
++ bfqd->rq_in_driver);
++
++ if (bfqd->hw_tag == 1)
++ return;
++
++ /*
++ * This sample is valid if the number of outstanding requests
++ * is large enough to allow a queueing behavior. Note that the
++ * sum is not exact, as it's not taking into account deactivated
++ * requests.
++ */
++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++ return;
++
++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++ return;
++
++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++ bfqd->max_rq_in_driver = 0;
++ bfqd->hw_tag_samples = 0;
++}
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ bool sync = bfq_bfqq_sync(bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)",
++ blk_rq_sectors(rq), sync);
++
++ bfq_update_hw_tag(bfqd);
++
++ BUG_ON(!bfqd->rq_in_driver);
++ BUG_ON(!bfqq->dispatched);
++ bfqd->rq_in_driver--;
++ bfqq->dispatched--;
++
++ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->busy_in_flight_queues);
++ bfqd->busy_in_flight_queues--;
++ if (bfq_bfqq_constantly_seeky(bfqq)) {
++ BUG_ON(!bfqd->
++ const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ }
++
++ if (sync) {
++ bfqd->sync_flight--;
++ RQ_BIC(rq)->ttime.last_end_request = jiffies;
++ }
++
++ /*
++ * If we are waiting to discover whether the request pattern of the
++ * task associated with the queue is actually isochronous, and
++ * both requisites for this condition to hold are satisfied, then
++ * compute soft_rt_next_start (see the comments to the function
++ * bfq_bfqq_softrt_next_start()).
++ */
++ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list))
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++ /*
++ * If this is the in-service queue, check if it needs to be expired,
++ * or if we want to idle in case it has no pending requests.
++ */
++ if (bfqd->in_service_queue == bfqq) {
++ if (bfq_bfqq_budget_new(bfqq))
++ bfq_set_budget_timeout(bfqd);
++
++ if (bfq_bfqq_must_idle(bfqq)) {
++ bfq_arm_slice_timer(bfqd);
++ goto out;
++ } else if (bfq_may_expire_for_budg_timeout(bfqq))
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++ (bfqq->dispatched == 0 ||
++ !bfq_bfqq_must_not_expire(bfqq)))
++ bfq_bfqq_expire(bfqd, bfqq, 0,
++ BFQ_BFQQ_NO_MORE_REQUESTS);
++ }
++
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++
++out:
++ return;
++}
++
++static inline int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++ bfq_clear_bfqq_must_alloc(bfqq);
++ return ELV_MQUEUE_MUST;
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, int rw)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Don't force setup of a queue from here, as a call to may_queue
++ * does not necessarily imply that a request actually will be
++ * queued. So just lookup a possibly existing queue, or return
++ * 'may queue' if that fails.
++ */
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (bic == NULL)
++ return ELV_MQUEUE_MAY;
++
++ bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
++ if (bfqq != NULL) {
++ bfq_init_prio_data(bfqq, bic);
++
++ return __bfq_may_queue(bfqq);
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (bfqq != NULL) {
++ const int rw = rq_data_dir(rq);
++
++ BUG_ON(!bfqq->allocated[rw]);
++ bfqq->allocated[rw]--;
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to said bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++ put_io_context(bic->icq.ioc);
++
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->pid = current->pid;
++ bfq_clear_bfqq_coop(bfqq);
++ bfq_clear_bfqq_split_coop(bfqq);
++ return bfqq;
++ }
++
++ bic_set_bfqq(bic, NULL, 1);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++ return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ const int rw = rq_data_dir(rq);
++ const int is_sync = rq_is_sync(rq);
++ struct bfq_queue *bfqq;
++ struct bfq_group *bfqg;
++ unsigned long flags;
++ bool split = false;
++
++ might_sleep_if(gfp_mask & __GFP_WAIT);
++
++ bfq_changed_ioprio(bic);
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ if (bic == NULL)
++ goto queue_fail;
++
++ bfqg = bfq_bic_update_cgroup(bic);
++
++new_queue:
++ bfqq = bic_to_bfqq(bic, is_sync);
++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++ bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++ bic_set_bfqq(bic, bfqq, is_sync);
++ } else {
++ /* If the queue was seeky for too long, break it apart. */
++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
++ if (!bfqq)
++ goto new_queue;
++ }
++ }
++
++ bfqq->allocated[rw]++;
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++
++ rq->elv.priv[0] = bic;
++ rq->elv.priv[1] = bfqq;
++
++ /*
++ * If a bfq_queue has only one process reference, it is owned
++ * by only one bfq_io_cq: we can set the bic field of the
++ * bfq_queue to the address of that structure. Also, if the
++ * queue has just been split, mark a flag so that the
++ * information is available to the other scheduler hooks.
++ */
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->bic = bic;
++ if (split) {
++ bfq_mark_bfqq_just_split(bfqq);
++ /*
++ * If the queue has just been split from a shared
++ * queue, restore the idle window and the possible
++ * weight raising period.
++ */
++ bfq_bfqq_resume_state(bfqq, bic);
++ }
++ }
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++
++queue_fail:
++ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++ struct bfq_data *bfqd =
++ container_of(work, struct bfq_data, unplug_work);
++ struct request_queue *q = bfqd->queue;
++
++ spin_lock_irq(q->queue_lock);
++ __blk_run_queue(q);
++ spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler of the expiration of the timer running if the in-service queue
++ * is idling inside its time slice.
++ */
++static void bfq_idle_slice_timer(unsigned long data)
++{
++ struct bfq_data *bfqd = (struct bfq_data *)data;
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ enum bfqq_expiration reason;
++
++ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++ bfqq = bfqd->in_service_queue;
++ /*
++ * Theoretical race here: the in-service queue can be NULL or
++ * different from the queue that was idling if the timer handler
++ * spins on the queue_lock and a new request arrives for the
++ * current queue and there is a full dispatch cycle that changes
++ * the in-service queue. This can hardly happen, but in the worst
++ * case we just expire a queue too early.
++ */
++ if (bfqq != NULL) {
++ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, 1, reason);
++ }
++
++schedule_dispatch:
++ bfq_schedule_dispatch(bfqd);
++
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++ del_timer_sync(&bfqd->idle_slice_timer);
++ cancel_work_sync(&bfqd->unplug_work);
++}
++
++static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue **bfqq_ptr)
++{
++ struct bfq_group *root_group = bfqd->root_group;
++ struct bfq_queue *bfqq = *bfqq_ptr;
++
++ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ if (bfqq != NULL) {
++ bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
++ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ *bfqq_ptr = NULL;
++ }
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ struct request_queue *q = bfqd->queue;
++ struct bfq_queue *bfqq, *n;
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ spin_lock_irq(q->queue_lock);
++
++ BUG_ON(bfqd->in_service_queue != NULL);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++ bfq_deactivate_bfqq(bfqd, bfqq, 0);
++
++ bfq_disconnect_groups(bfqd);
++ spin_unlock_irq(q->queue_lock);
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ synchronize_rcu();
++
++ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++ bfq_free_root_group(bfqd);
++ kfree(bfqd);
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (eq == NULL)
++ return -ENOMEM;
++
++ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++ if (bfqd == NULL) {
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++ eq->elevator_data = bfqd;
++
++ /*
++ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++ * Grab a permanent reference to it, so that the normal code flow
++ * will not attempt to free it.
++ */
++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
++ atomic_inc(&bfqd->oom_bfqq.ref);
++
++ bfqd->queue = q;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
++ bfqg = bfq_alloc_root_group(bfqd, q->node);
++ if (bfqg == NULL) {
++ kfree(bfqd);
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++
++ bfqd->root_group = bfqg;
++#ifdef CONFIG_CGROUP_BFQIO
++ bfqd->active_numerous_groups = 0;
++#endif
++
++ init_timer(&bfqd->idle_slice_timer);
++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++ bfqd->idle_slice_timer.data = (unsigned long)bfqd;
++
++ bfqd->rq_pos_tree = RB_ROOT;
++ bfqd->queue_weights_tree = RB_ROOT;
++ bfqd->group_weights_tree = RB_ROOT;
++
++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++ INIT_LIST_HEAD(&bfqd->active_list);
++ INIT_LIST_HEAD(&bfqd->idle_list);
++
++ bfqd->hw_tag = -1;
++
++ bfqd->bfq_max_budget = bfq_default_max_budget;
++
++ bfqd->bfq_quantum = bfq_quantum;
++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++ bfqd->bfq_back_max = bfq_back_max;
++ bfqd->bfq_back_penalty = bfq_back_penalty;
++ bfqd->bfq_slice_idle = bfq_slice_idle;
++ bfqd->bfq_class_idle_last_service = 0;
++ bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
++ bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
++ bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
++
++ bfqd->bfq_coop_thresh = 2;
++ bfqd->bfq_failed_cooperations = 7000;
++ bfqd->bfq_requests_within_timer = 120;
++
++ bfqd->low_latency = true;
++
++ bfqd->bfq_wr_coeff = 20;
++ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++ bfqd->bfq_wr_max_time = 0;
++ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++ bfqd->bfq_wr_max_softrt_rate = 7000; /*
++ * Approximate rate required
++ * to playback or record a
++ * high-definition compressed
++ * video.
++ */
++ bfqd->wr_busy_queues = 0;
++ bfqd->busy_in_flight_queues = 0;
++ bfqd->const_seeky_busy_in_flight_queues = 0;
++
++ /*
++ * Begin by assuming, optimistically, that the device peak rate is
++ * equal to the highest reference rate.
++ */
++ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++ T_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->device_speed = BFQ_BFQD_FAST;
++
++ return 0;
++}
++
++static void bfq_slab_kill(void)
++{
++ if (bfq_pool != NULL)
++ kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++ bfq_pool = KMEM_CACHE(bfq_queue, 0);
++ if (bfq_pool == NULL)
++ return -ENOMEM;
++ return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%d\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++ size_t count)
++{
++ unsigned long new_val;
++ int ret = kstrtoul(page, 10, &new_val);
++
++ if (ret == 0)
++ *var = new_val;
++
++ return count;
++}
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++ jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = e->elevator_data;
++ ssize_t num_char = 0;
++
++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++ bfqd->queued);
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ num_char += sprintf(page + num_char, "Active:\n");
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ bfqq->queued[0],
++ bfqq->queued[1],
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ num_char += sprintf(page + num_char, "Idle:\n");
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ jiffies_to_msecs(jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++
++ return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned int __data = __VAR; \
++ if (__CONV) \
++ __data = jiffies_to_msecs(__data); \
++ return bfq_var_show(__data, (page)); \
++}
++SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_max_budget_async_rq_show,
++ bfqd->bfq_max_budget_async_rq, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
++SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++ 1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
++static ssize_t \
++__FUNC(struct elevator_queue *e, const char *page, size_t count) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ if (__CONV) \
++ *(__PTR) = msecs_to_jiffies(__data); \
++ else \
++ *(__PTR) = __data; \
++ return ret; \
++}
++STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
++ 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++ 1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++ INT_MAX, 0);
++#undef STORE_FUNCTION
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ return count;
++}
++
++static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
++{
++ u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++ if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
++ return bfq_calc_max_budget(bfqd->peak_rate, timeout);
++ else
++ return bfq_default_max_budget;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data == 0)
++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++ else {
++ if (__data > INT_MAX)
++ __data = INT_MAX;
++ bfqd->bfq_max_budget = __data;
++ }
++
++ bfqd->bfq_user_max_budget = __data;
++
++ return ret;
++}
++
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data < 1)
++ __data = 1;
++ else if (__data > INT_MAX)
++ __data = INT_MAX;
++
++ bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
++ if (bfqd->bfq_user_max_budget == 0)
++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++
++ return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (__data == 0 && bfqd->low_latency != 0)
++ bfq_end_wr(bfqd);
++ bfqd->low_latency = __data;
++
++ return ret;
++}
++
++#define BFQ_ATTR(name) \
++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++ BFQ_ATTR(quantum),
++ BFQ_ATTR(fifo_expire_sync),
++ BFQ_ATTR(fifo_expire_async),
++ BFQ_ATTR(back_seek_max),
++ BFQ_ATTR(back_seek_penalty),
++ BFQ_ATTR(slice_idle),
++ BFQ_ATTR(max_budget),
++ BFQ_ATTR(max_budget_async_rq),
++ BFQ_ATTR(timeout_sync),
++ BFQ_ATTR(timeout_async),
++ BFQ_ATTR(low_latency),
++ BFQ_ATTR(wr_coeff),
++ BFQ_ATTR(wr_max_time),
++ BFQ_ATTR(wr_rt_max_time),
++ BFQ_ATTR(wr_min_idle_time),
++ BFQ_ATTR(wr_min_inter_arr_async),
++ BFQ_ATTR(wr_max_softrt_rate),
++ BFQ_ATTR(weights),
++ __ATTR_NULL
++};
++
++static struct elevator_type iosched_bfq = {
++ .ops = {
++ .elevator_merge_fn = bfq_merge,
++ .elevator_merged_fn = bfq_merged_request,
++ .elevator_merge_req_fn = bfq_merged_requests,
++ .elevator_allow_merge_fn = bfq_allow_merge,
++ .elevator_dispatch_fn = bfq_dispatch_requests,
++ .elevator_add_req_fn = bfq_insert_request,
++ .elevator_activate_req_fn = bfq_activate_request,
++ .elevator_deactivate_req_fn = bfq_deactivate_request,
++ .elevator_completed_req_fn = bfq_completed_request,
++ .elevator_former_req_fn = elv_rb_former_request,
++ .elevator_latter_req_fn = elv_rb_latter_request,
++ .elevator_init_icq_fn = bfq_init_icq,
++ .elevator_exit_icq_fn = bfq_exit_icq,
++ .elevator_set_req_fn = bfq_set_request,
++ .elevator_put_req_fn = bfq_put_request,
++ .elevator_may_queue_fn = bfq_may_queue,
++ .elevator_init_fn = bfq_init_queue,
++ .elevator_exit_fn = bfq_exit_queue,
++ },
++ .icq_size = sizeof(struct bfq_io_cq),
++ .icq_align = __alignof__(struct bfq_io_cq),
++ .elevator_attrs = bfq_attrs,
++ .elevator_name = "bfq",
++ .elevator_owner = THIS_MODULE,
++};
++
++static int __init bfq_init(void)
++{
++ /*
++ * Can be 0 on HZ < 1000 setups.
++ */
++ if (bfq_slice_idle == 0)
++ bfq_slice_idle = 1;
++
++ if (bfq_timeout_async == 0)
++ bfq_timeout_async = 1;
++
++ if (bfq_slab_setup())
++ return -ENOMEM;
++
++ /*
++ * Times to load large popular applications for the typical systems
++ * installed on the reference devices (see the comments before the
++ * definitions of the two arrays).
++ */
++ T_slow[0] = msecs_to_jiffies(2600);
++ T_slow[1] = msecs_to_jiffies(1000);
++ T_fast[0] = msecs_to_jiffies(5500);
++ T_fast[1] = msecs_to_jiffies(2000);
++
++ /*
++ * Thresholds that determine the switch between speed classes (see
++ * the comments before the definition of the array).
++ */
++ device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
++ device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
++
++ elv_register(&iosched_bfq);
++ pr_info("BFQ I/O-scheduler version: v7r5");
++
++ return 0;
++}
++
++static void __exit bfq_exit(void)
++{
++ elv_unregister(&iosched_bfq);
++ bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/block/bfq-sched.c linux-openelec/block/bfq-sched.c
+--- linux-3.14.36/block/bfq-sched.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/block/bfq-sched.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,1179 @@
++/*
++ * BFQ: Hierarchical B-WF2Q+ scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++#define for_each_entity(entity) \
++ for (; entity != NULL; entity = entity->parent)
++
++#define for_each_entity_safe(entity, parent) \
++ for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
++
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ int extract,
++ struct bfq_data *bfqd);
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++ struct bfq_entity *bfqg_entity;
++ struct bfq_group *bfqg;
++ struct bfq_sched_data *group_sd;
++
++ BUG_ON(next_in_service == NULL);
++
++ group_sd = next_in_service->sched_data;
++
++ bfqg = container_of(group_sd, struct bfq_group, sched_data);
++ /*
++ * bfq_group's my_entity field is not NULL only if the group
++ * is not the root group. We must not touch the root entity
++ * as it must never become an in-service entity.
++ */
++ bfqg_entity = bfqg->my_entity;
++ if (bfqg_entity != NULL)
++ bfqg_entity->budget = next_in_service->budget;
++}
++
++static int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++ struct bfq_entity *next_in_service;
++
++ if (sd->in_service_entity != NULL)
++ /* will update/requeue at the end of service */
++ return 0;
++
++ /*
++ * NOTE: this can be improved in many ways, such as returning
++ * 1 (and thus propagating upwards the update) only when the
++ * budget changes, or caching the bfqq that will be scheduled
++ * next from this subtree. By now we worry more about
++ * correctness than about performance...
++ */
++ next_in_service = bfq_lookup_next_entity(sd, 0, NULL);
++ sd->next_in_service = next_in_service;
++
++ if (next_in_service != NULL)
++ bfq_update_budget(next_in_service);
++
++ return 1;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++ struct bfq_entity *entity)
++{
++ BUG_ON(sd->next_in_service != entity);
++}
++#else
++#define for_each_entity(entity) \
++ for (; entity != NULL; entity = NULL)
++
++#define for_each_entity_safe(entity, parent) \
++ for (parent = NULL; entity != NULL; entity = parent)
++
++static inline int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++ return 0;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++ struct bfq_entity *entity)
++{
++}
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++}
++#endif
++
++/*
++ * Shift for timestamp calculations. This actually limits the maximum
++ * service allowed in one timestamp delta (small shift values increase it),
++ * the maximum total weight that can be used for the queues in the system
++ * (big shift values increase it), and the period of virtual time
++ * wraparounds.
++ */
++#define WFQ_SERVICE_SHIFT 22
++
++/**
++ * bfq_gt - compare two timestamps.
++ * @a: first ts.
++ * @b: second ts.
++ *
++ * Return @a > @b, dealing with wrapping correctly.
++ */
++static inline int bfq_gt(u64 a, u64 b)
++{
++ return (s64)(a - b) > 0;
++}
++
++static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = NULL;
++
++ BUG_ON(entity == NULL);
++
++ if (entity->my_sched_data == NULL)
++ bfqq = container_of(entity, struct bfq_queue, entity);
++
++ return bfqq;
++}
++
++
++/**
++ * bfq_delta - map service into the virtual time domain.
++ * @service: amount of service.
++ * @weight: scale factor (weight of an entity or weight sum).
++ */
++static inline u64 bfq_delta(unsigned long service,
++ unsigned long weight)
++{
++ u64 d = (u64)service << WFQ_SERVICE_SHIFT;
++
++ do_div(d, weight);
++ return d;
++}
++
++/**
++ * bfq_calc_finish - assign the finish time to an entity.
++ * @entity: the entity to act upon.
++ * @service: the service to be charged to the entity.
++ */
++static inline void bfq_calc_finish(struct bfq_entity *entity,
++ unsigned long service)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(entity->weight == 0);
++
++ entity->finish = entity->start +
++ bfq_delta(service, entity->weight);
++
++ if (bfqq != NULL) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: serv %lu, w %d",
++ service, entity->weight);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: start %llu, finish %llu, delta %llu",
++ entity->start, entity->finish,
++ bfq_delta(service, entity->weight));
++ }
++}
++
++/**
++ * bfq_entity_of - get an entity from a node.
++ * @node: the node field of the entity.
++ *
++ * Convert a node pointer to the relative entity. This is used only
++ * to simplify the logic of some functions and not as the generic
++ * conversion mechanism because, e.g., in the tree walking functions,
++ * the check for a %NULL value would be redundant.
++ */
++static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
++{
++ struct bfq_entity *entity = NULL;
++
++ if (node != NULL)
++ entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ return entity;
++}
++
++/**
++ * bfq_extract - remove an entity from a tree.
++ * @root: the tree root.
++ * @entity: the entity to remove.
++ */
++static inline void bfq_extract(struct rb_root *root,
++ struct bfq_entity *entity)
++{
++ BUG_ON(entity->tree != root);
++
++ entity->tree = NULL;
++ rb_erase(&entity->rb_node, root);
++}
++
++/**
++ * bfq_idle_extract - extract an entity from the idle tree.
++ * @st: the service tree of the owning @entity.
++ * @entity: the entity being removed.
++ */
++static void bfq_idle_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *next;
++
++ BUG_ON(entity->tree != &st->idle);
++
++ if (entity == st->first_idle) {
++ next = rb_next(&entity->rb_node);
++ st->first_idle = bfq_entity_of(next);
++ }
++
++ if (entity == st->last_idle) {
++ next = rb_prev(&entity->rb_node);
++ st->last_idle = bfq_entity_of(next);
++ }
++
++ bfq_extract(&st->idle, entity);
++
++ if (bfqq != NULL)
++ list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_insert - generic tree insertion.
++ * @root: tree root.
++ * @entity: entity to insert.
++ *
++ * This is used for the idle and the active tree, since they are both
++ * ordered by finish time.
++ */
++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
++{
++ struct bfq_entity *entry;
++ struct rb_node **node = &root->rb_node;
++ struct rb_node *parent = NULL;
++
++ BUG_ON(entity->tree != NULL);
++
++ while (*node != NULL) {
++ parent = *node;
++ entry = rb_entry(parent, struct bfq_entity, rb_node);
++
++ if (bfq_gt(entry->finish, entity->finish))
++ node = &parent->rb_left;
++ else
++ node = &parent->rb_right;
++ }
++
++ rb_link_node(&entity->rb_node, parent, node);
++ rb_insert_color(&entity->rb_node, root);
++
++ entity->tree = root;
++}
++
++/**
++ * bfq_update_min - update the min_start field of a entity.
++ * @entity: the entity to update.
++ * @node: one of its children.
++ *
++ * This function is called when @entity may store an invalid value for
++ * min_start due to updates to the active tree. The function assumes
++ * that the subtree rooted at @node (which may be its left or its right
++ * child) has a valid min_start value.
++ */
++static inline void bfq_update_min(struct bfq_entity *entity,
++ struct rb_node *node)
++{
++ struct bfq_entity *child;
++
++ if (node != NULL) {
++ child = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entity->min_start, child->min_start))
++ entity->min_start = child->min_start;
++ }
++}
++
++/**
++ * bfq_update_active_node - recalculate min_start.
++ * @node: the node to update.
++ *
++ * @node may have changed position or one of its children may have moved,
++ * this function updates its min_start value. The left and right subtrees
++ * are assumed to hold a correct min_start value.
++ */
++static inline void bfq_update_active_node(struct rb_node *node)
++{
++ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ entity->min_start = entity->start;
++ bfq_update_min(entity, node->rb_right);
++ bfq_update_min(entity, node->rb_left);
++}
++
++/**
++ * bfq_update_active_tree - update min_start for the whole active tree.
++ * @node: the starting node.
++ *
++ * @node must be the deepest modified node after an update. This function
++ * updates its min_start using the values held by its children, assuming
++ * that they did not change, and then updates all the nodes that may have
++ * changed in the path to the root. The only nodes that may have changed
++ * are the ones in the path or their siblings.
++ */
++static void bfq_update_active_tree(struct rb_node *node)
++{
++ struct rb_node *parent;
++
++up:
++ bfq_update_active_node(node);
++
++ parent = rb_parent(node);
++ if (parent == NULL)
++ return;
++
++ if (node == parent->rb_left && parent->rb_right != NULL)
++ bfq_update_active_node(parent->rb_right);
++ else if (parent->rb_left != NULL)
++ bfq_update_active_node(parent->rb_left);
++
++ node = parent;
++ goto up;
++}
++
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++
++/**
++ * bfq_active_insert - insert an entity in the active tree of its
++ * group/device.
++ * @st: the service tree of the entity.
++ * @entity: the entity being inserted.
++ *
++ * The active tree is ordered by finish time, but an extra key is kept
++ * per each node, containing the minimum value for the start times of
++ * its children (and the node itself), so it's possible to search for
++ * the eligible node with the lowest finish time in logarithmic time.
++ */
++static void bfq_active_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node = &entity->rb_node;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ bfq_insert(&st->active, entity);
++
++ if (node->rb_left != NULL)
++ node = node->rb_left;
++ else if (node->rb_right != NULL)
++ node = node->rb_right;
++
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq != NULL)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
++#ifdef CONFIG_CGROUP_BFQIO
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ bfqg->active_entities++;
++ if (bfqg->active_entities == 2)
++ bfqd->active_numerous_groups++;
++ }
++#endif
++}
++
++/**
++ * bfq_ioprio_to_weight - calc a weight from an ioprio.
++ * @ioprio: the ioprio value to convert.
++ */
++static inline unsigned short bfq_ioprio_to_weight(int ioprio)
++{
++ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
++ return IOPRIO_BE_NR - ioprio;
++}
++
++/**
++ * bfq_weight_to_ioprio - calc an ioprio from a weight.
++ * @weight: the weight value to convert.
++ *
++ * To preserve as mush as possible the old only-ioprio user interface,
++ * 0 is used as an escape ioprio value for weights (numerically) equal or
++ * larger than IOPRIO_BE_NR
++ */
++static inline unsigned short bfq_weight_to_ioprio(int weight)
++{
++ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
++ return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
++}
++
++static inline void bfq_get_entity(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ if (bfqq != NULL) {
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ }
++}
++
++/**
++ * bfq_find_deepest - find the deepest node that an extraction can modify.
++ * @node: the node being removed.
++ *
++ * Do the first step of an extraction in an rb tree, looking for the
++ * node that will replace @node, and returning the deepest node that
++ * the following modifications to the tree can touch. If @node is the
++ * last node in the tree return %NULL.
++ */
++static struct rb_node *bfq_find_deepest(struct rb_node *node)
++{
++ struct rb_node *deepest;
++
++ if (node->rb_right == NULL && node->rb_left == NULL)
++ deepest = rb_parent(node);
++ else if (node->rb_right == NULL)
++ deepest = node->rb_left;
++ else if (node->rb_left == NULL)
++ deepest = node->rb_right;
++ else {
++ deepest = rb_next(node);
++ if (deepest->rb_right != NULL)
++ deepest = deepest->rb_right;
++ else if (rb_parent(deepest) != node)
++ deepest = rb_parent(deepest);
++ }
++
++ return deepest;
++}
++
++/**
++ * bfq_active_extract - remove an entity from the active tree.
++ * @st: the service_tree containing the tree.
++ * @entity: the entity being removed.
++ */
++static void bfq_active_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ node = bfq_find_deepest(&entity->rb_node);
++ bfq_extract(&st->active, entity);
++
++ if (node != NULL)
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq != NULL)
++ list_del(&bfqq->bfqq_list);
++#ifdef CONFIG_CGROUP_BFQIO
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_remove(bfqd, entity,
++ &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ BUG_ON(!bfqg->active_entities);
++ bfqg->active_entities--;
++ if (bfqg->active_entities == 1) {
++ BUG_ON(!bfqd->active_numerous_groups);
++ bfqd->active_numerous_groups--;
++ }
++ }
++#endif
++}
++
++/**
++ * bfq_idle_insert - insert an entity into the idle tree.
++ * @st: the service tree containing the tree.
++ * @entity: the entity to insert.
++ */
++static void bfq_idle_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
++ st->first_idle = entity;
++ if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
++ st->last_idle = entity;
++
++ bfq_insert(&st->idle, entity);
++
++ if (bfqq != NULL)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
++}
++
++/**
++ * bfq_forget_entity - remove an entity from the wfq trees.
++ * @st: the service tree.
++ * @entity: the entity being removed.
++ *
++ * Update the device status and forget everything about @entity, putting
++ * the device reference to it, if it is a queue. Entities belonging to
++ * groups are not refcounted.
++ */
++static void bfq_forget_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_sched_data *sd;
++
++ BUG_ON(!entity->on_st);
++
++ entity->on_st = 0;
++ st->wsum -= entity->weight;
++ if (bfqq != NULL) {
++ sd = entity->sched_data;
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++}
++
++/**
++ * bfq_put_idle_entity - release the idle tree ref of an entity.
++ * @st: service tree for the entity.
++ * @entity: the entity being released.
++ */
++static void bfq_put_idle_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ bfq_idle_extract(st, entity);
++ bfq_forget_entity(st, entity);
++}
++
++/**
++ * bfq_forget_idle - update the idle tree if necessary.
++ * @st: the service tree to act upon.
++ *
++ * To preserve the global O(log N) complexity we only remove one entry here;
++ * as the idle tree will not grow indefinitely this can be done safely.
++ */
++static void bfq_forget_idle(struct bfq_service_tree *st)
++{
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
++ !bfq_gt(last_idle->finish, st->vtime)) {
++ /*
++ * Forget the whole idle tree, increasing the vtime past
++ * the last finish time of idle entities.
++ */
++ st->vtime = last_idle->finish;
++ }
++
++ if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
++ bfq_put_idle_entity(st, first_idle);
++}
++
++static struct bfq_service_tree *
++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
++ struct bfq_entity *entity)
++{
++ struct bfq_service_tree *new_st = old_st;
++
++ if (entity->ioprio_changed) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned short prev_weight, new_weight;
++ struct bfq_data *bfqd = NULL;
++ struct rb_root *root;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd;
++ struct bfq_group *bfqg;
++#endif
++
++ if (bfqq != NULL)
++ bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++ else {
++ sd = entity->my_sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++ BUG_ON(!bfqd);
++ }
++#endif
++
++ BUG_ON(old_st->wsum < entity->weight);
++ old_st->wsum -= entity->weight;
++
++ if (entity->new_weight != entity->orig_weight) {
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio =
++ bfq_weight_to_ioprio(entity->orig_weight);
++ } else if (entity->new_ioprio != entity->ioprio) {
++ entity->ioprio = entity->new_ioprio;
++ entity->orig_weight =
++ bfq_ioprio_to_weight(entity->ioprio);
++ } else
++ entity->new_weight = entity->orig_weight =
++ bfq_ioprio_to_weight(entity->ioprio);
++
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->ioprio_changed = 0;
++
++ /*
++ * NOTE: here we may be changing the weight too early,
++ * this will cause unfairness. The correct approach
++ * would have required additional complexity to defer
++ * weight changes to the proper time instants (i.e.,
++ * when entity->finish <= old_st->vtime).
++ */
++ new_st = bfq_entity_service_tree(entity);
++
++ prev_weight = entity->weight;
++ new_weight = entity->orig_weight *
++ (bfqq != NULL ? bfqq->wr_coeff : 1);
++ /*
++ * If the weight of the entity changes, remove the entity
++ * from its old weight counter (if there is a counter
++ * associated with the entity), and add it to the counter
++ * associated with its new weight.
++ */
++ if (prev_weight != new_weight) {
++ root = bfqq ? &bfqd->queue_weights_tree :
++ &bfqd->group_weights_tree;
++ bfq_weights_tree_remove(bfqd, entity, root);
++ }
++ entity->weight = new_weight;
++ /*
++ * Add the entity to its weights tree only if it is
++ * not associated with a weight-raised queue.
++ */
++ if (prev_weight != new_weight &&
++ (bfqq ? bfqq->wr_coeff == 1 : 1))
++ /* If we get here, root has been initialized. */
++ bfq_weights_tree_add(bfqd, entity, root);
++
++ new_st->wsum += entity->weight;
++
++ if (new_st != old_st)
++ entity->start = new_st->vtime;
++ }
++
++ return new_st;
++}
++
++/**
++ * bfq_bfqq_served - update the scheduler status after selection for
++ * service.
++ * @bfqq: the queue being served.
++ * @served: bytes to transfer.
++ *
++ * NOTE: this can be optimized, as the timestamps of upper level entities
++ * are synchronized every time a new bfqq is selected for service. By now,
++ * we keep it to better check consistency.
++ */
++static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st;
++
++ for_each_entity(entity) {
++ st = bfq_entity_service_tree(entity);
++
++ entity->service += served;
++ BUG_ON(entity->service > entity->budget);
++ BUG_ON(st->wsum == 0);
++
++ st->vtime += bfq_delta(served, st->wsum);
++ bfq_forget_idle(st);
++ }
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
++}
++
++/**
++ * bfq_bfqq_charge_full_budget - set the service to the entity budget.
++ * @bfqq: the queue that needs a service update.
++ *
++ * When it's not possible to be fair in the service domain, because
++ * a queue is not consuming its budget fast enough (the meaning of
++ * fast depends on the timeout parameter), we charge it a full
++ * budget. In this way we should obtain a sort of time-domain
++ * fairness among all the seeky/slow queues.
++ */
++static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
++
++ bfq_bfqq_served(bfqq, entity->budget - entity->service);
++}
++
++/**
++ * __bfq_activate_entity - activate an entity.
++ * @entity: the entity being activated.
++ *
++ * Called whenever an entity is activated, i.e., it is not active and one
++ * of its children receives a new request, or has to be reactivated due to
++ * budget exhaustion. It uses the current budget of the entity (and the
++ * service received if @entity is active) of the queue to calculate its
++ * timestamps.
++ */
++static void __bfq_activate_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ if (entity == sd->in_service_entity) {
++ BUG_ON(entity->tree != NULL);
++ /*
++ * If we are requeueing the current entity we have
++ * to take care of not charging to it service it has
++ * not received.
++ */
++ bfq_calc_finish(entity, entity->service);
++ entity->start = entity->finish;
++ sd->in_service_entity = NULL;
++ } else if (entity->tree == &st->active) {
++ /*
++ * Requeueing an entity due to a change of some
++ * next_in_service entity below it. We reuse the
++ * old start time.
++ */
++ bfq_active_extract(st, entity);
++ } else if (entity->tree == &st->idle) {
++ /*
++ * Must be on the idle tree, bfq_idle_extract() will
++ * check for that.
++ */
++ bfq_idle_extract(st, entity);
++ entity->start = bfq_gt(st->vtime, entity->finish) ?
++ st->vtime : entity->finish;
++ } else {
++ /*
++ * The finish time of the entity may be invalid, and
++ * it is in the past for sure, otherwise the queue
++ * would have been on the idle tree.
++ */
++ entity->start = st->vtime;
++ st->wsum += entity->weight;
++ bfq_get_entity(entity);
++
++ BUG_ON(entity->on_st);
++ entity->on_st = 1;
++ }
++
++ st = __bfq_entity_update_weight_prio(st, entity);
++ bfq_calc_finish(entity, entity->budget);
++ bfq_active_insert(st, entity);
++}
++
++/**
++ * bfq_activate_entity - activate an entity and its ancestors if necessary.
++ * @entity: the entity to activate.
++ *
++ * Activate @entity and all the entities on the path from it to the root.
++ */
++static void bfq_activate_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd;
++
++ for_each_entity(entity) {
++ __bfq_activate_entity(entity);
++
++ sd = entity->sched_data;
++ if (!bfq_update_next_in_service(sd))
++ /*
++ * No need to propagate the activation to the
++ * upper entities, as they will be updated when
++ * the in-service entity is rescheduled.
++ */
++ break;
++ }
++}
++
++/**
++ * __bfq_deactivate_entity - deactivate an entity from its service tree.
++ * @entity: the entity to deactivate.
++ * @requeue: if false, the entity will not be put into the idle tree.
++ *
++ * Deactivate an entity, independently from its previous state. If the
++ * entity was not on a service tree just return, otherwise if it is on
++ * any scheduler tree, extract it from that tree, and if necessary
++ * and if the caller did not specify @requeue, put it on the idle tree.
++ *
++ * Return %1 if the caller should update the entity hierarchy, i.e.,
++ * if the entity was in service or if it was the next_in_service for
++ * its sched_data; return %0 otherwise.
++ */
++static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ int was_in_service = entity == sd->in_service_entity;
++ int ret = 0;
++
++ if (!entity->on_st)
++ return 0;
++
++ BUG_ON(was_in_service && entity->tree != NULL);
++
++ if (was_in_service) {
++ bfq_calc_finish(entity, entity->service);
++ sd->in_service_entity = NULL;
++ } else if (entity->tree == &st->active)
++ bfq_active_extract(st, entity);
++ else if (entity->tree == &st->idle)
++ bfq_idle_extract(st, entity);
++ else if (entity->tree != NULL)
++ BUG();
++
++ if (was_in_service || sd->next_in_service == entity)
++ ret = bfq_update_next_in_service(sd);
++
++ if (!requeue || !bfq_gt(entity->finish, st->vtime))
++ bfq_forget_entity(st, entity);
++ else
++ bfq_idle_insert(st, entity);
++
++ BUG_ON(sd->in_service_entity == entity);
++ BUG_ON(sd->next_in_service == entity);
++
++ return ret;
++}
++
++/**
++ * bfq_deactivate_entity - deactivate an entity.
++ * @entity: the entity to deactivate.
++ * @requeue: true if the entity can be put on the idle tree
++ */
++static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++ struct bfq_sched_data *sd;
++ struct bfq_entity *parent;
++
++ for_each_entity_safe(entity, parent) {
++ sd = entity->sched_data;
++
++ if (!__bfq_deactivate_entity(entity, requeue))
++ /*
++ * The parent entity is still backlogged, and
++ * we don't need to update it as it is still
++ * in service.
++ */
++ break;
++
++ if (sd->next_in_service != NULL)
++ /*
++ * The parent entity is still backlogged and
++ * the budgets on the path towards the root
++ * need to be updated.
++ */
++ goto update;
++
++ /*
++ * If we reach there the parent is no more backlogged and
++ * we want to propagate the dequeue upwards.
++ */
++ requeue = 1;
++ }
++
++ return;
++
++update:
++ entity = parent;
++ for_each_entity(entity) {
++ __bfq_activate_entity(entity);
++
++ sd = entity->sched_data;
++ if (!bfq_update_next_in_service(sd))
++ break;
++ }
++}
++
++/**
++ * bfq_update_vtime - update vtime if necessary.
++ * @st: the service tree to act upon.
++ *
++ * If necessary update the service tree vtime to have at least one
++ * eligible entity, skipping to its start time. Assumes that the
++ * active tree of the device is not empty.
++ *
++ * NOTE: this hierarchical implementation updates vtimes quite often,
++ * we may end up with reactivated processes getting timestamps after a
++ * vtime skip done because we needed a ->first_active entity on some
++ * intermediate node.
++ */
++static void bfq_update_vtime(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entry;
++ struct rb_node *node = st->active.rb_node;
++
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entry->min_start, st->vtime)) {
++ st->vtime = entry->min_start;
++ bfq_forget_idle(st);
++ }
++}
++
++/**
++ * bfq_first_active_entity - find the eligible entity with
++ * the smallest finish time
++ * @st: the service tree to select from.
++ *
++ * This function searches the first schedulable entity, starting from the
++ * root of the tree and going on the left every time on this side there is
++ * a subtree with at least one eligible (start >= vtime) entity. The path on
++ * the right is followed only if a) the left subtree contains no eligible
++ * entities and b) no eligible entity has been found yet.
++ */
++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entry, *first = NULL;
++ struct rb_node *node = st->active.rb_node;
++
++ while (node != NULL) {
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++left:
++ if (!bfq_gt(entry->start, st->vtime))
++ first = entry;
++
++ BUG_ON(bfq_gt(entry->min_start, st->vtime));
++
++ if (node->rb_left != NULL) {
++ entry = rb_entry(node->rb_left,
++ struct bfq_entity, rb_node);
++ if (!bfq_gt(entry->min_start, st->vtime)) {
++ node = node->rb_left;
++ goto left;
++ }
++ }
++ if (first != NULL)
++ break;
++ node = node->rb_right;
++ }
++
++ BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
++ return first;
++}
++
++/**
++ * __bfq_lookup_next_entity - return the first eligible entity in @st.
++ * @st: the service tree.
++ *
++ * Update the virtual time in @st and return the first eligible entity
++ * it contains.
++ */
++static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
++ bool force)
++{
++ struct bfq_entity *entity, *new_next_in_service = NULL;
++
++ if (RB_EMPTY_ROOT(&st->active))
++ return NULL;
++
++ bfq_update_vtime(st);
++ entity = bfq_first_active_entity(st);
++ BUG_ON(bfq_gt(entity->start, st->vtime));
++
++ /*
++ * If the chosen entity does not match with the sched_data's
++ * next_in_service and we are forcedly serving the IDLE priority
++ * class tree, bubble up budget update.
++ */
++ if (unlikely(force && entity != entity->sched_data->next_in_service)) {
++ new_next_in_service = entity;
++ for_each_entity(new_next_in_service)
++ bfq_update_budget(new_next_in_service);
++ }
++
++ return entity;
++}
++
++/**
++ * bfq_lookup_next_entity - return the first eligible entity in @sd.
++ * @sd: the sched_data.
++ * @extract: if true the returned entity will be also extracted from @sd.
++ *
++ * NOTE: since we cache the next_in_service entity at each level of the
++ * hierarchy, the complexity of the lookup can be decreased with
++ * absolutely no effort just returning the cached next_in_service value;
++ * we prefer to do full lookups to test the consistency of * the data
++ * structures.
++ */
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ int extract,
++ struct bfq_data *bfqd)
++{
++ struct bfq_service_tree *st = sd->service_tree;
++ struct bfq_entity *entity;
++ int i = 0;
++
++ BUG_ON(sd->in_service_entity != NULL);
++
++ if (bfqd != NULL &&
++ jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
++ entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
++ true);
++ if (entity != NULL) {
++ i = BFQ_IOPRIO_CLASSES - 1;
++ bfqd->bfq_class_idle_last_service = jiffies;
++ sd->next_in_service = entity;
++ }
++ }
++ for (; i < BFQ_IOPRIO_CLASSES; i++) {
++ entity = __bfq_lookup_next_entity(st + i, false);
++ if (entity != NULL) {
++ if (extract) {
++ bfq_check_next_in_service(sd, entity);
++ bfq_active_extract(st + i, entity);
++ sd->in_service_entity = entity;
++ sd->next_in_service = NULL;
++ }
++ break;
++ }
++ }
++
++ return entity;
++}
++
++/*
++ * Get next queue for service.
++ */
++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
++{
++ struct bfq_entity *entity = NULL;
++ struct bfq_sched_data *sd;
++ struct bfq_queue *bfqq;
++
++ BUG_ON(bfqd->in_service_queue != NULL);
++
++ if (bfqd->busy_queues == 0)
++ return NULL;
++
++ sd = &bfqd->root_group->sched_data;
++ for (; sd != NULL; sd = entity->my_sched_data) {
++ entity = bfq_lookup_next_entity(sd, 1, bfqd);
++ BUG_ON(entity == NULL);
++ entity->service = 0;
++ }
++
++ bfqq = bfq_entity_to_bfqq(entity);
++ BUG_ON(bfqq == NULL);
++
++ return bfqq;
++}
++
++static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
++{
++ if (bfqd->in_service_bic != NULL) {
++ put_io_context(bfqd->in_service_bic->icq.ioc);
++ bfqd->in_service_bic = NULL;
++ }
++
++ bfqd->in_service_queue = NULL;
++ del_timer(&bfqd->idle_slice_timer);
++}
++
++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int requeue)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfqq == bfqd->in_service_queue)
++ __bfq_bfqd_reset_in_service(bfqd);
++
++ bfq_deactivate_entity(entity, requeue);
++}
++
++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_activate_entity(entity);
++}
++
++/*
++ * Called when the bfqq no longer has requests pending, remove it from
++ * the service tree.
++ */
++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int requeue)
++{
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ bfq_log_bfqq(bfqd, bfqq, "del from busy");
++
++ bfq_clear_bfqq_busy(bfqq);
++
++ BUG_ON(bfqd->busy_queues == 0);
++ bfqd->busy_queues--;
++
++ if (!bfqq->dispatched) {
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->busy_in_flight_queues);
++ bfqd->busy_in_flight_queues--;
++ if (bfq_bfqq_constantly_seeky(bfqq)) {
++ BUG_ON(!bfqd->
++ const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ }
++ if (bfqq->wr_coeff > 1)
++ bfqd->wr_busy_queues--;
++
++ bfq_deactivate_bfqq(bfqd, bfqq, requeue);
++}
++
++/*
++ * Called when an inactive queue receives a new request.
++ */
++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqq == bfqd->in_service_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "add to busy");
++
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ bfq_mark_bfqq_busy(bfqq);
++ bfqd->busy_queues++;
++
++ if (!bfqq->dispatched) {
++ if (bfqq->wr_coeff == 1)
++ bfq_weights_tree_add(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ bfqd->busy_in_flight_queues++;
++ if (bfq_bfqq_constantly_seeky(bfqq))
++ bfqd->const_seeky_busy_in_flight_queues++;
++ }
++ }
++ if (bfqq->wr_coeff > 1)
++ bfqd->wr_busy_queues++;
++}
+diff -Nur linux-3.14.36/block/blk-core.c linux-openelec/block/blk-core.c
+--- linux-3.14.36/block/blk-core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/block/blk-core.c 2015-05-06 12:05:43.000000000 -0500
+@@ -1928,7 +1928,7 @@
+ * in some cases below, so export this function.
+ * Request stacking drivers like request-based dm may change the queue
+ * limits while requests are in the queue (e.g. dm's table swapping).
+- * Such request stacking drivers should check those requests agaist
++ * Such request stacking drivers should check those requests against
+ * the new queue limits again when they dispatch those requests,
+ * although such checkings are also done against the old queue limits
+ * when submitting requests.
+diff -Nur linux-3.14.36/block/blk-map.c linux-openelec/block/blk-map.c
+--- linux-3.14.36/block/blk-map.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/block/blk-map.c 2015-05-06 12:05:43.000000000 -0500
+@@ -285,7 +285,7 @@
+ *
+ * Description:
+ * Data will be mapped directly if possible. Otherwise a bounce
+- * buffer is used. Can be called multple times to append multple
++ * buffer is used. Can be called multiple times to append multiple
+ * buffers.
+ */
+ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+diff -Nur linux-3.14.36/block/Kconfig.iosched linux-openelec/block/Kconfig.iosched
+--- linux-3.14.36/block/Kconfig.iosched 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/block/Kconfig.iosched 2015-05-06 12:05:43.000000000 -0500
+@@ -39,6 +39,27 @@
+ ---help---
+ Enable group IO scheduling in CFQ.
+
++config IOSCHED_BFQ
++ tristate "BFQ I/O scheduler"
++ default n
++ ---help---
++ The BFQ I/O scheduler tries to distribute bandwidth among
++ all processes according to their weights.
++ It aims at distributing the bandwidth as desired, independently of
++ the disk parameters and with any workload. It also tries to
++ guarantee low latency to interactive and soft real-time
++ applications. If compiled built-in (saying Y here), BFQ can
++ be configured to support hierarchical scheduling.
++
++config CGROUP_BFQIO
++ bool "BFQ hierarchical scheduling support"
++ depends on CGROUPS && IOSCHED_BFQ=y
++ default n
++ ---help---
++ Enable hierarchical scheduling in BFQ, using the cgroups
++ filesystem interface. The name of the subsystem will be
++ bfqio.
++
+ choice
+ prompt "Default I/O scheduler"
+ default DEFAULT_CFQ
+@@ -52,6 +73,16 @@
+ config DEFAULT_CFQ
+ bool "CFQ" if IOSCHED_CFQ=y
+
++ config DEFAULT_BFQ
++ bool "BFQ" if IOSCHED_BFQ=y
++ help
++ Selects BFQ as the default I/O scheduler which will be
++ used by default for all block devices.
++ The BFQ I/O scheduler aims at distributing the bandwidth
++ as desired, independently of the disk parameters and with
++ any workload. It also tries to guarantee low latency to
++ interactive and soft real-time applications.
++
+ config DEFAULT_NOOP
+ bool "No-op"
+
+@@ -61,6 +92,7 @@
+ string
+ default "deadline" if DEFAULT_DEADLINE
+ default "cfq" if DEFAULT_CFQ
++ default "bfq" if DEFAULT_BFQ
+ default "noop" if DEFAULT_NOOP
+
+ endmenu
+diff -Nur linux-3.14.36/block/Makefile linux-openelec/block/Makefile
+--- linux-3.14.36/block/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/block/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -16,6 +16,7 @@
+ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
+ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
+ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
++obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
+
+ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+ obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
+diff -Nur linux-3.14.36/crypto/blkcipher.c linux-openelec/crypto/blkcipher.c
+--- linux-3.14.36/crypto/blkcipher.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/crypto/blkcipher.c 2015-05-06 12:05:43.000000000 -0500
+@@ -70,14 +70,12 @@
+ return max(start, end_page);
+ }
+
+-static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
+- struct blkcipher_walk *walk,
++static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
+ unsigned int bsize)
+ {
+ u8 *addr;
+- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+
+- addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
++ addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+ addr = blkcipher_get_spot(addr, bsize);
+ scatterwalk_copychunks(addr, &walk->out, bsize, 1);
+ return bsize;
+@@ -105,7 +103,6 @@
+ int blkcipher_walk_done(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk, int err)
+ {
+- struct crypto_blkcipher *tfm = desc->tfm;
+ unsigned int nbytes = 0;
+
+ if (likely(err >= 0)) {
+@@ -117,7 +114,7 @@
+ err = -EINVAL;
+ goto err;
+ } else
+- n = blkcipher_done_slow(tfm, walk, n);
++ n = blkcipher_done_slow(walk, n);
+
+ nbytes = walk->total - n;
+ err = 0;
+@@ -136,7 +133,7 @@
+ }
+
+ if (walk->iv != desc->info)
+- memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
++ memcpy(desc->info, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+@@ -226,22 +223,20 @@
+ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk)
+ {
+- struct crypto_blkcipher *tfm = desc->tfm;
+- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+ unsigned int bsize;
+ unsigned int n;
+ int err;
+
+ n = walk->total;
+- if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
++ if (unlikely(n < walk->cipher_blocksize)) {
+ desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+ return blkcipher_walk_done(desc, walk, -EINVAL);
+ }
+
+ walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+ BLKCIPHER_WALK_DIFF);
+- if (!scatterwalk_aligned(&walk->in, alignmask) ||
+- !scatterwalk_aligned(&walk->out, alignmask)) {
++ if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
++ !scatterwalk_aligned(&walk->out, walk->alignmask)) {
+ walk->flags |= BLKCIPHER_WALK_COPY;
+ if (!walk->page) {
+ walk->page = (void *)__get_free_page(GFP_ATOMIC);
+@@ -250,12 +245,12 @@
+ }
+ }
+
+- bsize = min(walk->blocksize, n);
++ bsize = min(walk->walk_blocksize, n);
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+- err = blkcipher_next_slow(desc, walk, bsize, alignmask);
++ err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
+ goto set_phys_lowmem;
+ }
+
+@@ -277,28 +272,26 @@
+ return err;
+ }
+
+-static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
+- struct crypto_blkcipher *tfm,
+- unsigned int alignmask)
+-{
+- unsigned bs = walk->blocksize;
+- unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
+- unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+- unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+- (alignmask + 1);
++static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
++{
++ unsigned bs = walk->walk_blocksize;
++ unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
++ unsigned int size = aligned_bs * 2 +
++ walk->ivsize + max(aligned_bs, walk->ivsize) -
++ (walk->alignmask + 1);
+ u8 *iv;
+
+- size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
++ size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+ walk->buffer = kmalloc(size, GFP_ATOMIC);
+ if (!walk->buffer)
+ return -ENOMEM;
+
+- iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
++ iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+ iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+- iv = blkcipher_get_spot(iv, ivsize);
++ iv = blkcipher_get_spot(iv, walk->ivsize);
+
+- walk->iv = memcpy(iv, walk->iv, ivsize);
++ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+ }
+
+@@ -306,7 +299,10 @@
+ struct blkcipher_walk *walk)
+ {
+ walk->flags &= ~BLKCIPHER_WALK_PHYS;
+- walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->cipher_blocksize = walk->walk_blocksize;
++ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
++ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
+ return blkcipher_walk_first(desc, walk);
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
+@@ -315,7 +311,10 @@
+ struct blkcipher_walk *walk)
+ {
+ walk->flags |= BLKCIPHER_WALK_PHYS;
+- walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->cipher_blocksize = walk->walk_blocksize;
++ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
++ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
+ return blkcipher_walk_first(desc, walk);
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
+@@ -323,9 +322,6 @@
+ static int blkcipher_walk_first(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk)
+ {
+- struct crypto_blkcipher *tfm = desc->tfm;
+- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+-
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
+@@ -335,8 +331,8 @@
+
+ walk->buffer = NULL;
+ walk->iv = desc->info;
+- if (unlikely(((unsigned long)walk->iv & alignmask))) {
+- int err = blkcipher_copy_iv(walk, tfm, alignmask);
++ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
++ int err = blkcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+@@ -353,11 +349,28 @@
+ unsigned int blocksize)
+ {
+ walk->flags &= ~BLKCIPHER_WALK_PHYS;
+- walk->blocksize = blocksize;
++ walk->walk_blocksize = blocksize;
++ walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
++ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
+ return blkcipher_walk_first(desc, walk);
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
+
++int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
++ struct blkcipher_walk *walk,
++ struct crypto_aead *tfm,
++ unsigned int blocksize)
++{
++ walk->flags &= ~BLKCIPHER_WALK_PHYS;
++ walk->walk_blocksize = blocksize;
++ walk->cipher_blocksize = crypto_aead_blocksize(tfm);
++ walk->ivsize = crypto_aead_ivsize(tfm);
++ walk->alignmask = crypto_aead_alignmask(tfm);
++ return blkcipher_walk_first(desc, walk);
++}
++EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
++
+ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+ {
+diff -Nur linux-3.14.36/crypto/tcrypt.c linux-openelec/crypto/tcrypt.c
+--- linux-3.14.36/crypto/tcrypt.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/crypto/tcrypt.c 2015-05-06 12:05:43.000000000 -0500
+@@ -33,6 +33,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/timex.h>
+ #include <linux/interrupt.h>
++#include <linux/sched.h>
+ #include "tcrypt.h"
+ #include "internal.h"
+
+@@ -447,6 +448,7 @@
+ goto out;
+ }
+
++ schedule();
+ printk("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+@@ -713,6 +715,7 @@
+ if (speed[i].klen)
+ crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
++ schedule();
+ printk(KERN_INFO "test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+@@ -953,6 +956,7 @@
+ break;
+ }
+
++ schedule();
+ pr_info("test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+@@ -1118,6 +1122,7 @@
+ goto out_free_req;
+ }
+
++ schedule();
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+@@ -1199,6 +1204,7 @@
+ printk("alg %s ", *name);
+ printk(crypto_has_alg(*name, 0, 0) ?
+ "found\n" : "not found\n");
++ schedule();
+ name++;
+ }
+ }
+diff -Nur linux-3.14.36/Documentation/ABI/testing/sysfs-class-net-statistics linux-openelec/Documentation/ABI/testing/sysfs-class-net-statistics
+--- linux-3.14.36/Documentation/ABI/testing/sysfs-class-net-statistics 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/ABI/testing/sysfs-class-net-statistics 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,201 @@
++What: /sys/class/<iface>/statistics/collisions
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of collisions seen by this network device.
++ This value might not be relevant with all MAC layers.
++
++What: /sys/class/<iface>/statistics/multicast
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of multicast packets received by this
++ network device.
++
++What: /sys/class/<iface>/statistics/rx_bytes
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of bytes received by this network device.
++ See the network driver for the exact meaning of when this
++ value is incremented.
++
++What: /sys/class/<iface>/statistics/rx_compressed
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of compressed packets received by this
++ network device. This value might only be relevant for interfaces
++ that support packet compression (e.g: PPP).
++
++What: /sys/class/<iface>/statistics/rx_crc_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets received with a CRC (FCS) error
++ by this network device. Note that the specific meaning might
++ depend on the MAC layer used by the interface.
++
++What: /sys/class/<iface>/statistics/rx_dropped
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets received by the network device
++ but dropped, that are not forwarded to the upper layers for
++ packet processing. See the network driver for the exact
++ meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_fifo_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of receive FIFO errors seen by this
++ network device. See the network driver for the exact
++ meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_frame_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of received frames with error, such as
++ alignment errors. Note that the specific meaning depends on
++ on the MAC layer protocol used. See the network driver for
++ the exact meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_length_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of received error packet with a length
++ error, oversized or undersized. See the network driver for the
++ exact meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_missed_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of received packets that have been missed
++ due to lack of capacity in the receive side. See the network
++ driver for the exact meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_over_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of received packets that are oversized
++ compared to what the network device is configured to accept
++ (e.g: larger than MTU). See the network driver for the exact
++ meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_packets
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the total number of good packets received by this
++ network device.
++
++What: /sys/class/<iface>/statistics/tx_aborted_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets that have been aborted
++ during transmission by a network device (e.g: because of
++ a medium collision). See the network driver for the exact
++ meaning of this value.
++
++What: /sys/class/<iface>/statistics/tx_bytes
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of bytes transmitted by a network
++ device. See the network driver for the exact meaning of this
++ value, in particular whether this accounts for all successfully
++ transmitted packets or all packets that have been queued for
++ transmission.
++
++What: /sys/class/<iface>/statistics/tx_carrier_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets that could not be transmitted
++ because of carrier errors (e.g: physical link down). See the
++ network driver for the exact meaning of this value.
++
++What: /sys/class/<iface>/statistics/tx_compressed
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of transmitted compressed packets. Note
++ this might only be relevant for devices that support
++ compression (e.g: PPP).
++
++What: /sys/class/<iface>/statistics/tx_dropped
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets dropped during transmission.
++ See the driver for the exact reasons as to why the packets were
++ dropped.
++
++What: /sys/class/<iface>/statistics/tx_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets in error during transmission by
++ a network device. See the driver for the exact reasons as to
++ why the packets were dropped.
++
++What: /sys/class/<iface>/statistics/tx_fifo_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets having caused a transmit
++ FIFO error. See the driver for the exact reasons as to why the
++ packets were dropped.
++
++What: /sys/class/<iface>/statistics/tx_heartbeat_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets transmitted that have been
++ reported as heartbeat errors. See the driver for the exact
++ reasons as to why the packets were dropped.
++
++What: /sys/class/<iface>/statistics/tx_packets
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets transmitted by a network
++ device. See the driver for whether this reports the number of all
++ attempted or successful transmissions.
++
++What: /sys/class/<iface>/statistics/tx_window_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets not successfully transmitted
++ due to a window collision. The specific meaning depends on the
++ MAC layer used. On Ethernet this is usually used to report
++ late collisions errors.
+diff -Nur linux-3.14.36/Documentation/arm64/booting.txt linux-openelec/Documentation/arm64/booting.txt
+--- linux-3.14.36/Documentation/arm64/booting.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/arm64/booting.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -111,8 +111,14 @@
+ - Caches, MMUs
+ The MMU must be off.
+ Instruction cache may be on or off.
+- Data cache must be off and invalidated.
+- External caches (if present) must be configured and disabled.
++ The address range corresponding to the loaded kernel image must be
++ cleaned to the PoC. In the presence of a system cache or other
++ coherent masters with caches enabled, this will typically require
++ cache maintenance by VA rather than set/way operations.
++ System caches which respect the architected cache maintenance by VA
++ operations must be configured and may be enabled.
++ System caches which do not respect architected cache maintenance by VA
++ operations (not recommended) must be configured and disabled.
+
+ - Architected timers
+ CNTFRQ must be programmed with the timer frequency and CNTVOFF must
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/arm/imx/busfreq-imx6.txt linux-openelec/Documentation/devicetree/bindings/arm/imx/busfreq-imx6.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/arm/imx/busfreq-imx6.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/arm/imx/busfreq-imx6.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,64 @@
++Freescale Busfreq driver
++
++It is a generic driver that manages the frequency of the DDR, AHB and AXI buses in the iMX6x architecture.
++It works for both SMP and UP systems and for both DDR3 and LPDDR2 memory types.
++
++Required properties are listed below:
++- compatible: should be "fsl,imx6_busfreq"
++- clocks: Lists the various clocks used by the busfreq driver
++- interrupts - Lists the interrupts used by the busfreq driver. This is needed only for SMP architecutre.
++- fsl,max_ddr_freq - The max ddr freq for this chip
++
++Examples:
++For SOC imx6q.dtsi:
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks 171>, <&clks 6>, <&clks 11>, <&clks 104>, <&clks 172>, <&clks 58>,
++ <&clks 18>, <&clks 60>, <&clks 20>, <&clks 3>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc";
++ interrupts = <0 107 0x04>, <0 112 0x4>, <0 113 0x4>, <0 114 0x4>;
++ interrupt-names = "irq_busfreq_0", "irq_busfreq_1", "irq_busfreq_2", "irq_busfreq_3";
++ fsl,max_ddr_freq = <528000000>;
++ };
++
++The Freescale Busfreq driver supports the following setpoints for the DDR freq:
++enum bus_freq_mode {
++ BUS_FREQ_HIGH, -> The max freq the SOC supports
++ BUS_FREQ_MED, -> Medium setpoint (ex 400MHz for DDR3 when the max is 528MHz)
++ BUS_FREQ_AUDIO, -> Audio playback freq (50MHz)
++ BUS_FREQ_LOW, -> Low power IDLE freq (24MHz)
++};
++
++Currently the Freescale Busfreq driver implementation requires drivers to call the following APIs:
++1. request_bus_freq(enum bus_freq_mode):
++ The driver is requesting the system and ddr freq to be set to the requested value. The driver should call this
++ API before it even enables its clocks.
++
++2. release_bus_freq(enum bus_freq_mode):
++ The driver no longer needs the system and ddr freq at the required value. The driver should call this API after
++ its work is done and it has disabled its clocks.
++
++Examples:
++In the IPU driver, the requesting and releasing of the required bus frequency is tied into the runtime PM implementation:
++
++int ipu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ dev_dbg(dev, "ipu busfreq high release.\n");
++
++ return 0;
++}
++
++int ipu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ dev_dbg(dev, "ipu busfreq high requst.\n");
++
++ return 0;
++}
++
++static const struct dev_pm_ops ipu_pm_ops = {
++ SET_RUNTIME_PM_OPS(ipu_runtime_suspend, ipu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(ipu_suspend, ipu_resume)
++};
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/arm/imx/gpc.txt linux-openelec/Documentation/devicetree/bindings/arm/imx/gpc.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/arm/imx/gpc.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/arm/imx/gpc.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,20 @@
++Freescale imx GPC bindings
++
++Optional properties:
++- fsl,cpu_pupscr_sw2iso: for powering up CPU, number of 32K clock cycle PGC will wait before negating isolation signal.
++- fsl,cpu_pupscr_sw: for powering up CPU, number of 32K clock cycle PGC will wait before asserting isolation signal.
++- fsl,cpu_pdnscr_iso2sw: for powering down CPU, number of ipg clock cycle PGC will wait before negating isolation signal.
++- fsl,cpu_pdnscr_iso: for powering down CPU, number of ipg clock cycle PGC will wait before asserting isolation signal.
++
++These properties are for adjusting the GPC PGC CPU power up/down setting, if there is no such property in dts, then default
++value in GPC PGC registers will be used.
++
++
++Example:
++
++ &gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/arm/pmu.txt linux-openelec/Documentation/devicetree/bindings/arm/pmu.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/arm/pmu.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/arm/pmu.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -17,6 +17,9 @@
+ "arm,arm1176-pmu"
+ "arm,arm1136-pmu"
+ - interrupts : 1 combined interrupt or 1 per core.
++- cluster : a phandle to the cluster to which it belongs
++ If there are more than one cluster with same CPU type
++ then there should be separate PMU nodes per cluster.
+
+ Example:
+
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/ata/ahci-platform.txt linux-openelec/Documentation/devicetree/bindings/ata/ahci-platform.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/ata/ahci-platform.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/ata/ahci-platform.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -4,12 +4,19 @@
+ Each SATA controller should have its own node.
+
+ Required properties:
+-- compatible : compatible list, contains "snps,spear-ahci"
++- compatible : compatible list, contains "snps,spear-ahci",
++ "fsl,imx53-ahci" or "fsl,imx6q-ahci"
+ - interrupts : <interrupt mapping for SATA IRQ>
+ - reg : <registers mapping>
+
+ Optional properties:
+ - dma-coherent : Present if dma operations are coherent
++- clocks : a list of phandle + clock specifier pairs
++- target-supply : regulator for SATA target power
++
++"fsl,imx53-ahci", "fsl,imx6q-ahci" required properties:
++- clocks : must contain the sata, sata_ref and ahb clocks
++- clock-names : must contain "ahb" for the ahb clock
+
+ Example:
+ sata@ffe08000 {
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/clock/imx6q-clock.txt linux-openelec/Documentation/devicetree/bindings/clock/imx6q-clock.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/clock/imx6q-clock.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/clock/imx6q-clock.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -89,8 +89,6 @@
+ gpu3d_shader 74
+ ipu1_podf 75
+ ipu2_podf 76
+- ldb_di0_podf 77
+- ldb_di1_podf 78
+ ipu1_di0_pre 79
+ ipu1_di1_pre 80
+ ipu2_di0_pre 81
+@@ -220,6 +218,20 @@
+ lvds2_sel 205
+ lvds1_gate 206
+ lvds2_gate 207
++ gpt_3m 208
++ video_27m 209
++ ldb_di0_div_7 210
++ ldb_di1_div_7 211
++ ldb_di0_div_sel 212
++ ldb_di1_div_sel 213
++ caam_mem 214
++ caam_aclk 215
++ caam_ipg 216
++ epit1 217
++ epit2 218
++ tzasc2 219
++ lvds1_in 220
++ lvds1_out 221
+
+ Examples:
+
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt linux-openelec/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -47,6 +47,7 @@
+ 20 ASRC
+ 21 ESAI
+ 22 SSI Dual FIFO (needs firmware ver >= 2)
++ 23 HDMI Audio
+
+ The third cell specifies the transfer priority as below.
+
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt linux-openelec/Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,146 @@
++* FSL IPUv3 Display/FB
++
++The FSL IPUv3 is Image Processing Unit version 3, a part of video and graphics
++subsystem in an application processor. The goal of the IPU is to provide
++comprehensive support for the flow of data from an image sensor or/and to a
++display device.
++
++Two IPU units are on the imx6q SOC while only one IPU unit on the imx6dl SOC.
++Each IPU unit has two display interfaces.
++
++For LDB/LVDS panel, there are two LVDS channels(LVDS0 and LVDS1) which can
++transfer video data, these two channels can be used as
++split/dual/single/separate mode.
++-split mode means display data from DI0 or DI1 will send to both channels
++ LVDS0+LVDS1.
++-dual mode means display data from DI0 or DI1 will be duplicated on LVDS0
++ and LVDS1, it said, LVDS0 and LVDS1 has the same content.
++-single mode means only work for DI0/DI1->LVDS0 or DI0/DI1->LVDS1.
++-separate mode means you can make DI0/DI1->LVDS0 and DI0/DI1->LVDS1 work
++ at the same time.
++ "ldb=spl0/1" -- split mode on DI0/1
++ "ldb=dul0/1" -- dual mode on DI0/1
++ "ldb=sin0/1" -- single mode on LVDS0/1
++ "ldb=sep0/1" -- separate mode begin from LVDS0/1
++
++Required properties for IPU:
++- bypass_reset :Bypass reset to avoid display channel being.
++ stopped by probe since it may start to work in bootloader: 0 or 1.
++- compatible : should be "fsl,imx6q-ipu".
++- reg : the register address range.
++- interrupts : the error and sync interrupts request.
++- clocks : the clock sources that it depends on.
++- clock-names: the related clock names.
++- resets : IPU reset specifier. See reset.txt and fsl,imx-src.txt in
++ Documentation/devicetree/bindings/reset/ for details.
++
++Required properties for fb:
++- compatible : should be "fsl,mxc_sdc_fb".
++- disp_dev : display device: "ldb", "lcd", "hdmi", "mipi_dsi".
++- mode_str : video mode string: "LDB-XGA" or "LDB-1080P60" for ldb,
++ "CLAA-WVGA" for lcd, "TRULY-WVGA" for TRULY mipi_dsi lcd panel,
++ "1920x1080M@60" for hdmi.
++- default_bpp : default bits per pixel: 8/16/24/32
++- int_clk : use internal clock as pixel clock: 0 or 1
++- late_init : to avoid display channel being re-initialized
++ as we've probably setup the channel in bootloader: 0 or 1
++- interface_pix_fmt : display interface pixel format as below:
++ RGB666 IPU_PIX_FMT_RGB666
++ RGB565 IPU_PIX_FMT_RGB565
++ RGB24 IPU_PIX_FMT_RGB24
++ BGR24 IPU_PIX_FMT_BGR24
++ GBR24 IPU_PIX_FMT_GBR24
++ YUV444 IPU_PIX_FMT_YUV444
++ LVDS666 IPU_PIX_FMT_LVDS666
++ YUYV IPU_PIX_FMT_YUYV
++ UYVY IPU_PIX_FMT_UYVY
++ YVYV IPU_PIX_FMT_YVYU
++ VYUY IPU_PIX_FMT_VYUY
++
++Required properties for display:
++- compatible : should be "fsl,lcd" for lcd panel, "fsl,imx6q-ldb" for ldb
++- reg : the register address range if necessary to have.
++- interrupts : the error and sync interrupts if necessary to have.
++- clocks : the clock sources that it depends on if necessary to have.
++- clock-names: the related clock names if necessary to have.
++- ipu_id : ipu id for the first display device: 0 or 1
++- disp_id : display interface id for the first display interface: 0 or 1
++- default_ifmt : save as above display interface pixel format for lcd
++- pinctrl-names : should be "default"
++- pinctrl-0 : should be pinctrl_ipu1_1 or pinctrl_ipu2_1, which depends on the
++ IPU connected.
++- sec_ipu_id : secondary ipu id for the second display device(ldb only): 0 or 1
++- sec_disp_id : secondary display interface id for the second display
++ device(ldb only): 0 or 1
++- ext_ref : reference resistor select for ldb only: 0 or 1
++- mode : ldb mode as below:
++ spl0 LDB_SPL_DI0
++ spl1 LDB_SPL_DI1
++ dul0 LDB_DUL_DI0
++ dul1 LDB_DUL_DI1
++ sin0 LDB_SIN0
++ sin1 LDB_SIN1
++ sep0 LDB_SEP0
++ sep1 LDB_SEP1
++- gpr : the mux controller for the display engine's display interfaces and the display encoder
++ (only valid for mipi dsi now).
++- disp-power-on-supply : the regulator to control display panel's power.
++ (only valid for mipi dsi now).
++- resets : the gpio pin to reset the display device(only valid for mipi display panel now).
++- lcd_panel : the video mode name for the display device(only valid for mipi display panel now).
++- dev_id : the display engine's identity within the system, which intends to replace ipu_id
++ (only valid for mipi dsi now).
++
++Example for IPU:
++ ipu1: ipu@02400000 {
++ compatible = "fsl,imx6q-ipu";
++ reg = <0x02400000 0x400000>;
++ interrupts = <0 6 0x4 0 5 0x4>;
++ clocks = <&clks 130>, <&clks 131>, <&clks 132>,
++ <&clks 39>, <&clks 40>,
++ <&clks 135>, <&clks 136>;
++ clock-names = "bus", "di0", "di1",
++ "di0_sel", "di1_sel",
++ "ldb_di0", "ldb_di1";
++ resets = <&src 2>;
++ bypass_reset = <0>;
++ };
++
++Example for fb:
++ fb0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++
++Example for ldb display:
++ ldb@020e0000 {
++ ipu_id = <1>;
++ disp_id = <0>;
++ ext_ref = <1>;
++ mode = "sep0";
++ sec_ipu_id = <1>;
++ sec_disp_id = <1>;
++ status = "okay";
++ };
++
++Example for mipi dsi display:
++ mipi_dsi: mipi@021e0000 {
++ compatible = "fsl,imx6q-mipi-dsi";
++ reg = <0x021e0000 0x4000>;
++ interrupts = <0 102 0x04>;
++ gpr = <&gpr>;
++ clocks = <&clks 138>, <&clks 204>;
++ clock-names = "mipi_pllref_clk", "mipi_cfg_clk";
++ dev_id = <0>;
++ disp_id = <0>;
++ lcd_panel = "TRULY-WVGA";
++ disp-power-on-supply = <&reg_mipi_dsi_pwr_on>
++ resets = <&mipi_dsi_reset>;
++ status = "okay";
++ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/leds/leds-pwm.txt linux-openelec/Documentation/devicetree/bindings/leds/leds-pwm.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/leds/leds-pwm.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/leds/leds-pwm.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -13,6 +13,8 @@
+ For the pwms and pwm-names property please refer to:
+ Documentation/devicetree/bindings/pwm/pwm.txt
+ - max-brightness : Maximum brightness possible for the LED
++- active-low : (optional) For PWMs where the LED is wired to supply
++ rather than ground.
+ - label : (optional)
+ see Documentation/devicetree/bindings/leds/common.txt
+ - linux,default-trigger : (optional)
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/mailbox/mailbox.txt linux-openelec/Documentation/devicetree/bindings/mailbox/mailbox.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/mailbox/mailbox.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/mailbox/mailbox.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,33 @@
++* Generic Mailbox Controller and client driver bindings
++
++Generic binding to provide a way for Mailbox controller drivers to
++assign appropriate mailbox channel to client drivers.
++
++* Mailbox Controller
++
++Required property:
++- #mbox-cells: Must be at least 1. Number of cells in a mailbox
++ specifier.
++
++Example:
++ mailbox: mailbox {
++ ...
++ #mbox-cells = <1>;
++ };
++
++
++* Mailbox Client
++
++Required property:
++- mbox: List of phandle and mailbox channel specifier.
++
++- mbox-names: List of identifier strings for each mailbox channel
++ required by the client.
++
++Example:
++ pwr_cntrl: power {
++ ...
++ mbox-names = "pwr-ctrl", "rpc";
++ mbox = <&mailbox 0
++ &mailbox 1>;
++ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/mlb/mlb150.txt linux-openelec/Documentation/devicetree/bindings/mlb/mlb150.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/mlb/mlb150.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/mlb/mlb150.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,22 @@
++* Freescale Media Local Bus Host Controller (MLB) for i.MX6Q/DL
++
++The Media Local Bus Host Controller on Freescale i.MX family
++provides an interface for MOST network.
++
++Required properties:
++- compatible : Should be "fsl,<chip>-mlb150"
++- reg : Should contain mlb registers location and length
++- interrupts : Should contain mlb interrupt
++- clocks: Should contain the mlb clock sources
++- clock-names: Should be the names of mlb clock sources
++- iram : phandle pointing to the SRAM device node
++
++Examples:
++mlb@0218c000 {
++ compatible = "fsl,imx6q-mlb150";
++ reg = <0x0218c000 0x4000>;
++ interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
++ clocks = <&clks 139>, <&clks 175>;
++ clock-names = "mlb", "pll8_mlb";
++ iram = <&ocram>;
++};
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/mmc/mmc.txt linux-openelec/Documentation/devicetree/bindings/mmc/mmc.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/mmc/mmc.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/mmc/mmc.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -5,6 +5,8 @@
+ Interpreted by the OF core:
+ - reg: Registers location and length.
+ - interrupts: Interrupts used by the MMC controller.
++- clocks: Clocks needed for the host controller, if any.
++- clock-names: Goes with clocks above.
+
+ Card detection:
+ If no property below is supplied, host native card detect is used.
+@@ -30,6 +32,15 @@
+ - cap-sdio-irq: enable SDIO IRQ signalling on this interface
+ - full-pwr-cycle: full power cycle of the card is supported
+
++Card power and reset control:
++The following properties can be specified for cases where the MMC
++peripheral needs additional reset, regulator and clock lines. It is for
++example common for WiFi/BT adapters to have these separate from the main
++MMC bus:
++ - card-reset-gpios: Specify GPIOs for card reset (reset active low)
++ - card-external-vcc-supply: Regulator to drive (independent) card VCC
++ - clock with name "card_ext_clock": External clock provided to the card
++
+ *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
+ polarity properties, we have to fix the meaning of the "normal" and "inverted"
+ line levels. We choose to follow the SDHCI standard, which specifies both those
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt linux-openelec/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -71,6 +71,13 @@
+ name for integer state ID 0, list entry 1 for state ID 1, and
+ so on.
+
++pinctrl-assert-gpios:
++ List of phandles, each pointing at a GPIO which is used by some
++ board design to steer pins between two peripherals on the board.
++ It plays like a board level pin multiplexer to choose different
++ functions for given pins by pulling up/down the GPIOs. See
++ bindings/gpio/gpio.txt for details of how to specify GPIO.
++
+ For example:
+
+ /* For a client device requiring named states */
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/reset/gpio-reset.txt linux-openelec/Documentation/devicetree/bindings/reset/gpio-reset.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/reset/gpio-reset.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/reset/gpio-reset.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,35 @@
++GPIO reset controller
++=====================
++
++A GPIO reset controller controls a single GPIO that is connected to the reset
++pin of a peripheral IC. Please also refer to reset.txt in this directory for
++common reset controller binding usage.
++
++Required properties:
++- compatible: Should be "gpio-reset"
++- reset-gpios: A gpio used as reset line. The gpio specifier for this property
++ depends on the gpio controller that provides the gpio.
++- #reset-cells: 0, see below
++
++Optional properties:
++- reset-delay-us: delay in microseconds. The gpio reset line will be asserted for
++ this duration to reset.
++- initially-in-reset: boolean. If not set, the initial state should be a
++ deasserted reset line. If this property exists, the
++ reset line should be kept in reset.
++
++example:
++
++sii902x_reset: gpio-reset {
++ compatible = "gpio-reset";
++ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
++ reset-delay-us = <10000>;
++ initially-in-reset;
++ #reset-cells = <0>;
++};
++
++/* Device with nRESET pin connected to GPIO5_0 */
++sii902x@39 {
++ /* ... */
++ resets = <&sii902x_reset>; /* active-low GPIO5_0, 10 ms delay */
++};
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/sound/cs42888.txt linux-openelec/Documentation/devicetree/bindings/sound/cs42888.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/sound/cs42888.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/sound/cs42888.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,29 @@
++CS42888 audio CODEC
++
++This device supports I2C only.
++
++Required properties:
++
++ - compatible: "cirrus,cs42888"
++ - reg: the I2C address of the device.
++ - clocks: Phandle to the clock node.
++ - clock-names: Contains name for each entry in clocks.
++ "codec_osc" : the external oscillator.
++ "esai" : the hckt clock from esai.
++ - <name>-supply: Phandle to the regulator <name>.
++
++Note: cs42888 needs a regulators node and a clocks node.
++
++Example:
++In this case, the clock is external oscillator.
++
++codec: cs42888@48 {
++ compatible = "cirrus,cs42888";
++ reg = <0x048>;
++ clocks = <&codec_osc 0>;
++ clock-names = "codec_osc";
++ VA-supply = <&reg_audio>;
++ VD-supply = <&reg_audio>;
++ VLS-supply = <&reg_audio>;
++ VLC-supply = <&reg_audio>;
++};
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/sound/fsl-asrc-p2p.txt linux-openelec/Documentation/devicetree/bindings/sound/fsl-asrc-p2p.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/sound/fsl-asrc-p2p.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/sound/fsl-asrc-p2p.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,23 @@
++* Freescale Asynchronous Sample Rate Converter (ASRC)
++
++This document is for asrc p2p node. p2p is one of asrc mode. asrc p2p depend on
++MXC_ASRC.
++
++Required properties:
++ - compatible: Should be "fsl,<chip>-asrc-p2p".
++ - fsl,output-rate: the output rate of asrc p2p. which can be <32000> to <192000>,
++ - fsl,output-width: the output width of asrc p2p. which can be <16>, <24>.
++ - fsl,asrc-dma-rx-events: The rx dma event of the asrc, <a b c> corresponding
++ to 3 pair of asrc.
++ - fsl,asrc-dma-tx-events: The tx dma event of the esai, <a b c> corresponding
++ to 3 pair of asrc.
++
++Example:
++asrc_p2p: asrc_p2p {
++ compatible = "fsl,imx6q-asrc-p2p";
++ fsl,output-rate = <48000>;
++ fsl,output-width = <16>;
++ fsl,asrc-dma-rx-events = <17 18 19>;
++ fsl,asrc-dma-tx-events = <20 21 22>;
++ status = "okay";
++};
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/sound/imx-audio-cs42888.txt linux-openelec/Documentation/devicetree/bindings/sound/imx-audio-cs42888.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/sound/imx-audio-cs42888.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/sound/imx-audio-cs42888.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,25 @@
++Freescale i.MX audio complex with CS42888 codec
++
++Required properties:
++- compatible : "fsl,imx-audio-cs42888"
++- model : The user-visible name of this sound complex
++- esai-controller : The phandle of the i.MX SSI controller
++- audio-codec : The phandle of the CS42888 audio codec
++
++Optional properties:
++- asrc-controller : The phandle of the i.MX ASRC controller
++- audio-routing : A list of the connections between audio components.
++ Each entry is a pair of strings, the first being the connection's sink,
++ the second being the connection's source. Valid names could be power
++ supplies, CS42888 pins, and the jacks on the board:
++
++Example:
++
++sound {
++ compatible = "fsl,imx6q-sabresd-wm8962",
++ "fsl,imx-audio-wm8962";
++ model = "cs42888-audio";
++ esai-controller = <&esai>;
++ asrc-controller = <&asrc_p2p>;
++ audio-codec = <&codec>;
++};
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt linux-openelec/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -24,6 +24,12 @@
+ Note: The AUDMUX port numbering should start at 1, which is consistent with
+ hardware manual.
+
++Optional properties:
++- hp-det-gpios : The gpio pin to detect plug in/out event that happens to
++ Headphone jack.
++- mic-det-gpios: The gpio pin to detect plug in/out event that happens to
++ Microphone jack.
++
+ Example:
+
+ sound {
+@@ -43,4 +49,6 @@
+ "DMICDAT", "DMIC";
+ mux-int-port = <2>;
+ mux-ext-port = <3>;
++ hp-det-gpios = <&gpio7 8 1>;
++ mic-det-gpios = <&gpio1 9 1>;
+ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/sound/wm8962.txt linux-openelec/Documentation/devicetree/bindings/sound/wm8962.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/sound/wm8962.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/sound/wm8962.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -13,6 +13,14 @@
+ of R51 (Class D Control 2) gets set, indicating that the speaker is
+ in mono mode.
+
++ - amic-mono: This is a boolean property. If present, indicating that the
++ analog micphone is hardware mono input, the driver would enable monomix
++ for it.
++
++ - dmic-mono: This is a boolean property. If present, indicating that the
++ digital micphone is hardware mono input, the driver would enable monomix
++ for it.
++
+ - mic-cfg : Default register value for R48 (Additional Control 4).
+ If absent, the default should be the register default.
+
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/usb/ci-hdrc-imx.txt linux-openelec/Documentation/devicetree/bindings/usb/ci-hdrc-imx.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/usb/ci-hdrc-imx.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/usb/ci-hdrc-imx.txt 2015-07-24 18:03:30.212842002 -0500
+@@ -18,6 +18,8 @@
+ - vbus-supply: regulator for vbus
+ - disable-over-current: disable over current detect
+ - external-vbus-divider: enables off-chip resistor divider for Vbus
++- clocks: phandle to the clock that drives the USB hub
++- clock-names: must be "phy"
+
+ Examples:
+ usb@02184000 { /* USB OTG */
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/usb/mxs-phy.txt linux-openelec/Documentation/devicetree/bindings/usb/mxs-phy.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/usb/mxs-phy.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/devicetree/bindings/usb/mxs-phy.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -1,13 +1,16 @@
+ * Freescale MXS USB Phy Device
+
+ Required properties:
+-- compatible: Should be "fsl,imx23-usbphy"
++- compatible: "fsl,imx23-usbphy" for imx23 and imx28, "fsl,imx6q-usbphy"
++for imx6dq and imx6dl, "fsl,imx6sl-usbphy" for imx6sl
+ - reg: Should contain registers location and length
+ - interrupts: Should contain phy interrupt
++- fsl,anatop: phandle for anatop register, it is only for imx6 SoC series
+
+ Example:
+ usbphy1: usbphy@020c9000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020c9000 0x1000>;
+ interrupts = <0 44 0x04>;
++ fsl,anatop = <&anatop>;
+ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/video/fsl,csi-v4l2-capture.txt linux-openelec/Documentation/devicetree/bindings/video/fsl,csi-v4l2-capture.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/video/fsl,csi-v4l2-capture.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/video/fsl,csi-v4l2-capture.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,61 @@
++* Freescale CMOS Sensor Interface (CSI) V4L2 Capture
++
++Required properties for CSI
++- compatible: "fsl,<soc>-csi". Supported chip includes imx6sl
++- reg: Address and length of the register set for CSI
++- interrupts: Should contain CSI interrupts
++
++Required properties for v4l2_capture
++- compatible: should be "fsl,<soc>-csi-v4l2", supported socs include imx6sl
++
++Required properties for sensor
++- compatible: "<vendor>,<sensor>"
++ please check the supported sensor in the Supported Sensor fields.
++- reg: sensor I2C slave address
++- pinctrl-names: should be "default" for parallel sensor
++- pinctrl-0: should depend on the connection between sensor and i.MX
++ connection between sensor and i.MX could be only legacy parallel on i.MX6SL
++- clocks: should be the clock source provided to sensor.
++- clock-names: should be "csi_mclk"
++- AVDD-supply: set according to the board.
++- DVDD-supply: set according to the board.
++- pwn-gpios: set according to the board.
++- rst-gpios: set according to the board.
++- csi_id: csi id for v4l2 capture device
++ should be 0 for i.MX6SL
++- mclk: should the value of mclk clock send out the sensor. unit is Hz.
++- mclk_source: should be 0 for i.MX6SL
++
++Supported Sensor
++- ovti, ov5640
++
++Example for CSI:
++ csi: csi@020e4000 {
++ compatible = "fsl,imx6sl-csi";
++ reg = <0x020e4000 0x4000>;
++ interrupts = <0 7 0x04>;
++ status = "disabled";
++ };
++
++Examples for v4l2_capture:
++ csi_v4l2_cap {
++ compatible = "fsl,imx6q-v4l2-capture";
++ status = "okay";
++ };
++
++Examples for sensors:
++ ov564x: ov564x@3c {
++ compatible = "ovti,ov564x";
++ reg = <0x3c>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_csi_0>;
++ clocks = <&clks IMX6SL_CLK_CSI>;
++ clock-names = "csi_mclk";
++ AVDD-supply = <&vgen6_reg>; /* 2.8v */
++ DVDD-supply = <&vgen2_reg>; /* 1.5v*/
++ pwn-gpios = <&gpio1 25 1>;
++ rst-gpios = <&gpio1 26 0>;
++ csi_id = <0>;
++ mclk = <24000000>;
++ mclk_source = <0>;
++ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/video/fsl,mipi-csi2.txt linux-openelec/Documentation/devicetree/bindings/video/fsl,mipi-csi2.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/video/fsl,mipi-csi2.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/video/fsl,mipi-csi2.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,42 @@
++* Freescale MIPI CSI2 Controller for i.MX6DQ/i.MX6SDL
++
++Required properties for mipi csi2 controller:
++- compatible: should be "fsl,imx6q-mipi-csi2"
++- reg: <base addr, range> contains mipi csi2 register base address and range
++- interrupts: <type num flag> where type is a interrupt type, num is the
++ interrupt number and flag is a field that level/trigger information for
++ the interrupt.
++- clocks: the clock sources that mipi csi2 depends on.
++- clock-names: the name is related to the clock source one by one.
++- status: should be set to "disable".
++
++Required properties for mipi csi2 on specified board:
++- ipu_id: ipu id which mipi csi2 connected to.
++ should be 0 or 1 for i.MX6DQ; should be 0 for i.MX6SDL
++- csi_id: csi id which mipi csi2 connected to.
++ should be 0 or 1 for i.MX6DQ/i.MX6SDL
++- v_channel: virtual channel which send to MIPI CSI2 controller
++ should keep consistent with the input MIPI signal.
++- lanes: data lanes of input MIPI signal. The maximum data lanes is 4.
++ should keep consistent with the input MIPI signal.
++- status: should be set to "okay".
++
++Examples:
++for SOC imx6qdl.dtsi:
++ mipi_csi@021dc000 {
++ compatible = "fsl,imx6q-mipi-csi2";
++ reg = <0x021dc000 0x4000>;
++ interrupts = <0 100 0x04>, <0 101 0x04>;
++ clocks = <&clks 138>, <&clks 53>, <&clks 204>;
++ clock-names = "dphy_clk", "pixel_clk", "cfg_clk";
++ status = "disabled";
++ };
++
++for board imx6qdl-sabresd.dtsi:
++ mipi_csi@021dc000 {
++ status = "okay";
++ ipu_id = <0>;
++ csi_id = <1>;
++ v_channel = <0>;
++ lanes = <2>;
++ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/video/fsl,pxp.txt linux-openelec/Documentation/devicetree/bindings/video/fsl,pxp.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/video/fsl,pxp.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/video/fsl,pxp.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,30 @@
++* Freescale PxP Controller for i.MX6DL, i.MX6SL
++
++Required properties for PxP controller:
++- compatible: should be "fsl,<soc>-pxp-dma"
++- reg: <base addr, range> contains pxp register base address and range
++- interrupts: <type num flag> where type is an interrupt type, num is the
++ interrupt number and flag is a field that level/trigger information for
++ the interrupt.
++- clocks: the clock sources that pxp depends on.
++- clock-names: the name is related to the clock source
++
++Required properties for pxp on specified board:
++- status: should be set to "okay" if want to use PxP
++
++Examples:
++for SOC imx6dl.dtsi:
++ pxp@020f0000 {
++ compatible = "fsl,imx6dl-pxp-dma";
++ reg = <0x020f0000 0x4000>;
++ interrupts = <0 98 0x04>;
++ clocks = <&clks 133>;
++ clock-names = "pxp-axi";
++ status = "disabled";
++ };
++
++
++for board imx6dl-sabresd.dts:
++ &pxp {
++ status = "okay";
++ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/video/fsl,v4l2-capture.txt linux-openelec/Documentation/devicetree/bindings/video/fsl,v4l2-capture.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/video/fsl,v4l2-capture.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/video/fsl,v4l2-capture.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,102 @@
++* Freescale V4L2 Capture for i.MX6DQ/i.MX6SDL
++
++Required board properties for IPUv3 capture:
++- clocks: should include the clock provided by i.MX6 to sensor
++- clock-names: sensor clock's name should be "ipux_csiy"
++ x should be 1 or 2 for i.MX6DQ; should be 1 for i.MX6SDL
++ y is 0 or 1 for i.MX6DQ/i.MX6SDL
++Note: other detailed information for IPUv3, please refer to
++Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt
++
++Required properties for v4l2_capture
++- compatible: should be "fsl,imx6q-v4l2-capture"
++- ipu_id: ipu id for v4l2 capture device
++ should be 0 or 1 for i.MX6DQ; should be 0 for i.MX6SDL
++- csi_id: csi id for v4l2 capture device
++ should be 0 or 1 for i.MX6DQ/i.MX6SDL
++- mclk_source: should be 0 or 1. two mclk sources at most now
++- status: should be set to "okay" to enable this device
++
++Required properties for sensor
++- compatible: "<vendor>,<sensor>"
++ please check the supported sensor in the Supported Sensor fields.
++- reg: sensor I2C slave address
++- pinctrl-names: should be "default" for parallel sensor
++- pinctrl-0: should depend on the connection between sensor and i.MX
++ connection between sensor and i.MX could be MIPI-CSI2 or legacy parallel
++- clocks: should be the clock source provided to sensor.
++- clock-names: should be "csi_mclk"
++- DOVDD-supply: set according to the board.
++- AVDD-supply: set according to the board.
++- DVDD-supply: set according to the board.
++- pwn-gpios: set according to the board.
++- rst-gpios: set according to the board.
++- csi_id: csi id for v4l2 capture device
++ should be 0 or 1 for i.MX6DQ/i.MX6SDL.
++- mclk: should the value of mclk clock send out the sensor. unit is Hz.
++- mclk_source: should be 0 or 1 and should be the same as the setting in
++ v4l2_capture.
++- cvbs: 1 for CVBS input, 0 YPbPr input. This property is only needed for
++ adv7180 tv decoder.
++
++Supported Sensor
++- ov5640
++- ov5642
++- ov5640_mipi
++- adv7180
++
++
++Example for IPUv3 including capture settings on imx6q-sabresd.dts:
++ ipu1: ipu@02400000 { /* IPU1 */
++ compatible = "fsl,imx6q-ipuv3";
++ reg = <0x02400000 0x400000>;
++ interrupts = <0 5 0x04>, < 0 6 0x04>;
++ clocks = <&clks 130>, <&clks 131>, <&clks 132>, <&clks 39>, <&clks 40>, <&clks 169>;
++ clock-names = "ipu1", "ipu1_di0", "ipu1_di1", "ipu1_di0_sel", "ipu1_di1_sel", "ipu1_csi0";
++ status = "disabled";
++ };
++
++Examples for v4l2_capture:
++ v4l2_cap {
++ compatible = "fsl,imx6q-v4l2-capture";
++ ipu_id = <0>;
++ csi_id = <0>;
++ mclk_source = <0>;
++ status = "okay";
++ };
++
++Examples for sensors:
++ ov5642: ov5642@3c {
++ compatible = "ovti,ov5642";
++ reg = <0x3c>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ipu1_2>;
++ clocks = <&clks 201>;
++ clock-names = "csi_mclk";
++ DOVDD-supply = <&vgen4_reg>; /* 1.8v */
++ AVDD-supply = <&vgen3_reg>; /* 2.8v, on rev C board is VGEN3 */
++ DVDD-supply = <&vgen2_reg>; /* 1.5v*/
++ pwn-gpios = <&gpio1 16 1>; /* active low: SD1_DAT0 */
++ rst-gpios = <&gpio1 17 0>; /* active high: SD1_DAT1 */
++ csi_id = <0>;
++ mclk = <24000000>;
++ mclk_source = <0>;
++ };
++
++ adv7180: adv7180@21 {
++ compatible = "adv,adv7180";
++ reg = <0x21>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ipu1_3>;
++ clocks = <&clks 201>;
++ clock-names = "csi_mclk";
++ DOVDD-supply = <&reg_3p3v>; /* 3.3v, enabled via 2.8 VGEN6 */
++ AVDD-supply = <&reg_3p3v>; /* 1.8v */
++ DVDD-supply = <&reg_3p3v>; /* 1.8v */
++ PVDD-supply = <&reg_3p3v>; /* 1.8v */
++ pwn-gpios = <&max7310_b 2 0>;
++ csi_id = <0>;
++ mclk = <24000000>;
++ mclk_source = <0>;
++ cvbs = <1>;
++ };
+diff -Nur linux-3.14.36/Documentation/devicetree/bindings/video/mxc_hdmi_video.txt linux-openelec/Documentation/devicetree/bindings/video/mxc_hdmi_video.txt
+--- linux-3.14.36/Documentation/devicetree/bindings/video/mxc_hdmi_video.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/devicetree/bindings/video/mxc_hdmi_video.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,20 @@
++Device-Tree bindings for hdmi video driver
++
++Required properties:
++- compatible: value should be "fsl,imx6q-hdmi-video".
++- fsl,hdcp: define the property in dts, hdmi driver will initialize for hdcp,
++  otherwise the hdcp function will not be supported.
++- fsl,phy_reg_vlev: hdmi phy register, Voltage Level Control Register offset 0x0e,
++ adjust hdmi phy signal voltage level.
++- fsl,phy_reg_cksymtx: hdmi phy register, clock symbol and transmitter control
++ register offset 0x09, adjust hdmi signal pre-emphasis.
++
++Example:
++
++ hdmi_video {
++ compatible = "fsl,imx6q-hdmi-video";
++ fsl,hdcp;
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ };
++
+diff -Nur linux-3.14.36/Documentation/filesystems/hfsplus.txt linux-openelec/Documentation/filesystems/hfsplus.txt
+--- linux-3.14.36/Documentation/filesystems/hfsplus.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/filesystems/hfsplus.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -56,4 +56,4 @@
+
+ kernel source: <file:fs/hfsplus>
+
+-Apple Technote 1150 http://developer.apple.com/technotes/tn/tn1150.html
++Apple Technote 1150 https://developer.apple.com/legacy/library/technotes/tn/tn1150.html
+diff -Nur linux-3.14.36/Documentation/kernel-parameters.txt linux-openelec/Documentation/kernel-parameters.txt
+--- linux-3.14.36/Documentation/kernel-parameters.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/kernel-parameters.txt 2015-07-24 18:03:29.364842002 -0500
+@@ -603,8 +603,11 @@
+ Also note the kernel might malfunction if you disable
+ some critical bits.
+
+- cma=nn[MG] [ARM,KNL]
+- Sets the size of kernel global memory area for contiguous
++ cma=nn[MG]@[start[MG][-end[MG]]]
++ [ARM,X86,KNL]
++ Sets the size of kernel global memory area for
++ contiguous memory allocations and optionally the
++ placement constraint by the physical address range of
+ memory allocations. For more information, see
+ include/linux/dma-contiguous.h
+
+diff -Nur linux-3.14.36/Documentation/kernel-parameters.txt.orig linux-openelec/Documentation/kernel-parameters.txt.orig
+--- linux-3.14.36/Documentation/kernel-parameters.txt.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/Documentation/kernel-parameters.txt.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3610 @@
++ Kernel Parameters
++ ~~~~~~~~~~~~~~~~~
++
++The following is a consolidated list of the kernel parameters as implemented
++(mostly) by the __setup() macro and sorted into English Dictionary order
++(defined as ignoring all punctuation and sorting digits before letters in a
++case insensitive manner), and with descriptions where known.
++
++Module parameters for loadable modules are specified only as the
++parameter name with optional '=' and value as appropriate, such as:
++
++ modprobe usbcore blinkenlights=1
++
++Module parameters for modules that are built into the kernel image
++are specified on the kernel command line with the module name plus
++'.' plus parameter name, with '=' and value if appropriate, such as:
++
++ usbcore.blinkenlights=1
++
++Hyphens (dashes) and underscores are equivalent in parameter names, so
++ log_buf_len=1M print-fatal-signals=1
++can also be entered as
++ log-buf-len=1M print_fatal_signals=1
++
++
++This document may not be entirely up to date and comprehensive. The command
++"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
++module. Loadable modules, after being loaded into the running kernel, also
++reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
++parameters may be changed at runtime by the command
++"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
++
++The parameters listed below are only valid if certain kernel build options were
++enabled and if respective hardware is present. The text in square brackets at
++the beginning of each description states the restrictions within which a
++parameter is applicable:
++
++ ACPI ACPI support is enabled.
++ AGP AGP (Accelerated Graphics Port) is enabled.
++ ALSA ALSA sound support is enabled.
++ APIC APIC support is enabled.
++ APM Advanced Power Management support is enabled.
++ ARM ARM architecture is enabled.
++ AVR32 AVR32 architecture is enabled.
++ AX25 Appropriate AX.25 support is enabled.
++ BLACKFIN Blackfin architecture is enabled.
++ CLK Common clock infrastructure is enabled.
++ CMA Contiguous Memory Area support is enabled.
++ DRM Direct Rendering Management support is enabled.
++ DYNAMIC_DEBUG Build in debug messages and enable them at runtime
++ EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
++ EFI EFI Partitioning (GPT) is enabled
++ EIDE EIDE/ATAPI support is enabled.
++ EVM Extended Verification Module
++ FB The frame buffer device is enabled.
++ FTRACE Function tracing enabled.
++ GCOV GCOV profiling is enabled.
++ HW Appropriate hardware is enabled.
++ IA-64 IA-64 architecture is enabled.
++ IMA Integrity measurement architecture is enabled.
++ IOSCHED More than one I/O scheduler is enabled.
++ IP_PNP IP DHCP, BOOTP, or RARP is enabled.
++ IPV6 IPv6 support is enabled.
++ ISAPNP ISA PnP code is enabled.
++ ISDN Appropriate ISDN support is enabled.
++ JOY Appropriate joystick support is enabled.
++ KGDB Kernel debugger support is enabled.
++ KVM Kernel Virtual Machine support is enabled.
++ LIBATA Libata driver is enabled
++ LP Printer support is enabled.
++ LOOP Loopback device support is enabled.
++ M68k M68k architecture is enabled.
++ These options have more detailed description inside of
++ Documentation/m68k/kernel-options.txt.
++ MDA MDA console support is enabled.
++ MIPS MIPS architecture is enabled.
++ MOUSE Appropriate mouse support is enabled.
++ MSI Message Signaled Interrupts (PCI).
++ MTD MTD (Memory Technology Device) support is enabled.
++ NET Appropriate network support is enabled.
++ NUMA NUMA support is enabled.
++ NFS Appropriate NFS support is enabled.
++ OSS OSS sound support is enabled.
++ PV_OPS A paravirtualized kernel is enabled.
++ PARIDE The ParIDE (parallel port IDE) subsystem is enabled.
++ PARISC The PA-RISC architecture is enabled.
++ PCI PCI bus support is enabled.
++ PCIE PCI Express support is enabled.
++ PCMCIA The PCMCIA subsystem is enabled.
++ PNP Plug & Play support is enabled.
++ PPC PowerPC architecture is enabled.
++ PPT Parallel port support is enabled.
++ PS2 Appropriate PS/2 support is enabled.
++ RAM RAM disk support is enabled.
++ S390 S390 architecture is enabled.
++ SCSI Appropriate SCSI support is enabled.
++ A lot of drivers have their options described inside
++ the Documentation/scsi/ sub-directory.
++ SECURITY Different security models are enabled.
++ SELINUX SELinux support is enabled.
++ APPARMOR AppArmor support is enabled.
++ SERIAL Serial support is enabled.
++ SH SuperH architecture is enabled.
++ SMP The kernel is an SMP kernel.
++ SPARC Sparc architecture is enabled.
++ SWSUSP Software suspend (hibernation) is enabled.
++ SUSPEND System suspend states are enabled.
++ TPM TPM drivers are enabled.
++ TS Appropriate touchscreen support is enabled.
++ UMS USB Mass Storage support is enabled.
++ USB USB support is enabled.
++ USBHID USB Human Interface Device support is enabled.
++ V4L Video For Linux support is enabled.
++ VMMIO Driver for memory mapped virtio devices is enabled.
++ VGA The VGA console has been enabled.
++ VT Virtual terminal support is enabled.
++ WDT Watchdog support is enabled.
++ XT IBM PC/XT MFM hard disk support is enabled.
++ X86-32 X86-32, aka i386 architecture is enabled.
++ X86-64 X86-64 architecture is enabled.
++ More X86-64 boot options can be found in
++ Documentation/x86/x86_64/boot-options.txt .
++ X86 Either 32-bit or 64-bit x86 (same as X86-32+X86-64)
++ XEN Xen support is enabled
++
++In addition, the following text indicates that the option:
++
++ BUGS= Relates to possible processor bugs on the said processor.
++ KNL Is a kernel start-up parameter.
++ BOOT Is a boot loader parameter.
++
++Parameters denoted with BOOT are actually interpreted by the boot
++loader, and have no meaning to the kernel directly.
++Do not modify the syntax of boot loader parameters without extreme
++need or coordination with <Documentation/x86/boot.txt>.
++
++There are also arch-specific kernel-parameters not documented here.
++See for example <Documentation/x86/x86_64/boot-options.txt>.
++
++Note that ALL kernel parameters listed below are CASE SENSITIVE, and that
++a trailing = on the name of any parameter states that that parameter will
++be entered as an environment variable, whereas its absence indicates that
++it will appear as a kernel argument readable via /proc/cmdline by programs
++running once the system is up.
++
++The number of kernel parameters is not limited, but the length of the
++complete command line (parameters including spaces etc.) is limited to
++a fixed number of characters. This limit depends on the architecture
++and is between 256 and 4096 characters. It is defined in the file
++./include/asm/setup.h as COMMAND_LINE_SIZE.
++
++Finally, the [KMG] suffix is commonly described after a number of kernel
++parameter values. These 'K', 'M', and 'G' letters represent the _binary_
++multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
++bytes respectively. Such letter suffixes can also be entirely omitted.
++
++
++ acpi= [HW,ACPI,X86]
++ Advanced Configuration and Power Interface
++ Format: { force | off | strict | noirq | rsdt }
++ force -- enable ACPI if default was off
++ off -- disable ACPI if default was on
++ noirq -- do not use ACPI for IRQ routing
++ strict -- Be less tolerant of platforms that are not
++ strictly ACPI specification compliant.
++ rsdt -- prefer RSDT over (default) XSDT
++ copy_dsdt -- copy DSDT to memory
++
++ See also Documentation/power/runtime_pm.txt, pci=noacpi
++
++ acpi_rsdp= [ACPI,EFI,KEXEC]
++ Pass the RSDP address to the kernel, mostly used
++ on machines running EFI runtime service to boot the
++ second kernel for kdump.
++
++ acpi_apic_instance= [ACPI, IOAPIC]
++ Format: <int>
++ 2: use 2nd APIC table, if available
++ 1,0: use 1st APIC table
++ default: 0
++
++ acpi_backlight= [HW,ACPI]
++ acpi_backlight=vendor
++ acpi_backlight=video
++ If set to vendor, prefer vendor specific driver
++ (e.g. thinkpad_acpi, sony_acpi, etc.) instead
++ of the ACPI video.ko driver.
++
++ acpi.debug_layer= [HW,ACPI,ACPI_DEBUG]
++ acpi.debug_level= [HW,ACPI,ACPI_DEBUG]
++ Format: <int>
++ CONFIG_ACPI_DEBUG must be enabled to produce any ACPI
++ debug output. Bits in debug_layer correspond to a
++ _COMPONENT in an ACPI source file, e.g.,
++ #define _COMPONENT ACPI_PCI_COMPONENT
++ Bits in debug_level correspond to a level in
++ ACPI_DEBUG_PRINT statements, e.g.,
++ ACPI_DEBUG_PRINT((ACPI_DB_INFO, ...
++ The debug_level mask defaults to "info". See
++ Documentation/acpi/debug.txt for more information about
++ debug layers and levels.
++
++ Enable processor driver info messages:
++ acpi.debug_layer=0x20000000
++ Enable PCI/PCI interrupt routing info messages:
++ acpi.debug_layer=0x400000
++ Enable AML "Debug" output, i.e., stores to the Debug
++ object while interpreting AML:
++ acpi.debug_layer=0xffffffff acpi.debug_level=0x2
++ Enable all messages related to ACPI hardware:
++ acpi.debug_layer=0x2 acpi.debug_level=0xffffffff
++
++ Some values produce so much output that the system is
++ unusable. The "log_buf_len" parameter may be useful
++ if you need to capture more output.
++
++ acpi_irq_balance [HW,ACPI]
++ ACPI will balance active IRQs
++ default in APIC mode
++
++ acpi_irq_nobalance [HW,ACPI]
++ ACPI will not move active IRQs (default)
++ default in PIC mode
++
++ acpi_irq_isa= [HW,ACPI] If irq_balance, mark listed IRQs used by ISA
++ Format: <irq>,<irq>...
++
++ acpi_irq_pci= [HW,ACPI] If irq_balance, clear listed IRQs for
++ use by PCI
++ Format: <irq>,<irq>...
++
++ acpi_no_auto_ssdt [HW,ACPI] Disable automatic loading of SSDT
++
++ acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS
++ Format: To spoof as Windows 98: ="Microsoft Windows"
++
++ acpi_osi= [HW,ACPI] Modify list of supported OS interface strings
++ acpi_osi="string1" # add string1
++ acpi_osi="!string2" # remove string2
++ acpi_osi=!* # remove all strings
++ acpi_osi=! # disable all built-in OS vendor
++ strings
++ acpi_osi= # disable all strings
++
++ 'acpi_osi=!' can be used in combination with single or
++ multiple 'acpi_osi="string1"' to support specific OS
++ vendor string(s). Note that such command can only
++ affect the default state of the OS vendor strings, thus
++ it cannot affect the default state of the feature group
++ strings and the current state of the OS vendor strings,
++ specifying it multiple times through kernel command line
++ is meaningless. This command is useful when one do not
++ care about the state of the feature group strings which
++ should be controlled by the OSPM.
++ Examples:
++ 1. 'acpi_osi=! acpi_osi="Windows 2000"' is equivalent
++ to 'acpi_osi="Windows 2000" acpi_osi=!', they all
++ can make '_OSI("Windows 2000")' TRUE.
++
++ 'acpi_osi=' cannot be used in combination with other
++ 'acpi_osi=' command lines, the _OSI method will not
++ exist in the ACPI namespace. NOTE that such command can
++ only affect the _OSI support state, thus specifying it
++ multiple times through kernel command line is also
++ meaningless.
++ Examples:
++ 1. 'acpi_osi=' can make 'CondRefOf(_OSI, Local1)'
++ FALSE.
++
++ 'acpi_osi=!*' can be used in combination with single or
++ multiple 'acpi_osi="string1"' to support specific
++ string(s). Note that such command can affect the
++ current state of both the OS vendor strings and the
++ feature group strings, thus specifying it multiple times
++ through kernel command line is meaningful. But it may
++ still not able to affect the final state of a string if
++ there are quirks related to this string. This command
++ is useful when one want to control the state of the
++ feature group strings to debug BIOS issues related to
++ the OSPM features.
++ Examples:
++ 1. 'acpi_osi="Module Device" acpi_osi=!*' can make
++ '_OSI("Module Device")' FALSE.
++ 2. 'acpi_osi=!* acpi_osi="Module Device"' can make
++ '_OSI("Module Device")' TRUE.
++ 3. 'acpi_osi=! acpi_osi=!* acpi_osi="Windows 2000"' is
++ equivalent to
++ 'acpi_osi=!* acpi_osi=! acpi_osi="Windows 2000"'
++ and
++ 'acpi_osi=!* acpi_osi="Windows 2000" acpi_osi=!',
++ they all will make '_OSI("Windows 2000")' TRUE.
++
++ acpi_pm_good [X86]
++ Override the pmtimer bug detection: force the kernel
++ to assume that this machine's pmtimer latches its value
++ and always returns good values.
++
++ acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode
++ Format: { level | edge | high | low }
++
++ acpi_serialize [HW,ACPI] force serialization of AML methods
++
++ acpi_skip_timer_override [HW,ACPI]
++ Recognize and ignore IRQ0/pin2 Interrupt Override.
++ For broken nForce2 BIOS resulting in XT-PIC timer.
++
++ acpi_sleep= [HW,ACPI] Sleep options
++ Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
++ old_ordering, nonvs, sci_force_enable }
++ See Documentation/power/video.txt for information on
++ s3_bios and s3_mode.
++ s3_beep is for debugging; it makes the PC's speaker beep
++ as soon as the kernel's real-mode entry point is called.
++ s4_nohwsig prevents ACPI hardware signature from being
++ used during resume from hibernation.
++ old_ordering causes the ACPI 1.0 ordering of the _PTS
++ control method, with respect to putting devices into
++ low power states, to be enforced (the ACPI 2.0 ordering
++ of _PTS is used by default).
++ nonvs prevents the kernel from saving/restoring the
++ ACPI NVS memory during suspend/hibernation and resume.
++ sci_force_enable causes the kernel to set SCI_EN directly
++ on resume from S1/S3 (which is against the ACPI spec,
++ but some broken systems don't work without it).
++
++ acpi_use_timer_override [HW,ACPI]
++ Use timer override. For some broken Nvidia NF5 boards
++ that require a timer override, but don't have HPET
++
++ acpi_enforce_resources= [ACPI]
++ { strict | lax | no }
++ Check for resource conflicts between native drivers
++ and ACPI OperationRegions (SystemIO and SystemMemory
++ only). IO ports and memory declared in ACPI might be
++ used by the ACPI subsystem in arbitrary AML code and
++ can interfere with legacy drivers.
++ strict (default): access to resources claimed by ACPI
++ is denied; legacy drivers trying to access reserved
++ resources will fail to bind to device using them.
++ lax: access to resources claimed by ACPI is allowed;
++ legacy drivers trying to access reserved resources
++ will bind successfully but a warning message is logged.
++ no: ACPI OperationRegions are not marked as reserved,
++ no further checks are performed.
++
++ acpi_no_memhotplug [ACPI] Disable memory hotplug. Useful for kdump
++ kernels.
++
++ add_efi_memmap [EFI; X86] Include EFI memory map in
++ kernel's map of available physical RAM.
++
++ agp= [AGP]
++ { off | try_unsupported }
++ off: disable AGP support
++ try_unsupported: try to drive unsupported chipsets
++ (may crash computer or cause data corruption)
++
++ ALSA [HW,ALSA]
++ See Documentation/sound/alsa/alsa-parameters.txt
++
++ alignment= [KNL,ARM]
++ Allow the default userspace alignment fault handler
++ behaviour to be specified. Bit 0 enables warnings,
++ bit 1 enables fixups, and bit 2 sends a segfault.
++
++ align_va_addr= [X86-64]
++ Align virtual addresses by clearing slice [14:12] when
++ allocating a VMA at process creation time. This option
++ gives you up to 3% performance improvement on AMD F15h
++ machines (where it is enabled by default) for a
++ CPU-intensive style benchmark, and it can vary highly in
++ a microbenchmark depending on workload and compiler.
++
++ 32: only for 32-bit processes
++ 64: only for 64-bit processes
++ on: enable for both 32- and 64-bit processes
++ off: disable for both 32- and 64-bit processes
++
++ alloc_snapshot [FTRACE]
++ Allocate the ftrace snapshot buffer on boot up when the
++ main buffer is allocated. This is handy if debugging
++ and you need to use tracing_snapshot() on boot up, and
++ do not want to use tracing_snapshot_alloc() as it needs
++ to be done where GFP_KERNEL allocations are allowed.
++
++ amd_iommu= [HW,X86-64]
++ Pass parameters to the AMD IOMMU driver in the system.
++ Possible values are:
++ fullflush - enable flushing of IO/TLB entries when
++ they are unmapped. Otherwise they are
++ flushed before they will be reused, which
++ is a lot of faster
++ off - do not initialize any AMD IOMMU found in
++ the system
++ force_isolation - Force device isolation for all
++ devices. The IOMMU driver is not
++ allowed anymore to lift isolation
++ requirements as needed. This option
++ does not override iommu=pt
++
++ amd_iommu_dump= [HW,X86-64]
++ Enable AMD IOMMU driver option to dump the ACPI table
++ for AMD IOMMU. With this option enabled, AMD IOMMU
++ driver will print ACPI tables for AMD IOMMU during
++ IOMMU initialization.
++
++ amijoy.map= [HW,JOY] Amiga joystick support
++ Map of devices attached to JOY0DAT and JOY1DAT
++ Format: <a>,<b>
++ See also Documentation/input/joystick.txt
++
++ analog.map= [HW,JOY] Analog joystick and gamepad support
++ Specifies type or capabilities of an analog joystick
++ connected to one of 16 gameports
++ Format: <type1>,<type2>,..<type16>
++
++ apc= [HW,SPARC]
++ Power management functions (SPARCstation-4/5 + deriv.)
++ Format: noidle
++ Disable APC CPU standby support. SPARCstation-Fox does
++ not play well with APC CPU idle - disable it if you have
++ APC and your system crashes randomly.
++
++ apic= [APIC,X86-32] Advanced Programmable Interrupt Controller
++ Change the output verbosity whilst booting
++ Format: { quiet (default) | verbose | debug }
++ Change the amount of debugging information output
++ when initialising the APIC and IO-APIC components.
++
++ autoconf= [IPV6]
++ See Documentation/networking/ipv6.txt.
++
++ show_lapic= [APIC,X86] Advanced Programmable Interrupt Controller
++ Limit apic dumping. The parameter defines the maximal
++ number of local apics being dumped. Also it is possible
++ to set it to "all" by meaning -- no limit here.
++ Format: { 1 (default) | 2 | ... | all }.
++ The parameter valid if only apic=debug or
++ apic=verbose is specified.
++ Example: apic=debug show_lapic=all
++
++ apm= [APM] Advanced Power Management
++ See header of arch/x86/kernel/apm_32.c.
++
++ arcrimi= [HW,NET] ARCnet - "RIM I" (entirely mem-mapped) cards
++ Format: <io>,<irq>,<nodeID>
++
++ ataflop= [HW,M68k]
++
++ atarimouse= [HW,MOUSE] Atari Mouse
++
++ atkbd.extra= [HW] Enable extra LEDs and keys on IBM RapidAccess,
++ EzKey and similar keyboards
++
++ atkbd.reset= [HW] Reset keyboard during initialization
++
++ atkbd.set= [HW] Select keyboard code set
++ Format: <int> (2 = AT (default), 3 = PS/2)
++
++ atkbd.scroll= [HW] Enable scroll wheel on MS Office and similar
++ keyboards
++
++ atkbd.softraw= [HW] Choose between synthetic and real raw mode
++ Format: <bool> (0 = real, 1 = synthetic (default))
++
++ atkbd.softrepeat= [HW]
++ Use software keyboard repeat
++
++ audit= [KNL] Enable the audit sub-system
++ Format: { "0" | "1" } (0 = disabled, 1 = enabled)
++ 0 - kernel audit is disabled and can not be enabled
++ until the next reboot
++ unset - kernel audit is initialized but disabled and
++ will be fully enabled by the userspace auditd.
++ 1 - kernel audit is initialized and partially enabled,
++ storing at most audit_backlog_limit messages in
++ RAM until it is fully enabled by the userspace
++ auditd.
++ Default: unset
++
++ audit_backlog_limit= [KNL] Set the audit queue size limit.
++ Format: <int> (must be >=0)
++ Default: 64
++
++ baycom_epp= [HW,AX25]
++ Format: <io>,<mode>
++
++ baycom_par= [HW,AX25] BayCom Parallel Port AX.25 Modem
++ Format: <io>,<mode>
++ See header of drivers/net/hamradio/baycom_par.c.
++
++ baycom_ser_fdx= [HW,AX25]
++ BayCom Serial Port AX.25 Modem (Full Duplex Mode)
++ Format: <io>,<irq>,<mode>[,<baud>]
++ See header of drivers/net/hamradio/baycom_ser_fdx.c.
++
++ baycom_ser_hdx= [HW,AX25]
++ BayCom Serial Port AX.25 Modem (Half Duplex Mode)
++ Format: <io>,<irq>,<mode>
++ See header of drivers/net/hamradio/baycom_ser_hdx.c.
++
++ blkdevparts= Manual partition parsing of block device(s) for
++ embedded devices based on command line input.
++ See Documentation/block/cmdline-partition.txt
++
++ boot_delay= Milliseconds to delay each printk during boot.
++ Values larger than 10 seconds (10000) are changed to
++ no delay (0).
++ Format: integer
++
++ bootmem_debug [KNL] Enable bootmem allocator debug messages.
++
++ bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards)
++ bttv.radio= Most important insmod options are available as
++ kernel args too.
++ bttv.pll= See Documentation/video4linux/bttv/Insmod-options
++ bttv.tuner=
++
++ bulk_remove=off [PPC] This parameter disables the use of the pSeries
++ firmware feature for flushing multiple hpte entries
++ at a time.
++
++ c101= [NET] Moxa C101 synchronous serial card
++
++ cachesize= [BUGS=X86-32] Override level 2 CPU cache size detection.
++ Sometimes CPU hardware bugs make them report the cache
++ size incorrectly. The kernel will attempt work arounds
++ to fix known problems, but for some CPUs it is not
++ possible to determine what the correct size should be.
++ This option provides an override for these situations.
++
++ ccw_timeout_log [S390]
++ See Documentation/s390/CommonIO for details.
++
++ cgroup_disable= [KNL] Disable a particular controller
++ Format: {name of the controller(s) to disable}
++ The effects of cgroup_disable=foo are:
++ - foo isn't auto-mounted if you mount all cgroups in
++ a single hierarchy
++ - foo isn't visible as an individually mountable
++ subsystem
++ {Currently only "memory" controller deal with this and
++ cut the overhead, others just disable the usage. So
++ only cgroup_disable=memory is actually worthy}
++
++ checkreqprot [SELINUX] Set initial checkreqprot flag value.
++ Format: { "0" | "1" }
++ See security/selinux/Kconfig help text.
++ 0 -- check protection applied by kernel (includes
++ any implied execute protection).
++ 1 -- check protection requested by application.
++ Default value is set via a kernel config option.
++ Value can be changed at runtime via
++ /selinux/checkreqprot.
++
++ cio_ignore= [S390]
++ See Documentation/s390/CommonIO for details.
++ clk_ignore_unused
++ [CLK]
++ Keep all clocks already enabled by bootloader on,
++ even if no driver has claimed them. This is useful
++ for debug and development, but should not be
++ needed on a platform with proper driver support.
++ For more information, see Documentation/clk.txt.
++
++ clock= [BUGS=X86-32, HW] gettimeofday clocksource override.
++ [Deprecated]
++ Forces specified clocksource (if available) to be used
++ when calculating gettimeofday(). If specified
++ clocksource is not available, it defaults to PIT.
++ Format: { pit | tsc | cyclone | pmtmr }
++
++ clocksource= Override the default clocksource
++ Format: <string>
++ Override the default clocksource and use the clocksource
++ with the name specified.
++ Some clocksource names to choose from, depending on
++ the platform:
++ [all] jiffies (this is the base, fallback clocksource)
++ [ACPI] acpi_pm
++ [ARM] imx_timer1,OSTS,netx_timer,mpu_timer2,
++ pxa_timer,timer3,32k_counter,timer0_1
++ [AVR32] avr32
++ [X86-32] pit,hpet,tsc;
++ scx200_hrt on Geode; cyclone on IBM x440
++ [MIPS] MIPS
++ [PARISC] cr16
++ [S390] tod
++ [SH] SuperH
++ [SPARC64] tick
++ [X86-64] hpet,tsc
++
++ clearcpuid=BITNUM [X86]
++ Disable CPUID feature X for the kernel. See
++ arch/x86/include/asm/cpufeature.h for the valid bit
++ numbers. Note the Linux specific bits are not necessarily
++ stable over kernel options, but the vendor specific
++ ones should be.
++ Also note that user programs calling CPUID directly
++ or using the feature without checking anything
++ will still see it. This just prevents it from
++ being used by the kernel or shown in /proc/cpuinfo.
++ Also note the kernel might malfunction if you disable
++ some critical bits.
++
++ cma=nn[MG]@[start[MG][-end[MG]]]
++ [ARM,X86,KNL]
++ Sets the size of kernel global memory area for
++ contiguous memory allocations and optionally the
++ placement constraint by the physical address range of
++ memory allocations. For more information, see
++ include/linux/dma-contiguous.h
++
++ cmo_free_hint= [PPC] Format: { yes | no }
++ Specify whether pages are marked as being inactive
++ when they are freed. This is used in CMO environments
++ to determine OS memory pressure for page stealing by
++ a hypervisor.
++ Default: yes
++
++ coherent_pool=nn[KMG] [ARM,KNL]
++ Sets the size of memory pool for coherent, atomic dma
++ allocations, by default set to 256K.
++
++ code_bytes [X86] How many bytes of object code to print
++ in an oops report.
++ Range: 0 - 8192
++ Default: 64
++
++ com20020= [HW,NET] ARCnet - COM20020 chipset
++ Format:
++ <io>[,<irq>[,<nodeID>[,<backplane>[,<ckp>[,<timeout>]]]]]
++
++ com90io= [HW,NET] ARCnet - COM90xx chipset (IO-mapped buffers)
++ Format: <io>[,<irq>]
++
++ com90xx= [HW,NET]
++ ARCnet - COM90xx chipset (memory-mapped buffers)
++ Format: <io>[,<irq>[,<memstart>]]
++
++ condev= [HW,S390] console device
++ conmode=
++
++ console= [KNL] Output console device and options.
++
++ tty<n> Use the virtual console device <n>.
++
++ ttyS<n>[,options]
++ ttyUSB0[,options]
++ Use the specified serial port. The options are of
++ the form "bbbbpnf", where "bbbb" is the baud rate,
++ "p" is parity ("n", "o", or "e"), "n" is number of
++ bits, and "f" is flow control ("r" for RTS or
++ omit it). Default is "9600n8".
++
++ See Documentation/serial-console.txt for more
++ information. See
++ Documentation/networking/netconsole.txt for an
++ alternative.
++
++ uart[8250],io,<addr>[,options]
++ uart[8250],mmio,<addr>[,options]
++ Start an early, polled-mode console on the 8250/16550
++ UART at the specified I/O port or MMIO address,
++ switching to the matching ttyS device later. The
++ options are the same as for ttyS, above.
++ hvc<n> Use the hypervisor console device <n>. This is for
++ both Xen and PowerPC hypervisors.
++
++ If the device connected to the port is not a TTY but a braille
++ device, prepend "brl," before the device type, for instance
++ console=brl,ttyS0
++ For now, only VisioBraille is supported.
++
++ consoleblank= [KNL] The console blank (screen saver) timeout in
++ seconds. Defaults to 10*60 = 10mins. A value of 0
++ disables the blank timer.
++
++ coredump_filter=
++ [KNL] Change the default value for
++ /proc/<pid>/coredump_filter.
++ See also Documentation/filesystems/proc.txt.
++
++ cpuidle.off=1 [CPU_IDLE]
++ disable the cpuidle sub-system
++
++ cpcihp_generic= [HW,PCI] Generic port I/O CompactPCI driver
++ Format:
++ <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
++
++ crashkernel=size[KMG][@offset[KMG]]
++ [KNL] Using kexec, Linux can switch to a 'crash kernel'
++ upon panic. This parameter reserves the physical
++ memory region [offset, offset + size] for that kernel
++ image. If '@offset' is omitted, then a suitable offset
++ is selected automatically. Check
++ Documentation/kdump/kdump.txt for further details.
++
++ crashkernel=range1:size1[,range2:size2,...][@offset]
++ [KNL] Same as above, but depends on the memory
++ in the running system. The syntax of range is
++ start-[end] where start and end are both
++ a memory unit (amount[KMG]). See also
++ Documentation/kdump/kdump.txt for an example.
++
++ crashkernel=size[KMG],high
++ [KNL, x86_64] range could be above 4G. Allow kernel
++ to allocate physical memory region from top, so could
++ be above 4G if system have more than 4G ram installed.
++ Otherwise memory region will be allocated below 4G, if
++ available.
++ It will be ignored if crashkernel=X is specified.
++ crashkernel=size[KMG],low
++ [KNL, x86_64] range under 4G. When crashkernel=X,high
++ is passed, kernel could allocate physical memory region
++ above 4G, that cause second kernel crash on system
++ that require some amount of low memory, e.g. swiotlb
++ requires at least 64M+32K low memory. Kernel would
++ try to allocate 72M below 4G automatically.
++ This one let user to specify own low range under 4G
++ for second kernel instead.
++ 0: to disable low allocation.
++ It will be ignored when crashkernel=X,high is not used
++ or memory reserved is below 4G.
++
++ cs89x0_dma= [HW,NET]
++ Format: <dma>
++
++ cs89x0_media= [HW,NET]
++ Format: { rj45 | aui | bnc }
++
++ dasd= [HW,NET]
++ See header of drivers/s390/block/dasd_devmap.c.
++
++ db9.dev[2|3]= [HW,JOY] Multisystem joystick support via parallel port
++ (one device per port)
++ Format: <port#>,<type>
++ See also Documentation/input/joystick-parport.txt
++
++ ddebug_query= [KNL,DYNAMIC_DEBUG] Enable debug messages at early boot
++ time. See Documentation/dynamic-debug-howto.txt for
++ details. Deprecated, see dyndbg.
++
++ debug [KNL] Enable kernel debugging (events log level).
++
++ debug_locks_verbose=
++ [KNL] verbose self-tests
++ Format=<0|1>
++ Print debugging info while doing the locking API
++ self-tests.
++ We default to 0 (no extra messages), setting it to
++ 1 will print _a lot_ more information - normally
++ only useful to kernel developers.
++
++ debug_objects [KNL] Enable object debugging
++
++ no_debug_objects
++ [KNL] Disable object debugging
++
++ debug_guardpage_minorder=
++ [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
++ parameter allows control of the order of pages that will
++ be intentionally kept free (and hence protected) by the
++ buddy allocator. A bigger value increases the probability
++ of catching random memory corruption, but reduce the
++ amount of memory for normal system use. The maximum
++ possible value is MAX_ORDER/2. Setting this parameter
++ to 1 or 2 should be enough to identify most random
++ memory corruption problems caused by bugs in kernel or
++ driver code when a CPU writes to (or reads from) a
++ random memory location. Note that there exists a class
++ of memory corruptions problems caused by buggy H/W or
++ F/W or by drivers badly programming DMA (basically when
++ memory is written at bus level and the CPU MMU is
++ bypassed) which are not detectable by
++ CONFIG_DEBUG_PAGEALLOC, hence this option will not help
++ tracking down these problems.
++
++ debugpat [X86] Enable PAT debugging
++
++ decnet.addr= [HW,NET]
++ Format: <area>[,<node>]
++ See also Documentation/networking/decnet.txt.
++
++ default_hugepagesz=
++ [same as hugepagesz=] The size of the default
++ HugeTLB page size. This is the size represented by
++ the legacy /proc/ hugepages APIs, used for SHM, and
++ default size when mounting hugetlbfs filesystems.
++ Defaults to the default architecture's huge page size
++ if not specified.
++
++ dhash_entries= [KNL]
++ Set number of hash buckets for dentry cache.
++
++ digi= [HW,SERIAL]
++ IO parameters + enable/disable command.
++
++ digiepca= [HW,SERIAL]
++ See drivers/char/README.epca and
++ Documentation/serial/digiepca.txt.
++
++ disable= [IPV6]
++ See Documentation/networking/ipv6.txt.
++
++ disable_cpu_apicid= [X86,APIC,SMP]
++ Format: <int>
++ The number of initial APIC ID for the
++ corresponding CPU to be disabled at boot,
++ mostly used for the kdump 2nd kernel to
++ disable BSP to wake up multiple CPUs without
++ causing system reset or hang due to sending
++ INIT from AP to BSP.
++
++ disable_ddw [PPC/PSERIES]
++ Disable Dynamic DMA Window support. Use this
++ to work around buggy firmware.
++
++ disable_ipv6= [IPV6]
++ See Documentation/networking/ipv6.txt.
++
++ disable_mtrr_cleanup [X86]
++ The kernel tries to adjust MTRR layout from continuous
++ to discrete, to make X server driver able to add WB
++ entry later. This parameter disables that.
++
++ disable_mtrr_trim [X86, Intel and AMD only]
++ By default the kernel will trim any uncacheable
++ memory out of your available memory pool based on
++ MTRR settings. This parameter disables that behavior,
++ possibly causing your machine to run very slowly.
++
++ disable_timer_pin_1 [X86]
++ Disable PIN 1 of APIC timer
++ Can be useful to work around chipset bugs.
++
++ dma_debug=off If the kernel is compiled with DMA_API_DEBUG support,
++ this option disables the debugging code at boot.
++
++ dma_debug_entries=<number>
++ This option allows to tune the number of preallocated
++ entries for DMA-API debugging code. One entry is
++ required per DMA-API allocation. Use this if the
++ DMA-API debugging code disables itself because the
++ architectural default is too low.
++
++ dma_debug_driver=<driver_name>
++ With this option the DMA-API debugging driver
++ filter feature can be enabled at boot time. Just
++ pass the driver to filter for as the parameter.
++ The filter can be disabled or changed to another
++ driver later using sysfs.
++
++ drm_kms_helper.edid_firmware=[<connector>:]<file>
++ Broken monitors, graphic adapters and KVMs may
++ send no or incorrect EDID data sets. This parameter
++ allows to specify an EDID data set in the
++ /lib/firmware directory that is used instead.
++ Generic built-in EDID data sets are used, if one of
++ edid/1024x768.bin, edid/1280x1024.bin,
++ edid/1680x1050.bin, or edid/1920x1080.bin is given
++ and no file with the same name exists. Details and
++ instructions how to build your own EDID data are
++ available in Documentation/EDID/HOWTO.txt. An EDID
++ data set will only be used for a particular connector,
++ if its name and a colon are prepended to the EDID
++ name.
++
++ dscc4.setup= [NET]
++
++ dyndbg[="val"] [KNL,DYNAMIC_DEBUG]
++ module.dyndbg[="val"]
++ Enable debug messages at boot time. See
++ Documentation/dynamic-debug-howto.txt for details.
++
++ earlycon= [KNL] Output early console device and options.
++ uart[8250],io,<addr>[,options]
++ uart[8250],mmio,<addr>[,options]
++ uart[8250],mmio32,<addr>[,options]
++ Start an early, polled-mode console on the 8250/16550
++ UART at the specified I/O port or MMIO address.
++ MMIO inter-register address stride is either 8-bit
++ (mmio) or 32-bit (mmio32).
++ The options are the same as for ttyS, above.
++
++ earlyprintk= [X86,SH,BLACKFIN,ARM]
++ earlyprintk=vga
++ earlyprintk=efi
++ earlyprintk=xen
++ earlyprintk=serial[,ttySn[,baudrate]]
++ earlyprintk=serial[,0x...[,baudrate]]
++ earlyprintk=ttySn[,baudrate]
++ earlyprintk=dbgp[debugController#]
++
++ earlyprintk is useful when the kernel crashes before
++ the normal console is initialized. It is not enabled by
++ default because it has some cosmetic problems.
++
++ Append ",keep" to not disable it when the real console
++ takes over.
++
++ Only one of vga, efi, serial, or usb debug port can
++ be used at a time.
++
++ Currently only ttyS0 and ttyS1 may be specified by
++ name. Other I/O ports may be explicitly specified
++ on some architectures (x86 and arm at least) by
++ replacing ttySn with an I/O port address, like this:
++ earlyprintk=serial,0x1008,115200
++ You can find the port for a given device in
++ /proc/tty/driver/serial:
++ 2: uart:ST16650V2 port:00001008 irq:18 ...
++
++ Interaction with the standard serial driver is not
++ very good.
++
++ The VGA and EFI output is eventually overwritten by
++ the real console.
++
++ The xen output can only be used by Xen PV guests.
++
++ edac_report= [HW,EDAC] Control how to report EDAC event
++ Format: {"on" | "off" | "force"}
++ on: enable EDAC to report H/W event. May be overridden
++ by other higher priority error reporting module.
++ off: disable H/W event reporting through EDAC.
++ force: enforce the use of EDAC to report H/W event.
++ default: on.
++
++ ekgdboc= [X86,KGDB] Allow early kernel console debugging
++ ekgdboc=kbd
++
++ This is designed to be used in conjunction with
++ the boot argument: earlyprintk=vga
++
++ edd= [EDD]
++ Format: {"off" | "on" | "skip[mbr]"}
++
++ efi= [EFI]
++ Format: { "old_map" }
++ old_map [X86-64]: switch to the old ioremap-based EFI
++ runtime services mapping. 32-bit still uses this one by
++ default.
++
++ efi_no_storage_paranoia [EFI; X86]
++ Using this parameter you can use more than 50% of
++ your efi variable storage. Use this parameter only if
++ you are really sure that your UEFI does sane gc and
++ fulfills the spec otherwise your board may brick.
++
++ eisa_irq_edge= [PARISC,HW]
++ See header of drivers/parisc/eisa.c.
++
++ elanfreq= [X86-32]
++ See comment before function elanfreq_setup() in
++ arch/x86/kernel/cpu/cpufreq/elanfreq.c.
++
++ elevator= [IOSCHED]
++ Format: {"cfq" | "deadline" | "noop"}
++ See Documentation/block/cfq-iosched.txt and
++ Documentation/block/deadline-iosched.txt for details.
++
++ elfcorehdr=[size[KMG]@]offset[KMG] [IA64,PPC,SH,X86,S390]
++ Specifies physical address of start of kernel core
++ image elf header and optionally the size. Generally
++ kexec loader will pass this option to capture kernel.
++ See Documentation/kdump/kdump.txt for details.
++
++ enable_mtrr_cleanup [X86]
++ The kernel tries to adjust MTRR layout from continuous
++ to discrete, to make X server driver able to add WB
++ entry later. This parameter enables that.
++
++ enable_timer_pin_1 [X86]
++ Enable PIN 1 of APIC timer
++ Can be useful to work around chipset bugs
++ (in particular on some ATI chipsets).
++ The kernel tries to set a reasonable default.
++
++ enforcing [SELINUX] Set initial enforcing status.
++ Format: {"0" | "1"}
++ See security/selinux/Kconfig help text.
++ 0 -- permissive (log only, no denials).
++ 1 -- enforcing (deny and log).
++ Default value is 0.
++ Value can be changed at runtime via /selinux/enforce.
++
++ erst_disable [ACPI]
++ Disable Error Record Serialization Table (ERST)
++ support.
++
++ ether= [HW,NET] Ethernet cards parameters
++ This option is obsoleted by the "netdev=" option, which
++ has equivalent usage. See its documentation for details.
++
++ evm= [EVM]
++ Format: { "fix" }
++ Permit 'security.evm' to be updated regardless of
++ current integrity status.
++
++ failslab=
++ fail_page_alloc=
++ fail_make_request=[KNL]
++ General fault injection mechanism.
++ Format: <interval>,<probability>,<space>,<times>
++ See also Documentation/fault-injection/.
++
++ floppy= [HW]
++ See Documentation/blockdev/floppy.txt.
++
++ force_pal_cache_flush
++ [IA-64] Avoid check_sal_cache_flush which may hang on
++ buggy SAL_CACHE_FLUSH implementations. Using this
++ parameter will force ia64_sal_cache_flush to call
++ ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
++
++ ftrace=[tracer]
++ [FTRACE] will set and start the specified tracer
++ as early as possible in order to facilitate early
++ boot debugging.
++
++ ftrace_dump_on_oops[=orig_cpu]
++ [FTRACE] will dump the trace buffers on oops.
++ If no parameter is passed, ftrace will dump
++ buffers of all CPUs, but if you pass orig_cpu, it will
++ dump only the buffer of the CPU that triggered the
++ oops.
++
++ ftrace_filter=[function-list]
++ [FTRACE] Limit the functions traced by the function
++ tracer at boot up. function-list is a comma separated
++ list of functions. This list can be changed at run
++ time by the set_ftrace_filter file in the debugfs
++ tracing directory.
++
++ ftrace_notrace=[function-list]
++ [FTRACE] Do not trace the functions specified in
++ function-list. This list can be changed at run time
++ by the set_ftrace_notrace file in the debugfs
++ tracing directory.
++
++ ftrace_graph_filter=[function-list]
++ [FTRACE] Limit the top level callers functions traced
++ by the function graph tracer at boot up.
++ function-list is a comma separated list of functions
++ that can be changed at run time by the
++ set_graph_function file in the debugfs tracing directory.
++
++ gamecon.map[2|3]=
++ [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
++ support via parallel port (up to 5 devices per port)
++ Format: <port#>,<pad1>,<pad2>,<pad3>,<pad4>,<pad5>
++ See also Documentation/input/joystick-parport.txt
++
++ gamma= [HW,DRM]
++
++ gart_fix_e820= [X86_64] disable the fix e820 for K8 GART
++ Format: off | on
++ default: on
++
++ gcov_persist= [GCOV] When non-zero (default), profiling data for
++ kernel modules is saved and remains accessible via
++ debugfs, even when the module is unloaded/reloaded.
++ When zero, profiling data is discarded and associated
++ debugfs files are removed at module unload time.
++
++ gpt [EFI] Forces disk with valid GPT signature but
++ invalid Protective MBR to be treated as GPT. If the
++ primary GPT is corrupted, it enables the backup/alternate
++ GPT to be used instead.
++
++ grcan.enable0= [HW] Configuration of physical interface 0. Determines
++ the "Enable 0" bit of the configuration register.
++ Format: 0 | 1
++ Default: 0
++ grcan.enable1= [HW] Configuration of physical interface 1. Determines
++ the "Enable 0" bit of the configuration register.
++ Format: 0 | 1
++ Default: 0
++ grcan.select= [HW] Select which physical interface to use.
++ Format: 0 | 1
++ Default: 0
++ grcan.txsize= [HW] Sets the size of the tx buffer.
++ Format: <unsigned int> such that (txsize & ~0x1fffc0) == 0.
++ Default: 1024
++ grcan.rxsize= [HW] Sets the size of the rx buffer.
++ Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
++ Default: 1024
++
++ hashdist= [KNL,NUMA] Large hashes allocated during boot
++ are distributed across NUMA nodes. Defaults on
++ for 64-bit NUMA, off otherwise.
++ Format: 0 | 1 (for off | on)
++
++ hcl= [IA-64] SGI's Hardware Graph compatibility layer
++
++ hd= [EIDE] (E)IDE hard drive subsystem geometry
++ Format: <cyl>,<head>,<sect>
++
++ hest_disable [ACPI]
++ Disable Hardware Error Source Table (HEST) support;
++ corresponding firmware-first mode error processing
++ logic will be disabled.
++
++ highmem=nn[KMG] [KNL,BOOT] forces the highmem zone to have an exact
++ size of <nn>. This works even on boxes that have no
++ highmem otherwise. This also works to reduce highmem
++ size on bigger boxes.
++
++ highres= [KNL] Enable/disable high resolution timer mode.
++ Valid parameters: "on", "off"
++ Default: "on"
++
++ hisax= [HW,ISDN]
++ See Documentation/isdn/README.HiSax.
++
++ hlt [BUGS=ARM,SH]
++
++ hpet= [X86-32,HPET] option to control HPET usage
++ Format: { enable (default) | disable | force |
++ verbose }
++ disable: disable HPET and use PIT instead
++ force: allow forced enabling of undocumented chips (ICH4,
++ VIA, nVidia)
++ verbose: show contents of HPET registers during setup
++
++ hpet_mmap= [X86, HPET_MMAP] Allow userspace to mmap HPET
++ registers. Default set by CONFIG_HPET_MMAP_DEFAULT.
++
++ hugepages= [HW,X86-32,IA-64] HugeTLB pages to allocate at boot.
++ hugepagesz= [HW,IA-64,PPC,X86-64] The size of the HugeTLB pages.
++ On x86-64 and powerpc, this option can be specified
++ multiple times interleaved with hugepages= to reserve
++ huge pages of different sizes. Valid pages sizes on
++ x86-64 are 2M (when the CPU supports "pse") and 1G
++ (when the CPU supports the "pdpe1gb" cpuinfo flag)
++ Note that 1GB pages can only be allocated at boot time
++ using hugepages= and not freed afterwards.
++
++ hvc_iucv= [S390] Number of z/VM IUCV hypervisor console (HVC)
++ terminal devices. Valid values: 0..8
++ hvc_iucv_allow= [S390] Comma-separated list of z/VM user IDs.
++ If specified, z/VM IUCV HVC accepts connections
++ from listed z/VM user IDs only.
++
++ hwthread_map= [METAG] Comma-separated list of Linux cpu id to
++ hardware thread id mappings.
++ Format: <cpu>:<hwthread>
++
++ keep_bootcon [KNL]
++ Do not unregister boot console at start. This is only
++ useful for debugging when something happens in the window
++ between unregistering the boot console and initializing
++ the real console.
++
++ i2c_bus= [HW] Override the default board specific I2C bus speed
++ or register an additional I2C bus that is not
++ registered from board initialization code.
++ Format:
++ <bus_id>,<clkrate>
++
++ i8042.debug [HW] Toggle i8042 debug mode
++ i8042.direct [HW] Put keyboard port into non-translated mode
++ i8042.dumbkbd [HW] Pretend that controller can only read data from
++ keyboard and cannot control its state
++ (Don't attempt to blink the leds)
++ i8042.noaux [HW] Don't check for auxiliary (== mouse) port
++ i8042.nokbd [HW] Don't check/create keyboard port
++ i8042.noloop [HW] Disable the AUX Loopback command while probing
++ for the AUX port
++ i8042.nomux [HW] Don't check presence of an active multiplexing
++ controller
++ i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
++ controllers
++ i8042.notimeout [HW] Ignore timeout condition signalled by controller
++ i8042.reset [HW] Reset the controller during init and cleanup
++ i8042.unlock [HW] Unlock (ignore) the keylock
++
++ i810= [HW,DRM]
++
++ i8k.ignore_dmi [HW] Continue probing hardware even if DMI data
++ indicates that the driver is running on unsupported
++ hardware.
++ i8k.force [HW] Activate i8k driver even if SMM BIOS signature
++ does not match list of supported models.
++ i8k.power_status
++ [HW] Report power status in /proc/i8k
++ (disabled by default)
++ i8k.restricted [HW] Allow controlling fans only if SYS_ADMIN
++ capability is set.
++
++ i915.invert_brightness=
++ [DRM] Invert the sense of the variable that is used to
++ set the brightness of the panel backlight. Normally a
++ brightness value of 0 indicates backlight switched off,
++ and the maximum of the brightness value sets the backlight
++ to maximum brightness. If this parameter is set to 0
++ (default) and the machine requires it, or this parameter
++ is set to 1, a brightness value of 0 sets the backlight
++ to maximum brightness, and the maximum of the brightness
++ value switches the backlight off.
++ -1 -- never invert brightness
++ 0 -- machine default
++ 1 -- force brightness inversion
++
++ icn= [HW,ISDN]
++ Format: <io>[,<membase>[,<icn_id>[,<icn_id2>]]]
++
++ ide-core.nodma= [HW] (E)IDE subsystem
++ Format: =0.0 to prevent dma on hda, =0.1 hdb =1.0 hdc
++ .vlb_clock .pci_clock .noflush .nohpa .noprobe .nowerr
++ .cdrom .chs .ignore_cable are additional options
++ See Documentation/ide/ide.txt.
++
++ ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
++ Claim all unknown PCI IDE storage controllers.
++
++ idle= [X86]
++ Format: idle=poll, idle=halt, idle=nomwait
++ Poll forces a polling idle loop that can slightly
++ improve the performance of waking up an idle CPU, but
++ will use a lot of power and make the system run hot.
++ Not recommended.
++ idle=halt: Halt is forced to be used for CPU idle.
++ In such case C2/C3 won't be used again.
++ idle=nomwait: Disable mwait for CPU C-states
++
++ ignore_loglevel [KNL]
++ Ignore loglevel setting - this will print /all/
++ kernel messages to the console. Useful for debugging.
++ We also add it as printk module parameter, so users
++ could change it dynamically, usually by
++ /sys/module/printk/parameters/ignore_loglevel.
++
++ ihash_entries= [KNL]
++ Set number of hash buckets for inode cache.
++
++ ima_appraise= [IMA] appraise integrity measurements
++ Format: { "off" | "enforce" | "fix" }
++ default: "enforce"
++
++ ima_appraise_tcb [IMA]
++ The builtin appraise policy appraises all files
++ owned by uid=0.
++
++ ima_hash= [IMA]
++ Format: { md5 | sha1 | rmd160 | sha256 | sha384
++ | sha512 | ... }
++ default: "sha1"
++
++ The list of supported hash algorithms is defined
++ in crypto/hash_info.h.
++
++ ima_tcb [IMA]
++ Load a policy which meets the needs of the Trusted
++ Computing Base. This means IMA will measure all
++ programs exec'd, files mmap'd for exec, and all files
++ opened for read by uid=0.
++
++ ima_template= [IMA]
++ Select one of defined IMA measurements template formats.
++ Formats: { "ima" | "ima-ng" }
++ Default: "ima-ng"
++
++ init= [KNL]
++ Format: <full_path>
++ Run specified binary instead of /sbin/init as init
++ process.
++
++ initcall_debug [KNL] Trace initcalls as they are executed. Useful
++ for working out where the kernel is dying during
++ startup.
++
++ initrd= [BOOT] Specify the location of the initial ramdisk
++
++ inport.irq= [HW] Inport (ATI XL and Microsoft) busmouse driver
++ Format: <irq>
++
++ int_pln_enable [x86] Enable power limit notification interrupt
++
++ integrity_audit=[IMA]
++ Format: { "0" | "1" }
++ 0 -- basic integrity auditing messages. (Default)
++ 1 -- additional integrity auditing messages.
++
++ intel_iommu= [DMAR] Intel IOMMU driver (DMAR) option
++ on
++ Enable intel iommu driver.
++ off
++ Disable intel iommu driver.
++ igfx_off [Default Off]
++ By default, gfx is mapped as normal device. If a gfx
++ device has a dedicated DMAR unit, the DMAR unit is
++ bypassed by not enabling DMAR with this option. In
++ this case, gfx device will use physical address for
++ DMA.
++ forcedac [x86_64]
++ With this option iommu will not optimize to look
++ for io virtual address below 32-bit forcing dual
++ address cycle on pci bus for cards supporting greater
++ than 32-bit addressing. The default is to look
++ for translation below 32-bit and if not available
++ then look in the higher range.
++ strict [Default Off]
++ With this option on every unmap_single operation will
++ result in a hardware IOTLB flush operation as opposed
++ to batching them for performance.
++ sp_off [Default Off]
++ By default, super page will be supported if Intel IOMMU
++ has the capability. With this option, super page will
++ not be supported.
++
++ intel_idle.max_cstate= [KNL,HW,ACPI,X86]
++ 0 disables intel_idle and fall back on acpi_idle.
++ 1 to 6 specify maximum depth of C-state.
++
++ intel_pstate= [X86]
++ disable
++ Do not enable intel_pstate as the default
++ scaling driver for the supported processors
++
++ intremap= [X86-64, Intel-IOMMU]
++ on enable Interrupt Remapping (default)
++ off disable Interrupt Remapping
++ nosid disable Source ID checking
++ no_x2apic_optout
++ BIOS x2APIC opt-out request will be ignored
++
++ iomem= Disable strict checking of access to MMIO memory
++ strict regions from userspace.
++ relaxed
++
++ iommu= [x86]
++ off
++ force
++ noforce
++ biomerge
++ panic
++ nopanic
++ merge
++ nomerge
++ forcesac
++ soft
++ pt [x86, IA-64]
++
++
++ io7= [HW] IO7 for Marvel based alpha systems
++ See comment before marvel_specify_io7 in
++ arch/alpha/kernel/core_marvel.c.
++
++ io_delay= [X86] I/O delay method
++ 0x80
++ Standard port 0x80 based delay
++ 0xed
++ Alternate port 0xed based delay (needed on some systems)
++ udelay
++ Simple two microseconds delay
++ none
++ No delay
++
++ ip= [IP_PNP]
++ See Documentation/filesystems/nfs/nfsroot.txt.
++
++ ip2= [HW] Set IO/IRQ pairs for up to 4 IntelliPort boards
++ See comment before ip2_setup() in
++ drivers/char/ip2/ip2base.c.
++
++ irqfixup [HW]
++ When an interrupt is not handled search all handlers
++ for it. Intended to get systems with badly broken
++ firmware running.
++
++ irqpoll [HW]
++ When an interrupt is not handled search all handlers
++ for it. Also check all handlers each timer
++ interrupt. Intended to get systems with badly broken
++ firmware running.
++
++ isapnp= [ISAPNP]
++ Format: <RDP>,<reset>,<pci_scan>,<verbosity>
++
++ isolcpus= [KNL,SMP] Isolate CPUs from the general scheduler.
++ Format:
++ <cpu number>,...,<cpu number>
++ or
++ <cpu number>-<cpu number>
++ (must be a positive range in ascending order)
++ or a mixture
++ <cpu number>,...,<cpu number>-<cpu number>
++
++ This option can be used to specify one or more CPUs
++ to isolate from the general SMP balancing and scheduling
++ algorithms. You can move a process onto or off an
++ "isolated" CPU via the CPU affinity syscalls or cpuset.
++ <cpu number> begins at 0 and the maximum value is
++ "number of CPUs in system - 1".
++
++ This option is the preferred way to isolate CPUs. The
++ alternative -- manually setting the CPU mask of all
++ tasks in the system -- can cause problems and
++ suboptimal load balancer performance.
++
++ iucv= [HW,NET]
++
++ ivrs_ioapic [HW,X86_64]
++ Provide an override to the IOAPIC-ID<->DEVICE-ID
++ mapping provided in the IVRS ACPI table. For
++ example, to map IOAPIC-ID decimal 10 to
++ PCI device 00:14.0 write the parameter as:
++ ivrs_ioapic[10]=00:14.0
++
++ ivrs_hpet [HW,X86_64]
++ Provide an override to the HPET-ID<->DEVICE-ID
++ mapping provided in the IVRS ACPI table. For
++ example, to map HPET-ID decimal 0 to
++ PCI device 00:14.0 write the parameter as:
++ ivrs_hpet[0]=00:14.0
++
++ js= [HW,JOY] Analog joystick
++ See Documentation/input/joystick.txt.
++
++ keepinitrd [HW,ARM]
++
++ kernelcore=nn[KMG] [KNL,X86,IA-64,PPC] This parameter
++ specifies the amount of memory usable by the kernel
++ for non-movable allocations. The requested amount is
++ spread evenly throughout all nodes in the system. The
++ remaining memory in each node is used for Movable
++ pages. In the event, a node is too small to have both
++ kernelcore and Movable pages, kernelcore pages will
++ take priority and other nodes will have a larger number
++ of Movable pages. The Movable zone is used for the
++ allocation of pages that may be reclaimed or moved
++ by the page migration subsystem. This means that
++ HugeTLB pages may not be allocated from this zone.
++ Note that allocations like PTEs-from-HighMem still
++ use the HighMem zone if it exists, and the Normal
++ zone if it does not.
++
++ kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port.
++ Format: <Controller#>[,poll interval]
++ The controller # is the number of the ehci usb debug
++ port as it is probed via PCI. The poll interval is
++ optional and is the number seconds in between
++ each poll cycle to the debug port in case you need
++ the functionality for interrupting the kernel with
++ gdb or control-c on the dbgp connection. When
++ not using this parameter you use sysrq-g to break into
++ the kernel debugger.
++
++ kgdboc= [KGDB,HW] kgdb over consoles.
++ Requires a tty driver that supports console polling,
++ or a supported polling keyboard driver (non-usb).
++ Serial only format: <serial_device>[,baud]
++ keyboard only format: kbd
++ keyboard and serial format: kbd,<serial_device>[,baud]
++ Optional Kernel mode setting:
++ kms, kbd format: kms,kbd
++ kms, kbd and serial format: kms,kbd,<ser_dev>[,baud]
++
++ kgdbwait [KGDB] Stop kernel execution and enter the
++ kernel debugger at the earliest opportunity.
++
++ kmac= [MIPS] korina ethernet MAC address.
++ Configure the RouterBoard 532 series on-chip
++ Ethernet adapter MAC address.
++
++ kmemleak= [KNL] Boot-time kmemleak enable/disable
++ Valid arguments: on, off
++ Default: on
++
++ kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode
++ Valid arguments: 0, 1, 2
++ kmemcheck=0 (disabled)
++ kmemcheck=1 (enabled)
++ kmemcheck=2 (one-shot mode)
++ Default: 2 (one-shot mode)
++
++ kstack=N [X86] Print N words from the kernel stack
++ in oops dumps.
++
++ kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
++ Default is 0 (don't ignore, but inject #GP)
++
++ kvm.mmu_audit= [KVM] This is a R/W parameter which allows audit
++ KVM MMU at runtime.
++ Default is 0 (off)
++
++ kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM.
++ Default is 1 (enabled)
++
++ kvm-amd.npt= [KVM,AMD] Disable nested paging (virtualized MMU)
++ for all guests.
++ Default is 1 (enabled) if in 64-bit or 32-bit PAE mode.
++
++ kvm-intel.ept= [KVM,Intel] Disable extended page tables
++ (virtualized MMU) support on capable Intel chips.
++ Default is 1 (enabled)
++
++ kvm-intel.emulate_invalid_guest_state=
++ [KVM,Intel] Enable emulation of invalid guest states
++ Default is 0 (disabled)
++
++ kvm-intel.flexpriority=
++ [KVM,Intel] Disable FlexPriority feature (TPR shadow).
++ Default is 1 (enabled)
++
++ kvm-intel.nested=
++ [KVM,Intel] Enable VMX nesting (nVMX).
++ Default is 0 (disabled)
++
++ kvm-intel.unrestricted_guest=
++ [KVM,Intel] Disable unrestricted guest feature
++ (virtualized real and unpaged mode) on capable
++ Intel chips. Default is 1 (enabled)
++
++ kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification
++ feature (tagged TLBs) on capable Intel chips.
++ Default is 1 (enabled)
++
++ l2cr= [PPC]
++
++ l3cr= [PPC]
++
++ lapic [X86-32,APIC] Enable the local APIC even if BIOS
++ disabled it.
++
++ lapic= [x86,APIC] "notscdeadline" Do not use TSC deadline
++ value for LAPIC timer one-shot implementation. Default
++ back to the programmable timer unit in the LAPIC.
++
++ lapic_timer_c2_ok [X86,APIC] trust the local apic timer
++ in C2 power state.
++
++ libata.dma= [LIBATA] DMA control
++ libata.dma=0 Disable all PATA and SATA DMA
++ libata.dma=1 PATA and SATA Disk DMA only
++ libata.dma=2 ATAPI (CDROM) DMA only
++ libata.dma=4 Compact Flash DMA only
++ Combinations also work, so libata.dma=3 enables DMA
++ for disks and CDROMs, but not CFs.
++
++ libata.ignore_hpa= [LIBATA] Ignore HPA limit
++ libata.ignore_hpa=0 keep BIOS limits (default)
++ libata.ignore_hpa=1 ignore limits, using full disk
++
++ libata.noacpi [LIBATA] Disables use of ACPI in libata suspend/resume
++ when set.
++ Format: <int>
++
++ libata.force= [LIBATA] Force configurations. The format is comma
++ separated list of "[ID:]VAL" where ID is
++ PORT[.DEVICE]. PORT and DEVICE are decimal numbers
++ matching port, link or device. Basically, it matches
++ the ATA ID string printed on console by libata. If
++ the whole ID part is omitted, the last PORT and DEVICE
++ values are used. If ID hasn't been specified yet, the
++ configuration applies to all ports, links and devices.
++
++ If only DEVICE is omitted, the parameter applies to
++ the port and all links and devices behind it. DEVICE
++ number of 0 either selects the first device or the
++ first fan-out link behind PMP device. It does not
++ select the host link. DEVICE number of 15 selects the
++ host link and device attached to it.
++
++ The VAL specifies the configuration to force. As long
++ as there's no ambiguity shortcut notation is allowed.
++ For example, both 1.5 and 1.5G would work for 1.5Gbps.
++ The following configurations can be forced.
++
++ * Cable type: 40c, 80c, short40c, unk, ign or sata.
++ Any ID with matching PORT is used.
++
++ * SATA link speed limit: 1.5Gbps or 3.0Gbps.
++
++ * Transfer mode: pio[0-7], mwdma[0-4] and udma[0-7].
++ udma[/][16,25,33,44,66,100,133] notation is also
++ allowed.
++
++ * [no]ncq: Turn on or off NCQ.
++
++ * nohrst, nosrst, norst: suppress hard, soft
++ and both resets.
++
++ * rstonce: only attempt one reset during
++ hot-unplug link recovery
++
++ * dump_id: dump IDENTIFY data.
++
++ * atapi_dmadir: Enable ATAPI DMADIR bridge support
++
++ * disable: Disable this device.
++
++ If there are multiple matching configurations changing
++ the same attribute, the last one is used.
++
++ memblock=debug [KNL] Enable memblock debug messages.
++
++ load_ramdisk= [RAM] List of ramdisks to load from floppy
++ See Documentation/blockdev/ramdisk.txt.
++
++ lockd.nlm_grace_period=P [NFS] Assign grace period.
++ Format: <integer>
++
++ lockd.nlm_tcpport=N [NFS] Assign TCP port.
++ Format: <integer>
++
++ lockd.nlm_timeout=T [NFS] Assign timeout value.
++ Format: <integer>
++
++ lockd.nlm_udpport=M [NFS] Assign UDP port.
++ Format: <integer>
++
++ logibm.irq= [HW,MOUSE] Logitech Bus Mouse Driver
++ Format: <irq>
++
++ loglevel= All Kernel Messages with a loglevel smaller than the
++ console loglevel will be printed to the console. It can
++ also be changed with klogd or other programs. The
++ loglevels are defined as follows:
++
++ 0 (KERN_EMERG) system is unusable
++ 1 (KERN_ALERT) action must be taken immediately
++ 2 (KERN_CRIT) critical conditions
++ 3 (KERN_ERR) error conditions
++ 4 (KERN_WARNING) warning conditions
++ 5 (KERN_NOTICE) normal but significant condition
++ 6 (KERN_INFO) informational
++ 7 (KERN_DEBUG) debug-level messages
++
++ log_buf_len=n[KMG] Sets the size of the printk ring buffer,
++ in bytes. n must be a power of two. The default
++ size is set in the kernel config file.
++
++ logo.nologo [FB] Disables display of the built-in Linux logo.
++ This may be used to provide more screen space for
++ kernel log messages and is useful when debugging
++ kernel boot problems.
++
++ lp=0 [LP] Specify parallel ports to use, e.g,
++ lp=port[,port...] lp=none,parport0 (lp0 not configured, lp1 uses
++ lp=reset first parallel port). 'lp=0' disables the
++ lp=auto printer driver. 'lp=reset' (which can be
++ specified in addition to the ports) causes
++ attached printers to be reset. Using
++ lp=port1,port2,... specifies the parallel ports
++ to associate lp devices with, starting with
++ lp0. A port specification may be 'none' to skip
++ that lp device, or a parport name such as
++ 'parport0'. Specifying 'lp=auto' instead of a
++ port specification list means that device IDs
++ from each port should be examined, to see if
++ an IEEE 1284-compliant printer is attached; if
++ so, the driver will manage that printer.
++ See also header of drivers/char/lp.c.
++
++ lpj=n [KNL]
++ Sets loops_per_jiffy to given constant, thus avoiding
++ time-consuming boot-time autodetection (up to 250 ms per
++ CPU). 0 enables autodetection (default). To determine
++ the correct value for your kernel, boot with normal
++ autodetection and see what value is printed. Note that
++ on SMP systems the preset will be applied to all CPUs,
++ which is likely to cause problems if your CPUs need
++ significantly divergent settings. An incorrect value
++ will cause delays in the kernel to be wrong, leading to
++ unpredictable I/O errors and other breakage. Although
++ unlikely, in the extreme case this might damage your
++ hardware.
++
++ ltpc= [NET]
++ Format: <io>,<irq>,<dma>
++
++ machvec= [IA-64] Force the use of a particular machine-vector
++ (machvec) in a generic kernel.
++ Example: machvec=hpzx1_swiotlb
++
++ machtype= [Loongson] Share the same kernel image file between different
++			yeeloong laptops.
++ Example: machtype=lemote-yeeloong-2f-7inch
++
++ max_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory greater
++ than or equal to this physical address is ignored.
++
++ maxcpus= [SMP] Maximum number of processors that an SMP kernel
++ should make use of. maxcpus=n : n >= 0 limits the
++ kernel to using 'n' processors. n=0 is a special case,
++ it is equivalent to "nosmp", which also disables
++ the IO APIC.
++
++ max_loop= [LOOP] The number of loop block devices that get
++ (loop.max_loop) unconditionally pre-created at init time. The default
++ number is configured by BLK_DEV_LOOP_MIN_COUNT. Instead
++ of statically allocating a predefined number, loop
++ devices can be requested on-demand with the
++ /dev/loop-control interface.
++
++ mce [X86-32] Machine Check Exception
++
++ mce=option [X86-64] See Documentation/x86/x86_64/boot-options.txt
++
++ md= [HW] RAID subsystems devices and level
++ See Documentation/md.txt.
++
++ mdacon= [MDA]
++ Format: <first>,<last>
++ Specifies range of consoles to be captured by the MDA.
++
++ mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
++ Amount of memory to be used when the kernel is not able
++ to see the whole system memory or for test.
++ [X86] Work as limiting max address. Use together
++ with memmap= to avoid physical address space collisions.
++ Without memmap= PCI devices could be placed at addresses
++ belonging to unused RAM.
++
++ mem=nopentium [BUGS=X86-32] Disable usage of 4MB pages for kernel
++ memory.
++
++ memchunk=nn[KMG]
++ [KNL,SH] Allow user to override the default size for
++ per-device physically contiguous DMA buffers.
++
++ memmap=exactmap [KNL,X86] Enable setting of an exact
++ E820 memory map, as specified by the user.
++ Such memmap=exactmap lines can be constructed based on
++ BIOS output or other requirements. See the memmap=nn@ss
++ option description.
++
++ memmap=nn[KMG]@ss[KMG]
++ [KNL] Force usage of a specific region of memory.
++ Region of memory to be used is from ss to ss+nn.
++
++ memmap=nn[KMG]#ss[KMG]
++ [KNL,ACPI] Mark specific memory as ACPI data.
++ Region of memory to be marked is from ss to ss+nn.
++
++ memmap=nn[KMG]$ss[KMG]
++ [KNL,ACPI] Mark specific memory as reserved.
++ Region of memory to be reserved is from ss to ss+nn.
++ Example: Exclude memory from 0x18690000-0x1869ffff
++ memmap=64K$0x18690000
++ or
++ memmap=0x10000$0x18690000
++
++ memory_corruption_check=0/1 [X86]
++ Some BIOSes seem to corrupt the first 64k of
++ memory when doing things like suspend/resume.
++ Setting this option will scan the memory
++ looking for corruption. Enabling this will
++ both detect corruption and prevent the kernel
++ from using the memory being corrupted.
++			However, it's intended as a diagnostic tool; if
++ repeatable BIOS-originated corruption always
++ affects the same memory, you can use memmap=
++ to prevent the kernel from using that memory.
++
++ memory_corruption_check_size=size [X86]
++ By default it checks for corruption in the low
++ 64k, making this memory unavailable for normal
++ use. Use this parameter to scan for
++ corruption in more or less memory.
++
++ memory_corruption_check_period=seconds [X86]
++ By default it checks for corruption every 60
++ seconds. Use this parameter to check at some
++ other rate. 0 disables periodic checking.
++
++ memtest= [KNL,X86] Enable memtest
++ Format: <integer>
++ default : 0 <disable>
++ Specifies the number of memtest passes to be
++ performed. Each pass selects another test
++ pattern from a given set of patterns. Memtest
++ fills the memory with this pattern, validates
++ memory contents and reserves bad memory
++ regions that are detected.
++
++ meye.*= [HW] Set MotionEye Camera parameters
++ See Documentation/video4linux/meye.txt.
++
++ mfgpt_irq= [IA-32] Specify the IRQ to use for the
++ Multi-Function General Purpose Timers on AMD Geode
++ platforms.
++
++ mfgptfix [X86-32] Fix MFGPT timers on AMD Geode platforms when
++ the BIOS has incorrectly applied a workaround. TinyBIOS
++ version 0.98 is known to be affected, 0.99 fixes the
++ problem by letting the user disable the workaround.
++
++ mga= [HW,DRM]
++
++ min_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory below this
++ physical address is ignored.
++
++ mini2440= [ARM,HW,KNL]
++ Format:[0..2][b][c][t]
++ Default: "0tb"
++ MINI2440 configuration specification:
++ 0 - The attached screen is the 3.5" TFT
++ 1 - The attached screen is the 7" TFT
++ 2 - The VGA Shield is attached (1024x768)
++ Leaving out the screen size parameter will not load
++ the TFT driver, and the framebuffer will be left
++ unconfigured.
++ b - Enable backlight. The TFT backlight pin will be
++ linked to the kernel VESA blanking code and a GPIO
++ LED. This parameter is not necessary when using the
++ VGA shield.
++ c - Enable the s3c camera interface.
++ t - Reserved for enabling touchscreen support. The
++ touchscreen support is not enabled in the mainstream
++ kernel as of 2.6.30, a preliminary port can be found
++ in the "bleeding edge" mini2440 support kernel at
++ http://repo.or.cz/w/linux-2.6/mini2440.git
++
++ mminit_loglevel=
++ [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
++ parameter allows control of the logging verbosity for
++ the additional memory initialisation checks. A value
++ of 0 disables mminit logging and a level of 4 will
++ log everything. Information is printed at KERN_DEBUG
++ so loglevel=8 may also need to be specified.
++
++ module.sig_enforce
++ [KNL] When CONFIG_MODULE_SIG is set, this means that
++ modules without (valid) signatures will fail to load.
++ Note that if CONFIG_MODULE_SIG_FORCE is set, that
++ is always true, so this option does nothing.
++
++ mousedev.tap_time=
++ [MOUSE] Maximum time between finger touching and
++ leaving touchpad surface for touch to be considered
++ a tap and be reported as a left button click (for
++ touchpads working in absolute mode only).
++ Format: <msecs>
++ mousedev.xres= [MOUSE] Horizontal screen resolution, used for devices
++ reporting absolute coordinates, such as tablets
++ mousedev.yres= [MOUSE] Vertical screen resolution, used for devices
++ reporting absolute coordinates, such as tablets
++
++ movablecore=nn[KMG] [KNL,X86,IA-64,PPC] This parameter
++ is similar to kernelcore except it specifies the
++ amount of memory used for migratable allocations.
++ If both kernelcore and movablecore is specified,
++ then kernelcore will be at *least* the specified
++ value but may be more. If movablecore on its own
++ is specified, the administrator must be careful
++ that the amount of memory usable for all allocations
++ is not too small.
++
++ movable_node [KNL,X86] Boot-time switch to enable the effects
++ of CONFIG_MOVABLE_NODE=y. See mm/Kconfig for details.
++
++ MTD_Partition= [MTD]
++ Format: <name>,<region-number>,<size>,<offset>
++
++ MTD_Region= [MTD] Format:
++ <name>,<region-number>[,<base>,<size>,<buswidth>,<altbuswidth>]
++
++ mtdparts= [MTD]
++ See drivers/mtd/cmdlinepart.c.
++
++ multitce=off [PPC] This parameter disables the use of the pSeries
++ firmware feature for updating multiple TCE entries
++ at a time.
++
++ onenand.bdry= [HW,MTD] Flex-OneNAND Boundary Configuration
++
++ Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
++
++ boundary - index of last SLC block on Flex-OneNAND.
++ The remaining blocks are configured as MLC blocks.
++ lock - Configure if Flex-OneNAND boundary should be locked.
++ Once locked, the boundary cannot be changed.
++ 1 indicates lock status, 0 indicates unlock status.
++
++ mtdset= [ARM]
++ ARM/S3C2412 JIVE boot control
++
++ See arch/arm/mach-s3c2412/mach-jive.c
++
++ mtouchusb.raw_coordinates=
++ [HW] Make the MicroTouch USB driver use raw coordinates
++ ('y', default) or cooked coordinates ('n')
++
++ mtrr_chunk_size=nn[KMG] [X86]
++ used for mtrr cleanup. It is largest continuous chunk
++ that could hold holes aka. UC entries.
++
++ mtrr_gran_size=nn[KMG] [X86]
++ Used for mtrr cleanup. It is granularity of mtrr block.
++ Default is 1.
++ Large value could prevent small alignment from
++ using up MTRRs.
++
++ mtrr_spare_reg_nr=n [X86]
++ Format: <integer>
++ Range: 0,7 : spare reg number
++ Default : 1
++ Used for mtrr cleanup. It is spare mtrr entries number.
++ Set to 2 or more if your graphical card needs more.
++
++ n2= [NET] SDL Inc. RISCom/N2 synchronous serial card
++
++ netdev= [NET] Network devices parameters
++ Format: <irq>,<io>,<mem_start>,<mem_end>,<name>
++ Note that mem_start is often overloaded to mean
++ something different and driver-specific.
++ This usage is only documented in each driver source
++ file if at all.
++
++ nf_conntrack.acct=
++ [NETFILTER] Enable connection tracking flow accounting
++ 0 to disable accounting
++ 1 to enable accounting
++ Default value is 0.
++
++ nfsaddrs= [NFS] Deprecated. Use ip= instead.
++ See Documentation/filesystems/nfs/nfsroot.txt.
++
++ nfsroot= [NFS] nfs root filesystem for disk-less boxes.
++ See Documentation/filesystems/nfs/nfsroot.txt.
++
++ nfsrootdebug [NFS] enable nfsroot debugging messages.
++ See Documentation/filesystems/nfs/nfsroot.txt.
++
++ nfs.callback_tcpport=
++ [NFS] set the TCP port on which the NFSv4 callback
++ channel should listen.
++
++ nfs.cache_getent=
++ [NFS] sets the pathname to the program which is used
++ to update the NFS client cache entries.
++
++ nfs.cache_getent_timeout=
++ [NFS] sets the timeout after which an attempt to
++ update a cache entry is deemed to have failed.
++
++ nfs.idmap_cache_timeout=
++ [NFS] set the maximum lifetime for idmapper cache
++ entries.
++
++ nfs.enable_ino64=
++ [NFS] enable 64-bit inode numbers.
++ If zero, the NFS client will fake up a 32-bit inode
++ number for the readdir() and stat() syscalls instead
++ of returning the full 64-bit number.
++ The default is to return 64-bit inode numbers.
++
++ nfs.max_session_slots=
++ [NFSv4.1] Sets the maximum number of session slots
++ the client will attempt to negotiate with the server.
++ This limits the number of simultaneous RPC requests
++ that the client can send to the NFSv4.1 server.
++ Note that there is little point in setting this
++ value higher than the max_tcp_slot_table_limit.
++
++ nfs.nfs4_disable_idmapping=
++ [NFSv4] When set to the default of '1', this option
++ ensures that both the RPC level authentication
++ scheme and the NFS level operations agree to use
++ numeric uids/gids if the mount is using the
++ 'sec=sys' security flavour. In effect it is
++ disabling idmapping, which can make migration from
++ legacy NFSv2/v3 systems to NFSv4 easier.
++ Servers that do not support this mode of operation
++ will be autodetected by the client, and it will fall
++ back to using the idmapper.
++ To turn off this behaviour, set the value to '0'.
++ nfs.nfs4_unique_id=
++ [NFS4] Specify an additional fixed unique ident-
++ ification string that NFSv4 clients can insert into
++ their nfs_client_id4 string. This is typically a
++ UUID that is generated at system install time.
++
++ nfs.send_implementation_id =
++ [NFSv4.1] Send client implementation identification
++ information in exchange_id requests.
++ If zero, no implementation identification information
++ will be sent.
++ The default is to send the implementation identification
++ information.
++
++ nfs.recover_lost_locks =
++ [NFSv4] Attempt to recover locks that were lost due
++ to a lease timeout on the server. Please note that
++ doing this risks data corruption, since there are
++ no guarantees that the file will remain unchanged
++ after the locks are lost.
++ If you want to enable the kernel legacy behaviour of
++ attempting to recover these locks, then set this
++ parameter to '1'.
++ The default parameter value of '0' causes the kernel
++ not to attempt recovery of lost locks.
++
++ nfsd.nfs4_disable_idmapping=
++ [NFSv4] When set to the default of '1', the NFSv4
++ server will return only numeric uids and gids to
++ clients using auth_sys, and will accept numeric uids
++ and gids from such clients. This is intended to ease
++ migration from NFSv2/v3.
++
++ objlayoutdriver.osd_login_prog=
++ [NFS] [OBJLAYOUT] sets the pathname to the program which
++ is used to automatically discover and login into new
++ osd-targets. Please see:
++ Documentation/filesystems/pnfs.txt for more explanations
++
++ nmi_debug= [KNL,AVR32,SH] Specify one or more actions to take
++ when a NMI is triggered.
++ Format: [state][,regs][,debounce][,die]
++
++ nmi_watchdog= [KNL,BUGS=X86] Debugging features for SMP kernels
++ Format: [panic,][nopanic,][num]
++ Valid num: 0
++ 0 - turn nmi_watchdog off
++ When panic is specified, panic when an NMI watchdog
++ timeout occurs (or 'nopanic' to override the opposite
++ default).
++ This is useful when you use a panic=... timeout and
++ need the box quickly up again.
++
++ netpoll.carrier_timeout=
++ [NET] Specifies amount of time (in seconds) that
++ netpoll should wait for a carrier. By default netpoll
++ waits 4 seconds.
++
++ no387 [BUGS=X86-32] Tells the kernel to use the 387 maths
++ emulation library even if a 387 maths coprocessor
++ is present.
++
++ no_console_suspend
++ [HW] Never suspend the console
++ Disable suspending of consoles during suspend and
++ hibernate operations. Once disabled, debugging
++ messages can reach various consoles while the rest
++ of the system is being put to sleep (ie, while
++ debugging driver suspend/resume hooks). This may
++ not work reliably with all consoles, but is known
++ to work with serial and VGA consoles.
++ To facilitate more flexible debugging, we also add
++ console_suspend, a printk module parameter to control
++ it. Users could use console_suspend (usually
++ /sys/module/printk/parameters/console_suspend) to
++ turn on/off it dynamically.
++
++ noaliencache [MM, NUMA, SLAB] Disables the allocation of alien
++ caches in the slab allocator. Saves per-node memory,
++ but will impact performance.
++
++ noalign [KNL,ARM]
++
++ noapic [SMP,APIC] Tells the kernel to not make use of any
++ IOAPICs that may be present in the system.
++
++ nokaslr [X86]
++ Disable kernel base offset ASLR (Address Space
++ Layout Randomization) if built into the kernel.
++
++ noautogroup Disable scheduler automatic task group creation.
++
++ nobats [PPC] Do not use BATs for mapping kernel lowmem
++ on "Classic" PPC cores.
++
++ nocache [ARM]
++
++ noclflush [BUGS=X86] Don't use the CLFLUSH instruction
++
++ nodelayacct [KNL] Disable per-task delay accounting
++
++ nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects.
++
++ nodsp [SH] Disable hardware DSP at boot time.
++
++ noefi [X86] Disable EFI runtime services support.
++
++ noexec [IA-64]
++
++ noexec [X86]
++ On X86-32 available only on PAE configured kernels.
++ noexec=on: enable non-executable mappings (default)
++ noexec=off: disable non-executable mappings
++
++ nosmap [X86]
++ Disable SMAP (Supervisor Mode Access Prevention)
++ even if it is supported by processor.
++
++ nosmep [X86]
++ Disable SMEP (Supervisor Mode Execution Prevention)
++ even if it is supported by processor.
++
++ noexec32 [X86-64]
++ This affects only 32-bit executables.
++ noexec32=on: enable non-executable mappings (default)
++ read doesn't imply executable mappings
++ noexec32=off: disable non-executable mappings
++ read implies executable mappings
++
++ nofpu [SH] Disable hardware FPU at boot time.
++
++ nofxsr [BUGS=X86-32] Disables x86 floating point extended
++ register save and restore. The kernel will only save
++ legacy floating-point registers on task switch.
++
++ noxsave [BUGS=X86] Disables x86 extended register state save
++ and restore using xsave. The kernel will fallback to
++ enabling legacy floating-point and sse state.
++
++ eagerfpu= [X86]
++ on enable eager fpu restore
++ off disable eager fpu restore
++ auto selects the default scheme, which automatically
++ enables eagerfpu restore for xsaveopt.
++
++ nohlt [BUGS=ARM,SH] Tells the kernel that the sleep(SH) or
++ wfi(ARM) instruction doesn't work correctly and not to
++ use it. This is also useful when using JTAG debugger.
++
++ no_file_caps Tells the kernel not to honor file capabilities. The
++ only way then for a file to be executed with privilege
++ is to be setuid root or executed by root.
++
++ nohalt [IA-64] Tells the kernel not to use the power saving
++ function PAL_HALT_LIGHT when idle. This increases
++ power-consumption. On the positive side, it reduces
++ interrupt wake-up latency, which may improve performance
++ in certain environments such as networked servers or
++ real-time systems.
++
++ nohz= [KNL] Boottime enable/disable dynamic ticks
++ Valid arguments: on, off
++ Default: on
++
++ nohz_full= [KNL,BOOT]
++ In kernels built with CONFIG_NO_HZ_FULL=y, set
++ the specified list of CPUs whose tick will be stopped
++ whenever possible. The boot CPU will be forced outside
++ the range to maintain the timekeeping.
++ The CPUs in this range must also be included in the
++ rcu_nocbs= set.
++
++ noiotrap [SH] Disables trapped I/O port accesses.
++
++ noirqdebug [X86-32] Disables the code which attempts to detect and
++ disable unhandled interrupt sources.
++
++ no_timer_check [X86,APIC] Disables the code which tests for
++ broken timer IRQ sources.
++
++ noisapnp [ISAPNP] Disables ISA PnP code.
++
++ noinitrd [RAM] Tells the kernel not to load any configured
++ initial RAM disk.
++
++ nointremap [X86-64, Intel-IOMMU] Do not enable interrupt
++ remapping.
++ [Deprecated - use intremap=off]
++
++ nointroute [IA-64]
++
++ nojitter [IA-64] Disables jitter checking for ITC timers.
++
++ no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver
++
++ no-kvmapf [X86,KVM] Disable paravirtualized asynchronous page
++ fault handling.
++
++ no-steal-acc [X86,KVM] Disable paravirtualized steal time accounting.
++ steal time is computed, but won't influence scheduler
++ behaviour
++
++ nolapic [X86-32,APIC] Do not enable or use the local APIC.
++
++ nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
++
++ noltlbs [PPC] Do not use large page/tlb entries for kernel
++ lowmem mapping on PPC40x.
++
++ nomca [IA-64] Disable machine check abort handling
++
++ nomce [X86-32] Machine Check Exception
++
++ nomfgpt [X86-32] Disable Multi-Function General Purpose
++ Timer usage (for AMD Geode machines).
++
++ nonmi_ipi [X86] Disable using NMI IPIs during panic/reboot to
++ shutdown the other cpus. Instead use the REBOOT_VECTOR
++ irq.
++
++ nomodule Disable module load
++
++ nopat [X86] Disable PAT (page attribute table extension of
++ pagetables) support.
++
++ norandmaps Don't use address space randomization. Equivalent to
++ echo 0 > /proc/sys/kernel/randomize_va_space
++
++ noreplace-paravirt [X86,IA-64,PV_OPS] Don't patch paravirt_ops
++
++ noreplace-smp [X86-32,SMP] Don't replace SMP instructions
++ with UP alternatives
++
++ nordrand [X86] Disable the direct use of the RDRAND
++ instruction even if it is supported by the
++ processor. RDRAND is still available to user
++ space applications.
++
++ noresume [SWSUSP] Disables resume and restores original swap
++ space.
++
++ no-scroll [VGA] Disables scrollback.
++ This is required for the Braillex ib80-piezo Braille
++ reader made by F.H. Papenmeier (Germany).
++
++ nosbagart [IA-64]
++
++ nosep [BUGS=X86-32] Disables x86 SYSENTER/SYSEXIT support.
++
++ nosmp [SMP] Tells an SMP kernel to act as a UP kernel,
++ and disable the IO APIC. legacy for "maxcpus=0".
++
++ nosoftlockup [KNL] Disable the soft-lockup detector.
++
++ nosync [HW,M68K] Disables sync negotiation for all devices.
++
++ notsc [BUGS=X86-32] Disable Time Stamp Counter
++
++ nousb [USB] Disable the USB subsystem
++
++ nowatchdog [KNL] Disable the lockup detector (NMI watchdog).
++
++ nowb [ARM]
++
++ nox2apic [X86-64,APIC] Do not enable x2APIC mode.
++
++ cpu0_hotplug [X86] Turn on CPU0 hotplug feature when
++ CONFIG_BOOTPARAM_HOTPLUG_CPU0 is off.
++ Some features depend on CPU0. Known dependencies are:
++ 1. Resume from suspend/hibernate depends on CPU0.
++ Suspend/hibernate will fail if CPU0 is offline and you
++ need to online CPU0 before suspend/hibernate.
++ 2. PIC interrupts also depend on CPU0. CPU0 can't be
++ removed if a PIC interrupt is detected.
++ It's said poweroff/reboot may depend on CPU0 on some
++ machines although I haven't seen such issues so far
++ after CPU0 is offline on a few tested machines.
++ If the dependencies are under your control, you can
++ turn on cpu0_hotplug.
++
++ nptcg= [IA-64] Override max number of concurrent global TLB
++ purges which is reported from either PAL_VM_SUMMARY or
++ SAL PALO.
++
++ nr_cpus= [SMP] Maximum number of processors that an SMP kernel
++ could support. nr_cpus=n : n >= 1 limits the kernel to
++ supporting 'n' processors. Later in runtime you can not
++ use hotplug cpu feature to put more cpu back to online.
++ just like you compile the kernel NR_CPUS=n
++
++ nr_uarts= [SERIAL] maximum number of UARTs to be registered.
++
++ numa_balancing= [KNL,X86] Enable or disable automatic NUMA balancing.
++ Allowed values are enable and disable
++
++ numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
++ one of ['zone', 'node', 'default'] can be specified
++ This can be set from sysctl after boot.
++ See Documentation/sysctl/vm.txt for details.
++
++ ohci1394_dma=early [HW] enable debugging via the ohci1394 driver.
++ See Documentation/debugging-via-ohci1394.txt for more
++ info.
++
++ olpc_ec_timeout= [OLPC] ms delay when issuing EC commands
++ Rather than timing out after 20 ms if an EC
++ command is not properly ACKed, override the length
++ of the timeout. We have interrupts disabled while
++ waiting for the ACK, so if this is set too high
++ interrupts *may* be lost!
++
++ omap_mux= [OMAP] Override bootloader pin multiplexing.
++ Format: <mux_mode0.mode_name=value>...
++ For example, to override I2C bus2:
++ omap_mux=i2c2_scl.i2c2_scl=0x100,i2c2_sda.i2c2_sda=0x100
++
++ oprofile.timer= [HW]
++ Use timer interrupt instead of performance counters
++
++ oprofile.cpu_type= Force an oprofile cpu type
++ This might be useful if you have an older oprofile
++ userland or if you want common events.
++ Format: { arch_perfmon }
++ arch_perfmon: [X86] Force use of architectural
++ perfmon on Intel CPUs instead of the
++ CPU specific event set.
++ timer: [X86] Force use of architectural NMI
++ timer mode (see also oprofile.timer
++ for generic hr timer mode)
++ [s390] Force legacy basic mode sampling
++ (report cpu_type "timer")
++
++ oops=panic Always panic on oopses. Default is to just kill the
++ process, but there is a small probability of
++ deadlocking the machine.
++ This will also cause panics on machine check exceptions.
++ Useful together with panic=30 to trigger a reboot.
++
++ OSS [HW,OSS]
++ See Documentation/sound/oss/oss-parameters.txt
++
++ panic= [KNL] Kernel behaviour on panic: delay <timeout>
++ timeout > 0: seconds before rebooting
++ timeout = 0: wait forever
++ timeout < 0: reboot immediately
++ Format: <timeout>
++
++ parkbd.port= [HW] Parallel port number the keyboard adapter is
++ connected to, default is 0.
++ Format: <parport#>
++ parkbd.mode= [HW] Parallel port keyboard adapter mode of operation,
++ 0 for XT, 1 for AT (default is AT).
++ Format: <mode>
++
++ parport= [HW,PPT] Specify parallel ports. 0 disables.
++ Format: { 0 | auto | 0xBBB[,IRQ[,DMA]] }
++ Use 'auto' to force the driver to use any
++ IRQ/DMA settings detected (the default is to
++ ignore detected IRQ/DMA settings because of
++ possible conflicts). You can specify the base
++ address, IRQ, and DMA settings; IRQ and DMA
++ should be numbers, or 'auto' (for using detected
++ settings on that particular port), or 'nofifo'
++ (to avoid using a FIFO even if it is detected).
++ Parallel ports are assigned in the order they
++ are specified on the command line, starting
++ with parport0.
++
++ parport_init_mode= [HW,PPT]
++ Configure VIA parallel port to operate in
++ a specific mode. This is necessary on Pegasos
++ computer where firmware has no options for setting
++ up parallel port mode and sets it to spp.
++ Currently this function knows 686a and 8231 chips.
++ Format: [spp|ps2|epp|ecp|ecpepp]
++
++ pause_on_oops=
++ Halt all CPUs after the first oops has been printed for
++ the specified number of seconds. This is to be used if
++ your oopses keep scrolling off the screen.
++
++ pcbit= [HW,ISDN]
++
++ pcd. [PARIDE]
++ See header of drivers/block/paride/pcd.c.
++ See also Documentation/blockdev/paride.txt.
++
++ pci=option[,option...] [PCI] various PCI subsystem options:
++ earlydump [X86] dump PCI config space before the kernel
++ changes anything
++ off [X86] don't probe for the PCI bus
++ bios [X86-32] force use of PCI BIOS, don't access
++ the hardware directly. Use this if your machine
++ has a non-standard PCI host bridge.
++ nobios [X86-32] disallow use of PCI BIOS, only direct
++ hardware access methods are allowed. Use this
++ if you experience crashes upon bootup and you
++ suspect they are caused by the BIOS.
++ conf1 [X86] Force use of PCI Configuration
++ Mechanism 1.
++ conf2 [X86] Force use of PCI Configuration
++ Mechanism 2.
++ noaer [PCIE] If the PCIEAER kernel config parameter is
++ enabled, this kernel boot option can be used to
++ disable the use of PCIE advanced error reporting.
++ nodomains [PCI] Disable support for multiple PCI
++ root domains (aka PCI segments, in ACPI-speak).
++ nommconf [X86] Disable use of MMCONFIG for PCI
++ Configuration
++ check_enable_amd_mmconf [X86] check for and enable
++ properly configured MMIO access to PCI
++ config space on AMD family 10h CPU
++ nomsi [MSI] If the PCI_MSI kernel config parameter is
++ enabled, this kernel boot option can be used to
++ disable the use of MSI interrupts system-wide.
++ noioapicquirk [APIC] Disable all boot interrupt quirks.
++ Safety option to keep boot IRQs enabled. This
++ should never be necessary.
++ ioapicreroute [APIC] Enable rerouting of boot IRQs to the
++ primary IO-APIC for bridges that cannot disable
++ boot IRQs. This fixes a source of spurious IRQs
++ when the system masks IRQs.
++ noioapicreroute [APIC] Disable workaround that uses the
++ boot IRQ equivalent of an IRQ that connects to
++ a chipset where boot IRQs cannot be disabled.
++ The opposite of ioapicreroute.
++ biosirq [X86-32] Use PCI BIOS calls to get the interrupt
++ routing table. These calls are known to be buggy
++ on several machines and they hang the machine
++ when used, but on other computers it's the only
++ way to get the interrupt routing table. Try
++ this option if the kernel is unable to allocate
++ IRQs or discover secondary PCI buses on your
++ motherboard.
++ rom [X86] Assign address space to expansion ROMs.
++ Use with caution as certain devices share
++ address decoders between ROMs and other
++ resources.
++ norom [X86] Do not assign address space to
++ expansion ROMs that do not already have
++ BIOS assigned address ranges.
++ nobar [X86] Do not assign address space to the
++ BARs that weren't assigned by the BIOS.
++ irqmask=0xMMMM [X86] Set a bit mask of IRQs allowed to be
++ assigned automatically to PCI devices. You can
++ make the kernel exclude IRQs of your ISA cards
++ this way.
++ pirqaddr=0xAAAAA [X86] Specify the physical address
++ of the PIRQ table (normally generated
++ by the BIOS) if it is outside the
++ F0000h-100000h range.
++ lastbus=N [X86] Scan all buses thru bus #N. Can be
++ useful if the kernel is unable to find your
++ secondary buses and you want to tell it
++ explicitly which ones they are.
++ assign-busses [X86] Always assign all PCI bus
++ numbers ourselves, overriding
++ whatever the firmware may have done.
++ usepirqmask [X86] Honor the possible IRQ mask stored
++ in the BIOS $PIR table. This is needed on
++ some systems with broken BIOSes, notably
++ some HP Pavilion N5400 and Omnibook XE3
++ notebooks. This will have no effect if ACPI
++ IRQ routing is enabled.
++ noacpi [X86] Do not use ACPI for IRQ routing
++ or for PCI scanning.
++ use_crs [X86] Use PCI host bridge window information
++ from ACPI. On BIOSes from 2008 or later, this
++ is enabled by default. If you need to use this,
++ please report a bug.
++ nocrs [X86] Ignore PCI host bridge windows from ACPI.
++ If you need to use this, please report a bug.
++ routeirq Do IRQ routing for all PCI devices.
++ This is normally done in pci_enable_device(),
++ so this option is a temporary workaround
++ for broken drivers that don't call it.
++ skip_isa_align [X86] do not align io start addr, so can
++ handle more pci cards
++ firmware [ARM] Do not re-enumerate the bus but instead
++ just use the configuration from the
++ bootloader. This is currently used on
++ IXP2000 systems where the bus has to be
++ configured a certain way for adjunct CPUs.
++ noearly [X86] Don't do any early type 1 scanning.
++ This might help on some broken boards which
++ machine check when some devices' config space
++ is read. But various workarounds are disabled
++ and some IOMMU drivers will not work.
++ bfsort Sort PCI devices into breadth-first order.
++ This sorting is done to get a device
++ order compatible with older (<= 2.4) kernels.
++ nobfsort Don't sort PCI devices into breadth-first order.
++ pcie_bus_tune_off Disable PCIe MPS (Max Payload Size)
++ tuning and use the BIOS-configured MPS defaults.
++ pcie_bus_safe Set every device's MPS to the largest value
++ supported by all devices below the root complex.
++ pcie_bus_perf Set device MPS to the largest allowable MPS
++ based on its parent bus. Also set MRRS (Max
++ Read Request Size) to the largest supported
++ value (no larger than the MPS that the device
++ or bus can support) for best performance.
++ pcie_bus_peer2peer Set every device's MPS to 128B, which
++ every device is guaranteed to support. This
++ configuration allows peer-to-peer DMA between
++ any pair of devices, possibly at the cost of
++ reduced performance. This also guarantees
++ that hot-added devices will work.
++ cbiosize=nn[KMG] The fixed amount of bus space which is
++ reserved for the CardBus bridge's IO window.
++ The default value is 256 bytes.
++ cbmemsize=nn[KMG] The fixed amount of bus space which is
++ reserved for the CardBus bridge's memory
++ window. The default value is 64 megabytes.
++ resource_alignment=
++ Format:
++ [<order of align>@][<domain>:]<bus>:<slot>.<func>[; ...]
++ Specifies alignment and device to reassign
++ aligned memory resources.
++ If <order of align> is not specified,
++ PAGE_SIZE is used as alignment.
++ PCI-PCI bridge can be specified, if resource
++ windows need to be expanded.
++ ecrc= Enable/disable PCIe ECRC (transaction layer
++ end-to-end CRC checking).
++ bios: Use BIOS/firmware settings. This is the
++				default.
++ off: Turn ECRC off
++ on: Turn ECRC on.
++ hpiosize=nn[KMG] The fixed amount of bus space which is
++ reserved for hotplug bridge's IO window.
++ Default size is 256 bytes.
++ hpmemsize=nn[KMG] The fixed amount of bus space which is
++ reserved for hotplug bridge's memory window.
++ Default size is 2 megabytes.
++ realloc= Enable/disable reallocating PCI bridge resources
++ if allocations done by BIOS are too small to
++ accommodate resources required by all child
++ devices.
++ off: Turn realloc off
++ on: Turn realloc on
++ realloc same as realloc=on
++ noari do not use PCIe ARI.
++ pcie_scan_all Scan all possible PCIe devices. Otherwise we
++ only look for one device below a PCIe downstream
++ port.
++
++ pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
++ Management.
++ off Disable ASPM.
++ force Enable ASPM even on devices that claim not to support it.
++ WARNING: Forcing ASPM on may cause system lockups.
++
++ pcie_hp= [PCIE] PCI Express Hotplug driver options:
++ nomsi Do not use MSI for PCI Express Native Hotplug (this
++ makes all PCIe ports use INTx for hotplug services).
++
++ pcie_ports= [PCIE] PCIe ports handling:
++ auto Ask the BIOS whether or not to use native PCIe services
++ associated with PCIe ports (PME, hot-plug, AER). Use
++ them only if that is allowed by the BIOS.
++ native Use native PCIe services associated with PCIe ports
++ unconditionally.
++ compat Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
++ ports driver.
++
++ pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
++ nomsi Do not use MSI for native PCIe PME signaling (this makes
++ all PCIe root ports use INTx for all services).
++
++ pcmv= [HW,PCMCIA] BadgePAD 4
++
++ pd. [PARIDE]
++ See Documentation/blockdev/paride.txt.
++
++ pdcchassis= [PARISC,HW] Disable/Enable PDC Chassis Status codes at
++ boot time.
++ Format: { 0 | 1 }
++ See arch/parisc/kernel/pdc_chassis.c
++
++ percpu_alloc= Select which percpu first chunk allocator to use.
++ Currently supported values are "embed" and "page".
++ Archs may support subset or none of the selections.
++ See comments in mm/percpu.c for details on each
++ allocator. This parameter is primarily for debugging
++ and performance comparison.
++
++ pf. [PARIDE]
++ See Documentation/blockdev/paride.txt.
++
++ pg. [PARIDE]
++ See Documentation/blockdev/paride.txt.
++
++ pirq= [SMP,APIC] Manual mp-table setup
++ See Documentation/x86/i386/IO-APIC.txt.
++
++ plip= [PPT,NET] Parallel port network link
++ Format: { parport<nr> | timid | 0 }
++ See also Documentation/parport.txt.
++
++ pmtmr= [X86] Manual setup of pmtmr I/O Port.
++ Override pmtimer IOPort with a hex value.
++ e.g. pmtmr=0x508
++
++ pnp.debug=1 [PNP]
++ Enable PNP debug messages (depends on the
++ CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time
++ via /sys/module/pnp/parameters/debug. We always show
++ current resource usage; turning this on also shows
++ possible settings and some assignment information.
++
++ pnpacpi= [ACPI]
++ { off }
++
++ pnpbios= [ISAPNP]
++ { on | off | curr | res | no-curr | no-res }
++
++ pnp_reserve_irq=
++ [ISAPNP] Exclude IRQs for the autoconfiguration
++
++ pnp_reserve_dma=
++ [ISAPNP] Exclude DMAs for the autoconfiguration
++
++ pnp_reserve_io= [ISAPNP] Exclude I/O ports for the autoconfiguration
++ Ranges are in pairs (I/O port base and size).
++
++ pnp_reserve_mem=
++ [ISAPNP] Exclude memory regions for the
++ autoconfiguration.
++ Ranges are in pairs (memory base and size).
++
++ ports= [IP_VS_FTP] IPVS ftp helper module
++ Default is 21.
++ Up to 8 (IP_VS_APP_MAX_PORTS) ports
++ may be specified.
++ Format: <port>,<port>....
++
++ print-fatal-signals=
++ [KNL] debug: print fatal signals
++
++ If enabled, warn about various signal handling
++ related application anomalies: too many signals,
++ too many POSIX.1 timers, fatal signals causing a
++ coredump - etc.
++
++ If you hit the warning due to signal overflow,
++ you might want to try "ulimit -i unlimited".
++
++ default: off.
++
++ printk.always_kmsg_dump=
++ Trigger kmsg_dump for cases other than kernel oops or
++ panics
++ Format: <bool> (1/Y/y=enable, 0/N/n=disable)
++ default: disabled
++
++ printk.time= Show timing data prefixed to each printk message line
++ Format: <bool> (1/Y/y=enable, 0/N/n=disable)
++
++ processor.max_cstate= [HW,ACPI]
++ Limit processor to maximum C-state
++ max_cstate=9 overrides any DMI blacklist limit.
++
++ processor.nocst [HW,ACPI]
++ Ignore the _CST method to determine C-states,
++ instead using the legacy FADT method
++
++ profile= [KNL] Enable kernel profiling via /proc/profile
++ Format: [schedule,]<number>
++ Param: "schedule" - profile schedule points.
++ Param: <number> - step/bucket size as a power of 2 for
++ statistical time based profiling.
++ Param: "sleep" - profile D-state sleeping (millisecs).
++ Requires CONFIG_SCHEDSTATS
++ Param: "kvm" - profile VM exits.
++
++ prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk
++ before loading.
++ See Documentation/blockdev/ramdisk.txt.
++
++ psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
++ probe for; one of (bare|imps|exps|lifebook|any).
++ psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports
++ per second.
++ psmouse.resetafter= [HW,MOUSE]
++ Try to reset the device after so many bad packets
++ (0 = never).
++ psmouse.resolution=
++ [HW,MOUSE] Set desired mouse resolution, in dpi.
++ psmouse.smartscroll=
++ [HW,MOUSE] Controls Logitech smartscroll autorepeat.
++ 0 = disabled, 1 = enabled (default).
++
++ pstore.backend= Specify the name of the pstore backend to use
++
++ pt. [PARIDE]
++ See Documentation/blockdev/paride.txt.
++
++ pty.legacy_count=
++ [KNL] Number of legacy pty's. Overwrites compiled-in
++ default number.
++
++ quiet [KNL] Disable most log messages
++
++ r128= [HW,DRM]
++
++ raid= [HW,RAID]
++ See Documentation/md.txt.
++
++ ramdisk_blocksize= [RAM]
++ See Documentation/blockdev/ramdisk.txt.
++
++ ramdisk_size= [RAM] Sizes of RAM disks in kilobytes
++ See Documentation/blockdev/ramdisk.txt.
++
++ rcu_nocbs= [KNL]
++ In kernels built with CONFIG_RCU_NOCB_CPU=y, set
++ the specified list of CPUs to be no-callback CPUs.
++ Invocation of these CPUs' RCU callbacks will
++ be offloaded to "rcuox/N" kthreads created for
++ that purpose, where "x" is "b" for RCU-bh, "p"
++ for RCU-preempt, and "s" for RCU-sched, and "N"
++ is the CPU number. This reduces OS jitter on the
++ offloaded CPUs, which can be useful for HPC and
++ real-time workloads. It can also improve energy
++ efficiency for asymmetric multiprocessors.
++
++ rcu_nocb_poll [KNL]
++ Rather than requiring that offloaded CPUs
++ (specified by rcu_nocbs= above) explicitly
++ awaken the corresponding "rcuoN" kthreads,
++ make these kthreads poll for callbacks.
++ This improves the real-time response for the
++ offloaded CPUs by relieving them of the need to
++ wake up the corresponding kthread, but degrades
++ energy efficiency by requiring that the kthreads
++ periodically wake up to do the polling.
++
++ rcutree.blimit= [KNL]
++ Set maximum number of finished RCU callbacks to
++ process in one batch.
++
++ rcutree.rcu_fanout_leaf= [KNL]
++ Increase the number of CPUs assigned to each
++ leaf rcu_node structure. Useful for very large
++ systems.
++
++ rcutree.jiffies_till_first_fqs= [KNL]
++ Set delay from grace-period initialization to
++ first attempt to force quiescent states.
++ Units are jiffies, minimum value is zero,
++ and maximum value is HZ.
++
++ rcutree.jiffies_till_next_fqs= [KNL]
++ Set delay between subsequent attempts to force
++ quiescent states. Units are jiffies, minimum
++ value is one, and maximum value is HZ.
++
++ rcutree.qhimark= [KNL]
++ Set threshold of queued RCU callbacks beyond which
++ batch limiting is disabled.
++
++ rcutree.qlowmark= [KNL]
++ Set threshold of queued RCU callbacks below which
++ batch limiting is re-enabled.
++
++ rcutree.rcu_idle_gp_delay= [KNL]
++ Set wakeup interval for idle CPUs that have
++ RCU callbacks (RCU_FAST_NO_HZ=y).
++
++ rcutree.rcu_idle_lazy_gp_delay= [KNL]
++ Set wakeup interval for idle CPUs that have
++ only "lazy" RCU callbacks (RCU_FAST_NO_HZ=y).
++ Lazy RCU callbacks are those which RCU can
++ prove do nothing more than free memory.
++
++ rcutorture.fqs_duration= [KNL]
++ Set duration of force_quiescent_state bursts.
++
++ rcutorture.fqs_holdoff= [KNL]
++ Set holdoff time within force_quiescent_state bursts.
++
++ rcutorture.fqs_stutter= [KNL]
++ Set wait time between force_quiescent_state bursts.
++
++ rcutorture.gp_exp= [KNL]
++ Use expedited update-side primitives.
++
++ rcutorture.gp_normal= [KNL]
++ Use normal (non-expedited) update-side primitives.
++ If both gp_exp and gp_normal are set, do both.
++ If neither gp_exp nor gp_normal are set, still
++ do both.
++
++ rcutorture.n_barrier_cbs= [KNL]
++ Set callbacks/threads for rcu_barrier() testing.
++
++ rcutorture.nfakewriters= [KNL]
++ Set number of concurrent RCU writers. These just
++ stress RCU, they don't participate in the actual
++ test, hence the "fake".
++
++ rcutorture.nreaders= [KNL]
++ Set number of RCU readers.
++
++ rcutorture.object_debug= [KNL]
++ Enable debug-object double-call_rcu() testing.
++
++ rcutorture.onoff_holdoff= [KNL]
++ Set time (s) after boot for CPU-hotplug testing.
++
++ rcutorture.onoff_interval= [KNL]
++ Set time (s) between CPU-hotplug operations, or
++ zero to disable CPU-hotplug testing.
++
++ rcutorture.rcutorture_runnable= [BOOT]
++ Start rcutorture running at boot time.
++
++ rcutorture.shuffle_interval= [KNL]
++ Set task-shuffle interval (s). Shuffling tasks
++ allows some CPUs to go into dyntick-idle mode
++ during the rcutorture test.
++
++ rcutorture.shutdown_secs= [KNL]
++ Set time (s) after boot system shutdown. This
++ is useful for hands-off automated testing.
++
++ rcutorture.stall_cpu= [KNL]
++ Duration of CPU stall (s) to test RCU CPU stall
++ warnings, zero to disable.
++
++ rcutorture.stall_cpu_holdoff= [KNL]
++ Time to wait (s) after boot before inducing stall.
++
++ rcutorture.stat_interval= [KNL]
++ Time (s) between statistics printk()s.
++
++ rcutorture.stutter= [KNL]
++ Time (s) to stutter testing, for example, specifying
++ five seconds causes the test to run for five seconds,
++ wait for five seconds, and so on. This tests RCU's
++ ability to transition abruptly to and from idle.
++
++ rcutorture.test_boost= [KNL]
++ Test RCU priority boosting? 0=no, 1=maybe, 2=yes.
++			"Maybe" means test if the RCU implementation
++			under test supports RCU priority boosting.
++
++ rcutorture.test_boost_duration= [KNL]
++ Duration (s) of each individual boost test.
++
++ rcutorture.test_boost_interval= [KNL]
++ Interval (s) between each boost test.
++
++ rcutorture.test_no_idle_hz= [KNL]
++ Test RCU's dyntick-idle handling. See also the
++ rcutorture.shuffle_interval parameter.
++
++ rcutorture.torture_type= [KNL]
++ Specify the RCU implementation to test.
++
++ rcutorture.verbose= [KNL]
++ Enable additional printk() statements.
++
++ rcupdate.rcu_expedited= [KNL]
++ Use expedited grace-period primitives, for
++ example, synchronize_rcu_expedited() instead
++ of synchronize_rcu(). This reduces latency,
++ but can increase CPU utilization, degrade
++ real-time latency, and degrade energy efficiency.
++
++ rcupdate.rcu_cpu_stall_suppress= [KNL]
++ Suppress RCU CPU stall warning messages.
++
++ rcupdate.rcu_cpu_stall_timeout= [KNL]
++ Set timeout for RCU CPU stall warning messages.
++
++ rdinit= [KNL]
++ Format: <full_path>
++ Run specified binary instead of /init from the ramdisk,
++ used for early userspace startup. See initrd.
++
++ reboot= [KNL]
++ Format (x86 or x86_64):
++ [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
++ [[,]s[mp]#### \
++ [[,]b[ios] | a[cpi] | k[bd] | t[riple] | e[fi] | p[ci]] \
++ [[,]f[orce]
++ Where reboot_mode is one of warm (soft) or cold (hard) or gpio,
++ reboot_type is one of bios, acpi, kbd, triple, efi, or pci,
++ reboot_force is either force or not specified,
++ reboot_cpu is s[mp]#### with #### being the processor
++ to be used for rebooting.
++
++ relax_domain_level=
++ [KNL, SMP] Set scheduler's default relax_domain_level.
++ See Documentation/cgroups/cpusets.txt.
++
++ reserve= [KNL,BUGS] Force the kernel to ignore some iomem area
++
++ reservetop= [X86-32]
++ Format: nn[KMG]
++ Reserves a hole at the top of the kernel virtual
++ address space.
++
++ reservelow= [X86]
++ Format: nn[K]
++ Set the amount of memory to reserve for BIOS at
++ the bottom of the address space.
++
++ reset_devices [KNL] Force drivers to reset the underlying device
++ during initialization.
++
++ resume= [SWSUSP]
++ Specify the partition device for software suspend
++ Format:
++ {/dev/<dev> | PARTUUID=<uuid> | <int>:<int> | <hex>}
++
++ resume_offset= [SWSUSP]
++ Specify the offset from the beginning of the partition
++ given by "resume=" at which the swap header is located,
++ in <PAGE_SIZE> units (needed only for swap files).
++ See Documentation/power/swsusp-and-swap-files.txt
++
++ resumedelay= [HIBERNATION] Delay (in seconds) to pause before attempting to
++ read the resume files
++
++ resumewait [HIBERNATION] Wait (indefinitely) for resume device to show up.
++ Useful for devices that are detected asynchronously
++ (e.g. USB and MMC devices).
++
++ hibernate= [HIBERNATION]
++ noresume Don't check if there's a hibernation image
++ present during boot.
++ nocompress Don't compress/decompress hibernation images.
++
++ retain_initrd [RAM] Keep initrd memory after extraction
++
++ rhash_entries= [KNL,NET]
++ Set number of hash buckets for route cache
++
++ riscom8= [HW,SERIAL]
++ Format: <io_board1>[,<io_board2>[,...<io_boardN>]]
++
++ ro [KNL] Mount root device read-only on boot
++
++ root= [KNL] Root filesystem
++ See name_to_dev_t comment in init/do_mounts.c.
++
++ rootdelay= [KNL] Delay (in seconds) to pause before attempting to
++ mount the root filesystem
++
++ rootflags= [KNL] Set root filesystem mount option string
++
++ rootfstype= [KNL] Set root filesystem type
++
++ rootwait [KNL] Wait (indefinitely) for root device to show up.
++ Useful for devices that are detected asynchronously
++ (e.g. USB and MMC devices).
++
++ rproc_mem=nn[KMG][@address]
++ [KNL,ARM,CMA] Remoteproc physical memory block.
++ Memory area to be used by remote processor image,
++ managed by CMA.
++
++ rw [KNL] Mount root device read-write on boot
++
++ S [KNL] Run init in single mode
++
++ sa1100ir [NET]
++ See drivers/net/irda/sa1100_ir.c.
++
++ sbni= [NET] Granch SBNI12 leased line adapter
++
++ sched_debug [KNL] Enables verbose scheduler debug messages.
++
++ skew_tick= [KNL] Offset the periodic timer tick per cpu to mitigate
++ xtime_lock contention on larger systems, and/or RCU lock
++ contention on all systems with CONFIG_MAXSMP set.
++ Format: { "0" | "1" }
++			0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1")
++ 1 -- enable.
++ Note: increases power consumption, thus should only be
++ enabled if running jitter sensitive (HPC/RT) workloads.
++
++ security= [SECURITY] Choose a security module to enable at boot.
++ If this boot parameter is not specified, only the first
++ security module asking for security registration will be
++ loaded. An invalid security module name will be treated
++ as if no module has been chosen.
++
++ selinux= [SELINUX] Disable or enable SELinux at boot time.
++ Format: { "0" | "1" }
++ See security/selinux/Kconfig help text.
++ 0 -- disable.
++ 1 -- enable.
++ Default value is set via kernel config option.
++ If enabled at boot time, /selinux/disable can be used
++ later to disable prior to initial policy load.
++
++ apparmor= [APPARMOR] Disable or enable AppArmor at boot time
++ Format: { "0" | "1" }
++ See security/apparmor/Kconfig help text
++ 0 -- disable.
++ 1 -- enable.
++ Default value is set via kernel config option.
++
++ serialnumber [BUGS=X86-32]
++
++ shapers= [NET]
++ Maximal number of shapers.
++
++ show_msr= [x86] show boot-time MSR settings
++ Format: { <integer> }
++ Show boot-time (BIOS-initialized) MSR settings.
++ The parameter means the number of CPUs to show,
++ for example 1 means boot CPU only.
++
++ simeth= [IA-64]
++ simscsi=
++
++ slram= [HW,MTD]
++
++ slab_max_order= [MM, SLAB]
++ Determines the maximum allowed order for slabs.
++ A high setting may cause OOMs due to memory
++ fragmentation. Defaults to 1 for systems with
++ more than 32MB of RAM, 0 otherwise.
++
++ slub_debug[=options[,slabs]] [MM, SLUB]
++ Enabling slub_debug allows one to determine the
++ culprit if slab objects become corrupted. Enabling
++ slub_debug can create guard zones around objects and
++ may poison objects when not in use. Also tracks the
++ last alloc / free. For more information see
++ Documentation/vm/slub.txt.
++
++ slub_max_order= [MM, SLUB]
++ Determines the maximum allowed order for slabs.
++ A high setting may cause OOMs due to memory
++ fragmentation. For more information see
++ Documentation/vm/slub.txt.
++
++ slub_min_objects= [MM, SLUB]
++ The minimum number of objects per slab. SLUB will
++ increase the slab order up to slub_max_order to
++ generate a sufficiently large slab able to contain
++ the number of objects indicated. The higher the number
++ of objects the smaller the overhead of tracking slabs
++ and the less frequently locks need to be acquired.
++ For more information see Documentation/vm/slub.txt.
++
++ slub_min_order= [MM, SLUB]
++ Determines the minimum page order for slabs. Must be
++ lower than slub_max_order.
++ For more information see Documentation/vm/slub.txt.
++
++ slub_nomerge [MM, SLUB]
++ Disable merging of slabs with similar size. May be
++ necessary if there is some reason to distinguish
++ allocs to different slabs. Debug options disable
++ merging on their own.
++ For more information see Documentation/vm/slub.txt.
++
++ smart2= [HW]
++ Format: <io1>[,<io2>[,...,<io8>]]
++
++ smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices
++ smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port
++ smsc-ircc2.ircc_sir= [HW] SIR base I/O port
++ smsc-ircc2.ircc_fir= [HW] FIR base I/O port
++ smsc-ircc2.ircc_irq= [HW] IRQ line
++ smsc-ircc2.ircc_dma= [HW] DMA channel
++ smsc-ircc2.ircc_transceiver= [HW] Transceiver type:
++ 0: Toshiba Satellite 1800 (GP data pin select)
++ 1: Fast pin select (default)
++ 2: ATC IRMode
++
++ softlockup_panic=
++ [KNL] Should the soft-lockup detector generate panics.
++ Format: <integer>
++
++ sonypi.*= [HW] Sony Programmable I/O Control Device driver
++ See Documentation/laptops/sonypi.txt
++
++ specialix= [HW,SERIAL] Specialix multi-serial port adapter
++ See Documentation/serial/specialix.txt.
++
++ spia_io_base= [HW,MTD]
++ spia_fio_base=
++ spia_pedr=
++ spia_peddr=
++
++ stacktrace [FTRACE]
++ Enabled the stack tracer on boot up.
++
++ stacktrace_filter=[function-list]
++ [FTRACE] Limit the functions that the stack tracer
++ will trace at boot up. function-list is a comma separated
++ list of functions. This list can be changed at run
++ time by the stack_trace_filter file in the debugfs
++ tracing directory. Note, this enables stack tracing
++ and the stacktrace above is not needed.
++
++ sti= [PARISC,HW]
++ Format: <num>
++ Set the STI (builtin display/keyboard on the HP-PARISC
++ machines) console (graphic card) which should be used
++ as the initial boot-console.
++ See also comment in drivers/video/console/sticore.c.
++
++ sti_font= [HW]
++ See comment in drivers/video/console/sticore.c.
++
++ stifb= [HW]
++ Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]]
++
++ sunrpc.min_resvport=
++ sunrpc.max_resvport=
++ [NFS,SUNRPC]
++ SunRPC servers often require that client requests
++ originate from a privileged port (i.e. a port in the
++ range 0 < portnr < 1024).
++ An administrator who wishes to reserve some of these
++ ports for other uses may adjust the range that the
++ kernel's sunrpc client considers to be privileged
++ using these two parameters to set the minimum and
++ maximum port values.
++
++ sunrpc.pool_mode=
++ [NFS]
++ Control how the NFS server code allocates CPUs to
++ service thread pools. Depending on how many NICs
++ you have and where their interrupts are bound, this
++ option will affect which CPUs will do NFS serving.
++ Note: this parameter cannot be changed while the
++ NFS server is running.
++
++ auto the server chooses an appropriate mode
++ automatically using heuristics
++ global a single global pool contains all CPUs
++ percpu one pool for each CPU
++ pernode one pool for each NUMA node (equivalent
++ to global on non-NUMA machines)
++
++ sunrpc.tcp_slot_table_entries=
++ sunrpc.udp_slot_table_entries=
++ [NFS,SUNRPC]
++ Sets the upper limit on the number of simultaneous
++ RPC calls that can be sent from the client to a
++ server. Increasing these values may allow you to
++ improve throughput, but will also increase the
++ amount of memory reserved for use by the client.
++
++ swapaccount=[0|1]
++ [KNL] Enable accounting of swap in memory resource
++ controller if no parameter or 1 is given or disable
++ it if 0 is given (See Documentation/cgroups/memory.txt)
++
++ swiotlb= [ARM,IA-64,PPC,MIPS,X86]
++ Format: { <int> | force }
++ <int> -- Number of I/O TLB slabs
++ force -- force using of bounce buffers even if they
++ wouldn't be automatically used by the kernel
++
++ switches= [HW,M68k]
++
++ sysfs.deprecated=0|1 [KNL]
++ Enable/disable old style sysfs layout for old udev
++ on older distributions. When this option is enabled
++ very new udev will not work anymore. When this option
++ is disabled (or CONFIG_SYSFS_DEPRECATED not compiled)
++ in older udev will not work anymore.
++ Default depends on CONFIG_SYSFS_DEPRECATED_V2 set in
++ the kernel configuration.
++
++ sysrq_always_enabled
++ [KNL]
++ Ignore sysrq setting - this boot parameter will
++ neutralize any effect of /proc/sys/kernel/sysrq.
++ Useful for debugging.
++
++ tdfx= [HW,DRM]
++
++ test_suspend= [SUSPEND]
++ Specify "mem" (for Suspend-to-RAM) or "standby" (for
++ standby suspend) as the system sleep state to briefly
++ enter during system startup. The system is woken from
++ this state using a wakeup-capable RTC alarm.
++
++ thash_entries= [KNL,NET]
++ Set number of hash buckets for TCP connection
++
++ thermal.act= [HW,ACPI]
++ -1: disable all active trip points in all thermal zones
++ <degrees C>: override all lowest active trip points
++
++ thermal.crt= [HW,ACPI]
++ -1: disable all critical trip points in all thermal zones
++ <degrees C>: override all critical trip points
++
++ thermal.nocrt= [HW,ACPI]
++ Set to disable actions on ACPI thermal zone
++ critical and hot trip points.
++
++ thermal.off= [HW,ACPI]
++ 1: disable ACPI thermal control
++
++ thermal.psv= [HW,ACPI]
++ -1: disable all passive trip points
++ <degrees C>: override all passive trip points to this
++ value
++
++ thermal.tzp= [HW,ACPI]
++ Specify global default ACPI thermal zone polling rate
++ <deci-seconds>: poll all this frequency
++ 0: no polling (default)
++
++ threadirqs [KNL]
++ Force threading of all interrupt handlers except those
++ marked explicitly IRQF_NO_THREAD.
++
++ tmem [KNL,XEN]
++ Enable the Transcendent memory driver if built-in.
++
++ tmem.cleancache=0|1 [KNL, XEN]
++ Default is on (1). Disable the usage of the cleancache
++ API to send anonymous pages to the hypervisor.
++
++ tmem.frontswap=0|1 [KNL, XEN]
++ Default is on (1). Disable the usage of the frontswap
++ API to send swap pages to the hypervisor. If disabled
++ the selfballooning and selfshrinking are force disabled.
++
++ tmem.selfballooning=0|1 [KNL, XEN]
++ Default is on (1). Disable the driving of swap pages
++ to the hypervisor.
++
++ tmem.selfshrinking=0|1 [KNL, XEN]
++ Default is on (1). Partial swapoff that immediately
++ transfers pages from Xen hypervisor back to the
++ kernel based on different criteria.
++
++ topology= [S390]
++ Format: {off | on}
++ Specify if the kernel should make use of the cpu
++ topology information if the hardware supports this.
++ The scheduler will make use of this information and
++ e.g. base its process migration decisions on it.
++ Default is on.
++
++ tp720= [HW,PS2]
++
++ tpm_suspend_pcr=[HW,TPM]
++ Format: integer pcr id
++ Specify that at suspend time, the tpm driver
++ should extend the specified pcr with zeros,
++ as a workaround for some chips which fail to
++ flush the last written pcr on TPM_SaveState.
++ This will guarantee that all the other pcrs
++ are saved.
++
++ trace_buf_size=nn[KMG]
++ [FTRACE] will set tracing buffer size.
++
++ trace_event=[event-list]
++ [FTRACE] Set and start specified trace events in order
++ to facilitate early boot debugging.
++ See also Documentation/trace/events.txt
++
++ trace_options=[option-list]
++ [FTRACE] Enable or disable tracer options at boot.
++ The option-list is a comma delimited list of options
++ that can be enabled or disabled just as if you were
++ to echo the option name into
++
++ /sys/kernel/debug/tracing/trace_options
++
++ For example, to enable stacktrace option (to dump the
++ stack trace of each event), add to the command line:
++
++ trace_options=stacktrace
++
++ See also Documentation/trace/ftrace.txt "trace options"
++ section.
++
++ traceoff_on_warning
++ [FTRACE] enable this option to disable tracing when a
++ warning is hit. This turns off "tracing_on". Tracing can
++ be enabled again by echoing '1' into the "tracing_on"
++ file located in /sys/kernel/debug/tracing/
++
++ This option is useful, as it disables the trace before
++ the WARNING dump is called, which prevents the trace to
++ be filled with content caused by the warning output.
++
++ This option can also be set at run time via the sysctl
++ option: kernel/traceoff_on_warning
++
++ transparent_hugepage=
++ [KNL]
++ Format: [always|madvise|never]
++ Can be used to control the default behavior of the system
++ with respect to transparent hugepages.
++ See Documentation/vm/transhuge.txt for more details.
++
++ tsc= Disable clocksource stability checks for TSC.
++ Format: <string>
++ [x86] reliable: mark tsc clocksource as reliable, this
++ disables clocksource verification at runtime, as well
++ as the stability checks done at bootup. Used to enable
++ high-resolution timer mode on older hardware, and in
++ virtualized environment.
++ [x86] noirqtime: Do not use TSC to do irq accounting.
++ Used to run time disable IRQ_TIME_ACCOUNTING on any
++ platforms where RDTSC is slow and this accounting
++ can add overhead.
++
++ turbografx.map[2|3]= [HW,JOY]
++ TurboGraFX parallel port interface
++ Format:
++ <port#>,<js1>,<js2>,<js3>,<js4>,<js5>,<js6>,<js7>
++ See also Documentation/input/joystick-parport.txt
++
++ udbg-immortal [PPC] When debugging early kernel crashes that
++ happen after console_init() and before a proper
++ console driver takes over, this boot options might
++ help "seeing" what's going on.
++
++ uhash_entries= [KNL,NET]
++ Set number of hash buckets for UDP/UDP-Lite connections
++
++ uhci-hcd.ignore_oc=
++ [USB] Ignore overcurrent events (default N).
++ Some badly-designed motherboards generate lots of
++ bogus events, for ports that aren't wired to
++ anything. Set this parameter to avoid log spamming.
++ Note that genuine overcurrent events won't be
++ reported either.
++
++ unknown_nmi_panic
++ [X86] Cause panic on unknown NMI.
++
++ usbcore.authorized_default=
++ [USB] Default USB device authorization:
++ (default -1 = authorized except for wireless USB,
++ 0 = not authorized, 1 = authorized)
++
++ usbcore.autosuspend=
++ [USB] The autosuspend time delay (in seconds) used
++ for newly-detected USB devices (default 2). This
++ is the time required before an idle device will be
++ autosuspended. Devices for which the delay is set
++ to a negative value won't be autosuspended at all.
++
++ usbcore.usbfs_snoop=
++ [USB] Set to log all usbfs traffic (default 0 = off).
++
++ usbcore.blinkenlights=
++ [USB] Set to cycle leds on hubs (default 0 = off).
++
++ usbcore.old_scheme_first=
++ [USB] Start with the old device initialization
++ scheme (default 0 = off).
++
++ usbcore.usbfs_memory_mb=
++ [USB] Memory limit (in MB) for buffers allocated by
++ usbfs (default = 16, 0 = max = 2047).
++
++ usbcore.use_both_schemes=
++ [USB] Try the other device initialization scheme
++ if the first one fails (default 1 = enabled).
++
++ usbcore.initial_descriptor_timeout=
++ [USB] Specifies timeout for the initial 64-byte
++ USB_REQ_GET_DESCRIPTOR request in milliseconds
++ (default 5000 = 5.0 seconds).
++
++ usbhid.mousepoll=
++ [USBHID] The interval which mice are to be polled at.
++
++ usb-storage.delay_use=
++ [UMS] The delay in seconds before a new device is
++ scanned for Logical Units (default 5).
++
++ usb-storage.quirks=
++ [UMS] A list of quirks entries to supplement or
++ override the built-in unusual_devs list. List
++ entries are separated by commas. Each entry has
++ the form VID:PID:Flags where VID and PID are Vendor
++ and Product ID values (4-digit hex numbers) and
++ Flags is a set of characters, each corresponding
++ to a common usb-storage quirk flag as follows:
++ a = SANE_SENSE (collect more than 18 bytes
++ of sense data);
++ b = BAD_SENSE (don't collect more than 18
++ bytes of sense data);
++ c = FIX_CAPACITY (decrease the reported
++ device capacity by one sector);
++ d = NO_READ_DISC_INFO (don't use
++ READ_DISC_INFO command);
++ e = NO_READ_CAPACITY_16 (don't use
++ READ_CAPACITY_16 command);
++ h = CAPACITY_HEURISTICS (decrease the
++ reported device capacity by one
++ sector if the number is odd);
++ i = IGNORE_DEVICE (don't bind to this
++ device);
++ l = NOT_LOCKABLE (don't try to lock and
++ unlock ejectable media);
++ m = MAX_SECTORS_64 (don't transfer more
++ than 64 sectors = 32 KB at a time);
++ n = INITIAL_READ10 (force a retry of the
++ initial READ(10) command);
++ o = CAPACITY_OK (accept the capacity
++ reported by the device);
++ p = WRITE_CACHE (the device cache is ON
++ by default);
++ r = IGNORE_RESIDUE (the device reports
++ bogus residue values);
++ s = SINGLE_LUN (the device has only one
++ Logical Unit);
++ w = NO_WP_DETECT (don't test whether the
++ medium is write-protected).
++ Example: quirks=0419:aaf5:rl,0421:0433:rc
++
++ user_debug= [KNL,ARM]
++ Format: <int>
++ See arch/arm/Kconfig.debug help text.
++ 1 - undefined instruction events
++ 2 - system calls
++ 4 - invalid data aborts
++ 8 - SIGSEGV faults
++ 16 - SIGBUS faults
++ Example: user_debug=31
++
++ userpte=
++ [X86] Flags controlling user PTE allocations.
++
++ nohigh = do not allocate PTE pages in
++ HIGHMEM regardless of setting
++ of CONFIG_HIGHPTE.
++
++ vdso= [X86,SH]
++ vdso=2: enable compat VDSO (default with COMPAT_VDSO)
++ vdso=1: enable VDSO (default)
++ vdso=0: disable VDSO mapping
++
++ vdso32= [X86]
++ vdso32=2: enable compat VDSO (default with COMPAT_VDSO)
++ vdso32=1: enable 32-bit VDSO (default)
++ vdso32=0: disable 32-bit VDSO mapping
++
++ vector= [IA-64,SMP]
++ vector=percpu: enable percpu vector domain
++
++ video= [FB] Frame buffer configuration
++ See Documentation/fb/modedb.txt.
++
++ video.brightness_switch_enabled= [0,1]
++ If set to 1, on receiving an ACPI notify event
++ generated by hotkey, video driver will adjust brightness
++ level and then send out the event to user space through
++ the allocated input device; If set to 0, video driver
++ will only send out the event without touching backlight
++ brightness level.
++ default: 1
++
++ virtio_mmio.device=
++ [VMMIO] Memory mapped virtio (platform) device.
++
++ <size>@<baseaddr>:<irq>[:<id>]
++ where:
++ <size> := size (can use standard suffixes
++ like K, M and G)
++ <baseaddr> := physical base address
++ <irq> := interrupt number (as passed to
++ request_irq())
++ <id> := (optional) platform device id
++ example:
++ virtio_mmio.device=1K@0x100b0000:48:7
++
++ Can be used multiple times for multiple devices.
++
++ vga= [BOOT,X86-32] Select a particular video mode
++ See Documentation/x86/boot.txt and
++ Documentation/svga.txt.
++ Use vga=ask for menu.
++ This is actually a boot loader parameter; the value is
++ passed to the kernel using a special protocol.
++
++ vmalloc=nn[KMG] [KNL,BOOT] Forces the vmalloc area to have an exact
++ size of <nn>. This can be used to increase the
++ minimum size (128MB on x86). It can also be used to
++ decrease the size and leave more room for directly
++ mapped kernel RAM.
++
++ vmhalt= [KNL,S390] Perform z/VM CP command after system halt.
++ Format: <command>
++
++ vmpanic= [KNL,S390] Perform z/VM CP command after kernel panic.
++ Format: <command>
++
++ vmpoff= [KNL,S390] Perform z/VM CP command after power off.
++ Format: <command>
++
++ vsyscall= [X86-64]
++ Controls the behavior of vsyscalls (i.e. calls to
++ fixed addresses of 0xffffffffff600x00 from legacy
++ code). Most statically-linked binaries and older
++ versions of glibc use these calls. Because these
++ functions are at fixed addresses, they make nice
++ targets for exploits that can control RIP.
++
++ emulate [default] Vsyscalls turn into traps and are
++ emulated reasonably safely.
++
++ native Vsyscalls are native syscall instructions.
++ This is a little bit faster than trapping
++ and makes a few dynamic recompilers work
++ better than they would in emulation mode.
++ It also makes exploits much easier to write.
++
++ none Vsyscalls don't work at all. This makes
++ them quite hard to use for exploits but
++ might break your system.
++
++ vt.color= [VT] Default text color.
++ Format: 0xYX, X = foreground, Y = background.
++ Default: 0x07 = light gray on black.
++
++ vt.cur_default= [VT] Default cursor shape.
++ Format: 0xCCBBAA, where AA, BB, and CC are the same as
++ the parameters of the <Esc>[?A;B;Cc escape sequence;
++ see VGA-softcursor.txt. Default: 2 = underline.
++
++ vt.default_blu= [VT]
++ Format: <blue0>,<blue1>,<blue2>,...,<blue15>
++ Change the default blue palette of the console.
++ This is a 16-member array composed of values
++ ranging from 0-255.
++
++ vt.default_grn= [VT]
++ Format: <green0>,<green1>,<green2>,...,<green15>
++ Change the default green palette of the console.
++ This is a 16-member array composed of values
++ ranging from 0-255.
++
++ vt.default_red= [VT]
++ Format: <red0>,<red1>,<red2>,...,<red15>
++ Change the default red palette of the console.
++ This is a 16-member array composed of values
++ ranging from 0-255.
++
++ vt.default_utf8=
++ [VT]
++ Format=<0|1>
++ Set system-wide default UTF-8 mode for all tty's.
++ Default is 1, i.e. UTF-8 mode is enabled for all
++ newly opened terminals.
++
++ vt.global_cursor_default=
++ [VT]
++ Format=<-1|0|1>
++ Set system-wide default for whether a cursor
++ is shown on new VTs. Default is -1,
++ i.e. cursors will be created by default unless
++ overridden by individual drivers. 0 will hide
++ cursors, 1 will display them.
++
++ vt.italic= [VT] Default color for italic text; 0-15.
++ Default: 2 = green.
++
++ vt.underline= [VT] Default color for underlined text; 0-15.
++ Default: 3 = cyan.
++
++ watchdog timers [HW,WDT] For information on watchdog timers,
++ see Documentation/watchdog/watchdog-parameters.txt
++ or other driver-specific files in the
++ Documentation/watchdog/ directory.
++
++ workqueue.disable_numa
++ By default, all work items queued to unbound
++ workqueues are affine to the NUMA nodes they're
++ issued on, which results in better behavior in
++ general. If NUMA affinity needs to be disabled for
++ whatever reason, this option can be used. Note
++ that this also can be controlled per-workqueue for
++ workqueues visible under /sys/bus/workqueue/.
++
++ workqueue.power_efficient
++ Per-cpu workqueues are generally preferred because
++ they show better performance thanks to cache
++ locality; unfortunately, per-cpu workqueues tend to
++ be more power hungry than unbound workqueues.
++
++ Enabling this makes the per-cpu workqueues which
++ were observed to contribute significantly to power
++ consumption unbound, leading to measurably lower
++ power usage at the cost of small performance
++ overhead.
++
++ The default value of this parameter is determined by
++ the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
++
++ x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
++ default x2apic cluster mode on platforms
++ supporting x2apic.
++
++ x86_intel_mid_timer= [X86-32,APBT]
++ Choose timer option for x86 Intel MID platform.
++ Two valid options are apbt timer only and lapic timer
++ plus one apbt timer for broadcast timer.
++ x86_intel_mid_timer=apbt_only | lapic_and_apbt
++
++ xen_emul_unplug= [HW,X86,XEN]
++ Unplug Xen emulated devices
++ Format: [unplug0,][unplug1]
++ ide-disks -- unplug primary master IDE devices
++ aux-ide-disks -- unplug non-primary-master IDE devices
++ nics -- unplug network devices
++ all -- unplug all emulated devices (NICs and IDE disks)
++ unnecessary -- unplugging emulated devices is
++ unnecessary even if the host did not respond to
++ the unplug protocol
++ never -- do not unplug even if version check succeeds
++
++ xen_nopvspin [X86,XEN]
++ Disables the ticketlock slowpath using Xen PV
++ optimizations.
++
++ xirc2ps_cs= [NET,PCMCIA]
++ Format:
++ <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
++
++______________________________________________________________________
++
++TODO:
++
++ Add more DRM drivers.
+diff -Nur linux-3.14.36/Documentation/networking/gianfar.txt linux-openelec/Documentation/networking/gianfar.txt
+--- linux-3.14.36/Documentation/networking/gianfar.txt 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/Documentation/networking/gianfar.txt 2015-05-06 12:05:42.000000000 -0500
+@@ -1,38 +1,8 @@
+ The Gianfar Ethernet Driver
+-Sysfs File description
+
+ Author: Andy Fleming <afleming@freescale.com>
+ Updated: 2005-07-28
+
+-SYSFS
+-
+-Several of the features of the gianfar driver are controlled
+-through sysfs files. These are:
+-
+-bd_stash:
+-To stash RX Buffer Descriptors in the L2, echo 'on' or '1' to
+-bd_stash, echo 'off' or '0' to disable
+-
+-rx_stash_len:
+-To stash the first n bytes of the packet in L2, echo the number
+-of bytes to buf_stash_len. echo 0 to disable.
+-
+-WARNING: You could really screw these up if you set them too low or high!
+-fifo_threshold:
+-To change the number of bytes the controller needs in the
+-fifo before it starts transmission, echo the number of bytes to
+-fifo_thresh. Range should be 0-511.
+-
+-fifo_starve:
+-When the FIFO has less than this many bytes during a transmit, it
+-enters starve mode, and increases the priority of TX memory
+-transactions. To change, echo the number of bytes to
+-fifo_starve. Range should be 0-511.
+-
+-fifo_starve_off:
+-Once in starve mode, the FIFO remains there until it has this
+-many bytes. To change, echo the number of bytes to
+-fifo_starve_off. Range should be 0-511.
+
+ CHECKSUM OFFLOADING
+
+diff -Nur linux-3.14.36/drivers/ata/acard-ahci.c linux-openelec/drivers/ata/acard-ahci.c
+--- linux-3.14.36/drivers/ata/acard-ahci.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/acard-ahci.c 2015-05-06 12:05:42.000000000 -0500
+@@ -36,7 +36,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/ahci.c linux-openelec/drivers/ata/ahci.c
+--- linux-3.14.36/drivers/ata/ahci.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/ahci.c 2015-07-24 18:03:29.216842002 -0500
+@@ -35,7 +35,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+@@ -610,6 +609,7 @@
+ unsigned long deadline)
+ {
+ struct ata_port *ap = link->ap;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ bool online;
+ int rc;
+
+@@ -620,7 +620,7 @@
+ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ deadline, &online, NULL);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+
+@@ -635,6 +635,7 @@
+ {
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+@@ -650,7 +651,7 @@
+ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ deadline, &online, NULL);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ /* The pseudo configuration device on SIMG4726 attached to
+ * ASUS P5W-DH Deluxe doesn't send signature FIS after
+@@ -1146,6 +1147,17 @@
+ return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
+ }
+
++static bool ahci_broken_devslp(struct pci_dev *pdev)
++{
++ /* device with broken DEVSLP but still showing SDS capability */
++ static const struct pci_device_id ids[] = {
++ { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
++ {}
++ };
++
++ return pci_match_id(ids, pdev);
++}
++
+ #ifdef CONFIG_ATA_ACPI
+ static void ahci_gtf_filter_workaround(struct ata_host *host)
+ {
+@@ -1397,6 +1409,10 @@
+
+ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+
++ /* must set flag prior to save config in order to take effect */
++ if (ahci_broken_devslp(pdev))
++ hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
++
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+
+diff -Nur linux-3.14.36/drivers/ata/ahci.c.orig linux-openelec/drivers/ata/ahci.c.orig
+--- linux-3.14.36/drivers/ata/ahci.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/ata/ahci.c.orig 2015-07-24 18:03:29.024842002 -0500
+@@ -0,0 +1,1533 @@
++/*
++ * ahci.c - AHCI SATA support
++ *
++ * Maintained by: Tejun Heo <tj@kernel.org>
++ * Please ALWAYS copy linux-ide@vger.kernel.org
++ * on emails.
++ *
++ * Copyright 2004-2005 Red Hat, Inc.
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; see the file COPYING. If not, write to
++ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ *
++ * libata documentation is available via 'make {ps|pdf}docs',
++ * as Documentation/DocBook/libata.*
++ *
++ * AHCI hardware documentation:
++ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
++ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/blkdev.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/device.h>
++#include <linux/dmi.h>
++#include <linux/gfp.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_cmnd.h>
++#include <linux/libata.h>
++#include "ahci.h"
++
++#define DRV_NAME "ahci"
++#define DRV_VERSION "3.0"
++
++enum {
++ AHCI_PCI_BAR_STA2X11 = 0,
++ AHCI_PCI_BAR_ENMOTUS = 2,
++ AHCI_PCI_BAR_STANDARD = 5,
++};
++
++enum board_ids {
++ /* board IDs by feature in alphabetical order */
++ board_ahci,
++ board_ahci_ign_iferr,
++ board_ahci_nomsi,
++ board_ahci_noncq,
++ board_ahci_nosntf,
++ board_ahci_yes_fbs,
++
++ /* board IDs for specific chipsets in alphabetical order */
++ board_ahci_mcp65,
++ board_ahci_mcp77,
++ board_ahci_mcp89,
++ board_ahci_mv,
++ board_ahci_sb600,
++ board_ahci_sb700, /* for SB700 and SB800 */
++ board_ahci_vt8251,
++
++ /* aliases */
++ board_ahci_mcp_linux = board_ahci_mcp65,
++ board_ahci_mcp67 = board_ahci_mcp65,
++ board_ahci_mcp73 = board_ahci_mcp65,
++ board_ahci_mcp79 = board_ahci_mcp77,
++};
++
++static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
++static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline);
++static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
++static bool is_mcp89_apple(struct pci_dev *pdev);
++static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline);
++#ifdef CONFIG_PM
++static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
++static int ahci_pci_device_resume(struct pci_dev *pdev);
++#endif
++
++static struct scsi_host_template ahci_sht = {
++ AHCI_SHT("ahci"),
++};
++
++static struct ata_port_operations ahci_vt8251_ops = {
++ .inherits = &ahci_ops,
++ .hardreset = ahci_vt8251_hardreset,
++};
++
++static struct ata_port_operations ahci_p5wdh_ops = {
++ .inherits = &ahci_ops,
++ .hardreset = ahci_p5wdh_hardreset,
++};
++
++static const struct ata_port_info ahci_port_info[] = {
++ /* by features */
++ [board_ahci] = {
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_ign_iferr] = {
++ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_nomsi] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_noncq] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_nosntf] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_yes_fbs] = {
++ AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ /* by chipsets */
++ [board_ahci_mcp65] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
++ AHCI_HFLAG_YES_NCQ),
++ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_mcp77] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_mcp89] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_mv] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
++ AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
++ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
++ [board_ahci_sb600] = {
++ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
++ AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
++ AHCI_HFLAG_32BIT_ONLY),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_pmp_retry_srst_ops,
++ },
++ [board_ahci_sb700] = { /* for SB700 and SB800 */
++ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_pmp_retry_srst_ops,
++ },
++ [board_ahci_vt8251] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_vt8251_ops,
++ },
++};
++
++static const struct pci_device_id ahci_pci_tbl[] = {
++ /* Intel */
++ { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
++ { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
++ { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
++ { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
++ { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
++ { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
++ { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
++ { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
++ { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
++ { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
++ { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
++ { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
++ { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
++ { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
++ { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
++ { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
++ { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
++ { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
++ { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
++ { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
++ { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
++ { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
++ { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
++ { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
++ { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
++ { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
++ { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
++ { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
++ { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
++ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
++ { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
++ { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
++ { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
++ { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
++ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
++ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
++ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
++ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
++ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
++ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
++ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
++ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
++ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
++ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
++ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
++ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
++ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
++ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
++ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
++ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
++ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
++ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
++ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
++ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
++ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
++ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
++ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
++ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
++ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
++ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
++ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
++ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
++ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
++ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
++
++ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
++ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
++ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
++ /* JMicron 362B and 362C have an AHCI function with IDE class code */
++ { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
++ { PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
++
++ /* ATI */
++ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
++ { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
++ { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
++ { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
++ { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
++ { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
++ { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
++
++ /* AMD */
++ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
++ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
++ /* AMD is using RAID class only for ahci controllers */
++ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
++ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
++
++ /* VIA */
++ { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
++ { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
++
++ /* NVIDIA */
++ { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
++ { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
++ { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
++ { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
++ { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
++ { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
++ { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
++ { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
++ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */
++ { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */
++ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */
++ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */
++ { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */
++ { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */
++
++ /* SiS */
++ { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
++ { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
++ { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
++
++ /* ST Microelectronics */
++ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
++
++ /* Marvell */
++ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
++ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123),
++ .class = PCI_CLASS_STORAGE_SATA_AHCI,
++ .class_mask = 0xffffff,
++ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
++ PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9182 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
++ .driver_data = board_ahci_yes_fbs },
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
++ .driver_data = board_ahci_yes_fbs },
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
++ .driver_data = board_ahci_yes_fbs },
++ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
++ .driver_data = board_ahci_yes_fbs },
++
++ /* Promise */
++ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
++ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
++
++ /* Asmedia */
++ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
++ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
++
++ /*
++ * Samsung SSDs found on some macbooks. NCQ times out if MSI is
++ * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
++ */
++ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
++
++ /* Enmotus */
++ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
++
++ /* Generic, PCI class code for AHCI */
++ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
++ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
++
++ { } /* terminate list */
++};
++
++
++static struct pci_driver ahci_pci_driver = {
++ .name = DRV_NAME,
++ .id_table = ahci_pci_tbl,
++ .probe = ahci_init_one,
++ .remove = ata_pci_remove_one,
++#ifdef CONFIG_PM
++ .suspend = ahci_pci_device_suspend,
++ .resume = ahci_pci_device_resume,
++#endif
++};
++
++#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
++static int marvell_enable;
++#else
++static int marvell_enable = 1;
++#endif
++module_param(marvell_enable, int, 0644);
++MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
++
++
++static void ahci_pci_save_initial_config(struct pci_dev *pdev,
++ struct ahci_host_priv *hpriv)
++{
++ unsigned int force_port_map = 0;
++ unsigned int mask_port_map = 0;
++
++ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
++ dev_info(&pdev->dev, "JMB361 has only one port\n");
++ force_port_map = 1;
++ }
++
++ /*
++ * Temporary Marvell 6145 hack: PATA port presence
++ * is asserted through the standard AHCI port
++ * presence register, as bit 4 (counting from 0)
++ */
++ if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
++ if (pdev->device == 0x6121)
++ mask_port_map = 0x3;
++ else
++ mask_port_map = 0xf;
++ dev_info(&pdev->dev,
++ "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
++ }
++
++ ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
++ mask_port_map);
++}
++
++static int ahci_pci_reset_controller(struct ata_host *host)
++{
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++
++ ahci_reset_controller(host);
++
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
++ struct ahci_host_priv *hpriv = host->private_data;
++ u16 tmp16;
++
++ /* configure PCS */
++ pci_read_config_word(pdev, 0x92, &tmp16);
++ if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
++ tmp16 |= hpriv->port_map;
++ pci_write_config_word(pdev, 0x92, tmp16);
++ }
++ }
++
++ return 0;
++}
++
++static void ahci_pci_init_controller(struct ata_host *host)
++{
++ struct ahci_host_priv *hpriv = host->private_data;
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++ void __iomem *port_mmio;
++ u32 tmp;
++ int mv;
++
++ if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
++ if (pdev->device == 0x6121)
++ mv = 2;
++ else
++ mv = 4;
++ port_mmio = __ahci_port_base(host, mv);
++
++ writel(0, port_mmio + PORT_IRQ_MASK);
++
++ /* clear port IRQ */
++ tmp = readl(port_mmio + PORT_IRQ_STAT);
++ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
++ if (tmp)
++ writel(tmp, port_mmio + PORT_IRQ_STAT);
++ }
++
++ ahci_init_controller(host);
++}
++
++static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline)
++{
++ struct ata_port *ap = link->ap;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
++ bool online;
++ int rc;
++
++ DPRINTK("ENTER\n");
++
++ ahci_stop_engine(ap);
++
++ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
++ deadline, &online, NULL);
++
++ hpriv->start_engine(ap);
++
++ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
++
++ /* vt8251 doesn't clear BSY on signature FIS reception,
++ * request follow-up softreset.
++ */
++ return online ? -EAGAIN : rc;
++}
++
++static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline)
++{
++ struct ata_port *ap = link->ap;
++ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
++ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
++ struct ata_taskfile tf;
++ bool online;
++ int rc;
++
++ ahci_stop_engine(ap);
++
++ /* clear D2H reception area to properly wait for D2H FIS */
++ ata_tf_init(link->device, &tf);
++ tf.command = ATA_BUSY;
++ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
++
++ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
++ deadline, &online, NULL);
++
++ hpriv->start_engine(ap);
++
++ /* The pseudo configuration device on SIMG4726 attached to
++ * ASUS P5W-DH Deluxe doesn't send signature FIS after
++ * hardreset if no device is attached to the first downstream
++ * port && the pseudo device locks up on SRST w/ PMP==0. To
++ * work around this, wait for !BSY only briefly. If BSY isn't
++ * cleared, perform CLO and proceed to IDENTIFY (achieved by
++ * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
++ *
++ * Wait for two seconds. Devices attached to downstream port
++ * which can't process the following IDENTIFY after this will
++ * have to be reset again. For most cases, this should
++ * suffice while making probing snappish enough.
++ */
++ if (online) {
++ rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
++ ahci_check_ready);
++ if (rc)
++ ahci_kick_engine(ap);
++ }
++ return rc;
++}
++
++#ifdef CONFIG_PM
++static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
++{
++ struct ata_host *host = pci_get_drvdata(pdev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ void __iomem *mmio = hpriv->mmio;
++ u32 ctl;
++
++ if (mesg.event & PM_EVENT_SUSPEND &&
++ hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
++ dev_err(&pdev->dev,
++ "BIOS update required for suspend/resume\n");
++ return -EIO;
++ }
++
++ if (mesg.event & PM_EVENT_SLEEP) {
++ /* AHCI spec rev1.1 section 8.3.3:
++ * Software must disable interrupts prior to requesting a
++ * transition of the HBA to D3 state.
++ */
++ ctl = readl(mmio + HOST_CTL);
++ ctl &= ~HOST_IRQ_EN;
++ writel(ctl, mmio + HOST_CTL);
++ readl(mmio + HOST_CTL); /* flush */
++ }
++
++ return ata_pci_device_suspend(pdev, mesg);
++}
++
++static int ahci_pci_device_resume(struct pci_dev *pdev)
++{
++ struct ata_host *host = pci_get_drvdata(pdev);
++ int rc;
++
++ rc = ata_pci_device_do_resume(pdev);
++ if (rc)
++ return rc;
++
++ /* Apple BIOS helpfully mangles the registers on resume */
++ if (is_mcp89_apple(pdev))
++ ahci_mcp89_apple_enable(pdev);
++
++ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
++ rc = ahci_pci_reset_controller(host);
++ if (rc)
++ return rc;
++
++ ahci_pci_init_controller(host);
++ }
++
++ ata_host_resume(host);
++
++ return 0;
++}
++#endif
++
++static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
++{
++ int rc;
++
++ /*
++ * If the device fixup already set the dma_mask to some non-standard
++ * value, don't extend it here. This happens on STA2X11, for example.
++ */
++ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
++ return 0;
++
++ if (using_dac &&
++ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
++ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
++ if (rc) {
++ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (rc) {
++ dev_err(&pdev->dev,
++ "64-bit DMA enable failed\n");
++ return rc;
++ }
++ }
++ } else {
++ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (rc) {
++ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
++ return rc;
++ }
++ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (rc) {
++ dev_err(&pdev->dev,
++ "32-bit consistent DMA enable failed\n");
++ return rc;
++ }
++ }
++ return 0;
++}
++
++static void ahci_pci_print_info(struct ata_host *host)
++{
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++ u16 cc;
++ const char *scc_s;
++
++ pci_read_config_word(pdev, 0x0a, &cc);
++ if (cc == PCI_CLASS_STORAGE_IDE)
++ scc_s = "IDE";
++ else if (cc == PCI_CLASS_STORAGE_SATA)
++ scc_s = "SATA";
++ else if (cc == PCI_CLASS_STORAGE_RAID)
++ scc_s = "RAID";
++ else
++ scc_s = "unknown";
++
++ ahci_print_info(host, scc_s);
++}
++
++/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
++ * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
++ * support PMP and the 4726 either directly exports the device
++ * attached to the first downstream port or acts as a hardware storage
++ * controller and emulate a single ATA device (can be RAID 0/1 or some
++ * other configuration).
++ *
++ * When there's no device attached to the first downstream port of the
++ * 4726, "Config Disk" appears, which is a pseudo ATA device to
++ * configure the 4726. However, ATA emulation of the device is very
++ * lame. It doesn't send signature D2H Reg FIS after the initial
++ * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues.
++ *
++ * The following function works around the problem by always using
++ * hardreset on the port and not depending on receiving signature FIS
++ * afterward. If signature FIS isn't received soon, ATA class is
++ * assumed without follow-up softreset.
++ */
++static void ahci_p5wdh_workaround(struct ata_host *host)
++{
++ static struct dmi_system_id sysids[] = {
++ {
++ .ident = "P5W DH Deluxe",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR,
++ "ASUSTEK COMPUTER INC"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
++ },
++ },
++ { }
++ };
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++
++ if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
++ dmi_check_system(sysids)) {
++ struct ata_port *ap = host->ports[1];
++
++ dev_info(&pdev->dev,
++ "enabling ASUS P5W DH Deluxe on-board SIMG4726 workaround\n");
++
++ ap->ops = &ahci_p5wdh_ops;
++ ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
++ }
++}
++
++/*
++ * Macbook7,1 firmware forcibly disables MCP89 AHCI and changes PCI ID when
++ * booting in BIOS compatibility mode. We restore the registers but not ID.
++ */
++static void ahci_mcp89_apple_enable(struct pci_dev *pdev)
++{
++ u32 val;
++
++ printk(KERN_INFO "ahci: enabling MCP89 AHCI mode\n");
++
++ pci_read_config_dword(pdev, 0xf8, &val);
++ val |= 1 << 0x1b;
++ /* the following changes the device ID, but appears not to affect function */
++ /* val = (val & ~0xf0000000) | 0x80000000; */
++ pci_write_config_dword(pdev, 0xf8, val);
++
++ pci_read_config_dword(pdev, 0x54c, &val);
++ val |= 1 << 0xc;
++ pci_write_config_dword(pdev, 0x54c, val);
++
++ pci_read_config_dword(pdev, 0x4a4, &val);
++ val &= 0xff;
++ val |= 0x01060100;
++ pci_write_config_dword(pdev, 0x4a4, val);
++
++ pci_read_config_dword(pdev, 0x54c, &val);
++ val &= ~(1 << 0xc);
++ pci_write_config_dword(pdev, 0x54c, val);
++
++ pci_read_config_dword(pdev, 0xf8, &val);
++ val &= ~(1 << 0x1b);
++ pci_write_config_dword(pdev, 0xf8, val);
++}
++
++static bool is_mcp89_apple(struct pci_dev *pdev)
++{
++ return pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
++ pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
++ pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
++ pdev->subsystem_device == 0xcb89;
++}
++
++/* only some SB600 ahci controllers can do 64bit DMA */
++static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
++{
++ static const struct dmi_system_id sysids[] = {
++ /*
++ * The oldest version known to be broken is 0901 and
++ * working is 1501 which was released on 2007-10-26.
++ * Enable 64bit DMA on 1501 and anything newer.
++ *
++ * Please read bko#9412 for more info.
++ */
++ {
++ .ident = "ASUS M2A-VM",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
++ },
++ .driver_data = "20071026", /* yyyymmdd */
++ },
++ /*
++ * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
++ * support 64bit DMA.
++ *
++ * BIOS versions earlier than 1.5 had the Manufacturer DMI
++ * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
++ * This spelling mistake was fixed in BIOS version 1.5, so
++ * 1.5 and later have the Manufacturer as
++ * "MICRO-STAR INTERNATIONAL CO.,LTD".
++ * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
++ *
++ * BIOS versions earlier than 1.9 had a Board Product Name
++ * DMI field of "MS-7376". This was changed to be
++ * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
++ * match on DMI_BOARD_NAME of "MS-7376".
++ */
++ {
++ .ident = "MSI K9A2 Platinum",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "MICRO-STAR INTER"),
++ DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
++ },
++ },
++ /*
++ * All BIOS versions for the MSI K9AGM2 (MS-7327) support
++ * 64bit DMA.
++ *
++ * This board also had the typo mentioned above in the
++ * Manufacturer DMI field (fixed in BIOS version 1.5), so
++ * match on DMI_BOARD_VENDOR of "MICRO-STAR INTER" again.
++ */
++ {
++ .ident = "MSI K9AGM2",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "MICRO-STAR INTER"),
++ DMI_MATCH(DMI_BOARD_NAME, "MS-7327"),
++ },
++ },
++ /*
++ * All BIOS versions for the Asus M3A support 64bit DMA.
++ * (all release versions from 0301 to 1206 were tested)
++ */
++ {
++ .ident = "ASUS M3A",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "M3A"),
++ },
++ },
++ { }
++ };
++ const struct dmi_system_id *match;
++ int year, month, date;
++ char buf[9];
++
++ match = dmi_first_match(sysids);
++ if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
++ !match)
++ return false;
++
++ if (!match->driver_data)
++ goto enable_64bit;
++
++ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
++ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
++
++ if (strcmp(buf, match->driver_data) >= 0)
++ goto enable_64bit;
++ else {
++ dev_warn(&pdev->dev,
++ "%s: BIOS too old, forcing 32bit DMA, update BIOS\n",
++ match->ident);
++ return false;
++ }
++
++enable_64bit:
++ dev_warn(&pdev->dev, "%s: enabling 64bit DMA\n", match->ident);
++ return true;
++}
++
++static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
++{
++ static const struct dmi_system_id broken_systems[] = {
++ {
++ .ident = "HP Compaq nx6310",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
++ },
++ /* PCI slot number of the controller */
++ .driver_data = (void *)0x1FUL,
++ },
++ {
++ .ident = "HP Compaq 6720s",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
++ },
++ /* PCI slot number of the controller */
++ .driver_data = (void *)0x1FUL,
++ },
++
++ { } /* terminate list */
++ };
++ const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
++
++ if (dmi) {
++ unsigned long slot = (unsigned long)dmi->driver_data;
++ /* apply the quirk only to on-board controllers */
++ return slot == PCI_SLOT(pdev->devfn);
++ }
++
++ return false;
++}
++
++static bool ahci_broken_suspend(struct pci_dev *pdev)
++{
++ static const struct dmi_system_id sysids[] = {
++ /*
++ * On HP dv[4-6] and HDX18 with earlier BIOSen, link
++ * to the harddisk doesn't become online after
++ * resuming from STR. Warn and fail suspend.
++ *
++ * http://bugzilla.kernel.org/show_bug.cgi?id=12276
++ *
++ * Use dates instead of versions to match as HP is
++ * apparently recycling both product and version
++ * strings.
++ *
++ * http://bugzilla.kernel.org/show_bug.cgi?id=15462
++ */
++ {
++ .ident = "dv4",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME,
++ "HP Pavilion dv4 Notebook PC"),
++ },
++ .driver_data = "20090105", /* F.30 */
++ },
++ {
++ .ident = "dv5",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME,
++ "HP Pavilion dv5 Notebook PC"),
++ },
++ .driver_data = "20090506", /* F.16 */
++ },
++ {
++ .ident = "dv6",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME,
++ "HP Pavilion dv6 Notebook PC"),
++ },
++ .driver_data = "20090423", /* F.21 */
++ },
++ {
++ .ident = "HDX18",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME,
++ "HP HDX18 Notebook PC"),
++ },
++ .driver_data = "20090430", /* F.23 */
++ },
++ /*
++ * Acer eMachines G725 has the same problem. BIOS
++ * V1.03 is known to be broken. V3.04 is known to
++ * work. Between, there are V1.06, V2.06 and V3.03
++ * that we don't have much idea about. For now,
++ * blacklist anything older than V3.04.
++ *
++ * http://bugzilla.kernel.org/show_bug.cgi?id=15104
++ */
++ {
++ .ident = "G725",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
++ },
++ .driver_data = "20091216", /* V3.04 */
++ },
++ { } /* terminate list */
++ };
++ const struct dmi_system_id *dmi = dmi_first_match(sysids);
++ int year, month, date;
++ char buf[9];
++
++ if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
++ return false;
++
++ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
++ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
++
++ return strcmp(buf, dmi->driver_data) < 0;
++}
++
++static bool ahci_broken_online(struct pci_dev *pdev)
++{
++#define ENCODE_BUSDEVFN(bus, slot, func) \
++ (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
++ static const struct dmi_system_id sysids[] = {
++ /*
++ * There are several gigabyte boards which use
++ * SIMG5723s configured as hardware RAID. Certain
++ * 5723 firmware revisions shipped there keep the link
++ * online but fail to answer properly to SRST or
++ * IDENTIFY when no device is attached downstream
++ * causing libata to retry quite a few times leading
++ * to excessive detection delay.
++ *
++ * As these firmwares respond to the second reset try
++ * with invalid device signature, considering unknown
++ * sig as offline works around the problem acceptably.
++ */
++ {
++ .ident = "EP45-DQ6",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "Gigabyte Technology Co., Ltd."),
++ DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
++ },
++ .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
++ },
++ {
++ .ident = "EP45-DS5",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "Gigabyte Technology Co., Ltd."),
++ DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
++ },
++ .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
++ },
++ { } /* terminate list */
++ };
++#undef ENCODE_BUSDEVFN
++ const struct dmi_system_id *dmi = dmi_first_match(sysids);
++ unsigned int val;
++
++ if (!dmi)
++ return false;
++
++ val = (unsigned long)dmi->driver_data;
++
++ return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
++}
++
++static bool ahci_broken_devslp(struct pci_dev *pdev)
++{
++ /* device with broken DEVSLP but still showing SDS capability */
++ static const struct pci_device_id ids[] = {
++ { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
++ {}
++ };
++
++ return pci_match_id(ids, pdev);
++}
++
++#ifdef CONFIG_ATA_ACPI
++static void ahci_gtf_filter_workaround(struct ata_host *host)
++{
++ static const struct dmi_system_id sysids[] = {
++ /*
++ * Aspire 3810T issues a bunch of SATA enable commands
++ * via _GTF including an invalid one and one which is
++ * rejected by the device. Among the successful ones
++ * is FPDMA non-zero offset enable which when enabled
++ * only on the drive side leads to NCQ command
++ * failures. Filter it out.
++ */
++ {
++ .ident = "Aspire 3810T",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
++ },
++ .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
++ },
++ { }
++ };
++ const struct dmi_system_id *dmi = dmi_first_match(sysids);
++ unsigned int filter;
++ int i;
++
++ if (!dmi)
++ return;
++
++ filter = (unsigned long)dmi->driver_data;
++ dev_info(host->dev, "applying extra ACPI _GTF filter 0x%x for %s\n",
++ filter, dmi->ident);
++
++ for (i = 0; i < host->n_ports; i++) {
++ struct ata_port *ap = host->ports[i];
++ struct ata_link *link;
++ struct ata_device *dev;
++
++ ata_for_each_link(link, ap, EDGE)
++ ata_for_each_dev(dev, link, ALL)
++ dev->gtf_filter |= filter;
++ }
++}
++#else
++static inline void ahci_gtf_filter_workaround(struct ata_host *host)
++{}
++#endif
++
++static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
++ struct ahci_host_priv *hpriv)
++{
++ int rc, nvec;
++
++ if (hpriv->flags & AHCI_HFLAG_NO_MSI)
++ goto intx;
++
++ rc = pci_msi_vec_count(pdev);
++ if (rc < 0)
++ goto intx;
++
++ /*
++ * If number of MSIs is less than number of ports then Sharing Last
++ * Message mode could be enforced. In this case assume that advantage
++ * of multipe MSIs is negated and use single MSI mode instead.
++ */
++ if (rc < n_ports)
++ goto single_msi;
++
++ nvec = rc;
++ rc = pci_enable_msi_block(pdev, nvec);
++ if (rc < 0)
++ goto intx;
++ else if (rc > 0)
++ goto single_msi;
++
++ /* fallback to single MSI mode if the controller enforced MRSM mode */
++ if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
++ pci_disable_msi(pdev);
++ printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
++ goto single_msi;
++ }
++
++ return nvec;
++
++single_msi:
++ rc = pci_enable_msi(pdev);
++ if (rc)
++ goto intx;
++ return 1;
++
++intx:
++ pci_intx(pdev, 1);
++ return 0;
++}
++
++/**
++ * ahci_host_activate - start AHCI host, request IRQs and register it
++ * @host: target ATA host
++ * @irq: base IRQ number to request
++ * @n_msis: number of MSIs allocated for this host
++ * @irq_handler: irq_handler used when requesting IRQs
++ * @irq_flags: irq_flags used when requesting IRQs
++ *
++ * Similar to ata_host_activate, but requests IRQs according to AHCI-1.1
++ * when multiple MSIs were allocated. That is one MSI per port, starting
++ * from @irq.
++ *
++ * LOCKING:
++ * Inherited from calling layer (may sleep).
++ *
++ * RETURNS:
++ * 0 on success, -errno otherwise.
++ */
++int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
++{
++ int i, rc;
++
++ /* Sharing Last Message among several ports is not supported */
++ if (n_msis < host->n_ports)
++ return -EINVAL;
++
++ rc = ata_host_start(host);
++ if (rc)
++ return rc;
++
++ for (i = 0; i < host->n_ports; i++) {
++ struct ahci_port_priv *pp = host->ports[i]->private_data;
++
++ /* Do not receive interrupts sent by dummy ports */
++ if (!pp) {
++ disable_irq(irq + i);
++ continue;
++ }
++
++ rc = devm_request_threaded_irq(host->dev, irq + i,
++ ahci_hw_interrupt,
++ ahci_thread_fn, IRQF_SHARED,
++ pp->irq_desc, host->ports[i]);
++ if (rc)
++ goto out_free_irqs;
++ }
++
++ for (i = 0; i < host->n_ports; i++)
++ ata_port_desc(host->ports[i], "irq %d", irq + i);
++
++ rc = ata_host_register(host, &ahci_sht);
++ if (rc)
++ goto out_free_all_irqs;
++
++ return 0;
++
++out_free_all_irqs:
++ i = host->n_ports;
++out_free_irqs:
++ for (i--; i >= 0; i--)
++ devm_free_irq(host->dev, irq + i, host->ports[i]);
++
++ return rc;
++}
++
++static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++ unsigned int board_id = ent->driver_data;
++ struct ata_port_info pi = ahci_port_info[board_id];
++ const struct ata_port_info *ppi[] = { &pi, NULL };
++ struct device *dev = &pdev->dev;
++ struct ahci_host_priv *hpriv;
++ struct ata_host *host;
++ int n_ports, n_msis, i, rc;
++ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
++
++ VPRINTK("ENTER\n");
++
++ WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
++
++ ata_print_version_once(&pdev->dev, DRV_VERSION);
++
++ /* The AHCI driver can only drive the SATA ports, the PATA driver
++ can drive them all so if both drivers are selected make sure
++ AHCI stays out of the way */
++ if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
++ return -ENODEV;
++
++ /* Apple BIOS on MCP89 prevents us using AHCI */
++ if (is_mcp89_apple(pdev))
++ ahci_mcp89_apple_enable(pdev);
++
++ /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
++ * At the moment, we can only use the AHCI mode. Let the users know
++ * that for SAS drives they're out of luck.
++ */
++ if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
++ dev_info(&pdev->dev,
++ "PDC42819 can only drive SATA devices with this driver\n");
++
++ /* Both Connext and Enmotus devices use non-standard BARs */
++ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
++ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
++ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
++ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
++
++ /* acquire resources */
++ rc = pcim_enable_device(pdev);
++ if (rc)
++ return rc;
++
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ (pdev->device == 0x2652 || pdev->device == 0x2653)) {
++ u8 map;
++
++ /* ICH6s share the same PCI ID for both piix and ahci
++ * modes. Enabling ahci mode while MAP indicates
++ * combined mode is a bad idea. Yield to ata_piix.
++ */
++ pci_read_config_byte(pdev, ICH_MAP, &map);
++ if (map & 0x3) {
++ dev_info(&pdev->dev,
++ "controller is in combined mode, can't enable AHCI mode\n");
++ return -ENODEV;
++ }
++ }
++
++ /* AHCI controllers often implement SFF compatible interface.
++ * Grab all PCI BARs just in case.
++ */
++ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
++ if (rc == -EBUSY)
++ pcim_pin_device(pdev);
++ if (rc)
++ return rc;
++
++ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
++ if (!hpriv)
++ return -ENOMEM;
++ hpriv->flags |= (unsigned long)pi.private_data;
++
++ /* MCP65 revision A1 and A2 can't do MSI */
++ if (board_id == board_ahci_mcp65 &&
++ (pdev->revision == 0xa1 || pdev->revision == 0xa2))
++ hpriv->flags |= AHCI_HFLAG_NO_MSI;
++
++ /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
++ if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
++ hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
++
++ /* only some SB600s can do 64bit DMA */
++ if (ahci_sb600_enable_64bit(pdev))
++ hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
++
++ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
++
++ /* must set flag prior to save config in order to take effect */
++ if (ahci_broken_devslp(pdev))
++ hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
++
++ /* save initial config */
++ ahci_pci_save_initial_config(pdev, hpriv);
++
++ /* prepare host */
++ if (hpriv->cap & HOST_CAP_NCQ) {
++ pi.flags |= ATA_FLAG_NCQ;
++ /*
++ * Auto-activate optimization is supposed to be
++ * supported on all AHCI controllers indicating NCQ
++ * capability, but it seems to be broken on some
++ * chipsets including NVIDIAs.
++ */
++ if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
++ pi.flags |= ATA_FLAG_FPDMA_AA;
++
++ /*
++ * All AHCI controllers should be forward-compatible
++ * with the new auxiliary field. This code should be
++ * conditionalized if any buggy AHCI controllers are
++ * encountered.
++ */
++ pi.flags |= ATA_FLAG_FPDMA_AUX;
++ }
++
++ if (hpriv->cap & HOST_CAP_PMP)
++ pi.flags |= ATA_FLAG_PMP;
++
++ ahci_set_em_messages(hpriv, &pi);
++
++ if (ahci_broken_system_poweroff(pdev)) {
++ pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
++ dev_info(&pdev->dev,
++ "quirky BIOS, skipping spindown on poweroff\n");
++ }
++
++ if (ahci_broken_suspend(pdev)) {
++ hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
++ dev_warn(&pdev->dev,
++ "BIOS update required for suspend/resume\n");
++ }
++
++ if (ahci_broken_online(pdev)) {
++ hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
++ dev_info(&pdev->dev,
++ "online status unreliable, applying workaround\n");
++ }
++
++ /* CAP.NP sometimes indicate the index of the last enabled
++ * port, at other times, that of the last possible port, so
++ * determining the maximum port number requires looking at
++ * both CAP.NP and port_map.
++ */
++ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
++
++ n_msis = ahci_init_interrupts(pdev, n_ports, hpriv);
++ if (n_msis > 1)
++ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
++
++ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
++ if (!host)
++ return -ENOMEM;
++ host->private_data = hpriv;
++
++ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
++ host->flags |= ATA_HOST_PARALLEL_SCAN;
++ else
++ dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
++
++ if (pi.flags & ATA_FLAG_EM)
++ ahci_reset_em(host);
++
++ for (i = 0; i < host->n_ports; i++) {
++ struct ata_port *ap = host->ports[i];
++
++ ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
++ ata_port_pbar_desc(ap, ahci_pci_bar,
++ 0x100 + ap->port_no * 0x80, "port");
++
++ /* set enclosure management message type */
++ if (ap->flags & ATA_FLAG_EM)
++ ap->em_message_type = hpriv->em_msg_type;
++
++
++ /* disabled/not-implemented port */
++ if (!(hpriv->port_map & (1 << i)))
++ ap->ops = &ata_dummy_port_ops;
++ }
++
++ /* apply workaround for ASUS P5W DH Deluxe mainboard */
++ ahci_p5wdh_workaround(host);
++
++ /* apply gtf filter quirk */
++ ahci_gtf_filter_workaround(host);
++
++ /* initialize adapter */
++ rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
++ if (rc)
++ return rc;
++
++ rc = ahci_pci_reset_controller(host);
++ if (rc)
++ return rc;
++
++ ahci_pci_init_controller(host);
++ ahci_pci_print_info(host);
++
++ pci_set_master(pdev);
++
++ if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
++ return ahci_host_activate(host, pdev->irq, n_msis);
++
++ return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
++ &ahci_sht);
++}
++
++module_pci_driver(ahci_pci_driver);
++
++MODULE_AUTHOR("Jeff Garzik");
++MODULE_DESCRIPTION("AHCI SATA low-level driver");
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
++MODULE_VERSION(DRV_VERSION);
+diff -Nur linux-3.14.36/drivers/ata/ahci.h linux-openelec/drivers/ata/ahci.h
+--- linux-3.14.36/drivers/ata/ahci.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/ahci.h 2015-05-06 12:05:42.000000000 -0500
+@@ -37,6 +37,8 @@
+
+ #include <linux/clk.h>
+ #include <linux/libata.h>
++#include <linux/phy/phy.h>
++#include <linux/regulator/consumer.h>
+
+ /* Enclosure Management Control */
+ #define EM_CTRL_MSG_TYPE 0x000f0000
+@@ -51,6 +53,7 @@
+
+ enum {
+ AHCI_MAX_PORTS = 32,
++ AHCI_MAX_CLKS = 3,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+@@ -233,6 +236,8 @@
+ port start (wait until
+ error-handling stage) */
+ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
++ AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
++ AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
+
+ /* ap->flags bits */
+
+@@ -322,8 +327,17 @@
+ u32 em_loc; /* enclosure management location */
+ u32 em_buf_sz; /* EM buffer size in byte */
+ u32 em_msg_type; /* EM message type */
+- struct clk *clk; /* Only for platforms supporting clk */
++ bool got_runtime_pm; /* Did we do pm_runtime_get? */
++ struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
++ struct regulator *target_pwr; /* Optional */
++ struct phy *phy; /* If platform uses phy */
+ void *plat_data; /* Other platform data */
++ /*
++ * Optional ahci_start_engine override, if not set this gets set to the
++ * default ahci_start_engine during ahci_save_initial_config, this can
++ * be overridden anytime before the host is activated.
++ */
++ void (*start_engine)(struct ata_port *ap);
+ };
+
+ extern int ahci_ignore_sss;
+diff -Nur linux-3.14.36/drivers/ata/ahci_imx.c linux-openelec/drivers/ata/ahci_imx.c
+--- linux-3.14.36/drivers/ata/ahci_imx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/ahci_imx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -26,12 +26,29 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+ #include <linux/libata.h>
++#include <linux/busfreq-imx6.h>
+ #include "ahci.h"
+
+ enum {
+- PORT_PHY_CTL = 0x178, /* Port0 PHY Control */
+- PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */
+- HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
++ /* Timer 1-ms Register */
++ IMX_TIMER1MS = 0x00e0,
++ /* Port0 PHY Control Register */
++ IMX_P0PHYCR = 0x0178,
++ IMX_P0PHYCR_TEST_PDDQ = 1 << 20,
++ IMX_P0PHYCR_CR_READ = 1 << 19,
++ IMX_P0PHYCR_CR_WRITE = 1 << 18,
++ IMX_P0PHYCR_CR_CAP_DATA = 1 << 17,
++ IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16,
++ /* Port0 PHY Status Register */
++ IMX_P0PHYSR = 0x017c,
++ IMX_P0PHYSR_CR_ACK = 1 << 18,
++ IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0,
++ /* Lane0 Output Status Register */
++ IMX_LANE0_OUT_STAT = 0x2003,
++ IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1,
++ /* Clock Reset Register */
++ IMX_CLOCK_RESET = 0x7f3f,
++ IMX_CLOCK_RESET_RESET = 1 << 0,
+ };
+
+ enum ahci_imx_type {
+@@ -42,62 +59,230 @@
+ struct imx_ahci_priv {
+ struct platform_device *ahci_pdev;
+ enum ahci_imx_type type;
+-
+- /* i.MX53 clock */
+- struct clk *sata_gate_clk;
+- /* Common clock */
+- struct clk *sata_ref_clk;
+ struct clk *ahb_clk;
+-
+ struct regmap *gpr;
+ bool no_device;
+ bool first_time;
++ u32 phy_params;
+ };
+
+ static int ahci_imx_hotplug;
+ module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
+ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
+
+-static int imx_sata_clock_enable(struct device *dev)
++static void ahci_imx_host_stop(struct ata_host *host);
++
++static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
++{
++ int timeout = 10;
++ u32 crval;
++ u32 srval;
++
++ /* Assert or deassert the bit */
++ crval = readl(mmio + IMX_P0PHYCR);
++ if (assert)
++ crval |= bit;
++ else
++ crval &= ~bit;
++ writel(crval, mmio + IMX_P0PHYCR);
++
++ /* Wait for the cr_ack signal */
++ do {
++ srval = readl(mmio + IMX_P0PHYSR);
++ if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
++ break;
++ usleep_range(100, 200);
++ } while (--timeout);
++
++ return timeout ? 0 : -ETIMEDOUT;
++}
++
++static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
+ {
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
++ u32 crval = addr;
+ int ret;
+
+- if (imxpriv->type == AHCI_IMX53) {
+- ret = clk_prepare_enable(imxpriv->sata_gate_clk);
+- if (ret < 0) {
+- dev_err(dev, "prepare-enable sata_gate clock err:%d\n",
+- ret);
+- return ret;
+- }
++ /* Supply the address on cr_data_in */
++ writel(crval, mmio + IMX_P0PHYCR);
++
++ /* Assert the cr_cap_addr signal */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
++ if (ret)
++ return ret;
++
++ /* Deassert cr_cap_addr */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int imx_phy_reg_write(u16 val, void __iomem *mmio)
++{
++ u32 crval = val;
++ int ret;
++
++ /* Supply the data on cr_data_in */
++ writel(crval, mmio + IMX_P0PHYCR);
++
++ /* Assert the cr_cap_data signal */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
++ if (ret)
++ return ret;
++
++ /* Deassert cr_cap_data */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
++ if (ret)
++ return ret;
++
++ if (val & IMX_CLOCK_RESET_RESET) {
++ /*
++ * In case we're resetting the phy, it's unable to acknowledge,
++ * so we return immediately here.
++ */
++ crval |= IMX_P0PHYCR_CR_WRITE;
++ writel(crval, mmio + IMX_P0PHYCR);
++ goto out;
+ }
+
+- ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+- if (ret < 0) {
+- dev_err(dev, "prepare-enable sata_ref clock err:%d\n",
+- ret);
+- goto clk_err;
++ /* Assert the cr_write signal */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
++ if (ret)
++ return ret;
++
++ /* Deassert cr_write */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
++ if (ret)
++ return ret;
++
++out:
++ return 0;
++}
++
++static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
++{
++ int ret;
++
++ /* Assert the cr_read signal */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
++ if (ret)
++ return ret;
++
++ /* Capture the data from cr_data_out[] */
++ *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
++
++ /* Deassert cr_read */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
++{
++ void __iomem *mmio = hpriv->mmio;
++ int timeout = 10;
++ u16 val;
++ int ret;
++
++ /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
++ ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
++ if (ret)
++ return ret;
++ ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
++ if (ret)
++ return ret;
++
++ /* Wait for PHY RX_PLL to be stable */
++ do {
++ usleep_range(100, 200);
++ ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
++ if (ret)
++ return ret;
++ ret = imx_phy_reg_read(&val, mmio);
++ if (ret)
++ return ret;
++ if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
++ break;
++ } while (--timeout);
++
++ return timeout ? 0 : -ETIMEDOUT;
++}
++
++static int imx_sata_enable(struct ahci_host_priv *hpriv)
++{
++ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
++ struct device *dev = &imxpriv->ahci_pdev->dev;
++ int ret;
++
++ if (imxpriv->no_device)
++ return 0;
++
++ if (hpriv->target_pwr) {
++ ret = regulator_enable(hpriv->target_pwr);
++ if (ret)
++ return ret;
+ }
+
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ ret = ahci_platform_enable_clks(hpriv);
++ if (ret < 0)
++ goto disable_regulator;
++
+ if (imxpriv->type == AHCI_IMX6Q) {
++ /*
++ * set PHY Paremeters, two steps to configure the GPR13,
++ * one write for rest of parameters, mask of first write
++ * is 0x07ffffff, and the other one write for setting
++ * the mpll_clk_en.
++ */
++ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
++ IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
++ IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
++ IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
++ IMX6Q_GPR13_SATA_SPD_MODE_MASK |
++ IMX6Q_GPR13_SATA_MPLL_SS_EN |
++ IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
++ IMX6Q_GPR13_SATA_TX_BOOST_MASK |
++ IMX6Q_GPR13_SATA_TX_LVL_MASK |
++ IMX6Q_GPR13_SATA_MPLL_CLK_EN |
++ IMX6Q_GPR13_SATA_TX_EDGE_RATE,
++ imxpriv->phy_params);
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN);
++
++ usleep_range(100, 200);
++
++ ret = imx_sata_phy_reset(hpriv);
++ if (ret) {
++ dev_err(dev, "failed to reset phy: %d\n", ret);
++ goto disable_regulator;
++ }
+ }
+
+ usleep_range(1000, 2000);
+
+ return 0;
+
+-clk_err:
+- if (imxpriv->type == AHCI_IMX53)
+- clk_disable_unprepare(imxpriv->sata_gate_clk);
++disable_regulator:
++ release_bus_freq(BUS_FREQ_HIGH);
++
++ if (hpriv->target_pwr)
++ regulator_disable(hpriv->target_pwr);
++
+ return ret;
+ }
+
+-static void imx_sata_clock_disable(struct device *dev)
++static void imx_sata_disable(struct ahci_host_priv *hpriv)
+ {
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
++ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
++
++ if (imxpriv->no_device)
++ return;
+
+ if (imxpriv->type == AHCI_IMX6Q) {
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+@@ -105,10 +290,12 @@
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ }
+
+- clk_disable_unprepare(imxpriv->sata_ref_clk);
++ ahci_platform_disable_clks(hpriv);
+
+- if (imxpriv->type == AHCI_IMX53)
+- clk_disable_unprepare(imxpriv->sata_gate_clk);
++ release_bus_freq(BUS_FREQ_HIGH);
++
++ if (hpriv->target_pwr)
++ regulator_disable(hpriv->target_pwr);
+ }
+
+ static void ahci_imx_error_handler(struct ata_port *ap)
+@@ -118,7 +305,7 @@
+ struct ata_host *host = dev_get_drvdata(ap->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
++ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+
+ ahci_error_handler(ap);
+
+@@ -134,17 +321,23 @@
+ * without full reset once the pddq mode is enabled making it
+ * impossible to use as part of libata LPM.
+ */
+- reg_val = readl(mmio + PORT_PHY_CTL);
+- writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+- imx_sata_clock_disable(ap->dev);
++ reg_val = readl(mmio + IMX_P0PHYCR);
++ writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
++ imx_sata_disable(hpriv);
+ imxpriv->no_device = true;
++
++ dev_info(ap->dev, "no device found, disabling link.\n");
++ dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX
++ ".hotplug=1 to enable hotplug\n");
+ }
+
+ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+ {
+ struct ata_port *ap = link->ap;
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
++ struct ata_host *host = dev_get_drvdata(ap->dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+ int ret = -EIO;
+
+ if (imxpriv->type == AHCI_IMX53)
+@@ -156,7 +349,8 @@
+ }
+
+ static struct ata_port_operations ahci_imx_ops = {
+- .inherits = &ahci_platform_ops,
++ .inherits = &ahci_ops,
++ .host_stop = ahci_imx_host_stop,
+ .error_handler = ahci_imx_error_handler,
+ .softreset = ahci_imx_softreset,
+ };
+@@ -168,234 +362,306 @@
+ .port_ops = &ahci_imx_ops,
+ };
+
+-static int imx_sata_init(struct device *dev, void __iomem *mmio)
+-{
+- int ret = 0;
+- unsigned int reg_val;
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+-
+- ret = imx_sata_clock_enable(dev);
+- if (ret < 0)
+- return ret;
++static const struct of_device_id imx_ahci_of_match[] = {
++ { .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
++ { .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
++ {},
++};
++MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
+
+- /*
+- * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+- * and IP vendor specific register HOST_TIMER1MS.
+- * Configure CAP_SSS (support stagered spin up).
+- * Implement the port0.
+- * Get the ahb clock rate, and configure the TIMER1MS register.
+- */
+- reg_val = readl(mmio + HOST_CAP);
+- if (!(reg_val & HOST_CAP_SSS)) {
+- reg_val |= HOST_CAP_SSS;
+- writel(reg_val, mmio + HOST_CAP);
+- }
+- reg_val = readl(mmio + HOST_PORTS_IMPL);
+- if (!(reg_val & 0x1)) {
+- reg_val |= 0x1;
+- writel(reg_val, mmio + HOST_PORTS_IMPL);
+- }
++struct reg_value {
++ u32 of_value;
++ u32 reg_value;
++};
+
+- reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+- writel(reg_val, mmio + HOST_TIMER1MS);
++struct reg_property {
++ const char *name;
++ const struct reg_value *values;
++ size_t num_values;
++ u32 def_value;
++ u32 set_value;
++};
+
+- return 0;
+-}
++static const struct reg_value gpr13_tx_level[] = {
++ { 937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V },
++ { 947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V },
++ { 957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V },
++ { 966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V },
++ { 976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V },
++ { 986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V },
++ { 996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V },
++ { 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V },
++ { 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V },
++ { 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V },
++ { 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V },
++ { 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V },
++ { 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V },
++ { 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V },
++ { 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V },
++ { 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V },
++ { 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V },
++ { 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V },
++ { 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V },
++ { 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V },
++ { 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V },
++ { 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V },
++ { 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V },
++ { 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V },
++ { 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V },
++ { 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V },
++ { 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V },
++ { 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V },
++ { 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V },
++ { 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V },
++ { 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V },
++ { 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V }
++};
+
+-static void imx_sata_exit(struct device *dev)
+-{
+- imx_sata_clock_disable(dev);
+-}
++static const struct reg_value gpr13_tx_boost[] = {
++ { 0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB },
++ { 370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB },
++ { 740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB },
++ { 1110, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB },
++ { 1480, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB },
++ { 1850, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB },
++ { 2220, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB },
++ { 2590, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB },
++ { 2960, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB },
++ { 3330, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB },
++ { 3700, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB },
++ { 4070, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB },
++ { 4440, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB },
++ { 4810, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB },
++ { 5280, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB },
++ { 5750, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB }
++};
+
+-static int imx_ahci_suspend(struct device *dev)
+-{
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
++static const struct reg_value gpr13_tx_atten[] = {
++ { 8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 },
++ { 9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 },
++ { 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 },
++ { 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 },
++ { 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 },
++ { 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 },
++};
+
+- /*
+- * If no_device is set, The CLKs had been gated off in the
+- * initialization so don't do it again here.
+- */
+- if (!imxpriv->no_device)
+- imx_sata_clock_disable(dev);
++static const struct reg_value gpr13_rx_eq[] = {
++ { 500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB },
++ { 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB },
++ { 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB },
++ { 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB },
++ { 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB },
++ { 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB },
++ { 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB },
++ { 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB },
++};
+
+- return 0;
+-}
++static const struct reg_property gpr13_props[] = {
++ {
++ .name = "fsl,transmit-level-mV",
++ .values = gpr13_tx_level,
++ .num_values = ARRAY_SIZE(gpr13_tx_level),
++ .def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V,
++ }, {
++ .name = "fsl,transmit-boost-mdB",
++ .values = gpr13_tx_boost,
++ .num_values = ARRAY_SIZE(gpr13_tx_boost),
++ .def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB,
++ }, {
++ .name = "fsl,transmit-atten-16ths",
++ .values = gpr13_tx_atten,
++ .num_values = ARRAY_SIZE(gpr13_tx_atten),
++ .def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16,
++ }, {
++ .name = "fsl,receive-eq-mdB",
++ .values = gpr13_rx_eq,
++ .num_values = ARRAY_SIZE(gpr13_rx_eq),
++ .def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB,
++ }, {
++ .name = "fsl,no-spread-spectrum",
++ .def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN,
++ .set_value = 0,
++ },
++};
+
+-static int imx_ahci_resume(struct device *dev)
++static u32 imx_ahci_parse_props(struct device *dev,
++ const struct reg_property *prop, size_t num)
+ {
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+- int ret = 0;
+-
+- if (!imxpriv->no_device)
+- ret = imx_sata_clock_enable(dev);
++ struct device_node *np = dev->of_node;
++ u32 reg_value = 0;
++ int i, j;
++
++ for (i = 0; i < num; i++, prop++) {
++ u32 of_val;
++
++ if (prop->num_values == 0) {
++ if (of_property_read_bool(np, prop->name))
++ reg_value |= prop->set_value;
++ else
++ reg_value |= prop->def_value;
++ continue;
++ }
+
+- return ret;
+-}
++ if (of_property_read_u32(np, prop->name, &of_val)) {
++ dev_info(dev, "%s not specified, using %08x\n",
++ prop->name, prop->def_value);
++ reg_value |= prop->def_value;
++ continue;
++ }
+
+-static struct ahci_platform_data imx_sata_pdata = {
+- .init = imx_sata_init,
+- .exit = imx_sata_exit,
+- .ata_port_info = &ahci_imx_port_info,
+- .suspend = imx_ahci_suspend,
+- .resume = imx_ahci_resume,
++ for (j = 0; j < prop->num_values; j++) {
++ if (prop->values[j].of_value == of_val) {
++ dev_info(dev, "%s value %u, using %08x\n",
++ prop->name, of_val, prop->values[j].reg_value);
++ reg_value |= prop->values[j].reg_value;
++ break;
++ }
++ }
+
+-};
++ if (j == prop->num_values) {
++ dev_err(dev, "DT property %s is not a valid value\n",
++ prop->name);
++ reg_value |= prop->def_value;
++ }
++ }
+
+-static const struct of_device_id imx_ahci_of_match[] = {
+- { .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
+- { .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
++ return reg_value;
++}
+
+ static int imx_ahci_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+- struct resource *mem, *irq, res[2];
+ const struct of_device_id *of_id;
+- enum ahci_imx_type type;
+- const struct ahci_platform_data *pdata = NULL;
++ struct ahci_host_priv *hpriv;
+ struct imx_ahci_priv *imxpriv;
+- struct device *ahci_dev;
+- struct platform_device *ahci_pdev;
++ unsigned int reg_val;
+ int ret;
+
+ of_id = of_match_device(imx_ahci_of_match, dev);
+ if (!of_id)
+ return -EINVAL;
+
+- type = (enum ahci_imx_type)of_id->data;
+- pdata = &imx_sata_pdata;
+-
+ imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
+- if (!imxpriv) {
+- dev_err(dev, "can't alloc ahci_host_priv\n");
++ if (!imxpriv)
+ return -ENOMEM;
+- }
+-
+- ahci_pdev = platform_device_alloc("ahci", -1);
+- if (!ahci_pdev)
+- return -ENODEV;
+-
+- ahci_dev = &ahci_pdev->dev;
+- ahci_dev->parent = dev;
+
++ imxpriv->ahci_pdev = pdev;
+ imxpriv->no_device = false;
+ imxpriv->first_time = true;
+- imxpriv->type = type;
+-
++ imxpriv->type = (enum ahci_imx_type)of_id->data;
+ imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(imxpriv->ahb_clk)) {
+ dev_err(dev, "can't get ahb clock.\n");
+- ret = PTR_ERR(imxpriv->ahb_clk);
+- goto err_out;
+- }
+-
+- if (type == AHCI_IMX53) {
+- imxpriv->sata_gate_clk = devm_clk_get(dev, "sata_gate");
+- if (IS_ERR(imxpriv->sata_gate_clk)) {
+- dev_err(dev, "can't get sata_gate clock.\n");
+- ret = PTR_ERR(imxpriv->sata_gate_clk);
+- goto err_out;
+- }
+- }
+-
+- imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+- if (IS_ERR(imxpriv->sata_ref_clk)) {
+- dev_err(dev, "can't get sata_ref clock.\n");
+- ret = PTR_ERR(imxpriv->sata_ref_clk);
+- goto err_out;
++ return PTR_ERR(imxpriv->ahb_clk);
+ }
+
+- imxpriv->ahci_pdev = ahci_pdev;
+- platform_set_drvdata(pdev, imxpriv);
+-
+- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+- if (!mem || !irq) {
+- dev_err(dev, "no mmio/irq resource\n");
+- ret = -ENOMEM;
+- goto err_out;
+- }
+-
+- res[0] = *mem;
+- res[1] = *irq;
+-
+- ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+- ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
+- ahci_dev->of_node = dev->of_node;
++ if (imxpriv->type == AHCI_IMX6Q) {
++ u32 reg_value;
+
+- if (type == AHCI_IMX6Q) {
+ imxpriv->gpr = syscon_regmap_lookup_by_compatible(
+ "fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imxpriv->gpr)) {
+ dev_err(dev,
+ "failed to find fsl,imx6q-iomux-gpr regmap\n");
+- ret = PTR_ERR(imxpriv->gpr);
+- goto err_out;
++ return PTR_ERR(imxpriv->gpr);
+ }
+
+- /*
+- * Set PHY Paremeters, two steps to configure the GPR13,
+- * one write for rest of parameters, mask of first write
+- * is 0x07fffffe, and the other one write for setting
+- * the mpll_clk_en happens in imx_sata_clock_enable().
+- */
+- regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+- IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
+- IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
+- IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
+- IMX6Q_GPR13_SATA_SPD_MODE_MASK |
+- IMX6Q_GPR13_SATA_MPLL_SS_EN |
+- IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
+- IMX6Q_GPR13_SATA_TX_BOOST_MASK |
+- IMX6Q_GPR13_SATA_TX_LVL_MASK |
+- IMX6Q_GPR13_SATA_MPLL_CLK_EN |
+- IMX6Q_GPR13_SATA_TX_EDGE_RATE,
+- IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
++ reg_value = imx_ahci_parse_props(dev, gpr13_props,
++ ARRAY_SIZE(gpr13_props));
++
++ imxpriv->phy_params =
+ IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+ IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+ IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+- IMX6Q_GPR13_SATA_MPLL_SS_EN |
+- IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
+- IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
+- IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
++ reg_value;
+ }
+
+- ret = platform_device_add_resources(ahci_pdev, res, 2);
++ hpriv = ahci_platform_get_resources(pdev);
++ if (IS_ERR(hpriv))
++ return PTR_ERR(hpriv);
++
++ hpriv->plat_data = imxpriv;
++
++ ret = imx_sata_enable(hpriv);
+ if (ret)
+- goto err_out;
++ return ret;
+
+- ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
++ /*
++ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
++ * and IP vendor specific register IMX_TIMER1MS.
++ * Configure CAP_SSS (support stagered spin up).
++ * Implement the port0.
++ * Get the ahb clock rate, and configure the TIMER1MS register.
++ */
++ reg_val = readl(hpriv->mmio + HOST_CAP);
++ if (!(reg_val & HOST_CAP_SSS)) {
++ reg_val |= HOST_CAP_SSS;
++ writel(reg_val, hpriv->mmio + HOST_CAP);
++ }
++ reg_val = readl(hpriv->mmio + HOST_PORTS_IMPL);
++ if (!(reg_val & 0x1)) {
++ reg_val |= 0x1;
++ writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
++ }
++
++ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
++ writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
++
++ ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
++ 0, 0, 0);
+ if (ret)
+- goto err_out;
++ imx_sata_disable(hpriv);
++
++ return ret;
++}
+
+- ret = platform_device_add(ahci_pdev);
+- if (ret) {
+-err_out:
+- platform_device_put(ahci_pdev);
++static void ahci_imx_host_stop(struct ata_host *host)
++{
++ struct ahci_host_priv *hpriv = host->private_data;
++
++ imx_sata_disable(hpriv);
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int imx_ahci_suspend(struct device *dev)
++{
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int ret;
++
++ ret = ahci_platform_suspend_host(dev);
++ if (ret)
+ return ret;
+- }
++
++ imx_sata_disable(hpriv);
+
+ return 0;
+ }
+
+-static int imx_ahci_remove(struct platform_device *pdev)
++static int imx_ahci_resume(struct device *dev)
+ {
+- struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
+- struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int ret;
+
+- platform_device_unregister(ahci_pdev);
+- return 0;
++ ret = imx_sata_enable(hpriv);
++ if (ret)
++ return ret;
++
++ return ahci_platform_resume_host(dev);
+ }
++#endif
++
++static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
+
+ static struct platform_driver imx_ahci_driver = {
+ .probe = imx_ahci_probe,
+- .remove = imx_ahci_remove,
++ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "ahci-imx",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_ahci_of_match,
++ .pm = &ahci_imx_pm_ops,
+ },
+ };
+ module_platform_driver(imx_ahci_driver);
+diff -Nur linux-3.14.36/drivers/ata/ahci_platform.c linux-openelec/drivers/ata/ahci_platform.c
+--- linux-3.14.36/drivers/ata/ahci_platform.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/ahci_platform.c 2015-05-06 12:05:42.000000000 -0500
+@@ -12,135 +12,36 @@
+ * any later version.
+ */
+
+-#include <linux/clk.h>
+ #include <linux/kernel.h>
+-#include <linux/gfp.h>
+ #include <linux/module.h>
+ #include <linux/pm.h>
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+ #include <linux/device.h>
+ #include <linux/platform_device.h>
+ #include <linux/libata.h>
+ #include <linux/ahci_platform.h>
+ #include "ahci.h"
+
+-static void ahci_host_stop(struct ata_host *host);
+-
+-enum ahci_type {
+- AHCI, /* standard platform ahci */
+- IMX53_AHCI, /* ahci on i.mx53 */
+- STRICT_AHCI, /* delayed DMA engine start */
+-};
+-
+-static struct platform_device_id ahci_devtype[] = {
+- {
+- .name = "ahci",
+- .driver_data = AHCI,
+- }, {
+- .name = "imx53-ahci",
+- .driver_data = IMX53_AHCI,
+- }, {
+- .name = "strict-ahci",
+- .driver_data = STRICT_AHCI,
+- }, {
+- /* sentinel */
+- }
+-};
+-MODULE_DEVICE_TABLE(platform, ahci_devtype);
+-
+-struct ata_port_operations ahci_platform_ops = {
+- .inherits = &ahci_ops,
+- .host_stop = ahci_host_stop,
+-};
+-EXPORT_SYMBOL_GPL(ahci_platform_ops);
+-
+-static struct ata_port_operations ahci_platform_retry_srst_ops = {
+- .inherits = &ahci_pmp_retry_srst_ops,
+- .host_stop = ahci_host_stop,
+-};
+-
+-static const struct ata_port_info ahci_port_info[] = {
+- /* by features */
+- [AHCI] = {
+- .flags = AHCI_FLAG_COMMON,
+- .pio_mask = ATA_PIO4,
+- .udma_mask = ATA_UDMA6,
+- .port_ops = &ahci_platform_ops,
+- },
+- [IMX53_AHCI] = {
+- .flags = AHCI_FLAG_COMMON,
+- .pio_mask = ATA_PIO4,
+- .udma_mask = ATA_UDMA6,
+- .port_ops = &ahci_platform_retry_srst_ops,
+- },
+- [STRICT_AHCI] = {
+- AHCI_HFLAGS (AHCI_HFLAG_DELAY_ENGINE),
+- .flags = AHCI_FLAG_COMMON,
+- .pio_mask = ATA_PIO4,
+- .udma_mask = ATA_UDMA6,
+- .port_ops = &ahci_platform_ops,
+- },
+-};
+-
+-static struct scsi_host_template ahci_platform_sht = {
+- AHCI_SHT("ahci_platform"),
++static const struct ata_port_info ahci_port_info = {
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_platform_ops,
+ };
+
+ static int ahci_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+- const struct platform_device_id *id = platform_get_device_id(pdev);
+- struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
+- const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ahci_host_priv *hpriv;
+- struct ata_host *host;
+- struct resource *mem;
+- int irq;
+- int n_ports;
+- int i;
+ int rc;
+
+- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- if (!mem) {
+- dev_err(dev, "no mmio space\n");
+- return -EINVAL;
+- }
+-
+- irq = platform_get_irq(pdev, 0);
+- if (irq <= 0) {
+- dev_err(dev, "no irq\n");
+- return -EINVAL;
+- }
+-
+- if (pdata && pdata->ata_port_info)
+- pi = *pdata->ata_port_info;
+-
+- hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+- if (!hpriv) {
+- dev_err(dev, "can't alloc ahci_host_priv\n");
+- return -ENOMEM;
+- }
+-
+- hpriv->flags |= (unsigned long)pi.private_data;
++ hpriv = ahci_platform_get_resources(pdev);
++ if (IS_ERR(hpriv))
++ return PTR_ERR(hpriv);
+
+- hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
+- if (!hpriv->mmio) {
+- dev_err(dev, "can't map %pR\n", mem);
+- return -ENOMEM;
+- }
+-
+- hpriv->clk = clk_get(dev, NULL);
+- if (IS_ERR(hpriv->clk)) {
+- dev_err(dev, "can't get clock\n");
+- } else {
+- rc = clk_prepare_enable(hpriv->clk);
+- if (rc) {
+- dev_err(dev, "clock prepare enable failed");
+- goto free_clk;
+- }
+- }
++ rc = ahci_platform_enable_resources(hpriv);
++ if (rc)
++ return rc;
+
+ /*
+ * Some platforms might need to prepare for mmio region access,
+@@ -151,69 +52,10 @@
+ if (pdata && pdata->init) {
+ rc = pdata->init(dev, hpriv->mmio);
+ if (rc)
+- goto disable_unprepare_clk;
+- }
+-
+- ahci_save_initial_config(dev, hpriv,
+- pdata ? pdata->force_port_map : 0,
+- pdata ? pdata->mask_port_map : 0);
+-
+- /* prepare host */
+- if (hpriv->cap & HOST_CAP_NCQ)
+- pi.flags |= ATA_FLAG_NCQ;
+-
+- if (hpriv->cap & HOST_CAP_PMP)
+- pi.flags |= ATA_FLAG_PMP;
+-
+- ahci_set_em_messages(hpriv, &pi);
+-
+- /* CAP.NP sometimes indicate the index of the last enabled
+- * port, at other times, that of the last possible port, so
+- * determining the maximum port number requires looking at
+- * both CAP.NP and port_map.
+- */
+- n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+-
+- host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+- if (!host) {
+- rc = -ENOMEM;
+- goto pdata_exit;
++ goto disable_resources;
+ }
+
+- host->private_data = hpriv;
+-
+- if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+- host->flags |= ATA_HOST_PARALLEL_SCAN;
+- else
+- dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
+-
+- if (pi.flags & ATA_FLAG_EM)
+- ahci_reset_em(host);
+-
+- for (i = 0; i < host->n_ports; i++) {
+- struct ata_port *ap = host->ports[i];
+-
+- ata_port_desc(ap, "mmio %pR", mem);
+- ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+-
+- /* set enclosure management message type */
+- if (ap->flags & ATA_FLAG_EM)
+- ap->em_message_type = hpriv->em_msg_type;
+-
+- /* disabled/not-implemented port */
+- if (!(hpriv->port_map & (1 << i)))
+- ap->ops = &ata_dummy_port_ops;
+- }
+-
+- rc = ahci_reset_controller(host);
+- if (rc)
+- goto pdata_exit;
+-
+- ahci_init_controller(host);
+- ahci_print_info(host, "platform");
+-
+- rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+- &ahci_platform_sht);
++ rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, 0, 0, 0);
+ if (rc)
+ goto pdata_exit;
+
+@@ -221,115 +63,19 @@
+ pdata_exit:
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+-disable_unprepare_clk:
+- if (!IS_ERR(hpriv->clk))
+- clk_disable_unprepare(hpriv->clk);
+-free_clk:
+- if (!IS_ERR(hpriv->clk))
+- clk_put(hpriv->clk);
+- return rc;
+-}
+-
+-static void ahci_host_stop(struct ata_host *host)
+-{
+- struct device *dev = host->dev;
+- struct ahci_platform_data *pdata = dev_get_platdata(dev);
+- struct ahci_host_priv *hpriv = host->private_data;
+-
+- if (pdata && pdata->exit)
+- pdata->exit(dev);
+-
+- if (!IS_ERR(hpriv->clk)) {
+- clk_disable_unprepare(hpriv->clk);
+- clk_put(hpriv->clk);
+- }
+-}
+-
+-#ifdef CONFIG_PM_SLEEP
+-static int ahci_suspend(struct device *dev)
+-{
+- struct ahci_platform_data *pdata = dev_get_platdata(dev);
+- struct ata_host *host = dev_get_drvdata(dev);
+- struct ahci_host_priv *hpriv = host->private_data;
+- void __iomem *mmio = hpriv->mmio;
+- u32 ctl;
+- int rc;
+-
+- if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+- dev_err(dev, "firmware update required for suspend/resume\n");
+- return -EIO;
+- }
+-
+- /*
+- * AHCI spec rev1.1 section 8.3.3:
+- * Software must disable interrupts prior to requesting a
+- * transition of the HBA to D3 state.
+- */
+- ctl = readl(mmio + HOST_CTL);
+- ctl &= ~HOST_IRQ_EN;
+- writel(ctl, mmio + HOST_CTL);
+- readl(mmio + HOST_CTL); /* flush */
+-
+- rc = ata_host_suspend(host, PMSG_SUSPEND);
+- if (rc)
+- return rc;
+-
+- if (pdata && pdata->suspend)
+- return pdata->suspend(dev);
+-
+- if (!IS_ERR(hpriv->clk))
+- clk_disable_unprepare(hpriv->clk);
+-
+- return 0;
+-}
+-
+-static int ahci_resume(struct device *dev)
+-{
+- struct ahci_platform_data *pdata = dev_get_platdata(dev);
+- struct ata_host *host = dev_get_drvdata(dev);
+- struct ahci_host_priv *hpriv = host->private_data;
+- int rc;
+-
+- if (!IS_ERR(hpriv->clk)) {
+- rc = clk_prepare_enable(hpriv->clk);
+- if (rc) {
+- dev_err(dev, "clock prepare enable failed");
+- return rc;
+- }
+- }
+-
+- if (pdata && pdata->resume) {
+- rc = pdata->resume(dev);
+- if (rc)
+- goto disable_unprepare_clk;
+- }
+-
+- if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+- rc = ahci_reset_controller(host);
+- if (rc)
+- goto disable_unprepare_clk;
+-
+- ahci_init_controller(host);
+- }
+-
+- ata_host_resume(host);
+-
+- return 0;
+-
+-disable_unprepare_clk:
+- if (!IS_ERR(hpriv->clk))
+- clk_disable_unprepare(hpriv->clk);
+-
++disable_resources:
++ ahci_platform_disable_resources(hpriv);
+ return rc;
+ }
+-#endif
+
+-static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
++static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
++ ahci_platform_resume);
+
+ static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "snps,spear-ahci", },
+ { .compatible = "snps,exynos5440-ahci", },
+ { .compatible = "ibm,476gtr-ahci", },
++ { .compatible = "snps,dwc-ahci", },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, ahci_of_match);
+@@ -343,7 +89,6 @@
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_pm_ops,
+ },
+- .id_table = ahci_devtype,
+ };
+ module_platform_driver(ahci_driver);
+
+diff -Nur linux-3.14.36/drivers/ata/ata_generic.c linux-openelec/drivers/ata/ata_generic.c
+--- linux-3.14.36/drivers/ata/ata_generic.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/ata_generic.c 2015-05-06 12:05:42.000000000 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/Kconfig linux-openelec/drivers/ata/Kconfig
+--- linux-3.14.36/drivers/ata/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -99,7 +99,7 @@
+
+ config AHCI_IMX
+ tristate "Freescale i.MX AHCI SATA support"
+- depends on SATA_AHCI_PLATFORM && MFD_SYSCON
++ depends on MFD_SYSCON
+ help
+ This option enables support for the Freescale i.MX SoC's
+ onboard AHCI SATA.
+diff -Nur linux-3.14.36/drivers/ata/libahci.c linux-openelec/drivers/ata/libahci.c
+--- linux-3.14.36/drivers/ata/libahci.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/libahci.c 2015-05-06 12:05:42.000000000 -0500
+@@ -35,7 +35,6 @@
+ #include <linux/kernel.h>
+ #include <linux/gfp.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+@@ -394,6 +393,9 @@
+ *
+ * If inconsistent, config values are fixed up by this function.
+ *
++ * If it is not set already this function sets hpriv->start_engine to
++ * ahci_start_engine.
++ *
+ * LOCKING:
+ * None.
+ */
+@@ -450,11 +452,23 @@
+ cap &= ~HOST_CAP_SNTF;
+ }
+
++ if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
++ dev_info(dev,
++ "controller can't do DEVSLP, turning off\n");
++ cap2 &= ~HOST_CAP2_SDS;
++ cap2 &= ~HOST_CAP2_SADM;
++ }
++
+ if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+ dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
+ cap |= HOST_CAP_FBS;
+ }
+
++ if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) {
++ dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n");
++ cap &= ~HOST_CAP_FBS;
++ }
++
+ if (force_port_map && port_map != force_port_map) {
+ dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, force_port_map);
+@@ -500,6 +514,9 @@
+ hpriv->cap = cap;
+ hpriv->cap2 = cap2;
+ hpriv->port_map = port_map;
++
++ if (!hpriv->start_engine)
++ hpriv->start_engine = ahci_start_engine;
+ }
+ EXPORT_SYMBOL_GPL(ahci_save_initial_config);
+
+@@ -766,7 +783,7 @@
+
+ /* enable DMA */
+ if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ /* turn on LEDs */
+ if (ap->flags & ATA_FLAG_EM) {
+@@ -1234,7 +1251,7 @@
+
+ /* restart engine */
+ out_restart:
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(ahci_kick_engine);
+@@ -1426,6 +1443,7 @@
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+@@ -1443,7 +1461,7 @@
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+@@ -2007,10 +2025,12 @@
+
+ void ahci_error_handler(struct ata_port *ap)
+ {
++ struct ahci_host_priv *hpriv = ap->host->private_data;
++
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ /* restart engine */
+ ahci_stop_engine(ap);
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+ }
+
+ sata_pmp_error_handler(ap);
+@@ -2031,6 +2051,7 @@
+
+ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
+ {
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_device *dev = ap->link.device;
+ u32 devslp, dm, dito, mdat, deto;
+@@ -2094,7 +2115,7 @@
+ PORT_DEVSLP_ADSE);
+ writel(devslp, port_mmio + PORT_DEVSLP);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ /* enable device sleep feature for the drive */
+ err_mask = ata_dev_set_feature(dev,
+@@ -2106,6 +2127,7 @@
+
+ static void ahci_enable_fbs(struct ata_port *ap)
+ {
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+@@ -2134,11 +2156,12 @@
+ } else
+ dev_err(ap->host->dev, "Failed to enable FBS\n");
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+ }
+
+ static void ahci_disable_fbs(struct ata_port *ap)
+ {
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+@@ -2166,7 +2189,7 @@
+ pp->fbs_enabled = false;
+ }
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+ }
+
+ static void ahci_pmp_attach(struct ata_port *ap)
+diff -Nur linux-3.14.36/drivers/ata/libahci_platform.c linux-openelec/drivers/ata/libahci_platform.c
+--- linux-3.14.36/drivers/ata/libahci_platform.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/ata/libahci_platform.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,544 @@
++/*
++ * AHCI SATA platform library
++ *
++ * Copyright 2004-2005 Red Hat, Inc.
++ * Jeff Garzik <jgarzik@pobox.com>
++ * Copyright 2010 MontaVista Software, LLC.
++ * Anton Vorontsov <avorontsov@ru.mvista.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ */
++
++#include <linux/clk.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/module.h>
++#include <linux/pm.h>
++#include <linux/interrupt.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/libata.h>
++#include <linux/ahci_platform.h>
++#include <linux/phy/phy.h>
++#include <linux/pm_runtime.h>
++#include "ahci.h"
++
++static void ahci_host_stop(struct ata_host *host);
++
++struct ata_port_operations ahci_platform_ops = {
++ .inherits = &ahci_ops,
++ .host_stop = ahci_host_stop,
++};
++EXPORT_SYMBOL_GPL(ahci_platform_ops);
++
++static struct scsi_host_template ahci_platform_sht = {
++ AHCI_SHT("ahci_platform"),
++};
++
++/**
++ * ahci_platform_enable_clks - Enable platform clocks
++ * @hpriv: host private area to store config values
++ *
++ * This function enables all the clks found in hpriv->clks, starting at
++ * index 0. If any clk fails to enable it disables all the clks already
++ * enabled in reverse order, and then returns an error.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
++{
++ int c, rc;
++
++ for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) {
++ rc = clk_prepare_enable(hpriv->clks[c]);
++ if (rc)
++ goto disable_unprepare_clk;
++ }
++ return 0;
++
++disable_unprepare_clk:
++ while (--c >= 0)
++ clk_disable_unprepare(hpriv->clks[c]);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
++
++/**
++ * ahci_platform_disable_clks - Disable platform clocks
++ * @hpriv: host private area to store config values
++ *
++ * This function disables all the clks found in hpriv->clks, in reverse
++ * order of ahci_platform_enable_clks (starting at the end of the array).
++ */
++void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
++{
++ int c;
++
++ for (c = AHCI_MAX_CLKS - 1; c >= 0; c--)
++ if (hpriv->clks[c])
++ clk_disable_unprepare(hpriv->clks[c]);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
++
++/**
++ * ahci_platform_enable_resources - Enable platform resources
++ * @hpriv: host private area to store config values
++ *
++ * This function enables all ahci_platform managed resources in the
++ * following order:
++ * 1) Regulator
++ * 2) Clocks (through ahci_platform_enable_clks)
++ * 3) Phy
++ *
++ * If resource enabling fails at any point the previous enabled resources
++ * are disabled in reverse order.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
++{
++ int rc;
++
++ if (hpriv->target_pwr) {
++ rc = regulator_enable(hpriv->target_pwr);
++ if (rc)
++ return rc;
++ }
++
++ rc = ahci_platform_enable_clks(hpriv);
++ if (rc)
++ goto disable_regulator;
++
++ if (hpriv->phy) {
++ rc = phy_init(hpriv->phy);
++ if (rc)
++ goto disable_clks;
++
++ rc = phy_power_on(hpriv->phy);
++ if (rc) {
++ phy_exit(hpriv->phy);
++ goto disable_clks;
++ }
++ }
++
++ return 0;
++
++disable_clks:
++ ahci_platform_disable_clks(hpriv);
++
++disable_regulator:
++ if (hpriv->target_pwr)
++ regulator_disable(hpriv->target_pwr);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
++
++/**
++ * ahci_platform_disable_resources - Disable platform resources
++ * @hpriv: host private area to store config values
++ *
++ * This function disables all ahci_platform managed resources in the
++ * following order:
++ * 1) Phy
++ * 2) Clocks (through ahci_platform_disable_clks)
++ * 3) Regulator
++ */
++void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
++{
++ if (hpriv->phy) {
++ phy_power_off(hpriv->phy);
++ phy_exit(hpriv->phy);
++ }
++
++ ahci_platform_disable_clks(hpriv);
++
++ if (hpriv->target_pwr)
++ regulator_disable(hpriv->target_pwr);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
++
++static void ahci_platform_put_resources(struct device *dev, void *res)
++{
++ struct ahci_host_priv *hpriv = res;
++ int c;
++
++ if (hpriv->got_runtime_pm) {
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++ }
++
++ for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
++ clk_put(hpriv->clks[c]);
++}
++
++/**
++ * ahci_platform_get_resources - Get platform resources
++ * @pdev: platform device to get resources for
++ *
++ * This function allocates an ahci_host_priv struct, and gets the following
++ * resources, storing a reference to them inside the returned struct:
++ *
++ * 1) mmio registers (IORESOURCE_MEM 0, mandatory)
++ * 2) regulator for controlling the targets power (optional)
++ * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
++ * or for non devicetree enabled platforms a single clock
++ * 4) phy (optional)
++ *
++ * RETURNS:
++ * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
++ */
++struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct ahci_host_priv *hpriv;
++ struct clk *clk;
++ int i, rc = -ENOMEM;
++
++ if (!devres_open_group(dev, NULL, GFP_KERNEL))
++ return ERR_PTR(-ENOMEM);
++
++ hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv),
++ GFP_KERNEL);
++ if (!hpriv)
++ goto err_out;
++
++ devres_add(dev, hpriv);
++
++ hpriv->mmio = devm_ioremap_resource(dev,
++ platform_get_resource(pdev, IORESOURCE_MEM, 0));
++ if (IS_ERR(hpriv->mmio)) {
++ dev_err(dev, "no mmio space\n");
++ rc = PTR_ERR(hpriv->mmio);
++ goto err_out;
++ }
++
++ hpriv->target_pwr = devm_regulator_get_optional(dev, "target");
++ if (IS_ERR(hpriv->target_pwr)) {
++ rc = PTR_ERR(hpriv->target_pwr);
++ if (rc == -EPROBE_DEFER)
++ goto err_out;
++ hpriv->target_pwr = NULL;
++ }
++
++ for (i = 0; i < AHCI_MAX_CLKS; i++) {
++ /*
++ * For now we must use clk_get(dev, NULL) for the first clock,
++ * because some platforms (da850, spear13xx) are not yet
++ * converted to use devicetree for clocks. For new platforms
++ * this is equivalent to of_clk_get(dev->of_node, 0).
++ */
++ if (i == 0)
++ clk = clk_get(dev, NULL);
++ else
++ clk = of_clk_get(dev->of_node, i);
++
++ if (IS_ERR(clk)) {
++ rc = PTR_ERR(clk);
++ if (rc == -EPROBE_DEFER)
++ goto err_out;
++ break;
++ }
++ hpriv->clks[i] = clk;
++ }
++
++ hpriv->phy = devm_phy_get(dev, "sata-phy");
++ if (IS_ERR(hpriv->phy)) {
++ rc = PTR_ERR(hpriv->phy);
++ switch (rc) {
++ case -ENODEV:
++ case -ENOSYS:
++ /* continue normally */
++ hpriv->phy = NULL;
++ break;
++
++ case -EPROBE_DEFER:
++ goto err_out;
++
++ default:
++ dev_err(dev, "couldn't get sata-phy\n");
++ goto err_out;
++ }
++ }
++
++ pm_runtime_enable(dev);
++ pm_runtime_get_sync(dev);
++ hpriv->got_runtime_pm = true;
++
++ devres_remove_group(dev, NULL);
++ return hpriv;
++
++err_out:
++ devres_release_group(dev, NULL);
++ return ERR_PTR(rc);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
++
++/**
++ * ahci_platform_init_host - Bring up an ahci-platform host
++ * @pdev: platform device pointer for the host
++ * @hpriv: ahci-host private data for the host
++ * @pi_template: template for the ata_port_info to use
++ * @host_flags: ahci host flags used in ahci_host_priv
++ * @force_port_map: param passed to ahci_save_initial_config
++ * @mask_port_map: param passed to ahci_save_initial_config
++ *
++ * This function does all the usual steps needed to bring up an
++ * ahci-platform host, note any necessary resources (ie clks, phy, etc.)
++ * must be initialized / enabled before calling this.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_init_host(struct platform_device *pdev,
++ struct ahci_host_priv *hpriv,
++ const struct ata_port_info *pi_template,
++ unsigned long host_flags,
++ unsigned int force_port_map,
++ unsigned int mask_port_map)
++{
++ struct device *dev = &pdev->dev;
++ struct ata_port_info pi = *pi_template;
++ const struct ata_port_info *ppi[] = { &pi, NULL };
++ struct ata_host *host;
++ int i, irq, n_ports, rc;
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq <= 0) {
++ dev_err(dev, "no irq\n");
++ return -EINVAL;
++ }
++
++ /* prepare host */
++ pi.private_data = (void *)host_flags;
++ hpriv->flags |= host_flags;
++
++ ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);
++
++ if (hpriv->cap & HOST_CAP_NCQ)
++ pi.flags |= ATA_FLAG_NCQ;
++
++ if (hpriv->cap & HOST_CAP_PMP)
++ pi.flags |= ATA_FLAG_PMP;
++
++ ahci_set_em_messages(hpriv, &pi);
++
++ /* CAP.NP sometimes indicate the index of the last enabled
++ * port, at other times, that of the last possible port, so
++ * determining the maximum port number requires looking at
++ * both CAP.NP and port_map.
++ */
++ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
++
++ host = ata_host_alloc_pinfo(dev, ppi, n_ports);
++ if (!host)
++ return -ENOMEM;
++
++ host->private_data = hpriv;
++
++ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
++ host->flags |= ATA_HOST_PARALLEL_SCAN;
++ else
++ dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
++
++ if (pi.flags & ATA_FLAG_EM)
++ ahci_reset_em(host);
++
++ for (i = 0; i < host->n_ports; i++) {
++ struct ata_port *ap = host->ports[i];
++
++ ata_port_desc(ap, "mmio %pR",
++ platform_get_resource(pdev, IORESOURCE_MEM, 0));
++ ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
++
++ /* set enclosure management message type */
++ if (ap->flags & ATA_FLAG_EM)
++ ap->em_message_type = hpriv->em_msg_type;
++
++ /* disabled/not-implemented port */
++ if (!(hpriv->port_map & (1 << i)))
++ ap->ops = &ata_dummy_port_ops;
++ }
++
++ rc = ahci_reset_controller(host);
++ if (rc)
++ return rc;
++
++ ahci_init_controller(host);
++ ahci_print_info(host, "platform");
++
++ return ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
++ &ahci_platform_sht);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_init_host);
++
++static void ahci_host_stop(struct ata_host *host)
++{
++ struct device *dev = host->dev;
++ struct ahci_platform_data *pdata = dev_get_platdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++
++ if (pdata && pdata->exit)
++ pdata->exit(dev);
++
++ ahci_platform_disable_resources(hpriv);
++}
++
++#ifdef CONFIG_PM_SLEEP
++/**
++ * ahci_platform_suspend_host - Suspend an ahci-platform host
++ * @dev: device pointer for the host
++ *
++ * This function does all the usual steps needed to suspend an
++ * ahci-platform host, note any necessary resources (ie clks, phy, etc.)
++ * must be disabled after calling this.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_suspend_host(struct device *dev)
++{
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ void __iomem *mmio = hpriv->mmio;
++ u32 ctl;
++
++ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
++ dev_err(dev, "firmware update required for suspend/resume\n");
++ return -EIO;
++ }
++
++ /*
++ * AHCI spec rev1.1 section 8.3.3:
++ * Software must disable interrupts prior to requesting a
++ * transition of the HBA to D3 state.
++ */
++ ctl = readl(mmio + HOST_CTL);
++ ctl &= ~HOST_IRQ_EN;
++ writel(ctl, mmio + HOST_CTL);
++ readl(mmio + HOST_CTL); /* flush */
++
++ return ata_host_suspend(host, PMSG_SUSPEND);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
++
++/**
++ * ahci_platform_resume_host - Resume an ahci-platform host
++ * @dev: device pointer for the host
++ *
++ * This function does all the usual steps needed to resume an ahci-platform
++ * host, note any necessary resources (ie clks, phy, etc.) must be
++ * initialized / enabled before calling this.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_resume_host(struct device *dev)
++{
++ struct ata_host *host = dev_get_drvdata(dev);
++ int rc;
++
++ if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
++ rc = ahci_reset_controller(host);
++ if (rc)
++ return rc;
++
++ ahci_init_controller(host);
++ }
++
++ ata_host_resume(host);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_resume_host);
++
++/**
++ * ahci_platform_suspend - Suspend an ahci-platform device
++ * @dev: the platform device to suspend
++ *
++ * This function suspends the host associated with the device, followed by
++ * disabling all the resources of the device.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_suspend(struct device *dev)
++{
++ struct ahci_platform_data *pdata = dev_get_platdata(dev);
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int rc;
++
++ rc = ahci_platform_suspend_host(dev);
++ if (rc)
++ return rc;
++
++ if (pdata && pdata->suspend) {
++ rc = pdata->suspend(dev);
++ if (rc)
++ goto resume_host;
++ }
++
++ ahci_platform_disable_resources(hpriv);
++
++ return 0;
++
++resume_host:
++ ahci_platform_resume_host(dev);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_suspend);
++
++/**
++ * ahci_platform_resume - Resume an ahci-platform device
++ * @dev: the platform device to resume
++ *
++ * This function enables all the resources of the device followed by
++ * resuming the host associated with the device.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_resume(struct device *dev)
++{
++ struct ahci_platform_data *pdata = dev_get_platdata(dev);
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int rc;
++
++ rc = ahci_platform_enable_resources(hpriv);
++ if (rc)
++ return rc;
++
++ if (pdata && pdata->resume) {
++ rc = pdata->resume(dev);
++ if (rc)
++ goto disable_resources;
++ }
++
++ rc = ahci_platform_resume_host(dev);
++ if (rc)
++ goto disable_resources;
++
++ /* We resumed so update PM runtime state */
++ pm_runtime_disable(dev);
++ pm_runtime_set_active(dev);
++ pm_runtime_enable(dev);
++
++ return 0;
++
++disable_resources:
++ ahci_platform_disable_resources(hpriv);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_resume);
++#endif
++
++MODULE_DESCRIPTION("AHCI SATA platform library");
++MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/ata/libata-core.c linux-openelec/drivers/ata/libata-core.c
+--- linux-3.14.36/drivers/ata/libata-core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/libata-core.c 2015-07-24 18:03:28.456842002 -0500
+@@ -1524,7 +1524,7 @@
+ * @dev: Device to which the command is sent
+ * @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
+- * @dma_dir: Data tranfer direction of the command
++ * @dma_dir: Data transfer direction of the command
+ * @sgl: sg list for the data buffer of the command
+ * @n_elem: Number of sg entries
+ * @timeout: Timeout in msecs (0 for default)
+@@ -1712,7 +1712,7 @@
+ * @dev: Device to which the command is sent
+ * @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
+- * @dma_dir: Data tranfer direction of the command
++ * @dma_dir: Data transfer direction of the command
+ * @buf: Data buffer of the command
+ * @buflen: Length of data buffer
+ * @timeout: Timeout in msecs (0 for default)
+diff -Nur linux-3.14.36/drivers/ata/Makefile linux-openelec/drivers/ata/Makefile
+--- linux-3.14.36/drivers/ata/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -4,13 +4,13 @@
+ # non-SFF interface
+ obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
+ obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
+-obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
++obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
+ obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
+ obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
+ obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
+ obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
+-obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
++obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
+
+ # SFF w/ custom DMA
+ obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
+diff -Nur linux-3.14.36/drivers/ata/pata_acpi.c linux-openelec/drivers/ata/pata_acpi.c
+--- linux-3.14.36/drivers/ata/pata_acpi.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_acpi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -7,7 +7,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_amd.c linux-openelec/drivers/ata/pata_amd.c
+--- linux-3.14.36/drivers/ata/pata_amd.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_amd.c 2015-05-06 12:05:42.000000000 -0500
+@@ -17,7 +17,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_artop.c linux-openelec/drivers/ata/pata_artop.c
+--- linux-3.14.36/drivers/ata/pata_artop.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_artop.c 2015-05-06 12:05:42.000000000 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_at91.c linux-openelec/drivers/ata/pata_at91.c
+--- linux-3.14.36/drivers/ata/pata_at91.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_at91.c 2015-05-06 12:05:42.000000000 -0500
+@@ -18,7 +18,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/gfp.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_atiixp.c linux-openelec/drivers/ata/pata_atiixp.c
+--- linux-3.14.36/drivers/ata/pata_atiixp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_atiixp.c 2015-05-06 12:05:42.000000000 -0500
+@@ -15,7 +15,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_atp867x.c linux-openelec/drivers/ata/pata_atp867x.c
+--- linux-3.14.36/drivers/ata/pata_atp867x.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_atp867x.c 2015-05-06 12:05:42.000000000 -0500
+@@ -29,7 +29,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_cmd640.c linux-openelec/drivers/ata/pata_cmd640.c
+--- linux-3.14.36/drivers/ata/pata_cmd640.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_cmd640.c 2015-05-06 12:05:42.000000000 -0500
+@@ -15,7 +15,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/gfp.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_cmd64x.c linux-openelec/drivers/ata/pata_cmd64x.c
+--- linux-3.14.36/drivers/ata/pata_cmd64x.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_cmd64x.c 2015-05-06 12:05:42.000000000 -0500
+@@ -26,7 +26,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_cs5520.c linux-openelec/drivers/ata/pata_cs5520.c
+--- linux-3.14.36/drivers/ata/pata_cs5520.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_cs5520.c 2015-05-06 12:05:42.000000000 -0500
+@@ -34,7 +34,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_cs5530.c linux-openelec/drivers/ata/pata_cs5530.c
+--- linux-3.14.36/drivers/ata/pata_cs5530.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_cs5530.c 2015-05-06 12:05:42.000000000 -0500
+@@ -26,7 +26,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_cs5535.c linux-openelec/drivers/ata/pata_cs5535.c
+--- linux-3.14.36/drivers/ata/pata_cs5535.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_cs5535.c 2015-05-06 12:05:42.000000000 -0500
+@@ -31,7 +31,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_cs5536.c linux-openelec/drivers/ata/pata_cs5536.c
+--- linux-3.14.36/drivers/ata/pata_cs5536.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_cs5536.c 2015-05-06 12:05:42.000000000 -0500
+@@ -33,7 +33,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/libata.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_cypress.c linux-openelec/drivers/ata/pata_cypress.c
+--- linux-3.14.36/drivers/ata/pata_cypress.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_cypress.c 2015-05-06 12:05:42.000000000 -0500
+@@ -11,7 +11,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_efar.c linux-openelec/drivers/ata/pata_efar.c
+--- linux-3.14.36/drivers/ata/pata_efar.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_efar.c 2015-05-06 12:05:42.000000000 -0500
+@@ -14,7 +14,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_ep93xx.c linux-openelec/drivers/ata/pata_ep93xx.c
+--- linux-3.14.36/drivers/ata/pata_ep93xx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_ep93xx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -34,7 +34,6 @@
+ #include <linux/err.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <scsi/scsi_host.h>
+ #include <linux/ata.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_hpt366.c linux-openelec/drivers/ata/pata_hpt366.c
+--- linux-3.14.36/drivers/ata/pata_hpt366.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_hpt366.c 2015-05-06 12:05:42.000000000 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_hpt37x.c linux-openelec/drivers/ata/pata_hpt37x.c
+--- linux-3.14.36/drivers/ata/pata_hpt37x.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_hpt37x.c 2015-05-06 12:05:42.000000000 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_hpt3x2n.c linux-openelec/drivers/ata/pata_hpt3x2n.c
+--- linux-3.14.36/drivers/ata/pata_hpt3x2n.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_hpt3x2n.c 2015-05-06 12:05:42.000000000 -0500
+@@ -20,7 +20,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_hpt3x3.c linux-openelec/drivers/ata/pata_hpt3x3.c
+--- linux-3.14.36/drivers/ata/pata_hpt3x3.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_hpt3x3.c 2015-05-06 12:05:42.000000000 -0500
+@@ -16,7 +16,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_imx.c linux-openelec/drivers/ata/pata_imx.c
+--- linux-3.14.36/drivers/ata/pata_imx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_imx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -15,7 +15,6 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <scsi/scsi_host.h>
+ #include <linux/ata.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_it8213.c linux-openelec/drivers/ata/pata_it8213.c
+--- linux-3.14.36/drivers/ata/pata_it8213.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_it8213.c 2015-05-06 12:05:42.000000000 -0500
+@@ -10,7 +10,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_it821x.c linux-openelec/drivers/ata/pata_it821x.c
+--- linux-3.14.36/drivers/ata/pata_it821x.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_it821x.c 2015-05-06 12:05:42.000000000 -0500
+@@ -72,7 +72,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_jmicron.c linux-openelec/drivers/ata/pata_jmicron.c
+--- linux-3.14.36/drivers/ata/pata_jmicron.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_jmicron.c 2015-05-06 12:05:42.000000000 -0500
+@@ -10,7 +10,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_marvell.c linux-openelec/drivers/ata/pata_marvell.c
+--- linux-3.14.36/drivers/ata/pata_marvell.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_marvell.c 2015-05-06 12:05:42.000000000 -0500
+@@ -11,7 +11,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_mpiix.c linux-openelec/drivers/ata/pata_mpiix.c
+--- linux-3.14.36/drivers/ata/pata_mpiix.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_mpiix.c 2015-05-06 12:05:42.000000000 -0500
+@@ -28,7 +28,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_netcell.c linux-openelec/drivers/ata/pata_netcell.c
+--- linux-3.14.36/drivers/ata/pata_netcell.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_netcell.c 2015-05-06 12:05:42.000000000 -0500
+@@ -7,7 +7,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_ninja32.c linux-openelec/drivers/ata/pata_ninja32.c
+--- linux-3.14.36/drivers/ata/pata_ninja32.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_ninja32.c 2015-05-06 12:05:42.000000000 -0500
+@@ -37,7 +37,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_ns87410.c linux-openelec/drivers/ata/pata_ns87410.c
+--- linux-3.14.36/drivers/ata/pata_ns87410.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_ns87410.c 2015-05-06 12:05:42.000000000 -0500
+@@ -20,7 +20,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_ns87415.c linux-openelec/drivers/ata/pata_ns87415.c
+--- linux-3.14.36/drivers/ata/pata_ns87415.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_ns87415.c 2015-05-06 12:05:42.000000000 -0500
+@@ -25,7 +25,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_oldpiix.c linux-openelec/drivers/ata/pata_oldpiix.c
+--- linux-3.14.36/drivers/ata/pata_oldpiix.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_oldpiix.c 2015-05-06 12:05:42.000000000 -0500
+@@ -16,7 +16,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_opti.c linux-openelec/drivers/ata/pata_opti.c
+--- linux-3.14.36/drivers/ata/pata_opti.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_opti.c 2015-05-06 12:05:42.000000000 -0500
+@@ -26,7 +26,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_optidma.c linux-openelec/drivers/ata/pata_optidma.c
+--- linux-3.14.36/drivers/ata/pata_optidma.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_optidma.c 2015-05-06 12:05:42.000000000 -0500
+@@ -25,7 +25,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_pcmcia.c linux-openelec/drivers/ata/pata_pcmcia.c
+--- linux-3.14.36/drivers/ata/pata_pcmcia.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_pcmcia.c 2015-05-06 12:05:42.000000000 -0500
+@@ -26,7 +26,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_pdc2027x.c linux-openelec/drivers/ata/pata_pdc2027x.c
+--- linux-3.14.36/drivers/ata/pata_pdc2027x.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_pdc2027x.c 2015-05-06 12:05:42.000000000 -0500
+@@ -25,7 +25,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_pdc202xx_old.c linux-openelec/drivers/ata/pata_pdc202xx_old.c
+--- linux-3.14.36/drivers/ata/pata_pdc202xx_old.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_pdc202xx_old.c 2015-05-06 12:05:42.000000000 -0500
+@@ -15,7 +15,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_piccolo.c linux-openelec/drivers/ata/pata_piccolo.c
+--- linux-3.14.36/drivers/ata/pata_piccolo.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_piccolo.c 2015-05-06 12:05:42.000000000 -0500
+@@ -18,7 +18,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_platform.c linux-openelec/drivers/ata/pata_platform.c
+--- linux-3.14.36/drivers/ata/pata_platform.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_platform.c 2015-05-06 12:05:42.000000000 -0500
+@@ -13,7 +13,6 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <scsi/scsi_host.h>
+ #include <linux/ata.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_pxa.c linux-openelec/drivers/ata/pata_pxa.c
+--- linux-3.14.36/drivers/ata/pata_pxa.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_pxa.c 2015-05-06 12:05:42.000000000 -0500
+@@ -20,7 +20,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/ata.h>
+ #include <linux/libata.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_radisys.c linux-openelec/drivers/ata/pata_radisys.c
+--- linux-3.14.36/drivers/ata/pata_radisys.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_radisys.c 2015-05-06 12:05:42.000000000 -0500
+@@ -15,7 +15,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_rdc.c linux-openelec/drivers/ata/pata_rdc.c
+--- linux-3.14.36/drivers/ata/pata_rdc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_rdc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -24,7 +24,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_rz1000.c linux-openelec/drivers/ata/pata_rz1000.c
+--- linux-3.14.36/drivers/ata/pata_rz1000.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_rz1000.c 2015-05-06 12:05:42.000000000 -0500
+@@ -14,7 +14,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_sc1200.c linux-openelec/drivers/ata/pata_sc1200.c
+--- linux-3.14.36/drivers/ata/pata_sc1200.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_sc1200.c 2015-05-06 12:05:42.000000000 -0500
+@@ -32,7 +32,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_scc.c linux-openelec/drivers/ata/pata_scc.c
+--- linux-3.14.36/drivers/ata/pata_scc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_scc.c 2015-07-24 18:03:28.456842002 -0500
+@@ -35,7 +35,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_scc.c.orig linux-openelec/drivers/ata/pata_scc.c.orig
+--- linux-3.14.36/drivers/ata/pata_scc.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/ata/pata_scc.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1111 @@
++/*
++ * Support for IDE interfaces on Celleb platform
++ *
++ * (C) Copyright 2006 TOSHIBA CORPORATION
++ *
++ * This code is based on drivers/ata/ata_piix.c:
++ * Copyright 2003-2005 Red Hat Inc
++ * Copyright 2003-2005 Jeff Garzik
++ * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
++ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
++ * Copyright (C) 2003 Red Hat Inc
++ *
++ * and drivers/ata/ahci.c:
++ * Copyright 2004-2005 Red Hat, Inc.
++ *
++ * and drivers/ata/libata-core.c:
++ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
++ * Copyright 2003-2004 Jeff Garzik
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/blkdev.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <scsi/scsi_host.h>
++#include <linux/libata.h>
++
++#define DRV_NAME "pata_scc"
++#define DRV_VERSION "0.3"
++
++#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
++
++/* PCI BARs */
++#define SCC_CTRL_BAR 0
++#define SCC_BMID_BAR 1
++
++/* offset of CTRL registers */
++#define SCC_CTL_PIOSHT 0x000
++#define SCC_CTL_PIOCT 0x004
++#define SCC_CTL_MDMACT 0x008
++#define SCC_CTL_MCRCST 0x00C
++#define SCC_CTL_SDMACT 0x010
++#define SCC_CTL_SCRCST 0x014
++#define SCC_CTL_UDENVT 0x018
++#define SCC_CTL_TDVHSEL 0x020
++#define SCC_CTL_MODEREG 0x024
++#define SCC_CTL_ECMODE 0xF00
++#define SCC_CTL_MAEA0 0xF50
++#define SCC_CTL_MAEC0 0xF54
++#define SCC_CTL_CCKCTRL 0xFF0
++
++/* offset of BMID registers */
++#define SCC_DMA_CMD 0x000
++#define SCC_DMA_STATUS 0x004
++#define SCC_DMA_TABLE_OFS 0x008
++#define SCC_DMA_INTMASK 0x010
++#define SCC_DMA_INTST 0x014
++#define SCC_DMA_PTERADD 0x018
++#define SCC_REG_CMD_ADDR 0x020
++#define SCC_REG_DATA 0x000
++#define SCC_REG_ERR 0x004
++#define SCC_REG_FEATURE 0x004
++#define SCC_REG_NSECT 0x008
++#define SCC_REG_LBAL 0x00C
++#define SCC_REG_LBAM 0x010
++#define SCC_REG_LBAH 0x014
++#define SCC_REG_DEVICE 0x018
++#define SCC_REG_STATUS 0x01C
++#define SCC_REG_CMD 0x01C
++#define SCC_REG_ALTSTATUS 0x020
++
++/* register value */
++#define TDVHSEL_MASTER 0x00000001
++#define TDVHSEL_SLAVE 0x00000004
++
++#define MODE_JCUSFEN 0x00000080
++
++#define ECMODE_VALUE 0x01
++
++#define CCKCTRL_ATARESET 0x00040000
++#define CCKCTRL_BUFCNT 0x00020000
++#define CCKCTRL_CRST 0x00010000
++#define CCKCTRL_OCLKEN 0x00000100
++#define CCKCTRL_ATACLKOEN 0x00000002
++#define CCKCTRL_LCLKEN 0x00000001
++
++#define QCHCD_IOS_SS 0x00000001
++
++#define QCHSD_STPDIAG 0x00020000
++
++#define INTMASK_MSK 0xD1000012
++#define INTSTS_SERROR 0x80000000
++#define INTSTS_PRERR 0x40000000
++#define INTSTS_RERR 0x10000000
++#define INTSTS_ICERR 0x01000000
++#define INTSTS_BMSINT 0x00000010
++#define INTSTS_BMHE 0x00000008
++#define INTSTS_IOIRQS 0x00000004
++#define INTSTS_INTRQ 0x00000002
++#define INTSTS_ACTEINT 0x00000001
++
++
++/* PIO transfer mode table */
++/* JCHST */
++static const unsigned long JCHSTtbl[2][7] = {
++ {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
++ {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
++};
++
++/* JCHHT */
++static const unsigned long JCHHTtbl[2][7] = {
++ {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
++ {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
++};
++
++/* JCHCT */
++static const unsigned long JCHCTtbl[2][7] = {
++ {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
++ {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
++};
++
++/* DMA transfer mode table */
++/* JCHDCTM/JCHDCTS */
++static const unsigned long JCHDCTxtbl[2][7] = {
++ {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
++ {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
++};
++
++/* JCSTWTM/JCSTWTS */
++static const unsigned long JCSTWTxtbl[2][7] = {
++ {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
++ {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
++};
++
++/* JCTSS */
++static const unsigned long JCTSStbl[2][7] = {
++ {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
++ {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
++};
++
++/* JCENVT */
++static const unsigned long JCENVTtbl[2][7] = {
++ {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
++ {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
++};
++
++/* JCACTSELS/JCACTSELM */
++static const unsigned long JCACTSELtbl[2][7] = {
++ {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
++ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
++};
++
++static const struct pci_device_id scc_pci_tbl[] = {
++ { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0},
++ { } /* terminate list */
++};
++
++/**
++ * scc_set_piomode - Initialize host controller PATA PIO timings
++ * @ap: Port whose timings we are configuring
++ * @adev: um
++ *
++ * Set PIO mode for device.
++ *
++ * LOCKING:
++ * None (inherited from caller).
++ */
++
++static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
++{
++ unsigned int pio = adev->pio_mode - XFER_PIO_0;
++ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
++ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
++ void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
++ void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
++ unsigned long reg;
++ int offset;
++
++ reg = in_be32(cckctrl_port);
++ if (reg & CCKCTRL_ATACLKOEN)
++ offset = 1; /* 133MHz */
++ else
++ offset = 0; /* 100MHz */
++
++ reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
++ out_be32(piosht_port, reg);
++ reg = JCHCTtbl[offset][pio];
++ out_be32(pioct_port, reg);
++}
++
++/**
++ * scc_set_dmamode - Initialize host controller PATA DMA timings
++ * @ap: Port whose timings we are configuring
++ * @adev: um
++ *
++ * Set UDMA mode for device.
++ *
++ * LOCKING:
++ * None (inherited from caller).
++ */
++
++static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
++{
++ unsigned int udma = adev->dma_mode;
++ unsigned int is_slave = (adev->devno != 0);
++ u8 speed = udma;
++ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
++ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
++ void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
++ void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
++ void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
++ void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
++ void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
++ void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
++ int offset, idx;
++
++ if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
++ offset = 1; /* 133MHz */
++ else
++ offset = 0; /* 100MHz */
++
++ if (speed >= XFER_UDMA_0)
++ idx = speed - XFER_UDMA_0;
++ else
++ return;
++
++ if (is_slave) {
++ out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
++ out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
++ out_be32(tdvhsel_port,
++ (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
++ } else {
++ out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
++ out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
++ out_be32(tdvhsel_port,
++ (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
++ }
++ out_be32(udenvt_port,
++ JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
++}
++
++unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
++{
++ /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
++ if (adev->class == ATA_DEV_ATAPI &&
++ (mask & (0xE0 << ATA_SHIFT_UDMA))) {
++ printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
++ mask &= ~(0xE0 << ATA_SHIFT_UDMA);
++ }
++ return mask;
++}
++
++/**
++ * scc_tf_load - send taskfile registers to host controller
++ * @ap: Port to which output is sent
++ * @tf: ATA taskfile register set
++ *
++ * Note: Original code is ata_sff_tf_load().
++ */
++
++static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
++{
++ struct ata_ioports *ioaddr = &ap->ioaddr;
++ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
++
++ if (tf->ctl != ap->last_ctl) {
++ out_be32(ioaddr->ctl_addr, tf->ctl);
++ ap->last_ctl = tf->ctl;
++ ata_wait_idle(ap);
++ }
++
++ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
++ out_be32(ioaddr->feature_addr, tf->hob_feature);
++ out_be32(ioaddr->nsect_addr, tf->hob_nsect);
++ out_be32(ioaddr->lbal_addr, tf->hob_lbal);
++ out_be32(ioaddr->lbam_addr, tf->hob_lbam);
++ out_be32(ioaddr->lbah_addr, tf->hob_lbah);
++ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
++ tf->hob_feature,
++ tf->hob_nsect,
++ tf->hob_lbal,
++ tf->hob_lbam,
++ tf->hob_lbah);
++ }
++
++ if (is_addr) {
++ out_be32(ioaddr->feature_addr, tf->feature);
++ out_be32(ioaddr->nsect_addr, tf->nsect);
++ out_be32(ioaddr->lbal_addr, tf->lbal);
++ out_be32(ioaddr->lbam_addr, tf->lbam);
++ out_be32(ioaddr->lbah_addr, tf->lbah);
++ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
++ tf->feature,
++ tf->nsect,
++ tf->lbal,
++ tf->lbam,
++ tf->lbah);
++ }
++
++ if (tf->flags & ATA_TFLAG_DEVICE) {
++ out_be32(ioaddr->device_addr, tf->device);
++ VPRINTK("device 0x%X\n", tf->device);
++ }
++
++ ata_wait_idle(ap);
++}
++
++/**
++ * scc_check_status - Read device status reg & clear interrupt
++ * @ap: port where the device is
++ *
++ * Note: Original code is ata_check_status().
++ */
++
++static u8 scc_check_status (struct ata_port *ap)
++{
++ return in_be32(ap->ioaddr.status_addr);
++}
++
++/**
++ * scc_tf_read - input device's ATA taskfile shadow registers
++ * @ap: Port from which input is read
++ * @tf: ATA taskfile register set for storing input
++ *
++ * Note: Original code is ata_sff_tf_read().
++ */
++
++static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
++{
++ struct ata_ioports *ioaddr = &ap->ioaddr;
++
++ tf->command = scc_check_status(ap);
++ tf->feature = in_be32(ioaddr->error_addr);
++ tf->nsect = in_be32(ioaddr->nsect_addr);
++ tf->lbal = in_be32(ioaddr->lbal_addr);
++ tf->lbam = in_be32(ioaddr->lbam_addr);
++ tf->lbah = in_be32(ioaddr->lbah_addr);
++ tf->device = in_be32(ioaddr->device_addr);
++
++ if (tf->flags & ATA_TFLAG_LBA48) {
++ out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
++ tf->hob_feature = in_be32(ioaddr->error_addr);
++ tf->hob_nsect = in_be32(ioaddr->nsect_addr);
++ tf->hob_lbal = in_be32(ioaddr->lbal_addr);
++ tf->hob_lbam = in_be32(ioaddr->lbam_addr);
++ tf->hob_lbah = in_be32(ioaddr->lbah_addr);
++ out_be32(ioaddr->ctl_addr, tf->ctl);
++ ap->last_ctl = tf->ctl;
++ }
++}
++
++/**
++ * scc_exec_command - issue ATA command to host controller
++ * @ap: port to which command is being issued
++ * @tf: ATA taskfile register set
++ *
++ * Note: Original code is ata_sff_exec_command().
++ */
++
++static void scc_exec_command (struct ata_port *ap,
++ const struct ata_taskfile *tf)
++{
++ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
++
++ out_be32(ap->ioaddr.command_addr, tf->command);
++ ata_sff_pause(ap);
++}
++
++/**
++ * scc_check_altstatus - Read device alternate status reg
++ * @ap: port where the device is
++ */
++
++static u8 scc_check_altstatus (struct ata_port *ap)
++{
++ return in_be32(ap->ioaddr.altstatus_addr);
++}
++
++/**
++ * scc_dev_select - Select device 0/1 on ATA bus
++ * @ap: ATA channel to manipulate
++ * @device: ATA device (numbered from zero) to select
++ *
++ * Note: Original code is ata_sff_dev_select().
++ */
++
++static void scc_dev_select (struct ata_port *ap, unsigned int device)
++{
++ u8 tmp;
++
++ if (device == 0)
++ tmp = ATA_DEVICE_OBS;
++ else
++ tmp = ATA_DEVICE_OBS | ATA_DEV1;
++
++ out_be32(ap->ioaddr.device_addr, tmp);
++ ata_sff_pause(ap);
++}
++
++/**
++ * scc_set_devctl - Write device control reg
++ * @ap: port where the device is
++ * @ctl: value to write
++ */
++
++static void scc_set_devctl(struct ata_port *ap, u8 ctl)
++{
++ out_be32(ap->ioaddr.ctl_addr, ctl);
++}
++
++/**
++ * scc_bmdma_setup - Set up PCI IDE BMDMA transaction
++ * @qc: Info associated with this ATA transaction.
++ *
++ * Note: Original code is ata_bmdma_setup().
++ */
++
++static void scc_bmdma_setup (struct ata_queued_cmd *qc)
++{
++ struct ata_port *ap = qc->ap;
++ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
++ u8 dmactl;
++ void __iomem *mmio = ap->ioaddr.bmdma_addr;
++
++ /* load PRD table addr */
++ out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
++
++ /* specify data direction, triple-check start bit is clear */
++ dmactl = in_be32(mmio + SCC_DMA_CMD);
++ dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
++ if (!rw)
++ dmactl |= ATA_DMA_WR;
++ out_be32(mmio + SCC_DMA_CMD, dmactl);
++
++ /* issue r/w command */
++ ap->ops->sff_exec_command(ap, &qc->tf);
++}
++
++/**
++ * scc_bmdma_start - Start a PCI IDE BMDMA transaction
++ * @qc: Info associated with this ATA transaction.
++ *
++ * Note: Original code is ata_bmdma_start().
++ */
++
++static void scc_bmdma_start (struct ata_queued_cmd *qc)
++{
++ struct ata_port *ap = qc->ap;
++ u8 dmactl;
++ void __iomem *mmio = ap->ioaddr.bmdma_addr;
++
++ /* start host DMA transaction */
++ dmactl = in_be32(mmio + SCC_DMA_CMD);
++ out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
++}
++
++/**
++ * scc_devchk - PATA device presence detection
++ * @ap: ATA channel to examine
++ * @device: Device to examine (starting at zero)
++ *
++ * Note: Original code is ata_devchk().
++ */
++
++static unsigned int scc_devchk (struct ata_port *ap,
++ unsigned int device)
++{
++ struct ata_ioports *ioaddr = &ap->ioaddr;
++ u8 nsect, lbal;
++
++ ap->ops->sff_dev_select(ap, device);
++
++ out_be32(ioaddr->nsect_addr, 0x55);
++ out_be32(ioaddr->lbal_addr, 0xaa);
++
++ out_be32(ioaddr->nsect_addr, 0xaa);
++ out_be32(ioaddr->lbal_addr, 0x55);
++
++ out_be32(ioaddr->nsect_addr, 0x55);
++ out_be32(ioaddr->lbal_addr, 0xaa);
++
++ nsect = in_be32(ioaddr->nsect_addr);
++ lbal = in_be32(ioaddr->lbal_addr);
++
++ if ((nsect == 0x55) && (lbal == 0xaa))
++ return 1; /* we found a device */
++
++ return 0; /* nothing found */
++}
++
++/**
++ * scc_wait_after_reset - wait for devices to become ready after reset
++ *
++ * Note: Original code is ata_sff_wait_after_reset
++ */
++
++static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
++ unsigned long deadline)
++{
++ struct ata_port *ap = link->ap;
++ struct ata_ioports *ioaddr = &ap->ioaddr;
++ unsigned int dev0 = devmask & (1 << 0);
++ unsigned int dev1 = devmask & (1 << 1);
++ int rc, ret = 0;
++
++ /* Spec mandates ">= 2ms" before checking status. We wait
++ * 150ms, because that was the magic delay used for ATAPI
++ * devices in Hale Landis's ATADRVR, for the period of time
++ * between when the ATA command register is written, and then
++ * status is checked. Because waiting for "a while" before
++ * checking status is fine, post SRST, we perform this magic
++ * delay here as well.
++ *
++ * Old drivers/ide uses the 2mS rule and then waits for ready.
++ */
++ ata_msleep(ap, 150);
++
++ /* always check readiness of the master device */
++ rc = ata_sff_wait_ready(link, deadline);
++ /* -ENODEV means the odd clown forgot the D7 pulldown resistor
++ * and TF status is 0xff, bail out on it too.
++ */
++ if (rc)
++ return rc;
++
++ /* if device 1 was found in ata_devchk, wait for register
++ * access briefly, then wait for BSY to clear.
++ */
++ if (dev1) {
++ int i;
++
++ ap->ops->sff_dev_select(ap, 1);
++
++ /* Wait for register access. Some ATAPI devices fail
++ * to set nsect/lbal after reset, so don't waste too
++ * much time on it. We're gonna wait for !BSY anyway.
++ */
++ for (i = 0; i < 2; i++) {
++ u8 nsect, lbal;
++
++ nsect = in_be32(ioaddr->nsect_addr);
++ lbal = in_be32(ioaddr->lbal_addr);
++ if ((nsect == 1) && (lbal == 1))
++ break;
++ ata_msleep(ap, 50); /* give drive a breather */
++ }
++
++ rc = ata_sff_wait_ready(link, deadline);
++ if (rc) {
++ if (rc != -ENODEV)
++ return rc;
++ ret = rc;
++ }
++ }
++
++ /* is all this really necessary? */
++ ap->ops->sff_dev_select(ap, 0);
++ if (dev1)
++ ap->ops->sff_dev_select(ap, 1);
++ if (dev0)
++ ap->ops->sff_dev_select(ap, 0);
++
++ return ret;
++}
++
++/**
++ * scc_bus_softreset - PATA device software reset
++ *
++ * Note: Original code is ata_bus_softreset().
++ */
++
++static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
++ unsigned long deadline)
++{
++ struct ata_ioports *ioaddr = &ap->ioaddr;
++
++ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
++
++ /* software reset. causes dev0 to be selected */
++ out_be32(ioaddr->ctl_addr, ap->ctl);
++ udelay(20);
++ out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
++ udelay(20);
++ out_be32(ioaddr->ctl_addr, ap->ctl);
++
++ scc_wait_after_reset(&ap->link, devmask, deadline);
++
++ return 0;
++}
++
++/**
++ * scc_softreset - reset host port via ATA SRST
++ * @ap: port to reset
++ * @classes: resulting classes of attached devices
++ * @deadline: deadline jiffies for the operation
++ *
++ * Note: Original code is ata_sff_softreset().
++ */
++
++static int scc_softreset(struct ata_link *link, unsigned int *classes,
++ unsigned long deadline)
++{
++ struct ata_port *ap = link->ap;
++ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
++ unsigned int devmask = 0, err_mask;
++ u8 err;
++
++ DPRINTK("ENTER\n");
++
++ /* determine if device 0/1 are present */
++ if (scc_devchk(ap, 0))
++ devmask |= (1 << 0);
++ if (slave_possible && scc_devchk(ap, 1))
++ devmask |= (1 << 1);
++
++ /* select device 0 again */
++ ap->ops->sff_dev_select(ap, 0);
++
++ /* issue bus reset */
++ DPRINTK("about to softreset, devmask=%x\n", devmask);
++ err_mask = scc_bus_softreset(ap, devmask, deadline);
++ if (err_mask) {
++ ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
++ return -EIO;
++ }
++
++ /* determine by signature whether we have ATA or ATAPI devices */
++ classes[0] = ata_sff_dev_classify(&ap->link.device[0],
++ devmask & (1 << 0), &err);
++ if (slave_possible && err != 0x81)
++ classes[1] = ata_sff_dev_classify(&ap->link.device[1],
++ devmask & (1 << 1), &err);
++
++ DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
++ return 0;
++}
++
++/**
++ * scc_bmdma_stop - Stop PCI IDE BMDMA transfer
++ * @qc: Command we are ending DMA for
++ */
++
++static void scc_bmdma_stop (struct ata_queued_cmd *qc)
++{
++ struct ata_port *ap = qc->ap;
++ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
++ void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
++ u32 reg;
++
++ while (1) {
++ reg = in_be32(bmid_base + SCC_DMA_INTST);
++
++ if (reg & INTSTS_SERROR) {
++ printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
++ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
++ out_be32(bmid_base + SCC_DMA_CMD,
++ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
++ continue;
++ }
++
++ if (reg & INTSTS_PRERR) {
++ u32 maea0, maec0;
++ maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
++ maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
++ printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
++ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
++ out_be32(bmid_base + SCC_DMA_CMD,
++ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
++ continue;
++ }
++
++ if (reg & INTSTS_RERR) {
++ printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
++ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
++ out_be32(bmid_base + SCC_DMA_CMD,
++ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
++ continue;
++ }
++
++ if (reg & INTSTS_ICERR) {
++ out_be32(bmid_base + SCC_DMA_CMD,
++ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
++ printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
++ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
++ continue;
++ }
++
++ if (reg & INTSTS_BMSINT) {
++ unsigned int classes;
++ unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
++ printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
++ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
++ /* TBD: SW reset */
++ scc_softreset(&ap->link, &classes, deadline);
++ continue;
++ }
++
++ if (reg & INTSTS_BMHE) {
++ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
++ continue;
++ }
++
++ if (reg & INTSTS_ACTEINT) {
++ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
++ continue;
++ }
++
++ if (reg & INTSTS_IOIRQS) {
++ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
++ continue;
++ }
++ break;
++ }
++
++ /* clear start/stop bit */
++ out_be32(bmid_base + SCC_DMA_CMD,
++ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
++
++ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
++ ata_sff_dma_pause(ap); /* dummy read */
++}
++
++/**
++ * scc_bmdma_status - Read PCI IDE BMDMA status
++ * @ap: Port associated with this ATA transaction.
++ */
++
++static u8 scc_bmdma_status (struct ata_port *ap)
++{
++ void __iomem *mmio = ap->ioaddr.bmdma_addr;
++ u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
++ u32 int_status = in_be32(mmio + SCC_DMA_INTST);
++ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
++ static int retry = 0;
++
++ /* return if IOS_SS is cleared */
++ if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
++ return host_stat;
++
++ /* errata A252,A308 workaround: Step4 */
++ if ((scc_check_altstatus(ap) & ATA_ERR)
++ && (int_status & INTSTS_INTRQ))
++ return (host_stat | ATA_DMA_INTR);
++
++ /* errata A308 workaround Step5 */
++ if (int_status & INTSTS_IOIRQS) {
++ host_stat |= ATA_DMA_INTR;
++
++ /* We don't check ATAPI DMA because it is limited to UDMA4 */
++ if ((qc->tf.protocol == ATA_PROT_DMA &&
++ qc->dev->xfer_mode > XFER_UDMA_4)) {
++ if (!(int_status & INTSTS_ACTEINT)) {
++ printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
++ ap->print_id);
++ host_stat |= ATA_DMA_ERR;
++ if (retry++)
++ ap->udma_mask &= ~(1 << qc->dev->xfer_mode);
++ } else
++ retry = 0;
++ }
++ }
++
++ return host_stat;
++}
++
++/**
++ * scc_data_xfer - Transfer data by PIO
++ * @dev: device for this I/O
++ * @buf: data buffer
++ * @buflen: buffer length
++ * @rw: read/write
++ *
++ * Note: Original code is ata_sff_data_xfer().
++ */
++
++static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
++ unsigned int buflen, int rw)
++{
++ struct ata_port *ap = dev->link->ap;
++ unsigned int words = buflen >> 1;
++ unsigned int i;
++ __le16 *buf16 = (__le16 *) buf;
++ void __iomem *mmio = ap->ioaddr.data_addr;
++
++ /* Transfer multiple of 2 bytes */
++ if (rw == READ)
++ for (i = 0; i < words; i++)
++ buf16[i] = cpu_to_le16(in_be32(mmio));
++ else
++ for (i = 0; i < words; i++)
++ out_be32(mmio, le16_to_cpu(buf16[i]));
++
++ /* Transfer trailing 1 byte, if any. */
++ if (unlikely(buflen & 0x01)) {
++ __le16 align_buf[1] = { 0 };
++ unsigned char *trailing_buf = buf + buflen - 1;
++
++ if (rw == READ) {
++ align_buf[0] = cpu_to_le16(in_be32(mmio));
++ memcpy(trailing_buf, align_buf, 1);
++ } else {
++ memcpy(align_buf, trailing_buf, 1);
++ out_be32(mmio, le16_to_cpu(align_buf[0]));
++ }
++ words++;
++ }
++
++ return words << 1;
++}
++
++/**
++ * scc_postreset - standard postreset callback
++ * @ap: the target ata_port
++ * @classes: classes of attached devices
++ *
++ * Note: Original code is ata_sff_postreset().
++ */
++
++static void scc_postreset(struct ata_link *link, unsigned int *classes)
++{
++ struct ata_port *ap = link->ap;
++
++ DPRINTK("ENTER\n");
++
++ /* is double-select really necessary? */
++ if (classes[0] != ATA_DEV_NONE)
++ ap->ops->sff_dev_select(ap, 1);
++ if (classes[1] != ATA_DEV_NONE)
++ ap->ops->sff_dev_select(ap, 0);
++
++ /* bail out if no device is present */
++ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
++ DPRINTK("EXIT, no device\n");
++ return;
++ }
++
++ /* set up device control */
++ out_be32(ap->ioaddr.ctl_addr, ap->ctl);
++
++ DPRINTK("EXIT\n");
++}
++
++/**
++ * scc_irq_clear - Clear PCI IDE BMDMA interrupt.
++ * @ap: Port associated with this ATA transaction.
++ *
++ * Note: Original code is ata_bmdma_irq_clear().
++ */
++
++static void scc_irq_clear (struct ata_port *ap)
++{
++ void __iomem *mmio = ap->ioaddr.bmdma_addr;
++
++ if (!mmio)
++ return;
++
++ out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
++}
++
++/**
++ * scc_port_start - Set port up for dma.
++ * @ap: Port to initialize
++ *
++ * Allocate space for PRD table using ata_bmdma_port_start().
++ * Set PRD table address for PTERADD. (PRD Transfer End Read)
++ */
++
++static int scc_port_start (struct ata_port *ap)
++{
++ void __iomem *mmio = ap->ioaddr.bmdma_addr;
++ int rc;
++
++ rc = ata_bmdma_port_start(ap);
++ if (rc)
++ return rc;
++
++ out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
++ return 0;
++}
++
++/**
++ * scc_port_stop - Undo scc_port_start()
++ * @ap: Port to shut down
++ *
++ * Reset PTERADD.
++ */
++
++static void scc_port_stop (struct ata_port *ap)
++{
++ void __iomem *mmio = ap->ioaddr.bmdma_addr;
++
++ out_be32(mmio + SCC_DMA_PTERADD, 0);
++}
++
++static struct scsi_host_template scc_sht = {
++ ATA_BMDMA_SHT(DRV_NAME),
++};
++
++static struct ata_port_operations scc_pata_ops = {
++ .inherits = &ata_bmdma_port_ops,
++
++ .set_piomode = scc_set_piomode,
++ .set_dmamode = scc_set_dmamode,
++ .mode_filter = scc_mode_filter,
++
++ .sff_tf_load = scc_tf_load,
++ .sff_tf_read = scc_tf_read,
++ .sff_exec_command = scc_exec_command,
++ .sff_check_status = scc_check_status,
++ .sff_check_altstatus = scc_check_altstatus,
++ .sff_dev_select = scc_dev_select,
++ .sff_set_devctl = scc_set_devctl,
++
++ .bmdma_setup = scc_bmdma_setup,
++ .bmdma_start = scc_bmdma_start,
++ .bmdma_stop = scc_bmdma_stop,
++ .bmdma_status = scc_bmdma_status,
++ .sff_data_xfer = scc_data_xfer,
++
++ .cable_detect = ata_cable_80wire,
++ .softreset = scc_softreset,
++ .postreset = scc_postreset,
++
++ .sff_irq_clear = scc_irq_clear,
++
++ .port_start = scc_port_start,
++ .port_stop = scc_port_stop,
++};
++
++static struct ata_port_info scc_port_info[] = {
++ {
++ .flags = ATA_FLAG_SLAVE_POSS,
++ .pio_mask = ATA_PIO4,
++ /* No MWDMA */
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &scc_pata_ops,
++ },
++};
++
++/**
++ * scc_reset_controller - initialize SCC PATA controller.
++ */
++
++static int scc_reset_controller(struct ata_host *host)
++{
++ void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR];
++ void __iomem *bmid_base = host->iomap[SCC_BMID_BAR];
++ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
++ void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
++ void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
++ void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
++ void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
++ u32 reg = 0;
++
++ out_be32(cckctrl_port, reg);
++ reg |= CCKCTRL_ATACLKOEN;
++ out_be32(cckctrl_port, reg);
++ reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
++ out_be32(cckctrl_port, reg);
++ reg |= CCKCTRL_CRST;
++ out_be32(cckctrl_port, reg);
++
++ for (;;) {
++ reg = in_be32(cckctrl_port);
++ if (reg & CCKCTRL_CRST)
++ break;
++ udelay(5000);
++ }
++
++ reg |= CCKCTRL_ATARESET;
++ out_be32(cckctrl_port, reg);
++ out_be32(ecmode_port, ECMODE_VALUE);
++ out_be32(mode_port, MODE_JCUSFEN);
++ out_be32(intmask_port, INTMASK_MSK);
++
++ if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
++ printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++/**
++ * scc_setup_ports - initialize ioaddr with SCC PATA port offsets.
++ * @ioaddr: IO address structure to be initialized
++ * @base: base address of BMID region
++ */
++
++static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
++{
++ ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
++ ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
++ ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
++ ioaddr->bmdma_addr = base;
++ ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
++ ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
++ ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
++ ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
++ ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
++ ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
++ ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
++ ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
++ ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
++ ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
++}
++
++static int scc_host_init(struct ata_host *host)
++{
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++ int rc;
++
++ rc = scc_reset_controller(host);
++ if (rc)
++ return rc;
++
++ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
++ if (rc)
++ return rc;
++ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
++ if (rc)
++ return rc;
++
++ scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]);
++
++ pci_set_master(pdev);
++
++ return 0;
++}
++
++/**
++ * scc_init_one - Register SCC PATA device with kernel services
++ * @pdev: PCI device to register
++ * @ent: Entry in scc_pci_tbl matching with @pdev
++ *
++ * LOCKING:
++ * Inherited from PCI layer (may sleep).
++ *
++ * RETURNS:
++ * Zero on success, or -ERRNO value.
++ */
++
++static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++ unsigned int board_idx = (unsigned int) ent->driver_data;
++ const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
++ struct ata_host *host;
++ int rc;
++
++ ata_print_version_once(&pdev->dev, DRV_VERSION);
++
++ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
++ if (!host)
++ return -ENOMEM;
++
++ rc = pcim_enable_device(pdev);
++ if (rc)
++ return rc;
++
++ rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
++ if (rc == -EBUSY)
++ pcim_pin_device(pdev);
++ if (rc)
++ return rc;
++ host->iomap = pcim_iomap_table(pdev);
++
++ ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl");
++ ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid");
++
++ rc = scc_host_init(host);
++ if (rc)
++ return rc;
++
++ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
++ IRQF_SHARED, &scc_sht);
++}
++
++static struct pci_driver scc_pci_driver = {
++ .name = DRV_NAME,
++ .id_table = scc_pci_tbl,
++ .probe = scc_init_one,
++ .remove = ata_pci_remove_one,
++#ifdef CONFIG_PM
++ .suspend = ata_pci_device_suspend,
++ .resume = ata_pci_device_resume,
++#endif
++};
++
++module_pci_driver(scc_pci_driver);
++
++MODULE_AUTHOR("Toshiba corp");
++MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
++MODULE_VERSION(DRV_VERSION);
+diff -Nur linux-3.14.36/drivers/ata/pata_sch.c linux-openelec/drivers/ata/pata_sch.c
+--- linux-3.14.36/drivers/ata/pata_sch.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_sch.c 2015-05-06 12:05:42.000000000 -0500
+@@ -27,7 +27,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_serverworks.c linux-openelec/drivers/ata/pata_serverworks.c
+--- linux-3.14.36/drivers/ata/pata_serverworks.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_serverworks.c 2015-07-24 18:03:28.780842002 -0500
+@@ -34,7 +34,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_serverworks.c.orig linux-openelec/drivers/ata/pata_serverworks.c.orig
+--- linux-3.14.36/drivers/ata/pata_serverworks.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/ata/pata_serverworks.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,483 @@
++/*
++ * pata_serverworks.c - Serverworks PATA for new ATA layer
++ * (C) 2005 Red Hat Inc
++ * (C) 2010 Bartlomiej Zolnierkiewicz
++ *
++ * based upon
++ *
++ * serverworks.c
++ *
++ * Copyright (C) 1998-2000 Michel Aubry
++ * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
++ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
++ * Portions copyright (c) 2001 Sun Microsystems
++ *
++ *
++ * RCC/ServerWorks IDE driver for Linux
++ *
++ * OSB4: `Open South Bridge' IDE Interface (fn 1)
++ * supports UDMA mode 2 (33 MB/s)
++ *
++ * CSB5: `Champion South Bridge' IDE Interface (fn 1)
++ * all revisions support UDMA mode 4 (66 MB/s)
++ * revision A2.0 and up support UDMA mode 5 (100 MB/s)
++ *
++ * *** The CSB5 does not provide ANY register ***
++ * *** to detect 80-conductor cable presence. ***
++ *
++ * CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
++ *
++ * Documentation:
++ * Available under NDA only. Errata info very hard to get.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/blkdev.h>
++#include <linux/delay.h>
++#include <scsi/scsi_host.h>
++#include <linux/libata.h>
++
++#define DRV_NAME "pata_serverworks"
++#define DRV_VERSION "0.4.3"
++
++#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
++#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
++
++/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
++ * can overrun their FIFOs when used with the CSB5 */
++
++static const char *csb_bad_ata100[] = {
++ "ST320011A",
++ "ST340016A",
++ "ST360021A",
++ "ST380021A",
++ NULL
++};
++
++/**
++ * oem_cable - Dell/Sun serverworks cable detection
++ * @ap: ATA port to do cable detect
++ *
++ * Dell PowerEdge and Sun Cobalt 'Alpine' hide the 40/80 pin select
++ * for their interfaces in the top two bits of the subsystem ID.
++ */
++
++static int oem_cable(struct ata_port *ap)
++{
++ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
++
++ if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
++ return ATA_CBL_PATA80;
++ return ATA_CBL_PATA40;
++}
++
++struct sv_cable_table {
++ int device;
++ int subvendor;
++ int (*cable_detect)(struct ata_port *ap);
++};
++
++static struct sv_cable_table cable_detect[] = {
++ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, oem_cable },
++ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, oem_cable },
++ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, oem_cable },
++ { PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, ata_cable_40wire },
++ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, ata_cable_unknown },
++ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, ata_cable_unknown },
++ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, ata_cable_unknown },
++ { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, ata_cable_unknown },
++ { }
++};
++
++/**
++ * serverworks_cable_detect - cable detection
++ * @ap: ATA port
++ *
++ * Perform cable detection according to the device and subvendor
++ * identifications
++ */
++
++static int serverworks_cable_detect(struct ata_port *ap)
++{
++ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
++ struct sv_cable_table *cb = cable_detect;
++
++ while(cb->device) {
++ if (cb->device == pdev->device &&
++ (cb->subvendor == pdev->subsystem_vendor ||
++ cb->subvendor == PCI_ANY_ID)) {
++ return cb->cable_detect(ap);
++ }
++ cb++;
++ }
++
++ BUG();
++ return -1; /* kill compiler warning */
++}
++
++/**
++ * serverworks_is_csb - Check for CSB or OSB
++ * @pdev: PCI device to check
++ *
++ * Returns true if the device being checked is known to be a CSB
++ * series device.
++ */
++
++static u8 serverworks_is_csb(struct pci_dev *pdev)
++{
++ switch (pdev->device) {
++ case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
++ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
++ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
++ case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
++ return 1;
++ default:
++ break;
++ }
++ return 0;
++}
++
++/**
++ * serverworks_osb4_filter - mode selection filter
++ * @adev: ATA device
++ * @mask: Mask of proposed modes
++ *
++ * Filter the offered modes for the device to apply controller
++ * specific rules. OSB4 requires no UDMA for disks due to a FIFO
++ * bug we hit.
++ */
++
++static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned long mask)
++{
++ if (adev->class == ATA_DEV_ATA)
++ mask &= ~ATA_MASK_UDMA;
++ return mask;
++}
++
++
++/**
++ * serverworks_csb_filter - mode selection filter
++ * @adev: ATA device
++ * @mask: Mask of proposed modes
++ *
++ * Check the blacklist and disable UDMA5 if matched
++ */
++
++static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned long mask)
++{
++ const char *p;
++ char model_num[ATA_ID_PROD_LEN + 1];
++ int i;
++
++ /* Disk, UDMA */
++ if (adev->class != ATA_DEV_ATA)
++ return mask;
++
++ /* Actually do need to check */
++ ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
++
++ for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
++ if (!strcmp(p, model_num))
++ mask &= ~(0xE0 << ATA_SHIFT_UDMA);
++ }
++ return mask;
++}
++
++/**
++ * serverworks_set_piomode - set initial PIO mode data
++ * @ap: ATA interface
++ * @adev: ATA device
++ *
++ * Program the OSB4/CSB5 timing registers for PIO. The PIO register
++ * load is done as a simple lookup.
++ */
++static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
++{
++ static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
++ int offset = 1 + 2 * ap->port_no - adev->devno;
++ int devbits = (2 * ap->port_no + adev->devno) * 4;
++ u16 csb5_pio;
++ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
++ int pio = adev->pio_mode - XFER_PIO_0;
++
++ pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);
++
++ /* The OSB4 just requires the timing but the CSB series want the
++ mode number as well */
++ if (serverworks_is_csb(pdev)) {
++ pci_read_config_word(pdev, 0x4A, &csb5_pio);
++ csb5_pio &= ~(0x0F << devbits);
++ pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
++ }
++}
++
++/**
++ * serverworks_set_dmamode - set initial DMA mode data
++ * @ap: ATA interface
++ * @adev: ATA device
++ *
++ * Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
++ * chipset. The MWDMA mode values are pulled from a lookup table
++ * while the chipset uses mode number for UDMA.
++ */
++
++static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
++{
++ static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
++ int offset = 1 + 2 * ap->port_no - adev->devno;
++ int devbits = 2 * ap->port_no + adev->devno;
++ u8 ultra;
++ u8 ultra_cfg;
++ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
++
++ pci_read_config_byte(pdev, 0x54, &ultra_cfg);
++ pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
++ ultra &= ~(0x0F << (adev->devno * 4));
++
++ if (adev->dma_mode >= XFER_UDMA_0) {
++ pci_write_config_byte(pdev, 0x44 + offset, 0x20);
++
++ ultra |= (adev->dma_mode - XFER_UDMA_0)
++ << (adev->devno * 4);
++ ultra_cfg |= (1 << devbits);
++ } else {
++ pci_write_config_byte(pdev, 0x44 + offset,
++ dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
++ ultra_cfg &= ~(1 << devbits);
++ }
++ pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
++ pci_write_config_byte(pdev, 0x54, ultra_cfg);
++}
++
++static struct scsi_host_template serverworks_sht = {
++ ATA_BMDMA_SHT(DRV_NAME),
++};
++
++static struct ata_port_operations serverworks_osb4_port_ops = {
++ .inherits = &ata_bmdma_port_ops,
++ .cable_detect = serverworks_cable_detect,
++ .mode_filter = serverworks_osb4_filter,
++ .set_piomode = serverworks_set_piomode,
++ .set_dmamode = serverworks_set_dmamode,
++};
++
++static struct ata_port_operations serverworks_csb_port_ops = {
++ .inherits = &serverworks_osb4_port_ops,
++ .mode_filter = serverworks_csb_filter,
++};
++
++static int serverworks_fixup_osb4(struct pci_dev *pdev)
++{
++ u32 reg;
++ struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
++ PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
++ if (isa_dev) {
++ pci_read_config_dword(isa_dev, 0x64, &reg);
++ reg &= ~0x00002000; /* disable 600ns interrupt mask */
++ if (!(reg & 0x00004000))
++ printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
++ reg |= 0x00004000; /* enable UDMA/33 support */
++ pci_write_config_dword(isa_dev, 0x64, reg);
++ pci_dev_put(isa_dev);
++ return 0;
++ }
++ printk(KERN_WARNING DRV_NAME ": Unable to find bridge.\n");
++ return -ENODEV;
++}
++
++static int serverworks_fixup_csb(struct pci_dev *pdev)
++{
++ u8 btr;
++
++ /* Third Channel Test */
++ if (!(PCI_FUNC(pdev->devfn) & 1)) {
++ struct pci_dev * findev = NULL;
++ u32 reg4c = 0;
++ findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
++ PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
++ if (findev) {
++ pci_read_config_dword(findev, 0x4C, &reg4c);
++ reg4c &= ~0x000007FF;
++ reg4c |= 0x00000040;
++ reg4c |= 0x00000020;
++ pci_write_config_dword(findev, 0x4C, reg4c);
++ pci_dev_put(findev);
++ }
++ } else {
++ struct pci_dev * findev = NULL;
++ u8 reg41 = 0;
++
++ findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
++ PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
++ if (findev) {
++ pci_read_config_byte(findev, 0x41, &reg41);
++ reg41 &= ~0x40;
++ pci_write_config_byte(findev, 0x41, reg41);
++ pci_dev_put(findev);
++ }
++ }
++ /* setup the UDMA Control register
++ *
++ * 1. clear bit 6 to enable DMA
++ * 2. enable DMA modes with bits 0-1
++ * 00 : legacy
++ * 01 : udma2
++ * 10 : udma2/udma4
++ * 11 : udma2/udma4/udma5
++ */
++ pci_read_config_byte(pdev, 0x5A, &btr);
++ btr &= ~0x40;
++ if (!(PCI_FUNC(pdev->devfn) & 1))
++ btr |= 0x2;
++ else
++ btr |= (pdev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
++ pci_write_config_byte(pdev, 0x5A, btr);
++
++ return btr;
++}
++
++static void serverworks_fixup_ht1000(struct pci_dev *pdev)
++{
++ u8 btr;
++ /* Setup HT1000 SouthBridge Controller - Single Channel Only */
++ pci_read_config_byte(pdev, 0x5A, &btr);
++ btr &= ~0x40;
++ btr |= 0x3;
++ pci_write_config_byte(pdev, 0x5A, btr);
++}
++
++static int serverworks_fixup(struct pci_dev *pdev)
++{
++ int rc = 0;
++
++ /* Force master latency timer to 64 PCI clocks */
++ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
++
++ switch (pdev->device) {
++ case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
++ rc = serverworks_fixup_osb4(pdev);
++ break;
++ case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
++ ata_pci_bmdma_clear_simplex(pdev);
++ /* fall through */
++ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
++ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
++ rc = serverworks_fixup_csb(pdev);
++ break;
++ case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
++ serverworks_fixup_ht1000(pdev);
++ break;
++ }
++
++ return rc;
++}
++
++static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ static const struct ata_port_info info[4] = {
++ { /* OSB4 */
++ .flags = ATA_FLAG_SLAVE_POSS,
++ .pio_mask = ATA_PIO4,
++ .mwdma_mask = ATA_MWDMA2,
++ .udma_mask = ATA_UDMA2,
++ .port_ops = &serverworks_osb4_port_ops
++ }, { /* OSB4 no UDMA */
++ .flags = ATA_FLAG_SLAVE_POSS,
++ .pio_mask = ATA_PIO4,
++ .mwdma_mask = ATA_MWDMA2,
++ /* No UDMA */
++ .port_ops = &serverworks_osb4_port_ops
++ }, { /* CSB5 */
++ .flags = ATA_FLAG_SLAVE_POSS,
++ .pio_mask = ATA_PIO4,
++ .mwdma_mask = ATA_MWDMA2,
++ .udma_mask = ATA_UDMA4,
++ .port_ops = &serverworks_csb_port_ops
++ }, { /* CSB5 - later revisions*/
++ .flags = ATA_FLAG_SLAVE_POSS,
++ .pio_mask = ATA_PIO4,
++ .mwdma_mask = ATA_MWDMA2,
++ .udma_mask = ATA_UDMA5,
++ .port_ops = &serverworks_csb_port_ops
++ }
++ };
++ const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
++ int rc;
++
++ rc = pcim_enable_device(pdev);
++ if (rc)
++ return rc;
++
++ rc = serverworks_fixup(pdev);
++
++ /* OSB4 : South Bridge and IDE */
++ if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
++ /* Select non UDMA capable OSB4 if we can't do fixups */
++ if (rc < 0)
++ ppi[0] = &info[1];
++ }
++ /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
++ else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
++ (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
++ (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
++
++ /* If the returned btr is the newer revision then
++ select the right info block */
++ if (rc == 3)
++ ppi[0] = &info[3];
++
++ /* Is this the 3rd channel CSB6 IDE ? */
++ if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
++ ppi[1] = &ata_dummy_port_info;
++ }
++
++ return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
++}
++
++#ifdef CONFIG_PM
++static int serverworks_reinit_one(struct pci_dev *pdev)
++{
++ struct ata_host *host = pci_get_drvdata(pdev);
++ int rc;
++
++ rc = ata_pci_device_do_resume(pdev);
++ if (rc)
++ return rc;
++
++ (void)serverworks_fixup(pdev);
++
++ ata_host_resume(host);
++ return 0;
++}
++#endif
++
++static const struct pci_device_id serverworks[] = {
++ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
++ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
++ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
++ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
++ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
++
++ { },
++};
++
++static struct pci_driver serverworks_pci_driver = {
++ .name = DRV_NAME,
++ .id_table = serverworks,
++ .probe = serverworks_init_one,
++ .remove = ata_pci_remove_one,
++#ifdef CONFIG_PM
++ .suspend = ata_pci_device_suspend,
++ .resume = serverworks_reinit_one,
++#endif
++};
++
++module_pci_driver(serverworks_pci_driver);
++
++MODULE_AUTHOR("Alan Cox");
++MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(pci, serverworks);
++MODULE_VERSION(DRV_VERSION);
+diff -Nur linux-3.14.36/drivers/ata/pata_sil680.c linux-openelec/drivers/ata/pata_sil680.c
+--- linux-3.14.36/drivers/ata/pata_sil680.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_sil680.c 2015-05-06 12:05:42.000000000 -0500
+@@ -25,7 +25,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_sis.c linux-openelec/drivers/ata/pata_sis.c
+--- linux-3.14.36/drivers/ata/pata_sis.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_sis.c 2015-05-06 12:05:42.000000000 -0500
+@@ -26,7 +26,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_sl82c105.c linux-openelec/drivers/ata/pata_sl82c105.c
+--- linux-3.14.36/drivers/ata/pata_sl82c105.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_sl82c105.c 2015-05-06 12:05:42.000000000 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_triflex.c linux-openelec/drivers/ata/pata_triflex.c
+--- linux-3.14.36/drivers/ata/pata_triflex.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_triflex.c 2015-05-06 12:05:42.000000000 -0500
+@@ -36,7 +36,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.36/drivers/ata/pata_via.c linux-openelec/drivers/ata/pata_via.c
+--- linux-3.14.36/drivers/ata/pata_via.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pata_via.c 2015-05-06 12:05:42.000000000 -0500
+@@ -55,7 +55,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/gfp.h>
+diff -Nur linux-3.14.36/drivers/ata/pdc_adma.c linux-openelec/drivers/ata/pdc_adma.c
+--- linux-3.14.36/drivers/ata/pdc_adma.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/pdc_adma.c 2015-05-06 12:05:42.000000000 -0500
+@@ -36,7 +36,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_dwc_460ex.c linux-openelec/drivers/ata/sata_dwc_460ex.c
+--- linux-3.14.36/drivers/ata/sata_dwc_460ex.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_dwc_460ex.c 2015-07-24 18:03:29.512842002 -0500
+@@ -29,7 +29,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/device.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_dwc_460ex.c.orig linux-openelec/drivers/ata/sata_dwc_460ex.c.orig
+--- linux-3.14.36/drivers/ata/sata_dwc_460ex.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/ata/sata_dwc_460ex.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1823 @@
++/*
++ * drivers/ata/sata_dwc_460ex.c
++ *
++ * Synopsys DesignWare Cores (DWC) SATA host driver
++ *
++ * Author: Mark Miesfeld <mmiesfeld@amcc.com>
++ *
++ * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
++ * Copyright 2008 DENX Software Engineering
++ *
++ * Based on versions provided by AMCC and Synopsys which are:
++ * Copyright 2006 Applied Micro Circuits Corporation
++ * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#ifdef CONFIG_SATA_DWC_DEBUG
++#define DEBUG
++#endif
++
++#ifdef CONFIG_SATA_DWC_VDEBUG
++#define VERBOSE_DEBUG
++#define DEBUG_NCQ
++#endif
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
++#include <linux/libata.h>
++#include <linux/slab.h>
++#include "libata.h"
++
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_cmnd.h>
++
++/* These two are defined in "libata.h" */
++#undef DRV_NAME
++#undef DRV_VERSION
++
++#define DRV_NAME "sata-dwc"
++#define DRV_VERSION "1.3"
++
++/* SATA DMA driver Globals */
++#define DMA_NUM_CHANS 1
++#define DMA_NUM_CHAN_REGS 8
++
++/* SATA DMA Register definitions */
++#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/
++
++struct dmareg {
++ u32 low; /* Low bits 0-31 */
++ u32 high; /* High bits 32-63 */
++};
++
++/* DMA Per Channel registers */
++struct dma_chan_regs {
++ struct dmareg sar; /* Source Address */
++ struct dmareg dar; /* Destination address */
++ struct dmareg llp; /* Linked List Pointer */
++ struct dmareg ctl; /* Control */
++ struct dmareg sstat; /* Source Status not implemented in core */
++ struct dmareg dstat; /* Destination Status not implemented in core*/
++ struct dmareg sstatar; /* Source Status Address not impl in core */
++ struct dmareg dstatar; /* Destination Status Address not implemente */
++ struct dmareg cfg; /* Config */
++ struct dmareg sgr; /* Source Gather */
++ struct dmareg dsr; /* Destination Scatter */
++};
++
++/* Generic Interrupt Registers */
++struct dma_interrupt_regs {
++ struct dmareg tfr; /* Transfer Interrupt */
++ struct dmareg block; /* Block Interrupt */
++ struct dmareg srctran; /* Source Transfer Interrupt */
++ struct dmareg dsttran; /* Dest Transfer Interrupt */
++ struct dmareg error; /* Error */
++};
++
++struct ahb_dma_regs {
++ struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
++ struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
++ struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
++ struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
++ struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
++ struct dmareg statusInt; /* Interrupt combined*/
++ struct dmareg rq_srcreg; /* Src Trans Req */
++ struct dmareg rq_dstreg; /* Dst Trans Req */
++ struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/
++ struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/
++ struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/
++ struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/
++ struct dmareg dma_cfg; /* DMA Config */
++ struct dmareg dma_chan_en; /* DMA Channel Enable*/
++ struct dmareg dma_id; /* DMA ID */
++ struct dmareg dma_test; /* DMA Test */
++ struct dmareg res1; /* reserved */
++ struct dmareg res2; /* reserved */
++ /*
++ * DMA Comp Params
++ * Param 6 = dma_param[0], Param 5 = dma_param[1],
++ * Param 4 = dma_param[2] ...
++ */
++ struct dmareg dma_params[6];
++};
++
++/* Data structure for linked list item */
++struct lli {
++ u32 sar; /* Source Address */
++ u32 dar; /* Destination address */
++ u32 llp; /* Linked List Pointer */
++ struct dmareg ctl; /* Control */
++ struct dmareg dstat; /* Destination Status */
++};
++
++enum {
++ SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)),
++ SATA_DWC_DMAC_LLI_NUM = 256,
++ SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \
++ SATA_DWC_DMAC_LLI_NUM),
++ SATA_DWC_DMAC_TWIDTH_BYTES = 4,
++ SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \
++ SATA_DWC_DMAC_TWIDTH_BYTES),
++};
++
++/* DMA Register Operation Bits */
++enum {
++ DMA_EN = 0x00000001, /* Enable AHB DMA */
++ DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */
++ DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */
++};
++
++#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
++#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
++ /* Enable channel */
++#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
++ ((0x000000001 << (ch)) << 8))
++ /* Disable channel */
++#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
++ /* Transfer Type & Flow Controller */
++#define DMA_CTL_TTFC(type) (((type) & 0x7) << 20)
++#define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */
++#define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */
++ /* Src Burst Transaction Length */
++#define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14)
++ /* Dst Burst Transaction Length */
++#define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11)
++ /* Source Transfer Width */
++#define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4)
++ /* Destination Transfer Width */
++#define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1)
++
++/* Assign HW handshaking interface (x) to destination / source peripheral */
++#define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11)
++#define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7)
++#define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5)
++#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
++
++/*
++ * This define is used to set block chaining disabled in the control low
++ * register. It is already in little endian format so it can be &'d directly.
++ * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
++ */
++enum {
++ DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7,
++ DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */
++ DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */
++ DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */
++ DMA_CTL_SINC_DEC = 0x00000200,
++ DMA_CTL_SINC_NOCHANGE = 0x00000400,
++ DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */
++ DMA_CTL_DINC_DEC = 0x00000080,
++ DMA_CTL_DINC_NOCHANGE = 0x00000100,
++ DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */
++
++/* Channel Configuration Register high bits */
++ DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */
++ DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */
++
++/* Channel Configuration Register low bits */
++ DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */
++ DMA_CFG_RELD_SRC = 0x40000000,
++ DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */
++ DMA_CFG_HS_SELDST = 0x00000400,
++ DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */
++
++/* Channel Linked List Pointer Register */
++ DMA_LLP_AHBMASTER1 = 0, /* List Master Select */
++ DMA_LLP_AHBMASTER2 = 1,
++
++ SATA_DWC_MAX_PORTS = 1,
++
++ SATA_DWC_SCR_OFFSET = 0x24,
++ SATA_DWC_REG_OFFSET = 0x64,
++};
++
++/* DWC SATA Registers */
++struct sata_dwc_regs {
++ u32 fptagr; /* 1st party DMA tag */
++ u32 fpbor; /* 1st party DMA buffer offset */
++ u32 fptcr; /* 1st party DMA Xfr count */
++ u32 dmacr; /* DMA Control */
++ u32 dbtsr; /* DMA Burst Transac size */
++ u32 intpr; /* Interrupt Pending */
++ u32 intmr; /* Interrupt Mask */
++ u32 errmr; /* Error Mask */
++ u32 llcr; /* Link Layer Control */
++ u32 phycr; /* PHY Control */
++ u32 physr; /* PHY Status */
++ u32 rxbistpd; /* Recvd BIST pattern def register */
++ u32 rxbistpd1; /* Recvd BIST data dword1 */
++ u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
++ u32 txbistpd; /* Trans BIST pattern def register */
++ u32 txbistpd1; /* Trans BIST data dword1 */
++ u32 txbistpd2; /* Trans BIST data dword2 */
++ u32 bistcr; /* BIST Control Register */
++ u32 bistfctr; /* BIST FIS Count Register */
++ u32 bistsr; /* BIST Status Register */
++ u32 bistdecr; /* BIST Dword Error count register */
++ u32 res[15]; /* Reserved locations */
++ u32 testr; /* Test Register */
++ u32 versionr; /* Version Register */
++ u32 idr; /* ID Register */
++ u32 unimpl[192]; /* Unimplemented */
++ u32 dmadr[256]; /* FIFO Locations in DMA Mode */
++};
++
++enum {
++ SCR_SCONTROL_DET_ENABLE = 0x00000001,
++ SCR_SSTATUS_DET_PRESENT = 0x00000001,
++ SCR_SERROR_DIAG_X = 0x04000000,
++/* DWC SATA Register Operations */
++ SATA_DWC_TXFIFO_DEPTH = 0x01FF,
++ SATA_DWC_RXFIFO_DEPTH = 0x01FF,
++ SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
++ SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
++ SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
++ SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
++ SATA_DWC_INTPR_DMAT = 0x00000001,
++ SATA_DWC_INTPR_NEWFP = 0x00000002,
++ SATA_DWC_INTPR_PMABRT = 0x00000004,
++ SATA_DWC_INTPR_ERR = 0x00000008,
++ SATA_DWC_INTPR_NEWBIST = 0x00000010,
++ SATA_DWC_INTPR_IPF = 0x10000000,
++ SATA_DWC_INTMR_DMATM = 0x00000001,
++ SATA_DWC_INTMR_NEWFPM = 0x00000002,
++ SATA_DWC_INTMR_PMABRTM = 0x00000004,
++ SATA_DWC_INTMR_ERRM = 0x00000008,
++ SATA_DWC_INTMR_NEWBISTM = 0x00000010,
++ SATA_DWC_LLCR_SCRAMEN = 0x00000001,
++ SATA_DWC_LLCR_DESCRAMEN = 0x00000002,
++ SATA_DWC_LLCR_RPDEN = 0x00000004,
++/* This is all error bits, zero's are reserved fields. */
++ SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
++};
++
++#define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F)
++#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
++ SATA_DWC_DMACR_TMOD_TXCHEN)
++#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
++ SATA_DWC_DMACR_TMOD_TXCHEN)
++#define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
++#define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
++ << 16)
++struct sata_dwc_device {
++ struct device *dev; /* generic device struct */
++ struct ata_probe_ent *pe; /* ptr to probe-ent */
++ struct ata_host *host;
++ u8 *reg_base;
++ struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
++ int irq_dma;
++};
++
++#define SATA_DWC_QCMD_MAX 32
++
++struct sata_dwc_device_port {
++ struct sata_dwc_device *hsdev;
++ int cmd_issued[SATA_DWC_QCMD_MAX];
++ struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */
++ dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
++ u32 dma_chan[SATA_DWC_QCMD_MAX];
++ int dma_pending[SATA_DWC_QCMD_MAX];
++};
++
++/*
++ * Commonly used DWC SATA driver Macros
++ */
++#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\
++ (host)->private_data)
++#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\
++ (ap)->host->private_data)
++#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\
++ (ap)->private_data)
++#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
++ (qc)->ap->host->private_data)
++#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\
++ (hsdevp)->hsdev)
++
++enum {
++ SATA_DWC_CMD_ISSUED_NOT = 0,
++ SATA_DWC_CMD_ISSUED_PEND = 1,
++ SATA_DWC_CMD_ISSUED_EXEC = 2,
++ SATA_DWC_CMD_ISSUED_NODATA = 3,
++
++ SATA_DWC_DMA_PENDING_NONE = 0,
++ SATA_DWC_DMA_PENDING_TX = 1,
++ SATA_DWC_DMA_PENDING_RX = 2,
++};
++
++struct sata_dwc_host_priv {
++ void __iomem *scr_addr_sstatus;
++ u32 sata_dwc_sactive_issued ;
++ u32 sata_dwc_sactive_queued ;
++ u32 dma_interrupt_count;
++ struct ahb_dma_regs *sata_dma_regs;
++ struct device *dwc_dev;
++ int dma_channel;
++};
++struct sata_dwc_host_priv host_pvt;
++/*
++ * Prototypes
++ */
++static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
++static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
++ u32 check_status);
++static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
++static void sata_dwc_port_stop(struct ata_port *ap);
++static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
++static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
++static void dma_dwc_exit(struct sata_dwc_device *hsdev);
++static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
++ struct lli *lli, dma_addr_t dma_lli,
++ void __iomem *addr, int dir);
++static void dma_dwc_xfer_start(int dma_ch);
++
++static const char *get_prot_descript(u8 protocol)
++{
++ switch ((enum ata_tf_protocols)protocol) {
++ case ATA_PROT_NODATA:
++ return "ATA no data";
++ case ATA_PROT_PIO:
++ return "ATA PIO";
++ case ATA_PROT_DMA:
++ return "ATA DMA";
++ case ATA_PROT_NCQ:
++ return "ATA NCQ";
++ case ATAPI_PROT_NODATA:
++ return "ATAPI no data";
++ case ATAPI_PROT_PIO:
++ return "ATAPI PIO";
++ case ATAPI_PROT_DMA:
++ return "ATAPI DMA";
++ default:
++ return "unknown";
++ }
++}
++
++static const char *get_dma_dir_descript(int dma_dir)
++{
++ switch ((enum dma_data_direction)dma_dir) {
++ case DMA_BIDIRECTIONAL:
++ return "bidirectional";
++ case DMA_TO_DEVICE:
++ return "to device";
++ case DMA_FROM_DEVICE:
++ return "from device";
++ default:
++ return "none";
++ }
++}
++
++static void sata_dwc_tf_dump(struct ata_taskfile *tf)
++{
++ dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
++ "0x%lx device: %x\n", tf->command,
++ get_prot_descript(tf->protocol), tf->flags, tf->device);
++ dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
++ "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
++ tf->lbam, tf->lbah);
++ dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x "
++ "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
++ tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
++ tf->hob_lbah);
++}
++
++/*
++ * Function: get_burst_length_encode
++ * arguments: datalength: length in bytes of data
++ * returns value to be programmed in register corresponding to data length
++ * This value is effectively the log(base 2) of the length
++ */
++static int get_burst_length_encode(int datalength)
++{
++ int items = datalength >> 2; /* div by 4 to get lword count */
++
++ if (items >= 64)
++ return 5;
++
++ if (items >= 32)
++ return 4;
++
++ if (items >= 16)
++ return 3;
++
++ if (items >= 8)
++ return 2;
++
++ if (items >= 4)
++ return 1;
++
++ return 0;
++}
++
++static void clear_chan_interrupts(int c)
++{
++ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low),
++ DMA_CHANNEL(c));
++ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low),
++ DMA_CHANNEL(c));
++ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low),
++ DMA_CHANNEL(c));
++ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low),
++ DMA_CHANNEL(c));
++ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low),
++ DMA_CHANNEL(c));
++}
++
++/*
++ * Function: dma_request_channel
++ * arguments: None
++ * returns channel number if available else -1
++ * This function assigns the next available DMA channel from the list to the
++ * requester
++ */
++static int dma_request_channel(void)
++{
++ /* Check if the channel is not currently in use */
++ if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &
++ DMA_CHANNEL(host_pvt.dma_channel)))
++ return host_pvt.dma_channel;
++ dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n",
++ __func__, host_pvt.dma_channel);
++ return -1;
++}
++
++/*
++ * Function: dma_dwc_interrupt
++ * arguments: irq, dev_id, pt_regs
++ * returns irqreturn_t status of handling the DMA interrupt
++ * Interrupt Handler for DW AHB SATA DMA
++ */
++static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
++{
++ int chan;
++ u32 tfr_reg, err_reg;
++ unsigned long flags;
++ struct sata_dwc_device *hsdev =
++ (struct sata_dwc_device *)hsdev_instance;
++ struct ata_host *host = (struct ata_host *)hsdev->host;
++ struct ata_port *ap;
++ struct sata_dwc_device_port *hsdevp;
++ u8 tag = 0;
++ unsigned int port = 0;
++
++ spin_lock_irqsave(&host->lock, flags);
++ ap = host->ports[port];
++ hsdevp = HSDEVP_FROM_AP(ap);
++ tag = ap->link.active_tag;
++
++ tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\
++ .low));
++ err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\
++ .low));
++
++ dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
++ tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
++
++ chan = host_pvt.dma_channel;
++ if (chan >= 0) {
++ /* Check for end-of-transfer interrupt. */
++ if (tfr_reg & DMA_CHANNEL(chan)) {
++ /*
++ * Each DMA command produces 2 interrupts. Only
++ * complete the command after both interrupts have been
++ * seen. (See sata_dwc_isr())
++ */
++ host_pvt.dma_interrupt_count++;
++ sata_dwc_clear_dmacr(hsdevp, tag);
++
++ if (hsdevp->dma_pending[tag] ==
++ SATA_DWC_DMA_PENDING_NONE) {
++ dev_err(ap->dev, "DMA not pending eot=0x%08x "
++ "err=0x%08x tag=0x%02x pending=%d\n",
++ tfr_reg, err_reg, tag,
++ hsdevp->dma_pending[tag]);
++ }
++
++ if ((host_pvt.dma_interrupt_count % 2) == 0)
++ sata_dwc_dma_xfer_complete(ap, 1);
++
++ /* Clear the interrupt */
++ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
++ .tfr.low),
++ DMA_CHANNEL(chan));
++ }
++
++ /* Check for error interrupt. */
++ if (err_reg & DMA_CHANNEL(chan)) {
++ /* TODO Need error handler ! */
++ dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
++ err_reg);
++
++ /* Clear the interrupt. */
++ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
++ .error.low),
++ DMA_CHANNEL(chan));
++ }
++ }
++ spin_unlock_irqrestore(&host->lock, flags);
++ return IRQ_HANDLED;
++}
++
++/*
++ * Function: dma_request_interrupts
++ * arguments: hsdev
++ * returns status
++ * This function registers ISR for a particular DMA channel interrupt
++ */
++static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
++{
++ int retval = 0;
++ int chan = host_pvt.dma_channel;
++
++ if (chan >= 0) {
++ /* Unmask error interrupt */
++ out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
++ DMA_ENABLE_CHAN(chan));
++
++ /* Unmask end-of-transfer interrupt */
++ out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low,
++ DMA_ENABLE_CHAN(chan));
++ }
++
++ retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev);
++ if (retval) {
++ dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n",
++ __func__, irq);
++ return -ENODEV;
++ }
++
++ /* Mark this interrupt as requested */
++ hsdev->irq_dma = irq;
++ return 0;
++}
++
++/*
++ * Function: map_sg_to_lli
++ * The Synopsys driver has a comment proposing that better performance
++ * is possible by only enabling interrupts on the last item in the linked list.
++ * However, it seems that could be a problem if an error happened on one of the
++ * first items. The transfer would halt, but no error interrupt would occur.
++ * Currently this function sets interrupts enabled for each linked list item:
++ * DMA_CTL_INT_EN.
++ */
++static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
++ struct lli *lli, dma_addr_t dma_lli,
++ void __iomem *dmadr_addr, int dir)
++{
++ int i, idx = 0;
++ int fis_len = 0;
++ dma_addr_t next_llp;
++ int bl;
++ int sms_val, dms_val;
++
++ sms_val = 0;
++ dms_val = 1 + host_pvt.dma_channel;
++ dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
++ " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
++ (u32)dmadr_addr);
++
++ bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
++
++ for (i = 0; i < num_elems; i++, sg++) {
++ u32 addr, offset;
++ u32 sg_len, len;
++
++ addr = (u32) sg_dma_address(sg);
++ sg_len = sg_dma_len(sg);
++
++ dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
++ "=%d\n", __func__, i, addr, sg_len);
++
++ while (sg_len) {
++ if (idx >= SATA_DWC_DMAC_LLI_NUM) {
++ /* The LLI table is not large enough. */
++ dev_err(host_pvt.dwc_dev, "LLI table overrun "
++ "(idx=%d)\n", idx);
++ break;
++ }
++ len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
++ SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
++
++ offset = addr & 0xffff;
++ if ((offset + sg_len) > 0x10000)
++ len = 0x10000 - offset;
++
++ /*
++ * Make sure a LLI block is not created that will span
++ * 8K max FIS boundary. If the block spans such a FIS
++ * boundary, there is a chance that a DMA burst will
++ * cross that boundary -- this results in an error in
++ * the host controller.
++ */
++ if (fis_len + len > 8192) {
++ dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
++ "%d(0x%x) len=%d(0x%x)\n", fis_len,
++ fis_len, len, len);
++ len = 8192 - fis_len;
++ fis_len = 0;
++ } else {
++ fis_len += len;
++ }
++ if (fis_len == 8192)
++ fis_len = 0;
++
++ /*
++ * Set DMA addresses and lower half of control register
++ * based on direction.
++ */
++ if (dir == DMA_FROM_DEVICE) {
++ lli[idx].dar = cpu_to_le32(addr);
++ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
++
++ lli[idx].ctl.low = cpu_to_le32(
++ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
++ DMA_CTL_SMS(sms_val) |
++ DMA_CTL_DMS(dms_val) |
++ DMA_CTL_SRC_MSIZE(bl) |
++ DMA_CTL_DST_MSIZE(bl) |
++ DMA_CTL_SINC_NOCHANGE |
++ DMA_CTL_SRC_TRWID(2) |
++ DMA_CTL_DST_TRWID(2) |
++ DMA_CTL_INT_EN |
++ DMA_CTL_LLP_SRCEN |
++ DMA_CTL_LLP_DSTEN);
++ } else { /* DMA_TO_DEVICE */
++ lli[idx].sar = cpu_to_le32(addr);
++ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
++
++ lli[idx].ctl.low = cpu_to_le32(
++ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
++ DMA_CTL_SMS(dms_val) |
++ DMA_CTL_DMS(sms_val) |
++ DMA_CTL_SRC_MSIZE(bl) |
++ DMA_CTL_DST_MSIZE(bl) |
++ DMA_CTL_DINC_NOCHANGE |
++ DMA_CTL_SRC_TRWID(2) |
++ DMA_CTL_DST_TRWID(2) |
++ DMA_CTL_INT_EN |
++ DMA_CTL_LLP_SRCEN |
++ DMA_CTL_LLP_DSTEN);
++ }
++
++ dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
++ "0x%08x val: 0x%08x\n", __func__,
++ len, DMA_CTL_BLK_TS(len / 4));
++
++ /* Program the LLI CTL high register */
++ lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\
++ (len / 4));
++
++ /* Program the next pointer. The next pointer must be
++ * the physical address, not the virtual address.
++ */
++ next_llp = (dma_lli + ((idx + 1) * sizeof(struct \
++ lli)));
++
++ /* The last 2 bits encode the list master select. */
++ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
++
++ lli[idx].llp = cpu_to_le32(next_llp);
++ idx++;
++ sg_len -= len;
++ addr += len;
++ }
++ }
++
++ /*
++ * The last next ptr has to be zero and the last control low register
++ * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
++ * and destination enable) set back to 0 (disabled.) This is what tells
++ * the core that this is the last item in the linked list.
++ */
++ if (idx) {
++ lli[idx-1].llp = 0x00000000;
++ lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
++
++ /* Flush cache to memory */
++ dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
++ DMA_BIDIRECTIONAL);
++ }
++
++ return idx;
++}
++
++/*
++ * Function: dma_dwc_xfer_start
++ * arguments: Channel number
++ * Return : None
++ * Enables the DMA channel
++ */
++static void dma_dwc_xfer_start(int dma_ch)
++{
++ /* Enable the DMA channel */
++ out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low),
++ in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) |
++ DMA_ENABLE_CHAN(dma_ch));
++}
++
++static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
++ struct lli *lli, dma_addr_t dma_lli,
++ void __iomem *addr, int dir)
++{
++ int dma_ch;
++ int num_lli;
++ /* Acquire DMA channel */
++ dma_ch = dma_request_channel();
++ if (dma_ch == -1) {
++ dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n",
++ __func__);
++ return -EAGAIN;
++ }
++
++ /* Convert SG list to linked list of items (LLIs) for AHB DMA */
++ num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir);
++
++ dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:"
++ " 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems,
++ lli, (u32)dma_lli, addr, num_lli);
++
++ clear_chan_interrupts(dma_ch);
++
++ /* Program the CFG register. */
++ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
++ DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) |
++ DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
++ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low),
++ DMA_CFG_HW_CH_PRIOR(dma_ch));
++
++ /* Program the address of the linked list */
++ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
++ DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
++
++ /* Program the CTL register with src enable / dst enable */
++ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
++ DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
++ return dma_ch;
++}
++
++/*
++ * Function: dma_dwc_exit
++ * arguments: None
++ * returns status
++ * This function exits the SATA DMA driver
++ */
++static void dma_dwc_exit(struct sata_dwc_device *hsdev)
++{
++ dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
++ if (host_pvt.sata_dma_regs) {
++ iounmap(host_pvt.sata_dma_regs);
++ host_pvt.sata_dma_regs = NULL;
++ }
++
++ if (hsdev->irq_dma) {
++ free_irq(hsdev->irq_dma, hsdev);
++ hsdev->irq_dma = 0;
++ }
++}
++
++/*
++ * Function: dma_dwc_init
++ * arguments: hsdev
++ * returns status
++ * This function initializes the SATA DMA driver
++ */
++static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
++{
++ int err;
++
++ err = dma_request_interrupts(hsdev, irq);
++ if (err) {
++ dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
++ " %d\n", __func__, err);
++ goto error_out;
++ }
++
++ /* Enable DMA */
++ out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN);
++
++ dev_notice(host_pvt.dwc_dev, "DMA initialized\n");
++ dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\
++ sata_dma_regs);
++
++ return 0;
++
++error_out:
++ dma_dwc_exit(hsdev);
++
++ return err;
++}
++
++static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
++{
++ if (scr > SCR_NOTIFICATION) {
++ dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
++ __func__, scr);
++ return -EINVAL;
++ }
++
++ *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
++ dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
++ __func__, link->ap->print_id, scr, *val);
++
++ return 0;
++}
++
++static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
++{
++ dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
++ __func__, link->ap->print_id, scr, val);
++ if (scr > SCR_NOTIFICATION) {
++ dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
++ __func__, scr);
++ return -EINVAL;
++ }
++ out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
++
++ return 0;
++}
++
++static u32 core_scr_read(unsigned int scr)
++{
++ return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\
++ (scr * 4));
++}
++
++static void core_scr_write(unsigned int scr, u32 val)
++{
++ out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4),
++ val);
++}
++
++static void clear_serror(void)
++{
++ u32 val;
++ val = core_scr_read(SCR_ERROR);
++ core_scr_write(SCR_ERROR, val);
++
++}
++
++static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
++{
++ out_le32(&hsdev->sata_dwc_regs->intpr,
++ in_le32(&hsdev->sata_dwc_regs->intpr));
++}
++
++static u32 qcmd_tag_to_mask(u8 tag)
++{
++ return 0x00000001 << (tag & 0x1f);
++}
++
++/* See ahci.c */
++static void sata_dwc_error_intr(struct ata_port *ap,
++ struct sata_dwc_device *hsdev, uint intpr)
++{
++ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
++ struct ata_eh_info *ehi = &ap->link.eh_info;
++ unsigned int err_mask = 0, action = 0;
++ struct ata_queued_cmd *qc;
++ u32 serror;
++ u8 status, tag;
++ u32 err_reg;
++
++ ata_ehi_clear_desc(ehi);
++
++ serror = core_scr_read(SCR_ERROR);
++ status = ap->ops->sff_check_status(ap);
++
++ err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\
++ low));
++ tag = ap->link.active_tag;
++
++ dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x "
++ "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n",
++ __func__, serror, intpr, status, host_pvt.dma_interrupt_count,
++ hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg);
++
++ /* Clear error register and interrupt bit */
++ clear_serror();
++ clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
++
++ /* This is the only error happening now. TODO check for exact error */
++
++ err_mask |= AC_ERR_HOST_BUS;
++ action |= ATA_EH_RESET;
++
++ /* Pass this on to EH */
++ ehi->serror |= serror;
++ ehi->action |= action;
++
++ qc = ata_qc_from_tag(ap, tag);
++ if (qc)
++ qc->err_mask |= err_mask;
++ else
++ ehi->err_mask |= err_mask;
++
++ ata_port_abort(ap);
++}
++
++/*
++ * Function : sata_dwc_isr
++ * arguments : irq, void *dev_instance, struct pt_regs *regs
++ * Return value : irqreturn_t - status of IRQ
++ * This Interrupt handler called via port ops registered function.
++ * .irq_handler = sata_dwc_isr
++ */
++static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
++{
++ struct ata_host *host = (struct ata_host *)dev_instance;
++ struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
++ struct ata_port *ap;
++ struct ata_queued_cmd *qc;
++ unsigned long flags;
++ u8 status, tag;
++ int handled, num_processed, port = 0;
++ uint intpr, sactive, sactive2, tag_mask;
++ struct sata_dwc_device_port *hsdevp;
++ host_pvt.sata_dwc_sactive_issued = 0;
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ /* Read the interrupt register */
++ intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
++
++ ap = host->ports[port];
++ hsdevp = HSDEVP_FROM_AP(ap);
++
++ dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
++ ap->link.active_tag);
++
++ /* Check for error interrupt */
++ if (intpr & SATA_DWC_INTPR_ERR) {
++ sata_dwc_error_intr(ap, hsdev, intpr);
++ handled = 1;
++ goto DONE;
++ }
++
++ /* Check for DMA SETUP FIS (FP DMA) interrupt */
++ if (intpr & SATA_DWC_INTPR_NEWFP) {
++ clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
++
++ tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
++ dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
++ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
++ dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
++
++ host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
++
++ qc = ata_qc_from_tag(ap, tag);
++ /*
++ * Start FP DMA for NCQ command. At this point the tag is the
++ * active tag. It is the tag that matches the command about to
++ * be completed.
++ */
++ qc->ap->link.active_tag = tag;
++ sata_dwc_bmdma_start_by_tag(qc, tag);
++
++ handled = 1;
++ goto DONE;
++ }
++ sactive = core_scr_read(SCR_ACTIVE);
++ tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
++
++ /* If no sactive issued and tag_mask is zero then this is not NCQ */
++ if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
++ if (ap->link.active_tag == ATA_TAG_POISON)
++ tag = 0;
++ else
++ tag = ap->link.active_tag;
++ qc = ata_qc_from_tag(ap, tag);
++
++ /* DEV interrupt w/ no active qc? */
++ if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
++ dev_err(ap->dev, "%s interrupt with no active qc "
++ "qc=%p\n", __func__, qc);
++ ap->ops->sff_check_status(ap);
++ handled = 1;
++ goto DONE;
++ }
++ status = ap->ops->sff_check_status(ap);
++
++ qc->ap->link.active_tag = tag;
++ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
++
++ if (status & ATA_ERR) {
++ dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
++ sata_dwc_qc_complete(ap, qc, 1);
++ handled = 1;
++ goto DONE;
++ }
++
++ dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
++ __func__, get_prot_descript(qc->tf.protocol));
++DRVSTILLBUSY:
++ if (ata_is_dma(qc->tf.protocol)) {
++ /*
++ * Each DMA transaction produces 2 interrupts. The DMAC
++ * transfer complete interrupt and the SATA controller
++ * operation done interrupt. The command should be
++ * completed only after both interrupts are seen.
++ */
++ host_pvt.dma_interrupt_count++;
++ if (hsdevp->dma_pending[tag] == \
++ SATA_DWC_DMA_PENDING_NONE) {
++ dev_err(ap->dev, "%s: DMA not pending "
++ "intpr=0x%08x status=0x%08x pending"
++ "=%d\n", __func__, intpr, status,
++ hsdevp->dma_pending[tag]);
++ }
++
++ if ((host_pvt.dma_interrupt_count % 2) == 0)
++ sata_dwc_dma_xfer_complete(ap, 1);
++ } else if (ata_is_pio(qc->tf.protocol)) {
++ ata_sff_hsm_move(ap, qc, status, 0);
++ handled = 1;
++ goto DONE;
++ } else {
++ if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
++ goto DRVSTILLBUSY;
++ }
++
++ handled = 1;
++ goto DONE;
++ }
++
++ /*
++ * This is a NCQ command. At this point we need to figure out for which
++ * tags we have gotten a completion interrupt. One interrupt may serve
++ * as completion for more than one operation when commands are queued
++ * (NCQ). We need to process each completed command.
++ */
++
++ /* process completed commands */
++ sactive = core_scr_read(SCR_ACTIVE);
++ tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
++
++ if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
++ tag_mask > 1) {
++ dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x"
++ "tag_mask=0x%08x\n", __func__, sactive,
++ host_pvt.sata_dwc_sactive_issued, tag_mask);
++ }
++
++ if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
++ (host_pvt.sata_dwc_sactive_issued)) {
++ dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x "
++ "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask"
++ "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued,
++ tag_mask);
++ }
++
++ /* read just to clear ... not bad if currently still busy */
++ status = ap->ops->sff_check_status(ap);
++ dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
++
++ tag = 0;
++ num_processed = 0;
++ while (tag_mask) {
++ num_processed++;
++ while (!(tag_mask & 0x00000001)) {
++ tag++;
++ tag_mask <<= 1;
++ }
++
++ tag_mask &= (~0x00000001);
++ qc = ata_qc_from_tag(ap, tag);
++
++ /* To be picked up by completion functions */
++ qc->ap->link.active_tag = tag;
++ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
++
++ /* Let libata/scsi layers handle error */
++ if (status & ATA_ERR) {
++ dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
++ status);
++ sata_dwc_qc_complete(ap, qc, 1);
++ handled = 1;
++ goto DONE;
++ }
++
++ /* Process completed command */
++ dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
++ get_prot_descript(qc->tf.protocol));
++ if (ata_is_dma(qc->tf.protocol)) {
++ host_pvt.dma_interrupt_count++;
++ if (hsdevp->dma_pending[tag] == \
++ SATA_DWC_DMA_PENDING_NONE)
++ dev_warn(ap->dev, "%s: DMA not pending?\n",
++ __func__);
++ if ((host_pvt.dma_interrupt_count % 2) == 0)
++ sata_dwc_dma_xfer_complete(ap, 1);
++ } else {
++ if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
++ goto STILLBUSY;
++ }
++ continue;
++
++STILLBUSY:
++ ap->stats.idle_irq++;
++ dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
++ ap->print_id);
++ } /* while tag_mask */
++
++ /*
++ * Check to see if any commands completed while we were processing our
++ * initial set of completed commands (read status clears interrupts,
++ * so we might miss a completed command interrupt if one came in while
++ * we were processing --we read status as part of processing a completed
++ * command).
++ */
++ sactive2 = core_scr_read(SCR_ACTIVE);
++ if (sactive2 != sactive) {
++ dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2"
++ "=0x%x\n", sactive, sactive2);
++ }
++ handled = 1;
++
++DONE:
++ spin_unlock_irqrestore(&host->lock, flags);
++ return IRQ_RETVAL(handled);
++}
++
++static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
++{
++ struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
++
++ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
++ out_le32(&(hsdev->sata_dwc_regs->dmacr),
++ SATA_DWC_DMACR_RX_CLEAR(
++ in_le32(&(hsdev->sata_dwc_regs->dmacr))));
++ } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
++ out_le32(&(hsdev->sata_dwc_regs->dmacr),
++ SATA_DWC_DMACR_TX_CLEAR(
++ in_le32(&(hsdev->sata_dwc_regs->dmacr))));
++ } else {
++ /*
++ * This should not happen, it indicates the driver is out of
++ * sync. If it does happen, clear dmacr anyway.
++ */
++ dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and"
++ "TX DMA not pending tag=0x%02x pending=%d"
++ " dmacr: 0x%08x\n", __func__, tag,
++ hsdevp->dma_pending[tag],
++ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
++ out_le32(&(hsdev->sata_dwc_regs->dmacr),
++ SATA_DWC_DMACR_TXRXCH_CLEAR);
++ }
++}
++
++static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
++{
++ struct ata_queued_cmd *qc;
++ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
++ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
++ u8 tag = 0;
++
++ tag = ap->link.active_tag;
++ qc = ata_qc_from_tag(ap, tag);
++ if (!qc) {
++ dev_err(ap->dev, "failed to get qc");
++ return;
++ }
++
++#ifdef DEBUG_NCQ
++ if (tag > 0) {
++ dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
++ "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
++ get_dma_dir_descript(qc->dma_dir),
++ get_prot_descript(qc->tf.protocol),
++ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
++ }
++#endif
++
++ if (ata_is_dma(qc->tf.protocol)) {
++ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
++ dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
++ "pending dmacr: 0x%08x\n", __func__,
++ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
++ }
++
++ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
++ sata_dwc_qc_complete(ap, qc, check_status);
++ ap->link.active_tag = ATA_TAG_POISON;
++ } else {
++ sata_dwc_qc_complete(ap, qc, check_status);
++ }
++}
++
++static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
++ u32 check_status)
++{
++ u8 status = 0;
++ u32 mask = 0x0;
++ u8 tag = qc->tag;
++ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
++ host_pvt.sata_dwc_sactive_queued = 0;
++ dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
++
++ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
++ dev_err(ap->dev, "TX DMA PENDING\n");
++ else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
++ dev_err(ap->dev, "RX DMA PENDING\n");
++ dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:"
++ " protocol=%d\n", qc->tf.command, status, ap->print_id,
++ qc->tf.protocol);
++
++ /* clear active bit */
++ mask = (~(qcmd_tag_to_mask(tag)));
++ host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
++ & mask;
++ host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
++ & mask;
++ ata_qc_complete(qc);
++ return 0;
++}
++
++static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
++{
++ /* Enable selective interrupts by setting the interrupt maskregister*/
++ out_le32(&hsdev->sata_dwc_regs->intmr,
++ SATA_DWC_INTMR_ERRM |
++ SATA_DWC_INTMR_NEWFPM |
++ SATA_DWC_INTMR_PMABRTM |
++ SATA_DWC_INTMR_DMATM);
++ /*
++ * Unmask the error bits that should trigger an error interrupt by
++ * setting the error mask register.
++ */
++ out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
++
++ dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
++ __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
++ in_le32(&hsdev->sata_dwc_regs->errmr));
++}
++
++static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
++{
++ port->cmd_addr = (void *)base + 0x00;
++ port->data_addr = (void *)base + 0x00;
++
++ port->error_addr = (void *)base + 0x04;
++ port->feature_addr = (void *)base + 0x04;
++
++ port->nsect_addr = (void *)base + 0x08;
++
++ port->lbal_addr = (void *)base + 0x0c;
++ port->lbam_addr = (void *)base + 0x10;
++ port->lbah_addr = (void *)base + 0x14;
++
++ port->device_addr = (void *)base + 0x18;
++ port->command_addr = (void *)base + 0x1c;
++ port->status_addr = (void *)base + 0x1c;
++
++ port->altstatus_addr = (void *)base + 0x20;
++ port->ctl_addr = (void *)base + 0x20;
++}
++
++/*
++ * Function : sata_dwc_port_start
++ * arguments : struct ata_ioports *port
++ * Return value : returns 0 if success, error code otherwise
++ * This function allocates the scatter gather LLI table for AHB DMA
++ */
++static int sata_dwc_port_start(struct ata_port *ap)
++{
++ int err = 0;
++ struct sata_dwc_device *hsdev;
++ struct sata_dwc_device_port *hsdevp = NULL;
++ struct device *pdev;
++ int i;
++
++ hsdev = HSDEV_FROM_AP(ap);
++
++ dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
++
++ hsdev->host = ap->host;
++ pdev = ap->host->dev;
++ if (!pdev) {
++ dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
++ err = -ENODEV;
++ goto CLEANUP;
++ }
++
++ /* Allocate Port Struct */
++ hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
++ if (!hsdevp) {
++ dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__);
++ err = -ENOMEM;
++ goto CLEANUP;
++ }
++ hsdevp->hsdev = hsdev;
++
++ for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
++ hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
++
++ ap->bmdma_prd = 0; /* set these so libata doesn't use them */
++ ap->bmdma_prd_dma = 0;
++
++ /*
++ * DMA - Assign scatter gather LLI table. We can't use the libata
++ * version since it's PRD is IDE PCI specific.
++ */
++ for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
++ hsdevp->llit[i] = dma_alloc_coherent(pdev,
++ SATA_DWC_DMAC_LLI_TBL_SZ,
++ &(hsdevp->llit_dma[i]),
++ GFP_ATOMIC);
++ if (!hsdevp->llit[i]) {
++ dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto CLEANUP_ALLOC;
++ }
++ }
++
++ if (ap->port_no == 0) {
++ dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
++ __func__);
++ out_le32(&hsdev->sata_dwc_regs->dmacr,
++ SATA_DWC_DMACR_TXRXCH_CLEAR);
++
++ dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
++ __func__);
++ out_le32(&hsdev->sata_dwc_regs->dbtsr,
++ (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
++ SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
++ }
++
++ /* Clear any error bits before libata starts issuing commands */
++ clear_serror();
++ ap->private_data = hsdevp;
++ dev_dbg(ap->dev, "%s: done\n", __func__);
++ return 0;
++
++CLEANUP_ALLOC:
++ kfree(hsdevp);
++CLEANUP:
++ dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
++ return err;
++}
++
++static void sata_dwc_port_stop(struct ata_port *ap)
++{
++ int i;
++ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
++ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
++
++ dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
++
++ if (hsdevp && hsdev) {
++ /* deallocate LLI table */
++ for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
++ dma_free_coherent(ap->host->dev,
++ SATA_DWC_DMAC_LLI_TBL_SZ,
++ hsdevp->llit[i], hsdevp->llit_dma[i]);
++ }
++
++ kfree(hsdevp);
++ }
++ ap->private_data = NULL;
++}
++
++/*
++ * Function : sata_dwc_exec_command_by_tag
++ * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
++ * Return value : None
++ * This function keeps track of individual command tag ids and calls
++ * ata_exec_command in libata
++ */
++static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
++ struct ata_taskfile *tf,
++ u8 tag, u32 cmd_issued)
++{
++ unsigned long flags;
++ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
++
++ dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
++ ata_get_cmd_descript(tf->command), tag);
++
++ spin_lock_irqsave(&ap->host->lock, flags);
++ hsdevp->cmd_issued[tag] = cmd_issued;
++ spin_unlock_irqrestore(&ap->host->lock, flags);
++ /*
++ * Clear SError before executing a new command.
++ * sata_dwc_scr_write and read can not be used here. Clearing the PM
++ * managed SError register for the disk needs to be done before the
++ * task file is loaded.
++ */
++ clear_serror();
++ ata_sff_exec_command(ap, tf);
++}
++
++static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
++{
++ sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
++ SATA_DWC_CMD_ISSUED_PEND);
++}
++
++static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
++{
++ u8 tag = qc->tag;
++
++ if (ata_is_ncq(qc->tf.protocol)) {
++ dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
++ __func__, qc->ap->link.sactive, tag);
++ } else {
++ tag = 0;
++ }
++ sata_dwc_bmdma_setup_by_tag(qc, tag);
++}
++
++static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
++{
++ int start_dma;
++ u32 reg, dma_chan;
++ struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
++ struct ata_port *ap = qc->ap;
++ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
++ int dir = qc->dma_dir;
++ dma_chan = hsdevp->dma_chan[tag];
++
++ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
++ start_dma = 1;
++ if (dir == DMA_TO_DEVICE)
++ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
++ else
++ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
++ } else {
++ dev_err(ap->dev, "%s: Command not pending cmd_issued=%d "
++ "(tag=%d) DMA NOT started\n", __func__,
++ hsdevp->cmd_issued[tag], tag);
++ start_dma = 0;
++ }
++
++ dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
++ "start_dma? %x\n", __func__, qc, tag, qc->tf.command,
++ get_dma_dir_descript(qc->dma_dir), start_dma);
++ sata_dwc_tf_dump(&(qc->tf));
++
++ if (start_dma) {
++ reg = core_scr_read(SCR_ERROR);
++ if (reg & SATA_DWC_SERROR_ERR_BITS) {
++ dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
++ __func__, reg);
++ }
++
++ if (dir == DMA_TO_DEVICE)
++ out_le32(&hsdev->sata_dwc_regs->dmacr,
++ SATA_DWC_DMACR_TXCHEN);
++ else
++ out_le32(&hsdev->sata_dwc_regs->dmacr,
++ SATA_DWC_DMACR_RXCHEN);
++
++ /* Enable AHB DMA transfer on the specified channel */
++ dma_dwc_xfer_start(dma_chan);
++ }
++}
++
++static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
++{
++ u8 tag = qc->tag;
++
++ if (ata_is_ncq(qc->tf.protocol)) {
++ dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
++ __func__, qc->ap->link.sactive, tag);
++ } else {
++ tag = 0;
++ }
++ dev_dbg(qc->ap->dev, "%s\n", __func__);
++ sata_dwc_bmdma_start_by_tag(qc, tag);
++}
++
++/*
++ * Function : sata_dwc_qc_prep_by_tag
++ * arguments : ata_queued_cmd *qc, u8 tag
++ * Return value : None
++ * qc_prep for a particular queued command based on tag
++ */
++static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
++{
++ struct scatterlist *sg = qc->sg;
++ struct ata_port *ap = qc->ap;
++ int dma_chan;
++ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
++ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
++
++ dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
++ __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
++ qc->n_elem);
++
++ dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
++ hsdevp->llit_dma[tag],
++ (void *__iomem)(&hsdev->sata_dwc_regs->\
++ dmadr), qc->dma_dir);
++ if (dma_chan < 0) {
++ dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
++ __func__, dma_chan);
++ return;
++ }
++ hsdevp->dma_chan[tag] = dma_chan;
++}
++
++static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
++{
++ u32 sactive;
++ u8 tag = qc->tag;
++ struct ata_port *ap = qc->ap;
++
++#ifdef DEBUG_NCQ
++ if (qc->tag > 0 || ap->link.sactive > 1)
++ dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
++ "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
++ __func__, ap->print_id, qc->tf.command,
++ ata_get_cmd_descript(qc->tf.command),
++ qc->tag, get_prot_descript(qc->tf.protocol),
++ ap->link.active_tag, ap->link.sactive);
++#endif
++
++ if (!ata_is_ncq(qc->tf.protocol))
++ tag = 0;
++ sata_dwc_qc_prep_by_tag(qc, tag);
++
++ if (ata_is_ncq(qc->tf.protocol)) {
++ sactive = core_scr_read(SCR_ACTIVE);
++ sactive |= (0x00000001 << tag);
++ core_scr_write(SCR_ACTIVE, sactive);
++
++ dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
++ "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive,
++ sactive);
++
++ ap->ops->sff_tf_load(ap, &qc->tf);
++ sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
++ SATA_DWC_CMD_ISSUED_PEND);
++ } else {
++ ata_sff_qc_issue(qc);
++ }
++ return 0;
++}
++
++/*
++ * Function : sata_dwc_qc_prep
++ * arguments : ata_queued_cmd *qc
++ * Return value : None
++ * qc_prep for a particular queued command
++ */
++
++static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
++{
++ if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
++ return;
++
++#ifdef DEBUG_NCQ
++ if (qc->tag > 0)
++ dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
++ __func__, qc->tag, qc->ap->link.active_tag);
++
++ return ;
++#endif
++}
++
++static void sata_dwc_error_handler(struct ata_port *ap)
++{
++ ata_sff_error_handler(ap);
++}
++
++int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline)
++{
++ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
++ int ret;
++
++ ret = sata_sff_hardreset(link, class, deadline);
++
++ sata_dwc_enable_interrupts(hsdev);
++
++ /* Reconfigure the DMA control register */
++ out_le32(&hsdev->sata_dwc_regs->dmacr,
++ SATA_DWC_DMACR_TXRXCH_CLEAR);
++
++ /* Reconfigure the DMA Burst Transaction Size register */
++ out_le32(&hsdev->sata_dwc_regs->dbtsr,
++ SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
++ SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
++
++ return ret;
++}
++
++/*
++ * scsi mid-layer and libata interface structures
++ */
++static struct scsi_host_template sata_dwc_sht = {
++ ATA_NCQ_SHT(DRV_NAME),
++ /*
++ * test-only: Currently this driver doesn't handle NCQ
++ * correctly. We enable NCQ but set the queue depth to a
++ * max of 1. This will get fixed in in a future release.
++ */
++ .sg_tablesize = LIBATA_MAX_PRD,
++ .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */
++ .dma_boundary = ATA_DMA_BOUNDARY,
++};
++
++static struct ata_port_operations sata_dwc_ops = {
++ .inherits = &ata_sff_port_ops,
++
++ .error_handler = sata_dwc_error_handler,
++ .hardreset = sata_dwc_hardreset,
++
++ .qc_prep = sata_dwc_qc_prep,
++ .qc_issue = sata_dwc_qc_issue,
++
++ .scr_read = sata_dwc_scr_read,
++ .scr_write = sata_dwc_scr_write,
++
++ .port_start = sata_dwc_port_start,
++ .port_stop = sata_dwc_port_stop,
++
++ .bmdma_setup = sata_dwc_bmdma_setup,
++ .bmdma_start = sata_dwc_bmdma_start,
++};
++
++static const struct ata_port_info sata_dwc_port_info[] = {
++ {
++ .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &sata_dwc_ops,
++ },
++};
++
++static int sata_dwc_probe(struct platform_device *ofdev)
++{
++ struct sata_dwc_device *hsdev;
++ u32 idr, versionr;
++ char *ver = (char *)&versionr;
++ u8 *base = NULL;
++ int err = 0;
++ int irq, rc;
++ struct ata_host *host;
++ struct ata_port_info pi = sata_dwc_port_info[0];
++ const struct ata_port_info *ppi[] = { &pi, NULL };
++ struct device_node *np = ofdev->dev.of_node;
++ u32 dma_chan;
++
++ /* Allocate DWC SATA device */
++ hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
++ if (hsdev == NULL) {
++ dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
++ err = -ENOMEM;
++ goto error;
++ }
++
++ if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
++ dev_warn(&ofdev->dev, "no dma-channel property set."
++ " Use channel 0\n");
++ dma_chan = 0;
++ }
++ host_pvt.dma_channel = dma_chan;
++
++ /* Ioremap SATA registers */
++ base = of_iomap(ofdev->dev.of_node, 0);
++ if (!base) {
++ dev_err(&ofdev->dev, "ioremap failed for SATA register"
++ " address\n");
++ err = -ENODEV;
++ goto error_kmalloc;
++ }
++ hsdev->reg_base = base;
++ dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
++
++ /* Synopsys DWC SATA specific Registers */
++ hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
++
++ /* Allocate and fill host */
++ host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
++ if (!host) {
++ dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
++ err = -ENOMEM;
++ goto error_iomap;
++ }
++
++ host->private_data = hsdev;
++
++ /* Setup port */
++ host->ports[0]->ioaddr.cmd_addr = base;
++ host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
++ host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
++ sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
++
++ /* Read the ID and Version Registers */
++ idr = in_le32(&hsdev->sata_dwc_regs->idr);
++ versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
++ dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
++ idr, ver[0], ver[1], ver[2]);
++
++ /* Get SATA DMA interrupt number */
++ irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
++ if (irq == NO_IRQ) {
++ dev_err(&ofdev->dev, "no SATA DMA irq\n");
++ err = -ENODEV;
++ goto error_out;
++ }
++
++ /* Get physical SATA DMA register base address */
++ host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1);
++ if (!(host_pvt.sata_dma_regs)) {
++ dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
++ " address\n");
++ err = -ENODEV;
++ goto error_out;
++ }
++
++ /* Save dev for later use in dev_xxx() routines */
++ host_pvt.dwc_dev = &ofdev->dev;
++
++ /* Initialize AHB DMAC */
++ dma_dwc_init(hsdev, irq);
++
++ /* Enable SATA Interrupts */
++ sata_dwc_enable_interrupts(hsdev);
++
++ /* Get SATA interrupt number */
++ irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
++ if (irq == NO_IRQ) {
++ dev_err(&ofdev->dev, "no SATA DMA irq\n");
++ err = -ENODEV;
++ goto error_out;
++ }
++
++ /*
++ * Now, register with libATA core, this will also initiate the
++ * device discovery process, invoking our port_start() handler &
++ * error_handler() to execute a dummy Softreset EH session
++ */
++ rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
++
++ if (rc != 0)
++ dev_err(&ofdev->dev, "failed to activate host");
++
++ dev_set_drvdata(&ofdev->dev, host);
++ return 0;
++
++error_out:
++ /* Free SATA DMA resources */
++ dma_dwc_exit(hsdev);
++
++error_iomap:
++ iounmap(base);
++error_kmalloc:
++ kfree(hsdev);
++error:
++ return err;
++}
++
++static int sata_dwc_remove(struct platform_device *ofdev)
++{
++ struct device *dev = &ofdev->dev;
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct sata_dwc_device *hsdev = host->private_data;
++
++ ata_host_detach(host);
++ dev_set_drvdata(dev, NULL);
++
++ /* Free SATA DMA resources */
++ dma_dwc_exit(hsdev);
++
++ iounmap(hsdev->reg_base);
++ kfree(hsdev);
++ kfree(host);
++ dev_dbg(&ofdev->dev, "done\n");
++ return 0;
++}
++
++static const struct of_device_id sata_dwc_match[] = {
++ { .compatible = "amcc,sata-460ex", },
++ {}
++};
++MODULE_DEVICE_TABLE(of, sata_dwc_match);
++
++static struct platform_driver sata_dwc_driver = {
++ .driver = {
++ .name = DRV_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = sata_dwc_match,
++ },
++ .probe = sata_dwc_probe,
++ .remove = sata_dwc_remove,
++};
++
++module_platform_driver(sata_dwc_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
++MODULE_DESCRIPTION("DesignWare Cores SATA controller low lever driver");
++MODULE_VERSION(DRV_VERSION);
+diff -Nur linux-3.14.36/drivers/ata/sata_highbank.c linux-openelec/drivers/ata/sata_highbank.c
+--- linux-3.14.36/drivers/ata/sata_highbank.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_highbank.c 2015-05-06 12:05:42.000000000 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/gfp.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/types.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+@@ -403,6 +402,7 @@
+ static const unsigned long timing[] = { 5, 100, 500};
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+@@ -431,7 +431,7 @@
+ break;
+ } while (!online && retry--);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+diff -Nur linux-3.14.36/drivers/ata/sata_nv.c linux-openelec/drivers/ata/sata_nv.c
+--- linux-3.14.36/drivers/ata/sata_nv.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_nv.c 2015-05-06 12:05:42.000000000 -0500
+@@ -40,7 +40,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_promise.c linux-openelec/drivers/ata/sata_promise.c
+--- linux-3.14.36/drivers/ata/sata_promise.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_promise.c 2015-05-06 12:05:42.000000000 -0500
+@@ -35,7 +35,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_qstor.c linux-openelec/drivers/ata/sata_qstor.c
+--- linux-3.14.36/drivers/ata/sata_qstor.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_qstor.c 2015-05-06 12:05:42.000000000 -0500
+@@ -31,7 +31,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_sil.c linux-openelec/drivers/ata/sata_sil.c
+--- linux-3.14.36/drivers/ata/sata_sil.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_sil.c 2015-05-06 12:05:42.000000000 -0500
+@@ -37,7 +37,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_sis.c linux-openelec/drivers/ata/sata_sis.c
+--- linux-3.14.36/drivers/ata/sata_sis.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_sis.c 2015-05-06 12:05:42.000000000 -0500
+@@ -33,7 +33,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_svw.c linux-openelec/drivers/ata/sata_svw.c
+--- linux-3.14.36/drivers/ata/sata_svw.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_svw.c 2015-05-06 12:05:42.000000000 -0500
+@@ -39,7 +39,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_sx4.c linux-openelec/drivers/ata/sata_sx4.c
+--- linux-3.14.36/drivers/ata/sata_sx4.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_sx4.c 2015-05-06 12:05:42.000000000 -0500
+@@ -82,7 +82,6 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_uli.c linux-openelec/drivers/ata/sata_uli.c
+--- linux-3.14.36/drivers/ata/sata_uli.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_uli.c 2015-05-06 12:05:42.000000000 -0500
+@@ -28,7 +28,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_via.c linux-openelec/drivers/ata/sata_via.c
+--- linux-3.14.36/drivers/ata/sata_via.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_via.c 2015-05-06 12:05:42.000000000 -0500
+@@ -36,7 +36,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.36/drivers/ata/sata_vsc.c linux-openelec/drivers/ata/sata_vsc.c
+--- linux-3.14.36/drivers/ata/sata_vsc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ata/sata_vsc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -37,7 +37,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.36/drivers/base/bus.c linux-openelec/drivers/base/bus.c
+--- linux-3.14.36/drivers/base/bus.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/base/bus.c 2015-07-24 18:03:29.300842002 -0500
+@@ -1220,7 +1220,7 @@
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+- * number appended. The registered devices are not explicitely named;
++ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new, it exists for compatibility
+diff -Nur linux-3.14.36/drivers/base/cpu.c linux-openelec/drivers/base/cpu.c
+--- linux-3.14.36/drivers/base/cpu.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/base/cpu.c 2015-05-06 12:05:42.000000000 -0500
+@@ -15,6 +15,7 @@
+ #include <linux/percpu.h>
+ #include <linux/acpi.h>
+ #include <linux/of.h>
++#include <linux/cpufeature.h>
+
+ #include "base.h"
+
+@@ -286,6 +287,45 @@
+ */
+ }
+
++#ifdef CONFIG_HAVE_CPU_AUTOPROBE
++#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
++static ssize_t print_cpu_modalias(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ ssize_t n;
++ u32 i;
++
++ n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
++ CPU_FEATURE_TYPEVAL);
++
++ for (i = 0; i < MAX_CPU_FEATURES; i++)
++ if (cpu_have_feature(i)) {
++ if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
++ WARN(1, "CPU features overflow page\n");
++ break;
++ }
++ n += sprintf(&buf[n], ",%04X", i);
++ }
++ buf[n++] = '\n';
++ return n;
++}
++#else
++#define print_cpu_modalias arch_print_cpu_modalias
++#endif
++
++static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ if (buf) {
++ print_cpu_modalias(NULL, NULL, buf);
++ add_uevent_var(env, "MODALIAS=%s", buf);
++ kfree(buf);
++ }
++ return 0;
++}
++#endif
++
+ /*
+ * register_cpu - Setup a sysfs device for a CPU.
+ * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
+@@ -306,8 +346,8 @@
+ cpu->dev.offline_disabled = !cpu->hotpluggable;
+ cpu->dev.offline = !cpu_online(num);
+ cpu->dev.of_node = of_get_cpu_node(num, NULL);
+-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+- cpu->dev.bus->uevent = arch_cpu_uevent;
++#ifdef CONFIG_HAVE_CPU_AUTOPROBE
++ cpu->dev.bus->uevent = cpu_uevent;
+ #endif
+ cpu->dev.groups = common_cpu_attr_groups;
+ if (cpu->hotpluggable)
+@@ -330,8 +370,8 @@
+ }
+ EXPORT_SYMBOL_GPL(get_cpu_device);
+
+-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
++#ifdef CONFIG_HAVE_CPU_AUTOPROBE
++static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
+ #endif
+
+ static struct attribute *cpu_root_attrs[] = {
+@@ -344,7 +384,7 @@
+ &cpu_attrs[2].attr.attr,
+ &dev_attr_kernel_max.attr,
+ &dev_attr_offline.attr,
+-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
++#ifdef CONFIG_HAVE_CPU_AUTOPROBE
+ &dev_attr_modalias.attr,
+ #endif
+ NULL
+diff -Nur linux-3.14.36/drivers/base/dma-buf.c linux-openelec/drivers/base/dma-buf.c
+--- linux-3.14.36/drivers/base/dma-buf.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/base/dma-buf.c 2015-05-06 12:05:42.000000000 -0500
+@@ -251,9 +251,8 @@
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+- * Returns struct dma_buf_attachment * for this attachment; may return negative
+- * error codes.
+- *
++ * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
++ * error.
+ */
+ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+@@ -319,9 +318,8 @@
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+- * Returns sg_table containing the scatterlist to be returned; may return NULL
+- * or ERR_PTR.
+- *
++ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
++ * on error.
+ */
+ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+@@ -334,6 +332,8 @@
+ return ERR_PTR(-EINVAL);
+
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
++ if (!sg_table)
++ sg_table = ERR_PTR(-ENOMEM);
+
+ return sg_table;
+ }
+@@ -544,6 +544,8 @@
+ * These calls are optional in drivers. The intended use for them
+ * is for mapping objects linear in kernel space for high use objects.
+ * Please attempt to use kmap/kunmap before thinking about these interfaces.
++ *
++ * Returns NULL on error.
+ */
+ void *dma_buf_vmap(struct dma_buf *dmabuf)
+ {
+@@ -566,7 +568,9 @@
+ BUG_ON(dmabuf->vmap_ptr);
+
+ ptr = dmabuf->ops->vmap(dmabuf);
+- if (IS_ERR_OR_NULL(ptr))
++ if (WARN_ON_ONCE(IS_ERR(ptr)))
++ ptr = NULL;
++ if (!ptr)
+ goto out_unlock;
+
+ dmabuf->vmap_ptr = ptr;
+diff -Nur linux-3.14.36/drivers/base/dma-contiguous.c linux-openelec/drivers/base/dma-contiguous.c
+--- linux-3.14.36/drivers/base/dma-contiguous.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/base/dma-contiguous.c 2015-05-06 12:05:42.000000000 -0500
+@@ -24,22 +24,9 @@
+
+ #include <linux/memblock.h>
+ #include <linux/err.h>
+-#include <linux/mm.h>
+-#include <linux/mutex.h>
+-#include <linux/page-isolation.h>
+ #include <linux/sizes.h>
+-#include <linux/slab.h>
+-#include <linux/swap.h>
+-#include <linux/mm_types.h>
+ #include <linux/dma-contiguous.h>
+-
+-struct cma {
+- unsigned long base_pfn;
+- unsigned long count;
+- unsigned long *bitmap;
+-};
+-
+-struct cma *dma_contiguous_default_area;
++#include <linux/cma.h>
+
+ #ifdef CONFIG_CMA_SIZE_MBYTES
+ #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+@@ -47,6 +34,8 @@
+ #define CMA_SIZE_MBYTES 0
+ #endif
+
++struct cma *dma_contiguous_default_area;
++
+ /*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+@@ -59,11 +48,22 @@
+ */
+ static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+ static phys_addr_t size_cmdline = -1;
++static phys_addr_t base_cmdline;
++static phys_addr_t limit_cmdline;
+
+ static int __init early_cma(char *p)
+ {
+ pr_debug("%s(%s)\n", __func__, p);
+ size_cmdline = memparse(p, &p);
++ if (*p != '@')
++ return 0;
++ base_cmdline = memparse(p + 1, &p);
++ if (*p != '-') {
++ limit_cmdline = base_cmdline + size_cmdline;
++ return 0;
++ }
++ limit_cmdline = memparse(p + 1, &p);
++
+ return 0;
+ }
+ early_param("cma", early_cma);
+@@ -107,11 +107,18 @@
+ void __init dma_contiguous_reserve(phys_addr_t limit)
+ {
+ phys_addr_t selected_size = 0;
++ phys_addr_t selected_base = 0;
++ phys_addr_t selected_limit = limit;
++ bool fixed = false;
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+ if (size_cmdline != -1) {
+ selected_size = size_cmdline;
++ selected_base = base_cmdline;
++ selected_limit = min_not_zero(limit_cmdline, limit);
++ if (base_cmdline + size_cmdline == limit_cmdline)
++ fixed = true;
+ } else {
+ #ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+ selected_size = size_bytes;
+@@ -128,68 +135,12 @@
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ (unsigned long)selected_size / SZ_1M);
+
+- dma_contiguous_reserve_area(selected_size, 0, limit,
+- &dma_contiguous_default_area);
+- }
+-};
+-
+-static DEFINE_MUTEX(cma_mutex);
+-
+-static int __init cma_activate_area(struct cma *cma)
+-{
+- int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+- unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+- unsigned i = cma->count >> pageblock_order;
+- struct zone *zone;
+-
+- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+-
+- if (!cma->bitmap)
+- return -ENOMEM;
+-
+- WARN_ON_ONCE(!pfn_valid(pfn));
+- zone = page_zone(pfn_to_page(pfn));
+-
+- do {
+- unsigned j;
+- base_pfn = pfn;
+- for (j = pageblock_nr_pages; j; --j, pfn++) {
+- WARN_ON_ONCE(!pfn_valid(pfn));
+- /*
+- * alloc_contig_range requires the pfn range
+- * specified to be in the same zone. Make this
+- * simple by forcing the entire CMA resv range
+- * to be in the same zone.
+- */
+- if (page_zone(pfn_to_page(pfn)) != zone)
+- goto err;
+- }
+- init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+- } while (--i);
+-
+- return 0;
+-
+-err:
+- kfree(cma->bitmap);
+- return -EINVAL;
+-}
+-
+-static struct cma cma_areas[MAX_CMA_AREAS];
+-static unsigned cma_area_count;
+-
+-static int __init cma_init_reserved_areas(void)
+-{
+- int i;
+-
+- for (i = 0; i < cma_area_count; i++) {
+- int ret = cma_activate_area(&cma_areas[i]);
+- if (ret)
+- return ret;
++ dma_contiguous_reserve_area(selected_size, selected_base,
++ selected_limit,
++ &dma_contiguous_default_area,
++ fixed);
+ }
+-
+- return 0;
+ }
+-core_initcall(cma_init_reserved_areas);
+
+ /**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+@@ -197,78 +148,32 @@
+ * @base: Base address of the reserved area optional, use 0 for any
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
++ * @fixed: hint about where to place the reserved area
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
++ *
++ * If @fixed is true, reserve contiguous area at exactly @base. If false,
++ * reserve in range from @base to @limit.
+ */
+ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+- phys_addr_t limit, struct cma **res_cma)
++ phys_addr_t limit, struct cma **res_cma,
++ bool fixed)
+ {
+- struct cma *cma = &cma_areas[cma_area_count];
+- phys_addr_t alignment;
+- int ret = 0;
+-
+- pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+- (unsigned long)size, (unsigned long)base,
+- (unsigned long)limit);
+-
+- /* Sanity checks */
+- if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+- pr_err("Not enough slots for CMA reserved regions!\n");
+- return -ENOSPC;
+- }
+-
+- if (!size)
+- return -EINVAL;
+-
+- /* Sanitise input arguments */
+- alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+- base = ALIGN(base, alignment);
+- size = ALIGN(size, alignment);
+- limit &= ~(alignment - 1);
+-
+- /* Reserve memory */
+- if (base) {
+- if (memblock_is_region_reserved(base, size) ||
+- memblock_reserve(base, size) < 0) {
+- ret = -EBUSY;
+- goto err;
+- }
+- } else {
+- /*
+- * Use __memblock_alloc_base() since
+- * memblock_alloc_base() panic()s.
+- */
+- phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+- if (!addr) {
+- ret = -ENOMEM;
+- goto err;
+- } else {
+- base = addr;
+- }
+- }
+-
+- /*
+- * Each reserved area must be initialised later, when more kernel
+- * subsystems (like slab allocator) are available.
+- */
+- cma->base_pfn = PFN_DOWN(base);
+- cma->count = size >> PAGE_SHIFT;
+- *res_cma = cma;
+- cma_area_count++;
++ int ret;
+
+- pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
+- (unsigned long)base);
++ ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
++ if (ret)
++ return ret;
+
+ /* Architecture specific contiguous memory fixup. */
+- dma_contiguous_early_fixup(base, size);
++ dma_contiguous_early_fixup(cma_get_base(*res_cma),
++ cma_get_size(*res_cma));
++
+ return 0;
+-err:
+- pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+- return ret;
+ }
+
+ /**
+@@ -279,57 +184,16 @@
+ *
+ * This function allocates memory buffer for specified device. It uses
+ * device specific contiguous memory area if available or the default
+- * global one. Requires architecture specific get_dev_cma_area() helper
++ * global one. Requires architecture specific dev_get_cma_area() helper
+ * function.
+ */
+ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int align)
+ {
+- unsigned long mask, pfn, pageno, start = 0;
+- struct cma *cma = dev_get_cma_area(dev);
+- struct page *page = NULL;
+- int ret;
+-
+- if (!cma || !cma->count)
+- return NULL;
+-
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+- pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+- count, align);
+-
+- if (!count)
+- return NULL;
+-
+- mask = (1 << align) - 1;
+-
+- mutex_lock(&cma_mutex);
+-
+- for (;;) {
+- pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+- start, count, mask);
+- if (pageno >= cma->count)
+- break;
+-
+- pfn = cma->base_pfn + pageno;
+- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+- if (ret == 0) {
+- bitmap_set(cma->bitmap, pageno, count);
+- page = pfn_to_page(pfn);
+- break;
+- } else if (ret != -EBUSY) {
+- break;
+- }
+- pr_debug("%s(): memory range at %p is busy, retrying\n",
+- __func__, pfn_to_page(pfn));
+- /* try again with a bit different memory target */
+- start = pageno + mask + 1;
+- }
+-
+- mutex_unlock(&cma_mutex);
+- pr_debug("%s(): returned %p\n", __func__, page);
+- return page;
++ return cma_alloc(dev_get_cma_area(dev), count, align);
+ }
+
+ /**
+@@ -345,25 +209,5 @@
+ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+ {
+- struct cma *cma = dev_get_cma_area(dev);
+- unsigned long pfn;
+-
+- if (!cma || !pages)
+- return false;
+-
+- pr_debug("%s(page %p)\n", __func__, (void *)pages);
+-
+- pfn = page_to_pfn(pages);
+-
+- if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+- return false;
+-
+- VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+-
+- mutex_lock(&cma_mutex);
+- bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+- free_contig_range(pfn, count);
+- mutex_unlock(&cma_mutex);
+-
+- return true;
++ return cma_release(dev_get_cma_area(dev), pages, count);
+ }
+diff -Nur linux-3.14.36/drivers/base/Kconfig linux-openelec/drivers/base/Kconfig
+--- linux-3.14.36/drivers/base/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/base/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -185,6 +185,14 @@
+ bool
+ default n
+
++config HAVE_CPU_AUTOPROBE
++ def_bool ARCH_HAS_CPU_AUTOPROBE
++
++config GENERIC_CPU_AUTOPROBE
++ bool
++ depends on !ARCH_HAS_CPU_AUTOPROBE
++ select HAVE_CPU_AUTOPROBE
++
+ config SOC_BUS
+ bool
+
+@@ -266,16 +274,6 @@
+
+ If unsure, leave the default value "8".
+
+-config CMA_AREAS
+- int "Maximum count of the CMA device-private areas"
+- default 7
+- help
+- CMA allows to create CMA areas for particular devices. This parameter
+- sets the maximum number of such device private CMA areas in the
+- system.
+-
+- If unsure, leave the default value "7".
+-
+ endif
+
+ endmenu
+diff -Nur linux-3.14.36/drivers/base/regmap/Kconfig linux-openelec/drivers/base/regmap/Kconfig
+--- linux-3.14.36/drivers/base/regmap/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/base/regmap/Kconfig 2015-07-24 18:03:30.304842002 -0500
+@@ -3,11 +3,14 @@
+ # subsystems should select the appropriate symbols.
+
+ config REGMAP
+- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ)
++ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ select IRQ_DOMAIN if REGMAP_IRQ
+ bool
++
++config REGMAP_AC97
++ tristate
+
+ config REGMAP_I2C
+ tristate
+diff -Nur linux-3.14.36/drivers/base/regmap/Makefile linux-openelec/drivers/base/regmap/Makefile
+--- linux-3.14.36/drivers/base/regmap/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/base/regmap/Makefile 2015-07-24 18:03:30.304842002 -0500
+@@ -1,6 +1,7 @@
+ obj-$(CONFIG_REGMAP) += regmap.o regcache.o
+ obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
+ obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
++obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
+ obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
+ obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+ obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
+diff -Nur linux-3.14.36/drivers/base/regmap/regmap-ac97.c linux-openelec/drivers/base/regmap/regmap-ac97.c
+--- linux-3.14.36/drivers/base/regmap/regmap-ac97.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/base/regmap/regmap-ac97.c 2015-07-24 18:03:30.304842002 -0500
+@@ -0,0 +1,113 @@
++/*
++ * Register map access API - AC'97 support
++ *
++ * Copyright 2013 Linaro Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/regmap.h>
++#include <linux/slab.h>
++
++#include <sound/ac97_codec.h>
++
++bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case AC97_RESET:
++ case AC97_POWERDOWN:
++ case AC97_INT_PAGING:
++ case AC97_EXTENDED_ID:
++ case AC97_EXTENDED_STATUS:
++ case AC97_EXTENDED_MID:
++ case AC97_EXTENDED_MSTATUS:
++ case AC97_GPIO_STATUS:
++ case AC97_MISC_AFE:
++ case AC97_VENDOR_ID1:
++ case AC97_VENDOR_ID2:
++ case AC97_CODEC_CLASS_REV:
++ case AC97_PCI_SVID:
++ case AC97_PCI_SID:
++ case AC97_FUNC_SELECT:
++ case AC97_FUNC_INFO:
++ case AC97_SENSE_INFO:
++ return true;
++ default:
++ return false;
++ }
++}
++EXPORT_SYMBOL_GPL(regmap_ac97_default_volatile);
++
++static int regmap_ac97_reg_read(void *context, unsigned int reg,
++ unsigned int *val)
++{
++ struct snd_ac97 *ac97 = context;
++ *val = ac97->bus->ops->read(ac97, reg);
++
++ return 0;
++}
++
++static int regmap_ac97_reg_write(void *context, unsigned int reg,
++ unsigned int val)
++{
++ struct snd_ac97 *ac97 = context;
++
++ ac97->bus->ops->write(ac97, reg, val);
++
++ return 0;
++}
++
++static const struct regmap_bus ac97_regmap_bus = {
++ .reg_write = regmap_ac97_reg_write,
++ .reg_read = regmap_ac97_reg_read,
++};
++
++/**
++ * regmap_init_ac97(): Initialise AC'97 register map
++ *
++ * @ac97: Device that will be interacted with
++ * @config: Configuration for register map
++ *
++ * The return value will be an ERR_PTR() on error or a valid pointer to
++ * a struct regmap.
++ */
++struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
++ const struct regmap_config *config)
++{
++ return regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
++}
++EXPORT_SYMBOL_GPL(regmap_init_ac97);
++
++/**
++ * devm_regmap_init_ac97(): Initialise AC'97 register map
++ *
++ * @ac97: Device that will be interacted with
++ * @config: Configuration for register map
++ *
++ * The return value will be an ERR_PTR() on error or a valid pointer
++ * to a struct regmap. The regmap will be automatically freed by the
++ * device management code.
++ */
++struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
++ const struct regmap_config *config)
++{
++ return devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
++}
++EXPORT_SYMBOL_GPL(devm_regmap_init_ac97);
++
++MODULE_LICENSE("GPL v2");
+diff -Nur linux-3.14.36/drivers/base/regmap/regmap.c linux-openelec/drivers/base/regmap/regmap.c
+--- linux-3.14.36/drivers/base/regmap/regmap.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/base/regmap/regmap.c 2015-07-24 18:03:30.312842002 -0500
+@@ -35,10 +35,14 @@
+ unsigned int mask, unsigned int val,
+ bool *change);
+
++static int _regmap_bus_reg_read(void *context, unsigned int reg,
++ unsigned int *val);
+ static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val);
+ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val);
++static int _regmap_bus_reg_write(void *context, unsigned int reg,
++ unsigned int val);
+ static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val);
+
+@@ -472,6 +476,12 @@
+
+ map->defer_caching = false;
+ goto skip_format_initialization;
++ } else if (!bus->read || !bus->write) {
++ map->reg_read = _regmap_bus_reg_read;
++ map->reg_write = _regmap_bus_reg_write;
++
++ map->defer_caching = false;
++ goto skip_format_initialization;
+ } else {
+ map->reg_read = _regmap_bus_read;
+ }
+@@ -1267,6 +1277,14 @@
+ return ret;
+ }
+
++static int _regmap_bus_reg_write(void *context, unsigned int reg,
++ unsigned int val)
++{
++ struct regmap *map = context;
++
++ return map->bus->reg_write(map->bus_context, reg, val);
++}
++
+ static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val)
+ {
+@@ -1708,6 +1726,14 @@
+ return ret;
+ }
+
++static int _regmap_bus_reg_read(void *context, unsigned int reg,
++ unsigned int *val)
++{
++ struct regmap *map = context;
++
++ return map->bus->reg_read(map->bus_context, reg, val);
++}
++
+ static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val)
+ {
+diff -Nur linux-3.14.36/drivers/bus/arm-cci.c linux-openelec/drivers/bus/arm-cci.c
+--- linux-3.14.36/drivers/bus/arm-cci.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/bus/arm-cci.c 2015-05-06 12:05:42.000000000 -0500
+@@ -26,6 +26,7 @@
+
+ #include <asm/cacheflush.h>
+ #include <asm/irq_regs.h>
++#include <asm/psci.h>
+ #include <asm/pmu.h>
+ #include <asm/smp_plat.h>
+
+@@ -544,6 +545,7 @@
+
+ cci_pmu->plat_device = pdev;
+ cci_pmu->num_events = pmu_get_max_counters();
++ cpumask_setall(&cci_pmu->valid_cpus);
+
+ return armpmu_register(cci_pmu, -1);
+ }
+@@ -969,6 +971,11 @@
+ const char *match_str;
+ bool is_ace;
+
++ if (psci_probe() == 0) {
++ pr_debug("psci found. Aborting cci probe\n");
++ return -ENODEV;
++ }
++
+ np = of_find_matching_node(NULL, arm_cci_matches);
+ if (!np)
+ return -ENODEV;
+diff -Nur linux-3.14.36/drivers/char/fsl_otp.c linux-openelec/drivers/char/fsl_otp.c
+--- linux-3.14.36/drivers/char/fsl_otp.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/char/fsl_otp.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,316 @@
++/*
++ * Freescale On-Chip OTP driver
++ *
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/kobject.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/sysfs.h>
++#include <linux/fsl_otp.h>
++
++#define HW_OCOTP_CTRL 0x00000000
++#define HW_OCOTP_CTRL_SET 0x00000004
++#define BP_OCOTP_CTRL_WR_UNLOCK 16
++#define BM_OCOTP_CTRL_WR_UNLOCK 0xFFFF0000
++#define BM_OCOTP_CTRL_RELOAD_SHADOWS 0x00000400
++#define BM_OCOTP_CTRL_ERROR 0x00000200
++#define BM_OCOTP_CTRL_BUSY 0x00000100
++#define BP_OCOTP_CTRL_ADDR 0
++#define BM_OCOTP_CTRL_ADDR 0x0000007F
++
++#define HW_OCOTP_TIMING 0x00000010
++#define BP_OCOTP_TIMING_STROBE_READ 16
++#define BM_OCOTP_TIMING_STROBE_READ 0x003F0000
++#define BP_OCOTP_TIMING_RELAX 12
++#define BM_OCOTP_TIMING_RELAX 0x0000F000
++#define BP_OCOTP_TIMING_STROBE_PROG 0
++#define BM_OCOTP_TIMING_STROBE_PROG 0x00000FFF
++
++#define HW_OCOTP_DATA 0x00000020
++
++#define HW_OCOTP_CUST_N(n) (0x00000400 + (n) * 0x10)
++#define BF(value, field) (((value) << BP_##field) & BM_##field)
++
++#define DEF_RELAX 20 /* > 16.5ns */
++
++#define BANK(a, b, c, d, e, f, g, h) { \
++ "HW_OCOTP_"#a, "HW_OCOTP_"#b, "HW_OCOTP_"#c, "HW_OCOTP_"#d, \
++ "HW_OCOTP_"#e, "HW_OCOTP_"#f, "HW_OCOTP_"#g, "HW_OCOTP_"#h, \
++}
++
++static const char *imx6q_otp_desc[16][8] = {
++ BANK(LOCK, CFG0, CFG1, CFG2, CFG3, CFG4, CFG5, CFG6),
++ BANK(MEM0, MEM1, MEM2, MEM3, MEM4, ANA0, ANA1, ANA2),
++ BANK(OTPMK0, OTPMK1, OTPMK2, OTPMK3, OTPMK4, OTPMK5, OTPMK6, OTPMK7),
++ BANK(SRK0, SRK1, SRK2, SRK3, SRK4, SRK5, SRK6, SRK7),
++ BANK(RESP0, HSJC_RESP1, MAC0, MAC1, HDCP_KSV0, HDCP_KSV1, GP1, GP2),
++ BANK(DTCP_KEY0, DTCP_KEY1, DTCP_KEY2, DTCP_KEY3, DTCP_KEY4, MISC_CONF, FIELD_RETURN, SRK_REVOKE),
++ BANK(HDCP_KEY0, HDCP_KEY1, HDCP_KEY2, HDCP_KEY3, HDCP_KEY4, HDCP_KEY5, HDCP_KEY6, HDCP_KEY7),
++ BANK(HDCP_KEY8, HDCP_KEY9, HDCP_KEY10, HDCP_KEY11, HDCP_KEY12, HDCP_KEY13, HDCP_KEY14, HDCP_KEY15),
++ BANK(HDCP_KEY16, HDCP_KEY17, HDCP_KEY18, HDCP_KEY19, HDCP_KEY20, HDCP_KEY21, HDCP_KEY22, HDCP_KEY23),
++ BANK(HDCP_KEY24, HDCP_KEY25, HDCP_KEY26, HDCP_KEY27, HDCP_KEY28, HDCP_KEY29, HDCP_KEY30, HDCP_KEY31),
++ BANK(HDCP_KEY32, HDCP_KEY33, HDCP_KEY34, HDCP_KEY35, HDCP_KEY36, HDCP_KEY37, HDCP_KEY38, HDCP_KEY39),
++ BANK(HDCP_KEY40, HDCP_KEY41, HDCP_KEY42, HDCP_KEY43, HDCP_KEY44, HDCP_KEY45, HDCP_KEY46, HDCP_KEY47),
++ BANK(HDCP_KEY48, HDCP_KEY49, HDCP_KEY50, HDCP_KEY51, HDCP_KEY52, HDCP_KEY53, HDCP_KEY54, HDCP_KEY55),
++ BANK(HDCP_KEY56, HDCP_KEY57, HDCP_KEY58, HDCP_KEY59, HDCP_KEY60, HDCP_KEY61, HDCP_KEY62, HDCP_KEY63),
++ BANK(HDCP_KEY64, HDCP_KEY65, HDCP_KEY66, HDCP_KEY67, HDCP_KEY68, HDCP_KEY69, HDCP_KEY70, HDCP_KEY71),
++ BANK(CRC0, CRC1, CRC2, CRC3, CRC4, CRC5, CRC6, CRC7),
++};
++
++static DEFINE_MUTEX(otp_mutex);
++static void __iomem *otp_base;
++static struct clk *otp_clk;
++struct kobject *otp_kobj;
++struct kobj_attribute *otp_kattr;
++struct attribute_group *otp_attr_group;
++
++static void set_otp_timing(void)
++{
++ unsigned long clk_rate = 0;
++ unsigned long strobe_read, relex, strobe_prog;
++ u32 timing = 0;
++
++ clk_rate = clk_get_rate(otp_clk);
++
++ /* do optimization for too many zeros */
++ relex = clk_rate / (1000000000 / DEF_RELAX) - 1;
++ strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
++ strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
++
++ timing = BF(relex, OCOTP_TIMING_RELAX);
++ timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ);
++ timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);
++
++ __raw_writel(timing, otp_base + HW_OCOTP_TIMING);
++}
++
++static int otp_wait_busy(u32 flags)
++{
++ int count;
++ u32 c;
++
++ for (count = 10000; count >= 0; count--) {
++ c = __raw_readl(otp_base + HW_OCOTP_CTRL);
++ if (!(c & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR | flags)))
++ break;
++ cpu_relax();
++ }
++
++ if (count < 0)
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
++int fsl_otp_readl(unsigned long offset, u32 *value)
++{
++ int ret = 0;
++
++ ret = clk_prepare_enable(otp_clk);
++ if (ret)
++ return ret;
++
++ mutex_lock(&otp_mutex);
++
++ set_otp_timing();
++ ret = otp_wait_busy(0);
++ if (ret)
++ goto out;
++
++ *value = __raw_readl(otp_base + offset);
++
++out:
++ mutex_unlock(&otp_mutex);
++ clk_disable_unprepare(otp_clk);
++ return ret;
++}
++EXPORT_SYMBOL(fsl_otp_readl);
++
++static ssize_t fsl_otp_show(struct kobject *kobj, struct kobj_attribute *attr,
++ char *buf)
++{
++ unsigned int index = attr - otp_kattr;
++ u32 value = 0;
++ int ret;
++
++ ret = fsl_otp_readl(HW_OCOTP_CUST_N(index), &value);
++
++ return ret ? 0 : sprintf(buf, "0x%x\n", value);
++}
++
++#ifdef CONFIG_FSL_OTP_WRITE_ENABLE
++static int otp_write_bits(int addr, u32 data, u32 magic)
++{
++ u32 c; /* for control register */
++
++ /* init the control register */
++ c = __raw_readl(otp_base + HW_OCOTP_CTRL);
++ c &= ~BM_OCOTP_CTRL_ADDR;
++ c |= BF(addr, OCOTP_CTRL_ADDR);
++ c |= BF(magic, OCOTP_CTRL_WR_UNLOCK);
++ __raw_writel(c, otp_base + HW_OCOTP_CTRL);
++
++ /* init the data register */
++ __raw_writel(data, otp_base + HW_OCOTP_DATA);
++ otp_wait_busy(0);
++
++ mdelay(2); /* Write Postamble */
++
++ return 0;
++}
++
++static ssize_t fsl_otp_store(struct kobject *kobj, struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned int index = attr - otp_kattr;
++ u32 value;
++ int ret;
++
++ sscanf(buf, "0x%x", &value);
++
++ ret = clk_prepare_enable(otp_clk);
++ if (ret)
++ return 0;
++
++ mutex_lock(&otp_mutex);
++
++ set_otp_timing();
++ ret = otp_wait_busy(0);
++ if (ret)
++ goto out;
++
++ otp_write_bits(index, value, 0x3e77);
++
++ /* Reload all the shadow registers */
++ __raw_writel(BM_OCOTP_CTRL_RELOAD_SHADOWS,
++ otp_base + HW_OCOTP_CTRL_SET);
++ udelay(1);
++ otp_wait_busy(BM_OCOTP_CTRL_RELOAD_SHADOWS);
++
++out:
++ mutex_unlock(&otp_mutex);
++ clk_disable_unprepare(otp_clk);
++ return ret ? 0 : count;
++}
++#endif
++
++static int fsl_otp_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ struct attribute **attrs;
++ const char **desc;
++ int i, num;
++ int ret;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ otp_base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(otp_base)) {
++ ret = PTR_ERR(otp_base);
++ dev_err(&pdev->dev, "failed to ioremap resource: %d\n", ret);
++ return ret;
++ }
++
++ otp_clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(otp_clk)) {
++ ret = PTR_ERR(otp_clk);
++ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
++ return ret;
++ }
++
++ desc = (const char **) imx6q_otp_desc;
++ num = sizeof(imx6q_otp_desc) / sizeof(void *);
++
++ /* The last one is NULL, which is used to detect the end */
++ attrs = devm_kzalloc(&pdev->dev, (num + 1) * sizeof(*attrs),
++ GFP_KERNEL);
++ otp_kattr = devm_kzalloc(&pdev->dev, num * sizeof(*otp_kattr),
++ GFP_KERNEL);
++ otp_attr_group = devm_kzalloc(&pdev->dev, sizeof(*otp_attr_group),
++ GFP_KERNEL);
++ if (!attrs || !otp_kattr || !otp_attr_group)
++ return -ENOMEM;
++
++ for (i = 0; i < num; i++) {
++ sysfs_attr_init(&otp_kattr[i].attr);
++ otp_kattr[i].attr.name = desc[i];
++#ifdef CONFIG_FSL_OTP_WRITE_ENABLE
++ otp_kattr[i].attr.mode = 0600;
++ otp_kattr[i].store = fsl_otp_store;
++#else
++ otp_kattr[i].attr.mode = 0400;
++#endif
++ otp_kattr[i].show = fsl_otp_show;
++ attrs[i] = &otp_kattr[i].attr;
++ }
++ otp_attr_group->attrs = attrs;
++
++ otp_kobj = kobject_create_and_add("fsl_otp", NULL);
++ if (!otp_kobj) {
++ dev_err(&pdev->dev, "failed to add kobject\n");
++ return -ENOMEM;
++ }
++
++ ret = sysfs_create_group(otp_kobj, otp_attr_group);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to create sysfs group: %d\n", ret);
++ kobject_put(otp_kobj);
++ return ret;
++ }
++
++ mutex_init(&otp_mutex);
++
++ return 0;
++}
++
++static int fsl_otp_remove(struct platform_device *pdev)
++{
++ sysfs_remove_group(otp_kobj, otp_attr_group);
++ kobject_put(otp_kobj);
++
++ return 0;
++}
++
++static const struct of_device_id fsl_otp_dt_ids[] = {
++ { .compatible = "fsl,imx6q-ocotp", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, fsl_otp_dt_ids);
++
++static struct platform_driver fsl_otp_driver = {
++ .driver = {
++ .name = "imx-ocotp",
++ .owner = THIS_MODULE,
++ .of_match_table = fsl_otp_dt_ids,
++ },
++ .probe = fsl_otp_probe,
++ .remove = fsl_otp_remove,
++};
++module_platform_driver(fsl_otp_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Huang Shijie <b32955@freescale.com>");
++MODULE_DESCRIPTION("Freescale i.MX OCOTP driver");
+diff -Nur linux-3.14.36/drivers/char/Kconfig linux-openelec/drivers/char/Kconfig
+--- linux-3.14.36/drivers/char/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/char/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -82,6 +82,21 @@
+
+ If unsure, say N.
+
++config FSL_OTP
++ tristate "Freescale On-Chip OTP Memory Support"
++ depends on HAS_IOMEM && OF
++ help
++ If you say Y here, you will get support for a character device
++ interface into the One Time Programmable memory pages that are
++ stored on the some Freescale i.MX processors. This will not get
++ you access to the secure memory pages however. You will need to
++ write your own secure code and reader for that.
++
++ To compile this driver as a module, choose M here: the module
++ will be called fsl_otp.
++
++ If unsure, it is safe to say Y.
++
+ config PRINTER
+ tristate "Parallel printer support"
+ depends on PARPORT
+diff -Nur linux-3.14.36/drivers/char/Makefile linux-openelec/drivers/char/Makefile
+--- linux-3.14.36/drivers/char/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/char/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -16,6 +16,7 @@
+ obj-$(CONFIG_IBM_BSR) += bsr.o
+ obj-$(CONFIG_SGI_MBCS) += mbcs.o
+ obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
++obj-$(CONFIG_FSL_OTP) += fsl_otp.o
+
+ obj-$(CONFIG_PRINTER) += lp.o
+
+diff -Nur linux-3.14.36/drivers/clk/clk.c linux-openelec/drivers/clk/clk.c
+--- linux-3.14.36/drivers/clk/clk.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/clk/clk.c 2015-07-24 18:03:29.384842002 -0500
+@@ -1707,6 +1707,7 @@
+ */
+ int clk_set_parent(struct clk *clk, struct clk *parent)
+ {
++ struct clk *child;
+ int ret = 0;
+ int p_index = 0;
+ unsigned long p_rate = 0;
+@@ -1733,6 +1734,18 @@
+ goto out;
+ }
+
++ /* check two consecutive basic mux clocks */
++ if (clk->flags & CLK_IS_BASIC_MUX) {
++ hlist_for_each_entry(child, &clk->children, child_node) {
++ if (child->flags & CLK_IS_BASIC_MUX) {
++ pr_err("%s: failed to switch parent of %s due to child mux %s\n",
++ __func__, clk->name, child->name);
++ ret = -EBUSY;
++ goto out;
++ }
++ }
++ }
++
+ /* try finding the new parent index */
+ if (parent) {
+ p_index = clk_fetch_parent_index(clk, parent);
+diff -Nur linux-3.14.36/drivers/clk/clk.c.orig linux-openelec/drivers/clk/clk.c.orig
+--- linux-3.14.36/drivers/clk/clk.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/clk/clk.c.orig 2015-07-24 18:03:28.460842002 -0500
+@@ -0,0 +1,2558 @@
++/*
++ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
++ * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * Standard functionality for the common clock API. See Documentation/clk.txt
++ */
++
++#include <linux/clk-private.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/spinlock.h>
++#include <linux/err.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/of.h>
++#include <linux/device.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++
++#include "clk.h"
++
++static DEFINE_SPINLOCK(enable_lock);
++static DEFINE_MUTEX(prepare_lock);
++
++static struct task_struct *prepare_owner;
++static struct task_struct *enable_owner;
++
++static int prepare_refcnt;
++static int enable_refcnt;
++
++static HLIST_HEAD(clk_root_list);
++static HLIST_HEAD(clk_orphan_list);
++static LIST_HEAD(clk_notifier_list);
++
++/*** locking ***/
++static void clk_prepare_lock(void)
++{
++ if (!mutex_trylock(&prepare_lock)) {
++ if (prepare_owner == current) {
++ prepare_refcnt++;
++ return;
++ }
++ mutex_lock(&prepare_lock);
++ }
++ WARN_ON_ONCE(prepare_owner != NULL);
++ WARN_ON_ONCE(prepare_refcnt != 0);
++ prepare_owner = current;
++ prepare_refcnt = 1;
++}
++
++static void clk_prepare_unlock(void)
++{
++ WARN_ON_ONCE(prepare_owner != current);
++ WARN_ON_ONCE(prepare_refcnt == 0);
++
++ if (--prepare_refcnt)
++ return;
++ prepare_owner = NULL;
++ mutex_unlock(&prepare_lock);
++}
++
++static unsigned long clk_enable_lock(void)
++{
++ unsigned long flags;
++
++ if (!spin_trylock_irqsave(&enable_lock, flags)) {
++ if (enable_owner == current) {
++ enable_refcnt++;
++ return flags;
++ }
++ spin_lock_irqsave(&enable_lock, flags);
++ }
++ WARN_ON_ONCE(enable_owner != NULL);
++ WARN_ON_ONCE(enable_refcnt != 0);
++ enable_owner = current;
++ enable_refcnt = 1;
++ return flags;
++}
++
++static void clk_enable_unlock(unsigned long flags)
++{
++ WARN_ON_ONCE(enable_owner != current);
++ WARN_ON_ONCE(enable_refcnt == 0);
++
++ if (--enable_refcnt)
++ return;
++ enable_owner = NULL;
++ spin_unlock_irqrestore(&enable_lock, flags);
++}
++
++/*** debugfs support ***/
++
++#ifdef CONFIG_DEBUG_FS
++#include <linux/debugfs.h>
++
++static struct dentry *rootdir;
++static struct dentry *orphandir;
++static int inited = 0;
++
++static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
++{
++ if (!c)
++ return;
++
++ seq_printf(s, "%*s%-*s %-11d %-12d %-10lu %-11lu",
++ level * 3 + 1, "",
++ 30 - level * 3, c->name,
++ c->enable_count, c->prepare_count, clk_get_rate(c),
++ clk_get_accuracy(c));
++ seq_printf(s, "\n");
++}
++
++static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
++ int level)
++{
++ struct clk *child;
++
++ if (!c)
++ return;
++
++ clk_summary_show_one(s, c, level);
++
++ hlist_for_each_entry(child, &c->children, child_node)
++ clk_summary_show_subtree(s, child, level + 1);
++}
++
++static int clk_summary_show(struct seq_file *s, void *data)
++{
++ struct clk *c;
++
++ seq_printf(s, " clock enable_cnt prepare_cnt rate accuracy\n");
++ seq_printf(s, "---------------------------------------------------------------------------------\n");
++
++ clk_prepare_lock();
++
++ hlist_for_each_entry(c, &clk_root_list, child_node)
++ clk_summary_show_subtree(s, c, 0);
++
++ hlist_for_each_entry(c, &clk_orphan_list, child_node)
++ clk_summary_show_subtree(s, c, 0);
++
++ clk_prepare_unlock();
++
++ return 0;
++}
++
++
++static int clk_summary_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, clk_summary_show, inode->i_private);
++}
++
++static const struct file_operations clk_summary_fops = {
++ .open = clk_summary_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
++{
++ if (!c)
++ return;
++
++ seq_printf(s, "\"%s\": { ", c->name);
++ seq_printf(s, "\"enable_count\": %d,", c->enable_count);
++ seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
++ seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
++ seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
++}
++
++static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
++{
++ struct clk *child;
++
++ if (!c)
++ return;
++
++ clk_dump_one(s, c, level);
++
++ hlist_for_each_entry(child, &c->children, child_node) {
++ seq_printf(s, ",");
++ clk_dump_subtree(s, child, level + 1);
++ }
++
++ seq_printf(s, "}");
++}
++
++static int clk_dump(struct seq_file *s, void *data)
++{
++ struct clk *c;
++ bool first_node = true;
++
++ seq_printf(s, "{");
++
++ clk_prepare_lock();
++
++ hlist_for_each_entry(c, &clk_root_list, child_node) {
++ if (!first_node)
++ seq_printf(s, ",");
++ first_node = false;
++ clk_dump_subtree(s, c, 0);
++ }
++
++ hlist_for_each_entry(c, &clk_orphan_list, child_node) {
++ seq_printf(s, ",");
++ clk_dump_subtree(s, c, 0);
++ }
++
++ clk_prepare_unlock();
++
++ seq_printf(s, "}");
++ return 0;
++}
++
++
++static int clk_dump_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, clk_dump, inode->i_private);
++}
++
++static const struct file_operations clk_dump_fops = {
++ .open = clk_dump_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++/* caller must hold prepare_lock */
++static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
++{
++ struct dentry *d;
++ int ret = -ENOMEM;
++
++ if (!clk || !pdentry) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ d = debugfs_create_dir(clk->name, pdentry);
++ if (!d)
++ goto out;
++
++ clk->dentry = d;
++
++ d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
++ (u32 *)&clk->rate);
++ if (!d)
++ goto err_out;
++
++ d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
++ (u32 *)&clk->accuracy);
++ if (!d)
++ goto err_out;
++
++ d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
++ (u32 *)&clk->flags);
++ if (!d)
++ goto err_out;
++
++ d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
++ (u32 *)&clk->prepare_count);
++ if (!d)
++ goto err_out;
++
++ d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
++ (u32 *)&clk->enable_count);
++ if (!d)
++ goto err_out;
++
++ d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
++ (u32 *)&clk->notifier_count);
++ if (!d)
++ goto err_out;
++
++ ret = 0;
++ goto out;
++
++err_out:
++ debugfs_remove_recursive(clk->dentry);
++ clk->dentry = NULL;
++out:
++ return ret;
++}
++
++/* caller must hold prepare_lock */
++static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
++{
++ struct clk *child;
++ int ret = -EINVAL;;
++
++ if (!clk || !pdentry)
++ goto out;
++
++ ret = clk_debug_create_one(clk, pdentry);
++
++ if (ret)
++ goto out;
++
++ hlist_for_each_entry(child, &clk->children, child_node)
++ clk_debug_create_subtree(child, clk->dentry);
++
++ ret = 0;
++out:
++ return ret;
++}
++
++/**
++ * clk_debug_register - add a clk node to the debugfs clk tree
++ * @clk: the clk being added to the debugfs clk tree
++ *
++ * Dynamically adds a clk to the debugfs clk tree if debugfs has been
++ * initialized. Otherwise it bails out early since the debugfs clk tree
++ * will be created lazily by clk_debug_init as part of a late_initcall.
++ *
++ * Caller must hold prepare_lock. Only clk_init calls this function (so
++ * far) so this is taken care.
++ */
++static int clk_debug_register(struct clk *clk)
++{
++ struct clk *parent;
++ struct dentry *pdentry;
++ int ret = 0;
++
++ if (!inited)
++ goto out;
++
++ parent = clk->parent;
++
++ /*
++ * Check to see if a clk is a root clk. Also check that it is
++ * safe to add this clk to debugfs
++ */
++ if (!parent)
++ if (clk->flags & CLK_IS_ROOT)
++ pdentry = rootdir;
++ else
++ pdentry = orphandir;
++ else
++ if (parent->dentry)
++ pdentry = parent->dentry;
++ else
++ goto out;
++
++ ret = clk_debug_create_subtree(clk, pdentry);
++
++out:
++ return ret;
++}
++
++ /**
++ * clk_debug_unregister - remove a clk node from the debugfs clk tree
++ * @clk: the clk being removed from the debugfs clk tree
++ *
++ * Dynamically removes a clk and all it's children clk nodes from the
++ * debugfs clk tree if clk->dentry points to debugfs created by
++ * clk_debug_register in __clk_init.
++ *
++ * Caller must hold prepare_lock.
++ */
++static void clk_debug_unregister(struct clk *clk)
++{
++ debugfs_remove_recursive(clk->dentry);
++}
++
++/**
++ * clk_debug_reparent - reparent clk node in the debugfs clk tree
++ * @clk: the clk being reparented
++ * @new_parent: the new clk parent, may be NULL
++ *
++ * Rename clk entry in the debugfs clk tree if debugfs has been
++ * initialized. Otherwise it bails out early since the debugfs clk tree
++ * will be created lazily by clk_debug_init as part of a late_initcall.
++ *
++ * Caller must hold prepare_lock.
++ */
++static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
++{
++ struct dentry *d;
++ struct dentry *new_parent_d;
++
++ if (!inited)
++ return;
++
++ if (new_parent)
++ new_parent_d = new_parent->dentry;
++ else
++ new_parent_d = orphandir;
++
++ d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
++ new_parent_d, clk->name);
++ if (d)
++ clk->dentry = d;
++ else
++ pr_debug("%s: failed to rename debugfs entry for %s\n",
++ __func__, clk->name);
++}
++
++/**
++ * clk_debug_init - lazily create the debugfs clk tree visualization
++ *
++ * clks are often initialized very early during boot before memory can
++ * be dynamically allocated and well before debugfs is setup.
++ * clk_debug_init walks the clk tree hierarchy while holding
++ * prepare_lock and creates the topology as part of a late_initcall,
++ * thus insuring that clks initialized very early will still be
++ * represented in the debugfs clk tree. This function should only be
++ * called once at boot-time, and all other clks added dynamically will
++ * be done so with clk_debug_register.
++ */
++static int __init clk_debug_init(void)
++{
++ struct clk *clk;
++ struct dentry *d;
++
++ rootdir = debugfs_create_dir("clk", NULL);
++
++ if (!rootdir)
++ return -ENOMEM;
++
++ d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
++ &clk_summary_fops);
++ if (!d)
++ return -ENOMEM;
++
++ d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
++ &clk_dump_fops);
++ if (!d)
++ return -ENOMEM;
++
++ orphandir = debugfs_create_dir("orphans", rootdir);
++
++ if (!orphandir)
++ return -ENOMEM;
++
++ clk_prepare_lock();
++
++ hlist_for_each_entry(clk, &clk_root_list, child_node)
++ clk_debug_create_subtree(clk, rootdir);
++
++ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
++ clk_debug_create_subtree(clk, orphandir);
++
++ inited = 1;
++
++ clk_prepare_unlock();
++
++ return 0;
++}
++late_initcall(clk_debug_init);
++#else
++static inline int clk_debug_register(struct clk *clk) { return 0; }
++static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
++{
++}
++static inline void clk_debug_unregister(struct clk *clk)
++{
++}
++#endif
++
++/* caller must hold prepare_lock */
++static void clk_unprepare_unused_subtree(struct clk *clk)
++{
++ struct clk *child;
++
++ if (!clk)
++ return;
++
++ hlist_for_each_entry(child, &clk->children, child_node)
++ clk_unprepare_unused_subtree(child);
++
++ if (clk->prepare_count)
++ return;
++
++ if (clk->flags & CLK_IGNORE_UNUSED)
++ return;
++
++ if (__clk_is_prepared(clk)) {
++ if (clk->ops->unprepare_unused)
++ clk->ops->unprepare_unused(clk->hw);
++ else if (clk->ops->unprepare)
++ clk->ops->unprepare(clk->hw);
++ }
++}
++
++/* caller must hold prepare_lock */
++static void clk_disable_unused_subtree(struct clk *clk)
++{
++ struct clk *child;
++ unsigned long flags;
++
++ if (!clk)
++ goto out;
++
++ hlist_for_each_entry(child, &clk->children, child_node)
++ clk_disable_unused_subtree(child);
++
++ flags = clk_enable_lock();
++
++ if (clk->enable_count)
++ goto unlock_out;
++
++ if (clk->flags & CLK_IGNORE_UNUSED)
++ goto unlock_out;
++
++ /*
++ * some gate clocks have special needs during the disable-unused
++ * sequence. call .disable_unused if available, otherwise fall
++ * back to .disable
++ */
++ if (__clk_is_enabled(clk)) {
++ if (clk->ops->disable_unused)
++ clk->ops->disable_unused(clk->hw);
++ else if (clk->ops->disable)
++ clk->ops->disable(clk->hw);
++ }
++
++unlock_out:
++ clk_enable_unlock(flags);
++
++out:
++ return;
++}
++
++static bool clk_ignore_unused;
++static int __init clk_ignore_unused_setup(char *__unused)
++{
++ clk_ignore_unused = true;
++ return 1;
++}
++__setup("clk_ignore_unused", clk_ignore_unused_setup);
++
++static int clk_disable_unused(void)
++{
++ struct clk *clk;
++
++ if (clk_ignore_unused) {
++ pr_warn("clk: Not disabling unused clocks\n");
++ return 0;
++ }
++
++ clk_prepare_lock();
++
++ hlist_for_each_entry(clk, &clk_root_list, child_node)
++ clk_disable_unused_subtree(clk);
++
++ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
++ clk_disable_unused_subtree(clk);
++
++ hlist_for_each_entry(clk, &clk_root_list, child_node)
++ clk_unprepare_unused_subtree(clk);
++
++ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
++ clk_unprepare_unused_subtree(clk);
++
++ clk_prepare_unlock();
++
++ return 0;
++}
++late_initcall_sync(clk_disable_unused);
++
++/*** helper functions ***/
++
++const char *__clk_get_name(struct clk *clk)
++{
++ return !clk ? NULL : clk->name;
++}
++EXPORT_SYMBOL_GPL(__clk_get_name);
++
++struct clk_hw *__clk_get_hw(struct clk *clk)
++{
++ return !clk ? NULL : clk->hw;
++}
++EXPORT_SYMBOL_GPL(__clk_get_hw);
++
++u8 __clk_get_num_parents(struct clk *clk)
++{
++ return !clk ? 0 : clk->num_parents;
++}
++EXPORT_SYMBOL_GPL(__clk_get_num_parents);
++
++struct clk *__clk_get_parent(struct clk *clk)
++{
++ return !clk ? NULL : clk->parent;
++}
++EXPORT_SYMBOL_GPL(__clk_get_parent);
++
++struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
++{
++ if (!clk || index >= clk->num_parents)
++ return NULL;
++ else if (!clk->parents)
++ return __clk_lookup(clk->parent_names[index]);
++ else if (!clk->parents[index])
++ return clk->parents[index] =
++ __clk_lookup(clk->parent_names[index]);
++ else
++ return clk->parents[index];
++}
++EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
++
++unsigned int __clk_get_enable_count(struct clk *clk)
++{
++ return !clk ? 0 : clk->enable_count;
++}
++
++unsigned int __clk_get_prepare_count(struct clk *clk)
++{
++ return !clk ? 0 : clk->prepare_count;
++}
++
++unsigned long __clk_get_rate(struct clk *clk)
++{
++ unsigned long ret;
++
++ if (!clk) {
++ ret = 0;
++ goto out;
++ }
++
++ ret = clk->rate;
++
++ if (clk->flags & CLK_IS_ROOT)
++ goto out;
++
++ if (!clk->parent)
++ ret = 0;
++
++out:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(__clk_get_rate);
++
++unsigned long __clk_get_accuracy(struct clk *clk)
++{
++ if (!clk)
++ return 0;
++
++ return clk->accuracy;
++}
++
++unsigned long __clk_get_flags(struct clk *clk)
++{
++ return !clk ? 0 : clk->flags;
++}
++EXPORT_SYMBOL_GPL(__clk_get_flags);
++
++bool __clk_is_prepared(struct clk *clk)
++{
++ int ret;
++
++ if (!clk)
++ return false;
++
++ /*
++ * .is_prepared is optional for clocks that can prepare
++ * fall back to software usage counter if it is missing
++ */
++ if (!clk->ops->is_prepared) {
++ ret = clk->prepare_count ? 1 : 0;
++ goto out;
++ }
++
++ ret = clk->ops->is_prepared(clk->hw);
++out:
++ return !!ret;
++}
++
++bool __clk_is_enabled(struct clk *clk)
++{
++ int ret;
++
++ if (!clk)
++ return false;
++
++ /*
++ * .is_enabled is only mandatory for clocks that gate
++ * fall back to software usage counter if .is_enabled is missing
++ */
++ if (!clk->ops->is_enabled) {
++ ret = clk->enable_count ? 1 : 0;
++ goto out;
++ }
++
++ ret = clk->ops->is_enabled(clk->hw);
++out:
++ return !!ret;
++}
++EXPORT_SYMBOL_GPL(__clk_is_enabled);
++
++static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
++{
++ struct clk *child;
++ struct clk *ret;
++
++ if (!strcmp(clk->name, name))
++ return clk;
++
++ hlist_for_each_entry(child, &clk->children, child_node) {
++ ret = __clk_lookup_subtree(name, child);
++ if (ret)
++ return ret;
++ }
++
++ return NULL;
++}
++
++struct clk *__clk_lookup(const char *name)
++{
++ struct clk *root_clk;
++ struct clk *ret;
++
++ if (!name)
++ return NULL;
++
++ /* search the 'proper' clk tree first */
++ hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
++ ret = __clk_lookup_subtree(name, root_clk);
++ if (ret)
++ return ret;
++ }
++
++ /* if not found, then search the orphan tree */
++ hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
++ ret = __clk_lookup_subtree(name, root_clk);
++ if (ret)
++ return ret;
++ }
++
++ return NULL;
++}
++
++/*
++ * Helper for finding best parent to provide a given frequency. This can be used
++ * directly as a determine_rate callback (e.g. for a mux), or from a more
++ * complex clock that may combine a mux with other operations.
++ */
++long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long *best_parent_rate,
++ struct clk **best_parent_p)
++{
++ struct clk *clk = hw->clk, *parent, *best_parent = NULL;
++ int i, num_parents;
++ unsigned long parent_rate, best = 0;
++
++ /* if NO_REPARENT flag set, pass through to current parent */
++ if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
++ parent = clk->parent;
++ if (clk->flags & CLK_SET_RATE_PARENT)
++ best = __clk_round_rate(parent, rate);
++ else if (parent)
++ best = __clk_get_rate(parent);
++ else
++ best = __clk_get_rate(clk);
++ goto out;
++ }
++
++ /* find the parent that can provide the fastest rate <= rate */
++ num_parents = clk->num_parents;
++ for (i = 0; i < num_parents; i++) {
++ parent = clk_get_parent_by_index(clk, i);
++ if (!parent)
++ continue;
++ if (clk->flags & CLK_SET_RATE_PARENT)
++ parent_rate = __clk_round_rate(parent, rate);
++ else
++ parent_rate = __clk_get_rate(parent);
++ if (parent_rate <= rate && parent_rate > best) {
++ best_parent = parent;
++ best = parent_rate;
++ }
++ }
++
++out:
++ if (best_parent)
++ *best_parent_p = best_parent;
++ *best_parent_rate = best;
++
++ return best;
++}
++EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
++
++/*** clk api ***/
++
++void __clk_unprepare(struct clk *clk)
++{
++ if (!clk)
++ return;
++
++ if (WARN_ON(clk->prepare_count == 0))
++ return;
++
++ if (--clk->prepare_count > 0)
++ return;
++
++ WARN_ON(clk->enable_count > 0);
++
++ if (clk->ops->unprepare)
++ clk->ops->unprepare(clk->hw);
++
++ __clk_unprepare(clk->parent);
++}
++
++/**
++ * clk_unprepare - undo preparation of a clock source
++ * @clk: the clk being unprepared
++ *
++ * clk_unprepare may sleep, which differentiates it from clk_disable. In a
++ * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
++ * if the operation may sleep. One example is a clk which is accessed over
++ * I2c. In the complex case a clk gate operation may require a fast and a slow
++ * part. It is this reason that clk_unprepare and clk_disable are not mutually
++ * exclusive. In fact clk_disable must be called before clk_unprepare.
++ */
++void clk_unprepare(struct clk *clk)
++{
++ clk_prepare_lock();
++ __clk_unprepare(clk);
++ clk_prepare_unlock();
++}
++EXPORT_SYMBOL_GPL(clk_unprepare);
++
++int __clk_prepare(struct clk *clk)
++{
++ int ret = 0;
++
++ if (!clk)
++ return 0;
++
++ if (clk->prepare_count == 0) {
++ ret = __clk_prepare(clk->parent);
++ if (ret)
++ return ret;
++
++ if (clk->ops->prepare) {
++ ret = clk->ops->prepare(clk->hw);
++ if (ret) {
++ __clk_unprepare(clk->parent);
++ return ret;
++ }
++ }
++ }
++
++ clk->prepare_count++;
++
++ return 0;
++}
++
++/**
++ * clk_prepare - prepare a clock source
++ * @clk: the clk being prepared
++ *
++ * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
++ * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
++ * operation may sleep. One example is a clk which is accessed over I2c. In
++ * the complex case a clk ungate operation may require a fast and a slow part.
++ * It is this reason that clk_prepare and clk_enable are not mutually
++ * exclusive. In fact clk_prepare must be called before clk_enable.
++ * Returns 0 on success, -EERROR otherwise.
++ */
++int clk_prepare(struct clk *clk)
++{
++ int ret;
++
++ clk_prepare_lock();
++ ret = __clk_prepare(clk);
++ clk_prepare_unlock();
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(clk_prepare);
++
++static void __clk_disable(struct clk *clk)
++{
++ if (!clk)
++ return;
++
++ if (WARN_ON(IS_ERR(clk)))
++ return;
++
++ if (WARN_ON(clk->enable_count == 0))
++ return;
++
++ if (--clk->enable_count > 0)
++ return;
++
++ if (clk->ops->disable)
++ clk->ops->disable(clk->hw);
++
++ __clk_disable(clk->parent);
++}
++
++/**
++ * clk_disable - gate a clock
++ * @clk: the clk being gated
++ *
++ * clk_disable must not sleep, which differentiates it from clk_unprepare. In
++ * a simple case, clk_disable can be used instead of clk_unprepare to gate a
++ * clk if the operation is fast and will never sleep. One example is a
++ * SoC-internal clk which is controlled via simple register writes. In the
++ * complex case a clk gate operation may require a fast and a slow part. It is
++ * this reason that clk_unprepare and clk_disable are not mutually exclusive.
++ * In fact clk_disable must be called before clk_unprepare.
++ */
++void clk_disable(struct clk *clk)
++{
++ unsigned long flags;
++
++ flags = clk_enable_lock();
++ __clk_disable(clk);
++ clk_enable_unlock(flags);
++}
++EXPORT_SYMBOL_GPL(clk_disable);
++
++static int __clk_enable(struct clk *clk)
++{
++ int ret = 0;
++
++ if (!clk)
++ return 0;
++
++ if (WARN_ON(clk->prepare_count == 0))
++ return -ESHUTDOWN;
++
++ if (clk->enable_count == 0) {
++ ret = __clk_enable(clk->parent);
++
++ if (ret)
++ return ret;
++
++ if (clk->ops->enable) {
++ ret = clk->ops->enable(clk->hw);
++ if (ret) {
++ __clk_disable(clk->parent);
++ return ret;
++ }
++ }
++ }
++
++ clk->enable_count++;
++ return 0;
++}
++
++/**
++ * clk_enable - ungate a clock
++ * @clk: the clk being ungated
++ *
++ * clk_enable must not sleep, which differentiates it from clk_prepare. In a
++ * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
++ * if the operation will never sleep. One example is a SoC-internal clk which
++ * is controlled via simple register writes. In the complex case a clk ungate
++ * operation may require a fast and a slow part. It is this reason that
++ * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
++ * must be called before clk_enable. Returns 0 on success, -EERROR
++ * otherwise.
++ */
++int clk_enable(struct clk *clk)
++{
++ unsigned long flags;
++ int ret;
++
++ flags = clk_enable_lock();
++ ret = __clk_enable(clk);
++ clk_enable_unlock(flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(clk_enable);
++
++/**
++ * __clk_round_rate - round the given rate for a clk
++ * @clk: round the rate of this clock
++ * @rate: the rate which is to be rounded
++ *
++ * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
++ */
++unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
++{
++ unsigned long parent_rate = 0;
++ struct clk *parent;
++
++ if (!clk)
++ return 0;
++
++ parent = clk->parent;
++ if (parent)
++ parent_rate = parent->rate;
++
++ if (clk->ops->determine_rate)
++ return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
++ &parent);
++ else if (clk->ops->round_rate)
++ return clk->ops->round_rate(clk->hw, rate, &parent_rate);
++ else if (clk->flags & CLK_SET_RATE_PARENT)
++ return __clk_round_rate(clk->parent, rate);
++ else
++ return clk->rate;
++}
++
++/**
++ * clk_round_rate - round the given rate for a clk
++ * @clk: the clk for which we are rounding a rate
++ * @rate: the rate which is to be rounded
++ *
++ * Takes in a rate as input and rounds it to a rate that the clk can actually
++ * use which is then returned. If clk doesn't support round_rate operation
++ * then the parent rate is returned.
++ */
++long clk_round_rate(struct clk *clk, unsigned long rate)
++{
++ unsigned long ret;
++
++ clk_prepare_lock();
++ ret = __clk_round_rate(clk, rate);
++ clk_prepare_unlock();
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(clk_round_rate);
++
++/**
++ * __clk_notify - call clk notifier chain
++ * @clk: struct clk * that is changing rate
++ * @msg: clk notifier type (see include/linux/clk.h)
++ * @old_rate: old clk rate
++ * @new_rate: new clk rate
++ *
++ * Triggers a notifier call chain on the clk rate-change notification
++ * for 'clk'. Passes a pointer to the struct clk and the previous
++ * and current rates to the notifier callback. Intended to be called by
++ * internal clock code only. Returns NOTIFY_DONE from the last driver
++ * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
++ * a driver returns that.
++ */
++static int __clk_notify(struct clk *clk, unsigned long msg,
++ unsigned long old_rate, unsigned long new_rate)
++{
++ struct clk_notifier *cn;
++ struct clk_notifier_data cnd;
++ int ret = NOTIFY_DONE;
++
++ cnd.clk = clk;
++ cnd.old_rate = old_rate;
++ cnd.new_rate = new_rate;
++
++ list_for_each_entry(cn, &clk_notifier_list, node) {
++ if (cn->clk == clk) {
++ ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
++ &cnd);
++ break;
++ }
++ }
++
++ return ret;
++}
++
++/**
++ * __clk_recalc_accuracies
++ * @clk: first clk in the subtree
++ *
++ * Walks the subtree of clks starting with clk and recalculates accuracies as
++ * it goes. Note that if a clk does not implement the .recalc_accuracy
++ * callback then it is assumed that the clock will take on the accuracy of it's
++ * parent.
++ *
++ * Caller must hold prepare_lock.
++ */
++static void __clk_recalc_accuracies(struct clk *clk)
++{
++ unsigned long parent_accuracy = 0;
++ struct clk *child;
++
++ if (clk->parent)
++ parent_accuracy = clk->parent->accuracy;
++
++ if (clk->ops->recalc_accuracy)
++ clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
++ parent_accuracy);
++ else
++ clk->accuracy = parent_accuracy;
++
++ hlist_for_each_entry(child, &clk->children, child_node)
++ __clk_recalc_accuracies(child);
++}
++
++/**
++ * clk_get_accuracy - return the accuracy of clk
++ * @clk: the clk whose accuracy is being returned
++ *
++ * Simply returns the cached accuracy of the clk, unless
++ * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
++ * issued.
++ * If clk is NULL then returns 0.
++ */
++long clk_get_accuracy(struct clk *clk)
++{
++ unsigned long accuracy;
++
++ clk_prepare_lock();
++ if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
++ __clk_recalc_accuracies(clk);
++
++ accuracy = __clk_get_accuracy(clk);
++ clk_prepare_unlock();
++
++ return accuracy;
++}
++EXPORT_SYMBOL_GPL(clk_get_accuracy);
++
++/**
++ * __clk_recalc_rates
++ * @clk: first clk in the subtree
++ * @msg: notification type (see include/linux/clk.h)
++ *
++ * Walks the subtree of clks starting with clk and recalculates rates as it
++ * goes. Note that if a clk does not implement the .recalc_rate callback then
++ * it is assumed that the clock will take on the rate of its parent.
++ *
++ * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
++ * if necessary.
++ *
++ * Caller must hold prepare_lock.
++ */
++static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
++{
++ unsigned long old_rate;
++ unsigned long parent_rate = 0;
++ struct clk *child;
++
++ old_rate = clk->rate;
++
++ if (clk->parent)
++ parent_rate = clk->parent->rate;
++
++ if (clk->ops->recalc_rate)
++ clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
++ else
++ clk->rate = parent_rate;
++
++ /*
++ * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
++ * & ABORT_RATE_CHANGE notifiers
++ */
++ if (clk->notifier_count && msg)
++ __clk_notify(clk, msg, old_rate, clk->rate);
++
++ hlist_for_each_entry(child, &clk->children, child_node)
++ __clk_recalc_rates(child, msg);
++}
++
++/**
++ * clk_get_rate - return the rate of clk
++ * @clk: the clk whose rate is being returned
++ *
++ * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
++ * is set, which means a recalc_rate will be issued.
++ * If clk is NULL then returns 0.
++ */
++unsigned long clk_get_rate(struct clk *clk)
++{
++ unsigned long rate;
++
++ clk_prepare_lock();
++
++ if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
++ __clk_recalc_rates(clk, 0);
++
++ rate = __clk_get_rate(clk);
++ clk_prepare_unlock();
++
++ return rate;
++}
++EXPORT_SYMBOL_GPL(clk_get_rate);
++
++static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
++{
++ int i;
++
++ if (!clk->parents) {
++ clk->parents = kcalloc(clk->num_parents,
++ sizeof(struct clk *), GFP_KERNEL);
++ if (!clk->parents)
++ return -ENOMEM;
++ }
++
++ /*
++ * find index of new parent clock using cached parent ptrs,
++ * or if not yet cached, use string name comparison and cache
++ * them now to avoid future calls to __clk_lookup.
++ */
++ for (i = 0; i < clk->num_parents; i++) {
++ if (clk->parents[i] == parent)
++ return i;
++
++ if (clk->parents[i])
++ continue;
++
++ if (!strcmp(clk->parent_names[i], parent->name)) {
++ clk->parents[i] = __clk_lookup(parent->name);
++ return i;
++ }
++ }
++
++ return -EINVAL;
++}
++
++static void clk_reparent(struct clk *clk, struct clk *new_parent)
++{
++ hlist_del(&clk->child_node);
++
++ if (new_parent) {
++ /* avoid duplicate POST_RATE_CHANGE notifications */
++ if (new_parent->new_child == clk)
++ new_parent->new_child = NULL;
++
++ hlist_add_head(&clk->child_node, &new_parent->children);
++ } else {
++ hlist_add_head(&clk->child_node, &clk_orphan_list);
++ }
++
++ clk->parent = new_parent;
++}
++
++static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
++{
++ unsigned long flags;
++ struct clk *old_parent = clk->parent;
++
++ /*
++ * Migrate prepare state between parents and prevent race with
++ * clk_enable().
++ *
++ * If the clock is not prepared, then a race with
++ * clk_enable/disable() is impossible since we already have the
++ * prepare lock (future calls to clk_enable() need to be preceded by
++ * a clk_prepare()).
++ *
++ * If the clock is prepared, migrate the prepared state to the new
++ * parent and also protect against a race with clk_enable() by
++ * forcing the clock and the new parent on. This ensures that all
++ * future calls to clk_enable() are practically NOPs with respect to
++ * hardware and software states.
++ *
++ * See also: Comment for clk_set_parent() below.
++ */
++ if (clk->prepare_count) {
++ __clk_prepare(parent);
++ clk_enable(parent);
++ clk_enable(clk);
++ }
++
++ /* update the clk tree topology */
++ flags = clk_enable_lock();
++ clk_reparent(clk, parent);
++ clk_enable_unlock(flags);
++
++ return old_parent;
++}
++
++static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
++ struct clk *old_parent)
++{
++ /*
++ * Finish the migration of prepare state and undo the changes done
++ * for preventing a race with clk_enable().
++ */
++ if (clk->prepare_count) {
++ clk_disable(clk);
++ clk_disable(old_parent);
++ __clk_unprepare(old_parent);
++ }
++
++ /* update debugfs with new clk tree topology */
++ clk_debug_reparent(clk, parent);
++}
++
++static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
++{
++ unsigned long flags;
++ int ret = 0;
++ struct clk *old_parent;
++
++ old_parent = __clk_set_parent_before(clk, parent);
++
++ /* change clock input source */
++ if (parent && clk->ops->set_parent)
++ ret = clk->ops->set_parent(clk->hw, p_index);
++
++ if (ret) {
++ flags = clk_enable_lock();
++ clk_reparent(clk, old_parent);
++ clk_enable_unlock(flags);
++
++ if (clk->prepare_count) {
++ clk_disable(clk);
++ clk_disable(parent);
++ __clk_unprepare(parent);
++ }
++ return ret;
++ }
++
++ __clk_set_parent_after(clk, parent, old_parent);
++
++ return 0;
++}
++
++/**
++ * __clk_speculate_rates
++ * @clk: first clk in the subtree
++ * @parent_rate: the "future" rate of clk's parent
++ *
++ * Walks the subtree of clks starting with clk, speculating rates as it
++ * goes and firing off PRE_RATE_CHANGE notifications as necessary.
++ *
++ * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
++ * pre-rate change notifications and returns early if no clks in the
++ * subtree have subscribed to the notifications. Note that if a clk does not
++ * implement the .recalc_rate callback then it is assumed that the clock will
++ * take on the rate of its parent.
++ *
++ * Caller must hold prepare_lock.
++ */
++static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
++{
++ struct clk *child;
++ unsigned long new_rate;
++ int ret = NOTIFY_DONE;
++
++ if (clk->ops->recalc_rate)
++ new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
++ else
++ new_rate = parent_rate;
++
++ /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
++ if (clk->notifier_count)
++ ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
++
++ if (ret & NOTIFY_STOP_MASK)
++ goto out;
++
++ hlist_for_each_entry(child, &clk->children, child_node) {
++ ret = __clk_speculate_rates(child, new_rate);
++ if (ret & NOTIFY_STOP_MASK)
++ break;
++ }
++
++out:
++ return ret;
++}
++
++static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
++ struct clk *new_parent, u8 p_index)
++{
++ struct clk *child;
++
++ clk->new_rate = new_rate;
++ clk->new_parent = new_parent;
++ clk->new_parent_index = p_index;
++ /* include clk in new parent's PRE_RATE_CHANGE notifications */
++ clk->new_child = NULL;
++ if (new_parent && new_parent != clk->parent)
++ new_parent->new_child = clk;
++
++ hlist_for_each_entry(child, &clk->children, child_node) {
++ if (child->ops->recalc_rate)
++ child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
++ else
++ child->new_rate = new_rate;
++ clk_calc_subtree(child, child->new_rate, NULL, 0);
++ }
++}
++
++/*
++ * calculate the new rates returning the topmost clock that has to be
++ * changed.
++ */
++static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
++{
++ struct clk *top = clk;
++ struct clk *old_parent, *parent;
++ unsigned long best_parent_rate = 0;
++ unsigned long new_rate;
++ int p_index = 0;
++
++ /* sanity */
++ if (IS_ERR_OR_NULL(clk))
++ return NULL;
++
++ /* save parent rate, if it exists */
++ parent = old_parent = clk->parent;
++ if (parent)
++ best_parent_rate = parent->rate;
++
++ /* find the closest rate and parent clk/rate */
++ if (clk->ops->determine_rate) {
++ new_rate = clk->ops->determine_rate(clk->hw, rate,
++ &best_parent_rate,
++ &parent);
++ } else if (clk->ops->round_rate) {
++ new_rate = clk->ops->round_rate(clk->hw, rate,
++ &best_parent_rate);
++ } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
++ /* pass-through clock without adjustable parent */
++ clk->new_rate = clk->rate;
++ return NULL;
++ } else {
++ /* pass-through clock with adjustable parent */
++ top = clk_calc_new_rates(parent, rate);
++ new_rate = parent->new_rate;
++ goto out;
++ }
++
++ /* some clocks must be gated to change parent */
++ if (parent != old_parent &&
++ (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
++ pr_debug("%s: %s not gated but wants to reparent\n",
++ __func__, clk->name);
++ return NULL;
++ }
++
++ /* try finding the new parent index */
++ if (parent) {
++ p_index = clk_fetch_parent_index(clk, parent);
++ if (p_index < 0) {
++ pr_debug("%s: clk %s can not be parent of clk %s\n",
++ __func__, parent->name, clk->name);
++ return NULL;
++ }
++ }
++
++ if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
++ best_parent_rate != parent->rate)
++ top = clk_calc_new_rates(parent, best_parent_rate);
++
++out:
++ clk_calc_subtree(clk, new_rate, parent, p_index);
++
++ return top;
++}
++
++/*
++ * Notify about rate changes in a subtree. Always walk down the whole tree
++ * so that in case of an error we can walk down the whole tree again and
++ * abort the change.
++ */
++static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
++{
++ struct clk *child, *tmp_clk, *fail_clk = NULL;
++ int ret = NOTIFY_DONE;
++
++ if (clk->rate == clk->new_rate)
++ return NULL;
++
++ if (clk->notifier_count) {
++ ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
++ if (ret & NOTIFY_STOP_MASK)
++ fail_clk = clk;
++ }
++
++ hlist_for_each_entry(child, &clk->children, child_node) {
++ /* Skip children who will be reparented to another clock */
++ if (child->new_parent && child->new_parent != clk)
++ continue;
++ tmp_clk = clk_propagate_rate_change(child, event);
++ if (tmp_clk)
++ fail_clk = tmp_clk;
++ }
++
++ /* handle the new child who might not be in clk->children yet */
++ if (clk->new_child) {
++ tmp_clk = clk_propagate_rate_change(clk->new_child, event);
++ if (tmp_clk)
++ fail_clk = tmp_clk;
++ }
++
++ return fail_clk;
++}
++
++/*
++ * walk down a subtree and set the new rates notifying the rate
++ * change on the way
++ */
++static void clk_change_rate(struct clk *clk)
++{
++ struct clk *child;
++ struct hlist_node *tmp;
++ unsigned long old_rate;
++ unsigned long best_parent_rate = 0;
++ bool skip_set_rate = false;
++ struct clk *old_parent;
++
++ old_rate = clk->rate;
++
++ if (clk->new_parent)
++ best_parent_rate = clk->new_parent->rate;
++ else if (clk->parent)
++ best_parent_rate = clk->parent->rate;
++
++ if (clk->new_parent && clk->new_parent != clk->parent) {
++ old_parent = __clk_set_parent_before(clk, clk->new_parent);
++
++ if (clk->ops->set_rate_and_parent) {
++ skip_set_rate = true;
++ clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
++ best_parent_rate,
++ clk->new_parent_index);
++ } else if (clk->ops->set_parent) {
++ clk->ops->set_parent(clk->hw, clk->new_parent_index);
++ }
++
++ __clk_set_parent_after(clk, clk->new_parent, old_parent);
++ }
++
++ if (!skip_set_rate && clk->ops->set_rate)
++ clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
++
++ if (clk->ops->recalc_rate)
++ clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
++ else
++ clk->rate = best_parent_rate;
++
++ if (clk->notifier_count && old_rate != clk->rate)
++ __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
++
++ /*
++ * Use safe iteration, as change_rate can actually swap parents
++ * for certain clock types.
++ */
++ hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
++ /* Skip children who will be reparented to another clock */
++ if (child->new_parent && child->new_parent != clk)
++ continue;
++ clk_change_rate(child);
++ }
++
++ /* handle the new child who might not be in clk->children yet */
++ if (clk->new_child)
++ clk_change_rate(clk->new_child);
++}
++
++/**
++ * clk_set_rate - specify a new rate for clk
++ * @clk: the clk whose rate is being changed
++ * @rate: the new rate for clk
++ *
++ * In the simplest case clk_set_rate will only adjust the rate of clk.
++ *
++ * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
++ * propagate up to clk's parent; whether or not this happens depends on the
++ * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
++ * after calling .round_rate then upstream parent propagation is ignored. If
++ * *parent_rate comes back with a new rate for clk's parent then we propagate
++ * up to clk's parent and set its rate. Upward propagation will continue
++ * until either a clk does not support the CLK_SET_RATE_PARENT flag or
++ * .round_rate stops requesting changes to clk's parent_rate.
++ *
++ * Rate changes are accomplished via tree traversal that also recalculates the
++ * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
++ *
++ * Returns 0 on success, -EERROR otherwise.
++ */
++int clk_set_rate(struct clk *clk, unsigned long rate)
++{
++ struct clk *top, *fail_clk;
++ int ret = 0;
++
++ if (!clk)
++ return 0;
++
++ /* prevent racing with updates to the clock topology */
++ clk_prepare_lock();
++
++ /* bail early if nothing to do */
++ if (rate == clk_get_rate(clk))
++ goto out;
++
++ if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
++ ret = -EBUSY;
++ goto out;
++ }
++
++ /* calculate new rates and get the topmost changed clock */
++ top = clk_calc_new_rates(clk, rate);
++ if (!top) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* notify that we are about to change rates */
++ fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
++ if (fail_clk) {
++ pr_warn("%s: failed to set %s rate\n", __func__,
++ fail_clk->name);
++ clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
++ ret = -EBUSY;
++ goto out;
++ }
++
++ /* change the rates */
++ clk_change_rate(top);
++
++out:
++ clk_prepare_unlock();
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(clk_set_rate);
++
++/**
++ * clk_get_parent - return the parent of a clk
++ * @clk: the clk whose parent gets returned
++ *
++ * Simply returns clk->parent. Returns NULL if clk is NULL.
++ */
++struct clk *clk_get_parent(struct clk *clk)
++{
++ struct clk *parent;
++
++ clk_prepare_lock();
++ parent = __clk_get_parent(clk);
++ clk_prepare_unlock();
++
++ return parent;
++}
++EXPORT_SYMBOL_GPL(clk_get_parent);
++
++/*
++ * .get_parent is mandatory for clocks with multiple possible parents. It is
++ * optional for single-parent clocks. Always call .get_parent if it is
++ * available and WARN if it is missing for multi-parent clocks.
++ *
++ * For single-parent clocks without .get_parent, first check to see if the
++ * .parents array exists, and if so use it to avoid an expensive tree
++ * traversal. If .parents does not exist then walk the tree with __clk_lookup.
++ */
++static struct clk *__clk_init_parent(struct clk *clk)
++{
++ struct clk *ret = NULL;
++ u8 index;
++
++ /* handle the trivial cases */
++
++ if (!clk->num_parents)
++ goto out;
++
++ if (clk->num_parents == 1) {
++ if (IS_ERR_OR_NULL(clk->parent))
++ ret = clk->parent = __clk_lookup(clk->parent_names[0]);
++ ret = clk->parent;
++ goto out;
++ }
++
++ if (!clk->ops->get_parent) {
++ WARN(!clk->ops->get_parent,
++ "%s: multi-parent clocks must implement .get_parent\n",
++ __func__);
++ goto out;
++ };
++
++ /*
++ * Do our best to cache parent clocks in clk->parents. This prevents
++ * unnecessary and expensive calls to __clk_lookup. We don't set
++ * clk->parent here; that is done by the calling function
++ */
++
++ index = clk->ops->get_parent(clk->hw);
++
++ if (!clk->parents)
++ clk->parents =
++ kcalloc(clk->num_parents, sizeof(struct clk *),
++ GFP_KERNEL);
++
++ ret = clk_get_parent_by_index(clk, index);
++
++out:
++ return ret;
++}
++
++void __clk_reparent(struct clk *clk, struct clk *new_parent)
++{
++ clk_reparent(clk, new_parent);
++ clk_debug_reparent(clk, new_parent);
++ __clk_recalc_accuracies(clk);
++ __clk_recalc_rates(clk, POST_RATE_CHANGE);
++}
++
++/**
++ * clk_set_parent - switch the parent of a mux clk
++ * @clk: the mux clk whose input we are switching
++ * @parent: the new input to clk
++ *
++ * Re-parent clk to use parent as its new input source. If clk is in
++ * prepared state, the clk will get enabled for the duration of this call. If
++ * that's not acceptable for a specific clk (Eg: the consumer can't handle
++ * that, the reparenting is glitchy in hardware, etc), use the
++ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
++ *
++ * After successfully changing clk's parent clk_set_parent will update the
++ * clk topology, sysfs topology and propagate rate recalculation via
++ * __clk_recalc_rates.
++ *
++ * Returns 0 on success, -EERROR otherwise.
++ */
++int clk_set_parent(struct clk *clk, struct clk *parent)
++{
++ struct clk *child;
++ int ret = 0;
++ int p_index = 0;
++ unsigned long p_rate = 0;
++
++ if (!clk)
++ return 0;
++
++ if (!clk->ops)
++ return -EINVAL;
++
++ /* verify ops for for multi-parent clks */
++ if ((clk->num_parents > 1) && (!clk->ops->set_parent))
++ return -ENOSYS;
++
++ /* prevent racing with updates to the clock topology */
++ clk_prepare_lock();
++
++ if (clk->parent == parent)
++ goto out;
++
++ /* check that we are allowed to re-parent if the clock is in use */
++ if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
++ ret = -EBUSY;
++ goto out;
++ }
++
++ /* check two consecutive basic mux clocks */
++ if (clk->flags & CLK_IS_BASIC_MUX) {
++ hlist_for_each_entry(child, &clk->children, child_node) {
++ if (child->flags & CLK_IS_BASIC_MUX) {
++ pr_err("%s: failed to switch parent of %s due to child mux %s\n",
++ __func__, clk->name, child->name);
++ ret = -EBUSY;
++ goto out;
++ }
++ }
++ }
++
++ /* try finding the new parent index */
++ if (parent) {
++ p_index = clk_fetch_parent_index(clk, parent);
++ p_rate = parent->rate;
++ if (p_index < 0) {
++ pr_debug("%s: clk %s can not be parent of clk %s\n",
++ __func__, parent->name, clk->name);
++ ret = p_index;
++ goto out;
++ }
++ }
++
++ /* propagate PRE_RATE_CHANGE notifications */
++ ret = __clk_speculate_rates(clk, p_rate);
++
++ /* abort if a driver objects */
++ if (ret & NOTIFY_STOP_MASK)
++ goto out;
++
++ /* do the re-parent */
++ ret = __clk_set_parent(clk, parent, p_index);
++
++ /* propagate rate an accuracy recalculation accordingly */
++ if (ret) {
++ __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
++ } else {
++ __clk_recalc_rates(clk, POST_RATE_CHANGE);
++ __clk_recalc_accuracies(clk);
++ }
++
++out:
++ clk_prepare_unlock();
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(clk_set_parent);
++
++/**
++ * __clk_init - initialize the data structures in a struct clk
++ * @dev: device initializing this clk, placeholder for now
++ * @clk: clk being initialized
++ *
++ * Initializes the lists in struct clk, queries the hardware for the
++ * parent and rate and sets them both.
++ */
++int __clk_init(struct device *dev, struct clk *clk)
++{
++ int i, ret = 0;
++ struct clk *orphan;
++ struct hlist_node *tmp2;
++
++ if (!clk)
++ return -EINVAL;
++
++ clk_prepare_lock();
++
++ /* check to see if a clock with this name is already registered */
++ if (__clk_lookup(clk->name)) {
++ pr_debug("%s: clk %s already initialized\n",
++ __func__, clk->name);
++ ret = -EEXIST;
++ goto out;
++ }
++
++ /* check that clk_ops are sane. See Documentation/clk.txt */
++ if (clk->ops->set_rate &&
++ !((clk->ops->round_rate || clk->ops->determine_rate) &&
++ clk->ops->recalc_rate)) {
++ pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
++ __func__, clk->name);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (clk->ops->set_parent && !clk->ops->get_parent) {
++ pr_warning("%s: %s must implement .get_parent & .set_parent\n",
++ __func__, clk->name);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (clk->ops->set_rate_and_parent &&
++ !(clk->ops->set_parent && clk->ops->set_rate)) {
++ pr_warn("%s: %s must implement .set_parent & .set_rate\n",
++ __func__, clk->name);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* throw a WARN if any entries in parent_names are NULL */
++ for (i = 0; i < clk->num_parents; i++)
++ WARN(!clk->parent_names[i],
++ "%s: invalid NULL in %s's .parent_names\n",
++ __func__, clk->name);
++
++ /*
++ * Allocate an array of struct clk *'s to avoid unnecessary string
++ * look-ups of clk's possible parents. This can fail for clocks passed
++ * in to clk_init during early boot; thus any access to clk->parents[]
++ * must always check for a NULL pointer and try to populate it if
++ * necessary.
++ *
++ * If clk->parents is not NULL we skip this entire block. This allows
++ * for clock drivers to statically initialize clk->parents.
++ */
++ if (clk->num_parents > 1 && !clk->parents) {
++ clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
++ GFP_KERNEL);
++ /*
++ * __clk_lookup returns NULL for parents that have not been
++ * clk_init'd; thus any access to clk->parents[] must check
++ * for a NULL pointer. We can always perform lazy lookups for
++ * missing parents later on.
++ */
++ if (clk->parents)
++ for (i = 0; i < clk->num_parents; i++)
++ clk->parents[i] =
++ __clk_lookup(clk->parent_names[i]);
++ }
++
++ clk->parent = __clk_init_parent(clk);
++
++ /*
++ * Populate clk->parent if parent has already been __clk_init'd. If
++ * parent has not yet been __clk_init'd then place clk in the orphan
++ * list. If clk has set the CLK_IS_ROOT flag then place it in the root
++ * clk list.
++ *
++ * Every time a new clk is clk_init'd then we walk the list of orphan
++ * clocks and re-parent any that are children of the clock currently
++ * being clk_init'd.
++ */
++ if (clk->parent)
++ hlist_add_head(&clk->child_node,
++ &clk->parent->children);
++ else if (clk->flags & CLK_IS_ROOT)
++ hlist_add_head(&clk->child_node, &clk_root_list);
++ else
++ hlist_add_head(&clk->child_node, &clk_orphan_list);
++
++ /*
++ * Set clk's accuracy. The preferred method is to use
++ * .recalc_accuracy. For simple clocks and lazy developers the default
++ * fallback is to use the parent's accuracy. If a clock doesn't have a
++ * parent (or is orphaned) then accuracy is set to zero (perfect
++ * clock).
++ */
++ if (clk->ops->recalc_accuracy)
++ clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
++ __clk_get_accuracy(clk->parent));
++ else if (clk->parent)
++ clk->accuracy = clk->parent->accuracy;
++ else
++ clk->accuracy = 0;
++
++ /*
++ * Set clk's rate. The preferred method is to use .recalc_rate. For
++ * simple clocks and lazy developers the default fallback is to use the
++ * parent's rate. If a clock doesn't have a parent (or is orphaned)
++ * then rate is set to zero.
++ */
++ if (clk->ops->recalc_rate)
++ clk->rate = clk->ops->recalc_rate(clk->hw,
++ __clk_get_rate(clk->parent));
++ else if (clk->parent)
++ clk->rate = clk->parent->rate;
++ else
++ clk->rate = 0;
++
++ clk_debug_register(clk);
++ /*
++ * walk the list of orphan clocks and reparent any that are children of
++ * this clock
++ */
++ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
++ if (orphan->num_parents && orphan->ops->get_parent) {
++ i = orphan->ops->get_parent(orphan->hw);
++ if (!strcmp(clk->name, orphan->parent_names[i]))
++ __clk_reparent(orphan, clk);
++ continue;
++ }
++
++ for (i = 0; i < orphan->num_parents; i++)
++ if (!strcmp(clk->name, orphan->parent_names[i])) {
++ __clk_reparent(orphan, clk);
++ break;
++ }
++ }
++
++ /*
++ * optional platform-specific magic
++ *
++ * The .init callback is not used by any of the basic clock types, but
++ * exists for weird hardware that must perform initialization magic.
++ * Please consider other ways of solving initialization problems before
++ * using this callback, as its use is discouraged.
++ */
++ if (clk->ops->init)
++ clk->ops->init(clk->hw);
++
++ kref_init(&clk->ref);
++out:
++ clk_prepare_unlock();
++
++ return ret;
++}
++
++/**
++ * __clk_register - register a clock and return a cookie.
++ *
++ * Same as clk_register, except that the .clk field inside hw shall point to a
++ * preallocated (generally statically allocated) struct clk. None of the fields
++ * of the struct clk need to be initialized.
++ *
++ * The data pointed to by .init and .clk field shall NOT be marked as init
++ * data.
++ *
++ * __clk_register is only exposed via clk-private.h and is intended for use with
++ * very large numbers of clocks that need to be statically initialized. It is
++ * a layering violation to include clk-private.h from any code which implements
++ * a clock's .ops; as such any statically initialized clock data MUST be in a
++ * separate C file from the logic that implements its operations. Returns 0
++ * on success, otherwise an error code.
++ */
++struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
++{
++ int ret;
++ struct clk *clk;
++
++ clk = hw->clk;
++ clk->name = hw->init->name;
++ clk->ops = hw->init->ops;
++ clk->hw = hw;
++ clk->flags = hw->init->flags;
++ clk->parent_names = hw->init->parent_names;
++ clk->num_parents = hw->init->num_parents;
++ if (dev && dev->driver)
++ clk->owner = dev->driver->owner;
++ else
++ clk->owner = NULL;
++
++ ret = __clk_init(dev, clk);
++ if (ret)
++ return ERR_PTR(ret);
++
++ return clk;
++}
++EXPORT_SYMBOL_GPL(__clk_register);
++
++/**
++ * clk_register - allocate a new clock, register it and return an opaque cookie
++ * @dev: device that is registering this clock
++ * @hw: link to hardware-specific clock data
++ *
++ * clk_register is the primary interface for populating the clock tree with new
++ * clock nodes. It returns a pointer to the newly allocated struct clk which
++ * cannot be dereferenced by driver code but may be used in conjuction with the
++ * rest of the clock API. In the event of an error clk_register will return an
++ * error code; drivers must test for an error code after calling clk_register.
++ */
++struct clk *clk_register(struct device *dev, struct clk_hw *hw)
++{
++ int i, ret;
++ struct clk *clk;
++
++ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
++ if (!clk) {
++ pr_err("%s: could not allocate clk\n", __func__);
++ ret = -ENOMEM;
++ goto fail_out;
++ }
++
++ clk->name = kstrdup(hw->init->name, GFP_KERNEL);
++ if (!clk->name) {
++ pr_err("%s: could not allocate clk->name\n", __func__);
++ ret = -ENOMEM;
++ goto fail_name;
++ }
++ clk->ops = hw->init->ops;
++ if (dev && dev->driver)
++ clk->owner = dev->driver->owner;
++ clk->hw = hw;
++ clk->flags = hw->init->flags;
++ clk->num_parents = hw->init->num_parents;
++ hw->clk = clk;
++
++ /* allocate local copy in case parent_names is __initdata */
++ clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
++ GFP_KERNEL);
++
++ if (!clk->parent_names) {
++ pr_err("%s: could not allocate clk->parent_names\n", __func__);
++ ret = -ENOMEM;
++ goto fail_parent_names;
++ }
++
++
++ /* copy each string name in case parent_names is __initdata */
++ for (i = 0; i < clk->num_parents; i++) {
++ clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
++ GFP_KERNEL);
++ if (!clk->parent_names[i]) {
++ pr_err("%s: could not copy parent_names\n", __func__);
++ ret = -ENOMEM;
++ goto fail_parent_names_copy;
++ }
++ }
++
++ ret = __clk_init(dev, clk);
++ if (!ret)
++ return clk;
++
++fail_parent_names_copy:
++ while (--i >= 0)
++ kfree(clk->parent_names[i]);
++ kfree(clk->parent_names);
++fail_parent_names:
++ kfree(clk->name);
++fail_name:
++ kfree(clk);
++fail_out:
++ return ERR_PTR(ret);
++}
++EXPORT_SYMBOL_GPL(clk_register);
++
++/*
++ * Free memory allocated for a clock.
++ * Caller must hold prepare_lock.
++ */
++static void __clk_release(struct kref *ref)
++{
++ struct clk *clk = container_of(ref, struct clk, ref);
++ int i = clk->num_parents;
++
++ kfree(clk->parents);
++ while (--i >= 0)
++ kfree(clk->parent_names[i]);
++
++ kfree(clk->parent_names);
++ kfree(clk->name);
++ kfree(clk);
++}
++
++/*
++ * Empty clk_ops for unregistered clocks. These are used temporarily
++ * after clk_unregister() was called on a clock and until last clock
++ * consumer calls clk_put() and the struct clk object is freed.
++ */
++static int clk_nodrv_prepare_enable(struct clk_hw *hw)
++{
++ return -ENXIO;
++}
++
++static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
++{
++ WARN_ON_ONCE(1);
++}
++
++static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long parent_rate)
++{
++ return -ENXIO;
++}
++
++static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
++{
++ return -ENXIO;
++}
++
++static const struct clk_ops clk_nodrv_ops = {
++ .enable = clk_nodrv_prepare_enable,
++ .disable = clk_nodrv_disable_unprepare,
++ .prepare = clk_nodrv_prepare_enable,
++ .unprepare = clk_nodrv_disable_unprepare,
++ .set_rate = clk_nodrv_set_rate,
++ .set_parent = clk_nodrv_set_parent,
++};
++
++/**
++ * clk_unregister - unregister a currently registered clock
++ * @clk: clock to unregister
++ */
++void clk_unregister(struct clk *clk)
++{
++ unsigned long flags;
++
++ if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
++ return;
++
++ clk_prepare_lock();
++
++ if (clk->ops == &clk_nodrv_ops) {
++ pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
++ goto out;
++ }
++ /*
++ * Assign empty clock ops for consumers that might still hold
++ * a reference to this clock.
++ */
++ flags = clk_enable_lock();
++ clk->ops = &clk_nodrv_ops;
++ clk_enable_unlock(flags);
++
++ if (!hlist_empty(&clk->children)) {
++ struct clk *child;
++ struct hlist_node *t;
++
++ /* Reparent all children to the orphan list. */
++ hlist_for_each_entry_safe(child, t, &clk->children, child_node)
++ clk_set_parent(child, NULL);
++ }
++
++ clk_debug_unregister(clk);
++
++ hlist_del_init(&clk->child_node);
++
++ if (clk->prepare_count)
++ pr_warn("%s: unregistering prepared clock: %s\n",
++ __func__, clk->name);
++
++ kref_put(&clk->ref, __clk_release);
++out:
++ clk_prepare_unlock();
++}
++EXPORT_SYMBOL_GPL(clk_unregister);
++
++static void devm_clk_release(struct device *dev, void *res)
++{
++ clk_unregister(*(struct clk **)res);
++}
++
++/**
++ * devm_clk_register - resource managed clk_register()
++ * @dev: device that is registering this clock
++ * @hw: link to hardware-specific clock data
++ *
++ * Managed clk_register(). Clocks returned from this function are
++ * automatically clk_unregister()ed on driver detach. See clk_register() for
++ * more information.
++ */
++struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
++{
++ struct clk *clk;
++ struct clk **clkp;
++
++ clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
++ if (!clkp)
++ return ERR_PTR(-ENOMEM);
++
++ clk = clk_register(dev, hw);
++ if (!IS_ERR(clk)) {
++ *clkp = clk;
++ devres_add(dev, clkp);
++ } else {
++ devres_free(clkp);
++ }
++
++ return clk;
++}
++EXPORT_SYMBOL_GPL(devm_clk_register);
++
++static int devm_clk_match(struct device *dev, void *res, void *data)
++{
++ struct clk *c = res;
++ if (WARN_ON(!c))
++ return 0;
++ return c == data;
++}
++
++/**
++ * devm_clk_unregister - resource managed clk_unregister()
++ * @clk: clock to unregister
++ *
++ * Deallocate a clock allocated with devm_clk_register(). Normally
++ * this function will not need to be called and the resource management
++ * code will ensure that the resource is freed.
++ */
++void devm_clk_unregister(struct device *dev, struct clk *clk)
++{
++ WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
++}
++EXPORT_SYMBOL_GPL(devm_clk_unregister);
++
++/*
++ * clkdev helpers
++ */
++int __clk_get(struct clk *clk)
++{
++ if (clk) {
++ if (!try_module_get(clk->owner))
++ return 0;
++
++ kref_get(&clk->ref);
++ }
++ return 1;
++}
++
++void __clk_put(struct clk *clk)
++{
++ if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
++ return;
++
++ clk_prepare_lock();
++ kref_put(&clk->ref, __clk_release);
++ clk_prepare_unlock();
++
++ module_put(clk->owner);
++}
++
++/*** clk rate change notifiers ***/
++
++/**
++ * clk_notifier_register - add a clk rate change notifier
++ * @clk: struct clk * to watch
++ * @nb: struct notifier_block * with callback info
++ *
++ * Request notification when clk's rate changes. This uses an SRCU
++ * notifier because we want it to block and notifier unregistrations are
++ * uncommon. The callbacks associated with the notifier must not
++ * re-enter into the clk framework by calling any top-level clk APIs;
++ * this will cause a nested prepare_lock mutex.
++ *
++ * Pre-change notifier callbacks will be passed the current, pre-change
++ * rate of the clk via struct clk_notifier_data.old_rate. The new,
++ * post-change rate of the clk is passed via struct
++ * clk_notifier_data.new_rate.
++ *
++ * Post-change notifiers will pass the now-current, post-change rate of
++ * the clk in both struct clk_notifier_data.old_rate and struct
++ * clk_notifier_data.new_rate.
++ *
++ * Abort-change notifiers are effectively the opposite of pre-change
++ * notifiers: the original pre-change clk rate is passed in via struct
++ * clk_notifier_data.new_rate and the failed post-change rate is passed
++ * in via struct clk_notifier_data.old_rate.
++ *
++ * clk_notifier_register() must be called from non-atomic context.
++ * Returns -EINVAL if called with null arguments, -ENOMEM upon
++ * allocation failure; otherwise, passes along the return value of
++ * srcu_notifier_chain_register().
++ */
++int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
++{
++ struct clk_notifier *cn;
++ int ret = -ENOMEM;
++
++ if (!clk || !nb)
++ return -EINVAL;
++
++ clk_prepare_lock();
++
++ /* search the list of notifiers for this clk */
++ list_for_each_entry(cn, &clk_notifier_list, node)
++ if (cn->clk == clk)
++ break;
++
++ /* if clk wasn't in the notifier list, allocate new clk_notifier */
++ if (cn->clk != clk) {
++ cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
++ if (!cn)
++ goto out;
++
++ cn->clk = clk;
++ srcu_init_notifier_head(&cn->notifier_head);
++
++ list_add(&cn->node, &clk_notifier_list);
++ }
++
++ ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
++
++ clk->notifier_count++;
++
++out:
++ clk_prepare_unlock();
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(clk_notifier_register);
++
++/**
++ * clk_notifier_unregister - remove a clk rate change notifier
++ * @clk: struct clk *
++ * @nb: struct notifier_block * with callback info
++ *
++ * Request no further notification for changes to 'clk' and frees memory
++ * allocated in clk_notifier_register.
++ *
++ * Returns -EINVAL if called with null arguments; otherwise, passes
++ * along the return value of srcu_notifier_chain_unregister().
++ */
++int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
++{
++ struct clk_notifier *cn = NULL;
++ int ret = -EINVAL;
++
++ if (!clk || !nb)
++ return -EINVAL;
++
++ clk_prepare_lock();
++
++ list_for_each_entry(cn, &clk_notifier_list, node)
++ if (cn->clk == clk)
++ break;
++
++ if (cn->clk == clk) {
++ ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
++
++ clk->notifier_count--;
++
++ /* XXX the notifier code should handle this better */
++ if (!cn->notifier_head.head) {
++ srcu_cleanup_notifier_head(&cn->notifier_head);
++ list_del(&cn->node);
++ kfree(cn);
++ }
++
++ } else {
++ ret = -ENOENT;
++ }
++
++ clk_prepare_unlock();
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(clk_notifier_unregister);
++
++#ifdef CONFIG_OF
++/**
++ * struct of_clk_provider - Clock provider registration structure
++ * @link: Entry in global list of clock providers
++ * @node: Pointer to device tree node of clock provider
++ * @get: Get clock callback. Returns NULL or a struct clk for the
++ * given clock specifier
++ * @data: context pointer to be passed into @get callback
++ */
++struct of_clk_provider {
++ struct list_head link;
++
++ struct device_node *node;
++ struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
++ void *data;
++};
++
++static const struct of_device_id __clk_of_table_sentinel
++ __used __section(__clk_of_table_end);
++
++static LIST_HEAD(of_clk_providers);
++static DEFINE_MUTEX(of_clk_mutex);
++
++/* of_clk_provider list locking helpers */
++void of_clk_lock(void)
++{
++ mutex_lock(&of_clk_mutex);
++}
++
++void of_clk_unlock(void)
++{
++ mutex_unlock(&of_clk_mutex);
++}
++
++struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
++ void *data)
++{
++ return data;
++}
++EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
++
++struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
++{
++ struct clk_onecell_data *clk_data = data;
++ unsigned int idx = clkspec->args[0];
++
++ if (idx >= clk_data->clk_num) {
++ pr_err("%s: invalid clock index %d\n", __func__, idx);
++ return ERR_PTR(-EINVAL);
++ }
++
++ return clk_data->clks[idx];
++}
++EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
++
++/**
++ * of_clk_add_provider() - Register a clock provider for a node
++ * @np: Device node pointer associated with clock provider
++ * @clk_src_get: callback for decoding clock
++ * @data: context pointer for @clk_src_get callback.
++ */
++int of_clk_add_provider(struct device_node *np,
++ struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
++ void *data),
++ void *data)
++{
++ struct of_clk_provider *cp;
++
++ cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
++ if (!cp)
++ return -ENOMEM;
++
++ cp->node = of_node_get(np);
++ cp->data = data;
++ cp->get = clk_src_get;
++
++ mutex_lock(&of_clk_mutex);
++ list_add(&cp->link, &of_clk_providers);
++ mutex_unlock(&of_clk_mutex);
++ pr_debug("Added clock from %s\n", np->full_name);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(of_clk_add_provider);
++
++/**
++ * of_clk_del_provider() - Remove a previously registered clock provider
++ * @np: Device node pointer associated with clock provider
++ */
++void of_clk_del_provider(struct device_node *np)
++{
++ struct of_clk_provider *cp;
++
++ mutex_lock(&of_clk_mutex);
++ list_for_each_entry(cp, &of_clk_providers, link) {
++ if (cp->node == np) {
++ list_del(&cp->link);
++ of_node_put(cp->node);
++ kfree(cp);
++ break;
++ }
++ }
++ mutex_unlock(&of_clk_mutex);
++}
++EXPORT_SYMBOL_GPL(of_clk_del_provider);
++
++struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
++{
++ struct of_clk_provider *provider;
++ struct clk *clk = ERR_PTR(-ENOENT);
++
++ /* Check if we have such a provider in our array */
++ list_for_each_entry(provider, &of_clk_providers, link) {
++ if (provider->node == clkspec->np)
++ clk = provider->get(clkspec, provider->data);
++ if (!IS_ERR(clk))
++ break;
++ }
++
++ return clk;
++}
++
++struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
++{
++ struct clk *clk;
++
++ mutex_lock(&of_clk_mutex);
++ clk = __of_clk_get_from_provider(clkspec);
++ mutex_unlock(&of_clk_mutex);
++
++ return clk;
++}
++
++int of_clk_get_parent_count(struct device_node *np)
++{
++ return of_count_phandle_with_args(np, "clocks", "#clock-cells");
++}
++EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
++
++const char *of_clk_get_parent_name(struct device_node *np, int index)
++{
++ struct of_phandle_args clkspec;
++ const char *clk_name;
++ int rc;
++
++ if (index < 0)
++ return NULL;
++
++ rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
++ &clkspec);
++ if (rc)
++ return NULL;
++
++ if (of_property_read_string_index(clkspec.np, "clock-output-names",
++ clkspec.args_count ? clkspec.args[0] : 0,
++ &clk_name) < 0)
++ clk_name = clkspec.np->name;
++
++ of_node_put(clkspec.np);
++ return clk_name;
++}
++EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
++
++/**
++ * of_clk_init() - Scan and init clock providers from the DT
++ * @matches: array of compatible values and init functions for providers.
++ *
++ * This function scans the device tree for matching clock providers and
++ * calls their initialization functions
++ */
++void __init of_clk_init(const struct of_device_id *matches)
++{
++ const struct of_device_id *match;
++ struct device_node *np;
++
++ if (!matches)
++ matches = &__clk_of_table;
++
++ for_each_matching_node_and_match(np, matches, &match) {
++ of_clk_init_cb_t clk_init_cb = match->data;
++ clk_init_cb(np);
++ }
++}
++#endif
+diff -Nur linux-3.14.36/drivers/clk/clk-mux.c linux-openelec/drivers/clk/clk-mux.c
+--- linux-3.14.36/drivers/clk/clk-mux.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/clk/clk-mux.c 2015-05-06 12:05:42.000000000 -0500
+@@ -143,7 +143,7 @@
+ init.ops = &clk_mux_ro_ops;
+ else
+ init.ops = &clk_mux_ops;
+- init.flags = flags | CLK_IS_BASIC;
++ init.flags = flags | CLK_IS_BASIC | CLK_IS_BASIC_MUX;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+diff -Nur linux-3.14.36/drivers/cpufreq/cpufreq_interactive.c linux-openelec/drivers/cpufreq/cpufreq_interactive.c
+--- linux-3.14.36/drivers/cpufreq/cpufreq_interactive.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/cpufreq/cpufreq_interactive.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1349 @@
++/*
++ * drivers/cpufreq/cpufreq_interactive.c
++ *
++ * Copyright (C) 2010 Google, Inc.
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Author: Mike Chan (mike@android.com)
++ *
++ */
++
++#include <linux/cpu.h>
++#include <linux/cpumask.h>
++#include <linux/cpufreq.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/sched/rt.h>
++#include <linux/tick.h>
++#include <linux/time.h>
++#include <linux/timer.h>
++#include <linux/workqueue.h>
++#include <linux/kthread.h>
++#include <linux/slab.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/cpufreq_interactive.h>
++
++struct cpufreq_interactive_cpuinfo {
++ struct timer_list cpu_timer;
++ struct timer_list cpu_slack_timer;
++ spinlock_t load_lock; /* protects the next 4 fields */
++ u64 time_in_idle;
++ u64 time_in_idle_timestamp;
++ u64 cputime_speedadj;
++ u64 cputime_speedadj_timestamp;
++ struct cpufreq_policy *policy;
++ struct cpufreq_frequency_table *freq_table;
++ unsigned int target_freq;
++ unsigned int floor_freq;
++ u64 floor_validate_time;
++ u64 hispeed_validate_time;
++ struct rw_semaphore enable_sem;
++ int governor_enabled;
++};
++
++static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
++
++/* realtime thread handles frequency scaling */
++static struct task_struct *speedchange_task;
++static cpumask_t speedchange_cpumask;
++static spinlock_t speedchange_cpumask_lock;
++static struct mutex gov_lock;
++
++/* Target load. Lower values result in higher CPU speeds. */
++#define DEFAULT_TARGET_LOAD 90
++static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
++
++#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
++#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
++static unsigned int default_above_hispeed_delay[] = {
++ DEFAULT_ABOVE_HISPEED_DELAY };
++
++struct cpufreq_interactive_tunables {
++ int usage_count;
++ /* Hi speed to bump to from lo speed when load burst (default max) */
++ unsigned int hispeed_freq;
++ /* Go to hi speed when CPU load at or above this value. */
++#define DEFAULT_GO_HISPEED_LOAD 99
++ unsigned long go_hispeed_load;
++ /* Target load. Lower values result in higher CPU speeds. */
++ spinlock_t target_loads_lock;
++ unsigned int *target_loads;
++ int ntarget_loads;
++ /*
++ * The minimum amount of time to spend at a frequency before we can ramp
++ * down.
++ */
++#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
++ unsigned long min_sample_time;
++ /*
++ * The sample rate of the timer used to increase frequency
++ */
++ unsigned long timer_rate;
++ /*
++ * Wait this long before raising speed above hispeed, by default a
++ * single timer interval.
++ */
++ spinlock_t above_hispeed_delay_lock;
++ unsigned int *above_hispeed_delay;
++ int nabove_hispeed_delay;
++ /* Non-zero means indefinite speed boost active */
++ int boost_val;
++	/* Duration of a boost pulse in usecs */
++ int boostpulse_duration_val;
++ /* End time of boost pulse in ktime converted to usecs */
++ u64 boostpulse_endtime;
++ /*
++ * Max additional time to wait in idle, beyond timer_rate, at speeds
++ * above minimum before wakeup to reduce speed, or -1 if unnecessary.
++ */
++#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
++ int timer_slack_val;
++ bool io_is_busy;
++};
++
++/* For cases where we have single governor instance for system */
++struct cpufreq_interactive_tunables *common_tunables;
++
++static struct attribute_group *get_sysfs_attr(void);
++
++static void cpufreq_interactive_timer_resched(
++ struct cpufreq_interactive_cpuinfo *pcpu)
++{
++ struct cpufreq_interactive_tunables *tunables =
++ pcpu->policy->governor_data;
++ unsigned long expires;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcpu->load_lock, flags);
++ pcpu->time_in_idle =
++ get_cpu_idle_time(smp_processor_id(),
++ &pcpu->time_in_idle_timestamp,
++ tunables->io_is_busy);
++ pcpu->cputime_speedadj = 0;
++ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
++ expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
++ mod_timer_pinned(&pcpu->cpu_timer, expires);
++
++ if (tunables->timer_slack_val >= 0 &&
++ pcpu->target_freq > pcpu->policy->min) {
++ expires += usecs_to_jiffies(tunables->timer_slack_val);
++ mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
++ }
++
++ spin_unlock_irqrestore(&pcpu->load_lock, flags);
++}
++
++/* The caller shall take enable_sem write semaphore to avoid any timer race.
++ * The cpu_timer and cpu_slack_timer must be deactivated when calling this
++ * function.
++ */
++static void cpufreq_interactive_timer_start(
++ struct cpufreq_interactive_tunables *tunables, int cpu)
++{
++ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
++ unsigned long expires = jiffies +
++ usecs_to_jiffies(tunables->timer_rate);
++ unsigned long flags;
++
++ pcpu->cpu_timer.expires = expires;
++ add_timer_on(&pcpu->cpu_timer, cpu);
++ if (tunables->timer_slack_val >= 0 &&
++ pcpu->target_freq > pcpu->policy->min) {
++ expires += usecs_to_jiffies(tunables->timer_slack_val);
++ pcpu->cpu_slack_timer.expires = expires;
++ add_timer_on(&pcpu->cpu_slack_timer, cpu);
++ }
++
++ spin_lock_irqsave(&pcpu->load_lock, flags);
++ pcpu->time_in_idle =
++ get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
++ tunables->io_is_busy);
++ pcpu->cputime_speedadj = 0;
++ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
++ spin_unlock_irqrestore(&pcpu->load_lock, flags);
++}
++
++static unsigned int freq_to_above_hispeed_delay(
++ struct cpufreq_interactive_tunables *tunables,
++ unsigned int freq)
++{
++ int i;
++ unsigned int ret;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
++
++ for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
++ freq >= tunables->above_hispeed_delay[i+1]; i += 2)
++ ;
++
++ ret = tunables->above_hispeed_delay[i];
++ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
++ return ret;
++}
++
++static unsigned int freq_to_targetload(
++ struct cpufreq_interactive_tunables *tunables, unsigned int freq)
++{
++ int i;
++ unsigned int ret;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tunables->target_loads_lock, flags);
++
++ for (i = 0; i < tunables->ntarget_loads - 1 &&
++ freq >= tunables->target_loads[i+1]; i += 2)
++ ;
++
++ ret = tunables->target_loads[i];
++ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
++ return ret;
++}
++
++/*
++ * If increasing frequencies never map to a lower target load then
++ * choose_freq() will find the minimum frequency that does not exceed its
++ * target load given the current load.
++ */
++static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
++ unsigned int loadadjfreq)
++{
++ unsigned int freq = pcpu->policy->cur;
++ unsigned int prevfreq, freqmin, freqmax;
++ unsigned int tl;
++ int index;
++
++ freqmin = 0;
++ freqmax = UINT_MAX;
++
++ do {
++ prevfreq = freq;
++ tl = freq_to_targetload(pcpu->policy->governor_data, freq);
++
++ /*
++ * Find the lowest frequency where the computed load is less
++ * than or equal to the target load.
++ */
++
++ if (cpufreq_frequency_table_target(
++ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
++ CPUFREQ_RELATION_L, &index))
++ break;
++ freq = pcpu->freq_table[index].frequency;
++
++ if (freq > prevfreq) {
++ /* The previous frequency is too low. */
++ freqmin = prevfreq;
++
++ if (freq >= freqmax) {
++ /*
++ * Find the highest frequency that is less
++ * than freqmax.
++ */
++ if (cpufreq_frequency_table_target(
++ pcpu->policy, pcpu->freq_table,
++ freqmax - 1, CPUFREQ_RELATION_H,
++ &index))
++ break;
++ freq = pcpu->freq_table[index].frequency;
++
++ if (freq == freqmin) {
++ /*
++ * The first frequency below freqmax
++ * has already been found to be too
++ * low. freqmax is the lowest speed
++ * we found that is fast enough.
++ */
++ freq = freqmax;
++ break;
++ }
++ }
++ } else if (freq < prevfreq) {
++ /* The previous frequency is high enough. */
++ freqmax = prevfreq;
++
++ if (freq <= freqmin) {
++ /*
++ * Find the lowest frequency that is higher
++ * than freqmin.
++ */
++ if (cpufreq_frequency_table_target(
++ pcpu->policy, pcpu->freq_table,
++ freqmin + 1, CPUFREQ_RELATION_L,
++ &index))
++ break;
++ freq = pcpu->freq_table[index].frequency;
++
++ /*
++ * If freqmax is the first frequency above
++ * freqmin then we have already found that
++ * this speed is fast enough.
++ */
++ if (freq == freqmax)
++ break;
++ }
++ }
++
++ /* If same frequency chosen as previous then done. */
++ } while (freq != prevfreq);
++
++ return freq;
++}
++
++static u64 update_load(int cpu)
++{
++ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
++ struct cpufreq_interactive_tunables *tunables =
++ pcpu->policy->governor_data;
++ u64 now;
++ u64 now_idle;
++ unsigned int delta_idle;
++ unsigned int delta_time;
++ u64 active_time;
++
++ now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
++ delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
++ delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
++
++ if (delta_time <= delta_idle)
++ active_time = 0;
++ else
++ active_time = delta_time - delta_idle;
++
++ pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
++
++ pcpu->time_in_idle = now_idle;
++ pcpu->time_in_idle_timestamp = now;
++ return now;
++}
++
++static void cpufreq_interactive_timer(unsigned long data)
++{
++ u64 now;
++ unsigned int delta_time;
++ u64 cputime_speedadj;
++ int cpu_load;
++ struct cpufreq_interactive_cpuinfo *pcpu =
++ &per_cpu(cpuinfo, data);
++ struct cpufreq_interactive_tunables *tunables =
++ pcpu->policy->governor_data;
++ unsigned int new_freq;
++ unsigned int loadadjfreq;
++ unsigned int index;
++ unsigned long flags;
++ bool boosted;
++
++ if (!down_read_trylock(&pcpu->enable_sem))
++ return;
++ if (!pcpu->governor_enabled)
++ goto exit;
++
++ spin_lock_irqsave(&pcpu->load_lock, flags);
++ now = update_load(data);
++ delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
++ cputime_speedadj = pcpu->cputime_speedadj;
++ spin_unlock_irqrestore(&pcpu->load_lock, flags);
++
++ if (WARN_ON_ONCE(!delta_time))
++ goto rearm;
++
++ do_div(cputime_speedadj, delta_time);
++ loadadjfreq = (unsigned int)cputime_speedadj * 100;
++ cpu_load = loadadjfreq / pcpu->target_freq;
++ boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
++
++ if (cpu_load >= tunables->go_hispeed_load || boosted) {
++ if (pcpu->target_freq < tunables->hispeed_freq) {
++ new_freq = tunables->hispeed_freq;
++ } else {
++ new_freq = choose_freq(pcpu, loadadjfreq);
++
++ if (new_freq < tunables->hispeed_freq)
++ new_freq = tunables->hispeed_freq;
++ }
++ } else {
++ new_freq = choose_freq(pcpu, loadadjfreq);
++ }
++
++ if (pcpu->target_freq >= tunables->hispeed_freq &&
++ new_freq > pcpu->target_freq &&
++ now - pcpu->hispeed_validate_time <
++ freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
++ trace_cpufreq_interactive_notyet(
++ data, cpu_load, pcpu->target_freq,
++ pcpu->policy->cur, new_freq);
++ goto rearm;
++ }
++
++ pcpu->hispeed_validate_time = now;
++
++ if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
++ new_freq, CPUFREQ_RELATION_L,
++ &index))
++ goto rearm;
++
++ new_freq = pcpu->freq_table[index].frequency;
++
++ /*
++ * Do not scale below floor_freq unless we have been at or above the
++ * floor frequency for the minimum sample time since last validated.
++ */
++ if (new_freq < pcpu->floor_freq) {
++ if (now - pcpu->floor_validate_time <
++ tunables->min_sample_time) {
++ trace_cpufreq_interactive_notyet(
++ data, cpu_load, pcpu->target_freq,
++ pcpu->policy->cur, new_freq);
++ goto rearm;
++ }
++ }
++
++ /*
++ * Update the timestamp for checking whether speed has been held at
++ * or above the selected frequency for a minimum of min_sample_time,
++ * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
++ * allow the speed to drop as soon as the boostpulse duration expires
++ * (or the indefinite boost is turned off).
++ */
++
++ if (!boosted || new_freq > tunables->hispeed_freq) {
++ pcpu->floor_freq = new_freq;
++ pcpu->floor_validate_time = now;
++ }
++
++ if (pcpu->target_freq == new_freq) {
++ trace_cpufreq_interactive_already(
++ data, cpu_load, pcpu->target_freq,
++ pcpu->policy->cur, new_freq);
++ goto rearm_if_notmax;
++ }
++
++ trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
++ pcpu->policy->cur, new_freq);
++
++ pcpu->target_freq = new_freq;
++ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
++ cpumask_set_cpu(data, &speedchange_cpumask);
++ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
++ wake_up_process(speedchange_task);
++
++rearm_if_notmax:
++ /*
++ * Already set max speed and don't see a need to change that,
++ * wait until next idle to re-evaluate, don't need timer.
++ */
++ if (pcpu->target_freq == pcpu->policy->max)
++ goto exit;
++
++rearm:
++ if (!timer_pending(&pcpu->cpu_timer))
++ cpufreq_interactive_timer_resched(pcpu);
++
++exit:
++ up_read(&pcpu->enable_sem);
++ return;
++}
++
++static void cpufreq_interactive_idle_start(void)
++{
++ struct cpufreq_interactive_cpuinfo *pcpu =
++ &per_cpu(cpuinfo, smp_processor_id());
++ int pending;
++
++ if (!down_read_trylock(&pcpu->enable_sem))
++ return;
++ if (!pcpu->governor_enabled) {
++ up_read(&pcpu->enable_sem);
++ return;
++ }
++
++ pending = timer_pending(&pcpu->cpu_timer);
++
++ if (pcpu->target_freq != pcpu->policy->min) {
++ /*
++ * Entering idle while not at lowest speed. On some
++ * platforms this can hold the other CPU(s) at that speed
++ * even though the CPU is idle. Set a timer to re-evaluate
++ * speed so this idle CPU doesn't hold the other CPUs above
++ * min indefinitely. This should probably be a quirk of
++ * the CPUFreq driver.
++ */
++ if (!pending)
++ cpufreq_interactive_timer_resched(pcpu);
++ }
++
++ up_read(&pcpu->enable_sem);
++}
++
++static void cpufreq_interactive_idle_end(void)
++{
++ struct cpufreq_interactive_cpuinfo *pcpu =
++ &per_cpu(cpuinfo, smp_processor_id());
++
++ if (!down_read_trylock(&pcpu->enable_sem))
++ return;
++ if (!pcpu->governor_enabled) {
++ up_read(&pcpu->enable_sem);
++ return;
++ }
++
++ /* Arm the timer for 1-2 ticks later if not already. */
++ if (!timer_pending(&pcpu->cpu_timer)) {
++ cpufreq_interactive_timer_resched(pcpu);
++ } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
++ del_timer(&pcpu->cpu_timer);
++ del_timer(&pcpu->cpu_slack_timer);
++ cpufreq_interactive_timer(smp_processor_id());
++ }
++
++ up_read(&pcpu->enable_sem);
++}
++
++static int cpufreq_interactive_speedchange_task(void *data)
++{
++ unsigned int cpu;
++ cpumask_t tmp_mask;
++ unsigned long flags;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++
++ while (1) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
++
++ if (cpumask_empty(&speedchange_cpumask)) {
++ spin_unlock_irqrestore(&speedchange_cpumask_lock,
++ flags);
++ schedule();
++
++ if (kthread_should_stop())
++ break;
++
++ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
++ }
++
++ set_current_state(TASK_RUNNING);
++ tmp_mask = speedchange_cpumask;
++ cpumask_clear(&speedchange_cpumask);
++ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
++
++ for_each_cpu(cpu, &tmp_mask) {
++ unsigned int j;
++ unsigned int max_freq = 0;
++
++ pcpu = &per_cpu(cpuinfo, cpu);
++ if (!down_read_trylock(&pcpu->enable_sem))
++ continue;
++ if (!pcpu->governor_enabled) {
++ up_read(&pcpu->enable_sem);
++ continue;
++ }
++
++ for_each_cpu(j, pcpu->policy->cpus) {
++ struct cpufreq_interactive_cpuinfo *pjcpu =
++ &per_cpu(cpuinfo, j);
++
++ if (pjcpu->target_freq > max_freq)
++ max_freq = pjcpu->target_freq;
++ }
++
++ if (max_freq != pcpu->policy->cur)
++ __cpufreq_driver_target(pcpu->policy,
++ max_freq,
++ CPUFREQ_RELATION_H);
++ trace_cpufreq_interactive_setspeed(cpu,
++ pcpu->target_freq,
++ pcpu->policy->cur);
++
++ up_read(&pcpu->enable_sem);
++ }
++ }
++
++ return 0;
++}
++
++static void cpufreq_interactive_boost(void)
++{
++ int i;
++ int anyboost = 0;
++ unsigned long flags;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++ struct cpufreq_interactive_tunables *tunables;
++
++ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
++
++ for_each_online_cpu(i) {
++ pcpu = &per_cpu(cpuinfo, i);
++ tunables = pcpu->policy->governor_data;
++
++ if (pcpu->target_freq < tunables->hispeed_freq) {
++ pcpu->target_freq = tunables->hispeed_freq;
++ cpumask_set_cpu(i, &speedchange_cpumask);
++ pcpu->hispeed_validate_time =
++ ktime_to_us(ktime_get());
++ anyboost = 1;
++ }
++
++ /*
++ * Set floor freq and (re)start timer for when last
++ * validated.
++ */
++
++ pcpu->floor_freq = tunables->hispeed_freq;
++ pcpu->floor_validate_time = ktime_to_us(ktime_get());
++ }
++
++ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
++
++ if (anyboost)
++ wake_up_process(speedchange_task);
++}
++
++static int cpufreq_interactive_notifier(
++ struct notifier_block *nb, unsigned long val, void *data)
++{
++ struct cpufreq_freqs *freq = data;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++ int cpu;
++ unsigned long flags;
++
++ if (val == CPUFREQ_POSTCHANGE) {
++ pcpu = &per_cpu(cpuinfo, freq->cpu);
++ if (!down_read_trylock(&pcpu->enable_sem))
++ return 0;
++ if (!pcpu->governor_enabled) {
++ up_read(&pcpu->enable_sem);
++ return 0;
++ }
++
++ for_each_cpu(cpu, pcpu->policy->cpus) {
++ struct cpufreq_interactive_cpuinfo *pjcpu =
++ &per_cpu(cpuinfo, cpu);
++ if (cpu != freq->cpu) {
++ if (!down_read_trylock(&pjcpu->enable_sem))
++ continue;
++ if (!pjcpu->governor_enabled) {
++ up_read(&pjcpu->enable_sem);
++ continue;
++ }
++ }
++ spin_lock_irqsave(&pjcpu->load_lock, flags);
++ update_load(cpu);
++ spin_unlock_irqrestore(&pjcpu->load_lock, flags);
++ if (cpu != freq->cpu)
++ up_read(&pjcpu->enable_sem);
++ }
++
++ up_read(&pcpu->enable_sem);
++ }
++ return 0;
++}
++
++static struct notifier_block cpufreq_notifier_block = {
++ .notifier_call = cpufreq_interactive_notifier,
++};
++
++static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
++{
++ const char *cp;
++ int i;
++ int ntokens = 1;
++ unsigned int *tokenized_data;
++ int err = -EINVAL;
++
++ cp = buf;
++ while ((cp = strpbrk(cp + 1, " :")))
++ ntokens++;
++
++ if (!(ntokens & 0x1))
++ goto err;
++
++ tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
++ if (!tokenized_data) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ cp = buf;
++ i = 0;
++ while (i < ntokens) {
++ if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
++ goto err_kfree;
++
++ cp = strpbrk(cp, " :");
++ if (!cp)
++ break;
++ cp++;
++ }
++
++ if (i != ntokens)
++ goto err_kfree;
++
++ *num_tokens = ntokens;
++ return tokenized_data;
++
++err_kfree:
++ kfree(tokenized_data);
++err:
++ return ERR_PTR(err);
++}
++
++static ssize_t show_target_loads(
++ struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ int i;
++ ssize_t ret = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tunables->target_loads_lock, flags);
++
++ for (i = 0; i < tunables->ntarget_loads; i++)
++ ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
++ i & 0x1 ? ":" : " ");
++
++ sprintf(buf + ret - 1, "\n");
++ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
++ return ret;
++}
++
++static ssize_t store_target_loads(
++ struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ntokens;
++ unsigned int *new_target_loads = NULL;
++ unsigned long flags;
++
++ new_target_loads = get_tokenized_data(buf, &ntokens);
++ if (IS_ERR(new_target_loads))
++ return PTR_RET(new_target_loads);
++
++ spin_lock_irqsave(&tunables->target_loads_lock, flags);
++ if (tunables->target_loads != default_target_loads)
++ kfree(tunables->target_loads);
++ tunables->target_loads = new_target_loads;
++ tunables->ntarget_loads = ntokens;
++ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
++ return count;
++}
++
++static ssize_t show_above_hispeed_delay(
++ struct cpufreq_interactive_tunables *tunables, char *buf)
++{
++ int i;
++ ssize_t ret = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
++
++ for (i = 0; i < tunables->nabove_hispeed_delay; i++)
++ ret += sprintf(buf + ret, "%u%s",
++ tunables->above_hispeed_delay[i],
++ i & 0x1 ? ":" : " ");
++
++ sprintf(buf + ret - 1, "\n");
++ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
++ return ret;
++}
++
++static ssize_t store_above_hispeed_delay(
++ struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ntokens;
++ unsigned int *new_above_hispeed_delay = NULL;
++ unsigned long flags;
++
++ new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
++ if (IS_ERR(new_above_hispeed_delay))
++ return PTR_RET(new_above_hispeed_delay);
++
++ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
++ if (tunables->above_hispeed_delay != default_above_hispeed_delay)
++ kfree(tunables->above_hispeed_delay);
++ tunables->above_hispeed_delay = new_above_hispeed_delay;
++ tunables->nabove_hispeed_delay = ntokens;
++ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
++ return count;
++
++}
++
++static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%u\n", tunables->hispeed_freq);
++}
++
++static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ long unsigned int val;
++
++ ret = strict_strtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->hispeed_freq = val;
++ return count;
++}
++
++static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
++ *tunables, char *buf)
++{
++ return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
++}
++
++static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
++ *tunables, const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = strict_strtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->go_hispeed_load = val;
++ return count;
++}
++
++static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
++ *tunables, char *buf)
++{
++ return sprintf(buf, "%lu\n", tunables->min_sample_time);
++}
++
++static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
++ *tunables, const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = strict_strtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->min_sample_time = val;
++ return count;
++}
++
++static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%lu\n", tunables->timer_rate);
++}
++
++static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = strict_strtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->timer_rate = val;
++ return count;
++}
++
++static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%d\n", tunables->timer_slack_val);
++}
++
++static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
++		const char *buf, size_t count)
++{
++	int ret;
++	long val;
++
++	ret = kstrtol(buf, 10, &val);
++	if (ret < 0)
++		return ret;
++
++	tunables->timer_slack_val = val;
++	return count;
++}
++
++static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%d\n", tunables->boost_val);
++}
++
++static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++
++ tunables->boost_val = val;
++
++ if (tunables->boost_val) {
++ trace_cpufreq_interactive_boost("on");
++ cpufreq_interactive_boost();
++ } else {
++ trace_cpufreq_interactive_unboost("off");
++ }
++
++ return count;
++}
++
++static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++
++ tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
++ tunables->boostpulse_duration_val;
++ trace_cpufreq_interactive_boost("pulse");
++ cpufreq_interactive_boost();
++ return count;
++}
++
++static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
++ *tunables, char *buf)
++{
++ return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
++}
++
++static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
++ *tunables, const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++
++ tunables->boostpulse_duration_val = val;
++ return count;
++}
++
++static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%u\n", tunables->io_is_busy);
++}
++
++static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->io_is_busy = val;
++ return count;
++}
++
++/*
++ * Create show/store routines
++ * - sys: One governor instance for complete SYSTEM
++ * - pol: One governor instance per struct cpufreq_policy
++ */
++#define show_gov_pol_sys(file_name) \
++static ssize_t show_##file_name##_gov_sys \
++(struct kobject *kobj, struct attribute *attr, char *buf) \
++{ \
++ return show_##file_name(common_tunables, buf); \
++} \
++ \
++static ssize_t show_##file_name##_gov_pol \
++(struct cpufreq_policy *policy, char *buf) \
++{ \
++ return show_##file_name(policy->governor_data, buf); \
++}
++
++#define store_gov_pol_sys(file_name) \
++static ssize_t store_##file_name##_gov_sys \
++(struct kobject *kobj, struct attribute *attr, const char *buf, \
++ size_t count) \
++{ \
++ return store_##file_name(common_tunables, buf, count); \
++} \
++ \
++static ssize_t store_##file_name##_gov_pol \
++(struct cpufreq_policy *policy, const char *buf, size_t count) \
++{ \
++ return store_##file_name(policy->governor_data, buf, count); \
++}
++
++#define show_store_gov_pol_sys(file_name) \
++show_gov_pol_sys(file_name); \
++store_gov_pol_sys(file_name)
++
++show_store_gov_pol_sys(target_loads);
++show_store_gov_pol_sys(above_hispeed_delay);
++show_store_gov_pol_sys(hispeed_freq);
++show_store_gov_pol_sys(go_hispeed_load);
++show_store_gov_pol_sys(min_sample_time);
++show_store_gov_pol_sys(timer_rate);
++show_store_gov_pol_sys(timer_slack);
++show_store_gov_pol_sys(boost);
++store_gov_pol_sys(boostpulse);
++show_store_gov_pol_sys(boostpulse_duration);
++show_store_gov_pol_sys(io_is_busy);
++
++#define gov_sys_attr_rw(_name) \
++static struct global_attr _name##_gov_sys = \
++__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
++
++#define gov_pol_attr_rw(_name) \
++static struct freq_attr _name##_gov_pol = \
++__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
++
++#define gov_sys_pol_attr_rw(_name) \
++ gov_sys_attr_rw(_name); \
++ gov_pol_attr_rw(_name)
++
++gov_sys_pol_attr_rw(target_loads);
++gov_sys_pol_attr_rw(above_hispeed_delay);
++gov_sys_pol_attr_rw(hispeed_freq);
++gov_sys_pol_attr_rw(go_hispeed_load);
++gov_sys_pol_attr_rw(min_sample_time);
++gov_sys_pol_attr_rw(timer_rate);
++gov_sys_pol_attr_rw(timer_slack);
++gov_sys_pol_attr_rw(boost);
++gov_sys_pol_attr_rw(boostpulse_duration);
++gov_sys_pol_attr_rw(io_is_busy);
++
++static struct global_attr boostpulse_gov_sys =
++ __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
++
++static struct freq_attr boostpulse_gov_pol =
++ __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
++
++/* One Governor instance for entire system */
++static struct attribute *interactive_attributes_gov_sys[] = {
++ &target_loads_gov_sys.attr,
++ &above_hispeed_delay_gov_sys.attr,
++ &hispeed_freq_gov_sys.attr,
++ &go_hispeed_load_gov_sys.attr,
++ &min_sample_time_gov_sys.attr,
++ &timer_rate_gov_sys.attr,
++ &timer_slack_gov_sys.attr,
++ &boost_gov_sys.attr,
++ &boostpulse_gov_sys.attr,
++ &boostpulse_duration_gov_sys.attr,
++ &io_is_busy_gov_sys.attr,
++ NULL,
++};
++
++static struct attribute_group interactive_attr_group_gov_sys = {
++ .attrs = interactive_attributes_gov_sys,
++ .name = "interactive",
++};
++
++/* Per policy governor instance */
++static struct attribute *interactive_attributes_gov_pol[] = {
++ &target_loads_gov_pol.attr,
++ &above_hispeed_delay_gov_pol.attr,
++ &hispeed_freq_gov_pol.attr,
++ &go_hispeed_load_gov_pol.attr,
++ &min_sample_time_gov_pol.attr,
++ &timer_rate_gov_pol.attr,
++ &timer_slack_gov_pol.attr,
++ &boost_gov_pol.attr,
++ &boostpulse_gov_pol.attr,
++ &boostpulse_duration_gov_pol.attr,
++ &io_is_busy_gov_pol.attr,
++ NULL,
++};
++
++static struct attribute_group interactive_attr_group_gov_pol = {
++ .attrs = interactive_attributes_gov_pol,
++ .name = "interactive",
++};
++
++static struct attribute_group *get_sysfs_attr(void)
++{
++ if (have_governor_per_policy())
++ return &interactive_attr_group_gov_pol;
++ else
++ return &interactive_attr_group_gov_sys;
++}
++
++static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
++ unsigned long val,
++ void *data)
++{
++ switch (val) {
++ case IDLE_START:
++ cpufreq_interactive_idle_start();
++ break;
++ case IDLE_END:
++ cpufreq_interactive_idle_end();
++ break;
++ }
++
++ return 0;
++}
++
++static struct notifier_block cpufreq_interactive_idle_nb = {
++ .notifier_call = cpufreq_interactive_idle_notifier,
++};
++
++static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
++ unsigned int event)
++{
++ int rc;
++ unsigned int j;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++ struct cpufreq_frequency_table *freq_table;
++ struct cpufreq_interactive_tunables *tunables;
++
++ if (have_governor_per_policy())
++ tunables = policy->governor_data;
++ else
++ tunables = common_tunables;
++
++ WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
++
++ switch (event) {
++ case CPUFREQ_GOV_POLICY_INIT:
++ if (have_governor_per_policy()) {
++ WARN_ON(tunables);
++ } else if (tunables) {
++ tunables->usage_count++;
++ policy->governor_data = tunables;
++ return 0;
++ }
++
++ tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
++ if (!tunables) {
++ pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
++ return -ENOMEM;
++ }
++
++ tunables->usage_count = 1;
++ tunables->above_hispeed_delay = default_above_hispeed_delay;
++ tunables->nabove_hispeed_delay =
++ ARRAY_SIZE(default_above_hispeed_delay);
++ tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
++ tunables->target_loads = default_target_loads;
++ tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
++ tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
++ tunables->timer_rate = DEFAULT_TIMER_RATE;
++ tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
++ tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
++
++ spin_lock_init(&tunables->target_loads_lock);
++ spin_lock_init(&tunables->above_hispeed_delay_lock);
++
++ policy->governor_data = tunables;
++ if (!have_governor_per_policy()) {
++ common_tunables = tunables;
++ WARN_ON(cpufreq_get_global_kobject());
++ }
++
++ rc = sysfs_create_group(get_governor_parent_kobj(policy),
++ get_sysfs_attr());
++ if (rc) {
++ kfree(tunables);
++ policy->governor_data = NULL;
++ if (!have_governor_per_policy())
++ common_tunables = NULL;
++ return rc;
++ }
++
++ if (!policy->governor->initialized) {
++ idle_notifier_register(&cpufreq_interactive_idle_nb);
++ cpufreq_register_notifier(&cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER);
++ }
++
++ break;
++
++ case CPUFREQ_GOV_POLICY_EXIT:
++ if (!--tunables->usage_count) {
++ sysfs_remove_group(get_governor_parent_kobj(policy),
++ get_sysfs_attr());
++
++ if (!have_governor_per_policy())
++ cpufreq_put_global_kobject();
++
++ if (policy->governor->initialized == 1) {
++ cpufreq_unregister_notifier(&cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER);
++ idle_notifier_unregister(&cpufreq_interactive_idle_nb);
++ }
++
++ kfree(tunables);
++ common_tunables = NULL;
++ }
++
++ policy->governor_data = NULL;
++ break;
++
++ case CPUFREQ_GOV_START:
++ mutex_lock(&gov_lock);
++
++ freq_table = cpufreq_frequency_get_table(policy->cpu);
++ if (!tunables->hispeed_freq)
++ tunables->hispeed_freq = policy->max;
++
++ for_each_cpu(j, policy->cpus) {
++ pcpu = &per_cpu(cpuinfo, j);
++ pcpu->policy = policy;
++ pcpu->target_freq = policy->cur;
++ pcpu->freq_table = freq_table;
++ pcpu->floor_freq = pcpu->target_freq;
++ pcpu->floor_validate_time =
++ ktime_to_us(ktime_get());
++ pcpu->hispeed_validate_time =
++ pcpu->floor_validate_time;
++ down_write(&pcpu->enable_sem);
++ del_timer_sync(&pcpu->cpu_timer);
++ del_timer_sync(&pcpu->cpu_slack_timer);
++ cpufreq_interactive_timer_start(tunables, j);
++ pcpu->governor_enabled = 1;
++ up_write(&pcpu->enable_sem);
++ }
++
++ mutex_unlock(&gov_lock);
++ break;
++
++ case CPUFREQ_GOV_STOP:
++ mutex_lock(&gov_lock);
++ for_each_cpu(j, policy->cpus) {
++ pcpu = &per_cpu(cpuinfo, j);
++ down_write(&pcpu->enable_sem);
++ pcpu->governor_enabled = 0;
++ del_timer_sync(&pcpu->cpu_timer);
++ del_timer_sync(&pcpu->cpu_slack_timer);
++ up_write(&pcpu->enable_sem);
++ }
++
++ mutex_unlock(&gov_lock);
++ break;
++
++ case CPUFREQ_GOV_LIMITS:
++ if (policy->max < policy->cur)
++ __cpufreq_driver_target(policy,
++ policy->max, CPUFREQ_RELATION_H);
++ else if (policy->min > policy->cur)
++ __cpufreq_driver_target(policy,
++ policy->min, CPUFREQ_RELATION_L);
++ for_each_cpu(j, policy->cpus) {
++ pcpu = &per_cpu(cpuinfo, j);
++
++ /* hold write semaphore to avoid race */
++ down_write(&pcpu->enable_sem);
++ if (pcpu->governor_enabled == 0) {
++ up_write(&pcpu->enable_sem);
++ continue;
++ }
++
++ /* update target_freq firstly */
++ if (policy->max < pcpu->target_freq)
++ pcpu->target_freq = policy->max;
++ else if (policy->min > pcpu->target_freq)
++ pcpu->target_freq = policy->min;
++
++ /* Reschedule timer.
++ * Delete the timers, else the timer callback may
++ * return without re-arm the timer when failed
++ * acquire the semaphore. This race may cause timer
++ * stopped unexpectedly.
++ */
++ del_timer_sync(&pcpu->cpu_timer);
++ del_timer_sync(&pcpu->cpu_slack_timer);
++ cpufreq_interactive_timer_start(tunables, j);
++ up_write(&pcpu->enable_sem);
++ }
++ break;
++ }
++ return 0;
++}
++
++#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
++static
++#endif
++struct cpufreq_governor cpufreq_gov_interactive = {
++ .name = "interactive",
++ .governor = cpufreq_governor_interactive,
++ .max_transition_latency = 10000000,
++ .owner = THIS_MODULE,
++};
++
++static void cpufreq_interactive_nop_timer(unsigned long data)
++{
++}
++
++static int __init cpufreq_interactive_init(void)
++{
++ unsigned int i;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
++
++ /* Initalize per-cpu timers */
++ for_each_possible_cpu(i) {
++ pcpu = &per_cpu(cpuinfo, i);
++ init_timer_deferrable(&pcpu->cpu_timer);
++ pcpu->cpu_timer.function = cpufreq_interactive_timer;
++ pcpu->cpu_timer.data = i;
++ init_timer(&pcpu->cpu_slack_timer);
++ pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
++ spin_lock_init(&pcpu->load_lock);
++ init_rwsem(&pcpu->enable_sem);
++ }
++
++ spin_lock_init(&speedchange_cpumask_lock);
++ mutex_init(&gov_lock);
++ speedchange_task =
++ kthread_create(cpufreq_interactive_speedchange_task, NULL,
++ "cfinteractive");
++ if (IS_ERR(speedchange_task))
++ return PTR_ERR(speedchange_task);
++
++ sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
++ get_task_struct(speedchange_task);
++
++ /* NB: wake up so the thread does not look hung to the freezer */
++ wake_up_process(speedchange_task);
++
++ return cpufreq_register_governor(&cpufreq_gov_interactive);
++}
++
++#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
++fs_initcall(cpufreq_interactive_init);
++#else
++module_init(cpufreq_interactive_init);
++#endif
++
++static void __exit cpufreq_interactive_exit(void)
++{
++ cpufreq_unregister_governor(&cpufreq_gov_interactive);
++ kthread_stop(speedchange_task);
++ put_task_struct(speedchange_task);
++}
++
++module_exit(cpufreq_interactive_exit);
++
++MODULE_AUTHOR("Mike Chan <mike@android.com>");
++MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
++ "Latency sensitive workloads");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/cpufreq/highbank-cpufreq.c linux-openelec/drivers/cpufreq/highbank-cpufreq.c
+--- linux-3.14.36/drivers/cpufreq/highbank-cpufreq.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/cpufreq/highbank-cpufreq.c 2015-05-06 12:05:42.000000000 -0500
+@@ -19,7 +19,7 @@
+ #include <linux/cpu.h>
+ #include <linux/err.h>
+ #include <linux/of.h>
+-#include <linux/mailbox.h>
++#include <linux/pl320-ipc.h>
+ #include <linux/platform_device.h>
+
+ #define HB_CPUFREQ_CHANGE_NOTE 0x80000001
+diff -Nur linux-3.14.36/drivers/cpufreq/imx6-cpufreq.c linux-openelec/drivers/cpufreq/imx6-cpufreq.c
+--- linux-3.14.36/drivers/cpufreq/imx6-cpufreq.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/cpufreq/imx6-cpufreq.c 2015-07-24 18:03:30.408842002 -0500
+@@ -0,0 +1,400 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/cpu.h>
++#include <linux/cpufreq.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/pm_opp.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/suspend.h>
++
++#define PU_SOC_VOLTAGE_NORMAL 1250000
++#define PU_SOC_VOLTAGE_HIGH 1275000
++#define FREQ_1P2_GHZ 1200000000
++
++static struct regulator *arm_reg;
++static struct regulator *pu_reg;
++static struct regulator *soc_reg;
++
++static struct clk *arm_clk;
++static struct clk *pll1_sys_clk;
++static struct clk *pll1_sw_clk;
++static struct clk *step_clk;
++static struct clk *pll2_pfd2_396m_clk;
++
++static struct device *cpu_dev;
++static struct cpufreq_frequency_table *freq_table;
++static unsigned int transition_latency;
++static struct mutex set_cpufreq_lock;
++
++static u32 *imx6_soc_volt;
++static u32 soc_opp_count;
++
++static int imx6_set_target(struct cpufreq_policy *policy, unsigned int index)
++{
++ struct dev_pm_opp *opp;
++ unsigned long freq_hz, volt, volt_old;
++ unsigned int old_freq, new_freq;
++ int ret;
++
++ mutex_lock(&set_cpufreq_lock);
++
++ new_freq = freq_table[index].frequency;
++ freq_hz = new_freq * 1000;
++ old_freq = clk_get_rate(arm_clk) / 1000;
++
++ rcu_read_lock();
++ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
++ if (IS_ERR(opp)) {
++ rcu_read_unlock();
++ dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
++ ret = PTR_ERR(opp);
++ goto unlock;
++ }
++
++ volt = dev_pm_opp_get_voltage(opp);
++ rcu_read_unlock();
++ volt_old = regulator_get_voltage(arm_reg);
++
++ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
++ old_freq / 1000, volt_old / 1000,
++ new_freq / 1000, volt / 1000);
++
++ /*
++ * CPU freq is increasing, so need to ensure
++ * that bus frequency is increased too.
++ */
++ if (old_freq == freq_table[0].frequency)
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ /* scaling up? scale voltage before frequency */
++ if (new_freq > old_freq) {
++ if (regulator_is_enabled(pu_reg)) {
++ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
++ if (ret) {
++ dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
++ goto unlock;
++ }
++ }
++ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
++ if (ret) {
++ dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
++ goto unlock;
++ }
++ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
++ if (ret) {
++ dev_err(cpu_dev,
++ "failed to scale vddarm up: %d\n", ret);
++ goto unlock;
++ }
++ }
++
++ /*
++ * The setpoints are selected per PLL/PDF frequencies, so we need to
++ * reprogram PLL for frequency scaling. The procedure of reprogramming
++ * PLL1 is as below.
++ *
++ * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
++ * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
++ * - Disable pll2_pfd2_396m_clk
++ */
++ clk_set_parent(step_clk, pll2_pfd2_396m_clk);
++ clk_set_parent(pll1_sw_clk, step_clk);
++ if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
++ clk_set_rate(pll1_sys_clk, new_freq * 1000);
++ clk_set_parent(pll1_sw_clk, pll1_sys_clk);
++ }
++
++ /* Ensure the arm clock divider is what we expect */
++ ret = clk_set_rate(arm_clk, new_freq * 1000);
++ if (ret) {
++ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
++ regulator_set_voltage_tol(arm_reg, volt_old, 0);
++ goto unlock;
++ }
++
++ /* scaling down? scale voltage after frequency */
++ if (new_freq < old_freq) {
++ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
++ if (ret) {
++ dev_warn(cpu_dev,
++ "failed to scale vddarm down: %d\n", ret);
++ ret = 0;
++ }
++ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
++ if (ret) {
++ dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
++ ret = 0;
++ }
++ if (regulator_is_enabled(pu_reg)) {
++ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
++ if (ret) {
++ dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
++ ret = 0;
++ }
++ }
++ }
++
++ if (policy->cur == freq_table[0].frequency)
++ release_bus_freq(BUS_FREQ_HIGH);
++
++unlock:
++ mutex_unlock(&set_cpufreq_lock);
++ return ret;
++}
++
++static int imx6_cpufreq_init(struct cpufreq_policy *policy)
++{
++ policy->clk = arm_clk;
++
++ if (policy->cur > freq_table[0].frequency)
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ return cpufreq_generic_init(policy, freq_table, transition_latency);
++}
++
++static struct cpufreq_driver imx6_cpufreq_driver = {
++ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
++ .verify = cpufreq_generic_frequency_table_verify,
++ .target_index = imx6_set_target,
++ .get = cpufreq_generic_get,
++ .init = imx6_cpufreq_init,
++ .exit = cpufreq_generic_exit,
++ .name = "imx6-cpufreq",
++ .attr = cpufreq_generic_attr,
++};
++
++static int imx6_cpufreq_pm_notify(struct notifier_block *nb,
++ unsigned long event, void *dummy)
++{
++ struct cpufreq_policy *data = cpufreq_cpu_get(0);
++ static u32 cpufreq_policy_min_pre_suspend;
++
++ /*
++ * During suspend/resume, When cpufreq driver try to increase
++ * voltage/freq, it needs to control I2C/SPI to communicate
++ * with external PMIC to adjust voltage, but these I2C/SPI
++ * devices may be already suspended, to avoid such scenario,
++ * we just increase cpufreq to highest setpoint before suspend.
++ */
++ switch (event) {
++ case PM_SUSPEND_PREPARE:
++ cpufreq_policy_min_pre_suspend = data->user_policy.min;
++ data->user_policy.min = data->user_policy.max;
++ break;
++ case PM_POST_SUSPEND:
++ data->user_policy.min = cpufreq_policy_min_pre_suspend;
++ break;
++ default:
++ break;
++ }
++
++ cpufreq_update_policy(0);
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block imx6_cpufreq_pm_notifier = {
++ .notifier_call = imx6_cpufreq_pm_notify,
++};
++
++static int imx6_cpufreq_probe(struct platform_device *pdev)
++{
++ struct device_node *np;
++ struct dev_pm_opp *opp;
++ unsigned long min_volt, max_volt;
++ int num, ret;
++ const struct property *prop;
++ const __be32 *val;
++ u32 nr, i, j;
++
++ cpu_dev = get_cpu_device(0);
++ if (!cpu_dev) {
++ pr_err("failed to get cpu0 device\n");
++ return -ENODEV;
++ }
++
++ np = of_node_get(cpu_dev->of_node);
++ if (!np) {
++ dev_err(cpu_dev, "failed to find cpu0 node\n");
++ return -ENOENT;
++ }
++
++ arm_clk = devm_clk_get(cpu_dev, "arm");
++ pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
++ pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
++ step_clk = devm_clk_get(cpu_dev, "step");
++ pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
++ if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
++ IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
++ dev_err(cpu_dev, "failed to get clocks\n");
++ ret = -ENOENT;
++ goto put_node;
++ }
++
++ arm_reg = devm_regulator_get(cpu_dev, "arm");
++ pu_reg = devm_regulator_get(cpu_dev, "pu");
++ soc_reg = devm_regulator_get(cpu_dev, "soc");
++ if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
++ dev_err(cpu_dev, "failed to get regulators\n");
++ ret = -ENOENT;
++ goto put_node;
++ }
++
++ /*
++ * We expect an OPP table supplied by platform.
++ * Just, incase the platform did not supply the OPP
++ * table, it will try to get it.
++ */
++ num = dev_pm_opp_get_opp_count(cpu_dev);
++ if (num < 0) {
++ ret = of_init_opp_table(cpu_dev);
++ if (ret < 0) {
++ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
++ goto put_node;
++ }
++
++ num = dev_pm_opp_get_opp_count(cpu_dev);
++ if (num < 0) {
++ ret = num;
++ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
++ goto put_node;
++ }
++ }
++
++ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
++ if (ret) {
++ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
++ goto put_node;
++ }
++
++ /* Make imx6_soc_volt array's size same as arm opp number */
++ imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
++ if (imx6_soc_volt == NULL) {
++ ret = -ENOMEM;
++ goto free_freq_table;
++ }
++
++ prop = of_find_property(np, "fsl,soc-operating-points", NULL);
++ if (!prop || !prop->value)
++ goto soc_opp_out;
++
++ /*
++ * Each OPP is a set of tuples consisting of frequency and
++ * voltage like <freq-kHz vol-uV>.
++ */
++ nr = prop->length / sizeof(u32);
++ if (nr % 2 || (nr / 2) < num)
++ goto soc_opp_out;
++
++ for (j = 0; j < num; j++) {
++ val = prop->value;
++ for (i = 0; i < nr / 2; i++) {
++ unsigned long freq = be32_to_cpup(val++);
++ unsigned long volt = be32_to_cpup(val++);
++ if (freq_table[j].frequency == freq) {
++ imx6_soc_volt[soc_opp_count++] = volt;
++#ifdef CONFIG_MX6_VPU_352M
++ if (freq == 792000) {
++ pr_info("increase SOC/PU voltage for VPU352MHz\n");
++ imx6_soc_volt[soc_opp_count-1] = 1250000;
++ }
++#endif
++
++ break;
++ }
++ }
++ }
++
++soc_opp_out:
++ /* use fixed soc opp volt if no valid soc opp info found in dtb */
++ if (soc_opp_count != num) {
++ dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
++ for (j = 0; j < num; j++)
++ imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
++ if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
++ imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
++ }
++
++ if (of_property_read_u32(np, "clock-latency", &transition_latency))
++ transition_latency = CPUFREQ_ETERNAL;
++
++ /*
++ * Calculate the ramp time for max voltage change in the
++ * VDDSOC and VDDPU regulators.
++ */
++ ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
++ if (ret > 0)
++ transition_latency += ret * 1000;
++ ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
++ if (ret > 0)
++ transition_latency += ret * 1000;
++
++ /*
++ * OPP is maintained in order of increasing frequency, and
++ * freq_table initialised from OPP is therefore sorted in the
++ * same order.
++ */
++ rcu_read_lock();
++ opp = dev_pm_opp_find_freq_exact(cpu_dev,
++ freq_table[0].frequency * 1000, true);
++ min_volt = dev_pm_opp_get_voltage(opp);
++ opp = dev_pm_opp_find_freq_exact(cpu_dev,
++ freq_table[--num].frequency * 1000, true);
++ max_volt = dev_pm_opp_get_voltage(opp);
++ rcu_read_unlock();
++ ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
++ if (ret > 0)
++ transition_latency += ret * 1000;
++
++ mutex_init(&set_cpufreq_lock);
++ register_pm_notifier(&imx6_cpufreq_pm_notifier);
++
++ ret = cpufreq_register_driver(&imx6_cpufreq_driver);
++ if (ret) {
++ dev_err(cpu_dev, "failed register driver: %d\n", ret);
++ goto free_freq_table;
++ }
++
++ of_node_put(np);
++ return 0;
++
++free_freq_table:
++ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
++put_node:
++ of_node_put(np);
++ return ret;
++}
++
++static int imx6_cpufreq_remove(struct platform_device *pdev)
++{
++ cpufreq_unregister_driver(&imx6_cpufreq_driver);
++ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
++
++ return 0;
++}
++
++static struct platform_driver imx6_cpufreq_platdrv = {
++ .driver = {
++ .name = "imx6-cpufreq",
++ .owner = THIS_MODULE,
++ },
++ .probe = imx6_cpufreq_probe,
++ .remove = imx6_cpufreq_remove,
++};
++module_platform_driver(imx6_cpufreq_platdrv);
++
++MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
++MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/cpufreq/imx6q-cpufreq.c linux-openelec/drivers/cpufreq/imx6q-cpufreq.c
+--- linux-3.14.36/drivers/cpufreq/imx6q-cpufreq.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/cpufreq/imx6q-cpufreq.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,330 +0,0 @@
+-/*
+- * Copyright (C) 2013 Freescale Semiconductor, Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/clk.h>
+-#include <linux/cpu.h>
+-#include <linux/cpufreq.h>
+-#include <linux/delay.h>
+-#include <linux/err.h>
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/pm_opp.h>
+-#include <linux/platform_device.h>
+-#include <linux/regulator/consumer.h>
+-
+-#define PU_SOC_VOLTAGE_NORMAL 1250000
+-#define PU_SOC_VOLTAGE_HIGH 1275000
+-#define FREQ_1P2_GHZ 1200000000
+-
+-static struct regulator *arm_reg;
+-static struct regulator *pu_reg;
+-static struct regulator *soc_reg;
+-
+-static struct clk *arm_clk;
+-static struct clk *pll1_sys_clk;
+-static struct clk *pll1_sw_clk;
+-static struct clk *step_clk;
+-static struct clk *pll2_pfd2_396m_clk;
+-
+-static struct device *cpu_dev;
+-static struct cpufreq_frequency_table *freq_table;
+-static unsigned int transition_latency;
+-
+-static u32 *imx6_soc_volt;
+-static u32 soc_opp_count;
+-
+-static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
+-{
+- struct dev_pm_opp *opp;
+- unsigned long freq_hz, volt, volt_old;
+- unsigned int old_freq, new_freq;
+- int ret;
+-
+- new_freq = freq_table[index].frequency;
+- freq_hz = new_freq * 1000;
+- old_freq = clk_get_rate(arm_clk) / 1000;
+-
+- rcu_read_lock();
+- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
+- if (IS_ERR(opp)) {
+- rcu_read_unlock();
+- dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
+- return PTR_ERR(opp);
+- }
+-
+- volt = dev_pm_opp_get_voltage(opp);
+- rcu_read_unlock();
+- volt_old = regulator_get_voltage(arm_reg);
+-
+- dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+- old_freq / 1000, volt_old / 1000,
+- new_freq / 1000, volt / 1000);
+-
+- /* scaling up? scale voltage before frequency */
+- if (new_freq > old_freq) {
+- ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+- if (ret) {
+- dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+- return ret;
+- }
+- ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+- if (ret) {
+- dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
+- return ret;
+- }
+- ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+- if (ret) {
+- dev_err(cpu_dev,
+- "failed to scale vddarm up: %d\n", ret);
+- return ret;
+- }
+- }
+-
+- /*
+- * The setpoints are selected per PLL/PDF frequencies, so we need to
+- * reprogram PLL for frequency scaling. The procedure of reprogramming
+- * PLL1 is as below.
+- *
+- * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
+- * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
+- * - Disable pll2_pfd2_396m_clk
+- */
+- clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+- clk_set_parent(pll1_sw_clk, step_clk);
+- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+- clk_set_rate(pll1_sys_clk, new_freq * 1000);
+- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+- }
+-
+- /* Ensure the arm clock divider is what we expect */
+- ret = clk_set_rate(arm_clk, new_freq * 1000);
+- if (ret) {
+- dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+- regulator_set_voltage_tol(arm_reg, volt_old, 0);
+- return ret;
+- }
+-
+- /* scaling down? scale voltage after frequency */
+- if (new_freq < old_freq) {
+- ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+- if (ret) {
+- dev_warn(cpu_dev,
+- "failed to scale vddarm down: %d\n", ret);
+- ret = 0;
+- }
+- ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+- if (ret) {
+- dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
+- ret = 0;
+- }
+- ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+- if (ret) {
+- dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+- ret = 0;
+- }
+- }
+-
+- return 0;
+-}
+-
+-static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
+-{
+- policy->clk = arm_clk;
+- return cpufreq_generic_init(policy, freq_table, transition_latency);
+-}
+-
+-static struct cpufreq_driver imx6q_cpufreq_driver = {
+- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+- .verify = cpufreq_generic_frequency_table_verify,
+- .target_index = imx6q_set_target,
+- .get = cpufreq_generic_get,
+- .init = imx6q_cpufreq_init,
+- .exit = cpufreq_generic_exit,
+- .name = "imx6q-cpufreq",
+- .attr = cpufreq_generic_attr,
+-};
+-
+-static int imx6q_cpufreq_probe(struct platform_device *pdev)
+-{
+- struct device_node *np;
+- struct dev_pm_opp *opp;
+- unsigned long min_volt, max_volt;
+- int num, ret;
+- const struct property *prop;
+- const __be32 *val;
+- u32 nr, i, j;
+-
+- cpu_dev = get_cpu_device(0);
+- if (!cpu_dev) {
+- pr_err("failed to get cpu0 device\n");
+- return -ENODEV;
+- }
+-
+- np = of_node_get(cpu_dev->of_node);
+- if (!np) {
+- dev_err(cpu_dev, "failed to find cpu0 node\n");
+- return -ENOENT;
+- }
+-
+- arm_clk = devm_clk_get(cpu_dev, "arm");
+- pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
+- pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
+- step_clk = devm_clk_get(cpu_dev, "step");
+- pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
+- if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
+- IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
+- dev_err(cpu_dev, "failed to get clocks\n");
+- ret = -ENOENT;
+- goto put_node;
+- }
+-
+- arm_reg = devm_regulator_get(cpu_dev, "arm");
+- pu_reg = devm_regulator_get(cpu_dev, "pu");
+- soc_reg = devm_regulator_get(cpu_dev, "soc");
+- if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
+- dev_err(cpu_dev, "failed to get regulators\n");
+- ret = -ENOENT;
+- goto put_node;
+- }
+-
+- /*
+- * We expect an OPP table supplied by platform.
+- * Just, incase the platform did not supply the OPP
+- * table, it will try to get it.
+- */
+- num = dev_pm_opp_get_opp_count(cpu_dev);
+- if (num < 0) {
+- ret = of_init_opp_table(cpu_dev);
+- if (ret < 0) {
+- dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
+- goto put_node;
+- }
+-
+- num = dev_pm_opp_get_opp_count(cpu_dev);
+- if (num < 0) {
+- ret = num;
+- dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+- goto put_node;
+- }
+- }
+-
+- ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+- if (ret) {
+- dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+- goto put_node;
+- }
+-
+- /* Make imx6_soc_volt array's size same as arm opp number */
+- imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
+- if (imx6_soc_volt == NULL) {
+- ret = -ENOMEM;
+- goto free_freq_table;
+- }
+-
+- prop = of_find_property(np, "fsl,soc-operating-points", NULL);
+- if (!prop || !prop->value)
+- goto soc_opp_out;
+-
+- /*
+- * Each OPP is a set of tuples consisting of frequency and
+- * voltage like <freq-kHz vol-uV>.
+- */
+- nr = prop->length / sizeof(u32);
+- if (nr % 2 || (nr / 2) < num)
+- goto soc_opp_out;
+-
+- for (j = 0; j < num; j++) {
+- val = prop->value;
+- for (i = 0; i < nr / 2; i++) {
+- unsigned long freq = be32_to_cpup(val++);
+- unsigned long volt = be32_to_cpup(val++);
+- if (freq_table[j].frequency == freq) {
+- imx6_soc_volt[soc_opp_count++] = volt;
+- break;
+- }
+- }
+- }
+-
+-soc_opp_out:
+- /* use fixed soc opp volt if no valid soc opp info found in dtb */
+- if (soc_opp_count != num) {
+- dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
+- for (j = 0; j < num; j++)
+- imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
+- if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
+- imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
+- }
+-
+- if (of_property_read_u32(np, "clock-latency", &transition_latency))
+- transition_latency = CPUFREQ_ETERNAL;
+-
+- /*
+- * Calculate the ramp time for max voltage change in the
+- * VDDSOC and VDDPU regulators.
+- */
+- ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+- if (ret > 0)
+- transition_latency += ret * 1000;
+- ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+- if (ret > 0)
+- transition_latency += ret * 1000;
+-
+- /*
+- * OPP is maintained in order of increasing frequency, and
+- * freq_table initialised from OPP is therefore sorted in the
+- * same order.
+- */
+- rcu_read_lock();
+- opp = dev_pm_opp_find_freq_exact(cpu_dev,
+- freq_table[0].frequency * 1000, true);
+- min_volt = dev_pm_opp_get_voltage(opp);
+- opp = dev_pm_opp_find_freq_exact(cpu_dev,
+- freq_table[--num].frequency * 1000, true);
+- max_volt = dev_pm_opp_get_voltage(opp);
+- rcu_read_unlock();
+- ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
+- if (ret > 0)
+- transition_latency += ret * 1000;
+-
+- ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
+- if (ret) {
+- dev_err(cpu_dev, "failed register driver: %d\n", ret);
+- goto free_freq_table;
+- }
+-
+- of_node_put(np);
+- return 0;
+-
+-free_freq_table:
+- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+-put_node:
+- of_node_put(np);
+- return ret;
+-}
+-
+-static int imx6q_cpufreq_remove(struct platform_device *pdev)
+-{
+- cpufreq_unregister_driver(&imx6q_cpufreq_driver);
+- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+-
+- return 0;
+-}
+-
+-static struct platform_driver imx6q_cpufreq_platdrv = {
+- .driver = {
+- .name = "imx6q-cpufreq",
+- .owner = THIS_MODULE,
+- },
+- .probe = imx6q_cpufreq_probe,
+- .remove = imx6q_cpufreq_remove,
+-};
+-module_platform_driver(imx6q_cpufreq_platdrv);
+-
+-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+-MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
+-MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/cpufreq/Kconfig linux-openelec/drivers/cpufreq/Kconfig
+--- linux-3.14.36/drivers/cpufreq/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/cpufreq/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -91,6 +91,15 @@
+ governor. If unsure have a look at the help section of the
+ driver. Fallback governor will be the performance governor.
+
++config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
++ bool "interactive"
++ select CPU_FREQ_GOV_INTERACTIVE
++ help
++ Use the CPUFreq governor 'interactive' as default. This allows
++ you to get a full dynamic cpu frequency capable system by simply
++ loading your cpufreq low-level hardware driver, using the
++ 'interactive' governor for latency-sensitive workloads.
++
+ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+ bool "conservative"
+ select CPU_FREQ_GOV_CONSERVATIVE
+@@ -157,6 +166,24 @@
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
++ If in doubt, say N.
++
++config CPU_FREQ_GOV_INTERACTIVE
++ tristate "'interactive' cpufreq policy governor"
++ default n
++ help
++ 'interactive' - This driver adds a dynamic cpufreq policy governor
++ designed for latency-sensitive workloads.
++
++ This governor attempts to reduce the latency of clock
++ increases so that the system is more responsive to
++ interactive workloads.
++
++ To compile this driver as a module, choose M here: the
++ module will be called cpufreq_interactive.
++
++ For details, take a look at linux/Documentation/cpu-freq.
++
+ If in doubt, say N.
+
+ config CPU_FREQ_GOV_CONSERVATIVE
+diff -Nur linux-3.14.36/drivers/cpufreq/Kconfig.arm linux-openelec/drivers/cpufreq/Kconfig.arm
+--- linux-3.14.36/drivers/cpufreq/Kconfig.arm 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/cpufreq/Kconfig.arm 2015-05-06 12:05:42.000000000 -0500
+@@ -4,7 +4,8 @@
+
+ config ARM_BIG_LITTLE_CPUFREQ
+ tristate "Generic ARM big LITTLE CPUfreq driver"
+- depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
++ depends on (BIG_LITTLE && ARM_CPU_TOPOLOGY) || (ARM64 && SMP)
++ depends on HAVE_CLK
+ select PM_OPP
+ help
+ This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+@@ -95,7 +96,7 @@
+
+ If in doubt, say N.
+
+-config ARM_IMX6Q_CPUFREQ
++config ARM_IMX6_CPUFREQ
+ tristate "Freescale i.MX6 cpufreq support"
+ depends on ARCH_MXC
+ depends on REGULATOR_ANATOP
+diff -Nur linux-3.14.36/drivers/cpufreq/Makefile linux-openelec/drivers/cpufreq/Makefile
+--- linux-3.14.36/drivers/cpufreq/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/cpufreq/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -8,6 +8,7 @@
+ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
+ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
+ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
++obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
+ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
+
+@@ -55,7 +56,7 @@
+ obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
+ obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
+ obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+-obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
++obj-$(CONFIG_ARM_IMX6_CPUFREQ) += imx6-cpufreq.o
+ obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
+ obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+diff -Nur linux-3.14.36/drivers/crypto/caam/secvio.c linux-openelec/drivers/crypto/caam/secvio.c
+--- linux-3.14.36/drivers/crypto/caam/secvio.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/crypto/caam/secvio.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,335 @@
++
++/*
++ * CAAM/SEC 4.x Security Violation Handler
++ * Copyright (C) 2013 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#include "compat.h"
++#include "intern.h"
++#include "secvio.h"
++#include "regs.h"
++
++/*
++ * These names are associated with each violation handler.
++ * The source names were taken from MX6, and are based on recommendations
++ * for most common SoCs.
++ */
++static const u8 *violation_src_name[] = {
++ "CAAM Security Violation",
++ "JTAG Alarm",
++ "Watchdog",
++ "(reserved)",
++ "External Boot",
++ "Tamper Detect",
++};
++
++/* Top-level security violation interrupt */
++static irqreturn_t caam_secvio_interrupt(int irq, void *snvsdev)
++{
++ struct device *dev = snvsdev;
++ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
++ u32 irqstate;
++
++ /* Check the HP secvio status register */
++ irqstate = rd_reg32(&svpriv->svregs->hp.secvio_status) |
++ HP_SECVIOST_SECVIOMASK;
++
++ if (!irqstate)
++ return IRQ_NONE;
++
++ /* Mask out one or more causes for deferred service */
++ clrbits32(&svpriv->svregs->hp.secvio_int_ctl, irqstate);
++
++ /* Now ACK causes */
++ setbits32(&svpriv->svregs->hp.secvio_status, irqstate);
++
++ /* And run deferred service */
++ preempt_disable();
++ tasklet_schedule(&svpriv->irqtask[smp_processor_id()]);
++ preempt_enable();
++
++ return IRQ_HANDLED;
++}
++
++/* Deferred service handler. Tasklet arg is simply the SNVS dev */
++static void caam_secvio_dispatch(unsigned long indev)
++{
++ struct device *dev = (struct device *)indev;
++ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
++ unsigned long flags, cause;
++ int i;
++
++
++ /*
++ * Capture the interrupt cause, using masked interrupts as
++ * identification. This only works if all are enabled; if
++ * this changes in the future, a "cause queue" will have to
++ * be built
++ */
++ cause = rd_reg32(&svpriv->svregs->hp.secvio_int_ctl) &
++ (HP_SECVIO_INTEN_SRC5 | HP_SECVIO_INTEN_SRC4 |
++ HP_SECVIO_INTEN_SRC3 | HP_SECVIO_INTEN_SRC2 |
++ HP_SECVIO_INTEN_SRC1 | HP_SECVIO_INTEN_SRC0);
++
++ /* Look through causes, call each handler if exists */
++ for (i = 0; i < MAX_SECVIO_SOURCES; i++)
++ if (cause & (1 << i)) {
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[i].handler(dev, i,
++ svpriv->intsrc[i].ext);
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++ };
++
++ /* Re-enable now-serviced interrupts */
++ setbits32(&svpriv->svregs->hp.secvio_int_ctl, cause);
++}
++
++/*
++ * Default cause handler, used in lieu of an application-defined handler.
++ * All it does at this time is print a console message. It could force a halt.
++ */
++static void caam_secvio_default(struct device *dev, u32 cause, void *ext)
++{
++ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
++
++ dev_err(dev, "Unhandled Security Violation Interrupt %d = %s\n",
++ cause, svpriv->intsrc[cause].intname);
++}
++
++/*
++ * Install an application-defined handler for a specified cause
++ * Arguments:
++ * - dev points to SNVS-owning device
++ * - cause interrupt source cause
++ * - handler application-defined handler, gets called with dev
++ * source cause, and locally-defined handler argument
++ * - cause_description points to a string to override the default cause
++ * name, this can be used as an alternate for error
++ * messages and such. If left NULL, the default
++ * description string is used.
++ * - ext pointer to any extra data needed by the handler.
++ */
++int caam_secvio_install_handler(struct device *dev, enum secvio_cause cause,
++ void (*handler)(struct device *dev, u32 cause,
++ void *ext),
++ u8 *cause_description, void *ext)
++{
++ unsigned long flags;
++ struct caam_drv_private_secvio *svpriv;
++
++ svpriv = dev_get_drvdata(dev);
++
++ if ((handler == NULL) || (cause > SECVIO_CAUSE_SOURCE_5))
++ return -EINVAL;
++
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[cause].handler = handler;
++ if (cause_description != NULL)
++ svpriv->intsrc[cause].intname = cause_description;
++ if (ext != NULL)
++ svpriv->intsrc[cause].ext = ext;
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(caam_secvio_install_handler);
++
++/*
++ * Remove an application-defined handler for a specified cause (and, by
++ * implication, restore the "default".
++ * Arguments:
++ * - dev points to SNVS-owning device
++ * - cause interrupt source cause
++ */
++int caam_secvio_remove_handler(struct device *dev, enum secvio_cause cause)
++{
++ unsigned long flags;
++ struct caam_drv_private_secvio *svpriv;
++
++ svpriv = dev_get_drvdata(dev);
++
++ if (cause > SECVIO_CAUSE_SOURCE_5)
++ return -EINVAL;
++
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[cause].intname = violation_src_name[cause];
++ svpriv->intsrc[cause].handler = caam_secvio_default;
++ svpriv->intsrc[cause].ext = NULL;
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++ return 0;
++}
++EXPORT_SYMBOL(caam_secvio_remove_handler);
++
++int caam_secvio_startup(struct platform_device *pdev)
++{
++ struct device *ctrldev, *svdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_secvio *svpriv;
++ struct platform_device *svpdev;
++ struct device_node *np;
++ const void *prop;
++ int i, error, secvio_inten_src;
++
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++ /*
++ * Set up the private block for secure memory
++ * Only one instance is possible
++ */
++ svpriv = kzalloc(sizeof(struct caam_drv_private_secvio), GFP_KERNEL);
++ if (svpriv == NULL) {
++ dev_err(ctrldev, "can't alloc private mem for secvio\n");
++ return -ENOMEM;
++ }
++ svpriv->parentdev = ctrldev;
++
++ /* Create the security violation dev */
++#ifdef CONFIG_OF
++
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio");
++	if (!np) {
++		kfree(svpriv); return -ENODEV; }
++
++ ctrlpriv->secvio_irq = of_irq_to_resource(np, 0, NULL);
++
++ prop = of_get_property(np, "secvio_src", NULL);
++ if (prop)
++ secvio_inten_src = of_read_ulong(prop, 1);
++ else
++ secvio_inten_src = HP_SECVIO_INTEN_ALL;
++
++	printk(KERN_INFO "secvio_inten_src = %x\n", secvio_inten_src);
++
++ svpdev = of_platform_device_create(np, NULL, ctrldev);
++	if (!svpdev) {
++		kfree(svpriv); return -ENODEV; }
++
++#else
++ svpdev = platform_device_register_data(ctrldev, "caam_secvio", 0,
++ svpriv,
++ sizeof(struct caam_drv_private_secvio));
++
++ secvio_inten_src = HP_SECVIO_INTEN_ALL;
++#endif
++ if (svpdev == NULL) {
++ kfree(svpriv);
++ return -EINVAL;
++ }
++ svdev = &svpdev->dev;
++ dev_set_drvdata(svdev, svpriv);
++ ctrlpriv->secviodev = svdev;
++ svpriv->svregs = ctrlpriv->snvs;
++
++ /*
++ * Now we have all the dev data set up. Init interrupt
++ * source descriptions
++ */
++ for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
++ svpriv->intsrc[i].intname = violation_src_name[i];
++ svpriv->intsrc[i].handler = caam_secvio_default;
++ }
++
++ /* Connect main handler */
++ for_each_possible_cpu(i)
++ tasklet_init(&svpriv->irqtask[i], caam_secvio_dispatch,
++ (unsigned long)svdev);
++
++ error = request_irq(ctrlpriv->secvio_irq, caam_secvio_interrupt,
++ IRQF_SHARED, "caam_secvio", svdev);
++ if (error) {
++ dev_err(svdev, "can't connect secvio interrupt\n");
++ irq_dispose_mapping(ctrlpriv->secvio_irq);
++ ctrlpriv->secvio_irq = 0;
++ return -EINVAL;
++ }
++
++ /* Enable all sources */
++ wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, secvio_inten_src);
++
++ dev_info(svdev, "security violation service handlers armed\n");
++
++ return 0;
++}
++
++void caam_secvio_shutdown(struct platform_device *pdev)
++{
++ struct device *ctrldev, *svdev;
++ struct caam_drv_private *priv;
++ struct caam_drv_private_secvio *svpriv;
++ int i;
++
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++ svdev = priv->secviodev;
++ svpriv = dev_get_drvdata(svdev);
++
++ /* Shut off all sources */
++
++ wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, 0);
++
++ /* Remove tasklets and release interrupt */
++ for_each_possible_cpu(i)
++ tasklet_kill(&svpriv->irqtask[i]);
++
++ free_irq(priv->secvio_irq, svdev);
++
++ kfree(svpriv);
++}
++
++
++#ifdef CONFIG_OF
++static void __exit caam_secvio_exit(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++	of_node_put(dev_node);
++
++ caam_secvio_shutdown(pdev);
++
++}
++
++static int __init caam_secvio_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL,
++ "arm,imx6-caam-secvio");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_put(dev_node);
++
++ return caam_secvio_startup(pdev);
++}
++
++module_init(caam_secvio_init);
++module_exit(caam_secvio_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM/SNVS Security Violation Handler");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif
+diff -Nur linux-3.14.36/drivers/crypto/caam/secvio.h linux-openelec/drivers/crypto/caam/secvio.h
+--- linux-3.14.36/drivers/crypto/caam/secvio.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/crypto/caam/secvio.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,64 @@
++
++/*
++ * CAAM Security Violation Handler
++ * Copyright (C) 2013 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#ifndef SECVIO_H
++#define SECVIO_H
++
++#include "snvsregs.h"
++
++
++/*
++ * Defines the published interfaces to install/remove application-specified
++ * handlers for catching violations
++ */
++
++#define MAX_SECVIO_SOURCES 6
++
++/* these are the untranslated causes */
++enum secvio_cause {
++ SECVIO_CAUSE_SOURCE_0,
++ SECVIO_CAUSE_SOURCE_1,
++ SECVIO_CAUSE_SOURCE_2,
++ SECVIO_CAUSE_SOURCE_3,
++ SECVIO_CAUSE_SOURCE_4,
++ SECVIO_CAUSE_SOURCE_5
++};
++
++/* These are common "recommended" cause definitions for most devices */
++#define SECVIO_CAUSE_CAAM_VIOLATION SECVIO_CAUSE_SOURCE_0
++#define SECVIO_CAUSE_JTAG_ALARM		SECVIO_CAUSE_SOURCE_1
++#define SECVIO_CAUSE_WATCHDOG SECVIO_CAUSE_SOURCE_2
++#define SECVIO_CAUSE_EXTERNAL_BOOT SECVIO_CAUSE_SOURCE_4
++#define SECVIO_CAUSE_TAMPER_DETECT SECVIO_CAUSE_SOURCE_5
++
++int caam_secvio_install_handler(struct device *dev, enum secvio_cause cause,
++ void (*handler)(struct device *dev, u32 cause,
++ void *ext),
++ u8 *cause_description, void *ext);
++int caam_secvio_remove_handler(struct device *dev, enum secvio_cause cause);
++
++/*
++ * Private data definitions for the secvio "driver"
++ */
++
++struct secvio_int_src {
++ const u8 *intname; /* Points to a descriptive name for source */
++ void *ext; /* Extended data to pass to the handler */
++ void (*handler)(struct device *dev, u32 cause, void *ext);
++};
++
++struct caam_drv_private_secvio {
++ struct device *parentdev; /* points back to the controller */
++ spinlock_t svlock ____cacheline_aligned;
++ struct tasklet_struct irqtask[NR_CPUS];
++ struct snvs_full __iomem *svregs; /* both HP and LP domains */
++
++ /* Registered handlers for each violation */
++ struct secvio_int_src intsrc[MAX_SECVIO_SOURCES];
++
++};
++
++#endif /* SECVIO_H */
+diff -Nur linux-3.14.36/drivers/crypto/caam/sm.h linux-openelec/drivers/crypto/caam/sm.h
+--- linux-3.14.36/drivers/crypto/caam/sm.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/crypto/caam/sm.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,88 @@
++
++/*
++ * CAAM Secure Memory/Keywrap API Definitions
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
++ */
++
++#ifndef SM_H
++#define SM_H
++
++
++/* Storage access permissions */
++#define SM_PERM_READ 0x01
++#define SM_PERM_WRITE 0x02
++#define SM_PERM_BLOB 0x03
++
++
++/* Keystore maintenance functions */
++void sm_init_keystore(struct device *dev);
++u32 sm_detect_keystore_units(struct device *dev);
++int sm_establish_keystore(struct device *dev, u32 unit);
++void sm_release_keystore(struct device *dev, u32 unit);
++void caam_sm_shutdown(struct platform_device *pdev);
++int caam_sm_example_init(struct platform_device *pdev);
++
++/* Keystore accessor functions */
++extern int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size,
++ u32 *slot);
++extern int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot);
++extern int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
++ const u8 *key_data, u32 key_length);
++extern int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
++ u32 key_length, u8 *key_data);
++extern int sm_keystore_slot_encapsulate(struct device *dev, u32 unit,
++ u32 inslot, u32 outslot, u16 secretlen,
++ u8 *keymod, u16 keymodlen);
++extern int sm_keystore_slot_decapsulate(struct device *dev, u32 unit,
++ u32 inslot, u32 outslot, u16 secretlen,
++ u8 *keymod, u16 keymodlen);
++
++/* Data structure to hold per-slot information */
++struct keystore_data_slot_info {
++ u8 allocated; /* Track slot assignments */
++ u32 key_length; /* Size of the key */
++};
++
++/* Data structure to hold keystore information */
++struct keystore_data {
++ void *base_address; /* Base of the Secure Partition */
++ u32 slot_count; /* Number of slots in the keystore */
++ struct keystore_data_slot_info *slot; /* Per-slot information */
++};
++
++/* store the detected attributes of a secure memory page */
++struct sm_page_descriptor {
++ u16 phys_pagenum; /* may be discontiguous */
++ u16 own_part; /* Owning partition */
++ void *pg_base; /* Calculated virtual address */
++ struct keystore_data *ksdata;
++};
++
++struct caam_drv_private_sm {
++ struct device *parentdev; /* this ends up as the controller */
++ struct device *smringdev; /* ring that owns this instance */
++ spinlock_t kslock ____cacheline_aligned;
++
++ /* Default parameters for geometry */
++ u32 max_pages; /* maximum pages this instance can support */
++ u32 top_partition; /* highest partition number in this instance */
++ u32 top_page; /* highest page number in this instance */
++ u32 page_size; /* page size */
++ u32 slot_size; /* selected size of each storage block */
++
++ /* Partition/Page Allocation Map */
++ u32 localpages; /* Number of pages we can access */
++ struct sm_page_descriptor *pagedesc; /* Allocated per-page */
++
++ /* Installed handlers for keystore access */
++ int (*data_init)(struct device *dev, u32 unit);
++ void (*data_cleanup)(struct device *dev, u32 unit);
++ int (*slot_alloc)(struct device *dev, u32 unit, u32 size, u32 *slot);
++ int (*slot_dealloc)(struct device *dev, u32 unit, u32 slot);
++ void *(*slot_get_address)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_base)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_offset)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_slot_size)(struct device *dev, u32 unit, u32 handle);
++};
++
++#endif /* SM_H */
+diff -Nur linux-3.14.36/drivers/crypto/caam/sm_store.c linux-openelec/drivers/crypto/caam/sm_store.c
+--- linux-3.14.36/drivers/crypto/caam/sm_store.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/crypto/caam/sm_store.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,896 @@
++
++/*
++ * CAAM Secure Memory Storage Interface
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
++ *
++ * Loosely based on the SHW Keystore API for SCC/SCC2
++ * Experimental implementation and NOT intended for upstream use. Expect
++ * this interface to be amended significantly in the future once it becomes
++ * integrated into live applications.
++ *
++ * Known issues:
++ *
++ * - Executes one instance of an secure memory "driver". This is tied to the
++ * fact that job rings can't run as standalone instances in the present
++ * configuration.
++ *
++ * - It does not expose a userspace interface. The value of a userspace
++ * interface for access to secrets is a point for further architectural
++ * discussion.
++ *
++ * - Partition/permission management is not part of this interface. It
++ * depends on some level of "knowledge" agreed upon between bootloader,
++ * provisioning applications, and OS-hosted software (which uses this
++ * driver).
++ *
++ * - No means of identifying the location or purpose of secrets managed by
++ * this interface exists; "slot location" and format of a given secret
++ * needs to be agreed upon between bootloader, provisioner, and OS-hosted
++ * application.
++ */
++
++#include "compat.h"
++#include "regs.h"
++#include "jr.h"
++#include "desc.h"
++#include "intern.h"
++#include "error.h"
++#include "sm.h"
++
++#ifdef SM_DEBUG_CONT
++void sm_show_page(struct device *dev, struct sm_page_descriptor *pgdesc)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ u32 i, *smdata;
++
++ dev_info(dev, "physical page %d content at 0x%08x\n",
++ pgdesc->phys_pagenum, pgdesc->pg_base);
++ smdata = pgdesc->pg_base;
++ for (i = 0; i < (smpriv->page_size / sizeof(u32)); i += 4)
++ dev_info(dev, "[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
++ (u32)&smdata[i], smdata[i], smdata[i+1], smdata[i+2],
++ smdata[i+3]);
++}
++#endif
++
++/*
++ * Construct a secure memory blob encapsulation job descriptor
++ *
++ * - desc pointer to hold new (to be allocated) pointer to the generated
++ * descriptor for later use. Calling thread can kfree the
++ * descriptor after execution.
++ * - keymod Physical pointer to key modifier (contiguous piece).
++ * - keymodsz Size of key modifier in bytes (should normally be 8).
++ * - secretbuf Physical pointer (within an accessible secure memory page)
++ * of the secret to be encapsulated.
++ * - outbuf Physical pointer (within an accessible secure memory page)
++ * of the encapsulated output. This will be larger than the
++ * input secret because of the added encapsulation data.
++ * - secretsz Size of input secret, in bytes.
++ * - auth If nonzero, use AES-CCM for encapsulation, else use ECB
++ *
++ * Note: this uses 32-bit pointers at present
++ */
++#define INITIAL_DESCSZ 16 /* size of tmp buffer for descriptor const. */
++static int blob_encap_desc(u32 **desc, dma_addr_t keymod, u16 keymodsz,
++ dma_addr_t secretbuf, dma_addr_t outbuf,
++ u16 secretsz, bool auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key modifier */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
++ (keymodsz & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /* Encapsulate to secure memory */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | secretsz;
++ tmpdesc[idx++] = (u32)secretbuf;
++
++ /* Add space for BKEK and MAC tag */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | (secretsz + (32 + 16));
++
++ tmpdesc[idx++] = (u32)outbuf;
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB |
++ OP_PCL_BLOB_PTXT_SECMEM;
++ if (auth)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
++
++/*
++ * Construct a secure memory blob decapsulation job descriptor
++ *
++ * - desc pointer to hold new (to be allocated) pointer to the generated
++ * descriptor for later use. Calling thread can kfree the
++ * descriptor after execution.
++ * - keymod Physical pointer to key modifier (contiguous piece).
++ * - keymodsz Size of key modifier in bytes (should normally be 16).
++ * - blobbuf Physical pointer (within an accessible secure memory page)
++ * of the blob to be decapsulated.
++ * - outbuf Physical pointer (within an accessible secure memory page)
++ * of the decapsulated output.
++ * - secretsz Size of input blob, in bytes.
++ * - auth If nonzero, assume AES-CCM for decapsulation, else use ECB
++ *
++ * Note: this uses 32-bit pointers at present
++ */
++static int blob_decap_desc(u32 **desc, dma_addr_t keymod, u16 keymodsz,
++ dma_addr_t blobbuf, dma_addr_t outbuf,
++ u16 blobsz, bool auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key modifier */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
++ (keymodsz & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /* Compensate BKEK + MAC tag */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | (blobsz + 32 + 16);
++
++ tmpdesc[idx++] = (u32)blobbuf;
++ tmpdesc[idx++] = CMD_SEQ_OUT_PTR | blobsz;
++ tmpdesc[idx++] = (u32)outbuf;
++
++ /* Decapsulate from secure memory partition to black blob */
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB |
++ OP_PCL_BLOB_PTXT_SECMEM | OP_PCL_BLOB_BLACK;
++ if (auth)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
++
++/*
++ * Pseudo-synchronous ring access functions for carrying out key
++ * encapsulation and decapsulation
++ */
++
++struct sm_key_job_result {
++ int error;
++ struct completion completion;
++};
++
++void sm_key_job_done(struct device *dev, u32 *desc, u32 err, void *context)
++{
++ struct sm_key_job_result *res = context;
++
++ res->error = err; /* save off the error for postprocessing */
++ complete(&res->completion); /* mark us complete */
++}
++
++static int sm_key_job(struct device *ksdev, u32 *jobdesc)
++{
++ struct sm_key_job_result testres;
++ struct caam_drv_private_sm *kspriv;
++ int rtn = 0;
++
++ kspriv = dev_get_drvdata(ksdev);
++
++ init_completion(&testres.completion);
++
++ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, sm_key_job_done,
++ &testres);
++ if (!rtn) {
++ wait_for_completion_interruptible(&testres.completion);
++ rtn = testres.error;
++ }
++ return rtn;
++}
++
++/*
++ * Following section establishes the default methods for keystore access
++ * They are NOT intended for use external to this module
++ *
++ * In the present version, these are the only means for the higher-level
++ * interface to deal with the mechanics of accessing the phyiscal keystore
++ */
++
++
++int slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++ u32 i;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_alloc(): requesting slot for %d bytes\n", size);
++#endif
++
++ if (size > smpriv->slot_size)
++ return -EKEYREJECTED;
++
++ for (i = 0; i < ksdata->slot_count; i++) {
++ if (ksdata->slot[i].allocated == 0) {
++ ksdata->slot[i].allocated = 1;
++ (*slot) = i;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_alloc(): new slot %d allocated\n",
++ *slot);
++#endif
++ return 0;
++ }
++ }
++
++ return -ENOSPC;
++}
++EXPORT_SYMBOL(slot_alloc);
++
++int slot_dealloc(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++ u8 __iomem *slotdata;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_dealloc(): releasing slot %d\n", slot);
++#endif
++ if (slot >= ksdata->slot_count)
++ return -EINVAL;
++ slotdata = ksdata->base_address + slot * smpriv->slot_size;
++
++ if (ksdata->slot[slot].allocated == 1) {
++ /* Forcibly overwrite the data from the keystore */
++ memset(ksdata->base_address + slot * smpriv->slot_size, 0,
++ smpriv->slot_size);
++
++ ksdata->slot[slot].allocated = 0;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_dealloc(): slot %d released\n", slot);
++#endif
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL(slot_dealloc);
++
++void *slot_get_address(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return NULL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_address(): slot %d is 0x%08x\n", slot,
++ (u32)ksdata->base_address + slot * smpriv->slot_size);
++#endif
++
++ return ksdata->base_address + slot * smpriv->slot_size;
++}
++
++u32 slot_get_base(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ /*
++ * There could potentially be more than one secure partition object
++ * associated with this keystore. For now, there is just one.
++ */
++
++ (void)slot;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_base(): slot %d = 0x%08x\n",
++ slot, (u32)ksdata->base_address);
++#endif
++
++ return (u32)(ksdata->base_address);
++}
++
++u32 slot_get_offset(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return -EINVAL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_offset(): slot %d = %d\n", slot,
++ slot * smpriv->slot_size);
++#endif
++
++ return slot * smpriv->slot_size;
++}
++
++u32 slot_get_slot_size(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_slot_size(): slot %d = %d\n", slot,
++ smpriv->slot_size);
++#endif
++ /* All slots are the same size in the default implementation */
++ return smpriv->slot_size;
++}
++
++
++
++int kso_init_data(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ struct keystore_data *keystore_data = NULL;
++ u32 slot_count;
++ u32 keystore_data_size;
++
++ /*
++ * Calculate the required size of the keystore data structure, based
++ * on the number of keys that can fit in the partition.
++ */
++ slot_count = smpriv->page_size / smpriv->slot_size;
++#ifdef SM_DEBUG
++ dev_info(dev, "kso_init_data: %d slots initializing\n", slot_count);
++#endif
++
++ keystore_data_size = sizeof(struct keystore_data) +
++ slot_count *
++ sizeof(struct keystore_data_slot_info);
++
++ keystore_data = kzalloc(keystore_data_size, GFP_KERNEL);
++
++ if (keystore_data == NULL) {
++ retval = -ENOSPC;
++ goto out;
++ }
++
++#ifdef SM_DEBUG
++ dev_info(dev, "kso_init_data: keystore data size = %d\n",
++ keystore_data_size);
++#endif
++
++ /*
++ * Place the slot information structure directly after the keystore data
++ * structure.
++ */
++ keystore_data->slot = (struct keystore_data_slot_info *)
++ (keystore_data + 1);
++ keystore_data->slot_count = slot_count;
++
++ smpriv->pagedesc[unit].ksdata = keystore_data;
++ smpriv->pagedesc[unit].ksdata->base_address =
++ smpriv->pagedesc[unit].pg_base;
++
++ retval = 0;
++
++out:
++ if (retval != 0)
++ if (keystore_data != NULL)
++ kfree(keystore_data);
++
++
++ return retval;
++}
++
++void kso_cleanup_data(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *keystore_data = NULL;
++
++ if (smpriv->pagedesc[unit].ksdata != NULL)
++ keystore_data = smpriv->pagedesc[unit].ksdata;
++
++ /* Release the allocated keystore management data */
++ kfree(smpriv->pagedesc[unit].ksdata);
++
++ return;
++}
++
++
++
++/*
++ * Keystore management section
++ */
++
++void sm_init_keystore(struct device *dev)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++ smpriv->data_init = kso_init_data;
++ smpriv->data_cleanup = kso_cleanup_data;
++ smpriv->slot_alloc = slot_alloc;
++ smpriv->slot_dealloc = slot_dealloc;
++ smpriv->slot_get_address = slot_get_address;
++ smpriv->slot_get_base = slot_get_base;
++ smpriv->slot_get_offset = slot_get_offset;
++ smpriv->slot_get_slot_size = slot_get_slot_size;
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_init_keystore(): handlers installed\n");
++#endif
++}
++EXPORT_SYMBOL(sm_init_keystore);
++
++/* Return available pages/units */
++u32 sm_detect_keystore_units(struct device *dev)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++ return smpriv->localpages;
++}
++EXPORT_SYMBOL(sm_detect_keystore_units);
++
++/*
++ * Do any keystore specific initializations
++ */
++int sm_establish_keystore(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_establish_keystore(): unit %d initializing\n", unit);
++#endif
++
++ if (smpriv->data_init == NULL)
++ return -EINVAL;
++
++ /* Call the data_init function for any user setup */
++ return smpriv->data_init(dev, unit);
++}
++EXPORT_SYMBOL(sm_establish_keystore);
++
++void sm_release_keystore(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_establish_keystore(): unit %d releasing\n", unit);
++#endif
++ if ((smpriv != NULL) && (smpriv->data_cleanup != NULL))
++ smpriv->data_cleanup(dev, unit);
++
++ return;
++}
++EXPORT_SYMBOL(sm_release_keystore);
++
++/*
++ * Subsequent interface (sm_keystore_*) forms the accessor interface to
++ * the keystore
++ */
++int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++
++ spin_lock(&smpriv->kslock);
++
++ if ((smpriv->slot_alloc == NULL) ||
++ (smpriv->pagedesc[unit].ksdata == NULL))
++ goto out;
++
++ retval = smpriv->slot_alloc(dev, unit, size, slot);
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_alloc);
++
++int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++
++ spin_lock(&smpriv->kslock);
++
++ if ((smpriv->slot_alloc == NULL) ||
++ (smpriv->pagedesc[unit].ksdata == NULL))
++ goto out;
++
++ retval = smpriv->slot_dealloc(dev, unit, slot);
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_dealloc);
++
++int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
++ const u8 *key_data, u32 key_length)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ u32 slot_size;
++ u32 i;
++ u8 __iomem *slot_location;
++
++ spin_lock(&smpriv->kslock);
++
++ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
++
++ if (key_length > slot_size) {
++ retval = -EFBIG;
++ goto out;
++ }
++
++ slot_location = smpriv->slot_get_address(dev, unit, slot);
++
++ for (i = 0; i < key_length; i++)
++ slot_location[i] = key_data[i];
++
++ retval = 0;
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_load);
++
++int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
++ u32 key_length, u8 *key_data)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ u8 __iomem *slot_addr;
++ u32 slot_size;
++
++ spin_lock(&smpriv->kslock);
++
++ slot_addr = smpriv->slot_get_address(dev, unit, slot);
++ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
++
++ if (key_length > slot_size) {
++ retval = -EKEYREJECTED;
++ goto out;
++ }
++
++ memcpy(key_data, slot_addr, key_length);
++ retval = 0;
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_read);
++
++int sm_keystore_slot_encapsulate(struct device *dev, u32 unit, u32 inslot,
++ u32 outslot, u16 secretlen, u8 *keymod,
++ u16 keymodlen)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u32 slot_length, dsize, jstat;
++ u32 __iomem *encapdesc = NULL;
++ u8 __iomem *lkeymod, *inpslotaddr, *outslotaddr;
++ dma_addr_t keymod_dma;
++
++ /* Ensure that the full blob will fit in the key slot */
++ slot_length = smpriv->slot_get_slot_size(dev, unit, outslot);
++ if ((secretlen + 48) > slot_length)
++ goto out;
++
++ /* Get the base addresses of both keystore slots */
++ inpslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, inslot);
++ outslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, outslot);
++
++ /* Build the key modifier */
++ lkeymod = kmalloc(keymodlen, GFP_KERNEL | GFP_DMA);
++ memcpy(lkeymod, keymod, keymodlen);
++ keymod_dma = dma_map_single(dev, lkeymod, keymodlen, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++
++ /* Build the encapsulation job descriptor */
++ dsize = blob_encap_desc(&encapdesc, keymod_dma, keymodlen,
++ __pa(inpslotaddr), __pa(outslotaddr),
++ secretlen, 0);
++ if (!dsize) {
++ dev_err(dev, "can't alloc an encap descriptor\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ jstat = sm_key_job(dev, encapdesc);
++
++ dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++	kfree(encapdesc); kfree(lkeymod);
++
++out:
++ return retval;
++
++}
++EXPORT_SYMBOL(sm_keystore_slot_encapsulate);
++
++int sm_keystore_slot_decapsulate(struct device *dev, u32 unit, u32 inslot,
++ u32 outslot, u16 secretlen, u8 *keymod,
++ u16 keymodlen)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u32 slot_length, dsize, jstat;
++ u32 __iomem *decapdesc = NULL;
++ u8 __iomem *lkeymod, *inpslotaddr, *outslotaddr;
++ dma_addr_t keymod_dma;
++
++ /* Ensure that the decap data will fit in the key slot */
++ slot_length = smpriv->slot_get_slot_size(dev, unit, outslot);
++ if (secretlen > slot_length)
++ goto out;
++
++ /* Get the base addresses of both keystore slots */
++ inpslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, inslot);
++ outslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, outslot);
++
++ /* Build the key modifier */
++ lkeymod = kmalloc(keymodlen, GFP_KERNEL | GFP_DMA);
++ memcpy(lkeymod, keymod, keymodlen);
++ keymod_dma = dma_map_single(dev, lkeymod, keymodlen, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++
++ /* Build the decapsulation job descriptor */
++ dsize = blob_decap_desc(&decapdesc, keymod_dma, keymodlen,
++ __pa(inpslotaddr), __pa(outslotaddr),
++ secretlen, 0);
++ if (!dsize) {
++ dev_err(dev, "can't alloc a decap descriptor\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ jstat = sm_key_job(dev, decapdesc);
++
++ dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++ kfree(decapdesc);
++
++out:
++ return retval;
++
++}
++EXPORT_SYMBOL(sm_keystore_slot_decapsulate);
++
++
++/*
++ * Initialization/shutdown subsystem
++ * Assumes statically-invoked startup/shutdown from the controller driver
++ * for the present time, to be reworked when a device tree becomes
++ * available. This code will not modularize in present form.
++ *
++ * Also, simply uses ring 0 for execution at the present
++ */
++
++int caam_sm_startup(struct platform_device *pdev)
++{
++ struct device *ctrldev, *smdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_sm *smpriv;
++ struct caam_drv_private_jr *jrpriv; /* need this for reg page */
++ struct platform_device *sm_pdev;
++ struct sm_page_descriptor *lpagedesc;
++ u32 page, pgstat, lpagect, detectedpage;
++
++ struct device_node *np;
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++
++ /*
++ * Set up the private block for secure memory
++ * Only one instance is possible
++ */
++ smpriv = kzalloc(sizeof(struct caam_drv_private_sm), GFP_KERNEL);
++ if (smpriv == NULL) {
++ dev_err(ctrldev, "can't alloc private mem for secure memory\n");
++ return -ENOMEM;
++ }
++ smpriv->parentdev = ctrldev; /* copy of parent dev is handy */
++
++ /* Create the dev */
++#ifdef CONFIG_OF
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-sm");
++ sm_pdev = of_platform_device_create(np, "caam_sm", ctrldev);
++#else
++ sm_pdev = platform_device_register_data(ctrldev, "caam_sm", 0,
++ smpriv,
++ sizeof(struct caam_drv_private_sm));
++#endif
++ if (sm_pdev == NULL) {
++ kfree(smpriv);
++ return -EINVAL;
++ }
++ smdev = &sm_pdev->dev;
++ dev_set_drvdata(smdev, smpriv);
++ ctrlpriv->smdev = smdev;
++
++ /*
++ * Collect configuration limit data for reference
++ * This batch comes from the partition data/vid registers in perfmon
++ */
++ smpriv->max_pages = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_NUMPG_MASK) >>
++ SMPART_MAX_NUMPG_SHIFT) + 1;
++ smpriv->top_partition = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_PNUM_MASK) >>
++ SMPART_MAX_PNUM_SHIFT) + 1;
++ smpriv->top_page = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_PG_MASK) >> SMPART_MAX_PG_SHIFT) + 1;
++ smpriv->page_size = 1024 << ((rd_reg32(&ctrlpriv->ctrl->perfmon.smvid)
++ & SMVID_PG_SIZE_MASK) >> SMVID_PG_SIZE_SHIFT);
++ smpriv->slot_size = 1 << CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE;
++
++#ifdef SM_DEBUG
++ dev_info(smdev, "max pages = %d, top partition = %d\n",
++ smpriv->max_pages, smpriv->top_partition);
++ dev_info(smdev, "top page = %d, page size = %d (total = %d)\n",
++ smpriv->top_page, smpriv->page_size,
++ smpriv->top_page * smpriv->page_size);
++ dev_info(smdev, "selected slot size = %d\n", smpriv->slot_size);
++#endif
++
++ /*
++ * Now probe for partitions/pages to which we have access. Note that
++ * these have likely been set up by a bootloader or platform
++ * provisioning application, so we have to assume that we "inherit"
++ * a configuration and work within the constraints of what it might be.
++ *
++ * Assume use of the zeroth ring in the present iteration (until
++ * we can divorce the controller and ring drivers, and then assign
++ * an SM instance to any ring instance).
++ */
++ smpriv->smringdev = ctrlpriv->jrdev[0];
++ jrpriv = dev_get_drvdata(smpriv->smringdev);
++ lpagect = 0;
++ lpagedesc = kzalloc(sizeof(struct sm_page_descriptor)
++ * smpriv->max_pages, GFP_KERNEL);
++ if (lpagedesc == NULL) {
++ kfree(smpriv);
++ return -ENOMEM;
++ }
++
++ for (page = 0; page < smpriv->max_pages; page++) {
++ wr_reg32(&jrpriv->rregs->sm_cmd,
++ ((page << SMC_PAGE_SHIFT) & SMC_PAGE_MASK) |
++ (SMC_CMD_PAGE_INQUIRY & SMC_CMD_MASK));
++ pgstat = rd_reg32(&jrpriv->rregs->sm_status);
++ if (((pgstat & SMCS_PGWON_MASK) >> SMCS_PGOWN_SHIFT)
++ == SMCS_PGOWN_OWNED) { /* our page? */
++ lpagedesc[page].phys_pagenum =
++ (pgstat & SMCS_PAGE_MASK) >> SMCS_PAGE_SHIFT;
++ lpagedesc[page].own_part =
++ (pgstat & SMCS_PART_SHIFT) >> SMCS_PART_MASK;
++ lpagedesc[page].pg_base = ctrlpriv->sm_base +
++ ((smpriv->page_size * page) / sizeof(u32));
++ lpagect++;
++#ifdef SM_DEBUG
++ dev_info(smdev,
++ "physical page %d, owning partition = %d\n",
++ lpagedesc[page].phys_pagenum,
++ lpagedesc[page].own_part);
++#endif
++ }
++ }
++
++ smpriv->pagedesc = kzalloc(sizeof(struct sm_page_descriptor) * lpagect,
++ GFP_KERNEL);
++ if (smpriv->pagedesc == NULL) {
++ kfree(lpagedesc);
++ kfree(smpriv);
++ return -ENOMEM;
++ }
++ smpriv->localpages = lpagect;
++
++ detectedpage = 0;
++ for (page = 0; page < smpriv->max_pages; page++) {
++ if (lpagedesc[page].pg_base != NULL) { /* e.g. live entry */
++ memcpy(&smpriv->pagedesc[detectedpage],
++ &lpagedesc[page],
++ sizeof(struct sm_page_descriptor));
++#ifdef SM_DEBUG_CONT
++ sm_show_page(smdev, &smpriv->pagedesc[detectedpage]);
++#endif
++ detectedpage++;
++ }
++ }
++
++ kfree(lpagedesc);
++
++ sm_init_keystore(smdev);
++
++ return 0;
++}
++
++void caam_sm_shutdown(struct platform_device *pdev)
++{
++ struct device *ctrldev, *smdev;
++ struct caam_drv_private *priv;
++ struct caam_drv_private_sm *smpriv;
++
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++ smdev = priv->smdev;
++ smpriv = dev_get_drvdata(smdev);
++
++ kfree(smpriv->pagedesc);
++ kfree(smpriv);
++}
++EXPORT_SYMBOL(caam_sm_shutdown);
++#ifdef CONFIG_OF
++static void __exit caam_sm_exit(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++ of_node_put(dev_node);
++
++ caam_sm_shutdown(pdev);
++
++ return;
++}
++
++static int __init caam_sm_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_get(dev_node);
++
++ caam_sm_startup(pdev);
++
++ return 0;
++}
++
++module_init(caam_sm_init);
++module_exit(caam_sm_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Secure Memory / Keystore");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif
+diff -Nur linux-3.14.36/drivers/crypto/caam/sm_test.c linux-openelec/drivers/crypto/caam/sm_test.c
+--- linux-3.14.36/drivers/crypto/caam/sm_test.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/crypto/caam/sm_test.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,844 @@
++/*
++ * Secure Memory / Keystore Exemplification Module
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ *
++ * Serves as a functional example, and as a self-contained unit test for
++ * the functionality contained in sm_store.c.
++ *
++ * The example function, caam_sm_example_init(), runs a thread that:
++ *
++ * - initializes a set of fixed keys
++ * - stores one copy in clear buffers
++ * - stores them again in secure memory
++ * - extracts stored keys back out for use
++ * - intializes 3 data buffers for a test:
++ * (1) containing cleartext
++ * (2) to hold ciphertext encrypted with an extracted black key
++ * (3) to hold extracted cleartext decrypted with an equivalent clear key
++ *
++ * The function then builds simple job descriptors that reference the key
++ * material and buffers as initialized, and executes an encryption job
++ * with a black key, and a decryption job using a the same key held in the
++ * clear. The output of the decryption job is compared to the original
++ * cleartext; if they don't compare correctly, one can assume a key problem
++ * exists, where the function will exit with an error.
++ *
++ * This module can use a substantial amount of refactoring, which may occur
++ * after the API gets some mileage. Furthermore, expect this module to
++ * eventually disappear once the API is integrated into "real" software.
++ */
++
++#include "compat.h"
++#include "intern.h"
++#include "desc.h"
++#include "error.h"
++#include "jr.h"
++#include "sm.h"
++
++static u8 skeymod[] = {
++ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
++ 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
++};
++static u8 symkey[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
++};
++
++static u8 symdata[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x0f, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
++ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
++ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
++ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
++ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
++ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
++ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
++ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
++ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
++ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
++ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
++ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
++ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
++ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
++ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
++ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
++ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
++ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
++ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
++ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
++ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
++ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
++ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
++ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
++};
++
++static int mk_job_desc(u32 *desc, dma_addr_t key, u16 keysz, dma_addr_t indata,
++ dma_addr_t outdata, u16 sz, u32 cipherdir, u32 keymode)
++{
++ desc[1] = CMD_KEY | CLASS_1 | (keysz & KEY_LENGTH_MASK) | keymode;
++ desc[2] = (u32)key;
++ desc[3] = CMD_OPERATION | OP_TYPE_CLASS1_ALG | OP_ALG_AAI_ECB |
++ cipherdir;
++ desc[4] = CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1 | sz;
++ desc[5] = (u32)indata;
++ desc[6] = CMD_FIFO_STORE | FIFOST_TYPE_MESSAGE_DATA | sz;
++ desc[7] = (u32)outdata;
++
++ desc[0] = CMD_DESC_HDR | HDR_ONE | (8 & HDR_DESCLEN_MASK);
++ return 8 * sizeof(u32);
++}
++
++struct exec_test_result {
++ int error;
++ struct completion completion;
++};
++
++void exec_test_done(struct device *dev, u32 *desc, u32 err, void *context)
++{
++ struct exec_test_result *res = context;
++
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
++
++ res->error = err;
++ complete(&res->completion);
++}
++
++static int exec_test_job(struct device *ksdev, u32 *jobdesc)
++{
++ struct exec_test_result testres;
++ struct caam_drv_private_sm *kspriv;
++ int rtn = 0;
++
++ kspriv = dev_get_drvdata(ksdev);
++
++ init_completion(&testres.completion);
++
++ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, exec_test_done,
++ &testres);
++ if (!rtn) {
++ wait_for_completion_interruptible(&testres.completion);
++ rtn = testres.error;
++ }
++ return rtn;
++}
++
++
++int caam_sm_example_init(struct platform_device *pdev)
++{
++ struct device *ctrldev, *ksdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_sm *kspriv;
++ u32 unit, units, jdescsz;
++ int stat, jstat, rtnval = 0;
++ u8 __iomem *syminp, *symint, *symout = NULL;
++ dma_addr_t syminp_dma, symint_dma, symout_dma;
++ u8 __iomem *black_key_des, *black_key_aes128;
++ u8 __iomem *black_key_aes256;
++ dma_addr_t black_key_des_dma, black_key_aes128_dma;
++ dma_addr_t black_key_aes256_dma;
++ u8 __iomem *clear_key_des, *clear_key_aes128, *clear_key_aes256;
++ dma_addr_t clear_key_des_dma, clear_key_aes128_dma;
++ dma_addr_t clear_key_aes256_dma;
++ u32 __iomem *jdesc;
++ u32 keyslot_des, keyslot_aes128, keyslot_aes256 = 0;
++
++ jdesc = NULL;
++ black_key_des = black_key_aes128 = black_key_aes256 = NULL;
++ clear_key_des = clear_key_aes128 = clear_key_aes256 = NULL;
++
++ /* We can lose this cruft once we can get a pdev by name */
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++ ksdev = ctrlpriv->smdev;
++ kspriv = dev_get_drvdata(ksdev);
++ if (kspriv == NULL)
++ return -ENODEV;
++
++ /* Now that we have the dev for the single SM instance, connect */
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test_init() running\n");
++#endif
++ /* Probe to see what keystores are available to us */
++ units = sm_detect_keystore_units(ksdev);
++ if (!units)
++ dev_err(ksdev, "caam_sm_test: no keystore units available\n");
++
++ /*
++ * MX6 bootloader stores some stuff in unit 0, so let's
++ * use 1 or above
++ */
++ if (units < 2) {
++ dev_err(ksdev, "caam_sm_test: insufficient keystore units\n");
++ return -ENODEV;
++ }
++ unit = 1;
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: %d keystore units available\n", units);
++#endif
++
++ /* Initialize/Establish Keystore */
++ sm_establish_keystore(ksdev, unit); /* Initalize store in #1 */
++
++ /*
++ * Top of main test thread
++ */
++
++ /* Allocate test data blocks (input, intermediate, output) */
++ syminp = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ symint = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ symout = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ if ((syminp == NULL) || (symint == NULL) || (symout == NULL)) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get test data buffers\n");
++ goto freemem;
++ }
++
++ /* Allocate storage for 3 black keys: encapsulated 8, 16, 32 */
++ black_key_des = kmalloc(16, GFP_KERNEL | GFP_DMA); /* padded to 16... */
++ black_key_aes128 = kmalloc(16, GFP_KERNEL | GFP_DMA);
++ black_key_aes256 = kmalloc(16, GFP_KERNEL | GFP_DMA);
++ if ((black_key_des == NULL) || (black_key_aes128 == NULL) ||
++ (black_key_aes256 == NULL)) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't black key buffers\n");
++ goto freemem;
++ }
++
++ clear_key_des = kmalloc(8, GFP_KERNEL | GFP_DMA);
++ clear_key_aes128 = kmalloc(16, GFP_KERNEL | GFP_DMA);
++ clear_key_aes256 = kmalloc(32, GFP_KERNEL | GFP_DMA);
++ if ((clear_key_des == NULL) || (clear_key_aes128 == NULL) ||
++ (clear_key_aes256 == NULL)) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get clear key buffers\n");
++ goto freemem;
++ }
++
++ /* Allocate storage for job descriptor */
++ jdesc = kmalloc(8 * sizeof(u32), GFP_KERNEL | GFP_DMA);
++ if (jdesc == NULL) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get descriptor buffers\n");
++ goto freemem;
++ }
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all buffers allocated\n");
++#endif
++
++ /* Load up input data block, clear outputs */
++ memcpy(syminp, symdata, 256);
++ memset(symint, 0, 256);
++ memset(symout, 0, 256);
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++
++ dev_info(ksdev, "caam_sm_test: data buffers initialized\n");
++#endif
++
++ /* Load up clear keys */
++ memcpy(clear_key_des, symkey, 8);
++ memcpy(clear_key_aes128, symkey, 16);
++ memcpy(clear_key_aes256, symkey, 32);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all clear keys loaded\n");
++#endif
++
++ /*
++ * Place clear keys in keystore.
++ * All the interesting stuff happens here.
++ */
++ /* 8 bit DES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 8, &keyslot_des);
++ if (stat)
++ goto freemem;
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 8 byte key slot in %d\n", keyslot_des);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_des, clear_key_des,
++ 8);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 8 byte key in %d\n",
++ keyslot_des);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
++ /* 16 bit AES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 16, &keyslot_aes128);
++ if (stat) {
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 16 byte key slot in %d\n",
++ keyslot_aes128);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_aes128,
++ clear_key_aes128, 16);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 16 byte key in %d\n",
++ keyslot_aes128);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
++ /* 32 bit AES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 32, &keyslot_aes256);
++ if (stat) {
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 32 byte key slot in %d\n",
++ keyslot_aes256);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_aes256,
++ clear_key_aes256, 32);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 32 byte key in %d\n",
++ keyslot_aes128);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes256);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
++ /* Encapsulate all keys as SM blobs */
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_des,
++ keyslot_des, 8, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate DES key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_aes128,
++ keyslot_aes128, 16, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate AES128 key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_aes256,
++ keyslot_aes256, 32, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate AES256 key\n");
++ goto freekeys;
++ }
++
++ /* Now decapsulate as black key blobs */
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_des,
++ keyslot_des, 8, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't decapsulate DES key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_aes128,
++ keyslot_aes128, 16, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't decapsulate AES128 key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_aes256,
++ keyslot_aes256, 32, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't decapsulate AES128 key\n");
++ goto freekeys;
++ }
++
++ /* Extract 8/16/32 byte black keys */
++ sm_keystore_slot_read(ksdev, unit, keyslot_des, 8, black_key_des);
++ sm_keystore_slot_read(ksdev, unit, keyslot_aes128, 16,
++ black_key_aes128);
++ sm_keystore_slot_read(ksdev, unit, keyslot_aes256, 32,
++ black_key_aes256);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all black keys extracted\n");
++#endif
++
++ /* DES encrypt using 8 byte black key */
++ black_key_des_dma = dma_map_single(ksdev, black_key_des, 8,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_des_dma, 8, DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_des_dma, 8, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_DES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_des_dma, 8, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 8 byte key\n");
++#endif
++
++ /* DES decrypt using 8 byte clear key */
++ clear_key_des_dma = dma_map_single(ksdev, clear_key_des, 8,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_des_dma, 8, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_des_dma, 8, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_DES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_des_dma, 8, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 8 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 8-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 8-byte key test match OK\n");
++
++ /* AES-128 encrypt using 16 byte black key */
++ black_key_aes128_dma = dma_map_single(ksdev, black_key_aes128, 16,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_aes128_dma, 16,
++ DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_aes128_dma, 16, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_aes128_dma, 16, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 16 byte key\n");
++#endif
++
++ /* AES-128 decrypt using 16 byte clear key */
++ clear_key_aes128_dma = dma_map_single(ksdev, clear_key_aes128, 16,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_aes128_dma, 16,
++ DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_aes128_dma, 16, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_aes128_dma, 16, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 16 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 16-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 16-byte key test match OK\n");
++
++ /* AES-256 encrypt using 32 byte black key */
++ black_key_aes256_dma = dma_map_single(ksdev, black_key_aes256, 32,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_aes256_dma, 32,
++ DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_aes256_dma, 32, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_aes256_dma, 32, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 32 byte key\n");
++#endif
++
++ /* AES-256 decrypt using 32-byte black key */
++ clear_key_aes256_dma = dma_map_single(ksdev, clear_key_aes256, 32,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_aes256_dma, 32,
++ DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_aes256_dma, 32, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_aes256_dma, 32, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 32 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 32-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 32-byte key test match OK\n");
++
++
++ /* Remove 8/16/32 byte keys from keystore */
++freekeys:
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_des);
++
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_aes128);
++
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes256);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_aes256);
++
++
++ /* Free resources */
++freemem:
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: cleaning up\n");
++#endif
++ kfree(syminp);
++ kfree(symint);
++ kfree(symout);
++ kfree(clear_key_des);
++ kfree(clear_key_aes128);
++ kfree(clear_key_aes256);
++ kfree(black_key_des);
++ kfree(black_key_aes128);
++ kfree(black_key_aes256);
++ kfree(jdesc);
++
++ /* Disconnect from keystore and leave */
++ sm_release_keystore(ksdev, unit);
++
++ return rtnval;
++}
++EXPORT_SYMBOL(caam_sm_example_init);
++
++void caam_sm_example_shutdown(void)
++{
++ /* unused in present version */
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++ of_node_get(dev_node);
++
++}
++
++static int __init caam_sm_test_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_put(dev_node);
++
++ caam_sm_example_init(pdev);
++
++ return 0;
++}
++
++
++/* Module-based initialization needs to wait for dev tree */
++#ifdef CONFIG_OF
++module_init(caam_sm_test_init);
++module_exit(caam_sm_example_shutdown);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Keystore Usage Example");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif
+diff -Nur linux-3.14.36/drivers/crypto/caam/snvsregs.h linux-openelec/drivers/crypto/caam/snvsregs.h
+--- linux-3.14.36/drivers/crypto/caam/snvsregs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/crypto/caam/snvsregs.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,237 @@
++/*
++ * SNVS hardware register-level view
++ *
++ * Copyright (C) 2013 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#ifndef SNVSREGS_H
++#define SNVSREGS_H
++
++#include <linux/types.h>
++#include <linux/io.h>
++
++/*
++ * SNVS High Power Domain
++ * Includes security violations, HA counter, RTC, alarm
++ */
++struct snvs_hp {
++ u32 lock;
++ u32 cmd;
++ u32 ctl;
++ u32 secvio_int_en; /* Security Violation Interrupt Enable */
++ u32 secvio_int_ctl; /* Security Violation Interrupt Control */
++ u32 status;
++ u32 secvio_status; /* Security Violation Status */
++ u32 ha_counteriv; /* High Assurance Counter IV */
++ u32 ha_counter; /* High Assurance Counter */
++ u32 rtc_msb; /* Real Time Clock/Counter MSB */
++ u32 rtc_lsb; /* Real Time Counter LSB */
++ u32 time_alarm_msb; /* Time Alarm MSB */
++ u32 time_alarm_lsb; /* Time Alarm LSB */
++};
++
++#define HP_LOCK_HAC_LCK 0x00040000
++#define HP_LOCK_HPSICR_LCK 0x00020000
++#define HP_LOCK_HPSVCR_LCK 0x00010000
++#define HP_LOCK_MKEYSEL_LCK 0x00000200
++#define HP_LOCK_TAMPCFG_LCK 0x00000100
++#define HP_LOCK_TAMPFLT_LCK 0x00000080
++#define HP_LOCK_SECVIO_LCK 0x00000040
++#define HP_LOCK_GENP_LCK 0x00000020
++#define HP_LOCK_MONOCTR_LCK 0x00000010
++#define HP_LOCK_CALIB_LCK 0x00000008
++#define HP_LOCK_SRTC_LCK 0x00000004
++#define HP_LOCK_ZMK_RD_LCK 0x00000002
++#define HP_LOCK_ZMK_WT_LCK 0x00000001
++
++#define HP_CMD_NONPRIV_AXS 0x80000000
++#define HP_CMD_HAC_STOP 0x00080000
++#define HP_CMD_HAC_CLEAR 0x00040000
++#define HP_CMD_HAC_LOAD 0x00020000
++#define HP_CMD_HAC_CFG_EN 0x00010000
++#define HP_CMD_SNVS_MSTR_KEY 0x00002000
++#define HP_CMD_PROG_ZMK 0x00001000
++#define HP_CMD_SW_LPSV 0x00000400
++#define HP_CMD_SW_FSV 0x00000200
++#define HP_CMD_SW_SV 0x00000100
++#define HP_CMD_LP_SWR_DIS 0x00000020
++#define HP_CMD_LP_SWR 0x00000010
++#define HP_CMD_SSM_SFNS_DIS 0x00000004
++#define HP_CMD_SSM_ST_DIS 0x00000002
++#define HP_CMD_SMM_ST 0x00000001
++
++#define HP_CTL_TIME_SYNC 0x00010000
++#define HP_CTL_CAL_VAL_SHIFT 10
++#define HP_CTL_CAL_VAL_MASK (0x1f << HP_CTL_CALIB_SHIFT)
++#define HP_CTL_CALIB_EN 0x00000100
++#define HP_CTL_PI_FREQ_SHIFT 4
++#define HP_CTL_PI_FREQ_MASK (0xf << HP_CTL_PI_FREQ_SHIFT)
++#define HP_CTL_PI_EN 0x00000008
++#define HP_CTL_TIMEALARM_EN 0x00000002
++#define HP_CTL_RTC_EN 0x00000001
++
++#define HP_SECVIO_INTEN_EN 0x10000000
++#define HP_SECVIO_INTEN_SRC5 0x00000020
++#define HP_SECVIO_INTEN_SRC4 0x00000010
++#define HP_SECVIO_INTEN_SRC3 0x00000008
++#define HP_SECVIO_INTEN_SRC2 0x00000004
++#define HP_SECVIO_INTEN_SRC1 0x00000002
++#define HP_SECVIO_INTEN_SRC0 0x00000001
++#define HP_SECVIO_INTEN_ALL 0x8000003f
++
++#define HP_SECVIO_ICTL_CFG_SHIFT 30
++#define HP_SECVIO_ICTL_CFG_MASK (0x3 << HP_SECVIO_ICTL_CFG_SHIFT)
++#define HP_SECVIO_ICTL_CFG5_SHIFT 5
++#define HP_SECVIO_ICTL_CFG5_MASK (0x3 << HP_SECVIO_ICTL_CFG5_SHIFT)
++#define HP_SECVIO_ICTL_CFG_DISABLE 0
++#define HP_SECVIO_ICTL_CFG_NONFATAL 1
++#define HP_SECVIO_ICTL_CFG_FATAL 2
++#define HP_SECVIO_ICTL_CFG4_FATAL 0x00000010
++#define HP_SECVIO_ICTL_CFG3_FATAL 0x00000008
++#define HP_SECVIO_ICTL_CFG2_FATAL 0x00000004
++#define HP_SECVIO_ICTL_CFG1_FATAL 0x00000002
++#define HP_SECVIO_ICTL_CFG0_FATAL 0x00000001
++
++#define HP_STATUS_ZMK_ZERO 0x80000000
++#define HP_STATUS_OTPMK_ZERO 0x08000000
++#define HP_STATUS_OTPMK_SYN_SHIFT 16
++#define HP_STATUS_OTPMK_SYN_MASK (0x1ff << HP_STATUS_OTPMK_SYN_SHIFT)
++#define HP_STATUS_SSM_ST_SHIFT 8
++#define HP_STATUS_SSM_ST_MASK (0xf << HP_STATUS_SSM_ST_SHIFT)
++#define HP_STATUS_SSM_ST_INIT 0
++#define HP_STATUS_SSM_ST_HARDFAIL 1
++#define HP_STATUS_SSM_ST_SOFTFAIL 3
++#define HP_STATUS_SSM_ST_INITINT 8
++#define HP_STATUS_SSM_ST_CHECK 9
++#define HP_STATUS_SSM_ST_NONSECURE 11
++#define HP_STATUS_SSM_ST_TRUSTED 13
++#define HP_STATUS_SSM_ST_SECURE 15
++
++#define HP_SECVIOST_ZMK_ECC_FAIL 0x08000000 /* write to clear */
++#define HP_SECVIOST_ZMK_SYN_SHIFT 16
++#define HP_SECVIOST_ZMK_SYN_MASK (0x1ff << HP_SECVIOST_ZMK_SYN_SHIFT)
++#define HP_SECVIOST_SECVIO5 0x00000020
++#define HP_SECVIOST_SECVIO4 0x00000010
++#define HP_SECVIOST_SECVIO3 0x00000008
++#define HP_SECVIOST_SECVIO2 0x00000004
++#define HP_SECVIOST_SECVIO1 0x00000002
++#define HP_SECVIOST_SECVIO0 0x00000001
++#define HP_SECVIOST_SECVIOMASK 0x0000003f
++
++/*
++ * SNVS Low Power Domain
++ * Includes glitch detector, SRTC, alarm, monotonic counter, ZMK
++ */
++struct snvs_lp {
++ u32 lock;
++ u32 ctl;
++ u32 mstr_key_ctl; /* Master Key Control */
++ u32 secvio_ctl; /* Security Violation Control */
++ u32 tamper_filt_cfg; /* Tamper Glitch Filters Configuration */
++ u32 tamper_det_cfg; /* Tamper Detectors Configuration */
++ u32 status;
++ u32 srtc_msb; /* Secure Real Time Clock/Counter MSB */
++ u32 srtc_lsb; /* Secure Real Time Clock/Counter LSB */
++ u32 time_alarm; /* Time Alarm */
++ u32 smc_msb; /* Secure Monotonic Counter MSB */
++ u32 smc_lsb; /* Secure Monotonic Counter LSB */
++ u32 pwr_glitch_det; /* Power Glitch Detector */
++ u32 gen_purpose;
++ u32 zmk[8]; /* Zeroizable Master Key */
++};
++
++#define LP_LOCK_MKEYSEL_LCK 0x00000200
++#define LP_LOCK_TAMPDET_LCK 0x00000100
++#define LP_LOCK_TAMPFLT_LCK 0x00000080
++#define LP_LOCK_SECVIO_LCK 0x00000040
++#define LP_LOCK_GENP_LCK 0x00000020
++#define LP_LOCK_MONOCTR_LCK 0x00000010
++#define LP_LOCK_CALIB_LCK 0x00000008
++#define LP_LOCK_SRTC_LCK 0x00000004
++#define LP_LOCK_ZMK_RD_LCK 0x00000002
++#define LP_LOCK_ZMK_WT_LCK 0x00000001
++
++#define LP_CTL_CAL_VAL_SHIFT 10
++#define LP_CTL_CAL_VAL_MASK (0x1f << LP_CTL_CAL_VAL_SHIFT)
++#define LP_CTL_CALIB_EN 0x00000100
++#define LP_CTL_SRTC_INVAL_EN 0x00000010
++#define LP_CTL_WAKE_INT_EN 0x00000008
++#define LP_CTL_MONOCTR_EN 0x00000004
++#define LP_CTL_TIMEALARM_EN 0x00000002
++#define LP_CTL_SRTC_EN 0x00000001
++
++#define LP_MKEYCTL_ZMKECC_SHIFT 8
++#define LP_MKEYCTL_ZMKECC_MASK (0xff << LP_MKEYCTL_ZMKECC_SHIFT)
++#define LP_MKEYCTL_ZMKECC_EN 0x00000010
++#define LP_MKEYCTL_ZMKECC_VAL 0x00000008
++#define LP_MKEYCTL_ZMKECC_PROG 0x00000004
++#define LP_MKEYCTL_MKSEL_SHIFT 0
++#define LP_MKEYCTL_MKSEL_MASK (3 << LP_MKEYCTL_MKSEL_SHIFT)
++#define LP_MKEYCTL_MK_OTP 0
++#define LP_MKEYCTL_MK_ZMK 2
++#define LP_MKEYCTL_MK_COMB 3
++
++#define LP_SECVIO_CTL_SRC5 0x20
++#define LP_SECVIO_CTL_SRC4 0x10
++#define LP_SECVIO_CTL_SRC3 0x08
++#define LP_SECVIO_CTL_SRC2 0x04
++#define LP_SECVIO_CTL_SRC1 0x02
++#define LP_SECVIO_CTL_SRC0 0x01
++
++#define LP_TAMPFILT_EXT2_EN 0x80000000
++#define LP_TAMPFILT_EXT2_SHIFT 24
++#define LP_TAMPFILT_EXT2_MASK (0x1f << LP_TAMPFILT_EXT2_SHIFT)
++#define LP_TAMPFILT_EXT1_EN 0x00800000
++#define LP_TAMPFILT_EXT1_SHIFT 16
++#define LP_TAMPFILT_EXT1_MASK (0x1f << LP_TAMPFILT_EXT1_SHIFT)
++#define LP_TAMPFILT_WM_EN 0x00000080
++#define LP_TAMPFILT_WM_SHIFT 0
++#define LP_TAMPFILT_WM_MASK (0x1f << LP_TAMPFILT_WM_SHIFT)
++
++#define LP_TAMPDET_OSC_BPS 0x10000000
++#define LP_TAMPDET_VRC_SHIFT 24
++#define LP_TAMPDET_VRC_MASK (3 << LP_TAMPFILT_VRC_SHIFT)
++#define LP_TAMPDET_HTDC_SHIFT 20
++#define LP_TAMPDET_HTDC_MASK (3 << LP_TAMPFILT_HTDC_SHIFT)
++#define LP_TAMPDET_LTDC_SHIFT 16
++#define LP_TAMPDET_LTDC_MASK (3 << LP_TAMPFILT_LTDC_SHIFT)
++#define LP_TAMPDET_POR_OBS 0x00008000
++#define LP_TAMPDET_PFD_OBS 0x00004000
++#define LP_TAMPDET_ET2_EN 0x00000400
++#define LP_TAMPDET_ET1_EN 0x00000200
++#define LP_TAMPDET_WMT2_EN 0x00000100
++#define LP_TAMPDET_WMT1_EN 0x00000080
++#define LP_TAMPDET_VT_EN 0x00000040
++#define LP_TAMPDET_TT_EN 0x00000020
++#define LP_TAMPDET_CT_EN 0x00000010
++#define LP_TAMPDET_MCR_EN 0x00000004
++#define LP_TAMPDET_SRTCR_EN 0x00000002
++
++#define LP_STATUS_SECURE
++#define LP_STATUS_NONSECURE
++#define LP_STATUS_SCANEXIT 0x00100000 /* all write 1 clear here on */
++#define LP_STATUS_EXT_SECVIO 0x00010000
++#define LP_STATUS_ET2 0x00000400
++#define LP_STATUS_ET1 0x00000200
++#define LP_STATUS_WMT2 0x00000100
++#define LP_STATUS_WMT1 0x00000080
++#define LP_STATUS_VTD 0x00000040
++#define LP_STATUS_TTD 0x00000020
++#define LP_STATUS_CTD 0x00000010
++#define LP_STATUS_PGD 0x00000008
++#define LP_STATUS_MCR 0x00000004
++#define LP_STATUS_SRTCR 0x00000002
++#define LP_STATUS_LPTA 0x00000001
++
++/* Full SNVS register page, including version/options */
++struct snvs_full {
++ struct snvs_hp hp;
++ struct snvs_lp lp;
++ u32 rsvd[731]; /* deadspace 0x08c-0xbf7 */
++
++ /* Version / Revision / Option ID space - end of register page */
++ u32 vid; /* 0xbf8 HP Version ID (VID 1) */
++ u32 opt_rev; /* 0xbfc HP Options / Revision (VID 2) */
++};
++
++#endif /* SNVSREGS_H */
+diff -Nur linux-3.14.36/drivers/dma/imx-sdma.c linux-openelec/drivers/dma/imx-sdma.c
+--- linux-3.14.36/drivers/dma/imx-sdma.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/dma/imx-sdma.c 2015-05-06 12:05:42.000000000 -0500
+@@ -29,6 +29,7 @@
+ #include <linux/semaphore.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
++#include <linux/genalloc.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/firmware.h>
+ #include <linux/slab.h>
+@@ -232,6 +233,14 @@
+
+ struct sdma_engine;
+
++enum sdma_mode {
++ SDMA_MODE_INVALID = 0,
++ SDMA_MODE_LOOP,
++ SDMA_MODE_NORMAL,
++ SDMA_MODE_P2P,
++ SDMA_MODE_NO_BD,
++};
++
+ /**
+ * struct sdma_channel - housekeeping for a SDMA channel
+ *
+@@ -244,6 +253,7 @@
+ * @word_size peripheral access size
+ * @buf_tail ID of the buffer that was processed
+ * @num_bd max NUM_BD. number of descriptors currently handling
++ * @bd_iram flag indicating the memory location of buffer descriptor
+ */
+ struct sdma_channel {
+ struct sdma_engine *sdma;
+@@ -255,14 +265,19 @@
+ enum dma_slave_buswidth word_size;
+ unsigned int buf_tail;
+ unsigned int num_bd;
++ unsigned int period_len;
+ struct sdma_buffer_descriptor *bd;
+ dma_addr_t bd_phys;
++ bool bd_iram;
+ unsigned int pc_from_device, pc_to_device;
+- unsigned long flags;
+- dma_addr_t per_address;
++ unsigned int device_to_device;
++ unsigned int other_script;
++ enum sdma_mode mode;
++ dma_addr_t per_address, per_address2;
+ unsigned long event_mask[2];
+ unsigned long watermark_level;
+ u32 shp_addr, per_addr;
++ u32 data_addr1, data_addr2;
+ struct dma_chan chan;
+ spinlock_t lock;
+ struct dma_async_tx_descriptor desc;
+@@ -272,8 +287,6 @@
+ struct tasklet_struct tasklet;
+ };
+
+-#define IMX_DMA_SG_LOOP BIT(0)
+-
+ #define MAX_DMA_CHANNELS 32
+ #define MXC_SDMA_DEFAULT_PRIORITY 1
+ #define MXC_SDMA_MIN_PRIORITY 1
+@@ -325,6 +338,7 @@
+ spinlock_t channel_0_lock;
+ u32 script_number;
+ struct sdma_script_start_addrs *script_addrs;
++ struct gen_pool *iram_pool;
+ const struct sdma_driver_data *drvdata;
+ };
+
+@@ -540,12 +554,14 @@
+ dma_addr_t buf_phys;
+ int ret;
+ unsigned long flags;
++ bool use_iram = true;
+
+- buf_virt = dma_alloc_coherent(NULL,
+- size,
+- &buf_phys, GFP_KERNEL);
++ buf_virt = gen_pool_dma_alloc(sdma->iram_pool, size, &buf_phys);
+ if (!buf_virt) {
+- return -ENOMEM;
++ use_iram = false;
++ buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
++ if (!buf_virt)
++ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
+@@ -562,7 +578,10 @@
+
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+
+- dma_free_coherent(NULL, size, buf_virt, buf_phys);
++ if (use_iram)
++ gen_pool_free(sdma->iram_pool, (unsigned long)buf_virt, size);
++ else
++ dma_free_coherent(NULL, size, buf_virt, buf_phys);
+
+ return ret;
+ }
+@@ -593,6 +612,12 @@
+
+ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
+ {
++ if (sdmac->desc.callback)
++ sdmac->desc.callback(sdmac->desc.callback_param);
++}
++
++static void sdma_update_channel_loop(struct sdma_channel *sdmac)
++{
+ struct sdma_buffer_descriptor *bd;
+
+ /*
+@@ -607,15 +632,10 @@
+
+ if (bd->mode.status & BD_RROR)
+ sdmac->status = DMA_ERROR;
+- else
+- sdmac->status = DMA_IN_PROGRESS;
+
+ bd->mode.status |= BD_DONE;
+ sdmac->buf_tail++;
+ sdmac->buf_tail %= sdmac->num_bd;
+-
+- if (sdmac->desc.callback)
+- sdmac->desc.callback(sdmac->desc.callback_param);
+ }
+ }
+
+@@ -647,14 +667,31 @@
+ sdmac->desc.callback(sdmac->desc.callback_param);
+ }
+
++static void sdma_handle_other_intr(struct sdma_channel *sdmac)
++{
++ if (sdmac->desc.callback)
++ sdmac->desc.callback(sdmac->desc.callback_param);
++}
++
+ static void sdma_tasklet(unsigned long data)
+ {
+ struct sdma_channel *sdmac = (struct sdma_channel *) data;
++ struct sdma_engine *sdma = sdmac->sdma;
+
+- if (sdmac->flags & IMX_DMA_SG_LOOP)
++ switch (sdmac->mode) {
++ case SDMA_MODE_LOOP:
+ sdma_handle_channel_loop(sdmac);
+- else
++ break;
++ case SDMA_MODE_NORMAL:
+ mxc_sdma_handle_channel_normal(sdmac);
++ break;
++ case SDMA_MODE_NO_BD:
++ sdma_handle_other_intr(sdmac);
++ break;
++ default:
++ dev_err(sdma->dev, "invalid SDMA MODE!\n");
++ break;
++ }
+ }
+
+ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+@@ -671,6 +708,9 @@
+ int channel = fls(stat) - 1;
+ struct sdma_channel *sdmac = &sdma->channel[channel];
+
++ if (sdmac->mode & SDMA_MODE_LOOP)
++ sdma_update_channel_loop(sdmac);
++
+ tasklet_schedule(&sdmac->tasklet);
+
+ __clear_bit(channel, &stat);
+@@ -692,9 +732,12 @@
+ * two peripherals or memory-to-memory transfers
+ */
+ int per_2_per = 0, emi_2_emi = 0;
++ int other = 0;
+
+ sdmac->pc_from_device = 0;
+ sdmac->pc_to_device = 0;
++ sdmac->device_to_device = 0;
++ sdmac->other_script = 0;
+
+ switch (peripheral_type) {
+ case IMX_DMATYPE_MEMORY:
+@@ -740,8 +783,8 @@
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ break;
+ case IMX_DMATYPE_ASRC:
+- per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
+- emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
++ per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
++ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ per_2_per = sdma->script_addrs->per_2_per_addr;
+ break;
+ case IMX_DMATYPE_MSHC:
+@@ -758,12 +801,17 @@
+ case IMX_DMATYPE_IPU_MEMORY:
+ emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
+ break;
++ case IMX_DMATYPE_HDMI:
++ other = sdma->script_addrs->hdmi_dma_addr;
++ break;
+ default:
+ break;
+ }
+
+ sdmac->pc_from_device = per_2_emi;
+ sdmac->pc_to_device = emi_2_per;
++ sdmac->device_to_device = per_2_per;
++ sdmac->other_script = other;
+ }
+
+ static int sdma_load_context(struct sdma_channel *sdmac)
+@@ -776,11 +824,14 @@
+ int ret;
+ unsigned long flags;
+
+- if (sdmac->direction == DMA_DEV_TO_MEM) {
++ if (sdmac->direction == DMA_DEV_TO_MEM)
+ load_address = sdmac->pc_from_device;
+- } else {
++ else if (sdmac->direction == DMA_DEV_TO_DEV)
++ load_address = sdmac->device_to_device;
++ else if (sdmac->direction == DMA_MEM_TO_DEV)
+ load_address = sdmac->pc_to_device;
+- }
++ else
++ load_address = sdmac->other_script;
+
+ if (load_address < 0)
+ return load_address;
+@@ -800,11 +851,16 @@
+ /* Send by context the event mask,base address for peripheral
+ * and watermark level
+ */
+- context->gReg[0] = sdmac->event_mask[1];
+- context->gReg[1] = sdmac->event_mask[0];
+- context->gReg[2] = sdmac->per_addr;
+- context->gReg[6] = sdmac->shp_addr;
+- context->gReg[7] = sdmac->watermark_level;
++ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
++ context->gReg[4] = sdmac->data_addr1;
++ context->gReg[6] = sdmac->data_addr2;
++ } else {
++ context->gReg[0] = sdmac->event_mask[1];
++ context->gReg[1] = sdmac->event_mask[0];
++ context->gReg[2] = sdmac->per_addr;
++ context->gReg[6] = sdmac->shp_addr;
++ context->gReg[7] = sdmac->watermark_level;
++ }
+
+ bd0->mode.command = C0_SETDM;
+ bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+@@ -829,6 +885,7 @@
+
+ static int sdma_config_channel(struct sdma_channel *sdmac)
+ {
++ struct imx_dma_data *data = sdmac->chan.private;
+ int ret;
+
+ sdma_disable_channel(sdmac);
+@@ -837,12 +894,19 @@
+ sdmac->event_mask[1] = 0;
+ sdmac->shp_addr = 0;
+ sdmac->per_addr = 0;
++ sdmac->data_addr1 = 0;
++ sdmac->data_addr2 = 0;
+
+ if (sdmac->event_id0) {
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
+ }
++ if (sdmac->event_id1) {
++ if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
++ return -EINVAL;
++ sdma_event_enable(sdmac, sdmac->event_id1);
++ }
+
+ switch (sdmac->peripheral_type) {
+ case IMX_DMATYPE_DSP:
+@@ -862,19 +926,75 @@
+ (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
+ /* Handle multiple event channels differently */
+ if (sdmac->event_id1) {
+- sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
+- if (sdmac->event_id1 > 31)
+- __set_bit(31, &sdmac->watermark_level);
+- sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
+- if (sdmac->event_id0 > 31)
+- __set_bit(30, &sdmac->watermark_level);
++ if (sdmac->event_id0 > 31) {
++ sdmac->event_mask[0] |= 0;
++ __set_bit(28, &sdmac->watermark_level);
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id0 % 32);
++ } else {
++ sdmac->event_mask[1] |= 0;
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id0 % 32);
++ }
++ if (sdmac->event_id1 > 31) {
++ sdmac->event_mask[0] |= 0;
++ __set_bit(29, &sdmac->watermark_level);
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id1 % 32);
++ } else {
++ sdmac->event_mask[1] |= 0;
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id1 % 32);
++ }
++ /* BIT 11:
++ * 1 : Source on SPBA
++ * 0 : Source on AIPS
++ */
++ __set_bit(11, &sdmac->watermark_level);
++ /* BIT 12:
++ * 1 : Destination on SPBA
++ * 0 : Destination on AIPS
++ */
++ __set_bit(12, &sdmac->watermark_level);
++ __set_bit(31, &sdmac->watermark_level);
++ /* BIT 31:
++ * 1 : Amount of samples to be transferred is
++ * unknown and script will keep on transferring
++ * samples as long as both events are detected
++ * and script must be manually stopped by the
++ * application.
++ * 0 : The amount of samples to be is equal to
++ * the count field of mode word
++ * */
++ __set_bit(25, &sdmac->watermark_level);
++ __clear_bit(24, &sdmac->watermark_level);
+ } else {
+- __set_bit(sdmac->event_id0, sdmac->event_mask);
++ if (sdmac->event_id0 > 31) {
++ sdmac->event_mask[0] = 0;
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id0 % 32);
++ } else {
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id0 % 32);
++ sdmac->event_mask[1] = 0;
++ }
+ }
+ /* Watermark Level */
+ sdmac->watermark_level |= sdmac->watermark_level;
+ /* Address */
+- sdmac->shp_addr = sdmac->per_address;
++ if (sdmac->direction == DMA_DEV_TO_DEV) {
++ sdmac->shp_addr = sdmac->per_address2;
++ sdmac->per_addr = sdmac->per_address;
++ } else if (sdmac->direction == DMA_TRANS_NONE) {
++ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI ||
++ !data->data_addr1 || !data->data_addr2)
++ return -EINVAL;
++ sdmac->data_addr1 = *(u32 *)data->data_addr1;
++ sdmac->data_addr2 = *(u32 *)data->data_addr2;
++ sdmac->watermark_level = 0;
++ } else {
++ sdmac->shp_addr = sdmac->per_address;
++ }
+ } else {
+ sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
+ }
+@@ -906,10 +1026,15 @@
+ int channel = sdmac->channel;
+ int ret = -EBUSY;
+
+- sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
++ sdmac->bd_iram = true;
++ sdmac->bd = gen_pool_dma_alloc(sdma->iram_pool, PAGE_SIZE, &sdmac->bd_phys);
+ if (!sdmac->bd) {
+- ret = -ENOMEM;
+- goto out;
++ sdmac->bd_iram = false;
++ sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
++ if (!sdmac->bd) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ }
+
+ memset(sdmac->bd, 0, PAGE_SIZE);
+@@ -967,7 +1092,8 @@
+ }
+
+ sdmac->peripheral_type = data->peripheral_type;
+- sdmac->event_id0 = data->dma_request;
++ sdmac->event_id0 = data->dma_request0;
++ sdmac->event_id1 = data->dma_request1;
+
+ clk_enable(sdmac->sdma->clk_ipg);
+ clk_enable(sdmac->sdma->clk_ahb);
+@@ -985,6 +1111,9 @@
+ /* txd.flags will be overwritten in prep funcs */
+ sdmac->desc.flags = DMA_CTRL_ACK;
+
++ /* Set SDMA channel mode to unvalid to avoid misconfig */
++ sdmac->mode = SDMA_MODE_INVALID;
++
+ return 0;
+ }
+
+@@ -1005,7 +1134,10 @@
+
+ sdma_set_channel_priority(sdmac, 0);
+
+- dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
++ if (sdmac->bd_iram)
++ gen_pool_free(sdma->iram_pool, (unsigned long)sdmac->bd, PAGE_SIZE);
++ else
++ dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
+
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
+@@ -1026,7 +1158,7 @@
+ return NULL;
+ sdmac->status = DMA_IN_PROGRESS;
+
+- sdmac->flags = 0;
++ sdmac->mode = SDMA_MODE_NORMAL;
+
+ sdmac->buf_tail = 0;
+
+@@ -1119,9 +1251,9 @@
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+- int num_periods = buf_len / period_len;
+ int channel = sdmac->channel;
+ int ret, i = 0, buf = 0;
++ int num_periods;
+
+ dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+@@ -1131,13 +1263,35 @@
+ sdmac->status = DMA_IN_PROGRESS;
+
+ sdmac->buf_tail = 0;
++ sdmac->period_len = period_len;
+
+- sdmac->flags |= IMX_DMA_SG_LOOP;
+ sdmac->direction = direction;
++
++ switch (sdmac->direction) {
++ case DMA_DEV_TO_DEV:
++ sdmac->mode = SDMA_MODE_P2P;
++ break;
++ case DMA_TRANS_NONE:
++ sdmac->mode = SDMA_MODE_NO_BD;
++ break;
++ case DMA_MEM_TO_DEV:
++ case DMA_DEV_TO_MEM:
++ sdmac->mode = SDMA_MODE_LOOP;
++ break;
++ default:
++ dev_err(sdma->dev, "invalid SDMA direction %d\n", direction);
++ return NULL;
++ }
++
+ ret = sdma_load_context(sdmac);
+ if (ret)
+ goto err_out;
+
++ if (period_len)
++ num_periods = buf_len / period_len;
++ else
++ return &sdmac->desc;
++
+ if (num_periods > NUM_BD) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+ channel, num_periods, NUM_BD);
+@@ -1202,18 +1356,31 @@
+ sdma_disable_channel(sdmac);
+ return 0;
+ case DMA_SLAVE_CONFIG:
+- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
++ if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
++ sdmac->per_address = dmaengine_cfg->src_addr;
++ sdmac->per_address2 = dmaengine_cfg->dst_addr;
++ sdmac->watermark_level = 0;
++ sdmac->watermark_level |=
++ dmaengine_cfg->src_maxburst;
++ sdmac->watermark_level |=
++ dmaengine_cfg->dst_maxburst << 16;
++ sdmac->word_size = dmaengine_cfg->dst_addr_width;
++ } else if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ sdmac->per_address = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+ dmaengine_cfg->src_addr_width;
+ sdmac->word_size = dmaengine_cfg->src_addr_width;
+- } else {
++ } else if (dmaengine_cfg->direction == DMA_MEM_TO_DEV) {
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+ dmaengine_cfg->dst_addr_width;
+ sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ }
+ sdmac->direction = dmaengine_cfg->direction;
++ if (dmaengine_cfg->dma_request0)
++ sdmac->event_id0 = dmaengine_cfg->dma_request0;
++ if (dmaengine_cfg->dma_request1)
++ sdmac->event_id1 = dmaengine_cfg->dma_request1;
+ return sdma_config_channel(sdmac);
+ default:
+ return -ENOSYS;
+@@ -1227,9 +1394,15 @@
+ struct dma_tx_state *txstate)
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
++ u32 residue;
++
++ if (sdmac->mode & SDMA_MODE_LOOP)
++ residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
++ else
++ residue = sdmac->chn_count - sdmac->chn_real_count;
+
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+- sdmac->chn_count - sdmac->chn_real_count);
++ residue);
+
+ return sdmac->status;
+ }
+@@ -1285,7 +1458,10 @@
+ goto err_firmware;
+ switch (header->version_major) {
+ case 1:
+- sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
++ if (header->version_minor > 0)
++ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
++ else
++ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+ break;
+ case 2:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+@@ -1331,7 +1507,7 @@
+
+ static int __init sdma_init(struct sdma_engine *sdma)
+ {
+- int i, ret;
++ int i, ret, ccbsize;
+ dma_addr_t ccb_phys;
+
+ clk_enable(sdma->clk_ipg);
+@@ -1340,14 +1516,17 @@
+ /* Be sure SDMA has not started yet */
+ writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
+
+- sdma->channel_control = dma_alloc_coherent(NULL,
+- MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+- sizeof(struct sdma_context_data),
+- &ccb_phys, GFP_KERNEL);
++ ccbsize = MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)
++ + sizeof(struct sdma_context_data);
+
++ sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
+ if (!sdma->channel_control) {
+- ret = -ENOMEM;
+- goto err_dma_alloc;
++ sdma->channel_control = dma_alloc_coherent(NULL, ccbsize,
++ &ccb_phys, GFP_KERNEL);
++ if (!sdma->channel_control) {
++ ret = -ENOMEM;
++ goto err_dma_alloc;
++ }
+ }
+
+ sdma->context = (void *)sdma->channel_control +
+@@ -1422,9 +1601,10 @@
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+- data.dma_request = dma_spec->args[0];
++ data.dma_request0 = dma_spec->args[0];
+ data.peripheral_type = dma_spec->args[1];
+ data.priority = dma_spec->args[2];
++ data.dma_request1 = 0;
+
+ return dma_request_channel(mask, sdma_filter_fn, &data);
+ }
+@@ -1542,6 +1722,11 @@
+ &sdma->dma_device.channels);
+ }
+
++ if (np)
++ sdma->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!sdma->iram_pool)
++ dev_warn(&pdev->dev, "no iram assigned, using external mem\n");
++
+ ret = sdma_init(sdma);
+ if (ret)
+ goto err_init;
+diff -Nur linux-3.14.36/drivers/dma/Kconfig linux-openelec/drivers/dma/Kconfig
+--- linux-3.14.36/drivers/dma/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/dma/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -137,6 +137,19 @@
+ To avoid bloating the irq_desc[] array we allocate a sufficient
+ number of IRQ slots and map them dynamically to specific sources.
+
++config MXC_PXP_V2
++ bool "MXC PxP V2 support"
++ depends on ARM
++ select DMA_ENGINE
++ help
++ Support the PxP (Pixel Pipeline) on i.MX6 DualLite and i.MX6 SoloLite.
++ If unsure, select N.
++
++config MXC_PXP_CLIENT_DEVICE
++ bool "MXC PxP Client Device"
++ default y
++ depends on MXC_PXP_V2
++
+ config TXX9_DMAC
+ tristate "Toshiba TXx9 SoC DMA support"
+ depends on MACH_TX49XX || MACH_TX39XX
+diff -Nur linux-3.14.36/drivers/dma/Makefile linux-openelec/drivers/dma/Makefile
+--- linux-3.14.36/drivers/dma/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/dma/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -18,6 +18,7 @@
+ obj-$(CONFIG_DW_DMAC_CORE) += dw/
+ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
+ obj-$(CONFIG_MX3_IPU) += ipu/
++obj-$(CONFIG_MXC_PXP_V2) += pxp/
+ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+ obj-$(CONFIG_SH_DMAE_BASE) += sh/
+ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
+diff -Nur linux-3.14.36/drivers/dma/pxp/Makefile linux-openelec/drivers/dma/pxp/Makefile
+--- linux-3.14.36/drivers/dma/pxp/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/dma/pxp/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_MXC_PXP_V2) += pxp_dma_v2.o
++obj-$(CONFIG_MXC_PXP_CLIENT_DEVICE) += pxp_device.o
+diff -Nur linux-3.14.36/drivers/dma/pxp/pxp_device.c linux-openelec/drivers/dma/pxp/pxp_device.c
+--- linux-3.14.36/drivers/dma/pxp/pxp_device.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/dma/pxp/pxp_device.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,765 @@
++/*
++ * Copyright (C) 2010-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#include <linux/interrupt.h>
++#include <linux/miscdevice.h>
++#include <linux/platform_device.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/pxp_device.h>
++#include <linux/atomic.h>
++#include <linux/platform_data/dma-imx.h>
++
++#define BUFFER_HASH_ORDER 4
++
++static struct pxp_buffer_hash bufhash;
++static struct pxp_irq_info irq_info[NR_PXP_VIRT_CHANNEL];
++
++static int pxp_ht_create(struct pxp_buffer_hash *hash, int order)
++{
++ unsigned long i;
++ unsigned long table_size;
++
++ table_size = 1U << order;
++
++ hash->order = order;
++ hash->hash_table = kmalloc(sizeof(*hash->hash_table) * table_size, GFP_KERNEL);
++
++ if (!hash->hash_table) {
++ pr_err("%s: Out of memory for hash table\n", __func__);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < table_size; i++)
++ INIT_HLIST_HEAD(&hash->hash_table[i]);
++
++ return 0;
++}
++
++static int pxp_ht_insert_item(struct pxp_buffer_hash *hash,
++ struct pxp_buf_obj *new)
++{
++ unsigned long hashkey;
++ struct hlist_head *h_list;
++
++ hashkey = hash_long(new->offset >> PAGE_SHIFT, hash->order);
++ h_list = &hash->hash_table[hashkey];
++
++ spin_lock(&hash->hash_lock);
++ hlist_add_head_rcu(&new->item, h_list);
++ spin_unlock(&hash->hash_lock);
++
++ return 0;
++}
++
++static int pxp_ht_remove_item(struct pxp_buffer_hash *hash,
++ struct pxp_buf_obj *obj)
++{
++ spin_lock(&hash->hash_lock);
++ hlist_del_init_rcu(&obj->item);
++ spin_unlock(&hash->hash_lock);
++ return 0;
++}
++
++static struct hlist_node *pxp_ht_find_key(struct pxp_buffer_hash *hash,
++ unsigned long key)
++{
++ struct pxp_buf_obj *entry;
++ struct hlist_head *h_list;
++ unsigned long hashkey;
++
++ hashkey = hash_long(key, hash->order);
++ h_list = &hash->hash_table[hashkey];
++
++ hlist_for_each_entry_rcu(entry, h_list, item) {
++ if (entry->offset >> PAGE_SHIFT == key)
++ return &entry->item;
++ }
++
++ return NULL;
++}
++
++static void pxp_ht_destroy(struct pxp_buffer_hash *hash)
++{
++ kfree(hash->hash_table);
++ hash->hash_table = NULL;
++}
++
++static int pxp_buffer_handle_create(struct pxp_file *file_priv,
++ struct pxp_buf_obj *obj,
++ uint32_t *handlep)
++{
++ int ret;
++
++ idr_preload(GFP_KERNEL);
++ spin_lock(&file_priv->buffer_lock);
++
++ ret = idr_alloc(&file_priv->buffer_idr, obj, 1, 0, GFP_NOWAIT);
++
++ spin_unlock(&file_priv->buffer_lock);
++ idr_preload_end();
++
++ if (ret < 0)
++ return ret;
++
++ *handlep = ret;
++
++ return 0;
++}
++
++static struct pxp_buf_obj *
++pxp_buffer_object_lookup(struct pxp_file *file_priv,
++ uint32_t handle)
++{
++ struct pxp_buf_obj *obj;
++
++ spin_lock(&file_priv->buffer_lock);
++
++ obj = idr_find(&file_priv->buffer_idr, handle);
++ if (!obj) {
++ spin_unlock(&file_priv->buffer_lock);
++ return NULL;
++ }
++
++ spin_unlock(&file_priv->buffer_lock);
++
++ return obj;
++}
++
++static int pxp_buffer_handle_delete(struct pxp_file *file_priv,
++ uint32_t handle)
++{
++ struct pxp_buf_obj *obj;
++
++ spin_lock(&file_priv->buffer_lock);
++
++ obj = idr_find(&file_priv->buffer_idr, handle);
++ if (!obj) {
++ spin_unlock(&file_priv->buffer_lock);
++ return -EINVAL;
++ }
++
++ idr_remove(&file_priv->buffer_idr, handle);
++ spin_unlock(&file_priv->buffer_lock);
++
++ return 0;
++}
++
++static int pxp_channel_handle_create(struct pxp_file *file_priv,
++ struct pxp_chan_obj *obj,
++ uint32_t *handlep)
++{
++ int ret;
++
++ idr_preload(GFP_KERNEL);
++ spin_lock(&file_priv->channel_lock);
++
++ ret = idr_alloc(&file_priv->channel_idr, obj, 0, 0, GFP_NOWAIT);
++
++ spin_unlock(&file_priv->channel_lock);
++ idr_preload_end();
++
++ if (ret < 0)
++ return ret;
++
++ *handlep = ret;
++
++ return 0;
++}
++
++static struct pxp_chan_obj *
++pxp_channel_object_lookup(struct pxp_file *file_priv,
++ uint32_t handle)
++{
++ struct pxp_chan_obj *obj;
++
++ spin_lock(&file_priv->channel_lock);
++
++ obj = idr_find(&file_priv->channel_idr, handle);
++ if (!obj) {
++ spin_unlock(&file_priv->channel_lock);
++ return NULL;
++ }
++
++ spin_unlock(&file_priv->channel_lock);
++
++ return obj;
++}
++
++static int pxp_channel_handle_delete(struct pxp_file *file_priv,
++ uint32_t handle)
++{
++ struct pxp_chan_obj *obj;
++
++ spin_lock(&file_priv->channel_lock);
++
++ obj = idr_find(&file_priv->channel_idr, handle);
++ if (!obj) {
++ spin_unlock(&file_priv->channel_lock);
++ return -EINVAL;
++ }
++
++ idr_remove(&file_priv->channel_idr, handle);
++ spin_unlock(&file_priv->channel_lock);
++
++ return 0;
++}
++
++static int pxp_alloc_dma_buffer(struct pxp_buf_obj *obj)
++{
++ obj->virtual = dma_alloc_coherent(NULL, PAGE_ALIGN(obj->size),
++ (dma_addr_t *) (&obj->offset),
++ GFP_DMA | GFP_KERNEL);
++ pr_debug("[ALLOC] mem alloc phys_addr = 0x%lx\n", obj->offset);
++
++ if (obj->virtual == NULL) {
++ printk(KERN_ERR "Physical memory allocation error!\n");
++ return -1;
++ }
++
++ return 0;
++}
++
++static void pxp_free_dma_buffer(struct pxp_buf_obj *obj)
++{
++ if (obj->virtual != NULL) {
++ dma_free_coherent(0, PAGE_ALIGN(obj->size),
++ obj->virtual, (dma_addr_t)obj->offset);
++ }
++}
++
++static int
++pxp_buffer_object_free(int id, void *ptr, void *data)
++{
++ struct pxp_file *file_priv = data;
++ struct pxp_buf_obj *obj = ptr;
++ int ret;
++
++ ret = pxp_buffer_handle_delete(file_priv, obj->handle);
++ if (ret < 0)
++ return ret;
++
++ pxp_ht_remove_item(&bufhash, obj);
++ pxp_free_dma_buffer(obj);
++ kfree(obj);
++
++ return 0;
++}
++
++static int
++pxp_channel_object_free(int id, void *ptr, void *data)
++{
++ struct pxp_file *file_priv = data;
++ struct pxp_chan_obj *obj = ptr;
++ int chan_id;
++
++ chan_id = obj->chan->chan_id;
++ wait_event(irq_info[chan_id].waitq,
++ atomic_read(&irq_info[chan_id].irq_pending) == 0);
++
++ pxp_channel_handle_delete(file_priv, obj->handle);
++ dma_release_channel(obj->chan);
++ kfree(obj);
++
++ return 0;
++}
++
++static void pxp_free_buffers(struct pxp_file *file_priv)
++{
++ idr_for_each(&file_priv->buffer_idr,
++ &pxp_buffer_object_free, file_priv);
++ idr_destroy(&file_priv->buffer_idr);
++}
++
++static void pxp_free_channels(struct pxp_file *file_priv)
++{
++ idr_for_each(&file_priv->channel_idr,
++ &pxp_channel_object_free, file_priv);
++ idr_destroy(&file_priv->channel_idr);
++}
++
++/* Callback function triggered after PxP receives an EOF interrupt */
++static void pxp_dma_done(void *arg)
++{
++ struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
++ struct dma_chan *chan = tx_desc->txd.chan;
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ int chan_id = pxp_chan->dma_chan.chan_id;
++
++ pr_debug("DMA Done ISR, chan_id %d\n", chan_id);
++
++ atomic_dec(&irq_info[chan_id].irq_pending);
++ irq_info[chan_id].hist_status = tx_desc->hist_status;
++
++ wake_up(&(irq_info[chan_id].waitq));
++}
++
++static int pxp_ioc_config_chan(struct pxp_file *priv, unsigned long arg)
++{
++ struct scatterlist sg[3];
++ struct pxp_tx_desc *desc;
++ struct dma_async_tx_descriptor *txd;
++ struct pxp_config_data pxp_conf;
++ dma_cookie_t cookie;
++ int handle, chan_id;
++ int i, length, ret;
++ struct dma_chan *chan;
++ struct pxp_chan_obj *obj;
++
++ ret = copy_from_user(&pxp_conf,
++ (struct pxp_config_data *)arg,
++ sizeof(struct pxp_config_data));
++ if (ret)
++ return -EFAULT;
++
++ handle = pxp_conf.handle;
++ obj = pxp_channel_object_lookup(priv, handle);
++ if (!obj)
++ return -EINVAL;
++ chan = obj->chan;
++ chan_id = chan->chan_id;
++
++ sg_init_table(sg, 3);
++
++ txd = chan->device->device_prep_slave_sg(chan,
++ sg, 3,
++ DMA_TO_DEVICE,
++ DMA_PREP_INTERRUPT,
++ NULL);
++ if (!txd) {
++ pr_err("Error preparing a DMA transaction descriptor.\n");
++ return -EIO;
++ }
++
++ txd->callback_param = txd;
++ txd->callback = pxp_dma_done;
++
++ desc = to_tx_desc(txd);
++
++ length = desc->len;
++ for (i = 0; i < length; i++) {
++ if (i == 0) { /* S0 */
++ memcpy(&desc->proc_data,
++ &pxp_conf.proc_data,
++ sizeof(struct pxp_proc_data));
++ memcpy(&desc->layer_param.s0_param,
++ &pxp_conf.s0_param,
++ sizeof(struct pxp_layer_param));
++ } else if (i == 1) { /* Output */
++ memcpy(&desc->layer_param.out_param,
++ &pxp_conf.out_param,
++ sizeof(struct pxp_layer_param));
++ } else {
++ /* OverLay */
++ memcpy(&desc->layer_param.ol_param,
++ &pxp_conf.ol_param,
++ sizeof(struct pxp_layer_param));
++ }
++
++ desc = desc->next;
++ }
++
++ cookie = txd->tx_submit(txd);
++ if (cookie < 0) {
++ pr_err("Error tx_submit\n");
++ return -EIO;
++ }
++
++ atomic_inc(&irq_info[chan_id].irq_pending);
++
++ return 0;
++}
++
++static int pxp_device_open(struct inode *inode, struct file *filp)
++{
++ struct pxp_file *priv;
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++
++ if (!priv)
++ return -ENOMEM;
++
++ filp->private_data = priv;
++ priv->filp = filp;
++
++ idr_init(&priv->buffer_idr);
++ spin_lock_init(&priv->buffer_lock);
++
++ idr_init(&priv->channel_idr);
++ spin_lock_init(&priv->channel_lock);
++
++ return 0;
++}
++
++static int pxp_device_release(struct inode *inode, struct file *filp)
++{
++ struct pxp_file *priv = filp->private_data;
++
++ if (priv) {
++ pxp_free_channels(priv);
++ pxp_free_buffers(priv);
++ kfree(priv);
++ filp->private_data = NULL;
++ }
++
++ return 0;
++}
++
++static int pxp_device_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ int request_size;
++ struct hlist_node *node;
++ struct pxp_buf_obj *obj;
++
++ request_size = vma->vm_end - vma->vm_start;
++
++ pr_debug("start=0x%x, pgoff=0x%x, size=0x%x\n",
++ (unsigned int)(vma->vm_start), (unsigned int)(vma->vm_pgoff),
++ request_size);
++
++ node = pxp_ht_find_key(&bufhash, vma->vm_pgoff);
++ if (!node)
++ return -EINVAL;
++
++ obj = list_entry(node, struct pxp_buf_obj, item);
++ if (obj->offset + (obj->size >> PAGE_SHIFT) <
++ (vma->vm_pgoff + vma_pages(vma)))
++ return -ENOMEM;
++
++ switch (obj->mem_type) {
++ case MEMORY_TYPE_UNCACHED:
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ break;
++ case MEMORY_TYPE_WC:
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ break;
++ case MEMORY_TYPE_CACHED:
++ break;
++ default:
++ pr_err("%s: invalid memory type!\n", __func__);
++ return -EINVAL;
++ }
++
++ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ request_size, vma->vm_page_prot) ? -EAGAIN : 0;
++}
++
++static bool chan_filter(struct dma_chan *chan, void *arg)
++{
++ if (imx_dma_is_pxp(chan))
++ return true;
++ else
++ return false;
++}
++
++static long pxp_device_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ int ret = 0;
++ struct pxp_file *file_priv = filp->private_data;
++
++ switch (cmd) {
++ case PXP_IOC_GET_CHAN:
++ {
++ int ret;
++ struct dma_chan *chan = NULL;
++ dma_cap_mask_t mask;
++ struct pxp_chan_obj *obj = NULL;
++
++ pr_debug("drv: PXP_IOC_GET_CHAN Line %d\n", __LINE__);
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_PRIVATE, mask);
++
++ chan = dma_request_channel(mask, chan_filter, NULL);
++ if (!chan) {
++ pr_err("Unsccessfully received channel!\n");
++ return -EBUSY;
++ }
++
++ pr_debug("Successfully received channel."
++ "chan_id %d\n", chan->chan_id);
++
++ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
++ if (!obj) {
++ dma_release_channel(chan);
++ return -ENOMEM;
++ }
++ obj->chan = chan;
++
++ ret = pxp_channel_handle_create(file_priv, obj,
++ &obj->handle);
++ if (ret) {
++ dma_release_channel(chan);
++ kfree(obj);
++ return ret;
++ }
++
++ init_waitqueue_head(&(irq_info[chan->chan_id].waitq));
++ if (put_user(obj->handle, (u32 __user *) arg)) {
++ pxp_channel_handle_delete(file_priv, obj->handle);
++ dma_release_channel(chan);
++ kfree(obj);
++ return -EFAULT;
++ }
++
++ break;
++ }
++ case PXP_IOC_PUT_CHAN:
++ {
++ int handle;
++ struct pxp_chan_obj *obj;
++
++ if (get_user(handle, (u32 __user *) arg))
++ return -EFAULT;
++
++ pr_debug("%d release handle %d\n", __LINE__, handle);
++
++ obj = pxp_channel_object_lookup(file_priv, handle);
++ if (!obj)
++ return -EINVAL;
++
++ pxp_channel_handle_delete(file_priv, obj->handle);
++ dma_release_channel(obj->chan);
++ kfree(obj);
++
++ break;
++ }
++ case PXP_IOC_CONFIG_CHAN:
++ {
++ int ret;
++
++ ret = pxp_ioc_config_chan(file_priv, arg);
++ if (ret)
++ return ret;
++
++ break;
++ }
++ case PXP_IOC_START_CHAN:
++ {
++ int handle;
++ struct pxp_chan_obj *obj = NULL;
++
++ if (get_user(handle, (u32 __user *) arg))
++ return -EFAULT;
++
++ obj = pxp_channel_object_lookup(file_priv, handle);
++ if (!obj)
++ return -EINVAL;
++
++ dma_async_issue_pending(obj->chan);
++
++ break;
++ }
++ case PXP_IOC_GET_PHYMEM:
++ {
++ struct pxp_mem_desc buffer;
++ struct pxp_buf_obj *obj;
++
++ ret = copy_from_user(&buffer,
++ (struct pxp_mem_desc *)arg,
++ sizeof(struct pxp_mem_desc));
++ if (ret)
++ return -EFAULT;
++
++ pr_debug("[ALLOC] mem alloc size = 0x%x\n",
++ buffer.size);
++
++ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
++ if (!obj)
++ return -ENOMEM;
++ obj->size = buffer.size;
++ obj->mem_type = buffer.mtype;
++
++ ret = pxp_alloc_dma_buffer(obj);
++ if (ret == -1) {
++ printk(KERN_ERR
++ "Physical memory allocation error!\n");
++ kfree(obj);
++ return ret;
++ }
++
++ ret = pxp_buffer_handle_create(file_priv, obj, &obj->handle);
++ if (ret) {
++ pxp_free_dma_buffer(obj);
++ kfree(obj);
++ return ret;
++ }
++ buffer.handle = obj->handle;
++ buffer.phys_addr = obj->offset;
++
++ ret = copy_to_user((void __user *)arg, &buffer,
++ sizeof(struct pxp_mem_desc));
++ if (ret) {
++ pxp_buffer_handle_delete(file_priv, buffer.handle);
++ pxp_free_dma_buffer(obj);
++ kfree(obj);
++ return -EFAULT;
++ }
++
++ pxp_ht_insert_item(&bufhash, obj);
++
++ break;
++ }
++ case PXP_IOC_PUT_PHYMEM:
++ {
++ struct pxp_mem_desc pxp_mem;
++ struct pxp_buf_obj *obj;
++
++ ret = copy_from_user(&pxp_mem,
++ (struct pxp_mem_desc *)arg,
++ sizeof(struct pxp_mem_desc));
++ if (ret)
++ return -EACCES;
++
++ obj = pxp_buffer_object_lookup(file_priv, pxp_mem.handle);
++ if (!obj)
++ return -EINVAL;
++
++ ret = pxp_buffer_handle_delete(file_priv, obj->handle);
++ if (ret)
++ return ret;
++
++ pxp_ht_remove_item(&bufhash, obj);
++ pxp_free_dma_buffer(obj);
++ kfree(obj);
++
++ break;
++ }
++ case PXP_IOC_FLUSH_PHYMEM:
++ {
++ int ret;
++ struct pxp_mem_flush flush;
++ struct pxp_buf_obj *obj;
++
++ ret = copy_from_user(&flush,
++ (struct pxp_mem_flush *)arg,
++ sizeof(struct pxp_mem_flush));
++ if (ret)
++ return -EACCES;
++
++ obj = pxp_buffer_object_lookup(file_priv, flush.handle);
++ if (!obj)
++ return -EINVAL;
++
++ switch (flush.type) {
++ case CACHE_CLEAN:
++ dma_sync_single_for_device(NULL, obj->offset,
++ obj->size, DMA_TO_DEVICE);
++ break;
++ case CACHE_INVALIDATE:
++ dma_sync_single_for_device(NULL, obj->offset,
++ obj->size, DMA_FROM_DEVICE);
++ break;
++ case CACHE_FLUSH:
++ dma_sync_single_for_device(NULL, obj->offset,
++ obj->size, DMA_TO_DEVICE);
++ dma_sync_single_for_device(NULL, obj->offset,
++ obj->size, DMA_FROM_DEVICE);
++ break;
++ default:
++ pr_err("%s: invalid cache flush type\n", __func__);
++ return -EINVAL;
++ }
++
++ break;
++ }
++ case PXP_IOC_WAIT4CMPLT:
++ {
++ struct pxp_chan_handle chan_handle;
++ int ret, chan_id, handle;
++ struct pxp_chan_obj *obj = NULL;
++
++ ret = copy_from_user(&chan_handle,
++ (struct pxp_chan_handle *)arg,
++ sizeof(struct pxp_chan_handle));
++ if (ret)
++ return -EFAULT;
++
++ handle = chan_handle.handle;
++ obj = pxp_channel_object_lookup(file_priv, handle);
++ if (!obj)
++ return -EINVAL;
++ chan_id = obj->chan->chan_id;
++
++ ret = wait_event_interruptible
++ (irq_info[chan_id].waitq,
++ (atomic_read(&irq_info[chan_id].irq_pending) == 0));
++ if (ret < 0) {
++ printk(KERN_WARNING
++ "WAIT4CMPLT: signal received.\n");
++ return -ERESTARTSYS;
++ }
++
++ chan_handle.hist_status = irq_info[chan_id].hist_status;
++ ret = copy_to_user((struct pxp_chan_handle *)arg,
++ &chan_handle,
++ sizeof(struct pxp_chan_handle));
++ if (ret)
++ return -EFAULT;
++ break;
++ }
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++static const struct file_operations pxp_device_fops = {
++ .open = pxp_device_open,
++ .release = pxp_device_release,
++ .unlocked_ioctl = pxp_device_ioctl,
++ .mmap = pxp_device_mmap,
++};
++
++static struct miscdevice pxp_device_miscdev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "pxp_device",
++ .fops = &pxp_device_fops,
++};
++
++int register_pxp_device(void)
++{
++ int ret;
++
++ ret = misc_register(&pxp_device_miscdev);
++ if (ret)
++ return ret;
++
++ ret = pxp_ht_create(&bufhash, BUFFER_HASH_ORDER);
++ if (ret)
++ return ret;
++ spin_lock_init(&(bufhash.hash_lock));
++
++ pr_debug("PxP_Device registered Successfully\n");
++ return 0;
++}
++
++void unregister_pxp_device(void)
++{
++ pxp_ht_destroy(&bufhash);
++ misc_deregister(&pxp_device_miscdev);
++}
+diff -Nur linux-3.14.36/drivers/dma/pxp/pxp_dma_v2.c linux-openelec/drivers/dma/pxp/pxp_dma_v2.c
+--- linux-3.14.36/drivers/dma/pxp/pxp_dma_v2.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/dma/pxp/pxp_dma_v2.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1854 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++/*
++ * Based on STMP378X PxP driver
++ * Copyright 2008-2009 Embedded Alley Solutions, Inc All Rights Reserved.
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/dmaengine.h>
++#include <linux/pxp_dma.h>
++#include <linux/timer.h>
++#include <linux/clk.h>
++#include <linux/workqueue.h>
++#include <linux/sched.h>
++#include <linux/of.h>
++#include <linux/kthread.h>
++
++#include "regs-pxp_v2.h"
++
++#define PXP_DOWNSCALE_THRESHOLD 0x4000
++
++static LIST_HEAD(head);
++static int timeout_in_ms = 600;
++static unsigned int block_size;
++static struct kmem_cache *tx_desc_cache;
++
++struct pxp_dma {
++ struct dma_device dma;
++};
++
++struct pxps {
++ struct platform_device *pdev;
++ struct clk *clk;
++ void __iomem *base;
++ int irq; /* PXP IRQ to the CPU */
++
++ spinlock_t lock;
++ struct mutex clk_mutex;
++ int clk_stat;
++#define CLK_STAT_OFF 0
++#define CLK_STAT_ON 1
++ int pxp_ongoing;
++ int lut_state;
++
++ struct device *dev;
++ struct pxp_dma pxp_dma;
++ struct pxp_channel channel[NR_PXP_VIRT_CHANNEL];
++ struct work_struct work;
++
++ /* describes most recent processing configuration */
++ struct pxp_config_data pxp_conf_state;
++
++ /* to turn clock off when pxp is inactive */
++ struct timer_list clk_timer;
++
++ /* for pxp config dispatch asynchronously*/
++ struct task_struct *dispatch;
++ wait_queue_head_t thread_waitq;
++ struct completion complete;
++};
++
++#define to_pxp_dma(d) container_of(d, struct pxp_dma, dma)
++#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)
++#define to_pxp_channel(d) container_of(d, struct pxp_channel, dma_chan)
++#define to_pxp(id) container_of(id, struct pxps, pxp_dma)
++
++#define PXP_DEF_BUFS 2
++#define PXP_MIN_PIX 8
++
++static uint32_t pxp_s0_formats[] = {
++ PXP_PIX_FMT_RGB32,
++ PXP_PIX_FMT_RGB565,
++ PXP_PIX_FMT_RGB555,
++ PXP_PIX_FMT_YUV420P,
++ PXP_PIX_FMT_YUV422P,
++};
++
++/*
++ * PXP common functions
++ */
++static void dump_pxp_reg(struct pxps *pxp)
++{
++ dev_dbg(pxp->dev, "PXP_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CTRL));
++ dev_dbg(pxp->dev, "PXP_STAT 0x%x",
++ __raw_readl(pxp->base + HW_PXP_STAT));
++ dev_dbg(pxp->dev, "PXP_OUT_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_CTRL));
++ dev_dbg(pxp->dev, "PXP_OUT_BUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_BUF));
++ dev_dbg(pxp->dev, "PXP_OUT_BUF2 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_BUF2));
++ dev_dbg(pxp->dev, "PXP_OUT_PITCH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_PITCH));
++ dev_dbg(pxp->dev, "PXP_OUT_LRC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_LRC));
++ dev_dbg(pxp->dev, "PXP_OUT_PS_ULC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_PS_ULC));
++ dev_dbg(pxp->dev, "PXP_OUT_PS_LRC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_PS_LRC));
++ dev_dbg(pxp->dev, "PXP_OUT_AS_ULC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_AS_ULC));
++ dev_dbg(pxp->dev, "PXP_OUT_AS_LRC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_AS_LRC));
++ dev_dbg(pxp->dev, "PXP_PS_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_CTRL));
++ dev_dbg(pxp->dev, "PXP_PS_BUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_BUF));
++ dev_dbg(pxp->dev, "PXP_PS_UBUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_UBUF));
++ dev_dbg(pxp->dev, "PXP_PS_VBUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_VBUF));
++ dev_dbg(pxp->dev, "PXP_PS_PITCH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_PITCH));
++ dev_dbg(pxp->dev, "PXP_PS_BACKGROUND 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_BACKGROUND));
++ dev_dbg(pxp->dev, "PXP_PS_SCALE 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_SCALE));
++ dev_dbg(pxp->dev, "PXP_PS_OFFSET 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_OFFSET));
++ dev_dbg(pxp->dev, "PXP_PS_CLRKEYLOW 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_CLRKEYLOW));
++ dev_dbg(pxp->dev, "PXP_PS_CLRKEYHIGH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_CLRKEYHIGH));
++ dev_dbg(pxp->dev, "PXP_AS_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_CTRL));
++ dev_dbg(pxp->dev, "PXP_AS_BUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_BUF));
++ dev_dbg(pxp->dev, "PXP_AS_PITCH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_PITCH));
++ dev_dbg(pxp->dev, "PXP_AS_CLRKEYLOW 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_CLRKEYLOW));
++ dev_dbg(pxp->dev, "PXP_AS_CLRKEYHIGH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_CLRKEYHIGH));
++ dev_dbg(pxp->dev, "PXP_CSC1_COEF0 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC1_COEF0));
++ dev_dbg(pxp->dev, "PXP_CSC1_COEF1 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC1_COEF1));
++ dev_dbg(pxp->dev, "PXP_CSC1_COEF2 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC1_COEF2));
++ dev_dbg(pxp->dev, "PXP_CSC2_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_CTRL));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF0 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF0));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF1 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF1));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF2 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF2));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF3 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF3));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF4 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF4));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF5 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF5));
++ dev_dbg(pxp->dev, "PXP_LUT_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_LUT_CTRL));
++ dev_dbg(pxp->dev, "PXP_LUT_ADDR 0x%x",
++ __raw_readl(pxp->base + HW_PXP_LUT_ADDR));
++ dev_dbg(pxp->dev, "PXP_LUT_DATA 0x%x",
++ __raw_readl(pxp->base + HW_PXP_LUT_DATA));
++ dev_dbg(pxp->dev, "PXP_LUT_EXTMEM 0x%x",
++ __raw_readl(pxp->base + HW_PXP_LUT_EXTMEM));
++ dev_dbg(pxp->dev, "PXP_CFA 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CFA));
++ dev_dbg(pxp->dev, "PXP_HIST_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST_CTRL));
++ dev_dbg(pxp->dev, "PXP_HIST2_PARAM 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST2_PARAM));
++ dev_dbg(pxp->dev, "PXP_HIST4_PARAM 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST4_PARAM));
++ dev_dbg(pxp->dev, "PXP_HIST8_PARAM0 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST8_PARAM0));
++ dev_dbg(pxp->dev, "PXP_HIST8_PARAM1 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST8_PARAM1));
++ dev_dbg(pxp->dev, "PXP_HIST16_PARAM0 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST16_PARAM0));
++ dev_dbg(pxp->dev, "PXP_HIST16_PARAM1 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST16_PARAM1));
++ dev_dbg(pxp->dev, "PXP_HIST16_PARAM2 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST16_PARAM2));
++ dev_dbg(pxp->dev, "PXP_HIST16_PARAM3 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST16_PARAM3));
++ dev_dbg(pxp->dev, "PXP_POWER 0x%x",
++ __raw_readl(pxp->base + HW_PXP_POWER));
++ dev_dbg(pxp->dev, "PXP_NEXT 0x%x",
++ __raw_readl(pxp->base + HW_PXP_NEXT));
++ dev_dbg(pxp->dev, "PXP_DEBUGCTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_DEBUGCTRL));
++ dev_dbg(pxp->dev, "PXP_DEBUG 0x%x",
++ __raw_readl(pxp->base + HW_PXP_DEBUG));
++ dev_dbg(pxp->dev, "PXP_VERSION 0x%x",
++ __raw_readl(pxp->base + HW_PXP_VERSION));
++}
++
++static bool is_yuv(u32 pix_fmt)
++{
++ if ((pix_fmt == PXP_PIX_FMT_YUYV) |
++ (pix_fmt == PXP_PIX_FMT_UYVY) |
++ (pix_fmt == PXP_PIX_FMT_YVYU) |
++ (pix_fmt == PXP_PIX_FMT_VYUY) |
++ (pix_fmt == PXP_PIX_FMT_Y41P) |
++ (pix_fmt == PXP_PIX_FMT_YUV444) |
++ (pix_fmt == PXP_PIX_FMT_NV12) |
++ (pix_fmt == PXP_PIX_FMT_NV16) |
++ (pix_fmt == PXP_PIX_FMT_NV61) |
++ (pix_fmt == PXP_PIX_FMT_GREY) |
++ (pix_fmt == PXP_PIX_FMT_GY04) |
++ (pix_fmt == PXP_PIX_FMT_YVU410P) |
++ (pix_fmt == PXP_PIX_FMT_YUV410P) |
++ (pix_fmt == PXP_PIX_FMT_YVU420P) |
++ (pix_fmt == PXP_PIX_FMT_YUV420P) |
++ (pix_fmt == PXP_PIX_FMT_YUV420P2) |
++ (pix_fmt == PXP_PIX_FMT_YVU422P) |
++ (pix_fmt == PXP_PIX_FMT_YUV422P)) {
++ return true;
++ } else {
++ return false;
++ }
++}
++
++static void pxp_set_ctrl(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_proc_data *proc_data = &pxp_conf->proc_data;
++ u32 ctrl;
++ u32 fmt_ctrl;
++ int need_swap = 0; /* to support YUYV and YVYU formats */
++
++ /* Configure S0 input format */
++ switch (pxp_conf->s0_param.pixel_fmt) {
++ case PXP_PIX_FMT_RGB32:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__RGB888;
++ break;
++ case PXP_PIX_FMT_RGB565:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__RGB565;
++ break;
++ case PXP_PIX_FMT_RGB555:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__RGB555;
++ break;
++ case PXP_PIX_FMT_YUV420P:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV420;
++ break;
++ case PXP_PIX_FMT_YVU420P:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV420;
++ break;
++ case PXP_PIX_FMT_GREY:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__Y8;
++ break;
++ case PXP_PIX_FMT_GY04:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__Y4;
++ break;
++ case PXP_PIX_FMT_YUV422P:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV422;
++ break;
++ case PXP_PIX_FMT_UYVY:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__UYVY1P422;
++ break;
++ case PXP_PIX_FMT_YUYV:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__UYVY1P422;
++ need_swap = 1;
++ break;
++ case PXP_PIX_FMT_VYUY:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__VYUY1P422;
++ break;
++ case PXP_PIX_FMT_YVYU:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__VYUY1P422;
++ need_swap = 1;
++ break;
++ case PXP_PIX_FMT_NV12:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV2P420;
++ break;
++ case PXP_PIX_FMT_NV21:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YVU2P420;
++ break;
++ case PXP_PIX_FMT_NV16:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV2P422;
++ break;
++ case PXP_PIX_FMT_NV61:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YVU2P422;
++ break;
++ default:
++ fmt_ctrl = 0;
++ }
++
++ ctrl = BF_PXP_PS_CTRL_FORMAT(fmt_ctrl) | BF_PXP_PS_CTRL_SWAP(need_swap);
++ __raw_writel(ctrl, pxp->base + HW_PXP_PS_CTRL_SET);
++
++ /* Configure output format based on out_channel format */
++ switch (pxp_conf->out_param.pixel_fmt) {
++ case PXP_PIX_FMT_RGB32:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__RGB888;
++ break;
++ case PXP_PIX_FMT_BGRA32:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__ARGB8888;
++ break;
++ case PXP_PIX_FMT_RGB24:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__RGB888P;
++ break;
++ case PXP_PIX_FMT_RGB565:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__RGB565;
++ break;
++ case PXP_PIX_FMT_RGB555:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__RGB555;
++ break;
++ case PXP_PIX_FMT_GREY:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__Y8;
++ break;
++ case PXP_PIX_FMT_GY04:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__Y4;
++ break;
++ case PXP_PIX_FMT_UYVY:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__UYVY1P422;
++ break;
++ case PXP_PIX_FMT_VYUY:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__VYUY1P422;
++ break;
++ case PXP_PIX_FMT_NV12:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__YUV2P420;
++ break;
++ case PXP_PIX_FMT_NV21:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__YVU2P420;
++ break;
++ case PXP_PIX_FMT_NV16:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__YUV2P422;
++ break;
++ case PXP_PIX_FMT_NV61:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__YVU2P422;
++ break;
++ default:
++ fmt_ctrl = 0;
++ }
++
++ ctrl = BF_PXP_OUT_CTRL_FORMAT(fmt_ctrl);
++ __raw_writel(ctrl, pxp->base + HW_PXP_OUT_CTRL);
++
++ ctrl = 0;
++ if (proc_data->scaling)
++ ;
++ if (proc_data->vflip)
++ ctrl |= BM_PXP_CTRL_VFLIP;
++ if (proc_data->hflip)
++ ctrl |= BM_PXP_CTRL_HFLIP;
++ if (proc_data->rotate) {
++ ctrl |= BF_PXP_CTRL_ROTATE(proc_data->rotate / 90);
++ if (proc_data->rot_pos)
++ ctrl |= BM_PXP_CTRL_ROT_POS;
++ }
++
++ /* In default, the block size is set to 8x8
++ * But block size can be set to 16x16 due to
++ * blocksize variable modification
++ */
++ ctrl |= block_size << 23;
++
++ __raw_writel(ctrl, pxp->base + HW_PXP_CTRL);
++}
++
++static int pxp_start(struct pxps *pxp)
++{
++ __raw_writel(BM_PXP_CTRL_IRQ_ENABLE, pxp->base + HW_PXP_CTRL_SET);
++ __raw_writel(BM_PXP_CTRL_ENABLE, pxp->base + HW_PXP_CTRL_SET);
++ dump_pxp_reg(pxp);
++
++ return 0;
++}
++
++static void pxp_set_outbuf(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *out_params = &pxp_conf->out_param;
++
++ __raw_writel(out_params->paddr, pxp->base + HW_PXP_OUT_BUF);
++
++ __raw_writel(BF_PXP_OUT_LRC_X(out_params->width - 1) |
++ BF_PXP_OUT_LRC_Y(out_params->height - 1),
++ pxp->base + HW_PXP_OUT_LRC);
++
++ if (out_params->pixel_fmt == PXP_PIX_FMT_RGB24) {
++ __raw_writel(out_params->stride * 3,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_BGRA32 ||
++ out_params->pixel_fmt == PXP_PIX_FMT_RGB32) {
++ __raw_writel(out_params->stride << 2,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_RGB565) {
++ __raw_writel(out_params->stride << 1,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_UYVY ||
++ (out_params->pixel_fmt == PXP_PIX_FMT_VYUY)) {
++ __raw_writel(out_params->stride << 1,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_GREY ||
++ out_params->pixel_fmt == PXP_PIX_FMT_NV12 ||
++ out_params->pixel_fmt == PXP_PIX_FMT_NV21 ||
++ out_params->pixel_fmt == PXP_PIX_FMT_NV16 ||
++ out_params->pixel_fmt == PXP_PIX_FMT_NV61) {
++ __raw_writel(out_params->stride,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_GY04) {
++ __raw_writel(out_params->stride >> 1,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else {
++ __raw_writel(0, pxp->base + HW_PXP_OUT_PITCH);
++ }
++
++ /* set global alpha if necessary */
++ if (out_params->global_alpha_enable) {
++ __raw_writel(out_params->global_alpha << 24,
++ pxp->base + HW_PXP_OUT_CTRL_SET);
++ __raw_writel(BM_PXP_OUT_CTRL_ALPHA_OUTPUT,
++ pxp->base + HW_PXP_OUT_CTRL_SET);
++ }
++}
++
++static void pxp_set_s0colorkey(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *s0_params = &pxp_conf->s0_param;
++
++ /* Low and high are set equal. V4L does not allow a chromakey range */
++ if (s0_params->color_key_enable == 0 || s0_params->color_key == -1) {
++ /* disable color key */
++ __raw_writel(0xFFFFFF, pxp->base + HW_PXP_PS_CLRKEYLOW);
++ __raw_writel(0, pxp->base + HW_PXP_PS_CLRKEYHIGH);
++ } else {
++ __raw_writel(s0_params->color_key,
++ pxp->base + HW_PXP_PS_CLRKEYLOW);
++ __raw_writel(s0_params->color_key,
++ pxp->base + HW_PXP_PS_CLRKEYHIGH);
++ }
++}
++
++static void pxp_set_olcolorkey(int layer_no, struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *ol_params = &pxp_conf->ol_param[layer_no];
++
++ /* Low and high are set equal. V4L does not allow a chromakey range */
++ if (ol_params->color_key_enable != 0 && ol_params->color_key != -1) {
++ __raw_writel(ol_params->color_key,
++ pxp->base + HW_PXP_AS_CLRKEYLOW);
++ __raw_writel(ol_params->color_key,
++ pxp->base + HW_PXP_AS_CLRKEYHIGH);
++ } else {
++ /* disable color key */
++ __raw_writel(0xFFFFFF, pxp->base + HW_PXP_AS_CLRKEYLOW);
++ __raw_writel(0, pxp->base + HW_PXP_AS_CLRKEYHIGH);
++ }
++}
++
++static void pxp_set_oln(int layer_no, struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *olparams_data = &pxp_conf->ol_param[layer_no];
++ dma_addr_t phys_addr = olparams_data->paddr;
++ u32 pitch = olparams_data->stride ? olparams_data->stride :
++ olparams_data->width;
++
++ __raw_writel(phys_addr, pxp->base + HW_PXP_AS_BUF);
++
++ /* Fixme */
++ if (olparams_data->width == 0 && olparams_data->height == 0) {
++ __raw_writel(0xffffffff, pxp->base + HW_PXP_OUT_AS_ULC);
++ __raw_writel(0x0, pxp->base + HW_PXP_OUT_AS_LRC);
++ } else {
++ __raw_writel(0x0, pxp->base + HW_PXP_OUT_AS_ULC);
++ if (pxp_conf->proc_data.rotate == 90 ||
++ pxp_conf->proc_data.rotate == 270) {
++ if (pxp_conf->proc_data.rot_pos == 1) {
++ __raw_writel(BF_PXP_OUT_AS_LRC_X(olparams_data->height - 1) |
++ BF_PXP_OUT_AS_LRC_Y(olparams_data->width - 1),
++ pxp->base + HW_PXP_OUT_AS_LRC);
++ } else {
++ __raw_writel(BF_PXP_OUT_AS_LRC_X(olparams_data->width - 1) |
++ BF_PXP_OUT_AS_LRC_Y(olparams_data->height - 1),
++ pxp->base + HW_PXP_OUT_AS_LRC);
++ }
++ } else {
++ __raw_writel(BF_PXP_OUT_AS_LRC_X(olparams_data->width - 1) |
++ BF_PXP_OUT_AS_LRC_Y(olparams_data->height - 1),
++ pxp->base + HW_PXP_OUT_AS_LRC);
++ }
++ }
++
++ if ((olparams_data->pixel_fmt == PXP_PIX_FMT_BGRA32) |
++ (olparams_data->pixel_fmt == PXP_PIX_FMT_RGB32)) {
++ __raw_writel(pitch << 2,
++ pxp->base + HW_PXP_AS_PITCH);
++ } else if (olparams_data->pixel_fmt == PXP_PIX_FMT_RGB565) {
++ __raw_writel(pitch << 1,
++ pxp->base + HW_PXP_AS_PITCH);
++ } else {
++ __raw_writel(0, pxp->base + HW_PXP_AS_PITCH);
++ }
++}
++
++static void pxp_set_olparam(int layer_no, struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *olparams_data = &pxp_conf->ol_param[layer_no];
++ u32 olparam;
++
++ olparam = BF_PXP_AS_CTRL_ALPHA(olparams_data->global_alpha);
++ if (olparams_data->pixel_fmt == PXP_PIX_FMT_RGB32) {
++ olparam |=
++ BF_PXP_AS_CTRL_FORMAT(BV_PXP_AS_CTRL_FORMAT__RGB888);
++ } else if (olparams_data->pixel_fmt == PXP_PIX_FMT_BGRA32) {
++ olparam |=
++ BF_PXP_AS_CTRL_FORMAT(BV_PXP_AS_CTRL_FORMAT__ARGB8888);
++ if (!olparams_data->combine_enable) {
++ olparam |=
++ BF_PXP_AS_CTRL_ALPHA_CTRL
++ (BV_PXP_AS_CTRL_ALPHA_CTRL__ROPs);
++ olparam |= 0x3 << 16;
++ }
++ } else if (olparams_data->pixel_fmt == PXP_PIX_FMT_RGB565) {
++ olparam |=
++ BF_PXP_AS_CTRL_FORMAT(BV_PXP_AS_CTRL_FORMAT__RGB565);
++ }
++ if (olparams_data->global_alpha_enable) {
++ if (olparams_data->global_override) {
++ olparam |=
++ BF_PXP_AS_CTRL_ALPHA_CTRL
++ (BV_PXP_AS_CTRL_ALPHA_CTRL__Override);
++ } else {
++ olparam |=
++ BF_PXP_AS_CTRL_ALPHA_CTRL
++ (BV_PXP_AS_CTRL_ALPHA_CTRL__Multiply);
++ }
++ if (olparams_data->alpha_invert)
++ olparam |= BM_PXP_AS_CTRL_ALPHA_INVERT;
++ }
++ if (olparams_data->color_key_enable)
++ olparam |= BM_PXP_AS_CTRL_ENABLE_COLORKEY;
++
++ __raw_writel(olparam, pxp->base + HW_PXP_AS_CTRL);
++}
++
++static void pxp_set_s0param(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_proc_data *proc_data = &pxp_conf->proc_data;
++ u32 s0param;
++
++ /* contains the coordinate for the PS in the OUTPUT buffer. */
++ if ((pxp_conf->s0_param).width == 0 &&
++ (pxp_conf->s0_param).height == 0) {
++ __raw_writel(0xffffffff, pxp->base + HW_PXP_OUT_PS_ULC);
++ __raw_writel(0x0, pxp->base + HW_PXP_OUT_PS_LRC);
++ } else {
++ s0param = BF_PXP_OUT_PS_ULC_X(proc_data->drect.left);
++ s0param |= BF_PXP_OUT_PS_ULC_Y(proc_data->drect.top);
++ __raw_writel(s0param, pxp->base + HW_PXP_OUT_PS_ULC);
++ s0param = BF_PXP_OUT_PS_LRC_X(proc_data->drect.left +
++ proc_data->drect.width - 1);
++ s0param |= BF_PXP_OUT_PS_LRC_Y(proc_data->drect.top +
++ proc_data->drect.height - 1);
++ __raw_writel(s0param, pxp->base + HW_PXP_OUT_PS_LRC);
++ }
++}
++
++/* crop behavior is re-designed in h/w. */
++static void pxp_set_s0crop(struct pxps *pxp)
++{
++ /*
++ * place-holder, it's implemented in other functions in this driver.
++ * Refer to "Clipping source images" section in RM for detail.
++ */
++}
++
++static int pxp_set_scaling(struct pxps *pxp)
++{
++ int ret = 0;
++ u32 xscale, yscale, s0scale;
++ u32 decx, decy, xdec = 0, ydec = 0;
++ struct pxp_proc_data *proc_data = &pxp->pxp_conf_state.proc_data;
++
++ if (((proc_data->srect.width == proc_data->drect.width) &&
++ (proc_data->srect.height == proc_data->drect.height)) ||
++ ((proc_data->srect.width == 0) && (proc_data->srect.height == 0))) {
++ proc_data->scaling = 0;
++ __raw_writel(0x10001000, pxp->base + HW_PXP_PS_SCALE);
++ __raw_writel(0, pxp->base + HW_PXP_PS_CTRL);
++ goto out;
++ }
++
++ proc_data->scaling = 1;
++ decx = proc_data->srect.width / proc_data->drect.width;
++ decy = proc_data->srect.height / proc_data->drect.height;
++ if (decx > 0) {
++ if (decx >= 2 && decx < 4) {
++ decx = 2;
++ xdec = 1;
++ } else if (decx >= 4 && decx < 8) {
++ decx = 4;
++ xdec = 2;
++ } else if (decx >= 8) {
++ decx = 8;
++ xdec = 3;
++ }
++ xscale = proc_data->srect.width * 0x1000 /
++ (proc_data->drect.width * decx);
++ } else
++ xscale = proc_data->srect.width * 0x1000 /
++ proc_data->drect.width;
++ if (decy > 0) {
++ if (decy >= 2 && decy < 4) {
++ decy = 2;
++ ydec = 1;
++ } else if (decy >= 4 && decy < 8) {
++ decy = 4;
++ ydec = 2;
++ } else if (decy >= 8) {
++ decy = 8;
++ ydec = 3;
++ }
++ yscale = proc_data->srect.height * 0x1000 /
++ (proc_data->drect.height * decy);
++ } else
++ yscale = proc_data->srect.height * 0x1000 /
++ proc_data->drect.height;
++
++ __raw_writel((xdec << 10) | (ydec << 8), pxp->base + HW_PXP_PS_CTRL);
++
++ if (xscale > PXP_DOWNSCALE_THRESHOLD)
++ xscale = PXP_DOWNSCALE_THRESHOLD;
++ if (yscale > PXP_DOWNSCALE_THRESHOLD)
++ yscale = PXP_DOWNSCALE_THRESHOLD;
++ s0scale = BF_PXP_PS_SCALE_YSCALE(yscale) |
++ BF_PXP_PS_SCALE_XSCALE(xscale);
++ __raw_writel(s0scale, pxp->base + HW_PXP_PS_SCALE);
++
++out:
++ pxp_set_ctrl(pxp);
++
++ return ret;
++}
++
++static void pxp_set_bg(struct pxps *pxp)
++{
++ __raw_writel(pxp->pxp_conf_state.proc_data.bgcolor,
++ pxp->base + HW_PXP_PS_BACKGROUND);
++}
++
++static void pxp_set_lut(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ int lut_op = pxp_conf->proc_data.lut_transform;
++ u32 reg_val;
++ int i;
++ bool use_cmap = (lut_op & PXP_LUT_USE_CMAP) ? true : false;
++ u8 *cmap = pxp_conf->proc_data.lut_map;
++ u32 entry_src;
++ u32 pix_val;
++ u8 entry[4];
++
++ /*
++ * If LUT already configured as needed, return...
++ * Unless CMAP is needed and it has been updated.
++ */
++ if ((pxp->lut_state == lut_op) &&
++ !(use_cmap && pxp_conf->proc_data.lut_map_updated))
++ return;
++
++ if (lut_op == PXP_LUT_NONE) {
++ __raw_writel(BM_PXP_LUT_CTRL_BYPASS,
++ pxp->base + HW_PXP_LUT_CTRL);
++ } else if (((lut_op & PXP_LUT_INVERT) != 0)
++ && ((lut_op & PXP_LUT_BLACK_WHITE) != 0)) {
++ /* Fill out LUT table with inverted monochromized values */
++
++ /* clear bypass bit, set lookup mode & out mode */
++ __raw_writel(BF_PXP_LUT_CTRL_LOOKUP_MODE
++ (BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8) |
++ BF_PXP_LUT_CTRL_OUT_MODE
++ (BV_PXP_LUT_CTRL_OUT_MODE__Y8),
++ pxp->base + HW_PXP_LUT_CTRL);
++
++ /* Initialize LUT address to 0 and set NUM_BYTES to 0 */
++ __raw_writel(0, pxp->base + HW_PXP_LUT_ADDR);
++
++ /* LUT address pointer auto-increments after each data write */
++ for (pix_val = 0; pix_val < 256; pix_val += 4) {
++ for (i = 0; i < 4; i++) {
++ entry_src = use_cmap ?
++ cmap[pix_val + i] : pix_val + i;
++ entry[i] = (entry_src < 0x80) ? 0xFF : 0x00;
++ }
++ reg_val = (entry[3] << 24) | (entry[2] << 16) |
++ (entry[1] << 8) | entry[0];
++ __raw_writel(reg_val, pxp->base + HW_PXP_LUT_DATA);
++ }
++ } else if ((lut_op & PXP_LUT_INVERT) != 0) {
++ /* Fill out LUT table with 8-bit inverted values */
++
++ /* clear bypass bit, set lookup mode & out mode */
++ __raw_writel(BF_PXP_LUT_CTRL_LOOKUP_MODE
++ (BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8) |
++ BF_PXP_LUT_CTRL_OUT_MODE
++ (BV_PXP_LUT_CTRL_OUT_MODE__Y8),
++ pxp->base + HW_PXP_LUT_CTRL);
++
++ /* Initialize LUT address to 0 and set NUM_BYTES to 0 */
++ __raw_writel(0, pxp->base + HW_PXP_LUT_ADDR);
++
++ /* LUT address pointer auto-increments after each data write */
++ for (pix_val = 0; pix_val < 256; pix_val += 4) {
++ for (i = 0; i < 4; i++) {
++ entry_src = use_cmap ?
++ cmap[pix_val + i] : pix_val + i;
++ entry[i] = ~entry_src & 0xFF;
++ }
++ reg_val = (entry[3] << 24) | (entry[2] << 16) |
++ (entry[1] << 8) | entry[0];
++ __raw_writel(reg_val, pxp->base + HW_PXP_LUT_DATA);
++ }
++ } else if ((lut_op & PXP_LUT_BLACK_WHITE) != 0) {
++ /* Fill out LUT table with 8-bit monochromized values */
++
++ /* clear bypass bit, set lookup mode & out mode */
++ __raw_writel(BF_PXP_LUT_CTRL_LOOKUP_MODE
++ (BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8) |
++ BF_PXP_LUT_CTRL_OUT_MODE
++ (BV_PXP_LUT_CTRL_OUT_MODE__Y8),
++ pxp->base + HW_PXP_LUT_CTRL);
++
++ /* Initialize LUT address to 0 and set NUM_BYTES to 0 */
++ __raw_writel(0, pxp->base + HW_PXP_LUT_ADDR);
++
++ /* LUT address pointer auto-increments after each data write */
++ for (pix_val = 0; pix_val < 256; pix_val += 4) {
++ for (i = 0; i < 4; i++) {
++ entry_src = use_cmap ?
++ cmap[pix_val + i] : pix_val + i;
++ entry[i] = (entry_src < 0x80) ? 0x00 : 0xFF;
++ }
++ reg_val = (entry[3] << 24) | (entry[2] << 16) |
++ (entry[1] << 8) | entry[0];
++ __raw_writel(reg_val, pxp->base + HW_PXP_LUT_DATA);
++ }
++ } else if (use_cmap) {
++ /* Fill out LUT table using colormap values */
++
++ /* clear bypass bit, set lookup mode & out mode */
++ __raw_writel(BF_PXP_LUT_CTRL_LOOKUP_MODE
++ (BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8) |
++ BF_PXP_LUT_CTRL_OUT_MODE
++ (BV_PXP_LUT_CTRL_OUT_MODE__Y8),
++ pxp->base + HW_PXP_LUT_CTRL);
++
++ /* Initialize LUT address to 0 and set NUM_BYTES to 0 */
++ __raw_writel(0, pxp->base + HW_PXP_LUT_ADDR);
++
++ /* LUT address pointer auto-increments after each data write */
++ for (pix_val = 0; pix_val < 256; pix_val += 4) {
++ for (i = 0; i < 4; i++)
++ entry[i] = cmap[pix_val + i];
++ reg_val = (entry[3] << 24) | (entry[2] << 16) |
++ (entry[1] << 8) | entry[0];
++ __raw_writel(reg_val, pxp->base + HW_PXP_LUT_DATA);
++ }
++ }
++
++ pxp->lut_state = lut_op;
++}
++
++static void pxp_set_csc(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *s0_params = &pxp_conf->s0_param;
++ struct pxp_layer_param *ol_params = &pxp_conf->ol_param[0];
++ struct pxp_layer_param *out_params = &pxp_conf->out_param;
++
++ bool input_is_YUV = is_yuv(s0_params->pixel_fmt);
++ bool output_is_YUV = is_yuv(out_params->pixel_fmt);
++
++ if (input_is_YUV && output_is_YUV) {
++ /*
++ * Input = YUV, Output = YUV
++ * No CSC unless we need to do combining
++ */
++ if (ol_params->combine_enable) {
++ /* Must convert to RGB for combining with RGB overlay */
++
++ /* CSC1 - YUV->RGB */
++ __raw_writel(0x04030000, pxp->base + HW_PXP_CSC1_COEF0);
++ __raw_writel(0x01230208, pxp->base + HW_PXP_CSC1_COEF1);
++ __raw_writel(0x076b079c, pxp->base + HW_PXP_CSC1_COEF2);
++
++ /* CSC2 - RGB->YUV */
++ __raw_writel(0x4, pxp->base + HW_PXP_CSC2_CTRL);
++ __raw_writel(0x0096004D, pxp->base + HW_PXP_CSC2_COEF0);
++ __raw_writel(0x05DA001D, pxp->base + HW_PXP_CSC2_COEF1);
++ __raw_writel(0x007005B6, pxp->base + HW_PXP_CSC2_COEF2);
++ __raw_writel(0x057C009E, pxp->base + HW_PXP_CSC2_COEF3);
++ __raw_writel(0x000005E6, pxp->base + HW_PXP_CSC2_COEF4);
++ __raw_writel(0x00000000, pxp->base + HW_PXP_CSC2_COEF5);
++ } else {
++ /* Input & Output both YUV, so bypass both CSCs */
++
++ /* CSC1 - Bypass */
++ __raw_writel(0x40000000, pxp->base + HW_PXP_CSC1_COEF0);
++
++ /* CSC2 - Bypass */
++ __raw_writel(0x1, pxp->base + HW_PXP_CSC2_CTRL);
++ }
++ } else if (input_is_YUV && !output_is_YUV) {
++ /*
++ * Input = YUV, Output = RGB
++ * Use CSC1 to convert to RGB
++ */
++
++ /* CSC1 - YUV->RGB */
++ __raw_writel(0x84ab01f0, pxp->base + HW_PXP_CSC1_COEF0);
++ __raw_writel(0x01980204, pxp->base + HW_PXP_CSC1_COEF1);
++ __raw_writel(0x0730079c, pxp->base + HW_PXP_CSC1_COEF2);
++
++ /* CSC2 - Bypass */
++ __raw_writel(0x1, pxp->base + HW_PXP_CSC2_CTRL);
++ } else if (!input_is_YUV && output_is_YUV) {
++ /*
++ * Input = RGB, Output = YUV
++ * Use CSC2 to convert to YUV
++ */
++
++ /* CSC1 - Bypass */
++ __raw_writel(0x40000000, pxp->base + HW_PXP_CSC1_COEF0);
++
++ /* CSC2 - RGB->YUV */
++ __raw_writel(0x4, pxp->base + HW_PXP_CSC2_CTRL);
++ __raw_writel(0x0096004D, pxp->base + HW_PXP_CSC2_COEF0);
++ __raw_writel(0x05DA001D, pxp->base + HW_PXP_CSC2_COEF1);
++ __raw_writel(0x007005B6, pxp->base + HW_PXP_CSC2_COEF2);
++ __raw_writel(0x057C009E, pxp->base + HW_PXP_CSC2_COEF3);
++ __raw_writel(0x000005E6, pxp->base + HW_PXP_CSC2_COEF4);
++ __raw_writel(0x00000000, pxp->base + HW_PXP_CSC2_COEF5);
++ } else {
++ /*
++ * Input = RGB, Output = RGB
++ * Input & Output both RGB, so bypass both CSCs
++ */
++
++ /* CSC1 - Bypass */
++ __raw_writel(0x40000000, pxp->base + HW_PXP_CSC1_COEF0);
++
++ /* CSC2 - Bypass */
++ __raw_writel(0x1, pxp->base + HW_PXP_CSC2_CTRL);
++ }
++
++ /* YCrCb colorspace */
++ /* Not sure when we use this...no YCrCb formats are defined for PxP */
++ /*
++ __raw_writel(0x84ab01f0, HW_PXP_CSCCOEFF0_ADDR);
++ __raw_writel(0x01230204, HW_PXP_CSCCOEFF1_ADDR);
++ __raw_writel(0x0730079c, HW_PXP_CSCCOEFF2_ADDR);
++ */
++
++}
++
++static void pxp_set_s0buf(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *s0_params = &pxp_conf->s0_param;
++ struct pxp_proc_data *proc_data = &pxp_conf->proc_data;
++ dma_addr_t Y, U, V;
++ dma_addr_t Y1, U1, V1;
++ u32 offset, bpp = 1;
++ u32 pitch = s0_params->stride ? s0_params->stride :
++ s0_params->width;
++
++ Y = s0_params->paddr;
++
++ if (s0_params->pixel_fmt == PXP_PIX_FMT_RGB565)
++ bpp = 2;
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_RGB32)
++ bpp = 4;
++ offset = (proc_data->srect.top * s0_params->width +
++ proc_data->srect.left) * bpp;
++ /* clipping or cropping */
++ Y1 = Y + offset;
++ __raw_writel(Y1, pxp->base + HW_PXP_PS_BUF);
++ if ((s0_params->pixel_fmt == PXP_PIX_FMT_YUV420P) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_GREY) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_YUV422P)) {
++ /* Set to 1 if YUV format is 4:2:2 rather than 4:2:0 */
++ int s = 2;
++ if (s0_params->pixel_fmt == PXP_PIX_FMT_YUV422P)
++ s = 1;
++
++ offset = proc_data->srect.top * s0_params->width / 4 +
++ proc_data->srect.left / 2;
++ U = Y + (s0_params->width * s0_params->height);
++ U1 = U + offset;
++ V = U + ((s0_params->width * s0_params->height) >> s);
++ V1 = V + offset;
++ if (s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P) {
++ __raw_writel(V1, pxp->base + HW_PXP_PS_UBUF);
++ __raw_writel(U1, pxp->base + HW_PXP_PS_VBUF);
++ } else {
++ __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
++ __raw_writel(V1, pxp->base + HW_PXP_PS_VBUF);
++ }
++ } else if ((s0_params->pixel_fmt == PXP_PIX_FMT_NV12) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_NV21) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_NV16) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_NV61)) {
++ int s = 2;
++ if ((s0_params->pixel_fmt == PXP_PIX_FMT_NV16) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_NV61))
++ s = 1;
++
++ offset = (proc_data->srect.top * s0_params->width +
++ proc_data->srect.left) / s;
++ U = Y + (s0_params->width * s0_params->height);
++ U1 = U + offset;
++
++ __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
++ }
++
++ /* TODO: only support RGB565, Y8, Y4, YUV420 */
++ if (s0_params->pixel_fmt == PXP_PIX_FMT_GREY ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YUV420P ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_NV12 ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_NV21 ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_NV16 ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_NV61 ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YUV422P) {
++ __raw_writel(pitch, pxp->base + HW_PXP_PS_PITCH);
++ }
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_GY04)
++ __raw_writel(pitch >> 1,
++ pxp->base + HW_PXP_PS_PITCH);
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_RGB32)
++ __raw_writel(pitch << 2,
++ pxp->base + HW_PXP_PS_PITCH);
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_UYVY ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YUYV ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_VYUY ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YVYU)
++ __raw_writel(pitch << 1,
++ pxp->base + HW_PXP_PS_PITCH);
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_RGB565)
++ __raw_writel(pitch << 1,
++ pxp->base + HW_PXP_PS_PITCH);
++ else
++ __raw_writel(0, pxp->base + HW_PXP_PS_PITCH);
++}
++
++/**
++ * pxp_config() - configure PxP for a processing task
++ * @pxps: PXP context.
++ * @pxp_chan: PXP channel.
++ * @return: 0 on success or negative error code on failure.
++ */
++static int pxp_config(struct pxps *pxp, struct pxp_channel *pxp_chan)
++{
++ struct pxp_config_data *pxp_conf_data = &pxp->pxp_conf_state;
++ int ol_nr;
++ int i;
++
++ /* Configure PxP regs */
++ pxp_set_ctrl(pxp);
++ pxp_set_s0param(pxp);
++ pxp_set_s0crop(pxp);
++ pxp_set_scaling(pxp);
++ ol_nr = pxp_conf_data->layer_nr - 2;
++ while (ol_nr > 0) {
++ i = pxp_conf_data->layer_nr - 2 - ol_nr;
++ pxp_set_oln(i, pxp);
++ pxp_set_olparam(i, pxp);
++ /* only the color key in higher overlay will take effect. */
++ pxp_set_olcolorkey(i, pxp);
++ ol_nr--;
++ }
++ pxp_set_s0colorkey(pxp);
++ pxp_set_csc(pxp);
++ pxp_set_bg(pxp);
++ pxp_set_lut(pxp);
++
++ pxp_set_s0buf(pxp);
++ pxp_set_outbuf(pxp);
++
++ return 0;
++}
++
++static void pxp_clk_enable(struct pxps *pxp)
++{
++ mutex_lock(&pxp->clk_mutex);
++
++ if (pxp->clk_stat == CLK_STAT_ON) {
++ mutex_unlock(&pxp->clk_mutex);
++ return;
++ }
++
++ clk_prepare_enable(pxp->clk);
++ pxp->clk_stat = CLK_STAT_ON;
++
++ mutex_unlock(&pxp->clk_mutex);
++}
++
++static void pxp_clk_disable(struct pxps *pxp)
++{
++ unsigned long flags;
++
++ mutex_lock(&pxp->clk_mutex);
++
++ if (pxp->clk_stat == CLK_STAT_OFF) {
++ mutex_unlock(&pxp->clk_mutex);
++ return;
++ }
++
++ spin_lock_irqsave(&pxp->lock, flags);
++ if ((pxp->pxp_ongoing == 0) && list_empty(&head)) {
++ spin_unlock_irqrestore(&pxp->lock, flags);
++ clk_disable_unprepare(pxp->clk);
++ pxp->clk_stat = CLK_STAT_OFF;
++ } else
++ spin_unlock_irqrestore(&pxp->lock, flags);
++
++ mutex_unlock(&pxp->clk_mutex);
++}
++
++static inline void clkoff_callback(struct work_struct *w)
++{
++ struct pxps *pxp = container_of(w, struct pxps, work);
++
++ pxp_clk_disable(pxp);
++}
++
++static void pxp_clkoff_timer(unsigned long arg)
++{
++ struct pxps *pxp = (struct pxps *)arg;
++
++ if ((pxp->pxp_ongoing == 0) && list_empty(&head))
++ schedule_work(&pxp->work);
++ else
++ mod_timer(&pxp->clk_timer,
++ jiffies + msecs_to_jiffies(timeout_in_ms));
++}
++
++static struct pxp_tx_desc *pxpdma_first_queued(struct pxp_channel *pxp_chan)
++{
++ return list_entry(pxp_chan->queue.next, struct pxp_tx_desc, list);
++}
++
++/* called with pxp_chan->lock held */
++static void __pxpdma_dostart(struct pxp_channel *pxp_chan)
++{
++ struct pxp_dma *pxp_dma = to_pxp_dma(pxp_chan->dma_chan.device);
++ struct pxps *pxp = to_pxp(pxp_dma);
++ struct pxp_tx_desc *desc;
++ struct pxp_tx_desc *child;
++ int i = 0;
++
++ /* S0 */
++ desc = list_first_entry(&head, struct pxp_tx_desc, list);
++ memcpy(&pxp->pxp_conf_state.s0_param,
++ &desc->layer_param.s0_param, sizeof(struct pxp_layer_param));
++ memcpy(&pxp->pxp_conf_state.proc_data,
++ &desc->proc_data, sizeof(struct pxp_proc_data));
++
++ /* Save PxP configuration */
++ list_for_each_entry(child, &desc->tx_list, list) {
++ if (i == 0) { /* Output */
++ memcpy(&pxp->pxp_conf_state.out_param,
++ &child->layer_param.out_param,
++ sizeof(struct pxp_layer_param));
++ } else { /* Overlay */
++ memcpy(&pxp->pxp_conf_state.ol_param[i - 1],
++ &child->layer_param.ol_param,
++ sizeof(struct pxp_layer_param));
++ }
++
++ i++;
++ }
++ pr_debug("%s:%d S0 w/h %d/%d paddr %08x\n", __func__, __LINE__,
++ pxp->pxp_conf_state.s0_param.width,
++ pxp->pxp_conf_state.s0_param.height,
++ pxp->pxp_conf_state.s0_param.paddr);
++ pr_debug("%s:%d OUT w/h %d/%d paddr %08x\n", __func__, __LINE__,
++ pxp->pxp_conf_state.out_param.width,
++ pxp->pxp_conf_state.out_param.height,
++ pxp->pxp_conf_state.out_param.paddr);
++}
++
++static void pxpdma_dostart_work(struct pxps *pxp)
++{
++ struct pxp_channel *pxp_chan = NULL;
++ unsigned long flags;
++ struct pxp_tx_desc *desc = NULL;
++
++ spin_lock_irqsave(&pxp->lock, flags);
++
++ desc = list_entry(head.next, struct pxp_tx_desc, list);
++ pxp_chan = to_pxp_channel(desc->txd.chan);
++
++ __pxpdma_dostart(pxp_chan);
++
++ /* Configure PxP */
++ pxp_config(pxp, pxp_chan);
++
++ pxp_start(pxp);
++
++ spin_unlock_irqrestore(&pxp->lock, flags);
++}
++
++static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct pxps *pxp)
++{
++ unsigned long flags;
++ struct pxp_tx_desc *desc = NULL;
++
++ do {
++ desc = pxpdma_first_queued(pxp_chan);
++ spin_lock_irqsave(&pxp->lock, flags);
++ list_move_tail(&desc->list, &head);
++ spin_unlock_irqrestore(&pxp->lock, flags);
++ } while (!list_empty(&pxp_chan->queue));
++}
++
++static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++ struct pxp_tx_desc *desc = to_tx_desc(tx);
++ struct pxp_channel *pxp_chan = to_pxp_channel(tx->chan);
++ dma_cookie_t cookie;
++
++ dev_dbg(&pxp_chan->dma_chan.dev->device, "received TX\n");
++
++ /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
++ spin_lock(&pxp_chan->lock);
++
++ cookie = pxp_chan->dma_chan.cookie;
++
++ if (++cookie < 0)
++ cookie = 1;
++
++ /* from dmaengine.h: "last cookie value returned to client" */
++ pxp_chan->dma_chan.cookie = cookie;
++ tx->cookie = cookie;
++
++ /* Here we add the tx descriptor to our PxP task queue. */
++ list_add_tail(&desc->list, &pxp_chan->queue);
++
++ spin_unlock(&pxp_chan->lock);
++
++ dev_dbg(&pxp_chan->dma_chan.dev->device, "done TX\n");
++
++ return cookie;
++}
++
++/**
++ * pxp_init_channel() - initialize a PXP channel.
++ * @pxp_dma: PXP DMA context.
++ * @pchan: pointer to the channel object.
++ * @return 0 on success or negative error code on failure.
++ */
++static int pxp_init_channel(struct pxp_dma *pxp_dma,
++ struct pxp_channel *pxp_chan)
++{
++ int ret = 0;
++
++ /*
++ * We are using _virtual_ channel here.
++ * Each channel contains all parameters of corresponding layers
++ * for one transaction; each layer is represented as one descriptor
++ * (i.e., pxp_tx_desc) here.
++ */
++
++ INIT_LIST_HEAD(&pxp_chan->queue);
++
++ return ret;
++}
++
++static irqreturn_t pxp_irq(int irq, void *dev_id)
++{
++ struct pxps *pxp = dev_id;
++ struct pxp_channel *pxp_chan;
++ struct pxp_tx_desc *desc;
++ struct pxp_tx_desc *child, *_child;
++ dma_async_tx_callback callback;
++ void *callback_param;
++ unsigned long flags;
++ u32 hist_status;
++
++ dump_pxp_reg(pxp);
++
++ hist_status =
++ __raw_readl(pxp->base + HW_PXP_HIST_CTRL) & BM_PXP_HIST_CTRL_STATUS;
++
++ __raw_writel(BM_PXP_STAT_IRQ, pxp->base + HW_PXP_STAT_CLR);
++
++ spin_lock_irqsave(&pxp->lock, flags);
++
++ if (list_empty(&head)) {
++ pxp->pxp_ongoing = 0;
++ spin_unlock_irqrestore(&pxp->lock, flags);
++ return IRQ_NONE;
++ }
++
++ /* Get descriptor and call callback */
++ desc = list_entry(head.next, struct pxp_tx_desc, list);
++ pxp_chan = to_pxp_channel(desc->txd.chan);
++
++ pxp_chan->completed = desc->txd.cookie;
++
++ callback = desc->txd.callback;
++ callback_param = desc->txd.callback_param;
++
++ /* Send histogram status back to caller */
++ desc->hist_status = hist_status;
++
++ if ((desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
++ callback(callback_param);
++
++ pxp_chan->status = PXP_CHANNEL_INITIALIZED;
++
++ list_for_each_entry_safe(child, _child, &desc->tx_list, list) {
++ list_del_init(&child->list);
++ kmem_cache_free(tx_desc_cache, (void *)child);
++ }
++ list_del_init(&desc->list);
++ kmem_cache_free(tx_desc_cache, (void *)desc);
++
++ complete(&pxp->complete);
++ pxp->pxp_ongoing = 0;
++ mod_timer(&pxp->clk_timer, jiffies + msecs_to_jiffies(timeout_in_ms));
++
++ spin_unlock_irqrestore(&pxp->lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++/* allocate/free dma tx descriptor dynamically*/
++static struct pxp_tx_desc *pxpdma_desc_alloc(struct pxp_channel *pxp_chan)
++{
++ struct pxp_tx_desc *desc = NULL;
++ struct dma_async_tx_descriptor *txd = NULL;
++
++ desc = kmem_cache_alloc(tx_desc_cache, GFP_KERNEL | __GFP_ZERO);
++ if (desc == NULL)
++ return NULL;
++
++ INIT_LIST_HEAD(&desc->list);
++ INIT_LIST_HEAD(&desc->tx_list);
++ txd = &desc->txd;
++ dma_async_tx_descriptor_init(txd, &pxp_chan->dma_chan);
++ txd->tx_submit = pxp_tx_submit;
++
++ return desc;
++}
++
++/* Allocate and initialise a transfer descriptor. */
++static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
++ struct scatterlist
++ *sgl,
++ unsigned int sg_len,
++ enum
++ dma_transfer_direction
++ direction,
++ unsigned long tx_flags,
++ void *context)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
++ struct pxps *pxp = to_pxp(pxp_dma);
++ struct pxp_tx_desc *desc = NULL;
++ struct pxp_tx_desc *first = NULL, *prev = NULL;
++ struct scatterlist *sg;
++ dma_addr_t phys_addr;
++ int i;
++
++ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
++ dev_err(chan->device->dev, "Invalid DMA direction %d!\n",
++ direction);
++ return NULL;
++ }
++
++ if (unlikely(sg_len < 2))
++ return NULL;
++
++ for_each_sg(sgl, sg, sg_len, i) {
++ desc = pxpdma_desc_alloc(pxp_chan);
++ if (!desc) {
++ dev_err(chan->device->dev, "no enough memory to allocate tx descriptor\n");
++ return NULL;
++ }
++
++ phys_addr = sg_dma_address(sg);
++
++ if (!first) {
++ first = desc;
++
++ desc->layer_param.s0_param.paddr = phys_addr;
++ } else {
++ list_add_tail(&desc->list, &first->tx_list);
++ prev->next = desc;
++ desc->next = NULL;
++
++ if (i == 1)
++ desc->layer_param.out_param.paddr = phys_addr;
++ else
++ desc->layer_param.ol_param.paddr = phys_addr;
++ }
++
++ prev = desc;
++ }
++
++ pxp->pxp_conf_state.layer_nr = sg_len;
++ first->txd.flags = tx_flags;
++ first->len = sg_len;
++ pr_debug("%s:%d first %p, first->len %d, flags %08x\n",
++ __func__, __LINE__, first, first->len, first->txd.flags);
++
++ return &first->txd;
++}
++
++static void pxp_issue_pending(struct dma_chan *chan)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
++ struct pxps *pxp = to_pxp(pxp_dma);
++
++ spin_lock(&pxp_chan->lock);
++
++ if (list_empty(&pxp_chan->queue)) {
++ spin_unlock(&pxp_chan->lock);
++ return;
++ }
++
++ pxpdma_dequeue(pxp_chan, pxp);
++ pxp_chan->status = PXP_CHANNEL_READY;
++
++ spin_unlock(&pxp_chan->lock);
++
++ pxp_clk_enable(pxp);
++ wake_up_interruptible(&pxp->thread_waitq);
++}
++
++static void __pxp_terminate_all(struct dma_chan *chan)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++
++ pxp_chan->status = PXP_CHANNEL_INITIALIZED;
++}
++
++static int pxp_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
++ unsigned long arg)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++
++ /* Only supports DMA_TERMINATE_ALL */
++ if (cmd != DMA_TERMINATE_ALL)
++ return -ENXIO;
++
++ spin_lock(&pxp_chan->lock);
++ __pxp_terminate_all(chan);
++ spin_unlock(&pxp_chan->lock);
++
++ return 0;
++}
++
++static int pxp_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
++ int ret;
++
++ /* dmaengine.c now guarantees to only offer free channels */
++ BUG_ON(chan->client_count > 1);
++ WARN_ON(pxp_chan->status != PXP_CHANNEL_FREE);
++
++ chan->cookie = 1;
++ pxp_chan->completed = -ENXIO;
++
++ pr_debug("%s dma_chan.chan_id %d\n", __func__, chan->chan_id);
++ ret = pxp_init_channel(pxp_dma, pxp_chan);
++ if (ret < 0)
++ goto err_chan;
++
++ pxp_chan->status = PXP_CHANNEL_INITIALIZED;
++
++ dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
++ chan->chan_id, pxp_chan->eof_irq);
++
++ return ret;
++
++err_chan:
++ return ret;
++}
++
++static void pxp_free_chan_resources(struct dma_chan *chan)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++
++ spin_lock(&pxp_chan->lock);
++
++ __pxp_terminate_all(chan);
++
++ pxp_chan->status = PXP_CHANNEL_FREE;
++
++ spin_unlock(&pxp_chan->lock);
++}
++
++static enum dma_status pxp_tx_status(struct dma_chan *chan,
++ dma_cookie_t cookie,
++ struct dma_tx_state *txstate)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++
++ if (cookie != chan->cookie)
++ return DMA_ERROR;
++
++ if (txstate) {
++ txstate->last = pxp_chan->completed;
++ txstate->used = chan->cookie;
++ txstate->residue = 0;
++ }
++ return DMA_COMPLETE;
++}
++
++static int pxp_hw_init(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_proc_data *proc_data = &pxp_conf->proc_data;
++ u32 reg_val;
++
++ /* Pull PxP out of reset */
++ __raw_writel(0, pxp->base + HW_PXP_CTRL);
++
++ /* Config defaults */
++
++ /* Initialize non-channel-specific PxP parameters */
++ proc_data->drect.left = proc_data->srect.left = 0;
++ proc_data->drect.top = proc_data->srect.top = 0;
++ proc_data->drect.width = proc_data->srect.width = 0;
++ proc_data->drect.height = proc_data->srect.height = 0;
++ proc_data->scaling = 0;
++ proc_data->hflip = 0;
++ proc_data->vflip = 0;
++ proc_data->rotate = 0;
++ proc_data->bgcolor = 0;
++
++ /* Initialize S0 channel parameters */
++ pxp_conf->s0_param.pixel_fmt = pxp_s0_formats[0];
++ pxp_conf->s0_param.width = 0;
++ pxp_conf->s0_param.height = 0;
++ pxp_conf->s0_param.color_key = -1;
++ pxp_conf->s0_param.color_key_enable = false;
++
++ /* Initialize OL channel parameters */
++ pxp_conf->ol_param[0].combine_enable = false;
++ pxp_conf->ol_param[0].width = 0;
++ pxp_conf->ol_param[0].height = 0;
++ pxp_conf->ol_param[0].pixel_fmt = PXP_PIX_FMT_RGB565;
++ pxp_conf->ol_param[0].color_key_enable = false;
++ pxp_conf->ol_param[0].color_key = -1;
++ pxp_conf->ol_param[0].global_alpha_enable = false;
++ pxp_conf->ol_param[0].global_alpha = 0;
++ pxp_conf->ol_param[0].local_alpha_enable = false;
++
++ /* Initialize Output channel parameters */
++ pxp_conf->out_param.width = 0;
++ pxp_conf->out_param.height = 0;
++ pxp_conf->out_param.pixel_fmt = PXP_PIX_FMT_RGB565;
++
++ proc_data->overlay_state = 0;
++
++ /* Write default h/w config */
++ pxp_set_ctrl(pxp);
++ pxp_set_s0param(pxp);
++ pxp_set_s0crop(pxp);
++ /*
++ * simply program the ULC to a higher value than the LRC
++ * to avoid any AS pixels to show up in the output buffer.
++ */
++ __raw_writel(0xFFFFFFFF, pxp->base + HW_PXP_OUT_AS_ULC);
++ pxp_set_olparam(0, pxp);
++ pxp_set_olcolorkey(0, pxp);
++
++ pxp_set_s0colorkey(pxp);
++ pxp_set_csc(pxp);
++ pxp_set_bg(pxp);
++ pxp_set_lut(pxp);
++
++ /* One-time histogram configuration */
++ reg_val =
++ BF_PXP_HIST_CTRL_PANEL_MODE(BV_PXP_HIST_CTRL_PANEL_MODE__GRAY16);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST_CTRL);
++
++ reg_val = BF_PXP_HIST2_PARAM_VALUE0(0x00) |
++ BF_PXP_HIST2_PARAM_VALUE1(0x00F);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST2_PARAM);
++
++ reg_val = BF_PXP_HIST4_PARAM_VALUE0(0x00) |
++ BF_PXP_HIST4_PARAM_VALUE1(0x05) |
++ BF_PXP_HIST4_PARAM_VALUE2(0x0A) | BF_PXP_HIST4_PARAM_VALUE3(0x0F);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST4_PARAM);
++
++ reg_val = BF_PXP_HIST8_PARAM0_VALUE0(0x00) |
++ BF_PXP_HIST8_PARAM0_VALUE1(0x02) |
++ BF_PXP_HIST8_PARAM0_VALUE2(0x04) | BF_PXP_HIST8_PARAM0_VALUE3(0x06);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST8_PARAM0);
++ reg_val = BF_PXP_HIST8_PARAM1_VALUE4(0x09) |
++ BF_PXP_HIST8_PARAM1_VALUE5(0x0B) |
++ BF_PXP_HIST8_PARAM1_VALUE6(0x0D) | BF_PXP_HIST8_PARAM1_VALUE7(0x0F);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST8_PARAM1);
++
++ reg_val = BF_PXP_HIST16_PARAM0_VALUE0(0x00) |
++ BF_PXP_HIST16_PARAM0_VALUE1(0x01) |
++ BF_PXP_HIST16_PARAM0_VALUE2(0x02) |
++ BF_PXP_HIST16_PARAM0_VALUE3(0x03);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST16_PARAM0);
++ reg_val = BF_PXP_HIST16_PARAM1_VALUE4(0x04) |
++ BF_PXP_HIST16_PARAM1_VALUE5(0x05) |
++ BF_PXP_HIST16_PARAM1_VALUE6(0x06) |
++ BF_PXP_HIST16_PARAM1_VALUE7(0x07);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST16_PARAM1);
++ reg_val = BF_PXP_HIST16_PARAM2_VALUE8(0x08) |
++ BF_PXP_HIST16_PARAM2_VALUE9(0x09) |
++ BF_PXP_HIST16_PARAM2_VALUE10(0x0A) |
++ BF_PXP_HIST16_PARAM2_VALUE11(0x0B);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST16_PARAM2);
++ reg_val = BF_PXP_HIST16_PARAM3_VALUE12(0x0C) |
++ BF_PXP_HIST16_PARAM3_VALUE13(0x0D) |
++ BF_PXP_HIST16_PARAM3_VALUE14(0x0E) |
++ BF_PXP_HIST16_PARAM3_VALUE15(0x0F);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST16_PARAM3);
++
++ return 0;
++}
++
++static int pxp_dma_init(struct pxps *pxp)
++{
++ struct pxp_dma *pxp_dma = &pxp->pxp_dma;
++ struct dma_device *dma = &pxp_dma->dma;
++ int i;
++
++ dma_cap_set(DMA_SLAVE, dma->cap_mask);
++ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
++
++ /* Compulsory common fields */
++ dma->dev = pxp->dev;
++ dma->device_alloc_chan_resources = pxp_alloc_chan_resources;
++ dma->device_free_chan_resources = pxp_free_chan_resources;
++ dma->device_tx_status = pxp_tx_status;
++ dma->device_issue_pending = pxp_issue_pending;
++
++ /* Compulsory for DMA_SLAVE fields */
++ dma->device_prep_slave_sg = pxp_prep_slave_sg;
++ dma->device_control = pxp_control;
++
++ /* Initialize PxP Channels */
++ INIT_LIST_HEAD(&dma->channels);
++ for (i = 0; i < NR_PXP_VIRT_CHANNEL; i++) {
++ struct pxp_channel *pxp_chan = pxp->channel + i;
++ struct dma_chan *dma_chan = &pxp_chan->dma_chan;
++
++ spin_lock_init(&pxp_chan->lock);
++
++ /* Only one EOF IRQ for PxP, shared by all channels */
++ pxp_chan->eof_irq = pxp->irq;
++ pxp_chan->status = PXP_CHANNEL_FREE;
++ pxp_chan->completed = -ENXIO;
++ snprintf(pxp_chan->eof_name, sizeof(pxp_chan->eof_name),
++ "PXP EOF %d", i);
++
++ dma_chan->device = &pxp_dma->dma;
++ dma_chan->cookie = 1;
++ dma_chan->chan_id = i;
++ list_add_tail(&dma_chan->device_node, &dma->channels);
++ }
++
++ return dma_async_device_register(&pxp_dma->dma);
++}
++
++static ssize_t clk_off_timeout_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", timeout_in_ms);
++}
++
++static ssize_t clk_off_timeout_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int val;
++ if (sscanf(buf, "%d", &val) > 0) {
++ timeout_in_ms = val;
++ return count;
++ }
++ return -EINVAL;
++}
++
++static DEVICE_ATTR(clk_off_timeout, 0644, clk_off_timeout_show,
++ clk_off_timeout_store);
++
++static ssize_t block_size_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ return sprintf(buf, "%d\n", block_size);
++}
++
++static ssize_t block_size_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ char **last = NULL;
++
++ block_size = simple_strtoul(buf, last, 0);
++ if (block_size > 1)
++ block_size = 1;
++
++ return count;
++}
++static DEVICE_ATTR(block_size, S_IWUSR | S_IRUGO,
++ block_size_show, block_size_store);
++
++static const struct of_device_id imx_pxpdma_dt_ids[] = {
++ { .compatible = "fsl,imx6dl-pxp-dma", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_pxpdma_dt_ids);
++
++static int has_pending_task(struct pxps *pxp, struct pxp_channel *task)
++{
++ int found;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pxp->lock, flags);
++ found = !list_empty(&head);
++ spin_unlock_irqrestore(&pxp->lock, flags);
++
++ return found;
++}
++
++static int pxp_dispatch_thread(void *argv)
++{
++ struct pxps *pxp = (struct pxps *)argv;
++ struct pxp_channel *pending = NULL;
++ unsigned long flags;
++
++ while (!kthread_should_stop()) {
++ int ret;
++ ret = wait_event_interruptible(pxp->thread_waitq,
++ has_pending_task(pxp, pending));
++ if (signal_pending(current))
++ continue;
++
++ if (kthread_should_stop())
++ break;
++
++ spin_lock_irqsave(&pxp->lock, flags);
++ pxp->pxp_ongoing = 1;
++ spin_unlock_irqrestore(&pxp->lock, flags);
++ init_completion(&pxp->complete);
++ pxpdma_dostart_work(pxp);
++ ret = wait_for_completion_timeout(&pxp->complete, 2 * HZ);
++ if (ret == 0) {
++ printk(KERN_EMERG "%s: task is timeout\n\n", __func__);
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static int pxp_probe(struct platform_device *pdev)
++{
++ struct pxps *pxp;
++ struct resource *res;
++ int irq;
++ int err = 0;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ irq = platform_get_irq(pdev, 0);
++ if (!res || irq < 0) {
++ err = -ENODEV;
++ goto exit;
++ }
++
++ pxp = devm_kzalloc(&pdev->dev, sizeof(*pxp), GFP_KERNEL);
++ if (!pxp) {
++ dev_err(&pdev->dev, "failed to allocate control object\n");
++ err = -ENOMEM;
++ goto exit;
++ }
++
++ pxp->dev = &pdev->dev;
++
++ platform_set_drvdata(pdev, pxp);
++ pxp->irq = irq;
++
++ pxp->pxp_ongoing = 0;
++ pxp->lut_state = 0;
++
++ spin_lock_init(&pxp->lock);
++ mutex_init(&pxp->clk_mutex);
++
++ pxp->base = devm_request_and_ioremap(&pdev->dev, res);
++ if (pxp->base == NULL) {
++ dev_err(&pdev->dev, "Couldn't ioremap regs\n");
++ err = -ENODEV;
++ goto exit;
++ }
++
++ pxp->pdev = pdev;
++
++ pxp->clk = devm_clk_get(&pdev->dev, "pxp-axi");
++ clk_prepare_enable(pxp->clk);
++
++ err = pxp_hw_init(pxp);
++ clk_disable_unprepare(pxp->clk);
++ if (err) {
++ dev_err(&pdev->dev, "failed to initialize hardware\n");
++ goto exit;
++ }
++
++ err = devm_request_irq(&pdev->dev, pxp->irq, pxp_irq, 0,
++ "pxp-dmaengine", pxp);
++ if (err)
++ goto exit;
++ /* Initialize DMA engine */
++ err = pxp_dma_init(pxp);
++ if (err < 0)
++ goto exit;
++
++ if (device_create_file(&pdev->dev, &dev_attr_clk_off_timeout)) {
++ dev_err(&pdev->dev,
++ "Unable to create file from clk_off_timeout\n");
++ goto exit;
++ }
++
++ device_create_file(&pdev->dev, &dev_attr_block_size);
++ dump_pxp_reg(pxp);
++
++ INIT_WORK(&pxp->work, clkoff_callback);
++ init_timer(&pxp->clk_timer);
++ pxp->clk_timer.function = pxp_clkoff_timer;
++ pxp->clk_timer.data = (unsigned long)pxp;
++
++ /* allocate a kernel thread to dispatch pxp conf */
++ pxp->dispatch = kthread_run(pxp_dispatch_thread, pxp, "pxp_dispatch");
++ if (IS_ERR(pxp->dispatch)) {
++ err = PTR_ERR(pxp->dispatch);
++ goto exit;
++ }
++ init_waitqueue_head(&pxp->thread_waitq);
++ tx_desc_cache = kmem_cache_create("tx_desc", sizeof(struct pxp_tx_desc),
++ 0, SLAB_HWCACHE_ALIGN, NULL);
++ if (!tx_desc_cache) {
++ err = -ENOMEM;
++ goto exit;
++ }
++
++ register_pxp_device();
++
++exit:
++ if (err)
++ dev_err(&pdev->dev, "Exiting (unsuccessfully) pxp_probe()\n");
++ return err;
++}
++
++static int pxp_remove(struct platform_device *pdev)
++{
++ struct pxps *pxp = platform_get_drvdata(pdev);
++
++ unregister_pxp_device();
++ kmem_cache_destroy(tx_desc_cache);
++ kthread_stop(pxp->dispatch);
++ cancel_work_sync(&pxp->work);
++ del_timer_sync(&pxp->clk_timer);
++ clk_disable_unprepare(pxp->clk);
++ device_remove_file(&pdev->dev, &dev_attr_clk_off_timeout);
++ device_remove_file(&pdev->dev, &dev_attr_block_size);
++ dma_async_device_unregister(&(pxp->pxp_dma.dma));
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int pxp_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct pxps *pxp = platform_get_drvdata(pdev);
++
++ pxp_clk_enable(pxp);
++ while (__raw_readl(pxp->base + HW_PXP_CTRL) & BM_PXP_CTRL_ENABLE)
++ ;
++
++ __raw_writel(BM_PXP_CTRL_SFTRST, pxp->base + HW_PXP_CTRL);
++ pxp_clk_disable(pxp);
++
++ return 0;
++}
++
++static int pxp_resume(struct platform_device *pdev)
++{
++ struct pxps *pxp = platform_get_drvdata(pdev);
++
++ pxp_clk_enable(pxp);
++ /* Pull PxP out of reset */
++ __raw_writel(0, pxp->base + HW_PXP_CTRL);
++ pxp_clk_disable(pxp);
++
++ return 0;
++}
++#else
++#define pxp_suspend NULL
++#define pxp_resume NULL
++#endif
++
++static struct platform_driver pxp_driver = {
++ .driver = {
++ .name = "imx-pxp",
++ .of_match_table = of_match_ptr(imx_pxpdma_dt_ids),
++ },
++ .probe = pxp_probe,
++ .remove = pxp_remove,
++ .suspend = pxp_suspend,
++ .resume = pxp_resume,
++};
++
++module_platform_driver(pxp_driver);
++
++
++MODULE_DESCRIPTION("i.MX PxP driver");
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/dma/pxp/regs-pxp_v2.h linux-openelec/drivers/dma/pxp/regs-pxp_v2.h
+--- linux-3.14.36/drivers/dma/pxp/regs-pxp_v2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/dma/pxp/regs-pxp_v2.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1152 @@
++/*
++ * Freescale PXP Register Definitions
++ *
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * This file is created by xml file. Don't Edit it.
++ *
++ * Xml Revision: 1.29
++ * Template revision: 1.3
++ */
++
++#ifndef __ARCH_ARM___PXP_H
++#define __ARCH_ARM___PXP_H
++
++#define HW_PXP_CTRL (0x00000000)
++#define HW_PXP_CTRL_SET (0x00000004)
++#define HW_PXP_CTRL_CLR (0x00000008)
++#define HW_PXP_CTRL_TOG (0x0000000c)
++
++#define BM_PXP_CTRL_SFTRST 0x80000000
++#define BM_PXP_CTRL_CLKGATE 0x40000000
++#define BM_PXP_CTRL_RSVD4 0x20000000
++#define BM_PXP_CTRL_EN_REPEAT 0x10000000
++#define BP_PXP_CTRL_RSVD3 26
++#define BM_PXP_CTRL_RSVD3 0x0C000000
++#define BF_PXP_CTRL_RSVD3(v) \
++ (((v) << 26) & BM_PXP_CTRL_RSVD3)
++#define BP_PXP_CTRL_INTERLACED_INPUT 24
++#define BM_PXP_CTRL_INTERLACED_INPUT 0x03000000
++#define BF_PXP_CTRL_INTERLACED_INPUT(v) \
++ (((v) << 24) & BM_PXP_CTRL_INTERLACED_INPUT)
++#define BV_PXP_CTRL_INTERLACED_INPUT__PROGRESSIVE 0x0
++#define BV_PXP_CTRL_INTERLACED_INPUT__FIELD0 0x2
++#define BV_PXP_CTRL_INTERLACED_INPUT__FIELD1 0x3
++#define BM_PXP_CTRL_BLOCK_SIZE 0x00800000
++#define BV_PXP_CTRL_BLOCK_SIZE__8X8 0x0
++#define BV_PXP_CTRL_BLOCK_SIZE__16X16 0x1
++#define BM_PXP_CTRL_ROT_POS 0x00400000
++#define BM_PXP_CTRL_IN_PLACE 0x00200000
++#define BP_PXP_CTRL_RSVD1 12
++#define BM_PXP_CTRL_RSVD1 0x001FF000
++#define BF_PXP_CTRL_RSVD1(v) \
++ (((v) << 12) & BM_PXP_CTRL_RSVD1)
++#define BM_PXP_CTRL_VFLIP 0x00000800
++#define BM_PXP_CTRL_HFLIP 0x00000400
++#define BP_PXP_CTRL_ROTATE 8
++#define BM_PXP_CTRL_ROTATE 0x00000300
++#define BF_PXP_CTRL_ROTATE(v) \
++ (((v) << 8) & BM_PXP_CTRL_ROTATE)
++#define BV_PXP_CTRL_ROTATE__ROT_0 0x0
++#define BV_PXP_CTRL_ROTATE__ROT_90 0x1
++#define BV_PXP_CTRL_ROTATE__ROT_180 0x2
++#define BV_PXP_CTRL_ROTATE__ROT_270 0x3
++#define BP_PXP_CTRL_RSVD0 5
++#define BM_PXP_CTRL_RSVD0 0x000000E0
++#define BF_PXP_CTRL_RSVD0(v) \
++ (((v) << 5) & BM_PXP_CTRL_RSVD0)
++#define BM_PXP_CTRL_ENABLE_LCD_HANDSHAKE 0x00000010
++#define BM_PXP_CTRL_LUT_DMA_IRQ_ENABLE 0x00000008
++#define BM_PXP_CTRL_NEXT_IRQ_ENABLE 0x00000004
++#define BM_PXP_CTRL_IRQ_ENABLE 0x00000002
++#define BM_PXP_CTRL_ENABLE 0x00000001
++
++#define HW_PXP_STAT (0x00000010)
++#define HW_PXP_STAT_SET (0x00000014)
++#define HW_PXP_STAT_CLR (0x00000018)
++#define HW_PXP_STAT_TOG (0x0000001c)
++
++#define BP_PXP_STAT_BLOCKX 24
++#define BM_PXP_STAT_BLOCKX 0xFF000000
++#define BF_PXP_STAT_BLOCKX(v) \
++ (((v) << 24) & BM_PXP_STAT_BLOCKX)
++#define BP_PXP_STAT_BLOCKY 16
++#define BM_PXP_STAT_BLOCKY 0x00FF0000
++#define BF_PXP_STAT_BLOCKY(v) \
++ (((v) << 16) & BM_PXP_STAT_BLOCKY)
++#define BP_PXP_STAT_RSVD2 9
++#define BM_PXP_STAT_RSVD2 0x0000FE00
++#define BF_PXP_STAT_RSVD2(v) \
++ (((v) << 9) & BM_PXP_STAT_RSVD2)
++#define BM_PXP_STAT_LUT_DMA_LOAD_DONE_IRQ 0x00000100
++#define BP_PXP_STAT_AXI_ERROR_ID 4
++#define BM_PXP_STAT_AXI_ERROR_ID 0x000000F0
++#define BF_PXP_STAT_AXI_ERROR_ID(v) \
++ (((v) << 4) & BM_PXP_STAT_AXI_ERROR_ID)
++#define BM_PXP_STAT_NEXT_IRQ 0x00000008
++#define BM_PXP_STAT_AXI_READ_ERROR 0x00000004
++#define BM_PXP_STAT_AXI_WRITE_ERROR 0x00000002
++#define BM_PXP_STAT_IRQ 0x00000001
++
++#define HW_PXP_OUT_CTRL (0x00000020)
++#define HW_PXP_OUT_CTRL_SET (0x00000024)
++#define HW_PXP_OUT_CTRL_CLR (0x00000028)
++#define HW_PXP_OUT_CTRL_TOG (0x0000002c)
++
++#define BP_PXP_OUT_CTRL_ALPHA 24
++#define BM_PXP_OUT_CTRL_ALPHA 0xFF000000
++#define BF_PXP_OUT_CTRL_ALPHA(v) \
++ (((v) << 24) & BM_PXP_OUT_CTRL_ALPHA)
++#define BM_PXP_OUT_CTRL_ALPHA_OUTPUT 0x00800000
++#define BP_PXP_OUT_CTRL_RSVD1 10
++#define BM_PXP_OUT_CTRL_RSVD1 0x007FFC00
++#define BF_PXP_OUT_CTRL_RSVD1(v) \
++ (((v) << 10) & BM_PXP_OUT_CTRL_RSVD1)
++#define BP_PXP_OUT_CTRL_INTERLACED_OUTPUT 8
++#define BM_PXP_OUT_CTRL_INTERLACED_OUTPUT 0x00000300
++#define BF_PXP_OUT_CTRL_INTERLACED_OUTPUT(v) \
++ (((v) << 8) & BM_PXP_OUT_CTRL_INTERLACED_OUTPUT)
++#define BV_PXP_OUT_CTRL_INTERLACED_OUTPUT__PROGRESSIVE 0x0
++#define BV_PXP_OUT_CTRL_INTERLACED_OUTPUT__FIELD0 0x1
++#define BV_PXP_OUT_CTRL_INTERLACED_OUTPUT__FIELD1 0x2
++#define BV_PXP_OUT_CTRL_INTERLACED_OUTPUT__INTERLACED 0x3
++#define BP_PXP_OUT_CTRL_RSVD0 5
++#define BM_PXP_OUT_CTRL_RSVD0 0x000000E0
++#define BF_PXP_OUT_CTRL_RSVD0(v) \
++ (((v) << 5) & BM_PXP_OUT_CTRL_RSVD0)
++#define BP_PXP_OUT_CTRL_FORMAT 0
++#define BM_PXP_OUT_CTRL_FORMAT 0x0000001F
++#define BF_PXP_OUT_CTRL_FORMAT(v) \
++ (((v) << 0) & BM_PXP_OUT_CTRL_FORMAT)
++#define BV_PXP_OUT_CTRL_FORMAT__ARGB8888 0x0
++#define BV_PXP_OUT_CTRL_FORMAT__RGB888 0x4
++#define BV_PXP_OUT_CTRL_FORMAT__RGB888P 0x5
++#define BV_PXP_OUT_CTRL_FORMAT__ARGB1555 0x8
++#define BV_PXP_OUT_CTRL_FORMAT__ARGB4444 0x9
++#define BV_PXP_OUT_CTRL_FORMAT__RGB555 0xC
++#define BV_PXP_OUT_CTRL_FORMAT__RGB444 0xD
++#define BV_PXP_OUT_CTRL_FORMAT__RGB565 0xE
++#define BV_PXP_OUT_CTRL_FORMAT__YUV1P444 0x10
++#define BV_PXP_OUT_CTRL_FORMAT__UYVY1P422 0x12
++#define BV_PXP_OUT_CTRL_FORMAT__VYUY1P422 0x13
++#define BV_PXP_OUT_CTRL_FORMAT__Y8 0x14
++#define BV_PXP_OUT_CTRL_FORMAT__Y4 0x15
++#define BV_PXP_OUT_CTRL_FORMAT__YUV2P422 0x18
++#define BV_PXP_OUT_CTRL_FORMAT__YUV2P420 0x19
++#define BV_PXP_OUT_CTRL_FORMAT__YVU2P422 0x1A
++#define BV_PXP_OUT_CTRL_FORMAT__YVU2P420 0x1B
++
++#define HW_PXP_OUT_BUF (0x00000030)
++
++#define BP_PXP_OUT_BUF_ADDR 0
++#define BM_PXP_OUT_BUF_ADDR 0xFFFFFFFF
++#define BF_PXP_OUT_BUF_ADDR(v) (v)
++
++#define HW_PXP_OUT_BUF2 (0x00000040)
++
++#define BP_PXP_OUT_BUF2_ADDR 0
++#define BM_PXP_OUT_BUF2_ADDR 0xFFFFFFFF
++#define BF_PXP_OUT_BUF2_ADDR(v) (v)
++
++#define HW_PXP_OUT_PITCH (0x00000050)
++
++#define BP_PXP_OUT_PITCH_RSVD 16
++#define BM_PXP_OUT_PITCH_RSVD 0xFFFF0000
++#define BF_PXP_OUT_PITCH_RSVD(v) \
++ (((v) << 16) & BM_PXP_OUT_PITCH_RSVD)
++#define BP_PXP_OUT_PITCH_PITCH 0
++#define BM_PXP_OUT_PITCH_PITCH 0x0000FFFF
++#define BF_PXP_OUT_PITCH_PITCH(v) \
++ (((v) << 0) & BM_PXP_OUT_PITCH_PITCH)
++
++#define HW_PXP_OUT_LRC (0x00000060)
++
++#define BP_PXP_OUT_LRC_RSVD1 30
++#define BM_PXP_OUT_LRC_RSVD1 0xC0000000
++#define BF_PXP_OUT_LRC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_LRC_RSVD1)
++#define BP_PXP_OUT_LRC_X 16
++#define BM_PXP_OUT_LRC_X 0x3FFF0000
++#define BF_PXP_OUT_LRC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_LRC_X)
++#define BP_PXP_OUT_LRC_RSVD0 14
++#define BM_PXP_OUT_LRC_RSVD0 0x0000C000
++#define BF_PXP_OUT_LRC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_LRC_RSVD0)
++#define BP_PXP_OUT_LRC_Y 0
++#define BM_PXP_OUT_LRC_Y 0x00003FFF
++#define BF_PXP_OUT_LRC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_LRC_Y)
++
++#define HW_PXP_OUT_PS_ULC (0x00000070)
++
++#define BP_PXP_OUT_PS_ULC_RSVD1 30
++#define BM_PXP_OUT_PS_ULC_RSVD1 0xC0000000
++#define BF_PXP_OUT_PS_ULC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_PS_ULC_RSVD1)
++#define BP_PXP_OUT_PS_ULC_X 16
++#define BM_PXP_OUT_PS_ULC_X 0x3FFF0000
++#define BF_PXP_OUT_PS_ULC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_PS_ULC_X)
++#define BP_PXP_OUT_PS_ULC_RSVD0 14
++#define BM_PXP_OUT_PS_ULC_RSVD0 0x0000C000
++#define BF_PXP_OUT_PS_ULC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_PS_ULC_RSVD0)
++#define BP_PXP_OUT_PS_ULC_Y 0
++#define BM_PXP_OUT_PS_ULC_Y 0x00003FFF
++#define BF_PXP_OUT_PS_ULC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_PS_ULC_Y)
++
++#define HW_PXP_OUT_PS_LRC (0x00000080)
++
++#define BP_PXP_OUT_PS_LRC_RSVD1 30
++#define BM_PXP_OUT_PS_LRC_RSVD1 0xC0000000
++#define BF_PXP_OUT_PS_LRC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_PS_LRC_RSVD1)
++#define BP_PXP_OUT_PS_LRC_X 16
++#define BM_PXP_OUT_PS_LRC_X 0x3FFF0000
++#define BF_PXP_OUT_PS_LRC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_PS_LRC_X)
++#define BP_PXP_OUT_PS_LRC_RSVD0 14
++#define BM_PXP_OUT_PS_LRC_RSVD0 0x0000C000
++#define BF_PXP_OUT_PS_LRC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_PS_LRC_RSVD0)
++#define BP_PXP_OUT_PS_LRC_Y 0
++#define BM_PXP_OUT_PS_LRC_Y 0x00003FFF
++#define BF_PXP_OUT_PS_LRC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_PS_LRC_Y)
++
++#define HW_PXP_OUT_AS_ULC (0x00000090)
++
++#define BP_PXP_OUT_AS_ULC_RSVD1 30
++#define BM_PXP_OUT_AS_ULC_RSVD1 0xC0000000
++#define BF_PXP_OUT_AS_ULC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_AS_ULC_RSVD1)
++#define BP_PXP_OUT_AS_ULC_X 16
++#define BM_PXP_OUT_AS_ULC_X 0x3FFF0000
++#define BF_PXP_OUT_AS_ULC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_AS_ULC_X)
++#define BP_PXP_OUT_AS_ULC_RSVD0 14
++#define BM_PXP_OUT_AS_ULC_RSVD0 0x0000C000
++#define BF_PXP_OUT_AS_ULC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_AS_ULC_RSVD0)
++#define BP_PXP_OUT_AS_ULC_Y 0
++#define BM_PXP_OUT_AS_ULC_Y 0x00003FFF
++#define BF_PXP_OUT_AS_ULC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_AS_ULC_Y)
++
++#define HW_PXP_OUT_AS_LRC (0x000000a0)
++
++#define BP_PXP_OUT_AS_LRC_RSVD1 30
++#define BM_PXP_OUT_AS_LRC_RSVD1 0xC0000000
++#define BF_PXP_OUT_AS_LRC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_AS_LRC_RSVD1)
++#define BP_PXP_OUT_AS_LRC_X 16
++#define BM_PXP_OUT_AS_LRC_X 0x3FFF0000
++#define BF_PXP_OUT_AS_LRC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_AS_LRC_X)
++#define BP_PXP_OUT_AS_LRC_RSVD0 14
++#define BM_PXP_OUT_AS_LRC_RSVD0 0x0000C000
++#define BF_PXP_OUT_AS_LRC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_AS_LRC_RSVD0)
++#define BP_PXP_OUT_AS_LRC_Y 0
++#define BM_PXP_OUT_AS_LRC_Y 0x00003FFF
++#define BF_PXP_OUT_AS_LRC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_AS_LRC_Y)
++
++#define HW_PXP_PS_CTRL (0x000000b0)
++#define HW_PXP_PS_CTRL_SET (0x000000b4)
++#define HW_PXP_PS_CTRL_CLR (0x000000b8)
++#define HW_PXP_PS_CTRL_TOG (0x000000bc)
++
++#define BP_PXP_PS_CTRL_RSVD1 12
++#define BM_PXP_PS_CTRL_RSVD1 0xFFFFF000
++#define BF_PXP_PS_CTRL_RSVD1(v) \
++ (((v) << 12) & BM_PXP_PS_CTRL_RSVD1)
++#define BP_PXP_PS_CTRL_DECX 10
++#define BM_PXP_PS_CTRL_DECX 0x00000C00
++#define BF_PXP_PS_CTRL_DECX(v) \
++ (((v) << 10) & BM_PXP_PS_CTRL_DECX)
++#define BV_PXP_PS_CTRL_DECX__DISABLE 0x0
++#define BV_PXP_PS_CTRL_DECX__DECX2 0x1
++#define BV_PXP_PS_CTRL_DECX__DECX4 0x2
++#define BV_PXP_PS_CTRL_DECX__DECX8 0x3
++#define BP_PXP_PS_CTRL_DECY 8
++#define BM_PXP_PS_CTRL_DECY 0x00000300
++#define BF_PXP_PS_CTRL_DECY(v) \
++ (((v) << 8) & BM_PXP_PS_CTRL_DECY)
++#define BV_PXP_PS_CTRL_DECY__DISABLE 0x0
++#define BV_PXP_PS_CTRL_DECY__DECY2 0x1
++#define BV_PXP_PS_CTRL_DECY__DECY4 0x2
++#define BV_PXP_PS_CTRL_DECY__DECY8 0x3
++#define BP_PXP_PS_CTRL_SWAP 5
++#define BM_PXP_PS_CTRL_SWAP 0x000000E0
++#define BF_PXP_PS_CTRL_SWAP(v) \
++ (((v) << 5) & BM_PXP_PS_CTRL_SWAP)
++#define BP_PXP_PS_CTRL_FORMAT 0
++#define BM_PXP_PS_CTRL_FORMAT 0x0000001F
++#define BF_PXP_PS_CTRL_FORMAT(v) \
++ (((v) << 0) & BM_PXP_PS_CTRL_FORMAT)
++#define BV_PXP_PS_CTRL_FORMAT__RGB888 0x4
++#define BV_PXP_PS_CTRL_FORMAT__RGB555 0xC
++#define BV_PXP_PS_CTRL_FORMAT__RGB444 0xD
++#define BV_PXP_PS_CTRL_FORMAT__RGB565 0xE
++#define BV_PXP_PS_CTRL_FORMAT__YUV1P444 0x10
++#define BV_PXP_PS_CTRL_FORMAT__UYVY1P422 0x12
++#define BV_PXP_PS_CTRL_FORMAT__VYUY1P422 0x13
++#define BV_PXP_PS_CTRL_FORMAT__Y8 0x14
++#define BV_PXP_PS_CTRL_FORMAT__Y4 0x15
++#define BV_PXP_PS_CTRL_FORMAT__YUV2P422 0x18
++#define BV_PXP_PS_CTRL_FORMAT__YUV2P420 0x19
++#define BV_PXP_PS_CTRL_FORMAT__YVU2P422 0x1A
++#define BV_PXP_PS_CTRL_FORMAT__YVU2P420 0x1B
++#define BV_PXP_PS_CTRL_FORMAT__YUV422 0x1E
++#define BV_PXP_PS_CTRL_FORMAT__YUV420 0x1F
++
++#define HW_PXP_PS_BUF (0x000000c0)
++
++#define BP_PXP_PS_BUF_ADDR 0
++#define BM_PXP_PS_BUF_ADDR 0xFFFFFFFF
++#define BF_PXP_PS_BUF_ADDR(v) (v)
++
++#define HW_PXP_PS_UBUF (0x000000d0)
++
++#define BP_PXP_PS_UBUF_ADDR 0
++#define BM_PXP_PS_UBUF_ADDR 0xFFFFFFFF
++#define BF_PXP_PS_UBUF_ADDR(v) (v)
++
++#define HW_PXP_PS_VBUF (0x000000e0)
++
++#define BP_PXP_PS_VBUF_ADDR 0
++#define BM_PXP_PS_VBUF_ADDR 0xFFFFFFFF
++#define BF_PXP_PS_VBUF_ADDR(v) (v)
++
++#define HW_PXP_PS_PITCH (0x000000f0)
++
++#define BP_PXP_PS_PITCH_RSVD 16
++#define BM_PXP_PS_PITCH_RSVD 0xFFFF0000
++#define BF_PXP_PS_PITCH_RSVD(v) \
++ (((v) << 16) & BM_PXP_PS_PITCH_RSVD)
++#define BP_PXP_PS_PITCH_PITCH 0
++#define BM_PXP_PS_PITCH_PITCH 0x0000FFFF
++#define BF_PXP_PS_PITCH_PITCH(v) \
++ (((v) << 0) & BM_PXP_PS_PITCH_PITCH)
++
++#define HW_PXP_PS_BACKGROUND (0x00000100)
++
++#define BP_PXP_PS_BACKGROUND_RSVD 24
++#define BM_PXP_PS_BACKGROUND_RSVD 0xFF000000
++#define BF_PXP_PS_BACKGROUND_RSVD(v) \
++ (((v) << 24) & BM_PXP_PS_BACKGROUND_RSVD)
++#define BP_PXP_PS_BACKGROUND_COLOR 0
++#define BM_PXP_PS_BACKGROUND_COLOR 0x00FFFFFF
++#define BF_PXP_PS_BACKGROUND_COLOR(v) \
++ (((v) << 0) & BM_PXP_PS_BACKGROUND_COLOR)
++
++#define HW_PXP_PS_SCALE (0x00000110)
++
++#define BM_PXP_PS_SCALE_RSVD2 0x80000000
++#define BP_PXP_PS_SCALE_YSCALE 16
++#define BM_PXP_PS_SCALE_YSCALE 0x7FFF0000
++#define BF_PXP_PS_SCALE_YSCALE(v) \
++ (((v) << 16) & BM_PXP_PS_SCALE_YSCALE)
++#define BM_PXP_PS_SCALE_RSVD1 0x00008000
++#define BP_PXP_PS_SCALE_XSCALE 0
++#define BM_PXP_PS_SCALE_XSCALE 0x00007FFF
++#define BF_PXP_PS_SCALE_XSCALE(v) \
++ (((v) << 0) & BM_PXP_PS_SCALE_XSCALE)
++
++#define HW_PXP_PS_OFFSET (0x00000120)
++
++#define BP_PXP_PS_OFFSET_RSVD2 28
++#define BM_PXP_PS_OFFSET_RSVD2 0xF0000000
++#define BF_PXP_PS_OFFSET_RSVD2(v) \
++ (((v) << 28) & BM_PXP_PS_OFFSET_RSVD2)
++#define BP_PXP_PS_OFFSET_YOFFSET 16
++#define BM_PXP_PS_OFFSET_YOFFSET 0x0FFF0000
++#define BF_PXP_PS_OFFSET_YOFFSET(v) \
++ (((v) << 16) & BM_PXP_PS_OFFSET_YOFFSET)
++#define BP_PXP_PS_OFFSET_RSVD1 12
++#define BM_PXP_PS_OFFSET_RSVD1 0x0000F000
++#define BF_PXP_PS_OFFSET_RSVD1(v) \
++ (((v) << 12) & BM_PXP_PS_OFFSET_RSVD1)
++#define BP_PXP_PS_OFFSET_XOFFSET 0
++#define BM_PXP_PS_OFFSET_XOFFSET 0x00000FFF
++#define BF_PXP_PS_OFFSET_XOFFSET(v) \
++ (((v) << 0) & BM_PXP_PS_OFFSET_XOFFSET)
++
++#define HW_PXP_PS_CLRKEYLOW (0x00000130)
++
++#define BP_PXP_PS_CLRKEYLOW_RSVD1 24
++#define BM_PXP_PS_CLRKEYLOW_RSVD1 0xFF000000
++#define BF_PXP_PS_CLRKEYLOW_RSVD1(v) \
++ (((v) << 24) & BM_PXP_PS_CLRKEYLOW_RSVD1)
++#define BP_PXP_PS_CLRKEYLOW_PIXEL 0
++#define BM_PXP_PS_CLRKEYLOW_PIXEL 0x00FFFFFF
++#define BF_PXP_PS_CLRKEYLOW_PIXEL(v) \
++ (((v) << 0) & BM_PXP_PS_CLRKEYLOW_PIXEL)
++
++#define HW_PXP_PS_CLRKEYHIGH (0x00000140)
++
++#define BP_PXP_PS_CLRKEYHIGH_RSVD1 24
++#define BM_PXP_PS_CLRKEYHIGH_RSVD1 0xFF000000
++#define BF_PXP_PS_CLRKEYHIGH_RSVD1(v) \
++ (((v) << 24) & BM_PXP_PS_CLRKEYHIGH_RSVD1)
++#define BP_PXP_PS_CLRKEYHIGH_PIXEL 0
++#define BM_PXP_PS_CLRKEYHIGH_PIXEL 0x00FFFFFF
++#define BF_PXP_PS_CLRKEYHIGH_PIXEL(v) \
++ (((v) << 0) & BM_PXP_PS_CLRKEYHIGH_PIXEL)
++
++#define HW_PXP_AS_CTRL (0x00000150)
++
++#define BP_PXP_AS_CTRL_RSVD1 21
++#define BM_PXP_AS_CTRL_RSVD1 0xFFE00000
++#define BF_PXP_AS_CTRL_RSVD1(v) \
++ (((v) << 21) & BM_PXP_AS_CTRL_RSVD1)
++#define BM_PXP_AS_CTRL_ALPHA_INVERT 0x00100000
++#define BP_PXP_AS_CTRL_ROP 16
++#define BM_PXP_AS_CTRL_ROP 0x000F0000
++#define BF_PXP_AS_CTRL_ROP(v) \
++ (((v) << 16) & BM_PXP_AS_CTRL_ROP)
++#define BV_PXP_AS_CTRL_ROP__MASKAS 0x0
++#define BV_PXP_AS_CTRL_ROP__MASKNOTAS 0x1
++#define BV_PXP_AS_CTRL_ROP__MASKASNOT 0x2
++#define BV_PXP_AS_CTRL_ROP__MERGEAS 0x3
++#define BV_PXP_AS_CTRL_ROP__MERGENOTAS 0x4
++#define BV_PXP_AS_CTRL_ROP__MERGEASNOT 0x5
++#define BV_PXP_AS_CTRL_ROP__NOTCOPYAS 0x6
++#define BV_PXP_AS_CTRL_ROP__NOT 0x7
++#define BV_PXP_AS_CTRL_ROP__NOTMASKAS 0x8
++#define BV_PXP_AS_CTRL_ROP__NOTMERGEAS 0x9
++#define BV_PXP_AS_CTRL_ROP__XORAS 0xA
++#define BV_PXP_AS_CTRL_ROP__NOTXORAS 0xB
++#define BP_PXP_AS_CTRL_ALPHA 8
++#define BM_PXP_AS_CTRL_ALPHA 0x0000FF00
++#define BF_PXP_AS_CTRL_ALPHA(v) \
++ (((v) << 8) & BM_PXP_AS_CTRL_ALPHA)
++#define BP_PXP_AS_CTRL_FORMAT 4
++#define BM_PXP_AS_CTRL_FORMAT 0x000000F0
++#define BF_PXP_AS_CTRL_FORMAT(v) \
++ (((v) << 4) & BM_PXP_AS_CTRL_FORMAT)
++#define BV_PXP_AS_CTRL_FORMAT__ARGB8888 0x0
++#define BV_PXP_AS_CTRL_FORMAT__RGB888 0x4
++#define BV_PXP_AS_CTRL_FORMAT__ARGB1555 0x8
++#define BV_PXP_AS_CTRL_FORMAT__ARGB4444 0x9
++#define BV_PXP_AS_CTRL_FORMAT__RGB555 0xC
++#define BV_PXP_AS_CTRL_FORMAT__RGB444 0xD
++#define BV_PXP_AS_CTRL_FORMAT__RGB565 0xE
++#define BM_PXP_AS_CTRL_ENABLE_COLORKEY 0x00000008
++#define BP_PXP_AS_CTRL_ALPHA_CTRL 1
++#define BM_PXP_AS_CTRL_ALPHA_CTRL 0x00000006
++#define BF_PXP_AS_CTRL_ALPHA_CTRL(v) \
++ (((v) << 1) & BM_PXP_AS_CTRL_ALPHA_CTRL)
++#define BV_PXP_AS_CTRL_ALPHA_CTRL__Embedded 0x0
++#define BV_PXP_AS_CTRL_ALPHA_CTRL__Override 0x1
++#define BV_PXP_AS_CTRL_ALPHA_CTRL__Multiply 0x2
++#define BV_PXP_AS_CTRL_ALPHA_CTRL__ROPs 0x3
++#define BM_PXP_AS_CTRL_RSVD0 0x00000001
++
++#define HW_PXP_AS_BUF (0x00000160)
++
++#define BP_PXP_AS_BUF_ADDR 0
++#define BM_PXP_AS_BUF_ADDR 0xFFFFFFFF
++#define BF_PXP_AS_BUF_ADDR(v) (v)
++
++#define HW_PXP_AS_PITCH (0x00000170)
++
++#define BP_PXP_AS_PITCH_RSVD 16
++#define BM_PXP_AS_PITCH_RSVD 0xFFFF0000
++#define BF_PXP_AS_PITCH_RSVD(v) \
++ (((v) << 16) & BM_PXP_AS_PITCH_RSVD)
++#define BP_PXP_AS_PITCH_PITCH 0
++#define BM_PXP_AS_PITCH_PITCH 0x0000FFFF
++#define BF_PXP_AS_PITCH_PITCH(v) \
++ (((v) << 0) & BM_PXP_AS_PITCH_PITCH)
++
++#define HW_PXP_AS_CLRKEYLOW (0x00000180)
++
++#define BP_PXP_AS_CLRKEYLOW_RSVD1 24
++#define BM_PXP_AS_CLRKEYLOW_RSVD1 0xFF000000
++#define BF_PXP_AS_CLRKEYLOW_RSVD1(v) \
++ (((v) << 24) & BM_PXP_AS_CLRKEYLOW_RSVD1)
++#define BP_PXP_AS_CLRKEYLOW_PIXEL 0
++#define BM_PXP_AS_CLRKEYLOW_PIXEL 0x00FFFFFF
++#define BF_PXP_AS_CLRKEYLOW_PIXEL(v) \
++ (((v) << 0) & BM_PXP_AS_CLRKEYLOW_PIXEL)
++
++#define HW_PXP_AS_CLRKEYHIGH (0x00000190)
++
++#define BP_PXP_AS_CLRKEYHIGH_RSVD1 24
++#define BM_PXP_AS_CLRKEYHIGH_RSVD1 0xFF000000
++#define BF_PXP_AS_CLRKEYHIGH_RSVD1(v) \
++ (((v) << 24) & BM_PXP_AS_CLRKEYHIGH_RSVD1)
++#define BP_PXP_AS_CLRKEYHIGH_PIXEL 0
++#define BM_PXP_AS_CLRKEYHIGH_PIXEL 0x00FFFFFF
++#define BF_PXP_AS_CLRKEYHIGH_PIXEL(v) \
++ (((v) << 0) & BM_PXP_AS_CLRKEYHIGH_PIXEL)
++
++#define HW_PXP_CSC1_COEF0 (0x000001a0)
++
++#define BM_PXP_CSC1_COEF0_YCBCR_MODE 0x80000000
++#define BM_PXP_CSC1_COEF0_BYPASS 0x40000000
++#define BM_PXP_CSC1_COEF0_RSVD1 0x20000000
++#define BP_PXP_CSC1_COEF0_C0 18
++#define BM_PXP_CSC1_COEF0_C0 0x1FFC0000
++#define BF_PXP_CSC1_COEF0_C0(v) \
++ (((v) << 18) & BM_PXP_CSC1_COEF0_C0)
++#define BP_PXP_CSC1_COEF0_UV_OFFSET 9
++#define BM_PXP_CSC1_COEF0_UV_OFFSET 0x0003FE00
++#define BF_PXP_CSC1_COEF0_UV_OFFSET(v) \
++ (((v) << 9) & BM_PXP_CSC1_COEF0_UV_OFFSET)
++#define BP_PXP_CSC1_COEF0_Y_OFFSET 0
++#define BM_PXP_CSC1_COEF0_Y_OFFSET 0x000001FF
++#define BF_PXP_CSC1_COEF0_Y_OFFSET(v) \
++ (((v) << 0) & BM_PXP_CSC1_COEF0_Y_OFFSET)
++
++#define HW_PXP_CSC1_COEF1 (0x000001b0)
++
++#define BP_PXP_CSC1_COEF1_RSVD1 27
++#define BM_PXP_CSC1_COEF1_RSVD1 0xF8000000
++#define BF_PXP_CSC1_COEF1_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC1_COEF1_RSVD1)
++#define BP_PXP_CSC1_COEF1_C1 16
++#define BM_PXP_CSC1_COEF1_C1 0x07FF0000
++#define BF_PXP_CSC1_COEF1_C1(v) \
++ (((v) << 16) & BM_PXP_CSC1_COEF1_C1)
++#define BP_PXP_CSC1_COEF1_RSVD0 11
++#define BM_PXP_CSC1_COEF1_RSVD0 0x0000F800
++#define BF_PXP_CSC1_COEF1_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC1_COEF1_RSVD0)
++#define BP_PXP_CSC1_COEF1_C4 0
++#define BM_PXP_CSC1_COEF1_C4 0x000007FF
++#define BF_PXP_CSC1_COEF1_C4(v) \
++ (((v) << 0) & BM_PXP_CSC1_COEF1_C4)
++
++#define HW_PXP_CSC1_COEF2 (0x000001c0)
++
++#define BP_PXP_CSC1_COEF2_RSVD1 27
++#define BM_PXP_CSC1_COEF2_RSVD1 0xF8000000
++#define BF_PXP_CSC1_COEF2_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC1_COEF2_RSVD1)
++#define BP_PXP_CSC1_COEF2_C2 16
++#define BM_PXP_CSC1_COEF2_C2 0x07FF0000
++#define BF_PXP_CSC1_COEF2_C2(v) \
++ (((v) << 16) & BM_PXP_CSC1_COEF2_C2)
++#define BP_PXP_CSC1_COEF2_RSVD0 11
++#define BM_PXP_CSC1_COEF2_RSVD0 0x0000F800
++#define BF_PXP_CSC1_COEF2_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC1_COEF2_RSVD0)
++#define BP_PXP_CSC1_COEF2_C3 0
++#define BM_PXP_CSC1_COEF2_C3 0x000007FF
++#define BF_PXP_CSC1_COEF2_C3(v) \
++ (((v) << 0) & BM_PXP_CSC1_COEF2_C3)
++
++#define HW_PXP_CSC2_CTRL (0x000001d0)
++
++#define BP_PXP_CSC2_CTRL_RSVD 3
++#define BM_PXP_CSC2_CTRL_RSVD 0xFFFFFFF8
++#define BF_PXP_CSC2_CTRL_RSVD(v) \
++ (((v) << 3) & BM_PXP_CSC2_CTRL_RSVD)
++#define BP_PXP_CSC2_CTRL_CSC_MODE 1
++#define BM_PXP_CSC2_CTRL_CSC_MODE 0x00000006
++#define BF_PXP_CSC2_CTRL_CSC_MODE(v) \
++ (((v) << 1) & BM_PXP_CSC2_CTRL_CSC_MODE)
++#define BV_PXP_CSC2_CTRL_CSC_MODE__YUV2RGB 0x0
++#define BV_PXP_CSC2_CTRL_CSC_MODE__YCbCr2RGB 0x1
++#define BV_PXP_CSC2_CTRL_CSC_MODE__RGB2YUV 0x2
++#define BV_PXP_CSC2_CTRL_CSC_MODE__RGB2YCbCr 0x3
++#define BM_PXP_CSC2_CTRL_BYPASS 0x00000001
++
++#define HW_PXP_CSC2_COEF0 (0x000001e0)
++
++#define BP_PXP_CSC2_COEF0_RSVD1 27
++#define BM_PXP_CSC2_COEF0_RSVD1 0xF8000000
++#define BF_PXP_CSC2_COEF0_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC2_COEF0_RSVD1)
++#define BP_PXP_CSC2_COEF0_A2 16
++#define BM_PXP_CSC2_COEF0_A2 0x07FF0000
++#define BF_PXP_CSC2_COEF0_A2(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF0_A2)
++#define BP_PXP_CSC2_COEF0_RSVD0 11
++#define BM_PXP_CSC2_COEF0_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF0_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF0_RSVD0)
++#define BP_PXP_CSC2_COEF0_A1 0
++#define BM_PXP_CSC2_COEF0_A1 0x000007FF
++#define BF_PXP_CSC2_COEF0_A1(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF0_A1)
++
++#define HW_PXP_CSC2_COEF1 (0x000001f0)
++
++#define BP_PXP_CSC2_COEF1_RSVD1 27
++#define BM_PXP_CSC2_COEF1_RSVD1 0xF8000000
++#define BF_PXP_CSC2_COEF1_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC2_COEF1_RSVD1)
++#define BP_PXP_CSC2_COEF1_B1 16
++#define BM_PXP_CSC2_COEF1_B1 0x07FF0000
++#define BF_PXP_CSC2_COEF1_B1(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF1_B1)
++#define BP_PXP_CSC2_COEF1_RSVD0 11
++#define BM_PXP_CSC2_COEF1_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF1_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF1_RSVD0)
++#define BP_PXP_CSC2_COEF1_A3 0
++#define BM_PXP_CSC2_COEF1_A3 0x000007FF
++#define BF_PXP_CSC2_COEF1_A3(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF1_A3)
++
++#define HW_PXP_CSC2_COEF2 (0x00000200)
++
++#define BP_PXP_CSC2_COEF2_RSVD1 27
++#define BM_PXP_CSC2_COEF2_RSVD1 0xF8000000
++#define BF_PXP_CSC2_COEF2_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC2_COEF2_RSVD1)
++#define BP_PXP_CSC2_COEF2_B3 16
++#define BM_PXP_CSC2_COEF2_B3 0x07FF0000
++#define BF_PXP_CSC2_COEF2_B3(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF2_B3)
++#define BP_PXP_CSC2_COEF2_RSVD0 11
++#define BM_PXP_CSC2_COEF2_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF2_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF2_RSVD0)
++#define BP_PXP_CSC2_COEF2_B2 0
++#define BM_PXP_CSC2_COEF2_B2 0x000007FF
++#define BF_PXP_CSC2_COEF2_B2(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF2_B2)
++
++#define HW_PXP_CSC2_COEF3 (0x00000210)
++
++#define BP_PXP_CSC2_COEF3_RSVD1 27
++#define BM_PXP_CSC2_COEF3_RSVD1 0xF8000000
++#define BF_PXP_CSC2_COEF3_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC2_COEF3_RSVD1)
++#define BP_PXP_CSC2_COEF3_C2 16
++#define BM_PXP_CSC2_COEF3_C2 0x07FF0000
++#define BF_PXP_CSC2_COEF3_C2(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF3_C2)
++#define BP_PXP_CSC2_COEF3_RSVD0 11
++#define BM_PXP_CSC2_COEF3_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF3_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF3_RSVD0)
++#define BP_PXP_CSC2_COEF3_C1 0
++#define BM_PXP_CSC2_COEF3_C1 0x000007FF
++#define BF_PXP_CSC2_COEF3_C1(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF3_C1)
++
++#define HW_PXP_CSC2_COEF4 (0x00000220)
++
++#define BP_PXP_CSC2_COEF4_RSVD1 25
++#define BM_PXP_CSC2_COEF4_RSVD1 0xFE000000
++#define BF_PXP_CSC2_COEF4_RSVD1(v) \
++ (((v) << 25) & BM_PXP_CSC2_COEF4_RSVD1)
++#define BP_PXP_CSC2_COEF4_D1 16
++#define BM_PXP_CSC2_COEF4_D1 0x01FF0000
++#define BF_PXP_CSC2_COEF4_D1(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF4_D1)
++#define BP_PXP_CSC2_COEF4_RSVD0 11
++#define BM_PXP_CSC2_COEF4_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF4_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF4_RSVD0)
++#define BP_PXP_CSC2_COEF4_C3 0
++#define BM_PXP_CSC2_COEF4_C3 0x000007FF
++#define BF_PXP_CSC2_COEF4_C3(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF4_C3)
++
++#define HW_PXP_CSC2_COEF5 (0x00000230)
++
++#define BP_PXP_CSC2_COEF5_RSVD1 25
++#define BM_PXP_CSC2_COEF5_RSVD1 0xFE000000
++#define BF_PXP_CSC2_COEF5_RSVD1(v) \
++ (((v) << 25) & BM_PXP_CSC2_COEF5_RSVD1)
++#define BP_PXP_CSC2_COEF5_D3 16
++#define BM_PXP_CSC2_COEF5_D3 0x01FF0000
++#define BF_PXP_CSC2_COEF5_D3(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF5_D3)
++#define BP_PXP_CSC2_COEF5_RSVD0 9
++#define BM_PXP_CSC2_COEF5_RSVD0 0x0000FE00
++#define BF_PXP_CSC2_COEF5_RSVD0(v) \
++ (((v) << 9) & BM_PXP_CSC2_COEF5_RSVD0)
++#define BP_PXP_CSC2_COEF5_D2 0
++#define BM_PXP_CSC2_COEF5_D2 0x000001FF
++#define BF_PXP_CSC2_COEF5_D2(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF5_D2)
++
++#define HW_PXP_LUT_CTRL (0x00000240)
++
++#define BM_PXP_LUT_CTRL_BYPASS 0x80000000
++#define BP_PXP_LUT_CTRL_RSVD3 26
++#define BM_PXP_LUT_CTRL_RSVD3 0x7C000000
++#define BF_PXP_LUT_CTRL_RSVD3(v) \
++ (((v) << 26) & BM_PXP_LUT_CTRL_RSVD3)
++#define BP_PXP_LUT_CTRL_LOOKUP_MODE 24
++#define BM_PXP_LUT_CTRL_LOOKUP_MODE 0x03000000
++#define BF_PXP_LUT_CTRL_LOOKUP_MODE(v) \
++ (((v) << 24) & BM_PXP_LUT_CTRL_LOOKUP_MODE)
++#define BV_PXP_LUT_CTRL_LOOKUP_MODE__CACHE_RGB565 0x0
++#define BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8 0x1
++#define BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_RGB444 0x2
++#define BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_RGB454 0x3
++#define BP_PXP_LUT_CTRL_RSVD2 18
++#define BM_PXP_LUT_CTRL_RSVD2 0x00FC0000
++#define BF_PXP_LUT_CTRL_RSVD2(v) \
++ (((v) << 18) & BM_PXP_LUT_CTRL_RSVD2)
++#define BP_PXP_LUT_CTRL_OUT_MODE 16
++#define BM_PXP_LUT_CTRL_OUT_MODE 0x00030000
++#define BF_PXP_LUT_CTRL_OUT_MODE(v) \
++ (((v) << 16) & BM_PXP_LUT_CTRL_OUT_MODE)
++#define BV_PXP_LUT_CTRL_OUT_MODE__RESERVED 0x0
++#define BV_PXP_LUT_CTRL_OUT_MODE__Y8 0x1
++#define BV_PXP_LUT_CTRL_OUT_MODE__RGBW4444CFA 0x2
++#define BV_PXP_LUT_CTRL_OUT_MODE__RGB888 0x3
++#define BP_PXP_LUT_CTRL_RSVD1 11
++#define BM_PXP_LUT_CTRL_RSVD1 0x0000F800
++#define BF_PXP_LUT_CTRL_RSVD1(v) \
++ (((v) << 11) & BM_PXP_LUT_CTRL_RSVD1)
++#define BM_PXP_LUT_CTRL_SEL_8KB 0x00000400
++#define BM_PXP_LUT_CTRL_LRU_UPD 0x00000200
++#define BM_PXP_LUT_CTRL_INVALID 0x00000100
++#define BP_PXP_LUT_CTRL_RSVD0 1
++#define BM_PXP_LUT_CTRL_RSVD0 0x000000FE
++#define BF_PXP_LUT_CTRL_RSVD0(v) \
++ (((v) << 1) & BM_PXP_LUT_CTRL_RSVD0)
++#define BM_PXP_LUT_CTRL_DMA_START 0x00000001
++
++#define HW_PXP_LUT_ADDR (0x00000250)
++
++#define BM_PXP_LUT_ADDR_RSVD2 0x80000000
++#define BP_PXP_LUT_ADDR_NUM_BYTES 16
++#define BM_PXP_LUT_ADDR_NUM_BYTES 0x7FFF0000
++#define BF_PXP_LUT_ADDR_NUM_BYTES(v) \
++ (((v) << 16) & BM_PXP_LUT_ADDR_NUM_BYTES)
++#define BP_PXP_LUT_ADDR_RSVD1 14
++#define BM_PXP_LUT_ADDR_RSVD1 0x0000C000
++#define BF_PXP_LUT_ADDR_RSVD1(v) \
++ (((v) << 14) & BM_PXP_LUT_ADDR_RSVD1)
++#define BP_PXP_LUT_ADDR_ADDR 0
++#define BM_PXP_LUT_ADDR_ADDR 0x00003FFF
++#define BF_PXP_LUT_ADDR_ADDR(v) \
++ (((v) << 0) & BM_PXP_LUT_ADDR_ADDR)
++
++#define HW_PXP_LUT_DATA (0x00000260)
++
++#define BP_PXP_LUT_DATA_DATA 0
++#define BM_PXP_LUT_DATA_DATA 0xFFFFFFFF
++#define BF_PXP_LUT_DATA_DATA(v) (v)
++
++#define HW_PXP_LUT_EXTMEM (0x00000270)
++
++#define BP_PXP_LUT_EXTMEM_ADDR 0
++#define BM_PXP_LUT_EXTMEM_ADDR 0xFFFFFFFF
++#define BF_PXP_LUT_EXTMEM_ADDR(v) (v)
++
++#define HW_PXP_CFA (0x00000280)
++
++#define BP_PXP_CFA_DATA 0
++#define BM_PXP_CFA_DATA 0xFFFFFFFF
++#define BF_PXP_CFA_DATA(v) (v)
++
++#define HW_PXP_HIST_CTRL (0x00000290)
++
++#define BP_PXP_HIST_CTRL_RSVD 6
++#define BM_PXP_HIST_CTRL_RSVD 0xFFFFFFC0
++#define BF_PXP_HIST_CTRL_RSVD(v) \
++ (((v) << 6) & BM_PXP_HIST_CTRL_RSVD)
++#define BP_PXP_HIST_CTRL_PANEL_MODE 4
++#define BM_PXP_HIST_CTRL_PANEL_MODE 0x00000030
++#define BF_PXP_HIST_CTRL_PANEL_MODE(v) \
++ (((v) << 4) & BM_PXP_HIST_CTRL_PANEL_MODE)
++#define BV_PXP_HIST_CTRL_PANEL_MODE__GRAY4 0x0
++#define BV_PXP_HIST_CTRL_PANEL_MODE__GRAY8 0x1
++#define BV_PXP_HIST_CTRL_PANEL_MODE__GRAY16 0x2
++#define BV_PXP_HIST_CTRL_PANEL_MODE__GRAY32 0x3
++#define BP_PXP_HIST_CTRL_STATUS 0
++#define BM_PXP_HIST_CTRL_STATUS 0x0000000F
++#define BF_PXP_HIST_CTRL_STATUS(v) \
++ (((v) << 0) & BM_PXP_HIST_CTRL_STATUS)
++
++#define HW_PXP_HIST2_PARAM (0x000002a0)
++
++#define BP_PXP_HIST2_PARAM_RSVD 16
++#define BM_PXP_HIST2_PARAM_RSVD 0xFFFF0000
++#define BF_PXP_HIST2_PARAM_RSVD(v) \
++ (((v) << 16) & BM_PXP_HIST2_PARAM_RSVD)
++#define BP_PXP_HIST2_PARAM_RSVD1 13
++#define BM_PXP_HIST2_PARAM_RSVD1 0x0000E000
++#define BF_PXP_HIST2_PARAM_RSVD1(v) \
++ (((v) << 13) & BM_PXP_HIST2_PARAM_RSVD1)
++#define BP_PXP_HIST2_PARAM_VALUE1 8
++#define BM_PXP_HIST2_PARAM_VALUE1 0x00001F00
++#define BF_PXP_HIST2_PARAM_VALUE1(v) \
++ (((v) << 8) & BM_PXP_HIST2_PARAM_VALUE1)
++#define BP_PXP_HIST2_PARAM_RSVD0 5
++#define BM_PXP_HIST2_PARAM_RSVD0 0x000000E0
++#define BF_PXP_HIST2_PARAM_RSVD0(v) \
++ (((v) << 5) & BM_PXP_HIST2_PARAM_RSVD0)
++#define BP_PXP_HIST2_PARAM_VALUE0 0
++#define BM_PXP_HIST2_PARAM_VALUE0 0x0000001F
++#define BF_PXP_HIST2_PARAM_VALUE0(v) \
++ (((v) << 0) & BM_PXP_HIST2_PARAM_VALUE0)
++
++#define HW_PXP_HIST4_PARAM (0x000002b0)
++
++#define BP_PXP_HIST4_PARAM_RSVD3 29
++#define BM_PXP_HIST4_PARAM_RSVD3 0xE0000000
++#define BF_PXP_HIST4_PARAM_RSVD3(v) \
++ (((v) << 29) & BM_PXP_HIST4_PARAM_RSVD3)
++#define BP_PXP_HIST4_PARAM_VALUE3 24
++#define BM_PXP_HIST4_PARAM_VALUE3 0x1F000000
++#define BF_PXP_HIST4_PARAM_VALUE3(v) \
++ (((v) << 24) & BM_PXP_HIST4_PARAM_VALUE3)
++#define BP_PXP_HIST4_PARAM_RSVD2 21
++#define BM_PXP_HIST4_PARAM_RSVD2 0x00E00000
++#define BF_PXP_HIST4_PARAM_RSVD2(v) \
++ (((v) << 21) & BM_PXP_HIST4_PARAM_RSVD2)
++#define BP_PXP_HIST4_PARAM_VALUE2 16
++#define BM_PXP_HIST4_PARAM_VALUE2 0x001F0000
++#define BF_PXP_HIST4_PARAM_VALUE2(v) \
++ (((v) << 16) & BM_PXP_HIST4_PARAM_VALUE2)
++#define BP_PXP_HIST4_PARAM_RSVD1 13
++#define BM_PXP_HIST4_PARAM_RSVD1 0x0000E000
++#define BF_PXP_HIST4_PARAM_RSVD1(v) \
++ (((v) << 13) & BM_PXP_HIST4_PARAM_RSVD1)
++#define BP_PXP_HIST4_PARAM_VALUE1 8
++#define BM_PXP_HIST4_PARAM_VALUE1 0x00001F00
++#define BF_PXP_HIST4_PARAM_VALUE1(v) \
++ (((v) << 8) & BM_PXP_HIST4_PARAM_VALUE1)
++#define BP_PXP_HIST4_PARAM_RSVD0 5
++#define BM_PXP_HIST4_PARAM_RSVD0 0x000000E0
++#define BF_PXP_HIST4_PARAM_RSVD0(v) \
++ (((v) << 5) & BM_PXP_HIST4_PARAM_RSVD0)
++#define BP_PXP_HIST4_PARAM_VALUE0 0
++#define BM_PXP_HIST4_PARAM_VALUE0 0x0000001F
++#define BF_PXP_HIST4_PARAM_VALUE0(v) \
++ (((v) << 0) & BM_PXP_HIST4_PARAM_VALUE0)
++
++#define HW_PXP_HIST8_PARAM0 (0x000002c0)
++
++#define BP_PXP_HIST8_PARAM0_RSVD3 29
++#define BM_PXP_HIST8_PARAM0_RSVD3 0xE0000000
++#define BF_PXP_HIST8_PARAM0_RSVD3(v) \
++ (((v) << 29) & BM_PXP_HIST8_PARAM0_RSVD3)
++#define BP_PXP_HIST8_PARAM0_VALUE3 24
++#define BM_PXP_HIST8_PARAM0_VALUE3 0x1F000000
++#define BF_PXP_HIST8_PARAM0_VALUE3(v) \
++ (((v) << 24) & BM_PXP_HIST8_PARAM0_VALUE3)
++#define BP_PXP_HIST8_PARAM0_RSVD2 21
++#define BM_PXP_HIST8_PARAM0_RSVD2 0x00E00000
++#define BF_PXP_HIST8_PARAM0_RSVD2(v) \
++ (((v) << 21) & BM_PXP_HIST8_PARAM0_RSVD2)
++#define BP_PXP_HIST8_PARAM0_VALUE2 16
++#define BM_PXP_HIST8_PARAM0_VALUE2 0x001F0000
++#define BF_PXP_HIST8_PARAM0_VALUE2(v) \
++ (((v) << 16) & BM_PXP_HIST8_PARAM0_VALUE2)
++#define BP_PXP_HIST8_PARAM0_RSVD1 13
++#define BM_PXP_HIST8_PARAM0_RSVD1 0x0000E000
++#define BF_PXP_HIST8_PARAM0_RSVD1(v) \
++ (((v) << 13) & BM_PXP_HIST8_PARAM0_RSVD1)
++#define BP_PXP_HIST8_PARAM0_VALUE1 8
++#define BM_PXP_HIST8_PARAM0_VALUE1 0x00001F00
++#define BF_PXP_HIST8_PARAM0_VALUE1(v) \
++ (((v) << 8) & BM_PXP_HIST8_PARAM0_VALUE1)
++#define BP_PXP_HIST8_PARAM0_RSVD0 5
++#define BM_PXP_HIST8_PARAM0_RSVD0 0x000000E0
++#define BF_PXP_HIST8_PARAM0_RSVD0(v) \
++ (((v) << 5) & BM_PXP_HIST8_PARAM0_RSVD0)
++#define BP_PXP_HIST8_PARAM0_VALUE0 0
++#define BM_PXP_HIST8_PARAM0_VALUE0 0x0000001F
++#define BF_PXP_HIST8_PARAM0_VALUE0(v) \
++ (((v) << 0) & BM_PXP_HIST8_PARAM0_VALUE0)
++
++#define HW_PXP_HIST8_PARAM1 (0x000002d0)
++
++#define BP_PXP_HIST8_PARAM1_RSVD7 29
++#define BM_PXP_HIST8_PARAM1_RSVD7 0xE0000000
++#define BF_PXP_HIST8_PARAM1_RSVD7(v) \
++ (((v) << 29) & BM_PXP_HIST8_PARAM1_RSVD7)
++#define BP_PXP_HIST8_PARAM1_VALUE7 24
++#define BM_PXP_HIST8_PARAM1_VALUE7 0x1F000000
++#define BF_PXP_HIST8_PARAM1_VALUE7(v) \
++ (((v) << 24) & BM_PXP_HIST8_PARAM1_VALUE7)
++#define BP_PXP_HIST8_PARAM1_RSVD6 21
++#define BM_PXP_HIST8_PARAM1_RSVD6 0x00E00000
++#define BF_PXP_HIST8_PARAM1_RSVD6(v) \
++ (((v) << 21) & BM_PXP_HIST8_PARAM1_RSVD6)
++#define BP_PXP_HIST8_PARAM1_VALUE6 16
++#define BM_PXP_HIST8_PARAM1_VALUE6 0x001F0000
++#define BF_PXP_HIST8_PARAM1_VALUE6(v) \
++ (((v) << 16) & BM_PXP_HIST8_PARAM1_VALUE6)
++#define BP_PXP_HIST8_PARAM1_RSVD5 13
++#define BM_PXP_HIST8_PARAM1_RSVD5 0x0000E000
++#define BF_PXP_HIST8_PARAM1_RSVD5(v) \
++ (((v) << 13) & BM_PXP_HIST8_PARAM1_RSVD5)
++#define BP_PXP_HIST8_PARAM1_VALUE5 8
++#define BM_PXP_HIST8_PARAM1_VALUE5 0x00001F00
++#define BF_PXP_HIST8_PARAM1_VALUE5(v) \
++ (((v) << 8) & BM_PXP_HIST8_PARAM1_VALUE5)
++#define BP_PXP_HIST8_PARAM1_RSVD4 5
++#define BM_PXP_HIST8_PARAM1_RSVD4 0x000000E0
++#define BF_PXP_HIST8_PARAM1_RSVD4(v) \
++ (((v) << 5) & BM_PXP_HIST8_PARAM1_RSVD4)
++#define BP_PXP_HIST8_PARAM1_VALUE4 0
++#define BM_PXP_HIST8_PARAM1_VALUE4 0x0000001F
++#define BF_PXP_HIST8_PARAM1_VALUE4(v) \
++ (((v) << 0) & BM_PXP_HIST8_PARAM1_VALUE4)
++
++#define HW_PXP_HIST16_PARAM0 (0x000002e0)
++
++#define BP_PXP_HIST16_PARAM0_RSVD3 29
++#define BM_PXP_HIST16_PARAM0_RSVD3 0xE0000000
++#define BF_PXP_HIST16_PARAM0_RSVD3(v) \
++ (((v) << 29) & BM_PXP_HIST16_PARAM0_RSVD3)
++#define BP_PXP_HIST16_PARAM0_VALUE3 24
++#define BM_PXP_HIST16_PARAM0_VALUE3 0x1F000000
++#define BF_PXP_HIST16_PARAM0_VALUE3(v) \
++ (((v) << 24) & BM_PXP_HIST16_PARAM0_VALUE3)
++#define BP_PXP_HIST16_PARAM0_RSVD2 21
++#define BM_PXP_HIST16_PARAM0_RSVD2 0x00E00000
++#define BF_PXP_HIST16_PARAM0_RSVD2(v) \
++ (((v) << 21) & BM_PXP_HIST16_PARAM0_RSVD2)
++#define BP_PXP_HIST16_PARAM0_VALUE2 16
++#define BM_PXP_HIST16_PARAM0_VALUE2 0x001F0000
++#define BF_PXP_HIST16_PARAM0_VALUE2(v) \
++ (((v) << 16) & BM_PXP_HIST16_PARAM0_VALUE2)
++#define BP_PXP_HIST16_PARAM0_RSVD1 13
++#define BM_PXP_HIST16_PARAM0_RSVD1 0x0000E000
++#define BF_PXP_HIST16_PARAM0_RSVD1(v) \
++ (((v) << 13) & BM_PXP_HIST16_PARAM0_RSVD1)
++#define BP_PXP_HIST16_PARAM0_VALUE1 8
++#define BM_PXP_HIST16_PARAM0_VALUE1 0x00001F00
++#define BF_PXP_HIST16_PARAM0_VALUE1(v) \
++ (((v) << 8) & BM_PXP_HIST16_PARAM0_VALUE1)
++#define BP_PXP_HIST16_PARAM0_RSVD0 5
++#define BM_PXP_HIST16_PARAM0_RSVD0 0x000000E0
++#define BF_PXP_HIST16_PARAM0_RSVD0(v) \
++ (((v) << 5) & BM_PXP_HIST16_PARAM0_RSVD0)
++#define BP_PXP_HIST16_PARAM0_VALUE0 0
++#define BM_PXP_HIST16_PARAM0_VALUE0 0x0000001F
++#define BF_PXP_HIST16_PARAM0_VALUE0(v) \
++ (((v) << 0) & BM_PXP_HIST16_PARAM0_VALUE0)
++
++#define HW_PXP_HIST16_PARAM1 (0x000002f0)
++
++#define BP_PXP_HIST16_PARAM1_RSVD7 29
++#define BM_PXP_HIST16_PARAM1_RSVD7 0xE0000000
++#define BF_PXP_HIST16_PARAM1_RSVD7(v) \
++ (((v) << 29) & BM_PXP_HIST16_PARAM1_RSVD7)
++#define BP_PXP_HIST16_PARAM1_VALUE7 24
++#define BM_PXP_HIST16_PARAM1_VALUE7 0x1F000000
++#define BF_PXP_HIST16_PARAM1_VALUE7(v) \
++ (((v) << 24) & BM_PXP_HIST16_PARAM1_VALUE7)
++#define BP_PXP_HIST16_PARAM1_RSVD6 21
++#define BM_PXP_HIST16_PARAM1_RSVD6 0x00E00000
++#define BF_PXP_HIST16_PARAM1_RSVD6(v) \
++ (((v) << 21) & BM_PXP_HIST16_PARAM1_RSVD6)
++#define BP_PXP_HIST16_PARAM1_VALUE6 16
++#define BM_PXP_HIST16_PARAM1_VALUE6 0x001F0000
++#define BF_PXP_HIST16_PARAM1_VALUE6(v) \
++ (((v) << 16) & BM_PXP_HIST16_PARAM1_VALUE6)
++#define BP_PXP_HIST16_PARAM1_RSVD5 13
++#define BM_PXP_HIST16_PARAM1_RSVD5 0x0000E000
++#define BF_PXP_HIST16_PARAM1_RSVD5(v) \
++ (((v) << 13) & BM_PXP_HIST16_PARAM1_RSVD5)
++#define BP_PXP_HIST16_PARAM1_VALUE5 8
++#define BM_PXP_HIST16_PARAM1_VALUE5 0x00001F00
++#define BF_PXP_HIST16_PARAM1_VALUE5(v) \
++ (((v) << 8) & BM_PXP_HIST16_PARAM1_VALUE5)
++#define BP_PXP_HIST16_PARAM1_RSVD4 5
++#define BM_PXP_HIST16_PARAM1_RSVD4 0x000000E0
++#define BF_PXP_HIST16_PARAM1_RSVD4(v) \
++ (((v) << 5) & BM_PXP_HIST16_PARAM1_RSVD4)
++#define BP_PXP_HIST16_PARAM1_VALUE4 0
++#define BM_PXP_HIST16_PARAM1_VALUE4 0x0000001F
++#define BF_PXP_HIST16_PARAM1_VALUE4(v) \
++ (((v) << 0) & BM_PXP_HIST16_PARAM1_VALUE4)
++
++#define HW_PXP_HIST16_PARAM2 (0x00000300)
++
++#define BP_PXP_HIST16_PARAM2_RSVD11 29
++#define BM_PXP_HIST16_PARAM2_RSVD11 0xE0000000
++#define BF_PXP_HIST16_PARAM2_RSVD11(v) \
++ (((v) << 29) & BM_PXP_HIST16_PARAM2_RSVD11)
++#define BP_PXP_HIST16_PARAM2_VALUE11 24
++#define BM_PXP_HIST16_PARAM2_VALUE11 0x1F000000
++#define BF_PXP_HIST16_PARAM2_VALUE11(v) \
++ (((v) << 24) & BM_PXP_HIST16_PARAM2_VALUE11)
++#define BP_PXP_HIST16_PARAM2_RSVD10 21
++#define BM_PXP_HIST16_PARAM2_RSVD10 0x00E00000
++#define BF_PXP_HIST16_PARAM2_RSVD10(v) \
++ (((v) << 21) & BM_PXP_HIST16_PARAM2_RSVD10)
++#define BP_PXP_HIST16_PARAM2_VALUE10 16
++#define BM_PXP_HIST16_PARAM2_VALUE10 0x001F0000
++#define BF_PXP_HIST16_PARAM2_VALUE10(v) \
++ (((v) << 16) & BM_PXP_HIST16_PARAM2_VALUE10)
++#define BP_PXP_HIST16_PARAM2_RSVD9 13
++#define BM_PXP_HIST16_PARAM2_RSVD9 0x0000E000
++#define BF_PXP_HIST16_PARAM2_RSVD9(v) \
++ (((v) << 13) & BM_PXP_HIST16_PARAM2_RSVD9)
++#define BP_PXP_HIST16_PARAM2_VALUE9 8
++#define BM_PXP_HIST16_PARAM2_VALUE9 0x00001F00
++#define BF_PXP_HIST16_PARAM2_VALUE9(v) \
++ (((v) << 8) & BM_PXP_HIST16_PARAM2_VALUE9)
++#define BP_PXP_HIST16_PARAM2_RSVD8 5
++#define BM_PXP_HIST16_PARAM2_RSVD8 0x000000E0
++#define BF_PXP_HIST16_PARAM2_RSVD8(v) \
++ (((v) << 5) & BM_PXP_HIST16_PARAM2_RSVD8)
++#define BP_PXP_HIST16_PARAM2_VALUE8 0
++#define BM_PXP_HIST16_PARAM2_VALUE8 0x0000001F
++#define BF_PXP_HIST16_PARAM2_VALUE8(v) \
++ (((v) << 0) & BM_PXP_HIST16_PARAM2_VALUE8)
++
++#define HW_PXP_HIST16_PARAM3 (0x00000310)
++
++#define BP_PXP_HIST16_PARAM3_RSVD15 29
++#define BM_PXP_HIST16_PARAM3_RSVD15 0xE0000000
++#define BF_PXP_HIST16_PARAM3_RSVD15(v) \
++ (((v) << 29) & BM_PXP_HIST16_PARAM3_RSVD15)
++#define BP_PXP_HIST16_PARAM3_VALUE15 24
++#define BM_PXP_HIST16_PARAM3_VALUE15 0x1F000000
++#define BF_PXP_HIST16_PARAM3_VALUE15(v) \
++ (((v) << 24) & BM_PXP_HIST16_PARAM3_VALUE15)
++#define BP_PXP_HIST16_PARAM3_RSVD14 21
++#define BM_PXP_HIST16_PARAM3_RSVD14 0x00E00000
++#define BF_PXP_HIST16_PARAM3_RSVD14(v) \
++ (((v) << 21) & BM_PXP_HIST16_PARAM3_RSVD14)
++#define BP_PXP_HIST16_PARAM3_VALUE14 16
++#define BM_PXP_HIST16_PARAM3_VALUE14 0x001F0000
++#define BF_PXP_HIST16_PARAM3_VALUE14(v) \
++ (((v) << 16) & BM_PXP_HIST16_PARAM3_VALUE14)
++#define BP_PXP_HIST16_PARAM3_RSVD13 13
++#define BM_PXP_HIST16_PARAM3_RSVD13 0x0000E000
++#define BF_PXP_HIST16_PARAM3_RSVD13(v) \
++ (((v) << 13) & BM_PXP_HIST16_PARAM3_RSVD13)
++#define BP_PXP_HIST16_PARAM3_VALUE13 8
++#define BM_PXP_HIST16_PARAM3_VALUE13 0x00001F00
++#define BF_PXP_HIST16_PARAM3_VALUE13(v) \
++ (((v) << 8) & BM_PXP_HIST16_PARAM3_VALUE13)
++#define BP_PXP_HIST16_PARAM3_RSVD12 5
++#define BM_PXP_HIST16_PARAM3_RSVD12 0x000000E0
++#define BF_PXP_HIST16_PARAM3_RSVD12(v) \
++ (((v) << 5) & BM_PXP_HIST16_PARAM3_RSVD12)
++#define BP_PXP_HIST16_PARAM3_VALUE12 0
++#define BM_PXP_HIST16_PARAM3_VALUE12 0x0000001F
++#define BF_PXP_HIST16_PARAM3_VALUE12(v) \
++ (((v) << 0) & BM_PXP_HIST16_PARAM3_VALUE12)
++
++#define HW_PXP_POWER (0x00000320)
++
++#define BP_PXP_POWER_CTRL 12
++#define BM_PXP_POWER_CTRL 0xFFFFF000
++#define BF_PXP_POWER_CTRL(v) \
++ (((v) << 12) & BM_PXP_POWER_CTRL)
++#define BP_PXP_POWER_ROT_MEM_LP_STATE 9
++#define BM_PXP_POWER_ROT_MEM_LP_STATE 0x00000E00
++#define BF_PXP_POWER_ROT_MEM_LP_STATE(v) \
++ (((v) << 9) & BM_PXP_POWER_ROT_MEM_LP_STATE)
++#define BV_PXP_POWER_ROT_MEM_LP_STATE__NONE 0x0
++#define BV_PXP_POWER_ROT_MEM_LP_STATE__LS 0x1
++#define BV_PXP_POWER_ROT_MEM_LP_STATE__DS 0x2
++#define BV_PXP_POWER_ROT_MEM_LP_STATE__SD 0x4
++#define BP_PXP_POWER_LUT_LP_STATE_WAY1_BANKN 6
++#define BM_PXP_POWER_LUT_LP_STATE_WAY1_BANKN 0x000001C0
++#define BF_PXP_POWER_LUT_LP_STATE_WAY1_BANKN(v) \
++ (((v) << 6) & BM_PXP_POWER_LUT_LP_STATE_WAY1_BANKN)
++#define BV_PXP_POWER_LUT_LP_STATE_WAY1_BANKN__NONE 0x0
++#define BV_PXP_POWER_LUT_LP_STATE_WAY1_BANKN__LS 0x1
++#define BV_PXP_POWER_LUT_LP_STATE_WAY1_BANKN__DS 0x2
++#define BV_PXP_POWER_LUT_LP_STATE_WAY1_BANKN__SD 0x4
++#define BP_PXP_POWER_LUT_LP_STATE_WAY0_BANKN 3
++#define BM_PXP_POWER_LUT_LP_STATE_WAY0_BANKN 0x00000038
++#define BF_PXP_POWER_LUT_LP_STATE_WAY0_BANKN(v) \
++ (((v) << 3) & BM_PXP_POWER_LUT_LP_STATE_WAY0_BANKN)
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANKN__NONE 0x0
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANKN__LS 0x1
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANKN__DS 0x2
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANKN__SD 0x4
++#define BP_PXP_POWER_LUT_LP_STATE_WAY0_BANK0 0
++#define BM_PXP_POWER_LUT_LP_STATE_WAY0_BANK0 0x00000007
++#define BF_PXP_POWER_LUT_LP_STATE_WAY0_BANK0(v) \
++ (((v) << 0) & BM_PXP_POWER_LUT_LP_STATE_WAY0_BANK0)
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANK0__NONE 0x0
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANK0__LS 0x1
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANK0__DS 0x2
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANK0__SD 0x4
++
++#define HW_PXP_NEXT (0x00000400)
++
++#define BP_PXP_NEXT_POINTER 2
++#define BM_PXP_NEXT_POINTER 0xFFFFFFFC
++#define BF_PXP_NEXT_POINTER(v) \
++ (((v) << 2) & BM_PXP_NEXT_POINTER)
++#define BM_PXP_NEXT_RSVD 0x00000002
++#define BM_PXP_NEXT_ENABLED 0x00000001
++
++#define HW_PXP_DEBUGCTRL (0x00000410)
++
++#define BP_PXP_DEBUGCTRL_RSVD 12
++#define BM_PXP_DEBUGCTRL_RSVD 0xFFFFF000
++#define BF_PXP_DEBUGCTRL_RSVD(v) \
++ (((v) << 12) & BM_PXP_DEBUGCTRL_RSVD)
++#define BP_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT 8
++#define BM_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT 0x00000F00
++#define BF_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT(v) \
++ (((v) << 8) & BM_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT)
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__NONE 0x0
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__MISS_CNT 0x1
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__HIT_CNT 0x2
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__LAT_CNT 0x4
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__MAX_LAT 0x8
++#define BP_PXP_DEBUGCTRL_SELECT 0
++#define BM_PXP_DEBUGCTRL_SELECT 0x000000FF
++#define BF_PXP_DEBUGCTRL_SELECT(v) \
++ (((v) << 0) & BM_PXP_DEBUGCTRL_SELECT)
++#define BV_PXP_DEBUGCTRL_SELECT__NONE 0x0
++#define BV_PXP_DEBUGCTRL_SELECT__CTRL 0x1
++#define BV_PXP_DEBUGCTRL_SELECT__PSBUF 0x2
++#define BV_PXP_DEBUGCTRL_SELECT__PSBAX 0x3
++#define BV_PXP_DEBUGCTRL_SELECT__PSBAY 0x4
++#define BV_PXP_DEBUGCTRL_SELECT__ASBUF 0x5
++#define BV_PXP_DEBUGCTRL_SELECT__ROTATION 0x6
++#define BV_PXP_DEBUGCTRL_SELECT__OUTBUF0 0x7
++#define BV_PXP_DEBUGCTRL_SELECT__OUTBUF1 0x8
++#define BV_PXP_DEBUGCTRL_SELECT__OUTBUF2 0x9
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_STAT 0x10
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_MISS 0x11
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_HIT 0x12
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_LAT 0x13
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_MAX_LAT 0x14
++
++#define HW_PXP_DEBUG (0x00000420)
++
++#define BP_PXP_DEBUG_DATA 0
++#define BM_PXP_DEBUG_DATA 0xFFFFFFFF
++#define BF_PXP_DEBUG_DATA(v) (v)
++
++#define HW_PXP_VERSION (0x00000430)
++
++#define BP_PXP_VERSION_MAJOR 24
++#define BM_PXP_VERSION_MAJOR 0xFF000000
++#define BF_PXP_VERSION_MAJOR(v) \
++ (((v) << 24) & BM_PXP_VERSION_MAJOR)
++#define BP_PXP_VERSION_MINOR 16
++#define BM_PXP_VERSION_MINOR 0x00FF0000
++#define BF_PXP_VERSION_MINOR(v) \
++ (((v) << 16) & BM_PXP_VERSION_MINOR)
++#define BP_PXP_VERSION_STEP 0
++#define BM_PXP_VERSION_STEP 0x0000FFFF
++#define BF_PXP_VERSION_STEP(v) \
++ (((v) << 0) & BM_PXP_VERSION_STEP)
++#endif /* __ARCH_ARM___PXP_H */
+diff -Nur linux-3.14.36/drivers/gpio/gpio-pca953x.c linux-openelec/drivers/gpio/gpio-pca953x.c
+--- linux-3.14.36/drivers/gpio/gpio-pca953x.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/gpio/gpio-pca953x.c 2015-05-06 12:05:42.000000000 -0500
+@@ -19,6 +19,7 @@
+ #include <linux/irqdomain.h>
+ #include <linux/i2c.h>
+ #include <linux/platform_data/pca953x.h>
++#include <linux/reset.h>
+ #include <linux/slab.h>
+ #ifdef CONFIG_OF_GPIO
+ #include <linux/of_platform.h>
+@@ -741,6 +742,10 @@
+
+ mutex_init(&chip->i2c_lock);
+
++ ret = device_reset(&client->dev);
++ if (ret == -ENODEV)
++ return -EPROBE_DEFER;
++
+ /* initialize cached registers from their original values.
+ * we can't share this chip with another i2c master.
+ */
+diff -Nur linux-3.14.36/drivers/gpu/drm/drm_crtc_helper.c linux-openelec/drivers/gpu/drm/drm_crtc_helper.c
+--- linux-3.14.36/drivers/gpu/drm/drm_crtc_helper.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/gpu/drm/drm_crtc_helper.c 2015-05-06 12:05:42.000000000 -0500
+@@ -564,7 +564,7 @@
+ * Caller must hold mode config lock.
+ *
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+- * from userspace or internally e.g. from the fbdev suppport code) in @set, and
++ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper functions for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
+diff -Nur linux-3.14.36/drivers/gpu/drm/drm_prime.c linux-openelec/drivers/gpu/drm/drm_prime.c
+--- linux-3.14.36/drivers/gpu/drm/drm_prime.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/gpu/drm/drm_prime.c 2015-05-06 12:05:42.000000000 -0500
+@@ -471,7 +471,7 @@
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+- if (IS_ERR_OR_NULL(sgt)) {
++ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+diff -Nur linux-3.14.36/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c linux-openelec/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+--- linux-3.14.36/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c 2015-05-06 12:05:42.000000000 -0500
+@@ -224,7 +224,7 @@
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+- if (IS_ERR_OR_NULL(sgt)) {
++ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_buf_detach;
+ }
+diff -Nur linux-3.14.36/drivers/gpu/drm/Kconfig linux-openelec/drivers/gpu/drm/Kconfig
+--- linux-3.14.36/drivers/gpu/drm/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/gpu/drm/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -166,6 +166,13 @@
+ Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+ chipset. If M is selected the module will be called savage.
+
++config DRM_VIVANTE
++ tristate "Vivante GCCore"
++ depends on DRM
++ help
++ Choose this option if you have a Vivante graphics card.
++ If M is selected, the module will be called vivante.
++
+ source "drivers/gpu/drm/exynos/Kconfig"
+
+ source "drivers/gpu/drm/vmwgfx/Kconfig"
+diff -Nur linux-3.14.36/drivers/gpu/drm/Makefile linux-openelec/drivers/gpu/drm/Makefile
+--- linux-3.14.36/drivers/gpu/drm/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/gpu/drm/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -1,3 +1,24 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
+ #
+ # Makefile for the drm device driver. This driver provides support for the
+ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+@@ -35,6 +56,7 @@
+ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+ obj-$(CONFIG_DRM_USB) += drm_usb.o
+ obj-$(CONFIG_DRM_TTM) += ttm/
++obj-$(CONFIG_DRM_VIVANTE) += vivante/
+ obj-$(CONFIG_DRM_TDFX) += tdfx/
+ obj-$(CONFIG_DRM_R128) += r128/
+ obj-$(CONFIG_DRM_RADEON)+= radeon/
+diff -Nur linux-3.14.36/drivers/gpu/drm/vivante/Makefile linux-openelec/drivers/gpu/drm/vivante/Makefile
+--- linux-3.14.36/drivers/gpu/drm/vivante/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/gpu/drm/vivante/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,29 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++#
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++ccflags-y := -Iinclude/drm
++vivante-y := vivante_drv.o
++
++obj-$(CONFIG_DRM_VIVANTE) += vivante.o
+diff -Nur linux-3.14.36/drivers/gpu/drm/vivante/vivante_drv.c linux-openelec/drivers/gpu/drm/vivante/vivante_drv.c
+--- linux-3.14.36/drivers/gpu/drm/vivante/vivante_drv.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/gpu/drm/vivante/vivante_drv.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,108 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/* vivante_drv.c -- vivante driver -*- linux-c -*-
++ *
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Rickard E. (Rik) Faith <faith@valinux.com>
++ * Daryll Strauss <daryll@valinux.com>
++ * Gareth Hughes <gareth@valinux.com>
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++
++#include "drmP.h"
++#include "vivante_drv.h"
++
++#include "drm_pciids.h"
++
++static char platformdevicename[] = "Vivante GCCore";
++static struct platform_device *pplatformdev;
++
++static const struct file_operations viv_driver_fops = {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ .unlocked_ioctl = drm_ioctl,
++ .mmap = drm_mmap,
++ .poll = drm_poll,
++ .llseek = noop_llseek,
++};
++
++static struct drm_driver driver = {
++ .fops = &viv_driver_fops,
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = DRIVER_DATE,
++ .major = DRIVER_MAJOR,
++ .minor = DRIVER_MINOR,
++ .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int __init vivante_init(void)
++{
++ int retcode;
++
++ pplatformdev = platform_device_register_simple(platformdevicename,
++ -1, NULL, 0);
++ if (pplatformdev == NULL)
++ printk(KERN_ERR"Platform device is null\n");
++
++ retcode = drm_platform_init(&driver, pplatformdev);
++
++ return retcode;
++}
++
++static void __exit vivante_exit(void)
++{
++ if (pplatformdev) {
++ platform_device_unregister(pplatformdev);
++ pplatformdev = NULL;
++ }
++}
++
++module_init(vivante_init);
++module_exit(vivante_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nur linux-3.14.36/drivers/gpu/drm/vivante/vivante_drv.h linux-openelec/drivers/gpu/drm/vivante/vivante_drv.h
+--- linux-3.14.36/drivers/gpu/drm/vivante/vivante_drv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/gpu/drm/vivante/vivante_drv.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,66 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/* vivante_drv.h -- Vivante DRM template customization -*- linux-c -*-
++ * Created: Wed Feb 14 12:32:32 2012 by John Zhao
++ */
++/*
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __VIVANTE_DRV_H__
++#define __VIVANTE_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR "Vivante Inc."
++
++#define DRIVER_NAME "vivante"
++#define DRIVER_DESC "Vivante GCCore"
++#define DRIVER_DATE "20120216"
++
++#define DRIVER_MAJOR 1
++#define DRIVER_MINOR 0
++#define DRIVER_PATCHLEVEL 0
++
++#endif
+diff -Nur linux-3.14.36/drivers/hid/hid-core.c linux-openelec/drivers/hid/hid-core.c
+--- linux-3.14.36/drivers/hid/hid-core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/hid/hid-core.c 2015-07-24 18:03:30.048842002 -0500
+@@ -1816,7 +1816,11 @@
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_OUYA, USB_DEVICE_ID_OUYA_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+ #if IS_ENABLED(CONFIG_HID_ROCCAT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+@@ -1840,6 +1844,7 @@
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
+@@ -1863,6 +1868,7 @@
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+diff -Nur linux-3.14.36/drivers/hid/hid-core.c.orig linux-openelec/drivers/hid/hid-core.c.orig
+--- linux-3.14.36/drivers/hid/hid-core.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hid/hid-core.c.orig 2015-07-24 18:03:29.976842002 -0500
+@@ -0,0 +1,2642 @@
++/*
++ * HID support for Linux
++ *
++ * Copyright (c) 1999 Andreas Gal
++ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
++ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
++ * Copyright (c) 2006-2012 Jiri Kosina
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/mm.h>
++#include <linux/spinlock.h>
++#include <asm/unaligned.h>
++#include <asm/byteorder.h>
++#include <linux/input.h>
++#include <linux/wait.h>
++#include <linux/vmalloc.h>
++#include <linux/sched.h>
++#include <linux/semaphore.h>
++
++#include <linux/hid.h>
++#include <linux/hiddev.h>
++#include <linux/hid-debug.h>
++#include <linux/hidraw.h>
++
++#include "hid-ids.h"
++
++/*
++ * Version Information
++ */
++
++#define DRIVER_DESC "HID core driver"
++#define DRIVER_LICENSE "GPL"
++
++int hid_debug = 0;
++module_param_named(debug, hid_debug, int, 0600);
++MODULE_PARM_DESC(debug, "toggle HID debugging messages");
++EXPORT_SYMBOL_GPL(hid_debug);
++
++static int hid_ignore_special_drivers = 0;
++module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
++MODULE_PARM_DESC(debug, "Ignore any special drivers and handle all devices by generic driver");
++
++/*
++ * Register a new report for a device.
++ */
++
++struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id)
++{
++ struct hid_report_enum *report_enum = device->report_enum + type;
++ struct hid_report *report;
++
++ if (id >= HID_MAX_IDS)
++ return NULL;
++ if (report_enum->report_id_hash[id])
++ return report_enum->report_id_hash[id];
++
++ report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
++ if (!report)
++ return NULL;
++
++ if (id != 0)
++ report_enum->numbered = 1;
++
++ report->id = id;
++ report->type = type;
++ report->size = 0;
++ report->device = device;
++ report_enum->report_id_hash[id] = report;
++
++ list_add_tail(&report->list, &report_enum->report_list);
++
++ return report;
++}
++EXPORT_SYMBOL_GPL(hid_register_report);
++
++/*
++ * Register a new field for this report.
++ */
++
++static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
++{
++ struct hid_field *field;
++
++ if (report->maxfield == HID_MAX_FIELDS) {
++ hid_err(report->device, "too many fields in report\n");
++ return NULL;
++ }
++
++ field = kzalloc((sizeof(struct hid_field) +
++ usages * sizeof(struct hid_usage) +
++ values * sizeof(unsigned)), GFP_KERNEL);
++ if (!field)
++ return NULL;
++
++ field->index = report->maxfield++;
++ report->field[field->index] = field;
++ field->usage = (struct hid_usage *)(field + 1);
++ field->value = (s32 *)(field->usage + usages);
++ field->report = report;
++
++ return field;
++}
++
++/*
++ * Open a collection. The type/usage is pushed on the stack.
++ */
++
++static int open_collection(struct hid_parser *parser, unsigned type)
++{
++ struct hid_collection *collection;
++ unsigned usage;
++
++ usage = parser->local.usage[0];
++
++ if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
++ hid_err(parser->device, "collection stack overflow\n");
++ return -EINVAL;
++ }
++
++ if (parser->device->maxcollection == parser->device->collection_size) {
++ collection = kmalloc(sizeof(struct hid_collection) *
++ parser->device->collection_size * 2, GFP_KERNEL);
++ if (collection == NULL) {
++ hid_err(parser->device, "failed to reallocate collection array\n");
++ return -ENOMEM;
++ }
++ memcpy(collection, parser->device->collection,
++ sizeof(struct hid_collection) *
++ parser->device->collection_size);
++ memset(collection + parser->device->collection_size, 0,
++ sizeof(struct hid_collection) *
++ parser->device->collection_size);
++ kfree(parser->device->collection);
++ parser->device->collection = collection;
++ parser->device->collection_size *= 2;
++ }
++
++ parser->collection_stack[parser->collection_stack_ptr++] =
++ parser->device->maxcollection;
++
++ collection = parser->device->collection +
++ parser->device->maxcollection++;
++ collection->type = type;
++ collection->usage = usage;
++ collection->level = parser->collection_stack_ptr - 1;
++
++ if (type == HID_COLLECTION_APPLICATION)
++ parser->device->maxapplication++;
++
++ return 0;
++}
++
++/*
++ * Close a collection.
++ */
++
++static int close_collection(struct hid_parser *parser)
++{
++ if (!parser->collection_stack_ptr) {
++ hid_err(parser->device, "collection stack underflow\n");
++ return -EINVAL;
++ }
++ parser->collection_stack_ptr--;
++ return 0;
++}
++
++/*
++ * Climb up the stack, search for the specified collection type
++ * and return the usage.
++ */
++
++static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
++{
++ struct hid_collection *collection = parser->device->collection;
++ int n;
++
++ for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
++ unsigned index = parser->collection_stack[n];
++ if (collection[index].type == type)
++ return collection[index].usage;
++ }
++ return 0; /* we know nothing about this usage type */
++}
++
++/*
++ * Add a usage to the temporary parser table.
++ */
++
++static int hid_add_usage(struct hid_parser *parser, unsigned usage)
++{
++ if (parser->local.usage_index >= HID_MAX_USAGES) {
++ hid_err(parser->device, "usage index exceeded\n");
++ return -1;
++ }
++ parser->local.usage[parser->local.usage_index] = usage;
++ parser->local.collection_index[parser->local.usage_index] =
++ parser->collection_stack_ptr ?
++ parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
++ parser->local.usage_index++;
++ return 0;
++}
++
++/*
++ * Register a new field for this report.
++ */
++
++static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
++{
++ struct hid_report *report;
++ struct hid_field *field;
++ unsigned usages;
++ unsigned offset;
++ unsigned i;
++
++ report = hid_register_report(parser->device, report_type, parser->global.report_id);
++ if (!report) {
++ hid_err(parser->device, "hid_register_report failed\n");
++ return -1;
++ }
++
++ /* Handle both signed and unsigned cases properly */
++ if ((parser->global.logical_minimum < 0 &&
++ parser->global.logical_maximum <
++ parser->global.logical_minimum) ||
++ (parser->global.logical_minimum >= 0 &&
++ (__u32)parser->global.logical_maximum <
++ (__u32)parser->global.logical_minimum)) {
++ dbg_hid("logical range invalid 0x%x 0x%x\n",
++ parser->global.logical_minimum,
++ parser->global.logical_maximum);
++ return -1;
++ }
++
++ offset = report->size;
++ report->size += parser->global.report_size * parser->global.report_count;
++
++ if (!parser->local.usage_index) /* Ignore padding fields */
++ return 0;
++
++ usages = max_t(unsigned, parser->local.usage_index,
++ parser->global.report_count);
++
++ field = hid_register_field(report, usages, parser->global.report_count);
++ if (!field)
++ return 0;
++
++ field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
++ field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
++ field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
++
++ for (i = 0; i < usages; i++) {
++ unsigned j = i;
++ /* Duplicate the last usage we parsed if we have excess values */
++ if (i >= parser->local.usage_index)
++ j = parser->local.usage_index - 1;
++ field->usage[i].hid = parser->local.usage[j];
++ field->usage[i].collection_index =
++ parser->local.collection_index[j];
++ field->usage[i].usage_index = i;
++ }
++
++ field->maxusage = usages;
++ field->flags = flags;
++ field->report_offset = offset;
++ field->report_type = report_type;
++ field->report_size = parser->global.report_size;
++ field->report_count = parser->global.report_count;
++ field->logical_minimum = parser->global.logical_minimum;
++ field->logical_maximum = parser->global.logical_maximum;
++ field->physical_minimum = parser->global.physical_minimum;
++ field->physical_maximum = parser->global.physical_maximum;
++ field->unit_exponent = parser->global.unit_exponent;
++ field->unit = parser->global.unit;
++
++ return 0;
++}
++
++/*
++ * Read data value from item.
++ */
++
++static u32 item_udata(struct hid_item *item)
++{
++ switch (item->size) {
++ case 1: return item->data.u8;
++ case 2: return item->data.u16;
++ case 4: return item->data.u32;
++ }
++ return 0;
++}
++
++static s32 item_sdata(struct hid_item *item)
++{
++ switch (item->size) {
++ case 1: return item->data.s8;
++ case 2: return item->data.s16;
++ case 4: return item->data.s32;
++ }
++ return 0;
++}
++
++/*
++ * Process a global item.
++ */
++
++static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
++{
++ __s32 raw_value;
++ switch (item->tag) {
++ case HID_GLOBAL_ITEM_TAG_PUSH:
++
++ if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
++ hid_err(parser->device, "global environment stack overflow\n");
++ return -1;
++ }
++
++ memcpy(parser->global_stack + parser->global_stack_ptr++,
++ &parser->global, sizeof(struct hid_global));
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_POP:
++
++ if (!parser->global_stack_ptr) {
++ hid_err(parser->device, "global environment stack underflow\n");
++ return -1;
++ }
++
++ memcpy(&parser->global, parser->global_stack +
++ --parser->global_stack_ptr, sizeof(struct hid_global));
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
++ parser->global.usage_page = item_udata(item);
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
++ parser->global.logical_minimum = item_sdata(item);
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
++ if (parser->global.logical_minimum < 0)
++ parser->global.logical_maximum = item_sdata(item);
++ else
++ parser->global.logical_maximum = item_udata(item);
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
++ parser->global.physical_minimum = item_sdata(item);
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
++ if (parser->global.physical_minimum < 0)
++ parser->global.physical_maximum = item_sdata(item);
++ else
++ parser->global.physical_maximum = item_udata(item);
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
++ /* Many devices provide unit exponent as a two's complement
++ * nibble due to the common misunderstanding of HID
++ * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
++ * both this and the standard encoding. */
++ raw_value = item_sdata(item);
++ if (!(raw_value & 0xfffffff0))
++ parser->global.unit_exponent = hid_snto32(raw_value, 4);
++ else
++ parser->global.unit_exponent = raw_value;
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_UNIT:
++ parser->global.unit = item_udata(item);
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
++ parser->global.report_size = item_udata(item);
++ if (parser->global.report_size > 128) {
++ hid_err(parser->device, "invalid report_size %d\n",
++ parser->global.report_size);
++ return -1;
++ }
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
++ parser->global.report_count = item_udata(item);
++ if (parser->global.report_count > HID_MAX_USAGES) {
++ hid_err(parser->device, "invalid report_count %d\n",
++ parser->global.report_count);
++ return -1;
++ }
++ return 0;
++
++ case HID_GLOBAL_ITEM_TAG_REPORT_ID:
++ parser->global.report_id = item_udata(item);
++ if (parser->global.report_id == 0 ||
++ parser->global.report_id >= HID_MAX_IDS) {
++ hid_err(parser->device, "report_id %u is invalid\n",
++ parser->global.report_id);
++ return -1;
++ }
++ return 0;
++
++ default:
++ hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
++ return -1;
++ }
++}
++
++/*
++ * Process a local item.
++ */
++
++static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
++{
++ __u32 data;
++ unsigned n;
++
++ data = item_udata(item);
++
++ switch (item->tag) {
++ case HID_LOCAL_ITEM_TAG_DELIMITER:
++
++ if (data) {
++ /*
++ * We treat items before the first delimiter
++ * as global to all usage sets (branch 0).
++ * In the moment we process only these global
++ * items and the first delimiter set.
++ */
++ if (parser->local.delimiter_depth != 0) {
++ hid_err(parser->device, "nested delimiters\n");
++ return -1;
++ }
++ parser->local.delimiter_depth++;
++ parser->local.delimiter_branch++;
++ } else {
++ if (parser->local.delimiter_depth < 1) {
++ hid_err(parser->device, "bogus close delimiter\n");
++ return -1;
++ }
++ parser->local.delimiter_depth--;
++ }
++ return 0;
++
++ case HID_LOCAL_ITEM_TAG_USAGE:
++
++ if (parser->local.delimiter_branch > 1) {
++ dbg_hid("alternative usage ignored\n");
++ return 0;
++ }
++
++ if (item->size <= 2)
++ data = (parser->global.usage_page << 16) + data;
++
++ return hid_add_usage(parser, data);
++
++ case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
++
++ if (parser->local.delimiter_branch > 1) {
++ dbg_hid("alternative usage ignored\n");
++ return 0;
++ }
++
++ if (item->size <= 2)
++ data = (parser->global.usage_page << 16) + data;
++
++ parser->local.usage_minimum = data;
++ return 0;
++
++ case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
++
++ if (parser->local.delimiter_branch > 1) {
++ dbg_hid("alternative usage ignored\n");
++ return 0;
++ }
++
++ if (item->size <= 2)
++ data = (parser->global.usage_page << 16) + data;
++
++ for (n = parser->local.usage_minimum; n <= data; n++)
++ if (hid_add_usage(parser, n)) {
++ dbg_hid("hid_add_usage failed\n");
++ return -1;
++ }
++ return 0;
++
++ default:
++
++ dbg_hid("unknown local item tag 0x%x\n", item->tag);
++ return 0;
++ }
++ return 0;
++}
++
++/*
++ * Process a main item.
++ */
++
++static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
++{
++ __u32 data;
++ int ret;
++
++ data = item_udata(item);
++
++ switch (item->tag) {
++ case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
++ ret = open_collection(parser, data & 0xff);
++ break;
++ case HID_MAIN_ITEM_TAG_END_COLLECTION:
++ ret = close_collection(parser);
++ break;
++ case HID_MAIN_ITEM_TAG_INPUT:
++ ret = hid_add_field(parser, HID_INPUT_REPORT, data);
++ break;
++ case HID_MAIN_ITEM_TAG_OUTPUT:
++ ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
++ break;
++ case HID_MAIN_ITEM_TAG_FEATURE:
++ ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
++ break;
++ default:
++ hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
++ ret = 0;
++ }
++
++ memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */
++
++ return ret;
++}
++
++/*
++ * Process a reserved item.
++ */
++
++static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
++{
++ dbg_hid("reserved item type, tag 0x%x\n", item->tag);
++ return 0;
++}
++
++/*
++ * Free a report and all registered fields. The field->usage and
++ * field->value table's are allocated behind the field, so we need
++ * only to free(field) itself.
++ */
++
++static void hid_free_report(struct hid_report *report)
++{
++ unsigned n;
++
++ for (n = 0; n < report->maxfield; n++)
++ kfree(report->field[n]);
++ kfree(report);
++}
++
++/*
++ * Close report. This function returns the device
++ * state to the point prior to hid_open_report().
++ */
++static void hid_close_report(struct hid_device *device)
++{
++ unsigned i, j;
++
++ for (i = 0; i < HID_REPORT_TYPES; i++) {
++ struct hid_report_enum *report_enum = device->report_enum + i;
++
++ for (j = 0; j < HID_MAX_IDS; j++) {
++ struct hid_report *report = report_enum->report_id_hash[j];
++ if (report)
++ hid_free_report(report);
++ }
++ memset(report_enum, 0, sizeof(*report_enum));
++ INIT_LIST_HEAD(&report_enum->report_list);
++ }
++
++ kfree(device->rdesc);
++ device->rdesc = NULL;
++ device->rsize = 0;
++
++ kfree(device->collection);
++ device->collection = NULL;
++ device->collection_size = 0;
++ device->maxcollection = 0;
++ device->maxapplication = 0;
++
++ device->status &= ~HID_STAT_PARSED;
++}
++
++/*
++ * Free a device structure, all reports, and all fields.
++ */
++
++static void hid_device_release(struct device *dev)
++{
++ struct hid_device *hid = container_of(dev, struct hid_device, dev);
++
++ hid_close_report(hid);
++ kfree(hid->dev_rdesc);
++ kfree(hid);
++}
++
++/*
++ * Fetch a report description item from the data stream. We support long
++ * items, though they are not used yet.
++ */
++
++static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
++{
++ u8 b;
++
++ if ((end - start) <= 0)
++ return NULL;
++
++ b = *start++;
++
++ item->type = (b >> 2) & 3;
++ item->tag = (b >> 4) & 15;
++
++ if (item->tag == HID_ITEM_TAG_LONG) {
++
++ item->format = HID_ITEM_FORMAT_LONG;
++
++ if ((end - start) < 2)
++ return NULL;
++
++ item->size = *start++;
++ item->tag = *start++;
++
++ if ((end - start) < item->size)
++ return NULL;
++
++ item->data.longdata = start;
++ start += item->size;
++ return start;
++ }
++
++ item->format = HID_ITEM_FORMAT_SHORT;
++ item->size = b & 3;
++
++ switch (item->size) {
++ case 0:
++ return start;
++
++ case 1:
++ if ((end - start) < 1)
++ return NULL;
++ item->data.u8 = *start++;
++ return start;
++
++ case 2:
++ if ((end - start) < 2)
++ return NULL;
++ item->data.u16 = get_unaligned_le16(start);
++ start = (__u8 *)((__le16 *)start + 1);
++ return start;
++
++ case 3:
++ item->size++;
++ if ((end - start) < 4)
++ return NULL;
++ item->data.u32 = get_unaligned_le32(start);
++ start = (__u8 *)((__le32 *)start + 1);
++ return start;
++ }
++
++ return NULL;
++}
++
++static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
++{
++ struct hid_device *hid = parser->device;
++
++ if (usage == HID_DG_CONTACTID)
++ hid->group = HID_GROUP_MULTITOUCH;
++}
++
++static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
++{
++ if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
++ parser->global.report_size == 8)
++ parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
++}
++
++static void hid_scan_collection(struct hid_parser *parser, unsigned type)
++{
++ struct hid_device *hid = parser->device;
++
++ if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
++ type == HID_COLLECTION_PHYSICAL)
++ hid->group = HID_GROUP_SENSOR_HUB;
++}
++
++static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
++{
++ __u32 data;
++ int i;
++
++ data = item_udata(item);
++
++ switch (item->tag) {
++ case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
++ hid_scan_collection(parser, data & 0xff);
++ break;
++ case HID_MAIN_ITEM_TAG_END_COLLECTION:
++ break;
++ case HID_MAIN_ITEM_TAG_INPUT:
++ /* ignore constant inputs, they will be ignored by hid-input */
++ if (data & HID_MAIN_ITEM_CONSTANT)
++ break;
++ for (i = 0; i < parser->local.usage_index; i++)
++ hid_scan_input_usage(parser, parser->local.usage[i]);
++ break;
++ case HID_MAIN_ITEM_TAG_OUTPUT:
++ break;
++ case HID_MAIN_ITEM_TAG_FEATURE:
++ for (i = 0; i < parser->local.usage_index; i++)
++ hid_scan_feature_usage(parser, parser->local.usage[i]);
++ break;
++ }
++
++ /* Reset the local parser environment */
++ memset(&parser->local, 0, sizeof(parser->local));
++
++ return 0;
++}
++
++/*
++ * Scan a report descriptor before the device is added to the bus.
++ * Sets device groups and other properties that determine what driver
++ * to load.
++ */
++static int hid_scan_report(struct hid_device *hid)
++{
++ struct hid_parser *parser;
++ struct hid_item item;
++ __u8 *start = hid->dev_rdesc;
++ __u8 *end = start + hid->dev_rsize;
++ static int (*dispatch_type[])(struct hid_parser *parser,
++ struct hid_item *item) = {
++ hid_scan_main,
++ hid_parser_global,
++ hid_parser_local,
++ hid_parser_reserved
++ };
++
++ parser = vzalloc(sizeof(struct hid_parser));
++ if (!parser)
++ return -ENOMEM;
++
++ parser->device = hid;
++ hid->group = HID_GROUP_GENERIC;
++
++ /*
++ * The parsing is simpler than the one in hid_open_report() as we should
++ * be robust against hid errors. Those errors will be raised by
++ * hid_open_report() anyway.
++ */
++ while ((start = fetch_item(start, end, &item)) != NULL)
++ dispatch_type[item.type](parser, &item);
++
++ /*
++ * Handle special flags set during scanning.
++ */
++ if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
++ (hid->group == HID_GROUP_MULTITOUCH))
++ hid->group = HID_GROUP_MULTITOUCH_WIN_8;
++
++ vfree(parser);
++ return 0;
++}
++
++/**
++ * hid_parse_report - parse device report
++ *
++ * @device: hid device
++ * @start: report start
++ * @size: report size
++ *
++ * Allocate the device report as read by the bus driver. This function should
++ * only be called from parse() in ll drivers.
++ */
++int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
++{
++ hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
++ if (!hid->dev_rdesc)
++ return -ENOMEM;
++ hid->dev_rsize = size;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(hid_parse_report);
++
++static const char * const hid_report_names[] = {
++ "HID_INPUT_REPORT",
++ "HID_OUTPUT_REPORT",
++ "HID_FEATURE_REPORT",
++};
++/**
++ * hid_validate_values - validate existing device report's value indexes
++ *
++ * @device: hid device
++ * @type: which report type to examine
++ * @id: which report ID to examine (0 for first)
++ * @field_index: which report field to examine
++ * @report_counts: expected number of values
++ *
++ * Validate the number of values in a given field of a given report, after
++ * parsing.
++ */
++struct hid_report *hid_validate_values(struct hid_device *hid,
++ unsigned int type, unsigned int id,
++ unsigned int field_index,
++ unsigned int report_counts)
++{
++ struct hid_report *report;
++
++ if (type > HID_FEATURE_REPORT) {
++ hid_err(hid, "invalid HID report type %u\n", type);
++ return NULL;
++ }
++
++ if (id >= HID_MAX_IDS) {
++ hid_err(hid, "invalid HID report id %u\n", id);
++ return NULL;
++ }
++
++ /*
++ * Explicitly not using hid_get_report() here since it depends on
++ * ->numbered being checked, which may not always be the case when
++ * drivers go to access report values.
++ */
++ if (id == 0) {
++ /*
++ * Validating on id 0 means we should examine the first
++ * report in the list.
++ */
++ report = list_entry(
++ hid->report_enum[type].report_list.next,
++ struct hid_report, list);
++ } else {
++ report = hid->report_enum[type].report_id_hash[id];
++ }
++ if (!report) {
++ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
++ return NULL;
++ }
++ if (report->maxfield <= field_index) {
++ hid_err(hid, "not enough fields in %s %u\n",
++ hid_report_names[type], id);
++ return NULL;
++ }
++ if (report->field[field_index]->report_count < report_counts) {
++ hid_err(hid, "not enough values in %s %u field %u\n",
++ hid_report_names[type], id, field_index);
++ return NULL;
++ }
++ return report;
++}
++EXPORT_SYMBOL_GPL(hid_validate_values);
++
++/**
++ * hid_open_report - open a driver-specific device report
++ *
++ * @device: hid device
++ *
++ * Parse a report description into a hid_device structure. Reports are
++ * enumerated, fields are attached to these reports.
++ * 0 returned on success, otherwise nonzero error value.
++ *
++ * This function (or the equivalent hid_parse() macro) should only be
++ * called from probe() in drivers, before starting the device.
++ */
++int hid_open_report(struct hid_device *device)
++{
++ struct hid_parser *parser;
++ struct hid_item item;
++ unsigned int size;
++ __u8 *start;
++ __u8 *buf;
++ __u8 *end;
++ int ret;
++ static int (*dispatch_type[])(struct hid_parser *parser,
++ struct hid_item *item) = {
++ hid_parser_main,
++ hid_parser_global,
++ hid_parser_local,
++ hid_parser_reserved
++ };
++
++ if (WARN_ON(device->status & HID_STAT_PARSED))
++ return -EBUSY;
++
++ start = device->dev_rdesc;
++ if (WARN_ON(!start))
++ return -ENODEV;
++ size = device->dev_rsize;
++
++ buf = kmemdup(start, size, GFP_KERNEL);
++ if (buf == NULL)
++ return -ENOMEM;
++
++ if (device->driver->report_fixup)
++ start = device->driver->report_fixup(device, buf, &size);
++ else
++ start = buf;
++
++ start = kmemdup(start, size, GFP_KERNEL);
++ kfree(buf);
++ if (start == NULL)
++ return -ENOMEM;
++
++ device->rdesc = start;
++ device->rsize = size;
++
++ parser = vzalloc(sizeof(struct hid_parser));
++ if (!parser) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ parser->device = device;
++
++ end = start + size;
++
++ device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
++ sizeof(struct hid_collection), GFP_KERNEL);
++ if (!device->collection) {
++ ret = -ENOMEM;
++ goto err;
++ }
++ device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
++
++ ret = -EINVAL;
++ while ((start = fetch_item(start, end, &item)) != NULL) {
++
++ if (item.format != HID_ITEM_FORMAT_SHORT) {
++ hid_err(device, "unexpected long global item\n");
++ goto err;
++ }
++
++ if (dispatch_type[item.type](parser, &item)) {
++ hid_err(device, "item %u %u %u %u parsing failed\n",
++ item.format, (unsigned)item.size,
++ (unsigned)item.type, (unsigned)item.tag);
++ goto err;
++ }
++
++ if (start == end) {
++ if (parser->collection_stack_ptr) {
++ hid_err(device, "unbalanced collection at end of report description\n");
++ goto err;
++ }
++ if (parser->local.delimiter_depth) {
++ hid_err(device, "unbalanced delimiter at end of report description\n");
++ goto err;
++ }
++ vfree(parser);
++ device->status |= HID_STAT_PARSED;
++ return 0;
++ }
++ }
++
++ hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
++err:
++ vfree(parser);
++ hid_close_report(device);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(hid_open_report);
++
++/*
++ * Convert a signed n-bit integer to a signed 32-bit integer. Common
++ * cases are done through the compiler; the remaining ones have to be
++ * done by hand.
++ */
++
++static s32 snto32(__u32 value, unsigned n)
++{
++ switch (n) {
++ case 8: return ((__s8)value);
++ case 16: return ((__s16)value);
++ case 32: return ((__s32)value);
++ }
++ return value & (1 << (n - 1)) ? value | (-1 << n) : value;
++}
++
++s32 hid_snto32(__u32 value, unsigned n)
++{
++ return snto32(value, n);
++}
++EXPORT_SYMBOL_GPL(hid_snto32);
++
++/*
++ * Convert a signed 32-bit integer to a signed n-bit integer.
++ */
++
++static u32 s32ton(__s32 value, unsigned n)
++{
++ s32 a = value >> (n - 1);
++ if (a && a != -1)
++ return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
++ return value & ((1 << n) - 1);
++}
++
++/*
++ * Extract/implement a data field from/to a little endian report (bit array).
++ *
++ * Code sort-of follows HID spec:
++ * http://www.usb.org/developers/devclass_docs/HID1_11.pdf
++ *
++ * While the USB HID spec allows unlimited length bit fields in "report
++ * descriptors", most devices never use more than 16 bits.
++ * One model of UPS is claimed to report "LINEV" as a 32-bit field.
++ * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
++ */
++
++static __u32 extract(const struct hid_device *hid, __u8 *report,
++ unsigned offset, unsigned n)
++{
++ u64 x;
++
++ if (n > 32)
++ hid_warn(hid, "extract() called with n (%d) > 32! (%s)\n",
++ n, current->comm);
++
++ report += offset >> 3; /* adjust byte index */
++ offset &= 7; /* now only need bit offset into one byte */
++ x = get_unaligned_le64(report);
++ x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */
++ return (u32) x;
++}
++
++/*
++ * "implement" : set bits in a little endian bit stream.
++ * Same concepts as "extract" (see comments above).
++ * The data mangled in the bit stream remains in little endian
++ * order the whole time. It makes more sense to talk about
++ * endianness of register values by considering a register
++ * a "cached" copy of the little endian bit stream.
++ */
++static void implement(const struct hid_device *hid, __u8 *report,
++ unsigned offset, unsigned n, __u32 value)
++{
++ u64 x;
++ u64 m = (1ULL << n) - 1;
++
++ if (n > 32)
++ hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
++ __func__, n, current->comm);
++
++ if (value > m)
++ hid_warn(hid, "%s() called with too large value %d! (%s)\n",
++ __func__, value, current->comm);
++ WARN_ON(value > m);
++ value &= m;
++
++ report += offset >> 3;
++ offset &= 7;
++
++ x = get_unaligned_le64(report);
++ x &= ~(m << offset);
++ x |= ((u64)value) << offset;
++ put_unaligned_le64(x, report);
++}
++
++/*
++ * Search an array for a value.
++ */
++
++static int search(__s32 *array, __s32 value, unsigned n)
++{
++ while (n--) {
++ if (*array++ == value)
++ return 0;
++ }
++ return -1;
++}
++
++/**
++ * hid_match_report - check if driver's raw_event should be called
++ *
++ * @hid: hid device
++ * @report: report whose type is matched against the driver's report_table
++ *
++ * compare hid->driver->report_table->report_type to report->type
++ */
++static int hid_match_report(struct hid_device *hid, struct hid_report *report)
++{
++ const struct hid_report_id *id = hid->driver->report_table;
++
++ if (!id) /* NULL means all */
++ return 1;
++
++ for (; id->report_type != HID_TERMINATOR; id++)
++ if (id->report_type == HID_ANY_ID ||
++ id->report_type == report->type)
++ return 1;
++ return 0;
++}
++
++/**
++ * hid_match_usage - check if driver's event should be called
++ *
++ * @hid: hid device
++ * @usage: usage to match against
++ *
++ * compare hid->driver->usage_table->usage_{type,code} to
++ * usage->usage_{type,code}
++ */
++static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
++{
++ const struct hid_usage_id *id = hid->driver->usage_table;
++
++ if (!id) /* NULL means all */
++ return 1;
++
++ for (; id->usage_type != HID_ANY_ID - 1; id++)
++ if ((id->usage_hid == HID_ANY_ID ||
++ id->usage_hid == usage->hid) &&
++ (id->usage_type == HID_ANY_ID ||
++ id->usage_type == usage->type) &&
++ (id->usage_code == HID_ANY_ID ||
++ id->usage_code == usage->code))
++ return 1;
++ return 0;
++}
++
++static void hid_process_event(struct hid_device *hid, struct hid_field *field,
++ struct hid_usage *usage, __s32 value, int interrupt)
++{
++ struct hid_driver *hdrv = hid->driver;
++ int ret;
++
++ if (!list_empty(&hid->debug_list))
++ hid_dump_input(hid, usage, value);
++
++ if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
++ ret = hdrv->event(hid, field, usage, value);
++ if (ret != 0) {
++ if (ret < 0)
++ hid_err(hid, "%s's event failed with %d\n",
++ hdrv->name, ret);
++ return;
++ }
++ }
++
++ if (hid->claimed & HID_CLAIMED_INPUT)
++ hidinput_hid_event(hid, field, usage, value);
++ if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
++ hid->hiddev_hid_event(hid, field, usage, value);
++}
++
++/*
++ * Analyse a received field, and fetch the data from it. The field
++ * content is stored for next report processing (we do differential
++ * reporting to the layer).
++ */
++
++static void hid_input_field(struct hid_device *hid, struct hid_field *field,
++ __u8 *data, int interrupt)
++{
++ unsigned n;
++ unsigned count = field->report_count;
++ unsigned offset = field->report_offset;
++ unsigned size = field->report_size;
++ __s32 min = field->logical_minimum;
++ __s32 max = field->logical_maximum;
++ __s32 *value;
++
++ value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC);
++ if (!value)
++ return;
++
++ for (n = 0; n < count; n++) {
++
++ value[n] = min < 0 ?
++ snto32(extract(hid, data, offset + n * size, size),
++ size) :
++ extract(hid, data, offset + n * size, size);
++
++ /* Ignore report if ErrorRollOver */
++ if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
++ value[n] >= min && value[n] <= max &&
++ field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
++ goto exit;
++ }
++
++ for (n = 0; n < count; n++) {
++
++ if (HID_MAIN_ITEM_VARIABLE & field->flags) {
++ hid_process_event(hid, field, &field->usage[n], value[n], interrupt);
++ continue;
++ }
++
++ if (field->value[n] >= min && field->value[n] <= max
++ && field->usage[field->value[n] - min].hid
++ && search(value, field->value[n], count))
++ hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
++
++ if (value[n] >= min && value[n] <= max
++ && field->usage[value[n] - min].hid
++ && search(field->value, value[n], count))
++ hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
++ }
++
++ memcpy(field->value, value, count * sizeof(__s32));
++exit:
++ kfree(value);
++}
++
++/*
++ * Output the field into the report.
++ */
++
++static void hid_output_field(const struct hid_device *hid,
++ struct hid_field *field, __u8 *data)
++{
++ unsigned count = field->report_count;
++ unsigned offset = field->report_offset;
++ unsigned size = field->report_size;
++ unsigned n;
++
++ for (n = 0; n < count; n++) {
++ if (field->logical_minimum < 0) /* signed values */
++ implement(hid, data, offset + n * size, size,
++ s32ton(field->value[n], size));
++ else /* unsigned values */
++ implement(hid, data, offset + n * size, size,
++ field->value[n]);
++ }
++}
++
++/*
++ * Create a report. 'data' has to be allocated using
++ * hid_alloc_report_buf() so that it has proper size.
++ */
++
++void hid_output_report(struct hid_report *report, __u8 *data)
++{
++ unsigned n;
++
++ if (report->id > 0)
++ *data++ = report->id;
++
++ memset(data, 0, ((report->size - 1) >> 3) + 1);
++ for (n = 0; n < report->maxfield; n++)
++ hid_output_field(report->device, report->field[n], data);
++}
++EXPORT_SYMBOL_GPL(hid_output_report);
++
++/*
++ * Allocator for buffer that is going to be passed to hid_output_report()
++ */
++u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
++{
++ /*
++ * 7 extra bytes are necessary to achieve proper functionality
++ * of implement() working on 8 byte chunks
++ */
++
++ int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
++
++ return kmalloc(len, flags);
++}
++EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
++
++/*
++ * Set a field value. The report this field belongs to has to be
++ * created and transferred to the device, to set this value in the
++ * device.
++ */
++
++int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
++{
++ unsigned size;
++
++ if (!field)
++ return -1;
++
++ size = field->report_size;
++
++ hid_dump_input(field->report->device, field->usage + offset, value);
++
++ if (offset >= field->report_count) {
++ hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
++ offset, field->report_count);
++ return -1;
++ }
++ if (field->logical_minimum < 0) {
++ if (value != snto32(s32ton(value, size), size)) {
++ hid_err(field->report->device, "value %d is out of range\n", value);
++ return -1;
++ }
++ }
++ field->value[offset] = value;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(hid_set_field);
++
++static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
++ const u8 *data)
++{
++ struct hid_report *report;
++ unsigned int n = 0; /* Normally report number is 0 */
++
++ /* Device uses numbered reports, data[0] is report number */
++ if (report_enum->numbered)
++ n = *data;
++
++ report = report_enum->report_id_hash[n];
++ if (report == NULL)
++ dbg_hid("undefined report_id %u received\n", n);
++
++ return report;
++}
++
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++ int interrupt)
++{
++ struct hid_report_enum *report_enum = hid->report_enum + type;
++ struct hid_report *report;
++ struct hid_driver *hdrv;
++ unsigned int a;
++ int rsize, csize = size;
++ u8 *cdata = data;
++ int ret = 0;
++
++ report = hid_get_report(report_enum, data);
++ if (!report)
++ goto out;
++
++ if (report_enum->numbered) {
++ cdata++;
++ csize--;
++ }
++
++ rsize = ((report->size - 1) >> 3) + 1;
++
++ if (rsize > HID_MAX_BUFFER_SIZE)
++ rsize = HID_MAX_BUFFER_SIZE;
++
++ if (csize < rsize) {
++ dbg_hid("report %d is too short, (%d < %d)\n", report->id,
++ csize, rsize);
++ memset(cdata + csize, 0, rsize - csize);
++ }
++
++ if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
++ hid->hiddev_report_event(hid, report);
++ if (hid->claimed & HID_CLAIMED_HIDRAW) {
++ ret = hidraw_report_event(hid, data, size);
++ if (ret)
++ goto out;
++ }
++
++ if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
++ for (a = 0; a < report->maxfield; a++)
++ hid_input_field(hid, report->field[a], cdata, interrupt);
++ hdrv = hid->driver;
++ if (hdrv && hdrv->report)
++ hdrv->report(hid, report);
++ }
++
++ if (hid->claimed & HID_CLAIMED_INPUT)
++ hidinput_report_event(hid, report);
++out:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(hid_report_raw_event);
++
++/**
++ * hid_input_report - report data from lower layer (usb, bt...)
++ *
++ * @hid: hid device
++ * @type: HID report type (HID_*_REPORT)
++ * @data: report contents
++ * @size: size of data parameter
++ * @interrupt: distinguish between interrupt and control transfers
++ *
++ * This is data entry for lower layers.
++ */
++int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
++{
++ struct hid_report_enum *report_enum;
++ struct hid_driver *hdrv;
++ struct hid_report *report;
++ int ret = 0;
++
++ if (!hid)
++ return -ENODEV;
++
++ if (down_trylock(&hid->driver_input_lock))
++ return -EBUSY;
++
++ if (!hid->driver) {
++ ret = -ENODEV;
++ goto unlock;
++ }
++ report_enum = hid->report_enum + type;
++ hdrv = hid->driver;
++
++ if (!size) {
++ dbg_hid("empty report\n");
++ ret = -1;
++ goto unlock;
++ }
++
++ /* Avoid unnecessary overhead if debugfs is disabled */
++ if (!list_empty(&hid->debug_list))
++ hid_dump_report(hid, type, data, size);
++
++ report = hid_get_report(report_enum, data);
++
++ if (!report) {
++ ret = -1;
++ goto unlock;
++ }
++
++ if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
++ ret = hdrv->raw_event(hid, report, data, size);
++ if (ret < 0)
++ goto unlock;
++ }
++
++ ret = hid_report_raw_event(hid, type, data, size, interrupt);
++
++unlock:
++ up(&hid->driver_input_lock);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(hid_input_report);
++
++static bool hid_match_one_id(struct hid_device *hdev,
++ const struct hid_device_id *id)
++{
++ return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
++ (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
++ (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
++ (id->product == HID_ANY_ID || id->product == hdev->product);
++}
++
++const struct hid_device_id *hid_match_id(struct hid_device *hdev,
++ const struct hid_device_id *id)
++{
++ for (; id->bus; id++)
++ if (hid_match_one_id(hdev, id))
++ return id;
++
++ return NULL;
++}
++
++static const struct hid_device_id hid_hiddev_list[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
++ { }
++};
++
++static bool hid_hiddev(struct hid_device *hdev)
++{
++ return !!hid_match_id(hdev, hid_hiddev_list);
++}
++
++
++static ssize_t
++read_report_descriptor(struct file *filp, struct kobject *kobj,
++ struct bin_attribute *attr,
++ char *buf, loff_t off, size_t count)
++{
++ struct device *dev = container_of(kobj, struct device, kobj);
++ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
++
++ if (off >= hdev->rsize)
++ return 0;
++
++ if (off + count > hdev->rsize)
++ count = hdev->rsize - off;
++
++ memcpy(buf, hdev->rdesc + off, count);
++
++ return count;
++}
++
++static struct bin_attribute dev_bin_attr_report_desc = {
++ .attr = { .name = "report_descriptor", .mode = 0444 },
++ .read = read_report_descriptor,
++ .size = HID_MAX_DESCRIPTOR_SIZE,
++};
++
++int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
++{
++ static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
++ "Joystick", "Gamepad", "Keyboard", "Keypad",
++ "Multi-Axis Controller"
++ };
++ const char *type, *bus;
++ char buf[64];
++ unsigned int i;
++ int len;
++ int ret;
++
++ if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
++ connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
++ if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
++ connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
++ if (hdev->bus != BUS_USB)
++ connect_mask &= ~HID_CONNECT_HIDDEV;
++ if (hid_hiddev(hdev))
++ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
++
++ if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
++ connect_mask & HID_CONNECT_HIDINPUT_FORCE))
++ hdev->claimed |= HID_CLAIMED_INPUT;
++
++ if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
++ !hdev->hiddev_connect(hdev,
++ connect_mask & HID_CONNECT_HIDDEV_FORCE))
++ hdev->claimed |= HID_CLAIMED_HIDDEV;
++ if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
++ hdev->claimed |= HID_CLAIMED_HIDRAW;
++
++ /* Drivers with the ->raw_event callback set are not required to connect
++ * to any other listener. */
++ if (!hdev->claimed && !hdev->driver->raw_event) {
++ hid_err(hdev, "device has no listeners, quitting\n");
++ return -ENODEV;
++ }
++
++ if ((hdev->claimed & HID_CLAIMED_INPUT) &&
++ (connect_mask & HID_CONNECT_FF) && hdev->ff_init)
++ hdev->ff_init(hdev);
++
++ len = 0;
++ if (hdev->claimed & HID_CLAIMED_INPUT)
++ len += sprintf(buf + len, "input");
++ if (hdev->claimed & HID_CLAIMED_HIDDEV)
++ len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
++ hdev->minor);
++ if (hdev->claimed & HID_CLAIMED_HIDRAW)
++ len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
++ ((struct hidraw *)hdev->hidraw)->minor);
++
++ type = "Device";
++ for (i = 0; i < hdev->maxcollection; i++) {
++ struct hid_collection *col = &hdev->collection[i];
++ if (col->type == HID_COLLECTION_APPLICATION &&
++ (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
++ (col->usage & 0xffff) < ARRAY_SIZE(types)) {
++ type = types[col->usage & 0xffff];
++ break;
++ }
++ }
++
++ switch (hdev->bus) {
++ case BUS_USB:
++ bus = "USB";
++ break;
++ case BUS_BLUETOOTH:
++ bus = "BLUETOOTH";
++ break;
++ default:
++ bus = "<UNKNOWN>";
++ }
++
++ ret = device_create_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
++ if (ret)
++ hid_warn(hdev,
++ "can't create sysfs report descriptor attribute err: %d\n", ret);
++
++ hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
++ buf, bus, hdev->version >> 8, hdev->version & 0xff,
++ type, hdev->name, hdev->phys);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(hid_connect);
++
++void hid_disconnect(struct hid_device *hdev)
++{
++ device_remove_bin_file(&hdev->dev, &dev_bin_attr_report_desc);
++ if (hdev->claimed & HID_CLAIMED_INPUT)
++ hidinput_disconnect(hdev);
++ if (hdev->claimed & HID_CLAIMED_HIDDEV)
++ hdev->hiddev_disconnect(hdev);
++ if (hdev->claimed & HID_CLAIMED_HIDRAW)
++ hidraw_disconnect(hdev);
++}
++EXPORT_SYMBOL_GPL(hid_disconnect);
++
++/*
++ * A list of devices for which there is a specialized driver on HID bus.
++ *
++ * Please note that for multitouch devices (driven by hid-multitouch driver),
++ * there is a proper autodetection and autoloading in place (based on presence
++ * of HID_DG_CONTACTID), so those devices don't need to be added to this list,
++ * as we are doing the right thing in hid_scan_usage().
++ *
++ * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
++ * physical is found inside a usage page of type sensor, hid-sensor-hub will be
++ * used as a driver. See hid_scan_report().
++ */
++static const struct hid_device_id hid_have_special_driver[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
++#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD)
++ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
++#endif
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
++#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
++#endif
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_OUYA, USB_DEVICE_ID_OUYA_CONTROLLER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
++#if IS_ENABLED(CONFIG_HID_ROCCAT)
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
++#endif
++ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
++
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
++ { }
++};
++
++struct hid_dynid {
++ struct list_head list;
++ struct hid_device_id id;
++};
++
++/**
++ * store_new_id - add a new HID device ID to this driver and re-probe devices
++ * @driver: target device driver
++ * @buf: buffer for scanning device ID data
++ * @count: input size
++ *
++ * Adds a new dynamic hid device ID to this driver,
++ * and causes the driver to probe for all devices again.
++ */
++static ssize_t store_new_id(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ struct hid_driver *hdrv = container_of(drv, struct hid_driver, driver);
++ struct hid_dynid *dynid;
++ __u32 bus, vendor, product;
++ unsigned long driver_data = 0;
++ int ret;
++
++ ret = sscanf(buf, "%x %x %x %lx",
++ &bus, &vendor, &product, &driver_data);
++ if (ret < 3)
++ return -EINVAL;
++
++ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
++ if (!dynid)
++ return -ENOMEM;
++
++ dynid->id.bus = bus;
++ dynid->id.group = HID_GROUP_ANY;
++ dynid->id.vendor = vendor;
++ dynid->id.product = product;
++ dynid->id.driver_data = driver_data;
++
++ spin_lock(&hdrv->dyn_lock);
++ list_add_tail(&dynid->list, &hdrv->dyn_list);
++ spin_unlock(&hdrv->dyn_lock);
++
++ ret = driver_attach(&hdrv->driver);
++
++ return ret ? : count;
++}
++static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
++
++static void hid_free_dynids(struct hid_driver *hdrv)
++{
++ struct hid_dynid *dynid, *n;
++
++ spin_lock(&hdrv->dyn_lock);
++ list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
++ list_del(&dynid->list);
++ kfree(dynid);
++ }
++ spin_unlock(&hdrv->dyn_lock);
++}
++
++static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
++ struct hid_driver *hdrv)
++{
++ struct hid_dynid *dynid;
++
++ spin_lock(&hdrv->dyn_lock);
++ list_for_each_entry(dynid, &hdrv->dyn_list, list) {
++ if (hid_match_one_id(hdev, &dynid->id)) {
++ spin_unlock(&hdrv->dyn_lock);
++ return &dynid->id;
++ }
++ }
++ spin_unlock(&hdrv->dyn_lock);
++
++ return hid_match_id(hdev, hdrv->id_table);
++}
++
++static int hid_bus_match(struct device *dev, struct device_driver *drv)
++{
++ struct hid_driver *hdrv = container_of(drv, struct hid_driver, driver);
++ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
++
++ return hid_match_device(hdev, hdrv) != NULL;
++}
++
++static int hid_device_probe(struct device *dev)
++{
++ struct hid_driver *hdrv = container_of(dev->driver,
++ struct hid_driver, driver);
++ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
++ const struct hid_device_id *id;
++ int ret = 0;
++
++ if (down_interruptible(&hdev->driver_lock))
++ return -EINTR;
++ if (down_interruptible(&hdev->driver_input_lock)) {
++ ret = -EINTR;
++ goto unlock_driver_lock;
++ }
++ hdev->io_started = false;
++
++ if (!hdev->driver) {
++ id = hid_match_device(hdev, hdrv);
++ if (id == NULL) {
++ ret = -ENODEV;
++ goto unlock;
++ }
++
++ hdev->driver = hdrv;
++ if (hdrv->probe) {
++ ret = hdrv->probe(hdev, id);
++ } else { /* default probe */
++ ret = hid_open_report(hdev);
++ if (!ret)
++ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
++ }
++ if (ret) {
++ hid_close_report(hdev);
++ hdev->driver = NULL;
++ }
++ }
++unlock:
++ if (!hdev->io_started)
++ up(&hdev->driver_input_lock);
++unlock_driver_lock:
++ up(&hdev->driver_lock);
++ return ret;
++}
++
++static int hid_device_remove(struct device *dev)
++{
++ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
++ struct hid_driver *hdrv;
++ int ret = 0;
++
++ if (down_interruptible(&hdev->driver_lock))
++ return -EINTR;
++ if (down_interruptible(&hdev->driver_input_lock)) {
++ ret = -EINTR;
++ goto unlock_driver_lock;
++ }
++ hdev->io_started = false;
++
++ hdrv = hdev->driver;
++ if (hdrv) {
++ if (hdrv->remove)
++ hdrv->remove(hdev);
++ else /* default remove */
++ hid_hw_stop(hdev);
++ hid_close_report(hdev);
++ hdev->driver = NULL;
++ }
++
++ if (!hdev->io_started)
++ up(&hdev->driver_input_lock);
++unlock_driver_lock:
++ up(&hdev->driver_lock);
++ return ret;
++}
++
++static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
++ char *buf)
++{
++ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
++ int len;
++
++ len = snprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
++ hdev->bus, hdev->group, hdev->vendor, hdev->product);
++
++ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
++}
++static DEVICE_ATTR_RO(modalias);
++
++static struct attribute *hid_dev_attrs[] = {
++ &dev_attr_modalias.attr,
++ NULL,
++};
++ATTRIBUTE_GROUPS(hid_dev);
++
++static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
++
++ if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
++ hdev->bus, hdev->vendor, hdev->product))
++ return -ENOMEM;
++
++ if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
++ return -ENOMEM;
++
++ if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
++ return -ENOMEM;
++
++ if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
++ return -ENOMEM;
++
++ if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
++ hdev->bus, hdev->group, hdev->vendor, hdev->product))
++ return -ENOMEM;
++
++ return 0;
++}
++
++static struct bus_type hid_bus_type = {
++ .name = "hid",
++ .dev_groups = hid_dev_groups,
++ .match = hid_bus_match,
++ .probe = hid_device_probe,
++ .remove = hid_device_remove,
++ .uevent = hid_uevent,
++};
++
++/* a list of devices that shouldn't be handled by HID core at all */
++static const struct hid_device_id hid_ignore_list[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
++ { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_0_4_IF_KIT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_16_16_IF_KIT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_8_8_8_IF_KIT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_7_IF_KIT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_8_IF_KIT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_PHIDGET_MOTORCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
++#if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE)
++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
++#endif
++ { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WACOM, HID_ANY_ID) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
++ { }
++};
++
++/**
++ * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
++ *
++ * There are composite devices for which we want to ignore only a certain
++ * interface. This is a list of devices for which only the mouse interface will
++ * be ignored. This allows a dedicated driver to take care of the interface.
++ */
++static const struct hid_device_id hid_mouse_ignore_list[] = {
++ /* appletouch driver */
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
++ { }
++};
++
++bool hid_ignore(struct hid_device *hdev)
++{
++ if (hdev->quirks & HID_QUIRK_NO_IGNORE)
++ return false;
++ if (hdev->quirks & HID_QUIRK_IGNORE)
++ return true;
++
++ switch (hdev->vendor) {
++ case USB_VENDOR_ID_CODEMERCS:
++ /* ignore all Code Mercenaries IOWarrior devices */
++ if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
++ hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
++ return true;
++ break;
++ case USB_VENDOR_ID_LOGITECH:
++ if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
++ hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
++ return true;
++ /*
++ * The Keene FM transmitter USB device has the same USB ID as
++ * the Logitech AudioHub Speaker, but it should ignore the hid.
++ * Check if the name is that of the Keene device.
++ * For reference: the name of the AudioHub is
++ * "HOLTEK AudioHub Speaker".
++ */
++ if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
++ !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
++ return true;
++ break;
++ case USB_VENDOR_ID_SOUNDGRAPH:
++ if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
++ hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
++ return true;
++ break;
++ case USB_VENDOR_ID_HANWANG:
++ if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
++ hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
++ return true;
++ break;
++ case USB_VENDOR_ID_JESS:
++ if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
++ hdev->type == HID_TYPE_USBNONE)
++ return true;
++ break;
++ case USB_VENDOR_ID_VELLEMAN:
++ /* These are not HID devices. They are handled by comedi. */
++ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
++ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
++ (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
++ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
++ return true;
++ break;
++ case USB_VENDOR_ID_ATMEL_V_USB:
++ /* Masterkit MA901 usb radio based on Atmel tiny85 chip and
++ * it has the same USB ID as many Atmel V-USB devices. This
++ * usb radio is handled by radio-ma901.c driver so we want
++ * ignore the hid. Check the name, bus, product and ignore
++ * if we have MA901 usb radio.
++ */
++ if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
++ hdev->bus == BUS_USB &&
++ strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
++ return true;
++ break;
++ }
++
++ if (hdev->type == HID_TYPE_USBMOUSE &&
++ hid_match_id(hdev, hid_mouse_ignore_list))
++ return true;
++
++ return !!hid_match_id(hdev, hid_ignore_list);
++}
++EXPORT_SYMBOL_GPL(hid_ignore);
++
++int hid_add_device(struct hid_device *hdev)
++{
++ static atomic_t id = ATOMIC_INIT(0);
++ int ret;
++
++ if (WARN_ON(hdev->status & HID_STAT_ADDED))
++ return -EBUSY;
++
++ /* we need to kill them here, otherwise they will stay allocated to
++ * wait for coming driver */
++ if (hid_ignore(hdev))
++ return -ENODEV;
++
++ /*
++ * Read the device report descriptor once and use as template
++ * for the driver-specific modifications.
++ */
++ ret = hdev->ll_driver->parse(hdev);
++ if (ret)
++ return ret;
++ if (!hdev->dev_rdesc)
++ return -ENODEV;
++
++ /*
++ * Scan generic devices for group information
++ */
++ if (hid_ignore_special_drivers ||
++ !hid_match_id(hdev, hid_have_special_driver)) {
++ ret = hid_scan_report(hdev);
++ if (ret)
++ hid_warn(hdev, "bad device descriptor (%d)\n", ret);
++ }
++
++ /* XXX hack, any other cleaner solution after the driver core
++ * is converted to allow more than 20 bytes as the device name? */
++ dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
++ hdev->vendor, hdev->product, atomic_inc_return(&id));
++
++ hid_debug_register(hdev, dev_name(&hdev->dev));
++ ret = device_add(&hdev->dev);
++ if (!ret)
++ hdev->status |= HID_STAT_ADDED;
++ else
++ hid_debug_unregister(hdev);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(hid_add_device);
++
++/**
++ * hid_allocate_device - allocate new hid device descriptor
++ *
++ * Allocate and initialize hid device, so that hid_destroy_device might be
++ * used to free it.
++ *
++ * New hid_device pointer is returned on success, otherwise ERR_PTR encoded
++ * error value.
++ */
++struct hid_device *hid_allocate_device(void)
++{
++ struct hid_device *hdev;
++ int ret = -ENOMEM;
++
++ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
++ if (hdev == NULL)
++ return ERR_PTR(ret);
++
++ device_initialize(&hdev->dev);
++ hdev->dev.release = hid_device_release;
++ hdev->dev.bus = &hid_bus_type;
++
++ hid_close_report(hdev);
++
++ init_waitqueue_head(&hdev->debug_wait);
++ INIT_LIST_HEAD(&hdev->debug_list);
++ spin_lock_init(&hdev->debug_list_lock);
++ sema_init(&hdev->driver_lock, 1);
++ sema_init(&hdev->driver_input_lock, 1);
++
++ return hdev;
++}
++EXPORT_SYMBOL_GPL(hid_allocate_device);
++
++static void hid_remove_device(struct hid_device *hdev)
++{
++ if (hdev->status & HID_STAT_ADDED) {
++ device_del(&hdev->dev);
++ hid_debug_unregister(hdev);
++ hdev->status &= ~HID_STAT_ADDED;
++ }
++ kfree(hdev->dev_rdesc);
++ hdev->dev_rdesc = NULL;
++ hdev->dev_rsize = 0;
++}
++
++/**
++ * hid_destroy_device - free previously allocated device
++ *
++ * @hdev: hid device
++ *
++ * If you allocate hid_device through hid_allocate_device, you should ever
++ * free by this function.
++ */
++void hid_destroy_device(struct hid_device *hdev)
++{
++ hid_remove_device(hdev);
++ put_device(&hdev->dev);
++}
++EXPORT_SYMBOL_GPL(hid_destroy_device);
++
++int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
++ const char *mod_name)
++{
++ int ret;
++
++ hdrv->driver.name = hdrv->name;
++ hdrv->driver.bus = &hid_bus_type;
++ hdrv->driver.owner = owner;
++ hdrv->driver.mod_name = mod_name;
++
++ INIT_LIST_HEAD(&hdrv->dyn_list);
++ spin_lock_init(&hdrv->dyn_lock);
++
++ ret = driver_register(&hdrv->driver);
++ if (ret)
++ return ret;
++
++ ret = driver_create_file(&hdrv->driver, &driver_attr_new_id);
++ if (ret)
++ driver_unregister(&hdrv->driver);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(__hid_register_driver);
++
++void hid_unregister_driver(struct hid_driver *hdrv)
++{
++ driver_remove_file(&hdrv->driver, &driver_attr_new_id);
++ driver_unregister(&hdrv->driver);
++ hid_free_dynids(hdrv);
++}
++EXPORT_SYMBOL_GPL(hid_unregister_driver);
++
++int hid_check_keys_pressed(struct hid_device *hid)
++{
++ struct hid_input *hidinput;
++ int i;
++
++ if (!(hid->claimed & HID_CLAIMED_INPUT))
++ return 0;
++
++ list_for_each_entry(hidinput, &hid->inputs, list) {
++ for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
++ if (hidinput->input->key[i])
++ return 1;
++ }
++
++ return 0;
++}
++
++EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
++
++static int __init hid_init(void)
++{
++ int ret;
++
++ if (hid_debug)
++ pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
++ "debugfs is now used for inspecting the device (report descriptor, reports)\n");
++
++ ret = bus_register(&hid_bus_type);
++ if (ret) {
++ pr_err("can't register hid bus\n");
++ goto err;
++ }
++
++ ret = hidraw_init();
++ if (ret)
++ goto err_bus;
++
++ hid_debug_init();
++
++ return 0;
++err_bus:
++ bus_unregister(&hid_bus_type);
++err:
++ return ret;
++}
++
++static void __exit hid_exit(void)
++{
++ hid_debug_exit();
++ hidraw_exit();
++ bus_unregister(&hid_bus_type);
++}
++
++module_init(hid_init);
++module_exit(hid_exit);
++
++MODULE_AUTHOR("Andreas Gal");
++MODULE_AUTHOR("Vojtech Pavlik");
++MODULE_AUTHOR("Jiri Kosina");
++MODULE_LICENSE(DRIVER_LICENSE);
++
+diff -Nur linux-3.14.36/drivers/hid/hid-ids.h linux-openelec/drivers/hid/hid-ids.h
+--- linux-3.14.36/drivers/hid/hid-ids.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/hid/hid-ids.h 2015-07-24 18:03:30.048842002 -0500
+@@ -697,6 +697,9 @@
+ #define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
+ #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
+
++#define USB_VENDOR_ID_OUYA 0x2836
++#define USB_DEVICE_ID_OUYA_CONTROLLER 0x0001
++
+ #define USB_VENDOR_ID_PANASONIC 0x04da
+ #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
+ #define USB_DEVICE_ID_PANABOARD_UBT880 0x104d
+@@ -714,6 +717,9 @@
+
+ #define USB_VENDOR_ID_PHILIPS 0x0471
+ #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
++#define USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_1 0x206c
++#define USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_2 0x20cc
++#define USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_3 0x0613
+
+ #define USB_VENDOR_ID_PI_ENGINEERING 0x05f3
+ #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
+@@ -784,6 +790,7 @@
+ #define USB_VENDOR_ID_SKYCABLE 0x1223
+ #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
+
++#define USB_VENDOR_ID_SMK 0x0609
+ #define USB_VENDOR_ID_SONY 0x054c
+ #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
+ #define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
+@@ -844,6 +851,7 @@
+ #define USB_VENDOR_ID_TIVO 0x150a
+ #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200
+ #define USB_DEVICE_ID_TIVO_SLIDE 0x1201
++#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203
+
+ #define USB_VENDOR_ID_TOPSEED 0x0766
+ #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
+diff -Nur linux-3.14.36/drivers/hid/hid-ids.h.orig linux-openelec/drivers/hid/hid-ids.h.orig
+--- linux-3.14.36/drivers/hid/hid-ids.h.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hid/hid-ids.h.orig 2015-07-24 18:03:29.980842002 -0500
+@@ -0,0 +1,976 @@
++/*
++ * USB HID quirks support for Linux
++ *
++ * Copyright (c) 1999 Andreas Gal
++ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
++ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
++ * Copyright (c) 2006-2007 Jiri Kosina
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ */
++
++#ifndef HID_IDS_H_FILE
++#define HID_IDS_H_FILE
++
++#define USB_VENDOR_ID_3M 0x0596
++#define USB_DEVICE_ID_3M1968 0x0500
++#define USB_DEVICE_ID_3M2256 0x0502
++#define USB_DEVICE_ID_3M3266 0x0506
++
++#define USB_VENDOR_ID_A4TECH 0x09da
++#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
++#define USB_DEVICE_ID_A4TECH_X5_005D 0x000a
++#define USB_DEVICE_ID_A4TECH_RP_649 0x001a
++
++#define USB_VENDOR_ID_AASHIMA 0x06d6
++#define USB_DEVICE_ID_AASHIMA_GAMEPAD 0x0025
++#define USB_DEVICE_ID_AASHIMA_PREDATOR 0x0026
++
++#define USB_VENDOR_ID_ACECAD 0x0460
++#define USB_DEVICE_ID_ACECAD_FLAIR 0x0004
++#define USB_DEVICE_ID_ACECAD_302 0x0008
++
++#define USB_VENDOR_ID_ACRUX 0x1a34
++
++#define USB_VENDOR_ID_ACTIONSTAR 0x2101
++#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
++
++#define USB_VENDOR_ID_ADS_TECH 0x06e1
++#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155
++
++#define USB_VENDOR_ID_AFATECH 0x15a4
++#define USB_DEVICE_ID_AFATECH_AF9016 0x9016
++
++#define USB_VENDOR_ID_AIPTEK 0x08ca
++#define USB_DEVICE_ID_AIPTEK_01 0x0001
++#define USB_DEVICE_ID_AIPTEK_10 0x0010
++#define USB_DEVICE_ID_AIPTEK_20 0x0020
++#define USB_DEVICE_ID_AIPTEK_21 0x0021
++#define USB_DEVICE_ID_AIPTEK_22 0x0022
++#define USB_DEVICE_ID_AIPTEK_23 0x0023
++#define USB_DEVICE_ID_AIPTEK_24 0x0024
++
++#define USB_VENDOR_ID_AIRCABLE 0x16CA
++#define USB_DEVICE_ID_AIRCABLE1 0x1502
++
++#define USB_VENDOR_ID_AIREN 0x1a2c
++#define USB_DEVICE_ID_AIREN_SLIMPLUS 0x0002
++
++#define USB_VENDOR_ID_ALCOR 0x058f
++#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
++
++#define USB_VENDOR_ID_ALPS 0x0433
++#define USB_DEVICE_ID_IBM_GAMEPAD 0x1101
++
++#define USB_VENDOR_ID_APPLE 0x05ac
++#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
++#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
++#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
++#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
++#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
++#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
++#define USB_DEVICE_ID_APPLE_GEYSER_ISO 0x0215
++#define USB_DEVICE_ID_APPLE_GEYSER_JIS 0x0216
++#define USB_DEVICE_ID_APPLE_GEYSER3_ANSI 0x0217
++#define USB_DEVICE_ID_APPLE_GEYSER3_ISO 0x0218
++#define USB_DEVICE_ID_APPLE_GEYSER3_JIS 0x0219
++#define USB_DEVICE_ID_APPLE_GEYSER4_ANSI 0x021a
++#define USB_DEVICE_ID_APPLE_GEYSER4_ISO 0x021b
++#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c
++#define USB_DEVICE_ID_APPLE_ALU_MINI_ANSI 0x021d
++#define USB_DEVICE_ID_APPLE_ALU_MINI_ISO 0x021e
++#define USB_DEVICE_ID_APPLE_ALU_MINI_JIS 0x021f
++#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220
++#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221
++#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222
++#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
++#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
++#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
++#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229
++#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a
++#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e
++#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
++#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
++#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
++#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
++#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
++#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
++#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
++#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
++#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
++#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
++#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
++#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
++#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
++#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
++#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
++#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259
++#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
++#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
++#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
++#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
++#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
++#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
++#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
++#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
++#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
++#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
++#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
++#define USB_DEVICE_ID_APPLE_IRCONTROL2 0x1440
++#define USB_DEVICE_ID_APPLE_IRCONTROL3 0x8241
++#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
++#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243
++
++#define USB_VENDOR_ID_ASUS 0x0486
++#define USB_DEVICE_ID_ASUS_T91MT 0x0185
++#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
++
++#define USB_VENDOR_ID_ASUSTEK 0x0b05
++#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
++#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
++
++#define USB_VENDOR_ID_ATEN 0x0557
++#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
++#define USB_DEVICE_ID_ATEN_CS124U 0x2202
++#define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204
++#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
++#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
++
++#define USB_VENDOR_ID_ATMEL 0x03eb
++#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
++#define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118
++#define USB_VENDOR_ID_ATMEL_V_USB 0x16c0
++#define USB_DEVICE_ID_ATMEL_V_USB 0x05df
++
++#define USB_VENDOR_ID_AUREAL 0x0755
++#define USB_DEVICE_ID_AUREAL_W01RN 0x2626
++
++#define USB_VENDOR_ID_AVERMEDIA 0x07ca
++#define USB_DEVICE_ID_AVER_FM_MR800 0xb800
++
++#define USB_VENDOR_ID_AXENTIA 0x12cf
++#define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111
++
++#define USB_VENDOR_ID_BAANTO 0x2453
++#define USB_DEVICE_ID_BAANTO_MT_190W2 0x0100
++
++#define USB_VENDOR_ID_BELKIN 0x050d
++#define USB_DEVICE_ID_FLIP_KVM 0x3201
++
++#define USB_VENDOR_ID_BERKSHIRE 0x0c98
++#define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140
++
++#define USB_VENDOR_ID_BTC 0x046e
++#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
++#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
++
++#define USB_VENDOR_ID_CANDO 0x2087
++#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
++#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
++#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02
++#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
++#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
++
++#define USB_VENDOR_ID_CH 0x068e
++#define USB_DEVICE_ID_CH_PRO_THROTTLE 0x00f1
++#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2
++#define USB_DEVICE_ID_CH_FIGHTERSTICK 0x00f3
++#define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4
++#define USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE 0x0051
++#define USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE 0x00ff
++#define USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK 0x00d3
++#define USB_DEVICE_ID_CH_AXIS_295 0x001c
++
++#define USB_VENDOR_ID_CHERRY 0x046a
++#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
++#define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
++
++#define USB_VENDOR_ID_CHIC 0x05fe
++#define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014
++
++#define USB_VENDOR_ID_CHICONY 0x04f2
++#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
++#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
++#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
++#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
++#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
++
++#define USB_VENDOR_ID_CHUNGHWAT 0x2247
++#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
++
++#define USB_VENDOR_ID_CIDC 0x1677
++
++#define USB_VENDOR_ID_CMEDIA 0x0d8c
++#define USB_DEVICE_ID_CM109 0x000e
++
++#define USB_VENDOR_ID_CODEMERCS 0x07c0
++#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
++#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
++
++#define USB_VENDOR_ID_CREATIVELABS 0x041e
++#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
++
++#define USB_VENDOR_ID_CVTOUCH 0x1ff7
++#define USB_DEVICE_ID_CVTOUCH_SCREEN 0x0013
++
++#define USB_VENDOR_ID_CYGNAL 0x10c4
++#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
++#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
++
++#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
++
++#define USB_VENDOR_ID_CYPRESS 0x04b4
++#define USB_DEVICE_ID_CYPRESS_MOUSE 0x0001
++#define USB_DEVICE_ID_CYPRESS_HIDCOM 0x5500
++#define USB_DEVICE_ID_CYPRESS_ULTRAMOUSE 0x7417
++#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
++#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
++#define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
++#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
++#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
++
++#define USB_VENDOR_ID_DATA_MODUL 0x7374
++#define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH 0x1201
++
++#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
++#define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a
++
++#define USB_VENDOR_ID_DELORME 0x1163
++#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
++#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
++
++#define USB_VENDOR_ID_DMI 0x0c0b
++#define USB_DEVICE_ID_DMI_ENC 0x5fab
++
++#define USB_VENDOR_ID_DRAGONRISE 0x0079
++
++#define USB_VENDOR_ID_DWAV 0x0eef
++#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
++#define USB_DEVICE_ID_DWAV_TOUCHCONTROLLER 0x0002
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7207 0x7207
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224 0x7224
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_722A 0x722A
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_725E 0x725e
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7262 0x7262
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72AA 0x72aa
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4 0x72c4
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0 0x72d0
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
++
++#define USB_VENDOR_ID_ELAN 0x04f3
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
++
++#define USB_VENDOR_ID_ELECOM 0x056e
++#define USB_DEVICE_ID_ELECOM_BM084 0x0061
++
++#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
++
++#define USB_VENDOR_ID_ELO 0x04E7
++#define USB_DEVICE_ID_ELO_TS2515 0x0022
++#define USB_DEVICE_ID_ELO_TS2700 0x0020
++
++#define USB_VENDOR_ID_EMS 0x2006
++#define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118
++
++#define USB_VENDOR_ID_FLATFROG 0x25b5
++#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002
++
++#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
++#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
++
++#define USB_VENDOR_ID_ETT 0x0664
++#define USB_DEVICE_ID_TC5UH 0x0309
++#define USB_DEVICE_ID_TC4UM 0x0306
++
++#define USB_VENDOR_ID_ETURBOTOUCH 0x22b9
++#define USB_DEVICE_ID_ETURBOTOUCH 0x0006
++
++#define USB_VENDOR_ID_EZKEY 0x0518
++#define USB_DEVICE_ID_BTC_8193 0x0002
++
++#define USB_VENDOR_ID_FORMOSA 0x147a
++#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
++
++#define USB_VENDOR_ID_FREESCALE 0x15A2
++#define USB_DEVICE_ID_FREESCALE_MX28 0x004F
++
++#define USB_VENDOR_ID_FRUCTEL 0x25B6
++#define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002
++
++#define USB_VENDOR_ID_GAMERON 0x0810
++#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
++#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
++
++#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101 0x0101
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102 0x0102
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106 0x0106
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
++#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
++
++#define USB_VENDOR_ID_GLAB 0x06c2
++#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
++#define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039
++#define USB_DEVICE_ID_0_0_4_IF_KIT 0x0040
++#define USB_DEVICE_ID_0_16_16_IF_KIT 0x0044
++#define USB_DEVICE_ID_8_8_8_IF_KIT 0x0045
++#define USB_DEVICE_ID_0_8_7_IF_KIT 0x0051
++#define USB_DEVICE_ID_0_8_8_IF_KIT 0x0053
++#define USB_DEVICE_ID_PHIDGET_MOTORCONTROL 0x0058
++
++#define USB_VENDOR_ID_GOODTOUCH 0x1aad
++#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f
++
++#define USB_VENDOR_ID_GOTOP 0x08f2
++#define USB_DEVICE_ID_SUPER_Q2 0x007f
++#define USB_DEVICE_ID_GOGOPEN 0x00ce
++#define USB_DEVICE_ID_PENPOWER 0x00f4
++
++#define USB_VENDOR_ID_GREENASIA 0x0e8f
++#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
++
++#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
++#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
++
++#define USB_VENDOR_ID_GRIFFIN 0x077d
++#define USB_DEVICE_ID_POWERMATE 0x0410
++#define USB_DEVICE_ID_SOUNDKNOB 0x04AA
++#define USB_DEVICE_ID_RADIOSHARK 0x627a
++
++#define USB_VENDOR_ID_GTCO 0x078c
++#define USB_DEVICE_ID_GTCO_90 0x0090
++#define USB_DEVICE_ID_GTCO_100 0x0100
++#define USB_DEVICE_ID_GTCO_101 0x0101
++#define USB_DEVICE_ID_GTCO_103 0x0103
++#define USB_DEVICE_ID_GTCO_104 0x0104
++#define USB_DEVICE_ID_GTCO_105 0x0105
++#define USB_DEVICE_ID_GTCO_106 0x0106
++#define USB_DEVICE_ID_GTCO_107 0x0107
++#define USB_DEVICE_ID_GTCO_108 0x0108
++#define USB_DEVICE_ID_GTCO_200 0x0200
++#define USB_DEVICE_ID_GTCO_201 0x0201
++#define USB_DEVICE_ID_GTCO_202 0x0202
++#define USB_DEVICE_ID_GTCO_203 0x0203
++#define USB_DEVICE_ID_GTCO_204 0x0204
++#define USB_DEVICE_ID_GTCO_205 0x0205
++#define USB_DEVICE_ID_GTCO_206 0x0206
++#define USB_DEVICE_ID_GTCO_207 0x0207
++#define USB_DEVICE_ID_GTCO_300 0x0300
++#define USB_DEVICE_ID_GTCO_301 0x0301
++#define USB_DEVICE_ID_GTCO_302 0x0302
++#define USB_DEVICE_ID_GTCO_303 0x0303
++#define USB_DEVICE_ID_GTCO_304 0x0304
++#define USB_DEVICE_ID_GTCO_305 0x0305
++#define USB_DEVICE_ID_GTCO_306 0x0306
++#define USB_DEVICE_ID_GTCO_307 0x0307
++#define USB_DEVICE_ID_GTCO_308 0x0308
++#define USB_DEVICE_ID_GTCO_309 0x0309
++#define USB_DEVICE_ID_GTCO_400 0x0400
++#define USB_DEVICE_ID_GTCO_401 0x0401
++#define USB_DEVICE_ID_GTCO_402 0x0402
++#define USB_DEVICE_ID_GTCO_403 0x0403
++#define USB_DEVICE_ID_GTCO_404 0x0404
++#define USB_DEVICE_ID_GTCO_405 0x0405
++#define USB_DEVICE_ID_GTCO_500 0x0500
++#define USB_DEVICE_ID_GTCO_501 0x0501
++#define USB_DEVICE_ID_GTCO_502 0x0502
++#define USB_DEVICE_ID_GTCO_503 0x0503
++#define USB_DEVICE_ID_GTCO_504 0x0504
++#define USB_DEVICE_ID_GTCO_1000 0x1000
++#define USB_DEVICE_ID_GTCO_1001 0x1001
++#define USB_DEVICE_ID_GTCO_1002 0x1002
++#define USB_DEVICE_ID_GTCO_1003 0x1003
++#define USB_DEVICE_ID_GTCO_1004 0x1004
++#define USB_DEVICE_ID_GTCO_1005 0x1005
++#define USB_DEVICE_ID_GTCO_1006 0x1006
++#define USB_DEVICE_ID_GTCO_1007 0x1007
++
++#define USB_VENDOR_ID_GYRATION 0x0c16
++#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
++#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
++#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
++
++#define USB_VENDOR_ID_HANWANG 0x0b57
++#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
++#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
++
++#define USB_VENDOR_ID_HANVON 0x20b3
++#define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
++
++#define USB_VENDOR_ID_HANVON_ALT 0x22ed
++#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010
++
++#define USB_VENDOR_ID_HAPP 0x078b
++#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
++#define USB_DEVICE_ID_UGCI_FLYING 0x0020
++#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
++
++#define USB_VENDOR_ID_HUION 0x256c
++#define USB_DEVICE_ID_HUION_580 0x006e
++
++#define USB_VENDOR_ID_IDEACOM 0x1cb6
++#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
++#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
++
++#define USB_VENDOR_ID_ILITEK 0x222a
++#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
++
++#define USB_VENDOR_ID_INTEL_0 0x8086
++#define USB_VENDOR_ID_INTEL_1 0x8087
++#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa
++
++#define USB_VENDOR_ID_STM_0 0x0483
++#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1
++
++#define USB_VENDOR_ID_ION 0x15e4
++#define USB_DEVICE_ID_ICADE 0x0132
++
++#define USB_VENDOR_ID_HOLTEK 0x1241
++#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015
++
++#define USB_VENDOR_ID_HOLTEK_ALT 0x04d9
++#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070 0xa070
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
++
++#define USB_VENDOR_ID_IMATION 0x0718
++#define USB_DEVICE_ID_DISC_STAKKA 0xd000
++
++#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
++#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
++
++#define USB_VENDOR_ID_JABRA 0x0b0e
++#define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412
++#define USB_DEVICE_ID_JABRA_SPEAK_510 0x0420
++
++#define USB_VENDOR_ID_JESS 0x0c45
++#define USB_DEVICE_ID_JESS_YUREX 0x1010
++
++#define USB_VENDOR_ID_JESS2 0x0f30
++#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
++
++#define USB_VENDOR_ID_KBGEAR 0x084e
++#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001
++
++#define USB_VENDOR_ID_KENSINGTON 0x047d
++#define USB_DEVICE_ID_KS_SLIMBLADE 0x2041
++
++#define USB_VENDOR_ID_KWORLD 0x1b80
++#define USB_DEVICE_ID_KWORLD_RADIO_FM700 0xd700
++
++#define USB_VENDOR_ID_KEYTOUCH 0x0926
++#define USB_DEVICE_ID_KEYTOUCH_IEC 0x3333
++
++#define USB_VENDOR_ID_KYE 0x0458
++#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
++#define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
++#define USB_DEVICE_ID_GENIUS_MANTICORE 0x0153
++#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
++#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
++#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
++#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
++#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
++#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
++
++#define USB_VENDOR_ID_LABTEC 0x1020
++#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
++
++#define USB_VENDOR_ID_LCPOWER 0x1241
++#define USB_DEVICE_ID_LCPOWER_LC1000 0xf767
++
++#define USB_VENDOR_ID_LD 0x0f11
++#define USB_DEVICE_ID_LD_CASSY 0x1000
++#define USB_DEVICE_ID_LD_CASSY2 0x1001
++#define USB_DEVICE_ID_LD_POCKETCASSY 0x1010
++#define USB_DEVICE_ID_LD_POCKETCASSY2 0x1011
++#define USB_DEVICE_ID_LD_MOBILECASSY 0x1020
++#define USB_DEVICE_ID_LD_MOBILECASSY2 0x1021
++#define USB_DEVICE_ID_LD_MICROCASSYVOLTAGE 0x1031
++#define USB_DEVICE_ID_LD_MICROCASSYCURRENT 0x1032
++#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033
++#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
++#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038
++#define USB_DEVICE_ID_LD_JWM 0x1080
++#define USB_DEVICE_ID_LD_DMMP 0x1081
++#define USB_DEVICE_ID_LD_UMIP 0x1090
++#define USB_DEVICE_ID_LD_UMIC 0x10A0
++#define USB_DEVICE_ID_LD_UMIB 0x10B0
++#define USB_DEVICE_ID_LD_XRAY 0x1100
++#define USB_DEVICE_ID_LD_XRAY2 0x1101
++#define USB_DEVICE_ID_LD_XRAYCT 0x1110
++#define USB_DEVICE_ID_LD_VIDEOCOM 0x1200
++#define USB_DEVICE_ID_LD_MOTOR 0x1210
++#define USB_DEVICE_ID_LD_COM3LAB 0x2000
++#define USB_DEVICE_ID_LD_TELEPORT 0x2010
++#define USB_DEVICE_ID_LD_NETWORKANALYSER 0x2020
++#define USB_DEVICE_ID_LD_POWERCONTROL 0x2030
++#define USB_DEVICE_ID_LD_MACHINETEST 0x2040
++#define USB_DEVICE_ID_LD_MOSTANALYSER 0x2050
++#define USB_DEVICE_ID_LD_MOSTANALYSER2 0x2051
++#define USB_DEVICE_ID_LD_ABSESP 0x2060
++#define USB_DEVICE_ID_LD_AUTODATABUS 0x2070
++#define USB_DEVICE_ID_LD_MCT 0x2080
++#define USB_DEVICE_ID_LD_HYBRID 0x2090
++#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
++
++#define USB_VENDOR_ID_LENOVO 0x17ef
++#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
++
++#define USB_VENDOR_ID_LG 0x1fd2
++#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
++
++#define USB_VENDOR_ID_LOGITECH 0x046d
++#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
++#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
++#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
++#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
++#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306
++#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD 0xc20a
++#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD 0xc211
++#define USB_DEVICE_ID_LOGITECH_EXTREME_3D 0xc215
++#define USB_DEVICE_ID_LOGITECH_DUAL_ACTION 0xc216
++#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
++#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219
++#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
++#define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286
++#define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287
++#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293
++#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
++#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
++#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
++#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
++#define USB_DEVICE_ID_LOGITECH_DFGT_WHEEL 0xc29a
++#define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
++#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
++#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
++#define USB_DEVICE_ID_S510_RECEIVER 0xc50c
++#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
++#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
++#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
++#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
++#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
++#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
++#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
++#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
++#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
++#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
++#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03
++#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04
++
++#define USB_VENDOR_ID_LUMIO 0x202e
++#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
++#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
++
++#define USB_VENDOR_ID_MADCATZ 0x0738
++#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
++
++#define USB_VENDOR_ID_MCC 0x09db
++#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
++#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
++
++#define USB_VENDOR_ID_MGE 0x0463
++#define USB_DEVICE_ID_MGE_UPS 0xffff
++#define USB_DEVICE_ID_MGE_UPS1 0x0001
++
++#define USB_VENDOR_ID_MICROCHIP 0x04d8
++#define USB_DEVICE_ID_PICKIT1 0x0032
++#define USB_DEVICE_ID_PICKIT2 0x0033
++#define USB_DEVICE_ID_PICOLCD 0xc002
++#define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002
++
++#define USB_VENDOR_ID_MICROSOFT 0x045e
++#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
++#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
++#define USB_DEVICE_ID_MS_NE4K 0x00db
++#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
++#define USB_DEVICE_ID_MS_LK6K 0x00f9
++#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701
++#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
++#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
++#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
++
++#define USB_VENDOR_ID_MOJO 0x8282
++#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
++
++#define USB_VENDOR_ID_MONTEREY 0x0566
++#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
++
++#define USB_VENDOR_ID_MSI 0x1770
++#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00
++
++#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
++#define USB_DEVICE_ID_N_S_HARMONY 0xc359
++
++#define USB_VENDOR_ID_NATSU 0x08b7
++#define USB_DEVICE_ID_NATSU_GAMEPAD 0x0001
++
++#define USB_VENDOR_ID_NCR 0x0404
++#define USB_DEVICE_ID_NCR_FIRST 0x0300
++#define USB_DEVICE_ID_NCR_LAST 0x03ff
++
++#define USB_VENDOR_ID_NEC 0x073e
++#define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
++
++#define USB_VENDOR_ID_NEXIO 0x1870
++#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
++#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110
++
++#define USB_VENDOR_ID_NEXTWINDOW 0x1926
++#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
++
++#define USB_VENDOR_ID_NINTENDO 0x057e
++#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
++#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
++
++#define USB_VENDOR_ID_NOVATEK 0x0603
++#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
++#define USB_DEVICE_ID_NOVATEK_MOUSE 0x1602
++
++#define USB_VENDOR_ID_NTRIG 0x1b96
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2 0x0004
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3 0x0005
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4 0x0006
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5 0x0007
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6 0x0008
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7 0x0009
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8 0x000A
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9 0x000B
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10 0x000C
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11 0x000D
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12 0x000E
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13 0x000F
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14 0x0010
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15 0x0011
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
++#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
++#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
++
++#define USB_VENDOR_ID_ONTRAK 0x0a07
++#define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
++
++#define USB_VENDOR_ID_ORTEK 0x05a4
++#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
++#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
++
++#define USB_VENDOR_ID_OUYA 0x2836
++#define USB_DEVICE_ID_OUYA_CONTROLLER 0x0001
++
++#define USB_VENDOR_ID_PANASONIC 0x04da
++#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
++#define USB_DEVICE_ID_PANABOARD_UBT880 0x104d
++
++#define USB_VENDOR_ID_PANJIT 0x134c
++
++#define USB_VENDOR_ID_PANTHERLORD 0x0810
++#define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001
++
++#define USB_VENDOR_ID_PENMOUNT 0x14e1
++#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500
++
++#define USB_VENDOR_ID_PETALYNX 0x18b1
++#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
++
++#define USB_VENDOR_ID_PHILIPS 0x0471
++#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
++#define USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_1 0x206c
++#define USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_2 0x20cc
++#define USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_3 0x0613
++
++#define USB_VENDOR_ID_PI_ENGINEERING 0x05f3
++#define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
++
++#define USB_VENDOR_ID_PIXART 0x093a
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
++
++#define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
++#define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
++
++#define USB_VENDOR_ID_POWERCOM 0x0d9f
++#define USB_DEVICE_ID_POWERCOM_UPS 0x0002
++
++#define USB_VENDOR_ID_PRODIGE 0x05af
++#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
++
++#define USB_VENDOR_ID_QUANTA 0x0408
++#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
++#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
++#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
++
++#define USB_VENDOR_ID_REALTEK 0x0bda
++#define USB_DEVICE_ID_REALTEK_READER 0x0152
++
++#define USB_VENDOR_ID_ROCCAT 0x1e7d
++#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
++#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
++#define USB_DEVICE_ID_ROCCAT_ISKUFX 0x3264
++#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
++#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
++#define USB_DEVICE_ID_ROCCAT_KONEPURE 0x2dbe
++#define USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL 0x2db4
++#define USB_DEVICE_ID_ROCCAT_KONEXTD 0x2e22
++#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
++#define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e
++#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
++#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
++#define USB_DEVICE_ID_ROCCAT_RYOS_MK 0x3138
++#define USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW 0x31ce
++#define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232
++#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
++
++#define USB_VENDOR_ID_SAITEK 0x06a3
++#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
++#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
++
++#define USB_VENDOR_ID_SAMSUNG 0x0419
++#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
++#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
++
++#define USB_VENDOR_ID_SENNHEISER 0x1395
++#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
++
++#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
++#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
++
++#define USB_VENDOR_ID_SIGMATEL 0x066F
++#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
++
++#define USB_VENDOR_ID_SIS_TOUCH 0x0457
++#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
++#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
++#define USB_DEVICE_ID_SIS_TS 0x1013
++#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
++
++#define USB_VENDOR_ID_SKYCABLE 0x1223
++#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
++
++#define USB_VENDOR_ID_SONY 0x054c
++#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
++#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
++#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
++#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
++#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
++#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
++#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
++#define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER 0x1000
++
++#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2
++#define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034
++#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046
++
++#define USB_VENDOR_ID_STANTUM 0x1f87
++#define USB_DEVICE_ID_MTP 0x0002
++
++#define USB_VENDOR_ID_STANTUM_STM 0x0483
++#define USB_DEVICE_ID_MTP_STM 0x3261
++
++#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
++#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
++
++#define USB_VENDOR_ID_STEELSERIES 0x1038
++#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
++
++#define USB_VENDOR_ID_SUN 0x0430
++#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
++
++#define USB_VENDOR_ID_SUNPLUS 0x04fc
++#define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8
++
++#define USB_VENDOR_ID_SYMBOL 0x05e0
++#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
++#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
++
++#define USB_VENDOR_ID_SYNAPTICS 0x06cb
++#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
++#define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002
++#define USB_DEVICE_ID_SYNAPTICS_CPAD 0x0003
++#define USB_DEVICE_ID_SYNAPTICS_TS 0x0006
++#define USB_DEVICE_ID_SYNAPTICS_STICK 0x0007
++#define USB_DEVICE_ID_SYNAPTICS_WP 0x0008
++#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009
++#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
++#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
++#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
++#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
++#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
++#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
++#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
++
++#define USB_VENDOR_ID_THINGM 0x27b8
++#define USB_DEVICE_ID_BLINK1 0x01ed
++
++#define USB_VENDOR_ID_THRUSTMASTER 0x044f
++
++#define USB_VENDOR_ID_TIVO 0x150a
++#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200
++#define USB_DEVICE_ID_TIVO_SLIDE 0x1201
++#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203
++
++#define USB_VENDOR_ID_TOPSEED 0x0766
++#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
++
++#define USB_VENDOR_ID_TOPSEED2 0x1784
++#define USB_DEVICE_ID_TOPSEED2_RF_COMBO 0x0004
++#define USB_DEVICE_ID_TOPSEED2_PERIPAD_701 0x0016
++
++#define USB_VENDOR_ID_TOPMAX 0x0663
++#define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103
++
++#define USB_VENDOR_ID_TOUCH_INTL 0x1e5e
++#define USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH 0x0313
++
++#define USB_VENDOR_ID_TOUCHPACK 0x1bfd
++#define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688
++
++#define USB_VENDOR_ID_TPV 0x25aa
++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883
++
++#define USB_VENDOR_ID_TURBOX 0x062a
++#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
++#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100
++
++#define USB_VENDOR_ID_TWINHAN 0x6253
++#define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100
++
++#define USB_VENDOR_ID_UCLOGIC 0x5543
++#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
++#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
++#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60 0x0064
++#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
++#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
++#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
++#define USB_DEVICE_ID_UCLOGIC_TABLET_WP1062 0x0064
++#define USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850 0x0522
++#define USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60 0x0781
++
++#define USB_VENDOR_ID_UNITEC 0x227d
++#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
++#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
++
++#define USB_VENDOR_ID_VELLEMAN 0x10cf
++#define USB_DEVICE_ID_VELLEMAN_K8055_FIRST 0x5500
++#define USB_DEVICE_ID_VELLEMAN_K8055_LAST 0x5503
++#define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061
++#define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068
++
++#define USB_VENDOR_ID_VERNIER 0x08f7
++#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
++#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
++#define USB_DEVICE_ID_VERNIER_SKIP 0x0003
++#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
++#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
++
++#define USB_VENDOR_ID_WACOM 0x056a
++#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
++#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD
++
++#define USB_VENDOR_ID_WALTOP 0x172f
++#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH 0x0032
++#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH 0x0034
++#define USB_DEVICE_ID_WALTOP_Q_PAD 0x0037
++#define USB_DEVICE_ID_WALTOP_PID_0038 0x0038
++#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH 0x0501
++#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500
++#define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET 0x0502
++
++#define USB_VENDOR_ID_WISEGROUP 0x0925
++#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
++#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101
++#define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104
++#define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201
++#define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888
++#define USB_DEVICE_ID_QUAD_USB_JOYPAD 0x8800
++#define USB_DEVICE_ID_DUAL_USB_JOYPAD 0x8866
++
++#define USB_VENDOR_ID_WISEGROUP_LTD 0x6666
++#define USB_VENDOR_ID_WISEGROUP_LTD2 0x6677
++#define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
++#define USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO 0x8801
++#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802
++#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804
++
++#define USB_VENDOR_ID_WISTRON 0x0fb8
++#define USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH 0x1109
++
++#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
++#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
++
++#define USB_VENDOR_ID_XAT 0x2505
++#define USB_DEVICE_ID_XAT_CSR 0x0220
++
++#define USB_VENDOR_ID_XIN_MO 0x16c0
++#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
++
++#define USB_VENDOR_ID_XIROKU 0x1477
++#define USB_DEVICE_ID_XIROKU_SPX 0x1006
++#define USB_DEVICE_ID_XIROKU_MPX 0x1007
++#define USB_DEVICE_ID_XIROKU_CSR 0x100e
++#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
++#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
++#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
++#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
++#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
++#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
++
++#define USB_VENDOR_ID_YEALINK 0x6993
++#define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
++
++#define USB_VENDOR_ID_ZEROPLUS 0x0c12
++
++#define USB_VENDOR_ID_ZYDACRON 0x13EC
++#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
++
++#define USB_VENDOR_ID_ZYTRONIC 0x14c8
++#define USB_DEVICE_ID_ZYTRONIC_ZXY100 0x0005
++
++#define USB_VENDOR_ID_PRIMAX 0x0461
++#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
++
++
++#endif
+diff -Nur linux-3.14.36/drivers/hid/hid-ouya.c linux-openelec/drivers/hid/hid-ouya.c
+--- linux-3.14.36/drivers/hid/hid-ouya.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hid/hid-ouya.c 2015-07-24 18:03:29.964842002 -0500
+@@ -0,0 +1,260 @@
++/*
++ * HID driver for OUYA Game Controller(s)
++ *
++ * Copyright (c) 2013 OUYA
++ */
++
++#include <linux/device.h>
++#include <linux/input.h>
++#include <linux/hid.h>
++#include <linux/module.h>
++
++#include "hid-ids.h"
++
++#define OUYA_TOUCHPAD_FIXUP (1 << 0)
++
++struct ouya_sc {
++ unsigned long quirks;
++};
++
++/* Fixed report descriptor */
++static __u8 ouya_rdesc_fixed[] = {
++
++ 0x05, 0x01, /* Usage Page (Desktop), */
++ 0x09, 0x05, /* Usage (Game Pad), */
++
++ 0xA1, 0x01, /* Collection (Application), */
++ 0x85, 0x07, /* Report ID (7), */
++
++ 0xA1, 0x00, /* Collection (Physical), */
++ 0x09, 0x30, /* Usage (X), */
++ 0x09, 0x31, /* Usage (Y), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++ 0x35, 0x00, /* Physical Minimum (0), */
++ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++ 0x95, 0x02, /* Report Count (2), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0xC0, /* End Collection, */
++
++ 0xA1, 0x00, /* Collection (Physical), */
++ 0x09, 0x33, /* Usage (Rx), */
++ 0x09, 0x34, /* Usage (Ry), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++ 0x35, 0x00, /* Physical Minimum (0), */
++ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++ 0x95, 0x02, /* Report Count (2), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0xC0, /* End Collection, */
++
++ 0xA1, 0x00, /* Collection (Physical), */
++ 0x09, 0x32, /* Usage (Z), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++ 0x35, 0x00, /* Physical Minimum (0), */
++ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0xC0, /* End Collection, */
++
++ 0xA1, 0x00, /* Collection (Physical), */
++ 0x09, 0x35, /* Usage (Rz), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++ 0x35, 0x00, /* Physical Minimum (0), */
++ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0xC0, /* End Collection, */
++
++ 0x05, 0x09, /* Usage Page (Button), */
++ 0x19, 0x01, /* Usage Minimum (01h), */
++ 0x29, 0x10, /* Usage Maximum (10h), */
++ 0x95, 0x10, /* Report Count (16), */
++ 0x75, 0x01, /* Report Size (1), */
++ 0x81, 0x02, /* Input (Variable), */
++
++ /* ORIGINAL REPORT DESCRIPTOR FOR TOUCHPAD INPUT */
++ /* 06 00 ff a1 02 09 02 15 00 26 ff 00 35 00 46 ff 00 95 03 75 08 81 02 c0 */
++
++ 0x06, 0x00, 0xFF, /* Usage Page (Custom), */
++ 0x09, 0x02, /* Usage (Mouse), */
++ 0x09, 0x01, /* Usage (Pointer), */
++ 0xA1, 0x00, /* Collection (Physical), */
++ 0x05, 0x09, /* Usage Page (Button), */
++ 0x19, 0x01, /* Usage Minimum (01h), */
++ 0x29, 0x03, /* Usage Maximum (03h), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x25, 0x01, /* Logical Maximum (1), */
++ 0x95, 0x03, /* Report Count (3), */
++ 0x75, 0x01, /* Report Size (1), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0x75, 0x05, /* Report Size (5), */
++ 0x81, 0x01, /* Input (Constant), */
++ 0x05, 0x01, /* Usage Page (Desktop), */
++ 0x09, 0x30, /* Usage (X), */
++ 0x09, 0x31, /* Usage (Y), */
++ 0x15, 0x81, /* Logical Minimum (-127), */
++ 0x25, 0x7f, /* Logical Maximum (127), */
++ 0x95, 0x02, /* Report Count (2), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x81, 0x06, /* Input (Relative), */
++ 0xC0, /* End Collection, */
++
++ 0x06, 0x00, 0xFF, /* Usage Page (Custom), */
++ 0xA1, 0x02, /* Collection (Logical), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x07, /* Report Count (7), */
++ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */
++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++ 0x09, 0x01, /* Usage (Pointer), */
++ 0x91, 0x02, /* Output (Variable), */
++ 0xC0, /* End Collection, */
++
++ 0xC0, /* End Collection */
++
++
++ 0x06, 0x00, 0xFF, /* Usage Page (Custom), */
++ 0x05, 0x0C, /* Usage Page (Consumer), */
++ 0x09, 0x01, /* Usage (Consumer Control), */
++
++ 0xA1, 0x01, /* Collection (Application), */
++ 0x85, 0x03, /* Report ID (3), */
++ 0x05, 0x01, /* Usage Page (Desktop), */
++ 0x09, 0x06, /* Usage (Keyboard), */
++ 0xA1, 0x02, /* Collection (Logical), */
++ 0x05, 0x06, /* Usage Page (Generic), */
++ 0x09, 0x20, /* Usage (Battery Strgth), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x06, 0xBC, 0xFF, /* Usage Page (Custom), */
++
++ 0x0A, 0xAD, 0xBD, /* UNKNOWN */
++
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x06, /* Report Count (6), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0xC0, /* End Collection, */
++
++ 0xC0, /* End Collection */
++
++ 0x00
++};
++
++static __u8 *ouya_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++ unsigned int *rsize)
++{
++ struct ouya_sc *sc = hid_get_drvdata(hdev);
++
++ if (sc->quirks & OUYA_TOUCHPAD_FIXUP) {
++ rdesc = ouya_rdesc_fixed;
++ *rsize = sizeof(ouya_rdesc_fixed);
++ }
++ return rdesc;
++}
++
++static int ouya_input_mapping(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ struct ouya_sc *sc = hid_get_drvdata(hdev);
++
++ if (!(sc->quirks & OUYA_TOUCHPAD_FIXUP)) {
++ return 0;
++ }
++
++ if ((usage->hid & 0x90000) == 0x90000 &&
++ (field->physical & 0xff000000) == 0xff000000 &&
++ usage->collection_index == 5 &&
++ field->report_count == 3) {
++
++ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_MOUSE + (usage->hid - 0x90001));
++
++ return 1;
++ }
++
++ return 0;
++}
++
++static int ouya_probe(struct hid_device *hdev, const struct hid_device_id *id)
++{
++ int ret;
++ struct ouya_sc *sc;
++
++ sc = kzalloc(sizeof(*sc), GFP_KERNEL);
++ if (sc == NULL) {
++ hid_err(hdev, "can't alloc ouya descriptor\n");
++ return -ENOMEM;
++ }
++
++ if(((hdev->version & 0xff00) == 0x0100 && (hdev->version & 0xff) >= 0x04) ||
++ ((hdev->version & 0xff00) == 0xe100 && (hdev->version & 0xff) >= 0x3a)) {
++ hid_info(hdev, "ouya controller - new version\n");
++ sc->quirks = OUYA_TOUCHPAD_FIXUP;
++ } else {
++ sc->quirks = 0;
++ }
++ hid_set_drvdata(hdev, sc);
++
++ ret = hid_parse(hdev);
++ if (ret) {
++ hid_err(hdev, "parse failed\n");
++ goto err_free;
++ }
++
++ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
++ HID_CONNECT_HIDDEV_FORCE);
++ if (ret) {
++ hid_err(hdev, "hw start failed\n");
++ goto err_free;
++ }
++
++ return 0;
++
++err_free:
++ kfree(sc);
++ return ret;
++}
++
++static void ouya_remove(struct hid_device *hdev)
++{
++ hid_hw_stop(hdev);
++ kfree(hid_get_drvdata(hdev));
++}
++
++static const struct hid_device_id ouya_devices[] = {
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_OUYA, USB_DEVICE_ID_OUYA_CONTROLLER) },
++ { }
++};
++MODULE_DEVICE_TABLE(hid, ouya_devices);
++
++static struct hid_driver ouya_driver = {
++ .name = "ouya",
++ .id_table = ouya_devices,
++ .probe = ouya_probe,
++ .remove = ouya_remove,
++ .input_mapping = ouya_input_mapping,
++ .report_fixup = ouya_report_fixup
++};
++
++static int __init ouya_init(void)
++{
++ return hid_register_driver(&ouya_driver);
++}
++
++static void __exit ouya_exit(void)
++{
++ hid_unregister_driver(&ouya_driver);
++}
++
++module_init(ouya_init);
++module_exit(ouya_exit);
+diff -Nur linux-3.14.36/drivers/hid/hid-sony.c linux-openelec/drivers/hid/hid-sony.c
+--- linux-3.14.36/drivers/hid/hid-sony.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/hid/hid-sony.c 2015-07-24 18:03:30.052842002 -0500
+@@ -546,6 +546,24 @@
+ return 1;
+ }
+
++static int ps3remote_setup_repeat(struct hid_device *hdev)
++{
++ struct hid_input *hidinput = list_first_entry(&hdev->inputs,
++ struct hid_input, list);
++ struct input_dev *input = hidinput->input;
++
++ /*
++ * Set up autorepeat defaults per the remote control subsystem;
++ * this must be done after hid_hw_start(), as having these non-zero
++ * at the time of input_register_device() tells the input system that
++ * the hardware does the autorepeat, and the PS3 remote does not.
++ */
++ set_bit(EV_REP, input->evbit);
++ input->rep[REP_DELAY] = 500;
++ input->rep[REP_PERIOD] = 125;
++
++ return 0;
++}
+
+ /* Sony Vaio VGX has wrongly mouse pointer declared as constant */
+ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+@@ -1074,6 +1092,8 @@
+ }
+ else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
+ ret = sixaxis_set_operational_bt(hdev);
++ else if (sc->quirks & PS3REMOTE)
++ ret = ps3remote_setup_repeat(hdev);
+ else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
+ /* Report 5 (31 bytes) is used to send data to the controller via USB */
+ ret = sony_set_output_report(sc, 0x05, 248);
+@@ -1150,6 +1170,9 @@
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
++ /* SMK-Link Universal Remote Control VP3700 */
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SONY_PS3_BDREMOTE),
++ .driver_data = PS3REMOTE },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, sony_devices);
+diff -Nur linux-3.14.36/drivers/hid/hid-sony.c.orig linux-openelec/drivers/hid/hid-sony.c.orig
+--- linux-3.14.36/drivers/hid/hid-sony.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hid/hid-sony.c.orig 2015-07-24 18:03:30.044842002 -0500
+@@ -0,0 +1,1188 @@
++/*
++ * HID driver for Sony / PS2 / PS3 BD devices.
++ *
++ * Copyright (c) 1999 Andreas Gal
++ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
++ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
++ * Copyright (c) 2008 Jiri Slaby
++ * Copyright (c) 2012 David Dillow <dave@thedillows.org>
++ * Copyright (c) 2006-2013 Jiri Kosina
++ * Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com>
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ */
++
++/* NOTE: in order for the Sony PS3 BD Remote Control to be found by
++ * a Bluetooth host, the key combination Start+Enter has to be kept pressed
++ * for about 7 seconds with the Bluetooth Host Controller in discovering mode.
++ *
++ * There will be no PIN request from the device.
++ */
++
++#include <linux/device.h>
++#include <linux/hid.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/usb.h>
++#include <linux/leds.h>
++
++#include "hid-ids.h"
++
++#define VAIO_RDESC_CONSTANT BIT(0)
++#define SIXAXIS_CONTROLLER_USB BIT(1)
++#define SIXAXIS_CONTROLLER_BT BIT(2)
++#define BUZZ_CONTROLLER BIT(3)
++#define PS3REMOTE BIT(4)
++#define DUALSHOCK4_CONTROLLER_USB BIT(5)
++#define DUALSHOCK4_CONTROLLER_BT BIT(6)
++
++#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
++#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)
++
++#define MAX_LEDS 4
++
++static const u8 sixaxis_rdesc_fixup[] = {
++ 0x95, 0x13, 0x09, 0x01, 0x81, 0x02, 0x95, 0x0C,
++ 0x81, 0x01, 0x75, 0x10, 0x95, 0x04, 0x26, 0xFF,
++ 0x03, 0x46, 0xFF, 0x03, 0x09, 0x01, 0x81, 0x02
++};
++
++static const u8 sixaxis_rdesc_fixup2[] = {
++ 0x05, 0x01, 0x09, 0x04, 0xa1, 0x01, 0xa1, 0x02,
++ 0x85, 0x01, 0x75, 0x08, 0x95, 0x01, 0x15, 0x00,
++ 0x26, 0xff, 0x00, 0x81, 0x03, 0x75, 0x01, 0x95,
++ 0x13, 0x15, 0x00, 0x25, 0x01, 0x35, 0x00, 0x45,
++ 0x01, 0x05, 0x09, 0x19, 0x01, 0x29, 0x13, 0x81,
++ 0x02, 0x75, 0x01, 0x95, 0x0d, 0x06, 0x00, 0xff,
++ 0x81, 0x03, 0x15, 0x00, 0x26, 0xff, 0x00, 0x05,
++ 0x01, 0x09, 0x01, 0xa1, 0x00, 0x75, 0x08, 0x95,
++ 0x04, 0x35, 0x00, 0x46, 0xff, 0x00, 0x09, 0x30,
++ 0x09, 0x31, 0x09, 0x32, 0x09, 0x35, 0x81, 0x02,
++ 0xc0, 0x05, 0x01, 0x95, 0x13, 0x09, 0x01, 0x81,
++ 0x02, 0x95, 0x0c, 0x81, 0x01, 0x75, 0x10, 0x95,
++ 0x04, 0x26, 0xff, 0x03, 0x46, 0xff, 0x03, 0x09,
++ 0x01, 0x81, 0x02, 0xc0, 0xa1, 0x02, 0x85, 0x02,
++ 0x75, 0x08, 0x95, 0x30, 0x09, 0x01, 0xb1, 0x02,
++ 0xc0, 0xa1, 0x02, 0x85, 0xee, 0x75, 0x08, 0x95,
++ 0x30, 0x09, 0x01, 0xb1, 0x02, 0xc0, 0xa1, 0x02,
++ 0x85, 0xef, 0x75, 0x08, 0x95, 0x30, 0x09, 0x01,
++ 0xb1, 0x02, 0xc0, 0xc0,
++};
++
++/* The default descriptor doesn't provide mapping for the accelerometers
++ * or orientation sensors. This fixed descriptor maps the accelerometers
++ * to usage values 0x40, 0x41 and 0x42 and maps the orientation sensors
++ * to usage values 0x43, 0x44 and 0x45.
++ */
++static u8 dualshock4_usb_rdesc[] = {
++ 0x05, 0x01, /* Usage Page (Desktop), */
++ 0x09, 0x05, /* Usage (Gamepad), */
++ 0xA1, 0x01, /* Collection (Application), */
++ 0x85, 0x01, /* Report ID (1), */
++ 0x09, 0x30, /* Usage (X), */
++ 0x09, 0x31, /* Usage (Y), */
++ 0x09, 0x32, /* Usage (Z), */
++ 0x09, 0x35, /* Usage (Rz), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x04, /* Report Count (4), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x09, 0x39, /* Usage (Hat Switch), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x25, 0x07, /* Logical Maximum (7), */
++ 0x35, 0x00, /* Physical Minimum (0), */
++ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */
++ 0x65, 0x14, /* Unit (Degrees), */
++ 0x75, 0x04, /* Report Size (4), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0x81, 0x42, /* Input (Variable, Null State), */
++ 0x65, 0x00, /* Unit, */
++ 0x05, 0x09, /* Usage Page (Button), */
++ 0x19, 0x01, /* Usage Minimum (01h), */
++ 0x29, 0x0E, /* Usage Maximum (0Eh), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x25, 0x01, /* Logical Maximum (1), */
++ 0x75, 0x01, /* Report Size (1), */
++ 0x95, 0x0E, /* Report Count (14), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
++ 0x09, 0x20, /* Usage (20h), */
++ 0x75, 0x06, /* Report Size (6), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x25, 0x7F, /* Logical Maximum (127), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x05, 0x01, /* Usage Page (Desktop), */
++ 0x09, 0x33, /* Usage (Rx), */
++ 0x09, 0x34, /* Usage (Ry), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x02, /* Report Count (2), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
++ 0x09, 0x21, /* Usage (21h), */
++ 0x95, 0x03, /* Report Count (3), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x05, 0x01, /* Usage Page (Desktop), */
++ 0x19, 0x40, /* Usage Minimum (40h), */
++ 0x29, 0x42, /* Usage Maximum (42h), */
++ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
++ 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
++ 0x75, 0x10, /* Report Size (16), */
++ 0x95, 0x03, /* Report Count (3), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x19, 0x43, /* Usage Minimum (43h), */
++ 0x29, 0x45, /* Usage Maximum (45h), */
++ 0x16, 0xFF, 0xBF, /* Logical Minimum (-16385), */
++ 0x26, 0x00, 0x40, /* Logical Maximum (16384), */
++ 0x95, 0x03, /* Report Count (3), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
++ 0x09, 0x21, /* Usage (21h), */
++ 0x15, 0x00, /* Logical Minimum (0), */
++ 0x25, 0xFF, /* Logical Maximum (255), */
++ 0x75, 0x08, /* Report Size (8), */
++ 0x95, 0x27, /* Report Count (39), */
++ 0x81, 0x02, /* Input (Variable), */
++ 0x85, 0x05, /* Report ID (5), */
++ 0x09, 0x22, /* Usage (22h), */
++ 0x95, 0x1F, /* Report Count (31), */
++ 0x91, 0x02, /* Output (Variable), */
++ 0x85, 0x04, /* Report ID (4), */
++ 0x09, 0x23, /* Usage (23h), */
++ 0x95, 0x24, /* Report Count (36), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x02, /* Report ID (2), */
++ 0x09, 0x24, /* Usage (24h), */
++ 0x95, 0x24, /* Report Count (36), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x08, /* Report ID (8), */
++ 0x09, 0x25, /* Usage (25h), */
++ 0x95, 0x03, /* Report Count (3), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x10, /* Report ID (16), */
++ 0x09, 0x26, /* Usage (26h), */
++ 0x95, 0x04, /* Report Count (4), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x11, /* Report ID (17), */
++ 0x09, 0x27, /* Usage (27h), */
++ 0x95, 0x02, /* Report Count (2), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x12, /* Report ID (18), */
++ 0x06, 0x02, 0xFF, /* Usage Page (FF02h), */
++ 0x09, 0x21, /* Usage (21h), */
++ 0x95, 0x0F, /* Report Count (15), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x13, /* Report ID (19), */
++ 0x09, 0x22, /* Usage (22h), */
++ 0x95, 0x16, /* Report Count (22), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x14, /* Report ID (20), */
++ 0x06, 0x05, 0xFF, /* Usage Page (FF05h), */
++ 0x09, 0x20, /* Usage (20h), */
++ 0x95, 0x10, /* Report Count (16), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x15, /* Report ID (21), */
++ 0x09, 0x21, /* Usage (21h), */
++ 0x95, 0x2C, /* Report Count (44), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x06, 0x80, 0xFF, /* Usage Page (FF80h), */
++ 0x85, 0x80, /* Report ID (128), */
++ 0x09, 0x20, /* Usage (20h), */
++ 0x95, 0x06, /* Report Count (6), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x81, /* Report ID (129), */
++ 0x09, 0x21, /* Usage (21h), */
++ 0x95, 0x06, /* Report Count (6), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x82, /* Report ID (130), */
++ 0x09, 0x22, /* Usage (22h), */
++ 0x95, 0x05, /* Report Count (5), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x83, /* Report ID (131), */
++ 0x09, 0x23, /* Usage (23h), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x84, /* Report ID (132), */
++ 0x09, 0x24, /* Usage (24h), */
++ 0x95, 0x04, /* Report Count (4), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x85, /* Report ID (133), */
++ 0x09, 0x25, /* Usage (25h), */
++ 0x95, 0x06, /* Report Count (6), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x86, /* Report ID (134), */
++ 0x09, 0x26, /* Usage (26h), */
++ 0x95, 0x06, /* Report Count (6), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x87, /* Report ID (135), */
++ 0x09, 0x27, /* Usage (27h), */
++ 0x95, 0x23, /* Report Count (35), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x88, /* Report ID (136), */
++ 0x09, 0x28, /* Usage (28h), */
++ 0x95, 0x22, /* Report Count (34), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x89, /* Report ID (137), */
++ 0x09, 0x29, /* Usage (29h), */
++ 0x95, 0x02, /* Report Count (2), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x90, /* Report ID (144), */
++ 0x09, 0x30, /* Usage (30h), */
++ 0x95, 0x05, /* Report Count (5), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x91, /* Report ID (145), */
++ 0x09, 0x31, /* Usage (31h), */
++ 0x95, 0x03, /* Report Count (3), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x92, /* Report ID (146), */
++ 0x09, 0x32, /* Usage (32h), */
++ 0x95, 0x03, /* Report Count (3), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0x93, /* Report ID (147), */
++ 0x09, 0x33, /* Usage (33h), */
++ 0x95, 0x0C, /* Report Count (12), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA0, /* Report ID (160), */
++ 0x09, 0x40, /* Usage (40h), */
++ 0x95, 0x06, /* Report Count (6), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA1, /* Report ID (161), */
++ 0x09, 0x41, /* Usage (41h), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA2, /* Report ID (162), */
++ 0x09, 0x42, /* Usage (42h), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA3, /* Report ID (163), */
++ 0x09, 0x43, /* Usage (43h), */
++ 0x95, 0x30, /* Report Count (48), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA4, /* Report ID (164), */
++ 0x09, 0x44, /* Usage (44h), */
++ 0x95, 0x0D, /* Report Count (13), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA5, /* Report ID (165), */
++ 0x09, 0x45, /* Usage (45h), */
++ 0x95, 0x15, /* Report Count (21), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA6, /* Report ID (166), */
++ 0x09, 0x46, /* Usage (46h), */
++ 0x95, 0x15, /* Report Count (21), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xF0, /* Report ID (240), */
++ 0x09, 0x47, /* Usage (47h), */
++ 0x95, 0x3F, /* Report Count (63), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xF1, /* Report ID (241), */
++ 0x09, 0x48, /* Usage (48h), */
++ 0x95, 0x3F, /* Report Count (63), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xF2, /* Report ID (242), */
++ 0x09, 0x49, /* Usage (49h), */
++ 0x95, 0x0F, /* Report Count (15), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA7, /* Report ID (167), */
++ 0x09, 0x4A, /* Usage (4Ah), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA8, /* Report ID (168), */
++ 0x09, 0x4B, /* Usage (4Bh), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xA9, /* Report ID (169), */
++ 0x09, 0x4C, /* Usage (4Ch), */
++ 0x95, 0x08, /* Report Count (8), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xAA, /* Report ID (170), */
++ 0x09, 0x4E, /* Usage (4Eh), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xAB, /* Report ID (171), */
++ 0x09, 0x4F, /* Usage (4Fh), */
++ 0x95, 0x39, /* Report Count (57), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xAC, /* Report ID (172), */
++ 0x09, 0x50, /* Usage (50h), */
++ 0x95, 0x39, /* Report Count (57), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xAD, /* Report ID (173), */
++ 0x09, 0x51, /* Usage (51h), */
++ 0x95, 0x0B, /* Report Count (11), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xAE, /* Report ID (174), */
++ 0x09, 0x52, /* Usage (52h), */
++ 0x95, 0x01, /* Report Count (1), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xAF, /* Report ID (175), */
++ 0x09, 0x53, /* Usage (53h), */
++ 0x95, 0x02, /* Report Count (2), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0x85, 0xB0, /* Report ID (176), */
++ 0x09, 0x54, /* Usage (54h), */
++ 0x95, 0x3F, /* Report Count (63), */
++ 0xB1, 0x02, /* Feature (Variable), */
++ 0xC0 /* End Collection */
++};
++
++static __u8 ps3remote_rdesc[] = {
++ 0x05, 0x01, /* GUsagePage Generic Desktop */
++ 0x09, 0x05, /* LUsage 0x05 [Game Pad] */
++ 0xA1, 0x01, /* MCollection Application (mouse, keyboard) */
++
++ /* Use collection 1 for joypad buttons */
++ 0xA1, 0x02, /* MCollection Logical (interrelated data) */
++
++ /* Ignore the 1st byte, maybe it is used for a controller
++ * number but it's not needed for correct operation */
++ 0x75, 0x08, /* GReportSize 0x08 [8] */
++ 0x95, 0x01, /* GReportCount 0x01 [1] */
++ 0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
++
++ /* Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
++ * buttons multiple keypresses are allowed */
++ 0x05, 0x09, /* GUsagePage Button */
++ 0x19, 0x01, /* LUsageMinimum 0x01 [Button 1 (primary/trigger)] */
++ 0x29, 0x18, /* LUsageMaximum 0x18 [Button 24] */
++ 0x14, /* GLogicalMinimum [0] */
++ 0x25, 0x01, /* GLogicalMaximum 0x01 [1] */
++ 0x75, 0x01, /* GReportSize 0x01 [1] */
++ 0x95, 0x18, /* GReportCount 0x18 [24] */
++ 0x81, 0x02, /* MInput 0x02 (Data[0] Var[1] Abs[2]) */
++
++ 0xC0, /* MEndCollection */
++
++ /* Use collection 2 for remote control buttons */
++ 0xA1, 0x02, /* MCollection Logical (interrelated data) */
++
++ /* 5th byte is used for remote control buttons */
++ 0x05, 0x09, /* GUsagePage Button */
++ 0x18, /* LUsageMinimum [No button pressed] */
++ 0x29, 0xFE, /* LUsageMaximum 0xFE [Button 254] */
++ 0x14, /* GLogicalMinimum [0] */
++ 0x26, 0xFE, 0x00, /* GLogicalMaximum 0x00FE [254] */
++ 0x75, 0x08, /* GReportSize 0x08 [8] */
++ 0x95, 0x01, /* GReportCount 0x01 [1] */
++ 0x80, /* MInput */
++
++ /* Ignore bytes from 6th to 11th, 6th to 10th are always constant at
++ * 0xff and 11th is for press indication */
++ 0x75, 0x08, /* GReportSize 0x08 [8] */
++ 0x95, 0x06, /* GReportCount 0x06 [6] */
++ 0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
++
++ /* 12th byte is for battery strength */
++ 0x05, 0x06, /* GUsagePage Generic Device Controls */
++ 0x09, 0x20, /* LUsage 0x20 [Battery Strength] */
++ 0x14, /* GLogicalMinimum [0] */
++ 0x25, 0x05, /* GLogicalMaximum 0x05 [5] */
++ 0x75, 0x08, /* GReportSize 0x08 [8] */
++ 0x95, 0x01, /* GReportCount 0x01 [1] */
++ 0x81, 0x02, /* MInput 0x02 (Data[0] Var[1] Abs[2]) */
++
++ 0xC0, /* MEndCollection */
++
++ 0xC0 /* MEndCollection [Game Pad] */
++};
++
++static const unsigned int ps3remote_keymap_joypad_buttons[] = {
++ [0x01] = KEY_SELECT,
++ [0x02] = BTN_THUMBL, /* L3 */
++ [0x03] = BTN_THUMBR, /* R3 */
++ [0x04] = BTN_START,
++ [0x05] = KEY_UP,
++ [0x06] = KEY_RIGHT,
++ [0x07] = KEY_DOWN,
++ [0x08] = KEY_LEFT,
++ [0x09] = BTN_TL2, /* L2 */
++ [0x0a] = BTN_TR2, /* R2 */
++ [0x0b] = BTN_TL, /* L1 */
++ [0x0c] = BTN_TR, /* R1 */
++ [0x0d] = KEY_OPTION, /* options/triangle */
++ [0x0e] = KEY_BACK, /* back/circle */
++ [0x0f] = BTN_0, /* cross */
++ [0x10] = KEY_SCREEN, /* view/square */
++ [0x11] = KEY_HOMEPAGE, /* PS button */
++ [0x14] = KEY_ENTER,
++};
++static const unsigned int ps3remote_keymap_remote_buttons[] = {
++ [0x00] = KEY_1,
++ [0x01] = KEY_2,
++ [0x02] = KEY_3,
++ [0x03] = KEY_4,
++ [0x04] = KEY_5,
++ [0x05] = KEY_6,
++ [0x06] = KEY_7,
++ [0x07] = KEY_8,
++ [0x08] = KEY_9,
++ [0x09] = KEY_0,
++ [0x0e] = KEY_ESC, /* return */
++ [0x0f] = KEY_CLEAR,
++ [0x16] = KEY_EJECTCD,
++ [0x1a] = KEY_MENU, /* top menu */
++ [0x28] = KEY_TIME,
++ [0x30] = KEY_PREVIOUS,
++ [0x31] = KEY_NEXT,
++ [0x32] = KEY_PLAY,
++ [0x33] = KEY_REWIND, /* scan back */
++ [0x34] = KEY_FORWARD, /* scan forward */
++ [0x38] = KEY_STOP,
++ [0x39] = KEY_PAUSE,
++ [0x40] = KEY_CONTEXT_MENU, /* pop up/menu */
++ [0x60] = KEY_FRAMEBACK, /* slow/step back */
++ [0x61] = KEY_FRAMEFORWARD, /* slow/step forward */
++ [0x63] = KEY_SUBTITLE,
++ [0x64] = KEY_AUDIO,
++ [0x65] = KEY_ANGLE,
++ [0x70] = KEY_INFO, /* display */
++ [0x80] = KEY_BLUE,
++ [0x81] = KEY_RED,
++ [0x82] = KEY_GREEN,
++ [0x83] = KEY_YELLOW,
++};
++
++static const unsigned int buzz_keymap[] = {
++ /* The controller has 4 remote buzzers, each with one LED and 5
++ * buttons.
++ *
++ * We use the mapping chosen by the controller, which is:
++ *
++ * Key Offset
++ * -------------------
++ * Buzz 1
++ * Blue 5
++ * Orange 4
++ * Green 3
++ * Yellow 2
++ *
++ * So, for example, the orange button on the third buzzer is mapped to
++ * BTN_TRIGGER_HAPPY14
++ */
++ [ 1] = BTN_TRIGGER_HAPPY1,
++ [ 2] = BTN_TRIGGER_HAPPY2,
++ [ 3] = BTN_TRIGGER_HAPPY3,
++ [ 4] = BTN_TRIGGER_HAPPY4,
++ [ 5] = BTN_TRIGGER_HAPPY5,
++ [ 6] = BTN_TRIGGER_HAPPY6,
++ [ 7] = BTN_TRIGGER_HAPPY7,
++ [ 8] = BTN_TRIGGER_HAPPY8,
++ [ 9] = BTN_TRIGGER_HAPPY9,
++ [10] = BTN_TRIGGER_HAPPY10,
++ [11] = BTN_TRIGGER_HAPPY11,
++ [12] = BTN_TRIGGER_HAPPY12,
++ [13] = BTN_TRIGGER_HAPPY13,
++ [14] = BTN_TRIGGER_HAPPY14,
++ [15] = BTN_TRIGGER_HAPPY15,
++ [16] = BTN_TRIGGER_HAPPY16,
++ [17] = BTN_TRIGGER_HAPPY17,
++ [18] = BTN_TRIGGER_HAPPY18,
++ [19] = BTN_TRIGGER_HAPPY19,
++ [20] = BTN_TRIGGER_HAPPY20,
++};
++
++struct sony_sc {
++ struct hid_device *hdev;
++ struct led_classdev *leds[MAX_LEDS];
++ struct hid_report *output_report;
++ unsigned long quirks;
++ struct work_struct state_worker;
++
++#ifdef CONFIG_SONY_FF
++ __u8 left;
++ __u8 right;
++#endif
++
++ __u8 worker_initialized;
++ __u8 led_state[MAX_LEDS];
++ __u8 led_count;
++};
++
++static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc,
++ unsigned int *rsize)
++{
++ *rsize = sizeof(ps3remote_rdesc);
++ return ps3remote_rdesc;
++}
++
++static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ unsigned int key = usage->hid & HID_USAGE;
++
++ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
++ return -1;
++
++ switch (usage->collection_index) {
++ case 1:
++ if (key >= ARRAY_SIZE(ps3remote_keymap_joypad_buttons))
++ return -1;
++
++ key = ps3remote_keymap_joypad_buttons[key];
++ if (!key)
++ return -1;
++ break;
++ case 2:
++ if (key >= ARRAY_SIZE(ps3remote_keymap_remote_buttons))
++ return -1;
++
++ key = ps3remote_keymap_remote_buttons[key];
++ if (!key)
++ return -1;
++ break;
++ default:
++ return -1;
++ }
++
++ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
++ return 1;
++}
++
++static int ps3remote_setup_repeat(struct hid_device *hdev)
++{
++ struct hid_input *hidinput = list_first_entry(&hdev->inputs,
++ struct hid_input, list);
++ struct input_dev *input = hidinput->input;
++
++ /*
++ * Set up autorepeat defaults per the remote control subsystem;
++ * this must be done after hid_hw_start(), as having these non-zero
++ * at the time of input_register_device() tells the input system that
++ * the hardware does the autorepeat, and the PS3 remote does not.
++ */
++ set_bit(EV_REP, input->evbit);
++ input->rep[REP_DELAY] = 500;
++ input->rep[REP_PERIOD] = 125;
++
++ return 0;
++}
++
++/* Sony Vaio VGX has wrongly mouse pointer declared as constant */
++static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++ unsigned int *rsize)
++{
++ struct sony_sc *sc = hid_get_drvdata(hdev);
++
++ /*
++ * Some Sony RF receivers wrongly declare the mouse pointer as a
++ * a constant non-data variable.
++ */
++ if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 &&
++ /* usage page: generic desktop controls */
++ /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */
++ /* usage: mouse */
++ rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
++ /* input (usage page for x,y axes): constant, variable, relative */
++ rdesc[54] == 0x81 && rdesc[55] == 0x07) {
++ hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n");
++ /* input: data, variable, relative */
++ rdesc[55] = 0x06;
++ }
++
++ /*
++ * The default Dualshock 4 USB descriptor doesn't assign
++ * the gyroscope values to corresponding axes so we need a
++ * modified one.
++ */
++ if ((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && *rsize == 467) {
++ hid_info(hdev, "Using modified Dualshock 4 report descriptor with gyroscope axes\n");
++ rdesc = dualshock4_usb_rdesc;
++ *rsize = sizeof(dualshock4_usb_rdesc);
++ }
++
++ /* The HID descriptor exposed over BT has a trailing zero byte */
++ if ((((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize == 148) ||
++ ((sc->quirks & SIXAXIS_CONTROLLER_BT) && *rsize == 149)) &&
++ rdesc[83] == 0x75) {
++ hid_info(hdev, "Fixing up Sony Sixaxis report descriptor\n");
++ memcpy((void *)&rdesc[83], (void *)&sixaxis_rdesc_fixup,
++ sizeof(sixaxis_rdesc_fixup));
++ } else if (sc->quirks & SIXAXIS_CONTROLLER_USB &&
++ *rsize > sizeof(sixaxis_rdesc_fixup2)) {
++ hid_info(hdev, "Sony Sixaxis clone detected. Using original report descriptor (size: %d clone; %d new)\n",
++ *rsize, (int)sizeof(sixaxis_rdesc_fixup2));
++ *rsize = sizeof(sixaxis_rdesc_fixup2);
++ memcpy(rdesc, &sixaxis_rdesc_fixup2, *rsize);
++ }
++
++ if (sc->quirks & PS3REMOTE)
++ return ps3remote_fixup(hdev, rdesc, rsize);
++
++ return rdesc;
++}
++
++static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
++ __u8 *rd, int size)
++{
++ struct sony_sc *sc = hid_get_drvdata(hdev);
++
++ /* Sixaxis HID report has acclerometers/gyro with MSByte first, this
++ * has to be BYTE_SWAPPED before passing up to joystick interface
++ */
++ if ((sc->quirks & (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)) &&
++ rd[0] == 0x01 && size == 49) {
++ swap(rd[41], rd[42]);
++ swap(rd[43], rd[44]);
++ swap(rd[45], rd[46]);
++ swap(rd[47], rd[48]);
++ }
++
++ return 0;
++}
++
++static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ struct sony_sc *sc = hid_get_drvdata(hdev);
++
++ if (sc->quirks & BUZZ_CONTROLLER) {
++ unsigned int key = usage->hid & HID_USAGE;
++
++ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
++ return -1;
++
++ switch (usage->collection_index) {
++ case 1:
++ if (key >= ARRAY_SIZE(buzz_keymap))
++ return -1;
++
++ key = buzz_keymap[key];
++ if (!key)
++ return -1;
++ break;
++ default:
++ return -1;
++ }
++
++ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
++ return 1;
++ }
++
++ if (sc->quirks & PS3REMOTE)
++ return ps3remote_mapping(hdev, hi, field, usage, bit, max);
++
++ /* Let hid-core decide for the others */
++ return 0;
++}
++
++/*
++ * The Sony Sixaxis does not handle HID Output Reports on the Interrupt EP
++ * like it should according to usbhid/hid-core.c::usbhid_output_raw_report()
++ * so we need to override that forcing HID Output Reports on the Control EP.
++ *
++ * There is also another issue about HID Output Reports via USB, the Sixaxis
++ * does not want the report_id as part of the data packet, so we have to
++ * discard buf[0] when sending the actual control message, even for numbered
++ * reports, humpf!
++ */
++static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
++ size_t count, unsigned char report_type)
++{
++ struct usb_interface *intf = to_usb_interface(hid->dev.parent);
++ struct usb_device *dev = interface_to_usbdev(intf);
++ struct usb_host_interface *interface = intf->cur_altsetting;
++ int report_id = buf[0];
++ int ret;
++
++ if (report_type == HID_OUTPUT_REPORT) {
++ /* Don't send the Report ID */
++ buf++;
++ count--;
++ }
++
++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
++ HID_REQ_SET_REPORT,
++ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
++ ((report_type + 1) << 8) | report_id,
++ interface->desc.bInterfaceNumber, buf, count,
++ USB_CTRL_SET_TIMEOUT);
++
++ /* Count also the Report ID, in case of an Output report. */
++ if (ret > 0 && report_type == HID_OUTPUT_REPORT)
++ ret++;
++
++ return ret;
++}
++
++/*
++ * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
++ * to "operational". Without this, the ps3 controller will not report any
++ * events.
++ */
++static int sixaxis_set_operational_usb(struct hid_device *hdev)
++{
++ int ret;
++ char *buf = kmalloc(18, GFP_KERNEL);
++
++ if (!buf)
++ return -ENOMEM;
++
++ ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT);
++
++ if (ret < 0)
++ hid_err(hdev, "can't set operational mode\n");
++
++ kfree(buf);
++
++ return ret;
++}
++
++static int sixaxis_set_operational_bt(struct hid_device *hdev)
++{
++ unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
++ return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
++}
++
++static void buzz_set_leds(struct hid_device *hdev, const __u8 *leds)
++{
++ struct list_head *report_list =
++ &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
++ struct hid_report *report = list_entry(report_list->next,
++ struct hid_report, list);
++ __s32 *value = report->field[0]->value;
++
++ value[0] = 0x00;
++ value[1] = leds[0] ? 0xff : 0x00;
++ value[2] = leds[1] ? 0xff : 0x00;
++ value[3] = leds[2] ? 0xff : 0x00;
++ value[4] = leds[3] ? 0xff : 0x00;
++ value[5] = 0x00;
++ value[6] = 0x00;
++ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
++}
++
++static void sony_set_leds(struct hid_device *hdev, const __u8 *leds, int count)
++{
++ struct sony_sc *drv_data = hid_get_drvdata(hdev);
++ int n;
++
++ BUG_ON(count > MAX_LEDS);
++
++ if (drv_data->quirks & BUZZ_CONTROLLER && count == 4) {
++ buzz_set_leds(hdev, leds);
++ } else if ((drv_data->quirks & SIXAXIS_CONTROLLER_USB) ||
++ (drv_data->quirks & DUALSHOCK4_CONTROLLER_USB)) {
++ for (n = 0; n < count; n++)
++ drv_data->led_state[n] = leds[n];
++ schedule_work(&drv_data->state_worker);
++ }
++}
++
++static void sony_led_set_brightness(struct led_classdev *led,
++ enum led_brightness value)
++{
++ struct device *dev = led->dev->parent;
++ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
++ struct sony_sc *drv_data;
++
++ int n;
++
++ drv_data = hid_get_drvdata(hdev);
++ if (!drv_data) {
++ hid_err(hdev, "No device data\n");
++ return;
++ }
++
++ for (n = 0; n < drv_data->led_count; n++) {
++ if (led == drv_data->leds[n]) {
++ if (value != drv_data->led_state[n]) {
++ drv_data->led_state[n] = value;
++ sony_set_leds(hdev, drv_data->led_state, drv_data->led_count);
++ }
++ break;
++ }
++ }
++}
++
++static enum led_brightness sony_led_get_brightness(struct led_classdev *led)
++{
++ struct device *dev = led->dev->parent;
++ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
++ struct sony_sc *drv_data;
++
++ int n;
++ int on = 0;
++
++ drv_data = hid_get_drvdata(hdev);
++ if (!drv_data) {
++ hid_err(hdev, "No device data\n");
++ return LED_OFF;
++ }
++
++ for (n = 0; n < drv_data->led_count; n++) {
++ if (led == drv_data->leds[n]) {
++ on = !!(drv_data->led_state[n]);
++ break;
++ }
++ }
++
++ return on ? LED_FULL : LED_OFF;
++}
++
++static void sony_leds_remove(struct hid_device *hdev)
++{
++ struct sony_sc *drv_data;
++ struct led_classdev *led;
++ int n;
++
++ drv_data = hid_get_drvdata(hdev);
++ BUG_ON(!(drv_data->quirks & SONY_LED_SUPPORT));
++
++ for (n = 0; n < drv_data->led_count; n++) {
++ led = drv_data->leds[n];
++ drv_data->leds[n] = NULL;
++ if (!led)
++ continue;
++ led_classdev_unregister(led);
++ kfree(led);
++ }
++
++ drv_data->led_count = 0;
++}
++
++static int sony_leds_init(struct hid_device *hdev)
++{
++ struct sony_sc *drv_data;
++ int n, ret = 0;
++ int max_brightness;
++ int use_colors;
++ struct led_classdev *led;
++ size_t name_sz;
++ char *name;
++ size_t name_len;
++ const char *name_fmt;
++ static const char * const color_str[] = { "red", "green", "blue" };
++ static const __u8 initial_values[MAX_LEDS] = { 0x00, 0x00, 0x00, 0x00 };
++
++ drv_data = hid_get_drvdata(hdev);
++ BUG_ON(!(drv_data->quirks & SONY_LED_SUPPORT));
++
++ if (drv_data->quirks & BUZZ_CONTROLLER) {
++ drv_data->led_count = 4;
++ max_brightness = 1;
++ use_colors = 0;
++ name_len = strlen("::buzz#");
++ name_fmt = "%s::buzz%d";
++ /* Validate expected report characteristics. */
++ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
++ return -ENODEV;
++ } else if (drv_data->quirks & DUALSHOCK4_CONTROLLER_USB) {
++ drv_data->led_count = 3;
++ max_brightness = 255;
++ use_colors = 1;
++ name_len = 0;
++ name_fmt = "%s:%s";
++ } else {
++ drv_data->led_count = 4;
++ max_brightness = 1;
++ use_colors = 0;
++ name_len = strlen("::sony#");
++ name_fmt = "%s::sony%d";
++ }
++
++ /* Clear LEDs as we have no way of reading their initial state. This is
++ * only relevant if the driver is loaded after somebody actively set the
++ * LEDs to on */
++ sony_set_leds(hdev, initial_values, drv_data->led_count);
++
++ name_sz = strlen(dev_name(&hdev->dev)) + name_len + 1;
++
++ for (n = 0; n < drv_data->led_count; n++) {
++
++ if (use_colors)
++ name_sz = strlen(dev_name(&hdev->dev)) + strlen(color_str[n]) + 2;
++
++ led = kzalloc(sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
++ if (!led) {
++ hid_err(hdev, "Couldn't allocate memory for LED %d\n", n);
++ ret = -ENOMEM;
++ goto error_leds;
++ }
++
++ name = (void *)(&led[1]);
++ if (use_colors)
++ snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), color_str[n]);
++ else
++ snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), n + 1);
++ led->name = name;
++ led->brightness = 0;
++ led->max_brightness = max_brightness;
++ led->brightness_get = sony_led_get_brightness;
++ led->brightness_set = sony_led_set_brightness;
++
++ ret = led_classdev_register(&hdev->dev, led);
++ if (ret) {
++ hid_err(hdev, "Failed to register LED %d\n", n);
++ kfree(led);
++ goto error_leds;
++ }
++
++ drv_data->leds[n] = led;
++ }
++
++ return ret;
++
++error_leds:
++ sony_leds_remove(hdev);
++
++ return ret;
++}
++
++static void sixaxis_state_worker(struct work_struct *work)
++{
++ struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
++ unsigned char buf[] = {
++ 0x01,
++ 0x00, 0xff, 0x00, 0xff, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0xff, 0x27, 0x10, 0x00, 0x32,
++ 0xff, 0x27, 0x10, 0x00, 0x32,
++ 0xff, 0x27, 0x10, 0x00, 0x32,
++ 0xff, 0x27, 0x10, 0x00, 0x32,
++ 0x00, 0x00, 0x00, 0x00, 0x00
++ };
++
++#ifdef CONFIG_SONY_FF
++ buf[3] = sc->right ? 1 : 0;
++ buf[5] = sc->left;
++#endif
++
++ buf[10] |= sc->led_state[0] << 1;
++ buf[10] |= sc->led_state[1] << 2;
++ buf[10] |= sc->led_state[2] << 3;
++ buf[10] |= sc->led_state[3] << 4;
++
++ sc->hdev->hid_output_raw_report(sc->hdev, buf, sizeof(buf),
++ HID_OUTPUT_REPORT);
++}
++
++static void dualshock4_state_worker(struct work_struct *work)
++{
++ struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
++ struct hid_device *hdev = sc->hdev;
++ struct hid_report *report = sc->output_report;
++ __s32 *value = report->field[0]->value;
++
++ value[0] = 0x03;
++
++#ifdef CONFIG_SONY_FF
++ value[3] = sc->right;
++ value[4] = sc->left;
++#endif
++
++ value[5] = sc->led_state[0];
++ value[6] = sc->led_state[1];
++ value[7] = sc->led_state[2];
++
++ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
++}
++
++#ifdef CONFIG_SONY_FF
++static int sony_play_effect(struct input_dev *dev, void *data,
++ struct ff_effect *effect)
++{
++ struct hid_device *hid = input_get_drvdata(dev);
++ struct sony_sc *sc = hid_get_drvdata(hid);
++
++ if (effect->type != FF_RUMBLE)
++ return 0;
++
++ sc->left = effect->u.rumble.strong_magnitude / 256;
++ sc->right = effect->u.rumble.weak_magnitude / 256;
++
++ schedule_work(&sc->state_worker);
++ return 0;
++}
++
++static int sony_init_ff(struct hid_device *hdev)
++{
++ struct hid_input *hidinput = list_entry(hdev->inputs.next,
++ struct hid_input, list);
++ struct input_dev *input_dev = hidinput->input;
++
++ input_set_capability(input_dev, EV_FF, FF_RUMBLE);
++ return input_ff_create_memless(input_dev, NULL, sony_play_effect);
++}
++
++#else
++static int sony_init_ff(struct hid_device *hdev)
++{
++ return 0;
++}
++#endif
++
++static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
++{
++ struct list_head *head, *list;
++ struct hid_report *report;
++ struct hid_device *hdev = sc->hdev;
++
++ list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
++
++ list_for_each(head, list) {
++ report = list_entry(head, struct hid_report, list);
++
++ if (report->id == req_id) {
++ if (report->size < req_size) {
++ hid_err(hdev, "Output report 0x%02x (%i bits) is smaller than requested size (%i bits)\n",
++ req_id, report->size, req_size);
++ return -EINVAL;
++ }
++ sc->output_report = report;
++ return 0;
++ }
++ }
++
++ hid_err(hdev, "Unable to locate output report 0x%02x\n", req_id);
++
++ return -EINVAL;
++}
++
++static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
++{
++ int ret;
++ unsigned long quirks = id->driver_data;
++ struct sony_sc *sc;
++ unsigned int connect_mask = HID_CONNECT_DEFAULT;
++
++ sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
++ if (sc == NULL) {
++ hid_err(hdev, "can't alloc sony descriptor\n");
++ return -ENOMEM;
++ }
++
++ sc->quirks = quirks;
++ hid_set_drvdata(hdev, sc);
++ sc->hdev = hdev;
++
++ ret = hid_parse(hdev);
++ if (ret) {
++ hid_err(hdev, "parse failed\n");
++ return ret;
++ }
++
++ if (sc->quirks & VAIO_RDESC_CONSTANT)
++ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
++ else if (sc->quirks & SIXAXIS_CONTROLLER_USB)
++ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
++ else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
++ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
++
++ ret = hid_hw_start(hdev, connect_mask);
++ if (ret) {
++ hid_err(hdev, "hw start failed\n");
++ return ret;
++ }
++
++ if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
++ hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
++ ret = sixaxis_set_operational_usb(hdev);
++
++ sc->worker_initialized = 1;
++ INIT_WORK(&sc->state_worker, sixaxis_state_worker);
++ }
++ else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
++ ret = sixaxis_set_operational_bt(hdev);
++ else if (sc->quirks & PS3REMOTE)
++ ret = ps3remote_setup_repeat(hdev);
++ else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
++ /* Report 5 (31 bytes) is used to send data to the controller via USB */
++ ret = sony_set_output_report(sc, 0x05, 248);
++ if (ret < 0)
++ goto err_stop;
++
++ sc->worker_initialized = 1;
++ INIT_WORK(&sc->state_worker, dualshock4_state_worker);
++ } else {
++ ret = 0;
++ }
++
++ if (ret < 0)
++ goto err_stop;
++
++ if (sc->quirks & SONY_LED_SUPPORT) {
++ ret = sony_leds_init(hdev);
++ if (ret < 0)
++ goto err_stop;
++ }
++
++ if (sc->quirks & SONY_FF_SUPPORT) {
++ ret = sony_init_ff(hdev);
++ if (ret < 0)
++ goto err_stop;
++ }
++
++ return 0;
++err_stop:
++ if (sc->quirks & SONY_LED_SUPPORT)
++ sony_leds_remove(hdev);
++ hid_hw_stop(hdev);
++ return ret;
++}
++
++static void sony_remove(struct hid_device *hdev)
++{
++ struct sony_sc *sc = hid_get_drvdata(hdev);
++
++ if (sc->quirks & SONY_LED_SUPPORT)
++ sony_leds_remove(hdev);
++
++ if (sc->worker_initialized)
++ cancel_work_sync(&sc->state_worker);
++
++ hid_hw_stop(hdev);
++}
++
++static const struct hid_device_id sony_devices[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
++ .driver_data = SIXAXIS_CONTROLLER_USB },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER),
++ .driver_data = SIXAXIS_CONTROLLER_USB },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
++ .driver_data = SIXAXIS_CONTROLLER_BT },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
++ .driver_data = VAIO_RDESC_CONSTANT },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
++ .driver_data = VAIO_RDESC_CONSTANT },
++ /* Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
++ * Logitech joystick from the device descriptor. */
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER),
++ .driver_data = BUZZ_CONTROLLER },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER),
++ .driver_data = BUZZ_CONTROLLER },
++ /* PS3 BD Remote Control */
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE),
++ .driver_data = PS3REMOTE },
++ /* Logitech Harmony Adapter for PS3 */
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3),
++ .driver_data = PS3REMOTE },
++ /* Sony Dualshock 4 controllers for PS4 */
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
++ .driver_data = DUALSHOCK4_CONTROLLER_USB },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
++ .driver_data = DUALSHOCK4_CONTROLLER_BT },
++ { }
++};
++MODULE_DEVICE_TABLE(hid, sony_devices);
++
++static struct hid_driver sony_driver = {
++ .name = "sony",
++ .id_table = sony_devices,
++ .input_mapping = sony_mapping,
++ .probe = sony_probe,
++ .remove = sony_remove,
++ .report_fixup = sony_report_fixup,
++ .raw_event = sony_raw_event
++};
++module_hid_driver(sony_driver);
++
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/hid/hid-spinelplus.c linux-openelec/drivers/hid/hid-spinelplus.c
+--- linux-3.14.36/drivers/hid/hid-spinelplus.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hid/hid-spinelplus.c 2015-07-24 18:03:29.980842002 -0500
+@@ -0,0 +1,104 @@
++/*
++ * HID driver for "PHILIPS MCE USB IR Receiver- Spinel plus" remotes
++ *
++ * Copyright (c) 2010 Panagiotis Skintzos
++ *
++ * Renamed to Spinel, cleanup and modified to also support
++ * Spinel Plus 0471:20CC by Stephan Raue 2012.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ */
++
++#include <linux/device.h>
++#include <linux/input.h>
++#include <linux/hid.h>
++#include <linux/module.h>
++
++#include "hid-ids.h"
++
++#define spinelplus_map_key(c) set_bit(EV_REP, hi->input->evbit); \
++ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
++
++static int spinelplus_input_mapping(struct hid_device *hdev,
++ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ switch (usage->hid) {
++ case 0xffbc000d: spinelplus_map_key(KEY_MEDIA); break;
++ case 0xffbc0024: spinelplus_map_key(KEY_MEDIA); break;
++ case 0xffbc0027: spinelplus_map_key(KEY_ZOOM); break;
++ case 0xffbc0033: spinelplus_map_key(KEY_HOME); break;
++ case 0xffbc0035: spinelplus_map_key(KEY_CAMERA); break;
++ case 0xffbc0036: spinelplus_map_key(KEY_EPG); break;
++ case 0xffbc0037: spinelplus_map_key(KEY_DVD); break;
++ case 0xffbc0038: spinelplus_map_key(KEY_HOME); break;
++ case 0xffbc0039: spinelplus_map_key(KEY_MP3); break;
++ case 0xffbc003a: spinelplus_map_key(KEY_VIDEO); break;
++ case 0xffbc005a: spinelplus_map_key(KEY_TEXT); break;
++ case 0xffbc005b: spinelplus_map_key(KEY_RED); break;
++ case 0xffbc005c: spinelplus_map_key(KEY_GREEN); break;
++ case 0xffbc005d: spinelplus_map_key(KEY_YELLOW); break;
++ case 0xffbc005e: spinelplus_map_key(KEY_BLUE); break;
++ default:
++ return 0;
++ }
++ return 1;
++}
++
++static int spinelplus_probe(struct hid_device *hdev,
++ const struct hid_device_id *id)
++{
++ int ret;
++ /* Connect only to hid input (not hiddev & hidraw)*/
++ unsigned int cmask = HID_CONNECT_HIDINPUT;
++
++ ret = hid_parse(hdev);
++ if (ret) {
++ dev_err(&hdev->dev, "parse failed\n");
++ goto err_free;
++ }
++
++ ret = hid_hw_start(hdev, cmask);
++ if (ret) {
++ dev_err(&hdev->dev, "hw start failed\n");
++ goto err_free;
++ }
++
++ return 0;
++err_free:
++ return ret;
++}
++
++static const struct hid_device_id spinelplus_devices[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS,USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS,USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS,USB_DEVICE_ID_PHILIPS_SPINEL_PLUS_3) },
++ { }
++};
++MODULE_DEVICE_TABLE(hid, spinelplus_devices);
++
++static struct hid_driver spinelplus_driver = {
++ .name = "SpinelPlus",
++ .id_table = spinelplus_devices,
++ .input_mapping = spinelplus_input_mapping,
++ .probe = spinelplus_probe,
++};
++
++static int __init spinelplus_init(void)
++{
++ return hid_register_driver(&spinelplus_driver);
++}
++
++static void __exit spinelplus_exit(void)
++{
++ hid_unregister_driver(&spinelplus_driver);
++}
++
++module_init(spinelplus_init);
++module_exit(spinelplus_exit);
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/hid/hid-tivo.c linux-openelec/drivers/hid/hid-tivo.c
+--- linux-3.14.36/drivers/hid/hid-tivo.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/hid/hid-tivo.c 2015-07-24 18:03:29.956842002 -0500
+@@ -64,6 +64,7 @@
+ /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, tivo_devices);
+diff -Nur linux-3.14.36/drivers/hid/Kconfig linux-openelec/drivers/hid/Kconfig
+--- linux-3.14.36/drivers/hid/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/hid/Kconfig 2015-07-24 18:03:29.980842002 -0500
+@@ -490,6 +490,12 @@
+ - Ortek WKB-2000
+ - Skycable wireless presenter
+
++config HID_OUYA
++ tristate "OUYA Game Controller"
++ depends on USB_HID
++ ---help---
++ Support for OUYA Game Controller.
++
+ config HID_PANTHERLORD
+ tristate "Pantherlord/GreenAsia game controller"
+ depends on HID
+@@ -640,6 +646,12 @@
+ ---help---
+ Support for Steelseries SRW-S1 steering wheel
+
++config HID_SPINELPLUS
++ tristate "Spinel Plus remote control"
++ depends on USB_HID
++ ---help---
++ Say Y here if you have a Spinel Plus (0471:206c/20cc/0613) remote
++
+ config HID_SUNPLUS
+ tristate "Sunplus wireless desktop"
+ depends on HID
+diff -Nur linux-3.14.36/drivers/hid/Kconfig.orig linux-openelec/drivers/hid/Kconfig.orig
+--- linux-3.14.36/drivers/hid/Kconfig.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hid/Kconfig.orig 2015-07-24 18:03:29.964842002 -0500
+@@ -0,0 +1,817 @@
++#
++# HID driver configuration
++#
++menu "HID support"
++ depends on INPUT
++
++config HID
++ tristate "HID bus support"
++ depends on INPUT
++ default y
++ ---help---
++ A human interface device (HID) is a type of computer device that
++ interacts directly with and takes input from humans. The term "HID"
++ most commonly used to refer to the USB-HID specification, but other
++ devices (such as, but not strictly limited to, Bluetooth) are
++ designed using HID specification (this involves certain keyboards,
++ mice, tablets, etc). This option adds the HID bus to the kernel,
++ together with generic HID layer code. The HID devices are added and
++ removed from the HID bus by the transport-layer drivers, such as
++ usbhid (USB_HID) and hidp (BT_HIDP).
++
++ For docs and specs, see http://www.usb.org/developers/hidpage/
++
++ If unsure, say Y.
++
++if HID
++
++config HID_BATTERY_STRENGTH
++ bool "Battery level reporting for HID devices"
++ depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
++ default n
++ ---help---
++ This option adds support of reporting battery strength (for HID devices
++ that support this feature) through power_supply class so that userspace
++ tools, such as upower, can display it.
++
++config HIDRAW
++ bool "/dev/hidraw raw HID device support"
++ depends on HID
++ ---help---
++ Say Y here if you want to support HID devices (from the USB
++ specification standpoint) that aren't strictly user interface
++ devices, like monitor controls and Uninterruptable Power Supplies.
++
++ This module supports these devices separately using a separate
++ event interface on /dev/hidraw.
++
++ There is also a /dev/hiddev configuration option in the USB HID
++ configuration menu. In comparison to hiddev, this device does not process
++ the hid events at all (no parsing, no lookups). This lets applications
++ to work on raw hid events when they want to, and avoid using transport-specific
++ userspace libhid/libusb libraries.
++
++ If unsure, say Y.
++
++config UHID
++ tristate "User-space I/O driver support for HID subsystem"
++ depends on HID
++ default n
++ ---help---
++ Say Y here if you want to provide HID I/O Drivers from user-space.
++ This allows to write I/O drivers in user-space and feed the data from
++ the device into the kernel. The kernel parses the HID reports, loads the
++ corresponding HID Device Driver or provides input devices on top of your
++ user-space device.
++
++ This driver cannot be used to parse HID-reports in user-space and write
++ special HID-drivers. You should use hidraw for that.
++ Instead, this driver allows to write the transport-layer driver in
++ user-space like USB-HID and Bluetooth-HID do in kernel-space.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called uhid.
++
++config HID_GENERIC
++ tristate "Generic HID driver"
++ depends on HID
++ default HID
++ ---help---
++ Support for generic devices on the HID bus. This includes most
++ keyboards and mice, joysticks, tablets and digitizers.
++
++ To compile this driver as a module, choose M here: the module
++ will be called hid-generic.
++
++ If unsure, say Y.
++
++menu "Special HID drivers"
++ depends on HID
++
++config HID_A4TECH
++ tristate "A4 tech mice" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for A4 tech X5 and WOP-35 / Trust 450L mice.
++
++config HID_ACRUX
++ tristate "ACRUX game controller support"
++ depends on HID
++ ---help---
++ Say Y here if you want to enable support for ACRUX game controllers.
++
++config HID_ACRUX_FF
++ bool "ACRUX force feedback support"
++ depends on HID_ACRUX
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you want to enable force feedback support for ACRUX
++ game controllers.
++
++config HID_APPLE
++ tristate "Apple {i,Power,Mac}Books" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for some Apple devices which less or more break
++ HID specification.
++
++ Say Y here if you want support for keyboards of Apple iBooks, PowerBooks,
++ MacBooks, MacBook Pros and Apple Aluminum.
++
++config HID_APPLEIR
++ tristate "Apple infrared receiver"
++ depends on (USB_HID)
++ ---help---
++ Support for Apple infrared remote control. All the Apple computers from
++ 2005 onwards include such a port, except the unibody Macbook (2009),
++ and Mac Pros. This receiver is also used in the Apple TV set-top box
++ prior to the 2010 model.
++
++ Say Y here if you want support for Apple infrared remote control.
++
++config HID_AUREAL
++ tristate "Aureal"
++ depends on HID
++ ---help---
++ Support for Aureal Cy se W-01RN Remote Controller and other Aureal derived remotes.
++
++config HID_BELKIN
++ tristate "Belkin Flip KVM and Wireless keyboard" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for Belkin Flip KVM and Wireless keyboard.
++
++config HID_CHERRY
++ tristate "Cherry Cymotion keyboard" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for Cherry Cymotion keyboard.
++
++config HID_CHICONY
++ tristate "Chicony Tactical pad" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for Chicony Tactical pad.
++
++config HID_PRODIKEYS
++ tristate "Prodikeys PC-MIDI Keyboard support"
++ depends on HID && SND
++ select SND_RAWMIDI
++ ---help---
++ Support for Prodikeys PC-MIDI Keyboard device support.
++ Say Y here to enable support for this device.
++ - Prodikeys PC-MIDI keyboard.
++ The Prodikeys PC-MIDI acts as a USB Audio device, with one MIDI
++ input and one MIDI output. These MIDI jacks appear as
++ a sound "card" in the ALSA sound system.
++ Note: if you say N here, this device will still function as a basic
++ multimedia keyboard, but will lack support for the musical keyboard
++ and some additional multimedia keys.
++
++config HID_CYPRESS
++ tristate "Cypress mouse and barcode readers" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for cypress mouse and barcode readers.
++
++config HID_DRAGONRISE
++ tristate "DragonRise Inc. game controller"
++ depends on HID
++ ---help---
++ Say Y here if you have DragonRise Inc. game controllers.
++ These might be branded as:
++ - Tesun USB-703
++ - Media-tech MT1504 "Rogue"
++ - DVTech JS19 "Gear"
++ - Defender Game Master
++
++config DRAGONRISE_FF
++ bool "DragonRise Inc. force feedback"
++ depends on HID_DRAGONRISE
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you want to enable force feedback support for DragonRise Inc.
++ game controllers.
++
++config HID_EMS_FF
++ tristate "EMS Production Inc. force feedback support"
++ depends on HID
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you want to enable force feedback support for devices by
++ EMS Production Ltd.
++ Currently the following devices are known to be supported:
++ - Trio Linker Plus II
++
++config HID_ELECOM
++ tristate "ELECOM BM084 bluetooth mouse"
++ depends on HID
++ ---help---
++ Support for the ELECOM BM084 (bluetooth mouse).
++
++config HID_ELO
++ tristate "ELO USB 4000/4500 touchscreen"
++ depends on USB_HID
++ ---help---
++ Support for the ELO USB 4000/4500 touchscreens. Note that this is for
++ different devices than those handled by CONFIG_TOUCHSCREEN_USB_ELO.
++
++config HID_EZKEY
++ tristate "Ezkey BTC 8193 keyboard" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for Ezkey BTC 8193 keyboard.
++
++config HID_HOLTEK
++ tristate "Holtek HID devices"
++ depends on USB_HID
++ ---help---
++ Support for Holtek based devices:
++ - Holtek On Line Grip based game controller
++ - Trust GXT 18 Gaming Keyboard
++ - Sharkoon Drakonia / Perixx MX-2000 gaming mice
++ - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
++ Zalman ZM-GM1
++ - SHARKOON DarkGlider Gaming mouse
++ - LEETGION Hellion Gaming Mouse
++
++config HOLTEK_FF
++ bool "Holtek On Line Grip force feedback support"
++ depends on HID_HOLTEK
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you have a Holtek On Line Grip based game controller
++ and want to have force feedback support for it.
++
++config HID_HUION
++ tristate "Huion tablets"
++ depends on USB_HID
++ ---help---
++ Support for Huion 580 tablet.
++
++config HID_KEYTOUCH
++ tristate "Keytouch HID devices"
++ depends on HID
++ ---help---
++ Support for Keytouch HID devices not fully compliant with
++ the specification. Currently supported:
++ - Keytouch IEC 60945
++
++config HID_KYE
++ tristate "KYE/Genius devices"
++ depends on HID
++ ---help---
++ Support for KYE/Genius devices not fully compliant with HID standard:
++ - Ergo Mouse
++ - EasyPen i405X tablet
++ - MousePen i608X tablet
++ - EasyPen M610X tablet
++
++config HID_UCLOGIC
++ tristate "UC-Logic"
++ depends on HID
++ ---help---
++ Support for UC-Logic tablets.
++
++config HID_WALTOP
++ tristate "Waltop"
++ depends on HID
++ ---help---
++ Support for Waltop tablets.
++
++config HID_GYRATION
++ tristate "Gyration remote control"
++ depends on HID
++ ---help---
++ Support for Gyration remote control.
++
++config HID_ICADE
++ tristate "ION iCade arcade controller"
++ depends on HID
++ ---help---
++ Support for the ION iCade arcade controller to work as a joystick.
++
++ To compile this driver as a module, choose M here: the
++ module will be called hid-icade.
++
++config HID_TWINHAN
++ tristate "Twinhan IR remote control"
++ depends on HID
++ ---help---
++ Support for Twinhan IR remote control.
++
++config HID_KENSINGTON
++ tristate "Kensington Slimblade Trackball" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for Kensington Slimblade Trackball.
++
++config HID_LCPOWER
++ tristate "LC-Power"
++ depends on HID
++ ---help---
++ Support for LC-Power RC1000MCE RF remote control.
++
++config HID_LENOVO_TPKBD
++ tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
++ depends on HID
++ select NEW_LEDS
++ select LEDS_CLASS
++ ---help---
++ Support for the Lenovo ThinkPad USB Keyboard with TrackPoint.
++
++ Say Y here if you have a Lenovo ThinkPad USB Keyboard with TrackPoint
++ and would like to use device-specific features like changing the
++ sensitivity of the trackpoint, using the microphone mute button or
++ controlling the mute and microphone mute LEDs.
++
++config HID_LOGITECH
++ tristate "Logitech devices" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for Logitech devices that are not fully compliant with HID standard.
++
++config HID_LOGITECH_DJ
++ tristate "Logitech Unifying receivers full support"
++ depends on HIDRAW
++ depends on HID_LOGITECH
++ ---help---
++ Say Y if you want support for Logitech Unifying receivers and devices.
++ Unifying receivers are capable of pairing up to 6 Logitech compliant
++ devices to the same receiver. Without this driver it will be handled by
++ generic USB_HID driver and all incoming events will be multiplexed
++ into a single mouse and a single keyboard device.
++
++config LOGITECH_FF
++ bool "Logitech force feedback support"
++ depends on HID_LOGITECH
++ select INPUT_FF_MEMLESS
++ help
++ Say Y here if you have one of these devices:
++ - Logitech WingMan Cordless RumblePad
++ - Logitech WingMan Cordless RumblePad 2
++ - Logitech WingMan Force 3D
++ - Logitech Formula Force EX
++ - Logitech WingMan Formula Force GP
++
++ and if you want to enable force feedback for them.
++ Note: if you say N here, this device will still be supported, but without
++ force feedback.
++
++config LOGIRUMBLEPAD2_FF
++ bool "Logitech force feedback support (variant 2)"
++ depends on HID_LOGITECH
++ select INPUT_FF_MEMLESS
++ help
++ Say Y here if you want to enable force feedback support for:
++ - Logitech RumblePad
++ - Logitech Rumblepad 2
++ - Logitech Formula Vibration Feedback Wheel
++
++config LOGIG940_FF
++ bool "Logitech Flight System G940 force feedback support"
++ depends on HID_LOGITECH
++ select INPUT_FF_MEMLESS
++ help
++ Say Y here if you want to enable force feedback support for Logitech
++ Flight System G940 devices.
++
++config LOGIWHEELS_FF
++ bool "Logitech wheels configuration and force feedback support"
++ depends on HID_LOGITECH
++ select INPUT_FF_MEMLESS
++ default LOGITECH_FF
++ help
++ Say Y here if you want to enable force feedback and range setting
++ support for following Logitech wheels:
++ - Logitech Driving Force
++ - Logitech Driving Force Pro
++ - Logitech Driving Force GT
++ - Logitech G25
++ - Logitech G27
++ - Logitech MOMO/MOMO 2
++ - Logitech Formula Force EX
++
++config HID_MAGICMOUSE
++ tristate "Apple Magic Mouse/Trackpad multi-touch support"
++ depends on HID
++ ---help---
++ Support for the Apple Magic Mouse/Trackpad multi-touch.
++
++ Say Y here if you want support for the multi-touch features of the
++ Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
++
++config HID_MICROSOFT
++ tristate "Microsoft non-fully HID-compliant devices" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for Microsoft devices that are not fully compliant with HID standard.
++
++config HID_MONTEREY
++ tristate "Monterey Genius KB29E keyboard" if EXPERT
++ depends on HID
++ default !EXPERT
++ ---help---
++ Support for Monterey Genius KB29E.
++
++config HID_MULTITOUCH
++ tristate "HID Multitouch panels"
++ depends on HID
++ ---help---
++ Generic support for HID multitouch panels.
++
++ Say Y here if you have one of the following devices:
++ - 3M PCT touch screens
++ - ActionStar dual touch panels
++ - Atmel panels
++ - Cando dual touch panels
++ - Chunghwa panels
++ - CVTouch panels
++ - Cypress TrueTouch panels
++ - Elan Microelectronics touch panels
++ - Elo TouchSystems IntelliTouch Plus panels
++ - GeneralTouch 'Sensing Win7-TwoFinger' panels
++ - GoodTouch panels
++ - Hanvon dual touch panels
++ - Ilitek dual touch panels
++ - IrTouch Infrared USB panels
++ - LG Display panels (Dell ST2220Tc)
++ - Lumio CrystalTouch panels
++ - MosArt dual-touch panels
++ - Panasonic multitouch panels
++ - PenMount dual touch panels
++ - Perixx Peripad 701 touchpad
++ - PixArt optical touch screen
++ - Pixcir dual touch panels
++ - Quanta panels
++ - eGalax dual-touch panels, including the Joojoo and Wetab tablets
++ - SiS multitouch panels
++ - Stantum multitouch panels
++ - Touch International Panels
++ - Unitec Panels
++ - Wistron optical touch panels
++ - XAT optical touch panels
++ - Xiroku optical touch panels
++ - Zytronic touch panels
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called hid-multitouch.
++
++config HID_NTRIG
++ tristate "N-Trig touch screen"
++ depends on USB_HID
++ ---help---
++ Support for N-Trig touch screen.
++
++config HID_ORTEK
++ tristate "Ortek PKB-1700/WKB-2000/Skycable wireless keyboard and mouse trackpad"
++ depends on HID
++ ---help---
++ There are certain devices which have LogicalMaximum wrong in the keyboard
++ usage page of their report descriptor. The most prevailing ones so far
++ are manufactured by Ortek, thus the name of the driver. Currently
++ supported devices by this driver are
++
++ - Ortek PKB-1700
++ - Ortek WKB-2000
++ - Skycable wireless presenter
++
++config HID_OUYA
++ tristate "OUYA Game Controller"
++ depends on USB_HID
++ ---help---
++ Support for OUYA Game Controller.
++
++config HID_PANTHERLORD
++ tristate "Pantherlord/GreenAsia game controller"
++ depends on HID
++ ---help---
++ Say Y here if you have a PantherLord/GreenAsia based game controller
++ or adapter.
++
++config PANTHERLORD_FF
++ bool "Pantherlord force feedback support"
++ depends on HID_PANTHERLORD
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you have a PantherLord/GreenAsia based game controller
++ or adapter and want to enable force feedback support for it.
++
++config HID_PETALYNX
++ tristate "Petalynx Maxter remote control"
++ depends on HID
++ ---help---
++ Support for Petalynx Maxter remote control.
++
++config HID_PICOLCD
++ tristate "PicoLCD (graphic version)"
++ depends on HID
++ ---help---
++ This provides support for Minibox PicoLCD devices, currently
++ only the graphical ones are supported.
++
++ This includes support for the following device features:
++ - Keypad
++ - Switching between Firmware and Flash mode
++ - EEProm / Flash access (via debugfs)
++ Features selectively enabled:
++ - Framebuffer for monochrome 256x64 display
++ - Backlight control
++ - Contrast control
++ - General purpose outputs
++ Features that are not (yet) supported:
++ - IR
++
++config HID_PICOLCD_FB
++ bool "Framebuffer support" if EXPERT
++ default !EXPERT
++ depends on HID_PICOLCD
++ depends on HID_PICOLCD=FB || FB=y
++ select FB_DEFERRED_IO
++ select FB_SYS_FILLRECT
++ select FB_SYS_COPYAREA
++ select FB_SYS_IMAGEBLIT
++ select FB_SYS_FOPS
++ ---help---
++ Provide access to PicoLCD's 256x64 monochrome display via a
++ framebuffer device.
++
++config HID_PICOLCD_BACKLIGHT
++ bool "Backlight control" if EXPERT
++ default !EXPERT
++ depends on HID_PICOLCD
++ depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
++ ---help---
++ Provide access to PicoLCD's backlight control via backlight
++ class.
++
++config HID_PICOLCD_LCD
++ bool "Contrast control" if EXPERT
++ default !EXPERT
++ depends on HID_PICOLCD
++ depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
++ ---help---
++ Provide access to PicoLCD's LCD contrast via lcd class.
++
++config HID_PICOLCD_LEDS
++ bool "GPO via leds class" if EXPERT
++ default !EXPERT
++ depends on HID_PICOLCD
++ depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
++ ---help---
++ Provide access to PicoLCD's GPO pins via leds class.
++
++config HID_PICOLCD_CIR
++ bool "CIR via RC class" if EXPERT
++ default !EXPERT
++ depends on HID_PICOLCD
++ depends on HID_PICOLCD=RC_CORE || RC_CORE=y
++ ---help---
++ Provide access to PicoLCD's CIR interface via remote control (LIRC).
++
++config HID_PRIMAX
++ tristate "Primax non-fully HID-compliant devices"
++ depends on HID
++ ---help---
++ Support for Primax devices that are not fully compliant with the
++ HID standard.
++
++config HID_ROCCAT
++ tristate "Roccat device support"
++ depends on USB_HID
++ ---help---
++ Support for Roccat devices.
++ Say Y here if you have a Roccat mouse or keyboard and want
++ support for its special functionalities.
++
++config HID_SAITEK
++ tristate "Saitek non-fully HID-compliant devices"
++ depends on HID
++ ---help---
++ Support for Saitek devices that are not fully compliant with the
++ HID standard.
++
++ Currently only supports the PS1000 controller.
++
++config HID_SAMSUNG
++ tristate "Samsung InfraRed remote control or keyboards"
++ depends on HID
++ ---help---
++ Support for Samsung InfraRed remote control or keyboards.
++
++config HID_SONY
++ tristate "Sony PS2/3 accessories"
++ depends on USB_HID
++ depends on NEW_LEDS
++ depends on LEDS_CLASS
++ ---help---
++ Support for
++
++ * Sony PS3 6-axis controllers
++ * Buzz controllers
++ * Sony PS3 Blue-ray Disk Remote Control (Bluetooth)
++ * Logitech Harmony adapter for Sony Playstation 3 (Bluetooth)
++
++config SONY_FF
++ bool "Sony PS2/3 accessories force feedback support"
++ depends on HID_SONY
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you have a Sony PS2/3 accessory and want to enable force
++ feedback support for it.
++
++config HID_SPEEDLINK
++ tristate "Speedlink VAD Cezanne mouse support"
++ depends on HID
++ ---help---
++ Support for Speedlink Vicious and Divine Cezanne mouse.
++
++config HID_STEELSERIES
++ tristate "Steelseries SRW-S1 steering wheel support"
++ depends on HID
++ ---help---
++ Support for Steelseries SRW-S1 steering wheel
++
++config HID_SUNPLUS
++ tristate "Sunplus wireless desktop"
++ depends on HID
++ ---help---
++ Support for Sunplus wireless desktop.
++
++config HID_GREENASIA
++ tristate "GreenAsia (Product ID 0x12) game controller support"
++ depends on HID
++ ---help---
++ Say Y here if you have a GreenAsia (Product ID 0x12) based game
++ controller or adapter.
++
++config GREENASIA_FF
++ bool "GreenAsia (Product ID 0x12) force feedback support"
++ depends on HID_GREENASIA
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you have a GreenAsia (Product ID 0x12) based game controller
++ (like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter
++ and want to enable force feedback support for it.
++
++config HID_HYPERV_MOUSE
++ tristate "Microsoft Hyper-V mouse driver"
++ depends on HYPERV
++ ---help---
++ Select this option to enable the Hyper-V mouse driver.
++
++config HID_SMARTJOYPLUS
++ tristate "SmartJoy PLUS PS2/USB adapter support"
++ depends on HID
++ ---help---
++ Support for SmartJoy PLUS PS2/USB adapter, Super Dual Box,
++ Super Joy Box 3 Pro, Super Dual Box Pro, and Super Joy Box 5 Pro.
++
++ Note that DDR (Dance Dance Revolution) mode is not supported, nor
++ is pressure sensitive buttons on the pro models.
++
++config SMARTJOYPLUS_FF
++ bool "SmartJoy PLUS PS2/USB adapter force feedback support"
++ depends on HID_SMARTJOYPLUS
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you have a SmartJoy PLUS PS2/USB adapter and want to
++ enable force feedback support for it.
++
++config HID_TIVO
++ tristate "TiVo Slide Bluetooth remote control support"
++ depends on HID
++ ---help---
++ Say Y if you have a TiVo Slide Bluetooth remote control.
++
++config HID_TOPSEED
++ tristate "TopSeed Cyberlink, BTC Emprex, Conceptronic remote control support"
++ depends on HID
++ ---help---
++ Say Y if you have a TopSeed Cyberlink or BTC Emprex or Conceptronic
++ CLLRCMCE remote control.
++
++config HID_THINGM
++ tristate "ThingM blink(1) USB RGB LED"
++ depends on HID
++ depends on LEDS_CLASS
++ ---help---
++ Support for the ThingM blink(1) USB RGB LED. This driver registers a
++ Linux LED class instance, plus additional sysfs attributes to control
++ RGB colors, fade time and playing. The device is exposed through hidraw
++ to access other functions.
++
++config HID_THRUSTMASTER
++ tristate "ThrustMaster devices support"
++ depends on HID
++ ---help---
++ Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
++ a THRUSTMASTER Ferrari GT Rumble Wheel.
++
++config THRUSTMASTER_FF
++ bool "ThrustMaster devices force feedback support"
++ depends on HID_THRUSTMASTER
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or 3,
++ a THRUSTMASTER Dual Trigger 3-in-1 or a THRUSTMASTER Ferrari GT
++ Rumble Force or Force Feedback Wheel.
++
++config HID_WACOM
++ tristate "Wacom Bluetooth devices support"
++ depends on HID
++ depends on LEDS_CLASS
++ select POWER_SUPPLY
++ ---help---
++ Support for Wacom Graphire Bluetooth and Intuos4 WL tablets.
++
++config HID_WIIMOTE
++ tristate "Nintendo Wii / Wii U peripherals"
++ depends on HID
++ depends on LEDS_CLASS
++ select POWER_SUPPLY
++ select INPUT_FF_MEMLESS
++ ---help---
++ Support for Nintendo Wii and Wii U Bluetooth peripherals. Supported
++ devices are the Wii Remote and its extension devices, but also devices
++ based on the Wii Remote like the Wii U Pro Controller or the
++ Wii Balance Board.
++
++ Support for all official Nintendo extensions is available, however, 3rd
++ party extensions might not be supported. Please report these devices to:
++ http://github.com/dvdhrm/xwiimote/issues
++
++ Other Nintendo Wii U peripherals that are IEEE 802.11 based (including
++ the Wii U Gamepad) might be supported in the future. But currently
++ support is limited to Bluetooth based devices.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called hid-wiimote.
++
++config HID_XINMO
++ tristate "Xin-Mo non-fully compliant devices"
++ depends on HID
++ ---help---
++ Support for Xin-Mo devices that are not fully compliant with the HID
++ standard. Currently only supports the Xin-Mo Dual Arcade. Say Y here
++ if you have a Xin-Mo Dual Arcade controller.
++
++config HID_ZEROPLUS
++ tristate "Zeroplus based game controller support"
++ depends on HID
++ ---help---
++ Say Y here if you have a Zeroplus based game controller.
++
++config ZEROPLUS_FF
++ bool "Zeroplus based game controller force feedback support"
++ depends on HID_ZEROPLUS
++ select INPUT_FF_MEMLESS
++ ---help---
++ Say Y here if you have a Zeroplus based game controller and want
++ to have force feedback support for it.
++
++config HID_ZYDACRON
++ tristate "Zydacron remote control support"
++ depends on HID
++ ---help---
++ Support for Zydacron remote control.
++
++config HID_SENSOR_HUB
++ tristate "HID Sensors framework support"
++ depends on HID
++ select MFD_CORE
++ default n
++ ---help---
++ Support for HID Sensor framework. This creates a MFD instance
++ for a sensor hub and identifies all the sensors connected to it.
++ Each sensor is registered as a MFD cell, so that sensor specific
++ processing can be done in a separate driver. Each sensor
++ drivers can use the service provided by this driver to register
++ for events and handle data streams. Each sensor driver can format
++ data and present to user mode using input or IIO interface.
++
++endmenu
++
++endif # HID
++
++source "drivers/hid/usbhid/Kconfig"
++
++source "drivers/hid/i2c-hid/Kconfig"
++
++endmenu
+diff -Nur linux-3.14.36/drivers/hid/Makefile linux-openelec/drivers/hid/Makefile
+--- linux-3.14.36/drivers/hid/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/hid/Makefile 2015-07-24 18:03:29.980842002 -0500
+@@ -67,6 +67,7 @@
+ obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
+ obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
+ obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
++obj-$(CONFIG_HID_OUYA) += hid-ouya.o
+ obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
+ obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
+ obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
+@@ -101,6 +102,7 @@
+ obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
+ obj-$(CONFIG_HID_SONY) += hid-sony.o
+ obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
++obj-$(CONFIG_HID_SPINELPLUS) += hid-spinelplus.o
+ obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
+ obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
+ obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
+diff -Nur linux-3.14.36/drivers/hid/Makefile.orig linux-openelec/drivers/hid/Makefile.orig
+--- linux-3.14.36/drivers/hid/Makefile.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hid/Makefile.orig 2015-07-24 18:03:29.968842002 -0500
+@@ -0,0 +1,126 @@
++#
++# Makefile for the HID driver
++#
++hid-y := hid-core.o hid-input.o
++
++ifdef CONFIG_DEBUG_FS
++ hid-objs += hid-debug.o
++endif
++
++obj-$(CONFIG_HID) += hid.o
++obj-$(CONFIG_UHID) += uhid.o
++
++obj-$(CONFIG_HID_GENERIC) += hid-generic.o
++
++hid-$(CONFIG_HIDRAW) += hidraw.o
++
++hid-logitech-y := hid-lg.o
++ifdef CONFIG_LOGITECH_FF
++ hid-logitech-y += hid-lgff.o
++endif
++ifdef CONFIG_LOGIRUMBLEPAD2_FF
++ hid-logitech-y += hid-lg2ff.o
++endif
++ifdef CONFIG_LOGIG940_FF
++ hid-logitech-y += hid-lg3ff.o
++endif
++ifdef CONFIG_LOGIWHEELS_FF
++ hid-logitech-y += hid-lg4ff.o
++endif
++
++hid-wiimote-y := hid-wiimote-core.o hid-wiimote-modules.o
++ifdef CONFIG_DEBUG_FS
++ hid-wiimote-y += hid-wiimote-debug.o
++endif
++
++obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
++obj-$(CONFIG_HID_ACRUX) += hid-axff.o
++obj-$(CONFIG_HID_APPLE) += hid-apple.o
++obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o
++obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
++obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
++obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
++obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
++obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
++obj-$(CONFIG_HID_DRAGONRISE) += hid-dr.o
++obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
++obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
++obj-$(CONFIG_HID_ELO) += hid-elo.o
++obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
++obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
++obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
++obj-$(CONFIG_HID_HOLTEK) += hid-holtek-mouse.o
++obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
++obj-$(CONFIG_HID_HUION) += hid-huion.o
++obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
++obj-$(CONFIG_HID_ICADE) += hid-icade.o
++obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
++obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
++obj-$(CONFIG_HID_KYE) += hid-kye.o
++obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
++obj-$(CONFIG_HID_LENOVO_TPKBD) += hid-lenovo-tpkbd.o
++obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
++obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
++obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
++obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
++obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
++obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
++obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
++obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
++obj-$(CONFIG_HID_OUYA) += hid-ouya.o
++obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
++obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
++obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
++obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
++hid-picolcd-y += hid-picolcd_core.o
++ifdef CONFIG_HID_PICOLCD_FB
++hid-picolcd-y += hid-picolcd_fb.o
++endif
++ifdef CONFIG_HID_PICOLCD_BACKLIGHT
++hid-picolcd-y += hid-picolcd_backlight.o
++endif
++ifdef CONFIG_HID_PICOLCD_LCD
++hid-picolcd-y += hid-picolcd_lcd.o
++endif
++ifdef CONFIG_HID_PICOLCD_LEDS
++hid-picolcd-y += hid-picolcd_leds.o
++endif
++ifdef CONFIG_HID_PICOLCD_CIR
++hid-picolcd-y += hid-picolcd_cir.o
++endif
++ifdef CONFIG_DEBUG_FS
++hid-picolcd-y += hid-picolcd_debugfs.o
++endif
++
++obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
++obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
++ hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
++ hid-roccat-koneplus.o hid-roccat-konepure.o hid-roccat-kovaplus.o \
++ hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-ryos.o hid-roccat-savu.o
++obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
++obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
++obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
++obj-$(CONFIG_HID_SONY) += hid-sony.o
++obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
++obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
++obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
++obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
++obj-$(CONFIG_HID_THINGM) += hid-thingm.o
++obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
++obj-$(CONFIG_HID_TIVO) += hid-tivo.o
++obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
++obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
++obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
++obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
++obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
++obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
++obj-$(CONFIG_HID_WACOM) += hid-wacom.o
++obj-$(CONFIG_HID_WALTOP) += hid-waltop.o
++obj-$(CONFIG_HID_WIIMOTE) += hid-wiimote.o
++obj-$(CONFIG_HID_SENSOR_HUB) += hid-sensor-hub.o
++
++obj-$(CONFIG_USB_HID) += usbhid/
++obj-$(CONFIG_USB_MOUSE) += usbhid/
++obj-$(CONFIG_USB_KBD) += usbhid/
++
++obj-$(CONFIG_I2C_HID) += i2c-hid/
+diff -Nur linux-3.14.36/drivers/hwmon/Kconfig linux-openelec/drivers/hwmon/Kconfig
+--- linux-3.14.36/drivers/hwmon/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/hwmon/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -1584,4 +1584,19 @@
+
+ endif # ACPI
+
++config SENSORS_MAG3110
++ tristate "Freescale MAG3110 e-compass sensor"
++ depends on I2C && SYSFS
++ help
++ If you say yes here you get support for the Freescale MAG3110
++ e-compass sensor.
++ This driver can also be built as a module. If so, the module
++ will be called mag3110.
++
++config MXC_MMA8451
++ tristate "MMA8451 device driver"
++ depends on I2C
++ depends on INPUT_POLLDEV
++ default y
++
+ endif # HWMON
+diff -Nur linux-3.14.36/drivers/hwmon/mag3110.c linux-openelec/drivers/hwmon/mag3110.c
+--- linux-3.14.36/drivers/hwmon/mag3110.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hwmon/mag3110.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,611 @@
++/*
++ *
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/irq.h>
++#include <linux/platform_device.h>
++#include <linux/input-polldev.h>
++#include <linux/hwmon.h>
++#include <linux/input.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++#include <linux/of.h>
++#include <linux/regulator/consumer.h>
++
++#define MAG3110_DRV_NAME "mag3110"
++#define MAG3110_ID 0xC4
++#define MAG3110_XYZ_DATA_LEN 6
++#define MAG3110_STATUS_ZYXDR 0x08
++
++#define MAG3110_AC_MASK (0x01)
++#define MAG3110_AC_OFFSET 0
++#define MAG3110_DR_MODE_MASK (0x7 << 5)
++#define MAG3110_DR_MODE_OFFSET 5
++#define MAG3110_IRQ_USED 0
++
++#define POLL_INTERVAL_MAX 500
++#define POLL_INTERVAL 100
++#define INT_TIMEOUT 1000
++#define DEFAULT_POSITION 2
++/* register enum for mag3110 registers */
++enum {
++ MAG3110_DR_STATUS = 0x00,
++ MAG3110_OUT_X_MSB,
++ MAG3110_OUT_X_LSB,
++ MAG3110_OUT_Y_MSB,
++ MAG3110_OUT_Y_LSB,
++ MAG3110_OUT_Z_MSB,
++ MAG3110_OUT_Z_LSB,
++ MAG3110_WHO_AM_I,
++
++ MAG3110_OFF_X_MSB,
++ MAG3110_OFF_X_LSB,
++ MAG3110_OFF_Y_MSB,
++ MAG3110_OFF_Y_LSB,
++ MAG3110_OFF_Z_MSB,
++ MAG3110_OFF_Z_LSB,
++
++ MAG3110_DIE_TEMP,
++
++ MAG3110_CTRL_REG1 = 0x10,
++ MAG3110_CTRL_REG2,
++};
++enum {
++ MAG_STANDBY,
++ MAG_ACTIVED
++};
++struct mag3110_data {
++ struct i2c_client *client;
++ struct input_polled_dev *poll_dev;
++ struct device *hwmon_dev;
++ wait_queue_head_t waitq;
++ bool data_ready;
++ u8 ctl_reg1;
++ int active;
++ int position;
++};
++static short MAGHAL[8][3][3] = {
++ { {0, 1, 0}, {-1, 0, 0}, {0, 0, 1} },
++ { {1, 0, 0}, {0, 1, 0}, {0, 0, 1} },
++ { {0, -1, 0}, {1, 0, 0}, {0, 0, 1} },
++ { {-1, 0, 0}, {0, -1, 0}, {0, 0, 1} },
++
++ { {0, 1, 0}, {1, 0, 0}, {0, 0, -1} },
++ { {1, 0, 0}, {0, -1, 0}, {0, 0, -1} },
++ { {0, -1, 0}, {-1, 0, 0}, {0, 0, -1} },
++ { {-1, 0, 0}, {0, 1, 0}, {0, 0, -1} },
++};
++
++static struct mag3110_data *mag3110_pdata;
++/*!
++ * This function do one mag3110 register read.
++ */
++static DEFINE_MUTEX(mag3110_lock);
++static int mag3110_adjust_position(short *x, short *y, short *z)
++{
++ short rawdata[3], data[3];
++ int i, j;
++ int position = mag3110_pdata->position;
++ if (position < 0 || position > 7)
++ position = 0;
++ rawdata[0] = *x;
++ rawdata[1] = *y;
++ rawdata[2] = *z;
++ for (i = 0; i < 3; i++) {
++ data[i] = 0;
++ for (j = 0; j < 3; j++)
++ data[i] += rawdata[j] * MAGHAL[position][i][j];
++ }
++ *x = data[0];
++ *y = data[1];
++ *z = data[2];
++ return 0;
++}
++
++static int mag3110_read_reg(struct i2c_client *client, u8 reg)
++{
++ return i2c_smbus_read_byte_data(client, reg);
++}
++
++/*!
++ * This function do one mag3110 register write.
++ */
++static int mag3110_write_reg(struct i2c_client *client, u8 reg, char value)
++{
++ int ret;
++
++ ret = i2c_smbus_write_byte_data(client, reg, value);
++ if (ret < 0)
++ dev_err(&client->dev, "i2c write failed\n");
++ return ret;
++}
++
++/*!
++ * This function do multiple mag3110 registers read.
++ */
++static int mag3110_read_block_data(struct i2c_client *client, u8 reg,
++ int count, u8 *addr)
++{
++ if (i2c_smbus_read_i2c_block_data(client, reg, count, addr) < count) {
++ dev_err(&client->dev, "i2c block read failed\n");
++ return -1;
++ }
++
++ return count;
++}
++
++/*
++ * Initialization function
++ */
++static int mag3110_init_client(struct i2c_client *client)
++{
++ int val, ret;
++
++ /* enable automatic resets */
++ val = 0x80;
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG2, val);
++
++ /* set default data rate to 10HZ */
++ val = mag3110_read_reg(client, MAG3110_CTRL_REG1);
++ val |= (0x0 << MAG3110_DR_MODE_OFFSET);
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1, val);
++
++ return ret;
++}
++
++/***************************************************************
++*
++* read sensor data from mag3110
++*
++***************************************************************/
++static int mag3110_read_data(short *x, short *y, short *z)
++{
++ struct mag3110_data *data;
++ int retry = 3;
++ u8 tmp_data[MAG3110_XYZ_DATA_LEN];
++ int result;
++ if (!mag3110_pdata || mag3110_pdata->active == MAG_STANDBY)
++ return -EINVAL;
++
++ data = mag3110_pdata;
++#if MAG3110_IRQ_USED
++ if (!wait_event_interruptible_timeout
++ (data->waitq, data->data_ready != 0,
++ msecs_to_jiffies(INT_TIMEOUT))) {
++ dev_dbg(&data->client->dev, "interrupt not received\n");
++ return -ETIME;
++ }
++#else
++ do {
++ msleep(1);
++ result = i2c_smbus_read_byte_data(data->client,
++ MAG3110_DR_STATUS);
++ retry--;
++ } while (!(result & MAG3110_STATUS_ZYXDR) && retry > 0);
++ /* Clear data_ready flag after data is read out */
++ if (retry == 0)
++ return -EINVAL;
++#endif
++
++ data->data_ready = 0;
++
++ if (mag3110_read_block_data(data->client,
++ MAG3110_OUT_X_MSB, MAG3110_XYZ_DATA_LEN,
++ tmp_data) < 0)
++ return -1;
++
++ *x = ((tmp_data[0] << 8) & 0xff00) | tmp_data[1];
++ *y = ((tmp_data[2] << 8) & 0xff00) | tmp_data[3];
++ *z = ((tmp_data[4] << 8) & 0xff00) | tmp_data[5];
++
++ return 0;
++}
++
++static void report_abs(void)
++{
++ struct input_dev *idev;
++ short x, y, z;
++
++ mutex_lock(&mag3110_lock);
++ if (mag3110_read_data(&x, &y, &z) != 0)
++ goto out;
++ mag3110_adjust_position(&x, &y, &z);
++ idev = mag3110_pdata->poll_dev->input;
++ input_report_abs(idev, ABS_X, x);
++ input_report_abs(idev, ABS_Y, y);
++ input_report_abs(idev, ABS_Z, z);
++ input_sync(idev);
++out:
++ mutex_unlock(&mag3110_lock);
++}
++
++static void mag3110_dev_poll(struct input_polled_dev *dev)
++{
++ report_abs();
++}
++
++#if MAG3110_IRQ_USED
++static irqreturn_t mag3110_irq_handler(int irq, void *dev_id)
++{
++ mag3110_pdata->data_ready = 1;
++ wake_up_interruptible(&mag3110_pdata->waitq);
++
++ return IRQ_HANDLED;
++}
++#endif
++static ssize_t mag3110_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client;
++ int val;
++ mutex_lock(&mag3110_lock);
++ client = mag3110_pdata->client;
++ val = mag3110_read_reg(client, MAG3110_CTRL_REG1) & MAG3110_AC_MASK;
++
++ mutex_unlock(&mag3110_lock);
++ return sprintf(buf, "%d\n", val);
++}
++
++static ssize_t mag3110_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client;
++ int reg, ret;
++ long enable;
++ u8 tmp_data[MAG3110_XYZ_DATA_LEN];
++
++ ret = strict_strtol(buf, 10, &enable);
++ if (ret) {
++ dev_err(dev, "string to long error\n");
++ return ret;
++ }
++
++ mutex_lock(&mag3110_lock);
++ client = mag3110_pdata->client;
++ reg = mag3110_read_reg(client, MAG3110_CTRL_REG1);
++ if (enable && mag3110_pdata->active == MAG_STANDBY) {
++ reg |= MAG3110_AC_MASK;
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1, reg);
++ if (!ret)
++ mag3110_pdata->active = MAG_ACTIVED;
++ } else if (!enable && mag3110_pdata->active == MAG_ACTIVED) {
++ reg &= ~MAG3110_AC_MASK;
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1, reg);
++ if (!ret)
++ mag3110_pdata->active = MAG_STANDBY;
++ }
++
++ if (mag3110_pdata->active == MAG_ACTIVED) {
++ msleep(100);
++ /* Read out MSB data to clear interrupt flag automatically */
++ mag3110_read_block_data(client, MAG3110_OUT_X_MSB,
++ MAG3110_XYZ_DATA_LEN, tmp_data);
++ }
++ mutex_unlock(&mag3110_lock);
++ return count;
++}
++
++static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
++ mag3110_enable_show, mag3110_enable_store);
++
++static ssize_t mag3110_dr_mode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client;
++ int val;
++
++ client = mag3110_pdata->client;
++ val = (mag3110_read_reg(client, MAG3110_CTRL_REG1)
++ & MAG3110_DR_MODE_MASK) >> MAG3110_DR_MODE_OFFSET;
++
++ return sprintf(buf, "%d\n", val);
++}
++
++static ssize_t mag3110_dr_mode_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client;
++ int reg, ret;
++ unsigned long val;
++
++ /* This must be done when mag3110 is disabled */
++ if ((strict_strtoul(buf, 10, &val) < 0) || (val > 7))
++ return -EINVAL;
++
++ client = mag3110_pdata->client;
++ reg = mag3110_read_reg(client, MAG3110_CTRL_REG1) &
++ ~MAG3110_DR_MODE_MASK;
++ reg |= (val << MAG3110_DR_MODE_OFFSET);
++ /* MAG3110_CTRL_REG1 bit 5-7: data rate mode */
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1, reg);
++ if (ret < 0)
++ return ret;
++
++ return count;
++}
++
++static DEVICE_ATTR(dr_mode, S_IWUSR | S_IRUGO,
++ mag3110_dr_mode_show, mag3110_dr_mode_store);
++
++static ssize_t mag3110_position_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ int val;
++ mutex_lock(&mag3110_lock);
++ val = mag3110_pdata->position;
++ mutex_unlock(&mag3110_lock);
++ return sprintf(buf, "%d\n", val);
++}
++
++static ssize_t mag3110_position_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ long position;
++ int ret;
++ ret = strict_strtol(buf, 10, &position);
++ if (ret) {
++ dev_err(dev, "string to long error\n");
++ return ret;
++ }
++
++ mutex_lock(&mag3110_lock);
++ mag3110_pdata->position = (int)position;
++ mutex_unlock(&mag3110_lock);
++ return count;
++}
++
++static DEVICE_ATTR(position, S_IWUSR | S_IRUGO,
++ mag3110_position_show, mag3110_position_store);
++
++static struct attribute *mag3110_attributes[] = {
++ &dev_attr_enable.attr,
++ &dev_attr_dr_mode.attr,
++ &dev_attr_position.attr,
++ NULL
++};
++
++static const struct attribute_group mag3110_attr_group = {
++ .attrs = mag3110_attributes,
++};
++
++static int mag3110_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct i2c_adapter *adapter;
++ struct input_dev *idev;
++ struct mag3110_data *data;
++ int ret = 0;
++ struct regulator *vdd, *vdd_io;
++ u32 pos = 0;
++ struct device_node *of_node = client->dev.of_node;
++ vdd = NULL;
++ vdd_io = NULL;
++
++ vdd = devm_regulator_get(&client->dev, "vdd");
++ if (!IS_ERR(vdd)) {
++ ret = regulator_enable(vdd);
++ if (ret) {
++ dev_err(&client->dev, "vdd set voltage error\n");
++ return ret;
++ }
++ }
++
++ vdd_io = devm_regulator_get(&client->dev, "vddio");
++ if (!IS_ERR(vdd_io)) {
++ ret = regulator_enable(vdd_io);
++ if (ret) {
++ dev_err(&client->dev, "vddio set voltage error\n");
++ return ret;
++ }
++ }
++
++ adapter = to_i2c_adapter(client->dev.parent);
++ if (!i2c_check_functionality(adapter,
++ I2C_FUNC_SMBUS_BYTE |
++ I2C_FUNC_SMBUS_BYTE_DATA |
++ I2C_FUNC_SMBUS_I2C_BLOCK))
++ return -EIO;
++
++ dev_info(&client->dev, "check mag3110 chip ID\n");
++ ret = mag3110_read_reg(client, MAG3110_WHO_AM_I);
++
++ if (MAG3110_ID != ret) {
++ dev_err(&client->dev,
++ "read chip ID 0x%x is not equal to 0x%x!\n", ret,
++ MAG3110_ID);
++ return -EINVAL;
++ }
++ data = kzalloc(sizeof(struct mag3110_data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++ data->client = client;
++ i2c_set_clientdata(client, data);
++ /* Init queue */
++ init_waitqueue_head(&data->waitq);
++
++ data->hwmon_dev = hwmon_device_register(&client->dev);
++ if (IS_ERR(data->hwmon_dev)) {
++ dev_err(&client->dev, "hwmon register failed!\n");
++ ret = PTR_ERR(data->hwmon_dev);
++ goto error_rm_dev_sysfs;
++ }
++
++ /*input poll device register */
++ data->poll_dev = input_allocate_polled_device();
++ if (!data->poll_dev) {
++ dev_err(&client->dev, "alloc poll device failed!\n");
++ ret = -ENOMEM;
++ goto error_rm_hwmon_dev;
++ }
++ data->poll_dev->poll = mag3110_dev_poll;
++ data->poll_dev->poll_interval = POLL_INTERVAL;
++ data->poll_dev->poll_interval_max = POLL_INTERVAL_MAX;
++ idev = data->poll_dev->input;
++ idev->name = MAG3110_DRV_NAME;
++ idev->id.bustype = BUS_I2C;
++ idev->evbit[0] = BIT_MASK(EV_ABS);
++ input_set_abs_params(idev, ABS_X, -15000, 15000, 0, 0);
++ input_set_abs_params(idev, ABS_Y, -15000, 15000, 0, 0);
++ input_set_abs_params(idev, ABS_Z, -15000, 15000, 0, 0);
++ ret = input_register_polled_device(data->poll_dev);
++ if (ret) {
++ dev_err(&client->dev, "register poll device failed!\n");
++ goto error_free_poll_dev;
++ }
++
++ /*create device group in sysfs as user interface */
++ ret = sysfs_create_group(&idev->dev.kobj, &mag3110_attr_group);
++ if (ret) {
++ dev_err(&client->dev, "create device file failed!\n");
++ ret = -EINVAL;
++ goto error_rm_poll_dev;
++ }
++ /* set irq type to edge rising */
++#if MAG3110_IRQ_USED
++ ret = request_irq(client->irq, mag3110_irq_handler,
++ IRQF_TRIGGER_RISING, client->dev.driver->name, idev);
++ if (ret < 0) {
++ dev_err(&client->dev, "failed to register irq %d!\n",
++ client->irq);
++ goto error_rm_dev_sysfs;
++ }
++#endif
++ /* Initialize mag3110 chip */
++ mag3110_init_client(client);
++ mag3110_pdata = data;
++ mag3110_pdata->active = MAG_STANDBY;
++ ret = of_property_read_u32(of_node, "position", &pos);
++ if (ret)
++ pos = DEFAULT_POSITION;
++ mag3110_pdata->position = (int)pos;
++ dev_info(&client->dev, "mag3110 is probed\n");
++ return 0;
++error_rm_dev_sysfs:
++ sysfs_remove_group(&client->dev.kobj, &mag3110_attr_group);
++error_rm_poll_dev:
++ input_unregister_polled_device(data->poll_dev);
++error_free_poll_dev:
++ input_free_polled_device(data->poll_dev);
++error_rm_hwmon_dev:
++ hwmon_device_unregister(data->hwmon_dev);
++
++ kfree(data);
++ mag3110_pdata = NULL;
++
++ return ret;
++}
++
++static int mag3110_remove(struct i2c_client *client)
++{
++ struct mag3110_data *data;
++ int ret;
++
++ data = i2c_get_clientdata(client);
++
++ data->ctl_reg1 = mag3110_read_reg(client, MAG3110_CTRL_REG1);
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1,
++ data->ctl_reg1 & ~MAG3110_AC_MASK);
++
++ free_irq(client->irq, data);
++ input_unregister_polled_device(data->poll_dev);
++ input_free_polled_device(data->poll_dev);
++ hwmon_device_unregister(data->hwmon_dev);
++ sysfs_remove_group(&client->dev.kobj, &mag3110_attr_group);
++ kfree(data);
++ mag3110_pdata = NULL;
++
++ return ret;
++}
++
++#ifdef CONFIG_PM
++static int mag3110_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ int ret = 0;
++ struct mag3110_data *data = i2c_get_clientdata(client);
++ if (data->active == MAG_ACTIVED) {
++ data->ctl_reg1 = mag3110_read_reg(client, MAG3110_CTRL_REG1);
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1,
++ data->ctl_reg1 & ~MAG3110_AC_MASK);
++ }
++ return ret;
++}
++
++static int mag3110_resume(struct i2c_client *client)
++{
++ int ret = 0;
++ u8 tmp_data[MAG3110_XYZ_DATA_LEN];
++ struct mag3110_data *data = i2c_get_clientdata(client);
++ if (data->active == MAG_ACTIVED) {
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1,
++ data->ctl_reg1);
++
++ if (data->ctl_reg1 & MAG3110_AC_MASK) {
++ /* Read out MSB data to clear interrupt
++ flag automatically */
++ mag3110_read_block_data(client, MAG3110_OUT_X_MSB,
++ MAG3110_XYZ_DATA_LEN, tmp_data);
++ }
++ }
++ return ret;
++}
++
++#else
++#define mag3110_suspend NULL
++#define mag3110_resume NULL
++#endif /* CONFIG_PM */
++
++static const struct i2c_device_id mag3110_id[] = {
++ {MAG3110_DRV_NAME, 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, mag3110_id);
++static struct i2c_driver mag3110_driver = {
++ .driver = {.name = MAG3110_DRV_NAME,
++ .owner = THIS_MODULE,},
++ .suspend = mag3110_suspend,
++ .resume = mag3110_resume,
++ .probe = mag3110_probe,
++ .remove = mag3110_remove,
++ .id_table = mag3110_id,
++};
++
++static int __init mag3110_init(void)
++{
++ return i2c_add_driver(&mag3110_driver);
++}
++
++static void __exit mag3110_exit(void)
++{
++ i2c_del_driver(&mag3110_driver);
++}
++
++module_init(mag3110_init);
++module_exit(mag3110_exit);
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Freescale mag3110 3-axis magnetometer driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/hwmon/Makefile linux-openelec/drivers/hwmon/Makefile
+--- linux-3.14.36/drivers/hwmon/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/hwmon/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -142,6 +142,8 @@
+ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
+ obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
+ obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
++obj-$(CONFIG_SENSORS_MAG3110) += mag3110.o
++obj-$(CONFIG_MXC_MMA8451) += mxc_mma8451.o
+
+ obj-$(CONFIG_PMBUS) += pmbus/
+
+diff -Nur linux-3.14.36/drivers/hwmon/mxc_mma8451.c linux-openelec/drivers/hwmon/mxc_mma8451.c
+--- linux-3.14.36/drivers/hwmon/mxc_mma8451.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/hwmon/mxc_mma8451.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,598 @@
++/*
++ * mma8451.c - Linux kernel modules for 3-Axis Orientation/Motion
++ * Detection Sensor
++ *
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/pm.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/err.h>
++#include <linux/hwmon.h>
++#include <linux/input-polldev.h>
++#include <linux/of.h>
++#include <linux/regulator/consumer.h>
++
++#define MMA8451_I2C_ADDR 0x1C
++#define MMA8451_ID 0x1A
++#define MMA8452_ID 0x2A
++#define MMA8453_ID 0x3A
++
++#define POLL_INTERVAL_MIN 1
++#define POLL_INTERVAL_MAX 500
++#define POLL_INTERVAL 100 /* msecs */
++#define INPUT_FUZZ 32
++#define INPUT_FLAT 32
++#define MODE_CHANGE_DELAY_MS 100
++
++#define MMA8451_STATUS_ZYXDR 0x08
++#define MMA8451_BUF_SIZE 7
++#define DEFAULT_POSITION 0
++
++/* register enum for mma8451 registers */
++enum {
++ MMA8451_STATUS = 0x00,
++ MMA8451_OUT_X_MSB,
++ MMA8451_OUT_X_LSB,
++ MMA8451_OUT_Y_MSB,
++ MMA8451_OUT_Y_LSB,
++ MMA8451_OUT_Z_MSB,
++ MMA8451_OUT_Z_LSB,
++
++ MMA8451_F_SETUP = 0x09,
++ MMA8451_TRIG_CFG,
++ MMA8451_SYSMOD,
++ MMA8451_INT_SOURCE,
++ MMA8451_WHO_AM_I,
++ MMA8451_XYZ_DATA_CFG,
++ MMA8451_HP_FILTER_CUTOFF,
++
++ MMA8451_PL_STATUS,
++ MMA8451_PL_CFG,
++ MMA8451_PL_COUNT,
++ MMA8451_PL_BF_ZCOMP,
++ MMA8451_P_L_THS_REG,
++
++ MMA8451_FF_MT_CFG,
++ MMA8451_FF_MT_SRC,
++ MMA8451_FF_MT_THS,
++ MMA8451_FF_MT_COUNT,
++
++ MMA8451_TRANSIENT_CFG = 0x1D,
++ MMA8451_TRANSIENT_SRC,
++ MMA8451_TRANSIENT_THS,
++ MMA8451_TRANSIENT_COUNT,
++
++ MMA8451_PULSE_CFG,
++ MMA8451_PULSE_SRC,
++ MMA8451_PULSE_THSX,
++ MMA8451_PULSE_THSY,
++ MMA8451_PULSE_THSZ,
++ MMA8451_PULSE_TMLT,
++ MMA8451_PULSE_LTCY,
++ MMA8451_PULSE_WIND,
++
++ MMA8451_ASLP_COUNT,
++ MMA8451_CTRL_REG1,
++ MMA8451_CTRL_REG2,
++ MMA8451_CTRL_REG3,
++ MMA8451_CTRL_REG4,
++ MMA8451_CTRL_REG5,
++
++ MMA8451_OFF_X,
++ MMA8451_OFF_Y,
++ MMA8451_OFF_Z,
++
++ MMA8451_REG_END,
++};
++
++/* The sensitivity is represented in counts/g. In 2g mode the
++sensitivity is 1024 counts/g. In 4g mode the sensitivity is 512
++counts/g and in 8g mode the sensitivity is 256 counts/g.
++ */
++enum {
++ MODE_2G = 0,
++ MODE_4G,
++ MODE_8G,
++};
++
++enum {
++ MMA_STANDBY = 0,
++ MMA_ACTIVED,
++};
++
++/* mma8451 status */
++struct mma8451_status {
++ u8 mode;
++ u8 ctl_reg1;
++ int active;
++ int position;
++};
++
++static struct mma8451_status mma_status;
++static struct input_polled_dev *mma8451_idev;
++static struct device *hwmon_dev;
++static struct i2c_client *mma8451_i2c_client;
++
++static int senstive_mode = MODE_2G;
++static int ACCHAL[8][3][3] = {
++ { {0, -1, 0}, {1, 0, 0}, {0, 0, 1} },
++ { {-1, 0, 0}, {0, -1, 0}, {0, 0, 1} },
++ { {0, 1, 0}, {-1, 0, 0}, {0, 0, 1} },
++ { {1, 0, 0}, {0, 1, 0}, {0, 0, 1} },
++
++ { {0, -1, 0}, {-1, 0, 0}, {0, 0, -1} },
++ { {-1, 0, 0}, {0, 1, 0}, {0, 0, -1} },
++ { {0, 1, 0}, {1, 0, 0}, {0, 0, -1} },
++ { {1, 0, 0}, {0, -1, 0}, {0, 0, -1} },
++};
++
++static DEFINE_MUTEX(mma8451_lock);
++static int mma8451_adjust_position(short *x, short *y, short *z)
++{
++ short rawdata[3], data[3];
++ int i, j;
++ int position = mma_status.position;
++ if (position < 0 || position > 7)
++ position = 0;
++ rawdata[0] = *x;
++ rawdata[1] = *y;
++ rawdata[2] = *z;
++ for (i = 0; i < 3; i++) {
++ data[i] = 0;
++ for (j = 0; j < 3; j++)
++ data[i] += rawdata[j] * ACCHAL[position][i][j];
++ }
++ *x = data[0];
++ *y = data[1];
++ *z = data[2];
++ return 0;
++}
++
++static int mma8451_change_mode(struct i2c_client *client, int mode)
++{
++ int result;
++
++ mma_status.ctl_reg1 = 0;
++ result = i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1, 0);
++ if (result < 0)
++ goto out;
++ mma_status.active = MMA_STANDBY;
++
++ result = i2c_smbus_write_byte_data(client, MMA8451_XYZ_DATA_CFG,
++ mode);
++ if (result < 0)
++ goto out;
++ mdelay(MODE_CHANGE_DELAY_MS);
++ mma_status.mode = mode;
++
++ return 0;
++out:
++ dev_err(&client->dev, "error when init mma8451:(%d)", result);
++ return result;
++}
++
++static int mma8451_read_data(short *x, short *y, short *z)
++{
++ u8 tmp_data[MMA8451_BUF_SIZE];
++ int ret;
++
++ ret = i2c_smbus_read_i2c_block_data(mma8451_i2c_client,
++ MMA8451_OUT_X_MSB, 7, tmp_data);
++ if (ret < MMA8451_BUF_SIZE) {
++ dev_err(&mma8451_i2c_client->dev, "i2c block read failed\n");
++ return -EIO;
++ }
++
++ *x = ((tmp_data[0] << 8) & 0xff00) | tmp_data[1];
++ *y = ((tmp_data[2] << 8) & 0xff00) | tmp_data[3];
++ *z = ((tmp_data[4] << 8) & 0xff00) | tmp_data[5];
++ return 0;
++}
++
++static void report_abs(void)
++{
++ short x, y, z;
++ int result;
++ int retry = 3;
++
++ mutex_lock(&mma8451_lock);
++ if (mma_status.active == MMA_STANDBY)
++ goto out;
++ /* wait for the data ready */
++ do {
++ result = i2c_smbus_read_byte_data(mma8451_i2c_client,
++ MMA8451_STATUS);
++ retry--;
++ msleep(1);
++ } while (!(result & MMA8451_STATUS_ZYXDR) && retry > 0);
++ if (retry == 0)
++ goto out;
++ if (mma8451_read_data(&x, &y, &z) != 0)
++ goto out;
++ mma8451_adjust_position(&x, &y, &z);
++ input_report_abs(mma8451_idev->input, ABS_X, x);
++ input_report_abs(mma8451_idev->input, ABS_Y, y);
++ input_report_abs(mma8451_idev->input, ABS_Z, z);
++ input_sync(mma8451_idev->input);
++out:
++ mutex_unlock(&mma8451_lock);
++}
++
++static void mma8451_dev_poll(struct input_polled_dev *dev)
++{
++ report_abs();
++}
++
++static ssize_t mma8451_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client;
++ u8 val;
++ int enable;
++
++ mutex_lock(&mma8451_lock);
++ client = mma8451_i2c_client;
++ val = i2c_smbus_read_byte_data(client, MMA8451_CTRL_REG1);
++ if ((val & 0x01) && mma_status.active == MMA_ACTIVED)
++ enable = 1;
++ else
++ enable = 0;
++ mutex_unlock(&mma8451_lock);
++ return sprintf(buf, "%d\n", enable);
++}
++
++static ssize_t mma8451_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client;
++ int ret;
++ unsigned long enable;
++ u8 val = 0;
++
++ ret = strict_strtoul(buf, 10, &enable);
++ if (ret) {
++ dev_err(dev, "string transform error\n");
++ return ret;
++ }
++
++ mutex_lock(&mma8451_lock);
++ client = mma8451_i2c_client;
++ enable = (enable > 0) ? 1 : 0;
++ if (enable && mma_status.active == MMA_STANDBY) {
++ val = i2c_smbus_read_byte_data(client, MMA8451_CTRL_REG1);
++ ret =
++ i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1,
++ val | 0x01);
++ if (!ret)
++ mma_status.active = MMA_ACTIVED;
++
++ } else if (enable == 0 && mma_status.active == MMA_ACTIVED) {
++ val = i2c_smbus_read_byte_data(client, MMA8451_CTRL_REG1);
++ ret =
++ i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1,
++ val & 0xFE);
++ if (!ret)
++ mma_status.active = MMA_STANDBY;
++
++ }
++ mutex_unlock(&mma8451_lock);
++ return count;
++}
++
++static ssize_t mma8451_position_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ int position = 0;
++ mutex_lock(&mma8451_lock);
++ position = mma_status.position;
++ mutex_unlock(&mma8451_lock);
++ return sprintf(buf, "%d\n", position);
++}
++
++static ssize_t mma8451_position_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long position;
++ int ret;
++ ret = strict_strtoul(buf, 10, &position);
++ if (ret) {
++ dev_err(dev, "string transform error\n");
++ return ret;
++ }
++
++ mutex_lock(&mma8451_lock);
++ mma_status.position = (int)position;
++ mutex_unlock(&mma8451_lock);
++ return count;
++}
++
++static ssize_t mma8451_scalemode_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ int mode = 0;
++ mutex_lock(&mma8451_lock);
++ mode = (int)mma_status.mode;
++ mutex_unlock(&mma8451_lock);
++
++ return sprintf(buf, "%d\n", mode);
++}
++
++static ssize_t mma8451_scalemode_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long mode;
++ int ret, active_save;
++ struct i2c_client *client = mma8451_i2c_client;
++
++ ret = strict_strtoul(buf, 10, &mode);
++ if (ret) {
++ dev_err(dev, "string transform error\n");
++ goto out;
++ }
++
++ if (mode > MODE_8G) {
++ dev_warn(dev, "not supported mode\n");
++ ret = count;
++ goto out;
++ }
++
++ mutex_lock(&mma8451_lock);
++ if (mode == mma_status.mode) {
++ ret = count;
++ goto out_unlock;
++ }
++
++ active_save = mma_status.active;
++ ret = mma8451_change_mode(client, mode);
++ if (ret)
++ goto out_unlock;
++
++ if (active_save == MMA_ACTIVED) {
++ ret = i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1, 1);
++
++ if (ret)
++ goto out_unlock;
++ mma_status.active = active_save;
++ }
++
++out_unlock:
++ mutex_unlock(&mma8451_lock);
++out:
++ return ret;
++}
++
++static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
++ mma8451_enable_show, mma8451_enable_store);
++static DEVICE_ATTR(position, S_IWUSR | S_IRUGO,
++ mma8451_position_show, mma8451_position_store);
++static DEVICE_ATTR(scalemode, S_IWUSR | S_IRUGO,
++ mma8451_scalemode_show, mma8451_scalemode_store);
++
++static struct attribute *mma8451_attributes[] = {
++ &dev_attr_enable.attr,
++ &dev_attr_position.attr,
++ &dev_attr_scalemode.attr,
++ NULL
++};
++
++static const struct attribute_group mma8451_attr_group = {
++ .attrs = mma8451_attributes,
++};
++
++static int mma8451_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int result, client_id;
++ struct input_dev *idev;
++ struct i2c_adapter *adapter;
++ u32 pos;
++ struct device_node *of_node = client->dev.of_node;
++ struct regulator *vdd, *vdd_io;
++
++ mma8451_i2c_client = client;
++
++ vdd = devm_regulator_get(&client->dev, "vdd");
++ if (!IS_ERR(vdd)) {
++ result = regulator_enable(vdd);
++ if (result) {
++ dev_err(&client->dev, "vdd set voltage error\n");
++ return result;
++ }
++ }
++
++ vdd_io = devm_regulator_get(&client->dev, "vddio");
++ if (!IS_ERR(vdd_io)) {
++ result = regulator_enable(vdd_io);
++ if (result) {
++ dev_err(&client->dev, "vddio set voltage error\n");
++ return result;
++ }
++ }
++
++ adapter = to_i2c_adapter(client->dev.parent);
++ result = i2c_check_functionality(adapter,
++ I2C_FUNC_SMBUS_BYTE |
++ I2C_FUNC_SMBUS_BYTE_DATA);
++ if (!result)
++ goto err_out;
++
++ client_id = i2c_smbus_read_byte_data(client, MMA8451_WHO_AM_I);
++ if (client_id != MMA8451_ID && client_id != MMA8452_ID
++ && client_id != MMA8453_ID) {
++ dev_err(&client->dev,
++ "read chip ID 0x%x is not equal to 0x%x or 0x%x!\n",
++ result, MMA8451_ID, MMA8452_ID);
++ result = -EINVAL;
++ goto err_out;
++ }
++
++ /* Initialize the MMA8451 chip */
++ result = mma8451_change_mode(client, senstive_mode);
++ if (result) {
++ dev_err(&client->dev,
++ "error when init mma8451 chip:(%d)\n", result);
++ goto err_out;
++ }
++
++ hwmon_dev = hwmon_device_register(&client->dev);
++ if (!hwmon_dev) {
++ result = -ENOMEM;
++ dev_err(&client->dev, "error when register hwmon device\n");
++ goto err_out;
++ }
++
++ mma8451_idev = input_allocate_polled_device();
++ if (!mma8451_idev) {
++ result = -ENOMEM;
++ dev_err(&client->dev, "alloc poll device failed!\n");
++ goto err_alloc_poll_device;
++ }
++ mma8451_idev->poll = mma8451_dev_poll;
++ mma8451_idev->poll_interval = POLL_INTERVAL;
++ mma8451_idev->poll_interval_min = POLL_INTERVAL_MIN;
++ mma8451_idev->poll_interval_max = POLL_INTERVAL_MAX;
++ idev = mma8451_idev->input;
++ idev->name = "mma845x";
++ idev->id.bustype = BUS_I2C;
++ idev->evbit[0] = BIT_MASK(EV_ABS);
++
++ input_set_abs_params(idev, ABS_X, -8192, 8191, INPUT_FUZZ, INPUT_FLAT);
++ input_set_abs_params(idev, ABS_Y, -8192, 8191, INPUT_FUZZ, INPUT_FLAT);
++ input_set_abs_params(idev, ABS_Z, -8192, 8191, INPUT_FUZZ, INPUT_FLAT);
++
++ result = input_register_polled_device(mma8451_idev);
++ if (result) {
++ dev_err(&client->dev, "register poll device failed!\n");
++ goto err_register_polled_device;
++ }
++ result = sysfs_create_group(&idev->dev.kobj, &mma8451_attr_group);
++ if (result) {
++ dev_err(&client->dev, "create device file failed!\n");
++ result = -EINVAL;
++ goto err_register_polled_device;
++ }
++
++ result = of_property_read_u32(of_node, "position", &pos);
++ if (result)
++ pos = DEFAULT_POSITION;
++ mma_status.position = (int)pos;
++
++ return 0;
++err_register_polled_device:
++ input_free_polled_device(mma8451_idev);
++err_alloc_poll_device:
++ hwmon_device_unregister(hwmon_dev);
++err_out:
++ return result;
++}
++
++static int mma8451_stop_chip(struct i2c_client *client)
++{
++ int ret = 0;
++ if (mma_status.active == MMA_ACTIVED) {
++ mma_status.ctl_reg1 = i2c_smbus_read_byte_data(client,
++ MMA8451_CTRL_REG1);
++ ret = i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1,
++ mma_status.ctl_reg1 & 0xFE);
++ }
++ return ret;
++}
++
++static int mma8451_remove(struct i2c_client *client)
++{
++ int ret;
++ ret = mma8451_stop_chip(client);
++ hwmon_device_unregister(hwmon_dev);
++
++ return ret;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int mma8451_suspend(struct device *dev)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++
++ return mma8451_stop_chip(client);
++}
++
++static int mma8451_resume(struct device *dev)
++{
++ int ret = 0;
++ struct i2c_client *client = to_i2c_client(dev);
++ if (mma_status.active == MMA_ACTIVED)
++ ret = i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1,
++ mma_status.ctl_reg1);
++ return ret;
++
++}
++#endif
++
++static const struct i2c_device_id mma8451_id[] = {
++ {"mma8451", 0}, { /* sentinel */ }
++};
++
++MODULE_DEVICE_TABLE(i2c, mma8451_id);
++
++static SIMPLE_DEV_PM_OPS(mma8451_pm_ops, mma8451_suspend, mma8451_resume);
++static struct i2c_driver mma8451_driver = {
++ .driver = {
++ .name = "mma8451",
++ .owner = THIS_MODULE,
++ .pm = &mma8451_pm_ops,
++ },
++ .probe = mma8451_probe,
++ .remove = mma8451_remove,
++ .id_table = mma8451_id,
++};
++
++static int __init mma8451_init(void)
++{
++ /* register driver */
++ int res;
++
++ res = i2c_add_driver(&mma8451_driver);
++ if (res < 0) {
++ printk(KERN_INFO "add mma8451 i2c driver failed\n");
++ return -ENODEV;
++ }
++ return res;
++}
++
++static void __exit mma8451_exit(void)
++{
++ i2c_del_driver(&mma8451_driver);
++}
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MMA8451 3-Axis Orientation/Motion Detection Sensor driver");
++MODULE_LICENSE("GPL");
++
++module_init(mma8451_init);
++module_exit(mma8451_exit);
+diff -Nur linux-3.14.36/drivers/i2c/busses/i2c-imx.c linux-openelec/drivers/i2c/busses/i2c-imx.c
+--- linux-3.14.36/drivers/i2c/busses/i2c-imx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/i2c/busses/i2c-imx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -184,6 +184,9 @@
+ int stopped;
+ unsigned int ifdr; /* IMX_I2C_IFDR */
+ const struct imx_i2c_hwdata *hwdata;
++
++ unsigned int cur_clk;
++ unsigned int bitrate;
+ };
+
+ static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
+@@ -305,6 +308,51 @@
+ return 0;
+ }
+
++static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
++{
++ struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
++ unsigned ndivs = i2c_imx->hwdata->ndivs;
++ unsigned int i2c_clk_rate;
++ unsigned int div;
++ int i;
++
++ /* Divider value calculation */
++ i2c_clk_rate = clk_get_rate(i2c_imx->clk);
++ if (i2c_imx->cur_clk == i2c_clk_rate)
++ return;
++ else
++ i2c_imx->cur_clk = i2c_clk_rate;
++
++ div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
++ if (div < i2c_clk_div[0].div)
++ i = 0;
++ else if (div > i2c_clk_div[ndivs - 1].div)
++ i = ndivs - 1;
++ else
++ for (i = 0; i2c_clk_div[i].div < div; i++)
++ ;
++
++ /* Store divider value */
++ i2c_imx->ifdr = i2c_clk_div[i].val;
++
++ /*
++ * There dummy delay is calculated.
++ * It should be about one I2C clock period long.
++ * This delay is used in I2C bus disable function
++ * to fix chip hardware bug.
++ */
++ i2c_imx->disable_delay = (500000U * i2c_clk_div[i].div
++ + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2);
++
++ /* dev_dbg() can't be used, because adapter is not yet registered */
++#ifdef CONFIG_I2C_DEBUG_BUS
++ dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C_CLK=%d, REQ DIV=%d\n",
++ __func__, i2c_clk_rate, div);
++ dev_dbg(&i2c_imx->adapter.dev, "<%s> IFDR[IC]=0x%x, REAL DIV=%d\n",
++ __func__, i2c_clk_div[i].val, i2c_clk_div[i].div);
++#endif
++}
++
+ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
+ {
+ unsigned int temp = 0;
+@@ -312,6 +360,7 @@
+
+ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
+
++ i2c_imx_set_clk(i2c_imx);
+ result = clk_prepare_enable(i2c_imx->clk);
+ if (result)
+ return result;
+@@ -367,45 +416,6 @@
+ clk_disable_unprepare(i2c_imx->clk);
+ }
+
+-static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
+- unsigned int rate)
+-{
+- struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
+- unsigned int i2c_clk_rate;
+- unsigned int div;
+- int i;
+-
+- /* Divider value calculation */
+- i2c_clk_rate = clk_get_rate(i2c_imx->clk);
+- div = (i2c_clk_rate + rate - 1) / rate;
+- if (div < i2c_clk_div[0].div)
+- i = 0;
+- else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
+- i = i2c_imx->hwdata->ndivs - 1;
+- else
+- for (i = 0; i2c_clk_div[i].div < div; i++);
+-
+- /* Store divider value */
+- i2c_imx->ifdr = i2c_clk_div[i].val;
+-
+- /*
+- * There dummy delay is calculated.
+- * It should be about one I2C clock period long.
+- * This delay is used in I2C bus disable function
+- * to fix chip hardware bug.
+- */
+- i2c_imx->disable_delay = (500000U * i2c_clk_div[i].div
+- + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2);
+-
+- /* dev_dbg() can't be used, because adapter is not yet registered */
+-#ifdef CONFIG_I2C_DEBUG_BUS
+- dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C_CLK=%d, REQ DIV=%d\n",
+- __func__, i2c_clk_rate, div);
+- dev_dbg(&i2c_imx->adapter.dev, "<%s> IFDR[IC]=0x%x, REAL DIV=%d\n",
+- __func__, i2c_clk_div[i].val, i2c_clk_div[i].div);
+-#endif
+-}
+-
+ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
+ {
+ struct imx_i2c_struct *i2c_imx = dev_id;
+@@ -600,7 +610,6 @@
+ struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ int irq, ret;
+- u32 bitrate;
+
+ dev_dbg(&pdev->dev, "<%s>\n", __func__);
+
+@@ -664,12 +673,12 @@
+ i2c_set_adapdata(&i2c_imx->adapter, i2c_imx);
+
+ /* Set up clock divider */
+- bitrate = IMX_I2C_BIT_RATE;
++ i2c_imx->bitrate = IMX_I2C_BIT_RATE;
+ ret = of_property_read_u32(pdev->dev.of_node,
+- "clock-frequency", &bitrate);
++ "clock-frequency", &i2c_imx->bitrate);
+ if (ret < 0 && pdata && pdata->bitrate)
+- bitrate = pdata->bitrate;
+- i2c_imx_set_clk(i2c_imx, bitrate);
++ i2c_imx->bitrate = pdata->bitrate;
++ i2c_imx_set_clk(i2c_imx);
+
+ /* Set up chip registers to defaults */
+ imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
+diff -Nur linux-3.14.36/drivers/input/joystick/xpad.c linux-openelec/drivers/input/joystick/xpad.c
+--- linux-3.14.36/drivers/input/joystick/xpad.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/input/joystick/xpad.c 2015-07-24 18:03:30.064842002 -0500
+@@ -176,7 +176,6 @@
+ { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
+- { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
+ };
+
+@@ -282,17 +281,21 @@
+ struct urb *irq_out; /* urb for interrupt out report */
+ unsigned char *odata; /* output data */
+ dma_addr_t odata_dma;
+- struct mutex odata_mutex;
++ spinlock_t odata_lock;
+ #endif
+
+ #if defined(CONFIG_JOYSTICK_XPAD_LEDS)
+ struct xpad_led *led;
+ #endif
++
++ int joydev_id;
+
+ char phys[64]; /* physical device path */
+
+ int mapping; /* map d-pad to buttons or to axes */
+ int xtype; /* type of xbox device */
++
++ const char *name;
+ };
+
+ /*
+@@ -436,6 +439,109 @@
+
+ input_sync(dev);
+ }
++static void xpad_send_led_command(struct usb_xpad *xpad, int command);
++static int xpad_open(struct input_dev *dev);
++static void xpad_close(struct input_dev *dev);
++static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs);
++static int xpad_init_ff(struct usb_xpad *xpad);
++static int xpad_find_joydev(struct device *dev, void *data)
++{
++ if (strstr(dev_name(dev), "js"))
++ return 1;
++
++ return 0;
++}
++
++static struct workqueue_struct *my_wq;
++
++typedef struct {
++ struct work_struct my_work;
++ struct usb_xpad *xpad;
++} my_work_t;
++
++static void my_wq_function( struct work_struct *work)
++{
++ my_work_t *my_work = (my_work_t *)work;
++
++ struct usb_xpad *xpad = my_work->xpad;
++
++ if (xpad->pad_present) {
++
++ struct input_dev *input_dev;
++ int i;
++
++ input_dev = input_allocate_device();
++
++ xpad->dev = input_dev;
++ input_dev->name = xpad->name;
++ input_dev->phys = xpad->phys;
++ usb_to_input_id(xpad->udev, &input_dev->id);
++ input_dev->dev.parent = &xpad->intf->dev;
++
++ input_set_drvdata(input_dev, xpad);
++
++ input_dev->open = xpad_open;
++ input_dev->close = xpad_close;
++
++ input_dev->evbit[0] = BIT_MASK(EV_KEY);
++
++ if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
++ input_dev->evbit[0] |= BIT_MASK(EV_ABS);
++ /* set up axes */
++ for (i = 0; xpad_abs[i] >= 0; i++)
++ xpad_set_up_abs(input_dev, xpad_abs[i]);
++ }
++
++ /* set up standard buttons */
++ for (i = 0; xpad_common_btn[i] >= 0; i++)
++ __set_bit(xpad_common_btn[i], input_dev->keybit);
++
++ /* set up model-specific ones */
++ if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W) {
++ for (i = 0; xpad360_btn[i] >= 0; i++)
++ __set_bit(xpad360_btn[i], input_dev->keybit);
++ } else {
++ for (i = 0; xpad_btn[i] >= 0; i++)
++ __set_bit(xpad_btn[i], input_dev->keybit);
++ }
++
++ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
++ for (i = 0; xpad_btn_pad[i] >= 0; i++)
++ __set_bit(xpad_btn_pad[i], input_dev->keybit);
++ } else {
++ for (i = 0; xpad_abs_pad[i] >= 0; i++)
++ xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
++ }
++
++ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
++ for (i = 0; xpad_btn_triggers[i] >= 0; i++)
++ __set_bit(xpad_btn_triggers[i], input_dev->keybit);
++ } else {
++ for (i = 0; xpad_abs_triggers[i] >= 0; i++)
++ xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
++ }
++
++ input_register_device(xpad->dev);
++
++ {
++ struct device* joydev_dev = device_find_child(&xpad->dev->dev, NULL, xpad_find_joydev);
++
++ if (joydev_dev) {
++// printk("found joydev child with minor %i\n", MINOR(joydev_dev->devt));
++ xpad->joydev_id = MINOR(joydev_dev->devt);
++ xpad_send_led_command(xpad, (xpad->joydev_id % 4) + 2);
++ }
++ }
++
++ xpad_init_ff(xpad);
++ } else {
++ input_unregister_device(xpad->dev);
++ }
++
++ kfree( (void *)work );
++
++ return;
++}
+
+ /*
+ * xpad360w_process_packet
+@@ -457,11 +563,35 @@
+ /* Presence change */
+ if (data[0] & 0x08) {
+ if (data[1] & 0x80) {
+- xpad->pad_present = 1;
+- usb_submit_urb(xpad->bulk_out, GFP_ATOMIC);
+- } else
+- xpad->pad_present = 0;
++
++ if (!xpad->pad_present)
++ {
++ my_work_t * work;
++ xpad->pad_present = 1;
++ usb_submit_urb(xpad->bulk_out, GFP_ATOMIC);
++
++ work = (my_work_t *)kmalloc(sizeof(my_work_t), GFP_KERNEL);
++ INIT_WORK( (struct work_struct *)work, my_wq_function );
++ work->xpad = xpad;
++ queue_work( my_wq, (struct work_struct *)work );
++ }
++
++ } else {
++ if (xpad->pad_present)
++ {
++ my_work_t * work;
++ xpad->pad_present = 0;
++
++ work = (my_work_t *)kmalloc(sizeof(my_work_t), GFP_KERNEL);
++ INIT_WORK( (struct work_struct *)work, my_wq_function );
++ work->xpad = xpad;
++ queue_work( my_wq, (struct work_struct *)work );
++ }
++// printk("got kill packet for id %i\n", xpad->joydev_id);
++ }
+ }
++
++// printk("xpad packet %hhX %hhX %hhX %hhX %hhX %hhX\n", data[0], data[1], data[2], data[3], data[4], data[5]);
+
+ /* Valid pad data */
+ if (!(data[1] & 0x1))
+@@ -477,6 +607,8 @@
+ int retval, status;
+
+ status = urb->status;
++
++// printk("xpad_irq_in %i\n", status);
+
+ switch (status) {
+ case 0:
+@@ -585,8 +717,6 @@
+ goto fail1;
+ }
+
+- mutex_init(&xpad->odata_mutex);
+-
+ xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
+ if (!xpad->irq_out) {
+ error = -ENOMEM;
+@@ -715,15 +845,38 @@
+
+ static void xpad_send_led_command(struct usb_xpad *xpad, int command)
+ {
+- if (command >= 0 && command < 14) {
+- mutex_lock(&xpad->odata_mutex);
+- xpad->odata[0] = 0x01;
+- xpad->odata[1] = 0x03;
+- xpad->odata[2] = command;
+- xpad->irq_out->transfer_buffer_length = 3;
+- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+- mutex_unlock(&xpad->odata_mutex);
++ if ((unsigned)command > 15)
++ return;
++
++ spin_lock(&xpad->odata_lock);
++
++ switch (xpad->xtype) {
++
++ case XTYPE_XBOX360:
++ xpad->odata[0] = 0x01;
++ xpad->odata[1] = 0x03;
++ xpad->odata[2] = command;
++ xpad->irq_out->transfer_buffer_length = 3;
++ break;
++ case XTYPE_XBOX360W:
++ xpad->odata[0] = 0x00;
++ xpad->odata[1] = 0x00;
++ xpad->odata[2] = 0x08;
++ xpad->odata[3] = 0x40 + (command % 0x0e);
++ xpad->odata[4] = 0x00;
++ xpad->odata[5] = 0x00;
++ xpad->odata[6] = 0x00;
++ xpad->odata[7] = 0x00;
++ xpad->odata[8] = 0x00;
++ xpad->odata[9] = 0x00;
++ xpad->odata[10] = 0x00;
++ xpad->odata[11] = 0x00;
++ xpad->irq_out->transfer_buffer_length = 12;
++ break;
+ }
++
++ usb_submit_urb(xpad->irq_out, GFP_KERNEL);
++ spin_unlock(&xpad->odata_lock);
+ }
+
+ static void xpad_led_set(struct led_classdev *led_cdev,
+@@ -742,8 +895,10 @@
+ struct xpad_led *led;
+ struct led_classdev *led_cdev;
+ int error;
++
++// printk("xpad_led_probe\n");
+
+- if (xpad->xtype != XTYPE_XBOX360)
++ if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX360W)
+ return 0;
+
+ xpad->led = led = kzalloc(sizeof(struct xpad_led), GFP_KERNEL);
+@@ -766,11 +921,6 @@
+ return error;
+ }
+
+- /*
+- * Light up the segment corresponding to controller number
+- */
+- xpad_send_led_command(xpad, (led_no % 4) + 2);
+-
+ return 0;
+ }
+
+@@ -792,6 +942,7 @@
+ static int xpad_open(struct input_dev *dev)
+ {
+ struct usb_xpad *xpad = input_get_drvdata(dev);
++// printk("xpad open driver data %x\n", (unsigned int)xpad);
+
+ /* URB was submitted in probe */
+ if (xpad->xtype == XTYPE_XBOX360W)
+@@ -840,23 +991,24 @@
+ {
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_xpad *xpad;
+- struct input_dev *input_dev;
+ struct usb_endpoint_descriptor *ep_irq_in;
+ int i, error;
++ struct input_dev *input_dev;
++
++ if (!my_wq) {
++ my_wq = create_workqueue("xpad_queue");
++ }
+
+ for (i = 0; xpad_device[i].idVendor; i++) {
+ if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
+ (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
+ break;
+ }
+-
++
+ xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
+- input_dev = input_allocate_device();
+- if (!xpad || !input_dev) {
+- error = -ENOMEM;
+- goto fail1;
+- }
+
++ xpad->name = xpad_device[i].name;
++
+ xpad->idata = usb_alloc_coherent(udev, XPAD_PKT_LEN,
+ GFP_KERNEL, &xpad->idata_dma);
+ if (!xpad->idata) {
+@@ -892,65 +1044,12 @@
+ xpad->mapping |= MAP_STICKS_TO_NULL;
+ }
+
+- xpad->dev = input_dev;
+- usb_make_path(udev, xpad->phys, sizeof(xpad->phys));
+- strlcat(xpad->phys, "/input0", sizeof(xpad->phys));
+-
+- input_dev->name = xpad_device[i].name;
+- input_dev->phys = xpad->phys;
+- usb_to_input_id(udev, &input_dev->id);
+- input_dev->dev.parent = &intf->dev;
+-
+- input_set_drvdata(input_dev, xpad);
+-
+- input_dev->open = xpad_open;
+- input_dev->close = xpad_close;
+-
+- input_dev->evbit[0] = BIT_MASK(EV_KEY);
+-
+- if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
+- input_dev->evbit[0] |= BIT_MASK(EV_ABS);
+- /* set up axes */
+- for (i = 0; xpad_abs[i] >= 0; i++)
+- xpad_set_up_abs(input_dev, xpad_abs[i]);
+- }
+-
+- /* set up standard buttons */
+- for (i = 0; xpad_common_btn[i] >= 0; i++)
+- __set_bit(xpad_common_btn[i], input_dev->keybit);
+-
+- /* set up model-specific ones */
+- if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W) {
+- for (i = 0; xpad360_btn[i] >= 0; i++)
+- __set_bit(xpad360_btn[i], input_dev->keybit);
+- } else {
+- for (i = 0; xpad_btn[i] >= 0; i++)
+- __set_bit(xpad_btn[i], input_dev->keybit);
+- }
+-
+- if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
+- for (i = 0; xpad_btn_pad[i] >= 0; i++)
+- __set_bit(xpad_btn_pad[i], input_dev->keybit);
+- } else {
+- for (i = 0; xpad_abs_pad[i] >= 0; i++)
+- xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
+- }
+-
+- if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+- for (i = 0; xpad_btn_triggers[i] >= 0; i++)
+- __set_bit(xpad_btn_triggers[i], input_dev->keybit);
+- } else {
+- for (i = 0; xpad_abs_triggers[i] >= 0; i++)
+- xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
+- }
+-
+ error = xpad_init_output(intf, xpad);
+ if (error)
+ goto fail3;
+
+- error = xpad_init_ff(xpad);
+- if (error)
+- goto fail4;
++ usb_make_path(xpad->udev, xpad->phys, sizeof(xpad->phys));
++ strlcat(xpad->phys, "/input0", sizeof(xpad->phys));
+
+ error = xpad_led_probe(xpad);
+ if (error)
+@@ -964,10 +1063,6 @@
+ xpad->irq_in->transfer_dma = xpad->idata_dma;
+ xpad->irq_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+- error = input_register_device(xpad->dev);
+- if (error)
+- goto fail6;
+-
+ usb_set_intfdata(intf, xpad);
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+@@ -975,6 +1070,7 @@
+ * Setup the message to set the LEDs on the
+ * controller when it shows up
+ */
++ spin_lock(&xpad->odata_lock);
+ xpad->bulk_out = usb_alloc_urb(0, GFP_KERNEL);
+ if (!xpad->bulk_out) {
+ error = -ENOMEM;
+@@ -1026,23 +1122,55 @@
+ */
+ xpad->irq_in->dev = xpad->udev;
+ error = usb_submit_urb(xpad->irq_in, GFP_KERNEL);
++
++ spin_unlock(&xpad->odata_lock);
+ if (error)
+ goto fail9;
++
++ // I don't know how to check controller state on driver load so just slam them
++ // off so that people have to turn them on, triggering a state update
++
++ // got the power off packet from an OSX reverse-engineered driver:
++ // http://tattiebogle.net/index.php/ProjectRoot/Xbox360Controller/OsxDriver#toc1
++ spin_lock(&xpad->odata_lock);
++ xpad->odata[0] = 0x00;
++ xpad->odata[1] = 0x00;
++ xpad->odata[2] = 0x08;
++ xpad->odata[3] = 0xC0;
++ xpad->odata[4] = 0x00;
++ xpad->odata[5] = 0x00;
++ xpad->odata[6] = 0x00;
++ xpad->odata[7] = 0x00;
++ xpad->odata[8] = 0x00;
++ xpad->odata[9] = 0x00;
++ xpad->odata[10] = 0x00;
++ xpad->odata[11] = 0x00;
++ xpad->irq_out->transfer_buffer_length = 12;
++ usb_submit_urb(xpad->irq_out, GFP_KERNEL);
++ spin_unlock(&xpad->odata_lock);
++ } else {
++ my_work_t *work;
++ xpad->pad_present = 1;
++
++ work = (my_work_t *)kmalloc(sizeof(my_work_t), GFP_KERNEL);
++ INIT_WORK( (struct work_struct *)work, my_wq_function );
++ work->xpad = xpad;
++ queue_work( my_wq, (struct work_struct *)work );
+ }
+
+ return 0;
+
+ fail9: kfree(xpad->bdata);
+ fail8: usb_free_urb(xpad->bulk_out);
+- fail7: input_unregister_device(input_dev);
+- input_dev = NULL;
++ fail7: //input_unregister_device(input_dev);
++ //input_dev = NULL;
+ fail6: xpad_led_disconnect(xpad);
+- fail5: if (input_dev)
+- input_ff_destroy(input_dev);
++ fail5: //if (input_dev)
++ //input_ff_destroy(input_dev);
+ fail4: xpad_deinit_output(xpad);
+ fail3: usb_free_urb(xpad->irq_in);
+ fail2: usb_free_coherent(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
+- fail1: input_free_device(input_dev);
++ fail1: //input_free_device(input_dev);
+ kfree(xpad);
+ return error;
+
+@@ -1052,8 +1180,14 @@
+ {
+ struct usb_xpad *xpad = usb_get_intfdata (intf);
+
++// printk("xpad_disconnect\n");
+ xpad_led_disconnect(xpad);
+- input_unregister_device(xpad->dev);
++
++ if (xpad->pad_present)
++ {
++ xpad->pad_present = 0;
++ input_unregister_device(xpad->dev);
++ }
+ xpad_deinit_output(xpad);
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+diff -Nur linux-3.14.36/drivers/input/joystick/xpad.c.orig linux-openelec/drivers/input/joystick/xpad.c.orig
+--- linux-3.14.36/drivers/input/joystick/xpad.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/input/joystick/xpad.c.orig 2015-07-24 18:03:30.056842002 -0500
+@@ -0,0 +1,1085 @@
++/*
++ * X-Box gamepad driver
++ *
++ * Copyright (c) 2002 Marko Friedemann <mfr@bmx-chemnitz.de>
++ * 2004 Oliver Schwartz <Oliver.Schwartz@gmx.de>,
++ * Steven Toth <steve@toth.demon.co.uk>,
++ * Franz Lehner <franz@caos.at>,
++ * Ivan Hawkes <blackhawk@ivanhawkes.com>
++ * 2005 Dominic Cerquetti <binary1230@yahoo.com>
++ * 2006 Adam Buchbinder <adam.buchbinder@gmail.com>
++ * 2007 Jan Kratochvil <honza@jikos.cz>
++ * 2010 Christoph Fritz <chf.fritz@googlemail.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of
++ * the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ *
++ * This driver is based on:
++ * - information from http://euc.jp/periphs/xbox-controller.ja.html
++ * - the iForce driver drivers/char/joystick/iforce.c
++ * - the skeleton-driver drivers/usb/usb-skeleton.c
++ * - Xbox 360 information http://www.free60.org/wiki/Gamepad
++ *
++ * Thanks to:
++ * - ITO Takayuki for providing essential xpad information on his website
++ * - Vojtech Pavlik - iforce driver / input subsystem
++ * - Greg Kroah-Hartman - usb-skeleton driver
++ * - XBOX Linux project - extra USB id's
++ *
++ * TODO:
++ * - fine tune axes (especially trigger axes)
++ * - fix "analog" buttons (reported as digital now)
++ * - get rumble working
++ * - need USB IDs for other dance pads
++ *
++ * History:
++ *
++ * 2002-06-27 - 0.0.1 : first version, just said "XBOX HID controller"
++ *
++ * 2002-07-02 - 0.0.2 : basic working version
++ * - all axes and 9 of the 10 buttons work (german InterAct device)
++ * - the black button does not work
++ *
++ * 2002-07-14 - 0.0.3 : rework by Vojtech Pavlik
++ * - indentation fixes
++ * - usb + input init sequence fixes
++ *
++ * 2002-07-16 - 0.0.4 : minor changes, merge with Vojtech's v0.0.3
++ * - verified the lack of HID and report descriptors
++ * - verified that ALL buttons WORK
++ * - fixed d-pad to axes mapping
++ *
++ * 2002-07-17 - 0.0.5 : simplified d-pad handling
++ *
++ * 2004-10-02 - 0.0.6 : DDR pad support
++ * - borrowed from the XBOX linux kernel
++ * - USB id's for commonly used dance pads are present
++ * - dance pads will map D-PAD to buttons, not axes
++ * - pass the module paramater 'dpad_to_buttons' to force
++ * the D-PAD to map to buttons if your pad is not detected
++ *
++ * Later changes can be tracked in SCM.
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/stat.h>
++#include <linux/module.h>
++#include <linux/usb/input.h>
++
++#define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>"
++#define DRIVER_DESC "X-Box pad driver"
++
++#define XPAD_PKT_LEN 32
++
++/* xbox d-pads should map to buttons, as is required for DDR pads
++ but we map them to axes when possible to simplify things */
++#define MAP_DPAD_TO_BUTTONS (1 << 0)
++#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
++#define MAP_STICKS_TO_NULL (1 << 2)
++#define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \
++ MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
++
++#define XTYPE_XBOX 0
++#define XTYPE_XBOX360 1
++#define XTYPE_XBOX360W 2
++#define XTYPE_UNKNOWN 3
++
++static bool dpad_to_buttons;
++module_param(dpad_to_buttons, bool, S_IRUGO);
++MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
++
++static bool triggers_to_buttons;
++module_param(triggers_to_buttons, bool, S_IRUGO);
++MODULE_PARM_DESC(triggers_to_buttons, "Map triggers to buttons rather than axes for unknown pads");
++
++static bool sticks_to_null;
++module_param(sticks_to_null, bool, S_IRUGO);
++MODULE_PARM_DESC(sticks_to_null, "Do not map sticks at all for unknown pads");
++
++static const struct xpad_device {
++ u16 idVendor;
++ u16 idProduct;
++ char *name;
++ u8 mapping;
++ u8 xtype;
++} xpad_device[] = {
++ { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
++ { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
++ { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
++ { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
++ { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
++ { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
++ { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
++ { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
++ { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
++ { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
++ { 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
++ { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
++ { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
++ { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
++ { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
++ { 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
++ { 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX },
++ { 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
++ { 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX },
++ { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
++ { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
++ { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
++ { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
++ { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
++ { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
++ { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
++ { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
++ { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
++ { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
++ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
++ { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
++ { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
++ { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
++ { 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
++ { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
++ { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
++ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
++ { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
++ { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
++ { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
++ { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
++ { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
++ { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
++ { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
++ { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
++ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
++ { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
++ { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
++ { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
++ { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
++ { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
++ { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
++ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
++ { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
++};
++
++/* buttons shared with xbox and xbox360 */
++static const signed short xpad_common_btn[] = {
++ BTN_A, BTN_B, BTN_X, BTN_Y, /* "analog" buttons */
++ BTN_START, BTN_SELECT, BTN_THUMBL, BTN_THUMBR, /* start/back/sticks */
++ -1 /* terminating entry */
++};
++
++/* original xbox controllers only */
++static const signed short xpad_btn[] = {
++ BTN_C, BTN_Z, /* "analog" buttons */
++ -1 /* terminating entry */
++};
++
++/* used when dpad is mapped to buttons */
++static const signed short xpad_btn_pad[] = {
++ BTN_TRIGGER_HAPPY1, BTN_TRIGGER_HAPPY2, /* d-pad left, right */
++ BTN_TRIGGER_HAPPY3, BTN_TRIGGER_HAPPY4, /* d-pad up, down */
++ -1 /* terminating entry */
++};
++
++/* used when triggers are mapped to buttons */
++static const signed short xpad_btn_triggers[] = {
++ BTN_TL2, BTN_TR2, /* triggers left/right */
++ -1
++};
++
++
++static const signed short xpad360_btn[] = { /* buttons for x360 controller */
++ BTN_TL, BTN_TR, /* Button LB/RB */
++ BTN_MODE, /* The big X button */
++ -1
++};
++
++static const signed short xpad_abs[] = {
++ ABS_X, ABS_Y, /* left stick */
++ ABS_RX, ABS_RY, /* right stick */
++ -1 /* terminating entry */
++};
++
++/* used when dpad is mapped to axes */
++static const signed short xpad_abs_pad[] = {
++ ABS_HAT0X, ABS_HAT0Y, /* d-pad axes */
++ -1 /* terminating entry */
++};
++
++/* used when triggers are mapped to axes */
++static const signed short xpad_abs_triggers[] = {
++ ABS_Z, ABS_RZ, /* triggers left/right */
++ -1
++};
++
++/* Xbox 360 has a vendor-specific class, so we cannot match it with only
++ * USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we
++ * match against vendor id as well. Wired Xbox 360 devices have protocol 1,
++ * wireless controllers have protocol 129. */
++#define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \
++ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
++ .idVendor = (vend), \
++ .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
++ .bInterfaceSubClass = 93, \
++ .bInterfaceProtocol = (pr)
++#define XPAD_XBOX360_VENDOR(vend) \
++ { XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
++ { XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
++
++static struct usb_device_id xpad_table[] = {
++ { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
++ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
++ XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
++ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
++ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
++ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
++ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
++ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
++ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
++ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
++ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
++ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
++ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
++ { }
++};
++
++MODULE_DEVICE_TABLE(usb, xpad_table);
++
++struct usb_xpad {
++ struct input_dev *dev; /* input device interface */
++ struct usb_device *udev; /* usb device */
++ struct usb_interface *intf; /* usb interface */
++
++ int pad_present;
++
++ struct urb *irq_in; /* urb for interrupt in report */
++ unsigned char *idata; /* input data */
++ dma_addr_t idata_dma;
++
++ struct urb *bulk_out;
++ unsigned char *bdata;
++
++#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
++ struct urb *irq_out; /* urb for interrupt out report */
++ unsigned char *odata; /* output data */
++ dma_addr_t odata_dma;
++ struct mutex odata_mutex;
++#endif
++
++#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
++ struct xpad_led *led;
++#endif
++
++ char phys[64]; /* physical device path */
++
++ int mapping; /* map d-pad to buttons or to axes */
++ int xtype; /* type of xbox device */
++};
++
++/*
++ * xpad_process_packet
++ *
++ * Completes a request by converting the data into events for the
++ * input subsystem.
++ *
++ * The used report descriptor was taken from ITO Takayukis website:
++ * http://euc.jp/periphs/xbox-controller.ja.html
++ */
++
++static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
++{
++ struct input_dev *dev = xpad->dev;
++
++ if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
++ /* left stick */
++ input_report_abs(dev, ABS_X,
++ (__s16) le16_to_cpup((__le16 *)(data + 12)));
++ input_report_abs(dev, ABS_Y,
++ ~(__s16) le16_to_cpup((__le16 *)(data + 14)));
++
++ /* right stick */
++ input_report_abs(dev, ABS_RX,
++ (__s16) le16_to_cpup((__le16 *)(data + 16)));
++ input_report_abs(dev, ABS_RY,
++ ~(__s16) le16_to_cpup((__le16 *)(data + 18)));
++ }
++
++ /* triggers left/right */
++ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
++ input_report_key(dev, BTN_TL2, data[10]);
++ input_report_key(dev, BTN_TR2, data[11]);
++ } else {
++ input_report_abs(dev, ABS_Z, data[10]);
++ input_report_abs(dev, ABS_RZ, data[11]);
++ }
++
++ /* digital pad */
++ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
++ /* dpad as buttons (left, right, up, down) */
++ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & 0x04);
++ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & 0x08);
++ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & 0x01);
++ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & 0x02);
++ } else {
++ input_report_abs(dev, ABS_HAT0X,
++ !!(data[2] & 0x08) - !!(data[2] & 0x04));
++ input_report_abs(dev, ABS_HAT0Y,
++ !!(data[2] & 0x02) - !!(data[2] & 0x01));
++ }
++
++ /* start/back buttons and stick press left/right */
++ input_report_key(dev, BTN_START, data[2] & 0x10);
++ input_report_key(dev, BTN_SELECT, data[2] & 0x20);
++ input_report_key(dev, BTN_THUMBL, data[2] & 0x40);
++ input_report_key(dev, BTN_THUMBR, data[2] & 0x80);
++
++ /* "analog" buttons A, B, X, Y */
++ input_report_key(dev, BTN_A, data[4]);
++ input_report_key(dev, BTN_B, data[5]);
++ input_report_key(dev, BTN_X, data[6]);
++ input_report_key(dev, BTN_Y, data[7]);
++
++ /* "analog" buttons black, white */
++ input_report_key(dev, BTN_C, data[8]);
++ input_report_key(dev, BTN_Z, data[9]);
++
++ input_sync(dev);
++}
++
++/*
++ * xpad360_process_packet
++ *
++ * Completes a request by converting the data into events for the
++ * input subsystem. It is version for xbox 360 controller
++ *
++ * The used report descriptor was taken from:
++ * http://www.free60.org/wiki/Gamepad
++ */
++
++static void xpad360_process_packet(struct usb_xpad *xpad,
++ u16 cmd, unsigned char *data)
++{
++ struct input_dev *dev = xpad->dev;
++
++ /* digital pad */
++ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
++ /* dpad as buttons (left, right, up, down) */
++ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & 0x04);
++ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & 0x08);
++ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & 0x01);
++ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & 0x02);
++ } else {
++ input_report_abs(dev, ABS_HAT0X,
++ !!(data[2] & 0x08) - !!(data[2] & 0x04));
++ input_report_abs(dev, ABS_HAT0Y,
++ !!(data[2] & 0x02) - !!(data[2] & 0x01));
++ }
++
++ /* start/back buttons */
++ input_report_key(dev, BTN_START, data[2] & 0x10);
++ input_report_key(dev, BTN_SELECT, data[2] & 0x20);
++
++ /* stick press left/right */
++ input_report_key(dev, BTN_THUMBL, data[2] & 0x40);
++ input_report_key(dev, BTN_THUMBR, data[2] & 0x80);
++
++ /* buttons A,B,X,Y,TL,TR and MODE */
++ input_report_key(dev, BTN_A, data[3] & 0x10);
++ input_report_key(dev, BTN_B, data[3] & 0x20);
++ input_report_key(dev, BTN_X, data[3] & 0x40);
++ input_report_key(dev, BTN_Y, data[3] & 0x80);
++ input_report_key(dev, BTN_TL, data[3] & 0x01);
++ input_report_key(dev, BTN_TR, data[3] & 0x02);
++ input_report_key(dev, BTN_MODE, data[3] & 0x04);
++
++ if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
++ /* left stick */
++ input_report_abs(dev, ABS_X,
++ (__s16) le16_to_cpup((__le16 *)(data + 6)));
++ input_report_abs(dev, ABS_Y,
++ ~(__s16) le16_to_cpup((__le16 *)(data + 8)));
++
++ /* right stick */
++ input_report_abs(dev, ABS_RX,
++ (__s16) le16_to_cpup((__le16 *)(data + 10)));
++ input_report_abs(dev, ABS_RY,
++ ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
++ }
++
++ /* triggers left/right */
++ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
++ input_report_key(dev, BTN_TL2, data[4]);
++ input_report_key(dev, BTN_TR2, data[5]);
++ } else {
++ input_report_abs(dev, ABS_Z, data[4]);
++ input_report_abs(dev, ABS_RZ, data[5]);
++ }
++
++ input_sync(dev);
++}
++
++/*
++ * xpad360w_process_packet
++ *
++ * Completes a request by converting the data into events for the
++ * input subsystem. It is version for xbox 360 wireless controller.
++ *
++ * Byte.Bit
++ * 00.1 - Status change: The controller or headset has connected/disconnected
++ * Bits 01.7 and 01.6 are valid
++ * 01.7 - Controller present
++ * 01.6 - Headset present
++ * 01.1 - Pad state (Bytes 4+) valid
++ *
++ */
++
++static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
++{
++ /* Presence change */
++ if (data[0] & 0x08) {
++ if (data[1] & 0x80) {
++ xpad->pad_present = 1;
++ usb_submit_urb(xpad->bulk_out, GFP_ATOMIC);
++ } else
++ xpad->pad_present = 0;
++ }
++
++ /* Valid pad data */
++ if (!(data[1] & 0x1))
++ return;
++
++ xpad360_process_packet(xpad, cmd, &data[4]);
++}
++
++static void xpad_irq_in(struct urb *urb)
++{
++ struct usb_xpad *xpad = urb->context;
++ struct device *dev = &xpad->intf->dev;
++ int retval, status;
++
++ status = urb->status;
++
++ switch (status) {
++ case 0:
++ /* success */
++ break;
++ case -ECONNRESET:
++ case -ENOENT:
++ case -ESHUTDOWN:
++ /* this urb is terminated, clean up */
++ dev_dbg(dev, "%s - urb shutting down with status: %d\n",
++ __func__, status);
++ return;
++ default:
++ dev_dbg(dev, "%s - nonzero urb status received: %d\n",
++ __func__, status);
++ goto exit;
++ }
++
++ switch (xpad->xtype) {
++ case XTYPE_XBOX360:
++ xpad360_process_packet(xpad, 0, xpad->idata);
++ break;
++ case XTYPE_XBOX360W:
++ xpad360w_process_packet(xpad, 0, xpad->idata);
++ break;
++ default:
++ xpad_process_packet(xpad, 0, xpad->idata);
++ }
++
++exit:
++ retval = usb_submit_urb(urb, GFP_ATOMIC);
++ if (retval)
++ dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
++ __func__, retval);
++}
++
++static void xpad_bulk_out(struct urb *urb)
++{
++ struct usb_xpad *xpad = urb->context;
++ struct device *dev = &xpad->intf->dev;
++
++ switch (urb->status) {
++ case 0:
++ /* success */
++ break;
++ case -ECONNRESET:
++ case -ENOENT:
++ case -ESHUTDOWN:
++ /* this urb is terminated, clean up */
++ dev_dbg(dev, "%s - urb shutting down with status: %d\n",
++ __func__, urb->status);
++ break;
++ default:
++ dev_dbg(dev, "%s - nonzero urb status received: %d\n",
++ __func__, urb->status);
++ }
++}
++
++#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
++static void xpad_irq_out(struct urb *urb)
++{
++ struct usb_xpad *xpad = urb->context;
++ struct device *dev = &xpad->intf->dev;
++ int retval, status;
++
++ status = urb->status;
++
++ switch (status) {
++ case 0:
++ /* success */
++ return;
++
++ case -ECONNRESET:
++ case -ENOENT:
++ case -ESHUTDOWN:
++ /* this urb is terminated, clean up */
++ dev_dbg(dev, "%s - urb shutting down with status: %d\n",
++ __func__, status);
++ return;
++
++ default:
++ dev_dbg(dev, "%s - nonzero urb status received: %d\n",
++ __func__, status);
++ goto exit;
++ }
++
++exit:
++ retval = usb_submit_urb(urb, GFP_ATOMIC);
++ if (retval)
++ dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
++ __func__, retval);
++}
++
++static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
++{
++ struct usb_endpoint_descriptor *ep_irq_out;
++ int error;
++
++ if (xpad->xtype == XTYPE_UNKNOWN)
++ return 0;
++
++ xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
++ GFP_KERNEL, &xpad->odata_dma);
++ if (!xpad->odata) {
++ error = -ENOMEM;
++ goto fail1;
++ }
++
++ mutex_init(&xpad->odata_mutex);
++
++ xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
++ if (!xpad->irq_out) {
++ error = -ENOMEM;
++ goto fail2;
++ }
++
++ ep_irq_out = &intf->cur_altsetting->endpoint[1].desc;
++ usb_fill_int_urb(xpad->irq_out, xpad->udev,
++ usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress),
++ xpad->odata, XPAD_PKT_LEN,
++ xpad_irq_out, xpad, ep_irq_out->bInterval);
++ xpad->irq_out->transfer_dma = xpad->odata_dma;
++ xpad->irq_out->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++
++ return 0;
++
++ fail2: usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
++ fail1: return error;
++}
++
++static void xpad_stop_output(struct usb_xpad *xpad)
++{
++ if (xpad->xtype != XTYPE_UNKNOWN)
++ usb_kill_urb(xpad->irq_out);
++}
++
++static void xpad_deinit_output(struct usb_xpad *xpad)
++{
++ if (xpad->xtype != XTYPE_UNKNOWN) {
++ usb_free_urb(xpad->irq_out);
++ usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
++ xpad->odata, xpad->odata_dma);
++ }
++}
++#else
++static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) { return 0; }
++static void xpad_deinit_output(struct usb_xpad *xpad) {}
++static void xpad_stop_output(struct usb_xpad *xpad) {}
++#endif
++
++#ifdef CONFIG_JOYSTICK_XPAD_FF
++static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
++{
++ struct usb_xpad *xpad = input_get_drvdata(dev);
++
++ if (effect->type == FF_RUMBLE) {
++ __u16 strong = effect->u.rumble.strong_magnitude;
++ __u16 weak = effect->u.rumble.weak_magnitude;
++
++ switch (xpad->xtype) {
++
++ case XTYPE_XBOX:
++ xpad->odata[0] = 0x00;
++ xpad->odata[1] = 0x06;
++ xpad->odata[2] = 0x00;
++ xpad->odata[3] = strong / 256; /* left actuator */
++ xpad->odata[4] = 0x00;
++ xpad->odata[5] = weak / 256; /* right actuator */
++ xpad->irq_out->transfer_buffer_length = 6;
++
++ return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
++
++ case XTYPE_XBOX360:
++ xpad->odata[0] = 0x00;
++ xpad->odata[1] = 0x08;
++ xpad->odata[2] = 0x00;
++ xpad->odata[3] = strong / 256; /* left actuator? */
++ xpad->odata[4] = weak / 256; /* right actuator? */
++ xpad->odata[5] = 0x00;
++ xpad->odata[6] = 0x00;
++ xpad->odata[7] = 0x00;
++ xpad->irq_out->transfer_buffer_length = 8;
++
++ return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
++
++ case XTYPE_XBOX360W:
++ xpad->odata[0] = 0x00;
++ xpad->odata[1] = 0x01;
++ xpad->odata[2] = 0x0F;
++ xpad->odata[3] = 0xC0;
++ xpad->odata[4] = 0x00;
++ xpad->odata[5] = strong / 256;
++ xpad->odata[6] = weak / 256;
++ xpad->odata[7] = 0x00;
++ xpad->odata[8] = 0x00;
++ xpad->odata[9] = 0x00;
++ xpad->odata[10] = 0x00;
++ xpad->odata[11] = 0x00;
++ xpad->irq_out->transfer_buffer_length = 12;
++
++ return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
++
++ default:
++ dev_dbg(&xpad->dev->dev,
++ "%s - rumble command sent to unsupported xpad type: %d\n",
++ __func__, xpad->xtype);
++ return -1;
++ }
++ }
++
++ return 0;
++}
++
++static int xpad_init_ff(struct usb_xpad *xpad)
++{
++ if (xpad->xtype == XTYPE_UNKNOWN)
++ return 0;
++
++ input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
++
++ return input_ff_create_memless(xpad->dev, NULL, xpad_play_effect);
++}
++
++#else
++static int xpad_init_ff(struct usb_xpad *xpad) { return 0; }
++#endif
++
++#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
++#include <linux/leds.h>
++
++struct xpad_led {
++ char name[16];
++ struct led_classdev led_cdev;
++ struct usb_xpad *xpad;
++};
++
++static void xpad_send_led_command(struct usb_xpad *xpad, int command)
++{
++ if (command >= 0 && command < 14) {
++ mutex_lock(&xpad->odata_mutex);
++ xpad->odata[0] = 0x01;
++ xpad->odata[1] = 0x03;
++ xpad->odata[2] = command;
++ xpad->irq_out->transfer_buffer_length = 3;
++ usb_submit_urb(xpad->irq_out, GFP_KERNEL);
++ mutex_unlock(&xpad->odata_mutex);
++ }
++}
++
++static void xpad_led_set(struct led_classdev *led_cdev,
++ enum led_brightness value)
++{
++ struct xpad_led *xpad_led = container_of(led_cdev,
++ struct xpad_led, led_cdev);
++
++ xpad_send_led_command(xpad_led->xpad, value);
++}
++
++static int xpad_led_probe(struct usb_xpad *xpad)
++{
++ static atomic_t led_seq = ATOMIC_INIT(0);
++ long led_no;
++ struct xpad_led *led;
++ struct led_classdev *led_cdev;
++ int error;
++
++ if (xpad->xtype != XTYPE_XBOX360)
++ return 0;
++
++ xpad->led = led = kzalloc(sizeof(struct xpad_led), GFP_KERNEL);
++ if (!led)
++ return -ENOMEM;
++
++ led_no = (long)atomic_inc_return(&led_seq) - 1;
++
++ snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
++ led->xpad = xpad;
++
++ led_cdev = &led->led_cdev;
++ led_cdev->name = led->name;
++ led_cdev->brightness_set = xpad_led_set;
++
++ error = led_classdev_register(&xpad->udev->dev, led_cdev);
++ if (error) {
++ kfree(led);
++ xpad->led = NULL;
++ return error;
++ }
++
++ /*
++ * Light up the segment corresponding to controller number
++ */
++ xpad_send_led_command(xpad, (led_no % 4) + 2);
++
++ return 0;
++}
++
++static void xpad_led_disconnect(struct usb_xpad *xpad)
++{
++ struct xpad_led *xpad_led = xpad->led;
++
++ if (xpad_led) {
++ led_classdev_unregister(&xpad_led->led_cdev);
++ kfree(xpad_led);
++ }
++}
++#else
++static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
++static void xpad_led_disconnect(struct usb_xpad *xpad) { }
++#endif
++
++
++static int xpad_open(struct input_dev *dev)
++{
++ struct usb_xpad *xpad = input_get_drvdata(dev);
++
++ /* URB was submitted in probe */
++ if (xpad->xtype == XTYPE_XBOX360W)
++ return 0;
++
++ xpad->irq_in->dev = xpad->udev;
++ if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
++ return -EIO;
++
++ return 0;
++}
++
++static void xpad_close(struct input_dev *dev)
++{
++ struct usb_xpad *xpad = input_get_drvdata(dev);
++
++ if (xpad->xtype != XTYPE_XBOX360W)
++ usb_kill_urb(xpad->irq_in);
++
++ xpad_stop_output(xpad);
++}
++
++static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
++{
++ set_bit(abs, input_dev->absbit);
++
++ switch (abs) {
++ case ABS_X:
++ case ABS_Y:
++ case ABS_RX:
++ case ABS_RY: /* the two sticks */
++ input_set_abs_params(input_dev, abs, -32768, 32767, 16, 128);
++ break;
++ case ABS_Z:
++ case ABS_RZ: /* the triggers (if mapped to axes) */
++ input_set_abs_params(input_dev, abs, 0, 255, 0, 0);
++ break;
++ case ABS_HAT0X:
++ case ABS_HAT0Y: /* the d-pad (only if dpad is mapped to axes */
++ input_set_abs_params(input_dev, abs, -1, 1, 0, 0);
++ break;
++ }
++}
++
++static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id)
++{
++ struct usb_device *udev = interface_to_usbdev(intf);
++ struct usb_xpad *xpad;
++ struct input_dev *input_dev;
++ struct usb_endpoint_descriptor *ep_irq_in;
++ int i, error;
++
++ for (i = 0; xpad_device[i].idVendor; i++) {
++ if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
++ (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
++ break;
++ }
++
++ xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
++ input_dev = input_allocate_device();
++ if (!xpad || !input_dev) {
++ error = -ENOMEM;
++ goto fail1;
++ }
++
++ xpad->idata = usb_alloc_coherent(udev, XPAD_PKT_LEN,
++ GFP_KERNEL, &xpad->idata_dma);
++ if (!xpad->idata) {
++ error = -ENOMEM;
++ goto fail1;
++ }
++
++ xpad->irq_in = usb_alloc_urb(0, GFP_KERNEL);
++ if (!xpad->irq_in) {
++ error = -ENOMEM;
++ goto fail2;
++ }
++
++ xpad->udev = udev;
++ xpad->intf = intf;
++ xpad->mapping = xpad_device[i].mapping;
++ xpad->xtype = xpad_device[i].xtype;
++
++ if (xpad->xtype == XTYPE_UNKNOWN) {
++ if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
++ if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
++ xpad->xtype = XTYPE_XBOX360W;
++ else
++ xpad->xtype = XTYPE_XBOX360;
++ } else
++ xpad->xtype = XTYPE_XBOX;
++
++ if (dpad_to_buttons)
++ xpad->mapping |= MAP_DPAD_TO_BUTTONS;
++ if (triggers_to_buttons)
++ xpad->mapping |= MAP_TRIGGERS_TO_BUTTONS;
++ if (sticks_to_null)
++ xpad->mapping |= MAP_STICKS_TO_NULL;
++ }
++
++ xpad->dev = input_dev;
++ usb_make_path(udev, xpad->phys, sizeof(xpad->phys));
++ strlcat(xpad->phys, "/input0", sizeof(xpad->phys));
++
++ input_dev->name = xpad_device[i].name;
++ input_dev->phys = xpad->phys;
++ usb_to_input_id(udev, &input_dev->id);
++ input_dev->dev.parent = &intf->dev;
++
++ input_set_drvdata(input_dev, xpad);
++
++ input_dev->open = xpad_open;
++ input_dev->close = xpad_close;
++
++ input_dev->evbit[0] = BIT_MASK(EV_KEY);
++
++ if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
++ input_dev->evbit[0] |= BIT_MASK(EV_ABS);
++ /* set up axes */
++ for (i = 0; xpad_abs[i] >= 0; i++)
++ xpad_set_up_abs(input_dev, xpad_abs[i]);
++ }
++
++ /* set up standard buttons */
++ for (i = 0; xpad_common_btn[i] >= 0; i++)
++ __set_bit(xpad_common_btn[i], input_dev->keybit);
++
++ /* set up model-specific ones */
++ if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W) {
++ for (i = 0; xpad360_btn[i] >= 0; i++)
++ __set_bit(xpad360_btn[i], input_dev->keybit);
++ } else {
++ for (i = 0; xpad_btn[i] >= 0; i++)
++ __set_bit(xpad_btn[i], input_dev->keybit);
++ }
++
++ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
++ for (i = 0; xpad_btn_pad[i] >= 0; i++)
++ __set_bit(xpad_btn_pad[i], input_dev->keybit);
++ } else {
++ for (i = 0; xpad_abs_pad[i] >= 0; i++)
++ xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
++ }
++
++ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
++ for (i = 0; xpad_btn_triggers[i] >= 0; i++)
++ __set_bit(xpad_btn_triggers[i], input_dev->keybit);
++ } else {
++ for (i = 0; xpad_abs_triggers[i] >= 0; i++)
++ xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
++ }
++
++ error = xpad_init_output(intf, xpad);
++ if (error)
++ goto fail3;
++
++ error = xpad_init_ff(xpad);
++ if (error)
++ goto fail4;
++
++ error = xpad_led_probe(xpad);
++ if (error)
++ goto fail5;
++
++ ep_irq_in = &intf->cur_altsetting->endpoint[0].desc;
++ usb_fill_int_urb(xpad->irq_in, udev,
++ usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress),
++ xpad->idata, XPAD_PKT_LEN, xpad_irq_in,
++ xpad, ep_irq_in->bInterval);
++ xpad->irq_in->transfer_dma = xpad->idata_dma;
++ xpad->irq_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++
++ error = input_register_device(xpad->dev);
++ if (error)
++ goto fail6;
++
++ usb_set_intfdata(intf, xpad);
++
++ if (xpad->xtype == XTYPE_XBOX360W) {
++ /*
++ * Setup the message to set the LEDs on the
++ * controller when it shows up
++ */
++ xpad->bulk_out = usb_alloc_urb(0, GFP_KERNEL);
++ if (!xpad->bulk_out) {
++ error = -ENOMEM;
++ goto fail7;
++ }
++
++ xpad->bdata = kzalloc(XPAD_PKT_LEN, GFP_KERNEL);
++ if (!xpad->bdata) {
++ error = -ENOMEM;
++ goto fail8;
++ }
++
++ xpad->bdata[2] = 0x08;
++ switch (intf->cur_altsetting->desc.bInterfaceNumber) {
++ case 0:
++ xpad->bdata[3] = 0x42;
++ break;
++ case 2:
++ xpad->bdata[3] = 0x43;
++ break;
++ case 4:
++ xpad->bdata[3] = 0x44;
++ break;
++ case 6:
++ xpad->bdata[3] = 0x45;
++ }
++
++ ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
++ if (usb_endpoint_is_bulk_out(ep_irq_in)) {
++ usb_fill_bulk_urb(xpad->bulk_out, udev,
++ usb_sndbulkpipe(udev,
++ ep_irq_in->bEndpointAddress),
++ xpad->bdata, XPAD_PKT_LEN,
++ xpad_bulk_out, xpad);
++ } else {
++ usb_fill_int_urb(xpad->bulk_out, udev,
++ usb_sndintpipe(udev,
++ ep_irq_in->bEndpointAddress),
++ xpad->bdata, XPAD_PKT_LEN,
++ xpad_bulk_out, xpad, 0);
++ }
++
++ /*
++ * Submit the int URB immediately rather than waiting for open
++ * because we get status messages from the device whether
++ * or not any controllers are attached. In fact, it's
++ * exactly the message that a controller has arrived that
++ * we're waiting for.
++ */
++ xpad->irq_in->dev = xpad->udev;
++ error = usb_submit_urb(xpad->irq_in, GFP_KERNEL);
++ if (error)
++ goto fail9;
++ }
++
++ return 0;
++
++ fail9: kfree(xpad->bdata);
++ fail8: usb_free_urb(xpad->bulk_out);
++ fail7: input_unregister_device(input_dev);
++ input_dev = NULL;
++ fail6: xpad_led_disconnect(xpad);
++ fail5: if (input_dev)
++ input_ff_destroy(input_dev);
++ fail4: xpad_deinit_output(xpad);
++ fail3: usb_free_urb(xpad->irq_in);
++ fail2: usb_free_coherent(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
++ fail1: input_free_device(input_dev);
++ kfree(xpad);
++ return error;
++
++}
++
++static void xpad_disconnect(struct usb_interface *intf)
++{
++ struct usb_xpad *xpad = usb_get_intfdata (intf);
++
++ xpad_led_disconnect(xpad);
++ input_unregister_device(xpad->dev);
++ xpad_deinit_output(xpad);
++
++ if (xpad->xtype == XTYPE_XBOX360W) {
++ usb_kill_urb(xpad->bulk_out);
++ usb_free_urb(xpad->bulk_out);
++ usb_kill_urb(xpad->irq_in);
++ }
++
++ usb_free_urb(xpad->irq_in);
++ usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
++ xpad->idata, xpad->idata_dma);
++
++ kfree(xpad->bdata);
++ kfree(xpad);
++
++ usb_set_intfdata(intf, NULL);
++}
++
++static struct usb_driver xpad_driver = {
++ .name = "xpad",
++ .probe = xpad_probe,
++ .disconnect = xpad_disconnect,
++ .id_table = xpad_table,
++};
++
++module_usb_driver(xpad_driver);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/input/keyboard/gpio_keys.c linux-openelec/drivers/input/keyboard/gpio_keys.c
+--- linux-3.14.36/drivers/input/keyboard/gpio_keys.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/input/keyboard/gpio_keys.c 2015-05-06 12:05:42.000000000 -0500
+@@ -3,6 +3,7 @@
+ *
+ * Copyright 2005 Phil Blundell
+ * Copyright 2010, 2011 David Jander <david@protonic.nl>
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -473,6 +474,8 @@
+
+ isr = gpio_keys_gpio_isr;
+ irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
++ if (bdata->button->wakeup)
++ irqflags |= IRQF_NO_SUSPEND;
+
+ } else {
+ if (!button->irq) {
+diff -Nur linux-3.14.36/drivers/input/keyboard/imx_keypad.c linux-openelec/drivers/input/keyboard/imx_keypad.c
+--- linux-3.14.36/drivers/input/keyboard/imx_keypad.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/input/keyboard/imx_keypad.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1,6 +1,7 @@
+ /*
+ * Driver for the IMX keypad port.
+ * Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -548,6 +549,8 @@
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(kbd->irq);
++ else
++ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+ }
+@@ -561,6 +564,8 @@
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(kbd->irq);
++ else
++ pinctrl_pm_select_default_state(dev);
+
+ mutex_lock(&input_dev->mutex);
+
+diff -Nur linux-3.14.36/drivers/input/misc/mma8450.c linux-openelec/drivers/input/misc/mma8450.c
+--- linux-3.14.36/drivers/input/misc/mma8450.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/input/misc/mma8450.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1,7 +1,7 @@
+ /*
+ * Driver for Freescale's 3-Axis Accelerometer MMA8450
+ *
+- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -25,6 +25,7 @@
+ #include <linux/i2c.h>
+ #include <linux/input-polldev.h>
+ #include <linux/of_device.h>
++#include <linux/mutex.h>
+
+ #define MMA8450_DRV_NAME "mma8450"
+
+@@ -51,11 +52,22 @@
+
+ #define MMA8450_CTRL_REG1 0x38
+ #define MMA8450_CTRL_REG2 0x39
++#define MMA8450_ID 0xC6
++#define MMA8450_WHO_AM_I 0x0F
++
++enum {
++ MODE_STANDBY = 0,
++ MODE_2G,
++ MODE_4G,
++ MODE_8G,
++};
+
+ /* mma8450 status */
+ struct mma8450 {
+ struct i2c_client *client;
+ struct input_polled_dev *idev;
++ struct mutex mma8450_lock;
++ u8 mode;
+ };
+
+ static int mma8450_read(struct mma8450 *m, unsigned off)
+@@ -112,16 +124,19 @@
+ int ret;
+ u8 buf[6];
+
+- ret = mma8450_read(m, MMA8450_STATUS);
+- if (ret < 0)
+- return;
++ mutex_lock(&m->mma8450_lock);
+
+- if (!(ret & MMA8450_STATUS_ZXYDR))
++ ret = mma8450_read(m, MMA8450_STATUS);
++ if (ret < 0 || !(ret & MMA8450_STATUS_ZXYDR)) {
++ mutex_unlock(&m->mma8450_lock);
+ return;
++ }
+
+ ret = mma8450_read_block(m, MMA8450_OUT_X_LSB, buf, sizeof(buf));
+- if (ret < 0)
++ if (ret < 0) {
++ mutex_unlock(&m->mma8450_lock);
+ return;
++ }
+
+ x = ((int)(s8)buf[1] << 4) | (buf[0] & 0xf);
+ y = ((int)(s8)buf[3] << 4) | (buf[2] & 0xf);
+@@ -131,10 +146,12 @@
+ input_report_abs(dev->input, ABS_Y, y);
+ input_report_abs(dev->input, ABS_Z, z);
+ input_sync(dev->input);
++
++ mutex_unlock(&m->mma8450_lock);
+ }
+
+ /* Initialize the MMA8450 chip */
+-static void mma8450_open(struct input_polled_dev *dev)
++static s32 mma8450_open(struct input_polled_dev *dev)
+ {
+ struct mma8450 *m = dev->private;
+ int err;
+@@ -142,18 +159,20 @@
+ /* enable all events from X/Y/Z, no FIFO */
+ err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07);
+ if (err)
+- return;
++ return err;
+
+ /*
+ * Sleep mode poll rate - 50Hz
+ * System output data rate - 400Hz
+- * Full scale selection - Active, +/- 2G
++ * Standby mode
+ */
+- err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01);
+- if (err < 0)
+- return;
+-
++ err = mma8450_write(m, MMA8450_CTRL_REG1, MODE_STANDBY);
++ if (err)
++ return err;
++ m->mode = MODE_STANDBY;
+ msleep(MODE_CHANGE_DELAY_MS);
++
++ return 0;
+ }
+
+ static void mma8450_close(struct input_polled_dev *dev)
+@@ -164,6 +183,76 @@
+ mma8450_write(m, MMA8450_CTRL_REG2, 0x01);
+ }
+
++static ssize_t mma8450_scalemode_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ int mode = 0;
++ struct mma8450 *m;
++ struct i2c_client *client = to_i2c_client(dev);
++
++ m = i2c_get_clientdata(client);
++
++ mutex_lock(&m->mma8450_lock);
++ mode = (int)m->mode;
++ mutex_unlock(&m->mma8450_lock);
++
++ return sprintf(buf, "%d\n", mode);
++}
++
++static ssize_t mma8450_scalemode_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long mode;
++ int ret;
++ struct mma8450 *m = NULL;
++ struct i2c_client *client = to_i2c_client(dev);
++
++ ret = strict_strtoul(buf, 10, &mode);
++ if (ret) {
++ dev_err(dev, "string transform error\n");
++ return ret;
++ }
++
++ if (mode > MODE_8G) {
++ dev_warn(dev, "not supported mode %d\n", (int)mode);
++ return count;
++ }
++
++ m = i2c_get_clientdata(client);
++
++ mutex_lock(&m->mma8450_lock);
++ if (mode == m->mode) {
++ mutex_unlock(&m->mma8450_lock);
++ return count;
++ }
++
++ ret = mma8450_write(m, MMA8450_CTRL_REG1, mode);
++ if (ret < 0) {
++ mutex_unlock(&m->mma8450_lock);
++ return ret;
++ }
++
++ msleep(MODE_CHANGE_DELAY_MS);
++ m->mode = (u8)mode;
++ mutex_unlock(&m->mma8450_lock);
++
++ return count;
++}
++
++static DEVICE_ATTR(scalemode, S_IWUSR | S_IRUGO,
++ mma8450_scalemode_show, mma8450_scalemode_store);
++
++static struct attribute *mma8450_attributes[] = {
++ &dev_attr_scalemode.attr,
++ NULL
++};
++
++static const struct attribute_group mma8450_attr_group = {
++ .attrs = mma8450_attributes,
++};
++
+ /*
+ * I2C init/probing/exit functions
+ */
+@@ -172,7 +261,25 @@
+ {
+ struct input_polled_dev *idev;
+ struct mma8450 *m;
+- int err;
++ int err, client_id;
++ struct i2c_adapter *adapter = NULL;
++
++ adapter = to_i2c_adapter(c->dev.parent);
++ err = i2c_check_functionality(adapter,
++ I2C_FUNC_SMBUS_BYTE |
++ I2C_FUNC_SMBUS_BYTE_DATA);
++ if (!err)
++ goto err_out;
++
++ client_id = i2c_smbus_read_byte_data(c, MMA8450_WHO_AM_I);
++
++ if (MMA8450_ID != client_id) {
++ dev_err(&c->dev,
++ "read chip ID 0x%x is not equal to 0x%x!\n", client_id,
++ MMA8450_ID);
++ err = -EINVAL;
++ goto err_out;
++ }
+
+ m = kzalloc(sizeof(struct mma8450), GFP_KERNEL);
+ idev = input_allocate_polled_device();
+@@ -183,6 +290,7 @@
+
+ m->client = c;
+ m->idev = idev;
++ i2c_set_clientdata(c, m);
+
+ idev->private = m;
+ idev->input->name = MMA8450_DRV_NAME;
+@@ -190,8 +298,6 @@
+ idev->poll = mma8450_poll;
+ idev->poll_interval = POLL_INTERVAL;
+ idev->poll_interval_max = POLL_INTERVAL_MAX;
+- idev->open = mma8450_open;
+- idev->close = mma8450_close;
+
+ __set_bit(EV_ABS, idev->input->evbit);
+ input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32);
+@@ -206,11 +312,32 @@
+
+ i2c_set_clientdata(c, m);
+
++ mutex_init(&m->mma8450_lock);
++
++ err = mma8450_open(idev);
++ if (err) {
++ dev_err(&c->dev, "failed to initialize mma8450\n");
++ goto err_unreg_dev;
++ }
++
++ err = sysfs_create_group(&c->dev.kobj, &mma8450_attr_group);
++ if (err) {
++ dev_err(&c->dev, "create device file failed!\n");
++ err = -EINVAL;
++ goto err_close;
++ }
++
+ return 0;
+
++err_close:
++ mma8450_close(idev);
++err_unreg_dev:
++ mutex_destroy(&m->mma8450_lock);
++ input_unregister_polled_device(idev);
+ err_free_mem:
+ input_free_polled_device(idev);
+ kfree(m);
++err_out:
+ return err;
+ }
+
+@@ -219,6 +346,9 @@
+ struct mma8450 *m = i2c_get_clientdata(c);
+ struct input_polled_dev *idev = m->idev;
+
++ sysfs_remove_group(&c->dev.kobj, &mma8450_attr_group);
++ mma8450_close(idev);
++ mutex_destroy(&m->mma8450_lock);
+ input_unregister_polled_device(idev);
+ input_free_polled_device(idev);
+ kfree(m);
+diff -Nur linux-3.14.36/drivers/input/sparse-keymap.c linux-openelec/drivers/input/sparse-keymap.c
+--- linux-3.14.36/drivers/input/sparse-keymap.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/input/sparse-keymap.c 2015-05-06 12:05:42.000000000 -0500
+@@ -236,7 +236,7 @@
+ * in an input device that was set up by sparse_keymap_setup().
+ * NOTE: It is safe to cal this function while input device is
+ * still registered (however the drivers should care not to try to
+- * use freed keymap and thus have to shut off interrups/polling
++ * use freed keymap and thus have to shut off interrupts/polling
+ * before freeing the keymap).
+ */
+ void sparse_keymap_free(struct input_dev *dev)
+diff -Nur linux-3.14.36/drivers/input/touchscreen/st1232.c linux-openelec/drivers/input/touchscreen/st1232.c
+--- linux-3.14.36/drivers/input/touchscreen/st1232.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/input/touchscreen/st1232.c 2015-07-24 18:03:30.272842002 -0500
+@@ -1,12 +1,13 @@
+ /*
+- * ST1232 Touchscreen Controller Driver
++ * ST1232/ST1332 Touchscreen Controller Driver
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+- * Tony SIM <chinyeow.sim.xt@renesas.com>
++ * Tony SIM <chinyeow.sim.xt@renesas.com>
++ * Copyright (C) 2015 Peter Vicman <peter.vicman@gmail.com>
+ *
+ * Using code from:
+ * - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c
+- * Copyright (C) 2007 Google, Inc.
++ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+@@ -30,282 +31,383 @@
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/platform_data/st1232_pdata.h>
++#include <linux/workqueue.h>
+
+-#define ST1232_TS_NAME "st1232-ts"
+-
+-#define MIN_X 0x00
+-#define MIN_Y 0x00
+-#define MAX_X 0x31f /* (800 - 1) */
+-#define MAX_Y 0x1df /* (480 - 1) */
+-#define MAX_AREA 0xff
+-#define MAX_FINGERS 2
++#define ST1232_TS_NAME "st1232-ts"
++#define MIN_X 0
++#define MIN_Y 0
++#define MAX_X (800 - 1)
++#define MAX_Y (480 - 1)
++#define MAX_AREA 0xff
++#define MAX_FINGERS 2
++#define SAMPLE_DELAY 20 /* msecs */
++#define REVERSE_X(x) if (reverse_x == true) { x = MAX_X - (x); } else {}
++#define REVERSE_Y(y) if (reverse_y == true) { y = MAX_Y - (y); } else {}
+
+ struct st1232_ts_finger {
+- u16 x;
+- u16 y;
+- u8 t;
+- bool is_valid;
++ u16 x;
++ u16 y;
++ u8 t;
++ bool is_valid;
+ };
+
+ struct st1232_ts_data {
+- struct i2c_client *client;
+- struct input_dev *input_dev;
+- struct st1232_ts_finger finger[MAX_FINGERS];
+- struct dev_pm_qos_request low_latency_req;
+- int reset_gpio;
++ struct i2c_client *client;
++ struct input_dev *input_dev;
++ struct st1232_ts_finger finger[MAX_FINGERS];
++ struct dev_pm_qos_request low_latency_req;
++ int reset_gpio;
++ struct delayed_work work;
+ };
+
++static bool multitouch = false;
++module_param(multitouch, bool, 0);
++MODULE_PARM_DESC(multitouch, " If multitouch is set to 1 ts acts as multitouch");
++
++static bool reverse_x = false;
++module_param(reverse_x, bool, 0600);
++MODULE_PARM_DESC(reverse_x, " If reverse_x is set to 1 x coordinates are reversed");
++
++static bool reverse_y = false;
++module_param(reverse_y, bool, 0600);
++MODULE_PARM_DESC(reverse_y, " If reverse_y is set to 1 y coordinates are reversed");
++
++static int offset_x = 0;
++module_param(offset_x, int, 0600);
++MODULE_PARM_DESC(offset_x, " Offset value for x axis");
++
++static int offset_y = 0;
++module_param(offset_y, int, 0600);
++MODULE_PARM_DESC(offset_y, " Offset value for y axis");
++
+ static int st1232_ts_read_data(struct st1232_ts_data *ts)
+ {
+- struct st1232_ts_finger *finger = ts->finger;
+- struct i2c_client *client = ts->client;
+- struct i2c_msg msg[2];
+- int error;
+- u8 start_reg;
+- u8 buf[10];
+-
+- /* read touchscreen data from ST1232 */
+- msg[0].addr = client->addr;
+- msg[0].flags = 0;
+- msg[0].len = 1;
+- msg[0].buf = &start_reg;
+- start_reg = 0x10;
+-
+- msg[1].addr = ts->client->addr;
+- msg[1].flags = I2C_M_RD;
+- msg[1].len = sizeof(buf);
+- msg[1].buf = buf;
+-
+- error = i2c_transfer(client->adapter, msg, 2);
+- if (error < 0)
+- return error;
+-
+- /* get "valid" bits */
+- finger[0].is_valid = buf[2] >> 7;
+- finger[1].is_valid = buf[5] >> 7;
+-
+- /* get xy coordinate */
+- if (finger[0].is_valid) {
+- finger[0].x = ((buf[2] & 0x0070) << 4) | buf[3];
+- finger[0].y = ((buf[2] & 0x0007) << 8) | buf[4];
+- finger[0].t = buf[8];
+- }
+-
+- if (finger[1].is_valid) {
+- finger[1].x = ((buf[5] & 0x0070) << 4) | buf[6];
+- finger[1].y = ((buf[5] & 0x0007) << 8) | buf[7];
+- finger[1].t = buf[9];
+- }
++ struct st1232_ts_finger *finger = ts->finger;
++ struct i2c_client *client = ts->client;
++ struct i2c_msg msg[2];
++ int error;
++ u8 start_reg;
++ u8 buf[10];
++
++ /* read touchscreen data from ST1232 */
++ msg[0].addr = client->addr;
++ msg[0].flags = 0;
++ msg[0].len = 1;
++ msg[0].buf = &start_reg;
++ start_reg = 0x10;
++
++ msg[1].addr = ts->client->addr;
++ msg[1].flags = I2C_M_RD;
++ msg[1].len = sizeof(buf);
++ msg[1].buf = buf;
++
++ error = i2c_transfer(client->adapter, msg, 2);
++ if (error < 0)
++ return error;
++
++ memset(finger, 0x0, sizeof(struct st1232_ts_finger) * MAX_FINGERS);
++
++ /* get "valid" bits from fingers
++ and combine with "valid" bits from coordinates */
++ finger[0].is_valid = (buf[0] & 0x07); /* only 3 bits on st1332 */
++ finger[0].is_valid &= (buf[2] >> 7);
++ finger[1].is_valid = (buf[0] & 0x07);
++ finger[1].is_valid &= (buf[5] >> 7);
++
++ /* get xy coordinates and strength */
++ if (finger[0].is_valid) {
++ finger[0].x = ((buf[2] & 0x0070) << 4) | buf[3];
++ finger[0].y = ((buf[2] & 0x0007) << 8) | buf[4];
++ finger[0].t = buf[8];
++
++ REVERSE_X(finger[0].x)
++ REVERSE_Y(finger[0].y)
++
++ finger[0].x += offset_x;
++ finger[0].y += offset_y;
++ }
++
++ if (finger[1].is_valid) {
++ finger[1].x = ((buf[5] & 0x0070) << 4) | buf[6];
++ finger[1].y = ((buf[5] & 0x0007) << 8) | buf[7];
++ finger[1].t = buf[9];
++
++ REVERSE_X(finger[1].x)
++ REVERSE_Y(finger[1].y)
++ }
+
+- return 0;
++ return 0;
+ }
+
+-static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
++static void st1232_ts_finger_released(struct work_struct *work)
+ {
+- struct st1232_ts_data *ts = dev_id;
+- struct st1232_ts_finger *finger = ts->finger;
+- struct input_dev *input_dev = ts->input_dev;
+- int count = 0;
+- int i, ret;
+-
+- ret = st1232_ts_read_data(ts);
+- if (ret < 0)
+- goto end;
+-
+- /* multi touch protocol */
+- for (i = 0; i < MAX_FINGERS; i++) {
+- if (!finger[i].is_valid)
+- continue;
+-
+- input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, finger[i].t);
+- input_report_abs(input_dev, ABS_MT_POSITION_X, finger[i].x);
+- input_report_abs(input_dev, ABS_MT_POSITION_Y, finger[i].y);
+- input_mt_sync(input_dev);
+- count++;
+- }
+-
+- /* SYN_MT_REPORT only if no contact */
+- if (!count) {
+- input_mt_sync(input_dev);
+- if (ts->low_latency_req.dev) {
+- dev_pm_qos_remove_request(&ts->low_latency_req);
+- ts->low_latency_req.dev = NULL;
+- }
+- } else if (!ts->low_latency_req.dev) {
+- /* First contact, request 100 us latency. */
+- dev_pm_qos_add_ancestor_request(&ts->client->dev,
+- &ts->low_latency_req, 100);
+- }
++ struct st1232_ts_data *ts = container_of(work, struct st1232_ts_data, work.work);
++ struct st1232_ts_finger *finger = ts->finger;
++ struct input_dev *input_dev = ts->input_dev;
++ int ret;
++
++ ret = st1232_ts_read_data(ts);
++ if (ret < 0)
++ goto end;
++
++ /* finger is a pointer to finger[0] */
++ if (finger->is_valid)
++ goto end; /* finger (still) touched */
++
++ /* finger released */
++ input_report_abs(input_dev, ABS_PRESSURE, 0);
++ input_report_key(input_dev, BTN_TOUCH, 0);
++ input_sync(input_dev);
++
++end:
++ return;
++}
+
+- /* SYN_REPORT */
+- input_sync(input_dev);
++static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
++{
++ struct st1232_ts_data *ts = dev_id;
++ struct st1232_ts_finger *finger = ts->finger;
++ struct input_dev *input_dev = ts->input_dev;
++ int count = 0;
++ int i, ret;
++
++ if (multitouch == false) {
++ /*
++ * Cancel scheduled polling for release if we have new value
++ * available. Wait if the polling is already running.
++ */
++ cancel_delayed_work_sync(&ts->work);
++ }
++
++ ret = st1232_ts_read_data(ts);
++ if (ret < 0)
++ goto end;
++
++ if (multitouch == false) {
++ if (finger->is_valid) {
++ input_report_abs(input_dev, ABS_X, finger->x);
++ input_report_abs(input_dev, ABS_Y, finger->y);
++ input_report_abs(input_dev, ABS_PRESSURE, finger->t);
++ input_report_key(input_dev, BTN_TOUCH, 1);
++ input_sync(input_dev);
++ }
++ } else {
++ /* multi touch protocol */
++ for (i = 0; i < MAX_FINGERS; i++) {
++ if (!finger[i].is_valid)
++ continue;
++
++ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, finger[i].t);
++ input_report_abs(input_dev, ABS_MT_POSITION_X, finger[i].x);
++ input_report_abs(input_dev, ABS_MT_POSITION_Y, finger[i].y);
++ input_mt_sync(input_dev);
++ count++;
++ }
++
++ /* SYN_MT_REPORT only if no contact */
++ if (!count) {
++ input_mt_sync(input_dev);
++ if (ts->low_latency_req.dev) {
++ dev_pm_qos_remove_request(&ts->low_latency_req);
++ ts->low_latency_req.dev = NULL;
++ }
++ } else if (!ts->low_latency_req.dev) {
++ /* First contact, request 100 us latency. */
++ dev_pm_qos_add_ancestor_request(&ts->client->dev, &ts->low_latency_req, 100);
++ }
++
++ /* SYN_REPORT */
++ input_sync(input_dev);
++ }
+
+ end:
+- return IRQ_HANDLED;
++ if (multitouch == false) {
++ /* start polling for st1232_ts_read_data to detect release */
++ schedule_delayed_work(&ts->work, msecs_to_jiffies(SAMPLE_DELAY));
++ }
++
++ return IRQ_HANDLED;
+ }
+
+ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
+ {
+- if (gpio_is_valid(ts->reset_gpio))
+- gpio_direction_output(ts->reset_gpio, poweron);
++ if (gpio_is_valid(ts->reset_gpio))
++ gpio_direction_output(ts->reset_gpio, poweron);
+ }
+
+ static int st1232_ts_probe(struct i2c_client *client,
+- const struct i2c_device_id *id)
++ const struct i2c_device_id *id)
+ {
+- struct st1232_ts_data *ts;
+- struct st1232_pdata *pdata = dev_get_platdata(&client->dev);
+- struct input_dev *input_dev;
+- int error;
+-
+- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+- dev_err(&client->dev, "need I2C_FUNC_I2C\n");
+- return -EIO;
+- }
+-
+- if (!client->irq) {
+- dev_err(&client->dev, "no IRQ?\n");
+- return -EINVAL;
+- }
+-
+- ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+- if (!ts)
+- return -ENOMEM;
+-
+- input_dev = devm_input_allocate_device(&client->dev);
+- if (!input_dev)
+- return -ENOMEM;
+-
+- ts->client = client;
+- ts->input_dev = input_dev;
+-
+- if (pdata)
+- ts->reset_gpio = pdata->reset_gpio;
+- else if (client->dev.of_node)
+- ts->reset_gpio = of_get_gpio(client->dev.of_node, 0);
+- else
+- ts->reset_gpio = -ENODEV;
+-
+- if (gpio_is_valid(ts->reset_gpio)) {
+- error = devm_gpio_request(&client->dev, ts->reset_gpio, NULL);
+- if (error) {
+- dev_err(&client->dev,
+- "Unable to request GPIO pin %d.\n",
+- ts->reset_gpio);
+- return error;
+- }
+- }
+-
+- st1232_ts_power(ts, true);
+-
+- input_dev->name = "st1232-touchscreen";
+- input_dev->id.bustype = BUS_I2C;
+- input_dev->dev.parent = &client->dev;
+-
+- __set_bit(EV_SYN, input_dev->evbit);
+- __set_bit(EV_KEY, input_dev->evbit);
+- __set_bit(EV_ABS, input_dev->evbit);
+-
+- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, MAX_AREA, 0, 0);
+- input_set_abs_params(input_dev, ABS_MT_POSITION_X, MIN_X, MAX_X, 0, 0);
+- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, MIN_Y, MAX_Y, 0, 0);
+-
+- error = devm_request_threaded_irq(&client->dev, client->irq,
+- NULL, st1232_ts_irq_handler,
+- IRQF_ONESHOT,
+- client->name, ts);
+- if (error) {
+- dev_err(&client->dev, "Failed to register interrupt\n");
+- return error;
+- }
+-
+- error = input_register_device(ts->input_dev);
+- if (error) {
+- dev_err(&client->dev, "Unable to register %s input device\n",
+- input_dev->name);
+- return error;
+- }
++ struct st1232_ts_data *ts;
++ struct st1232_pdata *pdata = dev_get_platdata(&client->dev);
++ struct input_dev *input_dev;
++ int error;
++
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
++ dev_err(&client->dev, "need I2C_FUNC_I2C\n");
++ return -EIO;
++ }
++
++ if (!client->irq) {
++ dev_err(&client->dev, "no IRQ?\n");
++ return -EINVAL;
++ }
++
++ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
++ if (!ts)
++ return -ENOMEM;
++
++ input_dev = devm_input_allocate_device(&client->dev);
++ if (!input_dev)
++ return -ENOMEM;
++
++ ts->client = client;
++ ts->input_dev = input_dev;
++
++ if (pdata)
++ ts->reset_gpio = pdata->reset_gpio;
++ else if (client->dev.of_node)
++ ts->reset_gpio = of_get_gpio(client->dev.of_node, 0);
++ else
++ ts->reset_gpio = -ENODEV;
++
++ if (gpio_is_valid(ts->reset_gpio)) {
++ error = devm_gpio_request(&client->dev, ts->reset_gpio, NULL);
++ if (error) {
++ dev_err(&client->dev, "Unable to request GPIO pin %d.\n", ts->reset_gpio);
++ return error;
++ }
++ }
++
++ st1232_ts_power(ts, true);
++
++ input_dev->name = ST1232_TS_NAME;
++ input_dev->id.bustype = BUS_I2C;
++ input_dev->dev.parent = &client->dev;
++
++ if (multitouch == false) {
++ input_dev->phys = ST1232_TS_NAME"/input0";
++
++ __set_bit(BTN_TOUCH, input_dev->keybit);
++ __set_bit(EV_KEY, input_dev->evbit);
++ __set_bit(EV_ABS, input_dev->evbit);
++ __set_bit(ABS_X, input_dev->absbit);
++ __set_bit(ABS_Y, input_dev->absbit);
++ __set_bit(ABS_PRESSURE, input_dev->absbit);
++
++ input_set_abs_params(input_dev, ABS_X, MIN_X, MAX_X, 0, 0);
++ input_set_abs_params(input_dev, ABS_Y, MIN_Y, MAX_Y, 0, 0);
++ input_set_abs_params(input_dev, ABS_PRESSURE, 0x0, 0xff, 0, 0);
++
++ INIT_DELAYED_WORK(&ts->work, st1232_ts_finger_released);
++ } else {
++ __set_bit(EV_SYN, input_dev->evbit);
++ __set_bit(EV_KEY, input_dev->evbit);
++ __set_bit(EV_ABS, input_dev->evbit);
++
++ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, MAX_AREA, 0, 0);
++ input_set_abs_params(input_dev, ABS_MT_POSITION_X, MIN_X, MAX_X, 0, 0);
++ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, MIN_Y, MAX_Y, 0, 0);
++ }
++
++ error = devm_request_threaded_irq(&client->dev, client->irq,
++ NULL, st1232_ts_irq_handler,
++ IRQF_ONESHOT,
++ client->name, ts);
++ if (error) {
++ dev_err(&client->dev, "Failed to register interrupt\n");
++ return error;
++ }
++
++ error = input_register_device(ts->input_dev);
++ if (error) {
++ dev_err(&client->dev, "Unable to register %s input device\n",
++ input_dev->name);
++ return error;
++ }
+
+- i2c_set_clientdata(client, ts);
+- device_init_wakeup(&client->dev, 1);
++ i2c_set_clientdata(client, ts);
++ device_init_wakeup(&client->dev, 1);
+
+- return 0;
++ return 0;
+ }
+
+ static int st1232_ts_remove(struct i2c_client *client)
+ {
+- struct st1232_ts_data *ts = i2c_get_clientdata(client);
++ struct st1232_ts_data *ts = i2c_get_clientdata(client);
+
+- device_init_wakeup(&client->dev, 0);
+- st1232_ts_power(ts, false);
++ device_init_wakeup(&client->dev, 0);
++ st1232_ts_power(ts, false);
++ cancel_delayed_work_sync(&ts->work);
+
+- return 0;
++ return 0;
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+ static int st1232_ts_suspend(struct device *dev)
+ {
+- struct i2c_client *client = to_i2c_client(dev);
+- struct st1232_ts_data *ts = i2c_get_clientdata(client);
++ struct i2c_client *client = to_i2c_client(dev);
++ struct st1232_ts_data *ts = i2c_get_clientdata(client);
+
+- if (device_may_wakeup(&client->dev)) {
+- enable_irq_wake(client->irq);
+- } else {
+- disable_irq(client->irq);
+- st1232_ts_power(ts, false);
+- }
++ if (device_may_wakeup(&client->dev)) {
++ enable_irq_wake(client->irq);
++ } else {
++ disable_irq(client->irq);
++ cancel_delayed_work_sync(&ts->work);
++ st1232_ts_power(ts, false);
++ }
+
+- return 0;
++ return 0;
+ }
+
+ static int st1232_ts_resume(struct device *dev)
+ {
+- struct i2c_client *client = to_i2c_client(dev);
+- struct st1232_ts_data *ts = i2c_get_clientdata(client);
++ struct i2c_client *client = to_i2c_client(dev);
++ struct st1232_ts_data *ts = i2c_get_clientdata(client);
+
+- if (device_may_wakeup(&client->dev)) {
+- disable_irq_wake(client->irq);
+- } else {
+- st1232_ts_power(ts, true);
+- enable_irq(client->irq);
+- }
++ if (device_may_wakeup(&client->dev)) {
++ disable_irq_wake(client->irq);
++ } else {
++ st1232_ts_power(ts, true);
++ schedule_delayed_work(&ts->work, HZ / 50);
++ enable_irq(client->irq);
++ }
+
+- return 0;
++ return 0;
+ }
+-
+ #endif
+
+ static SIMPLE_DEV_PM_OPS(st1232_ts_pm_ops,
+- st1232_ts_suspend, st1232_ts_resume);
++ st1232_ts_suspend, st1232_ts_resume);
+
+ static const struct i2c_device_id st1232_ts_id[] = {
+- { ST1232_TS_NAME, 0 },
+- { }
++ { ST1232_TS_NAME, 0 },
++ { }
+ };
+ MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
+
+ #ifdef CONFIG_OF
+ static const struct of_device_id st1232_ts_dt_ids[] = {
+- { .compatible = "sitronix,st1232", },
+- { }
++ { .compatible = "sitronix,st1232", },
++ { }
+ };
+ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
+ #endif
+
+ static struct i2c_driver st1232_ts_driver = {
+- .probe = st1232_ts_probe,
+- .remove = st1232_ts_remove,
+- .id_table = st1232_ts_id,
+- .driver = {
+- .name = ST1232_TS_NAME,
+- .owner = THIS_MODULE,
+- .of_match_table = of_match_ptr(st1232_ts_dt_ids),
+- .pm = &st1232_ts_pm_ops,
+- },
++ .probe = st1232_ts_probe,
++ .remove = st1232_ts_remove,
++ .id_table = st1232_ts_id,
++ .driver = {
++ .name = ST1232_TS_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = of_match_ptr(st1232_ts_dt_ids),
++ .pm = &st1232_ts_pm_ops,
++ },
+ };
+
+ module_i2c_driver(st1232_ts_driver);
+
+-MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>");
+-MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver");
++MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>, Peter Vicman <peter.vicman@gmail.com>");
++MODULE_DESCRIPTION("SITRONIX ST1232/ST1332 Touchscreen Controller Driver");
+ MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/Kconfig linux-openelec/drivers/Kconfig
+--- linux-3.14.36/drivers/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -96,6 +96,8 @@
+
+ source "drivers/memstick/Kconfig"
+
++source "drivers/mxc/Kconfig"
++
+ source "drivers/leds/Kconfig"
+
+ source "drivers/accessibility/Kconfig"
+diff -Nur linux-3.14.36/drivers/leds/leds-gpio.c linux-openelec/drivers/leds/leds-gpio.c
+--- linux-3.14.36/drivers/leds/leds-gpio.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/leds/leds-gpio.c 2015-05-06 12:05:42.000000000 -0500
+@@ -3,7 +3,7 @@
+ *
+ * Copyright (C) 2007 8D Technologies inc.
+ * Raphael Assenat <raph@8d.com>
+- * Copyright (C) 2008 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008, 2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -203,6 +203,8 @@
+ else
+ led.default_state = LEDS_GPIO_DEFSTATE_OFF;
+ }
++ if (of_get_property(child, "retain-state-suspended", NULL))
++ led.retain_state_suspended = 1;
+
+ ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
+ &pdev->dev, NULL);
+diff -Nur linux-3.14.36/drivers/leds/leds-pwm.c linux-openelec/drivers/leds/leds-pwm.c
+--- linux-3.14.36/drivers/leds/leds-pwm.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/leds/leds-pwm.c 2015-05-06 12:05:42.000000000 -0500
+@@ -70,6 +70,10 @@
+
+ duty *= brightness;
+ do_div(duty, max);
++
++ if (led_dat->active_low)
++ duty = led_dat->period - duty;
++
+ led_dat->duty = duty;
+
+ if (led_dat->can_sleep)
+@@ -93,55 +97,75 @@
+ }
+ }
+
+-static int led_pwm_create_of(struct platform_device *pdev,
+- struct led_pwm_priv *priv)
++static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
++ struct led_pwm *led, struct device_node *child)
+ {
+- struct device_node *child;
++ struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
+ int ret;
+
+- for_each_child_of_node(pdev->dev.of_node, child) {
+- struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
++ led_data->active_low = led->active_low;
++ led_data->period = led->pwm_period_ns;
++ led_data->cdev.name = led->name;
++ led_data->cdev.default_trigger = led->default_trigger;
++ led_data->cdev.brightness_set = led_pwm_set;
++ led_data->cdev.brightness = LED_OFF;
++ led_data->cdev.max_brightness = led->max_brightness;
++ led_data->cdev.flags = LED_CORE_SUSPENDRESUME;
++
++ if (child)
++ led_data->pwm = devm_of_pwm_get(dev, child, NULL);
++ else
++ led_data->pwm = devm_pwm_get(dev, led->name);
++ if (IS_ERR(led_data->pwm)) {
++ ret = PTR_ERR(led_data->pwm);
++ dev_err(dev, "unable to request PWM for %s: %d\n",
++ led->name, ret);
++ return ret;
++ }
+
+- led_dat->cdev.name = of_get_property(child, "label",
+- NULL) ? : child->name;
++ if (child)
++ led_data->period = pwm_get_period(led_data->pwm);
+
+- led_dat->pwm = devm_of_pwm_get(&pdev->dev, child, NULL);
+- if (IS_ERR(led_dat->pwm)) {
+- dev_err(&pdev->dev, "unable to request PWM for %s\n",
+- led_dat->cdev.name);
+- ret = PTR_ERR(led_dat->pwm);
+- goto err;
+- }
+- /* Get the period from PWM core when n*/
+- led_dat->period = pwm_get_period(led_dat->pwm);
++ led_data->can_sleep = pwm_can_sleep(led_data->pwm);
++ if (led_data->can_sleep)
++ INIT_WORK(&led_data->work, led_pwm_work);
+
+- led_dat->cdev.default_trigger = of_get_property(child,
++ ret = led_classdev_register(dev, &led_data->cdev);
++ if (ret == 0) {
++ priv->num_leds++;
++ } else {
++ dev_err(dev, "failed to register PWM led for %s: %d\n",
++ led->name, ret);
++ }
++
++ return ret;
++}
++
++static int led_pwm_create_of(struct device *dev, struct led_pwm_priv *priv)
++{
++ struct device_node *child;
++ struct led_pwm led;
++ int ret = 0;
++
++ memset(&led, 0, sizeof(led));
++
++ for_each_child_of_node(dev->of_node, child) {
++ led.name = of_get_property(child, "label", NULL) ? :
++ child->name;
++
++ led.default_trigger = of_get_property(child,
+ "linux,default-trigger", NULL);
++ led.active_low = of_property_read_bool(child, "active-low");
+ of_property_read_u32(child, "max-brightness",
+- &led_dat->cdev.max_brightness);
++ &led.max_brightness);
+
+- led_dat->cdev.brightness_set = led_pwm_set;
+- led_dat->cdev.brightness = LED_OFF;
+- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+-
+- led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
+- if (led_dat->can_sleep)
+- INIT_WORK(&led_dat->work, led_pwm_work);
+-
+- ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
+- if (ret < 0) {
+- dev_err(&pdev->dev, "failed to register for %s\n",
+- led_dat->cdev.name);
++ ret = led_pwm_add(dev, priv, &led, child);
++ if (ret) {
+ of_node_put(child);
+- goto err;
++ break;
+ }
+- priv->num_leds++;
+ }
+
+- return 0;
+-err:
+- led_pwm_cleanup(priv);
+-
+ return ret;
+ }
+
+@@ -167,51 +191,23 @@
+
+ if (pdata) {
+ for (i = 0; i < count; i++) {
+- struct led_pwm *cur_led = &pdata->leds[i];
+- struct led_pwm_data *led_dat = &priv->leds[i];
+-
+- led_dat->pwm = devm_pwm_get(&pdev->dev, cur_led->name);
+- if (IS_ERR(led_dat->pwm)) {
+- ret = PTR_ERR(led_dat->pwm);
+- dev_err(&pdev->dev,
+- "unable to request PWM for %s\n",
+- cur_led->name);
+- goto err;
+- }
+-
+- led_dat->cdev.name = cur_led->name;
+- led_dat->cdev.default_trigger = cur_led->default_trigger;
+- led_dat->active_low = cur_led->active_low;
+- led_dat->period = cur_led->pwm_period_ns;
+- led_dat->cdev.brightness_set = led_pwm_set;
+- led_dat->cdev.brightness = LED_OFF;
+- led_dat->cdev.max_brightness = cur_led->max_brightness;
+- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+-
+- led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
+- if (led_dat->can_sleep)
+- INIT_WORK(&led_dat->work, led_pwm_work);
+-
+- ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
+- if (ret < 0)
+- goto err;
++ ret = led_pwm_add(&pdev->dev, priv, &pdata->leds[i],
++ NULL);
++ if (ret)
++ break;
+ }
+- priv->num_leds = count;
+ } else {
+- ret = led_pwm_create_of(pdev, priv);
+- if (ret)
+- return ret;
++ ret = led_pwm_create_of(&pdev->dev, priv);
++ }
++
++ if (ret) {
++ led_pwm_cleanup(priv);
++ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+-
+-err:
+- priv->num_leds = i;
+- led_pwm_cleanup(priv);
+-
+- return ret;
+ }
+
+ static int led_pwm_remove(struct platform_device *pdev)
+diff -Nur linux-3.14.36/drivers/mailbox/mailbox.c linux-openelec/drivers/mailbox/mailbox.c
+--- linux-3.14.36/drivers/mailbox/mailbox.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mailbox/mailbox.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,488 @@
++/*
++ * Mailbox: Common code for Mailbox controllers and users
++ *
++ * Copyright (C) 2014 Linaro Ltd.
++ * Author: Jassi Brar <jassisinghbrar@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/mailbox_client.h>
++#include <linux/mailbox_controller.h>
++
++#define TXDONE_BY_IRQ (1 << 0) /* controller has remote RTR irq */
++#define TXDONE_BY_POLL (1 << 1) /* controller can read status of last TX */
++#define TXDONE_BY_ACK (1 << 2) /* S/W ACK recevied by Client ticks the TX */
++
++static LIST_HEAD(mbox_cons);
++static DEFINE_MUTEX(con_mutex);
++
++static int _add_to_rbuf(struct mbox_chan *chan, void *mssg)
++{
++ int idx;
++ unsigned long flags;
++
++ spin_lock_irqsave(&chan->lock, flags);
++
++ /* See if there is any space left */
++ if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
++ spin_unlock_irqrestore(&chan->lock, flags);
++ return -ENOMEM;
++ }
++
++ idx = chan->msg_free;
++ chan->msg_data[idx] = mssg;
++ chan->msg_count++;
++
++ if (idx == MBOX_TX_QUEUE_LEN - 1)
++ chan->msg_free = 0;
++ else
++ chan->msg_free++;
++
++ spin_unlock_irqrestore(&chan->lock, flags);
++
++ return idx;
++}
++
++static void _msg_submit(struct mbox_chan *chan)
++{
++ unsigned count, idx;
++ unsigned long flags;
++ void *data;
++ int err;
++
++ spin_lock_irqsave(&chan->lock, flags);
++
++ if (!chan->msg_count || chan->active_req) {
++ spin_unlock_irqrestore(&chan->lock, flags);
++ return;
++ }
++
++ count = chan->msg_count;
++ idx = chan->msg_free;
++ if (idx >= count)
++ idx -= count;
++ else
++ idx += MBOX_TX_QUEUE_LEN - count;
++
++ data = chan->msg_data[idx];
++
++ /* Try to submit a message to the MBOX controller */
++ err = chan->mbox->ops->send_data(chan, data);
++ if (!err) {
++ chan->active_req = data;
++ chan->msg_count--;
++ }
++
++ spin_unlock_irqrestore(&chan->lock, flags);
++}
++
++static void tx_tick(struct mbox_chan *chan, int r)
++{
++ unsigned long flags;
++ void *mssg;
++
++ spin_lock_irqsave(&chan->lock, flags);
++ mssg = chan->active_req;
++ chan->active_req = NULL;
++ spin_unlock_irqrestore(&chan->lock, flags);
++
++ /* Submit next message */
++ _msg_submit(chan);
++
++ /* Notify the client */
++ if (chan->cl->tx_block)
++ complete(&chan->tx_complete);
++ else if (mssg && chan->cl->tx_done)
++ chan->cl->tx_done(chan->cl, mssg, r);
++}
++
++static void poll_txdone(unsigned long data)
++{
++ struct mbox_controller *mbox = (struct mbox_controller *)data;
++ bool txdone, resched = false;
++ int i;
++
++ for (i = 0; i < mbox->num_chans; i++) {
++ struct mbox_chan *chan = &mbox->chans[i];
++
++ if (chan->active_req && chan->cl) {
++ resched = true;
++ txdone = chan->mbox->ops->last_tx_done(chan);
++ if (txdone)
++ tx_tick(chan, 0);
++ }
++ }
++
++ if (resched)
++ mod_timer(&mbox->poll,
++ jiffies + msecs_to_jiffies(mbox->period));
++}
++
++/**
++ * mbox_chan_received_data - A way for controller driver to push data
++ * received from remote to the upper layer.
++ * @chan: Pointer to the mailbox channel on which RX happened.
++ * @data: Client specific message typecasted as void *
++ *
++ * After startup and before shutdown any data received on the chan
++ * is passed on to the API via atomic mbox_chan_received_data().
++ * The controller should ACK the RX only after this call returns.
++ */
++void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
++{
++ /* No buffering the received data */
++ if (chan->cl->rx_callback)
++ chan->cl->rx_callback(chan->cl, mssg);
++}
++EXPORT_SYMBOL_GPL(mbox_chan_received_data);
++
++/**
++ * mbox_chan_txdone - A way for controller driver to notify the
++ * framework that the last TX has completed.
++ * @chan: Pointer to the mailbox chan on which TX happened.
++ * @r: Status of last TX - OK or ERROR
++ *
++ * The controller that has IRQ for TX ACK calls this atomic API
++ * to tick the TX state machine. It works only if txdone_irq
++ * is set by the controller.
++ */
++void mbox_chan_txdone(struct mbox_chan *chan, int r)
++{
++ if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
++ pr_err("Controller can't run the TX ticker\n");
++ return;
++ }
++
++ tx_tick(chan, r);
++}
++EXPORT_SYMBOL_GPL(mbox_chan_txdone);
++
++/**
++ * mbox_client_txdone - The way for a client to run the TX state machine.
++ * @chan: Mailbox channel assigned to this client.
++ * @r: Success status of last transmission.
++ *
++ * The client/protocol had received some 'ACK' packet and it notifies
++ * the API that the last packet was sent successfully. This only works
++ * if the controller can't sense TX-Done.
++ */
++void mbox_client_txdone(struct mbox_chan *chan, int r)
++{
++ if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
++ pr_err("Client can't run the TX ticker\n");
++ return;
++ }
++
++ tx_tick(chan, r);
++}
++EXPORT_SYMBOL_GPL(mbox_client_txdone);
++
++/**
++ * mbox_client_peek_data - A way for client driver to pull data
++ * received from remote by the controller.
++ * @chan: Mailbox channel assigned to this client.
++ *
++ * A poke to controller driver for any received data.
++ * The data is actually passed onto client via the
++ * mbox_chan_received_data()
++ * The call can be made from atomic context, so the controller's
++ * implementation of peek_data() must not sleep.
++ *
++ * Return: True, if controller has, and is going to push after this,
++ * some data.
++ * False, if controller doesn't have any data to be read.
++ */
++bool mbox_client_peek_data(struct mbox_chan *chan)
++{
++ if (chan->mbox->ops->peek_data)
++ return chan->mbox->ops->peek_data(chan);
++
++ return false;
++}
++EXPORT_SYMBOL_GPL(mbox_client_peek_data);
++
++/**
++ * mbox_send_message - For client to submit a message to be
++ * sent to the remote.
++ * @chan: Mailbox channel assigned to this client.
++ * @mssg: Client specific message typecasted.
++ *
++ * For client to submit data to the controller destined for a remote
++ * processor. If the client had set 'tx_block', the call will return
++ * either when the remote receives the data or when 'tx_tout' millisecs
++ * run out.
++ * In non-blocking mode, the requests are buffered by the API and a
++ * non-negative token is returned for each queued request. If the request
++ * is not queued, a negative token is returned. Upon failure or successful
++ * TX, the API calls 'tx_done' from atomic context, from which the client
++ * could submit yet another request.
++ * In blocking mode, 'tx_done' is not called, effectively making the
++ * queue length 1.
++ * The pointer to message should be preserved until it is sent
++ * over the chan, i.e, tx_done() is made.
++ * This function could be called from atomic context as it simply
++ * queues the data and returns a token against the request.
++ *
++ * Return: Non-negative integer for successful submission (non-blocking mode)
++ * or transmission over chan (blocking mode).
++ * Negative value denotes failure.
++ */
++int mbox_send_message(struct mbox_chan *chan, void *mssg)
++{
++ int t;
++
++ if (!chan || !chan->cl)
++ return -EINVAL;
++
++ t = _add_to_rbuf(chan, mssg);
++ if (t < 0) {
++ pr_err("Try increasing MBOX_TX_QUEUE_LEN\n");
++ return t;
++ }
++
++ _msg_submit(chan);
++
++ reinit_completion(&chan->tx_complete);
++
++ if (chan->txdone_method == TXDONE_BY_POLL)
++ poll_txdone((unsigned long)chan->mbox);
++
++ if (chan->cl->tx_block && chan->active_req) {
++ unsigned long wait;
++ int ret;
++
++ if (!chan->cl->tx_tout) /* wait for ever */
++ wait = msecs_to_jiffies(3600000);
++ else
++ wait = msecs_to_jiffies(chan->cl->tx_tout);
++
++ ret = wait_for_completion_timeout(&chan->tx_complete, wait);
++ if (ret == 0) {
++ t = -EIO;
++ tx_tick(chan, -EIO);
++ }
++ }
++
++ return t;
++}
++EXPORT_SYMBOL_GPL(mbox_send_message);
++
++/**
++ * mbox_request_channel - Request a mailbox channel.
++ * @cl: Identity of the client requesting the channel.
++ *
++ * The Client specifies its requirements and capabilities while asking for
++ * a mailbox channel. It can't be called from atomic context.
++ * The channel is exclusively allocated and can't be used by another
++ * client before the owner calls mbox_free_channel.
++ * After assignment, any packet received on this channel will be
++ * handed over to the client via the 'rx_callback'.
++ * The framework holds reference to the client, so the mbox_client
++ * structure shouldn't be modified until the mbox_free_channel returns.
++ *
++ * Return: Pointer to the channel assigned to the client if successful.
++ * ERR_PTR for request failure.
++ */
++struct mbox_chan *mbox_request_channel(struct mbox_client *cl)
++{
++ struct device *dev = cl->dev;
++ struct mbox_controller *mbox;
++ struct of_phandle_args spec;
++ struct mbox_chan *chan;
++ unsigned long flags;
++ int count, i, ret;
++
++ if (!dev || !dev->of_node) {
++ pr_err("%s: No owner device node\n", __func__);
++ return ERR_PTR(-ENODEV);
++ }
++
++ count = of_property_count_strings(dev->of_node, "mbox-names");
++ if (count < 0) {
++ pr_err("%s: mbox-names property of node '%s' missing\n",
++ __func__, dev->of_node->full_name);
++ return ERR_PTR(-ENODEV);
++ }
++
++ mutex_lock(&con_mutex);
++
++ ret = -ENODEV;
++ for (i = 0; i < count; i++) {
++ const char *s;
++
++ if (of_property_read_string_index(dev->of_node,
++ "mbox-names", i, &s))
++ continue;
++
++ if (strcmp(cl->chan_name, s))
++ continue;
++
++ if (of_parse_phandle_with_args(dev->of_node,
++ "mbox", "#mbox-cells", i, &spec))
++ continue;
++
++ chan = NULL;
++ list_for_each_entry(mbox, &mbox_cons, node)
++ if (mbox->dev->of_node == spec.np) {
++ chan = mbox->of_xlate(mbox, &spec);
++ break;
++ }
++
++ of_node_put(spec.np);
++
++ if (!chan)
++ continue;
++
++ ret = -EBUSY;
++ if (!chan->cl && try_module_get(mbox->dev->driver->owner))
++ break;
++ }
++
++ if (i == count) {
++ mutex_unlock(&con_mutex);
++ return ERR_PTR(ret);
++ }
++
++ spin_lock_irqsave(&chan->lock, flags);
++ chan->msg_free = 0;
++ chan->msg_count = 0;
++ chan->active_req = NULL;
++ chan->cl = cl;
++ init_completion(&chan->tx_complete);
++
++ if (chan->txdone_method == TXDONE_BY_POLL
++ && cl->knows_txdone)
++ chan->txdone_method |= TXDONE_BY_ACK;
++ spin_unlock_irqrestore(&chan->lock, flags);
++
++ ret = chan->mbox->ops->startup(chan);
++ if (ret) {
++ pr_err("Unable to startup the chan (%d)\n", ret);
++ mbox_free_channel(chan);
++ chan = ERR_PTR(ret);
++ }
++
++ mutex_unlock(&con_mutex);
++ return chan;
++}
++EXPORT_SYMBOL_GPL(mbox_request_channel);
++
++/**
++ * mbox_free_channel - The client relinquishes control of a mailbox
++ * channel by this call.
++ * @chan: The mailbox channel to be freed.
++ */
++void mbox_free_channel(struct mbox_chan *chan)
++{
++ unsigned long flags;
++
++ if (!chan || !chan->cl)
++ return;
++
++ chan->mbox->ops->shutdown(chan);
++
++ /* The queued TX requests are simply aborted, no callbacks are made */
++ spin_lock_irqsave(&chan->lock, flags);
++ chan->cl = NULL;
++ chan->active_req = NULL;
++ if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
++ chan->txdone_method = TXDONE_BY_POLL;
++
++ module_put(chan->mbox->dev->driver->owner);
++ spin_unlock_irqrestore(&chan->lock, flags);
++}
++EXPORT_SYMBOL_GPL(mbox_free_channel);
++
++static struct mbox_chan *
++of_mbox_index_xlate(struct mbox_controller *mbox,
++ const struct of_phandle_args *sp)
++{
++ int ind = sp->args[0];
++
++ if (ind >= mbox->num_chans)
++ return NULL;
++
++ return &mbox->chans[ind];
++}
++
++/**
++ * mbox_controller_register - Register the mailbox controller
++ * @mbox: Pointer to the mailbox controller.
++ *
++ * The controller driver registers its communication chans
++ */
++int mbox_controller_register(struct mbox_controller *mbox)
++{
++ int i, txdone;
++
++ /* Sanity check */
++ if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
++ return -EINVAL;
++
++ if (mbox->txdone_irq)
++ txdone = TXDONE_BY_IRQ;
++ else if (mbox->txdone_poll)
++ txdone = TXDONE_BY_POLL;
++ else /* It has to be ACK then */
++ txdone = TXDONE_BY_ACK;
++
++ if (txdone == TXDONE_BY_POLL) {
++ mbox->poll.function = &poll_txdone;
++ mbox->poll.data = (unsigned long)mbox;
++ init_timer(&mbox->poll);
++ }
++
++ for (i = 0; i < mbox->num_chans; i++) {
++ struct mbox_chan *chan = &mbox->chans[i];
++ chan->cl = NULL;
++ chan->mbox = mbox;
++ chan->txdone_method = txdone;
++ spin_lock_init(&chan->lock);
++ }
++
++ if (!mbox->of_xlate)
++ mbox->of_xlate = of_mbox_index_xlate;
++
++ mutex_lock(&con_mutex);
++ list_add_tail(&mbox->node, &mbox_cons);
++ mutex_unlock(&con_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(mbox_controller_register);
++
++/**
++ * mbox_controller_unregister - UnRegister the mailbox controller
++ * @mbox: Pointer to the mailbox controller.
++ */
++void mbox_controller_unregister(struct mbox_controller *mbox)
++{
++ int i;
++
++ if (!mbox)
++ return;
++
++ mutex_lock(&con_mutex);
++
++ list_del(&mbox->node);
++
++ for (i = 0; i < mbox->num_chans; i++)
++ mbox_free_channel(&mbox->chans[i]);
++
++ if (mbox->txdone_poll)
++ del_timer_sync(&mbox->poll);
++
++ mutex_unlock(&con_mutex);
++}
++EXPORT_SYMBOL_GPL(mbox_controller_unregister);
+diff -Nur linux-3.14.36/drivers/mailbox/Makefile linux-openelec/drivers/mailbox/Makefile
+--- linux-3.14.36/drivers/mailbox/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mailbox/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -1,3 +1,7 @@
++# Generic MAILBOX API
++
++obj-$(CONFIG_MAILBOX) += mailbox.o
++
+ obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
+
+ obj-$(CONFIG_OMAP_MBOX) += omap-mailbox.o
+diff -Nur linux-3.14.36/drivers/mailbox/pl320-ipc.c linux-openelec/drivers/mailbox/pl320-ipc.c
+--- linux-3.14.36/drivers/mailbox/pl320-ipc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mailbox/pl320-ipc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -26,7 +26,7 @@
+ #include <linux/device.h>
+ #include <linux/amba/bus.h>
+
+-#include <linux/mailbox.h>
++#include <linux/pl320-ipc.h>
+
+ #define IPCMxSOURCE(m) ((m) * 0x40)
+ #define IPCMxDSET(m) (((m) * 0x40) + 0x004)
+diff -Nur linux-3.14.36/drivers/Makefile linux-openelec/drivers/Makefile
+--- linux-3.14.36/drivers/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -111,6 +111,7 @@
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
+ obj-$(CONFIG_CPU_IDLE) += cpuidle/
+ obj-y += mmc/
++obj-$(CONFIG_ARCH_MXC) += mxc/
+ obj-$(CONFIG_MEMSTICK) += memstick/
+ obj-y += leds/
+ obj-$(CONFIG_INFINIBAND) += infiniband/
+diff -Nur linux-3.14.36/drivers/media/common/b2c2/flexcop-common.h linux-openelec/drivers/media/common/b2c2/flexcop-common.h
+--- linux-3.14.36/drivers/media/common/b2c2/flexcop-common.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/common/b2c2/flexcop-common.h 2015-07-24 18:03:30.356842002 -0500
+@@ -91,6 +91,8 @@
+ int feedcount;
+ int pid_filtering;
+ int fullts_streaming_state;
++ /* the stream will be activated by an externally (by the fe for example) */
++ int need_external_stream_control;
+
+ /* bus specific callbacks */
+ flexcop_ibi_value(*read_ibi_reg) (struct flexcop_device *,
+@@ -177,6 +179,8 @@
+ struct dvb_demux_feed *dvbdmxfeed, int onoff);
+ void flexcop_hw_filter_init(struct flexcop_device *fc);
+
++extern void flexcop_external_stream_control(struct dvb_frontend *fe, u8 onoff);
++
+ void flexcop_smc_ctrl(struct flexcop_device *fc, int onoff);
+
+ void flexcop_set_mac_filter(struct flexcop_device *fc, u8 mac[6]);
+diff -Nur linux-3.14.36/drivers/media/common/b2c2/flexcop-fe-tuner.c linux-openelec/drivers/media/common/b2c2/flexcop-fe-tuner.c
+--- linux-3.14.36/drivers/media/common/b2c2/flexcop-fe-tuner.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/common/b2c2/flexcop-fe-tuner.c 2015-07-24 18:03:30.356842002 -0500
+@@ -12,6 +12,7 @@
+ #include "cx24113.h"
+ #include "cx24123.h"
+ #include "isl6421.h"
++#include "cx24120.h"
+ #include "mt352.h"
+ #include "bcm3510.h"
+ #include "nxt200x.h"
+@@ -26,6 +27,15 @@
+ #define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \
+ (defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE)))
+
++#if FE_SUPPORTED(BCM3510) || FE_SUPPORTED(CX24120)
++static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
++ const struct firmware **fw, char* name)
++{
++ struct flexcop_device *fc = fe->dvb->priv;
++ return request_firmware(fw, name, fc->dev);
++}
++#endif
++
+ /* lnb control */
+ #if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
+ static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
+@@ -445,13 +455,6 @@
+
+ /* AirStar ATSC 1st generation */
+ #if FE_SUPPORTED(BCM3510)
+-static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
+- const struct firmware **fw, char* name)
+-{
+- struct flexcop_device *fc = fe->dvb->priv;
+- return request_firmware(fw, name, fc->dev);
+-}
+-
+ static struct bcm3510_config air2pc_atsc_first_gen_config = {
+ .demod_address = 0x0f,
+ .request_firmware = flexcop_fe_request_firmware,
+@@ -619,10 +622,40 @@
+ #define cablestar2_attach NULL
+ #endif
+
++/* SkyStar S2 PCI DVB-S/S2 card based on Conexant cx24120/cx24118 */
++#if FE_SUPPORTED(CX24120) && FE_SUPPORTED(ISL6421)
++static const struct cx24120_config skystar2_rev3_3_cx24120_config = {
++ .i2c_addr = 0x55,
++ .request_firmware = flexcop_fe_request_firmware,
++};
++
++static int skystarS2_rev33_attach(struct flexcop_device *fc, struct i2c_adapter *i2c)
++{
++// struct dvb_frontend_ops *ops;
++
++ fc->fe = dvb_attach(cx24120_attach,
++ &skystar2_rev3_3_cx24120_config, i2c);
++ if (fc->fe == NULL) return 0;
++ fc->dev_type = FC_SKYS2_REV33;
++ fc->fc_i2c_adap[2].no_base_addr = 1;
++ if ( (dvb_attach(isl6421_attach, fc->fe,
++ &fc->fc_i2c_adap[2].i2c_adap, 0x08, 0, 0, false) == NULL) ) {
++ err("ISL6421 could NOT be attached!");
++ return 0;
++ }
++ info("ISL6421 successfully attached.");
++// ops = &fc->fe->ops;
++ return 1;
++}
++#else
++#define skystarS2_rev33_attach NULL
++#endif
++
+ static struct {
+ flexcop_device_type_t type;
+ int (*attach)(struct flexcop_device *, struct i2c_adapter *);
+ } flexcop_frontends[] = {
++ { FC_SKYS2_REV33, skystarS2_rev33_attach },
+ { FC_SKY_REV27, skystar2_rev27_attach },
+ { FC_SKY_REV28, skystar2_rev28_attach },
+ { FC_SKY_REV26, skystar2_rev26_attach },
+diff -Nur linux-3.14.36/drivers/media/common/b2c2/flexcop-hw-filter.c linux-openelec/drivers/media/common/b2c2/flexcop-hw-filter.c
+--- linux-3.14.36/drivers/media/common/b2c2/flexcop-hw-filter.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/common/b2c2/flexcop-hw-filter.c 2015-07-24 18:03:30.356842002 -0500
+@@ -11,6 +11,12 @@
+ deb_ts("rcv_data is now: '%s'\n", onoff ? "on" : "off");
+ }
+
++void flexcop_external_stream_control(struct dvb_frontend *fe, u8 onoff)
++{
++ struct flexcop_device *fc = fe->dvb->priv;
++ flexcop_rcv_data_ctrl(fc, onoff);
++}
++
+ void flexcop_smc_ctrl(struct flexcop_device *fc, int onoff)
+ {
+ flexcop_set_ibi_value(ctrl_208, SMC_Enable_sig, onoff);
+@@ -199,6 +205,7 @@
+
+ /* if it was the first or last feed request change the stream-status */
+ if (fc->feedcount == onoff) {
++ if (!fc->need_external_stream_control)
+ flexcop_rcv_data_ctrl(fc, onoff);
+ if (fc->stream_control) /* device specific stream control */
+ fc->stream_control(fc, onoff);
+diff -Nur linux-3.14.36/drivers/media/common/b2c2/flexcop-misc.c linux-openelec/drivers/media/common/b2c2/flexcop-misc.c
+--- linux-3.14.36/drivers/media/common/b2c2/flexcop-misc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/common/b2c2/flexcop-misc.c 2015-07-24 18:03:30.356842002 -0500
+@@ -56,6 +56,7 @@
+ [FC_SKY_REV26] = "Sky2PC/SkyStar 2 DVB-S rev 2.6",
+ [FC_SKY_REV27] = "Sky2PC/SkyStar 2 DVB-S rev 2.7a/u",
+ [FC_SKY_REV28] = "Sky2PC/SkyStar 2 DVB-S rev 2.8",
++ [FC_SKYS2_REV33]= "Sky2PC/SkyStar S2 DVB-S/S2 rev 3.3",
+ };
+
+ static const char *flexcop_bus_names[] = {
+diff -Nur linux-3.14.36/drivers/media/common/b2c2/flexcop-reg.h linux-openelec/drivers/media/common/b2c2/flexcop-reg.h
+--- linux-3.14.36/drivers/media/common/b2c2/flexcop-reg.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/common/b2c2/flexcop-reg.h 2015-07-24 18:03:30.356842002 -0500
+@@ -24,6 +24,7 @@
+ FC_SKY_REV26,
+ FC_SKY_REV27,
+ FC_SKY_REV28,
++ FC_SKYS2_REV33,
+ } flexcop_device_type_t;
+
+ typedef enum {
+diff -Nur linux-3.14.36/drivers/media/common/b2c2/Kconfig linux-openelec/drivers/media/common/b2c2/Kconfig
+--- linux-3.14.36/drivers/media/common/b2c2/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/common/b2c2/Kconfig 2015-07-24 18:03:30.356842002 -0500
+@@ -3,6 +3,7 @@
+ depends on DVB_CORE && I2C
+ depends on DVB_B2C2_FLEXCOP_PCI || DVB_B2C2_FLEXCOP_USB
+ default y
++ select DVB_CX24120 if !DVB_FE_CUSTOMISE
+ select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
+diff -Nur linux-3.14.36/drivers/media/dvb-core/dvb-usb-ids.h linux-openelec/drivers/media/dvb-core/dvb-usb-ids.h
+--- linux-3.14.36/drivers/media/dvb-core/dvb-usb-ids.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-core/dvb-usb-ids.h 2015-07-24 18:03:30.144842002 -0500
+@@ -285,6 +285,8 @@
+ #define USB_PID_REALTEK_RTL2832U 0x2832
+ #define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
+ #define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a
++#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
++#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
+ #define USB_PID_NEBULA_DIGITV 0x0201
+ #define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
+ #define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/cx24120.c linux-openelec/drivers/media/dvb-frontends/cx24120.c
+--- linux-3.14.36/drivers/media/dvb-frontends/cx24120.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/cx24120.c 2015-07-24 18:03:30.360842002 -0500
+@@ -0,0 +1,1053 @@
++/*
++ Conexant cx24120/cx24118 - DVBS/S2 Satellite demod/tuner driver
++ Version 0.0.4a 03.04.2012
++
++ Copyright (C) 2009 Sergey Tyurin <forum.free-x.de>
++ Updated 2012 by Jannis Achstetter <jannis_achstetter@web.de>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*/
++
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/firmware.h>
++#include "dvb_frontend.h"
++#include "cx24120.h"
++#include "cx24120_const.h"
++
++//==========================
++#define dbginfo(args...) do { if(cx24120_debug) { printk(KERN_DEBUG "CX24120: %s: >>> ", __func__); \
++ printk(args); } } while (0)
++#define info(args...) do { printk(KERN_INFO "CX24120: %s: -> ", __func__); \
++ printk(args); } while (0)
++#define err(args...) do { printk(KERN_ERR "CX24120: %s: ### ERROR: ", __func__); \
++ printk(args); } while (0)
++//==========================
++
++static int cx24120_debug=0;
++static int reg_debug=0;
++MODULE_DESCRIPTION("DVB Frontend module for Conexant CX24120/CX24118 hardware");
++module_param(cx24120_debug, int, 0644);
++MODULE_PARM_DESC(cx24120_debug, "Activates frontend debugging (default:0)");
++
++// ##############################
++struct cx24120_state {
++ struct i2c_adapter *i2c;
++ const struct cx24120_config *config;
++ struct dvb_frontend frontend;
++ u8 need_set_mpeg_out;
++ u8 attached;
++ u8 dvb_s2_mode;
++ u8 cold_init;
++};
++// #####################################
++// #### Command message to firmware ####
++struct cx24120_cmd { // total size = 36
++ u8 id; // [00] - message id
++ u8 arg[30]; // [04] - message first byte
++ u8 len; // [34] - message lengh or first registers to read
++ u8 reg; // [35] - number of registers to read
++};
++
++//===================================================================
++static int cx24120_readreg(struct cx24120_state *state, u8 reg)
++{
++ int ret;
++ u8 buf = 0;
++ struct i2c_msg msg[] = {
++ { .addr = state->config->i2c_addr,
++ .flags = 0,
++ .len = 1,
++ .buf = &reg },
++
++ { .addr = state->config->i2c_addr,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = &buf }
++ };
++ ret = i2c_transfer(state->i2c, msg, 2);
++ if (ret != 2) {
++ err("Read error: reg=0x%02x, ret=0x%02x)\n", reg, ret);
++ return ret;
++ }
++ if (reg_debug) dbginfo("reg=0x%02x; data=0x%02x\n", reg, buf);
++ return buf;
++} // end cx24120_readreg
++//===================================================================
++static int cx24120_writereg(struct cx24120_state *state, u8 reg, u8 data)
++{
++ u8 buf[] = { reg, data };
++ struct i2c_msg msg = {
++ .addr = state->config->i2c_addr,
++ .flags = 0,
++ .buf = buf,
++ .len = 2 };
++ int ret;
++ ret = i2c_transfer(state->i2c, &msg, 1);
++ if (ret != 1) {
++ err("Write error: i2c_write error(err == %i, 0x%02x: 0x%02x)\n", ret, reg, data);
++ return ret;
++ }
++ if (reg_debug) dbginfo("reg=0x%02x; data=0x%02x\n", reg, data);
++ return 0;
++} // end cx24120_writereg
++//===================================================================
++static int cx24120_writeregN(struct cx24120_state *state, u8 reg, const u8 *values, u16 len, u8 incr)
++{
++ u8 buf[5]; /* maximum 4 data bytes at once - flexcop limitation (very limited i2c-interface this one) */
++ struct i2c_msg msg = {
++ .addr = state->config->i2c_addr,
++ .flags = 0,
++ .buf = buf,
++ .len = len };
++ int ret;
++
++ do {
++ buf[0] = reg;
++ msg.len = len > 4 ? 4 : len;
++ memcpy(&buf[1], values, msg.len);
++ len -= msg.len; // data length revers counter
++ values += msg.len; // incr data pointer
++ if (incr) reg += msg.len;
++ msg.len++; /* don't forget the addr byte */
++ ret = i2c_transfer(state->i2c, &msg, 1);
++ if (ret != 1) {
++ err("i2c_write error(err == %i, 0x%02x)\n", ret, reg);
++ return ret;
++ }
++ if (reg_debug) {
++ if( !(reg == 0xFA) && !(reg == 0x20) && !(reg == 0x21)) { // Exclude firmware upload & diseqc messages
++ dbginfo("reg=0x%02x; data=0x%02x,0x%02x,0x%02x,0x%02x\n", // from debug
++ reg, buf[1], buf[2], buf[3], buf[4]);
++ }
++ }
++ } while (len);
++ return 0;
++} // end cx24120_writeregN
++//===================================================================
++static struct dvb_frontend_ops cx24120_ops;
++//===================================================================
++struct dvb_frontend *cx24120_attach(const struct cx24120_config *config, struct i2c_adapter *i2c)
++{
++ struct cx24120_state *state = NULL;
++ int demod_rev;
++
++ info("Conexant cx24120/cx24118 - DVBS/S2 Satellite demod/tuner\n");
++ info("Driver version: 'SVT - 0.0.4a 03.04.2012'\n");
++ state = kzalloc(sizeof(struct cx24120_state),
++ GFP_KERNEL);
++ if (state == NULL) {
++ err("### Unable to allocate memory for cx24120_state structure. :(\n");
++ goto error;
++ }
++ /* setup the state */
++ state->config = config;
++ state->i2c = i2c;
++ /* check if the demod is present and has proper type */
++ demod_rev = cx24120_readreg(state, CX24120_REG_REVISION);
++ switch (demod_rev) {
++ case 0x07:
++ info("Demod CX24120 rev. 0x07 detected.\n");
++ break;
++ case 0x05:
++ info("Demod CX24120 rev. 0x05 detected.\n");
++ break;
++ default:
++ err("### Unsupported demod revision: 0x%x detected. Exit.\n", demod_rev);
++ goto error;
++ }
++ /* create dvb_frontend */
++ state->attached = 0x10; // set attached flag
++ state->cold_init=0;
++ memcpy(&state->frontend.ops, &cx24120_ops, sizeof(struct dvb_frontend_ops));
++ state->frontend.demodulator_priv = state;
++ info("Conexant cx24120/cx24118 - DVBS/S2 Satellite demod/tuner ATTACHED.\n");
++ return &state->frontend;
++
++error:
++ kfree(state);
++ return NULL;
++}
++EXPORT_SYMBOL(cx24120_attach); // end cx24120_attach
++//===================================================================
++static int cx24120_test_rom(struct cx24120_state *state)
++{
++ int err, ret;
++ err = cx24120_readreg(state, 0xFD);
++ if (err & 4 )
++ {
++ ret = cx24120_readreg(state, 0xDF) & 0xFE;
++ err = cx24120_writereg(state, 0xDF, ret);
++ }
++ return err;
++} // end cx24120_test_rom
++//===================================================================
++static int cx24120_read_snr(struct dvb_frontend *fe, u16 *snr)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++
++ *snr = (cx24120_readreg(state, CX24120_REG_QUALITY_H)<<8) |
++ (cx24120_readreg(state, CX24120_REG_QUALITY_L));
++ dbginfo("read SNR index = %d\n", *snr);
++
++ return 0;
++}
++EXPORT_SYMBOL(cx24120_read_snr); // end cx24120_read_snr
++//===================================================================
++static int cx24120_read_ber(struct dvb_frontend *fe, u32 *ber)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++
++ *ber = (cx24120_readreg(state, CX24120_REG_BER_HH) << 24) | // BER high byte of high word
++ (cx24120_readreg(state, CX24120_REG_BER_HL) << 16) | // BER low byte of high word
++ (cx24120_readreg(state, CX24120_REG_BER_LH) << 8) | // BER high byte of low word
++ cx24120_readreg(state, CX24120_REG_BER_LL); // BER low byte of low word
++ dbginfo("read BER index = %d\n", *ber);
++
++ return 0;
++}
++EXPORT_SYMBOL(cx24120_read_ber); // end cx24120_read_ber
++//===================================================================
++static int cx24120_message_send(struct cx24120_state *state, struct cx24120_cmd *cmd);
++//===================================================================
++static int cx24120_msg_mpeg_output_global_config(struct cx24120_state *state, u8 flag)
++{
++ u8 tristate;
++ struct cx24120_cmd cmd;
++
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++
++ cmd.id = 0x13; // (19) message Enable/Disable mpeg output ???
++ cmd.arg[0] = 1;
++ cmd.arg[1] = 0;
++ tristate = flag ? 0 : (u8)(-1);
++ cmd.arg[2] = tristate;
++ cmd.arg[3] = 1;
++ cmd.len = 4;
++
++ if(flag) dbginfo("MPEG output DISABLED\n");
++ else dbginfo("MPEG output ENABLED\n");
++
++ return cx24120_message_send(state, &cmd);
++} // end cx24120_msg_mpeg_output_global_config
++//===================================================================
++static int cx24120_message_send(struct cx24120_state *state, struct cx24120_cmd *cmd)
++{
++ u8 xxzz;
++ u32 msg_cmd_mask;
++ int ret, ficus;
++
++ if(state->dvb_s2_mode & 0x02) { // is MPEG enabled?
++ // if yes:
++ xxzz = cmd->id - 0x11; // look for specific message id
++ if ( xxzz <= 0x13 ) {
++ msg_cmd_mask = 1 << xxzz;
++ //0x0F8021 // if cmd_id 17 or 22 or 33-36, 42, 47, 57-61 etc. disable mpeg output
++ if ( msg_cmd_mask & 0x0F8021 ) { // 000011111000000000100001b
++ cx24120_msg_mpeg_output_global_config(state, 0);
++ msleep(100);
++ state->dvb_s2_mode &= 0xFD; // reset mpeg out enable flag
++ }
++ }
++ }
++ ret = cx24120_writereg(state, 0x00 /* reg id*/, cmd->id /* value */); // message start & target
++ ret = cx24120_writeregN(state, 0x01 /* reg msg*/, &cmd->arg[0], cmd->len /* len*/, 1 /* incr */); // message data
++ ret = cx24120_writereg(state, 0x1F /* reg msg_end */, 0x01 /* value */); // message end
++
++ ficus = 1000;
++ while ( cx24120_readreg(state, 0x1F)) { // is command done???
++ msleep(1);
++ if( !(--ficus)) {
++ err("Too long waiting 'done' state from reg(0x1F). :(\n");
++ return -EREMOTEIO;
++ }
++ }
++ dbginfo("Successfully send message 0x%02x\n", cmd->id);
++
++ if ( cmd->reg > 30 ) {
++ err("Too much registers to read. cmd->reg = %d", cmd->reg);
++ return -EREMOTEIO;
++ }
++ ficus = 0;
++ if ( cmd->reg ) { // cmd->reg - qty consecutive regs to read
++ while ( ficus < cmd->reg ){ // starts from reg No cmd->len
++ // number of registers to read is cmd->reg
++ // and write results starts from cmd->arg[0].
++ cmd->arg[ficus] = cx24120_readreg(state, (cmd->len+ficus+1));
++ ++ficus;
++ }
++ }
++ return 0;
++} // end cx24120_message_send
++//===================================================================
++static int cx24120_set_frontend(struct dvb_frontend *fe)
++{
++ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
++ struct cx24120_state *state = fe->demodulator_priv;
++ struct cx24120_cmd cmd;
++ u32 srate, freq;
++ fe_code_rate_t fec;
++ fe_spectral_inversion_t inversion;
++ u8 smbr1, smbr2;
++ int ret;
++
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++
++ cmd.id = CMD_TUNEREQUEST; // 0x11 set tuner parametrs
++ cmd.len = 15;
++
++ freq = p->frequency;
++ srate = p->symbol_rate;
++ fec = p->fec_inner;
++ inversion = p->inversion;
++
++ // check symbol rate
++ if ( srate > 31000000 ) { // if symbol rate > 31 000
++ smbr1 = (-(srate < 31000001) & 3) + 2; // ebp
++ smbr2 = (-(srate < 31000001) & 6) + 4; // edi
++ } else {
++ smbr1 = 3;
++ smbr2 = 6;
++ }
++
++ ret = cx24120_writereg(state, 0xE6, smbr1);
++ ret = cx24120_readreg(state, 0xF0);
++ ret &= 0xFFFFFFF0;
++ ret |= smbr2;
++ ret = cx24120_writereg(state, 0xF0, ret);
++
++ cmd.arg[0] = 0; // CMD_TUNER_REQUEST
++
++ // Frequency
++ cmd.arg[1] = (freq & 0xFF0000) >> 16; /* intermediate frequency in kHz */
++ cmd.arg[2] = (freq & 0x00FF00) >> 8;
++ cmd.arg[3] = (freq & 0x0000FF);
++
++ // Symbol Rate
++ cmd.arg[4] = ((srate/1000) & 0xFF00) >> 8;
++ cmd.arg[5] = ((srate/1000) & 0x00FF);
++
++ // Inversion
++ if ( inversion ) {
++ if ( inversion == 1 ) cmd.arg[6] = 4;
++ else cmd.arg[6] = 0x0C;
++ } else {
++ cmd.arg[6] = 0;
++ }
++
++ // FEC
++ switch ( fec ) // fec = p->u.qpsk.fec_inner
++ {
++ case 1: // FEC_1_2
++ cmd.arg[7] = 0x2E; break; // [11] = 0 by memset
++ case 2: // FEC_2_3
++ cmd.arg[7] = 0x2F; break;
++ case 3: // FEC_3_4
++ cmd.arg[7] = 0x30; break;
++ case 5: // FEC_5_6
++ cmd.arg[7] = 0x31; break;
++ case 7: // FEC_7_8
++ cmd.arg[7] = 0x32; break;
++ default: // FEC_NONE, FEC_4_5, FEC_6_7, FEC_8_9,
++ // FEC_AUTO, FEC_3_5, FEC_9_10
++ if ( state->dvb_s2_mode & 1 ) { // if DVB-S2 mode
++ cmd.arg[7] = 0;
++ cmd.arg[11] = 0;
++ } else {
++ cmd.arg[7] = 0x2E;
++ cmd.arg[11] = 0xAC;
++ }
++ break;
++ }
++ cmd.arg[8] = 0x13;
++ cmd.arg[9] = 0x88;
++ cmd.arg[10] = 0;
++ cmd.arg[12] = smbr2;
++ cmd.arg[13] = smbr1;
++ cmd.arg[14] = 0;
++
++ state->need_set_mpeg_out |= 0x01; // after tune we need restart mpeg out ?????
++
++ return cx24120_message_send(state, &cmd);
++
++}
++EXPORT_SYMBOL(cx24120_set_frontend); // end cx24120_set_frontend
++//===================================================================
++void cx24120_message_fill(struct cx24120_cmd *cmd,
++ u8 msg_id,
++ u8 *msg_addr,
++ u8 msg_len,
++ u8 num_regs)
++{
++ cmd->id = msg_id;
++ memcpy(&cmd->arg[0], msg_addr, msg_len);
++ cmd->len = msg_len;
++ cmd->reg = num_regs;
++} // end cx24120_message_fill
++//===================================================================
++static int cx24120_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++ struct cx24120_cmd cmd;
++ int result, sigstr_h, sigstr_l;
++
++ cx24120_message_fill(&cmd, 0x1A/*msg_id*/, &cx24120_msg_read_sigstr[0], 1/*msg_len*/, 0/*num_regs*/);
++
++ if( !(cx24120_message_send(state, &cmd)) ) {
++ sigstr_h = (cx24120_readreg(state, CX24120_REG_SIGSTR_H) >> 6) << 8;
++ sigstr_l = cx24120_readreg(state, CX24120_REG_SIGSTR_L );
++ dbginfo("Signal strength from firmware= 0x%x\n", (sigstr_h | sigstr_l));
++ *signal_strength = ((sigstr_h | sigstr_l) << 5) & 0x0000FFFF;
++ dbginfo("Signal strength= 0x%x\n", *signal_strength);
++ result = 0;
++ } else {
++ err("error reading signal strength\n");
++ result = -EREMOTEIO;
++ }
++ return result;
++}
++EXPORT_SYMBOL(cx24120_read_signal_strength); // end cx24120_read_signal_strength
++//===================================================================
++static int cx24120_msg_mpeg_output_config(struct cx24120_state *state, u8 num,
++ struct cx24120_skystar2_mpeg_config *config_msg)
++{
++ struct cx24120_cmd cmd;
++
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++
++ cmd.id = CMD_MPEG_INIT; // cmd->id=20 - message id
++ cmd.len = 7;
++ cmd.arg[0] = num; // sequental number - can be 0,1,2
++ cmd.arg[1] = ((config_msg->x1 & 0x01) << 1) |
++ ((config_msg->x1 >> 1) & 0x01);
++ cmd.arg[2] = 0x05;
++ cmd.arg[3] = 0x02;
++ cmd.arg[4] = ((config_msg->x2 >> 1) & 0x01);
++ cmd.arg[5] = (config_msg->x2 & 0xF0) | (config_msg->x3 & 0x0F);
++ cmd.arg[6] = state->attached; /* 0x10 if succesfully attached */
++
++ return cx24120_message_send(state, &cmd);
++} // end cx24120_msg_mpeg_output_config
++//===================================================================
++static int cx24120_diseqc_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++ struct cx24120_cmd cmd;
++
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++
++ cmd.id = CMD_DISEQC_BURST;
++ cmd.arg[0] = 0x00;
++ if (burst)
++ cmd.arg[1] = 0x01;
++ dbginfo("burst sent.\n");
++
++ return cx24120_message_send(state, &cmd);
++}
++EXPORT_SYMBOL(cx24120_diseqc_send_burst); // end cx24120_diseqc_send_burst
++//===================================================================
++static int cx24120_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++ struct cx24120_cmd cmd;
++
++ dbginfo("cmd(0x23,4) - tone = %d\n", tone);
++ if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) {
++ err("Invalid tone=%d\n", tone);
++ return -EINVAL;
++ }
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++ cmd.id = CMD_SETTONE; // 0x23
++ cmd.len = 4;
++ if (!tone)
++ cmd.arg[3] = 0x01;
++ return cx24120_message_send(state, &cmd);
++}
++EXPORT_SYMBOL(cx24120_set_tone); // end cx24120_set_tone
++//===================================================================
++static int cx24120_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++ struct cx24120_cmd cmd;
++
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++ cmd.id = CMD_SETVOLTAGE; //
++ cmd.len = 2;
++ if (!(voltage - 1))
++ cmd.arg[1] = 0x01;
++ return cx24120_message_send(state, &cmd);
++}
++EXPORT_SYMBOL(cx24120_set_voltage); // end cx24120_set_voltage
++//===================================================================
++static int cx24120_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *d)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++ struct cx24120_cmd cmd;
++ int back_count;
++
++ dbginfo("Start sending diseqc sequence===============\n");
++
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++
++ cmd.id = CMD_DISEQC_MSG1; // 0x20
++ cmd.len = 11;
++ cmd.arg[0] = 0x00;
++ cmd.arg[1] = 0x00;
++ cmd.arg[2] = 0x03;
++ cmd.arg[3] = 0x16;
++ cmd.arg[4] = 0x28;
++ cmd.arg[5] = 0x01;
++ cmd.arg[6] = 0x01;
++ cmd.arg[7] = 0x14;
++ cmd.arg[8] = 0x19;
++ cmd.arg[9] = 0x14;
++ cmd.arg[10] = 0x1E;
++ if ( cx24120_message_send(state, &cmd) ) {
++ err("send 1st message(0x%x) filed==========\n", cmd.id);
++ return -EREMOTEIO;
++ }
++ cmd.id = CMD_DISEQC_MSG2; // 0x21
++ cmd.len = d->msg_len + 6;
++ cmd.arg[0] = 0x00;
++ cmd.arg[1] = 0x01;
++ cmd.arg[2] = 0x02;
++ cmd.arg[3] = 0x00;
++ cmd.arg[4] = 0x00;
++ cmd.arg[5] = d->msg_len;
++
++ memcpy(&cmd.arg[6], &d->msg, d->msg_len);
++
++ if ( cx24120_message_send(state, &cmd) ) {
++ err("send 2d message(0x%x) filed========\n", cmd.id);
++ return -EREMOTEIO;
++ }
++ back_count = 100;
++ do {
++ if ( !(cx24120_readreg(state, 0x93) & 0x01) ) {
++ dbginfo("diseqc sequence sent success==========.\n");
++ return 0;
++ }
++ msleep(5);
++ --back_count;
++ } while ( back_count );
++ err("Too long waiting for diseqc.=============\n");
++ return -ETIMEDOUT;
++}
++EXPORT_SYMBOL(cx24120_send_diseqc_msg); // end cx24120_send_diseqc_msg
++//===================================================================
++static int cx24120_read_status(struct dvb_frontend *fe, fe_status_t *status)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++ struct cx24120_cmd cmd;
++ int ret, clock_seq_num, GettedFEC;
++ u8 mode_code, mode_8PSK_flag, attached_flag, clock_id;
++
++ ret = cx24120_readreg(state, CX24120_REG_STATUS); //0x3A
++ dbginfo("status = 0x%x\n", ret);
++ *status = 0;
++ if ( ret & CX24120_HAS_SIGNAL ) *status = FE_HAS_SIGNAL;
++ if ( ret & CX24120_HAS_CARRIER) *status |= FE_HAS_CARRIER;
++ if ( ret & CX24120_HAS_VITERBI) *status |= (FE_HAS_VITERBI + FE_HAS_SYNC);
++
++ if ( ret & CX24120_HAS_LOCK ) { // 0x08
++ *status |= FE_HAS_LOCK;
++ if ( state->need_set_mpeg_out & 1 ) { // just tuned???
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++ cmd.id = CMD_CLOCK_READ;
++ cmd.arg[0] = 0x00;
++ cmd.len = 1; // cmd.reg != 0, so it is first register to read
++ cmd.reg = 6; // number of registers to read (0x01-0x06)
++ if ( !cx24120_message_send(state, &cmd) ) { // in cmd[0]-[5] - result
++ // 0x02-0x07
++ ret = cx24120_readreg(state, CX24120_REG_FECMODE) & 0x3F; // ntv - 0x8E(142) & 3F = 14
++ GettedFEC = ret; // 0x0d= 13
++ dbginfo("Get FEC: %d\n", ret);
++ if ( state->dvb_s2_mode & 0x01 ) { // is DVB-S2?
++ switch (ret-4) {
++ case 0:
++ mode_code = 0x01; goto mode_QPSK; // FEC_1_2 - qpsk only
++ case 1:
++ case 8:
++ mode_code = 0x64; goto mode_8PSK; // FEC_3_5 (10)- 8PSK only
++ case 2:
++ case 9:
++ mode_code = 0x02; goto mode_8PSK; // FEC_2_3
++ case 3:
++ case 10:
++ mode_code = 0x03; goto mode_8PSK; // FEC_3_4 // 14-4=10 - ntv+
++ case 4:
++ mode_code = 0x04; goto mode_QPSK; // FEC_4_5 - qpsk only
++ case 5:
++ case 11:
++ mode_code = 0x05; goto mode_8PSK; // FEC_5_6
++ case 6:
++ case 12:
++ mode_code = 0x08; goto mode_8PSK; // FEC_8_9
++ case 7:
++ case 13:
++ mode_code = 0x65; goto mode_8PSK; // FEC_9_10 (11)- 8PSK only
++ default:
++ info("Unknown DVB-S2 modefec (not QPSK or 8PSK): %d\n", ret-4);
++ mode_code = 0x01; // set like for mode 0
++ mode_8PSK:
++ if ( ret > 11 ) { // 14
++ mode_8PSK_flag = 0x63; // DVB-S2-8PSK flag
++ dbginfo("DVB-S2: 8PSK mode: %d, mode_code= 0x%x\n", ret-4, mode_code);
++ } else {
++ mode_QPSK:
++ mode_8PSK_flag = 0x00;
++ dbginfo("DVB-S2: QPSK mode: %d\n", ret-4);
++ }
++ break;
++ } // end switch
++ } // end if dvb_s2_mode // dvb-s2
++ else { // state->dvb_s2_mode & 1 = 0 -> #### DVB-S
++ switch ( ret - 2 ) {
++ case 0:
++ mode_code = 2; break; // FEC_2_3
++ case 1:
++ mode_code = 3; break; // FEC_3_4
++ case 2:
++ mode_code = 4; break; // FEC_4_5
++ case 3:
++ mode_code = 5; break; // FEC_5_6
++ case 4:
++ mode_code = 6; break; // FEC_6_7
++ case 5:
++ mode_code = 7; break; // FEC_7_8
++ default:
++ mode_code = 1;break; // FEC_1_2
++ }
++ mode_8PSK_flag = 0;
++ } // end of switch for dvb-s
++
++ attached_flag = 0x10;
++ if (state->attached == 0x10) // must be 0x10 if successfully attached in flexcop_fe_tuner
++ attached_flag = 0;
++ ret = 0;
++ if ( state->dvb_s2_mode & 0x01 ) // if dvb-s2
++ ret = (cx24120_readreg(state, CX24120_REG_FECMODE) >> 7) & 0x01; // QPSK or 8PSK ???
++ // bit 4 bit 5 bit 0 bit 3
++ clock_id = (ret << 3) | attached_flag | (state->dvb_s2_mode & 1) | 4; // possible id: 4, 5, 13. 12-impossible,
++ // ntv S2 = 0x8E -> 8 | 1 | 4 = 13 // because 7th bit of ret - is S2 flag
++ // 1/2 S2 = 0x0d -> 0 | 1 | 4 = 5
++ dbginfo("Check clock table for: clock_id=0x%x, 8PSK_mask=0x%x, mode_code=0x%x\n",
++ clock_id, mode_8PSK_flag, mode_code);
++
++ clock_seq_num = 0;
++ while ( (clock_ratios_table[clock_seq_num].ratio_id != clock_id) ||
++ (clock_ratios_table[clock_seq_num].mode_xPSK != mode_8PSK_flag) ||
++ (clock_ratios_table[clock_seq_num].fec_mode != mode_code) )
++ {
++ /* dbginfo("Check table string(%d): clock_id=%d, 8PSK_flag=%d, mode_code=%d\n", clock_seq_num,
++ * clock_ratios_table[clock_seq_num].ratio_id,
++ * clock_ratios_table[clock_seq_num].mode_xPSK,
++ * clock_ratios_table[clock_seq_num].fec_mode);
++ */
++ ++clock_seq_num;
++ if ( clock_seq_num == ARRAY_SIZE(clock_ratios_table) ) {
++ info("Check in clock table filed: unsupported modulation tuned - data reception in danger. :(\n");
++ goto settings_end;
++ }
++ }
++ //###############################
++ dbginfo("Check succesful: GetFEC: %d; post lock: m=%d, n=%d; clock_seq_idx: %d m=%d, n=%d, rate=%d\n",
++ GettedFEC,
++ cmd.arg[2] | (cmd.arg[1] << 8) | (cmd.arg[0] << 16), // registers was readed early
++ cmd.arg[5] | (cmd.arg[4] << 8) | (cmd.arg[3] << 16), // in message with id = 0x16
++ clock_seq_num,
++ clock_ratios_table[clock_seq_num].m_rat,
++ clock_ratios_table[clock_seq_num].n_rat,
++ clock_ratios_table[clock_seq_num].rate);
++ //###############################
++ cmd.id = CMD_CLOCK_SET;
++ cmd.len = 10;
++ cmd.reg = 0;
++ cmd.arg[0] = 0;
++ cmd.arg[1] = state->attached; // must be 0x10 if successfully attached in flexcop_fe_tuner
++
++ cmd.arg[2] = (clock_ratios_table[clock_seq_num].m_rat >> 16) & 0xFF;
++ cmd.arg[3] = (clock_ratios_table[clock_seq_num].m_rat >> 8) & 0xFF;
++ cmd.arg[4] = (clock_ratios_table[clock_seq_num].m_rat >> 0) & 0xFF;
++
++ cmd.arg[5] = (clock_ratios_table[clock_seq_num].n_rat >> 16) & 0xFF;
++ cmd.arg[6] = (clock_ratios_table[clock_seq_num].n_rat >> 8) & 0xFF;
++ cmd.arg[7] = (clock_ratios_table[clock_seq_num].n_rat >> 0) & 0xFF;
++
++ cmd.arg[8] = (clock_ratios_table[clock_seq_num].rate >> 8) & 0xFF;
++ cmd.arg[9] = (clock_ratios_table[clock_seq_num].rate >> 0) & 0xFF;
++
++ cx24120_message_send(state, &cmd);
++
++ settings_end:
++ msleep(200);
++ cx24120_msg_mpeg_output_global_config(state, 1);
++ state->dvb_s2_mode |= 0x02; // set mpeg flag
++ state->need_set_mpeg_out &= 0xFE; // clocks set done -> clear flag
++ }
++ }
++ }
++ return 0;
++}
++EXPORT_SYMBOL(cx24120_read_status); // end cx24120_read_status
++//===================================================================
++int cx24120_init(struct dvb_frontend *fe)
++{
++ const struct firmware *fw;
++ struct cx24120_state *state = fe->demodulator_priv;
++ struct cx24120_cmd cmd;
++ u8 ret, ret_EA, reg1, fL, fH;
++ u32 vco, xtal_khz;
++ u64 inv_vco, res, xxyyzz;
++ int reset_result;
++
++ if( state->cold_init ) return 0;
++
++ ret = cx24120_writereg(state, 0xEA, 0x00);
++ ret = cx24120_test_rom(state);
++ ret = cx24120_readreg(state, 0xFB) & 0xFE;
++ ret = cx24120_writereg(state, 0xFB, ret);
++ ret = cx24120_readreg(state, 0xFC) & 0xFE;
++ ret = cx24120_writereg(state, 0xFC, ret);
++ ret = cx24120_writereg(state, 0xC3, 0x04);
++ ret = cx24120_writereg(state, 0xC4, 0x04);
++ ret = cx24120_writereg(state, 0xCE, 0x00);
++ ret = cx24120_writereg(state, 0xCF, 0x00);
++ ret_EA = cx24120_readreg(state, 0xEA) & 0xFE;
++ ret = cx24120_writereg(state, 0xEA, ret_EA);
++ ret = cx24120_writereg(state, 0xEB, 0x0C);
++ ret = cx24120_writereg(state, 0xEC, 0x06);
++ ret = cx24120_writereg(state, 0xED, 0x05);
++ ret = cx24120_writereg(state, 0xEE, 0x03);
++ ret = cx24120_writereg(state, 0xEF, 0x05);
++ ret = cx24120_writereg(state, 0xF3, 0x03);
++ ret = cx24120_writereg(state, 0xF4, 0x44);
++
++ reg1 = 0xF0;
++ do {
++ cx24120_writereg(state, reg1, 0x04);
++ cx24120_writereg(state, reg1 - 10, 0x02);
++ ++reg1;
++ } while ( reg1 != 0xF3 );
++
++ ret = cx24120_writereg(state, 0xEA, (ret_EA | 0x01));
++ reg1 = 0xC5;
++ do {
++ ret = cx24120_writereg(state, reg1, 0x00);
++ ret = cx24120_writereg(state, reg1 + 1, 0x00);
++ reg1 += 2;
++ } while ( reg1 != 0xCB );
++
++ ret = cx24120_writereg(state, 0xE4, 0x03);
++ ret = cx24120_writereg(state, 0xEB, 0x0A);
++
++ dbginfo("Requesting firmware (%s) to download...\n", CX24120_FIRMWARE);
++ ret = state->config->request_firmware(fe, &fw, CX24120_FIRMWARE);
++ if (ret) {
++ err("Could not load firmware (%s): %d\n", CX24120_FIRMWARE, ret);
++ return ret;
++ }
++ dbginfo("Firmware found and it size is %d bytes (%02x %02x .. %02x %02x)\n",
++ (int)fw->size, // firmware_size in bytes u32*
++ fw->data[0], // fw 1st byte
++ fw->data[1], // fw 2d byte
++ fw->data[fw->size - 2], // fw before last byte
++ fw->data[fw->size - 1]); // fw last byte
++
++ ret = cx24120_test_rom(state);
++ ret = cx24120_readreg(state, 0xFB) & 0xFE;
++ ret = cx24120_writereg(state, 0xFB, ret);
++ ret = cx24120_writereg(state, 0xE0, 0x76);
++ ret = cx24120_writereg(state, 0xF7, 0x81);
++ ret = cx24120_writereg(state, 0xF8, 0x00);
++ ret = cx24120_writereg(state, 0xF9, 0x00);
++ ret = cx24120_writeregN(state, 0xFA, fw->data, (fw->size - 1), 0x00);
++ ret = cx24120_writereg(state, 0xF7, 0xC0);
++ ret = cx24120_writereg(state, 0xE0, 0x00);
++ ret = (fw->size - 2) & 0x00FF;
++ ret = cx24120_writereg(state, 0xF8, ret); // ret now is 0x7a
++ ret = ((fw->size - 2) >> 8) & 0x00FF;
++ ret = cx24120_writereg(state, 0xF9, ret); // ret now is 0xaf
++ ret = cx24120_writereg(state, 0xF7, 0x00);
++ ret = cx24120_writereg(state, 0xDC, 0x00);
++ ret = cx24120_writereg(state, 0xDC, 0x07);
++ msleep(500);
++
++ ret = cx24120_readreg(state, 0xE1); // now is 0xd5 - last byte of the firmware
++ if ( ret == fw->data[fw->size - 1] ) {
++ dbginfo("Firmware uploaded successfully\n");
++ reset_result = 0;
++ } else {
++ err("Firmware upload failed. Last byte returned=0x%x\n", ret );
++ reset_result = -EREMOTEIO;
++ }
++ ret = cx24120_writereg(state, 0xDC, 0x00);
++ release_firmware(fw);
++ if (reset_result)
++ return reset_result;
++
++ //================== Start tuner
++ cx24120_message_fill(&cmd, CMD_START_TUNER, &cx24120_msg_tuner_init[0], 3, 0); // 0x1B
++ if(cx24120_message_send(state, &cmd)) {
++ err("Error tuner start! :(\n");
++ return -EREMOTEIO;
++ }
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++
++ cmd.id = CMD_VCO_SET; // 0x10
++ cmd.len = 12;
++
++ // ######################
++ // Calc VCO
++ xtal_khz = 10111;
++ xxyyzz = 0x400000000ULL; // 17179869184
++ vco = xtal_khz * 10 * 4; // 404440
++ inv_vco = xxyyzz / vco; // 42478 = 0x00A5EE
++ res = xxyyzz % vco; // 66864 = 0x010530
++
++ if( inv_vco > xtal_khz * 10 * 2) ++inv_vco;
++
++ fH = (inv_vco >> 8) & 0xFF;
++ fL = (inv_vco) & 0xFF;
++ dbginfo("vco= %d, inv_vco= %lld, res= %lld, fL= 0x%x, fH= 0x%x\n", vco, inv_vco, res, fL, fH);
++ // ######################
++
++ cmd.arg[0] = 0x06;
++ cmd.arg[1] = 0x2B;
++ cmd.arg[2] = 0xD8;
++ cmd.arg[3] = fH; // 0xA5
++ cmd.arg[4] = fL; // 0xEE
++ cmd.arg[5] = 0x03;
++ cmd.arg[6] = 0x9D;
++ cmd.arg[7] = 0xFC;
++ cmd.arg[8] = 0x06;
++ cmd.arg[9] = 0x03;
++ cmd.arg[10] = 0x27;
++ cmd.arg[11] = 0x7F;
++
++ if(cx24120_message_send(state, &cmd)) {
++ err("Error set VCO! :(\n");
++ return -EREMOTEIO;
++ }
++ memset(&cmd, 0, sizeof(struct cx24120_cmd));
++ // set bandwidth
++ cmd.id = CMD_BANDWIDTH; // 0x15
++ cmd.len = 12;
++ cmd.arg[0] = 0x00;
++ cmd.arg[1] = 0x00;
++ cmd.arg[2] = 0x00;
++ cmd.arg[3] = 0x00;
++ cmd.arg[4] = 0x05;
++ cmd.arg[5] = 0x02;
++ cmd.arg[6] = 0x02;
++ cmd.arg[7] = 0x00;
++ cmd.arg[8] = 0x05;
++ cmd.arg[9] = 0x02;
++ cmd.arg[10] = 0x02;
++ cmd.arg[11] = 0x00;
++
++ if ( cx24120_message_send(state, &cmd) ) {
++ err("Error set bandwidth! :(\n");
++ return -EREMOTEIO;
++ }
++ ret = cx24120_readreg(state, 0xBA);
++ if ( ret > 3) {
++ dbginfo("Reset-readreg 0xBA: %x\n", ret);
++ err("Error intitilizing tuner! :(\n");
++ return -EREMOTEIO;
++ }
++ dbginfo("Tuner initialized correctly.\n");
++
++ ret = cx24120_writereg(state, 0xEB, 0x0A);
++ if (cx24120_msg_mpeg_output_global_config(state, 0) ||
++ cx24120_msg_mpeg_output_config(state, 0, &initial_mpeg_config) ||
++ cx24120_msg_mpeg_output_config(state, 1, &initial_mpeg_config) ||
++ cx24120_msg_mpeg_output_config(state, 2, &initial_mpeg_config) )
++ {
++ err("Error initilizing mpeg output. :(\n");
++ return -EREMOTEIO;
++ } else {
++ cmd.id = 0x3C; // 60
++ cmd.len = 0x03;
++ cmd.arg[0] = 0x00;
++ cmd.arg[1] = 0x10;
++ cmd.arg[2] = 0x10;
++ if(cx24120_message_send(state, &cmd)) {
++ err("Error sending final init message. :(\n");
++ return -EREMOTEIO;
++ }
++ }
++ state->cold_init=1;
++ return 0;
++}
++EXPORT_SYMBOL(cx24120_init); // end cx24120_reset
++//===================================================================
++static int cx24120_tune(struct dvb_frontend *fe, bool re_tune,
++ unsigned int mode_flags, unsigned int *delay, fe_status_t *p_status)
++{
++ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
++ struct cx24120_state *state = fe->demodulator_priv;
++ int delay_cnt, sd_idx = 0;
++ fe_status_t status;
++
++ if (re_tune) {
++
++// dbginfo("Compare symrate with table: symrate= %d, in table= %d\n",
++// p->u.qpsk.symbol_rate, symrates_pairs[sd_idx].symrate);
++
++ while ( p->symbol_rate > symrates_pairs[sd_idx].symrate ) {
++ ++sd_idx;
++ }
++ dbginfo("Found symrate delay = %d\n", symrates_pairs[sd_idx].delay);
++ state->dvb_s2_mode &= 0xFE; // clear bit -> try not DVB-S2
++ dbginfo("trying DVB-S =================\n");
++ cx24120_set_frontend(fe);
++
++ delay_cnt = symrates_pairs[sd_idx].delay;
++ dbginfo("Wait for LOCK for DVB-S =================\n");
++ while (delay_cnt >= 0) {
++ cx24120_read_status(fe, &status);
++ if (status & FE_HAS_LOCK) {
++ dbginfo("DVB-S LOCKED================\n");
++ break;
++ }
++ msleep(100);
++ delay_cnt -=100;
++ }
++ dbginfo("Waiting finished - NO lock for DVB-S =================\n");
++
++ cx24120_read_status(fe, &status);
++ if ( !(status & FE_HAS_LOCK) ) { // if no lock on S
++ dbginfo("trying DVB-S2 ++++++++++++++++++++++++++\n");
++ state->dvb_s2_mode |= 0x01; // may be it locked on S2 ?
++ p->fec_inner = FEC_AUTO;
++ cx24120_set_frontend(fe);
++ delay_cnt = symrates_pairs[sd_idx].delay;
++ dbginfo("Wait for LOCK for DVB-S2 ++++++++++++++++\n");
++ while (delay_cnt >= 0) {
++ cx24120_read_status(fe, &status);
++ if (status & FE_HAS_LOCK) {
++ dbginfo("DVB-S2 LOCKED++++++++++++++++\n");
++ break;
++ }
++ msleep(100);
++ delay_cnt -=100;
++ }
++ dbginfo("Waiting finished - NO lock for DVB-S2 ++++++++++++++++\n");
++ }
++ }
++ return 0;
++}
++EXPORT_SYMBOL(cx24120_tune); // end of cx24120_tune
++//===================================================================
++static int cx24120_get_algo(struct dvb_frontend *fe)
++{
++ return DVBFE_ALGO_HW;
++}
++EXPORT_SYMBOL(cx24120_get_algo);
++//===================================================================
++static int cx24120_sleep(struct dvb_frontend *fe)
++{
++ return 0;
++}
++EXPORT_SYMBOL(cx24120_sleep);
++//===================================================================
++/*static int cx24120_wakeup(struct dvb_frontend *fe)
++ * {
++ * return 0;
++ * }
++ * EXPORT_SYMBOL(cx24120_wakeup);
++ */
++//===================================================================
++static int cx24120_get_frontend(struct dvb_frontend *fe)
++{
++ return 0;
++}
++EXPORT_SYMBOL(cx24120_get_frontend);
++//===================================================================
++static void cx24120_release(struct dvb_frontend *fe)
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++ dbginfo("Clear state structure\n");
++ kfree(state);
++}
++EXPORT_SYMBOL(cx24120_release);
++//===================================================================
++static int cx24120_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) // UNCORRECTED_BLOCKS
++{
++ struct cx24120_state *state = fe->demodulator_priv;
++
++ *ucblocks = (cx24120_readreg(state, CX24120_REG_UCB_H) << 8) |
++ cx24120_readreg(state, CX24120_REG_UCB_L);
++ dbginfo("Blocks = %d\n", *ucblocks);
++ return 0;
++}
++EXPORT_SYMBOL(cx24120_read_ucblocks);
++// ########################################################################################
++static struct dvb_frontend_ops cx24120_ops = {
++
++ .delsys = { SYS_DVBS2 },
++ .info = {
++ .name = "Conexant CX24120/CX24118",
++ .frequency_min = 950000,
++ .frequency_max = 2150000,
++ .frequency_stepsize = 1011, /* kHz for QPSK frontends */
++ .frequency_tolerance = 5000,
++ .symbol_rate_min = 1000000,
++ .symbol_rate_max = 45000000,
++ .caps = // 0x500006ff
++ FE_CAN_INVERSION_AUTO | //0x00 000 001
++ FE_CAN_FEC_1_2 | //0x00 000 002
++ FE_CAN_FEC_2_3 | //0x00 000 004
++ FE_CAN_FEC_3_4 | //0x00 000 008
++ FE_CAN_FEC_4_5 | //0x00 000 010
++ FE_CAN_FEC_5_6 | //0x00 000 020
++ FE_CAN_FEC_6_7 | //0x00 000 040
++ FE_CAN_FEC_7_8 | //0x00 000 080
++ FE_CAN_FEC_AUTO | //0x00 000 200
++ FE_CAN_QPSK | //0x00 000 400
++//??? FE_HAS_EXTENDED_CAPS | //0x00 800 000 /* We need more bitspace for newer APIs, indicate this. */
++ FE_CAN_2G_MODULATION | //0x10 000 000 /* frontend supports "2nd generation modulation" (DVB-S2) */
++ FE_CAN_RECOVER //0x40 000 000 /* frontend can recover from a cable unplug automatically */
++ }, //sum=50 000 6FF
++ .release = cx24120_release,
++
++ .init = cx24120_init,
++ .sleep = cx24120_sleep,
++
++ .tune = cx24120_tune,
++ .get_frontend_algo = cx24120_get_algo,
++ .set_frontend = cx24120_set_frontend,
++
++ .get_frontend = cx24120_get_frontend,
++ .read_status = cx24120_read_status,
++ .read_ber = cx24120_read_ber,
++ .read_signal_strength = cx24120_read_signal_strength,
++ .read_snr = cx24120_read_snr,
++ .read_ucblocks = cx24120_read_ucblocks,
++
++ .diseqc_send_master_cmd = cx24120_send_diseqc_msg,
++
++ .diseqc_send_burst = cx24120_diseqc_send_burst,
++ .set_tone = cx24120_set_tone,
++ .set_voltage = cx24120_set_voltage,
++};
++//===================================================================
++MODULE_PARM_DESC(cx24120_debug, "prints some verbose debugging information (default:0)");
++MODULE_AUTHOR("Sergey Tyurin");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/cx24120_const.h linux-openelec/drivers/media/dvb-frontends/cx24120_const.h
+--- linux-3.14.36/drivers/media/dvb-frontends/cx24120_const.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/cx24120_const.h 2015-07-24 18:03:30.360842002 -0500
+@@ -0,0 +1,259 @@
++/*
++ * Conexant CX24120/CX24118 - DVB-S/S2 demod/tuner driver
++ * DVBS/S2 Satellite demod/tuner driver static definitins
++ *
++ * Copyright (C) 2009 Sergey Tyurin <forum.free-x.de>
++ * Updated 2012 by Jannis Achstetter <jannis_achstetter@web.de>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#define CX24120_FIRMWARE "dvb-fe-cx24120-1.20.58.2.fw"
++
++// ##############################
++// ### cx24120 i2c registers ###
++#define CX24120_REG_CMD_START (0x00) // write cmd_id, and then start write args to next register:
++#define CX24120_REG_CMD_ARGS (0x01) // write command arguments, max 4 at once, then next 4, etc.
++#define CX24120_REG_CMD_END (0x1F) // write 0x01 for end, and read it for command result
++
++#define CX24120_REG_FECMODE (0x39) // FEC status
++#define CX24120_REG_STATUS (0x3A) // Tuner status - signal, carrier, sync, lock ...
++#define CX24120_REG_QUALITY_H (0x40) // SNR high byte
++#define CX24120_REG_QUALITY_L (0x41) // SNR low byte
++
++#define CX24120_REG_BER_HH (0x47) // BER high byte of high word
++#define CX24120_REG_BER_HL (0x48) // BER low byte of high word
++#define CX24120_REG_BER_LH (0x49) // BER high byte of low word
++#define CX24120_REG_BER_LL (0x4A) // BER low byte of low word
++
++#define CX24120_REG_SIGSTR_H (0x3A) // Signal strength high byte & ??? status register ???
++#define CX24120_REG_SIGSTR_L (0x3B) // Signal strength low byte
++
++#define CX24120_REG_UCB_H (0x50) // UCB high byte
++#define CX24120_REG_UCB_L (0x51) // UCB low byte
++
++#define CX24120_REG_REVISION (0xFF) // Chip revision (ro). Must be 0x7 or 0x5
++
++// ##############################
++/* Command messages */
++enum command_message_id {
++ CMD_VCO_SET = 0x10, // cmdlen = 12;
++ CMD_TUNEREQUEST = 0x11, // cmd.len = 15;
++
++ CMD_MPEG_ONOFF = 0x13, // cmd.len = 4;
++ CMD_MPEG_INIT = 0x14, // cmd.len = 7;
++ CMD_BANDWIDTH = 0x15, // cmd.len = 12;
++ CMD_CLOCK_READ = 0x16, // read clock from registers 0x01-0x06
++ CMD_CLOCK_SET = 0x17, // cmd.len = 10;
++
++ CMD_DISEQC_MSG1 = 0x20, // cmd.len = 11;
++ CMD_DISEQC_MSG2 = 0x21, // cmd.len = d->msg_len + 6;
++ CMD_SETVOLTAGE = 0x22, // cmd.len = 2;
++ CMD_SETTONE = 0x23, // cmd.len = 4;
++ CMD_DISEQC_BURST = 0x24, // cmd.len not used !!!
++
++ CMD_READ_SNR = 0x1A, // Read signal strength
++ CMD_START_TUNER = 0x1B, // ???
++
++ CMD_TUNER_INIT = 0x3C, // cmd.len = 0x03;
++};
++// ##############################
++/* signal status */
++#define CX24120_HAS_SIGNAL (0x01)
++#define CX24120_HAS_CARRIER (0x02)
++#define CX24120_HAS_VITERBI (0x04)
++#define CX24120_HAS_LOCK (0x08)
++#define CX24120_HAS_UNK1 (0x10)
++#define CX24120_HAS_UNK2 (0x20)
++#define CX24120_STATUS_MASK (0x0f)
++#define CX24120_SIGNAL_MASK (0xc0)
++
++static u8 cx24120_msg_tuner_init[] = { 0,0,0,0,0,0 };
++static u8 cx24120_msg_read_sigstr[] = {0,0};
++
++static struct cx24120_skystar2_mpeg_config {
++ u8 x1;
++ u8 x2;
++ u8 x3;
++} initial_mpeg_config = {
++ 0xA1, // 10100001
++ 0x76, // 01110110
++ 0x07, // 00000111
++};
++
++static struct cx24120_symrate_delay {
++ u32 symrate;
++ u32 delay;
++} symrates_pairs[] = {
++ { 3000000, 15000 },
++ { 6000000, 10000 },
++ { 8000000, 5000 },
++ { 10000000, 2000 },
++ {0x0FFFFFFFF, 400 },
++};
++
++static struct cx24120_clock_ratios_table {
++ u32 ratio_id;
++ u32 mode_xPSK;
++ u32 fec_mode;
++ u32 m_rat;
++ u32 n_rat;
++ u32 rate;
++} clock_ratios_table[] = {
++{ 21 , 0 , 1 , 770068 , 763515 , 258 },
++{ 21 , 0 , 100 , 97409 , 80370 , 310 },
++{ 21 , 0 , 2 , 137293 , 101802 , 345 },
++{ 21 , 0 , 3 , 4633447 , 3054060 , 388 },
++{ 21 , 0 , 4 , 2472041 , 1527030 , 414 },
++{ 21 , 0 , 5 , 85904 , 50901 , 432 },
++{ 21 , 0 , 8 , 2751229 , 1527030 , 461 },
++{ 21 , 0 , 101 , 1392872 , 763515 , 467 },
++{ 21 , 99 , 100 , 1850771 , 1019430 , 464 },
++{ 21 , 99 , 2 , 137293 , 67962 , 517 },
++{ 21 , 99 , 3 , 4633447 , 2038860 , 581 }, // was 4 - ERRORR! FEC_4_5 not in DVB-S2
++{ 21 , 99 , 5 , 85904 , 33981 , 647 },
++{ 21 , 99 , 8 , 2751229 , 1019430 , 690 },
++{ 21 , 99 , 101 , 1392872 , 509715 , 699 },
++{ 29 , 0 , 1 , 770068 , 782127 , 252 },
++{ 29 , 0 , 100 , 1850771 , 1564254 , 302 },
++{ 29 , 0 , 2 , 686465 , 521418 , 337 },
++{ 29 , 0 , 3 , 4633447 , 3128508 , 379 },
++{ 29 , 0 , 4 , 2472041 , 1564254 , 404 },
++{ 29 , 0 , 5 , 429520 , 260709 , 421 },
++{ 29 , 0 , 8 , 2751229 , 1564254 , 450 },
++{ 29 , 0 , 101 , 1392872 , 782127 , 455 },
++{ 29 , 99 , 100 , 1850771 , 1043118 , 454 },
++{ 29 , 99 , 2 , 686465 , 347706 , 505 },
++{ 29 , 99 , 3 , 4633447 , 2086236 , 568 }, // was 4 - ERRORR! FEC_4_5 not in DVB-S2
++{ 29 , 99 , 5 , 429520 , 173853 , 632 },
++{ 29 , 99 , 8 , 2751229 , 1043118 , 675 },
++{ 29 , 99 , 101 , 1392872 , 521559 , 683 },
++{ 17 , 0 , 1 , 766052 , 763515 , 256 },
++{ 17 , 0 , 100 , 96901 , 80370 , 308 },
++{ 17 , 0 , 2 , 136577 , 101802 , 343 },
++{ 17 , 0 , 3 , 4609283 , 3054060 , 386 },
++{ 17 , 0 , 4 , 2459149 , 1527030 , 412 },
++{ 17 , 0 , 5 , 85456 , 50901 , 429 },
++{ 17 , 0 , 8 , 2736881 , 1527030 , 458 },
++{ 17 , 0 , 101 , 1385608 , 763515 , 464 },
++{ 17 , 99 , 100 , 1841119 , 1019430 , 462 },
++{ 17 , 99 , 2 , 136577 , 67962 , 514 },
++{ 17 , 99 , 3 , 4609283 , 2038860 , 578 }, // was 4 - ERRORR! FEC_4_5 not in DVB-S2
++{ 17 , 99 , 5 , 85456 , 33981 , 643 },
++{ 17 , 99 , 8 , 2736881 , 1019430 , 687 },
++{ 17 , 99 , 101 , 1385608 , 509715 , 695 },
++{ 25 , 0 , 1 , 766052 , 782127 , 250 },
++{ 25 , 0 , 100 , 1841119 , 1564254 , 301 },
++{ 25 , 0 , 2 , 682885 , 521418 , 335 },
++{ 25 , 0 , 3 , 4609283 , 3128508 , 377 },
++{ 25 , 0 , 4 , 2459149 , 1564254 , 402 },
++{ 25 , 0 , 5 , 427280 , 260709 , 419 },
++{ 25 , 0 , 8 , 2736881 , 1564254 , 447 },
++{ 25 , 0 , 101 , 1385608 , 782127 , 453 },
++{ 25 , 99 , 100 , 1841119 , 1043118 , 451 },
++{ 25 , 99 , 2 , 682885 , 347706 , 502 },
++{ 25 , 99 , 3 , 4609283 , 2086236 , 565 }, // was 4 - ERRORR! FEC_4_5 not in DVB-S2
++{ 25 , 99 , 5 , 427280 , 173853 , 629 },
++{ 25 , 99 , 8 , 2736881 , 1043118 , 671 },
++{ 25 , 99 , 101 , 1385608 , 521559 , 680 },
++{ 5 , 0 , 1 , 273088 , 254505 , 274 },
++{ 5 , 0 , 100 , 17272 , 13395 , 330 },
++{ 5 , 0 , 2 , 24344 , 16967 , 367 },
++{ 5 , 0 , 3 , 410788 , 254505 , 413 },
++{ 5 , 0 , 4 , 438328 , 254505 , 440 },
++{ 5 , 0 , 5 , 30464 , 16967 , 459 },
++{ 5 , 0 , 8 , 487832 , 254505 , 490 },
++{ 5 , 0 , 101 , 493952 , 254505 , 496 },
++{ 5 , 99 , 100 , 328168 , 169905 , 494 },
++{ 5 , 99 , 2 , 24344 , 11327 , 550 }, // work for 0x0d - 11278V - DVB-S2 - 8PSK MPEG-4/HD
++{ 5 , 99 , 3 , 410788 , 169905 , 618 }, // 0x0e S2 8psk // was 4 - ERRORR! FEC_4_5 not in DVB-S2
++{ 5 , 99 , 5 , 30464 , 11327 , 688 },
++{ 5 , 99 , 8 , 487832 , 169905 , 735 },
++{ 5 , 99 , 101 , 493952 , 169905 , 744 },
++{ 13 , 0 , 1 , 273088 , 260709 , 268 },
++{ 13 , 0 , 100 , 328168 , 260709 , 322 },
++{ 13 , 0 , 2 , 121720 , 86903 , 358 },
++{ 13 , 0 , 3 , 410788 , 260709 , 403 },
++{ 13 , 0 , 4 , 438328 , 260709 , 430 },
++{ 13 , 0 , 5 , 152320 , 86903 , 448 },
++{ 13 , 0 , 8 , 487832 , 260709 , 479 },
++{ 13 , 0 , 101 , 493952 , 260709 , 485 },
++{ 13 , 99 , 100 , 328168 , 173853 , 483 },
++{ 13 , 99 , 2 , 121720 , 57951 , 537 }, // work for 0x8d - dvb-s2 8psk
++{ 13 , 99 , 3 , 410788 , 173853 , 604 }, // was 4 - ERRORR! FEC_4_5 not in DVB-S2
++{ 13 , 99 , 5 , 152320 , 57951 , 672 },
++{ 13 , 99 , 8 , 487832 , 173853 , 718 },
++{ 13 , 99 , 101 , 493952 , 173853 , 727 },
++{ 1 , 0 , 1 , 815248 , 763515 , 273 },
++{ 1 , 0 , 100 , 51562 , 40185 , 328 },
++{ 1 , 0 , 2 , 72674 , 50901 , 365 },
++{ 1 , 0 , 3 , 1226323 , 763515 , 411 },
++{ 1 , 0 , 4 , 1308538 , 763515 , 438 },
++{ 1 , 0 , 5 , 90944 , 50901 , 457 },
++{ 1 , 0 , 8 , 1456322 , 763515 , 488 },
++{ 1 , 0 , 101 , 1474592 , 763515 , 494 },
++{ 1 , 99 , 100 , 979678 , 509715 , 492 },
++{ 1 , 99 , 2 , 72674 , 33981 , 547 },
++{ 1 , 99 , 3 , 1226323 , 509715 , 615 }, // was 4 - ERRORR!? FEC_4_5 not in DVB-S2
++{ 1 , 99 , 5 , 90944 , 33981 , 685 },
++{ 1 , 99 , 8 , 1456322 , 509715 , 731 },
++{ 1 , 99 , 101 , 1474592 , 509715 , 740 },
++{ 9 , 0 , 1 , 815248 , 782127 , 266 },
++{ 9 , 0 , 100 , 979678 , 782127 , 320 },
++{ 9 , 0 , 2 , 363370 , 260709 , 356 },
++{ 9 , 0 , 3 , 1226323 , 782127 , 401 },
++{ 9 , 0 , 4 , 1308538 , 782127 , 428 },
++{ 9 , 0 , 5 , 454720 , 260709 , 446 },
++{ 9 , 0 , 8 , 1456322 , 782127 , 476 },
++{ 9 , 0 , 101 , 1474592 , 782127 , 482 },
++{ 9 , 99 , 100 , 979678 , 521559 , 480 },
++{ 9 , 99 , 2 , 363370 , 173853 , 535 },
++{ 9 , 99 , 3 , 1226323 , 521559 , 601 }, // was 4 - ERRORR! FEC_4_5 not in DVB-S2
++{ 9 , 99 , 5 , 454720 , 173853 , 669 },
++{ 9 , 99 , 8 , 1456322 , 521559 , 714 },
++{ 9 , 99 , 101 , 1474592 , 521559 , 723 },
++{ 18 , 0 , 1 , 535 , 588 , 233 },
++{ 18 , 0 , 2 , 1070 , 882 , 311 },
++{ 18 , 0 , 6 , 3210 , 2058 , 399 },
++{ 16 , 0 , 1 , 763 , 816 , 239 },
++{ 16 , 0 , 2 , 1526 , 1224 , 319 },
++{ 16 , 0 , 3 , 2289 , 1632 , 359 },
++{ 16 , 0 , 5 , 3815 , 2448 , 399 },
++{ 16 , 0 , 7 , 5341 , 3264 , 419 },
++{ 22 , 0 , 1 , 535 , 588 , 233 },
++{ 22 , 0 , 2 , 1070 , 882 , 311 },
++{ 22 , 0 , 6 , 3210 , 2058 , 399 },
++{ 20 , 0 , 1 , 143429 , 152592 , 241 },
++{ 20 , 0 , 2 , 286858 , 228888 , 321 },
++{ 20 , 0 , 3 , 430287 , 305184 , 361 },
++{ 20 , 0 , 5 , 717145 , 457776 , 401 },
++{ 20 , 0 , 7 , 1004003 , 610368 , 421 },
++{ 2 , 0 , 1 , 584 , 588 , 254 },
++{ 2 , 0 , 2 , 1169 , 882 , 339 },
++{ 2 , 0 , 6 , 3507 , 2058 , 436 },
++{ 0 , 0 , 1 , 812 , 816 , 255 },
++{ 0 , 0 , 2 , 1624 , 1224 , 340 },
++{ 0 , 0 , 3 , 2436 , 1632 , 382 },
++{ 0 , 0 , 5 , 4060 , 2448 , 425 },
++{ 0 , 0 , 7 , 5684 , 3264 , 446 },
++{ 6 , 0 , 1 , 584 , 588 , 254 },
++{ 6 , 0 , 2 , 1168 , 882 , 339 },
++{ 6 , 0 , 6 , 3504 , 2058 , 436 },
++{ 4 , 0 , 1 , 152592 , 152592 , 256 },
++{ 4 , 0 , 2 , 305184 , 228888 , 341 },
++{ 4 , 0 , 3 , 457776 , 305184 , 384 },
++{ 4 , 0 , 5 , 762960 , 457776 , 427 },
++{ 4 , 0 , 7 , 1068144 , 610368 , 448 },
++};
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/cx24120.h linux-openelec/drivers/media/dvb-frontends/cx24120.h
+--- linux-3.14.36/drivers/media/dvb-frontends/cx24120.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/cx24120.h 2015-07-24 18:03:30.360842002 -0500
+@@ -0,0 +1,59 @@
++/*
++ * Conexant CX24120/CX24118 - DVB-S/S2 demod/tuner driver
++ *
++ * Copyright (C) 2008 Patrick Boettcher <pb@linuxtv.org>
++ * Copyright (C) 2009 Sergey Tyurin <forum.free-x.de>
++ * Updated 2012 by Jannis Achstetter <jannis_achstetter@web.de>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef CX24120_H
++#define CX24120_H
++
++#include <linux/dvb/frontend.h>
++
++struct firmware;
++struct dvb_frontend;
++struct i2c_adapter;
++
++struct cx24120_config
++{
++ u8 i2c_addr;
++ int (*request_firmware)(struct dvb_frontend *fe, const struct firmware **fw, char *name);
++ void (*stream_control)(struct dvb_frontend *fe, u8 onoff);
++};
++
++#if defined(CONFIG_DVB_CX24120) || \
++ (defined(CONFIG_DVB_CX24120_MODULE) && defined(MODULE))
++extern struct dvb_frontend *cx24120_attach(const struct cx24120_config *config,
++ struct i2c_adapter *i2c);
++extern int cx24120_reset(struct dvb_frontend *fe);
++#else
++static inline
++struct dvb_frontend *cx24120_attach(const struct cx24120_config *config,
++ struct i2c_adapter *i2c)
++{
++ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
++ return NULL;
++}
++static inline int cx24120_reset(struct dvb_frontend *fe)
++{
++ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
++ return -ENODEV;
++}
++#endif
++
++#endif
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/cxd2820r_c.c linux-openelec/drivers/media/dvb-frontends/cxd2820r_c.c
+--- linux-3.14.36/drivers/media/dvb-frontends/cxd2820r_c.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/cxd2820r_c.c 2015-07-24 18:03:30.140842002 -0500
+@@ -45,6 +45,7 @@
+ { 0x1008b, 0x07, 0xff },
+ { 0x1001f, priv->cfg.if_agc_polarity << 7, 0x80 },
+ { 0x10070, priv->cfg.ts_mode, 0xff },
++ { 0x10071, !priv->cfg.ts_clock_inv << 4, 0x10 },
+ };
+
+ dev_dbg(&priv->i2c->dev, "%s: frequency=%d symbol_rate=%d\n", __func__,
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/cxd2820r.h linux-openelec/drivers/media/dvb-frontends/cxd2820r.h
+--- linux-3.14.36/drivers/media/dvb-frontends/cxd2820r.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/cxd2820r.h 2015-07-24 18:03:30.140842002 -0500
+@@ -52,6 +52,12 @@
+ */
+ u8 ts_mode;
+
++ /* TS clock inverted.
++ * Default: 0
++ * Values: 0, 1
++ */
++ bool ts_clock_inv;
++
+ /* IF AGC polarity.
+ * Default: 0
+ * Values: 0, 1
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/cxd2820r_t2.c linux-openelec/drivers/media/dvb-frontends/cxd2820r_t2.c
+--- linux-3.14.36/drivers/media/dvb-frontends/cxd2820r_t2.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/cxd2820r_t2.c 2015-07-24 18:03:30.140842002 -0500
+@@ -47,6 +47,7 @@
+ { 0x02083, 0x0a, 0xff },
+ { 0x020cb, priv->cfg.if_agc_polarity << 6, 0x40 },
+ { 0x02070, priv->cfg.ts_mode, 0xff },
++ { 0x02071, !priv->cfg.ts_clock_inv << 6, 0x40 },
+ { 0x020b5, priv->cfg.spec_inv << 4, 0x10 },
+ { 0x02567, 0x07, 0x0f },
+ { 0x02569, 0x03, 0x03 },
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/cxd2820r_t.c linux-openelec/drivers/media/dvb-frontends/cxd2820r_t.c
+--- linux-3.14.36/drivers/media/dvb-frontends/cxd2820r_t.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/cxd2820r_t.c 2015-07-24 18:03:30.140842002 -0500
+@@ -46,6 +46,7 @@
+ { 0x00088, 0x01, 0xff },
+
+ { 0x00070, priv->cfg.ts_mode, 0xff },
++ { 0x00071, !priv->cfg.ts_clock_inv << 4, 0x10 },
+ { 0x000cb, priv->cfg.if_agc_polarity << 6, 0x40 },
+ { 0x000a5, 0x00, 0x01 },
+ { 0x00082, 0x20, 0x60 },
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88dc2800.c linux-openelec/drivers/media/dvb-frontends/dvbsky_m88dc2800.c
+--- linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88dc2800.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/dvbsky_m88dc2800.c 2015-07-24 18:03:30.116842002 -0500
+@@ -0,0 +1,2124 @@
++/*
++ M88DC2800/M88TC2800 - DVB-C demodulator and tuner from Montage
++
++ Copyright (C) 2012 Max nibble<nibble.max@gmail.com>
++ Copyright (C) 2011 Montage Technology / www.montage-tech.com
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*/
++
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <asm/div64.h>
++#include "dvb_frontend.h"
++#include "dvbsky_m88dc2800.h"
++
++struct dvbsky_m88dc2800_state {
++ struct i2c_adapter *i2c;
++ const struct dvbsky_m88dc2800_config *config;
++ struct dvb_frontend frontend;
++ u32 freq;
++ u32 ber;
++ u32 sym;
++ u16 qam;
++ u8 inverted;
++ u32 xtal;
++ /* tuner state */
++ u8 tuner_init_OK; /* Tuner initialize status */
++ u8 tuner_dev_addr; /* Tuner device address */
++ u32 tuner_freq; /* RF frequency to be set, unit: KHz */
++ u16 tuner_qam; /* Reserved */
++ u16 tuner_mode;
++ u8 tuner_bandwidth; /* Bandwidth of the channel, unit: MHz, 6/7/8 */
++ u8 tuner_loopthrough; /* Tuner loop through switch, 0/1 */
++ u32 tuner_crystal; /* Tuner crystal frequency, unit: KHz */
++ u32 tuner_dac; /* Tuner DAC frequency, unit: KHz */
++ u16 tuner_mtt; /* Tuner chip version, D1: 0x0d, E0: 0x0e, E1: 0x8e */
++ u16 tuner_custom_cfg;
++ u32 tuner_version; /* Tuner driver version number */
++ u32 tuner_time;
++};
++
++static int debug;
++module_param(debug, int, 0644);
++MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
++
++#define dprintk(args...) \
++ do { \
++ if (debug) \
++ printk(KERN_INFO "m88dc2800: " args); \
++ } while (0)
++
++
++static int dvbsky_m88dc2800_i2c_write(struct dvbsky_m88dc2800_state *state, u8 addr,
++ u8 * p_data, u8 len)
++{
++ struct i2c_msg msg = { .flags = 0 };
++
++ msg.addr = addr;
++ msg.buf = p_data;
++ msg.len = len;
++
++ return i2c_transfer(state->i2c, &msg, 1);
++}
++
++static int dvbsky_m88dc2800_i2c_read(struct dvbsky_m88dc2800_state *state, u8 addr,
++ u8 * p_data, u8 len)
++{
++ struct i2c_msg msg = { .flags = I2C_M_RD };
++
++ msg.addr = addr;
++ msg.buf = p_data;
++ msg.len = len;
++
++ return i2c_transfer(state->i2c, &msg, 1);
++}
++
++/*demod register operations.*/
++static int WriteReg(struct dvbsky_m88dc2800_state *state, u8 reg, u8 data)
++{
++ u8 buf[] = { reg, data };
++ u8 addr = state->config->demod_address;
++ int err;
++
++ dprintk("%s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data);
++
++ err = dvbsky_m88dc2800_i2c_write(state, addr, buf, 2);
++
++ if (err != 1) {
++ printk(KERN_ERR
++ "%s: writereg error(err == %i, reg == 0x%02x,"
++ " value == 0x%02x)\n", __func__, err, reg, data);
++ return -EIO;
++ }
++ return 0;
++}
++
++static int ReadReg(struct dvbsky_m88dc2800_state *state, u8 reg)
++{
++ int ret;
++ u8 b0[] = { reg };
++ u8 b1[] = { 0 };
++ u8 addr = state->config->demod_address;
++
++ ret = dvbsky_m88dc2800_i2c_write(state, addr, b0, 1);
++
++ if (ret != 1) {
++ printk(KERN_ERR "%s: reg=0x%x (error=%d)\n",
++ __func__, reg, ret);
++ return -EIO;
++ }
++
++ ret = dvbsky_m88dc2800_i2c_read(state, addr, b1, 1);
++
++ dprintk("%s: read reg 0x%02x, value 0x%02x\n", __func__, reg, b1[0]);
++ return b1[0];
++}
++
++static int _mt_fe_tn_set_reg(struct dvbsky_m88dc2800_state *state, u8 reg,
++ u8 data)
++{
++ int ret;
++ u8 buf[2];
++ u8 addr = state->tuner_dev_addr;
++
++ buf[1] = ReadReg(state, 0x86);
++ buf[1] |= 0x80;
++ ret = WriteReg(state, 0x86, buf[1]);
++
++ buf[0] = reg;
++ buf[1] = data;
++
++ ret = dvbsky_m88dc2800_i2c_write(state, addr, buf, 2);
++ if (ret != 1)
++ return -EIO;
++ return 0;
++}
++
++static int _mt_fe_tn_get_reg(struct dvbsky_m88dc2800_state *state, u8 reg,
++ u8 * p_data)
++{
++ int ret;
++ u8 buf[2];
++ u8 addr = state->tuner_dev_addr;
++
++ buf[1] = ReadReg(state, 0x86);
++ buf[1] |= 0x80;
++ ret = WriteReg(state, 0x86, buf[1]);
++
++ buf[0] = reg;
++ ret = dvbsky_m88dc2800_i2c_write(state, addr, buf, 1);
++
++ msleep(1);
++
++ buf[1] = ReadReg(state, 0x86);
++ buf[1] |= 0x80;
++ ret = WriteReg(state, 0x86, buf[1]);
++
++ return dvbsky_m88dc2800_i2c_read(state, addr, p_data, 1);
++}
++
++/* Tuner operation functions.*/
++static int _mt_fe_tn_set_RF_front_tc2800(struct dvbsky_m88dc2800_state *state)
++{
++ u32 freq_KHz = state->tuner_freq;
++ u8 a, b, c;
++ if (state->tuner_mtt == 0xD1) { /* D1 */
++ if (freq_KHz <= 123000) {
++ if (freq_KHz <= 56000) {
++ a = 0x00; b = 0x00; c = 0x00;
++ } else if (freq_KHz <= 64000) {
++ a = 0x10; b = 0x01; c = 0x08;
++ } else if (freq_KHz <= 72000) {
++ a = 0x20; b = 0x02; c = 0x10;
++ } else if (freq_KHz <= 80000) {
++ a = 0x30; b = 0x03; c = 0x18;
++ } else if (freq_KHz <= 88000) {
++ a = 0x40; b = 0x04; c = 0x20;
++ } else if (freq_KHz <= 96000) {
++ a = 0x50; b = 0x05; c = 0x28;
++ } else if (freq_KHz <= 104000) {
++ a = 0x60; b = 0x06; c = 0x30;
++ } else {
++ a = 0x70; b = 0x07; c = 0x38;
++ }
++ _mt_fe_tn_set_reg(state, 0x58, 0x9b);
++ _mt_fe_tn_set_reg(state, 0x59, a);
++ _mt_fe_tn_set_reg(state, 0x5d, b);
++ _mt_fe_tn_set_reg(state, 0x5e, c);
++ _mt_fe_tn_set_reg(state, 0x5a, 0x75);
++ _mt_fe_tn_set_reg(state, 0x73, 0x0c);
++ } else { /* if (freq_KHz > 112000) */
++ _mt_fe_tn_set_reg(state, 0x58, 0x7b);
++ if (freq_KHz <= 304000) {
++ if (freq_KHz <= 136000) {
++ _mt_fe_tn_set_reg(state, 0x5e, 0x40);
++ } else if (freq_KHz <= 160000) {
++ _mt_fe_tn_set_reg(state, 0x5e, 0x48);
++ } else if (freq_KHz <= 184000) {
++ _mt_fe_tn_set_reg(state, 0x5e, 0x50);
++ } else if (freq_KHz <= 208000) {
++ _mt_fe_tn_set_reg(state, 0x5e, 0x58);
++ } else if (freq_KHz <= 232000) {
++ _mt_fe_tn_set_reg(state, 0x5e, 0x60);
++ } else if (freq_KHz <= 256000) {
++ _mt_fe_tn_set_reg(state, 0x5e, 0x68);
++ } else if (freq_KHz <= 280000) {
++ _mt_fe_tn_set_reg(state, 0x5e, 0x70);
++ } else { /* if (freq_KHz <= 304000) */
++ _mt_fe_tn_set_reg(state, 0x5e, 0x78);
++ }
++ if (freq_KHz <= 171000) {
++ _mt_fe_tn_set_reg(state, 0x73, 0x08);
++ } else if (freq_KHz <= 211000) {
++ _mt_fe_tn_set_reg(state, 0x73, 0x0a);
++ } else {
++ _mt_fe_tn_set_reg(state, 0x73, 0x0e);
++ }
++ } else { /* if (freq_KHz > 304000) */
++ _mt_fe_tn_set_reg(state, 0x5e, 0x88);
++ if (freq_KHz <= 400000) {
++ _mt_fe_tn_set_reg(state, 0x73, 0x0c);
++ } else if (freq_KHz <= 450000) {
++ _mt_fe_tn_set_reg(state, 0x73, 0x09);
++ } else if (freq_KHz <= 550000) {
++ _mt_fe_tn_set_reg(state, 0x73, 0x0e);
++ } else if (freq_KHz <= 650000) {
++ _mt_fe_tn_set_reg(state, 0x73, 0x0d);
++ } else { /*if (freq_KHz > 650000) */
++ _mt_fe_tn_set_reg(state, 0x73, 0x0e);
++ }
++ }
++ }
++ if (freq_KHz > 800000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x24);
++ else if (freq_KHz > 700000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x34);
++ else if (freq_KHz > 500000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x44);
++ else if (freq_KHz > 300000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x43);
++ else if (freq_KHz > 220000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x54);
++ else if (freq_KHz > 110000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x14);
++ else
++ _mt_fe_tn_set_reg(state, 0x87, 0x54);
++ if (freq_KHz > 600000)
++ _mt_fe_tn_set_reg(state, 0x6a, 0x53);
++ else if (freq_KHz > 500000)
++ _mt_fe_tn_set_reg(state, 0x6a, 0x57);
++ else
++ _mt_fe_tn_set_reg(state, 0x6a, 0x59);
++ if (freq_KHz < 200000) {
++ _mt_fe_tn_set_reg(state, 0x20, 0x5d);
++ } else if (freq_KHz < 500000) {
++ _mt_fe_tn_set_reg(state, 0x20, 0x7d);
++ } else {
++ _mt_fe_tn_set_reg(state, 0x20, 0xfd);
++ } /* end of 0xD1 */
++ } else if (state->tuner_mtt == 0xE1) { /* E1 */
++ if (freq_KHz <= 112000) { /* 123MHz */
++ if (freq_KHz <= 56000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x01);
++ } else if (freq_KHz <= 64000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x09);
++ } else if (freq_KHz <= 72000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x11);
++ } else if (freq_KHz <= 80000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x19);
++ } else if (freq_KHz <= 88000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x21);
++ } else if (freq_KHz <= 96000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x29);
++ } else if (freq_KHz <= 104000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x31);
++ } else { /* if (freq_KHz <= 112000) */
++ _mt_fe_tn_set_reg(state, 0x5c, 0x39);
++ }
++ _mt_fe_tn_set_reg(state, 0x5b, 0x30);
++ } else { /* if (freq_KHz > 112000) */
++ if (freq_KHz <= 304000) {
++ if (freq_KHz <= 136000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x41);
++ } else if (freq_KHz <= 160000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x49);
++ } else if (freq_KHz <= 184000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x51);
++ } else if (freq_KHz <= 208000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x59);
++ } else if (freq_KHz <= 232000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x61);
++ } else if (freq_KHz <= 256000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x69);
++ } else if (freq_KHz <= 280000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x71);
++ } else { /* if (freq_KHz <= 304000) */
++ _mt_fe_tn_set_reg(state, 0x5c, 0x79);
++ }
++ if (freq_KHz <= 150000) {
++ _mt_fe_tn_set_reg(state, 0x5b, 0x28);
++ } else if (freq_KHz <= 256000) {
++ _mt_fe_tn_set_reg(state, 0x5b, 0x29);
++ } else {
++ _mt_fe_tn_set_reg(state, 0x5b, 0x2a);
++ }
++ } else { /* if (freq_KHz > 304000) */
++ if (freq_KHz <= 400000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x89);
++ } else if (freq_KHz <= 450000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x91);
++ } else if (freq_KHz <= 650000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0x98);
++ } else if (freq_KHz <= 850000) {
++ _mt_fe_tn_set_reg(state, 0x5c, 0xa0);
++ } else {
++ _mt_fe_tn_set_reg(state, 0x5c, 0xa8);
++ }
++ _mt_fe_tn_set_reg(state, 0x5b, 0x08);
++ }
++ }
++ } /* end of 0xE1 */
++ return 0;
++}
++
++static int _mt_fe_tn_cali_PLL_tc2800(struct dvbsky_m88dc2800_state *state,
++ u32 freq_KHz,
++ u32 cali_freq_thres_div2,
++ u32 cali_freq_thres_div3r,
++ u32 cali_freq_thres_div3)
++{
++ s32 N, F, MUL;
++ u8 buf, tmp, tmp2;
++ s32 M;
++ const s32 crystal_KHz = state->tuner_crystal;
++ if (state->tuner_mtt == 0xD1) {
++ M = state->tuner_crystal / 4000;
++ if (freq_KHz > cali_freq_thres_div2) {
++ MUL = 4;
++ tmp = 2;
++ } else if (freq_KHz > 300000) {
++ MUL = 8;
++ tmp = 3;
++ } else if (freq_KHz > (cali_freq_thres_div2 / 2)) {
++ MUL = 8;
++ tmp = 4;
++ } else if (freq_KHz > (cali_freq_thres_div2 / 4)) {
++ MUL = 16;
++ tmp = 5;
++ } else if (freq_KHz > (cali_freq_thres_div2 / 8)) {
++ MUL = 32;
++ tmp = 6;
++ } else if (freq_KHz > (cali_freq_thres_div2 / 16)) {
++ MUL = 64;
++ tmp = 7;
++ } else { /* invalid */
++ MUL = 0;
++ tmp = 0;
++ return 1;
++ }
++ } else if (state->tuner_mtt == 0xE1) {
++ M = state->tuner_crystal / 1000;
++ _mt_fe_tn_set_reg(state, 0x30, 0xff);
++ _mt_fe_tn_set_reg(state, 0x32, 0xe0);
++ _mt_fe_tn_set_reg(state, 0x33, 0x86);
++ _mt_fe_tn_set_reg(state, 0x37, 0x70);
++ _mt_fe_tn_set_reg(state, 0x38, 0x20);
++ _mt_fe_tn_set_reg(state, 0x39, 0x18);
++ _mt_fe_tn_set_reg(state, 0x89, 0x83);
++ if (freq_KHz > cali_freq_thres_div2) {
++ M = M / 4;
++ MUL = 4;
++ tmp = 2;
++ tmp2 = M + 16; /* 48 */
++ } else if (freq_KHz > cali_freq_thres_div3r) {
++ M = M / 3;
++ MUL = 6;
++ tmp = 2;
++ tmp2 = M + 32; /* 32 */
++ } else if (freq_KHz > cali_freq_thres_div3) {
++ M = M / 3;
++ MUL = 6;
++ tmp = 2;
++ tmp2 = M; /* 16 */
++ } else if (freq_KHz > 304000) {
++ M = M / 4;
++ MUL = 8;
++ tmp = 3;
++ tmp2 = M + 16; /* 48 */
++ } else if (freq_KHz > (cali_freq_thres_div2 / 2)) {
++ M = M / 4;
++ MUL = 8;
++ tmp = 4;
++ tmp2 = M + 16; /* 48 */
++ } else if (freq_KHz > (cali_freq_thres_div3r / 2)) {
++ M = M / 3;
++ MUL = 12;
++ tmp = 4;
++ tmp2 = M + 32; /* 32 */
++ } else if (freq_KHz > (cali_freq_thres_div3 / 2)) {
++ M = M / 3;
++ MUL = 12;
++ tmp = 4;
++ tmp2 = M; /* 16 */
++ } else if (freq_KHz > (cali_freq_thres_div2 / 4)) {
++ M = M / 4;
++ MUL = 16;
++ tmp = 5;
++ tmp2 = M + 16; /* 48 */
++ } else if (freq_KHz > (cali_freq_thres_div3r / 4)) {
++ M = M / 3;
++ MUL = 24;
++ tmp = 5;
++ tmp2 = M + 32; /* 32 */
++ } else if (freq_KHz > (cali_freq_thres_div3 / 4)) {
++ M = M / 3;
++ MUL = 24;
++ tmp = 5;
++ tmp2 = M; /* 16 */
++ } else if (freq_KHz > (cali_freq_thres_div2 / 8)) {
++ M = M / 4;
++ MUL = 32;
++ tmp = 6;
++ tmp2 = M + 16; /* 48 */
++ } else if (freq_KHz > (cali_freq_thres_div3r / 8)) {
++ M = M / 3;
++ MUL = 48;
++ tmp = 6;
++ tmp2 = M + 32; /* 32 */
++ } else if (freq_KHz > (cali_freq_thres_div3 / 8)) {
++ M = M / 3;
++ MUL = 48;
++ tmp = 6;
++ tmp2 = M; /* 16 */
++ } else if (freq_KHz > (cali_freq_thres_div2 / 16)) {
++ M = M / 4;
++ MUL = 64;
++ tmp = 7;
++ tmp2 = M + 16; /* 48 */
++ } else if (freq_KHz > (cali_freq_thres_div3r / 16)) {
++ M = M / 3;
++ MUL = 96;
++ tmp = 7;
++ tmp2 = M + 32; /* 32 */
++ } else if (freq_KHz > (cali_freq_thres_div3 / 16)) {
++ M = M / 3;
++ MUL = 96;
++ tmp = 7;
++ tmp2 = M; /* 16 */
++ } else { /* invalid */
++ M = M / 4;
++ MUL = 0;
++ tmp = 0;
++ tmp2 = 48;
++ return 1;
++ }
++ if (freq_KHz == 291000) {
++ M = state->tuner_crystal / 1000 / 3;
++ MUL = 12;
++ tmp = 4;
++ tmp2 = M + 32; /* 32 */
++ }
++ /*
++ if (freq_KHz == 578000) {
++ M = state->tuner_crystal / 1000 / 4;
++ MUL = 4;
++ tmp = 2;
++ tmp2 = M + 16; // 48
++ }
++ */
++ if (freq_KHz == 690000) {
++ M = state->tuner_crystal / 1000 / 3;
++ MUL = 4;
++ tmp = 2;
++ tmp2 = M + 16; /* 48 */
++ }
++ _mt_fe_tn_get_reg(state, 0x33, &buf);
++ buf &= 0xc0;
++ buf += tmp2;
++ _mt_fe_tn_set_reg(state, 0x33, buf);
++ } else {
++ return 1;
++ }
++ _mt_fe_tn_get_reg(state, 0x39, &buf);
++ buf &= 0xf8;
++ buf += tmp;
++ _mt_fe_tn_set_reg(state, 0x39, buf);
++ N = (freq_KHz * MUL * M / crystal_KHz) / 2 * 2 - 256;
++ buf = (N >> 8) & 0xcf;
++ if (state->tuner_mtt == 0xE1) {
++ buf |= 0x30;
++ }
++ _mt_fe_tn_set_reg(state, 0x34, buf);
++ buf = N & 0xff;
++ _mt_fe_tn_set_reg(state, 0x35, buf);
++ F = ((freq_KHz * MUL * M / (crystal_KHz / 1000) / 2) -
++ (freq_KHz * MUL * M / crystal_KHz / 2 * 1000)) * 64 / 1000;
++ buf = F & 0xff;
++ _mt_fe_tn_set_reg(state, 0x36, buf);
++ if (F == 0) {
++ if (state->tuner_mtt == 0xD1) {
++ _mt_fe_tn_set_reg(state, 0x3d, 0xca);
++ } else if (state->tuner_mtt == 0xE1) {
++ _mt_fe_tn_set_reg(state, 0x3d, 0xfe);
++ } else {
++ return 1;
++ }
++ _mt_fe_tn_set_reg(state, 0x3e, 0x9c);
++ _mt_fe_tn_set_reg(state, 0x3f, 0x34);
++ }
++ if (F > 0) {
++ if (state->tuner_mtt == 0xD1) {
++ if ((F == 32) || (F == 16) || (F == 48)) {
++ _mt_fe_tn_set_reg(state, 0x3e, 0xa4);
++ _mt_fe_tn_set_reg(state, 0x3d, 0x4a);
++ _mt_fe_tn_set_reg(state, 0x3f, 0x36);
++ } else {
++ _mt_fe_tn_set_reg(state, 0x3e, 0xa4);
++ _mt_fe_tn_set_reg(state, 0x3d, 0x4a);
++ _mt_fe_tn_set_reg(state, 0x3f, 0x36);
++ }
++ } else if (state->tuner_mtt == 0xE1) {
++ _mt_fe_tn_set_reg(state, 0x3e, 0xa4);
++ _mt_fe_tn_set_reg(state, 0x3d, 0x7e);
++ _mt_fe_tn_set_reg(state, 0x3f, 0x36);
++ _mt_fe_tn_set_reg(state, 0x89, 0x84);
++ _mt_fe_tn_get_reg(state, 0x39, &buf);
++ buf = buf & 0x1f;
++ _mt_fe_tn_set_reg(state, 0x39, buf);
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = buf | 0x02;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ } else {
++ return 1;
++ }
++ }
++ _mt_fe_tn_set_reg(state, 0x41, 0x00);
++ if (state->tuner_mtt == 0xD1) {
++ msleep(5);
++ } else if (state->tuner_mtt == 0xE1) {
++ msleep(2);
++ } else {
++ return 1;
++ }
++ _mt_fe_tn_set_reg(state, 0x41, 0x02);
++ _mt_fe_tn_set_reg(state, 0x30, 0x7f);
++ _mt_fe_tn_set_reg(state, 0x30, 0xff);
++ _mt_fe_tn_set_reg(state, 0x31, 0x80);
++ _mt_fe_tn_set_reg(state, 0x31, 0x00);
++
++ return 0;
++}
++
++static int _mt_fe_tn_set_PLL_freq_tc2800(struct dvbsky_m88dc2800_state *state)
++{
++ u8 buf, buf1;
++ u32 freq_thres_div2_KHz, freq_thres_div3r_KHz,
++ freq_thres_div3_KHz;
++ const u32 freq_KHz = state->tuner_freq;
++ if (state->tuner_mtt == 0xD1) {
++ _mt_fe_tn_set_reg(state, 0x32, 0xe1);
++ _mt_fe_tn_set_reg(state, 0x33, 0xa6);
++ _mt_fe_tn_set_reg(state, 0x37, 0x7f);
++ _mt_fe_tn_set_reg(state, 0x38, 0x20);
++ _mt_fe_tn_set_reg(state, 0x39, 0x18);
++ _mt_fe_tn_set_reg(state, 0x40, 0x40);
++ freq_thres_div2_KHz = 520000;
++ _mt_fe_tn_cali_PLL_tc2800(state, freq_KHz,
++ freq_thres_div2_KHz, 0, 0);
++ msleep(5);
++ _mt_fe_tn_get_reg(state, 0x3a, &buf);
++ buf1 = buf;
++ buf = buf & 0x03;
++ buf1 = buf1 & 0x01;
++ if ((buf1 == 0) || (buf == 3)) {
++ freq_thres_div2_KHz = 420000;
++ _mt_fe_tn_cali_PLL_tc2800(state, freq_KHz,
++ freq_thres_div2_KHz, 0,
++ 0);
++ msleep(5);
++ _mt_fe_tn_get_reg(state, 0x3a, &buf);
++ buf = buf & 0x07;
++ if (buf == 5) {
++ freq_thres_div2_KHz = 520000;
++ _mt_fe_tn_cali_PLL_tc2800(state, freq_KHz,
++ freq_thres_div2_KHz,
++ 0, 0);
++ msleep(5);
++ }
++ }
++ _mt_fe_tn_get_reg(state, 0x38, &buf);
++ _mt_fe_tn_set_reg(state, 0x38, buf);
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = buf | 0x10;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ _mt_fe_tn_set_reg(state, 0x30, 0x7f);
++ _mt_fe_tn_set_reg(state, 0x30, 0xff);
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = buf & 0xdf;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ _mt_fe_tn_set_reg(state, 0x40, 0x0);
++ _mt_fe_tn_set_reg(state, 0x30, 0x7f);
++ _mt_fe_tn_set_reg(state, 0x30, 0xff);
++ _mt_fe_tn_set_reg(state, 0x31, 0x80);
++ _mt_fe_tn_set_reg(state, 0x31, 0x00);
++ msleep(5);
++ _mt_fe_tn_get_reg(state, 0x39, &buf);
++ buf = buf >> 5;
++ if (buf < 5) {
++ _mt_fe_tn_get_reg(state, 0x39, &buf);
++ buf = buf | 0xa0;
++ buf = buf & 0xbf;
++ _mt_fe_tn_set_reg(state, 0x39, buf);
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = buf | 0x02;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ }
++ _mt_fe_tn_get_reg(state, 0x37, &buf);
++ if (buf > 0x70) {
++ buf = 0x7f;
++ _mt_fe_tn_set_reg(state, 0x40, 0x40);
++ }
++ _mt_fe_tn_set_reg(state, 0x37, buf);
++ _mt_fe_tn_get_reg(state, 0x38, &buf);
++ if (buf < 0x0f) {
++ buf = (buf & 0x0f) << 2;
++ buf = buf + 0x0f;
++ _mt_fe_tn_set_reg(state, 0x37, buf);
++ } else if (buf < 0x1f) {
++ buf = buf + 0x0f;
++ _mt_fe_tn_set_reg(state, 0x37, buf);
++ }
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = (buf | 0x20) & 0xef;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ _mt_fe_tn_set_reg(state, 0x41, 0x00);
++ msleep(5);
++ _mt_fe_tn_set_reg(state, 0x41, 0x02);
++ } else if (state->tuner_mtt == 0xE1) {
++ freq_thres_div2_KHz = 580000;
++ freq_thres_div3r_KHz = 500000;
++ freq_thres_div3_KHz = 440000;
++ _mt_fe_tn_cali_PLL_tc2800(state, freq_KHz,
++ freq_thres_div2_KHz,
++ freq_thres_div3r_KHz,
++ freq_thres_div3_KHz);
++ msleep(3);
++ _mt_fe_tn_get_reg(state, 0x38, &buf);
++ _mt_fe_tn_set_reg(state, 0x38, buf);
++ _mt_fe_tn_set_reg(state, 0x30, 0x7f);
++ _mt_fe_tn_set_reg(state, 0x30, 0xff);
++ _mt_fe_tn_set_reg(state, 0x31, 0x80);
++ _mt_fe_tn_set_reg(state, 0x31, 0x00);
++ msleep(3);
++ _mt_fe_tn_get_reg(state, 0x38, &buf);
++ _mt_fe_tn_set_reg(state, 0x38, buf);
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = buf | 0x10;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ _mt_fe_tn_set_reg(state, 0x30, 0x7f);
++ _mt_fe_tn_set_reg(state, 0x30, 0xff);
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = buf & 0xdf;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ _mt_fe_tn_set_reg(state, 0x31, 0x80);
++ _mt_fe_tn_set_reg(state, 0x31, 0x00);
++ msleep(3);
++ _mt_fe_tn_get_reg(state, 0x37, &buf);
++ _mt_fe_tn_set_reg(state, 0x37, buf);
++ /*
++ if ((freq_KHz == 802000) || (freq_KHz == 826000)) {
++ _mt_fe_tn_set_reg(state, 0x37, 0x5e);
++ }
++ */
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = (buf & 0xef) | 0x30;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ _mt_fe_tn_set_reg(state, 0x41, 0x00);
++ msleep(2);
++ _mt_fe_tn_set_reg(state, 0x41, 0x02);
++ } else {
++ return 1;
++ }
++ return 0;
++}
++
++static int _mt_fe_tn_set_BB_tc2800(struct dvbsky_m88dc2800_state *state)
++{
++ return 0;
++}
++
++ static int _mt_fe_tn_set_appendix_tc2800(struct dvbsky_m88dc2800_state *state)
++
++{
++ u8 buf;
++ const u32 freq_KHz = state->tuner_freq;
++ if (state->tuner_mtt == 0xD1) {
++ if ((freq_KHz == 123000) || (freq_KHz == 147000) ||
++ (freq_KHz == 171000) || (freq_KHz == 195000)) {
++ _mt_fe_tn_set_reg(state, 0x20, 0x1b);
++ }
++ if ((freq_KHz == 371000) || (freq_KHz == 419000) ||
++ (freq_KHz == 610000) || (freq_KHz == 730000) ||
++ (freq_KHz == 754000) || (freq_KHz == 826000)) {
++ _mt_fe_tn_get_reg(state, 0x0d, &buf);
++ _mt_fe_tn_set_reg(state, 0x0d, (u8) (buf + 1));
++ }
++ if ((freq_KHz == 522000) || (freq_KHz == 578000) ||
++ (freq_KHz == 634000) || (freq_KHz == 690000) ||
++ (freq_KHz == 834000)) {
++ _mt_fe_tn_get_reg(state, 0x0d, &buf);
++ _mt_fe_tn_set_reg(state, 0x0d, (u8) (buf - 1));
++ }
++ } else if (state->tuner_mtt == 0xE1) {
++ _mt_fe_tn_set_reg(state, 0x20, 0xfc);
++ if (freq_KHz == 123000 || freq_KHz == 147000 ||
++ freq_KHz == 171000 || freq_KHz == 195000 ||
++ freq_KHz == 219000 || freq_KHz == 267000 ||
++ freq_KHz == 291000 || freq_KHz == 339000 ||
++ freq_KHz == 387000 || freq_KHz == 435000 ||
++ freq_KHz == 482000 || freq_KHz == 530000 ||
++ freq_KHz == 722000 ||
++ (state->tuner_custom_cfg == 1 && freq_KHz == 315000)) {
++ _mt_fe_tn_set_reg(state, 0x20, 0x5c);
++ }
++ }
++ return 0;
++}
++
++ static int _mt_fe_tn_set_DAC_tc2800(struct dvbsky_m88dc2800_state *state)
++{
++ u8 buf, tempnumber;
++ s32 N;
++ s32 f1f2number, f1, f2, delta1, Totalnum1;
++ s32 cntT, cntin, NCOI, z0, z1, z2, tmp;
++ u32 fc, fadc, fsd, f2d;
++ u32 FreqTrue108_Hz;
++ s32 M = state->tuner_crystal / 4000;
++ /* const u8 bandwidth = state->tuner_bandwidth; */
++ const u16 DAC_fre = 108;
++ const u32 crystal_KHz = state->tuner_crystal;
++ const u32 DACFreq_KHz = state->tuner_dac;
++ const u32 freq_KHz = state->tuner_freq;
++
++ if (state->tuner_mtt == 0xE1) {
++ _mt_fe_tn_get_reg(state, 0x33, &buf);
++ M = buf & 0x0f;
++ if (M == 0)
++ M = 6;
++ }
++ _mt_fe_tn_get_reg(state, 0x34, &buf);
++ N = buf & 0x07;
++ _mt_fe_tn_get_reg(state, 0x35, &buf);
++ N = (N << 8) + buf;
++ buf = ((N + 256) * crystal_KHz / M / DAC_fre + 500) / 1000;
++ if (state->tuner_mtt == 0xE1) {
++ _mt_fe_tn_set_appendix_tc2800(state);
++ if (freq_KHz == 187000 || freq_KHz == 195000 ||
++ freq_KHz == 131000 || freq_KHz == 211000 ||
++ freq_KHz == 219000 || freq_KHz == 227000 ||
++ freq_KHz == 267000 || freq_KHz == 299000 ||
++ freq_KHz == 347000 || freq_KHz == 363000 ||
++ freq_KHz == 395000 || freq_KHz == 403000 ||
++ freq_KHz == 435000 || freq_KHz == 482000 ||
++ freq_KHz == 474000 || freq_KHz == 490000 ||
++ freq_KHz == 610000 || freq_KHz == 642000 ||
++ freq_KHz == 666000 || freq_KHz == 722000 ||
++ freq_KHz == 754000 ||
++ ((freq_KHz == 379000 || freq_KHz == 467000 ||
++ freq_KHz == 762000) && state->tuner_custom_cfg != 1)) {
++ buf = buf + 1;
++ }
++ if (freq_KHz == 123000 || freq_KHz == 139000 ||
++ freq_KHz == 147000 || freq_KHz == 171000 ||
++ freq_KHz == 179000 || freq_KHz == 203000 ||
++ freq_KHz == 235000 || freq_KHz == 251000 ||
++ freq_KHz == 259000 || freq_KHz == 283000 ||
++ freq_KHz == 331000 || freq_KHz == 363000 ||
++ freq_KHz == 371000 || freq_KHz == 387000 ||
++ freq_KHz == 411000 || freq_KHz == 427000 ||
++ freq_KHz == 443000 || freq_KHz == 451000 ||
++ freq_KHz == 459000 || freq_KHz == 506000 ||
++ freq_KHz == 514000 || freq_KHz == 538000 ||
++ freq_KHz == 546000 || freq_KHz == 554000 ||
++ freq_KHz == 562000 || freq_KHz == 570000 ||
++ freq_KHz == 578000 || freq_KHz == 602000 ||
++ freq_KHz == 626000 || freq_KHz == 658000 ||
++ freq_KHz == 690000 || freq_KHz == 714000 ||
++ freq_KHz == 746000 || freq_KHz == 522000 ||
++ freq_KHz == 826000 || freq_KHz == 155000 ||
++ freq_KHz == 530000 ||
++ ((freq_KHz == 275000 || freq_KHz == 355000) &&
++ state->tuner_custom_cfg != 1) ||
++ ((freq_KHz == 467000 || freq_KHz == 762000 ||
++ freq_KHz == 778000 || freq_KHz == 818000) &&
++ state->tuner_custom_cfg == 1)) {
++ buf = buf - 1;
++ }
++ }
++ _mt_fe_tn_set_reg(state, 0x0e, buf);
++ _mt_fe_tn_set_reg(state, 0x0d, buf);
++ f1f2number =
++ (((DACFreq_KHz * M * buf) / crystal_KHz) << 16) / (N + 256) +
++ (((DACFreq_KHz * M * buf) % crystal_KHz) << 16) / ((N + 256) *
++ crystal_KHz);
++ _mt_fe_tn_set_reg(state, 0xf1, (f1f2number & 0xff00) >> 8);
++ _mt_fe_tn_set_reg(state, 0xf2, f1f2number & 0x00ff);
++ FreqTrue108_Hz =
++ (N + 256) * crystal_KHz / (M * buf) * 1000 +
++ (((N + 256) * crystal_KHz) % (M * buf)) * 1000 / (M * buf);
++ f1 = 4096;
++ fc = FreqTrue108_Hz;
++ fadc = fc / 4;
++ fsd = 27000000;
++ f2d = state->tuner_bandwidth * 1000 / 2 - 150;
++ f2 = (fsd / 250) * f2d / ((fc + 500) / 1000);
++ delta1 = ((f1 - f2) << 15) / f2;
++ Totalnum1 = ((f1 - f2) << 15) - delta1 * f2;
++ cntT = f2;
++ cntin = Totalnum1;
++ NCOI = delta1;
++ z0 = cntin;
++ z1 = cntT;
++ z2 = NCOI;
++ tempnumber = (z0 & 0xff00) >> 8;
++ _mt_fe_tn_set_reg(state, 0xc9, (u8) (tempnumber & 0x0f));
++ tempnumber = (z0 & 0xff);
++ _mt_fe_tn_set_reg(state, 0xca, tempnumber);
++ tempnumber = (z1 & 0xff00) >> 8;
++ _mt_fe_tn_set_reg(state, 0xcb, tempnumber);
++ tempnumber = (z1 & 0xff);
++ _mt_fe_tn_set_reg(state, 0xcc, tempnumber);
++ tempnumber = (z2 & 0xff00) >> 8;
++ _mt_fe_tn_set_reg(state, 0xcd, tempnumber);
++ tempnumber = (z2 & 0xff);
++ _mt_fe_tn_set_reg(state, 0xce, tempnumber);
++ tmp = f1;
++ f1 = f2;
++ f2 = tmp / 2;
++ delta1 = ((f1 - f2) << 15) / f2;
++ Totalnum1 = ((f1 - f2) << 15) - delta1 * f2;
++ NCOI = (f1 << 15) / f2 - (1 << 15);
++ cntT = f2;
++ cntin = Totalnum1;
++ z0 = cntin;
++ z1 = cntT;
++ z2 = NCOI;
++ tempnumber = (z0 & 0xff00) >> 8;
++ _mt_fe_tn_set_reg(state, 0xd9, (u8) (tempnumber & 0x0f));
++ tempnumber = (z0 & 0xff);
++ _mt_fe_tn_set_reg(state, 0xda, tempnumber);
++ tempnumber = (z1 & 0xff00) >> 8;
++ _mt_fe_tn_set_reg(state, 0xdb, tempnumber);
++ tempnumber = (z1 & 0xff);
++ _mt_fe_tn_set_reg(state, 0xdc, tempnumber);
++ tempnumber = (z2 & 0xff00) >> 8;
++ _mt_fe_tn_set_reg(state, 0xdd, tempnumber);
++ tempnumber = (z2 & 0xff);
++ _mt_fe_tn_set_reg(state, 0xde, tempnumber);
++
++ return 0;
++}
++
++static int _mt_fe_tn_preset_tc2800(struct dvbsky_m88dc2800_state *state)
++{
++ if (state->tuner_mtt == 0xD1) {
++ _mt_fe_tn_set_reg(state, 0x19, 0x4a);
++ _mt_fe_tn_set_reg(state, 0x1b, 0x4b);
++ _mt_fe_tn_set_reg(state, 0x04, 0x04);
++ _mt_fe_tn_set_reg(state, 0x17, 0x0d);
++ _mt_fe_tn_set_reg(state, 0x62, 0x6c);
++ _mt_fe_tn_set_reg(state, 0x63, 0xf4);
++ _mt_fe_tn_set_reg(state, 0x1f, 0x0e);
++ _mt_fe_tn_set_reg(state, 0x6b, 0xf4);
++ _mt_fe_tn_set_reg(state, 0x14, 0x01);
++ _mt_fe_tn_set_reg(state, 0x5a, 0x75);
++ _mt_fe_tn_set_reg(state, 0x66, 0x74);
++ _mt_fe_tn_set_reg(state, 0x72, 0xe0);
++ _mt_fe_tn_set_reg(state, 0x70, 0x07);
++ _mt_fe_tn_set_reg(state, 0x15, 0x7b);
++ _mt_fe_tn_set_reg(state, 0x55, 0x71);
++ _mt_fe_tn_set_reg(state, 0x75, 0x55);
++ _mt_fe_tn_set_reg(state, 0x76, 0xac);
++ _mt_fe_tn_set_reg(state, 0x77, 0x6c);
++ _mt_fe_tn_set_reg(state, 0x78, 0x8b);
++ _mt_fe_tn_set_reg(state, 0x79, 0x42);
++ _mt_fe_tn_set_reg(state, 0x7a, 0xd2);
++ _mt_fe_tn_set_reg(state, 0x81, 0x01);
++ _mt_fe_tn_set_reg(state, 0x82, 0x00);
++ _mt_fe_tn_set_reg(state, 0x82, 0x02);
++ _mt_fe_tn_set_reg(state, 0x82, 0x04);
++ _mt_fe_tn_set_reg(state, 0x82, 0x06);
++ _mt_fe_tn_set_reg(state, 0x82, 0x08);
++ _mt_fe_tn_set_reg(state, 0x82, 0x09);
++ _mt_fe_tn_set_reg(state, 0x82, 0x29);
++ _mt_fe_tn_set_reg(state, 0x82, 0x49);
++ _mt_fe_tn_set_reg(state, 0x82, 0x58);
++ _mt_fe_tn_set_reg(state, 0x82, 0x59);
++ _mt_fe_tn_set_reg(state, 0x82, 0x98);
++ _mt_fe_tn_set_reg(state, 0x82, 0x99);
++ _mt_fe_tn_set_reg(state, 0x10, 0x05);
++ _mt_fe_tn_set_reg(state, 0x10, 0x0d);
++ _mt_fe_tn_set_reg(state, 0x11, 0x95);
++ _mt_fe_tn_set_reg(state, 0x11, 0x9d);
++ if (state->tuner_loopthrough != 0) {
++ _mt_fe_tn_set_reg(state, 0x67, 0x25);
++ } else {
++ _mt_fe_tn_set_reg(state, 0x67, 0x05);
++ }
++ } else if (state->tuner_mtt == 0xE1) {
++ _mt_fe_tn_set_reg(state, 0x1b, 0x47);
++ if (state->tuner_mode == 0) { /* DVB-C */
++ _mt_fe_tn_set_reg(state, 0x66, 0x74);
++ _mt_fe_tn_set_reg(state, 0x62, 0x2c);
++ _mt_fe_tn_set_reg(state, 0x63, 0x54);
++ _mt_fe_tn_set_reg(state, 0x68, 0x0b);
++ _mt_fe_tn_set_reg(state, 0x14, 0x00);
++ } else { /* CTTB */
++ _mt_fe_tn_set_reg(state, 0x66, 0x74);
++ _mt_fe_tn_set_reg(state, 0x62, 0x0c);
++ _mt_fe_tn_set_reg(state, 0x63, 0x54);
++ _mt_fe_tn_set_reg(state, 0x68, 0x0b);
++ _mt_fe_tn_set_reg(state, 0x14, 0x05);
++ }
++ _mt_fe_tn_set_reg(state, 0x6f, 0x00);
++ _mt_fe_tn_set_reg(state, 0x84, 0x04);
++ _mt_fe_tn_set_reg(state, 0x5e, 0xbe);
++ _mt_fe_tn_set_reg(state, 0x87, 0x07);
++ _mt_fe_tn_set_reg(state, 0x8a, 0x1f);
++ _mt_fe_tn_set_reg(state, 0x8b, 0x1f);
++ _mt_fe_tn_set_reg(state, 0x88, 0x30);
++ _mt_fe_tn_set_reg(state, 0x58, 0x34);
++ _mt_fe_tn_set_reg(state, 0x61, 0x8c);
++ _mt_fe_tn_set_reg(state, 0x6a, 0x42);
++ }
++ return 0;
++}
++
++static int mt_fe_tn_wakeup_tc2800(struct dvbsky_m88dc2800_state *state)
++{
++ _mt_fe_tn_set_reg(state, 0x16, 0xb1);
++ _mt_fe_tn_set_reg(state, 0x09, 0x7d);
++ return 0;
++}
++
++ static int mt_fe_tn_sleep_tc2800(struct dvbsky_m88dc2800_state *state)
++{
++ _mt_fe_tn_set_reg(state, 0x16, 0xb0);
++ _mt_fe_tn_set_reg(state, 0x09, 0x6d);
++ return 0;
++}
++
++ static int mt_fe_tn_init_tc2800(struct dvbsky_m88dc2800_state *state)
++{
++ if (state->tuner_init_OK != 1) {
++ state->tuner_dev_addr = 0x61; /* TUNER_I2C_ADDR_TC2800 */
++ state->tuner_freq = 650000;
++ state->tuner_qam = 0;
++ state->tuner_mode = 0; // 0: DVB-C, 1: CTTB
++ state->tuner_bandwidth = 8;
++ state->tuner_loopthrough = 0;
++ state->tuner_crystal = 24000;
++ state->tuner_dac = 7200;
++ state->tuner_mtt = 0x00;
++ state->tuner_custom_cfg = 0;
++ state->tuner_version = 30022; /* Driver version number */
++ state->tuner_time = 12092611;
++ state->tuner_init_OK = 1;
++ }
++ _mt_fe_tn_set_reg(state, 0x2b, 0x46);
++ _mt_fe_tn_set_reg(state, 0x2c, 0x75);
++ if (state->tuner_mtt == 0x00) {
++ u8 tmp = 0;
++ _mt_fe_tn_get_reg(state, 0x01, &tmp);
++ printk(KERN_INFO "m88dc2800: tuner id = 0x%02x ", tmp);
++ switch (tmp) {
++ case 0x0d:
++ state->tuner_mtt = 0xD1;
++ break;
++ case 0x8e:
++ default:
++ state->tuner_mtt = 0xE1;
++ break;
++ }
++ }
++ return 0;
++}
++
++ static int mt_fe_tn_set_freq_tc2800(struct dvbsky_m88dc2800_state *state,
++ u32 freq_KHz)
++{
++ u8 buf;
++ u8 buf1;
++
++ mt_fe_tn_init_tc2800(state);
++ state->tuner_freq = freq_KHz;
++ _mt_fe_tn_set_reg(state, 0x21, freq_KHz > 500000 ? 0xb9 : 0x99);
++ mt_fe_tn_wakeup_tc2800(state);
++ _mt_fe_tn_set_reg(state, 0x05, 0x7f);
++ _mt_fe_tn_set_reg(state, 0x06, 0xf8);
++ _mt_fe_tn_set_RF_front_tc2800(state);
++ _mt_fe_tn_set_PLL_freq_tc2800(state);
++ _mt_fe_tn_set_DAC_tc2800(state);
++ _mt_fe_tn_set_BB_tc2800(state);
++ _mt_fe_tn_preset_tc2800(state);
++ _mt_fe_tn_set_reg(state, 0x05, 0x00);
++ _mt_fe_tn_set_reg(state, 0x06, 0x00);
++ if (state->tuner_mtt == 0xD1) {
++ _mt_fe_tn_set_reg(state, 0x00, 0x01);
++ _mt_fe_tn_set_reg(state, 0x00, 0x00);
++ msleep(5);
++ _mt_fe_tn_set_reg(state, 0x41, 0x00);
++ msleep(5);
++ _mt_fe_tn_set_reg(state, 0x41, 0x02);
++
++ _mt_fe_tn_get_reg(state, 0x69, &buf1);
++ buf1 = buf1 & 0x0f;
++ _mt_fe_tn_get_reg(state, 0x61, &buf);
++ buf = buf & 0x0f;
++ if (buf == 0x0c)
++ _mt_fe_tn_set_reg(state, 0x6a, 0x59);
++ if (buf1 > 0x02) {
++ if (freq_KHz > 600000)
++ _mt_fe_tn_set_reg(state, 0x66, 0x44);
++ else if (freq_KHz > 500000)
++ _mt_fe_tn_set_reg(state, 0x66, 0x64);
++ else
++ _mt_fe_tn_set_reg(state, 0x66, 0x74);
++ }
++ if (buf1 < 0x03) {
++ if (freq_KHz > 800000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x64);
++ else if (freq_KHz > 600000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x54);
++ else if (freq_KHz > 500000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x54);
++ else if (freq_KHz > 300000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x43);
++ else if (freq_KHz > 220000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x54);
++ else if (freq_KHz > 110000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x14);
++ else
++ _mt_fe_tn_set_reg(state, 0x87, 0x54);
++ msleep(5);
++ } else if (buf < 0x0c) {
++ if (freq_KHz > 800000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x14);
++ else if (freq_KHz > 600000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x14);
++ else if (freq_KHz > 500000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x34);
++ else if (freq_KHz > 300000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x43);
++ else if (freq_KHz > 220000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x54);
++ else if (freq_KHz > 110000)
++ _mt_fe_tn_set_reg(state, 0x87, 0x14);
++ else
++ _mt_fe_tn_set_reg(state, 0x87, 0x54);
++ msleep(5);
++ }
++ } else if ((state->tuner_mtt == 0xE1)) {
++ _mt_fe_tn_set_reg(state, 0x00, 0x01);
++ _mt_fe_tn_set_reg(state, 0x00, 0x00);
++ msleep(20);
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = (buf & 0xef) | 0x28;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ msleep(50);
++ _mt_fe_tn_get_reg(state, 0x38, &buf);
++ _mt_fe_tn_set_reg(state, 0x38, buf);
++ _mt_fe_tn_get_reg(state, 0x32, &buf);
++ buf = (buf & 0xf7) | 0x10;
++ _mt_fe_tn_set_reg(state, 0x32, buf);
++ msleep(10);
++ _mt_fe_tn_get_reg(state, 0x69, &buf);
++ buf = buf & 0x03;
++ _mt_fe_tn_set_reg(state, 0x2a, buf);
++ if (buf > 0) {
++ msleep(20);
++ _mt_fe_tn_get_reg(state, 0x84, &buf);
++ buf = buf & 0x1f;
++ _mt_fe_tn_set_reg(state, 0x68, 0x0a);
++ _mt_fe_tn_get_reg(state, 0x88, &buf1);
++ buf1 = buf1 & 0x1f;
++ if (buf <= buf1)
++ _mt_fe_tn_set_reg(state, 0x66, 0x44);
++ else
++ _mt_fe_tn_set_reg(state, 0x66, 0x74);
++ } else {
++ if (freq_KHz <= 600000)
++ _mt_fe_tn_set_reg(state, 0x68, 0x0c);
++ else
++ _mt_fe_tn_set_reg(state, 0x68, 0x0e);
++ _mt_fe_tn_set_reg(state, 0x30, 0xfb);
++ _mt_fe_tn_set_reg(state, 0x30, 0xff);
++ _mt_fe_tn_set_reg(state, 0x31, 0x04);
++ _mt_fe_tn_set_reg(state, 0x31, 0x00);
++ }
++ if (state->tuner_loopthrough != 0) {
++ _mt_fe_tn_get_reg(state, 0x28, &buf);
++ if (buf == 0) {
++ _mt_fe_tn_set_reg(state, 0x28, 0xff);
++ _mt_fe_tn_get_reg(state, 0x61, &buf);
++ buf = buf & 0x0f;
++ if (buf > 9)
++ _mt_fe_tn_set_reg(state, 0x67, 0x74);
++ else if (buf > 6)
++ _mt_fe_tn_set_reg(state, 0x67, 0x64);
++ else if (buf > 3)
++ _mt_fe_tn_set_reg(state, 0x67, 0x54);
++ else
++ _mt_fe_tn_set_reg(state, 0x67, 0x44);
++ }
++ } else {
++ _mt_fe_tn_set_reg(state, 0x67, 0x34);
++ }
++ } else {
++ return 1;
++ }
++ return 0;
++}
++
++
++/*
++static int mt_fe_tn_set_BB_filter_band_tc2800(struct dvbsky_m88dc2800_state *state,
++ u8 bandwidth)
++{
++ u8 buf, tmp;
++
++ _mt_fe_tn_get_reg(state, 0x53, &tmp);
++
++ if (bandwidth == 6)
++ buf = 0x01 << 1;
++ else if (bandwidth == 7)
++ buf = 0x02 << 1;
++ else if (bandwidth == 8)
++ buf = 0x04 << 1;
++ else
++ buf = 0x04 << 1;
++
++ tmp &= 0xf1;
++ tmp |= buf;
++ _mt_fe_tn_set_reg(state, 0x53, tmp);
++ state->tuner_bandwidth = bandwidth;
++ return 0;
++}
++*/
++
++static s32 mt_fe_tn_get_signal_strength_tc2800(struct dvbsky_m88dc2800_state
++ *state)
++{
++ s32 level = -107;
++ s32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
++ s32 val1, val2, val;
++ s32 result2, result3, result4, result5, result6;
++ s32 append;
++ u8 tmp;
++ s32 freq_KHz = (s32) state->tuner_freq;
++ if (state->tuner_mtt == 0xD1) {
++ _mt_fe_tn_get_reg(state, 0x61, &tmp);
++ tmp1 = tmp & 0x0f;
++ _mt_fe_tn_get_reg(state, 0x69, &tmp);
++ tmp2 = tmp & 0x0f;
++ _mt_fe_tn_get_reg(state, 0x73, &tmp);
++ tmp3 = tmp & 0x07;
++ _mt_fe_tn_get_reg(state, 0x7c, &tmp);
++ tmp4 = (tmp >> 4) & 0x0f;
++ _mt_fe_tn_get_reg(state, 0x7b, &tmp);
++ tmp5 = tmp & 0x0f;
++ _mt_fe_tn_get_reg(state, 0x7f, &tmp);
++ tmp6 = (tmp >> 5) & 0x01;
++ if (tmp1 > 6) {
++ val1 = 0;
++ if (freq_KHz <= 200000) {
++ val2 = (tmp1 - 6) * 267;
++ } else if (freq_KHz <= 600000) {
++ val2 = (tmp1 - 6) * 280;
++ } else {
++ val2 = (tmp1 - 6) * 290;
++ }
++ val = val1 + val2;
++ } else {
++ if (tmp1 == 0) {
++ val1 = -550;
++ } else {
++ val1 = 0;
++ }
++ if ((tmp1 < 4) && (freq_KHz >= 506000)) {
++ val1 = -850;
++ }
++ val2 = 0;
++ val = val1 + val2;
++ }
++ if (freq_KHz <= 95000) {
++ result2 = tmp2 * 289;
++ } else if (freq_KHz <= 155000) {
++ result2 = tmp2 * 278;
++ } else if (freq_KHz <= 245000) {
++ result2 = tmp2 * 267;
++ } else if (freq_KHz <= 305000) {
++ result2 = tmp2 * 256;
++ } else if (freq_KHz <= 335000) {
++ result2 = tmp2 * 244;
++ } else if (freq_KHz <= 425000) {
++ result2 = tmp2 * 233;
++ } else if (freq_KHz <= 575000) {
++ result2 = tmp2 * 222;
++ } else if (freq_KHz <= 665000) {
++ result2 = tmp2 * 211;
++ } else {
++ result2 = tmp2 * 200;
++ }
++ result3 = (6 - tmp3) * 100;
++ result4 = 300 * tmp4;
++ result5 = 50 * tmp5;
++ result6 = 300 * tmp6;
++ if (freq_KHz < 105000) {
++ append = -450;
++ } else if (freq_KHz <= 227000) {
++ append = -4 * (freq_KHz / 1000 - 100) + 150;
++ } else if (freq_KHz <= 305000) {
++ append = -4 * (freq_KHz / 1000 - 100);
++ } else if (freq_KHz <= 419000) {
++ append = 500 - 40 * (freq_KHz / 1000 - 300) / 17 + 130;
++ } else if (freq_KHz <= 640000) {
++ append = 500 - 40 * (freq_KHz / 1000 - 300) / 17;
++ } else {
++ append = -500;
++ }
++ level = append - (val + result2 + result3 + result4 +
++ result5 + result6);
++ level /= 100;
++ } else if (state->tuner_mtt == 0xE1) {
++ _mt_fe_tn_get_reg(state, 0x61, &tmp);
++ tmp1 = tmp & 0x0f;
++ _mt_fe_tn_get_reg(state, 0x84, &tmp);
++ tmp2 = tmp & 0x1f;
++ _mt_fe_tn_get_reg(state, 0x69, &tmp);
++ tmp3 = tmp & 0x03;
++ _mt_fe_tn_get_reg(state, 0x73, &tmp);
++ tmp4 = tmp & 0x0f;
++ _mt_fe_tn_get_reg(state, 0x7c, &tmp);
++ tmp5 = (tmp >> 4) & 0x0f;
++ _mt_fe_tn_get_reg(state, 0x7b, &tmp);
++ tmp6 = tmp & 0x0f;
++ if (freq_KHz < 151000) {
++ result2 = (1150 - freq_KHz / 100) * 163 / 33 + 4230;
++ result3 = (1150 - freq_KHz / 100) * 115 / 33 + 1850;
++ result4 = -3676 * (freq_KHz / 1000) / 100 + 6115;
++ } else if (freq_KHz < 257000) {
++ result2 = (1540 - freq_KHz / 100) * 11 / 4 + 3870;
++ result3 = (1540 - freq_KHz / 100) * 205 / 96 + 2100;
++ result4 = -21 * freq_KHz / 1000 + 5084;
++ } else if (freq_KHz < 305000) {
++ result2 = (2620 - freq_KHz / 100) * 5 / 3 + 2770;
++ result3 = (2620 - freq_KHz / 100) * 10 / 7 + 1700;
++ result4 = 650;
++ } else if (freq_KHz < 449000) {
++ result2 = (307 - freq_KHz / 1000) * 82 / 27 + 11270;
++ result3 = (3100 - freq_KHz / 100) * 5 / 3 + 10000;
++ result4 = 134 * freq_KHz / 10000 + 11875;
++ } else {
++ result2 = (307 - freq_KHz / 1000) * 82 / 27 + 11270;
++ result3 = 8400;
++ result4 = 5300;
++ }
++ if (tmp1 > 6) {
++ val1 = result2;
++ val2 = 2900;
++ val = 500;
++ } else if (tmp1 > 0) {
++ val1 = result3;
++ val2 = 2700;
++ val = 500;
++ } else {
++ val1 = result4;
++ val2 = 2700;
++ val = 400;
++ }
++ level = val1 - (val2 * tmp1 + 500 * tmp2 + 3000 * tmp3 -
++ 500 * tmp4 + 3000 * tmp5 + val * tmp6) - 1000;
++ level /= 1000;
++ }
++ return level;
++}
++
++
++/* m88dc2800 operation functions */
++u8 M88DC2000GetLock(struct dvbsky_m88dc2800_state * state)
++{
++ u8 u8ret = 0;
++ if (ReadReg(state, 0x80) < 0x06) {
++ if ((ReadReg(state, 0xdf) & 0x80) == 0x80
++ &&(ReadReg(state, 0x91) & 0x23) == 0x03
++ &&(ReadReg(state, 0x43) & 0x08) == 0x08)
++ u8ret = 1;
++ else
++ u8ret = 0;
++ } else {
++ if ((ReadReg(state, 0x85) & 0x08) == 0x08)
++ u8ret = 1;
++ else
++ u8ret = 0;
++ }
++ dprintk("%s, lock=%d\n", __func__, u8ret);
++ return u8ret;
++}
++
++static int M88DC2000SetTsType(struct dvbsky_m88dc2800_state *state, u8 type)
++{
++ u8 regC2H;
++
++ if (type == 3) {
++ WriteReg(state, 0x84, 0x6A);
++ WriteReg(state, 0xC0, 0x43);
++ WriteReg(state, 0xE2, 0x06);
++ regC2H = ReadReg(state, 0xC2);
++ regC2H &= 0xC0;
++ regC2H |= 0x1B;
++ WriteReg(state, 0xC2, regC2H);
++ WriteReg(state, 0xC1, 0x60); /* common interface */
++ } else if (type == 1) {
++ WriteReg(state, 0x84, 0x6A);
++ WriteReg(state, 0xC0, 0x47); /* serial format */
++ WriteReg(state, 0xE2, 0x02);
++ regC2H = ReadReg(state, 0xC2);
++ regC2H &= 0xC7;
++ WriteReg(state, 0xC2, regC2H);
++ WriteReg(state, 0xC1, 0x00);
++ } else {
++ WriteReg(state, 0x84, 0x6C);
++ WriteReg(state, 0xC0, 0x43); /* parallel format */
++ WriteReg(state, 0xE2, 0x06);
++ regC2H = ReadReg(state, 0xC2);
++ regC2H &= 0xC7;
++ WriteReg(state, 0xC2, regC2H);
++ WriteReg(state, 0xC1, 0x00);
++ }
++ return 0;
++}
++
++static int M88DC2000RegInitial_TC2800(struct dvbsky_m88dc2800_state *state)
++{
++ u8 RegE3H, RegE4H;
++
++ WriteReg(state, 0x00, 0x48);
++ WriteReg(state, 0x01, 0x09);
++ WriteReg(state, 0xFB, 0x0A);
++ WriteReg(state, 0xFC, 0x0B);
++ WriteReg(state, 0x02, 0x0B);
++ WriteReg(state, 0x03, 0x18);
++ WriteReg(state, 0x05, 0x0D);
++ WriteReg(state, 0x36, 0x80);
++ WriteReg(state, 0x43, 0x40);
++ WriteReg(state, 0x55, 0x7A);
++ WriteReg(state, 0x56, 0xD9);
++ WriteReg(state, 0x57, 0xDF);
++ WriteReg(state, 0x58, 0x39);
++ WriteReg(state, 0x5A, 0x00);
++ WriteReg(state, 0x5C, 0x71);
++ WriteReg(state, 0x5D, 0x23);
++ WriteReg(state, 0x86, 0x40);
++ WriteReg(state, 0xF9, 0x08);
++ WriteReg(state, 0x61, 0x40);
++ WriteReg(state, 0x62, 0x0A);
++ WriteReg(state, 0x90, 0x06);
++ WriteReg(state, 0xDE, 0x00);
++ WriteReg(state, 0xA0, 0x03);
++ WriteReg(state, 0xDF, 0x81);
++ WriteReg(state, 0xFA, 0x40);
++ WriteReg(state, 0x37, 0x10);
++ WriteReg(state, 0xF0, 0x40);
++ WriteReg(state, 0xF2, 0x9C);
++ WriteReg(state, 0xF3, 0x40);
++ RegE3H = ReadReg(state, 0xE3);
++ RegE4H = ReadReg(state, 0xE4);
++ if (((RegE3H & 0xC0) == 0x00) && ((RegE4H & 0xC0) == 0x00)) {
++ WriteReg(state, 0x30, 0xFF);
++ WriteReg(state, 0x31, 0x00);
++ WriteReg(state, 0x32, 0x00);
++ WriteReg(state, 0x33, 0x00);
++ WriteReg(state, 0x35, 0x32);
++ WriteReg(state, 0x40, 0x00);
++ WriteReg(state, 0x41, 0x10);
++ WriteReg(state, 0xF1, 0x02);
++ WriteReg(state, 0xF4, 0x04);
++ WriteReg(state, 0xF5, 0x00);
++ WriteReg(state, 0x42, 0x14);
++ WriteReg(state, 0xE1, 0x25);
++ } else if (((RegE3H & 0xC0) == 0x80) && ((RegE4H & 0xC0) == 0x40)) {
++ WriteReg(state, 0x30, 0xFF);
++ WriteReg(state, 0x31, 0x00);
++ WriteReg(state, 0x32, 0x00);
++ WriteReg(state, 0x33, 0x00);
++ WriteReg(state, 0x35, 0x32);
++ WriteReg(state, 0x39, 0x00);
++ WriteReg(state, 0x3A, 0x00);
++ WriteReg(state, 0x40, 0x00);
++ WriteReg(state, 0x41, 0x10);
++ WriteReg(state, 0xF1, 0x00);
++ WriteReg(state, 0xF4, 0x00);
++ WriteReg(state, 0xF5, 0x40);
++ WriteReg(state, 0x42, 0x14);
++ WriteReg(state, 0xE1, 0x25);
++ } else if ((RegE3H == 0x80 || RegE3H == 0x81)
++ && (RegE4H == 0x80 || RegE4H == 0x81)) {
++ WriteReg(state, 0x30, 0xFF);
++ WriteReg(state, 0x31, 0x00);
++ WriteReg(state, 0x32, 0x00);
++ WriteReg(state, 0x33, 0x00);
++ WriteReg(state, 0x35, 0x32);
++ WriteReg(state, 0x39, 0x00);
++ WriteReg(state, 0x3A, 0x00);
++ WriteReg(state, 0xF1, 0x00);
++ WriteReg(state, 0xF4, 0x00);
++ WriteReg(state, 0xF5, 0x40);
++ WriteReg(state, 0x42, 0x24);
++ WriteReg(state, 0xE1, 0x25);
++ WriteReg(state, 0x92, 0x7F);
++ WriteReg(state, 0x93, 0x91);
++ WriteReg(state, 0x95, 0x00);
++ WriteReg(state, 0x2B, 0x33);
++ WriteReg(state, 0x2A, 0x2A);
++ WriteReg(state, 0x2E, 0x80);
++ WriteReg(state, 0x25, 0x25);
++ WriteReg(state, 0x2D, 0xFF);
++ WriteReg(state, 0x26, 0xFF);
++ WriteReg(state, 0x27, 0x00);
++ WriteReg(state, 0x24, 0x25);
++ WriteReg(state, 0xA4, 0xFF);
++ WriteReg(state, 0xA3, 0x0D);
++ } else {
++ WriteReg(state, 0x30, 0xFF);
++ WriteReg(state, 0x31, 0x00);
++ WriteReg(state, 0x32, 0x00);
++ WriteReg(state, 0x33, 0x00);
++ WriteReg(state, 0x35, 0x32);
++ WriteReg(state, 0x39, 0x00);
++ WriteReg(state, 0x3A, 0x00);
++ WriteReg(state, 0xF1, 0x00);
++ WriteReg(state, 0xF4, 0x00);
++ WriteReg(state, 0xF5, 0x40);
++ WriteReg(state, 0x42, 0x24);
++ WriteReg(state, 0xE1, 0x27);
++ WriteReg(state, 0x92, 0x7F);
++ WriteReg(state, 0x93, 0x91);
++ WriteReg(state, 0x95, 0x00);
++ WriteReg(state, 0x2B, 0x33);
++ WriteReg(state, 0x2A, 0x2A);
++ WriteReg(state, 0x2E, 0x80);
++ WriteReg(state, 0x25, 0x25);
++ WriteReg(state, 0x2D, 0xFF);
++ WriteReg(state, 0x26, 0xFF);
++ WriteReg(state, 0x27, 0x00);
++ WriteReg(state, 0x24, 0x25);
++ WriteReg(state, 0xA4, 0xFF);
++ WriteReg(state, 0xA3, 0x10);
++ }
++ WriteReg(state, 0xF6, 0x4E);
++ WriteReg(state, 0xF7, 0x20);
++ WriteReg(state, 0x89, 0x02);
++ WriteReg(state, 0x14, 0x08);
++ WriteReg(state, 0x6F, 0x0D);
++ WriteReg(state, 0x10, 0xFF);
++ WriteReg(state, 0x11, 0x00);
++ WriteReg(state, 0x12, 0x30);
++ WriteReg(state, 0x13, 0x23);
++ WriteReg(state, 0x60, 0x00);
++ WriteReg(state, 0x69, 0x00);
++ WriteReg(state, 0x6A, 0x03);
++ WriteReg(state, 0xE0, 0x75);
++ WriteReg(state, 0x8D, 0x29);
++ WriteReg(state, 0x4E, 0xD8);
++ WriteReg(state, 0x88, 0x80);
++ WriteReg(state, 0x52, 0x79);
++ WriteReg(state, 0x53, 0x03);
++ WriteReg(state, 0x59, 0x30);
++ WriteReg(state, 0x5E, 0x02);
++ WriteReg(state, 0x5F, 0x0F);
++ WriteReg(state, 0x71, 0x03);
++ WriteReg(state, 0x72, 0x12);
++ WriteReg(state, 0x73, 0x12);
++
++ return 0;
++}
++
++static int M88DC2000AutoTSClock_P(struct dvbsky_m88dc2800_state *state, u32 sym,
++ u16 qam)
++{
++ u32 dataRate;
++ u8 clk_div, value;
++ printk(KERN_INFO
++ "m88dc2800: M88DC2000AutoTSClock_P, symrate=%d qam=%d\n",
++ sym, qam);
++ switch (qam) {
++ case 16:
++ dataRate = 4;
++ break;
++ case 32:
++ dataRate = 5;
++ break;
++ case 128:
++ dataRate = 7;
++ break;
++ case 256:
++ dataRate = 8;
++ break;
++ case 64:
++ default:
++ dataRate = 6;
++ break;
++ }
++ dataRate *= sym * 105;
++ dataRate /= 800;
++ if (dataRate <= 4115)
++ clk_div = 0x05;
++ else if (dataRate <= 4800)
++ clk_div = 0x04;
++ else if (dataRate <= 5760)
++ clk_div = 0x03;
++ else if (dataRate <= 7200)
++ clk_div = 0x02;
++ else if (dataRate <= 9600)
++ clk_div = 0x01;
++ else
++ clk_div = 0x00;
++ value = ReadReg(state, 0xC2);
++ value &= 0xc0;
++ value |= clk_div;
++ WriteReg(state, 0xC2, value);
++ return 0;
++}
++
++static int M88DC2000AutoTSClock_C(struct dvbsky_m88dc2800_state *state, u32 sym,
++ u16 qam)
++{
++ u32 dataRate;
++ u8 clk_div, value;
++ printk(KERN_INFO
++ "m88dc2800: M88DC2000AutoTSClock_C, symrate=%d qam=%d\n",
++ sym, qam);
++ switch (qam) {
++ case 16:
++ dataRate = 4;
++ break;
++ case 32:
++ dataRate = 5;
++ break;
++ case 128:
++ dataRate = 7;
++ break;
++ case 256:
++ dataRate = 8;
++ break;
++ case 64:
++ default:
++ dataRate = 6;
++ break;
++ }
++ dataRate *= sym * 105;
++ dataRate /= 800;
++ if (dataRate <= 4115)
++ clk_div = 0x3F;
++ else if (dataRate <= 4800)
++ clk_div = 0x36;
++ else if (dataRate <= 5760)
++ clk_div = 0x2D;
++ else if (dataRate <= 7200)
++ clk_div = 0x24;
++ else if (dataRate <= 9600)
++ clk_div = 0x1B;
++ else
++ clk_div = 0x12;
++ value = ReadReg(state, 0xC2);
++ value &= 0xc0;
++ value |= clk_div;
++ WriteReg(state, 0xC2, value);
++ return 0;
++}
++
++static int M88DC2000SetTxMode(struct dvbsky_m88dc2800_state *state, u8 inverted,
++ u8 j83)
++{
++ u8 value = 0;
++ if (inverted)
++ value |= 0x08; /* spectrum inverted */
++ if (j83)
++ value |= 0x01; /* J83C */
++ WriteReg(state, 0x83, value);
++ return 0;
++}
++
++static int M88DC2000SoftReset(struct dvbsky_m88dc2800_state *state)
++{
++ WriteReg(state, 0x80, 0x01);
++ WriteReg(state, 0x82, 0x00);
++ msleep(1);
++ WriteReg(state, 0x80, 0x00);
++ return 0;
++}
++
++static int M88DC2000SetSym(struct dvbsky_m88dc2800_state *state, u32 sym, u32 xtal)
++{
++ u8 value;
++ u8 reg6FH, reg12H;
++ u64 fValue;
++ u32 dwValue;
++
++ printk(KERN_INFO "%s, sym=%d, xtal=%d\n", __func__, sym, xtal);
++ fValue = 4294967296 * (sym + 10);
++ do_div(fValue, xtal);
++
++ /* fValue = 4294967296 * (sym + 10) / xtal; */
++ dwValue = (u32) fValue;
++ printk(KERN_INFO "%s, fvalue1=%x\n", __func__, dwValue);
++ WriteReg(state, 0x58, (u8) ((dwValue >> 24) & 0xff));
++ WriteReg(state, 0x57, (u8) ((dwValue >> 16) & 0xff));
++ WriteReg(state, 0x56, (u8) ((dwValue >> 8) & 0xff));
++ WriteReg(state, 0x55, (u8) ((dwValue >> 0) & 0xff));
++
++ /* fValue = 2048 * xtal / sym; */
++ fValue = 2048 * xtal;
++ do_div(fValue, sym);
++ dwValue = (u32) fValue;
++ printk(KERN_INFO "%s, fvalue2=%x\n", __func__, dwValue);
++ WriteReg(state, 0x5D, (u8) ((dwValue >> 8) & 0xff));
++ WriteReg(state, 0x5C, (u8) ((dwValue >> 0) & 0xff));
++ value = ReadReg(state, 0x5A);
++ if (((dwValue >> 16) & 0x0001) == 0)
++ value &= 0x7F;
++ else
++ value |= 0x80;
++ WriteReg(state, 0x5A, value);
++ value = ReadReg(state, 0x89);
++ if (sym <= 1800)
++ value |= 0x01;
++ else
++ value &= 0xFE;
++ WriteReg(state, 0x89, value);
++ if (sym >= 6700) {
++ reg6FH = 0x0D;
++ reg12H = 0x30;
++ } else if (sym >= 4000) {
++ fValue = 22 * 4096 / sym;
++ reg6FH = (u8) fValue;
++ reg12H = 0x30;
++ } else if (sym >= 2000) {
++ fValue = 14 * 4096 / sym;
++ reg6FH = (u8) fValue;
++ reg12H = 0x20;
++ } else {
++ fValue = 7 * 4096 / sym;
++ reg6FH = (u8) fValue;
++ reg12H = 0x10;
++ }
++ WriteReg(state, 0x6F, reg6FH);
++ WriteReg(state, 0x12, reg12H);
++ if (((ReadReg(state, 0xE3) & 0x80) == 0x80)
++ && ((ReadReg(state, 0xE4) & 0x80) == 0x80)) {
++ if (sym < 3000) {
++ WriteReg(state, 0x6C, 0x16);
++ WriteReg(state, 0x6D, 0x10);
++ WriteReg(state, 0x6E, 0x18);
++ } else {
++ WriteReg(state, 0x6C, 0x14);
++ WriteReg(state, 0x6D, 0x0E);
++ WriteReg(state, 0x6E, 0x36);
++ }
++ } else {
++ WriteReg(state, 0x6C, 0x16);
++ WriteReg(state, 0x6D, 0x10);
++ WriteReg(state, 0x6E, 0x18);
++ }
++ return 0;
++}
++
++static int M88DC2000SetQAM(struct dvbsky_m88dc2800_state *state, u16 qam)
++{
++ u8 reg00H, reg4AH, regC2H, reg44H, reg4CH, reg4DH, reg74H, value;
++ u8 reg8BH, reg8EH;
++ printk(KERN_INFO "%s, qam=%d\n", __func__, qam);
++ regC2H = ReadReg(state, 0xC2);
++ regC2H &= 0xF8;
++ switch (qam) {
++ case 16: /* 16 QAM */
++ reg00H = 0x08;
++ reg4AH = 0x0F;
++ regC2H |= 0x02;
++ reg44H = 0xAA;
++ reg4CH = 0x0C;
++ reg4DH = 0xF7;
++ reg74H = 0x0E;
++ if (((ReadReg(state, 0xE3) & 0x80) == 0x80)
++ && ((ReadReg(state, 0xE4) & 0x80) == 0x80)) {
++ reg8BH = 0x5A;
++ reg8EH = 0xBD;
++ } else {
++ reg8BH = 0x5B;
++ reg8EH = 0x9D;
++ }
++ WriteReg(state, 0x6E, 0x18);
++ break;
++ case 32: /* 32 QAM */
++ reg00H = 0x18;
++ reg4AH = 0xFB;
++ regC2H |= 0x02;
++ reg44H = 0xAA;
++ reg4CH = 0x0C;
++ reg4DH = 0xF7;
++ reg74H = 0x0E;
++ if (((ReadReg(state, 0xE3) & 0x80) == 0x80)
++ && ((ReadReg(state, 0xE4) & 0x80) == 0x80)) {
++ reg8BH = 0x5A;
++ reg8EH = 0xBD;
++ } else {
++ reg8BH = 0x5B;
++ reg8EH = 0x9D;
++ }
++ WriteReg(state, 0x6E, 0x18);
++ break;
++ case 64: /* 64 QAM */
++ reg00H = 0x48;
++ reg4AH = 0xCD;
++ regC2H |= 0x02;
++ reg44H = 0xAA;
++ reg4CH = 0x0C;
++ reg4DH = 0xF7;
++ reg74H = 0x0E;
++ if (((ReadReg(state, 0xE3) & 0x80) == 0x80)
++ && ((ReadReg(state, 0xE4) & 0x80) == 0x80)) {
++ reg8BH = 0x5A;
++ reg8EH = 0xBD;
++ } else {
++ reg8BH = 0x5B;
++ reg8EH = 0x9D;
++ }
++ break;
++ case 128: /* 128 QAM */
++ reg00H = 0x28;
++ reg4AH = 0xFF;
++ regC2H |= 0x02;
++ reg44H = 0xA9;
++ reg4CH = 0x08;
++ reg4DH = 0xF5;
++ reg74H = 0x0E;
++ reg8BH = 0x5B;
++ reg8EH = 0x9D;
++ break;
++ case 256: /* 256 QAM */
++ reg00H = 0x38;
++ reg4AH = 0xCD;
++ if (((ReadReg(state, 0xE3) & 0x80) == 0x80)
++ && ((ReadReg(state, 0xE4) & 0x80) == 0x80)) {
++ regC2H |= 0x02;
++ } else {
++ regC2H |= 0x01;
++ }
++ reg44H = 0xA9;
++ reg4CH = 0x08;
++ reg4DH = 0xF5;
++ reg74H = 0x0E;
++ reg8BH = 0x5B;
++ reg8EH = 0x9D;
++ break;
++ default: /* 64 QAM */
++ reg00H = 0x48;
++ reg4AH = 0xCD;
++ regC2H |= 0x02;
++ reg44H = 0xAA;
++ reg4CH = 0x0C;
++ reg4DH = 0xF7;
++ reg74H = 0x0E;
++ if (((ReadReg(state, 0xE3) & 0x80) == 0x80)
++ && ((ReadReg(state, 0xE4) & 0x80) == 0x80)) {
++ reg8BH = 0x5A;
++ reg8EH = 0xBD;
++ } else {
++ reg8BH = 0x5B;
++ reg8EH = 0x9D;
++ }
++ break;
++ }
++ WriteReg(state, 0x00, reg00H);
++ value = ReadReg(state, 0x88);
++ value |= 0x08;
++ WriteReg(state, 0x88, value);
++ WriteReg(state, 0x4B, 0xFF);
++ WriteReg(state, 0x4A, reg4AH);
++ value &= 0xF7;
++ WriteReg(state, 0x88, value);
++ WriteReg(state, 0xC2, regC2H);
++ WriteReg(state, 0x44, reg44H);
++ WriteReg(state, 0x4C, reg4CH);
++ WriteReg(state, 0x4D, reg4DH);
++ WriteReg(state, 0x74, reg74H);
++ WriteReg(state, 0x8B, reg8BH);
++ WriteReg(state, 0x8E, reg8EH);
++ return 0;
++}
++
++static int M88DC2000WriteTuner_TC2800(struct dvbsky_m88dc2800_state *state,
++ u32 freq_KHz)
++{
++ printk(KERN_INFO "%s, freq=%d KHz\n", __func__, freq_KHz);
++ return mt_fe_tn_set_freq_tc2800(state, freq_KHz);
++}
++
++static int dvbsky_m88dc2800_init(struct dvb_frontend *fe)
++{
++ dprintk("%s()\n", __func__);
++ return 0;
++}
++
++static int dvbsky_m88dc2800_set_parameters(struct dvb_frontend *fe)
++{
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ u8 is_annex_c, is_update;
++ u16 temp_qam;
++ s32 waiting_time;
++ struct dvbsky_m88dc2800_state *state = fe->demodulator_priv;
++
++ is_annex_c = c->delivery_system == SYS_DVBC_ANNEX_C ? 1 : 0;
++
++ switch (c->modulation) {
++ case QAM_16:
++ temp_qam = 16;
++ break;
++ case QAM_32:
++ temp_qam = 32;
++ break;
++ case QAM_128:
++ temp_qam = 128;
++ break;
++ case QAM_256:
++ temp_qam = 256;
++ break;
++ default: /* QAM_64 */
++ temp_qam = 64;
++ break;
++ }
++
++ state->inverted = c->inversion == INVERSION_ON ? 1 : 0;
++
++ printk(KERN_INFO
++ "m88dc2800: state, freq=%d qam=%d sym=%d inverted=%d xtal=%d\n",
++ state->freq, state->qam, state->sym, state->inverted,
++ state->xtal);
++ printk(KERN_INFO
++ "m88dc2800: set frequency to %d qam=%d symrate=%d annex-c=%d\n",
++ c->frequency, temp_qam, c->symbol_rate, is_annex_c);
++
++ is_update = 0;
++ WriteReg(state, 0x80, 0x01);
++ if (c->frequency != state->freq) {
++ M88DC2000WriteTuner_TC2800(state, c->frequency / 1000);
++ state->freq = c->frequency;
++ }
++ if (c->symbol_rate != state->sym) {
++ M88DC2000SetSym(state, c->symbol_rate / 1000, state->xtal);
++ state->sym = c->symbol_rate;
++ is_update = 1;
++ }
++ if (temp_qam != state->qam) {
++ M88DC2000SetQAM(state, temp_qam);
++ state->qam = temp_qam;
++ is_update = 1;
++ }
++
++ if (is_update != 0) {
++ if (state->config->ts_mode == 3)
++ M88DC2000AutoTSClock_C(state, state->sym / 1000,
++ temp_qam);
++ else
++ M88DC2000AutoTSClock_P(state, state->sym / 1000,
++ temp_qam);
++ }
++
++ M88DC2000SetTxMode(state, state->inverted, is_annex_c);
++ M88DC2000SoftReset(state);
++ if (((ReadReg(state, 0xE3) & 0x80) == 0x80)
++ && ((ReadReg(state, 0xE4) & 0x80) == 0x80))
++ waiting_time = 800;
++ else
++ waiting_time = 500;
++ while (waiting_time > 0) {
++ msleep(50);
++ waiting_time -= 50;
++ if (M88DC2000GetLock(state))
++ return 0;
++ }
++
++ state->inverted = (state->inverted != 0) ? 0 : 1;
++ M88DC2000SetTxMode(state, state->inverted, is_annex_c);
++ M88DC2000SoftReset(state);
++ if (((ReadReg(state, 0xE3) & 0x80) == 0x80) &&
++ ((ReadReg(state, 0xE4) & 0x80) == 0x80))
++ waiting_time = 800;
++ else
++ waiting_time = 500;
++ while (waiting_time > 0) {
++ msleep(50);
++ waiting_time -= 50;
++ if (M88DC2000GetLock(state))
++ return 0;
++ }
++ return 0;
++}
++
++static int dvbsky_m88dc2800_read_status(struct dvb_frontend *fe,
++ fe_status_t * status)
++{
++ struct dvbsky_m88dc2800_state *state = fe->demodulator_priv;
++ *status = 0;
++
++ if (M88DC2000GetLock(state)) {
++ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER
++ |FE_HAS_SYNC | FE_HAS_VITERBI | FE_HAS_LOCK;
++ }
++ return 0;
++}
++
++static int dvbsky_m88dc2800_read_ber(struct dvb_frontend *fe, u32 * ber)
++{
++ struct dvbsky_m88dc2800_state *state = fe->demodulator_priv;
++ u16 tmp;
++
++ if (M88DC2000GetLock(state) == 0) {
++ state->ber = 0;
++ } else if ((ReadReg(state, 0xA0) & 0x80) != 0x80) {
++ tmp = ReadReg(state, 0xA2) << 8;
++ tmp += ReadReg(state, 0xA1);
++ state->ber = tmp;
++ WriteReg(state, 0xA0, 0x05);
++ WriteReg(state, 0xA0, 0x85);
++ }
++ *ber = state->ber;
++ return 0;
++}
++
++static int dvbsky_m88dc2800_read_signal_strength(struct dvb_frontend *fe,
++ u16 * strength)
++{
++ struct dvbsky_m88dc2800_state *state = fe->demodulator_priv;
++ s16 tuner_strength;
++
++ tuner_strength = mt_fe_tn_get_signal_strength_tc2800(state);
++ *strength = tuner_strength < -107 ? 0 : tuner_strength + 107;
++
++ return 0;
++}
++
++static int dvbsky_m88dc2800_read_snr(struct dvb_frontend *fe, u16 * snr)
++{
++ static const u32 mes_log[] = {
++ 0, 3010, 4771, 6021, 6990, 7781, 8451, 9031, 9542, 10000,
++ 10414, 10792, 11139, 11461, 11761, 12041, 12304, 12553, 12788,
++ 13010, 13222, 13424, 13617, 13802, 13979, 14150, 14314, 14472,
++ 14624, 14771, 14914, 15052, 15185, 15315, 15441, 15563, 15682,
++ 15798, 15911, 16021, 16128, 16232, 16335, 16435, 16532, 16628,
++ 16721, 16812, 16902, 16990, 17076, 17160, 17243, 17324, 17404,
++ 17482, 17559, 17634, 17709, 17782, 17853, 17924, 17993, 18062,
++ 18129, 18195, 18261, 18325, 18388, 18451, 18513, 18573, 18633,
++ 18692, 18751, 18808, 18865, 18921, 18976, 19031
++ };
++ struct dvbsky_m88dc2800_state *state = fe->demodulator_priv;
++ u8 i;
++ u32 _snr, mse;
++
++ if ((ReadReg(state, 0x91) & 0x23) != 0x03) {
++ *snr = 0;
++ return 0;
++ }
++ mse = 0;
++ for (i = 0; i < 30; i++) {
++ mse += (ReadReg(state, 0x08) << 8) + ReadReg(state, 0x07);
++ }
++ mse /= 30;
++ if (mse > 80)
++ mse = 80;
++ switch (state->qam) {
++ case 16:
++ _snr = 34080;
++ break; /* 16QAM */
++ case 32:
++ _snr = 37600;
++ break; /* 32QAM */
++ case 64:
++ _snr = 40310;
++ break; /* 64QAM */
++ case 128:
++ _snr = 43720;
++ break; /* 128QAM */
++ case 256:
++ _snr = 46390;
++ break; /* 256QAM */
++ default:
++ _snr = 40310;
++ break;
++ }
++ _snr -= mes_log[mse - 1]; /* C - 10*log10(MSE) */
++ _snr /= 1000;
++ if (_snr > 0xff)
++ _snr = 0xff;
++ *snr = _snr;
++ return 0;
++}
++
++static int dvbsky_m88dc2800_read_ucblocks(struct dvb_frontend *fe, u32 * ucblocks)
++{
++ struct dvbsky_m88dc2800_state *state = fe->demodulator_priv;
++ u8 u8Value;
++
++ u8Value = ReadReg(state, 0xdf);
++ u8Value |= 0x02; /* Hold */
++ WriteReg(state, 0xdf, u8Value);
++
++ *ucblocks = ReadReg(state, 0xd5);
++ *ucblocks = (*ucblocks << 8) | ReadReg(state, 0xd4);
++
++ u8Value &= 0xfe; /* Clear */
++ WriteReg(state, 0xdf, u8Value);
++ u8Value &= 0xfc; /* Update */
++ u8Value |= 0x01;
++ WriteReg(state, 0xdf, u8Value);
++
++ return 0;
++}
++
++static int dvbsky_m88dc2800_sleep(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88dc2800_state *state = fe->demodulator_priv;
++
++ mt_fe_tn_sleep_tc2800(state);
++ state->freq = 0;
++
++ return 0;
++}
++
++static void dvbsky_m88dc2800_release(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88dc2800_state *state = fe->demodulator_priv;
++ kfree(state);
++}
++
++static struct dvb_frontend_ops dvbsky_m88dc2800_ops;
++
++struct dvb_frontend *dvbsky_m88dc2800_attach(const struct dvbsky_m88dc2800_config
++ *config, struct i2c_adapter *i2c)
++{
++ struct dvbsky_m88dc2800_state *state = NULL;
++
++ /* allocate memory for the internal state */
++ state = kzalloc(sizeof(struct dvbsky_m88dc2800_state), GFP_KERNEL);
++ if (state == NULL)
++ goto error;
++
++ /* setup the state */
++ state->config = config;
++ state->i2c = i2c;
++ state->xtal = 28800;
++
++ WriteReg(state, 0x80, 0x01);
++ M88DC2000RegInitial_TC2800(state);
++ M88DC2000SetTsType(state, state->config->ts_mode);
++ mt_fe_tn_init_tc2800(state);
++
++ /* create dvb_frontend */
++ memcpy(&state->frontend.ops, &dvbsky_m88dc2800_ops,
++ sizeof(struct dvb_frontend_ops));
++ state->frontend.demodulator_priv = state;
++ return &state->frontend;
++
++ error:
++ kfree(state);
++ return NULL;
++}
++
++EXPORT_SYMBOL(dvbsky_m88dc2800_attach);
++
++static struct dvb_frontend_ops dvbsky_m88dc2800_ops = {
++ .delsys = {SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C},
++ .info = {
++ .name = "Montage M88DC2800 DVB-C",
++ .frequency_stepsize = 62500,
++ .frequency_min = 48000000,
++ .frequency_max = 870000000,
++ .symbol_rate_min = 870000,
++ .symbol_rate_max = 9000000,
++ .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
++ FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO
++ },
++ .release = dvbsky_m88dc2800_release,
++ .init = dvbsky_m88dc2800_init,
++ .sleep = dvbsky_m88dc2800_sleep,
++ .set_frontend = dvbsky_m88dc2800_set_parameters,
++ .read_status = dvbsky_m88dc2800_read_status,
++ .read_ber = dvbsky_m88dc2800_read_ber,
++ .read_signal_strength = dvbsky_m88dc2800_read_signal_strength,
++ .read_snr = dvbsky_m88dc2800_read_snr,
++ .read_ucblocks = dvbsky_m88dc2800_read_ucblocks,
++};
++
++MODULE_DESCRIPTION("Montage DVB-C demodulator driver");
++MODULE_AUTHOR("Max Nibble <nibble.max@gmail.com>");
++MODULE_LICENSE("GPL");
++MODULE_VERSION("1.00");
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88dc2800.h linux-openelec/drivers/media/dvb-frontends/dvbsky_m88dc2800.h
+--- linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88dc2800.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/dvbsky_m88dc2800.h 2015-07-24 18:03:30.116842002 -0500
+@@ -0,0 +1,44 @@
++/*
++ M88DC2800/M88TC2800 - DVB-C demodulator and tuner from Montage
++
++ Copyright (C) 2012 Max Nibble <nibble.max@gmail.com>
++ Copyright (C) 2011 Montage Technology - www.montage-tech.com
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*/
++
++#ifndef dvbsky_m88dc2800_H
++#define dvbsky_m88dc2800_H
++
++#include <linux/kconfig.h>
++#include <linux/dvb/frontend.h>
++
++struct dvbsky_m88dc2800_config {
++ u8 demod_address;
++ u8 ts_mode;
++};
++
++#if IS_ENABLED(CONFIG_DVB_DVBSKY_M88DC2800)
++extern struct dvb_frontend* dvbsky_m88dc2800_attach(const struct dvbsky_m88dc2800_config* config,
++ struct i2c_adapter* i2c);
++#else
++static inline struct dvb_frontend* dvbsky_m88dc2800_attach(const struct dvbsky_m88dc2800_config* config,
++ struct i2c_adapter* i2c)
++{
++ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
++ return NULL;
++}
++#endif /* CONFIG_DVB_DVBSKY_M88DC2800 */
++#endif /* dvbsky_m88dc2800_H */
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88ds3103.c linux-openelec/drivers/media/dvb-frontends/dvbsky_m88ds3103.c
+--- linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88ds3103.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/dvbsky_m88ds3103.c 2015-07-24 18:03:30.120842002 -0500
+@@ -0,0 +1,1707 @@
++/*
++ Montage Technology M88DS3103/M88TS2022 - DVBS/S2 Satellite demod/tuner driver
++
++ Copyright (C) 2011 Max nibble<nibble.max@gmail.com>
++ Copyright (C) 2010 Montage Technology<www.montage-tech.com>
++ Copyright (C) 2009 Konstantin Dimitrov.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/firmware.h>
++
++#include "dvb_frontend.h"
++#include "dvbsky_m88ds3103.h"
++#include "dvbsky_m88ds3103_priv.h"
++
++static int debug;
++module_param(debug, int, 0644);
++MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
++
++#define dprintk(args...) \
++ do { \
++ if (debug) \
++ printk(KERN_INFO "m88ds3103: " args); \
++ } while (0)
++
++/*demod register operations.*/
++static int dvbsky_m88ds3103_writereg(struct dvbsky_m88ds3103_state *state, int reg, int data)
++{
++ u8 buf[] = { reg, data };
++ struct i2c_msg msg = { .addr = state->config->demod_address,
++ .flags = 0, .buf = buf, .len = 2 };
++ int err;
++
++ if (debug > 1)
++ printk("m88ds3103: %s: write reg 0x%02x, value 0x%02x\n",
++ __func__, reg, data);
++
++ err = i2c_transfer(state->i2c, &msg, 1);
++ if (err != 1) {
++ printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
++ " value == 0x%02x)\n", __func__, err, reg, data);
++ return -EREMOTEIO;
++ }
++ return 0;
++}
++
++static int dvbsky_m88ds3103_readreg(struct dvbsky_m88ds3103_state *state, u8 reg)
++{
++ int ret;
++ u8 b0[] = { reg };
++ u8 b1[] = { 0 };
++ struct i2c_msg msg[] = {
++ { .addr = state->config->demod_address, .flags = 0,
++ .buf = b0, .len = 1 },
++ { .addr = state->config->demod_address, .flags = I2C_M_RD,
++ .buf = b1, .len = 1 }
++ };
++ ret = i2c_transfer(state->i2c, msg, 2);
++
++ if (ret != 2) {
++ printk(KERN_ERR "%s: reg=0x%x (error=%d)\n",
++ __func__, reg, ret);
++ return ret;
++ }
++
++ if (debug > 1)
++ printk(KERN_INFO "m88ds3103: read reg 0x%02x, value 0x%02x\n",
++ reg, b1[0]);
++
++ return b1[0];
++}
++
++/*tuner register operations.*/
++static int dvbsky_m88ds3103_tuner_writereg(struct dvbsky_m88ds3103_state *state, int reg, int data)
++{
++ u8 buf[] = { reg, data };
++ struct i2c_msg msg = { .addr = 0x60,
++ .flags = 0, .buf = buf, .len = 2 };
++ int err;
++
++ dvbsky_m88ds3103_writereg(state, 0x03, 0x11);
++ err = i2c_transfer(state->i2c, &msg, 1);
++
++ if (err != 1) {
++ printk("%s: writereg error(err == %i, reg == 0x%02x,"
++ " value == 0x%02x)\n", __func__, err, reg, data);
++ return -EREMOTEIO;
++ }
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_tuner_readreg(struct dvbsky_m88ds3103_state *state, u8 reg)
++{
++ int ret;
++ u8 b0[] = { reg };
++ u8 b1[] = { 0 };
++ struct i2c_msg msg[] = {
++ { .addr = 0x60, .flags = 0,
++ .buf = b0, .len = 1 },
++ { .addr = 0x60, .flags = I2C_M_RD,
++ .buf = b1, .len = 1 }
++ };
++
++ dvbsky_m88ds3103_writereg(state, 0x03, 0x11);
++ ret = i2c_transfer(state->i2c, msg, 2);
++
++ if (ret != 2) {
++ printk(KERN_ERR "%s: reg=0x%x(error=%d)\n", __func__, reg, ret);
++ return ret;
++ }
++
++ return b1[0];
++}
++
++/* Bulk demod I2C write, for firmware download. */
++static int dvbsky_m88ds3103_writeregN(struct dvbsky_m88ds3103_state *state, int reg,
++ const u8 *data, u16 len)
++{
++ int ret = -EREMOTEIO;
++ struct i2c_msg msg;
++ u8 *buf;
++
++ buf = kmalloc(len + 1, GFP_KERNEL);
++ if (buf == NULL) {
++ printk("Unable to kmalloc\n");
++ ret = -ENOMEM;
++ goto error;
++ }
++
++ *(buf) = reg;
++ memcpy(buf + 1, data, len);
++
++ msg.addr = state->config->demod_address;
++ msg.flags = 0;
++ msg.buf = buf;
++ msg.len = len + 1;
++
++ if (debug > 1)
++ printk(KERN_INFO "m88ds3103: %s: write regN 0x%02x, len = %d\n",
++ __func__, reg, len);
++
++ ret = i2c_transfer(state->i2c, &msg, 1);
++ if (ret != 1) {
++ printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x\n",
++ __func__, ret, reg);
++ ret = -EREMOTEIO;
++ }
++
++error:
++ kfree(buf);
++
++ return ret;
++}
++
++static int dvbsky_m88ds3103_load_firmware(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ const struct firmware *fw;
++ int i, ret = 0;
++
++ dprintk("%s()\n", __func__);
++
++ if (state->skip_fw_load)
++ return 0;
++ /* Load firmware */
++ /* request the firmware, this will block until someone uploads it */
++ if(state->demod_id == DS3000_ID){
++ printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__,
++ DS3000_DEFAULT_FIRMWARE);
++ ret = request_firmware(&fw, DS3000_DEFAULT_FIRMWARE,
++ state->i2c->dev.parent);
++ }else if(state->demod_id == DS3103_ID){
++ printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__,
++ DS3103_DEFAULT_FIRMWARE);
++ ret = request_firmware(&fw, DS3103_DEFAULT_FIRMWARE,
++ state->i2c->dev.parent);
++ }
++
++ printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__);
++ if (ret) {
++ printk(KERN_ERR "%s: No firmware uploaded (timeout or file not "
++ "found?)\n", __func__);
++ return ret;
++ }
++
++ /* Make sure we don't recurse back through here during loading */
++ state->skip_fw_load = 1;
++
++ dprintk("Firmware is %zu bytes (%02x %02x .. %02x %02x)\n",
++ fw->size,
++ fw->data[0],
++ fw->data[1],
++ fw->data[fw->size - 2],
++ fw->data[fw->size - 1]);
++
++ /* stop internal mcu. */
++ dvbsky_m88ds3103_writereg(state, 0xb2, 0x01);
++ /* split firmware to download.*/
++ for(i = 0; i < FW_DOWN_LOOP; i++){
++ ret = dvbsky_m88ds3103_writeregN(state, 0xb0, &(fw->data[FW_DOWN_SIZE*i]), FW_DOWN_SIZE);
++ if(ret != 1) break;
++ }
++ /* start internal mcu. */
++ if(ret == 1)
++ dvbsky_m88ds3103_writereg(state, 0xb2, 0x00);
++
++ release_firmware(fw);
++
++ dprintk("%s: Firmware upload %s\n", __func__,
++ ret == 1 ? "complete" : "failed");
++
++ if(ret == 1) ret = 0;
++
++ /* Ensure firmware is always loaded if required */
++ state->skip_fw_load = 0;
++
++ return ret;
++}
++
++
++static int dvbsky_m88ds3103_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u8 data;
++
++ dprintk("%s(%d)\n", __func__, voltage);
++
++ dprintk("m88ds3103:pin_ctrl = (%02x)\n", state->config->pin_ctrl);
++
++ if(state->config->set_voltage)
++ state->config->set_voltage(fe, voltage);
++
++ data = dvbsky_m88ds3103_readreg(state, 0xa2);
++
++ if(state->config->pin_ctrl & 0x80){ /*If control pin is assigned.*/
++ data &= ~0x03; /* bit0 V/H, bit1 off/on */
++ if(state->config->pin_ctrl & 0x02)
++ data |= 0x02;
++
++ switch (voltage) {
++ case SEC_VOLTAGE_18:
++ if((state->config->pin_ctrl & 0x01) == 0)
++ data |= 0x01;
++ break;
++ case SEC_VOLTAGE_13:
++ if(state->config->pin_ctrl & 0x01)
++ data |= 0x01;
++ break;
++ case SEC_VOLTAGE_OFF:
++ if(state->config->pin_ctrl & 0x02)
++ data &= ~0x02;
++ else
++ data |= 0x02;
++ break;
++ }
++ }
++
++ dvbsky_m88ds3103_writereg(state, 0xa2, data);
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_read_status(struct dvb_frontend *fe, fe_status_t* status)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ int lock = 0;
++
++ *status = 0;
++
++ switch (state->delivery_system){
++ case SYS_DVBS:
++ lock = dvbsky_m88ds3103_readreg(state, 0xd1);
++ dprintk("%s: SYS_DVBS status=%x.\n", __func__, lock);
++
++ if ((lock & 0x07) == 0x07){
++ /*if((dvbsky_m88ds3103_readreg(state, 0x0d) & 0x07) == 0x07)*/
++ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER
++ | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
++
++ }
++ break;
++ case SYS_DVBS2:
++ lock = dvbsky_m88ds3103_readreg(state, 0x0d);
++ dprintk("%s: SYS_DVBS2 status=%x.\n", __func__, lock);
++
++ if ((lock & 0x8f) == 0x8f)
++ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER
++ | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
++
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_read_ber(struct dvb_frontend *fe, u32* ber)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u8 tmp1, tmp2, tmp3;
++ u32 ldpc_frame_cnt, pre_err_packags, code_rate_fac = 0;
++
++ dprintk("%s()\n", __func__);
++
++ switch (state->delivery_system) {
++ case SYS_DVBS:
++ dvbsky_m88ds3103_writereg(state, 0xf9, 0x04);
++ tmp3 = dvbsky_m88ds3103_readreg(state, 0xf8);
++ if ((tmp3&0x10) == 0){
++ tmp1 = dvbsky_m88ds3103_readreg(state, 0xf7);
++ tmp2 = dvbsky_m88ds3103_readreg(state, 0xf6);
++ tmp3 |= 0x10;
++ dvbsky_m88ds3103_writereg(state, 0xf8, tmp3);
++ state->preBer = (tmp1<<8) | tmp2;
++ }
++ break;
++ case SYS_DVBS2:
++ tmp1 = dvbsky_m88ds3103_readreg(state, 0x7e) & 0x0f;
++ switch(tmp1){
++ case 0: code_rate_fac = 16008 - 80; break;
++ case 1: code_rate_fac = 21408 - 80; break;
++ case 2: code_rate_fac = 25728 - 80; break;
++ case 3: code_rate_fac = 32208 - 80; break;
++ case 4: code_rate_fac = 38688 - 80; break;
++ case 5: code_rate_fac = 43040 - 80; break;
++ case 6: code_rate_fac = 48408 - 80; break;
++ case 7: code_rate_fac = 51648 - 80; break;
++ case 8: code_rate_fac = 53840 - 80; break;
++ case 9: code_rate_fac = 57472 - 80; break;
++ case 10: code_rate_fac = 58192 - 80; break;
++ }
++
++ tmp1 = dvbsky_m88ds3103_readreg(state, 0xd7) & 0xff;
++ tmp2 = dvbsky_m88ds3103_readreg(state, 0xd6) & 0xff;
++ tmp3 = dvbsky_m88ds3103_readreg(state, 0xd5) & 0xff;
++ ldpc_frame_cnt = (tmp1 << 16) | (tmp2 << 8) | tmp3;
++
++ tmp1 = dvbsky_m88ds3103_readreg(state, 0xf8) & 0xff;
++ tmp2 = dvbsky_m88ds3103_readreg(state, 0xf7) & 0xff;
++ pre_err_packags = tmp1<<8 | tmp2;
++
++ if (ldpc_frame_cnt > 1000){
++ dvbsky_m88ds3103_writereg(state, 0xd1, 0x01);
++ dvbsky_m88ds3103_writereg(state, 0xf9, 0x01);
++ dvbsky_m88ds3103_writereg(state, 0xf9, 0x00);
++ dvbsky_m88ds3103_writereg(state, 0xd1, 0x00);
++ state->preBer = pre_err_packags;
++ }
++ break;
++ default:
++ break;
++ }
++ *ber = state->preBer;
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_read_signal_strength(struct dvb_frontend *fe,
++ u16 *signal_strength)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u16 gain;
++ u8 gain1, gain2, gain3 = 0;
++
++ dprintk("%s()\n", __func__);
++
++ gain1 = dvbsky_m88ds3103_tuner_readreg(state, 0x3d) & 0x1f;
++ dprintk("%s: gain1 = 0x%02x \n", __func__, gain1);
++
++ if (gain1 > 15) gain1 = 15;
++ gain2 = dvbsky_m88ds3103_tuner_readreg(state, 0x21) & 0x1f;
++ dprintk("%s: gain2 = 0x%02x \n", __func__, gain2);
++
++ if(state->tuner_id == TS2022_ID){
++ gain3 = (dvbsky_m88ds3103_tuner_readreg(state, 0x66)>>3) & 0x07;
++ dprintk("%s: gain3 = 0x%02x \n", __func__, gain3);
++
++ if (gain2 > 16) gain2 = 16;
++ if (gain2 < 2) gain2 = 2;
++ if (gain3 > 6) gain3 = 6;
++ }else{
++ if (gain2 > 13) gain2 = 13;
++ gain3 = 0;
++ }
++
++ gain = gain1*23 + gain2*35 + gain3*29;
++ *signal_strength = 60000 - gain*55;
++
++ return 0;
++}
++
++
++static int dvbsky_m88ds3103_read_snr(struct dvb_frontend *fe, u16 *p_snr)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u8 val, npow1, npow2, spow1, cnt;
++ u16 tmp, snr;
++ u32 npow, spow, snr_total;
++ static const u16 mes_log10[] ={
++ 0, 3010, 4771, 6021, 6990, 7781, 8451, 9031, 9542, 10000,
++ 10414, 10792, 11139, 11461, 11761, 12041, 12304, 12553, 12788, 13010,
++ 13222, 13424, 13617, 13802, 13979, 14150, 14314, 14472, 14624, 14771,
++ 14914, 15052, 15185, 15315, 15441, 15563, 15682, 15798, 15911, 16021,
++ 16128, 16232, 16335, 16435, 16532, 16628, 16721, 16812, 16902, 16990,
++ 17076, 17160, 17243, 17324, 17404, 17482, 17559, 17634, 17709, 17782,
++ 17853, 17924, 17993, 18062, 18129, 18195, 18261, 18325, 18388, 18451,
++ 18513, 18573, 18633, 18692, 18751, 18808, 18865, 18921, 18976, 19031
++ };
++ static const u16 mes_loge[] ={
++ 0, 6931, 10986, 13863, 16094, 17918, 19459, 20794, 21972, 23026,
++ 23979, 24849, 25649, 26391, 27081, 27726, 28332, 28904, 29444, 29957,
++ 30445, 30910, 31355, 31781, 32189, 32581, 32958, 33322, 33673, 34012,
++ 34340, 34657,
++ };
++
++ dprintk("%s()\n", __func__);
++
++ snr = 0;
++
++ switch (state->delivery_system){
++ case SYS_DVBS:
++ cnt = 10; snr_total = 0;
++ while(cnt > 0){
++ val = dvbsky_m88ds3103_readreg(state, 0xff);
++ snr_total += val;
++ cnt--;
++ }
++ tmp = (u16)(snr_total/80);
++ if(tmp > 0){
++ if (tmp > 32) tmp = 32;
++ snr = (mes_loge[tmp - 1] * 100) / 45;
++ }else{
++ snr = 0;
++ }
++ break;
++ case SYS_DVBS2:
++ cnt = 10; npow = 0; spow = 0;
++ while(cnt >0){
++ npow1 = dvbsky_m88ds3103_readreg(state, 0x8c) & 0xff;
++ npow2 = dvbsky_m88ds3103_readreg(state, 0x8d) & 0xff;
++ npow += (((npow1 & 0x3f) + (u16)(npow2 << 6)) >> 2);
++
++ spow1 = dvbsky_m88ds3103_readreg(state, 0x8e) & 0xff;
++ spow += ((spow1 * spow1) >> 1);
++ cnt--;
++ }
++ npow /= 10; spow /= 10;
++ if(spow == 0){
++ snr = 0;
++ }else if(npow == 0){
++ snr = 19;
++ }else{
++ if(spow > npow){
++ tmp = (u16)(spow / npow);
++ if (tmp > 80) tmp = 80;
++ snr = mes_log10[tmp - 1]*3;
++ }else{
++ tmp = (u16)(npow / spow);
++ if (tmp > 80) tmp = 80;
++ snr = -(mes_log10[tmp - 1] / 1000);
++ }
++ }
++ break;
++ default:
++ break;
++ }
++ *p_snr = snr;
++
++ return 0;
++}
++
++
++static int dvbsky_m88ds3103_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u8 tmp1, tmp2, tmp3, data;
++
++ dprintk("%s()\n", __func__);
++
++ switch (state->delivery_system) {
++ case SYS_DVBS:
++ data = dvbsky_m88ds3103_readreg(state, 0xf8);
++ data |= 0x40;
++ dvbsky_m88ds3103_writereg(state, 0xf8, data);
++ tmp1 = dvbsky_m88ds3103_readreg(state, 0xf5);
++ tmp2 = dvbsky_m88ds3103_readreg(state, 0xf4);
++ *ucblocks = (tmp1 <<8) | tmp2;
++ data &= ~0x20;
++ dvbsky_m88ds3103_writereg(state, 0xf8, data);
++ data |= 0x20;
++ dvbsky_m88ds3103_writereg(state, 0xf8, data);
++ data &= ~0x40;
++ dvbsky_m88ds3103_writereg(state, 0xf8, data);
++ break;
++ case SYS_DVBS2:
++ tmp1 = dvbsky_m88ds3103_readreg(state, 0xda);
++ tmp2 = dvbsky_m88ds3103_readreg(state, 0xd9);
++ tmp3 = dvbsky_m88ds3103_readreg(state, 0xd8);
++ *ucblocks = (tmp1 <<16)|(tmp2 <<8)|tmp3;
++ data = dvbsky_m88ds3103_readreg(state, 0xd1);
++ data |= 0x01;
++ dvbsky_m88ds3103_writereg(state, 0xd1, data);
++ data &= ~0x01;
++ dvbsky_m88ds3103_writereg(state, 0xd1, data);
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++static int dvbsky_m88ds3103_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u8 data_a1, data_a2;
++
++ dprintk("%s(%d)\n", __func__, tone);
++ if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) {
++ printk(KERN_ERR "%s: Invalid, tone=%d\n", __func__, tone);
++ return -EINVAL;
++ }
++
++ data_a1 = dvbsky_m88ds3103_readreg(state, 0xa1);
++ data_a2 = dvbsky_m88ds3103_readreg(state, 0xa2);
++ if(state->demod_id == DS3103_ID)
++ data_a2 &= 0xdf; /* Normal mode */
++ switch (tone) {
++ case SEC_TONE_ON:
++ dprintk("%s: SEC_TONE_ON\n", __func__);
++ data_a1 |= 0x04;
++ data_a1 &= ~0x03;
++ data_a1 &= ~0x40;
++ data_a2 &= ~0xc0;
++ break;
++ case SEC_TONE_OFF:
++ dprintk("%s: SEC_TONE_OFF\n", __func__);
++ data_a2 &= ~0xc0;
++ data_a2 |= 0x80;
++ break;
++ }
++ dvbsky_m88ds3103_writereg(state, 0xa2, data_a2);
++ dvbsky_m88ds3103_writereg(state, 0xa1, data_a1);
++ return 0;
++}
++
++static int dvbsky_m88ds3103_send_diseqc_msg(struct dvb_frontend *fe,
++ struct dvb_diseqc_master_cmd *d)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ int i, ret = 0;
++ u8 tmp, time_out;
++
++ /* Dump DiSEqC message */
++ if (debug) {
++ printk(KERN_INFO "m88ds3103: %s(", __func__);
++ for (i = 0 ; i < d->msg_len ;) {
++ printk(KERN_INFO "0x%02x", d->msg[i]);
++ if (++i < d->msg_len)
++ printk(KERN_INFO ", ");
++ }
++ }
++
++ tmp = dvbsky_m88ds3103_readreg(state, 0xa2);
++ tmp &= ~0xc0;
++ if(state->demod_id == DS3103_ID)
++ tmp &= ~0x20;
++ dvbsky_m88ds3103_writereg(state, 0xa2, tmp);
++
++ for (i = 0; i < d->msg_len; i ++)
++ dvbsky_m88ds3103_writereg(state, (0xa3+i), d->msg[i]);
++
++ tmp = dvbsky_m88ds3103_readreg(state, 0xa1);
++ tmp &= ~0x38;
++ tmp &= ~0x40;
++ tmp |= ((d->msg_len-1) << 3) | 0x07;
++ tmp &= ~0x80;
++ dvbsky_m88ds3103_writereg(state, 0xa1, tmp);
++ /* 1.5 * 9 * 8 = 108ms */
++ time_out = 150;
++ while (time_out > 0){
++ msleep(10);
++ time_out -= 10;
++ tmp = dvbsky_m88ds3103_readreg(state, 0xa1);
++ if ((tmp & 0x40) == 0)
++ break;
++ }
++ if (time_out == 0){
++ tmp = dvbsky_m88ds3103_readreg(state, 0xa1);
++ tmp &= ~0x80;
++ tmp |= 0x40;
++ dvbsky_m88ds3103_writereg(state, 0xa1, tmp);
++ ret = 1;
++ }
++ tmp = dvbsky_m88ds3103_readreg(state, 0xa2);
++ tmp &= ~0xc0;
++ tmp |= 0x80;
++ dvbsky_m88ds3103_writereg(state, 0xa2, tmp);
++ return ret;
++}
++
++
++static int dvbsky_m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
++ fe_sec_mini_cmd_t burst)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u8 val, time_out;
++
++ dprintk("%s()\n", __func__);
++
++ val = dvbsky_m88ds3103_readreg(state, 0xa2);
++ val &= ~0xc0;
++ if(state->demod_id == DS3103_ID)
++ val &= 0xdf; /* Normal mode */
++ dvbsky_m88ds3103_writereg(state, 0xa2, val);
++ /* DiSEqC burst */
++ if (burst == SEC_MINI_B)
++ dvbsky_m88ds3103_writereg(state, 0xa1, 0x01);
++ else
++ dvbsky_m88ds3103_writereg(state, 0xa1, 0x02);
++
++ msleep(13);
++
++ time_out = 5;
++ do{
++ val = dvbsky_m88ds3103_readreg(state, 0xa1);
++ if ((val & 0x40) == 0)
++ break;
++ msleep(1);
++ time_out --;
++ } while (time_out > 0);
++
++ val = dvbsky_m88ds3103_readreg(state, 0xa2);
++ val &= ~0xc0;
++ val |= 0x80;
++ dvbsky_m88ds3103_writereg(state, 0xa2, val);
++
++ return 0;
++}
++
++static void dvbsky_m88ds3103_release(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++
++ dprintk("%s\n", __func__);
++ kfree(state);
++}
++
++static int dvbsky_m88ds3103_check_id(struct dvbsky_m88ds3103_state *state)
++{
++ int val_00, val_01;
++
++ /*check demod id*/
++ val_01 = dvbsky_m88ds3103_readreg(state, 0x01);
++ printk(KERN_INFO "DS3000 chip version: %x attached.\n", val_01);
++
++ if(val_01 == 0xD0)
++ state->demod_id = DS3103_ID;
++ else if(val_01 == 0xC0)
++ state->demod_id = DS3000_ID;
++ else
++ state->demod_id = UNKNOW_ID;
++
++ /*check tuner id*/
++ val_00 = dvbsky_m88ds3103_tuner_readreg(state, 0x00);
++ printk(KERN_INFO "TS202x chip version[1]: %x attached.\n", val_00);
++ val_00 &= 0x03;
++ if(val_00 == 0)
++ {
++ dvbsky_m88ds3103_tuner_writereg(state, 0x00, 0x01);
++ msleep(3);
++ }
++ dvbsky_m88ds3103_tuner_writereg(state, 0x00, 0x03);
++ msleep(5);
++
++ val_00 = dvbsky_m88ds3103_tuner_readreg(state, 0x00);
++ printk(KERN_INFO "TS202x chip version[2]: %x attached.\n", val_00);
++ val_00 &= 0xff;
++ if((val_00 == 0x01) || (val_00 == 0x41) || (val_00 == 0x81))
++ state->tuner_id = TS2020_ID;
++ else if(((val_00 & 0xc0)== 0xc0) || (val_00 == 0x83))
++ state->tuner_id = TS2022_ID;
++ else
++ state->tuner_id = UNKNOW_ID;
++
++ return state->demod_id;
++}
++
++static struct dvb_frontend_ops dvbsky_m88ds3103_ops;
++static int dvbsky_m88ds3103_initilaze(struct dvb_frontend *fe);
++
++struct dvb_frontend *dvbsky_m88ds3103_attach(const struct dvbsky_m88ds3103_config *config,
++ struct i2c_adapter *i2c)
++{
++ struct dvbsky_m88ds3103_state *state = NULL;
++
++ dprintk("%s\n", __func__);
++
++ /* allocate memory for the internal state */
++ state = kzalloc(sizeof(struct dvbsky_m88ds3103_state), GFP_KERNEL);
++ if (state == NULL) {
++ printk(KERN_ERR "Unable to kmalloc\n");
++ goto error2;
++ }
++
++ state->config = config;
++ state->i2c = i2c;
++ state->preBer = 0xffff;
++ state->delivery_system = SYS_DVBS; /*Default to DVB-S.*/
++
++ /* check demod id */
++ if(dvbsky_m88ds3103_check_id(state) == UNKNOW_ID){
++ printk(KERN_ERR "Unable to find Montage chip\n");
++ goto error3;
++ }
++
++ memcpy(&state->frontend.ops, &dvbsky_m88ds3103_ops,
++ sizeof(struct dvb_frontend_ops));
++ state->frontend.demodulator_priv = state;
++
++ dvbsky_m88ds3103_initilaze(&state->frontend);
++
++ return &state->frontend;
++
++error3:
++ kfree(state);
++error2:
++ return NULL;
++}
++EXPORT_SYMBOL(dvbsky_m88ds3103_attach);
++
++static int dvbsky_m88ds3103_set_carrier_offset(struct dvb_frontend *fe,
++ s32 carrier_offset_khz)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ s32 tmp;
++
++ tmp = carrier_offset_khz;
++ tmp *= 65536;
++
++ tmp = (2*tmp + MT_FE_MCLK_KHZ) / (2*MT_FE_MCLK_KHZ);
++
++ if (tmp < 0)
++ tmp += 65536;
++
++ dvbsky_m88ds3103_writereg(state, 0x5f, tmp >> 8);
++ dvbsky_m88ds3103_writereg(state, 0x5e, tmp & 0xff);
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_set_symrate(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ u16 value;
++
++ value = (((c->symbol_rate / 1000) << 15) + (MT_FE_MCLK_KHZ / 4)) / (MT_FE_MCLK_KHZ / 2);
++ dvbsky_m88ds3103_writereg(state, 0x61, value & 0x00ff);
++ dvbsky_m88ds3103_writereg(state, 0x62, (value & 0xff00) >> 8);
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_set_CCI(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u8 tmp;
++
++ tmp = dvbsky_m88ds3103_readreg(state, 0x56);
++ tmp &= ~0x01;
++ dvbsky_m88ds3103_writereg(state, 0x56, tmp);
++
++ tmp = dvbsky_m88ds3103_readreg(state, 0x76);
++ tmp &= ~0x80;
++ dvbsky_m88ds3103_writereg(state, 0x76, tmp);
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_init_reg(struct dvbsky_m88ds3103_state *state, const u8 *p_reg_tab, u32 size)
++{
++ u32 i;
++
++ for(i = 0; i < size; i+=2)
++ dvbsky_m88ds3103_writereg(state, p_reg_tab[i], p_reg_tab[i+1]);
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_get_locked_sym_rate(struct dvbsky_m88ds3103_state *state, u32 *sym_rate_KSs)
++{
++ u16 tmp;
++ u32 sym_rate_tmp;
++ u8 val_0x6d, val_0x6e;
++
++ val_0x6d = dvbsky_m88ds3103_readreg(state, 0x6d);
++ val_0x6e = dvbsky_m88ds3103_readreg(state, 0x6e);
++
++ tmp = (u16)((val_0x6e<<8) | val_0x6d);
++
++ sym_rate_tmp = (u32)(tmp * MT_FE_MCLK_KHZ);
++ sym_rate_tmp = (u32)(sym_rate_tmp / (1<<16));
++ *sym_rate_KSs = sym_rate_tmp;
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_get_channel_info(struct dvbsky_m88ds3103_state *state, u8 *p_mode, u8 *p_coderate)
++{
++ u8 tmp, val_0x7E;
++
++ if(state->delivery_system == SYS_DVBS2){
++ val_0x7E = dvbsky_m88ds3103_readreg(state, 0x7e);
++ tmp = (u8)((val_0x7E&0xC0) >> 6);
++ *p_mode = tmp;
++ tmp = (u8)(val_0x7E & 0x0f);
++ *p_coderate = tmp;
++ } else {
++ *p_mode = 0;
++ tmp = dvbsky_m88ds3103_readreg(state, 0xe6);
++ tmp = (u8)(tmp >> 5);
++ *p_coderate = tmp;
++ }
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_set_clock_ratio(struct dvbsky_m88ds3103_state *state)
++{
++ u8 val, mod_fac, tmp1, tmp2;
++ u32 input_datarate, locked_sym_rate_KSs;
++ u32 MClk_KHz = 96000;
++ u8 mod_mode, code_rate, divid_ratio = 0;
++
++ locked_sym_rate_KSs = 0;
++ dvbsky_m88ds3103_get_locked_sym_rate(state, &locked_sym_rate_KSs);
++ if(locked_sym_rate_KSs == 0)
++ return 0;
++
++ dvbsky_m88ds3103_get_channel_info(state, &mod_mode, &code_rate);
++
++ if (state->delivery_system == SYS_DVBS2)
++ {
++ switch(mod_mode) {
++ case 1: mod_fac = 3; break;
++ case 2: mod_fac = 4; break;
++ case 3: mod_fac = 5; break;
++ default: mod_fac = 2; break;
++ }
++
++ switch(code_rate) {
++ case 0: input_datarate = locked_sym_rate_KSs*mod_fac/8/4; break;
++ case 1: input_datarate = locked_sym_rate_KSs*mod_fac/8/3; break;
++ case 2: input_datarate = locked_sym_rate_KSs*mod_fac*2/8/5; break;
++ case 3: input_datarate = locked_sym_rate_KSs*mod_fac/8/2; break;
++ case 4: input_datarate = locked_sym_rate_KSs*mod_fac*3/8/5; break;
++ case 5: input_datarate = locked_sym_rate_KSs*mod_fac*2/8/3; break;
++ case 6: input_datarate = locked_sym_rate_KSs*mod_fac*3/8/4; break;
++ case 7: input_datarate = locked_sym_rate_KSs*mod_fac*4/8/5; break;
++ case 8: input_datarate = locked_sym_rate_KSs*mod_fac*5/8/6; break;
++ case 9: input_datarate = locked_sym_rate_KSs*mod_fac*8/8/9; break;
++ case 10: input_datarate = locked_sym_rate_KSs*mod_fac*9/8/10; break;
++ default: input_datarate = locked_sym_rate_KSs*mod_fac*2/8/3; break;
++ }
++
++ if(state->demod_id == DS3000_ID)
++ input_datarate = input_datarate * 115 / 100;
++
++ if(input_datarate < 4800) {tmp1 = 15;tmp2 = 15;} //4.8MHz TS clock
++ else if(input_datarate < 4966) {tmp1 = 14;tmp2 = 15;} //4.966MHz TS clock
++ else if(input_datarate < 5143) {tmp1 = 14;tmp2 = 14;} //5.143MHz TS clock
++ else if(input_datarate < 5333) {tmp1 = 13;tmp2 = 14;} //5.333MHz TS clock
++ else if(input_datarate < 5538) {tmp1 = 13;tmp2 = 13;} //5.538MHz TS clock
++ else if(input_datarate < 5760) {tmp1 = 12;tmp2 = 13;} //5.76MHz TS clock allan 0809
++ else if(input_datarate < 6000) {tmp1 = 12;tmp2 = 12;} //6MHz TS clock
++ else if(input_datarate < 6260) {tmp1 = 11;tmp2 = 12;} //6.26MHz TS clock
++ else if(input_datarate < 6545) {tmp1 = 11;tmp2 = 11;} //6.545MHz TS clock
++ else if(input_datarate < 6857) {tmp1 = 10;tmp2 = 11;} //6.857MHz TS clock
++ else if(input_datarate < 7200) {tmp1 = 10;tmp2 = 10;} //7.2MHz TS clock
++ else if(input_datarate < 7578) {tmp1 = 9;tmp2 = 10;} //7.578MHz TS clock
++ else if(input_datarate < 8000) {tmp1 = 9;tmp2 = 9;} //8MHz TS clock
++ else if(input_datarate < 8470) {tmp1 = 8;tmp2 = 9;} //8.47MHz TS clock
++ else if(input_datarate < 9000) {tmp1 = 8;tmp2 = 8;} //9MHz TS clock
++ else if(input_datarate < 9600) {tmp1 = 7;tmp2 = 8;} //9.6MHz TS clock
++ else if(input_datarate < 10285) {tmp1 = 7;tmp2 = 7;} //10.285MHz TS clock
++ else if(input_datarate < 12000) {tmp1 = 6;tmp2 = 6;} //12MHz TS clock
++ else if(input_datarate < 14400) {tmp1 = 5;tmp2 = 5;} //14.4MHz TS clock
++ else if(input_datarate < 18000) {tmp1 = 4;tmp2 = 4;} //18MHz TS clock
++ else {tmp1 = 3;tmp2 = 3;} //24MHz TS clock
++
++ if(state->demod_id == DS3000_ID) {
++ val = (u8)((tmp1<<4) + tmp2);
++ dvbsky_m88ds3103_writereg(state, 0xfe, val);
++ } else {
++ tmp1 = dvbsky_m88ds3103_readreg(state, 0x22);
++ tmp2 = dvbsky_m88ds3103_readreg(state, 0x24);
++
++ tmp1 >>= 6;
++ tmp1 &= 0x03;
++ tmp2 >>= 6;
++ tmp2 &= 0x03;
++
++ if((tmp1 == 0x00) && (tmp2 == 0x01))
++ MClk_KHz = 144000;
++ else if((tmp1 == 0x00) && (tmp2 == 0x03))
++ MClk_KHz = 72000;
++ else if((tmp1 == 0x01) && (tmp2 == 0x01))
++ MClk_KHz = 115200;
++ else if((tmp1 == 0x02) && (tmp2 == 0x01))
++ MClk_KHz = 96000;
++ else if((tmp1 == 0x03) && (tmp2 == 0x00))
++ MClk_KHz = 192000;
++ else
++ return 0;
++
++ if(input_datarate < 5200) /*Max. 2011-12-23 11:55*/
++ input_datarate = 5200;
++
++ if(input_datarate != 0)
++ divid_ratio = (u8)(MClk_KHz / input_datarate);
++ else
++ divid_ratio = 0xFF;
++
++ if(divid_ratio > 128)
++ divid_ratio = 128;
++
++ if(divid_ratio < 2)
++ divid_ratio = 2;
++
++ tmp1 = (u8)(divid_ratio / 2);
++ tmp2 = (u8)(divid_ratio / 2);
++
++ if((divid_ratio % 2) != 0)
++ tmp2 += 1;
++
++ tmp1 -= 1;
++ tmp2 -= 1;
++
++ tmp1 &= 0x3f;
++ tmp2 &= 0x3f;
++
++ val = dvbsky_m88ds3103_readreg(state, 0xfe);
++ val &= 0xF0;
++ val |= (tmp2 >> 2) & 0x0f;
++ dvbsky_m88ds3103_writereg(state, 0xfe, val);
++
++ val = (u8)((tmp2 & 0x03) << 6);
++ val |= tmp1;
++ dvbsky_m88ds3103_writereg(state, 0xea, val);
++ }
++ } else {
++ mod_fac = 2;
++
++ switch(code_rate) {
++ case 4: input_datarate = locked_sym_rate_KSs*mod_fac/2/8; break;
++ case 3: input_datarate = locked_sym_rate_KSs*mod_fac*2/3/8; break;
++ case 2: input_datarate = locked_sym_rate_KSs*mod_fac*3/4/8; break;
++ case 1: input_datarate = locked_sym_rate_KSs*mod_fac*5/6/8; break;
++ case 0: input_datarate = locked_sym_rate_KSs*mod_fac*7/8/8; break;
++ default: input_datarate = locked_sym_rate_KSs*mod_fac*3/4/8; break;
++ }
++
++ if(state->demod_id == DS3000_ID)
++ input_datarate = input_datarate * 115 / 100;
++
++ if(input_datarate < 6857) {tmp1 = 7;tmp2 = 7;} //6.857MHz TS clock
++ else if(input_datarate < 7384) {tmp1 = 6;tmp2 = 7;} //7.384MHz TS clock
++ else if(input_datarate < 8000) {tmp1 = 6;tmp2 = 6;} //8MHz TS clock
++ else if(input_datarate < 8727) {tmp1 = 5;tmp2 = 6;} //8.727MHz TS clock
++ else if(input_datarate < 9600) {tmp1 = 5;tmp2 = 5;} //9.6MHz TS clock
++ else if(input_datarate < 10666) {tmp1 = 4;tmp2 = 5;} //10.666MHz TS clock
++ else if(input_datarate < 12000) {tmp1 = 4;tmp2 = 4;} //12MHz TS clock
++ else if(input_datarate < 13714) {tmp1 = 3;tmp2 = 4;} //13.714MHz TS clock
++ else if(input_datarate < 16000) {tmp1 = 3;tmp2 = 3;} //16MHz TS clock
++ else if(input_datarate < 19200) {tmp1 = 2;tmp2 = 3;} //19.2MHz TS clock
++ else {tmp1 = 2;tmp2 = 2;} //24MHz TS clock
++
++ if(state->demod_id == DS3000_ID) {
++ val = dvbsky_m88ds3103_readreg(state, 0xfe);
++ val &= 0xc0;
++ val |= ((u8)((tmp1<<3) + tmp2));
++ dvbsky_m88ds3103_writereg(state, 0xfe, val);
++ } else {
++ if(input_datarate < 5200) /*Max. 2011-12-23 11:55*/
++ input_datarate = 5200;
++
++ if(input_datarate != 0)
++ divid_ratio = (u8)(MClk_KHz / input_datarate);
++ else
++ divid_ratio = 0xFF;
++
++ if(divid_ratio > 128)
++ divid_ratio = 128;
++
++ if(divid_ratio < 2)
++ divid_ratio = 2;
++
++ tmp1 = (u8)(divid_ratio / 2);
++ tmp2 = (u8)(divid_ratio / 2);
++
++ if((divid_ratio % 2) != 0)
++ tmp2 += 1;
++
++ tmp1 -= 1;
++ tmp2 -= 1;
++
++ tmp1 &= 0x3f;
++ tmp2 &= 0x3f;
++
++ val = dvbsky_m88ds3103_readreg(state, 0xfe);
++ val &= 0xF0;
++ val |= (tmp2 >> 2) & 0x0f;
++ dvbsky_m88ds3103_writereg(state, 0xfe, val);
++
++ val = (u8)((tmp2 & 0x03) << 6);
++ val |= tmp1;
++ dvbsky_m88ds3103_writereg(state, 0xea, val);
++ }
++ }
++ return 0;
++}
++
++static int dvbsky_m88ds3103_demod_connect(struct dvb_frontend *fe, s32 carrier_offset_khz)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ u16 value;
++ u8 val1,val2,data;
++
++ dprintk("connect delivery system = %d\n", state->delivery_system);
++
++ /* ds3000 global reset */
++ dvbsky_m88ds3103_writereg(state, 0x07, 0x80);
++ dvbsky_m88ds3103_writereg(state, 0x07, 0x00);
++ /* ds3000 build-in uC reset */
++ dvbsky_m88ds3103_writereg(state, 0xb2, 0x01);
++ /* ds3000 software reset */
++ dvbsky_m88ds3103_writereg(state, 0x00, 0x01);
++
++ switch (state->delivery_system) {
++ case SYS_DVBS:
++ /* initialise the demod in DVB-S mode */
++ if(state->demod_id == DS3000_ID){
++ dvbsky_m88ds3103_init_reg(state, ds3000_dvbs_init_tab, sizeof(ds3000_dvbs_init_tab));
++
++ value = dvbsky_m88ds3103_readreg(state, 0xfe);
++ value &= 0xc0;
++ value |= 0x1b;
++ dvbsky_m88ds3103_writereg(state, 0xfe, value);
++
++ if(state->config->ci_mode)
++ val1 = 0x80;
++ else if(state->config->ts_mode)
++ val1 = 0x60;
++ else
++ val1 = 0x20;
++ dvbsky_m88ds3103_writereg(state, 0xfd, val1);
++
++ }else if(state->demod_id == DS3103_ID){
++ dvbsky_m88ds3103_init_reg(state, ds3103_dvbs_init_tab, sizeof(ds3103_dvbs_init_tab));
++
++ /* set ts clock */
++ if(state->config->ci_mode == 2){
++ val1 = 6; val2 = 6;
++ }else if(state->config->ts_mode == 0) {
++ val1 = 3; val2 = 3;
++ }else{
++ val1 = 0; val2 = 0;
++ }
++ val1 -= 1; val2 -= 1;
++ val1 &= 0x3f; val2 &= 0x3f;
++ data = dvbsky_m88ds3103_readreg(state, 0xfe);
++ data &= 0xf0;
++ data |= (val2 >> 2) & 0x0f;
++ dvbsky_m88ds3103_writereg(state, 0xfe, data);
++ data = (val2 & 0x03) << 6;
++ data |= val1;
++ dvbsky_m88ds3103_writereg(state, 0xea, data);
++
++ dvbsky_m88ds3103_writereg(state, 0x4d, 0xfd & dvbsky_m88ds3103_readreg(state, 0x4d));
++ dvbsky_m88ds3103_writereg(state, 0x30, 0xef & dvbsky_m88ds3103_readreg(state, 0x30));
++
++ /* set master clock */
++ val1 = dvbsky_m88ds3103_readreg(state, 0x22);
++ val2 = dvbsky_m88ds3103_readreg(state, 0x24);
++
++ val1 &= 0x3f;
++ val2 &= 0x3f;
++ val1 |= 0x80;
++ val2 |= 0x40;
++
++ dvbsky_m88ds3103_writereg(state, 0x22, val1);
++ dvbsky_m88ds3103_writereg(state, 0x24, val2);
++
++ if(state->config->ci_mode){
++ if(state->config->ci_mode == 2)
++ val1 = 0x43;
++ else
++ val1 = 0x03;
++ }
++ else if(state->config->ts_mode)
++ val1 = 0x06;
++ else
++ val1 = 0x42;
++ dvbsky_m88ds3103_writereg(state, 0xfd, val1);
++ }
++ break;
++ case SYS_DVBS2:
++ /* initialise the demod in DVB-S2 mode */
++ if(state->demod_id == DS3000_ID){
++ dvbsky_m88ds3103_init_reg(state, ds3000_dvbs2_init_tab, sizeof(ds3000_dvbs2_init_tab));
++
++ if (c->symbol_rate >= 30000000)
++ dvbsky_m88ds3103_writereg(state, 0xfe, 0x54);
++ else
++ dvbsky_m88ds3103_writereg(state, 0xfe, 0x98);
++
++ }else if(state->demod_id == DS3103_ID){
++ dvbsky_m88ds3103_init_reg(state, ds3103_dvbs2_init_tab, sizeof(ds3103_dvbs2_init_tab));
++
++ /* set ts clock */
++ if(state->config->ci_mode == 2){
++ val1 = 6; val2 = 6;
++ }else if(state->config->ts_mode == 0){
++ val1 = 5; val2 = 4;
++ }else{
++ val1 = 0; val2 = 0;
++ }
++ val1 -= 1; val2 -= 1;
++ val1 &= 0x3f; val2 &= 0x3f;
++ data = dvbsky_m88ds3103_readreg(state, 0xfe);
++ data &= 0xf0;
++ data |= (val2 >> 2) & 0x0f;
++ dvbsky_m88ds3103_writereg(state, 0xfe, data);
++ data = (val2 & 0x03) << 6;
++ data |= val1;
++ dvbsky_m88ds3103_writereg(state, 0xea, data);
++
++ dvbsky_m88ds3103_writereg(state, 0x4d, 0xfd & dvbsky_m88ds3103_readreg(state, 0x4d));
++ dvbsky_m88ds3103_writereg(state, 0x30, 0xef & dvbsky_m88ds3103_readreg(state, 0x30));
++
++ /* set master clock */
++ val1 = dvbsky_m88ds3103_readreg(state, 0x22);
++ val2 = dvbsky_m88ds3103_readreg(state, 0x24);
++
++ val1 &= 0x3f;
++ val2 &= 0x3f;
++ if((state->config->ci_mode == 2) || (state->config->ts_mode == 1)){
++ val1 |= 0x80;
++ val2 |= 0x40;
++ }else{
++ if (c->symbol_rate >= 28000000){
++ val1 |= 0xc0;
++ }else if (c->symbol_rate >= 18000000){
++ val2 |= 0x40;
++ }else{
++ val1 |= 0x80;
++ val2 |= 0x40;
++ }
++ }
++ dvbsky_m88ds3103_writereg(state, 0x22, val1);
++ dvbsky_m88ds3103_writereg(state, 0x24, val2);
++ }
++
++ if(state->config->ci_mode){
++ if(state->config->ci_mode == 2)
++ val1 = 0x43;
++ else
++ val1 = 0x03;
++ }
++ else if(state->config->ts_mode)
++ val1 = 0x06;
++ else
++ val1 = 0x42;
++ dvbsky_m88ds3103_writereg(state, 0xfd, val1);
++
++ break;
++ default:
++ return 1;
++ }
++ /* disable 27MHz clock output */
++ dvbsky_m88ds3103_writereg(state, 0x29, 0x80);
++ /* enable ac coupling */
++ dvbsky_m88ds3103_writereg(state, 0x25, 0x8a);
++
++ if ((c->symbol_rate / 1000) <= 3000){
++ dvbsky_m88ds3103_writereg(state, 0xc3, 0x08); /* 8 * 32 * 100 / 64 = 400*/
++ dvbsky_m88ds3103_writereg(state, 0xc8, 0x20);
++ dvbsky_m88ds3103_writereg(state, 0xc4, 0x08); /* 8 * 0 * 100 / 128 = 0*/
++ dvbsky_m88ds3103_writereg(state, 0xc7, 0x00);
++ }else if((c->symbol_rate / 1000) <= 10000){
++ dvbsky_m88ds3103_writereg(state, 0xc3, 0x08); /* 8 * 16 * 100 / 64 = 200*/
++ dvbsky_m88ds3103_writereg(state, 0xc8, 0x10);
++ dvbsky_m88ds3103_writereg(state, 0xc4, 0x08); /* 8 * 0 * 100 / 128 = 0*/
++ dvbsky_m88ds3103_writereg(state, 0xc7, 0x00);
++ }else{
++ dvbsky_m88ds3103_writereg(state, 0xc3, 0x08); /* 8 * 6 * 100 / 64 = 75*/
++ dvbsky_m88ds3103_writereg(state, 0xc8, 0x06);
++ dvbsky_m88ds3103_writereg(state, 0xc4, 0x08); /* 8 * 0 * 100 / 128 = 0*/
++ dvbsky_m88ds3103_writereg(state, 0xc7, 0x00);
++ }
++
++ dvbsky_m88ds3103_set_symrate(fe);
++
++ dvbsky_m88ds3103_set_CCI(fe);
++
++ dvbsky_m88ds3103_set_carrier_offset(fe, carrier_offset_khz);
++
++ /* ds3000 out of software reset */
++ dvbsky_m88ds3103_writereg(state, 0x00, 0x00);
++ /* start ds3000 build-in uC */
++ dvbsky_m88ds3103_writereg(state, 0xb2, 0x00);
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_set_frontend(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++
++ int i;
++ fe_status_t status;
++ u8 lpf_mxdiv, mlpf_max, mlpf_min, nlpf, div4, capCode, changePLL;
++ s32 offset_khz, lpf_offset_KHz;
++ u16 value, ndiv, lpf_coeff;
++ u32 f3db, gdiv28, realFreq;
++ u8 RFgain;
++
++ dprintk("%s() ", __func__);
++ dprintk("c frequency = %d\n", c->frequency);
++ dprintk("symbol rate = %d\n", c->symbol_rate);
++ dprintk("delivery system = %d\n", c->delivery_system);
++
++ state->delivery_system = c->delivery_system;
++
++ realFreq = c->frequency;
++ lpf_offset_KHz = 0;
++ if(c->symbol_rate < 5000000){
++ lpf_offset_KHz = FREQ_OFFSET_AT_SMALL_SYM_RATE_KHz;
++ realFreq += FREQ_OFFSET_AT_SMALL_SYM_RATE_KHz;
++ }
++
++ if (state->config->set_ts_params)
++ state->config->set_ts_params(fe, 0);
++
++ div4 = 0;
++ RFgain = 0;
++ if(state->tuner_id == TS2022_ID){
++ dvbsky_m88ds3103_tuner_writereg(state, 0x10, 0x0a);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x11, 0x40);
++ if (realFreq < 1103000) {
++ dvbsky_m88ds3103_tuner_writereg(state, 0x10, 0x1b);
++ div4 = 1;
++ ndiv = (realFreq * (6 + 8) * 4)/MT_FE_CRYSTAL_KHZ;
++ }else {
++ ndiv = (realFreq * (6 + 8) * 2)/MT_FE_CRYSTAL_KHZ;
++ }
++ ndiv = ndiv + ndiv%2;
++ if(ndiv < 4095)
++ ndiv = ndiv - 1024;
++ else if (ndiv < 6143)
++ ndiv = ndiv + 1024;
++ else
++ ndiv = ndiv + 3072;
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x01, (ndiv & 0x3f00) >> 8);
++ }else{
++ dvbsky_m88ds3103_tuner_writereg(state, 0x10, 0x00);
++ if (realFreq < 1146000){
++ dvbsky_m88ds3103_tuner_writereg(state, 0x10, 0x11);
++ div4 = 1;
++ ndiv = (realFreq * (6 + 8) * 4) / MT_FE_CRYSTAL_KHZ;
++ }else{
++ dvbsky_m88ds3103_tuner_writereg(state, 0x10, 0x01);
++ ndiv = (realFreq * (6 + 8) * 2) / MT_FE_CRYSTAL_KHZ;
++ }
++ ndiv = ndiv + ndiv%2;
++ ndiv = ndiv - 1024;
++ dvbsky_m88ds3103_tuner_writereg(state, 0x01, (ndiv>>8)&0x0f);
++ }
++ /* set pll */
++ dvbsky_m88ds3103_tuner_writereg(state, 0x02, ndiv & 0x00ff);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x03, 0x06);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x0f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x10);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++
++ if(state->tuner_id == TS2022_ID){
++ if(( realFreq >= 1650000 ) && (realFreq <= 1850000)){
++ msleep(5);
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x14);
++ value &= 0x7f;
++ if(value < 64){
++ dvbsky_m88ds3103_tuner_writereg(state, 0x10, 0x82);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x11, 0x6f);
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x0f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x10);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++ }
++ }
++ msleep(5);
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x14);
++ value &= 0x1f;
++
++ if(value > 19){
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x10);
++ value &= 0x1d;
++ dvbsky_m88ds3103_tuner_writereg(state, 0x10, value);
++ }
++ }else{
++ msleep(5);
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x66);
++ changePLL = (((value & 0x80) >> 7) != div4);
++
++ if(changePLL){
++ dvbsky_m88ds3103_tuner_writereg(state, 0x10, 0x11);
++ div4 = 1;
++ ndiv = (realFreq * (6 + 8) * 4)/MT_FE_CRYSTAL_KHZ;
++ ndiv = ndiv + ndiv%2;
++ ndiv = ndiv - 1024;
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x01, (ndiv>>8) & 0x0f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x02, ndiv & 0xff);
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x0f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x10);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++ }
++ }
++ /*set the RF gain*/
++ if(state->tuner_id == TS2020_ID)
++ dvbsky_m88ds3103_tuner_writereg(state, 0x60, 0x79);
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x17);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x08);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++ msleep(5);
++
++ if(state->tuner_id == TS2020_ID){
++ RFgain = dvbsky_m88ds3103_tuner_readreg(state, 0x3d);
++ RFgain &= 0x0f;
++ if(RFgain < 15){
++ if(RFgain < 4)
++ RFgain = 0;
++ else
++ RFgain = RFgain -3;
++ value = ((RFgain << 3) | 0x01) & 0x79;
++ dvbsky_m88ds3103_tuner_writereg(state, 0x60, value);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x17);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x08);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++ }
++ }
++
++ /* set the LPF */
++ if(state->tuner_id == TS2022_ID){
++ dvbsky_m88ds3103_tuner_writereg(state, 0x25, 0x00);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x27, 0x70);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x41, 0x09);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x08, 0x0b);
++ }
++
++ f3db = ((c->symbol_rate / 1000) *135) / 200 + 2000;
++ f3db += lpf_offset_KHz;
++ if (f3db < 7000)
++ f3db = 7000;
++ if (f3db > 40000)
++ f3db = 40000;
++
++ gdiv28 = (MT_FE_CRYSTAL_KHZ / 1000 * 1694 + 500) / 1000;
++ dvbsky_m88ds3103_tuner_writereg(state, 0x04, gdiv28 & 0xff);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1b);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x04);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++ msleep(5);
++
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x26);
++ capCode = value & 0x3f;
++ if(state->tuner_id == TS2022_ID){
++ dvbsky_m88ds3103_tuner_writereg(state, 0x41, 0x0d);
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1b);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x04);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++
++ msleep(2);
++
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x26);
++ value &= 0x3f;
++ value = (capCode + value) / 2;
++ }
++ else
++ value = capCode;
++
++ gdiv28 = gdiv28 * 207 / (value * 2 + 151);
++ mlpf_max = gdiv28 * 135 / 100;
++ mlpf_min = gdiv28 * 78 / 100;
++ if (mlpf_max > 63)
++ mlpf_max = 63;
++
++ if(state->tuner_id == TS2022_ID)
++ lpf_coeff = 3200;
++ else
++ lpf_coeff = 2766;
++
++ nlpf = (f3db * gdiv28 * 2 / lpf_coeff / (MT_FE_CRYSTAL_KHZ / 1000) + 1) / 2 ;
++ if (nlpf > 23) nlpf = 23;
++ if (nlpf < 1) nlpf = 1;
++
++ lpf_mxdiv = (nlpf * (MT_FE_CRYSTAL_KHZ / 1000) * lpf_coeff * 2 / f3db + 1) / 2;
++
++ if (lpf_mxdiv < mlpf_min){
++ nlpf++;
++ lpf_mxdiv = (nlpf * (MT_FE_CRYSTAL_KHZ / 1000) * lpf_coeff * 2 / f3db + 1) / 2;
++ }
++
++ if (lpf_mxdiv > mlpf_max)
++ lpf_mxdiv = mlpf_max;
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x04, lpf_mxdiv);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x06, nlpf);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1b);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x04);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++ msleep(5);
++
++ if(state->tuner_id == TS2022_ID){
++ msleep(2);
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x26);
++ capCode = value & 0x3f;
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x41, 0x09);
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1b);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x04);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++
++ msleep(2);
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x26);
++ value &= 0x3f;
++ value = (capCode + value) / 2;
++
++ value = value | 0x80;
++ dvbsky_m88ds3103_tuner_writereg(state, 0x25, value);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x27, 0x30);
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x08, 0x09);
++ }
++
++ /* Set the BB gain */
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1e);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x01);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++ if(state->tuner_id == TS2020_ID){
++ if(RFgain == 15){
++ msleep(40);
++ value = dvbsky_m88ds3103_tuner_readreg(state, 0x21);
++ value &= 0x0f;
++ if(value < 3){
++ dvbsky_m88ds3103_tuner_writereg(state, 0x60, 0x61);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x17);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x51, 0x1f);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x08);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x50, 0x00);
++ }
++ }
++ }
++ msleep(60);
++
++ offset_khz = (ndiv - ndiv % 2 + 1024) * MT_FE_CRYSTAL_KHZ
++ / (6 + 8) / (div4 + 1) / 2 - realFreq;
++
++ dvbsky_m88ds3103_demod_connect(fe, offset_khz+lpf_offset_KHz);
++
++ for (i = 0; i < 30 ; i++) {
++ dvbsky_m88ds3103_read_status(fe, &status);
++ if (status & FE_HAS_LOCK){
++ break;
++ }
++ msleep(20);
++ }
++
++ if (status & FE_HAS_LOCK){
++ if(state->config->ci_mode == 2)
++ dvbsky_m88ds3103_set_clock_ratio(state);
++ if(state->config->start_ctrl){
++ if(state->first_lock == 0){
++ state->config->start_ctrl(fe);
++ state->first_lock = 1;
++ }
++ }
++ }
++
++ return 0;
++}
++
++static int dvbsky_m88ds3103_tune(struct dvb_frontend *fe,
++ bool re_tune,
++ unsigned int mode_flags,
++ unsigned int *delay,
++ fe_status_t *status)
++{
++ *delay = HZ / 5;
++
++ dprintk("%s() ", __func__);
++ dprintk("re_tune = %d\n", re_tune);
++
++ if (re_tune) {
++ int ret = dvbsky_m88ds3103_set_frontend(fe);
++ if (ret)
++ return ret;
++ }
++
++ return dvbsky_m88ds3103_read_status(fe, status);
++}
++
++static enum dvbfe_algo dvbsky_m88ds3103_get_algo(struct dvb_frontend *fe)
++{
++ return DVBFE_ALGO_HW;
++}
++
++ /*
++ * Power config will reset and load initial firmware if required
++ */
++static int dvbsky_m88ds3103_initilaze(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ int ret;
++
++ dprintk("%s()\n", __func__);
++ /* hard reset */
++ dvbsky_m88ds3103_writereg(state, 0x07, 0x80);
++ dvbsky_m88ds3103_writereg(state, 0x07, 0x00);
++ msleep(1);
++
++ dvbsky_m88ds3103_writereg(state, 0x08, 0x01 | dvbsky_m88ds3103_readreg(state, 0x08));
++ msleep(1);
++
++ if(state->tuner_id == TS2020_ID){
++ /* TS2020 init */
++ dvbsky_m88ds3103_tuner_writereg(state, 0x42, 0x73);
++ msleep(2);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x05, 0x01);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x62, 0xb5);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x07, 0x02);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x08, 0x01);
++ }
++ else if(state->tuner_id == TS2022_ID){
++ /* TS2022 init */
++ dvbsky_m88ds3103_tuner_writereg(state, 0x62, 0x6c);
++ msleep(2);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x42, 0x6c);
++ msleep(2);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x7d, 0x9d);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x7c, 0x9a);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x7a, 0x76);
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x3b, 0x01);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x63, 0x88);
++
++ dvbsky_m88ds3103_tuner_writereg(state, 0x61, 0x85);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x22, 0x30);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x30, 0x40);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x20, 0x23);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x24, 0x02);
++ dvbsky_m88ds3103_tuner_writereg(state, 0x12, 0xa0);
++ }
++
++ if(state->demod_id == DS3103_ID){
++ dvbsky_m88ds3103_writereg(state, 0x07, 0xe0);
++ dvbsky_m88ds3103_writereg(state, 0x07, 0x00);
++ msleep(1);
++ }
++ dvbsky_m88ds3103_writereg(state, 0xb2, 0x01);
++
++ /* Load the firmware if required */
++ ret = dvbsky_m88ds3103_load_firmware(fe);
++ if (ret != 0){
++ printk(KERN_ERR "%s: Unable initialize firmware\n", __func__);
++ return ret;
++ }
++ if(state->demod_id == DS3103_ID){
++ dvbsky_m88ds3103_writereg(state, 0x4d, 0xfd & dvbsky_m88ds3103_readreg(state, 0x4d));
++ dvbsky_m88ds3103_writereg(state, 0x30, 0xef & dvbsky_m88ds3103_readreg(state, 0x30));
++ }
++
++ return 0;
++}
++
++/*
++ * Initialise or wake up device
++ */
++static int dvbsky_m88ds3103_initfe(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++ u8 val;
++
++ dprintk("%s()\n", __func__);
++
++ /* 1st step to wake up demod */
++ dvbsky_m88ds3103_writereg(state, 0x08, 0x01 | dvbsky_m88ds3103_readreg(state, 0x08));
++ dvbsky_m88ds3103_writereg(state, 0x04, 0xfe & dvbsky_m88ds3103_readreg(state, 0x04));
++ dvbsky_m88ds3103_writereg(state, 0x23, 0xef & dvbsky_m88ds3103_readreg(state, 0x23));
++
++ /* 2nd step to wake up tuner */
++ val = dvbsky_m88ds3103_tuner_readreg(state, 0x00) & 0xff;
++ if((val & 0x01) == 0){
++ dvbsky_m88ds3103_tuner_writereg(state, 0x00, 0x01);
++ msleep(50);
++ }
++ dvbsky_m88ds3103_tuner_writereg(state, 0x00, 0x03);
++ msleep(50);
++
++ return 0;
++}
++
++/* Put device to sleep */
++static int dvbsky_m88ds3103_sleep(struct dvb_frontend *fe)
++{
++ struct dvbsky_m88ds3103_state *state = fe->demodulator_priv;
++
++ dprintk("%s()\n", __func__);
++
++ /* 1st step to sleep tuner */
++ dvbsky_m88ds3103_tuner_writereg(state, 0x00, 0x00);
++
++ /* 2nd step to sleep demod */
++ dvbsky_m88ds3103_writereg(state, 0x08, 0xfe & dvbsky_m88ds3103_readreg(state, 0x08));
++ dvbsky_m88ds3103_writereg(state, 0x04, 0x01 | dvbsky_m88ds3103_readreg(state, 0x04));
++ dvbsky_m88ds3103_writereg(state, 0x23, 0x10 | dvbsky_m88ds3103_readreg(state, 0x23));
++
++
++ return 0;
++}
++
++static struct dvb_frontend_ops dvbsky_m88ds3103_ops = {
++ .delsys = { SYS_DVBS, SYS_DVBS2},
++ .info = {
++ .name = "Montage DS3103/TS2022",
++ .type = FE_QPSK,
++ .frequency_min = 950000,
++ .frequency_max = 2150000,
++ .frequency_stepsize = 1011, /* kHz for QPSK frontends */
++ .frequency_tolerance = 5000,
++ .symbol_rate_min = 1000000,
++ .symbol_rate_max = 45000000,
++ .caps = FE_CAN_INVERSION_AUTO |
++ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
++ FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
++ FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
++ FE_CAN_2G_MODULATION |
++ FE_CAN_QPSK | FE_CAN_RECOVER
++ },
++
++ .release = dvbsky_m88ds3103_release,
++
++ .init = dvbsky_m88ds3103_initfe,
++ .sleep = dvbsky_m88ds3103_sleep,
++ .read_status = dvbsky_m88ds3103_read_status,
++ .read_ber = dvbsky_m88ds3103_read_ber,
++ .read_signal_strength = dvbsky_m88ds3103_read_signal_strength,
++ .read_snr = dvbsky_m88ds3103_read_snr,
++ .read_ucblocks = dvbsky_m88ds3103_read_ucblocks,
++ .set_tone = dvbsky_m88ds3103_set_tone,
++ .set_voltage = dvbsky_m88ds3103_set_voltage,
++ .diseqc_send_master_cmd = dvbsky_m88ds3103_send_diseqc_msg,
++ .diseqc_send_burst = dvbsky_m88ds3103_diseqc_send_burst,
++ .get_frontend_algo = dvbsky_m88ds3103_get_algo,
++ .tune = dvbsky_m88ds3103_tune,
++ .set_frontend = dvbsky_m88ds3103_set_frontend,
++};
++
++MODULE_DESCRIPTION("DVB Frontend module for Montage DS3103/TS2022 hardware");
++MODULE_AUTHOR("Max nibble");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88ds3103.h linux-openelec/drivers/media/dvb-frontends/dvbsky_m88ds3103.h
+--- linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88ds3103.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/dvbsky_m88ds3103.h 2015-07-24 18:03:30.120842002 -0500
+@@ -0,0 +1,53 @@
++/*
++ Montage Technology M88DS3103/M88TS2022 - DVBS/S2 Satellite demod/tuner driver
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef dvbsky_m88ds3103_H
++#define dvbsky_m88ds3103_H
++
++#include <linux/kconfig.h>
++#include <linux/dvb/frontend.h>
++
++struct dvbsky_m88ds3103_config {
++ /* the demodulator's i2c address */
++ u8 demod_address;
++ u8 ci_mode;
++ u8 pin_ctrl;
++ u8 ts_mode; /* 0: Parallel, 1: Serial */
++
++ /* Set device param to start dma */
++ int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
++ /* Start to transfer data */
++ int (*start_ctrl)(struct dvb_frontend *fe);
++ /* Set LNB voltage */
++ int (*set_voltage)(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
++};
++
++#if IS_ENABLED(CONFIG_DVB_DVBSKY_M88DS3103)
++extern struct dvb_frontend *dvbsky_m88ds3103_attach(
++ const struct dvbsky_m88ds3103_config *config,
++ struct i2c_adapter *i2c);
++#else
++static inline struct dvb_frontend *dvbsky_m88ds3103_attach(
++ const struct dvbsky_m88ds3103_config *config,
++ struct i2c_adapter *i2c)
++{
++ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
++ return NULL;
++}
++#endif /* CONFIG_DVB_DVBSKY_M88DS3103 */
++#endif /* dvbsky_m88ds3103_H */
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88ds3103_priv.h linux-openelec/drivers/media/dvb-frontends/dvbsky_m88ds3103_priv.h
+--- linux-3.14.36/drivers/media/dvb-frontends/dvbsky_m88ds3103_priv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/dvbsky_m88ds3103_priv.h 2015-07-24 18:03:30.120842002 -0500
+@@ -0,0 +1,403 @@
++/*
++ Montage Technology M88DS3103/M88TS2022 - DVBS/S2 Satellite demod/tuner driver
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef dvbsky_m88ds3103_PRIV_H
++#define dvbsky_m88ds3103_PRIV_H
++
++#define FW_DOWN_SIZE 32
++#define FW_DOWN_LOOP (8192/FW_DOWN_SIZE)
++#define DS3103_DEFAULT_FIRMWARE "dvb-fe-ds3103.fw"
++#define DS3000_DEFAULT_FIRMWARE "dvb-fe-ds300x.fw"
++#define MT_FE_MCLK_KHZ 96000 /* in kHz */
++#define MT_FE_CRYSTAL_KHZ 27000 /* in kHz */
++#define FREQ_OFFSET_AT_SMALL_SYM_RATE_KHz 3000
++#define DS3000_ID 0x3000
++#define DS3103_ID 0x3103
++#define TS2020_ID 0x2020
++#define TS2022_ID 0x2022
++#define UNKNOW_ID 0x0000
++
++struct dvbsky_m88ds3103_state {
++ struct i2c_adapter *i2c;
++ const struct dvbsky_m88ds3103_config *config;
++
++ struct dvb_frontend frontend;
++
++ u32 preBer;
++ u8 skip_fw_load;
++ u8 first_lock; /* The first time of signal lock */
++ u16 demod_id; /* demod chip type */
++ u16 tuner_id; /* tuner chip type */
++ fe_delivery_system_t delivery_system;
++};
++
++/* For M88DS3103 demod dvbs mode.*/
++static u8 ds3103_dvbs_init_tab[] = {
++ 0x23, 0x07,
++ 0x08, 0x03,
++ 0x0c, 0x02,
++ 0x21, 0x54,
++ 0x25, 0x82,
++ 0x27, 0x31,
++ 0x30, 0x08,
++ 0x31, 0x40,
++ 0x32, 0x32,
++ 0x33, 0x35,
++ 0x35, 0xff,
++ 0x3a, 0x00,
++ 0x37, 0x10,
++ 0x38, 0x10,
++ 0x39, 0x02,
++ 0x42, 0x60,
++ 0x4a, 0x80,
++ 0x4b, 0x04,
++ 0x4d, 0x91,
++ 0x5d, 0xc8,
++ 0x50, 0x36,
++ 0x51, 0x36,
++ 0x52, 0x36,
++ 0x53, 0x36,
++ 0x63, 0x0f,
++ 0x64, 0x30,
++ 0x65, 0x40,
++ 0x68, 0x26,
++ 0x69, 0x4c,
++ 0x70, 0x20,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x40,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x60,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x80,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0xa0,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x1f,
++ 0x76, 0x38,
++ 0x77, 0xa6,
++ 0x78, 0x0c,
++ 0x79, 0x80,
++ 0x7f, 0x14,
++ 0x7c, 0x00,
++ 0xae, 0x82,
++ 0x80, 0x64,
++ 0x81, 0x66,
++ 0x82, 0x44,
++ 0x85, 0x04,
++ 0xcd, 0xf4,
++ 0x90, 0x33,
++ 0xa0, 0x44,
++ 0xc0, 0x08,
++ 0xc3, 0x10,
++ 0xc4, 0x08,
++ 0xc5, 0xf0,
++ 0xc6, 0xff,
++ 0xc7, 0x00,
++ 0xc8, 0x1a,
++ 0xc9, 0x80,
++ 0xe0, 0xf8,
++ 0xe6, 0x8b,
++ 0xd0, 0x40,
++ 0xf8, 0x20,
++ 0xfa, 0x0f,
++ 0x00, 0x00,
++ 0xbd, 0x01,
++ 0xb8, 0x00,
++};
++/* For M88DS3103 demod dvbs2 mode.*/
++static u8 ds3103_dvbs2_init_tab[] = {
++ 0x23, 0x07,
++ 0x08, 0x07,
++ 0x0c, 0x02,
++ 0x21, 0x54,
++ 0x25, 0x82,
++ 0x27, 0x31,
++ 0x30, 0x08,
++ 0x32, 0x32,
++ 0x33, 0x35,
++ 0x35, 0xff,
++ 0x3a, 0x00,
++ 0x37, 0x10,
++ 0x38, 0x10,
++ 0x39, 0x02,
++ 0x42, 0x60,
++ 0x4a, 0x80,
++ 0x4b, 0x04,
++ 0x4d, 0x91,
++ 0x5d, 0xc8,
++ 0x50, 0x36,
++ 0x51, 0x36,
++ 0x52, 0x36,
++ 0x53, 0x36,
++ 0x63, 0x0f,
++ 0x64, 0x10,
++ 0x65, 0x20,
++ 0x68, 0x46,
++ 0x69, 0xcd,
++ 0x70, 0x20,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x40,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x60,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x80,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0xa0,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x1f,
++ 0x76, 0x38,
++ 0x77, 0xa6,
++ 0x78, 0x0c,
++ 0x79, 0x80,
++ 0x7f, 0x14,
++ 0x85, 0x08,
++ 0xcd, 0xf4,
++ 0x90, 0x33,
++ 0x86, 0x00,
++ 0x87, 0x0f,
++ 0x89, 0x00,
++ 0x8b, 0x44,
++ 0x8c, 0x66,
++ 0x9d, 0xc1,
++ 0x8a, 0x10,
++ 0xad, 0x40,
++ 0xa0, 0x44,
++ 0xc0, 0x08,
++ 0xc1, 0x10,
++ 0xc2, 0x08,
++ 0xc3, 0x10,
++ 0xc4, 0x08,
++ 0xc5, 0xf0,
++ 0xc6, 0xff,
++ 0xc7, 0x00,
++ 0xc8, 0x1a,
++ 0xc9, 0x80,
++ 0xca, 0x23,
++ 0xcb, 0x24,
++ 0xcc, 0xf4,
++ 0xce, 0x74,
++ 0x00, 0x00,
++ 0xbd, 0x01,
++ 0xb8, 0x00,
++};
++
++/* For M88DS3000 demod dvbs mode.*/
++static u8 ds3000_dvbs_init_tab[] = {
++ 0x23, 0x05,
++ 0x08, 0x03,
++ 0x0c, 0x02,
++ 0x21, 0x54,
++ 0x25, 0x82,
++ 0x27, 0x31,
++ 0x30, 0x08,
++ 0x31, 0x40,
++ 0x32, 0x32,
++ 0x33, 0x35,
++ 0x35, 0xff,
++ 0x3a, 0x00,
++ 0x37, 0x10,
++ 0x38, 0x10,
++ 0x39, 0x02,
++ 0x42, 0x60,
++ 0x4a, 0x40,
++ 0x4b, 0x04,
++ 0x4d, 0x91,
++ 0x5d, 0xc8,
++ 0x50, 0x77,
++ 0x51, 0x77,
++ 0x52, 0x36,
++ 0x53, 0x36,
++ 0x56, 0x01,
++ 0x63, 0x47,
++ 0x64, 0x30,
++ 0x65, 0x40,
++ 0x68, 0x26,
++ 0x69, 0x4c,
++ 0x70, 0x20,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x40,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x60,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x80,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0xa0,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x1f,
++ 0x76, 0x00,
++ 0x77, 0xd1,
++ 0x78, 0x0c,
++ 0x79, 0x80,
++ 0x7f, 0x04,
++ 0x7c, 0x00,
++ 0x80, 0x86,
++ 0x81, 0xa6,
++ 0x85, 0x04,
++ 0xcd, 0xf4,
++ 0x90, 0x33,
++ 0xa0, 0x44,
++ 0xc0, 0x18,
++ 0xc3, 0x10,
++ 0xc4, 0x08,
++ 0xc5, 0x80,
++ 0xc6, 0x80,
++ 0xc7, 0x0a,
++ 0xc8, 0x1a,
++ 0xc9, 0x80,
++ 0xfe, 0xb6,
++ 0xe0, 0xf8,
++ 0xe6, 0x8b,
++ 0xd0, 0x40,
++ 0xf8, 0x20,
++ 0xfa, 0x0f,
++ 0xad, 0x20,
++ 0xae, 0x07,
++ 0xb8, 0x00,
++};
++
++/* For M88DS3000 demod dvbs2 mode.*/
++static u8 ds3000_dvbs2_init_tab[] = {
++ 0x23, 0x0f,
++ 0x08, 0x07,
++ 0x0c, 0x02,
++ 0x21, 0x54,
++ 0x25, 0x82,
++ 0x27, 0x31,
++ 0x30, 0x08,
++ 0x31, 0x32,
++ 0x32, 0x32,
++ 0x33, 0x35,
++ 0x35, 0xff,
++ 0x3a, 0x00,
++ 0x37, 0x10,
++ 0x38, 0x10,
++ 0x39, 0x02,
++ 0x42, 0x60,
++ 0x4a, 0x80,
++ 0x4b, 0x04,
++ 0x4d, 0x91,
++ 0x5d, 0x88,
++ 0x50, 0x36,
++ 0x51, 0x36,
++ 0x52, 0x36,
++ 0x53, 0x36,
++ 0x63, 0x60,
++ 0x64, 0x10,
++ 0x65, 0x10,
++ 0x68, 0x04,
++ 0x69, 0x29,
++ 0x70, 0x20,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x40,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x60,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x80,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0xa0,
++ 0x71, 0x70,
++ 0x72, 0x04,
++ 0x73, 0x00,
++ 0x70, 0x1f,
++ 0xa0, 0x44,
++ 0xc0, 0x08,
++ 0xc1, 0x10,
++ 0xc2, 0x08,
++ 0xc3, 0x10,
++ 0xc4, 0x08,
++ 0xc5, 0xf0,
++ 0xc6, 0xf0,
++ 0xc7, 0x0a,
++ 0xc8, 0x1a,
++ 0xc9, 0x80,
++ 0xca, 0x23,
++ 0xcb, 0x24,
++ 0xce, 0x74,
++ 0x56, 0x01,
++ 0x90, 0x03,
++ 0x76, 0x80,
++ 0x77, 0x42,
++ 0x78, 0x0a,
++ 0x79, 0x80,
++ 0xad, 0x40,
++ 0xae, 0x07,
++ 0x7f, 0xd4,
++ 0x7c, 0x00,
++ 0x80, 0xa8,
++ 0x81, 0xda,
++ 0x7c, 0x01,
++ 0x80, 0xda,
++ 0x81, 0xec,
++ 0x7c, 0x02,
++ 0x80, 0xca,
++ 0x81, 0xeb,
++ 0x7c, 0x03,
++ 0x80, 0xba,
++ 0x81, 0xdb,
++ 0x85, 0x08,
++ 0x86, 0x00,
++ 0x87, 0x02,
++ 0x89, 0x80,
++ 0x8b, 0x44,
++ 0x8c, 0xaa,
++ 0x8a, 0x10,
++ 0xba, 0x00,
++ 0xf5, 0x04,
++ 0xd2, 0x32,
++ 0xb8, 0x00,
++};
++
++#endif /* dvbsky_m88ds3103_PRIV_H */
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/Kconfig linux-openelec/drivers/media/dvb-frontends/Kconfig
+--- linux-3.14.36/drivers/media/dvb-frontends/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/Kconfig 2015-07-24 18:03:30.364842002 -0500
+@@ -4,6 +4,13 @@
+ comment "Multistandard (satellite) frontends"
+ depends on DVB_CORE
+
++config DVB_CX24120
++ tristate "Conexant CX24120 based"
++ depends on DVB_CORE && I2C
++ default m if DVB_FE_CUSTOMISE
++ help
++ A DVB-S/DVB-S2 tuner module. Say Y when you want to support this frontend.
++
+ config DVB_STB0899
+ tristate "STB0899 based"
+ depends on DVB_CORE && I2C
+@@ -214,6 +221,20 @@
+ help
+ A Dual DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+
++config DVB_DVBSKY_M88DS3103
++ tristate "Montage M88DS3103 based (dvbsky)"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
++
++config DVB_DVBSKY_M88DC2800
++ tristate "Montage M88DC2800 based (dvbsky)"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-C tuner module. Say Y when you want to support this frontend.
++
+ config DVB_SI21XX
+ tristate "Silicon Labs SI21XX based"
+ depends on DVB_CORE && I2C
+@@ -753,6 +774,16 @@
+ tristate "Afatech AF9033 DVB-T demodulator"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
++
++config DVB_SI2168
++ tristate "Afatech AF9033 DVB-T demodulator"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++
++config DVB_SP2
++ tristate "Afatech AF9033 DVB-T demodulator"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
+
+ comment "Tools to develop new frontends"
+
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/Kconfig.orig linux-openelec/drivers/media/dvb-frontends/Kconfig.orig
+--- linux-3.14.36/drivers/media/dvb-frontends/Kconfig.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/Kconfig.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,762 @@
++menu "Customise DVB Frontends"
++ visible if !MEDIA_SUBDRV_AUTOSELECT
++
++comment "Multistandard (satellite) frontends"
++ depends on DVB_CORE
++
++config DVB_STB0899
++ tristate "STB0899 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S/S2/DSS Multistandard demodulator. Say Y when you want
++ to support this demodulator based frontends
++
++config DVB_STB6100
++ tristate "STB6100 based tuners"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A Silicon tuner from ST used in conjunction with the STB0899
++ demodulator. Say Y when you want to support this tuner.
++
++config DVB_STV090x
++ tristate "STV0900/STV0903(A/B) based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ DVB-S/S2/DSS Multistandard Professional/Broadcast demodulators.
++ Say Y when you want to support these frontends.
++
++config DVB_STV6110x
++ tristate "STV6110/(A) based tuners"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A Silicon tuner that supports DVB-S and DVB-S2 modes
++
++config DVB_M88DS3103
++ tristate "Montage M88DS3103"
++ depends on DVB_CORE && I2C && I2C_MUX
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y when you want to support this frontend.
++
++comment "Multistandard (cable + terrestrial) frontends"
++ depends on DVB_CORE
++
++config DVB_DRXK
++ tristate "Micronas DRXK based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Micronas DRX-K DVB-C/T demodulator.
++
++ Say Y when you want to support this frontend.
++
++config DVB_TDA18271C2DD
++ tristate "NXP TDA18271C2 silicon tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ NXP TDA18271 silicon tuner.
++
++ Say Y when you want to support this tuner.
++
++comment "DVB-S (satellite) frontends"
++ depends on DVB_CORE
++
++config DVB_CX24110
++ tristate "Conexant CX24110 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_CX24123
++ tristate "Conexant CX24123 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_MT312
++ tristate "Zarlink VP310/MT312/ZL10313 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_ZL10036
++ tristate "Zarlink ZL10036 silicon tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_ZL10039
++ tristate "Zarlink ZL10039 silicon tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_S5H1420
++ tristate "Samsung S5H1420 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_STV0288
++ tristate "ST STV0288 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_STB6000
++ tristate "ST STB6000 silicon tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S silicon tuner module. Say Y when you want to support this tuner.
++
++config DVB_STV0299
++ tristate "ST STV0299 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_STV6110
++ tristate "ST STV6110 silicon tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S silicon tuner module. Say Y when you want to support this tuner.
++
++config DVB_STV0900
++ tristate "ST STV0900 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S/S2 demodulator. Say Y when you want to support this frontend.
++
++config DVB_TDA8083
++ tristate "Philips TDA8083 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_TDA10086
++ tristate "Philips TDA10086 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_TDA8261
++ tristate "Philips TDA8261 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_VES1X93
++ tristate "VLSI VES1893 or VES1993 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_TUNER_ITD1000
++ tristate "Integrant ITD1000 Zero IF tuner for DVB-S/DSS"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_TUNER_CX24113
++ tristate "Conexant CX24113/CX24128 tuner for DVB-S/DSS"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++
++config DVB_TDA826X
++ tristate "Philips TDA826X silicon tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S silicon tuner module. Say Y when you want to support this tuner.
++
++config DVB_TUA6100
++ tristate "Infineon TUA6100 PLL"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S PLL chip.
++
++config DVB_CX24116
++ tristate "Conexant CX24116 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
++
++config DVB_CX24117
++ tristate "Conexant CX24117 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A Dual DVB-S/S2 tuner module. Say Y when you want to support this frontend.
++
++config DVB_SI21XX
++ tristate "Silicon Labs SI21XX based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_TS2020
++ tristate "Montage Tehnology TS2020 based tuners"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S/S2 silicon tuner. Say Y when you want to support this tuner.
++
++config DVB_DS3000
++ tristate "Montage Tehnology DS3000 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
++
++config DVB_MB86A16
++ tristate "Fujitsu MB86A16 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S/DSS Direct Conversion reveiver.
++ Say Y when you want to support this frontend.
++
++config DVB_TDA10071
++ tristate "NXP TDA10071"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y when you want to support this frontend.
++
++comment "DVB-T (terrestrial) frontends"
++ depends on DVB_CORE
++
++config DVB_SP8870
++ tristate "Spase sp8870 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++ This driver needs external firmware. Please use the command
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware sp8870" to
++ download/extract it, and then copy it to /usr/lib/hotplug/firmware
++ or /lib/firmware (depending on configuration of firmware hotplug).
++
++config DVB_SP887X
++ tristate "Spase sp887x based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++ This driver needs external firmware. Please use the command
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware sp887x" to
++ download/extract it, and then copy it to /usr/lib/hotplug/firmware
++ or /lib/firmware (depending on configuration of firmware hotplug).
++
++config DVB_CX22700
++ tristate "Conexant CX22700 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++config DVB_CX22702
++ tristate "Conexant cx22702 demodulator (OFDM)"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++config DVB_S5H1432
++ tristate "Samsung s5h1432 demodulator (OFDM)"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++config DVB_DRXD
++ tristate "Micronas DRXD driver"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++ Note: this driver was based on vendor driver reference code (released
++ under the GPL) as opposed to the existing drx397xd driver, which
++ was written via reverse engineering.
++
++config DVB_L64781
++ tristate "LSI L64781"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++config DVB_TDA1004X
++ tristate "Philips TDA10045H/TDA10046H based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++ This driver needs external firmware. Please use the commands
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10045",
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10046" to
++ download/extract them, and then copy them to /usr/lib/hotplug/firmware
++ or /lib/firmware (depending on configuration of firmware hotplug).
++
++config DVB_NXT6000
++ tristate "NxtWave Communications NXT6000 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++config DVB_MT352
++ tristate "Zarlink MT352 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++config DVB_ZL10353
++ tristate "Zarlink ZL10353 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++config DVB_DIB3000MB
++ tristate "DiBcom 3000M-B"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Designed for mobile usage. Say Y when you want
++ to support this frontend.
++
++config DVB_DIB3000MC
++ tristate "DiBcom 3000P/M-C"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Designed for mobile usage. Say Y when you want
++ to support this frontend.
++
++config DVB_DIB7000M
++ tristate "DiBcom 7000MA/MB/PA/PB/MC"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Designed for mobile usage. Say Y when you want
++ to support this frontend.
++
++config DVB_DIB7000P
++ tristate "DiBcom 7000PC"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Designed for mobile usage. Say Y when you want
++ to support this frontend.
++
++config DVB_DIB9000
++ tristate "DiBcom 9000"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Designed for mobile usage. Say Y when you want
++ to support this frontend.
++
++config DVB_TDA10048
++ tristate "Philips TDA10048HN based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module. Say Y when you want to support this frontend.
++
++config DVB_AF9013
++ tristate "Afatech AF9013 demodulator"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y when you want to support this frontend.
++
++config DVB_EC100
++ tristate "E3C EC100"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y when you want to support this frontend.
++
++config DVB_HD29L2
++ tristate "HDIC HD29L2"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y when you want to support this frontend.
++
++config DVB_STV0367
++ tristate "ST STV0367 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T/C tuner module. Say Y when you want to support this frontend.
++
++config DVB_CXD2820R
++ tristate "Sony CXD2820R"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y when you want to support this frontend.
++
++config DVB_RTL2830
++ tristate "Realtek RTL2830 DVB-T"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y when you want to support this frontend.
++
++config DVB_RTL2832
++ tristate "Realtek RTL2832 DVB-T"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y when you want to support this frontend.
++
++comment "DVB-C (cable) frontends"
++ depends on DVB_CORE
++
++config DVB_VES1820
++ tristate "VLSI VES1820 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-C tuner module. Say Y when you want to support this frontend.
++
++config DVB_TDA10021
++ tristate "Philips TDA10021 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-C tuner module. Say Y when you want to support this frontend.
++
++config DVB_TDA10023
++ tristate "Philips TDA10023 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-C tuner module. Say Y when you want to support this frontend.
++
++config DVB_STV0297
++ tristate "ST STV0297 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-C tuner module. Say Y when you want to support this frontend.
++
++comment "ATSC (North American/Korean Terrestrial/Cable DTV) frontends"
++ depends on DVB_CORE
++
++config DVB_NXT200X
++ tristate "NxtWave Communications NXT2002/NXT2004 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
++ to support this frontend.
++
++ This driver needs external firmware. Please use the commands
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" and
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2004" to
++ download/extract them, and then copy them to /usr/lib/hotplug/firmware
++ or /lib/firmware (depending on configuration of firmware hotplug).
++
++config DVB_OR51211
++ tristate "Oren OR51211 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
++
++ This driver needs external firmware. Please use the command
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
++ download it, and then copy it to /usr/lib/hotplug/firmware
++ or /lib/firmware (depending on configuration of firmware hotplug).
++
++config DVB_OR51132
++ tristate "Oren OR51132 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
++ to support this frontend.
++
++ This driver needs external firmware. Please use the commands
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware or51132_vsb" and/or
++ "<kerneldir>/Documentation/dvb/get_dvb_firmware or51132_qam" to
++ download firmwares for 8VSB and QAM64/256, respectively. Copy them to
++ /usr/lib/hotplug/firmware or /lib/firmware (depending on
++ configuration of firmware hotplug).
++
++config DVB_BCM3510
++ tristate "Broadcom BCM3510"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB/16VSB and QAM64/256 tuner module. Say Y when you want to
++ support this frontend.
++
++config DVB_LGDT330X
++ tristate "LG Electronics LGDT3302/LGDT3303 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
++ to support this frontend.
++
++config DVB_LGDT3305
++ tristate "LG Electronics LGDT3304 and LGDT3305 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
++ to support this frontend.
++
++config DVB_LG2160
++ tristate "LG Electronics LG216x based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC/MH demodulator module. Say Y when you want
++ to support this frontend.
++
++config DVB_S5H1409
++ tristate "Samsung S5H1409 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
++ to support this frontend.
++
++config DVB_AU8522
++ depends on I2C
++ tristate
++
++config DVB_AU8522_DTV
++ tristate "Auvitek AU8522 based DTV demod"
++ depends on DVB_CORE && I2C
++ select DVB_AU8522
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
++ you want to enable DTV demodulation support for this frontend.
++
++config DVB_AU8522_V4L
++ tristate "Auvitek AU8522 based ATV demod"
++ depends on VIDEO_V4L2 && I2C
++ select DVB_AU8522
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
++ you want to enable ATV demodulation support for this frontend.
++
++config DVB_S5H1411
++ tristate "Samsung S5H1411 based"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
++ to support this frontend.
++
++comment "ISDB-T (terrestrial) frontends"
++ depends on DVB_CORE
++
++config DVB_S921
++ tristate "Sharp S921 frontend"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ AN ISDB-T DQPSK, QPSK, 16QAM and 64QAM 1seg tuner module.
++ Say Y when you want to support this frontend.
++
++config DVB_DIB8000
++ tristate "DiBcom 8000MB/MC"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A driver for DiBcom's DiB8000 ISDB-T/ISDB-Tsb demodulator.
++ Say Y when you want to support this frontend.
++
++config DVB_MB86A20S
++ tristate "Fujitsu mb86a20s"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A driver for Fujitsu mb86a20s ISDB-T/ISDB-Tsb demodulator.
++ Say Y when you want to support this frontend.
++
++comment "Digital terrestrial only tuners/PLL"
++ depends on DVB_CORE
++
++config DVB_PLL
++ tristate "Generic I2C PLL based tuners"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ This module drives a number of tuners based on PLL chips with a
++ common I2C interface. Say Y when you want to support these tuners.
++
++config DVB_TUNER_DIB0070
++ tristate "DiBcom DiB0070 silicon base-band tuner"
++ depends on I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A driver for the silicon baseband tuner DiB0070 from DiBcom.
++ This device is only used inside a SiP called together with a
++ demodulator for now.
++
++config DVB_TUNER_DIB0090
++ tristate "DiBcom DiB0090 silicon base-band tuner"
++ depends on I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A driver for the silicon baseband tuner DiB0090 from DiBcom.
++ This device is only used inside a SiP called together with a
++ demodulator for now.
++
++comment "SEC control devices for DVB-S"
++ depends on DVB_CORE
++
++config DVB_LNBP21
++ tristate "LNBP21/LNBH24 SEC controllers"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An SEC control chips.
++
++config DVB_LNBP22
++ tristate "LNBP22 SEC controllers"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ LNB power supply and control voltage
++ regulator chip with step-up converter
++ and I2C interface.
++ Say Y when you want to support this chip.
++
++config DVB_ISL6405
++ tristate "ISL6405 SEC controller"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An SEC control chip.
++
++config DVB_ISL6421
++ tristate "ISL6421 SEC controller"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ An SEC control chip.
++
++config DVB_ISL6423
++ tristate "ISL6423 SEC controller"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A SEC controller chip from Intersil
++
++config DVB_A8293
++ tristate "Allegro A8293"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++
++config DVB_LGS8GL5
++ tristate "Silicon Legend LGS-8GL5 demodulator (OFDM)"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DMB-TH tuner module. Say Y when you want to support this frontend.
++
++config DVB_LGS8GXX
++ tristate "Legend Silicon LGS8913/LGS8GL5/LGS8GXX DMB-TH demodulator"
++ depends on DVB_CORE && I2C
++ select FW_LOADER
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DMB-TH tuner module. Say Y when you want to support this frontend.
++
++config DVB_ATBM8830
++ tristate "AltoBeam ATBM8830/8831 DMB-TH demodulator"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DMB-TH tuner module. Say Y when you want to support this frontend.
++
++config DVB_TDA665x
++ tristate "TDA665x tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Support for tuner modules based on Philips TDA6650/TDA6651 chips.
++ Say Y when you want to support this chip.
++
++ Currently supported tuners:
++ * Panasonic ENV57H12D5 (ET-50DT)
++
++config DVB_IX2505V
++ tristate "Sharp IX2505V silicon tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module. Say Y when you want to support this frontend.
++
++config DVB_IT913X_FE
++ tristate "it913x frontend and it9137 tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-T tuner module.
++ Say Y when you want to support this frontend.
++
++config DVB_M88RS2000
++ tristate "M88RS2000 DVB-S demodulator and tuner"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ A DVB-S tuner module.
++ Say Y when you want to support this frontend.
++
++config DVB_AF9033
++ tristate "Afatech AF9033 DVB-T demodulator"
++ depends on DVB_CORE && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++
++comment "Tools to develop new frontends"
++
++config DVB_DUMMY_FE
++ tristate "Dummy frontend driver"
++ default n
++endmenu
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/m88ds3103.c linux-openelec/drivers/media/dvb-frontends/m88ds3103.c
+--- linux-3.14.36/drivers/media/dvb-frontends/m88ds3103.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/m88ds3103.c 2015-07-24 18:03:30.144842002 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Montage M88DS3103 demodulator driver
++ * Montage M88DS3103/M88RS6000 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+@@ -159,9 +159,10 @@
+ {
+ int ret, i, j;
+ u8 buf[83];
++
+ dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+
+- if (tab_len > 83) {
++ if (tab_len > 86) {
+ ret = -EINVAL;
+ goto err;
+ }
+@@ -244,11 +245,12 @@
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, len;
+ const struct m88ds3103_reg_val *init;
+- u8 u8tmp, u8tmp1, u8tmp2;
+- u8 buf[2];
+- u16 u16tmp, divide_ratio;
+- u32 tuner_frequency, target_mclk, ts_clk;
++ u8 u8tmp, u8tmp1 = 0, u8tmp2 = 0; /* silence compiler warning */
++ u8 buf[3];
++ u16 u16tmp, divide_ratio = 0;
++ u32 tuner_frequency, target_mclk;
+ s32 s32tmp;
++
+ dev_dbg(&priv->i2c->dev,
+ "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
+ __func__, c->delivery_system,
+@@ -260,6 +262,22 @@
+ goto err;
+ }
+
++ /* reset */
++ ret = m88ds3103_wr_reg(priv, 0x07, 0x80);
++ if (ret)
++ goto err;
++
++ ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
++ if (ret)
++ goto err;
++
++ /* Disable demod clock path */
++ if (priv->chip_id == M88RS6000_CHIP_ID) {
++ ret = m88ds3103_wr_reg(priv, 0x06, 0xe0);
++ if (ret)
++ goto err;
++ }
++
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+@@ -271,54 +289,53 @@
+ ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_frequency);
+ if (ret)
+ goto err;
++ } else {
++ /*
++ * Use nominal target frequency as tuner driver does not provide
++ * actual frequency used. Carrier offset calculation is not
++ * valid.
++ */
++ tuner_frequency = c->frequency;
+ }
+
+- /* reset */
+- ret = m88ds3103_wr_reg(priv, 0x07, 0x80);
+- if (ret)
+- goto err;
+-
+- ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+- if (ret)
+- goto err;
+-
+- ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+- if (ret)
+- goto err;
++ /* select M88RS6000 demod main mclk and ts mclk from tuner die. */
++ if (priv->chip_id == M88RS6000_CHIP_ID) {
++ if (c->symbol_rate > 45010000)
++ priv->mclk_khz = 110250;
++ else
++ priv->mclk_khz = 96000;
+
+- ret = m88ds3103_wr_reg(priv, 0x00, 0x01);
+- if (ret)
+- goto err;
++ if (c->delivery_system == SYS_DVBS)
++ target_mclk = 96000;
++ else
++ target_mclk = 144000;
+
+- switch (c->delivery_system) {
+- case SYS_DVBS:
+- len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals);
+- init = m88ds3103_dvbs_init_reg_vals;
+- target_mclk = 96000;
+- break;
+- case SYS_DVBS2:
+- len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals);
+- init = m88ds3103_dvbs2_init_reg_vals;
++ /* Enable demod clock path */
++ ret = m88ds3103_wr_reg(priv, 0x06, 0x00);
++ if (ret)
++ goto err;
++ usleep_range(10000, 20000);
++ } else {
++ /* set M88DS3103 mclk and ts mclk. */
++ priv->mclk_khz = 96000;
+
+ switch (priv->cfg->ts_mode) {
+ case M88DS3103_TS_SERIAL:
+ case M88DS3103_TS_SERIAL_D7:
+- if (c->symbol_rate < 18000000)
+- target_mclk = 96000;
+- else
+- target_mclk = 144000;
++ target_mclk = priv->cfg->ts_clk;
+ break;
+ case M88DS3103_TS_PARALLEL:
+- case M88DS3103_TS_PARALLEL_12:
+- case M88DS3103_TS_PARALLEL_16:
+- case M88DS3103_TS_PARALLEL_19_2:
+ case M88DS3103_TS_CI:
+- if (c->symbol_rate < 18000000)
++ if (c->delivery_system == SYS_DVBS)
+ target_mclk = 96000;
+- else if (c->symbol_rate < 28000000)
+- target_mclk = 144000;
+- else
+- target_mclk = 192000;
++ else {
++ if (c->symbol_rate < 18000000)
++ target_mclk = 96000;
++ else if (c->symbol_rate < 28000000)
++ target_mclk = 144000;
++ else
++ target_mclk = 192000;
++ }
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n",
+@@ -326,6 +343,55 @@
+ ret = -EINVAL;
+ goto err;
+ }
++
++ switch (target_mclk) {
++ case 96000:
++ u8tmp1 = 0x02; /* 0b10 */
++ u8tmp2 = 0x01; /* 0b01 */
++ break;
++ case 144000:
++ u8tmp1 = 0x00; /* 0b00 */
++ u8tmp2 = 0x01; /* 0b01 */
++ break;
++ case 192000:
++ u8tmp1 = 0x03; /* 0b11 */
++ u8tmp2 = 0x00; /* 0b00 */
++ break;
++ }
++ ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
++ if (ret)
++ goto err;
++ ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
++ if (ret)
++ goto err;
++ }
++
++ ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
++ if (ret)
++ goto err;
++
++ ret = m88ds3103_wr_reg(priv, 0x00, 0x01);
++ if (ret)
++ goto err;
++
++ switch (c->delivery_system) {
++ case SYS_DVBS:
++ if (priv->chip_id == M88RS6000_CHIP_ID) {
++ len = ARRAY_SIZE(m88rs6000_dvbs_init_reg_vals);
++ init = m88rs6000_dvbs_init_reg_vals;
++ } else {
++ len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals);
++ init = m88ds3103_dvbs_init_reg_vals;
++ }
++ break;
++ case SYS_DVBS2:
++ if (priv->chip_id == M88RS6000_CHIP_ID) {
++ len = ARRAY_SIZE(m88rs6000_dvbs2_init_reg_vals);
++ init = m88rs6000_dvbs2_init_reg_vals;
++ } else {
++ len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals);
++ init = m88ds3103_dvbs2_init_reg_vals;
++ }
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+@@ -341,37 +407,44 @@
+ goto err;
+ }
+
+- u8tmp1 = 0; /* silence compiler warning */
++ if (priv->chip_id == M88RS6000_CHIP_ID) {
++ if ((c->delivery_system == SYS_DVBS2)
++ && ((c->symbol_rate / 1000) <= 5000)) {
++ ret = m88ds3103_wr_reg(priv, 0xc0, 0x04);
++ if (ret)
++ goto err;
++ buf[0] = 0x09;
++ buf[1] = 0x22;
++ buf[2] = 0x88;
++ ret = m88ds3103_wr_regs(priv, 0x8a, buf, 3);
++ if (ret)
++ goto err;
++ }
++ ret = m88ds3103_wr_reg_mask(priv, 0x9d, 0x08, 0x08);
++ if (ret)
++ goto err;
++ ret = m88ds3103_wr_reg(priv, 0xf1, 0x01);
++ if (ret)
++ goto err;
++ ret = m88ds3103_wr_reg_mask(priv, 0x30, 0x80, 0x80);
++ if (ret)
++ goto err;
++ }
++
+ switch (priv->cfg->ts_mode) {
+ case M88DS3103_TS_SERIAL:
+ u8tmp1 = 0x00;
+- ts_clk = 0;
+- u8tmp = 0x46;
++ u8tmp = 0x06;
+ break;
+ case M88DS3103_TS_SERIAL_D7:
+ u8tmp1 = 0x20;
+- ts_clk = 0;
+- u8tmp = 0x46;
++ u8tmp = 0x06;
+ break;
+ case M88DS3103_TS_PARALLEL:
+- ts_clk = 24000;
+- u8tmp = 0x42;
+- break;
+- case M88DS3103_TS_PARALLEL_12:
+- ts_clk = 12000;
+- u8tmp = 0x42;
+- break;
+- case M88DS3103_TS_PARALLEL_16:
+- ts_clk = 16000;
+- u8tmp = 0x42;
+- break;
+- case M88DS3103_TS_PARALLEL_19_2:
+- ts_clk = 19200;
+- u8tmp = 0x42;
++ u8tmp = 0x02;
+ break;
+ case M88DS3103_TS_CI:
+- ts_clk = 6000;
+- u8tmp = 0x43;
++ u8tmp = 0x03;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n", __func__);
+@@ -379,6 +452,9 @@
+ goto err;
+ }
+
++ if (priv->cfg->ts_clk_pol)
++ u8tmp |= 0x40;
++
+ /* TS mode */
+ ret = m88ds3103_wr_reg(priv, 0xfd, u8tmp);
+ if (ret)
+@@ -390,21 +466,20 @@
+ ret = m88ds3103_wr_reg_mask(priv, 0x29, u8tmp1, 0x20);
+ if (ret)
+ goto err;
+- }
+-
+- if (ts_clk) {
+- divide_ratio = DIV_ROUND_UP(target_mclk, ts_clk);
+- u8tmp1 = divide_ratio / 2;
+- u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
+- } else {
+- divide_ratio = 0;
+ u8tmp1 = 0;
+ u8tmp2 = 0;
++ break;
++ default:
++ if (priv->cfg->ts_clk) {
++ divide_ratio = DIV_ROUND_UP(target_mclk, priv->cfg->ts_clk);
++ u8tmp1 = divide_ratio / 2;
++ u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
++ }
+ }
+
+ dev_dbg(&priv->i2c->dev,
+ "%s: target_mclk=%d ts_clk=%d divide_ratio=%d\n",
+- __func__, target_mclk, ts_clk, divide_ratio);
++ __func__, target_mclk, priv->cfg->ts_clk, divide_ratio);
+
+ u8tmp1--;
+ u8tmp2--;
+@@ -427,41 +502,6 @@
+ if (ret)
+ goto err;
+
+- switch (target_mclk) {
+- case 72000:
+- u8tmp1 = 0x00; /* 0b00 */
+- u8tmp2 = 0x03; /* 0b11 */
+- break;
+- case 96000:
+- u8tmp1 = 0x02; /* 0b10 */
+- u8tmp2 = 0x01; /* 0b01 */
+- break;
+- case 115200:
+- u8tmp1 = 0x01; /* 0b01 */
+- u8tmp2 = 0x01; /* 0b01 */
+- break;
+- case 144000:
+- u8tmp1 = 0x00; /* 0b00 */
+- u8tmp2 = 0x01; /* 0b01 */
+- break;
+- case 192000:
+- u8tmp1 = 0x03; /* 0b11 */
+- u8tmp2 = 0x00; /* 0b00 */
+- break;
+- default:
+- dev_dbg(&priv->i2c->dev, "%s: invalid target_mclk\n", __func__);
+- ret = -EINVAL;
+- goto err;
+- }
+-
+- ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
+- if (ret)
+- goto err;
+-
+- ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
+- if (ret)
+- goto err;
+-
+ if (c->symbol_rate <= 3000000)
+ u8tmp = 0x20;
+ else if (c->symbol_rate <= 10000000)
+@@ -485,7 +525,7 @@
+ if (ret)
+ goto err;
+
+- u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, M88DS3103_MCLK_KHZ / 2);
++ u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, priv->mclk_khz / 2);
+ buf[0] = (u16tmp >> 0) & 0xff;
+ buf[1] = (u16tmp >> 8) & 0xff;
+ ret = m88ds3103_wr_regs(priv, 0x61, buf, 2);
+@@ -508,7 +548,7 @@
+ (tuner_frequency - c->frequency));
+
+ s32tmp = 0x10000 * (tuner_frequency - c->frequency);
+- s32tmp = DIV_ROUND_CLOSEST(s32tmp, M88DS3103_MCLK_KHZ);
++ s32tmp = DIV_ROUND_CLOSEST(s32tmp, priv->mclk_khz);
+ if (s32tmp < 0)
+ s32tmp += 0x10000;
+
+@@ -539,8 +579,9 @@
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, len, remaining;
+ const struct firmware *fw = NULL;
+- u8 *fw_file = M88DS3103_FIRMWARE;
++ u8 *fw_file;
+ u8 u8tmp;
++
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ /* set cold state by default */
+@@ -559,15 +600,6 @@
+ if (ret)
+ goto err;
+
+- /* reset */
+- ret = m88ds3103_wr_reg(priv, 0x07, 0x60);
+- if (ret)
+- goto err;
+-
+- ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+- if (ret)
+- goto err;
+-
+ /* firmware status */
+ ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
+ if (ret)
+@@ -578,10 +610,23 @@
+ if (u8tmp)
+ goto skip_fw_download;
+
++ /* global reset, global diseqc reset, golbal fec reset */
++ ret = m88ds3103_wr_reg(priv, 0x07, 0xe0);
++ if (ret)
++ goto err;
++
++ ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
++ if (ret)
++ goto err;
++
+ /* cold state - try to download firmware */
+ dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state\n",
+ KBUILD_MODNAME, m88ds3103_ops.info.name);
+
++ if (priv->chip_id == M88RS6000_CHIP_ID)
++ fw_file = M88RS6000_FIRMWARE;
++ else
++ fw_file = M88DS3103_FIRMWARE;
+ /* request the firmware, this will block and timeout */
+ ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
+ if (ret) {
+@@ -595,7 +640,7 @@
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+ if (ret)
+- goto err;
++ goto error_fw_release;
+
+ for (remaining = fw->size; remaining > 0;
+ remaining -= (priv->cfg->i2c_wr_max - 1)) {
+@@ -609,13 +654,13 @@
+ dev_err(&priv->i2c->dev,
+ "%s: firmware download failed=%d\n",
+ KBUILD_MODNAME, ret);
+- goto err;
++ goto error_fw_release;
+ }
+ }
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
+ if (ret)
+- goto err;
++ goto error_fw_release;
+
+ release_firmware(fw);
+ fw = NULL;
+@@ -641,10 +686,10 @@
+ priv->warm = true;
+
+ return 0;
+-err:
+- if (fw)
+- release_firmware(fw);
+
++error_fw_release:
++ release_firmware(fw);
++err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+ }
+@@ -653,12 +698,18 @@
+ {
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret;
++ u8 u8tmp;
++
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ priv->delivery_system = SYS_UNDEFINED;
+
+ /* TS Hi-Z */
+- ret = m88ds3103_wr_reg_mask(priv, 0x27, 0x00, 0x01);
++ if (priv->chip_id == M88RS6000_CHIP_ID)
++ u8tmp = 0x29;
++ else
++ u8tmp = 0x27;
++ ret = m88ds3103_wr_reg_mask(priv, u8tmp, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+@@ -687,6 +738,7 @@
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 buf[3];
++
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+@@ -711,9 +763,6 @@
+ case 1:
+ c->inversion = INVERSION_ON;
+ break;
+- default:
+- dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
+- __func__);
+ }
+
+ switch ((buf[1] >> 5) & 0x07) {
+@@ -793,9 +842,6 @@
+ case 1:
+ c->pilot = PILOT_ON;
+ break;
+- default:
+- dev_dbg(&priv->i2c->dev, "%s: invalid pilot\n",
+- __func__);
+ }
+
+ switch ((buf[0] >> 6) & 0x07) {
+@@ -823,9 +869,6 @@
+ case 1:
+ c->inversion = INVERSION_ON;
+ break;
+- default:
+- dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
+- __func__);
+ }
+
+ switch ((buf[2] >> 0) & 0x03) {
+@@ -855,7 +898,7 @@
+ goto err;
+
+ c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) *
+- M88DS3103_MCLK_KHZ * 1000 / 0x10000;
++ priv->mclk_khz * 1000 / 0x10000;
+
+ return 0;
+ err:
+@@ -871,6 +914,7 @@
+ u8 buf[3];
+ u16 noise, signal;
+ u32 noise_tot, signal_tot;
++
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ /* reports SNR in resolution of 0.1 dB */
+
+@@ -893,7 +937,7 @@
+ /* SNR(X) dB = 10 * ln(X) / ln(10) dB */
+ tmp = DIV_ROUND_CLOSEST(tmp, 8 * M88DS3103_SNR_ITERATIONS);
+ if (tmp)
+- *snr = 100ul * intlog2(tmp) / intlog2(10);
++ *snr = div_u64((u64) 100 * intlog2(tmp), intlog2(10));
+ else
+ *snr = 0;
+ break;
+@@ -922,7 +966,7 @@
+ /* SNR(X) dB = 10 * log10(X) dB */
+ if (signal > noise) {
+ tmp = signal / noise;
+- *snr = 100ul * intlog10(tmp) / (1 << 24);
++ *snr = div_u64((u64) 100 * intlog10(tmp), (1 << 24));
+ } else {
+ *snr = 0;
+ }
+@@ -940,6 +984,87 @@
+ return ret;
+ }
+
++static int m88ds3103_read_ber(struct dvb_frontend *fe, u32 *ber)
++{
++ struct m88ds3103_priv *priv = fe->demodulator_priv;
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ int ret;
++ unsigned int utmp;
++ u8 buf[3], u8tmp;
++
++ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
++
++ switch (c->delivery_system) {
++ case SYS_DVBS:
++ ret = m88ds3103_wr_reg(priv, 0xf9, 0x04);
++ if (ret)
++ goto err;
++
++ ret = m88ds3103_rd_reg(priv, 0xf8, &u8tmp);
++ if (ret)
++ goto err;
++
++ if (!(u8tmp & 0x10)) {
++ u8tmp |= 0x10;
++
++ ret = m88ds3103_rd_regs(priv, 0xf6, buf, 2);
++ if (ret)
++ goto err;
++
++ priv->ber = (buf[1] << 8) | (buf[0] << 0);
++
++ /* restart counters */
++ ret = m88ds3103_wr_reg(priv, 0xf8, u8tmp);
++ if (ret)
++ goto err;
++ }
++ break;
++ case SYS_DVBS2:
++ ret = m88ds3103_rd_regs(priv, 0xd5, buf, 3);
++ if (ret)
++ goto err;
++
++ utmp = (buf[2] << 16) | (buf[1] << 8) | (buf[0] << 0);
++
++ if (utmp > 3000) {
++ ret = m88ds3103_rd_regs(priv, 0xf7, buf, 2);
++ if (ret)
++ goto err;
++
++ priv->ber = (buf[1] << 8) | (buf[0] << 0);
++
++ /* restart counters */
++ ret = m88ds3103_wr_reg(priv, 0xd1, 0x01);
++ if (ret)
++ goto err;
++
++ ret = m88ds3103_wr_reg(priv, 0xf9, 0x01);
++ if (ret)
++ goto err;
++
++ ret = m88ds3103_wr_reg(priv, 0xf9, 0x00);
++ if (ret)
++ goto err;
++
++ ret = m88ds3103_wr_reg(priv, 0xd1, 0x00);
++ if (ret)
++ goto err;
++ }
++ break;
++ default:
++ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
++ __func__);
++ ret = -EINVAL;
++ goto err;
++ }
++
++ *ber = priv->ber;
++
++ return 0;
++err:
++ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
++ return ret;
++}
+
+ static int m88ds3103_set_tone(struct dvb_frontend *fe,
+ fe_sec_tone_mode_t fe_sec_tone_mode)
+@@ -947,6 +1072,7 @@
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 u8tmp, tone, reg_a1_mask;
++
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_tone_mode=%d\n", __func__,
+ fe_sec_tone_mode);
+
+@@ -958,7 +1084,7 @@
+ switch (fe_sec_tone_mode) {
+ case SEC_TONE_ON:
+ tone = 0;
+- reg_a1_mask = 0x87;
++ reg_a1_mask = 0x47;
+ break;
+ case SEC_TONE_OFF:
+ tone = 1;
+@@ -987,12 +1113,64 @@
+ return ret;
+ }
+
++static int m88ds3103_set_voltage(struct dvb_frontend *fe,
++ fe_sec_voltage_t fe_sec_voltage)
++{
++ struct m88ds3103_priv *priv = fe->demodulator_priv;
++ int ret;
++ u8 u8tmp;
++ bool voltage_sel, voltage_dis;
++
++ dev_dbg(&priv->i2c->dev, "%s: fe_sec_voltage=%d\n", __func__,
++ fe_sec_voltage);
++
++ if (!priv->warm) {
++ ret = -EAGAIN;
++ goto err;
++ }
++
++ switch (fe_sec_voltage) {
++ case SEC_VOLTAGE_18:
++ voltage_sel = true;
++ voltage_dis = false;
++ break;
++ case SEC_VOLTAGE_13:
++ voltage_sel = false;
++ voltage_dis = false;
++ break;
++ case SEC_VOLTAGE_OFF:
++ voltage_sel = false;
++ voltage_dis = true;
++ break;
++ default:
++ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_voltage\n",
++ __func__);
++ ret = -EINVAL;
++ goto err;
++ }
++
++ /* output pin polarity */
++ voltage_sel ^= priv->cfg->lnb_hv_pol;
++ voltage_dis ^= priv->cfg->lnb_en_pol;
++
++ u8tmp = voltage_dis << 1 | voltage_sel << 0;
++ ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0x03);
++ if (ret)
++ goto err;
++
++ return 0;
++err:
++ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
++ return ret;
++}
++
+ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *diseqc_cmd)
+ {
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, i;
+ u8 u8tmp;
++
+ dev_dbg(&priv->i2c->dev, "%s: msg=%*ph\n", __func__,
+ diseqc_cmd->msg_len, diseqc_cmd->msg);
+
+@@ -1064,6 +1242,7 @@
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, i;
+ u8 u8tmp, burst;
++
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_mini_cmd=%d\n", __func__,
+ fe_sec_mini_cmd);
+
+@@ -1136,6 +1315,7 @@
+ static void m88ds3103_release(struct dvb_frontend *fe)
+ {
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
++
+ i2c_del_mux_adapter(priv->i2c_adapter);
+ kfree(priv);
+ }
+@@ -1198,18 +1378,22 @@
+ priv->i2c = i2c;
+ mutex_init(&priv->i2c_mutex);
+
+- ret = m88ds3103_rd_reg(priv, 0x01, &chip_id);
++ /* 0x00: chip id[6:0], 0x01: chip ver[7:0], 0x02: chip ver[15:8] */
++ ret = m88ds3103_rd_reg(priv, 0x00, &chip_id);
+ if (ret)
+ goto err;
+
+- dev_dbg(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
++ chip_id >>= 1;
++ dev_info(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+ switch (chip_id) {
+- case 0xd0:
++ case M88RS6000_CHIP_ID:
++ case M88DS3103_CHIP_ID:
+ break;
+ default:
+ goto err;
+ }
++ priv->chip_id = chip_id;
+
+ switch (priv->cfg->clock_out) {
+ case M88DS3103_CLOCK_OUT_DISABLED:
+@@ -1225,6 +1409,11 @@
+ goto err;
+ }
+
++ /* 0x29 register is defined differently for m88rs6000. */
++ /* set internal tuner address to 0x21 */
++ if (chip_id == M88RS6000_CHIP_ID)
++ u8tmp = 0x00;
++
+ ret = m88ds3103_wr_reg(priv, 0x29, u8tmp);
+ if (ret)
+ goto err;
+@@ -1252,6 +1441,9 @@
+
+ /* create dvb_frontend */
+ memcpy(&priv->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
++ if (priv->chip_id == M88RS6000_CHIP_ID)
++ strncpy(priv->fe.ops.info.name,
++ "Montage M88RS6000", sizeof(priv->fe.ops.info.name));
+ priv->fe.demodulator_priv = priv;
+
+ return &priv->fe;
+@@ -1298,14 +1490,17 @@
+
+ .read_status = m88ds3103_read_status,
+ .read_snr = m88ds3103_read_snr,
++ .read_ber = m88ds3103_read_ber,
+
+ .diseqc_send_master_cmd = m88ds3103_diseqc_send_master_cmd,
+ .diseqc_send_burst = m88ds3103_diseqc_send_burst,
+
+ .set_tone = m88ds3103_set_tone,
++ .set_voltage = m88ds3103_set_voltage,
+ };
+
+ MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+ MODULE_DESCRIPTION("Montage M88DS3103 DVB-S/S2 demodulator driver");
+ MODULE_LICENSE("GPL");
+ MODULE_FIRMWARE(M88DS3103_FIRMWARE);
++MODULE_FIRMWARE(M88RS6000_FIRMWARE);
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/m88ds3103.h linux-openelec/drivers/media/dvb-frontends/m88ds3103.h
+--- linux-3.14.36/drivers/media/dvb-frontends/m88ds3103.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/m88ds3103.h 2015-07-24 18:03:30.148842002 -0500
+@@ -47,14 +47,23 @@
+ */
+ #define M88DS3103_TS_SERIAL 0 /* TS output pin D0, normal */
+ #define M88DS3103_TS_SERIAL_D7 1 /* TS output pin D7 */
+-#define M88DS3103_TS_PARALLEL 2 /* 24 MHz, normal */
+-#define M88DS3103_TS_PARALLEL_12 3 /* 12 MHz */
+-#define M88DS3103_TS_PARALLEL_16 4 /* 16 MHz */
+-#define M88DS3103_TS_PARALLEL_19_2 5 /* 19.2 MHz */
+-#define M88DS3103_TS_CI 6 /* 6 MHz */
++#define M88DS3103_TS_PARALLEL 2 /* TS Parallel mode */
++#define M88DS3103_TS_CI 3 /* TS CI Mode */
+ u8 ts_mode;
+
+ /*
++ * TS clk in KHz
++ * Default: 0.
++ */
++ u32 ts_clk;
++
++ /*
++ * TS clk polarity.
++ * Default: 0. 1-active at falling edge; 0-active at rising edge.
++ */
++ u8 ts_clk_pol:1;
++
++ /*
+ * spectrum inversion
+ * Default: 0
+ */
+@@ -86,6 +95,22 @@
+ * Default: none, must set
+ */
+ u8 agc;
++
++ /*
++ * LNB H/V pin polarity
++ * Default: 0.
++ * 1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18.
++ * 0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
++ */
++ u8 lnb_hv_pol:1;
++
++ /*
++ * LNB enable pin polarity
++ * Default: 0.
++ * 1: pin high to enable, pin low to disable.
++ * 0: pin high to disable, pin low to enable.
++ */
++ u8 lnb_en_pol:1;
+ };
+
+ /*
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/m88ds3103_priv.h linux-openelec/drivers/media/dvb-frontends/m88ds3103_priv.h
+--- linux-3.14.36/drivers/media/dvb-frontends/m88ds3103_priv.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/m88ds3103_priv.h 2015-07-24 18:03:30.148842002 -0500
+@@ -22,9 +22,13 @@
+ #include "dvb_math.h"
+ #include <linux/firmware.h>
+ #include <linux/i2c-mux.h>
++#include <linux/math64.h>
+
+ #define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw"
++#define M88RS6000_FIRMWARE "dvb-demod-m88rs6000.fw"
+ #define M88DS3103_MCLK_KHZ 96000
++#define M88RS6000_CHIP_ID 0x74
++#define M88DS3103_CHIP_ID 0x70
+
+ struct m88ds3103_priv {
+ struct i2c_adapter *i2c;
+@@ -34,8 +38,13 @@
+ struct dvb_frontend fe;
+ fe_delivery_system_t delivery_system;
+ fe_status_t fe_status;
++ u32 ber;
+ bool warm; /* FW running */
+ struct i2c_adapter *i2c_adapter;
++ /* auto detect chip id to do different config */
++ u8 chip_id;
++ /* main mclk is calculated for M88RS6000 dynamically */
++ u32 mclk_khz;
+ };
+
+ struct m88ds3103_reg_val {
+@@ -212,4 +221,178 @@
+ {0xb8, 0x00},
+ };
+
++static const struct m88ds3103_reg_val m88rs6000_dvbs_init_reg_vals[] = {
++ {0x23, 0x07},
++ {0x08, 0x03},
++ {0x0c, 0x02},
++ {0x20, 0x00},
++ {0x21, 0x54},
++ {0x25, 0x82},
++ {0x27, 0x31},
++ {0x30, 0x08},
++ {0x31, 0x40},
++ {0x32, 0x32},
++ {0x33, 0x35},
++ {0x35, 0xff},
++ {0x3a, 0x00},
++ {0x37, 0x10},
++ {0x38, 0x10},
++ {0x39, 0x02},
++ {0x42, 0x60},
++ {0x4a, 0x80},
++ {0x4b, 0x04},
++ {0x4d, 0x91},
++ {0x5d, 0xc8},
++ {0x50, 0x36},
++ {0x51, 0x36},
++ {0x52, 0x36},
++ {0x53, 0x36},
++ {0x63, 0x0f},
++ {0x64, 0x30},
++ {0x65, 0x40},
++ {0x68, 0x26},
++ {0x69, 0x4c},
++ {0x70, 0x20},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0x40},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0x60},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0x80},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0xa0},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0x1f},
++ {0x76, 0x38},
++ {0x77, 0xa6},
++ {0x78, 0x0c},
++ {0x79, 0x80},
++ {0x7f, 0x14},
++ {0x7c, 0x00},
++ {0xae, 0x82},
++ {0x80, 0x64},
++ {0x81, 0x66},
++ {0x82, 0x44},
++ {0x85, 0x04},
++ {0xcd, 0xf4},
++ {0x90, 0x33},
++ {0xa0, 0x44},
++ {0xbe, 0x00},
++ {0xc0, 0x08},
++ {0xc3, 0x10},
++ {0xc4, 0x08},
++ {0xc5, 0xf0},
++ {0xc6, 0xff},
++ {0xc7, 0x00},
++ {0xc8, 0x1a},
++ {0xc9, 0x80},
++ {0xe0, 0xf8},
++ {0xe6, 0x8b},
++ {0xd0, 0x40},
++ {0xf8, 0x20},
++ {0xfa, 0x0f},
++ {0x00, 0x00},
++ {0xbd, 0x01},
++ {0xb8, 0x00},
++ {0x29, 0x11},
++};
++
++static const struct m88ds3103_reg_val m88rs6000_dvbs2_init_reg_vals[] = {
++ {0x23, 0x07},
++ {0x08, 0x07},
++ {0x0c, 0x02},
++ {0x20, 0x00},
++ {0x21, 0x54},
++ {0x25, 0x82},
++ {0x27, 0x31},
++ {0x30, 0x08},
++ {0x32, 0x32},
++ {0x33, 0x35},
++ {0x35, 0xff},
++ {0x3a, 0x00},
++ {0x37, 0x10},
++ {0x38, 0x10},
++ {0x39, 0x02},
++ {0x42, 0x60},
++ {0x4a, 0x80},
++ {0x4b, 0x04},
++ {0x4d, 0x91},
++ {0x5d, 0xc8},
++ {0x50, 0x36},
++ {0x51, 0x36},
++ {0x52, 0x36},
++ {0x53, 0x36},
++ {0x63, 0x0f},
++ {0x64, 0x10},
++ {0x65, 0x20},
++ {0x68, 0x46},
++ {0x69, 0xcd},
++ {0x70, 0x20},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0x40},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0x60},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0x80},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0xa0},
++ {0x71, 0x70},
++ {0x72, 0x04},
++ {0x73, 0x00},
++ {0x70, 0x1f},
++ {0x76, 0x38},
++ {0x77, 0xa6},
++ {0x78, 0x0c},
++ {0x79, 0x80},
++ {0x7f, 0x14},
++ {0x85, 0x08},
++ {0xcd, 0xf4},
++ {0x90, 0x33},
++ {0x86, 0x00},
++ {0x87, 0x0f},
++ {0x89, 0x00},
++ {0x8b, 0x44},
++ {0x8c, 0x66},
++ {0x9d, 0xc1},
++ {0x8a, 0x10},
++ {0xad, 0x40},
++ {0xa0, 0x44},
++ {0xbe, 0x00},
++ {0xc0, 0x08},
++ {0xc1, 0x10},
++ {0xc2, 0x08},
++ {0xc3, 0x10},
++ {0xc4, 0x08},
++ {0xc5, 0xf0},
++ {0xc6, 0xff},
++ {0xc7, 0x00},
++ {0xc8, 0x1a},
++ {0xc9, 0x80},
++ {0xca, 0x23},
++ {0xcb, 0x24},
++ {0xcc, 0xf4},
++ {0xce, 0x74},
++ {0x00, 0x00},
++ {0xbd, 0x01},
++ {0xb8, 0x00},
++ {0x29, 0x01},
++};
+ #endif
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/Makefile linux-openelec/drivers/media/dvb-frontends/Makefile
+--- linux-3.14.36/drivers/media/dvb-frontends/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/Makefile 2015-07-24 18:03:30.364842002 -0500
+@@ -19,6 +19,10 @@
+ obj-$(CONFIG_DVB_CX22700) += cx22700.o
+ obj-$(CONFIG_DVB_S5H1432) += s5h1432.o
+ obj-$(CONFIG_DVB_CX24110) += cx24110.o
++
++# inserted by Custler
++obj-$(CONFIG_DVB_CX24120) += cx24120.o
++
+ obj-$(CONFIG_DVB_TDA8083) += tda8083.o
+ obj-$(CONFIG_DVB_L64781) += l64781.o
+ obj-$(CONFIG_DVB_DIB3000MB) += dib3000mb.o
+@@ -105,4 +109,7 @@
+ obj-$(CONFIG_DVB_RTL2832) += rtl2832.o
+ obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
+ obj-$(CONFIG_DVB_AF9033) += af9033.o
+-
++obj-$(CONFIG_DVB_SP2) += sp2.o
++obj-$(CONFIG_DVB_SI2168) += si2168.o
++obj-$(CONFIG_DVB_DVBSKY_M88DS3103) += dvbsky_m88ds3103.o
++obj-$(CONFIG_DVB_DVBSKY_M88DC2800) += dvbsky_m88dc2800.o
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/Makefile.orig linux-openelec/drivers/media/dvb-frontends/Makefile.orig
+--- linux-3.14.36/drivers/media/dvb-frontends/Makefile.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/Makefile.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,108 @@
++#
++# Makefile for the kernel DVB frontend device drivers.
++#
++
++ccflags-y += -I$(srctree)/drivers/media/dvb-core/
++ccflags-y += -I$(srctree)/drivers/media/tuners/
++
++stb0899-objs := stb0899_drv.o stb0899_algo.o
++stv0900-objs := stv0900_core.o stv0900_sw.o
++drxd-objs := drxd_firm.o drxd_hard.o
++cxd2820r-objs := cxd2820r_core.o cxd2820r_c.o cxd2820r_t.o cxd2820r_t2.o
++drxk-objs := drxk_hard.o
++
++obj-$(CONFIG_DVB_PLL) += dvb-pll.o
++obj-$(CONFIG_DVB_STV0299) += stv0299.o
++obj-$(CONFIG_DVB_STB0899) += stb0899.o
++obj-$(CONFIG_DVB_STB6100) += stb6100.o
++obj-$(CONFIG_DVB_SP8870) += sp8870.o
++obj-$(CONFIG_DVB_CX22700) += cx22700.o
++obj-$(CONFIG_DVB_S5H1432) += s5h1432.o
++obj-$(CONFIG_DVB_CX24110) += cx24110.o
++obj-$(CONFIG_DVB_TDA8083) += tda8083.o
++obj-$(CONFIG_DVB_L64781) += l64781.o
++obj-$(CONFIG_DVB_DIB3000MB) += dib3000mb.o
++obj-$(CONFIG_DVB_DIB3000MC) += dib3000mc.o dibx000_common.o
++obj-$(CONFIG_DVB_DIB7000M) += dib7000m.o dibx000_common.o
++obj-$(CONFIG_DVB_DIB7000P) += dib7000p.o dibx000_common.o
++obj-$(CONFIG_DVB_DIB8000) += dib8000.o dibx000_common.o
++obj-$(CONFIG_DVB_DIB9000) += dib9000.o dibx000_common.o
++obj-$(CONFIG_DVB_MT312) += mt312.o
++obj-$(CONFIG_DVB_VES1820) += ves1820.o
++obj-$(CONFIG_DVB_VES1X93) += ves1x93.o
++obj-$(CONFIG_DVB_TDA1004X) += tda1004x.o
++obj-$(CONFIG_DVB_SP887X) += sp887x.o
++obj-$(CONFIG_DVB_NXT6000) += nxt6000.o
++obj-$(CONFIG_DVB_MT352) += mt352.o
++obj-$(CONFIG_DVB_ZL10036) += zl10036.o
++obj-$(CONFIG_DVB_ZL10039) += zl10039.o
++obj-$(CONFIG_DVB_ZL10353) += zl10353.o
++obj-$(CONFIG_DVB_CX22702) += cx22702.o
++obj-$(CONFIG_DVB_DRXD) += drxd.o
++obj-$(CONFIG_DVB_TDA10021) += tda10021.o
++obj-$(CONFIG_DVB_TDA10023) += tda10023.o
++obj-$(CONFIG_DVB_STV0297) += stv0297.o
++obj-$(CONFIG_DVB_NXT200X) += nxt200x.o
++obj-$(CONFIG_DVB_OR51211) += or51211.o
++obj-$(CONFIG_DVB_OR51132) += or51132.o
++obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
++obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
++obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
++obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
++obj-$(CONFIG_DVB_LG2160) += lg2160.o
++obj-$(CONFIG_DVB_CX24123) += cx24123.o
++obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
++obj-$(CONFIG_DVB_LNBP22) += lnbp22.o
++obj-$(CONFIG_DVB_ISL6405) += isl6405.o
++obj-$(CONFIG_DVB_ISL6421) += isl6421.o
++obj-$(CONFIG_DVB_TDA10086) += tda10086.o
++obj-$(CONFIG_DVB_TDA826X) += tda826x.o
++obj-$(CONFIG_DVB_TDA8261) += tda8261.o
++obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
++obj-$(CONFIG_DVB_TUNER_DIB0090) += dib0090.o
++obj-$(CONFIG_DVB_TUA6100) += tua6100.o
++obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
++obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
++obj-$(CONFIG_DVB_AU8522) += au8522_common.o
++obj-$(CONFIG_DVB_AU8522_DTV) += au8522_dig.o
++obj-$(CONFIG_DVB_AU8522_V4L) += au8522_decoder.o
++obj-$(CONFIG_DVB_TDA10048) += tda10048.o
++obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
++obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
++obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o
++obj-$(CONFIG_DVB_TDA665x) += tda665x.o
++obj-$(CONFIG_DVB_LGS8GXX) += lgs8gxx.o
++obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o
++obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
++obj-$(CONFIG_DVB_AF9013) += af9013.o
++obj-$(CONFIG_DVB_CX24116) += cx24116.o
++obj-$(CONFIG_DVB_CX24117) += cx24117.o
++obj-$(CONFIG_DVB_SI21XX) += si21xx.o
++obj-$(CONFIG_DVB_STV0288) += stv0288.o
++obj-$(CONFIG_DVB_STB6000) += stb6000.o
++obj-$(CONFIG_DVB_S921) += s921.o
++obj-$(CONFIG_DVB_STV6110) += stv6110.o
++obj-$(CONFIG_DVB_STV0900) += stv0900.o
++obj-$(CONFIG_DVB_STV090x) += stv090x.o
++obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
++obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
++obj-$(CONFIG_DVB_ISL6423) += isl6423.o
++obj-$(CONFIG_DVB_EC100) += ec100.o
++obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
++obj-$(CONFIG_DVB_DS3000) += ds3000.o
++obj-$(CONFIG_DVB_TS2020) += ts2020.o
++obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
++obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
++obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
++obj-$(CONFIG_DVB_STV0367) += stv0367.o
++obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
++obj-$(CONFIG_DVB_DRXK) += drxk.o
++obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
++obj-$(CONFIG_DVB_IT913X_FE) += it913x-fe.o
++obj-$(CONFIG_DVB_A8293) += a8293.o
++obj-$(CONFIG_DVB_TDA10071) += tda10071.o
++obj-$(CONFIG_DVB_RTL2830) += rtl2830.o
++obj-$(CONFIG_DVB_RTL2832) += rtl2832.o
++obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
++obj-$(CONFIG_DVB_AF9033) += af9033.o
++
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/si2168.c linux-openelec/drivers/media/dvb-frontends/si2168.c
+--- linux-3.14.36/drivers/media/dvb-frontends/si2168.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/si2168.c 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,756 @@
++/*
++ * Silicon Labs Si2168 DVB-T/T2/C demodulator driver
++ *
++ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include "si2168_priv.h"
++
++static const struct dvb_frontend_ops si2168_ops;
++
++/* execute firmware command */
++static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
++{
++ int ret;
++ unsigned long timeout;
++
++ mutex_lock(&s->i2c_mutex);
++
++ if (cmd->wlen) {
++ /* write cmd and args for firmware */
++ ret = i2c_master_send(s->client, cmd->args, cmd->wlen);
++ if (ret < 0) {
++ goto err_mutex_unlock;
++ } else if (ret != cmd->wlen) {
++ ret = -EREMOTEIO;
++ goto err_mutex_unlock;
++ }
++ }
++
++ if (cmd->rlen) {
++ /* wait cmd execution terminate */
++ #define TIMEOUT 50
++ timeout = jiffies + msecs_to_jiffies(TIMEOUT);
++ while (!time_after(jiffies, timeout)) {
++ ret = i2c_master_recv(s->client, cmd->args, cmd->rlen);
++ if (ret < 0) {
++ goto err_mutex_unlock;
++ } else if (ret != cmd->rlen) {
++ ret = -EREMOTEIO;
++ goto err_mutex_unlock;
++ }
++
++ /* firmware ready? */
++ if ((cmd->args[0] >> 7) & 0x01)
++ break;
++ }
++
++ dev_dbg(&s->client->dev, "cmd execution took %d ms\n",
++ jiffies_to_msecs(jiffies) -
++ (jiffies_to_msecs(timeout) - TIMEOUT));
++
++ if (!((cmd->args[0] >> 7) & 0x01)) {
++ ret = -ETIMEDOUT;
++ goto err_mutex_unlock;
++ }
++ }
++
++ ret = 0;
++
++err_mutex_unlock:
++ mutex_unlock(&s->i2c_mutex);
++ if (ret)
++ goto err;
++
++ return 0;
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2168_read_status(struct dvb_frontend *fe, fe_status_t *status)
++{
++ struct si2168 *s = fe->demodulator_priv;
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ int ret;
++ struct si2168_cmd cmd;
++
++ *status = 0;
++
++ if (!s->active) {
++ ret = -EAGAIN;
++ goto err;
++ }
++
++ switch (c->delivery_system) {
++ case SYS_DVBT:
++ memcpy(cmd.args, "\xa0\x01", 2);
++ cmd.wlen = 2;
++ cmd.rlen = 13;
++ break;
++ case SYS_DVBC_ANNEX_A:
++ memcpy(cmd.args, "\x90\x01", 2);
++ cmd.wlen = 2;
++ cmd.rlen = 9;
++ break;
++ case SYS_DVBT2:
++ memcpy(cmd.args, "\x50\x01", 2);
++ cmd.wlen = 2;
++ cmd.rlen = 14;
++ break;
++ default:
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ /*
++ * Possible values seen, in order from strong signal to weak:
++ * 16 0001 0110 full lock
++ * 1e 0001 1110 partial lock
++ * 1a 0001 1010 partial lock
++ * 18 0001 1000 no lock
++ *
++ * [b3:b1] lock bits
++ * [b4] statistics ready? Set in a few secs after lock is gained.
++ */
++
++ switch ((cmd.args[2] >> 1) & 0x03) {
++ case 0x01:
++ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
++ break;
++ case 0x03:
++ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
++ FE_HAS_SYNC | FE_HAS_LOCK;
++ break;
++ }
++
++ s->fe_status = *status;
++
++ if (*status & FE_HAS_LOCK) {
++ c->cnr.len = 1;
++ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
++ c->cnr.stat[0].svalue = cmd.args[3] * 1000 / 4;
++ } else {
++ c->cnr.len = 1;
++ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
++ }
++
++ dev_dbg(&s->client->dev, "status=%02x args=%*ph\n",
++ *status, cmd.rlen, cmd.args);
++
++ return 0;
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2168_set_frontend(struct dvb_frontend *fe)
++{
++ struct si2168 *s = fe->demodulator_priv;
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ int ret;
++ struct si2168_cmd cmd;
++ u8 bandwidth, delivery_system;
++
++ dev_dbg(&s->client->dev,
++ "delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%u, stream_id=%d\n",
++ c->delivery_system, c->modulation,
++ c->frequency, c->bandwidth_hz, c->symbol_rate,
++ c->inversion, c->stream_id);
++
++ if (!s->active) {
++ ret = -EAGAIN;
++ goto err;
++ }
++
++ switch (c->delivery_system) {
++ case SYS_DVBT:
++ delivery_system = 0x20;
++ break;
++ case SYS_DVBC_ANNEX_A:
++ delivery_system = 0x30;
++ break;
++ case SYS_DVBT2:
++ delivery_system = 0x70;
++ break;
++ default:
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (c->bandwidth_hz <= 5000000)
++ bandwidth = 0x05;
++ else if (c->bandwidth_hz <= 6000000)
++ bandwidth = 0x06;
++ else if (c->bandwidth_hz <= 7000000)
++ bandwidth = 0x07;
++ else if (c->bandwidth_hz <= 8000000)
++ bandwidth = 0x08;
++ else if (c->bandwidth_hz <= 9000000)
++ bandwidth = 0x09;
++ else if (c->bandwidth_hz <= 10000000)
++ bandwidth = 0x0a;
++ else
++ bandwidth = 0x0f;
++
++ /* program tuner */
++ if (fe->ops.tuner_ops.set_params) {
++ ret = fe->ops.tuner_ops.set_params(fe);
++ if (ret)
++ goto err;
++ }
++
++ memcpy(cmd.args, "\x88\x02\x02\x02\x02", 5);
++ cmd.wlen = 5;
++ cmd.rlen = 5;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ /* that has no big effect */
++ if (c->delivery_system == SYS_DVBT)
++ memcpy(cmd.args, "\x89\x21\x06\x11\xff\x98", 6);
++ else if (c->delivery_system == SYS_DVBC_ANNEX_A)
++ memcpy(cmd.args, "\x89\x21\x06\x11\x89\xf0", 6);
++ else if (c->delivery_system == SYS_DVBT2)
++ memcpy(cmd.args, "\x89\x21\x06\x11\x89\x20", 6);
++ cmd.wlen = 6;
++ cmd.rlen = 3;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ if (c->delivery_system == SYS_DVBT2) {
++ /* select PLP */
++ cmd.args[0] = 0x52;
++ cmd.args[1] = c->stream_id & 0xff;
++ cmd.args[2] = c->stream_id == NO_STREAM_ID_FILTER ? 0 : 1;
++ cmd.wlen = 3;
++ cmd.rlen = 1;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++ }
++
++ memcpy(cmd.args, "\x51\x03", 2);
++ cmd.wlen = 2;
++ cmd.rlen = 12;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x12\x08\x04", 3);
++ cmd.wlen = 3;
++ cmd.rlen = 3;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x14\x00\x0c\x10\x12\x00", 6);
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x14\x00\x06\x10\x24\x00", 6);
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x14\x00\x07\x10\x00\x24", 6);
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x14\x00\x0a\x10\x00\x00", 6);
++ cmd.args[4] = delivery_system | bandwidth;
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ /* set DVB-C symbol rate */
++ if (c->delivery_system == SYS_DVBC_ANNEX_A) {
++ memcpy(cmd.args, "\x14\x00\x02\x11", 4);
++ cmd.args[4] = (c->symbol_rate / 1000) & 0xff;
++ cmd.args[5] = ((c->symbol_rate / 1000) >> 8) & 0xff;
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++ }
++
++ memcpy(cmd.args, "\x14\x00\x0f\x10\x10\x00", 6);
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x14\x00\x09\x10\xe3\x08", 6);
++ cmd.args[5] |= s->ts_clock_inv ? 0x00 : 0x10;
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x14\x00\x08\x10\xd7\x05", 6);
++ cmd.args[5] |= s->ts_clock_inv ? 0x00 : 0x10;
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x14\x00\x01\x12\x00\x00", 6);
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x14\x00\x01\x03\x0c\x00", 6);
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x85", 1);
++ cmd.wlen = 1;
++ cmd.rlen = 1;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ s->delivery_system = c->delivery_system;
++
++ return 0;
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2168_init(struct dvb_frontend *fe)
++{
++ struct si2168 *s = fe->demodulator_priv;
++ int ret, len, remaining;
++ const struct firmware *fw = NULL;
++ u8 *fw_file;
++ const unsigned int i2c_wr_max = 8;
++ struct si2168_cmd cmd;
++ unsigned int chip_id;
++
++ dev_dbg(&s->client->dev, "\n");
++
++ /* initialize */
++ memcpy(cmd.args, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00", 13);
++ cmd.wlen = 13;
++ cmd.rlen = 0;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ if (s->fw_loaded) {
++ /* resume */
++ memcpy(cmd.args, "\xc0\x06\x08\x0f\x00\x20\x21\x01", 8);
++ cmd.wlen = 8;
++ cmd.rlen = 1;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ memcpy(cmd.args, "\x85", 1);
++ cmd.wlen = 1;
++ cmd.rlen = 1;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ goto warm;
++ }
++
++ /* power up */
++ memcpy(cmd.args, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8);
++ cmd.wlen = 8;
++ cmd.rlen = 1;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ /* query chip revision */
++ memcpy(cmd.args, "\x02", 1);
++ cmd.wlen = 1;
++ cmd.rlen = 13;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 | cmd.args[3] << 8 |
++ cmd.args[4] << 0;
++
++ #define SI2168_A20 ('A' << 24 | 68 << 16 | '2' << 8 | '0' << 0)
++ #define SI2168_A30 ('A' << 24 | 68 << 16 | '3' << 8 | '0' << 0)
++ #define SI2168_B40 ('B' << 24 | 68 << 16 | '4' << 8 | '0' << 0)
++
++ switch (chip_id) {
++ case SI2168_A20:
++ fw_file = SI2168_A20_FIRMWARE;
++ break;
++ case SI2168_A30:
++ fw_file = SI2168_A30_FIRMWARE;
++ break;
++ case SI2168_B40:
++ fw_file = SI2168_B40_FIRMWARE;
++ break;
++ default:
++ dev_err(&s->client->dev,
++ "unknown chip version Si21%d-%c%c%c\n",
++ cmd.args[2], cmd.args[1],
++ cmd.args[3], cmd.args[4]);
++ ret = -EINVAL;
++ goto err;
++ }
++
++ /* cold state - try to download firmware */
++ dev_info(&s->client->dev, "found a '%s' in cold state\n",
++ si2168_ops.info.name);
++
++ /* request the firmware, this will block and timeout */
++ ret = request_firmware(&fw, fw_file, &s->client->dev);
++ if (ret) {
++ /* fallback mechanism to handle old name for Si2168 B40 fw */
++ if (chip_id == SI2168_B40) {
++ fw_file = SI2168_B40_FIRMWARE_FALLBACK;
++ ret = request_firmware(&fw, fw_file, &s->client->dev);
++ }
++
++ if (ret == 0) {
++ dev_notice(&s->client->dev,
++ "please install firmware file '%s'\n",
++ SI2168_B40_FIRMWARE);
++ } else {
++ dev_err(&s->client->dev,
++ "firmware file '%s' not found\n",
++ fw_file);
++ goto error_fw_release;
++ }
++ }
++
++ dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
++ fw_file);
++
++ if ((fw->size % 17 == 0) && (fw->data[0] > 5)) {
++ /* firmware is in the new format */
++ for (remaining = fw->size; remaining > 0; remaining -= 17) {
++ len = fw->data[fw->size - remaining];
++ memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
++ cmd.wlen = len;
++ cmd.rlen = 1;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret) {
++ dev_err(&s->client->dev,
++ "firmware download failed=%d\n",
++ ret);
++ goto error_fw_release;
++ }
++ }
++ } else {
++ /* firmware is in the old format */
++ for (remaining = fw->size; remaining > 0; remaining -= i2c_wr_max) {
++ len = remaining;
++ if (len > i2c_wr_max)
++ len = i2c_wr_max;
++
++ memcpy(cmd.args, &fw->data[fw->size - remaining], len);
++ cmd.wlen = len;
++ cmd.rlen = 1;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret) {
++ dev_err(&s->client->dev,
++ "firmware download failed=%d\n",
++ ret);
++ goto error_fw_release;
++ }
++ }
++ }
++
++ release_firmware(fw);
++ fw = NULL;
++
++ memcpy(cmd.args, "\x01\x01", 2);
++ cmd.wlen = 2;
++ cmd.rlen = 1;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ /* query firmware version */
++ memcpy(cmd.args, "\x11", 1);
++ cmd.wlen = 1;
++ cmd.rlen = 10;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ dev_dbg(&s->client->dev, "firmware version: %c.%c.%d\n",
++ cmd.args[6], cmd.args[7], cmd.args[8]);
++
++ /* set ts mode */
++ memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
++ cmd.args[4] |= s->ts_mode;
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ s->fw_loaded = true;
++
++ dev_info(&s->client->dev, "found a '%s' in warm state\n",
++ si2168_ops.info.name);
++warm:
++ s->active = true;
++
++ return 0;
++
++error_fw_release:
++ release_firmware(fw);
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2168_sleep(struct dvb_frontend *fe)
++{
++ struct si2168 *s = fe->demodulator_priv;
++ int ret;
++ struct si2168_cmd cmd;
++
++ dev_dbg(&s->client->dev, "\n");
++
++ s->active = false;
++
++ memcpy(cmd.args, "\x13", 1);
++ cmd.wlen = 1;
++ cmd.rlen = 0;
++ ret = si2168_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ return 0;
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2168_get_tune_settings(struct dvb_frontend *fe,
++ struct dvb_frontend_tune_settings *s)
++{
++ s->min_delay_ms = 900;
++
++ return 0;
++}
++
++/*
++ * I2C gate logic
++ * We must use unlocked i2c_transfer() here because I2C lock is already taken
++ * by tuner driver.
++ */
++static int si2168_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
++{
++ struct si2168 *s = mux_priv;
++ int ret;
++ struct i2c_msg gate_open_msg = {
++ .addr = s->client->addr,
++ .flags = 0,
++ .len = 3,
++ .buf = "\xc0\x0d\x01",
++ };
++
++ mutex_lock(&s->i2c_mutex);
++
++ /* open tuner I2C gate */
++ ret = __i2c_transfer(s->client->adapter, &gate_open_msg, 1);
++ if (ret != 1) {
++ dev_warn(&s->client->dev, "i2c write failed=%d\n", ret);
++ if (ret >= 0)
++ ret = -EREMOTEIO;
++ } else {
++ ret = 0;
++ }
++
++ return ret;
++}
++
++static int si2168_deselect(struct i2c_adapter *adap, void *mux_priv, u32 chan)
++{
++ struct si2168 *s = mux_priv;
++ int ret;
++ struct i2c_msg gate_close_msg = {
++ .addr = s->client->addr,
++ .flags = 0,
++ .len = 3,
++ .buf = "\xc0\x0d\x00",
++ };
++
++ /* close tuner I2C gate */
++ ret = __i2c_transfer(s->client->adapter, &gate_close_msg, 1);
++ if (ret != 1) {
++ dev_warn(&s->client->dev, "i2c write failed=%d\n", ret);
++ if (ret >= 0)
++ ret = -EREMOTEIO;
++ } else {
++ ret = 0;
++ }
++
++ mutex_unlock(&s->i2c_mutex);
++
++ return ret;
++}
++
++static const struct dvb_frontend_ops si2168_ops = {
++ .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
++ .info = {
++ .name = "Silicon Labs Si2168",
++ .caps = FE_CAN_FEC_1_2 |
++ FE_CAN_FEC_2_3 |
++ FE_CAN_FEC_3_4 |
++ FE_CAN_FEC_5_6 |
++ FE_CAN_FEC_7_8 |
++ FE_CAN_FEC_AUTO |
++ FE_CAN_QPSK |
++ FE_CAN_QAM_16 |
++ FE_CAN_QAM_32 |
++ FE_CAN_QAM_64 |
++ FE_CAN_QAM_128 |
++ FE_CAN_QAM_256 |
++ FE_CAN_QAM_AUTO |
++ FE_CAN_TRANSMISSION_MODE_AUTO |
++ FE_CAN_GUARD_INTERVAL_AUTO |
++ FE_CAN_HIERARCHY_AUTO |
++ FE_CAN_MUTE_TS |
++ FE_CAN_2G_MODULATION |
++ FE_CAN_MULTISTREAM
++ },
++
++ .get_tune_settings = si2168_get_tune_settings,
++
++ .init = si2168_init,
++ .sleep = si2168_sleep,
++
++ .set_frontend = si2168_set_frontend,
++
++ .read_status = si2168_read_status,
++};
++
++static int si2168_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct si2168_config *config = client->dev.platform_data;
++ struct si2168 *s;
++ int ret;
++
++ dev_dbg(&client->dev, "\n");
++
++ s = kzalloc(sizeof(struct si2168), GFP_KERNEL);
++ if (!s) {
++ ret = -ENOMEM;
++ dev_err(&client->dev, "kzalloc() failed\n");
++ goto err;
++ }
++
++ s->client = client;
++ mutex_init(&s->i2c_mutex);
++
++ /* create mux i2c adapter for tuner */
++ s->adapter = i2c_add_mux_adapter(client->adapter, &client->dev, s,
++ 0, 0, 0, si2168_select, si2168_deselect);
++ if (s->adapter == NULL) {
++ ret = -ENODEV;
++ goto err;
++ }
++
++ /* create dvb_frontend */
++ memcpy(&s->fe.ops, &si2168_ops, sizeof(struct dvb_frontend_ops));
++ s->fe.demodulator_priv = s;
++
++ *config->i2c_adapter = s->adapter;
++ *config->fe = &s->fe;
++ s->ts_mode = config->ts_mode;
++ s->ts_clock_inv = config->ts_clock_inv;
++ s->fw_loaded = false;
++
++ i2c_set_clientdata(client, s);
++
++ dev_info(&s->client->dev,
++ "Silicon Labs Si2168 successfully attached\n");
++ return 0;
++err:
++ kfree(s);
++ dev_dbg(&client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2168_remove(struct i2c_client *client)
++{
++ struct si2168 *s = i2c_get_clientdata(client);
++
++ dev_dbg(&client->dev, "\n");
++
++ i2c_del_mux_adapter(s->adapter);
++
++ s->fe.ops.release = NULL;
++ s->fe.demodulator_priv = NULL;
++
++ kfree(s);
++
++ return 0;
++}
++
++static const struct i2c_device_id si2168_id[] = {
++ {"si2168", 0},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, si2168_id);
++
++static struct i2c_driver si2168_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "si2168",
++ },
++ .probe = si2168_probe,
++ .remove = si2168_remove,
++ .id_table = si2168_id,
++};
++
++module_i2c_driver(si2168_driver);
++
++MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
++MODULE_DESCRIPTION("Silicon Labs Si2168 DVB-T/T2/C demodulator driver");
++MODULE_LICENSE("GPL");
++MODULE_FIRMWARE(SI2168_A20_FIRMWARE);
++MODULE_FIRMWARE(SI2168_A30_FIRMWARE);
++MODULE_FIRMWARE(SI2168_B40_FIRMWARE);
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/si2168.h linux-openelec/drivers/media/dvb-frontends/si2168.h
+--- linux-3.14.36/drivers/media/dvb-frontends/si2168.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/si2168.h 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,49 @@
++/*
++ * Silicon Labs Si2168 DVB-T/T2/C demodulator driver
++ *
++ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef SI2168_H
++#define SI2168_H
++
++#include <linux/dvb/frontend.h>
++/*
++ * I2C address
++ * 0x64
++ */
++struct si2168_config {
++ /*
++ * frontend
++ * returned by driver
++ */
++ struct dvb_frontend **fe;
++
++ /*
++ * tuner I2C adapter
++ * returned by driver
++ */
++ struct i2c_adapter **i2c_adapter;
++
++ /* TS mode */
++ u8 ts_mode;
++
++ /* TS clock inverted */
++ bool ts_clock_inv;
++
++};
++
++#define SI2168_TS_PARALLEL 0x06
++#define SI2168_TS_SERIAL 0x03
++
++#endif
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/si2168_priv.h linux-openelec/drivers/media/dvb-frontends/si2168_priv.h
+--- linux-3.14.36/drivers/media/dvb-frontends/si2168_priv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/si2168_priv.h 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,52 @@
++/*
++ * Silicon Labs Si2168 DVB-T/T2/C demodulator driver
++ *
++ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef SI2168_PRIV_H
++#define SI2168_PRIV_H
++
++#include "si2168.h"
++#include "dvb_frontend.h"
++#include <linux/firmware.h>
++#include <linux/i2c-mux.h>
++
++#define SI2168_A20_FIRMWARE "dvb-demod-si2168-a20-01.fw"
++#define SI2168_A30_FIRMWARE "dvb-demod-si2168-a30-01.fw"
++#define SI2168_B40_FIRMWARE "dvb-demod-si2168-b40-01.fw"
++#define SI2168_B40_FIRMWARE_FALLBACK "dvb-demod-si2168-02.fw"
++
++/* state struct */
++struct si2168 {
++ struct i2c_client *client;
++ struct i2c_adapter *adapter;
++ struct mutex i2c_mutex;
++ struct dvb_frontend fe;
++ fe_delivery_system_t delivery_system;
++ fe_status_t fe_status;
++ bool active;
++ bool fw_loaded;
++ u8 ts_mode;
++ bool ts_clock_inv;
++};
++
++/* firmare command struct */
++#define SI2168_ARGLEN 30
++struct si2168_cmd {
++ u8 args[SI2168_ARGLEN];
++ unsigned wlen;
++ unsigned rlen;
++};
++
++#endif
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/sp2.c linux-openelec/drivers/media/dvb-frontends/sp2.c
+--- linux-3.14.36/drivers/media/dvb-frontends/sp2.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/sp2.c 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,444 @@
++/*
++ * CIMaX SP2/SP2HF (Atmel T90FJR) CI driver
++ *
++ * Copyright (C) 2014 Olli Salonen <olli.salonen@iki.fi>
++ *
++ * Heavily based on CIMax2(R) SP2 driver in conjunction with NetUp Dual
++ * DVB-S2 CI card (cimax2) with following copyrights:
++ *
++ * Copyright (C) 2009 NetUP Inc.
++ * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
++ * Copyright (C) 2009 Abylay Ospan <aospan@netup.ru>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include "sp2_priv.h"
++
++static int sp2_read_i2c(struct sp2 *s, u8 reg, u8 *buf, int len)
++{
++ int ret;
++ struct i2c_client *client = s->client;
++ struct i2c_adapter *adap = client->adapter;
++ struct i2c_msg msg[] = {
++ {
++ .addr = client->addr,
++ .flags = 0,
++ .buf = &reg,
++ .len = 1
++ }, {
++ .addr = client->addr,
++ .flags = I2C_M_RD,
++ .buf = buf,
++ .len = len
++ }
++ };
++
++ ret = i2c_transfer(adap, msg, 2);
++
++ if (ret != 2) {
++ dev_err(&client->dev, "i2c read error, reg = 0x%02x, status = %d\n",
++ reg, ret);
++ if (ret < 0)
++ return ret;
++ else
++ return -EIO;
++ }
++
++ dev_dbg(&s->client->dev, "addr=0x%04x, reg = 0x%02x, data = %02x\n",
++ client->addr, reg, buf[0]);
++
++ return 0;
++}
++
++static int sp2_write_i2c(struct sp2 *s, u8 reg, u8 *buf, int len)
++{
++ int ret;
++ u8 buffer[35];
++ struct i2c_client *client = s->client;
++ struct i2c_adapter *adap = client->adapter;
++ struct i2c_msg msg = {
++ .addr = client->addr,
++ .flags = 0,
++ .buf = &buffer[0],
++ .len = len + 1
++ };
++
++ if ((len + 1) > sizeof(buffer)) {
++ dev_err(&client->dev, "i2c wr reg=%02x: len=%d is too big!\n",
++ reg, len);
++ return -EINVAL;
++ }
++
++ buffer[0] = reg;
++ memcpy(&buffer[1], buf, len);
++
++ ret = i2c_transfer(adap, &msg, 1);
++
++ if (ret != 1) {
++ dev_err(&client->dev, "i2c write error, reg = 0x%02x, status = %d\n",
++ reg, ret);
++ if (ret < 0)
++ return ret;
++ else
++ return -EIO;
++ }
++
++ dev_dbg(&s->client->dev, "addr=0x%04x, reg = 0x%02x, data = %*ph\n",
++ client->addr, reg, len, buf);
++
++ return 0;
++}
++
++static int sp2_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 acs,
++ u8 read, int addr, u8 data)
++{
++ struct sp2 *s = en50221->data;
++ u8 store;
++ int mem, ret;
++ int (*ci_op_cam)(void*, u8, int, u8, int*) = s->ci_control;
++
++ if (slot != 0)
++ return -EINVAL;
++
++ /*
++ * change module access type between IO space and attribute memory
++ * when needed
++ */
++ if (s->module_access_type != acs) {
++ ret = sp2_read_i2c(s, 0x00, &store, 1);
++
++ if (ret)
++ return ret;
++
++ store &= ~(SP2_MOD_CTL_ACS1 | SP2_MOD_CTL_ACS0);
++ store |= acs;
++
++ ret = sp2_write_i2c(s, 0x00, &store, 1);
++ if (ret)
++ return ret;
++ }
++
++ s->module_access_type = acs;
++
++ /* implementation of ci_op_cam is device specific */
++ if (ci_op_cam) {
++ ret = ci_op_cam(s->priv, read, addr, data, &mem);
++ } else {
++ dev_err(&s->client->dev, "callback not defined");
++ return -EINVAL;
++ }
++
++ if (ret)
++ return ret;
++
++ dev_dbg(&s->client->dev, "%s: slot=%d, addr=0x%04x, %s, data=%x",
++ (read) ? "read" : "write", slot, addr,
++ (acs == SP2_CI_ATTR_ACS) ? "attr" : "io",
++ (read) ? mem : data);
++
++ if (read)
++ return mem;
++ else
++ return 0;
++
++}
++
++int sp2_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
++ int slot, int addr)
++{
++ return sp2_ci_op_cam(en50221, slot, SP2_CI_ATTR_ACS,
++ SP2_CI_RD, addr, 0);
++}
++
++int sp2_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
++ int slot, int addr, u8 data)
++{
++ return sp2_ci_op_cam(en50221, slot, SP2_CI_ATTR_ACS,
++ SP2_CI_WR, addr, data);
++}
++
++int sp2_ci_read_cam_control(struct dvb_ca_en50221 *en50221,
++ int slot, u8 addr)
++{
++ return sp2_ci_op_cam(en50221, slot, SP2_CI_IO_ACS,
++ SP2_CI_RD, addr, 0);
++}
++
++int sp2_ci_write_cam_control(struct dvb_ca_en50221 *en50221,
++ int slot, u8 addr, u8 data)
++{
++ return sp2_ci_op_cam(en50221, slot, SP2_CI_IO_ACS,
++ SP2_CI_WR, addr, data);
++}
++
++int sp2_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
++{
++ struct sp2 *s = en50221->data;
++ u8 buf;
++ int ret;
++
++ dev_dbg(&s->client->dev, "slot: %d\n", slot);
++
++ if (slot != 0)
++ return -EINVAL;
++
++ /* RST on */
++ buf = SP2_MOD_CTL_RST;
++ ret = sp2_write_i2c(s, 0x00, &buf, 1);
++
++ if (ret)
++ return ret;
++
++ usleep_range(500, 600);
++
++ /* RST off */
++ buf = 0x00;
++ ret = sp2_write_i2c(s, 0x00, &buf, 1);
++
++ if (ret)
++ return ret;
++
++ msleep(1000);
++
++ return 0;
++}
++
++int sp2_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
++{
++ struct sp2 *s = en50221->data;
++
++ dev_dbg(&s->client->dev, "slot:%d\n", slot);
++
++ /* not implemented */
++ return 0;
++}
++
++int sp2_ci_slot_ts_enable(struct dvb_ca_en50221 *en50221, int slot)
++{
++ struct sp2 *s = en50221->data;
++ u8 buf;
++
++ dev_dbg(&s->client->dev, "slot:%d\n", slot);
++
++ if (slot != 0)
++ return -EINVAL;
++
++ sp2_read_i2c(s, 0x00, &buf, 1);
++
++ /* disable bypass and enable TS */
++ buf |= (SP2_MOD_CTL_TSOEN | SP2_MOD_CTL_TSIEN);
++ return sp2_write_i2c(s, 0, &buf, 1);
++}
++
++int sp2_ci_poll_slot_status(struct dvb_ca_en50221 *en50221,
++ int slot, int open)
++{
++ struct sp2 *s = en50221->data;
++ u8 buf[2];
++ int ret;
++
++ dev_dbg(&s->client->dev, "slot:%d open:%d\n", slot, open);
++
++ /*
++ * CAM module INSERT/REMOVE processing. Slow operation because of i2c
++ * transfers. Throttle read to one per sec.
++ */
++ if (time_after(jiffies, s->next_status_checked_time)) {
++ ret = sp2_read_i2c(s, 0x00, buf, 1);
++ s->next_status_checked_time = jiffies + msecs_to_jiffies(1000);
++
++ if (ret)
++ return 0;
++
++ if (buf[0] & SP2_MOD_CTL_DET)
++ s->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
++ DVB_CA_EN50221_POLL_CAM_READY;
++ else
++ s->status = 0;
++ }
++
++ return s->status;
++}
++
++static int sp2_init(struct sp2 *s)
++{
++ int ret = 0;
++ u8 buf;
++ u8 cimax_init[34] = {
++ 0x00, /* module A control*/
++ 0x00, /* auto select mask high A */
++ 0x00, /* auto select mask low A */
++ 0x00, /* auto select pattern high A */
++ 0x00, /* auto select pattern low A */
++ 0x44, /* memory access time A, 600 ns */
++ 0x00, /* invert input A */
++ 0x00, /* RFU */
++ 0x00, /* RFU */
++ 0x00, /* module B control*/
++ 0x00, /* auto select mask high B */
++ 0x00, /* auto select mask low B */
++ 0x00, /* auto select pattern high B */
++ 0x00, /* auto select pattern low B */
++ 0x44, /* memory access time B, 600 ns */
++ 0x00, /* invert input B */
++ 0x00, /* RFU */
++ 0x00, /* RFU */
++ 0x00, /* auto select mask high Ext */
++ 0x00, /* auto select mask low Ext */
++ 0x00, /* auto select pattern high Ext */
++ 0x00, /* auto select pattern low Ext */
++ 0x00, /* RFU */
++ 0x02, /* destination - module A */
++ 0x01, /* power control reg, VCC power on */
++ 0x00, /* RFU */
++ 0x00, /* int status read only */
++ 0x00, /* Interrupt Mask Register */
++ 0x05, /* EXTINT=active-high, INT=push-pull */
++ 0x00, /* USCG1 */
++ 0x04, /* ack active low */
++ 0x00, /* LOCK = 0 */
++ 0x22, /* unknown */
++ 0x00, /* synchronization? */
++ };
++
++ dev_dbg(&s->client->dev, "\n");
++
++ s->ca.owner = THIS_MODULE;
++ s->ca.read_attribute_mem = sp2_ci_read_attribute_mem;
++ s->ca.write_attribute_mem = sp2_ci_write_attribute_mem;
++ s->ca.read_cam_control = sp2_ci_read_cam_control;
++ s->ca.write_cam_control = sp2_ci_write_cam_control;
++ s->ca.slot_reset = sp2_ci_slot_reset;
++ s->ca.slot_shutdown = sp2_ci_slot_shutdown;
++ s->ca.slot_ts_enable = sp2_ci_slot_ts_enable;
++ s->ca.poll_slot_status = sp2_ci_poll_slot_status;
++ s->ca.data = s;
++ s->module_access_type = 0;
++
++ /* initialize all regs */
++ ret = sp2_write_i2c(s, 0x00, &cimax_init[0], 34);
++ if (ret)
++ goto err;
++
++ /* lock registers */
++ buf = 1;
++ ret = sp2_write_i2c(s, 0x1f, &buf, 1);
++ if (ret)
++ goto err;
++
++ /* power on slots */
++ ret = sp2_write_i2c(s, 0x18, &buf, 1);
++ if (ret)
++ goto err;
++
++ ret = dvb_ca_en50221_init(s->dvb_adap, &s->ca, 0, 1);
++ if (ret)
++ goto err;
++
++ return 0;
++
++err:
++ dev_dbg(&s->client->dev, "init failed=%d\n", ret);
++ return ret;
++}
++
++static int sp2_exit(struct i2c_client *client)
++{
++ struct sp2 *s;
++
++ dev_dbg(&client->dev, "\n");
++
++ if (client == NULL)
++ return 0;
++
++ s = i2c_get_clientdata(client);
++ if (s == NULL)
++ return 0;
++
++ if (s->ca.data == NULL)
++ return 0;
++
++ dvb_ca_en50221_release(&s->ca);
++
++ return 0;
++}
++
++static int sp2_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct sp2_config *cfg = client->dev.platform_data;
++ struct sp2 *s;
++ int ret;
++
++ dev_dbg(&client->dev, "\n");
++
++ s = kzalloc(sizeof(struct sp2), GFP_KERNEL);
++ if (!s) {
++ ret = -ENOMEM;
++ dev_err(&client->dev, "kzalloc() failed\n");
++ goto err;
++ }
++
++ s->client = client;
++ s->dvb_adap = cfg->dvb_adap;
++ s->priv = cfg->priv;
++ s->ci_control = cfg->ci_control;
++
++ i2c_set_clientdata(client, s);
++
++ ret = sp2_init(s);
++ if (ret)
++ goto err;
++
++ dev_info(&s->client->dev, "CIMaX SP2 successfully attached\n");
++ return 0;
++err:
++ dev_dbg(&client->dev, "init failed=%d\n", ret);
++ kfree(s);
++
++ return ret;
++}
++
++static int sp2_remove(struct i2c_client *client)
++{
++ struct sp2 *s = i2c_get_clientdata(client);
++
++ dev_dbg(&client->dev, "\n");
++
++ sp2_exit(client);
++ if (s != NULL)
++ kfree(s);
++
++ return 0;
++}
++
++static const struct i2c_device_id sp2_id[] = {
++ {"sp2", 0},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, sp2_id);
++
++static struct i2c_driver sp2_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "sp2",
++ },
++ .probe = sp2_probe,
++ .remove = sp2_remove,
++ .id_table = sp2_id,
++};
++
++module_i2c_driver(sp2_driver);
++
++MODULE_DESCRIPTION("CIMaX SP2/HF CI driver");
++MODULE_AUTHOR("Olli Salonen <olli.salonen@iki.fi>");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/sp2.h linux-openelec/drivers/media/dvb-frontends/sp2.h
+--- linux-3.14.36/drivers/media/dvb-frontends/sp2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/sp2.h 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,53 @@
++/*
++ * CIMaX SP2/HF CI driver
++ *
++ * Copyright (C) 2014 Olli Salonen <olli.salonen@iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef SP2_H
++#define SP2_H
++
++#include <linux/kconfig.h>
++#include "dvb_ca_en50221.h"
++
++/*
++ * I2C address
++ * 0x40 (port 0)
++ * 0x41 (port 1)
++ */
++struct sp2_config {
++ /* dvb_adapter to attach the ci to */
++ struct dvb_adapter *dvb_adap;
++
++ /* function ci_control handles the device specific ci ops */
++ void *ci_control;
++
++ /* priv is passed back to function ci_control */
++ void *priv;
++};
++
++extern int sp2_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
++ int slot, int addr);
++extern int sp2_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
++ int slot, int addr, u8 data);
++extern int sp2_ci_read_cam_control(struct dvb_ca_en50221 *en50221,
++ int slot, u8 addr);
++extern int sp2_ci_write_cam_control(struct dvb_ca_en50221 *en50221,
++ int slot, u8 addr, u8 data);
++extern int sp2_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot);
++extern int sp2_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot);
++extern int sp2_ci_slot_ts_enable(struct dvb_ca_en50221 *en50221, int slot);
++extern int sp2_ci_poll_slot_status(struct dvb_ca_en50221 *en50221,
++ int slot, int open);
++
++#endif
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/sp2_priv.h linux-openelec/drivers/media/dvb-frontends/sp2_priv.h
+--- linux-3.14.36/drivers/media/dvb-frontends/sp2_priv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/sp2_priv.h 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,50 @@
++/*
++ * CIMaX SP2/HF CI driver
++ *
++ * Copyright (C) 2014 Olli Salonen <olli.salonen@iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef SP2_PRIV_H
++#define SP2_PRIV_H
++
++#include "sp2.h"
++#include "dvb_frontend.h"
++
++/* state struct */
++struct sp2 {
++ int status;
++ struct i2c_client *client;
++ struct dvb_adapter *dvb_adap;
++ struct dvb_ca_en50221 ca;
++ int module_access_type;
++ unsigned long next_status_checked_time;
++ void *priv;
++ void *ci_control;
++};
++
++#define SP2_CI_ATTR_ACS 0x00
++#define SP2_CI_IO_ACS 0x04
++#define SP2_CI_WR 0
++#define SP2_CI_RD 1
++
++/* Module control register (0x00 module A, 0x09 module B) bits */
++#define SP2_MOD_CTL_DET 0x01
++#define SP2_MOD_CTL_AUTO 0x02
++#define SP2_MOD_CTL_ACS0 0x04
++#define SP2_MOD_CTL_ACS1 0x08
++#define SP2_MOD_CTL_HAD 0x10
++#define SP2_MOD_CTL_TSIEN 0x20
++#define SP2_MOD_CTL_TSOEN 0x40
++#define SP2_MOD_CTL_RST 0x80
++
++#endif
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/stb0899_algo.c linux-openelec/drivers/media/dvb-frontends/stb0899_algo.c
+--- linux-3.14.36/drivers/media/dvb-frontends/stb0899_algo.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/stb0899_algo.c 2015-07-24 18:03:30.132842002 -0500
+@@ -206,7 +206,6 @@
+ static enum stb0899_status stb0899_search_tmg(struct stb0899_state *state)
+ {
+ struct stb0899_internal *internal = &state->internal;
+- struct stb0899_params *params = &state->params;
+
+ short int derot_step, derot_freq = 0, derot_limit, next_loop = 3;
+ int index = 0;
+@@ -216,10 +215,9 @@
+
+ /* timing loop computation & symbol rate optimisation */
+ derot_limit = (internal->sub_range / 2L) / internal->mclk;
+- derot_step = (params->srate / 2L) / internal->mclk;
++ derot_step = internal->derot_step * 4; /* dertot_step = decreasing delta */
+
+ while ((stb0899_check_tmg(state) != TIMINGOK) && next_loop) {
+- index++;
+ derot_freq += index * internal->direction * derot_step; /* next derot zig zag position */
+
+ if (abs(derot_freq) > derot_limit)
+@@ -230,6 +228,7 @@
+ STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(internal->inversion * derot_freq));
+ stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
+ }
++ index++;
+ internal->direction = -internal->direction; /* Change zigzag direction */
+ }
+
+@@ -278,14 +277,18 @@
+ {
+ struct stb0899_internal *internal = &state->internal;
+
+- short int derot_freq = 0, last_derot_freq = 0, derot_limit, next_loop = 3;
++ short int derot_freq = 0, last_derot_freq = 0, derot_limit, derot_step, next_loop = 3;
+ int index = 0;
++ int base_freq;
+ u8 cfr[2];
+ u8 reg;
+
+ internal->status = NOCARRIER;
+ derot_limit = (internal->sub_range / 2L) / internal->mclk;
+ derot_freq = internal->derot_freq;
++ derot_step = internal->derot_step * 2;
++ last_derot_freq = internal->derot_freq;
++ base_freq = internal->derot_freq;
+
+ reg = stb0899_read_reg(state, STB0899_CFD);
+ STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
+@@ -294,11 +297,10 @@
+ do {
+ dprintk(state->verbose, FE_DEBUG, 1, "Derot Freq=%d, mclk=%d", derot_freq, internal->mclk);
+ if (stb0899_check_carrier(state) == NOCARRIER) {
+- index++;
+ last_derot_freq = derot_freq;
+- derot_freq += index * internal->direction * internal->derot_step; /* next zig zag derotator position */
++ derot_freq += index * internal->direction * derot_step; /* next zig zag derotator position */
+
+- if(abs(derot_freq) > derot_limit)
++ if (derot_freq > base_freq + derot_limit || derot_freq < base_freq - derot_limit)
+ next_loop--;
+
+ if (next_loop) {
+@@ -310,9 +312,10 @@
+ STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(internal->inversion * derot_freq));
+ stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
+ }
++ index++;
++ internal->direction = -internal->direction; /* Change zigzag direction */
+ }
+
+- internal->direction = -internal->direction; /* Change zigzag direction */
+ } while ((internal->status != CARRIEROK) && next_loop);
+
+ if (internal->status == CARRIEROK) {
+@@ -338,6 +341,7 @@
+ int lock = 0, index = 0, dataTime = 500, loop;
+ u8 reg;
+
++ msleep(1);
+ internal->status = NODATA;
+
+ /* RESET FEC */
+@@ -348,6 +352,7 @@
+ reg = stb0899_read_reg(state, STB0899_TSTRES);
+ STB0899_SETFIELD_VAL(FRESACS, reg, 0);
+ stb0899_write_reg(state, STB0899_TSTRES, reg);
++ msleep(1);
+
+ if (params->srate <= 2000000)
+ dataTime = 2000;
+@@ -363,6 +368,7 @@
+
+ stb0899_write_reg(state, STB0899_DSTATUS2, 0x00); /* force search loop */
+ while (1) {
++ msleep(1); // Alex: added 1 mSec
+ /* WARNING! VIT LOCKED has to be tested before VIT_END_LOOOP */
+ reg = stb0899_read_reg(state, STB0899_VSTATUS);
+ lock = STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg);
+@@ -390,20 +396,21 @@
+ short int derot_freq, derot_step, derot_limit, next_loop = 3;
+ u8 cfr[2];
+ u8 reg;
+- int index = 1;
++ int index = 0;
++ int base_freq;
+
+ struct stb0899_internal *internal = &state->internal;
+- struct stb0899_params *params = &state->params;
+
+- derot_step = (params->srate / 4L) / internal->mclk;
++ derot_step = internal->derot_step;
+ derot_limit = (internal->sub_range / 2L) / internal->mclk;
+ derot_freq = internal->derot_freq;
++ base_freq = internal->derot_freq;
+
+ do {
+ if ((internal->status != CARRIEROK) || (stb0899_check_data(state) != DATAOK)) {
+
+ derot_freq += index * internal->direction * derot_step; /* next zig zag derotator position */
+- if (abs(derot_freq) > derot_limit)
++ if (derot_freq > base_freq + derot_limit || derot_freq < base_freq - derot_limit)
+ next_loop--;
+
+ if (next_loop) {
+@@ -417,9 +424,9 @@
+ stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
+
+ stb0899_check_carrier(state);
+- index++;
+ }
+ }
++ index++;
+ internal->direction = -internal->direction; /* change zig zag direction */
+ } while ((internal->status != DATAOK) && next_loop);
+
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/stb0899_algo.c.orig linux-openelec/drivers/media/dvb-frontends/stb0899_algo.c.orig
+--- linux-3.14.36/drivers/media/dvb-frontends/stb0899_algo.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/stb0899_algo.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1536 @@
++/*
++ STB0899 Multistandard Frontend driver
++ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
++
++ Copyright (C) ST Microelectronics
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*/
++
++#include "stb0899_drv.h"
++#include "stb0899_priv.h"
++#include "stb0899_reg.h"
++
++static inline u32 stb0899_do_div(u64 n, u32 d)
++{
++ /* wrap do_div() for ease of use */
++
++ do_div(n, d);
++ return n;
++}
++
++#if 0
++/* These functions are currently unused */
++/*
++ * stb0899_calc_srate
++ * Compute symbol rate
++ */
++static u32 stb0899_calc_srate(u32 master_clk, u8 *sfr)
++{
++ u64 tmp;
++
++ /* srate = (SFR * master_clk) >> 20 */
++
++ /* sfr is of size 20 bit, stored with an offset of 4 bit */
++ tmp = (((u32)sfr[0]) << 16) | (((u32)sfr[1]) << 8) | sfr[2];
++ tmp &= ~0xf;
++ tmp *= master_clk;
++ tmp >>= 24;
++
++ return tmp;
++}
++
++/*
++ * stb0899_get_srate
++ * Get the current symbol rate
++ */
++static u32 stb0899_get_srate(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ u8 sfr[3];
++
++ stb0899_read_regs(state, STB0899_SFRH, sfr, 3);
++
++ return stb0899_calc_srate(internal->master_clk, sfr);
++}
++#endif
++
++/*
++ * stb0899_set_srate
++ * Set symbol frequency
++ * MasterClock: master clock frequency (hz)
++ * SymbolRate: symbol rate (bauds)
++ * return symbol frequency
++ */
++static u32 stb0899_set_srate(struct stb0899_state *state, u32 master_clk, u32 srate)
++{
++ u32 tmp;
++ u8 sfr[3];
++
++ dprintk(state->verbose, FE_DEBUG, 1, "-->");
++ /*
++ * in order to have the maximum precision, the symbol rate entered into
++ * the chip is computed as the closest value of the "true value".
++ * In this purpose, the symbol rate value is rounded (1 is added on the bit
++ * below the LSB )
++ *
++ * srate = (SFR * master_clk) >> 20
++ * <=>
++ * SFR = srate << 20 / master_clk
++ *
++ * rounded:
++ * SFR = (srate << 21 + master_clk) / (2 * master_clk)
++ *
++ * stored as 20 bit number with an offset of 4 bit:
++ * sfr = SFR << 4;
++ */
++
++ tmp = stb0899_do_div((((u64)srate) << 21) + master_clk, 2 * master_clk);
++ tmp <<= 4;
++
++ sfr[0] = tmp >> 16;
++ sfr[1] = tmp >> 8;
++ sfr[2] = tmp;
++
++ stb0899_write_regs(state, STB0899_SFRH, sfr, 3);
++
++ return srate;
++}
++
++/*
++ * stb0899_calc_derot_time
++ * Compute the amount of time needed by the derotator to lock
++ * SymbolRate: Symbol rate
++ * return: derotator time constant (ms)
++ */
++static long stb0899_calc_derot_time(long srate)
++{
++ if (srate > 0)
++ return (100000 / (srate / 1000));
++ else
++ return 0;
++}
++
++/*
++ * stb0899_carr_width
++ * Compute the width of the carrier
++ * return: width of carrier (kHz or Mhz)
++ */
++long stb0899_carr_width(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++
++ return (internal->srate + (internal->srate * internal->rolloff) / 100);
++}
++
++/*
++ * stb0899_first_subrange
++ * Compute the first subrange of the search
++ */
++static void stb0899_first_subrange(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_params *params = &state->params;
++ struct stb0899_config *config = state->config;
++
++ int range = 0;
++ u32 bandwidth = 0;
++
++ if (config->tuner_get_bandwidth) {
++ stb0899_i2c_gate_ctrl(&state->frontend, 1);
++ config->tuner_get_bandwidth(&state->frontend, &bandwidth);
++ stb0899_i2c_gate_ctrl(&state->frontend, 0);
++ range = bandwidth - stb0899_carr_width(state) / 2;
++ }
++
++ if (range > 0)
++ internal->sub_range = min(internal->srch_range, range);
++ else
++ internal->sub_range = 0;
++
++ internal->freq = params->freq;
++ internal->tuner_offst = 0L;
++ internal->sub_dir = 1;
++}
++
++/*
++ * stb0899_check_tmg
++ * check for timing lock
++ * internal.Ttiming: time to wait for loop lock
++ */
++static enum stb0899_status stb0899_check_tmg(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ int lock;
++ u8 reg;
++ s8 timing;
++
++ msleep(internal->t_derot);
++
++ stb0899_write_reg(state, STB0899_RTF, 0xf2);
++ reg = stb0899_read_reg(state, STB0899_TLIR);
++ lock = STB0899_GETFIELD(TLIR_TMG_LOCK_IND, reg);
++ timing = stb0899_read_reg(state, STB0899_RTF);
++
++ if (lock >= 42) {
++ if ((lock > 48) && (abs(timing) >= 110)) {
++ internal->status = ANALOGCARRIER;
++ dprintk(state->verbose, FE_DEBUG, 1, "-->ANALOG Carrier !");
++ } else {
++ internal->status = TIMINGOK;
++ dprintk(state->verbose, FE_DEBUG, 1, "------->TIMING OK !");
++ }
++ } else {
++ internal->status = NOTIMING;
++ dprintk(state->verbose, FE_DEBUG, 1, "-->NO TIMING !");
++ }
++ return internal->status;
++}
++
++/*
++ * stb0899_search_tmg
++ * perform a fs/2 zig-zag to find timing
++ */
++static enum stb0899_status stb0899_search_tmg(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_params *params = &state->params;
++
++ short int derot_step, derot_freq = 0, derot_limit, next_loop = 3;
++ int index = 0;
++ u8 cfr[2];
++
++ internal->status = NOTIMING;
++
++ /* timing loop computation & symbol rate optimisation */
++ derot_limit = (internal->sub_range / 2L) / internal->mclk;
++ derot_step = (params->srate / 2L) / internal->mclk;
++
++ while ((stb0899_check_tmg(state) != TIMINGOK) && next_loop) {
++ index++;
++ derot_freq += index * internal->direction * derot_step; /* next derot zig zag position */
++
++ if (abs(derot_freq) > derot_limit)
++ next_loop--;
++
++ if (next_loop) {
++ STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(internal->inversion * derot_freq));
++ STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(internal->inversion * derot_freq));
++ stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
++ }
++ internal->direction = -internal->direction; /* Change zigzag direction */
++ }
++
++ if (internal->status == TIMINGOK) {
++ stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
++ internal->derot_freq = internal->inversion * MAKEWORD16(cfr[0], cfr[1]);
++ dprintk(state->verbose, FE_DEBUG, 1, "------->TIMING OK ! Derot Freq = %d", internal->derot_freq);
++ }
++
++ return internal->status;
++}
++
++/*
++ * stb0899_check_carrier
++ * Check for carrier found
++ */
++static enum stb0899_status stb0899_check_carrier(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ u8 reg;
++
++ msleep(internal->t_derot); /* wait for derotator ok */
++
++ reg = stb0899_read_reg(state, STB0899_CFD);
++ STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
++ stb0899_write_reg(state, STB0899_CFD, reg);
++
++ reg = stb0899_read_reg(state, STB0899_DSTATUS);
++ dprintk(state->verbose, FE_DEBUG, 1, "--------------------> STB0899_DSTATUS=[0x%02x]", reg);
++ if (STB0899_GETFIELD(CARRIER_FOUND, reg)) {
++ internal->status = CARRIEROK;
++ dprintk(state->verbose, FE_DEBUG, 1, "-------------> CARRIEROK !");
++ } else {
++ internal->status = NOCARRIER;
++ dprintk(state->verbose, FE_DEBUG, 1, "-------------> NOCARRIER !");
++ }
++
++ return internal->status;
++}
++
++/*
++ * stb0899_search_carrier
++ * Search for a QPSK carrier with the derotator
++ */
++static enum stb0899_status stb0899_search_carrier(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++
++ short int derot_freq = 0, last_derot_freq = 0, derot_limit, next_loop = 3;
++ int index = 0;
++ u8 cfr[2];
++ u8 reg;
++
++ internal->status = NOCARRIER;
++ derot_limit = (internal->sub_range / 2L) / internal->mclk;
++ derot_freq = internal->derot_freq;
++
++ reg = stb0899_read_reg(state, STB0899_CFD);
++ STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
++ stb0899_write_reg(state, STB0899_CFD, reg);
++
++ do {
++ dprintk(state->verbose, FE_DEBUG, 1, "Derot Freq=%d, mclk=%d", derot_freq, internal->mclk);
++ if (stb0899_check_carrier(state) == NOCARRIER) {
++ index++;
++ last_derot_freq = derot_freq;
++ derot_freq += index * internal->direction * internal->derot_step; /* next zig zag derotator position */
++
++ if(abs(derot_freq) > derot_limit)
++ next_loop--;
++
++ if (next_loop) {
++ reg = stb0899_read_reg(state, STB0899_CFD);
++ STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
++ stb0899_write_reg(state, STB0899_CFD, reg);
++
++ STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(internal->inversion * derot_freq));
++ STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(internal->inversion * derot_freq));
++ stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
++ }
++ }
++
++ internal->direction = -internal->direction; /* Change zigzag direction */
++ } while ((internal->status != CARRIEROK) && next_loop);
++
++ if (internal->status == CARRIEROK) {
++ stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
++ internal->derot_freq = internal->inversion * MAKEWORD16(cfr[0], cfr[1]);
++ dprintk(state->verbose, FE_DEBUG, 1, "----> CARRIER OK !, Derot Freq=%d", internal->derot_freq);
++ } else {
++ internal->derot_freq = last_derot_freq;
++ }
++
++ return internal->status;
++}
++
++/*
++ * stb0899_check_data
++ * Check for data found
++ */
++static enum stb0899_status stb0899_check_data(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_params *params = &state->params;
++
++ int lock = 0, index = 0, dataTime = 500, loop;
++ u8 reg;
++
++ internal->status = NODATA;
++
++ /* RESET FEC */
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESACS, reg, 1);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++ msleep(1);
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESACS, reg, 0);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++
++ if (params->srate <= 2000000)
++ dataTime = 2000;
++ else if (params->srate <= 5000000)
++ dataTime = 1500;
++ else if (params->srate <= 15000000)
++ dataTime = 1000;
++ else
++ dataTime = 500;
++
++ /* clear previous failed END_LOOPVIT */
++ stb0899_read_reg(state, STB0899_VSTATUS);
++
++ stb0899_write_reg(state, STB0899_DSTATUS2, 0x00); /* force search loop */
++ while (1) {
++ /* WARNING! VIT LOCKED has to be tested before VIT_END_LOOP */
++ reg = stb0899_read_reg(state, STB0899_VSTATUS);
++ lock = STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg);
++ loop = STB0899_GETFIELD(VSTATUS_END_LOOPVIT, reg);
++
++ if (lock || loop || (index > dataTime))
++ break;
++ index++;
++ }
++
++ if (lock) { /* DATA LOCK indicator */
++ internal->status = DATAOK;
++ dprintk(state->verbose, FE_DEBUG, 1, "-----------------> DATA OK !");
++ }
++
++ return internal->status;
++}
++
++/*
++ * stb0899_search_data
++ * Search for a QPSK carrier with the derotator
++ */
++static enum stb0899_status stb0899_search_data(struct stb0899_state *state)
++{
++ short int derot_freq, derot_step, derot_limit, next_loop = 3;
++ u8 cfr[2];
++ u8 reg;
++ int index = 1;
++
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_params *params = &state->params;
++
++ derot_step = (params->srate / 4L) / internal->mclk;
++ derot_limit = (internal->sub_range / 2L) / internal->mclk;
++ derot_freq = internal->derot_freq;
++
++ do {
++ if ((internal->status != CARRIEROK) || (stb0899_check_data(state) != DATAOK)) {
++
++ derot_freq += index * internal->direction * derot_step; /* next zig zag derotator position */
++ if (abs(derot_freq) > derot_limit)
++ next_loop--;
++
++ if (next_loop) {
++ dprintk(state->verbose, FE_DEBUG, 1, "Derot freq=%d, mclk=%d", derot_freq, internal->mclk);
++ reg = stb0899_read_reg(state, STB0899_CFD);
++ STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
++ stb0899_write_reg(state, STB0899_CFD, reg);
++
++ STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(internal->inversion * derot_freq));
++ STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(internal->inversion * derot_freq));
++ stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
++
++ stb0899_check_carrier(state);
++ index++;
++ }
++ }
++ internal->direction = -internal->direction; /* change zig zag direction */
++ } while ((internal->status != DATAOK) && next_loop);
++
++ if (internal->status == DATAOK) {
++ stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
++
++ /* store autodetected IQ swapping as default for DVB-S2 tuning */
++ reg = stb0899_read_reg(state, STB0899_IQSWAP);
++ if (STB0899_GETFIELD(SYM, reg))
++ internal->inversion = IQ_SWAP_ON;
++ else
++ internal->inversion = IQ_SWAP_OFF;
++
++ internal->derot_freq = internal->inversion * MAKEWORD16(cfr[0], cfr[1]);
++ dprintk(state->verbose, FE_DEBUG, 1, "------> DATAOK ! Derot Freq=%d", internal->derot_freq);
++ }
++
++ return internal->status;
++}
++
++/*
++ * stb0899_check_range
++ * check if the found frequency is in the correct range
++ */
++static enum stb0899_status stb0899_check_range(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_params *params = &state->params;
++
++ int range_offst, tp_freq;
++
++ range_offst = internal->srch_range / 2000;
++ tp_freq = internal->freq - (internal->derot_freq * internal->mclk) / 1000;
++
++ if ((tp_freq >= params->freq - range_offst) && (tp_freq <= params->freq + range_offst)) {
++ internal->status = RANGEOK;
++ dprintk(state->verbose, FE_DEBUG, 1, "----> RANGEOK !");
++ } else {
++ internal->status = OUTOFRANGE;
++ dprintk(state->verbose, FE_DEBUG, 1, "----> OUT OF RANGE !");
++ }
++
++ return internal->status;
++}
++
++/*
++ * NextSubRange
++ * Compute the next subrange of the search
++ */
++static void next_sub_range(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_params *params = &state->params;
++
++ long old_sub_range;
++
++ if (internal->sub_dir > 0) {
++ old_sub_range = internal->sub_range;
++ internal->sub_range = min((internal->srch_range / 2) -
++ (internal->tuner_offst + internal->sub_range / 2),
++ internal->sub_range);
++
++ if (internal->sub_range < 0)
++ internal->sub_range = 0;
++
++ internal->tuner_offst += (old_sub_range + internal->sub_range) / 2;
++ }
++
++ internal->freq = params->freq + (internal->sub_dir * internal->tuner_offst) / 1000;
++ internal->sub_dir = -internal->sub_dir;
++}
++
++/*
++ * stb0899_dvbs_algo
++ * Search for a signal, timing, carrier and data for a
++ * given frequency in a given range
++ */
++enum stb0899_status stb0899_dvbs_algo(struct stb0899_state *state)
++{
++ struct stb0899_params *params = &state->params;
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_config *config = state->config;
++
++ u8 bclc, reg;
++ u8 cfr[2];
++ u8 eq_const[10];
++ s32 clnI = 3;
++ u32 bandwidth = 0;
++
++ /* BETA values rated @ 99MHz */
++ s32 betaTab[5][4] = {
++ /* 5 10 20 30MBps */
++ { 37, 34, 32, 31 }, /* QPSK 1/2 */
++ { 37, 35, 33, 31 }, /* QPSK 2/3 */
++ { 37, 35, 33, 31 }, /* QPSK 3/4 */
++ { 37, 36, 33, 32 }, /* QPSK 5/6 */
++ { 37, 36, 33, 32 } /* QPSK 7/8 */
++ };
++
++ internal->direction = 1;
++
++ stb0899_set_srate(state, internal->master_clk, params->srate);
++ /* Carrier loop optimization versus symbol rate for acquisition*/
++ if (params->srate <= 5000000) {
++ stb0899_write_reg(state, STB0899_ACLC, 0x89);
++ bclc = stb0899_read_reg(state, STB0899_BCLC);
++ STB0899_SETFIELD_VAL(BETA, bclc, 0x1c);
++ stb0899_write_reg(state, STB0899_BCLC, bclc);
++ clnI = 0;
++ } else if (params->srate <= 15000000) {
++ stb0899_write_reg(state, STB0899_ACLC, 0xc9);
++ bclc = stb0899_read_reg(state, STB0899_BCLC);
++ STB0899_SETFIELD_VAL(BETA, bclc, 0x22);
++ stb0899_write_reg(state, STB0899_BCLC, bclc);
++ clnI = 1;
++ } else if(params->srate <= 25000000) {
++ stb0899_write_reg(state, STB0899_ACLC, 0x89);
++ bclc = stb0899_read_reg(state, STB0899_BCLC);
++ STB0899_SETFIELD_VAL(BETA, bclc, 0x27);
++ stb0899_write_reg(state, STB0899_BCLC, bclc);
++ clnI = 2;
++ } else {
++ stb0899_write_reg(state, STB0899_ACLC, 0xc8);
++ bclc = stb0899_read_reg(state, STB0899_BCLC);
++ STB0899_SETFIELD_VAL(BETA, bclc, 0x29);
++ stb0899_write_reg(state, STB0899_BCLC, bclc);
++ clnI = 3;
++ }
++
++ dprintk(state->verbose, FE_DEBUG, 1, "Set the timing loop to acquisition");
++ /* Set the timing loop to acquisition */
++ stb0899_write_reg(state, STB0899_RTC, 0x46);
++ stb0899_write_reg(state, STB0899_CFD, 0xee);
++
++ /* !! WARNING !!
++ * Do not read any status variables while acquisition,
++ * If any needed, read before the acquisition starts
++ * querying status while acquiring causes the
++ * acquisition to go bad and hence no locks.
++ */
++ dprintk(state->verbose, FE_DEBUG, 1, "Derot Percent=%d Srate=%d mclk=%d",
++ internal->derot_percent, params->srate, internal->mclk);
++
++ /* Initial calculations */
++ internal->derot_step = internal->derot_percent * (params->srate / 1000L) / internal->mclk; /* DerotStep/1000 * Fsymbol */
++ internal->t_derot = stb0899_calc_derot_time(params->srate);
++ internal->t_data = 500;
++
++ dprintk(state->verbose, FE_DEBUG, 1, "RESET stream merger");
++ /* RESET Stream merger */
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESRS, reg, 1);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++
++ /*
++ * Set KDIVIDER to an intermediate value between
++ * 1/2 and 7/8 for acquisition
++ */
++ reg = stb0899_read_reg(state, STB0899_DEMAPVIT);
++ STB0899_SETFIELD_VAL(DEMAPVIT_KDIVIDER, reg, 60);
++ stb0899_write_reg(state, STB0899_DEMAPVIT, reg);
++
++ stb0899_write_reg(state, STB0899_EQON, 0x01); /* Equalizer OFF while acquiring */
++ stb0899_write_reg(state, STB0899_VITSYNC, 0x19);
++
++ stb0899_first_subrange(state);
++ do {
++ /* Initialisations */
++ cfr[0] = cfr[1] = 0;
++ stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* RESET derotator frequency */
++
++ stb0899_write_reg(state, STB0899_RTF, 0);
++ reg = stb0899_read_reg(state, STB0899_CFD);
++ STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
++ stb0899_write_reg(state, STB0899_CFD, reg);
++
++ internal->derot_freq = 0;
++ internal->status = NOAGC1;
++
++ /* enable tuner I/O */
++ stb0899_i2c_gate_ctrl(&state->frontend, 1);
++
++ /* Move tuner to frequency */
++ dprintk(state->verbose, FE_DEBUG, 1, "Tuner set frequency");
++ if (state->config->tuner_set_frequency)
++ state->config->tuner_set_frequency(&state->frontend, internal->freq);
++
++ if (state->config->tuner_get_frequency)
++ state->config->tuner_get_frequency(&state->frontend, &internal->freq);
++
++ msleep(internal->t_agc1 + internal->t_agc2 + internal->t_derot); /* AGC1, AGC2 and timing loop */
++ dprintk(state->verbose, FE_DEBUG, 1, "current derot freq=%d", internal->derot_freq);
++ internal->status = AGC1OK;
++
++ /* There is signal in the band */
++ if (config->tuner_get_bandwidth)
++ config->tuner_get_bandwidth(&state->frontend, &bandwidth);
++
++ /* disable tuner I/O */
++ stb0899_i2c_gate_ctrl(&state->frontend, 0);
++
++ if (params->srate <= bandwidth / 2)
++ stb0899_search_tmg(state); /* For low rates (SCPC) */
++ else
++ stb0899_check_tmg(state); /* For high rates (MCPC) */
++
++ if (internal->status == TIMINGOK) {
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "TIMING OK ! Derot freq=%d, mclk=%d",
++ internal->derot_freq, internal->mclk);
++
++ if (stb0899_search_carrier(state) == CARRIEROK) { /* Search for carrier */
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "CARRIER OK ! Derot freq=%d, mclk=%d",
++ internal->derot_freq, internal->mclk);
++
++ if (stb0899_search_data(state) == DATAOK) { /* Check for data */
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "DATA OK ! Derot freq=%d, mclk=%d",
++ internal->derot_freq, internal->mclk);
++
++ if (stb0899_check_range(state) == RANGEOK) {
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "RANGE OK ! derot freq=%d, mclk=%d",
++ internal->derot_freq, internal->mclk);
++
++ internal->freq = params->freq - ((internal->derot_freq * internal->mclk) / 1000);
++ reg = stb0899_read_reg(state, STB0899_PLPARM);
++ internal->fecrate = STB0899_GETFIELD(VITCURPUN, reg);
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "freq=%d, internal resultant freq=%d",
++ params->freq, internal->freq);
++
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "internal puncture rate=%d",
++ internal->fecrate);
++ }
++ }
++ }
++ }
++ if (internal->status != RANGEOK)
++ next_sub_range(state);
++
++ } while (internal->sub_range && internal->status != RANGEOK);
++
++ /* Set the timing loop to tracking */
++ stb0899_write_reg(state, STB0899_RTC, 0x33);
++ stb0899_write_reg(state, STB0899_CFD, 0xf7);
++ /* if locked and range ok, set Kdiv */
++ if (internal->status == RANGEOK) {
++ dprintk(state->verbose, FE_DEBUG, 1, "Locked & Range OK !");
++ stb0899_write_reg(state, STB0899_EQON, 0x41); /* Equalizer OFF while acquiring */
++ stb0899_write_reg(state, STB0899_VITSYNC, 0x39); /* SN to b'11 for acquisition */
++
++ /*
++ * Carrier loop optimization versus
++ * symbol Rate/Puncture Rate for Tracking
++ */
++ reg = stb0899_read_reg(state, STB0899_BCLC);
++ switch (internal->fecrate) {
++ case STB0899_FEC_1_2: /* 13 */
++ stb0899_write_reg(state, STB0899_DEMAPVIT, 0x1a);
++ STB0899_SETFIELD_VAL(BETA, reg, betaTab[0][clnI]);
++ stb0899_write_reg(state, STB0899_BCLC, reg);
++ break;
++ case STB0899_FEC_2_3: /* 18 */
++ stb0899_write_reg(state, STB0899_DEMAPVIT, 44);
++ STB0899_SETFIELD_VAL(BETA, reg, betaTab[1][clnI]);
++ stb0899_write_reg(state, STB0899_BCLC, reg);
++ break;
++ case STB0899_FEC_3_4: /* 21 */
++ stb0899_write_reg(state, STB0899_DEMAPVIT, 60);
++ STB0899_SETFIELD_VAL(BETA, reg, betaTab[2][clnI]);
++ stb0899_write_reg(state, STB0899_BCLC, reg);
++ break;
++ case STB0899_FEC_5_6: /* 24 */
++ stb0899_write_reg(state, STB0899_DEMAPVIT, 75);
++ STB0899_SETFIELD_VAL(BETA, reg, betaTab[3][clnI]);
++ stb0899_write_reg(state, STB0899_BCLC, reg);
++ break;
++ case STB0899_FEC_6_7: /* 25 */
++ stb0899_write_reg(state, STB0899_DEMAPVIT, 88);
++ stb0899_write_reg(state, STB0899_ACLC, 0x88);
++ stb0899_write_reg(state, STB0899_BCLC, 0x9a);
++ break;
++ case STB0899_FEC_7_8: /* 26 */
++ stb0899_write_reg(state, STB0899_DEMAPVIT, 94);
++ STB0899_SETFIELD_VAL(BETA, reg, betaTab[4][clnI]);
++ stb0899_write_reg(state, STB0899_BCLC, reg);
++ break;
++ default:
++ dprintk(state->verbose, FE_DEBUG, 1, "Unsupported Puncture Rate");
++ break;
++ }
++ /* release stream merger RESET */
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESRS, reg, 0);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++
++ /* disable carrier detector */
++ reg = stb0899_read_reg(state, STB0899_CFD);
++ STB0899_SETFIELD_VAL(CFD_ON, reg, 0);
++ stb0899_write_reg(state, STB0899_CFD, reg);
++
++ stb0899_read_regs(state, STB0899_EQUAI1, eq_const, 10);
++ }
++
++ return internal->status;
++}
++
++/*
++ * stb0899_dvbs2_config_uwp
++ * Configure UWP state machine
++ */
++static void stb0899_dvbs2_config_uwp(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_config *config = state->config;
++ u32 uwp1, uwp2, uwp3, reg;
++
++ uwp1 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL1);
++ uwp2 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL2);
++ uwp3 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL3);
++
++ STB0899_SETFIELD_VAL(UWP_ESN0_AVE, uwp1, config->esno_ave);
++ STB0899_SETFIELD_VAL(UWP_ESN0_QUANT, uwp1, config->esno_quant);
++ STB0899_SETFIELD_VAL(UWP_TH_SOF, uwp1, config->uwp_threshold_sof);
++
++ STB0899_SETFIELD_VAL(FE_COARSE_TRK, uwp2, internal->av_frame_coarse);
++ STB0899_SETFIELD_VAL(FE_FINE_TRK, uwp2, internal->av_frame_fine);
++ STB0899_SETFIELD_VAL(UWP_MISS_TH, uwp2, config->miss_threshold);
++
++ STB0899_SETFIELD_VAL(UWP_TH_ACQ, uwp3, config->uwp_threshold_acq);
++ STB0899_SETFIELD_VAL(UWP_TH_TRACK, uwp3, config->uwp_threshold_track);
++
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL1, STB0899_OFF0_UWP_CNTRL1, uwp1);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL2, STB0899_OFF0_UWP_CNTRL2, uwp2);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL3, STB0899_OFF0_UWP_CNTRL3, uwp3);
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, SOF_SRCH_TO);
++ STB0899_SETFIELD_VAL(SOF_SEARCH_TIMEOUT, reg, config->sof_search_timeout);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_SOF_SRCH_TO, STB0899_OFF0_SOF_SRCH_TO, reg);
++}
++
++/*
++ * stb0899_dvbs2_config_csm_auto
++ * Set CSM to AUTO mode
++ */
++static void stb0899_dvbs2_config_csm_auto(struct stb0899_state *state)
++{
++ u32 reg;
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
++ STB0899_SETFIELD_VAL(CSM_AUTO_PARAM, reg, 1);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, reg);
++}
++
++static long Log2Int(int number)
++{
++ int i;
++
++ i = 0;
++ while ((1 << i) <= abs(number))
++ i++;
++
++ if (number == 0)
++ i = 1;
++
++ return i - 1;
++}
++
++/*
++ * stb0899_dvbs2_calc_srate
++ * compute BTR_NOM_FREQ for the symbol rate
++ */
++static u32 stb0899_dvbs2_calc_srate(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_config *config = state->config;
++
++ u32 dec_ratio, dec_rate, decim, remain, intval, btr_nom_freq;
++ u32 master_clk, srate;
++
++ dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
++ dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
++ dec_rate = Log2Int(dec_ratio);
++ decim = 1 << dec_rate;
++ master_clk = internal->master_clk / 1000;
++ srate = internal->srate / 1000;
++
++ if (decim <= 4) {
++ intval = (decim * (1 << (config->btr_nco_bits - 1))) / master_clk;
++ remain = (decim * (1 << (config->btr_nco_bits - 1))) % master_clk;
++ } else {
++ intval = (1 << (config->btr_nco_bits - 1)) / (master_clk / 100) * decim / 100;
++ remain = (decim * (1 << (config->btr_nco_bits - 1))) % master_clk;
++ }
++ btr_nom_freq = (intval * srate) + ((remain * srate) / master_clk);
++
++ return btr_nom_freq;
++}
++
++/*
++ * stb0899_dvbs2_calc_dev
++ * compute the correction to be applied to symbol rate
++ */
++static u32 stb0899_dvbs2_calc_dev(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ u32 dec_ratio, correction, master_clk, srate;
++
++ dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
++ dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
++
++ master_clk = internal->master_clk / 1000; /* for integer Calculation */
++ srate = internal->srate / 1000; /* for integer Calculation */
++ correction = (512 * master_clk) / (2 * dec_ratio * srate);
++
++ return correction;
++}
++
++/*
++ * stb0899_dvbs2_set_srate
++ * Set DVBS2 symbol rate
++ */
++static void stb0899_dvbs2_set_srate(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++
++ u32 dec_ratio, dec_rate, win_sel, decim, f_sym, btr_nom_freq;
++ u32 correction, freq_adj, band_lim, decim_cntrl, reg;
++ u8 anti_alias;
++
++ /*set decimation to 1*/
++ dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
++ dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
++ dec_rate = Log2Int(dec_ratio);
++
++ win_sel = 0;
++ if (dec_rate >= 5)
++ win_sel = dec_rate - 4;
++
++ decim = (1 << dec_rate);
++ /* (FSamp/Fsymbol *100) for integer Calculation */
++ f_sym = internal->master_clk / ((decim * internal->srate) / 1000);
++
++ if (f_sym <= 2250) /* don't band limit signal going into btr block*/
++ band_lim = 1;
++ else
++ band_lim = 0; /* band limit signal going into btr block*/
++
++ decim_cntrl = ((win_sel << 3) & 0x18) + ((band_lim << 5) & 0x20) + (dec_rate & 0x7);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DECIM_CNTRL, STB0899_OFF0_DECIM_CNTRL, decim_cntrl);
++
++ if (f_sym <= 3450)
++ anti_alias = 0;
++ else if (f_sym <= 4250)
++ anti_alias = 1;
++ else
++ anti_alias = 2;
++
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ANTI_ALIAS_SEL, STB0899_OFF0_ANTI_ALIAS_SEL, anti_alias);
++ btr_nom_freq = stb0899_dvbs2_calc_srate(state);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_NOM_FREQ, STB0899_OFF0_BTR_NOM_FREQ, btr_nom_freq);
++
++ correction = stb0899_dvbs2_calc_dev(state);
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_CNTRL);
++ STB0899_SETFIELD_VAL(BTR_FREQ_CORR, reg, correction);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_CNTRL, STB0899_OFF0_BTR_CNTRL, reg);
++
++ /* scale UWP+CSM frequency to sample rate*/
++ freq_adj = internal->srate / (internal->master_clk / 4096);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_FREQ_ADJ_SCALE, STB0899_OFF0_FREQ_ADJ_SCALE, freq_adj);
++}
++
++/*
++ * stb0899_dvbs2_set_btr_loopbw
++ * set bit timing loop bandwidth as a percentage of the symbol rate
++ */
++static void stb0899_dvbs2_set_btr_loopbw(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_config *config = state->config;
++
++ u32 sym_peak = 23, zeta = 707, loopbw_percent = 60;
++ s32 dec_ratio, dec_rate, k_btr1_rshft, k_btr1, k_btr0_rshft;
++ s32 k_btr0, k_btr2_rshft, k_direct_shift, k_indirect_shift;
++ u32 decim, K, wn, k_direct, k_indirect;
++ u32 reg;
++
++ dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
++ dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
++ dec_rate = Log2Int(dec_ratio);
++ decim = (1 << dec_rate);
++
++ sym_peak *= 576000;
++ K = (1 << config->btr_nco_bits) / (internal->master_clk / 1000);
++ K *= (internal->srate / 1000000) * decim; /*k=k 10^-8*/
++
++ if (K != 0) {
++ K = sym_peak / K;
++ wn = (4 * zeta * zeta) + 1000000;
++ wn = (2 * (loopbw_percent * 1000) * 40 * zeta) /wn; /*wn =wn 10^-8*/
++
++ k_indirect = (wn * wn) / K;
++ k_indirect = k_indirect; /*kindirect = kindirect 10^-6*/
++ k_direct = (2 * wn * zeta) / K; /*kDirect = kDirect 10^-2*/
++ k_direct *= 100;
++
++ k_direct_shift = Log2Int(k_direct) - Log2Int(10000) - 2;
++ k_btr1_rshft = (-1 * k_direct_shift) + config->btr_gain_shift_offset;
++ k_btr1 = k_direct / (1 << k_direct_shift);
++ k_btr1 /= 10000;
++
++ k_indirect_shift = Log2Int(k_indirect + 15) - 20 /*- 2*/;
++ k_btr0_rshft = (-1 * k_indirect_shift) + config->btr_gain_shift_offset;
++ k_btr0 = k_indirect * (1 << (-k_indirect_shift));
++ k_btr0 /= 1000000;
++
++ k_btr2_rshft = 0;
++ if (k_btr0_rshft > 15) {
++ k_btr2_rshft = k_btr0_rshft - 15;
++ k_btr0_rshft = 15;
++ }
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_LOOP_GAIN);
++ STB0899_SETFIELD_VAL(KBTR0_RSHFT, reg, k_btr0_rshft);
++ STB0899_SETFIELD_VAL(KBTR0, reg, k_btr0);
++ STB0899_SETFIELD_VAL(KBTR1_RSHFT, reg, k_btr1_rshft);
++ STB0899_SETFIELD_VAL(KBTR1, reg, k_btr1);
++ STB0899_SETFIELD_VAL(KBTR2_RSHFT, reg, k_btr2_rshft);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_LOOP_GAIN, STB0899_OFF0_BTR_LOOP_GAIN, reg);
++ } else
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_LOOP_GAIN, STB0899_OFF0_BTR_LOOP_GAIN, 0xc4c4f);
++}
++
++/*
++ * stb0899_dvbs2_set_carr_freq
++ * set nominal frequency for carrier search
++ */
++static void stb0899_dvbs2_set_carr_freq(struct stb0899_state *state, s32 carr_freq, u32 master_clk)
++{
++ struct stb0899_config *config = state->config;
++ s32 crl_nom_freq;
++ u32 reg;
++
++ crl_nom_freq = (1 << config->crl_nco_bits) / master_clk;
++ crl_nom_freq *= carr_freq;
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
++ STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, crl_nom_freq);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
++}
++
++/*
++ * stb0899_dvbs2_init_calc
++ * Initialize DVBS2 UWP, CSM, carrier and timing loops
++ */
++static void stb0899_dvbs2_init_calc(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ s32 steps, step_size;
++ u32 range, reg;
++
++ /* config uwp and csm */
++ stb0899_dvbs2_config_uwp(state);
++ stb0899_dvbs2_config_csm_auto(state);
++
++ /* initialize BTR */
++ stb0899_dvbs2_set_srate(state);
++ stb0899_dvbs2_set_btr_loopbw(state);
++
++ if (internal->srate / 1000000 >= 15)
++ step_size = (1 << 17) / 5;
++ else if (internal->srate / 1000000 >= 10)
++ step_size = (1 << 17) / 7;
++ else if (internal->srate / 1000000 >= 5)
++ step_size = (1 << 17) / 10;
++ else
++ step_size = (1 << 17) / 4;
++
++ range = internal->srch_range / 1000000;
++ steps = (10 * range * (1 << 17)) / (step_size * (internal->srate / 1000000));
++ steps = (steps + 6) / 10;
++ steps = (steps == 0) ? 1 : steps;
++ if (steps % 2 == 0)
++ stb0899_dvbs2_set_carr_freq(state, internal->center_freq -
++ (internal->step_size * (internal->srate / 20000000)),
++ (internal->master_clk) / 1000000);
++ else
++ stb0899_dvbs2_set_carr_freq(state, internal->center_freq, (internal->master_clk) / 1000000);
++
++ /* Set Carrier Search params (zigzag, num steps and freq step size) */
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, ACQ_CNTRL2);
++ STB0899_SETFIELD_VAL(ZIGZAG, reg, 1);
++ STB0899_SETFIELD_VAL(NUM_STEPS, reg, steps);
++ STB0899_SETFIELD_VAL(FREQ_STEPSIZE, reg, step_size);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ACQ_CNTRL2, STB0899_OFF0_ACQ_CNTRL2, reg);
++}
++
++/*
++ * stb0899_dvbs2_btr_init
++ * initialize the timing loop
++ */
++static void stb0899_dvbs2_btr_init(struct stb0899_state *state)
++{
++ u32 reg;
++
++ /* set enable BTR loopback */
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_CNTRL);
++ STB0899_SETFIELD_VAL(INTRP_PHS_SENSE, reg, 1);
++ STB0899_SETFIELD_VAL(BTR_ERR_ENA, reg, 1);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_CNTRL, STB0899_OFF0_BTR_CNTRL, reg);
++
++ /* fix btr freq accum at 0 */
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_FREQ_INIT, STB0899_OFF0_BTR_FREQ_INIT, 0x10000000);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_FREQ_INIT, STB0899_OFF0_BTR_FREQ_INIT, 0x00000000);
++
++ /* fix btr freq accum at 0 */
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_PHS_INIT, STB0899_OFF0_BTR_PHS_INIT, 0x10000000);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_PHS_INIT, STB0899_OFF0_BTR_PHS_INIT, 0x00000000);
++}
++
++/*
++ * stb0899_dvbs2_reacquire
++ * trigger a DVB-S2 acquisition
++ */
++static void stb0899_dvbs2_reacquire(struct stb0899_state *state)
++{
++ u32 reg = 0;
++
++ /* demod soft reset */
++ STB0899_SETFIELD_VAL(DVBS2_RESET, reg, 1);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_RESET_CNTRL, STB0899_OFF0_RESET_CNTRL, reg);
++
++ /*Reset Timing Loop */
++ stb0899_dvbs2_btr_init(state);
++
++ /* reset Carrier loop */
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_FREQ_INIT, STB0899_OFF0_CRL_FREQ_INIT, (1 << 30));
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_FREQ_INIT, STB0899_OFF0_CRL_FREQ_INIT, 0);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_LOOP_GAIN, STB0899_OFF0_CRL_LOOP_GAIN, 0);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_PHS_INIT, STB0899_OFF0_CRL_PHS_INIT, (1 << 30));
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_PHS_INIT, STB0899_OFF0_CRL_PHS_INIT, 0);
++
++ /*release demod soft reset */
++ reg = 0;
++ STB0899_SETFIELD_VAL(DVBS2_RESET, reg, 0);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_RESET_CNTRL, STB0899_OFF0_RESET_CNTRL, reg);
++
++ /* start acquisition process */
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ACQUIRE_TRIG, STB0899_OFF0_ACQUIRE_TRIG, 1);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_LOCK_LOST, STB0899_OFF0_LOCK_LOST, 0);
++
++ /* equalizer Init */
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQUALIZER_INIT, STB0899_OFF0_EQUALIZER_INIT, 1);
++
++ /*Start equilizer */
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQUALIZER_INIT, STB0899_OFF0_EQUALIZER_INIT, 0);
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
++ STB0899_SETFIELD_VAL(EQ_SHIFT, reg, 0);
++ STB0899_SETFIELD_VAL(EQ_DISABLE_UPDATE, reg, 0);
++ STB0899_SETFIELD_VAL(EQ_DELAY, reg, 0x05);
++ STB0899_SETFIELD_VAL(EQ_ADAPT_MODE, reg, 0x01);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
++
++ /* RESET Packet delineator */
++ stb0899_write_reg(state, STB0899_PDELCTRL, 0x4a);
++}
++
++/*
++ * stb0899_dvbs2_get_dmd_status
++ * get DVB-S2 Demod LOCK status
++ */
++static enum stb0899_status stb0899_dvbs2_get_dmd_status(struct stb0899_state *state, int timeout)
++{
++ int time = -10, lock = 0, uwp, csm;
++ u32 reg;
++
++ do {
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_STATUS);
++ dprintk(state->verbose, FE_DEBUG, 1, "DMD_STATUS=[0x%02x]", reg);
++ if (STB0899_GETFIELD(IF_AGC_LOCK, reg))
++ dprintk(state->verbose, FE_DEBUG, 1, "------------->IF AGC LOCKED !");
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_STAT2);
++ dprintk(state->verbose, FE_DEBUG, 1, "----------->DMD STAT2=[0x%02x]", reg);
++ uwp = STB0899_GETFIELD(UWP_LOCK, reg);
++ csm = STB0899_GETFIELD(CSM_LOCK, reg);
++ if (uwp && csm)
++ lock = 1;
++
++ time += 10;
++ msleep(10);
++
++ } while ((!lock) && (time <= timeout));
++
++ if (lock) {
++ dprintk(state->verbose, FE_DEBUG, 1, "----------------> DVB-S2 LOCK !");
++ return DVBS2_DEMOD_LOCK;
++ } else {
++ return DVBS2_DEMOD_NOLOCK;
++ }
++}
++
++/*
++ * stb0899_dvbs2_get_data_lock
++ * get FEC status
++ */
++static int stb0899_dvbs2_get_data_lock(struct stb0899_state *state, int timeout)
++{
++ int time = 0, lock = 0;
++ u8 reg;
++
++ while ((!lock) && (time < timeout)) {
++ reg = stb0899_read_reg(state, STB0899_CFGPDELSTATUS1);
++ dprintk(state->verbose, FE_DEBUG, 1, "---------> CFGPDELSTATUS=[0x%02x]", reg);
++ lock = STB0899_GETFIELD(CFGPDELSTATUS_LOCK, reg);
++ time++;
++ }
++
++ return lock;
++}
++
++/*
++ * stb0899_dvbs2_get_fec_status
++ * get DVB-S2 FEC LOCK status
++ */
++static enum stb0899_status stb0899_dvbs2_get_fec_status(struct stb0899_state *state, int timeout)
++{
++ int time = 0, Locked;
++
++ do {
++ Locked = stb0899_dvbs2_get_data_lock(state, 1);
++ time++;
++ msleep(1);
++
++ } while ((!Locked) && (time < timeout));
++
++ if (Locked) {
++ dprintk(state->verbose, FE_DEBUG, 1, "---------->DVB-S2 FEC LOCK !");
++ return DVBS2_FEC_LOCK;
++ } else {
++ return DVBS2_FEC_NOLOCK;
++ }
++}
++
++
++/*
++ * stb0899_dvbs2_init_csm
++ * set parameters for manual mode
++ */
++static void stb0899_dvbs2_init_csm(struct stb0899_state *state, int pilots, enum stb0899_modcod modcod)
++{
++ struct stb0899_internal *internal = &state->internal;
++
++ s32 dvt_tbl = 1, two_pass = 0, agc_gain = 6, agc_shift = 0, loop_shift = 0, phs_diff_thr = 0x80;
++ s32 gamma_acq, gamma_rho_acq, gamma_trk, gamma_rho_trk, lock_count_thr;
++ u32 csm1, csm2, csm3, csm4;
++
++ if (((internal->master_clk / internal->srate) <= 4) && (modcod <= 11) && (pilots == 1)) {
++ switch (modcod) {
++ case STB0899_QPSK_12:
++ gamma_acq = 25;
++ gamma_rho_acq = 2700;
++ gamma_trk = 12;
++ gamma_rho_trk = 180;
++ lock_count_thr = 8;
++ break;
++ case STB0899_QPSK_35:
++ gamma_acq = 38;
++ gamma_rho_acq = 7182;
++ gamma_trk = 14;
++ gamma_rho_trk = 308;
++ lock_count_thr = 8;
++ break;
++ case STB0899_QPSK_23:
++ gamma_acq = 42;
++ gamma_rho_acq = 9408;
++ gamma_trk = 17;
++ gamma_rho_trk = 476;
++ lock_count_thr = 8;
++ break;
++ case STB0899_QPSK_34:
++ gamma_acq = 53;
++ gamma_rho_acq = 16642;
++ gamma_trk = 19;
++ gamma_rho_trk = 646;
++ lock_count_thr = 8;
++ break;
++ case STB0899_QPSK_45:
++ gamma_acq = 53;
++ gamma_rho_acq = 17119;
++ gamma_trk = 22;
++ gamma_rho_trk = 880;
++ lock_count_thr = 8;
++ break;
++ case STB0899_QPSK_56:
++ gamma_acq = 55;
++ gamma_rho_acq = 19250;
++ gamma_trk = 23;
++ gamma_rho_trk = 989;
++ lock_count_thr = 8;
++ break;
++ case STB0899_QPSK_89:
++ gamma_acq = 60;
++ gamma_rho_acq = 24240;
++ gamma_trk = 24;
++ gamma_rho_trk = 1176;
++ lock_count_thr = 8;
++ break;
++ case STB0899_QPSK_910:
++ gamma_acq = 66;
++ gamma_rho_acq = 29634;
++ gamma_trk = 24;
++ gamma_rho_trk = 1176;
++ lock_count_thr = 8;
++ break;
++ default:
++ gamma_acq = 66;
++ gamma_rho_acq = 29634;
++ gamma_trk = 24;
++ gamma_rho_trk = 1176;
++ lock_count_thr = 8;
++ break;
++ }
++
++ csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
++ STB0899_SETFIELD_VAL(CSM_AUTO_PARAM, csm1, 0);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
++
++ csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
++ csm2 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL2);
++ csm3 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL3);
++ csm4 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL4);
++
++ STB0899_SETFIELD_VAL(CSM_DVT_TABLE, csm1, dvt_tbl);
++ STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, two_pass);
++ STB0899_SETFIELD_VAL(CSM_AGC_GAIN, csm1, agc_gain);
++ STB0899_SETFIELD_VAL(CSM_AGC_SHIFT, csm1, agc_shift);
++ STB0899_SETFIELD_VAL(FE_LOOP_SHIFT, csm1, loop_shift);
++ STB0899_SETFIELD_VAL(CSM_GAMMA_ACQ, csm2, gamma_acq);
++ STB0899_SETFIELD_VAL(CSM_GAMMA_RHOACQ, csm2, gamma_rho_acq);
++ STB0899_SETFIELD_VAL(CSM_GAMMA_TRACK, csm3, gamma_trk);
++ STB0899_SETFIELD_VAL(CSM_GAMMA_RHOTRACK, csm3, gamma_rho_trk);
++ STB0899_SETFIELD_VAL(CSM_LOCKCOUNT_THRESH, csm4, lock_count_thr);
++ STB0899_SETFIELD_VAL(CSM_PHASEDIFF_THRESH, csm4, phs_diff_thr);
++
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL2, STB0899_OFF0_CSM_CNTRL2, csm2);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL3, STB0899_OFF0_CSM_CNTRL3, csm3);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL4, STB0899_OFF0_CSM_CNTRL4, csm4);
++ }
++}
++
++/*
++ * stb0899_dvbs2_get_srate
++ * get DVB-S2 Symbol Rate
++ */
++static u32 stb0899_dvbs2_get_srate(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_config *config = state->config;
++
++ u32 bTrNomFreq, srate, decimRate, intval1, intval2, reg;
++ int div1, div2, rem1, rem2;
++
++ div1 = config->btr_nco_bits / 2;
++ div2 = config->btr_nco_bits - div1 - 1;
++
++ bTrNomFreq = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_NOM_FREQ);
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DECIM_CNTRL);
++ decimRate = STB0899_GETFIELD(DECIM_RATE, reg);
++ decimRate = (1 << decimRate);
++
++ intval1 = internal->master_clk / (1 << div1);
++ intval2 = bTrNomFreq / (1 << div2);
++
++ rem1 = internal->master_clk % (1 << div1);
++ rem2 = bTrNomFreq % (1 << div2);
++ /* only for integer calculation */
++ srate = (intval1 * intval2) + ((intval1 * rem2) / (1 << div2)) + ((intval2 * rem1) / (1 << div1));
++ srate /= decimRate; /*symbrate = (btrnomfreq_register_val*MasterClock)/2^(27+decim_rate_field) */
++
++ return srate;
++}
++
++/*
++ * stb0899_dvbs2_algo
++ * Search for signal, timing, carrier and data for a given
++ * frequency in a given range
++ */
++enum stb0899_status stb0899_dvbs2_algo(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ enum stb0899_modcod modcod;
++
++ s32 offsetfreq, searchTime, FecLockTime, pilots, iqSpectrum;
++ int i = 0;
++ u32 reg, csm1;
++
++ if (internal->srate <= 2000000) {
++ searchTime = 5000; /* 5000 ms max time to lock UWP and CSM, SYMB <= 2Mbs */
++ FecLockTime = 350; /* 350 ms max time to lock FEC, SYMB <= 2Mbs */
++ } else if (internal->srate <= 5000000) {
++ searchTime = 2500; /* 2500 ms max time to lock UWP and CSM, 2Mbs < SYMB <= 5Mbs */
++ FecLockTime = 170; /* 170 ms max time to lock FEC, 2Mbs< SYMB <= 5Mbs */
++ } else if (internal->srate <= 10000000) {
++ searchTime = 1500; /* 1500 ms max time to lock UWP and CSM, 5Mbs <SYMB <= 10Mbs */
++ FecLockTime = 80; /* 80 ms max time to lock FEC, 5Mbs< SYMB <= 10Mbs */
++ } else if (internal->srate <= 15000000) {
++ searchTime = 500; /* 500 ms max time to lock UWP and CSM, 10Mbs <SYMB <= 15Mbs */
++ FecLockTime = 50; /* 50 ms max time to lock FEC, 10Mbs< SYMB <= 15Mbs */
++ } else if (internal->srate <= 20000000) {
++ searchTime = 300; /* 300 ms max time to lock UWP and CSM, 15Mbs < SYMB <= 20Mbs */
++ FecLockTime = 30; /* 50 ms max time to lock FEC, 15Mbs< SYMB <= 20Mbs */
++ } else if (internal->srate <= 25000000) {
++ searchTime = 250; /* 250 ms max time to lock UWP and CSM, 20 Mbs < SYMB <= 25Mbs */
++ FecLockTime = 25; /* 25 ms max time to lock FEC, 20Mbs< SYMB <= 25Mbs */
++ } else {
++ searchTime = 150; /* 150 ms max time to lock UWP and CSM, SYMB > 25Mbs */
++ FecLockTime = 20; /* 20 ms max time to lock FEC, 20Mbs< SYMB <= 25Mbs */
++ }
++
++ /* Maintain Stream Merger in reset during acquisition */
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESRS, reg, 1);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++
++ /* enable tuner I/O */
++ stb0899_i2c_gate_ctrl(&state->frontend, 1);
++
++ /* Move tuner to frequency */
++ if (state->config->tuner_set_frequency)
++ state->config->tuner_set_frequency(&state->frontend, internal->freq);
++ if (state->config->tuner_get_frequency)
++ state->config->tuner_get_frequency(&state->frontend, &internal->freq);
++
++ /* disable tuner I/O */
++ stb0899_i2c_gate_ctrl(&state->frontend, 0);
++
++ /* Set IF AGC to acquisition */
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL);
++ STB0899_SETFIELD_VAL(IF_LOOP_GAIN, reg, 4);
++ STB0899_SETFIELD_VAL(IF_AGC_REF, reg, 32);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL, STB0899_OFF0_IF_AGC_CNTRL, reg);
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL2);
++ STB0899_SETFIELD_VAL(IF_AGC_DUMP_PER, reg, 0);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL2, STB0899_OFF0_IF_AGC_CNTRL2, reg);
++
++ /* Initialisation */
++ stb0899_dvbs2_init_calc(state);
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
++ switch (internal->inversion) {
++ case IQ_SWAP_OFF:
++ STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, 0);
++ break;
++ case IQ_SWAP_ON:
++ STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, 1);
++ break;
++ }
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DMD_CNTRL2, STB0899_OFF0_DMD_CNTRL2, reg);
++ stb0899_dvbs2_reacquire(state);
++
++ /* Wait for demod lock (UWP and CSM) */
++ internal->status = stb0899_dvbs2_get_dmd_status(state, searchTime);
++
++ if (internal->status == DVBS2_DEMOD_LOCK) {
++ dprintk(state->verbose, FE_DEBUG, 1, "------------> DVB-S2 DEMOD LOCK !");
++ i = 0;
++ /* Demod Locked, check FEC status */
++ internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
++
++ /*If false lock (UWP and CSM Locked but no FEC) try 3 time max*/
++ while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
++ /* Read the frequency offset*/
++ offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
++
++ /* Set the Nominal frequency to the found frequency offset for the next reacquire*/
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
++ STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, offsetfreq);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
++ stb0899_dvbs2_reacquire(state);
++ internal->status = stb0899_dvbs2_get_fec_status(state, searchTime);
++ i++;
++ }
++ }
++
++ if (internal->status != DVBS2_FEC_LOCK) {
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
++ iqSpectrum = STB0899_GETFIELD(SPECTRUM_INVERT, reg);
++ /* IQ Spectrum Inversion */
++ STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, !iqSpectrum);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DMD_CNTRL2, STB0899_OFF0_DMD_CNTRL2, reg);
++ /* start acquistion process */
++ stb0899_dvbs2_reacquire(state);
++
++ /* Wait for demod lock (UWP and CSM) */
++ internal->status = stb0899_dvbs2_get_dmd_status(state, searchTime);
++ if (internal->status == DVBS2_DEMOD_LOCK) {
++ i = 0;
++ /* Demod Locked, check FEC */
++ internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
++ /*try thrice for false locks, (UWP and CSM Locked but no FEC) */
++ while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
++ /* Read the frequency offset*/
++ offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
++
++ /* Set the Nominal frequency to the found frequency offset for the next reacquire*/
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
++ STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, offsetfreq);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
++
++ stb0899_dvbs2_reacquire(state);
++ internal->status = stb0899_dvbs2_get_fec_status(state, searchTime);
++ i++;
++ }
++ }
++/*
++ if (pParams->DVBS2State == FE_DVBS2_FEC_LOCKED)
++ pParams->IQLocked = !iqSpectrum;
++*/
++ }
++ if (internal->status == DVBS2_FEC_LOCK) {
++ dprintk(state->verbose, FE_DEBUG, 1, "----------------> DVB-S2 FEC Lock !");
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_STAT2);
++ modcod = STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 2;
++ pilots = STB0899_GETFIELD(UWP_DECODE_MOD, reg) & 0x01;
++
++ if ((((10 * internal->master_clk) / (internal->srate / 10)) <= 410) &&
++ (INRANGE(STB0899_QPSK_23, modcod, STB0899_QPSK_910)) &&
++ (pilots == 1)) {
++
++ stb0899_dvbs2_init_csm(state, pilots, modcod);
++ /* Wait for UWP,CSM and data LOCK 20ms max */
++ internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
++
++ i = 0;
++ while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
++ csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
++ STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, 1);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
++ csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
++ STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, 0);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
++
++ internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
++ i++;
++ }
++ }
++
++ if ((((10 * internal->master_clk) / (internal->srate / 10)) <= 410) &&
++ (INRANGE(STB0899_QPSK_12, modcod, STB0899_QPSK_35)) &&
++ (pilots == 1)) {
++
++ /* Equalizer Disable update */
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
++ STB0899_SETFIELD_VAL(EQ_DISABLE_UPDATE, reg, 1);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
++ }
++
++ /* slow down the Equalizer once locked */
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
++ STB0899_SETFIELD_VAL(EQ_SHIFT, reg, 0x02);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
++
++ /* Store signal parameters */
++ offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
++
++ /* sign extend 30 bit value before using it in calculations */
++ if (offsetfreq & (1 << 29))
++ offsetfreq |= -1 << 30;
++
++ offsetfreq = offsetfreq / ((1 << 30) / 1000);
++ offsetfreq *= (internal->master_clk / 1000000);
++
++ /* store current inversion for next run */
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
++ if (STB0899_GETFIELD(SPECTRUM_INVERT, reg))
++ internal->inversion = IQ_SWAP_ON;
++ else
++ internal->inversion = IQ_SWAP_OFF;
++
++ internal->freq = internal->freq + offsetfreq;
++ internal->srate = stb0899_dvbs2_get_srate(state);
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_STAT2);
++ internal->modcod = STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 2;
++ internal->pilots = STB0899_GETFIELD(UWP_DECODE_MOD, reg) & 0x01;
++ internal->frame_length = (STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 1) & 0x01;
++
++ /* Set IF AGC to tracking */
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL);
++ STB0899_SETFIELD_VAL(IF_LOOP_GAIN, reg, 3);
++
++ /* if QPSK 1/2,QPSK 3/5 or QPSK 2/3 set IF AGC reference to 16 otherwise 32*/
++ if (INRANGE(STB0899_QPSK_12, internal->modcod, STB0899_QPSK_23))
++ STB0899_SETFIELD_VAL(IF_AGC_REF, reg, 16);
++
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL, STB0899_OFF0_IF_AGC_CNTRL, reg);
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL2);
++ STB0899_SETFIELD_VAL(IF_AGC_DUMP_PER, reg, 7);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL2, STB0899_OFF0_IF_AGC_CNTRL2, reg);
++ }
++
++ /* Release Stream Merger Reset */
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESRS, reg, 0);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++
++ return internal->status;
++}
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/stb0899_drv.c linux-openelec/drivers/media/dvb-frontends/stb0899_drv.c
+--- linux-3.14.36/drivers/media/dvb-frontends/stb0899_drv.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/dvb-frontends/stb0899_drv.c 2015-07-24 18:03:30.136842002 -0500
+@@ -981,6 +981,16 @@
+
+ *strength = stb0899_table_lookup(stb0899_dvbsrf_tab, ARRAY_SIZE(stb0899_dvbsrf_tab) - 1, val);
+ *strength += 750;
++
++ const int MIN_STRENGTH_DVBS = 0;
++ const int MAX_STRENGTH_DVBS = 680;
++ if (*strength < MIN_STRENGTH_DVBS)
++ *strength = 0;
++ else if(*strength > MAX_STRENGTH_DVBS)
++ *strength = 0xFFFF;
++ else
++ *strength = (*strength - MIN_STRENGTH_DVBS) * 0xFFFF / (MAX_STRENGTH_DVBS - MIN_STRENGTH_DVBS);
++
+ dprintk(state->verbose, FE_DEBUG, 1, "AGCIQVALUE = 0x%02x, C = %d * 0.1 dBm",
+ val & 0xff, *strength);
+ }
+@@ -993,6 +1003,7 @@
+
+ *strength = stb0899_table_lookup(stb0899_dvbs2rf_tab, ARRAY_SIZE(stb0899_dvbs2rf_tab) - 1, val);
+ *strength += 950;
++ *strength = *strength << 4;
+ dprintk(state->verbose, FE_DEBUG, 1, "IF_AGC_GAIN = 0x%04x, C = %d * 0.1 dBm",
+ val & 0x3fff, *strength);
+ }
+@@ -1026,6 +1037,16 @@
+ val = MAKEWORD16(buf[0], buf[1]);
+
+ *snr = stb0899_table_lookup(stb0899_cn_tab, ARRAY_SIZE(stb0899_cn_tab) - 1, val);
++
++ const int MIN_SNR_DVBS = 0;
++ const int MAX_SNR_DVBS = 200;
++ if (*snr < MIN_SNR_DVBS)
++ *snr = 0;
++ else if(*snr > MAX_SNR_DVBS)
++ *snr = 0xFFFF;
++ else
++ *snr = (*snr - MIN_SNR_DVBS) * 0xFFFF / (MAX_SNR_DVBS - MIN_SNR_DVBS);
++
+ dprintk(state->verbose, FE_DEBUG, 1, "NIR = 0x%02x%02x = %u, C/N = %d * 0.1 dBm\n",
+ buf[0], buf[1], val, *snr);
+ }
+@@ -1050,6 +1071,16 @@
+ val = (quantn - estn) / 10;
+ }
+ *snr = val;
++
++ const int MIN_SNR_DVBS2 = 10;
++ const int MAX_SNR_DVBS2 = 70;
++ if (*snr < MIN_SNR_DVBS2)
++ *snr = 0;
++ else if(*snr > MAX_SNR_DVBS2)
++ *snr = 0xFFFF;
++ else
++ *snr = (*snr - MIN_SNR_DVBS2) * 0xFFFF / (MAX_SNR_DVBS2 - MIN_SNR_DVBS2);
++
+ dprintk(state->verbose, FE_DEBUG, 1, "Es/N0 quant = %d (%d) estimate = %u (%d), C/N = %d * 0.1 dBm",
+ quant, quantn, est, estn, val);
+ }
+@@ -1591,7 +1622,7 @@
+ .frequency_max = 2150000,
+ .frequency_stepsize = 0,
+ .frequency_tolerance = 0,
+- .symbol_rate_min = 5000000,
++ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+
+ .caps = FE_CAN_INVERSION_AUTO |
+diff -Nur linux-3.14.36/drivers/media/dvb-frontends/stb0899_drv.c.orig linux-openelec/drivers/media/dvb-frontends/stb0899_drv.c.orig
+--- linux-3.14.36/drivers/media/dvb-frontends/stb0899_drv.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/dvb-frontends/stb0899_drv.c.orig 2015-07-24 18:03:30.072842002 -0500
+@@ -0,0 +1,1661 @@
++/*
++ STB0899 Multistandard Frontend driver
++ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
++
++ Copyright (C) ST Microelectronics
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*/
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++
++#include <linux/dvb/frontend.h>
++#include "dvb_frontend.h"
++
++#include "stb0899_drv.h"
++#include "stb0899_priv.h"
++#include "stb0899_reg.h"
++
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
++static unsigned int verbose = 0;//1;
++module_param(verbose, int, 0644);
++
++/* C/N in dB/10, NIRM/NIRL */
++static const struct stb0899_tab stb0899_cn_tab[] = {
++ { 200, 2600 },
++ { 190, 2700 },
++ { 180, 2860 },
++ { 170, 3020 },
++ { 160, 3210 },
++ { 150, 3440 },
++ { 140, 3710 },
++ { 130, 4010 },
++ { 120, 4360 },
++ { 110, 4740 },
++ { 100, 5190 },
++ { 90, 5670 },
++ { 80, 6200 },
++ { 70, 6770 },
++ { 60, 7360 },
++ { 50, 7970 },
++ { 40, 8250 },
++ { 30, 9000 },
++ { 20, 9450 },
++ { 15, 9600 },
++};
++
++/* DVB-S AGCIQ_VALUE vs. signal level in dBm/10.
++ * As measured, connected to a modulator.
++ * -8.0 to -50.0 dBm directly connected,
++ * -52.0 to -74.8 with extra attenuation.
++ * Cut-off to AGCIQ_VALUE = 0x80 below -74.8dBm.
++ * Crude linear extrapolation below -84.8dBm and above -8.0dBm.
++ */
++static const struct stb0899_tab stb0899_dvbsrf_tab[] = {
++ { -750, -128 },
++ { -748, -94 },
++ { -745, -92 },
++ { -735, -90 },
++ { -720, -87 },
++ { -670, -77 },
++ { -640, -70 },
++ { -610, -62 },
++ { -600, -60 },
++ { -590, -56 },
++ { -560, -41 },
++ { -540, -25 },
++ { -530, -17 },
++ { -520, -11 },
++ { -500, 1 },
++ { -490, 6 },
++ { -480, 10 },
++ { -440, 22 },
++ { -420, 27 },
++ { -400, 31 },
++ { -380, 34 },
++ { -340, 40 },
++ { -320, 43 },
++ { -280, 48 },
++ { -250, 52 },
++ { -230, 55 },
++ { -180, 61 },
++ { -140, 66 },
++ { -90, 73 },
++ { -80, 74 },
++ { 500, 127 }
++};
++
++/* DVB-S2 IF_AGC_GAIN vs. signal level in dBm/10.
++ * As measured, connected to a modulator.
++ * -8.0 to -50.1 dBm directly connected,
++ * -53.0 to -76.6 with extra attenuation.
++ * Cut-off to IF_AGC_GAIN = 0x3fff below -76.6dBm.
++ * Crude linear extrapolation below -76.6dBm and above -8.0dBm.
++ */
++static const struct stb0899_tab stb0899_dvbs2rf_tab[] = {
++ { 700, 0 },
++ { -80, 3217 },
++ { -150, 3893 },
++ { -190, 4217 },
++ { -240, 4621 },
++ { -280, 4945 },
++ { -320, 5273 },
++ { -350, 5545 },
++ { -370, 5741 },
++ { -410, 6147 },
++ { -450, 6671 },
++ { -490, 7413 },
++ { -501, 7665 },
++ { -530, 8767 },
++ { -560, 10219 },
++ { -580, 10939 },
++ { -590, 11518 },
++ { -600, 11723 },
++ { -650, 12659 },
++ { -690, 13219 },
++ { -730, 13645 },
++ { -750, 13909 },
++ { -766, 14153 },
++ { -950, 16383 }
++};
++
++/* DVB-S2 Es/N0 quant in dB/100 vs read value * 100*/
++static struct stb0899_tab stb0899_quant_tab[] = {
++ { 0, 0 },
++ { 0, 100 },
++ { 600, 200 },
++ { 950, 299 },
++ { 1200, 398 },
++ { 1400, 501 },
++ { 1560, 603 },
++ { 1690, 700 },
++ { 1810, 804 },
++ { 1910, 902 },
++ { 2000, 1000 },
++ { 2080, 1096 },
++ { 2160, 1202 },
++ { 2230, 1303 },
++ { 2350, 1496 },
++ { 2410, 1603 },
++ { 2460, 1698 },
++ { 2510, 1799 },
++ { 2600, 1995 },
++ { 2650, 2113 },
++ { 2690, 2213 },
++ { 2720, 2291 },
++ { 2760, 2399 },
++ { 2800, 2512 },
++ { 2860, 2692 },
++ { 2930, 2917 },
++ { 2960, 3020 },
++ { 3010, 3199 },
++ { 3040, 3311 },
++ { 3060, 3388 },
++ { 3120, 3631 },
++ { 3190, 3936 },
++ { 3400, 5012 },
++ { 3610, 6383 },
++ { 3800, 7943 },
++ { 4210, 12735 },
++ { 4500, 17783 },
++ { 4690, 22131 },
++ { 4810, 25410 }
++};
++
++/* DVB-S2 Es/N0 estimate in dB/100 vs read value */
++static struct stb0899_tab stb0899_est_tab[] = {
++ { 0, 0 },
++ { 0, 1 },
++ { 301, 2 },
++ { 1204, 16 },
++ { 1806, 64 },
++ { 2408, 256 },
++ { 2709, 512 },
++ { 3010, 1023 },
++ { 3311, 2046 },
++ { 3612, 4093 },
++ { 3823, 6653 },
++ { 3913, 8185 },
++ { 4010, 10233 },
++ { 4107, 12794 },
++ { 4214, 16368 },
++ { 4266, 18450 },
++ { 4311, 20464 },
++ { 4353, 22542 },
++ { 4391, 24604 },
++ { 4425, 26607 },
++ { 4457, 28642 },
++ { 4487, 30690 },
++ { 4515, 32734 },
++ { 4612, 40926 },
++ { 4692, 49204 },
++ { 4816, 65464 },
++ { 4913, 81846 },
++ { 4993, 98401 },
++ { 5060, 114815 },
++ { 5118, 131220 },
++ { 5200, 158489 },
++ { 5300, 199526 },
++ { 5400, 251189 },
++ { 5500, 316228 },
++ { 5600, 398107 },
++ { 5720, 524807 },
++ { 5721, 526017 },
++};
++
++static int _stb0899_read_reg(struct stb0899_state *state, unsigned int reg)
++{
++ int ret;
++
++ u8 b0[] = { reg >> 8, reg & 0xff };
++ u8 buf;
++
++ struct i2c_msg msg[] = {
++ {
++ .addr = state->config->demod_address,
++ .flags = 0,
++ .buf = b0,
++ .len = 2
++ },{
++ .addr = state->config->demod_address,
++ .flags = I2C_M_RD,
++ .buf = &buf,
++ .len = 1
++ }
++ };
++
++ ret = i2c_transfer(state->i2c, msg, 2);
++ if (ret != 2) {
++ if (ret != -ERESTARTSYS)
++ dprintk(state->verbose, FE_ERROR, 1,
++ "Read error, Reg=[0x%02x], Status=%d",
++ reg, ret);
++
++ return ret < 0 ? ret : -EREMOTEIO;
++ }
++ if (unlikely(*state->verbose >= FE_DEBUGREG))
++ dprintk(state->verbose, FE_ERROR, 1, "Reg=[0x%02x], data=%02x",
++ reg, buf);
++
++ return (unsigned int)buf;
++}
++
++int stb0899_read_reg(struct stb0899_state *state, unsigned int reg)
++{
++ int result;
++
++ result = _stb0899_read_reg(state, reg);
++ /*
++ * Bug ID 9:
++ * access to 0xf2xx/0xf6xx
++ * must be followed by read from 0xf2ff/0xf6ff.
++ */
++ if ((reg != 0xf2ff) && (reg != 0xf6ff) &&
++ (((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
++ _stb0899_read_reg(state, (reg | 0x00ff));
++
++ return result;
++}
++
++u32 _stb0899_read_s2reg(struct stb0899_state *state,
++ u32 stb0899_i2cdev,
++ u32 stb0899_base_addr,
++ u16 stb0899_reg_offset)
++{
++ int status;
++ u32 data;
++ u8 buf[7] = { 0 };
++ u16 tmpaddr;
++
++ u8 buf_0[] = {
++ GETBYTE(stb0899_i2cdev, BYTE1), /* 0xf3 S2 Base Address (MSB) */
++ GETBYTE(stb0899_i2cdev, BYTE0), /* 0xfc S2 Base Address (LSB) */
++ GETBYTE(stb0899_base_addr, BYTE0), /* 0x00 Base Address (LSB) */
++ GETBYTE(stb0899_base_addr, BYTE1), /* 0x04 Base Address (LSB) */
++ GETBYTE(stb0899_base_addr, BYTE2), /* 0x00 Base Address (MSB) */
++ GETBYTE(stb0899_base_addr, BYTE3), /* 0x00 Base Address (MSB) */
++ };
++ u8 buf_1[] = {
++ 0x00, /* 0xf3 Reg Offset */
++ 0x00, /* 0x44 Reg Offset */
++ };
++
++ struct i2c_msg msg_0 = {
++ .addr = state->config->demod_address,
++ .flags = 0,
++ .buf = buf_0,
++ .len = 6
++ };
++
++ struct i2c_msg msg_1 = {
++ .addr = state->config->demod_address,
++ .flags = 0,
++ .buf = buf_1,
++ .len = 2
++ };
++
++ struct i2c_msg msg_r = {
++ .addr = state->config->demod_address,
++ .flags = I2C_M_RD,
++ .buf = buf,
++ .len = 4
++ };
++
++ tmpaddr = stb0899_reg_offset & 0xff00;
++ if (!(stb0899_reg_offset & 0x8))
++ tmpaddr = stb0899_reg_offset | 0x20;
++
++ buf_1[0] = GETBYTE(tmpaddr, BYTE1);
++ buf_1[1] = GETBYTE(tmpaddr, BYTE0);
++
++ status = i2c_transfer(state->i2c, &msg_0, 1);
++ if (status < 1) {
++ if (status != -ERESTARTSYS)
++ printk(KERN_ERR "%s ERR(1), Device=[0x%04x], Base address=[0x%08x], Offset=[0x%04x], Status=%d\n",
++ __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, status);
++
++ goto err;
++ }
++
++ /* Dummy */
++ status = i2c_transfer(state->i2c, &msg_1, 1);
++ if (status < 1)
++ goto err;
++
++ status = i2c_transfer(state->i2c, &msg_r, 1);
++ if (status < 1)
++ goto err;
++
++ buf_1[0] = GETBYTE(stb0899_reg_offset, BYTE1);
++ buf_1[1] = GETBYTE(stb0899_reg_offset, BYTE0);
++
++ /* Actual */
++ status = i2c_transfer(state->i2c, &msg_1, 1);
++ if (status < 1) {
++ if (status != -ERESTARTSYS)
++ printk(KERN_ERR "%s ERR(2), Device=[0x%04x], Base address=[0x%08x], Offset=[0x%04x], Status=%d\n",
++ __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, status);
++ goto err;
++ }
++
++ status = i2c_transfer(state->i2c, &msg_r, 1);
++ if (status < 1) {
++ if (status != -ERESTARTSYS)
++ printk(KERN_ERR "%s ERR(3), Device=[0x%04x], Base address=[0x%08x], Offset=[0x%04x], Status=%d\n",
++ __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, status);
++ return status < 0 ? status : -EREMOTEIO;
++ }
++
++ data = MAKEWORD32(buf[3], buf[2], buf[1], buf[0]);
++ if (unlikely(*state->verbose >= FE_DEBUGREG))
++ printk(KERN_DEBUG "%s Device=[0x%04x], Base address=[0x%08x], Offset=[0x%04x], Data=[0x%08x]\n",
++ __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, data);
++
++ return data;
++
++err:
++ return status < 0 ? status : -EREMOTEIO;
++}
++
++int stb0899_write_s2reg(struct stb0899_state *state,
++ u32 stb0899_i2cdev,
++ u32 stb0899_base_addr,
++ u16 stb0899_reg_offset,
++ u32 stb0899_data)
++{
++ int status;
++
++ /* Base Address Setup */
++ u8 buf_0[] = {
++ GETBYTE(stb0899_i2cdev, BYTE1), /* 0xf3 S2 Base Address (MSB) */
++ GETBYTE(stb0899_i2cdev, BYTE0), /* 0xfc S2 Base Address (LSB) */
++ GETBYTE(stb0899_base_addr, BYTE0), /* 0x00 Base Address (LSB) */
++ GETBYTE(stb0899_base_addr, BYTE1), /* 0x04 Base Address (LSB) */
++ GETBYTE(stb0899_base_addr, BYTE2), /* 0x00 Base Address (MSB) */
++ GETBYTE(stb0899_base_addr, BYTE3), /* 0x00 Base Address (MSB) */
++ };
++ u8 buf_1[] = {
++ 0x00, /* 0xf3 Reg Offset */
++ 0x00, /* 0x44 Reg Offset */
++ 0x00, /* data */
++ 0x00, /* data */
++ 0x00, /* data */
++ 0x00, /* data */
++ };
++
++ struct i2c_msg msg_0 = {
++ .addr = state->config->demod_address,
++ .flags = 0,
++ .buf = buf_0,
++ .len = 6
++ };
++
++ struct i2c_msg msg_1 = {
++ .addr = state->config->demod_address,
++ .flags = 0,
++ .buf = buf_1,
++ .len = 6
++ };
++
++ buf_1[0] = GETBYTE(stb0899_reg_offset, BYTE1);
++ buf_1[1] = GETBYTE(stb0899_reg_offset, BYTE0);
++ buf_1[2] = GETBYTE(stb0899_data, BYTE0);
++ buf_1[3] = GETBYTE(stb0899_data, BYTE1);
++ buf_1[4] = GETBYTE(stb0899_data, BYTE2);
++ buf_1[5] = GETBYTE(stb0899_data, BYTE3);
++
++ if (unlikely(*state->verbose >= FE_DEBUGREG))
++ printk(KERN_DEBUG "%s Device=[0x%04x], Base Address=[0x%08x], Offset=[0x%04x], Data=[0x%08x]\n",
++ __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, stb0899_data);
++
++ status = i2c_transfer(state->i2c, &msg_0, 1);
++ if (unlikely(status < 1)) {
++ if (status != -ERESTARTSYS)
++ printk(KERN_ERR "%s ERR (1), Device=[0x%04x], Base Address=[0x%08x], Offset=[0x%04x], Data=[0x%08x], status=%d\n",
++ __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, stb0899_data, status);
++ goto err;
++ }
++ status = i2c_transfer(state->i2c, &msg_1, 1);
++ if (unlikely(status < 1)) {
++ if (status != -ERESTARTSYS)
++ printk(KERN_ERR "%s ERR (2), Device=[0x%04x], Base Address=[0x%08x], Offset=[0x%04x], Data=[0x%08x], status=%d\n",
++ __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, stb0899_data, status);
++
++ return status < 0 ? status : -EREMOTEIO;
++ }
++
++ return 0;
++
++err:
++ return status < 0 ? status : -EREMOTEIO;
++}
++
++int stb0899_read_regs(struct stb0899_state *state, unsigned int reg, u8 *buf, u32 count)
++{
++ int status;
++
++ u8 b0[] = { reg >> 8, reg & 0xff };
++
++ struct i2c_msg msg[] = {
++ {
++ .addr = state->config->demod_address,
++ .flags = 0,
++ .buf = b0,
++ .len = 2
++ },{
++ .addr = state->config->demod_address,
++ .flags = I2C_M_RD,
++ .buf = buf,
++ .len = count
++ }
++ };
++
++ status = i2c_transfer(state->i2c, msg, 2);
++ if (status != 2) {
++ if (status != -ERESTARTSYS)
++ printk(KERN_ERR "%s Read error, Reg=[0x%04x], Count=%u, Status=%d\n",
++ __func__, reg, count, status);
++ goto err;
++ }
++ /*
++ * Bug ID 9:
++ * access to 0xf2xx/0xf6xx
++ * must be followed by read from 0xf2ff/0xf6ff.
++ */
++ if ((reg != 0xf2ff) && (reg != 0xf6ff) &&
++ (((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
++ _stb0899_read_reg(state, (reg | 0x00ff));
++
++ if (unlikely(*state->verbose >= FE_DEBUGREG)) {
++ int i;
++
++ printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
++ for (i = 0; i < count; i++) {
++ printk(" %02x", buf[i]);
++ }
++ printk("\n");
++ }
++
++ return 0;
++err:
++ return status < 0 ? status : -EREMOTEIO;
++}
++
++int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, u32 count)
++{
++ int ret;
++ u8 buf[MAX_XFER_SIZE];
++ struct i2c_msg i2c_msg = {
++ .addr = state->config->demod_address,
++ .flags = 0,
++ .buf = buf,
++ .len = 2 + count
++ };
++
++ if (2 + count > sizeof(buf)) {
++ printk(KERN_WARNING
++ "%s: i2c wr reg=%04x: len=%d is too big!\n",
++ KBUILD_MODNAME, reg, count);
++ return -EINVAL;
++ }
++
++ buf[0] = reg >> 8;
++ buf[1] = reg & 0xff;
++ memcpy(&buf[2], data, count);
++
++ if (unlikely(*state->verbose >= FE_DEBUGREG)) {
++ int i;
++
++ printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
++ for (i = 0; i < count; i++)
++ printk(" %02x", data[i]);
++ printk("\n");
++ }
++ ret = i2c_transfer(state->i2c, &i2c_msg, 1);
++
++ /*
++ * Bug ID 9:
++ * access to 0xf2xx/0xf6xx
++ * must be followed by read from 0xf2ff/0xf6ff.
++ */
++ if ((((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
++ stb0899_read_reg(state, (reg | 0x00ff));
++
++ if (ret != 1) {
++ if (ret != -ERESTARTSYS)
++ dprintk(state->verbose, FE_ERROR, 1, "Reg=[0x%04x], Data=[0x%02x ...], Count=%u, Status=%d",
++ reg, data[0], count, ret);
++ return ret < 0 ? ret : -EREMOTEIO;
++ }
++
++ return 0;
++}
++
++int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data)
++{
++ return stb0899_write_regs(state, reg, &data, 1);
++}
++
++/*
++ * stb0899_get_mclk
++ * Get STB0899 master clock frequency
++ * ExtClk: external clock frequency (Hz)
++ */
++static u32 stb0899_get_mclk(struct stb0899_state *state)
++{
++ u32 mclk = 0, div = 0;
++
++ div = stb0899_read_reg(state, STB0899_NCOARSE);
++ mclk = (div + 1) * state->config->xtal_freq / 6;
++ dprintk(state->verbose, FE_DEBUG, 1, "div=%d, mclk=%d", div, mclk);
++
++ return mclk;
++}
++
++/*
++ * stb0899_set_mclk
++ * Set STB0899 master Clock frequency
++ * Mclk: demodulator master clock
++ * ExtClk: external clock frequency (Hz)
++ */
++static void stb0899_set_mclk(struct stb0899_state *state, u32 Mclk)
++{
++ struct stb0899_internal *internal = &state->internal;
++ u8 mdiv = 0;
++
++ dprintk(state->verbose, FE_DEBUG, 1, "state->config=%p", state->config);
++ mdiv = ((6 * Mclk) / state->config->xtal_freq) - 1;
++ dprintk(state->verbose, FE_DEBUG, 1, "mdiv=%d", mdiv);
++
++ stb0899_write_reg(state, STB0899_NCOARSE, mdiv);
++ internal->master_clk = stb0899_get_mclk(state);
++
++ dprintk(state->verbose, FE_DEBUG, 1, "MasterCLOCK=%d", internal->master_clk);
++}
++
++static int stb0899_postproc(struct stb0899_state *state, u8 ctl, int enable)
++{
++ struct stb0899_config *config = state->config;
++ const struct stb0899_postproc *postproc = config->postproc;
++
++ /* post process event */
++ if (postproc) {
++ if (enable) {
++ if (postproc[ctl].level == STB0899_GPIOPULLUP)
++ stb0899_write_reg(state, postproc[ctl].gpio, 0x02);
++ else
++ stb0899_write_reg(state, postproc[ctl].gpio, 0x82);
++ } else {
++ if (postproc[ctl].level == STB0899_GPIOPULLUP)
++ stb0899_write_reg(state, postproc[ctl].gpio, 0x82);
++ else
++ stb0899_write_reg(state, postproc[ctl].gpio, 0x02);
++ }
++ }
++ return 0;
++}
++
++static void stb0899_release(struct dvb_frontend *fe)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++
++ dprintk(state->verbose, FE_DEBUG, 1, "Release Frontend");
++ /* post process event */
++ stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 0);
++ kfree(state);
++}
++
++/*
++ * stb0899_get_alpha
++ * return: rolloff
++ */
++static int stb0899_get_alpha(struct stb0899_state *state)
++{
++ u8 mode_coeff;
++
++ mode_coeff = stb0899_read_reg(state, STB0899_DEMOD);
++
++ if (STB0899_GETFIELD(MODECOEFF, mode_coeff) == 1)
++ return 20;
++ else
++ return 35;
++}
++
++/*
++ * stb0899_init_calc
++ */
++static void stb0899_init_calc(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ int master_clk;
++ u8 agc[2];
++ u32 reg;
++
++ /* Read registers (in burst mode) */
++ stb0899_read_regs(state, STB0899_AGC1REF, agc, 2); /* AGC1R and AGC2O */
++
++ /* Initial calculations */
++ master_clk = stb0899_get_mclk(state);
++ internal->t_agc1 = 0;
++ internal->t_agc2 = 0;
++ internal->master_clk = master_clk;
++ internal->mclk = master_clk / 65536L;
++ internal->rolloff = stb0899_get_alpha(state);
++
++ /* DVBS2 Initial calculations */
++ /* Set AGC value to the middle */
++ internal->agc_gain = 8154;
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL);
++ STB0899_SETFIELD_VAL(IF_GAIN_INIT, reg, internal->agc_gain);
++ stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL, STB0899_OFF0_IF_AGC_CNTRL, reg);
++
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, RRC_ALPHA);
++ internal->rrc_alpha = STB0899_GETFIELD(RRC_ALPHA, reg);
++
++ internal->center_freq = 0;
++ internal->av_frame_coarse = 10;
++ internal->av_frame_fine = 20;
++ internal->step_size = 2;
++/*
++ if ((pParams->SpectralInv == FE_IQ_NORMAL) || (pParams->SpectralInv == FE_IQ_AUTO))
++ pParams->IQLocked = 0;
++ else
++ pParams->IQLocked = 1;
++*/
++}
++
++static int stb0899_wait_diseqc_fifo_empty(struct stb0899_state *state, int timeout)
++{
++ u8 reg = 0;
++ unsigned long start = jiffies;
++
++ while (1) {
++ reg = stb0899_read_reg(state, STB0899_DISSTATUS);
++ if (!STB0899_GETFIELD(FIFOFULL, reg))
++ break;
++ if ((jiffies - start) > timeout) {
++ dprintk(state->verbose, FE_ERROR, 1, "timed out !!");
++ return -ETIMEDOUT;
++ }
++ }
++
++ return 0;
++}
++
++static int stb0899_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ u8 reg, i;
++
++ if (cmd->msg_len > 8)
++ return -EINVAL;
++
++ /* enable FIFO precharge */
++ reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
++ STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 1);
++ stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
++ for (i = 0; i < cmd->msg_len; i++) {
++ /* wait for FIFO empty */
++ if (stb0899_wait_diseqc_fifo_empty(state, 100) < 0)
++ return -ETIMEDOUT;
++
++ stb0899_write_reg(state, STB0899_DISFIFO, cmd->msg[i]);
++ }
++ reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
++ STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 0);
++ stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
++ msleep(100);
++ return 0;
++}
++
++static int stb0899_wait_diseqc_rxidle(struct stb0899_state *state, int timeout)
++{
++ u8 reg = 0;
++ unsigned long start = jiffies;
++
++ while (!STB0899_GETFIELD(RXEND, reg)) {
++ reg = stb0899_read_reg(state, STB0899_DISRX_ST0);
++ if (jiffies - start > timeout) {
++ dprintk(state->verbose, FE_ERROR, 1, "timed out!!");
++ return -ETIMEDOUT;
++ }
++ msleep(10);
++ }
++
++ return 0;
++}
++
++static int stb0899_recv_slave_reply(struct dvb_frontend *fe, struct dvb_diseqc_slave_reply *reply)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ u8 reg, length = 0, i;
++ int result;
++
++ if (stb0899_wait_diseqc_rxidle(state, 100) < 0)
++ return -ETIMEDOUT;
++
++ reg = stb0899_read_reg(state, STB0899_DISRX_ST0);
++ if (STB0899_GETFIELD(RXEND, reg)) {
++
++ reg = stb0899_read_reg(state, STB0899_DISRX_ST1);
++ length = STB0899_GETFIELD(FIFOBYTENBR, reg);
++
++ if (length > sizeof (reply->msg)) {
++ result = -EOVERFLOW;
++ goto exit;
++ }
++ reply->msg_len = length;
++
++ /* extract data */
++ for (i = 0; i < length; i++)
++ reply->msg[i] = stb0899_read_reg(state, STB0899_DISFIFO);
++ }
++
++ return 0;
++exit:
++
++ return result;
++}
++
++static int stb0899_wait_diseqc_txidle(struct stb0899_state *state, int timeout)
++{
++ u8 reg = 0;
++ unsigned long start = jiffies;
++
++ while (!STB0899_GETFIELD(TXIDLE, reg)) {
++ reg = stb0899_read_reg(state, STB0899_DISSTATUS);
++ if (jiffies - start > timeout) {
++ dprintk(state->verbose, FE_ERROR, 1, "timed out!!");
++ return -ETIMEDOUT;
++ }
++ msleep(10);
++ }
++ return 0;
++}
++
++static int stb0899_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ u8 reg, old_state;
++
++ /* wait for diseqc idle */
++ if (stb0899_wait_diseqc_txidle(state, 100) < 0)
++ return -ETIMEDOUT;
++
++ reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
++ old_state = reg;
++ /* set to burst mode */
++ STB0899_SETFIELD_VAL(DISEQCMODE, reg, 0x03);
++ STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 0x01);
++ stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
++ switch (burst) {
++ case SEC_MINI_A:
++ /* unmodulated */
++ stb0899_write_reg(state, STB0899_DISFIFO, 0x00);
++ break;
++ case SEC_MINI_B:
++ /* modulated */
++ stb0899_write_reg(state, STB0899_DISFIFO, 0xff);
++ break;
++ }
++ reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
++ STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 0x00);
++ stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
++ /* wait for diseqc idle */
++ if (stb0899_wait_diseqc_txidle(state, 100) < 0)
++ return -ETIMEDOUT;
++
++ /* restore state */
++ stb0899_write_reg(state, STB0899_DISCNTRL1, old_state);
++
++ return 0;
++}
++
++static int stb0899_diseqc_init(struct stb0899_state *state)
++{
++/*
++ struct dvb_diseqc_slave_reply rx_data;
++*/
++ u8 f22_tx, reg;
++
++ u32 mclk, tx_freq = 22000;/* count = 0, i; */
++ reg = stb0899_read_reg(state, STB0899_DISCNTRL2);
++ STB0899_SETFIELD_VAL(ONECHIP_TRX, reg, 0);
++ stb0899_write_reg(state, STB0899_DISCNTRL2, reg);
++
++ /* disable Tx spy */
++ reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
++ STB0899_SETFIELD_VAL(DISEQCRESET, reg, 1);
++ stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
++
++ reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
++ STB0899_SETFIELD_VAL(DISEQCRESET, reg, 0);
++ stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
++
++ mclk = stb0899_get_mclk(state);
++ f22_tx = mclk / (tx_freq * 32);
++ stb0899_write_reg(state, STB0899_DISF22, f22_tx); /* DiSEqC Tx freq */
++ state->rx_freq = 20000;
++
++ return 0;
++}
++
++static int stb0899_sleep(struct dvb_frontend *fe)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++/*
++ u8 reg;
++*/
++ dprintk(state->verbose, FE_DEBUG, 1, "Going to Sleep .. (Really tired .. :-))");
++ /* post process event */
++ stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 0);
++
++ return 0;
++}
++
++static int stb0899_wakeup(struct dvb_frontend *fe)
++{
++ int rc;
++ struct stb0899_state *state = fe->demodulator_priv;
++
++ if ((rc = stb0899_write_reg(state, STB0899_SYNTCTRL, STB0899_SELOSCI)))
++ return rc;
++ /* Activate all clocks; DVB-S2 registers are inaccessible otherwise. */
++ if ((rc = stb0899_write_reg(state, STB0899_STOPCLK1, 0x00)))
++ return rc;
++ if ((rc = stb0899_write_reg(state, STB0899_STOPCLK2, 0x00)))
++ return rc;
++
++ /* post process event */
++ stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 1);
++
++ return 0;
++}
++
++static int stb0899_init(struct dvb_frontend *fe)
++{
++ int i;
++ struct stb0899_state *state = fe->demodulator_priv;
++ struct stb0899_config *config = state->config;
++
++ dprintk(state->verbose, FE_DEBUG, 1, "Initializing STB0899 ... ");
++
++ /* init device */
++ dprintk(state->verbose, FE_DEBUG, 1, "init device");
++ for (i = 0; config->init_dev[i].address != 0xffff; i++)
++ stb0899_write_reg(state, config->init_dev[i].address, config->init_dev[i].data);
++
++ dprintk(state->verbose, FE_DEBUG, 1, "init S2 demod");
++ /* init S2 demod */
++ for (i = 0; config->init_s2_demod[i].offset != 0xffff; i++)
++ stb0899_write_s2reg(state, STB0899_S2DEMOD,
++ config->init_s2_demod[i].base_address,
++ config->init_s2_demod[i].offset,
++ config->init_s2_demod[i].data);
++
++ dprintk(state->verbose, FE_DEBUG, 1, "init S1 demod");
++ /* init S1 demod */
++ for (i = 0; config->init_s1_demod[i].address != 0xffff; i++)
++ stb0899_write_reg(state, config->init_s1_demod[i].address, config->init_s1_demod[i].data);
++
++ dprintk(state->verbose, FE_DEBUG, 1, "init S2 FEC");
++ /* init S2 fec */
++ for (i = 0; config->init_s2_fec[i].offset != 0xffff; i++)
++ stb0899_write_s2reg(state, STB0899_S2FEC,
++ config->init_s2_fec[i].base_address,
++ config->init_s2_fec[i].offset,
++ config->init_s2_fec[i].data);
++
++ dprintk(state->verbose, FE_DEBUG, 1, "init TST");
++ /* init test */
++ for (i = 0; config->init_tst[i].address != 0xffff; i++)
++ stb0899_write_reg(state, config->init_tst[i].address, config->init_tst[i].data);
++
++ stb0899_init_calc(state);
++ stb0899_diseqc_init(state);
++
++ return 0;
++}
++
++static int stb0899_table_lookup(const struct stb0899_tab *tab, int max, int val)
++{
++ int res = 0;
++ int min = 0, med;
++
++ if (val < tab[min].read)
++ res = tab[min].real;
++ else if (val >= tab[max].read)
++ res = tab[max].real;
++ else {
++ while ((max - min) > 1) {
++ med = (max + min) / 2;
++ if (val >= tab[min].read && val < tab[med].read)
++ max = med;
++ else
++ min = med;
++ }
++ res = ((val - tab[min].read) *
++ (tab[max].real - tab[min].real) /
++ (tab[max].read - tab[min].read)) +
++ tab[min].real;
++ }
++
++ return res;
++}
++
++static int stb0899_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ struct stb0899_internal *internal = &state->internal;
++
++ int val;
++ u32 reg;
++ *strength = 0;
++ switch (state->delsys) {
++ case SYS_DVBS:
++ case SYS_DSS:
++ if (internal->lock) {
++ reg = stb0899_read_reg(state, STB0899_VSTATUS);
++ if (STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg)) {
++
++ reg = stb0899_read_reg(state, STB0899_AGCIQIN);
++ val = (s32)(s8)STB0899_GETFIELD(AGCIQVALUE, reg);
++
++ *strength = stb0899_table_lookup(stb0899_dvbsrf_tab, ARRAY_SIZE(stb0899_dvbsrf_tab) - 1, val);
++ *strength += 750;
++ dprintk(state->verbose, FE_DEBUG, 1, "AGCIQVALUE = 0x%02x, C = %d * 0.1 dBm",
++ val & 0xff, *strength);
++ }
++ }
++ break;
++ case SYS_DVBS2:
++ if (internal->lock) {
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_GAIN);
++ val = STB0899_GETFIELD(IF_AGC_GAIN, reg);
++
++ *strength = stb0899_table_lookup(stb0899_dvbs2rf_tab, ARRAY_SIZE(stb0899_dvbs2rf_tab) - 1, val);
++ *strength += 950;
++ dprintk(state->verbose, FE_DEBUG, 1, "IF_AGC_GAIN = 0x%04x, C = %d * 0.1 dBm",
++ val & 0x3fff, *strength);
++ }
++ break;
++ default:
++ dprintk(state->verbose, FE_DEBUG, 1, "Unsupported delivery system");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int stb0899_read_snr(struct dvb_frontend *fe, u16 *snr)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ struct stb0899_internal *internal = &state->internal;
++
++ unsigned int val, quant, quantn = -1, est, estn = -1;
++ u8 buf[2];
++ u32 reg;
++
++ *snr = 0;
++ reg = stb0899_read_reg(state, STB0899_VSTATUS);
++ switch (state->delsys) {
++ case SYS_DVBS:
++ case SYS_DSS:
++ if (internal->lock) {
++ if (STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg)) {
++
++ stb0899_read_regs(state, STB0899_NIRM, buf, 2);
++ val = MAKEWORD16(buf[0], buf[1]);
++
++ *snr = stb0899_table_lookup(stb0899_cn_tab, ARRAY_SIZE(stb0899_cn_tab) - 1, val);
++ dprintk(state->verbose, FE_DEBUG, 1, "NIR = 0x%02x%02x = %u, C/N = %d * 0.1 dBm\n",
++ buf[0], buf[1], val, *snr);
++ }
++ }
++ break;
++ case SYS_DVBS2:
++ if (internal->lock) {
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL1);
++ quant = STB0899_GETFIELD(UWP_ESN0_QUANT, reg);
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_STAT2);
++ est = STB0899_GETFIELD(ESN0_EST, reg);
++ if (est == 1)
++ val = 301; /* C/N = 30.1 dB */
++ else if (est == 2)
++ val = 270; /* C/N = 27.0 dB */
++ else {
++ /* quantn = 100 * log(quant^2) */
++ quantn = stb0899_table_lookup(stb0899_quant_tab, ARRAY_SIZE(stb0899_quant_tab) - 1, quant * 100);
++ /* estn = 100 * log(est) */
++ estn = stb0899_table_lookup(stb0899_est_tab, ARRAY_SIZE(stb0899_est_tab) - 1, est);
++ /* snr(dBm/10) = -10*(log(est)-log(quant^2)) => snr(dBm/10) = (100*log(quant^2)-100*log(est))/10 */
++ val = (quantn - estn) / 10;
++ }
++ *snr = val;
++ dprintk(state->verbose, FE_DEBUG, 1, "Es/N0 quant = %d (%d) estimate = %u (%d), C/N = %d * 0.1 dBm",
++ quant, quantn, est, estn, val);
++ }
++ break;
++ default:
++ dprintk(state->verbose, FE_DEBUG, 1, "Unsupported delivery system");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int stb0899_read_status(struct dvb_frontend *fe, enum fe_status *status)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ struct stb0899_internal *internal = &state->internal;
++ u8 reg;
++ *status = 0;
++
++ switch (state->delsys) {
++ case SYS_DVBS:
++ case SYS_DSS:
++ dprintk(state->verbose, FE_DEBUG, 1, "Delivery system DVB-S/DSS");
++ if (internal->lock) {
++ reg = stb0899_read_reg(state, STB0899_VSTATUS);
++ if (STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg)) {
++ dprintk(state->verbose, FE_DEBUG, 1, "--------> FE_HAS_CARRIER | FE_HAS_LOCK");
++ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_LOCK;
++
++ reg = stb0899_read_reg(state, STB0899_PLPARM);
++ if (STB0899_GETFIELD(VITCURPUN, reg)) {
++ dprintk(state->verbose, FE_DEBUG, 1, "--------> FE_HAS_VITERBI | FE_HAS_SYNC");
++ *status |= FE_HAS_VITERBI | FE_HAS_SYNC;
++ /* post process event */
++ stb0899_postproc(state, STB0899_POSTPROC_GPIO_LOCK, 1);
++ }
++ }
++ }
++ break;
++ case SYS_DVBS2:
++ dprintk(state->verbose, FE_DEBUG, 1, "Delivery system DVB-S2");
++ if (internal->lock) {
++ reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_STAT2);
++ if (STB0899_GETFIELD(UWP_LOCK, reg) && STB0899_GETFIELD(CSM_LOCK, reg)) {
++ *status |= FE_HAS_CARRIER;
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "UWP & CSM Lock ! ---> DVB-S2 FE_HAS_CARRIER");
++
++ reg = stb0899_read_reg(state, STB0899_CFGPDELSTATUS1);
++ if (STB0899_GETFIELD(CFGPDELSTATUS_LOCK, reg)) {
++ *status |= FE_HAS_LOCK;
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "Packet Delineator Locked ! -----> DVB-S2 FE_HAS_LOCK");
++
++ }
++ if (STB0899_GETFIELD(CONTINUOUS_STREAM, reg)) {
++ *status |= FE_HAS_VITERBI;
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "Packet Delineator found VITERBI ! -----> DVB-S2 FE_HAS_VITERBI");
++ }
++ if (STB0899_GETFIELD(ACCEPTED_STREAM, reg)) {
++ *status |= FE_HAS_SYNC;
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "Packet Delineator found SYNC ! -----> DVB-S2 FE_HAS_SYNC");
++ /* post process event */
++ stb0899_postproc(state, STB0899_POSTPROC_GPIO_LOCK, 1);
++ }
++ }
++ }
++ break;
++ default:
++ dprintk(state->verbose, FE_DEBUG, 1, "Unsupported delivery system");
++ return -EINVAL;
++ }
++ return 0;
++}
++
++/*
++ * stb0899_get_error
++ * viterbi error for DVB-S/DSS
++ * packet error for DVB-S2
++ * Bit Error Rate or Packet Error Rate * 10 ^ 7
++ */
++static int stb0899_read_ber(struct dvb_frontend *fe, u32 *ber)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ struct stb0899_internal *internal = &state->internal;
++
++ u8 lsb, msb;
++
++ *ber = 0;
++
++ switch (state->delsys) {
++ case SYS_DVBS:
++ case SYS_DSS:
++ if (internal->lock) {
++ lsb = stb0899_read_reg(state, STB0899_ECNT1L);
++ msb = stb0899_read_reg(state, STB0899_ECNT1M);
++ *ber = MAKEWORD16(msb, lsb);
++ /* Viterbi Check */
++ if (STB0899_GETFIELD(VSTATUS_PRFVIT, internal->v_status)) {
++ /* Error Rate */
++ *ber *= 9766;
++ /* ber = ber * 10 ^ 7 */
++ *ber /= (-1 + (1 << (2 * STB0899_GETFIELD(NOE, internal->err_ctrl))));
++ *ber /= 8;
++ }
++ }
++ break;
++ case SYS_DVBS2:
++ if (internal->lock) {
++ lsb = stb0899_read_reg(state, STB0899_ECNT1L);
++ msb = stb0899_read_reg(state, STB0899_ECNT1M);
++ *ber = MAKEWORD16(msb, lsb);
++ /* ber = ber * 10 ^ 7 */
++ *ber *= 10000000;
++ *ber /= (-1 + (1 << (4 + 2 * STB0899_GETFIELD(NOE, internal->err_ctrl))));
++ }
++ break;
++ default:
++ dprintk(state->verbose, FE_DEBUG, 1, "Unsupported delivery system");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int stb0899_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++
++ switch (voltage) {
++ case SEC_VOLTAGE_13:
++ stb0899_write_reg(state, STB0899_GPIO00CFG, 0x82);
++ stb0899_write_reg(state, STB0899_GPIO01CFG, 0x02);
++ stb0899_write_reg(state, STB0899_GPIO02CFG, 0x00);
++ break;
++ case SEC_VOLTAGE_18:
++ stb0899_write_reg(state, STB0899_GPIO00CFG, 0x02);
++ stb0899_write_reg(state, STB0899_GPIO01CFG, 0x02);
++ stb0899_write_reg(state, STB0899_GPIO02CFG, 0x82);
++ break;
++ case SEC_VOLTAGE_OFF:
++ stb0899_write_reg(state, STB0899_GPIO00CFG, 0x82);
++ stb0899_write_reg(state, STB0899_GPIO01CFG, 0x82);
++ stb0899_write_reg(state, STB0899_GPIO02CFG, 0x82);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int stb0899_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ struct stb0899_internal *internal = &state->internal;
++
++ u8 div, reg;
++
++ /* wait for diseqc idle */
++ if (stb0899_wait_diseqc_txidle(state, 100) < 0)
++ return -ETIMEDOUT;
++
++ switch (tone) {
++ case SEC_TONE_ON:
++ div = (internal->master_clk / 100) / 5632;
++ div = (div + 5) / 10;
++ stb0899_write_reg(state, STB0899_DISEQCOCFG, 0x66);
++ reg = stb0899_read_reg(state, STB0899_ACRPRESC);
++ STB0899_SETFIELD_VAL(ACRPRESC, reg, 0x03);
++ stb0899_write_reg(state, STB0899_ACRPRESC, reg);
++ stb0899_write_reg(state, STB0899_ACRDIV1, div);
++ break;
++ case SEC_TONE_OFF:
++ stb0899_write_reg(state, STB0899_DISEQCOCFG, 0x20);
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int stb0899_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
++{
++ int i2c_stat;
++ struct stb0899_state *state = fe->demodulator_priv;
++
++ i2c_stat = stb0899_read_reg(state, STB0899_I2CRPT);
++ if (i2c_stat < 0)
++ goto err;
++
++ if (enable) {
++ dprintk(state->verbose, FE_DEBUG, 1, "Enabling I2C Repeater ...");
++ i2c_stat |= STB0899_I2CTON;
++ if (stb0899_write_reg(state, STB0899_I2CRPT, i2c_stat) < 0)
++ goto err;
++ } else {
++ dprintk(state->verbose, FE_DEBUG, 1, "Disabling I2C Repeater ...");
++ i2c_stat &= ~STB0899_I2CTON;
++ if (stb0899_write_reg(state, STB0899_I2CRPT, i2c_stat) < 0)
++ goto err;
++ }
++ return 0;
++err:
++ dprintk(state->verbose, FE_ERROR, 1, "I2C Repeater control failed");
++ return -EREMOTEIO;
++}
++
++
++static inline void CONVERT32(u32 x, char *str)
++{
++ *str++ = (x >> 24) & 0xff;
++ *str++ = (x >> 16) & 0xff;
++ *str++ = (x >> 8) & 0xff;
++ *str++ = (x >> 0) & 0xff;
++ *str = '\0';
++}
++
++static int stb0899_get_dev_id(struct stb0899_state *state)
++{
++ u8 chip_id, release;
++ u16 id;
++ u32 demod_ver = 0, fec_ver = 0;
++ char demod_str[5] = { 0 };
++ char fec_str[5] = { 0 };
++
++ id = stb0899_read_reg(state, STB0899_DEV_ID);
++ dprintk(state->verbose, FE_DEBUG, 1, "ID reg=[0x%02x]", id);
++ chip_id = STB0899_GETFIELD(CHIP_ID, id);
++ release = STB0899_GETFIELD(CHIP_REL, id);
++
++ dprintk(state->verbose, FE_ERROR, 1, "Device ID=[%d], Release=[%d]",
++ chip_id, release);
++
++ CONVERT32(STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CORE_ID), (char *)&demod_str);
++
++ demod_ver = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_VERSION_ID);
++ dprintk(state->verbose, FE_ERROR, 1, "Demodulator Core ID=[%s], Version=[%d]", (char *) &demod_str, demod_ver);
++ CONVERT32(STB0899_READ_S2REG(STB0899_S2FEC, FEC_CORE_ID_REG), (char *)&fec_str);
++ fec_ver = STB0899_READ_S2REG(STB0899_S2FEC, FEC_VER_ID_REG);
++ if (! (chip_id > 0)) {
++ dprintk(state->verbose, FE_ERROR, 1, "couldn't find a STB 0899");
++
++ return -ENODEV;
++ }
++ dprintk(state->verbose, FE_ERROR, 1, "FEC Core ID=[%s], Version=[%d]", (char*) &fec_str, fec_ver);
++
++ return 0;
++}
++
++static void stb0899_set_delivery(struct stb0899_state *state)
++{
++ u8 reg;
++ u8 stop_clk[2];
++
++ stop_clk[0] = stb0899_read_reg(state, STB0899_STOPCLK1);
++ stop_clk[1] = stb0899_read_reg(state, STB0899_STOPCLK2);
++
++ switch (state->delsys) {
++ case SYS_DVBS:
++ dprintk(state->verbose, FE_DEBUG, 1, "Delivery System -- DVB-S");
++ /* FECM/Viterbi ON */
++ reg = stb0899_read_reg(state, STB0899_FECM);
++ STB0899_SETFIELD_VAL(FECM_RSVD0, reg, 0);
++ STB0899_SETFIELD_VAL(FECM_VITERBI_ON, reg, 1);
++ stb0899_write_reg(state, STB0899_FECM, reg);
++
++ stb0899_write_reg(state, STB0899_RSULC, 0xb1);
++ stb0899_write_reg(state, STB0899_TSULC, 0x40);
++ stb0899_write_reg(state, STB0899_RSLLC, 0x42);
++ stb0899_write_reg(state, STB0899_TSLPL, 0x12);
++
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESLDPC, reg, 1);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++
++ STB0899_SETFIELD_VAL(STOP_CHK8PSK, stop_clk[0], 1);
++ STB0899_SETFIELD_VAL(STOP_CKFEC108, stop_clk[0], 1);
++ STB0899_SETFIELD_VAL(STOP_CKFEC216, stop_clk[0], 1);
++
++ STB0899_SETFIELD_VAL(STOP_CKPKDLIN108, stop_clk[1], 1);
++ STB0899_SETFIELD_VAL(STOP_CKPKDLIN216, stop_clk[1], 1);
++
++ STB0899_SETFIELD_VAL(STOP_CKINTBUF216, stop_clk[0], 1);
++ STB0899_SETFIELD_VAL(STOP_CKCORE216, stop_clk[0], 0);
++
++ STB0899_SETFIELD_VAL(STOP_CKS2DMD108, stop_clk[1], 1);
++ break;
++ case SYS_DVBS2:
++ /* FECM/Viterbi OFF */
++ reg = stb0899_read_reg(state, STB0899_FECM);
++ STB0899_SETFIELD_VAL(FECM_RSVD0, reg, 0);
++ STB0899_SETFIELD_VAL(FECM_VITERBI_ON, reg, 0);
++ stb0899_write_reg(state, STB0899_FECM, reg);
++
++ stb0899_write_reg(state, STB0899_RSULC, 0xb1);
++ stb0899_write_reg(state, STB0899_TSULC, 0x42);
++ stb0899_write_reg(state, STB0899_RSLLC, 0x40);
++ stb0899_write_reg(state, STB0899_TSLPL, 0x02);
++
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESLDPC, reg, 0);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++
++ STB0899_SETFIELD_VAL(STOP_CHK8PSK, stop_clk[0], 1);
++ STB0899_SETFIELD_VAL(STOP_CKFEC108, stop_clk[0], 0);
++ STB0899_SETFIELD_VAL(STOP_CKFEC216, stop_clk[0], 0);
++
++ STB0899_SETFIELD_VAL(STOP_CKPKDLIN108, stop_clk[1], 0);
++ STB0899_SETFIELD_VAL(STOP_CKPKDLIN216, stop_clk[1], 0);
++
++ STB0899_SETFIELD_VAL(STOP_CKINTBUF216, stop_clk[0], 0);
++ STB0899_SETFIELD_VAL(STOP_CKCORE216, stop_clk[0], 0);
++
++ STB0899_SETFIELD_VAL(STOP_CKS2DMD108, stop_clk[1], 0);
++ break;
++ case SYS_DSS:
++ /* FECM/Viterbi ON */
++ reg = stb0899_read_reg(state, STB0899_FECM);
++ STB0899_SETFIELD_VAL(FECM_RSVD0, reg, 1);
++ STB0899_SETFIELD_VAL(FECM_VITERBI_ON, reg, 1);
++ stb0899_write_reg(state, STB0899_FECM, reg);
++
++ stb0899_write_reg(state, STB0899_RSULC, 0xa1);
++ stb0899_write_reg(state, STB0899_TSULC, 0x61);
++ stb0899_write_reg(state, STB0899_RSLLC, 0x42);
++
++ reg = stb0899_read_reg(state, STB0899_TSTRES);
++ STB0899_SETFIELD_VAL(FRESLDPC, reg, 1);
++ stb0899_write_reg(state, STB0899_TSTRES, reg);
++
++ STB0899_SETFIELD_VAL(STOP_CHK8PSK, stop_clk[0], 1);
++ STB0899_SETFIELD_VAL(STOP_CKFEC108, stop_clk[0], 1);
++ STB0899_SETFIELD_VAL(STOP_CKFEC216, stop_clk[0], 1);
++
++ STB0899_SETFIELD_VAL(STOP_CKPKDLIN108, stop_clk[1], 1);
++ STB0899_SETFIELD_VAL(STOP_CKPKDLIN216, stop_clk[1], 1);
++
++ STB0899_SETFIELD_VAL(STOP_CKCORE216, stop_clk[0], 0);
++
++ STB0899_SETFIELD_VAL(STOP_CKS2DMD108, stop_clk[1], 1);
++ break;
++ default:
++ dprintk(state->verbose, FE_ERROR, 1, "Unsupported delivery system");
++ break;
++ }
++ STB0899_SETFIELD_VAL(STOP_CKADCI108, stop_clk[0], 0);
++ stb0899_write_regs(state, STB0899_STOPCLK1, stop_clk, 2);
++}
++
++/*
++ * stb0899_set_iterations
++ * set the LDPC iteration scale function
++ */
++static void stb0899_set_iterations(struct stb0899_state *state)
++{
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_config *config = state->config;
++
++ s32 iter_scale;
++ u32 reg;
++
++ iter_scale = 17 * (internal->master_clk / 1000);
++ iter_scale += 410000;
++ iter_scale /= (internal->srate / 1000000);
++ iter_scale /= 1000;
++
++ if (iter_scale > config->ldpc_max_iter)
++ iter_scale = config->ldpc_max_iter;
++
++ reg = STB0899_READ_S2REG(STB0899_S2FEC, MAX_ITER);
++ STB0899_SETFIELD_VAL(MAX_ITERATIONS, reg, iter_scale);
++ stb0899_write_s2reg(state, STB0899_S2FEC, STB0899_BASE_MAX_ITER, STB0899_OFF0_MAX_ITER, reg);
++}
++
++static enum dvbfe_search stb0899_search(struct dvb_frontend *fe)
++{
++ struct stb0899_state *state = fe->demodulator_priv;
++ struct stb0899_params *i_params = &state->params;
++ struct stb0899_internal *internal = &state->internal;
++ struct stb0899_config *config = state->config;
++ struct dtv_frontend_properties *props = &fe->dtv_property_cache;
++
++ u32 SearchRange, gain;
++
++ i_params->freq = props->frequency;
++ i_params->srate = props->symbol_rate;
++ state->delsys = props->delivery_system;
++ dprintk(state->verbose, FE_DEBUG, 1, "delivery system=%d", state->delsys);
++
++ SearchRange = 10000000;
++ dprintk(state->verbose, FE_DEBUG, 1, "Frequency=%d, Srate=%d", i_params->freq, i_params->srate);
++ /* checking Search Range is meaningless for a fixed 3 Mhz */
++ if (INRANGE(i_params->srate, 1000000, 45000000)) {
++ dprintk(state->verbose, FE_DEBUG, 1, "Parameters IN RANGE");
++ stb0899_set_delivery(state);
++
++ if (state->config->tuner_set_rfsiggain) {
++ if (internal->srate > 15000000)
++ gain = 8; /* 15Mb < srate < 45Mb, gain = 8dB */
++ else if (internal->srate > 5000000)
++ gain = 12; /* 5Mb < srate < 15Mb, gain = 12dB */
++ else
++ gain = 14; /* 1Mb < srate < 5Mb, gain = 14db */
++ state->config->tuner_set_rfsiggain(fe, gain);
++ }
++
++ if (i_params->srate <= 5000000)
++ stb0899_set_mclk(state, config->lo_clk);
++ else
++ stb0899_set_mclk(state, config->hi_clk);
++
++ switch (state->delsys) {
++ case SYS_DVBS:
++ case SYS_DSS:
++ dprintk(state->verbose, FE_DEBUG, 1, "DVB-S delivery system");
++ internal->freq = i_params->freq;
++ internal->srate = i_params->srate;
++ /*
++ * search = user search range +
++ * 500Khz +
++ * 2 * Tuner_step_size +
++ * 10% of the symbol rate
++ */
++ internal->srch_range = SearchRange + 1500000 + (i_params->srate / 5);
++ internal->derot_percent = 30;
++
++ /* What to do for tuners having no bandwidth setup ? */
++ /* enable tuner I/O */
++ stb0899_i2c_gate_ctrl(&state->frontend, 1);
++
++ if (state->config->tuner_set_bandwidth)
++ state->config->tuner_set_bandwidth(fe, (13 * (stb0899_carr_width(state) + SearchRange)) / 10);
++ if (state->config->tuner_get_bandwidth)
++ state->config->tuner_get_bandwidth(fe, &internal->tuner_bw);
++
++ /* disable tuner I/O */
++ stb0899_i2c_gate_ctrl(&state->frontend, 0);
++
++ /* Set DVB-S1 AGC */
++ stb0899_write_reg(state, STB0899_AGCRFCFG, 0x11);
++
++ /* Run the search algorithm */
++ dprintk(state->verbose, FE_DEBUG, 1, "running DVB-S search algo ..");
++ if (stb0899_dvbs_algo(state) == RANGEOK) {
++ internal->lock = 1;
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "-------------------------------------> DVB-S LOCK !");
++
++// stb0899_write_reg(state, STB0899_ERRCTRL1, 0x3d); /* Viterbi Errors */
++// internal->v_status = stb0899_read_reg(state, STB0899_VSTATUS);
++// internal->err_ctrl = stb0899_read_reg(state, STB0899_ERRCTRL1);
++// dprintk(state->verbose, FE_DEBUG, 1, "VSTATUS=0x%02x", internal->v_status);
++// dprintk(state->verbose, FE_DEBUG, 1, "ERR_CTRL=0x%02x", internal->err_ctrl);
++
++ return DVBFE_ALGO_SEARCH_SUCCESS;
++ } else {
++ internal->lock = 0;
++
++ return DVBFE_ALGO_SEARCH_FAILED;
++ }
++ break;
++ case SYS_DVBS2:
++ internal->freq = i_params->freq;
++ internal->srate = i_params->srate;
++ internal->srch_range = SearchRange;
++
++ /* enable tuner I/O */
++ stb0899_i2c_gate_ctrl(&state->frontend, 1);
++
++ if (state->config->tuner_set_bandwidth)
++ state->config->tuner_set_bandwidth(fe, (stb0899_carr_width(state) + SearchRange));
++ if (state->config->tuner_get_bandwidth)
++ state->config->tuner_get_bandwidth(fe, &internal->tuner_bw);
++
++ /* disable tuner I/O */
++ stb0899_i2c_gate_ctrl(&state->frontend, 0);
++
++// pParams->SpectralInv = pSearch->IQ_Inversion;
++
++ /* Set DVB-S2 AGC */
++ stb0899_write_reg(state, STB0899_AGCRFCFG, 0x1c);
++
++ /* Set IterScale =f(MCLK,SYMB) */
++ stb0899_set_iterations(state);
++
++ /* Run the search algorithm */
++ dprintk(state->verbose, FE_DEBUG, 1, "running DVB-S2 search algo ..");
++ if (stb0899_dvbs2_algo(state) == DVBS2_FEC_LOCK) {
++ internal->lock = 1;
++ dprintk(state->verbose, FE_DEBUG, 1,
++ "-------------------------------------> DVB-S2 LOCK !");
++
++// stb0899_write_reg(state, STB0899_ERRCTRL1, 0xb6); /* Packet Errors */
++// internal->v_status = stb0899_read_reg(state, STB0899_VSTATUS);
++// internal->err_ctrl = stb0899_read_reg(state, STB0899_ERRCTRL1);
++
++ return DVBFE_ALGO_SEARCH_SUCCESS;
++ } else {
++ internal->lock = 0;
++
++ return DVBFE_ALGO_SEARCH_FAILED;
++ }
++ break;
++ default:
++ dprintk(state->verbose, FE_ERROR, 1, "Unsupported delivery system");
++ return DVBFE_ALGO_SEARCH_INVALID;
++ }
++ }
++
++ return DVBFE_ALGO_SEARCH_ERROR;
++}
++
++static int stb0899_get_frontend(struct dvb_frontend *fe)
++{
++ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
++ struct stb0899_state *state = fe->demodulator_priv;
++ struct stb0899_internal *internal = &state->internal;
++
++ dprintk(state->verbose, FE_DEBUG, 1, "Get params");
++ p->symbol_rate = internal->srate;
++ p->frequency = internal->freq;
++
++ return 0;
++}
++
++static enum dvbfe_algo stb0899_frontend_algo(struct dvb_frontend *fe)
++{
++ return DVBFE_ALGO_CUSTOM;
++}
++
++static struct dvb_frontend_ops stb0899_ops = {
++ .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
++ .info = {
++ .name = "STB0899 Multistandard",
++ .frequency_min = 950000,
++ .frequency_max = 2150000,
++ .frequency_stepsize = 0,
++ .frequency_tolerance = 0,
++ .symbol_rate_min = 1000000,
++ .symbol_rate_max = 45000000,
++
++ .caps = FE_CAN_INVERSION_AUTO |
++ FE_CAN_FEC_AUTO |
++ FE_CAN_2G_MODULATION |
++ FE_CAN_QPSK
++ },
++
++ .release = stb0899_release,
++ .init = stb0899_init,
++ .sleep = stb0899_sleep,
++// .wakeup = stb0899_wakeup,
++
++ .i2c_gate_ctrl = stb0899_i2c_gate_ctrl,
++
++ .get_frontend_algo = stb0899_frontend_algo,
++ .search = stb0899_search,
++ .get_frontend = stb0899_get_frontend,
++
++
++ .read_status = stb0899_read_status,
++ .read_snr = stb0899_read_snr,
++ .read_signal_strength = stb0899_read_signal_strength,
++ .read_ber = stb0899_read_ber,
++
++ .set_voltage = stb0899_set_voltage,
++ .set_tone = stb0899_set_tone,
++
++ .diseqc_send_master_cmd = stb0899_send_diseqc_msg,
++ .diseqc_recv_slave_reply = stb0899_recv_slave_reply,
++ .diseqc_send_burst = stb0899_send_diseqc_burst,
++};
++
++struct dvb_frontend *stb0899_attach(struct stb0899_config *config, struct i2c_adapter *i2c)
++{
++ struct stb0899_state *state = NULL;
++
++ state = kzalloc(sizeof (struct stb0899_state), GFP_KERNEL);
++ if (state == NULL)
++ goto error;
++
++ state->verbose = &verbose;
++ state->config = config;
++ state->i2c = i2c;
++ state->frontend.ops = stb0899_ops;
++ state->frontend.demodulator_priv = state;
++ /* use configured inversion as default -- we'll later autodetect inversion */
++ state->internal.inversion = config->inversion;
++
++ stb0899_wakeup(&state->frontend);
++ if (stb0899_get_dev_id(state) == -ENODEV) {
++ printk("%s: Exiting .. !\n", __func__);
++ goto error;
++ }
++
++ printk("%s: Attaching STB0899 \n", __func__);
++ return &state->frontend;
++
++error:
++ kfree(state);
++ return NULL;
++}
++EXPORT_SYMBOL(stb0899_attach);
++MODULE_PARM_DESC(verbose, "Set Verbosity level");
++MODULE_AUTHOR("Manu Abraham");
++MODULE_DESCRIPTION("STB0899 Multi-Std frontend");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/pci/cx23885/cimax2.c linux-openelec/drivers/media/pci/cx23885/cimax2.c
+--- linux-3.14.36/drivers/media/pci/cx23885/cimax2.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx23885/cimax2.c 2015-07-24 18:03:30.120842002 -0500
+@@ -426,7 +426,7 @@
+ return state->status;
+ }
+
+-int netup_ci_init(struct cx23885_tsport *port)
++int netup_ci_init(struct cx23885_tsport *port, bool isDVBSky)
+ {
+ struct netup_ci_state *state;
+ u8 cimax_init[34] = {
+@@ -475,6 +475,11 @@
+ goto err;
+ }
+
++ if(isDVBSky) {
++ cimax_init[32] = 0x22;
++ cimax_init[33] = 0x00;
++ }
++
+ port->port_priv = state;
+
+ switch (port->nr) {
+@@ -548,3 +553,19 @@
+ dvb_ca_en50221_release(&state->ca);
+ kfree(state);
+ }
++
++/* CI irq handler for DVBSky board*/
++int dvbsky_ci_slot_status(struct cx23885_dev *dev)
++{
++ struct cx23885_tsport *port = NULL;
++ struct netup_ci_state *state = NULL;
++
++ ci_dbg_print("%s:\n", __func__);
++
++ port = &dev->ts1;
++ state = port->port_priv;
++ schedule_work(&state->work);
++ ci_dbg_print("%s: Wakeup CI0\n", __func__);
++
++ return 1;
++}
+diff -Nur linux-3.14.36/drivers/media/pci/cx23885/cimax2.h linux-openelec/drivers/media/pci/cx23885/cimax2.h
+--- linux-3.14.36/drivers/media/pci/cx23885/cimax2.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx23885/cimax2.h 2015-07-24 18:03:30.120842002 -0500
+@@ -41,7 +41,9 @@
+ extern int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status);
+ extern int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open);
+-extern int netup_ci_init(struct cx23885_tsport *port);
++extern int netup_ci_init(struct cx23885_tsport *port, bool isDVBSky);
+ extern void netup_ci_exit(struct cx23885_tsport *port);
+
++extern int dvbsky_ci_slot_status(struct cx23885_dev *dev);
++
+ #endif
+diff -Nur linux-3.14.36/drivers/media/pci/cx23885/cx23885-cards.c linux-openelec/drivers/media/pci/cx23885/cx23885-cards.c
+--- linux-3.14.36/drivers/media/pci/cx23885/cx23885-cards.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx23885/cx23885-cards.c 2015-07-24 18:03:30.124842002 -0500
+@@ -613,6 +613,34 @@
+ .name = "TeVii S471",
+ .portb = CX23885_MPEG_DVB,
+ },
++ [CX23885_BOARD_BST_PS8512] = {
++ .name = "Bestunar PS8512",
++ .portb = CX23885_MPEG_DVB,
++ },
++ [CX23885_BOARD_DVBSKY_S950] = {
++ .name = "DVBSKY S950",
++ .portb = CX23885_MPEG_DVB,
++ },
++ [CX23885_BOARD_DVBSKY_S952] = {
++ .name = "DVBSKY S952",
++ .portb = CX23885_MPEG_DVB,
++ .portc = CX23885_MPEG_DVB,
++ },
++ [CX23885_BOARD_DVBSKY_S950_CI] = {
++ .ci_type = 3,
++ .name = "DVBSKY S950CI DVB-S2 CI",
++ .portb = CX23885_MPEG_DVB,
++ },
++ [CX23885_BOARD_DVBSKY_C2800E_CI] = {
++ .ci_type = 3,
++ .name = "DVBSKY C2800E DVB-C CI",
++ .portb = CX23885_MPEG_DVB,
++ },
++ [CX23885_BOARD_DVBSKY_T9580] = {
++ .name = "DVBSKY T9580",
++ .portb = CX23885_MPEG_DVB,
++ .portc = CX23885_MPEG_DVB,
++ },
+ [CX23885_BOARD_PROF_8000] = {
+ .name = "Prof Revolution DVB-S2 8000",
+ .portb = CX23885_MPEG_DVB,
+@@ -874,6 +902,30 @@
+ .subdevice = 0x9022,
+ .card = CX23885_BOARD_TEVII_S471,
+ }, {
++ .subvendor = 0x14f1,
++ .subdevice = 0x8512,
++ .card = CX23885_BOARD_BST_PS8512,
++ }, {
++ .subvendor = 0x4254,
++ .subdevice = 0x0950,
++ .card = CX23885_BOARD_DVBSKY_S950,
++ }, {
++ .subvendor = 0x4254,
++ .subdevice = 0x0952,
++ .card = CX23885_BOARD_DVBSKY_S952,
++ }, {
++ .subvendor = 0x4254,
++ .subdevice = 0x950C,
++ .card = CX23885_BOARD_DVBSKY_S950_CI,
++ }, {
++ .subvendor = 0x4254,
++ .subdevice = 0x2800,
++ .card = CX23885_BOARD_DVBSKY_C2800E_CI,
++ }, {
++ .subvendor = 0x4254,
++ .subdevice = 0x9580,
++ .card = CX23885_BOARD_DVBSKY_T9580,
++ }, {
+ .subvendor = 0x8000,
+ .subdevice = 0x3034,
+ .card = CX23885_BOARD_PROF_8000,
+@@ -1483,9 +1535,84 @@
+ cx_set(GP0_IO, 0x00040004);
+ mdelay(60);
+ break;
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_BST_PS8512:
++ cx23885_gpio_enable(dev, GPIO_2, 1);
++ cx23885_gpio_clear(dev, GPIO_2);
++ msleep(100);
++ cx23885_gpio_set(dev, GPIO_2);
++ break;
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_T9580:
++ cx_write(MC417_CTL, 0x00000037);/* enable GPIO3-18 pins */
++
++ cx23885_gpio_enable(dev, GPIO_2, 1);
++ cx23885_gpio_enable(dev, GPIO_11, 1);
++
++ cx23885_gpio_clear(dev, GPIO_2);
++ cx23885_gpio_clear(dev, GPIO_11);
++ msleep(100);
++ cx23885_gpio_set(dev, GPIO_2);
++ cx23885_gpio_set(dev, GPIO_11);
++ break;
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ /* GPIO-0 INTA from CiMax, input
++ GPIO-1 reset CiMax, output, high active
++ GPIO-2 reset demod, output, low active
++ GPIO-3 to GPIO-10 data/addr for CAM
++ GPIO-11 ~CS0 to CiMax1
++ GPIO-12 ~CS1 to CiMax2
++ GPIO-13 ADL0 load LSB addr
++ GPIO-14 ADL1 load MSB addr
++ GPIO-15 ~RDY from CiMax
++ GPIO-17 ~RD to CiMax
++ GPIO-18 ~WR to CiMax
++ */
++ cx_set(GP0_IO, 0x00060002); /* GPIO 1/2 as output */
++ cx_clear(GP0_IO, 0x00010004); /*GPIO 0 as input*/
++ mdelay(100);/* reset delay */
++ cx_set(GP0_IO, 0x00060004); /* GPIO as out, reset high */
++ cx_clear(GP0_IO, 0x00010002);
++ cx_write(MC417_CTL, 0x00000037);/* enable GPIO3-18 pins */
++ /* GPIO-15 IN as ~ACK, rest as OUT */
++ cx_write(MC417_OEN, 0x00001000);
++ /* ~RD, ~WR high; ADL0, ADL1 low; ~CS0, ~CS1 high */
++ cx_write(MC417_RWD, 0x0000c300);
++ /* enable irq */
++ cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
++ break;
+ }
+ }
+
++static int cx23885_ir_patch(struct i2c_adapter *i2c, u8 reg, u8 mask)
++{
++ struct i2c_msg msgs[2];
++ u8 tx_buf[2], rx_buf[1];
++ /* Write register address */
++ tx_buf[0] = reg;
++ msgs[0].addr = 0x4c;
++ msgs[0].flags = 0;
++ msgs[0].len = 1;
++ msgs[0].buf = (char *) tx_buf;
++ /* Read data from register */
++ msgs[1].addr = 0x4c;
++ msgs[1].flags = I2C_M_RD;
++ msgs[1].len = 1;
++ msgs[1].buf = (char *) rx_buf;
++
++ i2c_transfer(i2c, msgs, 2);
++
++ tx_buf[0] = reg;
++ tx_buf[1] = rx_buf[0] | mask;
++ msgs[0].addr = 0x4c;
++ msgs[0].flags = 0;
++ msgs[0].len = 2;
++ msgs[0].buf = (char *) tx_buf;
++
++ return i2c_transfer(i2c, msgs, 1);
++}
++
+ int cx23885_ir_init(struct cx23885_dev *dev)
+ {
+ static struct v4l2_subdev_io_pin_config ir_rxtx_pin_cfg[] = {
+@@ -1573,6 +1700,23 @@
+ v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
+ ir_rx_pin_cfg_count, ir_rx_pin_cfg);
+ break;
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ case CX23885_BOARD_DVBSKY_T9580:
++ dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
++ if (dev->sd_ir == NULL) {
++ ret = -ENODEV;
++ break;
++ }
++ v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
++ ir_rx_pin_cfg_count, ir_rx_pin_cfg);
++
++ cx23885_ir_patch(&(dev->i2c_bus[2].i2c_adap),0x1f,0x80);
++ cx23885_ir_patch(&(dev->i2c_bus[2].i2c_adap),0x23,0x80);
++ break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ if (!enable_885_ir)
+ break;
+@@ -1602,6 +1746,12 @@
+ cx23888_ir_remove(dev);
+ dev->sd_ir = NULL;
+ break;
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+@@ -1649,6 +1799,12 @@
+ if (dev->sd_ir)
+ cx23885_irq_add_enable(dev, PCI_MSK_IR);
+ break;
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+@@ -1752,6 +1908,10 @@
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_TEVII_S471:
+ case CX23885_BOARD_DVBWORLD_2005:
+@@ -1800,6 +1960,22 @@
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
++ case CX23885_BOARD_DVBSKY_S952:
++ ts1->gen_ctrl_val = 0x5; /* Parallel */
++ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
++ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
++ ts2->gen_ctrl_val = 0xe; /* Serial bus + punctured clock */
++ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
++ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
++ break;
++ case CX23885_BOARD_DVBSKY_T9580:
++ ts1->gen_ctrl_val = 0x5; /* Parallel */
++ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
++ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
++ ts2->gen_ctrl_val = 0x8; /* Serial bus */
++ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
++ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
++ break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_HAUPPAUGE_HVR1500:
+ case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
+@@ -1857,6 +2033,12 @@
+ case CX23885_BOARD_MPX885:
+ case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_AVERMEDIA_HC81R:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+diff -Nur linux-3.14.36/drivers/media/pci/cx23885/cx23885-core.c linux-openelec/drivers/media/pci/cx23885/cx23885-core.c
+--- linux-3.14.36/drivers/media/pci/cx23885/cx23885-core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx23885/cx23885-core.c 2015-07-24 18:03:30.124842002 -0500
+@@ -1909,6 +1909,10 @@
+ (pci_status & PCI_MSK_GPIO0))
+ handled += altera_ci_irq(dev);
+
++ if (cx23885_boards[dev->board].ci_type == 3 &&
++ (pci_status & PCI_MSK_GPIO0))
++ handled += dvbsky_ci_slot_status(dev);
++
+ if (ts1_status) {
+ if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
+ handled += cx23885_irq_ts(ts1, ts1_status);
+@@ -2141,6 +2145,8 @@
+ cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
+ break;
+ case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
+ cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
+ break;
+ }
+diff -Nur linux-3.14.36/drivers/media/pci/cx23885/cx23885-dvb.c linux-openelec/drivers/media/pci/cx23885/cx23885-dvb.c
+--- linux-3.14.36/drivers/media/pci/cx23885/cx23885-dvb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx23885/cx23885-dvb.c 2015-07-24 18:03:30.128842002 -0500
+@@ -52,6 +52,8 @@
+ #include "lnbh24.h"
+ #include "cx24116.h"
+ #include "cx24117.h"
++#include "dvbsky_m88ds3103.h"
++#include "dvbsky_m88dc2800.h"
+ #include "cimax2.h"
+ #include "lgs8gxx.h"
+ #include "netup-eeprom.h"
+@@ -507,6 +509,93 @@
+ .if_khz = 4000,
+ };
+
++/* bst control */
++int bst_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
++{
++ struct cx23885_tsport *port = fe->dvb->priv;
++ struct cx23885_dev *dev = port->dev;
++
++ cx23885_gpio_enable(dev, GPIO_1, 1);
++ cx23885_gpio_enable(dev, GPIO_0, 1);
++
++ switch (voltage) {
++ case SEC_VOLTAGE_13:
++ cx23885_gpio_set(dev, GPIO_1);
++ cx23885_gpio_clear(dev, GPIO_0);
++ break;
++ case SEC_VOLTAGE_18:
++ cx23885_gpio_set(dev, GPIO_1);
++ cx23885_gpio_set(dev, GPIO_0);
++ break;
++ case SEC_VOLTAGE_OFF:
++ cx23885_gpio_clear(dev, GPIO_1);
++ cx23885_gpio_clear(dev, GPIO_0);
++ break;
++ }
++ return 0;
++}
++
++int dvbsky_set_voltage_sec(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
++{
++ struct cx23885_tsport *port = fe->dvb->priv;
++ struct cx23885_dev *dev = port->dev;
++
++ cx23885_gpio_enable(dev, GPIO_12, 1);
++ cx23885_gpio_enable(dev, GPIO_13, 1);
++
++ switch (voltage) {
++ case SEC_VOLTAGE_13:
++ cx23885_gpio_set(dev, GPIO_13);
++ cx23885_gpio_clear(dev, GPIO_12);
++ break;
++ case SEC_VOLTAGE_18:
++ cx23885_gpio_set(dev, GPIO_13);
++ cx23885_gpio_set(dev, GPIO_12);
++ break;
++ case SEC_VOLTAGE_OFF:
++ cx23885_gpio_clear(dev, GPIO_13);
++ cx23885_gpio_clear(dev, GPIO_12);
++ break;
++ }
++ return 0;
++}
++
++/* bestunar single dvb-s2 */
++static struct dvbsky_m88ds3103_config bst_ds3103_config = {
++ .demod_address = 0x68,
++ .ci_mode = 0,
++ .pin_ctrl = 0x82,
++ .ts_mode = 0,
++ .set_voltage = bst_set_voltage,
++};
++/* DVBSKY dual dvb-s2 */
++static struct dvbsky_m88ds3103_config dvbsky_ds3103_config_pri = {
++ .demod_address = 0x68,
++ .ci_mode = 0,
++ .pin_ctrl = 0x82,
++ .ts_mode = 0,
++ .set_voltage = bst_set_voltage,
++};
++static struct dvbsky_m88ds3103_config dvbsky_ds3103_config_sec = {
++ .demod_address = 0x68,
++ .ci_mode = 0,
++ .pin_ctrl = 0x82,
++ .ts_mode = 1,
++ .set_voltage = dvbsky_set_voltage_sec,
++};
++
++static struct dvbsky_m88ds3103_config dvbsky_ds3103_ci_config = {
++ .demod_address = 0x68,
++ .ci_mode = 2,
++ .pin_ctrl = 0x82,
++ .ts_mode = 0,
++};
++
++static struct dvbsky_m88dc2800_config dvbsky_dc2800_config = {
++ .demod_address = 0x1c,
++ .ts_mode = 3,
++};
++
+ static struct stv090x_config prof_8000_stv090x_config = {
+ .device = STV0903,
+ .demod_mode = STV090x_SINGLE,
+@@ -1311,6 +1400,57 @@
+ &tevii_ts2020_config, &i2c_bus->i2c_adap);
+ }
+ break;
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ i2c_bus = &dev->i2c_bus[1];
++ fe0->dvb.frontend = dvb_attach(dvbsky_m88ds3103_attach,
++ &bst_ds3103_config,
++ &i2c_bus->i2c_adap);
++ break;
++ case CX23885_BOARD_DVBSKY_S952:
++ switch (port->nr) {
++ /* port B */
++ case 1:
++ i2c_bus = &dev->i2c_bus[1];
++ fe0->dvb.frontend = dvb_attach(dvbsky_m88ds3103_attach,
++ &dvbsky_ds3103_config_pri,
++ &i2c_bus->i2c_adap);
++ break;
++ /* port C */
++ case 2:
++ i2c_bus = &dev->i2c_bus[0];
++ fe0->dvb.frontend = dvb_attach(dvbsky_m88ds3103_attach,
++ &dvbsky_ds3103_config_sec,
++ &i2c_bus->i2c_adap);
++ break;
++ }
++ break;
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ i2c_bus = &dev->i2c_bus[1];
++ fe0->dvb.frontend = dvb_attach(dvbsky_m88ds3103_attach,
++ &dvbsky_ds3103_ci_config,
++ &i2c_bus->i2c_adap);
++ break;
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ i2c_bus = &dev->i2c_bus[1];
++ fe0->dvb.frontend = dvb_attach(dvbsky_m88dc2800_attach,
++ &dvbsky_dc2800_config,
++ &i2c_bus->i2c_adap);
++ break;
++ case CX23885_BOARD_DVBSKY_T9580:
++ switch (port->nr) {
++ /* port B */
++ case 1:
++ i2c_bus = &dev->i2c_bus[1];
++ fe0->dvb.frontend = dvb_attach(dvbsky_m88ds3103_attach,
++ &dvbsky_ds3103_config_pri,
++ &i2c_bus->i2c_adap);
++ break;
++ /* port C */
++ case 2:
++ break;
++ }
++ break;
+ case CX23885_BOARD_PROF_8000:
+ i2c_bus = &dev->i2c_bus[0];
+
+@@ -1386,7 +1526,7 @@
+ printk(KERN_INFO "NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
+ port->nr, port->frontends.adapter.proposed_mac);
+
+- netup_ci_init(port);
++ netup_ci_init(port, false);
+ break;
+ }
+ case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF: {
+@@ -1413,6 +1553,41 @@
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
+ break;
+ }
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_T9580:{
++ u8 eeprom[256]; /* 24C02 i2c eeprom */
++
++ if(port->nr > 2)
++ break;
++
++ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
++ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
++ printk(KERN_INFO "DVBSKY PCIe MAC= %pM\n", eeprom + 0xc0+(port->nr-1)*8);
++ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
++ (port->nr-1)*8, 6);
++ break;
++ }
++ case CX23885_BOARD_DVBSKY_S950_CI: {
++ u8 eeprom[256]; /* 24C02 i2c eeprom */
++
++ if(port->nr > 2)
++ break;
++
++ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
++ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
++ printk(KERN_INFO "DVBSKY PCIe MAC= %pM\n", eeprom + 0xc0+(port->nr-1)*8);
++ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
++ (port->nr-1)*8, 6);
++
++ netup_ci_init(port, true);
++ break;
++ }
++ case CX23885_BOARD_DVBSKY_C2800E_CI: {
++ netup_ci_init(port, true);
++ break;
++ }
+ }
+
+ return ret;
+@@ -1495,6 +1670,8 @@
+
+ switch (port->dev->board) {
+ case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
+ netup_ci_exit(port);
+ break;
+ case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
+diff -Nur linux-3.14.36/drivers/media/pci/cx23885/cx23885.h linux-openelec/drivers/media/pci/cx23885/cx23885.h
+--- linux-3.14.36/drivers/media/pci/cx23885/cx23885.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx23885/cx23885.h 2015-07-24 18:03:30.128842002 -0500
+@@ -97,6 +97,14 @@
+ #define CX23885_BOARD_TBS_6980 41
+ #define CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200 42
+
++#define CX23885_BOARD_BASE_INDEX 43
++#define CX23885_BOARD_BST_PS8512 (CX23885_BOARD_BASE_INDEX)
++#define CX23885_BOARD_DVBSKY_S952 (CX23885_BOARD_BASE_INDEX+1)
++#define CX23885_BOARD_DVBSKY_S950 (CX23885_BOARD_BASE_INDEX+2)
++#define CX23885_BOARD_DVBSKY_S950_CI (CX23885_BOARD_BASE_INDEX+3)
++#define CX23885_BOARD_DVBSKY_C2800E_CI (CX23885_BOARD_BASE_INDEX+4)
++#define CX23885_BOARD_DVBSKY_T9580 (CX23885_BOARD_BASE_INDEX+5)
++
+ #define GPIO_0 0x00000001
+ #define GPIO_1 0x00000002
+ #define GPIO_2 0x00000004
+@@ -234,7 +242,7 @@
+ */
+ u32 clk_freq;
+ struct cx23885_input input[MAX_CX23885_INPUT];
+- int ci_type; /* for NetUP */
++ int ci_type; /* 1 and 2 for NetUP, 3 for DVBSky. */
+ /* Force bottom field first during DMA (888 workaround) */
+ u32 force_bff;
+ };
+diff -Nur linux-3.14.36/drivers/media/pci/cx23885/cx23885-input.c linux-openelec/drivers/media/pci/cx23885/cx23885-input.c
+--- linux-3.14.36/drivers/media/pci/cx23885/cx23885-input.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx23885/cx23885-input.c 2015-07-24 18:03:30.128842002 -0500
+@@ -89,6 +89,12 @@
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+@@ -143,6 +149,12 @@
+ case CX23885_BOARD_HAUPPAUGE_HVR1850:
+ case CX23885_BOARD_HAUPPAUGE_HVR1290:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_MYGICA_X8507:
+ /*
+ * The IR controller on this board only returns pulse widths.
+@@ -295,6 +307,18 @@
+ /* A guess at the remote */
+ rc_map = RC_MAP_TEVII_NEC;
+ break;
++ case CX23885_BOARD_BST_PS8512:
++ case CX23885_BOARD_DVBSKY_S950:
++ case CX23885_BOARD_DVBSKY_S952:
++ case CX23885_BOARD_DVBSKY_S950_CI:
++ case CX23885_BOARD_DVBSKY_C2800E_CI:
++ case CX23885_BOARD_DVBSKY_T9580:
++ /* Integrated CX2388[58] IR controller */
++ driver_type = RC_DRIVER_IR_RAW;
++ allowed_protos = RC_BIT_ALL;
++ /* A guess at the remote */
++ rc_map = RC_MAP_DVBSKY;
++ break;
+ case CX23885_BOARD_MYGICA_X8507:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+diff -Nur linux-3.14.36/drivers/media/pci/cx23885/Kconfig linux-openelec/drivers/media/pci/cx23885/Kconfig
+--- linux-3.14.36/drivers/media/pci/cx23885/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx23885/Kconfig 2015-07-24 18:03:30.120842002 -0500
+@@ -23,6 +23,8 @@
+ select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT
++ select DVB_DVBSKY_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
++ select DVB_DVBSKY_M88DC2800 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_CX24117 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+diff -Nur linux-3.14.36/drivers/media/pci/cx88/cx88-cards.c linux-openelec/drivers/media/pci/cx88/cx88-cards.c
+--- linux-3.14.36/drivers/media/pci/cx88/cx88-cards.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx88/cx88-cards.c 2015-07-24 18:03:30.128842002 -0500
+@@ -2314,6 +2314,18 @@
+ } },
+ .mpeg = CX88_MPEG_DVB,
+ },
++ [CX88_BOARD_BST_PS8312] = {
++ .name = "Bestunar PS8312 DVB-S/S2",
++ .tuner_type = UNSET,
++ .radio_type = UNSET,
++ .tuner_addr = ADDR_UNSET,
++ .radio_addr = ADDR_UNSET,
++ .input = { {
++ .type = CX88_VMUX_DVB,
++ .vmux = 0,
++ } },
++ .mpeg = CX88_MPEG_DVB,
++ },
+ };
+
+ /* ------------------------------------------------------------------ */
+@@ -2818,6 +2830,10 @@
+ .subvendor = 0x1822,
+ .subdevice = 0x0023,
+ .card = CX88_BOARD_TWINHAN_VP1027_DVBS,
++ }, {
++ .subvendor = 0x14f1,
++ .subdevice = 0x8312,
++ .card = CX88_BOARD_BST_PS8312,
+ },
+ };
+
+@@ -3551,6 +3567,12 @@
+ cx_write(MO_SRST_IO, 1);
+ msleep(100);
+ break;
++ case CX88_BOARD_BST_PS8312:
++ cx_write(MO_GP1_IO, 0x808000);
++ msleep(100);
++ cx_write(MO_GP1_IO, 0x808080);
++ msleep(100);
++ break;
+ } /*end switch() */
+
+
+diff -Nur linux-3.14.36/drivers/media/pci/cx88/cx88-dvb.c linux-openelec/drivers/media/pci/cx88/cx88-dvb.c
+--- linux-3.14.36/drivers/media/pci/cx88/cx88-dvb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx88/cx88-dvb.c 2015-07-24 18:03:30.128842002 -0500
+@@ -54,6 +54,7 @@
+ #include "stv0288.h"
+ #include "stb6000.h"
+ #include "cx24116.h"
++#include "dvbsky_m88ds3103.h"
+ #include "stv0900.h"
+ #include "stb6100.h"
+ #include "stb6100_proc.h"
+@@ -459,6 +460,56 @@
+ return core->prev_set_voltage(fe, voltage);
+ return 0;
+ }
++/*CX88_BOARD_BST_PS8312*/
++static int bst_dvbs_set_voltage(struct dvb_frontend *fe,
++ fe_sec_voltage_t voltage)
++{
++ struct cx8802_dev *dev= fe->dvb->priv;
++ struct cx88_core *core = dev->core;
++
++ cx_write(MO_GP1_IO, 0x111111);
++ switch (voltage) {
++ case SEC_VOLTAGE_13:
++ cx_write(MO_GP1_IO, 0x020200);
++ break;
++ case SEC_VOLTAGE_18:
++ cx_write(MO_GP1_IO, 0x020202);
++ break;
++ case SEC_VOLTAGE_OFF:
++ cx_write(MO_GP1_IO, 0x111100);
++ break;
++ }
++
++ if (core->prev_set_voltage)
++ return core->prev_set_voltage(fe, voltage);
++ return 0;
++}
++
++static int bst_dvbs_set_voltage_v2(struct dvb_frontend *fe,
++ fe_sec_voltage_t voltage)
++{
++ struct cx8802_dev *dev= fe->dvb->priv;
++ struct cx88_core *core = dev->core;
++
++ cx_write(MO_GP1_IO, 0x111101);
++ switch (voltage) {
++ case SEC_VOLTAGE_13:
++ cx_write(MO_GP1_IO, 0x020200);
++ break;
++ case SEC_VOLTAGE_18:
++
++ cx_write(MO_GP1_IO, 0x020202);
++ break;
++ case SEC_VOLTAGE_OFF:
++
++ cx_write(MO_GP1_IO, 0x111110);
++ break;
++ }
++
++ if (core->prev_set_voltage)
++ return core->prev_set_voltage(fe, voltage);
++ return 0;
++}
+
+ static int vp1027_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+@@ -706,6 +757,11 @@
+ .clk_out_div = 1,
+ };
+
++static struct dvbsky_m88ds3103_config dvbsky_ds3103_config = {
++ .demod_address = 0x68,
++ .set_ts_params = ds3000_set_ts_param,
++};
++
+ static const struct stv0900_config prof_7301_stv0900_config = {
+ .demod_address = 0x6a,
+ /* demod_mode = 0,*/
+@@ -1487,6 +1543,35 @@
+ tevii_dvbs_set_voltage;
+ }
+ break;
++ case CX88_BOARD_BST_PS8312:
++ fe0->dvb.frontend = dvb_attach(dvbsky_m88ds3103_attach,
++ &dvbsky_ds3103_config,
++ &core->i2c_adap);
++ if (fe0->dvb.frontend != NULL){
++ int ret;
++ u8 b0[] = { 0x60 };
++ u8 b1[2] = { 0 };
++ struct i2c_msg msg[] = {
++ {
++ .addr = 0x50,
++ .flags = 0,
++ .buf = b0,
++ .len = 1
++ }, {
++ .addr = 0x50,
++ .flags = I2C_M_RD,
++ .buf = b1,
++ .len = 2
++ }
++ };
++ ret = i2c_transfer(&core->i2c_adap, msg, 2);
++ printk("PS8312: config = %02x, %02x", b1[0],b1[1]);
++ if(b1[0] == 0xaa)
++ fe0->dvb.frontend->ops.set_voltage = bst_dvbs_set_voltage_v2;
++ else
++ fe0->dvb.frontend->ops.set_voltage = bst_dvbs_set_voltage;
++ }
++ break;
+ case CX88_BOARD_OMICOM_SS4_PCI:
+ case CX88_BOARD_TBS_8920:
+ case CX88_BOARD_PROF_7300:
+diff -Nur linux-3.14.36/drivers/media/pci/cx88/cx88.h linux-openelec/drivers/media/pci/cx88/cx88.h
+--- linux-3.14.36/drivers/media/pci/cx88/cx88.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx88/cx88.h 2015-07-24 18:03:30.128842002 -0500
+@@ -237,6 +237,7 @@
+ #define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
+ #define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36 89
+ #define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43 90
++#define CX88_BOARD_BST_PS8312 91
+
+ enum cx88_itype {
+ CX88_VMUX_COMPOSITE1 = 1,
+diff -Nur linux-3.14.36/drivers/media/pci/cx88/cx88-input.c linux-openelec/drivers/media/pci/cx88/cx88-input.c
+--- linux-3.14.36/drivers/media/pci/cx88/cx88-input.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx88/cx88-input.c 2015-07-24 18:03:30.128842002 -0500
+@@ -419,6 +419,10 @@
+ rc_type = RC_BIT_NEC;
+ ir->sampling = 0xff00; /* address */
+ break;
++ case CX88_BOARD_BST_PS8312:
++ ir_codes = RC_MAP_DVBSKY;
++ ir->sampling = 0xff00; /* address */
++ break;
+ }
+
+ if (!ir_codes) {
+diff -Nur linux-3.14.36/drivers/media/pci/cx88/Kconfig linux-openelec/drivers/media/pci/cx88/Kconfig
+--- linux-3.14.36/drivers/media/pci/cx88/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/pci/cx88/Kconfig 2015-07-24 18:03:30.128842002 -0500
+@@ -57,6 +57,7 @@
+ select DVB_ISL6421 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT
++ select DVB_DVBSKY_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0288 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STB6000 if MEDIA_SUBDRV_AUTOSELECT
+diff -Nur linux-3.14.36/drivers/media/platform/Kconfig linux-openelec/drivers/media/platform/Kconfig
+--- linux-3.14.36/drivers/media/platform/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/platform/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -115,6 +115,21 @@
+ To compile this driver as a module, choose M here: the module
+ will be called s3c-camif.
+
++config VIDEO_MXC_OUTPUT
++ tristate "MXC Video For Linux Video Output"
++ depends on VIDEO_DEV && ARCH_MXC && FB_MXC
++ select VIDEOBUF_DMA_CONTIG
++ ---help---
++ This is the video4linux2 output driver based on MXC module.
++
++config VIDEO_MXC_CAPTURE
++ tristate "MXC Video For Linux Video Capture"
++ depends on VIDEO_V4L2 && VIDEO_V4L2_INT_DEVICE
++ ---help---
++ This is the video4linux2 capture driver based on i.MX video-in module.
++
++source "drivers/media/platform/mxc/capture/Kconfig"
++source "drivers/media/platform/mxc/output/Kconfig"
+ source "drivers/media/platform/soc_camera/Kconfig"
+ source "drivers/media/platform/exynos4-is/Kconfig"
+ source "drivers/media/platform/s5p-tv/Kconfig"
+diff -Nur linux-3.14.36/drivers/media/platform/Makefile linux-openelec/drivers/media/platform/Makefile
+--- linux-3.14.36/drivers/media/platform/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/platform/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -51,4 +51,7 @@
+
+ obj-$(CONFIG_ARCH_OMAP) += omap/
+
++obj-$(CONFIG_VIDEO_MXC_CAPTURE) += mxc/capture/
++obj-$(CONFIG_VIDEO_MXC_OUTPUT) += mxc/output/
++
+ ccflags-y += -I$(srctree)/drivers/media/i2c
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/adv7180.c linux-openelec/drivers/media/platform/mxc/capture/adv7180.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/adv7180.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/adv7180.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1344 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file adv7180.c
++ *
++ * @brief Analog Device ADV7180 video decoder functions
++ *
++ * @ingroup Camera
++ */
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_device.h>
++#include <linux/of_gpio.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/regulator/consumer.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-int-device.h>
++#include "mxc_v4l2_capture.h"
++
++#define ADV7180_VOLTAGE_ANALOG 1800000
++#define ADV7180_VOLTAGE_DIGITAL_CORE 1800000
++#define ADV7180_VOLTAGE_DIGITAL_IO 3300000
++#define ADV7180_VOLTAGE_PLL 1800000
++
++static struct regulator *dvddio_regulator;
++static struct regulator *dvdd_regulator;
++static struct regulator *avdd_regulator;
++static struct regulator *pvdd_regulator;
++static int pwn_gpio;
++
++static int adv7180_probe(struct i2c_client *adapter,
++ const struct i2c_device_id *id);
++static int adv7180_detach(struct i2c_client *client);
++
++static const struct i2c_device_id adv7180_id[] = {
++ {"adv7180", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, adv7180_id);
++
++static struct i2c_driver adv7180_i2c_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "adv7180",
++ },
++ .probe = adv7180_probe,
++ .remove = adv7180_detach,
++ .id_table = adv7180_id,
++};
++
++/*!
++ * Maintains the information on the current state of the sensor.
++ */
++struct sensor {
++ struct sensor_data sen;
++ v4l2_std_id std_id;
++} adv7180_data;
++
++
++/*! List of input video formats supported. The video formats is corresponding
++ * with v4l2 id in video_fmt_t
++ */
++typedef enum {
++ ADV7180_NTSC = 0, /*!< Locked on (M) NTSC video signal. */
++ ADV7180_PAL, /*!< (B, G, H, I, N)PAL video signal. */
++ ADV7180_NOT_LOCKED, /*!< Not locked on a signal. */
++} video_fmt_idx;
++
++/*! Number of video standards supported (including 'not locked' signal). */
++#define ADV7180_STD_MAX (ADV7180_PAL + 1)
++
++/*! Video format structure. */
++typedef struct {
++ int v4l2_id; /*!< Video for linux ID. */
++ char name[16]; /*!< Name (e.g., "NTSC", "PAL", etc.) */
++ u16 raw_width; /*!< Raw width. */
++ u16 raw_height; /*!< Raw height. */
++ u16 active_width; /*!< Active width. */
++ u16 active_height; /*!< Active height. */
++} video_fmt_t;
++
++/*! Description of video formats supported.
++ *
++ * PAL: raw=720x625, active=720x576.
++ * NTSC: raw=720x525, active=720x480.
++ */
++static video_fmt_t video_fmts[] = {
++ { /*! NTSC */
++ .v4l2_id = V4L2_STD_NTSC,
++ .name = "NTSC",
++ .raw_width = 720, /* SENS_FRM_WIDTH */
++ .raw_height = 525, /* SENS_FRM_HEIGHT */
++ .active_width = 720, /* ACT_FRM_WIDTH plus 1 */
++ .active_height = 480, /* ACT_FRM_WIDTH plus 1 */
++ },
++ { /*! (B, G, H, I, N) PAL */
++ .v4l2_id = V4L2_STD_PAL,
++ .name = "PAL",
++ .raw_width = 720,
++ .raw_height = 625,
++ .active_width = 720,
++ .active_height = 576,
++ },
++ { /*! Unlocked standard */
++ .v4l2_id = V4L2_STD_ALL,
++ .name = "Autodetect",
++ .raw_width = 720,
++ .raw_height = 625,
++ .active_width = 720,
++ .active_height = 576,
++ },
++};
++
++/*!* Standard index of ADV7180. */
++static video_fmt_idx video_idx = ADV7180_PAL;
++
++/*! @brief This mutex is used to provide mutual exclusion.
++ *
++ * Create a mutex that can be used to provide mutually exclusive
++ * read/write access to the globally accessible data structures
++ * and variables that were defined above.
++ */
++static DEFINE_MUTEX(mutex);
++
++#define IF_NAME "adv7180"
++#define ADV7180_INPUT_CTL 0x00 /* Input Control */
++#define ADV7180_STATUS_1 0x10 /* Status #1 */
++#define ADV7180_BRIGHTNESS 0x0a /* Brightness */
++#define ADV7180_IDENT 0x11 /* IDENT */
++#define ADV7180_VSYNC_FIELD_CTL_1 0x31 /* VSYNC Field Control #1 */
++#define ADV7180_MANUAL_WIN_CTL 0x3d /* Manual Window Control */
++#define ADV7180_SD_SATURATION_CB 0xe3 /* SD Saturation Cb */
++#define ADV7180_SD_SATURATION_CR 0xe4 /* SD Saturation Cr */
++#define ADV7180_PWR_MNG 0x0f /* Power Management */
++
++/* supported controls */
++/* This hasn't been fully implemented yet.
++ * This is how it should work, though. */
++static struct v4l2_queryctrl adv7180_qctrl[] = {
++ {
++ .id = V4L2_CID_BRIGHTNESS,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Brightness",
++ .minimum = 0, /* check this value */
++ .maximum = 255, /* check this value */
++ .step = 1, /* check this value */
++ .default_value = 127, /* check this value */
++ .flags = 0,
++ }, {
++ .id = V4L2_CID_SATURATION,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Saturation",
++ .minimum = 0, /* check this value */
++ .maximum = 255, /* check this value */
++ .step = 0x1, /* check this value */
++ .default_value = 127, /* check this value */
++ .flags = 0,
++ }
++};
++
++static inline void adv7180_power_down(int enable)
++{
++ gpio_set_value_cansleep(pwn_gpio, !enable);
++ msleep(2);
++}
++
++static int adv7180_regulator_enable(struct device *dev)
++{
++ int ret = 0;
++
++ dvddio_regulator = devm_regulator_get(dev, "DOVDD");
++
++ if (!IS_ERR(dvddio_regulator)) {
++ regulator_set_voltage(dvddio_regulator,
++ ADV7180_VOLTAGE_DIGITAL_IO,
++ ADV7180_VOLTAGE_DIGITAL_IO);
++ ret = regulator_enable(dvddio_regulator);
++ if (ret) {
++ dev_err(dev, "set io voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set io voltage ok\n");
++ }
++ } else {
++ dev_warn(dev, "cannot get io voltage\n");
++ }
++
++ dvdd_regulator = devm_regulator_get(dev, "DVDD");
++ if (!IS_ERR(dvdd_regulator)) {
++ regulator_set_voltage(dvdd_regulator,
++ ADV7180_VOLTAGE_DIGITAL_CORE,
++ ADV7180_VOLTAGE_DIGITAL_CORE);
++ ret = regulator_enable(dvdd_regulator);
++ if (ret) {
++ dev_err(dev, "set core voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set core voltage ok\n");
++ }
++ } else {
++ dev_warn(dev, "cannot get core voltage\n");
++ }
++
++ avdd_regulator = devm_regulator_get(dev, "AVDD");
++ if (!IS_ERR(avdd_regulator)) {
++ regulator_set_voltage(avdd_regulator,
++ ADV7180_VOLTAGE_ANALOG,
++ ADV7180_VOLTAGE_ANALOG);
++ ret = regulator_enable(avdd_regulator);
++ if (ret) {
++ dev_err(dev, "set analog voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set analog voltage ok\n");
++ }
++ } else {
++ dev_warn(dev, "cannot get analog voltage\n");
++ }
++
++ pvdd_regulator = devm_regulator_get(dev, "PVDD");
++ if (!IS_ERR(pvdd_regulator)) {
++ regulator_set_voltage(pvdd_regulator,
++ ADV7180_VOLTAGE_PLL,
++ ADV7180_VOLTAGE_PLL);
++ ret = regulator_enable(pvdd_regulator);
++ if (ret) {
++ dev_err(dev, "set pll voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set pll voltage ok\n");
++ }
++ } else {
++ dev_warn(dev, "cannot get pll voltage\n");
++ }
++
++ return ret;
++}
++
++
++/***********************************************************************
++ * I2C transfert.
++ ***********************************************************************/
++
++/*! Read one register from a ADV7180 i2c slave device.
++ *
++ * @param *reg register in the device we wish to access.
++ *
++ * @return 0 if success, an error code otherwise.
++ */
++static inline int adv7180_read(u8 reg)
++{
++ int val;
++
++ val = i2c_smbus_read_byte_data(adv7180_data.sen.i2c_client, reg);
++ if (val < 0) {
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "%s:read reg error: reg=%2x\n", __func__, reg);
++ return -1;
++ }
++ return val;
++}
++
++/*! Write one register of a ADV7180 i2c slave device.
++ *
++ * @param *reg register in the device we wish to access.
++ *
++ * @return 0 if success, an error code otherwise.
++ */
++static int adv7180_write_reg(u8 reg, u8 val)
++{
++ s32 ret;
++
++ ret = i2c_smbus_write_byte_data(adv7180_data.sen.i2c_client, reg, val);
++ if (ret < 0) {
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "%s:write reg error:reg=%2x,val=%2x\n", __func__,
++ reg, val);
++ return -1;
++ }
++ return 0;
++}
++
++/***********************************************************************
++ * mxc_v4l2_capture interface.
++ ***********************************************************************/
++
++/*!
++ * Return attributes of current video standard.
++ * Since this device autodetects the current standard, this function also
++ * sets the values that need to be changed if the standard changes.
++ * There is no set std equivalent function.
++ *
++ * @return None.
++ */
++static void adv7180_get_std(v4l2_std_id *std)
++{
++ int tmp;
++ int idx;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180_get_std\n");
++
++ /* Read the AD_RESULT to get the detect output video standard */
++ tmp = adv7180_read(ADV7180_STATUS_1) & 0x70;
++
++ mutex_lock(&mutex);
++ if (tmp == 0x40) {
++ /* PAL */
++ *std = V4L2_STD_PAL;
++ idx = ADV7180_PAL;
++ } else if (tmp == 0) {
++ /*NTSC*/
++ *std = V4L2_STD_NTSC;
++ idx = ADV7180_NTSC;
++ } else {
++ *std = V4L2_STD_ALL;
++ idx = ADV7180_NOT_LOCKED;
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "Got invalid video standard!\n");
++ }
++ mutex_unlock(&mutex);
++
++ /* This assumes autodetect which this device uses. */
++ if (*std != adv7180_data.std_id) {
++ video_idx = idx;
++ adv7180_data.std_id = *std;
++ adv7180_data.sen.pix.width = video_fmts[video_idx].raw_width;
++ adv7180_data.sen.pix.height = video_fmts[video_idx].raw_height;
++ }
++}
++
++/***********************************************************************
++ * IOCTL Functions from v4l2_int_ioctl_desc.
++ ***********************************************************************/
++
++/*!
++ * ioctl_g_ifparm - V4L2 sensor interface handler for vidioc_int_g_ifparm_num
++ * s: pointer to standard V4L2 device structure
++ * p: pointer to standard V4L2 vidioc_int_g_ifparm_num ioctl structure
++ *
++ * Gets slave interface parameters.
++ * Calculates the required xclk value to support the requested
++ * clock parameters in p. This value is returned in the p
++ * parameter.
++ *
++ * vidioc_int_g_ifparm returns platform-specific information about the
++ * interface settings used by the sensor.
++ *
++ * Called on open.
++ */
++static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_g_ifparm\n");
++
++ if (s == NULL) {
++ pr_err(" ERROR!! no slave device set!\n");
++ return -1;
++ }
++
++ /* Initialize structure to 0s then set any non-0 values. */
++ memset(p, 0, sizeof(*p));
++ p->if_type = V4L2_IF_TYPE_BT656; /* This is the only possibility. */
++ p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
++ p->u.bt656.nobt_hs_inv = 1;
++ p->u.bt656.bt_sync_correct = 1;
++
++ /* ADV7180 has a dedicated clock so no clock settings needed. */
++
++ return 0;
++}
++
++/*!
++ * Sets the camera power.
++ *
++ * s pointer to the camera device
++ * on if 1, power is to be turned on. 0 means power is to be turned off
++ *
++ * ioctl_s_power - V4L2 sensor interface handler for vidioc_int_s_power_num
++ * @s: pointer to standard V4L2 device structure
++ * @on: power state to which device is to be set
++ *
++ * Sets devices power state to requrested state, if possible.
++ * This is called on open, close, suspend and resume.
++ */
++static int ioctl_s_power(struct v4l2_int_device *s, int on)
++{
++ struct sensor *sensor = s->priv;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_s_power\n");
++
++ if (on && !sensor->sen.on) {
++ if (adv7180_write_reg(ADV7180_PWR_MNG, 0x04) != 0)
++ return -EIO;
++
++ /*
++ * FIXME:Additional 400ms to wait the chip to be stable?
++ * This is a workaround for preview scrolling issue.
++ */
++ msleep(400);
++ } else if (!on && sensor->sen.on) {
++ if (adv7180_write_reg(ADV7180_PWR_MNG, 0x24) != 0)
++ return -EIO;
++ }
++
++ sensor->sen.on = on;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
++ *
++ * Returns the sensor's video CAPTURE parameters.
++ */
++static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor *sensor = s->priv;
++ struct v4l2_captureparm *cparm = &a->parm.capture;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_g_parm\n");
++
++ switch (a->type) {
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cparm->capability = sensor->sen.streamcap.capability;
++ cparm->timeperframe = sensor->sen.streamcap.timeperframe;
++ cparm->capturemode = sensor->sen.streamcap.capturemode;
++ break;
++
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ break;
++
++ default:
++ pr_debug("ioctl_g_parm:type is unknown %d\n", a->type);
++ break;
++ }
++
++ return 0;
++}
++
++/*!
++ * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
++ *
++ * Configures the sensor to use the input parameters, if possible. If
++ * not possible, reverts to the old parameters and returns the
++ * appropriate error code.
++ *
++ * This driver cannot change these settings.
++ */
++static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_s_parm\n");
++
++ switch (a->type) {
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ break;
++ }
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
++ * @s: pointer to standard V4L2 device structure
++ * @f: pointer to standard V4L2 v4l2_format structure
++ *
++ * Returns the sensor's current pixel format in the v4l2_format
++ * parameter.
++ */
++static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
++{
++ struct sensor *sensor = s->priv;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_g_fmt_cap\n");
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" Returning size of %dx%d\n",
++ sensor->sen.pix.width, sensor->sen.pix.height);
++ f->fmt.pix = sensor->sen.pix;
++ break;
++
++ case V4L2_BUF_TYPE_PRIVATE: {
++ v4l2_std_id std;
++ adv7180_get_std(&std);
++ f->fmt.pix.pixelformat = (u32)std;
++ }
++ break;
++
++ default:
++ f->fmt.pix = sensor->sen.pix;
++ break;
++ }
++
++ return 0;
++}
++
++/*!
++ * ioctl_queryctrl - V4L2 sensor interface handler for VIDIOC_QUERYCTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @qc: standard V4L2 VIDIOC_QUERYCTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control information
++ * from the video_control[] array. Otherwise, returns -EINVAL if the
++ * control is not supported.
++ */
++static int ioctl_queryctrl(struct v4l2_int_device *s,
++ struct v4l2_queryctrl *qc)
++{
++ int i;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_queryctrl\n");
++
++ for (i = 0; i < ARRAY_SIZE(adv7180_qctrl); i++)
++ if (qc->id && qc->id == adv7180_qctrl[i].id) {
++ memcpy(qc, &(adv7180_qctrl[i]),
++ sizeof(*qc));
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++/*!
++ * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control's current
++ * value from the video_control[] array. Otherwise, returns -EINVAL
++ * if the control is not supported.
++ */
++static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int ret = 0;
++ int sat = 0;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_g_ctrl\n");
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_BRIGHTNESS\n");
++ adv7180_data.sen.brightness = adv7180_read(ADV7180_BRIGHTNESS);
++ vc->value = adv7180_data.sen.brightness;
++ break;
++ case V4L2_CID_CONTRAST:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_CONTRAST\n");
++ vc->value = adv7180_data.sen.contrast;
++ break;
++ case V4L2_CID_SATURATION:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_SATURATION\n");
++ sat = adv7180_read(ADV7180_SD_SATURATION_CB);
++ adv7180_data.sen.saturation = sat;
++ vc->value = adv7180_data.sen.saturation;
++ break;
++ case V4L2_CID_HUE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_HUE\n");
++ vc->value = adv7180_data.sen.hue;
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_AUTO_WHITE_BALANCE\n");
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_DO_WHITE_BALANCE\n");
++ break;
++ case V4L2_CID_RED_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_RED_BALANCE\n");
++ vc->value = adv7180_data.sen.red;
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_BLUE_BALANCE\n");
++ vc->value = adv7180_data.sen.blue;
++ break;
++ case V4L2_CID_GAMMA:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_GAMMA\n");
++ break;
++ case V4L2_CID_EXPOSURE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_EXPOSURE\n");
++ vc->value = adv7180_data.sen.ae_mode;
++ break;
++ case V4L2_CID_AUTOGAIN:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_AUTOGAIN\n");
++ break;
++ case V4L2_CID_GAIN:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_GAIN\n");
++ break;
++ case V4L2_CID_HFLIP:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_HFLIP\n");
++ break;
++ case V4L2_CID_VFLIP:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_VFLIP\n");
++ break;
++ default:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " Default case\n");
++ vc->value = 0;
++ ret = -EPERM;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
++ *
++ * If the requested control is supported, sets the control's current
++ * value in HW (and updates the video_control[] array). Otherwise,
++ * returns -EINVAL if the control is not supported.
++ */
++static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int retval = 0;
++ u8 tmp;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_s_ctrl\n");
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_BRIGHTNESS\n");
++ tmp = vc->value;
++ adv7180_write_reg(ADV7180_BRIGHTNESS, tmp);
++ adv7180_data.sen.brightness = vc->value;
++ break;
++ case V4L2_CID_CONTRAST:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_CONTRAST\n");
++ break;
++ case V4L2_CID_SATURATION:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_SATURATION\n");
++ tmp = vc->value;
++ adv7180_write_reg(ADV7180_SD_SATURATION_CB, tmp);
++ adv7180_write_reg(ADV7180_SD_SATURATION_CR, tmp);
++ adv7180_data.sen.saturation = vc->value;
++ break;
++ case V4L2_CID_HUE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_HUE\n");
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_AUTO_WHITE_BALANCE\n");
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_DO_WHITE_BALANCE\n");
++ break;
++ case V4L2_CID_RED_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_RED_BALANCE\n");
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_BLUE_BALANCE\n");
++ break;
++ case V4L2_CID_GAMMA:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_GAMMA\n");
++ break;
++ case V4L2_CID_EXPOSURE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_EXPOSURE\n");
++ break;
++ case V4L2_CID_AUTOGAIN:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_AUTOGAIN\n");
++ break;
++ case V4L2_CID_GAIN:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_GAIN\n");
++ break;
++ case V4L2_CID_HFLIP:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_HFLIP\n");
++ break;
++ case V4L2_CID_VFLIP:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_VFLIP\n");
++ break;
++ default:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " Default case\n");
++ retval = -EPERM;
++ break;
++ }
++
++ return retval;
++}
++
++/*!
++ * ioctl_enum_framesizes - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMESIZES ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_framesizes(struct v4l2_int_device *s,
++ struct v4l2_frmsizeenum *fsize)
++{
++ if (fsize->index >= 1)
++ return -EINVAL;
++
++ fsize->discrete.width = video_fmts[video_idx].active_width;
++ fsize->discrete.height = video_fmts[video_idx].active_height;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_chip_ident - V4L2 sensor interface handler for
++ * VIDIOC_DBG_G_CHIP_IDENT ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @id: pointer to int
++ *
++ * Return 0.
++ */
++static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
++{
++ ((struct v4l2_dbg_chip_ident *)id)->match.type =
++ V4L2_CHIP_MATCH_I2C_DRIVER;
++ strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name,
++ "adv7180_decoder");
++ ((struct v4l2_dbg_chip_ident *)id)->ident = V4L2_IDENT_ADV7180;
++
++ return 0;
++}
++
++/*!
++ * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
++ * @s: pointer to standard V4L2 device structure
++ */
++static int ioctl_init(struct v4l2_int_device *s)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_init\n");
++ return 0;
++}
++
++/*!
++ * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Initialise the device when slave attaches to the master.
++ */
++static int ioctl_dev_init(struct v4l2_int_device *s)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_dev_init\n");
++ return 0;
++}
++
++/*!
++ * This structure defines all the ioctls for this module.
++ */
++static struct v4l2_int_ioctl_desc adv7180_ioctl_desc[] = {
++
++ {vidioc_int_dev_init_num, (v4l2_int_ioctl_func*)ioctl_dev_init},
++
++ /*!
++ * Delinitialise the dev. at slave detach.
++ * The complement of ioctl_dev_init.
++ */
++/* {vidioc_int_dev_exit_num, (v4l2_int_ioctl_func *)ioctl_dev_exit}, */
++
++ {vidioc_int_s_power_num, (v4l2_int_ioctl_func*)ioctl_s_power},
++ {vidioc_int_g_ifparm_num, (v4l2_int_ioctl_func*)ioctl_g_ifparm},
++/* {vidioc_int_g_needs_reset_num,
++ (v4l2_int_ioctl_func *)ioctl_g_needs_reset}, */
++/* {vidioc_int_reset_num, (v4l2_int_ioctl_func *)ioctl_reset}, */
++ {vidioc_int_init_num, (v4l2_int_ioctl_func*)ioctl_init},
++
++ /*!
++ * VIDIOC_ENUM_FMT ioctl for the CAPTURE buffer type.
++ */
++/* {vidioc_int_enum_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap}, */
++
++ /*!
++ * VIDIOC_TRY_FMT ioctl for the CAPTURE buffer type.
++ * This ioctl is used to negotiate the image capture size and
++ * pixel format without actually making it take effect.
++ */
++/* {vidioc_int_try_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_try_fmt_cap}, */
++
++ {vidioc_int_g_fmt_cap_num, (v4l2_int_ioctl_func*)ioctl_g_fmt_cap},
++
++ /*!
++ * If the requested format is supported, configures the HW to use that
++ * format, returns error code if format not supported or HW can't be
++ * correctly configured.
++ */
++/* {vidioc_int_s_fmt_cap_num, (v4l2_int_ioctl_func *)ioctl_s_fmt_cap}, */
++
++ {vidioc_int_g_parm_num, (v4l2_int_ioctl_func*)ioctl_g_parm},
++ {vidioc_int_s_parm_num, (v4l2_int_ioctl_func*)ioctl_s_parm},
++ {vidioc_int_queryctrl_num, (v4l2_int_ioctl_func*)ioctl_queryctrl},
++ {vidioc_int_g_ctrl_num, (v4l2_int_ioctl_func*)ioctl_g_ctrl},
++ {vidioc_int_s_ctrl_num, (v4l2_int_ioctl_func*)ioctl_s_ctrl},
++ {vidioc_int_enum_framesizes_num,
++ (v4l2_int_ioctl_func *) ioctl_enum_framesizes},
++ {vidioc_int_g_chip_ident_num,
++ (v4l2_int_ioctl_func *)ioctl_g_chip_ident},
++};
++
++static struct v4l2_int_slave adv7180_slave = {
++ .ioctls = adv7180_ioctl_desc,
++ .num_ioctls = ARRAY_SIZE(adv7180_ioctl_desc),
++};
++
++static struct v4l2_int_device adv7180_int_device = {
++ .module = THIS_MODULE,
++ .name = "adv7180",
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &adv7180_slave,
++ },
++};
++
++
++/***********************************************************************
++ * I2C client and driver.
++ ***********************************************************************/
++
++/*! ADV7180 Reset function.
++ *
++ * @return None.
++ */
++static void adv7180_hard_reset(bool cvbs)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "In adv7180:adv7180_hard_reset\n");
++
++ if (cvbs) {
++ /* Set CVBS input on AIN1 */
++ adv7180_write_reg(ADV7180_INPUT_CTL, 0x00);
++ } else {
++ /*
++ * Set YPbPr input on AIN1,4,5 and normal
++ * operations(autodection of all stds).
++ */
++ adv7180_write_reg(ADV7180_INPUT_CTL, 0x09);
++ }
++
++ /* Datasheet recommends */
++ adv7180_write_reg(0x01, 0xc8);
++ adv7180_write_reg(0x02, 0x04);
++ adv7180_write_reg(0x03, 0x00);
++ adv7180_write_reg(0x04, 0x45);
++ adv7180_write_reg(0x05, 0x00);
++ adv7180_write_reg(0x06, 0x02);
++ adv7180_write_reg(0x07, 0x7F);
++ adv7180_write_reg(0x08, 0x80);
++ adv7180_write_reg(0x0A, 0x00);
++ adv7180_write_reg(0x0B, 0x00);
++ adv7180_write_reg(0x0C, 0x36);
++ adv7180_write_reg(0x0D, 0x7C);
++ adv7180_write_reg(0x0E, 0x00);
++ adv7180_write_reg(0x0F, 0x00);
++ adv7180_write_reg(0x13, 0x00);
++ adv7180_write_reg(0x14, 0x12);
++ adv7180_write_reg(0x15, 0x00);
++ adv7180_write_reg(0x16, 0x00);
++ adv7180_write_reg(0x17, 0x01);
++ adv7180_write_reg(0x18, 0x93);
++ adv7180_write_reg(0xF1, 0x19);
++ adv7180_write_reg(0x1A, 0x00);
++ adv7180_write_reg(0x1B, 0x00);
++ adv7180_write_reg(0x1C, 0x00);
++ adv7180_write_reg(0x1D, 0x40);
++ adv7180_write_reg(0x1E, 0x00);
++ adv7180_write_reg(0x1F, 0x00);
++ adv7180_write_reg(0x20, 0x00);
++ adv7180_write_reg(0x21, 0x00);
++ adv7180_write_reg(0x22, 0x00);
++ adv7180_write_reg(0x23, 0xC0);
++ adv7180_write_reg(0x24, 0x00);
++ adv7180_write_reg(0x25, 0x00);
++ adv7180_write_reg(0x26, 0x00);
++ adv7180_write_reg(0x27, 0x58);
++ adv7180_write_reg(0x28, 0x00);
++ adv7180_write_reg(0x29, 0x00);
++ adv7180_write_reg(0x2A, 0x00);
++ adv7180_write_reg(0x2B, 0xE1);
++ adv7180_write_reg(0x2C, 0xAE);
++ adv7180_write_reg(0x2D, 0xF4);
++ adv7180_write_reg(0x2E, 0x00);
++ adv7180_write_reg(0x2F, 0xF0);
++ adv7180_write_reg(0x30, 0x00);
++ adv7180_write_reg(0x31, 0x12);
++ adv7180_write_reg(0x32, 0x41);
++ adv7180_write_reg(0x33, 0x84);
++ adv7180_write_reg(0x34, 0x00);
++ adv7180_write_reg(0x35, 0x02);
++ adv7180_write_reg(0x36, 0x00);
++ adv7180_write_reg(0x37, 0x01);
++ adv7180_write_reg(0x38, 0x80);
++ adv7180_write_reg(0x39, 0xC0);
++ adv7180_write_reg(0x3A, 0x10);
++ adv7180_write_reg(0x3B, 0x05);
++ adv7180_write_reg(0x3C, 0x58);
++ adv7180_write_reg(0x3D, 0xB2);
++ adv7180_write_reg(0x3E, 0x64);
++ adv7180_write_reg(0x3F, 0xE4);
++ adv7180_write_reg(0x40, 0x90);
++ adv7180_write_reg(0x41, 0x01);
++ adv7180_write_reg(0x42, 0x7E);
++ adv7180_write_reg(0x43, 0xA4);
++ adv7180_write_reg(0x44, 0xFF);
++ adv7180_write_reg(0x45, 0xB6);
++ adv7180_write_reg(0x46, 0x12);
++ adv7180_write_reg(0x48, 0x00);
++ adv7180_write_reg(0x49, 0x00);
++ adv7180_write_reg(0x4A, 0x00);
++ adv7180_write_reg(0x4B, 0x00);
++ adv7180_write_reg(0x4C, 0x00);
++ adv7180_write_reg(0x4D, 0xEF);
++ adv7180_write_reg(0x4E, 0x08);
++ adv7180_write_reg(0x4F, 0x08);
++ adv7180_write_reg(0x50, 0x08);
++ adv7180_write_reg(0x51, 0x24);
++ adv7180_write_reg(0x52, 0x0B);
++ adv7180_write_reg(0x53, 0x4E);
++ adv7180_write_reg(0x54, 0x80);
++ adv7180_write_reg(0x55, 0x00);
++ adv7180_write_reg(0x56, 0x10);
++ adv7180_write_reg(0x57, 0x00);
++ adv7180_write_reg(0x58, 0x00);
++ adv7180_write_reg(0x59, 0x00);
++ adv7180_write_reg(0x5A, 0x00);
++ adv7180_write_reg(0x5B, 0x00);
++ adv7180_write_reg(0x5C, 0x00);
++ adv7180_write_reg(0x5D, 0x00);
++ adv7180_write_reg(0x5E, 0x00);
++ adv7180_write_reg(0x5F, 0x00);
++ adv7180_write_reg(0x60, 0x00);
++ adv7180_write_reg(0x61, 0x00);
++ adv7180_write_reg(0x62, 0x20);
++ adv7180_write_reg(0x63, 0x00);
++ adv7180_write_reg(0x64, 0x00);
++ adv7180_write_reg(0x65, 0x00);
++ adv7180_write_reg(0x66, 0x00);
++ adv7180_write_reg(0x67, 0x03);
++ adv7180_write_reg(0x68, 0x01);
++ adv7180_write_reg(0x69, 0x00);
++ adv7180_write_reg(0x6A, 0x00);
++ adv7180_write_reg(0x6B, 0xC0);
++ adv7180_write_reg(0x6C, 0x00);
++ adv7180_write_reg(0x6D, 0x00);
++ adv7180_write_reg(0x6E, 0x00);
++ adv7180_write_reg(0x6F, 0x00);
++ adv7180_write_reg(0x70, 0x00);
++ adv7180_write_reg(0x71, 0x00);
++ adv7180_write_reg(0x72, 0x00);
++ adv7180_write_reg(0x73, 0x10);
++ adv7180_write_reg(0x74, 0x04);
++ adv7180_write_reg(0x75, 0x01);
++ adv7180_write_reg(0x76, 0x00);
++ adv7180_write_reg(0x77, 0x3F);
++ adv7180_write_reg(0x78, 0xFF);
++ adv7180_write_reg(0x79, 0xFF);
++ adv7180_write_reg(0x7A, 0xFF);
++ adv7180_write_reg(0x7B, 0x1E);
++ adv7180_write_reg(0x7C, 0xC0);
++ adv7180_write_reg(0x7D, 0x00);
++ adv7180_write_reg(0x7E, 0x00);
++ adv7180_write_reg(0x7F, 0x00);
++ adv7180_write_reg(0x80, 0x00);
++ adv7180_write_reg(0x81, 0xC0);
++ adv7180_write_reg(0x82, 0x04);
++ adv7180_write_reg(0x83, 0x00);
++ adv7180_write_reg(0x84, 0x0C);
++ adv7180_write_reg(0x85, 0x02);
++ adv7180_write_reg(0x86, 0x03);
++ adv7180_write_reg(0x87, 0x63);
++ adv7180_write_reg(0x88, 0x5A);
++ adv7180_write_reg(0x89, 0x08);
++ adv7180_write_reg(0x8A, 0x10);
++ adv7180_write_reg(0x8B, 0x00);
++ adv7180_write_reg(0x8C, 0x40);
++ adv7180_write_reg(0x8D, 0x00);
++ adv7180_write_reg(0x8E, 0x40);
++ adv7180_write_reg(0x8F, 0x00);
++ adv7180_write_reg(0x90, 0x00);
++ adv7180_write_reg(0x91, 0x50);
++ adv7180_write_reg(0x92, 0x00);
++ adv7180_write_reg(0x93, 0x00);
++ adv7180_write_reg(0x94, 0x00);
++ adv7180_write_reg(0x95, 0x00);
++ adv7180_write_reg(0x96, 0x00);
++ adv7180_write_reg(0x97, 0xF0);
++ adv7180_write_reg(0x98, 0x00);
++ adv7180_write_reg(0x99, 0x00);
++ adv7180_write_reg(0x9A, 0x00);
++ adv7180_write_reg(0x9B, 0x00);
++ adv7180_write_reg(0x9C, 0x00);
++ adv7180_write_reg(0x9D, 0x00);
++ adv7180_write_reg(0x9E, 0x00);
++ adv7180_write_reg(0x9F, 0x00);
++ adv7180_write_reg(0xA0, 0x00);
++ adv7180_write_reg(0xA1, 0x00);
++ adv7180_write_reg(0xA2, 0x00);
++ adv7180_write_reg(0xA3, 0x00);
++ adv7180_write_reg(0xA4, 0x00);
++ adv7180_write_reg(0xA5, 0x00);
++ adv7180_write_reg(0xA6, 0x00);
++ adv7180_write_reg(0xA7, 0x00);
++ adv7180_write_reg(0xA8, 0x00);
++ adv7180_write_reg(0xA9, 0x00);
++ adv7180_write_reg(0xAA, 0x00);
++ adv7180_write_reg(0xAB, 0x00);
++ adv7180_write_reg(0xAC, 0x00);
++ adv7180_write_reg(0xAD, 0x00);
++ adv7180_write_reg(0xAE, 0x60);
++ adv7180_write_reg(0xAF, 0x00);
++ adv7180_write_reg(0xB0, 0x00);
++ adv7180_write_reg(0xB1, 0x60);
++ adv7180_write_reg(0xB2, 0x1C);
++ adv7180_write_reg(0xB3, 0x54);
++ adv7180_write_reg(0xB4, 0x00);
++ adv7180_write_reg(0xB5, 0x00);
++ adv7180_write_reg(0xB6, 0x00);
++ adv7180_write_reg(0xB7, 0x13);
++ adv7180_write_reg(0xB8, 0x03);
++ adv7180_write_reg(0xB9, 0x33);
++ adv7180_write_reg(0xBF, 0x02);
++ adv7180_write_reg(0xC0, 0x00);
++ adv7180_write_reg(0xC1, 0x00);
++ adv7180_write_reg(0xC2, 0x00);
++ adv7180_write_reg(0xC3, 0x00);
++ adv7180_write_reg(0xC4, 0x00);
++ adv7180_write_reg(0xC5, 0x81);
++ adv7180_write_reg(0xC6, 0x00);
++ adv7180_write_reg(0xC7, 0x00);
++ adv7180_write_reg(0xC8, 0x00);
++ adv7180_write_reg(0xC9, 0x04);
++ adv7180_write_reg(0xCC, 0x69);
++ adv7180_write_reg(0xCD, 0x00);
++ adv7180_write_reg(0xCE, 0x01);
++ adv7180_write_reg(0xCF, 0xB4);
++ adv7180_write_reg(0xD0, 0x00);
++ adv7180_write_reg(0xD1, 0x10);
++ adv7180_write_reg(0xD2, 0xFF);
++ adv7180_write_reg(0xD3, 0xFF);
++ adv7180_write_reg(0xD4, 0x7F);
++ adv7180_write_reg(0xD5, 0x7F);
++ adv7180_write_reg(0xD6, 0x3E);
++ adv7180_write_reg(0xD7, 0x08);
++ adv7180_write_reg(0xD8, 0x3C);
++ adv7180_write_reg(0xD9, 0x08);
++ adv7180_write_reg(0xDA, 0x3C);
++ adv7180_write_reg(0xDB, 0x9B);
++ adv7180_write_reg(0xDC, 0xAC);
++ adv7180_write_reg(0xDD, 0x4C);
++ adv7180_write_reg(0xDE, 0x00);
++ adv7180_write_reg(0xDF, 0x00);
++ adv7180_write_reg(0xE0, 0x14);
++ adv7180_write_reg(0xE1, 0x80);
++ adv7180_write_reg(0xE2, 0x80);
++ adv7180_write_reg(0xE3, 0x80);
++ adv7180_write_reg(0xE4, 0x80);
++ adv7180_write_reg(0xE5, 0x25);
++ adv7180_write_reg(0xE6, 0x44);
++ adv7180_write_reg(0xE7, 0x63);
++ adv7180_write_reg(0xE8, 0x65);
++ adv7180_write_reg(0xE9, 0x14);
++ adv7180_write_reg(0xEA, 0x63);
++ adv7180_write_reg(0xEB, 0x55);
++ adv7180_write_reg(0xEC, 0x55);
++ adv7180_write_reg(0xEE, 0x00);
++ adv7180_write_reg(0xEF, 0x4A);
++ adv7180_write_reg(0xF0, 0x44);
++ adv7180_write_reg(0xF1, 0x0C);
++ adv7180_write_reg(0xF2, 0x32);
++ adv7180_write_reg(0xF3, 0x00);
++ adv7180_write_reg(0xF4, 0x3F);
++ adv7180_write_reg(0xF5, 0xE0);
++ adv7180_write_reg(0xF6, 0x69);
++ adv7180_write_reg(0xF7, 0x10);
++ adv7180_write_reg(0xF8, 0x00);
++ adv7180_write_reg(0xF9, 0x03);
++ adv7180_write_reg(0xFA, 0xFA);
++ adv7180_write_reg(0xFB, 0x40);
++}
++
++/*! ADV7180 I2C attach function.
++ *
++ * @param *adapter struct i2c_adapter *.
++ *
++ * @return Error code indicating success or failure.
++ */
++
++/*!
++ * ADV7180 I2C probe function.
++ * Function set in i2c_driver struct.
++ * Called by insmod.
++ *
++ * @param *adapter I2C adapter descriptor.
++ *
++ * @return Error code indicating success or failure.
++ */
++static int adv7180_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int rev_id;
++ int ret = 0;
++ u32 cvbs = true;
++ struct pinctrl *pinctrl;
++ struct device *dev = &client->dev;
++
++ printk(KERN_ERR"DBG sensor data is at %p\n", &adv7180_data);
++
++ /* ov5640 pinctrl */
++ pinctrl = devm_pinctrl_get_select_default(dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(dev, "setup pinctrl failed\n");
++ return PTR_ERR(pinctrl);
++ }
++
++ /* request power down pin */
++ pwn_gpio = of_get_named_gpio(dev->of_node, "pwn-gpios", 0);
++ if (!gpio_is_valid(pwn_gpio)) {
++ dev_err(dev, "no sensor pwdn pin available\n");
++ return -ENODEV;
++ }
++ ret = devm_gpio_request_one(dev, pwn_gpio, GPIOF_OUT_INIT_HIGH,
++ "adv7180_pwdn");
++ if (ret < 0) {
++ dev_err(dev, "no power pin available!\n");
++ return ret;
++ }
++
++ adv7180_regulator_enable(dev);
++
++ adv7180_power_down(0);
++
++ msleep(1);
++
++ /* Set initial values for the sensor struct. */
++ memset(&adv7180_data, 0, sizeof(adv7180_data));
++ adv7180_data.sen.i2c_client = client;
++ adv7180_data.sen.streamcap.timeperframe.denominator = 30;
++ adv7180_data.sen.streamcap.timeperframe.numerator = 1;
++ adv7180_data.std_id = V4L2_STD_ALL;
++ video_idx = ADV7180_NOT_LOCKED;
++ adv7180_data.sen.pix.width = video_fmts[video_idx].raw_width;
++ adv7180_data.sen.pix.height = video_fmts[video_idx].raw_height;
++ adv7180_data.sen.pix.pixelformat = V4L2_PIX_FMT_UYVY; /* YUV422 */
++ adv7180_data.sen.pix.priv = 1; /* 1 is used to indicate TV in */
++ adv7180_data.sen.on = true;
++
++ adv7180_data.sen.sensor_clk = devm_clk_get(dev, "csi_mclk");
++ if (IS_ERR(adv7180_data.sen.sensor_clk)) {
++ dev_err(dev, "get mclk failed\n");
++ return PTR_ERR(adv7180_data.sen.sensor_clk);
++ }
++
++ ret = of_property_read_u32(dev->of_node, "mclk",
++ &adv7180_data.sen.mclk);
++ if (ret) {
++ dev_err(dev, "mclk frequency is invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(
++ dev->of_node, "mclk_source",
++ (u32 *) &(adv7180_data.sen.mclk_source));
++ if (ret) {
++ dev_err(dev, "mclk_source invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(dev->of_node, "csi_id",
++ &(adv7180_data.sen.csi));
++ if (ret) {
++ dev_err(dev, "csi_id invalid\n");
++ return ret;
++ }
++
++ clk_prepare_enable(adv7180_data.sen.sensor_clk);
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "%s:adv7180 probe i2c address is 0x%02X\n",
++ __func__, adv7180_data.sen.i2c_client->addr);
++
++ /*! Read the revision ID of the tvin chip */
++ rev_id = adv7180_read(ADV7180_IDENT);
++ dev_dbg(dev,
++ "%s:Analog Device adv7%2X0 detected!\n", __func__,
++ rev_id);
++
++ ret = of_property_read_u32(dev->of_node, "cvbs", &(cvbs));
++ if (ret) {
++ dev_err(dev, "cvbs setting is not found\n");
++ cvbs = true;
++ }
++
++ /*! ADV7180 initialization. */
++ adv7180_hard_reset(cvbs);
++
++ pr_debug(" type is %d (expect %d)\n",
++ adv7180_int_device.type, v4l2_int_type_slave);
++ pr_debug(" num ioctls is %d\n",
++ adv7180_int_device.u.slave->num_ioctls);
++
++ /* This function attaches this structure to the /dev/video0 device.
++ * The pointer in priv points to the adv7180_data structure here.*/
++ adv7180_int_device.priv = &adv7180_data;
++ ret = v4l2_int_device_register(&adv7180_int_device);
++
++ clk_disable_unprepare(adv7180_data.sen.sensor_clk);
++
++ return ret;
++}
++
++/*!
++ * ADV7180 I2C detach function.
++ * Called on rmmod.
++ *
++ * @param *client struct i2c_client*.
++ *
++ * @return Error code indicating success or failure.
++ */
++static int adv7180_detach(struct i2c_client *client)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "%s:Removing %s video decoder @ 0x%02X from adapter %s\n",
++ __func__, IF_NAME, client->addr << 1, client->adapter->name);
++
++ /* Power down via i2c */
++ adv7180_write_reg(ADV7180_PWR_MNG, 0x24);
++
++ if (dvddio_regulator)
++ regulator_disable(dvddio_regulator);
++
++ if (dvdd_regulator)
++ regulator_disable(dvdd_regulator);
++
++ if (avdd_regulator)
++ regulator_disable(avdd_regulator);
++
++ if (pvdd_regulator)
++ regulator_disable(pvdd_regulator);
++
++ v4l2_int_device_unregister(&adv7180_int_device);
++
++ return 0;
++}
++
++/*!
++ * ADV7180 init function.
++ * Called on insmod.
++ *
++ * @return Error code indicating success or failure.
++ */
++static __init int adv7180_init(void)
++{
++ u8 err = 0;
++
++ pr_debug("In adv7180_init\n");
++
++ /* Tells the i2c driver what functions to call for this driver. */
++ err = i2c_add_driver(&adv7180_i2c_driver);
++ if (err != 0)
++ pr_err("%s:driver registration failed, error=%d\n",
++ __func__, err);
++
++ return err;
++}
++
++/*!
++ * ADV7180 cleanup function.
++ * Called on rmmod.
++ *
++ * @return Error code indicating success or failure.
++ */
++static void __exit adv7180_clean(void)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180_clean\n");
++ i2c_del_driver(&adv7180_i2c_driver);
++}
++
++module_init(adv7180_init);
++module_exit(adv7180_clean);
++
++MODULE_AUTHOR("Freescale Semiconductor");
++MODULE_DESCRIPTION("Anolog Device ADV7180 video decoder driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/csi_v4l2_capture.c linux-openelec/drivers/media/platform/mxc/capture/csi_v4l2_capture.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/csi_v4l2_capture.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/csi_v4l2_capture.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2047 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file drivers/media/video/mxc/capture/csi_v4l2_capture.c
++ * This file is derived from mxc_v4l2_capture.c
++ *
++ * @brief Video For Linux 2 capture driver
++ *
++ * @ingroup MXC_V4L2_CAPTURE
++ */
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/semaphore.h>
++#include <linux/pagemap.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/mxcfb.h>
++#include <linux/dma-mapping.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-int-device.h>
++#include <media/v4l2-chip-ident.h>
++#include "mxc_v4l2_capture.h"
++#include "fsl_csi.h"
++
++static int video_nr = -1;
++static cam_data *g_cam;
++static int req_buf_number;
++
++static int csi_v4l2_master_attach(struct v4l2_int_device *slave);
++static void csi_v4l2_master_detach(struct v4l2_int_device *slave);
++static u8 camera_power(cam_data *cam, bool cameraOn);
++struct v4l2_crop crop_current;
++struct v4l2_window win_current;
++
++/*! Information about this driver. */
++static struct v4l2_int_master csi_v4l2_master = {
++ .attach = csi_v4l2_master_attach,
++ .detach = csi_v4l2_master_detach,
++};
++
++static struct v4l2_int_device csi_v4l2_int_device = {
++ .module = THIS_MODULE,
++ .name = "csi_v4l2_cap",
++ .type = v4l2_int_type_master,
++ .u = {
++ .master = &csi_v4l2_master,
++ },
++};
++
++static struct v4l2_queryctrl pxp_controls[] = {
++ {
++ .id = V4L2_CID_HFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Horizontal Flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = 0,
++ }, {
++ .id = V4L2_CID_VFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Vertical Flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = 0,
++ }, {
++ .id = V4L2_CID_PRIVATE_BASE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Rotation",
++ .minimum = 0,
++ .maximum = 270,
++ .step = 90,
++ .default_value = 0,
++ .flags = 0,
++ },
++};
++
++/* Callback function triggered after PxP receives an EOF interrupt */
++static void pxp_dma_done(void *arg)
++{
++ struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
++ struct dma_chan *chan = tx_desc->txd.chan;
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ cam_data *cam = pxp_chan->client;
++
++ /* This call will signal wait_for_completion_timeout() */
++ complete(&cam->pxp_tx_cmpl);
++}
++
++static bool chan_filter(struct dma_chan *chan, void *arg)
++{
++ if (imx_dma_is_pxp(chan))
++ return true;
++ else
++ return false;
++}
++
++/* Function to request PXP DMA channel */
++static int pxp_chan_init(cam_data *cam)
++{
++ dma_cap_mask_t mask;
++ struct dma_chan *chan;
++
++ /* Request a free channel */
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_PRIVATE, mask);
++ chan = dma_request_channel(mask, chan_filter, NULL);
++ if (!chan) {
++ pr_err("Unsuccessfully request channel!\n");
++ return -EBUSY;
++ }
++
++ cam->pxp_chan = to_pxp_channel(chan);
++ cam->pxp_chan->client = cam;
++
++ init_completion(&cam->pxp_tx_cmpl);
++
++ return 0;
++}
++
++/*
++ * Function to call PxP DMA driver and send our new V4L2 buffer
++ * through the PxP.
++ * Note: This is a blocking call, so upon return the PxP tx should be complete.
++ */
++static int pxp_process_update(cam_data *cam)
++{
++ dma_cookie_t cookie;
++ struct scatterlist *sg = cam->sg;
++ struct dma_chan *dma_chan;
++ struct pxp_tx_desc *desc;
++ struct dma_async_tx_descriptor *txd;
++ struct pxp_config_data *pxp_conf = &cam->pxp_conf;
++ struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
++ int i, ret;
++ int length;
++
++ pr_debug("Starting PxP Send Buffer\n");
++
++ /* First, check to see that we have acquired a PxP Channel object */
++ if (cam->pxp_chan == NULL) {
++ /*
++ * PxP Channel has not yet been created and initialized,
++ * so let's go ahead and try
++ */
++ ret = pxp_chan_init(cam);
++ if (ret) {
++ /*
++ * PxP channel init failed, and we can't use the
++ * PxP until the PxP DMA driver has loaded, so we abort
++ */
++ pr_err("PxP chan init failed\n");
++ return -ENODEV;
++ }
++ }
++
++ /*
++ * Init completion, so that we can be properly informed of
++ * the completion of the PxP task when it is done.
++ */
++ init_completion(&cam->pxp_tx_cmpl);
++
++ dma_chan = &cam->pxp_chan->dma_chan;
++
++ txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg, 2,
++ DMA_TO_DEVICE,
++ DMA_PREP_INTERRUPT,
++ NULL);
++ if (!txd) {
++ pr_err("Error preparing a DMA transaction descriptor.\n");
++ return -EIO;
++ }
++
++ txd->callback_param = txd;
++ txd->callback = pxp_dma_done;
++
++ /*
++ * Configure PxP for processing of new v4l2 buf
++ */
++ pxp_conf->s0_param.pixel_fmt = PXP_PIX_FMT_UYVY;
++ pxp_conf->s0_param.color_key = -1;
++ pxp_conf->s0_param.color_key_enable = false;
++ pxp_conf->s0_param.width = cam->v2f.fmt.pix.width;
++ pxp_conf->s0_param.height = cam->v2f.fmt.pix.height;
++
++ pxp_conf->ol_param[0].combine_enable = false;
++
++ proc_data->srect.top = 0;
++ proc_data->srect.left = 0;
++ proc_data->srect.width = pxp_conf->s0_param.width;
++ proc_data->srect.height = pxp_conf->s0_param.height;
++
++ if (crop_current.c.top != 0)
++ proc_data->srect.top = crop_current.c.top;
++ if (crop_current.c.left != 0)
++ proc_data->srect.left = crop_current.c.left;
++ if (crop_current.c.width != 0)
++ proc_data->srect.width = crop_current.c.width;
++ if (crop_current.c.height != 0)
++ proc_data->srect.height = crop_current.c.height;
++
++ proc_data->drect.left = 0;
++ proc_data->drect.top = 0;
++ proc_data->drect.width = proc_data->srect.width;
++ proc_data->drect.height = proc_data->srect.height;
++
++ if (win_current.w.left != 0)
++ proc_data->drect.left = win_current.w.left;
++ if (win_current.w.top != 0)
++ proc_data->drect.top = win_current.w.top;
++ if (win_current.w.width != 0)
++ proc_data->drect.width = win_current.w.width;
++ if (win_current.w.height != 0)
++ proc_data->drect.height = win_current.w.height;
++
++ pr_debug("srect l: %d, t: %d, w: %d, h: %d; "
++ "drect l: %d, t: %d, w: %d, h: %d\n",
++ proc_data->srect.left, proc_data->srect.top,
++ proc_data->srect.width, proc_data->srect.height,
++ proc_data->drect.left, proc_data->drect.top,
++ proc_data->drect.width, proc_data->drect.height);
++
++ pxp_conf->out_param.pixel_fmt = PXP_PIX_FMT_RGB565;
++ pxp_conf->out_param.width = proc_data->drect.width;
++ pxp_conf->out_param.height = proc_data->drect.height;
++
++ if (cam->rotation % 180)
++ pxp_conf->out_param.stride = pxp_conf->out_param.height;
++ else
++ pxp_conf->out_param.stride = pxp_conf->out_param.width;
++
++ desc = to_tx_desc(txd);
++ length = desc->len;
++ for (i = 0; i < length; i++) {
++ if (i == 0) {/* S0 */
++ memcpy(&desc->proc_data, proc_data,
++ sizeof(struct pxp_proc_data));
++ pxp_conf->s0_param.paddr = sg_dma_address(&sg[0]);
++ memcpy(&desc->layer_param.s0_param, &pxp_conf->s0_param,
++ sizeof(struct pxp_layer_param));
++ } else if (i == 1) {
++ pxp_conf->out_param.paddr = sg_dma_address(&sg[1]);
++ memcpy(&desc->layer_param.out_param,
++ &pxp_conf->out_param,
++ sizeof(struct pxp_layer_param));
++ }
++
++ desc = desc->next;
++ }
++
++ /* Submitting our TX starts the PxP processing task */
++ cookie = txd->tx_submit(txd);
++ if (cookie < 0) {
++ pr_err("Error sending FB through PxP\n");
++ return -EIO;
++ }
++
++ cam->txd = txd;
++
++ /* trigger PxP */
++ dma_async_issue_pending(dma_chan);
++
++ return 0;
++}
++
++static int pxp_complete_update(cam_data *cam)
++{
++ int ret;
++ /*
++ * Wait for completion event, which will be set
++ * through our TX callback function.
++ */
++ ret = wait_for_completion_timeout(&cam->pxp_tx_cmpl, HZ / 10);
++ if (ret <= 0) {
++ pr_warning("PxP operation failed due to %s\n",
++ ret < 0 ? "user interrupt" : "timeout");
++ dma_release_channel(&cam->pxp_chan->dma_chan);
++ cam->pxp_chan = NULL;
++ return ret ? : -ETIMEDOUT;
++ }
++
++ dma_release_channel(&cam->pxp_chan->dma_chan);
++ cam->pxp_chan = NULL;
++
++ pr_debug("TX completed\n");
++
++ return 0;
++}
++
++/*!
++ * Camera V4l2 callback function.
++ *
++ * @param mask u32
++ * @param dev void device structure
++ *
++ * @return none
++ */
++static void camera_callback(u32 mask, void *dev)
++{
++ struct mxc_v4l_frame *done_frame;
++ struct mxc_v4l_frame *ready_frame;
++ cam_data *cam;
++
++ cam = (cam_data *) dev;
++ if (cam == NULL)
++ return;
++
++ spin_lock(&cam->queue_int_lock);
++ spin_lock(&cam->dqueue_int_lock);
++ if (!list_empty(&cam->working_q)) {
++ done_frame = list_entry(cam->working_q.next,
++ struct mxc_v4l_frame, queue);
++
++ if (done_frame->csi_buf_num != cam->ping_pong_csi)
++ goto next;
++
++ if (done_frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
++ done_frame->buffer.flags |= V4L2_BUF_FLAG_DONE;
++ done_frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
++
++ /* Added to the done queue */
++ list_del(cam->working_q.next);
++ list_add_tail(&done_frame->queue, &cam->done_q);
++ cam->enc_counter++;
++ wake_up_interruptible(&cam->enc_queue);
++ } else {
++ pr_err("ERROR: v4l2 capture: %s: "
++ "buffer not queued\n", __func__);
++ }
++ }
++
++next:
++ if (!list_empty(&cam->ready_q)) {
++ ready_frame = list_entry(cam->ready_q.next,
++ struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&ready_frame->queue, &cam->working_q);
++
++ __raw_writel(ready_frame->paddress,
++ cam->ping_pong_csi == 1 ? CSI_CSIDMASA_FB1 :
++ CSI_CSIDMASA_FB2);
++ ready_frame->csi_buf_num = cam->ping_pong_csi;
++ } else {
++ __raw_writel(cam->dummy_frame.paddress,
++ cam->ping_pong_csi == 1 ? CSI_CSIDMASA_FB1 :
++ CSI_CSIDMASA_FB2);
++ }
++ spin_unlock(&cam->dqueue_int_lock);
++ spin_unlock(&cam->queue_int_lock);
++
++ return;
++}
++
++/*!
++ * Make csi ready for capture image.
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 success
++ */
++static int csi_cap_image(cam_data *cam)
++{
++ unsigned int value;
++
++ value = __raw_readl(CSI_CSICR3);
++ __raw_writel(value | BIT_FRMCNT_RST, CSI_CSICR3);
++ value = __raw_readl(CSI_CSISR);
++ __raw_writel(value, CSI_CSISR);
++
++ return 0;
++}
++
++/***************************************************************************
++ * Functions for handling Frame buffers.
++ **************************************************************************/
++
++/*!
++ * Free frame buffers
++ *
++ * @param cam Structure cam_data *
++ *
++ * @return status 0 success.
++ */
++static int csi_free_frame_buf(cam_data *cam)
++{
++ int i;
++
++ pr_debug("MVC: In %s\n", __func__);
++
++ for (i = 0; i < FRAME_NUM; i++) {
++ if (cam->frame[i].vaddress != 0) {
++ dma_free_coherent(0, cam->frame[i].buffer.length,
++ cam->frame[i].vaddress,
++ cam->frame[i].paddress);
++ cam->frame[i].vaddress = 0;
++ }
++ }
++
++ if (cam->dummy_frame.vaddress != 0) {
++ dma_free_coherent(0, cam->dummy_frame.buffer.length,
++ cam->dummy_frame.vaddress,
++ cam->dummy_frame.paddress);
++ cam->dummy_frame.vaddress = 0;
++ }
++
++ return 0;
++}
++
++/*!
++ * Allocate frame buffers
++ *
++ * @param cam Structure cam_data *
++ * @param count int number of buffer need to allocated
++ *
++ * @return status -0 Successfully allocated a buffer, -ENOBUFS failed.
++ */
++static int csi_allocate_frame_buf(cam_data *cam, int count)
++{
++ int i;
++
++ pr_debug("In MVC:%s- size=%d\n",
++ __func__, cam->v2f.fmt.pix.sizeimage);
++ for (i = 0; i < count; i++) {
++ cam->frame[i].vaddress = dma_alloc_coherent(0, PAGE_ALIGN
++ (cam->v2f.fmt.
++ pix.sizeimage),
++ &cam->frame[i].
++ paddress,
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->frame[i].vaddress == 0) {
++ pr_err("ERROR: v4l2 capture: "
++ "%s failed.\n", __func__);
++ csi_free_frame_buf(cam);
++ return -ENOBUFS;
++ }
++ cam->frame[i].buffer.index = i;
++ cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++ cam->frame[i].buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cam->frame[i].buffer.length = cam->v2f.fmt.pix.sizeimage;
++ cam->frame[i].buffer.memory = V4L2_MEMORY_MMAP;
++ cam->frame[i].buffer.m.offset = cam->frame[i].paddress;
++ cam->frame[i].index = i;
++ cam->frame[i].csi_buf_num = 0;
++ }
++
++ return 0;
++}
++
++/*!
++ * Free frame buffers status
++ *
++ * @param cam Structure cam_data *
++ *
++ * @return none
++ */
++static void csi_free_frames(cam_data *cam)
++{
++ int i;
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ for (i = 0; i < FRAME_NUM; i++)
++ cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++
++ cam->enc_counter = 0;
++ INIT_LIST_HEAD(&cam->ready_q);
++ INIT_LIST_HEAD(&cam->working_q);
++ INIT_LIST_HEAD(&cam->done_q);
++
++ return;
++}
++
++/*!
++ * Return the buffer status
++ *
++ * @param cam Structure cam_data *
++ * @param buf Structure v4l2_buffer *
++ *
++ * @return status 0 success, EINVAL failed.
++ */
++static int csi_v4l2_buffer_status(cam_data *cam, struct v4l2_buffer *buf)
++{
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (buf->index < 0 || buf->index >= FRAME_NUM) {
++ pr_err("ERROR: v4l2 capture: %s buffers "
++ "not allocated\n", __func__);
++ return -EINVAL;
++ }
++
++ memcpy(buf, &(cam->frame[buf->index].buffer), sizeof(*buf));
++
++ return 0;
++}
++
++static int csi_v4l2_release_bufs(cam_data *cam)
++{
++ pr_debug("In MVC:csi_v4l2_release_bufs\n");
++ return 0;
++}
++
++static int csi_v4l2_prepare_bufs(cam_data *cam, struct v4l2_buffer *buf)
++{
++ pr_debug("In MVC:csi_v4l2_prepare_bufs\n");
++
++ if (buf->index < 0 || buf->index >= FRAME_NUM || buf->length <
++ cam->v2f.fmt.pix.sizeimage) {
++ pr_err("ERROR: v4l2 capture: csi_v4l2_prepare_bufs buffers "
++ "not allocated,index=%d, length=%d\n", buf->index,
++ buf->length);
++ return -EINVAL;
++ }
++
++ cam->frame[buf->index].buffer.index = buf->index;
++ cam->frame[buf->index].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++ cam->frame[buf->index].buffer.length = buf->length;
++ cam->frame[buf->index].buffer.m.offset = cam->frame[buf->index].paddress
++ = buf->m.offset;
++ cam->frame[buf->index].buffer.type = buf->type;
++ cam->frame[buf->index].buffer.memory = V4L2_MEMORY_USERPTR;
++ cam->frame[buf->index].index = buf->index;
++
++ return 0;
++}
++
++/*!
++ * Indicates whether the palette is supported.
++ *
++ * @param palette V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_UYVY or V4L2_PIX_FMT_YUV420
++ *
++ * @return 0 if failed
++ */
++static inline int valid_mode(u32 palette)
++{
++ return (palette == V4L2_PIX_FMT_RGB565) ||
++ (palette == V4L2_PIX_FMT_YUYV) ||
++ (palette == V4L2_PIX_FMT_UYVY) || (palette == V4L2_PIX_FMT_YUV420);
++}
++
++/*!
++ * Start stream I/O
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int csi_streamon(cam_data *cam)
++{
++ struct mxc_v4l_frame *frame;
++ unsigned long flags;
++ unsigned long val;
++ int timeout, timeout2;
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (NULL == cam) {
++ pr_err("ERROR: v4l2 capture: %s cam parameter is NULL\n",
++ __func__);
++ return -1;
++ }
++ cam->dummy_frame.vaddress = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->dummy_frame.paddress,
++ GFP_DMA | GFP_KERNEL);
++ if (cam->dummy_frame.vaddress == 0) {
++ pr_err("ERROR: v4l2 capture: Allocate dummy frame "
++ "failed.\n");
++ return -ENOBUFS;
++ }
++ cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
++ cam->dummy_frame.buffer.length = cam->v2f.fmt.pix.sizeimage;
++ cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
++
++ spin_lock_irqsave(&cam->queue_int_lock, flags);
++ /* move the frame from readyq to workingq */
++ if (list_empty(&cam->ready_q)) {
++ pr_err("ERROR: v4l2 capture: %s: "
++ "ready_q queue empty\n", __func__);
++ spin_unlock_irqrestore(&cam->queue_int_lock, flags);
++ return -1;
++ }
++ frame = list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&frame->queue, &cam->working_q);
++ __raw_writel(frame->paddress, CSI_CSIDMASA_FB1);
++ frame->csi_buf_num = 1;
++
++ if (list_empty(&cam->ready_q)) {
++ pr_err("ERROR: v4l2 capture: %s: "
++ "ready_q queue empty\n", __func__);
++ spin_unlock_irqrestore(&cam->queue_int_lock, flags);
++ return -1;
++ }
++ frame = list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&frame->queue, &cam->working_q);
++ __raw_writel(frame->paddress, CSI_CSIDMASA_FB2);
++ frame->csi_buf_num = 2;
++ spin_unlock_irqrestore(&cam->queue_int_lock, flags);
++
++ cam->capture_pid = current->pid;
++ cam->capture_on = true;
++ csi_cap_image(cam);
++
++ local_irq_save(flags);
++ for (timeout = 1000000; timeout > 0; timeout--) {
++ if (__raw_readl(CSI_CSISR) & BIT_SOF_INT) {
++ val = __raw_readl(CSI_CSICR3);
++ __raw_writel(val | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++ for (timeout2 = 1000000; timeout2 > 0; timeout2--) {
++ if (__raw_readl(CSI_CSICR3) &
++ BIT_DMA_REFLASH_RFF)
++ cpu_relax();
++ else
++ break;
++ }
++ if (timeout2 <= 0) {
++ pr_err("timeout when wait for reflash done.\n");
++ local_irq_restore(flags);
++ return -ETIME;
++ }
++
++ csi_dmareq_rff_enable();
++ csi_enable_int(1);
++ break;
++ } else
++ cpu_relax();
++ }
++ if (timeout <= 0) {
++ pr_err("timeout when wait for SOF\n");
++ local_irq_restore(flags);
++ return -ETIME;
++ }
++ local_irq_restore(flags);
++
++ return 0;
++}
++
++/*!
++ * Stop stream I/O
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int csi_streamoff(cam_data *cam)
++{
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (cam->capture_on == false)
++ return 0;
++
++ csi_dmareq_rff_disable();
++ csi_disable_int();
++ cam->capture_on = false;
++
++ /* set CSI_CSIDMASA_FB1 and CSI_CSIDMASA_FB2 to default value */
++ __raw_writel(0, CSI_CSIDMASA_FB1);
++ __raw_writel(0, CSI_CSIDMASA_FB2);
++
++ csi_free_frames(cam);
++ csi_free_frame_buf(cam);
++
++ return 0;
++}
++
++/*!
++ * start the viewfinder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int start_preview(cam_data *cam)
++{
++ unsigned long fb_addr = (unsigned long)cam->v4l2_fb.base;
++
++ __raw_writel(fb_addr, CSI_CSIDMASA_FB1);
++ __raw_writel(fb_addr, CSI_CSIDMASA_FB2);
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++
++ csi_enable_int(0);
++
++ return 0;
++}
++
++/*!
++ * shut down the viewfinder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int stop_preview(cam_data *cam)
++{
++ csi_disable_int();
++
++ /* set CSI_CSIDMASA_FB1 and CSI_CSIDMASA_FB2 to default value */
++ __raw_writel(0, CSI_CSIDMASA_FB1);
++ __raw_writel(0, CSI_CSIDMASA_FB2);
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++
++ return 0;
++}
++
++/***************************************************************************
++ * VIDIOC Functions.
++ **************************************************************************/
++
++/*!
++ *
++ * @param cam structure cam_data *
++ *
++ * @param f structure v4l2_format *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int csi_v4l2_g_fmt(cam_data *cam, struct v4l2_format *f)
++{
++ int retval = 0;
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ f->fmt.pix = cam->v2f.fmt.pix;
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
++ f->fmt.win = cam->win;
++ break;
++ default:
++ pr_debug(" type is invalid\n");
++ retval = -EINVAL;
++ }
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__, cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++
++ return retval;
++}
++
++/*!
++ * V4L2 - csi_v4l2_s_fmt function
++ *
++ * @param cam structure cam_data *
++ *
++ * @param f structure v4l2_format *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int csi_v4l2_s_fmt(cam_data *cam, struct v4l2_format *f)
++{
++ int retval = 0;
++ int size = 0;
++ int bytesperline = 0;
++ int *width, *height;
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type=V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ if (!valid_mode(f->fmt.pix.pixelformat)) {
++ pr_err("ERROR: v4l2 capture: %s: format "
++ "not supported\n", __func__);
++ return -EINVAL;
++ }
++
++ /* Handle case where size requested is larger than cuurent
++ * camera setting. */
++ if ((f->fmt.pix.width > cam->crop_bounds.width)
++ || (f->fmt.pix.height > cam->crop_bounds.height)) {
++ /* Need the logic here, calling vidioc_s_param if
++ * camera can change. */
++ pr_debug("csi_v4l2_s_fmt size changed\n");
++ }
++ if (cam->rotation % 180) {
++ height = &f->fmt.pix.width;
++ width = &f->fmt.pix.height;
++ } else {
++ width = &f->fmt.pix.width;
++ height = &f->fmt.pix.height;
++ }
++
++ if ((cam->crop_bounds.width / *width > 8) ||
++ ((cam->crop_bounds.width / *width == 8) &&
++ (cam->crop_bounds.width % *width))) {
++ *width = cam->crop_bounds.width / 8;
++ if (*width % 8)
++ *width += 8 - *width % 8;
++ pr_err("ERROR: v4l2 capture: width exceeds limit "
++ "resize to %d.\n", *width);
++ }
++
++ if ((cam->crop_bounds.height / *height > 8) ||
++ ((cam->crop_bounds.height / *height == 8) &&
++ (cam->crop_bounds.height % *height))) {
++ *height = cam->crop_bounds.height / 8;
++ if (*height % 8)
++ *height += 8 - *height % 8;
++ pr_err("ERROR: v4l2 capture: height exceeds limit "
++ "resize to %d.\n", *height);
++ }
++
++ switch (f->fmt.pix.pixelformat) {
++ case V4L2_PIX_FMT_RGB565:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ csi_init_format(V4L2_PIX_FMT_UYVY);
++ csi_set_16bit_imagpara(f->fmt.pix.width,
++ f->fmt.pix.height);
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_UYVY:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ csi_init_format(f->fmt.pix.pixelformat);
++ csi_set_16bit_imagpara(f->fmt.pix.width,
++ f->fmt.pix.height);
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_YUYV:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ csi_init_format(f->fmt.pix.pixelformat);
++ csi_set_16bit_imagpara(f->fmt.pix.width,
++ f->fmt.pix.height);
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_YUV420:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3 / 2;
++ csi_set_12bit_imagpara(f->fmt.pix.width,
++ f->fmt.pix.height);
++ bytesperline = f->fmt.pix.width;
++ break;
++ case V4L2_PIX_FMT_YUV422P:
++ case V4L2_PIX_FMT_RGB24:
++ case V4L2_PIX_FMT_BGR24:
++ case V4L2_PIX_FMT_BGR32:
++ case V4L2_PIX_FMT_RGB32:
++ case V4L2_PIX_FMT_NV12:
++ default:
++ pr_debug(" case not supported\n");
++ break;
++ }
++
++ if (f->fmt.pix.bytesperline < bytesperline)
++ f->fmt.pix.bytesperline = bytesperline;
++ else
++ bytesperline = f->fmt.pix.bytesperline;
++
++ if (f->fmt.pix.sizeimage < size)
++ f->fmt.pix.sizeimage = size;
++ else
++ size = f->fmt.pix.sizeimage;
++
++ cam->v2f.fmt.pix = f->fmt.pix;
++
++ if (cam->v2f.fmt.pix.priv != 0) {
++ if (copy_from_user(&cam->offset,
++ (void *)cam->v2f.fmt.pix.priv,
++ sizeof(cam->offset))) {
++ retval = -EFAULT;
++ break;
++ }
++ }
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ pr_debug(" type=V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
++ cam->win = f->fmt.win;
++ win_current = f->fmt.win;
++ size = win_current.w.width * win_current.w.height * 2;
++ if (cam->v2f.fmt.pix.sizeimage < size)
++ cam->v2f.fmt.pix.sizeimage = size;
++
++ break;
++ default:
++ retval = -EINVAL;
++ }
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__, cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++
++ return retval;
++}
++
++/*!
++ * V4L2 - csi_v4l2_s_param function
++ * Allows setting of capturemode and frame rate.
++ *
++ * @param cam structure cam_data *
++ * @param parm structure v4l2_streamparm *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int csi_v4l2_s_param(cam_data *cam, struct v4l2_streamparm *parm)
++{
++ struct v4l2_ifparm ifparm;
++ struct v4l2_format cam_fmt;
++ struct v4l2_streamparm currentparm;
++ int err = 0;
++
++ pr_debug("In %s\n", __func__);
++
++ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ pr_err(KERN_ERR "%s invalid type\n", __func__);
++ return -EINVAL;
++ }
++
++ /* Stop the viewfinder */
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ currentparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++
++ /* First check that this device can support the changes requested. */
++ err = vidioc_int_g_parm(cam->sensor, &currentparm);
++ if (err) {
++ pr_err("%s: vidioc_int_g_parm returned an error %d\n",
++ __func__, err);
++ goto exit;
++ }
++
++ pr_debug(" Current capabilities are %x\n",
++ currentparm.parm.capture.capability);
++ pr_debug(" Current capturemode is %d change to %d\n",
++ currentparm.parm.capture.capturemode,
++ parm->parm.capture.capturemode);
++ pr_debug(" Current framerate is %d change to %d\n",
++ currentparm.parm.capture.timeperframe.denominator,
++ parm->parm.capture.timeperframe.denominator);
++
++ err = vidioc_int_s_parm(cam->sensor, parm);
++ if (err) {
++ pr_err("%s: vidioc_int_s_parm returned an error %d\n",
++ __func__, err);
++ goto exit;
++ }
++
++ vidioc_int_g_ifparm(cam->sensor, &ifparm);
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++ pr_debug(" g_fmt_cap returns widthxheight of input as %d x %d\n",
++ cam_fmt.fmt.pix.width, cam_fmt.fmt.pix.height);
++
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++ cam->crop_current.width = cam->crop_bounds.width;
++ cam->crop_current.height = cam->crop_bounds.height;
++
++exit:
++ return err;
++}
++
++static int pxp_set_cstate(cam_data *cam, struct v4l2_control *vc)
++{
++ struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
++
++ if (vc->id == V4L2_CID_HFLIP) {
++ proc_data->hflip = vc->value;
++ } else if (vc->id == V4L2_CID_VFLIP) {
++ proc_data->vflip = vc->value;
++ } else if (vc->id == V4L2_CID_PRIVATE_BASE) {
++ if (vc->value % 90)
++ return -ERANGE;
++ proc_data->rotate = vc->value;
++ cam->rotation = vc->value;
++ }
++
++ return 0;
++}
++
++static int pxp_get_cstate(cam_data *cam, struct v4l2_control *vc)
++{
++ struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
++
++ if (vc->id == V4L2_CID_HFLIP)
++ vc->value = proc_data->hflip;
++ else if (vc->id == V4L2_CID_VFLIP)
++ vc->value = proc_data->vflip;
++ else if (vc->id == V4L2_CID_PRIVATE_BASE)
++ vc->value = proc_data->rotate;
++
++ return 0;
++}
++
++
++/*!
++ * Dequeue one V4L capture buffer
++ *
++ * @param cam structure cam_data *
++ * @param buf structure v4l2_buffer *
++ *
++ * @return status 0 success, EINVAL invalid frame number
++ * ETIME timeout, ERESTARTSYS interrupted by user
++ */
++static int csi_v4l_dqueue(cam_data *cam, struct v4l2_buffer *buf)
++{
++ int retval = 0;
++ struct mxc_v4l_frame *frame;
++ unsigned long lock_flags;
++
++ if (!wait_event_interruptible_timeout(cam->enc_queue,
++ cam->enc_counter != 0, 10 * HZ)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue timeout "
++ "enc_counter %x\n", cam->enc_counter);
++ return -ETIME;
++ } else if (signal_pending(current)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue() "
++ "interrupt received\n");
++ return -ERESTARTSYS;
++ }
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EBUSY;
++
++ spin_lock_irqsave(&cam->dqueue_int_lock, lock_flags);
++
++ if (list_empty(&cam->done_q)) {
++ spin_unlock_irqrestore(&cam->dqueue_int_lock, lock_flags);
++ up(&cam->busy_lock);
++ return -EINVAL;
++ }
++
++ cam->enc_counter--;
++
++ frame = list_entry(cam->done_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->done_q.next);
++
++ if (frame->buffer.flags & V4L2_BUF_FLAG_DONE) {
++ frame->buffer.flags &= ~V4L2_BUF_FLAG_DONE;
++ } else if (frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
++ "Buffer not filled.\n");
++ frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
++ retval = -EINVAL;
++ } else if ((frame->buffer.flags & 0x7) == V4L2_BUF_FLAG_MAPPED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
++ "Buffer not queued.\n");
++ retval = -EINVAL;
++ }
++
++ spin_unlock_irqrestore(&cam->dqueue_int_lock, lock_flags);
++
++ buf->bytesused = cam->v2f.fmt.pix.sizeimage;
++ buf->index = frame->index;
++ buf->flags = frame->buffer.flags;
++ buf->m = cam->frame[frame->index].buffer.m;
++
++ /*
++ * Note:
++ * If want to do preview on LCD, use PxP CSC to convert from UYVY
++ * to RGB565; but for encoding, usually we don't use RGB format.
++ */
++ if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
++ sg_dma_address(&cam->sg[0]) = buf->m.offset;
++ sg_dma_address(&cam->sg[1]) =
++ cam->frame[req_buf_number].paddress;
++ retval = pxp_process_update(cam);
++ if (retval) {
++ pr_err("Unable to submit PxP update task.\n");
++ return retval;
++ }
++ pxp_complete_update(cam);
++ if (cam->frame[buf->index].vaddress)
++ memcpy(cam->frame[buf->index].vaddress,
++ cam->frame[req_buf_number].vaddress,
++ cam->v2f.fmt.pix.sizeimage);
++ }
++ up(&cam->busy_lock);
++
++ return retval;
++}
++
++/*!
++ * V4L interface - open function
++ *
++ * @param file structure file *
++ *
++ * @return status 0 success, ENODEV invalid device instance,
++ * ENODEV timeout, ERESTARTSYS interrupted by user
++ */
++static int csi_v4l_open(struct file *file)
++{
++ struct v4l2_ifparm ifparm;
++ struct v4l2_format cam_fmt;
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ struct sensor_data *sensor;
++ int err = 0;
++
++ pr_debug(" device name is %s\n", dev->name);
++
++ if (!cam) {
++ pr_err("%s: Internal error, cam_data not found!\n", __func__);
++ return -EBADF;
++ }
++
++ if (!cam->sensor) {
++ pr_err("%s: Internal error, camera is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ sensor = cam->sensor->priv;
++ if (!sensor) {
++ pr_err("%s: Internal error, sensor_data is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ down(&cam->busy_lock);
++ err = 0;
++ if (signal_pending(current))
++ goto oops;
++
++ if (cam->open_count++ == 0) {
++ wait_event_interruptible(cam->power_queue,
++ cam->low_power == false);
++
++ cam->enc_counter = 0;
++ INIT_LIST_HEAD(&cam->ready_q);
++ INIT_LIST_HEAD(&cam->working_q);
++ INIT_LIST_HEAD(&cam->done_q);
++
++ vidioc_int_g_ifparm(cam->sensor, &ifparm);
++
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ clk_prepare_enable(sensor->sensor_clk);
++ vidioc_int_s_power(cam->sensor, 1);
++ vidioc_int_init(cam->sensor);
++ vidioc_int_dev_init(cam->sensor);
++ }
++
++ file->private_data = dev;
++
++oops:
++ up(&cam->busy_lock);
++ return err;
++}
++
++/*!
++ * V4L interface - close function
++ *
++ * @param file struct file *
++ *
++ * @return 0 success
++ */
++static int csi_v4l_close(struct file *file)
++{
++ struct video_device *dev = video_devdata(file);
++ int err = 0;
++ cam_data *cam = video_get_drvdata(dev);
++ struct sensor_data *sensor;
++
++ pr_debug("In MVC:%s\n", __func__);
++
++ if (!cam) {
++ pr_err("%s: Internal error, cam_data not found!\n", __func__);
++ return -EBADF;
++ }
++
++ if (!cam->sensor) {
++ pr_err("%s: Internal error, camera is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ sensor = cam->sensor->priv;
++ if (!sensor) {
++ pr_err("%s: Internal error, sensor_data is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ /* for the case somebody hit the ctrl C */
++ if (cam->overlay_pid == current->pid) {
++ err = stop_preview(cam);
++ cam->overlay_on = false;
++ }
++
++ if (--cam->open_count == 0) {
++ wait_event_interruptible(cam->power_queue,
++ cam->low_power == false);
++ file->private_data = NULL;
++ vidioc_int_s_power(cam->sensor, 0);
++ clk_disable_unprepare(sensor->sensor_clk);
++ }
++
++ return err;
++}
++
++/*
++ * V4L interface - read function
++ *
++ * @param file struct file *
++ * @param read buf char *
++ * @param count size_t
++ * @param ppos structure loff_t *
++ *
++ * @return bytes read
++ */
++static ssize_t csi_v4l_read(struct file *file, char *buf, size_t count,
++ loff_t *ppos)
++{
++ int err = 0;
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ /* Stop the viewfinder */
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ if (cam->still_buf_vaddr == NULL) {
++ cam->still_buf_vaddr = dma_alloc_coherent(0,
++ PAGE_ALIGN
++ (cam->v2f.fmt.
++ pix.sizeimage),
++ &cam->
++ still_buf[0],
++ GFP_DMA | GFP_KERNEL);
++ if (cam->still_buf_vaddr == NULL) {
++ pr_err("alloc dma memory failed\n");
++ return -ENOMEM;
++ }
++ cam->still_counter = 0;
++ __raw_writel(cam->still_buf[0], CSI_CSIDMASA_FB2);
++ __raw_writel(cam->still_buf[0], CSI_CSIDMASA_FB1);
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_DMA_REFLASH_RFF,
++ CSI_CSICR3);
++ __raw_writel(__raw_readl(CSI_CSISR), CSI_CSISR);
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_FRMCNT_RST,
++ CSI_CSICR3);
++ csi_enable_int(1);
++ }
++
++ wait_event_interruptible(cam->still_queue, cam->still_counter);
++ csi_disable_int();
++ err = copy_to_user(buf, cam->still_buf_vaddr,
++ cam->v2f.fmt.pix.sizeimage);
++
++ if (cam->still_buf_vaddr != NULL) {
++ dma_free_coherent(0, PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ cam->still_buf_vaddr, cam->still_buf[0]);
++ cam->still_buf[0] = 0;
++ cam->still_buf_vaddr = NULL;
++ }
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ up(&cam->busy_lock);
++ if (err < 0)
++ return err;
++
++ return cam->v2f.fmt.pix.sizeimage - err;
++}
++
++/*!
++ * V4L interface - ioctl function
++ *
++ * @param file struct file*
++ *
++ * @param ioctlnr unsigned int
++ *
++ * @param arg void*
++ *
++ * @return 0 success, ENODEV for invalid device instance,
++ * -1 for other errors.
++ */
++static long csi_v4l_do_ioctl(struct file *file,
++ unsigned int ioctlnr, void *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ int retval = 0;
++ unsigned long lock_flags;
++
++ pr_debug("In MVC: %s, %x\n", __func__, ioctlnr);
++ wait_event_interruptible(cam->power_queue, cam->low_power == false);
++ /* make this _really_ smp-safe */
++ if (ioctlnr != VIDIOC_DQBUF)
++ if (down_interruptible(&cam->busy_lock))
++ return -EBUSY;
++
++ switch (ioctlnr) {
++ /*!
++ * V4l2 VIDIOC_G_FMT ioctl
++ */
++ case VIDIOC_G_FMT:{
++ struct v4l2_format *gf = arg;
++ pr_debug(" case VIDIOC_G_FMT\n");
++ retval = csi_v4l2_g_fmt(cam, gf);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_FMT ioctl
++ */
++ case VIDIOC_S_FMT:{
++ struct v4l2_format *sf = arg;
++ pr_debug(" case VIDIOC_S_FMT\n");
++ retval = csi_v4l2_s_fmt(cam, sf);
++ vidioc_int_s_fmt_cap(cam->sensor, sf);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_OVERLAY ioctl
++ */
++ case VIDIOC_OVERLAY:{
++ int *on = arg;
++ pr_debug(" case VIDIOC_OVERLAY\n");
++ if (*on) {
++ cam->overlay_on = true;
++ cam->overlay_pid = current->pid;
++ start_preview(cam);
++ }
++ if (!*on) {
++ stop_preview(cam);
++ cam->overlay_on = false;
++ }
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_FBUF ioctl
++ */
++ case VIDIOC_G_FBUF:{
++ struct v4l2_framebuffer *fb = arg;
++ *fb = cam->v4l2_fb;
++ fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY;
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_FBUF ioctl
++ */
++ case VIDIOC_S_FBUF:{
++ struct v4l2_framebuffer *fb = arg;
++ cam->v4l2_fb = *fb;
++ break;
++ }
++
++ case VIDIOC_G_PARM:{
++ struct v4l2_streamparm *parm = arg;
++ pr_debug(" case VIDIOC_G_PARM\n");
++ vidioc_int_g_parm(cam->sensor, parm);
++ break;
++ }
++
++ case VIDIOC_S_PARM:{
++ struct v4l2_streamparm *parm = arg;
++ pr_debug(" case VIDIOC_S_PARM\n");
++ retval = csi_v4l2_s_param(cam, parm);
++ break;
++ }
++
++ case VIDIOC_QUERYCAP:{
++ struct v4l2_capability *cap = arg;
++ pr_debug(" case VIDIOC_QUERYCAP\n");
++ strcpy(cap->driver, "csi_v4l2");
++ cap->version = KERNEL_VERSION(0, 1, 11);
++ cap->capabilities = V4L2_CAP_VIDEO_OVERLAY |
++ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
++ V4L2_CAP_VIDEO_OUTPUT_OVERLAY | V4L2_CAP_READWRITE;
++ cap->card[0] = '\0';
++ cap->bus_info[0] = '\0';
++ break;
++ }
++
++ case VIDIOC_CROPCAP:
++ {
++ struct v4l2_cropcap *cap = arg;
++
++ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
++ cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
++ retval = -EINVAL;
++ break;
++ }
++ cap->bounds = cam->crop_bounds;
++ cap->defrect = cam->crop_defrect;
++ break;
++ }
++ case VIDIOC_S_CROP:
++ {
++ struct v4l2_crop *crop = arg;
++ struct v4l2_rect *b = &cam->crop_bounds;
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ retval = -EINVAL;
++ break;
++ }
++
++ crop->c.top = (crop->c.top < b->top) ? b->top
++ : crop->c.top;
++ if (crop->c.top > b->top + b->height)
++ crop->c.top = b->top + b->height - 1;
++ if (crop->c.height > b->top + b->height - crop->c.top)
++ crop->c.height =
++ b->top + b->height - crop->c.top;
++
++ crop->c.left = (crop->c.left < b->left) ? b->left
++ : crop->c.left;
++ if (crop->c.left > b->left + b->width)
++ crop->c.left = b->left + b->width - 1;
++ if (crop->c.width > b->left - crop->c.left + b->width)
++ crop->c.width =
++ b->left - crop->c.left + b->width;
++
++ crop->c.width -= crop->c.width % 8;
++ crop->c.height -= crop->c.height % 8;
++
++ crop_current.c = crop->c;
++
++ break;
++ }
++ case VIDIOC_G_CROP:
++ {
++ struct v4l2_crop *crop = arg;
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ retval = -EINVAL;
++ break;
++ }
++ crop->c = crop_current.c;
++
++ break;
++
++ }
++ case VIDIOC_REQBUFS: {
++ struct v4l2_requestbuffers *req = arg;
++ pr_debug(" case VIDIOC_REQBUFS\n");
++
++ if (req->count > FRAME_NUM) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
++ "not enough buffers\n");
++ req->count = FRAME_NUM;
++ }
++
++ if (req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
++ "wrong buffer type\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ csi_streamoff(cam);
++ if (req->memory & V4L2_MEMORY_MMAP) {
++ csi_free_frame_buf(cam);
++ retval = csi_allocate_frame_buf(cam, req->count + 1);
++ req_buf_number = req->count;
++ }
++ break;
++ }
++
++ case VIDIOC_QUERYBUF: {
++ struct v4l2_buffer *buf = arg;
++ int index = buf->index;
++ pr_debug(" case VIDIOC_QUERYBUF\n");
++
++ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ retval = -EINVAL;
++ break;
++ }
++
++ if (buf->memory & V4L2_MEMORY_MMAP) {
++ memset(buf, 0, sizeof(buf));
++ buf->index = index;
++ }
++
++ down(&cam->param_lock);
++ if (buf->memory & V4L2_MEMORY_USERPTR) {
++ csi_v4l2_release_bufs(cam);
++ retval = csi_v4l2_prepare_bufs(cam, buf);
++ }
++ if (buf->memory & V4L2_MEMORY_MMAP)
++ retval = csi_v4l2_buffer_status(cam, buf);
++ up(&cam->param_lock);
++ break;
++ }
++
++ case VIDIOC_QBUF: {
++ struct v4l2_buffer *buf = arg;
++ int index = buf->index;
++ pr_debug(" case VIDIOC_QBUF\n");
++
++ spin_lock_irqsave(&cam->queue_int_lock, lock_flags);
++ cam->frame[index].buffer.m.offset = buf->m.offset;
++ if ((cam->frame[index].buffer.flags & 0x7) ==
++ V4L2_BUF_FLAG_MAPPED) {
++ cam->frame[index].buffer.flags |= V4L2_BUF_FLAG_QUEUED;
++ list_add_tail(&cam->frame[index].queue, &cam->ready_q);
++ } else if (cam->frame[index].buffer.flags &
++ V4L2_BUF_FLAG_QUEUED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
++ "buffer already queued\n");
++ retval = -EINVAL;
++ } else if (cam->frame[index].buffer.
++ flags & V4L2_BUF_FLAG_DONE) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
++ "overwrite done buffer.\n");
++ cam->frame[index].buffer.flags &=
++ ~V4L2_BUF_FLAG_DONE;
++ cam->frame[index].buffer.flags |=
++ V4L2_BUF_FLAG_QUEUED;
++ retval = -EINVAL;
++ }
++ buf->flags = cam->frame[index].buffer.flags;
++ spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
++
++ break;
++ }
++
++ case VIDIOC_DQBUF: {
++ struct v4l2_buffer *buf = arg;
++ pr_debug(" case VIDIOC_DQBUF\n");
++
++ retval = csi_v4l_dqueue(cam, buf);
++
++ break;
++ }
++
++ case VIDIOC_STREAMON: {
++ pr_debug(" case VIDIOC_STREAMON\n");
++ retval = csi_streamon(cam);
++ break;
++ }
++
++ case VIDIOC_STREAMOFF: {
++ pr_debug(" case VIDIOC_STREAMOFF\n");
++ retval = csi_streamoff(cam);
++ break;
++ }
++ case VIDIOC_ENUM_FMT: {
++ struct v4l2_fmtdesc *fmt = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_fmt_cap(cam->sensor, fmt);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_ENUM_FRAMESIZES: {
++ struct v4l2_frmsizeenum *fsize = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_framesizes(cam->sensor, fsize);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_ENUM_FRAMEINTERVALS: {
++ struct v4l2_frmivalenum *fival = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_frameintervals(cam->sensor,
++ fival);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_DBG_G_CHIP_IDENT: {
++ struct v4l2_dbg_chip_ident *p = arg;
++ p->ident = V4L2_IDENT_NONE;
++ p->revision = 0;
++ if (cam->sensor)
++ retval = vidioc_int_g_chip_ident(cam->sensor, (int *)p);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++
++ case VIDIOC_S_CTRL:
++ {
++ struct v4l2_control *vc = arg;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(pxp_controls); i++)
++ if (vc->id == pxp_controls[i].id) {
++ if (vc->value < pxp_controls[i].minimum ||
++ vc->value > pxp_controls[i].maximum) {
++ retval = -ERANGE;
++ break;
++ }
++ retval = pxp_set_cstate(cam, vc);
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(pxp_controls))
++ retval = -EINVAL;
++ break;
++
++ }
++ case VIDIOC_G_CTRL:
++ {
++ struct v4l2_control *vc = arg;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(pxp_controls); i++)
++ if (vc->id == pxp_controls[i].id) {
++ retval = pxp_get_cstate(cam, vc);
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(pxp_controls))
++ retval = -EINVAL;
++ break;
++ }
++ case VIDIOC_QUERYCTRL:
++ {
++ struct v4l2_queryctrl *qc = arg;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(pxp_controls); i++)
++ if (qc->id && qc->id == pxp_controls[i].id) {
++ memcpy(qc, &(pxp_controls[i]), sizeof(*qc));
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(pxp_controls))
++ retval = -EINVAL;
++ break;
++ }
++ case VIDIOC_G_STD:
++ case VIDIOC_G_OUTPUT:
++ case VIDIOC_S_OUTPUT:
++ case VIDIOC_ENUMSTD:
++ case VIDIOC_S_STD:
++ case VIDIOC_TRY_FMT:
++ case VIDIOC_ENUMINPUT:
++ case VIDIOC_G_INPUT:
++ case VIDIOC_S_INPUT:
++ case VIDIOC_G_TUNER:
++ case VIDIOC_S_TUNER:
++ case VIDIOC_G_FREQUENCY:
++ case VIDIOC_S_FREQUENCY:
++ case VIDIOC_ENUMOUTPUT:
++ default:
++ pr_debug(" case not supported\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (ioctlnr != VIDIOC_DQBUF)
++ up(&cam->busy_lock);
++ return retval;
++}
++
++/*
++ * V4L interface - ioctl function
++ *
++ * @return None
++ */
++static long csi_v4l_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ return video_usercopy(file, cmd, arg, csi_v4l_do_ioctl);
++}
++
++/*!
++ * V4L interface - mmap function
++ *
++ * @param file structure file *
++ *
++ * @param vma structure vm_area_struct *
++ *
++ * @return status 0 Success, EINTR busy lock error, ENOBUFS remap_page error
++ */
++static int csi_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct video_device *dev = video_devdata(file);
++ unsigned long size;
++ int res = 0;
++ cam_data *cam = video_get_drvdata(dev);
++
++ pr_debug("%s\n", __func__);
++ pr_debug("\npgoff=0x%lx, start=0x%lx, end=0x%lx\n",
++ vma->vm_pgoff, vma->vm_start, vma->vm_end);
++
++ /* make this _really_ smp-safe */
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ size = vma->vm_end - vma->vm_start;
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ vma->vm_pgoff, size, vma->vm_page_prot)) {
++ pr_err("ERROR: v4l2 capture: %s : "
++ "remap_pfn_range failed\n", __func__);
++ res = -ENOBUFS;
++ goto csi_mmap_exit;
++ }
++
++ vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
++
++csi_mmap_exit:
++ up(&cam->busy_lock);
++ return res;
++}
++
++/*!
++ * This structure defines the functions to be called in this driver.
++ */
++static struct v4l2_file_operations csi_v4l_fops = {
++ .owner = THIS_MODULE,
++ .open = csi_v4l_open,
++ .release = csi_v4l_close,
++ .read = csi_v4l_read,
++ .ioctl = csi_v4l_ioctl,
++ .mmap = csi_mmap,
++};
++
++static struct video_device csi_v4l_template = {
++ .name = "Mx25 Camera",
++ .fops = &csi_v4l_fops,
++ .release = video_device_release,
++};
++
++/*!
++ * initialize cam_data structure
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static void init_camera_struct(cam_data *cam)
++{
++ struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
++ pr_debug("In MVC: %s\n", __func__);
++
++ proc_data->hflip = 0;
++ proc_data->vflip = 0;
++ proc_data->rotate = 0;
++ proc_data->bgcolor = 0;
++
++ /* Default everything to 0 */
++ memset(cam, 0, sizeof(cam_data));
++
++ sema_init(&cam->param_lock, 1);
++ sema_init(&cam->busy_lock, 1);
++
++ cam->video_dev = video_device_alloc();
++ if (cam->video_dev == NULL)
++ return;
++
++ *(cam->video_dev) = csi_v4l_template;
++
++ video_set_drvdata(cam->video_dev, cam);
++ cam->video_dev->minor = -1;
++
++ init_waitqueue_head(&cam->enc_queue);
++ init_waitqueue_head(&cam->still_queue);
++
++ cam->streamparm.parm.capture.capturemode = 0;
++
++ cam->standard.index = 0;
++ cam->standard.id = V4L2_STD_UNKNOWN;
++ cam->standard.frameperiod.denominator = 30;
++ cam->standard.frameperiod.numerator = 1;
++ cam->standard.framelines = 480;
++ cam->standard_autodetect = true;
++ cam->streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cam->streamparm.parm.capture.timeperframe = cam->standard.frameperiod;
++ cam->streamparm.parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
++ cam->overlay_on = false;
++ cam->capture_on = false;
++ cam->v4l2_fb.flags = V4L2_FBUF_FLAG_OVERLAY;
++
++ cam->v2f.fmt.pix.sizeimage = 480 * 640 * 2;
++ cam->v2f.fmt.pix.bytesperline = 640 * 2;
++ cam->v2f.fmt.pix.width = 640;
++ cam->v2f.fmt.pix.height = 480;
++ cam->v2f.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
++ cam->win.w.width = 160;
++ cam->win.w.height = 160;
++ cam->win.w.left = 0;
++ cam->win.w.top = 0;
++ cam->still_counter = 0;
++ /* setup cropping */
++ cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = 640;
++ cam->crop_bounds.top = 0;
++ cam->crop_bounds.height = 480;
++ cam->crop_current = cam->crop_defrect = cam->crop_bounds;
++
++ cam->enc_callback = camera_callback;
++ csi_start_callback(cam);
++ init_waitqueue_head(&cam->power_queue);
++ spin_lock_init(&cam->queue_int_lock);
++ spin_lock_init(&cam->dqueue_int_lock);
++}
++
++/*!
++ * camera_power function
++ * Turns Sensor power On/Off
++ *
++ * @param cam cam data struct
++ * @param cameraOn true to turn camera on, false to turn off power.
++ *
++ * @return status
++ */
++static u8 camera_power(cam_data *cam, bool cameraOn)
++{
++ pr_debug("In MVC: %s on=%d\n", __func__, cameraOn);
++
++ if (cameraOn == true) {
++ vidioc_int_s_power(cam->sensor, 1);
++ } else {
++ vidioc_int_s_power(cam->sensor, 0);
++ }
++ return 0;
++}
++
++static const struct of_device_id imx_csi_v4l2_dt_ids[] = {
++ { .compatible = "fsl,imx6sl-csi-v4l2", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_csi_v4l2_dt_ids);
++
++static int csi_v4l2_probe(struct platform_device *pdev)
++{
++ struct scatterlist *sg;
++ u8 err = 0;
++
++ /* Create g_cam and initialize it. */
++ g_cam = kmalloc(sizeof(cam_data), GFP_KERNEL);
++ if (g_cam == NULL) {
++ pr_err("ERROR: v4l2 capture: failed to register camera\n");
++ err = -ENOMEM;
++ goto out;
++ }
++ memset(&crop_current, 0, sizeof(crop_current));
++ memset(&win_current, 0, sizeof(win_current));
++ init_camera_struct(g_cam);
++ platform_set_drvdata(pdev, (void *)g_cam);
++
++ /* Set up the v4l2 device and register it */
++ csi_v4l2_int_device.priv = g_cam;
++ /* This function contains a bug that won't let this be rmmod'd. */
++ v4l2_int_device_register(&csi_v4l2_int_device);
++
++ /* register v4l video device */
++ if (video_register_device(g_cam->video_dev, VFL_TYPE_GRABBER, video_nr)
++ == -1) {
++ kfree(g_cam);
++ g_cam = NULL;
++ pr_err("ERROR: v4l2 capture: video_register_device failed\n");
++ err = -ENODEV;
++ goto out;
++ }
++ pr_debug(" Video device registered: %s #%d\n",
++ g_cam->video_dev->name, g_cam->video_dev->minor);
++
++ g_cam->pxp_chan = NULL;
++ /* Initialize Scatter-gather list containing 2 buffer addresses. */
++ sg = g_cam->sg;
++ sg_init_table(sg, 2);
++
++out:
++ return err;
++}
++
++static int csi_v4l2_remove(struct platform_device *pdev)
++{
++ if (g_cam->open_count) {
++ pr_err("ERROR: v4l2 capture:camera open "
++ "-- setting ops to NULL\n");
++ } else {
++ pr_info("V4L2 freeing image input device\n");
++ v4l2_int_device_unregister(&csi_v4l2_int_device);
++ csi_stop_callback(g_cam);
++ video_unregister_device(g_cam->video_dev);
++ platform_set_drvdata(pdev, NULL);
++
++ kfree(g_cam);
++ g_cam = NULL;
++ }
++
++ return 0;
++}
++
++/*!
++ * This function is called to put the sensor in a low power state.
++ * Refer to the document driver-model/driver.txt in the kernel source tree
++ * for more information.
++ *
++ * @param pdev the device structure used to give information on which I2C
++ * to suspend
++ * @param state the power state the device is entering
++ *
++ * @return The function returns 0 on success and -1 on failure.
++ */
++static int csi_v4l2_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ cam_data *cam = platform_get_drvdata(pdev);
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (cam == NULL)
++ return -1;
++
++ cam->low_power = true;
++
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ if (cam->capture_on == true || cam->overlay_on == true)
++ camera_power(cam, false);
++
++ return 0;
++}
++
++/*!
++ * This function is called to bring the sensor back from a low power state.
++ * Refer to the document driver-model/driver.txt in the kernel source tree
++ * for more information.
++ *
++ * @param pdev the device structure
++ *
++ * @return The function returns 0 on success and -1 on failure
++ */
++static int csi_v4l2_resume(struct platform_device *pdev)
++{
++ cam_data *cam = platform_get_drvdata(pdev);
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (cam == NULL)
++ return -1;
++
++ cam->low_power = false;
++ wake_up_interruptible(&cam->power_queue);
++ if (cam->capture_on == true || cam->overlay_on == true)
++ camera_power(cam, true);
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ return 0;
++}
++
++/*!
++ * This structure contains pointers to the power management callback functions.
++ */
++static struct platform_driver csi_v4l2_driver = {
++ .driver = {
++ .name = "csi_v4l2",
++ .of_match_table = of_match_ptr(imx_csi_v4l2_dt_ids),
++ },
++ .probe = csi_v4l2_probe,
++ .remove = csi_v4l2_remove,
++#ifdef CONFIG_PM
++ .suspend = csi_v4l2_suspend,
++ .resume = csi_v4l2_resume,
++#endif
++ .shutdown = NULL,
++};
++
++/*!
++ * Initializes the camera driver.
++ */
++static int csi_v4l2_master_attach(struct v4l2_int_device *slave)
++{
++ cam_data *cam = slave->u.slave->master->priv;
++ struct v4l2_format cam_fmt;
++
++ pr_debug("In MVC: %s\n", __func__);
++ pr_debug(" slave.name = %s\n", slave->name);
++ pr_debug(" master.name = %s\n", slave->u.slave->master->name);
++
++ cam->sensor = slave;
++ if (slave == NULL) {
++ pr_err("ERROR: v4l2 capture: slave parameter not valid.\n");
++ return -1;
++ }
++
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++
++ /* Used to detect TV in (type 1) vs. camera (type 0) */
++ cam->device_type = cam_fmt.fmt.pix.priv;
++
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++
++ /* This also is the max crop size for this device. */
++ cam->crop_defrect.top = cam->crop_defrect.left = 0;
++ cam->crop_defrect.width = cam_fmt.fmt.pix.width;
++ cam->crop_defrect.height = cam_fmt.fmt.pix.height;
++
++ /* At this point, this is also the current image size. */
++ cam->crop_current.top = cam->crop_current.left = 0;
++ cam->crop_current.width = cam_fmt.fmt.pix.width;
++ cam->crop_current.height = cam_fmt.fmt.pix.height;
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__, cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++
++ return 0;
++}
++
++/*!
++ * Disconnects the camera driver.
++ */
++static void csi_v4l2_master_detach(struct v4l2_int_device *slave)
++{
++ pr_debug("In MVC: %s\n", __func__);
++
++ vidioc_int_dev_exit(slave);
++}
++
++module_platform_driver(csi_v4l2_driver);
++
++module_param(video_nr, int, 0444);
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("V4L2 capture driver for Mx25 based cameras");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("video");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/fsl_csi.c linux-openelec/drivers/media/platform/mxc/capture/fsl_csi.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/fsl_csi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/fsl_csi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,302 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file fsl_csi.c, this file is derived from mx27_csi.c
++ *
++ * @brief mx25 CMOS Sensor interface functions
++ *
++ * @ingroup CSI
++ */
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
++#include <linux/clk.h>
++#include <linux/of.h>
++#include <linux/sched.h>
++
++#include "mxc_v4l2_capture.h"
++#include "fsl_csi.h"
++
++void __iomem *csi_regbase;
++EXPORT_SYMBOL(csi_regbase);
++static int irq_nr;
++static csi_irq_callback_t g_callback;
++static void *g_callback_data;
++
++static irqreturn_t csi_irq_handler(int irq, void *data)
++{
++ cam_data *cam = (cam_data *) data;
++ unsigned long status = __raw_readl(CSI_CSISR);
++
++ __raw_writel(status, CSI_CSISR);
++
++ if (status & BIT_HRESP_ERR_INT)
++ pr_warning("Hresponse error is detected.\n");
++
++ if (status & BIT_DMA_TSF_DONE_FB1) {
++ if (cam->capture_on) {
++ spin_lock(&cam->queue_int_lock);
++ cam->ping_pong_csi = 1;
++ spin_unlock(&cam->queue_int_lock);
++ cam->enc_callback(0, cam);
++ } else {
++ cam->still_counter++;
++ wake_up_interruptible(&cam->still_queue);
++ }
++ }
++
++ if (status & BIT_DMA_TSF_DONE_FB2) {
++ if (cam->capture_on) {
++ spin_lock(&cam->queue_int_lock);
++ cam->ping_pong_csi = 2;
++ spin_unlock(&cam->queue_int_lock);
++ cam->enc_callback(0, cam);
++ } else {
++ cam->still_counter++;
++ wake_up_interruptible(&cam->still_queue);
++ }
++ }
++
++ if (g_callback)
++ g_callback(g_callback_data, status);
++
++ pr_debug("CSI status = 0x%08lX\n", status);
++
++ return IRQ_HANDLED;
++}
++
++static void csihw_reset_frame_count(void)
++{
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_FRMCNT_RST, CSI_CSICR3);
++}
++
++static void csihw_reset(void)
++{
++ csihw_reset_frame_count();
++ __raw_writel(CSICR1_RESET_VAL, CSI_CSICR1);
++ __raw_writel(CSICR2_RESET_VAL, CSI_CSICR2);
++ __raw_writel(CSICR3_RESET_VAL, CSI_CSICR3);
++}
++
++/*!
++ * csi_init_interface
++ * Init csi interface
++ */
++void csi_init_interface(void)
++{
++ unsigned int val = 0;
++ unsigned int imag_para;
++
++ val |= BIT_SOF_POL;
++ val |= BIT_REDGE;
++ val |= BIT_GCLK_MODE;
++ val |= BIT_HSYNC_POL;
++ val |= BIT_PACK_DIR;
++ val |= BIT_FCC;
++ val |= BIT_SWAP16_EN;
++ val |= 1 << SHIFT_MCLKDIV;
++ val |= BIT_MCLKEN;
++ __raw_writel(val, CSI_CSICR1);
++
++ imag_para = (640 << 16) | 960;
++ __raw_writel(imag_para, CSI_CSIIMAG_PARA);
++
++ val = 0x1010;
++ val |= BIT_DMA_REFLASH_RFF;
++ __raw_writel(val, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_init_interface);
++
++void csi_init_format(int fmt)
++{
++ unsigned int val;
++
++ val = __raw_readl(CSI_CSICR1);
++ if (fmt == V4L2_PIX_FMT_YUYV) {
++ val &= ~BIT_PACK_DIR;
++ val &= ~BIT_SWAP16_EN;
++ } else if (fmt == V4L2_PIX_FMT_UYVY) {
++ val |= BIT_PACK_DIR;
++ val |= BIT_SWAP16_EN;
++ } else
++ pr_warning("unsupported format, old format remains.\n");
++
++ __raw_writel(val, CSI_CSICR1);
++}
++EXPORT_SYMBOL(csi_init_format);
++
++/*!
++ * csi_read_mclk_flag
++ *
++ * @return gcsi_mclk_source
++ */
++int csi_read_mclk_flag(void)
++{
++ return 0;
++}
++EXPORT_SYMBOL(csi_read_mclk_flag);
++
++void csi_start_callback(void *data)
++{
++ cam_data *cam = (cam_data *) data;
++
++ if (request_irq(irq_nr, csi_irq_handler, 0, "csi", cam) < 0)
++ pr_debug("CSI error: irq request fail\n");
++
++}
++EXPORT_SYMBOL(csi_start_callback);
++
++void csi_stop_callback(void *data)
++{
++ cam_data *cam = (cam_data *) data;
++
++ free_irq(irq_nr, cam);
++}
++EXPORT_SYMBOL(csi_stop_callback);
++
++void csi_enable_int(int arg)
++{
++ unsigned long cr1 = __raw_readl(CSI_CSICR1);
++
++ cr1 |= BIT_SOF_INTEN;
++ if (arg == 1) {
++		/* still capture needs DMA interrupt */
++ cr1 |= BIT_FB1_DMA_DONE_INTEN;
++ cr1 |= BIT_FB2_DMA_DONE_INTEN;
++ }
++ __raw_writel(cr1, CSI_CSICR1);
++}
++EXPORT_SYMBOL(csi_enable_int);
++
++void csi_disable_int(void)
++{
++ unsigned long cr1 = __raw_readl(CSI_CSICR1);
++
++ cr1 &= ~BIT_SOF_INTEN;
++ cr1 &= ~BIT_FB1_DMA_DONE_INTEN;
++ cr1 &= ~BIT_FB2_DMA_DONE_INTEN;
++ __raw_writel(cr1, CSI_CSICR1);
++}
++EXPORT_SYMBOL(csi_disable_int);
++
++void csi_set_16bit_imagpara(int width, int height)
++{
++ int imag_para = 0;
++ unsigned long cr3 = __raw_readl(CSI_CSICR3);
++
++ imag_para = (width << 16) | (height * 2);
++ __raw_writel(imag_para, CSI_CSIIMAG_PARA);
++
++	/* reflash the embedded DMA controller */
++ __raw_writel(cr3 | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_set_16bit_imagpara);
++
++void csi_set_12bit_imagpara(int width, int height)
++{
++ int imag_para = 0;
++ unsigned long cr3 = __raw_readl(CSI_CSICR3);
++
++ imag_para = (width << 16) | (height * 3 / 2);
++ __raw_writel(imag_para, CSI_CSIIMAG_PARA);
++
++	/* reflash the embedded DMA controller */
++ __raw_writel(cr3 | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_set_12bit_imagpara);
++
++void csi_dmareq_rff_enable(void)
++{
++ unsigned long cr3 = __raw_readl(CSI_CSICR3);
++
++ cr3 |= BIT_DMA_REQ_EN_RFF;
++ cr3 |= BIT_HRESP_ERR_EN;
++ __raw_writel(cr3, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_dmareq_rff_enable);
++
++void csi_dmareq_rff_disable(void)
++{
++ unsigned long cr3 = __raw_readl(CSI_CSICR3);
++
++ cr3 &= ~BIT_DMA_REQ_EN_RFF;
++ cr3 &= ~BIT_HRESP_ERR_EN;
++ __raw_writel(cr3, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_dmareq_rff_disable);
++
++static const struct of_device_id fsl_csi_dt_ids[] = {
++ { .compatible = "fsl,imx6sl-csi", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, fsl_csi_dt_ids);
++
++static int csi_probe(struct platform_device *pdev)
++{
++ int ret = 0;
++ struct resource *res;
++
++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "No csi irq found.\n");
++ ret = -ENODEV;
++ goto err;
++ }
++ irq_nr = res->start;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "No csi base address found.\n");
++ ret = -ENODEV;
++ goto err;
++ }
++ csi_regbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
++ if (!csi_regbase) {
++ dev_err(&pdev->dev, "ioremap failed with csi base\n");
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ csihw_reset();
++ csi_init_interface();
++ csi_dmareq_rff_disable();
++
++err:
++ return ret;
++}
++
++static int csi_remove(struct platform_device *pdev)
++{
++ return 0;
++}
++
++static struct platform_driver csi_driver = {
++ .driver = {
++ .name = "fsl_csi",
++ .of_match_table = of_match_ptr(fsl_csi_dt_ids),
++ },
++ .probe = csi_probe,
++ .remove = csi_remove,
++};
++
++module_platform_driver(csi_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("fsl CSI driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/fsl_csi.h linux-openelec/drivers/media/platform/mxc/capture/fsl_csi.h
+--- linux-3.14.36/drivers/media/platform/mxc/capture/fsl_csi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/fsl_csi.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,198 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file fsl_csi.h
++ *
++ * @brief mx25 CMOS Sensor interface functions
++ *
++ * @ingroup CSI
++ */
++
++#ifndef MX25_CSI_H
++#define MX25_CSI_H
++
++#include <linux/io.h>
++
++/* reset values */
++#define CSICR1_RESET_VAL 0x40000800
++#define CSICR2_RESET_VAL 0x0
++#define CSICR3_RESET_VAL 0x0
++
++/* csi control reg 1 */
++#define BIT_SWAP16_EN (0x1 << 31)
++#define BIT_EXT_VSYNC (0x1 << 30)
++#define BIT_EOF_INT_EN (0x1 << 29)
++#define BIT_PRP_IF_EN (0x1 << 28)
++#define BIT_CCIR_MODE (0x1 << 27)
++#define BIT_COF_INT_EN (0x1 << 26)
++#define BIT_SF_OR_INTEN (0x1 << 25)
++#define BIT_RF_OR_INTEN (0x1 << 24)
++#define BIT_SFF_DMA_DONE_INTEN (0x1 << 22)
++#define BIT_STATFF_INTEN (0x1 << 21)
++#define BIT_FB2_DMA_DONE_INTEN (0x1 << 20)
++#define BIT_FB1_DMA_DONE_INTEN (0x1 << 19)
++#define BIT_RXFF_INTEN (0x1 << 18)
++#define BIT_SOF_POL (0x1 << 17)
++#define BIT_SOF_INTEN (0x1 << 16)
++#define BIT_MCLKDIV (0xF << 12)
++#define BIT_HSYNC_POL (0x1 << 11)
++#define BIT_CCIR_EN (0x1 << 10)
++#define BIT_MCLKEN (0x1 << 9)
++#define BIT_FCC (0x1 << 8)
++#define BIT_PACK_DIR (0x1 << 7)
++#define BIT_CLR_STATFIFO (0x1 << 6)
++#define BIT_CLR_RXFIFO (0x1 << 5)
++#define BIT_GCLK_MODE (0x1 << 4)
++#define BIT_INV_DATA (0x1 << 3)
++#define BIT_INV_PCLK (0x1 << 2)
++#define BIT_REDGE (0x1 << 1)
++#define BIT_PIXEL_BIT (0x1 << 0)
++
++#define SHIFT_MCLKDIV 12
++
++/* control reg 3 */
++#define BIT_FRMCNT (0xFFFF << 16)
++#define BIT_FRMCNT_RST (0x1 << 15)
++#define BIT_DMA_REFLASH_RFF (0x1 << 14)
++#define BIT_DMA_REFLASH_SFF (0x1 << 13)
++#define BIT_DMA_REQ_EN_RFF (0x1 << 12)
++#define BIT_DMA_REQ_EN_SFF (0x1 << 11)
++#define BIT_STATFF_LEVEL (0x7 << 8)
++#define BIT_HRESP_ERR_EN (0x1 << 7)
++#define BIT_RXFF_LEVEL (0x7 << 4)
++#define BIT_TWO_8BIT_SENSOR (0x1 << 3)
++#define BIT_ZERO_PACK_EN (0x1 << 2)
++#define BIT_ECC_INT_EN (0x1 << 1)
++#define BIT_ECC_AUTO_EN (0x1 << 0)
++
++#define SHIFT_FRMCNT 16
++
++/* csi status reg */
++#define BIT_SFF_OR_INT (0x1 << 25)
++#define BIT_RFF_OR_INT (0x1 << 24)
++#define BIT_DMA_TSF_DONE_SFF (0x1 << 22)
++#define BIT_STATFF_INT (0x1 << 21)
++#define BIT_DMA_TSF_DONE_FB2 (0x1 << 20)
++#define BIT_DMA_TSF_DONE_FB1 (0x1 << 19)
++#define BIT_RXFF_INT (0x1 << 18)
++#define BIT_EOF_INT (0x1 << 17)
++#define BIT_SOF_INT (0x1 << 16)
++#define BIT_F2_INT (0x1 << 15)
++#define BIT_F1_INT (0x1 << 14)
++#define BIT_COF_INT (0x1 << 13)
++#define BIT_HRESP_ERR_INT (0x1 << 7)
++#define BIT_ECC_INT (0x1 << 1)
++#define BIT_DRDY (0x1 << 0)
++
++#define CSI_MCLK_VF 1
++#define CSI_MCLK_ENC 2
++#define CSI_MCLK_RAW 4
++#define CSI_MCLK_I2C 8
++#endif
++
++extern void __iomem *csi_regbase;
++#define CSI_CSICR1 (csi_regbase)
++#define CSI_CSICR2 (csi_regbase + 0x4)
++#define CSI_CSICR3 (csi_regbase + 0x8)
++#define CSI_STATFIFO (csi_regbase + 0xC)
++#define CSI_CSIRXFIFO (csi_regbase + 0x10)
++#define CSI_CSIRXCNT (csi_regbase + 0x14)
++#define CSI_CSISR (csi_regbase + 0x18)
++
++#define CSI_CSIDBG (csi_regbase + 0x1C)
++#define CSI_CSIDMASA_STATFIFO (csi_regbase + 0x20)
++#define CSI_CSIDMATS_STATFIFO (csi_regbase + 0x24)
++#define CSI_CSIDMASA_FB1 (csi_regbase + 0x28)
++#define CSI_CSIDMASA_FB2 (csi_regbase + 0x2C)
++#define CSI_CSIFBUF_PARA (csi_regbase + 0x30)
++#define CSI_CSIIMAG_PARA (csi_regbase + 0x34)
++
++static inline void csi_clear_status(unsigned long status)
++{
++ __raw_writel(status, CSI_CSISR);
++}
++
++struct csi_signal_cfg_t {
++ unsigned data_width:3;
++ unsigned clk_mode:2;
++ unsigned ext_vsync:1;
++ unsigned Vsync_pol:1;
++ unsigned Hsync_pol:1;
++ unsigned pixclk_pol:1;
++ unsigned data_pol:1;
++ unsigned sens_clksrc:1;
++};
++
++struct csi_config_t {
++ /* control reg 1 */
++ unsigned int swap16_en:1;
++ unsigned int ext_vsync:1;
++ unsigned int eof_int_en:1;
++ unsigned int prp_if_en:1;
++ unsigned int ccir_mode:1;
++ unsigned int cof_int_en:1;
++ unsigned int sf_or_inten:1;
++ unsigned int rf_or_inten:1;
++ unsigned int sff_dma_done_inten:1;
++ unsigned int statff_inten:1;
++ unsigned int fb2_dma_done_inten:1;
++ unsigned int fb1_dma_done_inten:1;
++ unsigned int rxff_inten:1;
++ unsigned int sof_pol:1;
++ unsigned int sof_inten:1;
++ unsigned int mclkdiv:4;
++ unsigned int hsync_pol:1;
++ unsigned int ccir_en:1;
++ unsigned int mclken:1;
++ unsigned int fcc:1;
++ unsigned int pack_dir:1;
++ unsigned int gclk_mode:1;
++ unsigned int inv_data:1;
++ unsigned int inv_pclk:1;
++ unsigned int redge:1;
++ unsigned int pixel_bit:1;
++
++ /* control reg 3 */
++ unsigned int frmcnt:16;
++ unsigned int frame_reset:1;
++ unsigned int dma_reflash_rff:1;
++ unsigned int dma_reflash_sff:1;
++ unsigned int dma_req_en_rff:1;
++ unsigned int dma_req_en_sff:1;
++ unsigned int statff_level:3;
++ unsigned int hresp_err_en:1;
++ unsigned int rxff_level:3;
++ unsigned int two_8bit_sensor:1;
++ unsigned int zero_pack_en:1;
++ unsigned int ecc_int_en:1;
++ unsigned int ecc_auto_en:1;
++ /* fifo counter */
++ unsigned int rxcnt;
++};
++
++typedef void (*csi_irq_callback_t) (void *data, unsigned long status);
++
++void csi_init_interface(void);
++void csi_init_format(int fmt);
++void csi_set_16bit_imagpara(int width, int height);
++void csi_set_12bit_imagpara(int width, int height);
++int csi_read_mclk_flag(void);
++void csi_start_callback(void *data);
++void csi_stop_callback(void *data);
++void csi_enable_int(int arg);
++void csi_disable_int(void);
++void csi_mclk_enable(void);
++void csi_mclk_disable(void);
++void csi_dmareq_rff_enable(void);
++void csi_dmareq_rff_disable(void);
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ipu_bg_overlay_sdc.c linux-openelec/drivers/media/platform/mxc/capture/ipu_bg_overlay_sdc.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ipu_bg_overlay_sdc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ipu_bg_overlay_sdc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,546 @@
++
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_bg_overlay_sdc.c
++ *
++ * @brief IPU Use case for PRP-VF back-ground
++ *
++ * @ingroup IPU
++ */
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/fb.h>
++#include <linux/ipu.h>
++#include <linux/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++static int csi_buffer_num;
++static u32 bpp, csi_mem_bufsize = 3;
++static u32 out_format;
++static struct ipu_soc *disp_ipu;
++static u32 offset;
++
++static void csi_buf_work_func(struct work_struct *work)
++{
++ int err = 0;
++ cam_data *cam =
++ container_of(work, struct _cam_data, csi_work_struct);
++
++ struct ipu_task task;
++ memset(&task, 0, sizeof(task));
++
++ if (csi_buffer_num)
++ task.input.paddr = cam->vf_bufs[0];
++ else
++ task.input.paddr = cam->vf_bufs[1];
++ task.input.width = cam->crop_current.width;
++ task.input.height = cam->crop_current.height;
++ task.input.format = IPU_PIX_FMT_UYVY;
++
++ task.output.paddr = offset;
++ task.output.width = cam->overlay_fb->var.xres;
++ task.output.height = cam->overlay_fb->var.yres;
++ task.output.format = out_format;
++ task.output.rotate = cam->rotation;
++ task.output.crop.pos.x = cam->win.w.left;
++ task.output.crop.pos.y = cam->win.w.top;
++ if (cam->win.w.width > 1024 || cam->win.w.height > 1024) {
++ task.output.crop.w = cam->overlay_fb->var.xres;
++ task.output.crop.h = cam->overlay_fb->var.yres;
++ } else {
++ task.output.crop.w = cam->win.w.width;
++ task.output.crop.h = cam->win.w.height;
++ }
++again:
++ err = ipu_check_task(&task);
++ if (err != IPU_CHECK_OK) {
++ if (err > IPU_CHECK_ERR_MIN) {
++ if (err == IPU_CHECK_ERR_SPLIT_INPUTW_OVER) {
++ task.input.crop.w -= 8;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_INPUTH_OVER) {
++ task.input.crop.h -= 8;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER) {
++ task.output.width -= 8;
++ task.output.crop.w = task.output.width;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER) {
++ task.output.height -= 8;
++ task.output.crop.h = task.output.height;
++ goto again;
++ }
++ printk(KERN_ERR "check ipu taks fail\n");
++ return;
++ }
++ printk(KERN_ERR "check ipu taks fail\n");
++ return;
++ }
++ err = ipu_queue_task(&task);
++ if (err < 0)
++ printk(KERN_ERR "queue ipu task error\n");
++}
++
++static void get_disp_ipu(cam_data *cam)
++{
++ if (cam->output > 2)
++ disp_ipu = ipu_get_soc(1); /* using DISP4 */
++ else
++ disp_ipu = ipu_get_soc(0);
++}
++
++
++/*!
++ * csi ENC callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t csi_enc_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, csi_buffer_num);
++ schedule_work(&cam->csi_work_struct);
++ csi_buffer_num = (csi_buffer_num == 0) ? 1 : 0;
++ return IRQ_HANDLED;
++}
++
++static int csi_enc_setup(cam_data *cam)
++{
++ ipu_channel_params_t params;
++ u32 pixel_fmt;
++ int err = 0, sensor_protocol = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (!cam) {
++ printk(KERN_ERR "cam private is NULL\n");
++ return -ENXIO;
++ }
++
++ memset(&params, 0, sizeof(ipu_channel_params_t));
++ params.csi_mem.csi = cam->csi;
++
++ sensor_protocol = ipu_csi_get_sensor_protocol(cam->ipu, cam->csi);
++ switch (sensor_protocol) {
++ case IPU_CSI_CLK_MODE_GATED_CLK:
++ case IPU_CSI_CLK_MODE_NONGATED_CLK:
++ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
++ params.csi_mem.interlaced = false;
++ break;
++ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
++ params.csi_mem.interlaced = true;
++ break;
++ default:
++ printk(KERN_ERR "sensor protocol unsupported\n");
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ params.csi_mem.mipi_en = true;
++ params.csi_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ params.csi_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ }
++ csi_mem_bufsize =
++ cam->crop_current.width * cam->crop_current.height * 2;
++ cam->vf_bufs_size[0] = PAGE_ALIGN(csi_mem_bufsize);
++ cam->vf_bufs_vaddr[0] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[0],
++ (dma_addr_t *) &
++ cam->vf_bufs[0],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[0] == NULL) {
++ printk(KERN_ERR "Error to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_2;
++ }
++ cam->vf_bufs_size[1] = PAGE_ALIGN(csi_mem_bufsize);
++ cam->vf_bufs_vaddr[1] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[1],
++ (dma_addr_t *) &
++ cam->vf_bufs[1],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[1] == NULL) {
++ printk(KERN_ERR "Error to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_1;
++ }
++ pr_debug("vf_bufs %x %x\n", cam->vf_bufs[0], cam->vf_bufs[1]);
++
++ err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
++ if (err != 0) {
++ printk(KERN_ERR "ipu_init_channel %d\n", err);
++ goto out_1;
++ }
++
++ pixel_fmt = IPU_PIX_FMT_UYVY;
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ pixel_fmt, cam->crop_current.width,
++ cam->crop_current.height,
++ cam->crop_current.width, IPU_ROTATE_NONE,
++ cam->vf_bufs[0], cam->vf_bufs[1], 0,
++ cam->offset.u_offset, cam->offset.u_offset);
++ if (err != 0) {
++ printk(KERN_ERR "CSI_MEM output buffer\n");
++ goto out_1;
++ }
++ err = ipu_enable_channel(cam->ipu, CSI_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_MEM\n");
++ goto out_1;
++ }
++
++ csi_buffer_num = 0;
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 0);
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 1);
++ return err;
++out_1:
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++out_2:
++ return err;
++}
++
++/*!
++ * Enable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_enabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
++ csi_enc_callback, 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering CSI0_OUT_EOF irq\n");
++ return err;
++ }
++
++ INIT_WORK(&cam->csi_work_struct, csi_buf_work_func);
++
++ err = csi_enc_setup(cam);
++ if (err != 0) {
++ printk(KERN_ERR "csi_enc_setup %d\n", err);
++ goto out1;
++ }
++
++ return err;
++out1:
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++ return err;
++}
++
++/*!
++ * bg_overlay_start - start the overlay task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int bg_overlay_start(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (!cam) {
++ printk(KERN_ERR "private is NULL\n");
++ return -EIO;
++ }
++
++ if (cam->overlay_active == true) {
++ pr_debug("already start.\n");
++ return 0;
++ }
++
++ get_disp_ipu(cam);
++
++ out_format = cam->v4l2_fb.fmt.pixelformat;
++ if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_BGR24) {
++ bpp = 3, csi_mem_bufsize = 3;
++ pr_info("BGR24\n");
++ } else if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_RGB565) {
++ bpp = 2, csi_mem_bufsize = 2;
++ pr_info("RGB565\n");
++ } else if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_BGR32) {
++ bpp = 4, csi_mem_bufsize = 4;
++ pr_info("BGR32\n");
++ } else {
++ printk(KERN_ERR
++ "unsupported fix format from the framebuffer.\n");
++ return -EINVAL;
++ }
++
++ offset = cam->v4l2_fb.fmt.bytesperline * cam->win.w.top +
++ csi_mem_bufsize * cam->win.w.left;
++
++ if (cam->v4l2_fb.base == 0)
++ printk(KERN_ERR "invalid frame buffer address.\n");
++ else
++ offset += (u32) cam->v4l2_fb.base;
++
++ csi_mem_bufsize = cam->win.w.width * cam->win.w.height
++ * csi_mem_bufsize;
++
++ err = csi_enc_enabling_tasks(cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error csi enc enable fail\n");
++ return err;
++ }
++
++ cam->overlay_active = true;
++ return err;
++}
++
++/*!
++ * bg_overlay_stop - stop the overlay task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int bg_overlay_stop(void *private)
++{
++ int err = 0;
++ cam_data *cam = (cam_data *) private;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->overlay_active == false)
++ return 0;
++
++ err = ipu_disable_channel(cam->ipu, CSI_MEM, true);
++
++ ipu_uninit_channel(cam->ipu, CSI_MEM);
++
++ csi_buffer_num = 0;
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ flush_work(&cam->csi_work_struct);
++ cancel_work_sync(&cam->csi_work_struct);
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0], cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1], cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[0],
++ cam->rot_vf_bufs_vaddr[0],
++ cam->rot_vf_bufs[0]);
++ cam->rot_vf_bufs_vaddr[0] = NULL;
++ cam->rot_vf_bufs[0] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[1],
++ cam->rot_vf_bufs_vaddr[1],
++ cam->rot_vf_bufs[1]);
++ cam->rot_vf_bufs_vaddr[1] = NULL;
++ cam->rot_vf_bufs[1] = 0;
++ }
++
++ cam->overlay_active = false;
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int bg_overlay_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int bg_overlay_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ /* free csi eof irq firstly.
++ * when disable csi, wait for idmac eof.
++ * it requests eof irq again */
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select bg as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int bg_overlay_sdc_select(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->vf_start_sdc = bg_overlay_start;
++ cam->vf_stop_sdc = bg_overlay_stop;
++ cam->vf_enable_csi = bg_overlay_enable_csi;
++ cam->vf_disable_csi = bg_overlay_disable_csi;
++ cam->overlay_active = false;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(bg_overlay_sdc_select);
++
++/*!
++ * function to de-select bg as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int bg_overlay_sdc_deselect(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->vf_start_sdc = NULL;
++ cam->vf_stop_sdc = NULL;
++ cam->vf_enable_csi = NULL;
++ cam->vf_disable_csi = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(bg_overlay_sdc_deselect);
++
++/*!
++ * Init background overlay task.
++ *
++ * @return Error code indicating success or failure
++ */
++__init int bg_overlay_sdc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit background overlay task.
++ *
++ * @return Error code indicating success or failure
++ */
++void __exit bg_overlay_sdc_exit(void)
++{
++}
++
++module_init(bg_overlay_sdc_init);
++module_exit(bg_overlay_sdc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP VF SDC Backgroud Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ipu_csi_enc.c linux-openelec/drivers/media/platform/mxc/capture/ipu_csi_enc.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ipu_csi_enc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ipu_csi_enc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,418 @@
++/*
++ * Copyright 2009-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_csi_enc.c
++ *
++ * @brief CSI Use case for video capture
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include <linux/ipu.h>
++#include <linux/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++#ifdef CAMERA_DBG
++ #define CAMERA_TRACE(x) (printk)x
++#else
++ #define CAMERA_TRACE(x)
++#endif
++
++/*
++ * Function definitions
++ */
++
++/*!
++ * csi ENC callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t csi_enc_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ if (cam->enc_callback == NULL)
++ return IRQ_HANDLED;
++
++ cam->enc_callback(irq, dev_id);
++ return IRQ_HANDLED;
++}
++
++/*!
++ * CSI ENC enable channel setup function
++ *
++ * @param cam struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_setup(cam_data *cam)
++{
++ ipu_channel_params_t params;
++ u32 pixel_fmt;
++ int err = 0, sensor_protocol = 0;
++ dma_addr_t dummy = cam->dummy_frame.buffer.m.offset;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ CAMERA_TRACE("In csi_enc_setup\n");
++ if (!cam) {
++ printk(KERN_ERR "cam private is NULL\n");
++ return -ENXIO;
++ }
++
++ memset(&params, 0, sizeof(ipu_channel_params_t));
++ params.csi_mem.csi = cam->csi;
++
++ sensor_protocol = ipu_csi_get_sensor_protocol(cam->ipu, cam->csi);
++ switch (sensor_protocol) {
++ case IPU_CSI_CLK_MODE_GATED_CLK:
++ case IPU_CSI_CLK_MODE_NONGATED_CLK:
++ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
++ params.csi_mem.interlaced = false;
++ break;
++ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
++ params.csi_mem.interlaced = true;
++ break;
++ default:
++ printk(KERN_ERR "sensor protocol unsupported\n");
++ return -EINVAL;
++ }
++
++ if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
++ pixel_fmt = IPU_PIX_FMT_YUV420P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YVU420)
++ pixel_fmt = IPU_PIX_FMT_YVU420P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P)
++ pixel_fmt = IPU_PIX_FMT_YUV422P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
++ pixel_fmt = IPU_PIX_FMT_UYVY;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
++ pixel_fmt = IPU_PIX_FMT_YUYV;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12)
++ pixel_fmt = IPU_PIX_FMT_NV12;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24)
++ pixel_fmt = IPU_PIX_FMT_BGR24;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
++ pixel_fmt = IPU_PIX_FMT_RGB24;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565)
++ pixel_fmt = IPU_PIX_FMT_RGB565;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32)
++ pixel_fmt = IPU_PIX_FMT_BGR32;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32)
++ pixel_fmt = IPU_PIX_FMT_RGB32;
++ else {
++ printk(KERN_ERR "format not supported\n");
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ params.csi_mem.mipi_en = true;
++ params.csi_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ params.csi_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
++ if (err != 0) {
++ printk(KERN_ERR "ipu_init_channel %d\n", err);
++ return err;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ pixel_fmt, cam->v2f.fmt.pix.width,
++ cam->v2f.fmt.pix.height,
++ cam->v2f.fmt.pix.bytesperline,
++ IPU_ROTATE_NONE,
++ dummy, dummy, 0,
++ cam->offset.u_offset,
++ cam->offset.v_offset);
++ if (err != 0) {
++ printk(KERN_ERR "CSI_MEM output buffer\n");
++ return err;
++ }
++ err = ipu_enable_channel(cam->ipu, CSI_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_MEM\n");
++ return err;
++ }
++
++ return err;
++}
++
++/*!
++ * function to update physical buffer address for encoder IDMA channel
++ *
++ * @param eba physical buffer address for encoder IDMA channel
++ * @param buffer_num int buffer 0 or buffer 1
++ *
++ * @return status
++ */
++static int csi_enc_eba_update(struct ipu_soc *ipu, dma_addr_t eba,
++ int *buffer_num)
++{
++ int err = 0;
++
++ pr_debug("eba %x\n", eba);
++ err = ipu_update_channel_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num, eba);
++ if (err != 0) {
++ ipu_clear_buffer_ready(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num);
++
++ err = ipu_update_channel_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num, eba);
++ if (err != 0) {
++ pr_err("ERROR: v4l2 capture: fail to update "
++ "buf%d\n", *buffer_num);
++ return err;
++ }
++ }
++
++ ipu_select_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER, *buffer_num);
++
++ *buffer_num = (*buffer_num == 0) ? 1 : 0;
++
++ return 0;
++}
++
++/*!
++ * Enable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_enabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++ CAMERA_TRACE("IPU:In csi_enc_enabling_tasks\n");
++
++ cam->dummy_frame.vaddress = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->dummy_frame.paddress,
++ GFP_DMA | GFP_KERNEL);
++ if (cam->dummy_frame.vaddress == 0) {
++ pr_err("ERROR: v4l2 capture: Allocate dummy frame "
++ "failed.\n");
++ return -ENOBUFS;
++ }
++ cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
++ cam->dummy_frame.buffer.length =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
++ csi_enc_callback, 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering rot irq\n");
++ return err;
++ }
++
++ err = csi_enc_setup(cam);
++ if (err != 0) {
++ printk(KERN_ERR "csi_enc_setup %d\n", err);
++ return err;
++ }
++
++ return err;
++}
++
++/*!
++ * Disable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++static int csi_enc_disabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ err = ipu_disable_channel(cam->ipu, CSI_MEM, true);
++
++ ipu_uninit_channel(cam->ipu, CSI_MEM);
++
++ if (cam->dummy_frame.vaddress != 0) {
++ dma_free_coherent(0, cam->dummy_frame.buffer.length,
++ cam->dummy_frame.vaddress,
++ cam->dummy_frame.paddress);
++ cam->dummy_frame.vaddress = 0;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ /* free csi eof irq firstly.
++ * when disable csi, wait for idmac eof.
++ * it requests eof irq again */
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select CSI ENC as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++int csi_enc_select(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (cam) {
++ cam->enc_update_eba = csi_enc_eba_update;
++ cam->enc_enable = csi_enc_enabling_tasks;
++ cam->enc_disable = csi_enc_disabling_tasks;
++ cam->enc_enable_csi = csi_enc_enable_csi;
++ cam->enc_disable_csi = csi_enc_disable_csi;
++ } else {
++ err = -EIO;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(csi_enc_select);
++
++/*!
++ * function to de-select CSI ENC as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++int csi_enc_deselect(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (cam) {
++ cam->enc_update_eba = NULL;
++ cam->enc_enable = NULL;
++ cam->enc_disable = NULL;
++ cam->enc_enable_csi = NULL;
++ cam->enc_disable_csi = NULL;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(csi_enc_deselect);
++
++/*!
++ * Init the Encorder channels
++ *
++ * @return Error code indicating success or failure
++ */
++__init int csi_enc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit the Encorder channels
++ *
++ */
++void __exit csi_enc_exit(void)
++{
++}
++
++module_init(csi_enc_init);
++module_exit(csi_enc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("CSI ENC Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ipu_fg_overlay_sdc.c linux-openelec/drivers/media/platform/mxc/capture/ipu_fg_overlay_sdc.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ipu_fg_overlay_sdc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ipu_fg_overlay_sdc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,634 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++/* * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_foreground_sdc.c
++ *
++ * @brief IPU Use case for PRP-VF
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/console.h>
++#include <linux/ipu.h>
++#include <linux/mxcfb.h>
++#include <linux/mipi_csi2.h>
++
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++#ifdef CAMERA_DBG
++ #define CAMERA_TRACE(x) (printk)x
++#else
++ #define CAMERA_TRACE(x)
++#endif
++
++static int csi_buffer_num, buffer_num;
++static u32 csi_mem_bufsize;
++static struct ipu_soc *disp_ipu;
++static struct fb_info *fbi;
++static struct fb_var_screeninfo fbvar;
++static u32 vf_out_format;
++static void csi_buf_work_func(struct work_struct *work)
++{
++ int err = 0;
++ cam_data *cam =
++ container_of(work, struct _cam_data, csi_work_struct);
++
++ struct ipu_task task;
++ memset(&task, 0, sizeof(task));
++
++ if (csi_buffer_num)
++ task.input.paddr = cam->vf_bufs[0];
++ else
++ task.input.paddr = cam->vf_bufs[1];
++ task.input.width = cam->crop_current.width;
++ task.input.height = cam->crop_current.height;
++ task.input.format = IPU_PIX_FMT_NV12;
++
++ if (buffer_num == 0)
++ task.output.paddr = fbi->fix.smem_start +
++ (fbi->fix.line_length * fbvar.yres);
++ else
++ task.output.paddr = fbi->fix.smem_start;
++ task.output.width = cam->win.w.width;
++ task.output.height = cam->win.w.height;
++ task.output.format = vf_out_format;
++ task.output.rotate = cam->rotation;
++again:
++ err = ipu_check_task(&task);
++ if (err != IPU_CHECK_OK) {
++ if (err > IPU_CHECK_ERR_MIN) {
++ if (err == IPU_CHECK_ERR_SPLIT_INPUTW_OVER) {
++ task.input.crop.w -= 8;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_INPUTH_OVER) {
++ task.input.crop.h -= 8;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER) {
++ task.output.width -= 8;
++ task.output.crop.w = task.output.width;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER) {
++ task.output.height -= 8;
++ task.output.crop.h = task.output.height;
++ goto again;
++ }
++ printk(KERN_ERR "check ipu taks fail\n");
++ return;
++ }
++ printk(KERN_ERR "check ipu taks fail\n");
++ return;
++ }
++ err = ipu_queue_task(&task);
++ if (err < 0)
++ printk(KERN_ERR "queue ipu task error\n");
++ ipu_select_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER, buffer_num);
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++}
++
++static void get_disp_ipu(cam_data *cam)
++{
++ if (cam->output > 2)
++ disp_ipu = ipu_get_soc(1); /* using DISP4 */
++ else
++ disp_ipu = ipu_get_soc(0);
++}
++
++/*!
++ * csi ENC callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t csi_enc_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, csi_buffer_num);
++ if ((cam->crop_current.width != cam->win.w.width) ||
++ (cam->crop_current.height != cam->win.w.height) ||
++ (vf_out_format != IPU_PIX_FMT_NV12) ||
++ (cam->rotation >= IPU_ROTATE_VERT_FLIP))
++ schedule_work(&cam->csi_work_struct);
++ csi_buffer_num = (csi_buffer_num == 0) ? 1 : 0;
++ return IRQ_HANDLED;
++}
++
++static int csi_enc_setup(cam_data *cam)
++{
++ ipu_channel_params_t params;
++ int err = 0, sensor_protocol = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ CAMERA_TRACE("In csi_enc_setup\n");
++ if (!cam) {
++ printk(KERN_ERR "cam private is NULL\n");
++ return -ENXIO;
++ }
++
++ memset(&params, 0, sizeof(ipu_channel_params_t));
++ params.csi_mem.csi = cam->csi;
++
++ sensor_protocol = ipu_csi_get_sensor_protocol(cam->ipu, cam->csi);
++ switch (sensor_protocol) {
++ case IPU_CSI_CLK_MODE_GATED_CLK:
++ case IPU_CSI_CLK_MODE_NONGATED_CLK:
++ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
++ params.csi_mem.interlaced = false;
++ break;
++ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
++ params.csi_mem.interlaced = true;
++ break;
++ default:
++ printk(KERN_ERR "sensor protocol unsupported\n");
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ params.csi_mem.mipi_en = true;
++ params.csi_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ params.csi_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ }
++ csi_mem_bufsize = cam->crop_current.width *
++ cam->crop_current.height * 3/2;
++ cam->vf_bufs_size[0] = PAGE_ALIGN(csi_mem_bufsize);
++ cam->vf_bufs_vaddr[0] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[0],
++ (dma_addr_t *) &
++ cam->vf_bufs[0],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[0] == NULL) {
++ printk(KERN_ERR "Error to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_2;
++ }
++ cam->vf_bufs_size[1] = PAGE_ALIGN(csi_mem_bufsize);
++ cam->vf_bufs_vaddr[1] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[1],
++ (dma_addr_t *) &
++ cam->vf_bufs[1],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[1] == NULL) {
++ printk(KERN_ERR "Error to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_1;
++ }
++ pr_debug("vf_bufs %x %x\n", cam->vf_bufs[0], cam->vf_bufs[1]);
++
++ err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
++ if (err != 0) {
++ printk(KERN_ERR "ipu_init_channel %d\n", err);
++ goto out_1;
++ }
++
++ if ((cam->crop_current.width == cam->win.w.width) &&
++ (cam->crop_current.height == cam->win.w.height) &&
++ (vf_out_format == IPU_PIX_FMT_NV12) &&
++ (cam->rotation < IPU_ROTATE_VERT_FLIP)) {
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM,
++ IPU_OUTPUT_BUFFER,
++ IPU_PIX_FMT_NV12,
++ cam->crop_current.width,
++ cam->crop_current.height,
++ cam->crop_current.width, IPU_ROTATE_NONE,
++ fbi->fix.smem_start +
++ (fbi->fix.line_length * fbvar.yres),
++ fbi->fix.smem_start, 0,
++ cam->offset.u_offset, cam->offset.u_offset);
++ } else {
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM,
++ IPU_OUTPUT_BUFFER,
++ IPU_PIX_FMT_NV12,
++ cam->crop_current.width,
++ cam->crop_current.height,
++ cam->crop_current.width, IPU_ROTATE_NONE,
++ cam->vf_bufs[0], cam->vf_bufs[1], 0,
++ cam->offset.u_offset, cam->offset.u_offset);
++ }
++ if (err != 0) {
++ printk(KERN_ERR "CSI_MEM output buffer\n");
++ goto out_1;
++ }
++ err = ipu_enable_channel(cam->ipu, CSI_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_MEM\n");
++ goto out_1;
++ }
++
++ csi_buffer_num = 0;
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 0);
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 1);
++ return err;
++out_1:
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++out_2:
++ return err;
++}
++
++/*!
++ * Enable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_enabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++ CAMERA_TRACE("IPU:In csi_enc_enabling_tasks\n");
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
++ csi_enc_callback, 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering CSI0_OUT_EOF irq\n");
++ return err;
++ }
++
++ INIT_WORK(&cam->csi_work_struct, csi_buf_work_func);
++
++ err = csi_enc_setup(cam);
++ if (err != 0) {
++ printk(KERN_ERR "csi_enc_setup %d\n", err);
++ goto out1;
++ }
++
++ return err;
++out1:
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++ return err;
++}
++
++/*
++ * Function definitions
++ */
++
++/*!
++ * foreground_start - start the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int foreground_start(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0, i = 0, screen_size;
++ char *base;
++
++ if (!cam) {
++ printk(KERN_ERR "private is NULL\n");
++ return -EIO;
++ }
++
++ if (cam->overlay_active == true) {
++ pr_debug("already started.\n");
++ return 0;
++ }
++
++ get_disp_ipu(cam);
++
++ for (i = 0; i < num_registered_fb; i++) {
++ char *idstr = registered_fb[i]->fix.id;
++ if (((strcmp(idstr, "DISP3 FG") == 0) && (cam->output < 3)) ||
++ ((strcmp(idstr, "DISP4 FG") == 0) && (cam->output >= 3))) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++
++ if (fbi == NULL) {
++ printk(KERN_ERR "DISP FG fb not found\n");
++ return -EPERM;
++ }
++
++ fbvar = fbi->var;
++
++ /* Store the overlay frame buffer's original std */
++ cam->fb_origin_std = fbvar.nonstd;
++
++ if (cam->devtype == IMX5_V4L2 || cam->devtype == IMX6_V4L2) {
++ /* Use DP to do CSC so that we can get better performance */
++ vf_out_format = IPU_PIX_FMT_NV12;
++ fbvar.nonstd = vf_out_format;
++ } else {
++ vf_out_format = IPU_PIX_FMT_RGB565;
++ fbvar.nonstd = 0;
++ }
++
++ fbvar.bits_per_pixel = 16;
++ fbvar.xres = fbvar.xres_virtual = cam->win.w.width;
++ fbvar.yres = cam->win.w.height;
++ fbvar.yres_virtual = cam->win.w.height * 2;
++ fbvar.yoffset = 0;
++ fbvar.vmode &= ~FB_VMODE_YWRAP;
++ fbvar.accel_flags = FB_ACCEL_DOUBLE_FLAG;
++ fbvar.activate |= FB_ACTIVATE_FORCE;
++ fb_set_var(fbi, &fbvar);
++
++ ipu_disp_set_window_pos(disp_ipu, MEM_FG_SYNC, cam->win.w.left,
++ cam->win.w.top);
++
++ /* Fill black color for framebuffer */
++ base = (char *) fbi->screen_base;
++ screen_size = fbi->var.xres * fbi->var.yres;
++ if (cam->devtype == IMX5_V4L2 || cam->devtype == IMX6_V4L2) {
++ memset(base, 0, screen_size);
++ base += screen_size;
++ for (i = 0; i < screen_size / 2; i++, base++)
++ *base = 0x80;
++ } else {
++ for (i = 0; i < screen_size * 2; i++, base++)
++ *base = 0x00;
++ }
++
++ console_lock();
++ fb_blank(fbi, FB_BLANK_UNBLANK);
++ console_unlock();
++
++ /* correct display ch buffer address */
++ ipu_update_channel_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER,
++ 0, fbi->fix.smem_start +
++ (fbi->fix.line_length * fbvar.yres));
++ ipu_update_channel_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER,
++ 1, fbi->fix.smem_start);
++
++ err = csi_enc_enabling_tasks(cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error csi enc enable fail\n");
++ return err;
++ }
++
++ cam->overlay_active = true;
++ return err;
++
++}
++
++/*!
++ * foreground_stop - stop the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int foreground_stop(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0, i = 0;
++ struct fb_info *fbi = NULL;
++ struct fb_var_screeninfo fbvar;
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->overlay_active == false)
++ return 0;
++
++ err = ipu_disable_channel(cam->ipu, CSI_MEM, true);
++
++ ipu_uninit_channel(cam->ipu, CSI_MEM);
++
++ csi_buffer_num = 0;
++ buffer_num = 0;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ char *idstr = registered_fb[i]->fix.id;
++ if (((strcmp(idstr, "DISP3 FG") == 0) && (cam->output < 3)) ||
++ ((strcmp(idstr, "DISP4 FG") == 0) && (cam->output >= 3))) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++
++ if (fbi == NULL) {
++ printk(KERN_ERR "DISP FG fb not found\n");
++ return -EPERM;
++ }
++
++ console_lock();
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ console_unlock();
++
++ /* Set the overlay frame buffer std to what it is used to be */
++ fbvar = fbi->var;
++ fbvar.accel_flags = FB_ACCEL_TRIPLE_FLAG;
++ fbvar.nonstd = cam->fb_origin_std;
++ fbvar.activate |= FB_ACTIVATE_FORCE;
++ fb_set_var(fbi, &fbvar);
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ flush_work(&cam->csi_work_struct);
++ cancel_work_sync(&cam->csi_work_struct);
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++
++ cam->overlay_active = false;
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int foreground_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int foreground_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ /* free csi eof irq firstly.
++ * when disable csi, wait for idmac eof.
++ * it requests eof irq again */
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select foreground as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int foreground_sdc_select(void *private)
++{
++ cam_data *cam;
++ int err = 0;
++ if (private) {
++ cam = (cam_data *) private;
++ cam->vf_start_sdc = foreground_start;
++ cam->vf_stop_sdc = foreground_stop;
++ cam->vf_enable_csi = foreground_enable_csi;
++ cam->vf_disable_csi = foreground_disable_csi;
++ cam->overlay_active = false;
++ } else
++ err = -EIO;
++
++ return err;
++}
++EXPORT_SYMBOL(foreground_sdc_select);
++
++/*!
++ * function to de-select foreground as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return int
++ */
++int foreground_sdc_deselect(void *private)
++{
++ cam_data *cam;
++
++ if (private) {
++ cam = (cam_data *) private;
++ cam->vf_start_sdc = NULL;
++ cam->vf_stop_sdc = NULL;
++ cam->vf_enable_csi = NULL;
++ cam->vf_disable_csi = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(foreground_sdc_deselect);
++
++/*!
++ * Init viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++__init int foreground_sdc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++void __exit foreground_sdc_exit(void)
++{
++}
++
++module_init(foreground_sdc_init);
++module_exit(foreground_sdc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP VF SDC Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ipu_prp_enc.c linux-openelec/drivers/media/platform/mxc/capture/ipu_prp_enc.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ipu_prp_enc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ipu_prp_enc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,595 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_prp_enc.c
++ *
++ * @brief IPU Use case for PRP-ENC
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/platform_device.h>
++#include <linux/ipu.h>
++#include <linux/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++#ifdef CAMERA_DBG
++ #define CAMERA_TRACE(x) (printk)x
++#else
++ #define CAMERA_TRACE(x)
++#endif
++
++static ipu_rotate_mode_t grotation = IPU_ROTATE_NONE;
++
++/*
++ * Function definitions
++ */
++
++/*!
++ * IPU ENC callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prp_enc_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ if (cam->enc_callback == NULL)
++ return IRQ_HANDLED;
++
++ cam->enc_callback(irq, dev_id);
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * PrpENC enable channel setup function
++ *
++ * @param cam struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_enc_setup(cam_data *cam)
++{
++ ipu_channel_params_t enc;
++ int err = 0;
++ dma_addr_t dummy = cam->dummy_frame.buffer.m.offset;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ CAMERA_TRACE("In prp_enc_setup\n");
++ if (!cam) {
++ printk(KERN_ERR "cam private is NULL\n");
++ return -ENXIO;
++ }
++ memset(&enc, 0, sizeof(ipu_channel_params_t));
++
++ ipu_csi_get_window_size(cam->ipu, &enc.csi_prp_enc_mem.in_width,
++ &enc.csi_prp_enc_mem.in_height, cam->csi);
++
++ enc.csi_prp_enc_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
++ enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.width;
++ enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.height;
++ enc.csi_prp_enc_mem.csi = cam->csi;
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.height;
++ enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.width;
++ }
++
++ if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV420P;
++ pr_info("YUV420\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YVU420) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YVU420P;
++ pr_info("YVU420\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV422P;
++ pr_info("YUV422P\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUYV;
++ pr_info("YUYV\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_UYVY;
++ pr_info("UYVY\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_NV12;
++ pr_info("NV12\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR24;
++ pr_info("BGR24\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB24;
++ pr_info("RGB24\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB565;
++ pr_info("RGB565\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR32;
++ pr_info("BGR32\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB32;
++ pr_info("RGB32\n");
++ } else {
++ printk(KERN_ERR "format not supported\n");
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ enc.csi_prp_enc_mem.mipi_en = true;
++ enc.csi_prp_enc_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ enc.csi_prp_enc_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ enc.csi_prp_enc_mem.mipi_en = false;
++ enc.csi_prp_enc_mem.mipi_vc = 0;
++ enc.csi_prp_enc_mem.mipi_id = 0;
++ }
++ } else {
++ enc.csi_prp_enc_mem.mipi_en = false;
++ enc.csi_prp_enc_mem.mipi_vc = 0;
++ enc.csi_prp_enc_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ err = ipu_init_channel(cam->ipu, CSI_PRP_ENC_MEM, &enc);
++ if (err != 0) {
++ printk(KERN_ERR "ipu_init_channel %d\n", err);
++ return err;
++ }
++
++ grotation = cam->rotation;
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ if (cam->rot_enc_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[0],
++ cam->rot_enc_bufs_vaddr[0],
++ cam->rot_enc_bufs[0]);
++ }
++ if (cam->rot_enc_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[1],
++ cam->rot_enc_bufs_vaddr[1],
++ cam->rot_enc_bufs[1]);
++ }
++ cam->rot_enc_buf_size[0] =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->rot_enc_bufs_vaddr[0] =
++ (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[0],
++ &cam->rot_enc_bufs[0],
++ GFP_DMA | GFP_KERNEL);
++ if (!cam->rot_enc_bufs_vaddr[0]) {
++ printk(KERN_ERR "alloc enc_bufs0\n");
++ return -ENOMEM;
++ }
++ cam->rot_enc_buf_size[1] =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->rot_enc_bufs_vaddr[1] =
++ (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[1],
++ &cam->rot_enc_bufs[1],
++ GFP_DMA | GFP_KERNEL);
++ if (!cam->rot_enc_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[0],
++ cam->rot_enc_bufs_vaddr[0],
++ cam->rot_enc_bufs[0]);
++ cam->rot_enc_bufs_vaddr[0] = NULL;
++ cam->rot_enc_bufs[0] = 0;
++ printk(KERN_ERR "alloc enc_bufs1\n");
++ return -ENOMEM;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ enc.csi_prp_enc_mem.out_pixel_fmt,
++ enc.csi_prp_enc_mem.out_width,
++ enc.csi_prp_enc_mem.out_height,
++ enc.csi_prp_enc_mem.out_width,
++ IPU_ROTATE_NONE,
++ cam->rot_enc_bufs[0],
++ cam->rot_enc_bufs[1], 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "CSI_PRP_ENC_MEM err\n");
++ return err;
++ }
++
++ err = ipu_init_channel(cam->ipu, MEM_ROT_ENC_MEM, NULL);
++ if (err != 0) {
++ printk(KERN_ERR "MEM_ROT_ENC_MEM channel err\n");
++ return err;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_ENC_MEM,
++ IPU_INPUT_BUFFER,
++ enc.csi_prp_enc_mem.out_pixel_fmt,
++ enc.csi_prp_enc_mem.out_width,
++ enc.csi_prp_enc_mem.out_height,
++ enc.csi_prp_enc_mem.out_width,
++ cam->rotation,
++ cam->rot_enc_bufs[0],
++ cam->rot_enc_bufs[1], 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "MEM_ROT_ENC_MEM input buffer\n");
++ return err;
++ }
++
++ err =
++ ipu_init_channel_buffer(cam->ipu, MEM_ROT_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ enc.csi_prp_enc_mem.out_pixel_fmt,
++ enc.csi_prp_enc_mem.out_height,
++ enc.csi_prp_enc_mem.out_width,
++ cam->v2f.fmt.pix.bytesperline /
++ bytes_per_pixel(enc.csi_prp_enc_mem.
++ out_pixel_fmt),
++ IPU_ROTATE_NONE,
++ dummy, dummy, 0,
++ cam->offset.u_offset,
++ cam->offset.v_offset);
++ if (err != 0) {
++ printk(KERN_ERR "MEM_ROT_ENC_MEM output buffer\n");
++ return err;
++ }
++
++ err = ipu_link_channels(cam->ipu,
++ CSI_PRP_ENC_MEM, MEM_ROT_ENC_MEM);
++ if (err < 0) {
++ printk(KERN_ERR
++ "link CSI_PRP_ENC_MEM-MEM_ROT_ENC_MEM\n");
++ return err;
++ }
++
++ err = ipu_enable_channel(cam->ipu, CSI_PRP_ENC_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
++ return err;
++ }
++ err = ipu_enable_channel(cam->ipu, MEM_ROT_ENC_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel MEM_ROT_ENC_MEM\n");
++ return err;
++ }
++
++ ipu_select_buffer(cam->ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ ipu_select_buffer(cam->ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER, 1);
++ } else {
++ err =
++ ipu_init_channel_buffer(cam->ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ enc.csi_prp_enc_mem.out_pixel_fmt,
++ enc.csi_prp_enc_mem.out_width,
++ enc.csi_prp_enc_mem.out_height,
++ cam->v2f.fmt.pix.bytesperline /
++ bytes_per_pixel(enc.csi_prp_enc_mem.
++ out_pixel_fmt),
++ cam->rotation,
++ dummy, dummy, 0,
++ cam->offset.u_offset,
++ cam->offset.v_offset);
++ if (err != 0) {
++ printk(KERN_ERR "CSI_PRP_ENC_MEM output buffer\n");
++ return err;
++ }
++ err = ipu_enable_channel(cam->ipu, CSI_PRP_ENC_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
++ return err;
++ }
++ }
++
++ return err;
++}
++
++/*!
++ * function to update physical buffer address for encorder IDMA channel
++ *
++ * @param eba physical buffer address for encorder IDMA channel
++ * @param buffer_num int buffer 0 or buffer 1
++ *
++ * @return status
++ */
++static int prp_enc_eba_update(struct ipu_soc *ipu, dma_addr_t eba,
++ int *buffer_num)
++{
++ int err = 0;
++
++ pr_debug("eba %x\n", eba);
++ if (grotation >= IPU_ROTATE_90_RIGHT) {
++ err = ipu_update_channel_buffer(ipu, MEM_ROT_ENC_MEM,
++ IPU_OUTPUT_BUFFER, *buffer_num,
++ eba);
++ } else {
++ err = ipu_update_channel_buffer(ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER, *buffer_num,
++ eba);
++ }
++ if (err != 0) {
++ if (grotation >= IPU_ROTATE_90_RIGHT) {
++ ipu_clear_buffer_ready(ipu, MEM_ROT_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ *buffer_num);
++ err = ipu_update_channel_buffer(ipu, MEM_ROT_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ *buffer_num,
++ eba);
++ } else {
++ ipu_clear_buffer_ready(ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ *buffer_num);
++ err = ipu_update_channel_buffer(ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ *buffer_num,
++ eba);
++ }
++
++ if (err != 0) {
++ pr_err("ERROR: v4l2 capture: fail to update "
++ "buf%d\n", *buffer_num);
++ return err;
++ }
++ }
++
++ if (grotation >= IPU_ROTATE_90_RIGHT) {
++ ipu_select_buffer(ipu, MEM_ROT_ENC_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num);
++ } else {
++ ipu_select_buffer(ipu, CSI_PRP_ENC_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num);
++ }
++
++ *buffer_num = (*buffer_num == 0) ? 1 : 0;
++ return 0;
++}
++
++/*!
++ * Enable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_enc_enabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++ CAMERA_TRACE("IPU:In prp_enc_enabling_tasks\n");
++
++ cam->dummy_frame.vaddress = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->dummy_frame.paddress,
++ GFP_DMA | GFP_KERNEL);
++ if (cam->dummy_frame.vaddress == 0) {
++ pr_err("ERROR: v4l2 capture: Allocate dummy frame "
++ "failed.\n");
++ return -ENOBUFS;
++ }
++ cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
++ cam->dummy_frame.buffer.length =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
++
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_ENC_ROT_OUT_EOF,
++ prp_enc_callback, 0, "Mxc Camera", cam);
++ } else {
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_ENC_OUT_EOF,
++ prp_enc_callback, 0, "Mxc Camera", cam);
++ }
++ if (err != 0) {
++ printk(KERN_ERR "Error registering rot irq\n");
++ return err;
++ }
++
++ err = prp_enc_setup(cam);
++ if (err != 0) {
++ printk(KERN_ERR "prp_enc_setup %d\n", err);
++ return err;
++ }
++
++ return err;
++}
++
++/*!
++ * Disable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++static int prp_enc_disabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_ENC_ROT_OUT_EOF, cam);
++ ipu_unlink_channels(cam->ipu, CSI_PRP_ENC_MEM, MEM_ROT_ENC_MEM);
++ }
++
++ err = ipu_disable_channel(cam->ipu, CSI_PRP_ENC_MEM, true);
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT)
++ err |= ipu_disable_channel(cam->ipu, MEM_ROT_ENC_MEM, true);
++
++ ipu_uninit_channel(cam->ipu, CSI_PRP_ENC_MEM);
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT)
++ ipu_uninit_channel(cam->ipu, MEM_ROT_ENC_MEM);
++
++ if (cam->dummy_frame.vaddress != 0) {
++ dma_free_coherent(0, cam->dummy_frame.buffer.length,
++ cam->dummy_frame.vaddress,
++ cam->dummy_frame.paddress);
++ cam->dummy_frame.vaddress = 0;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_enc_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_enc_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ /* free csi eof irq firstly.
++ * when disable csi, wait for idmac eof.
++ * it requests eof irq again */
++ if (cam->rotation < IPU_ROTATE_90_RIGHT)
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_ENC_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select PRP-ENC as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++int prp_enc_select(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (cam) {
++ cam->enc_update_eba = prp_enc_eba_update;
++ cam->enc_enable = prp_enc_enabling_tasks;
++ cam->enc_disable = prp_enc_disabling_tasks;
++ cam->enc_enable_csi = prp_enc_enable_csi;
++ cam->enc_disable_csi = prp_enc_disable_csi;
++ } else {
++ err = -EIO;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(prp_enc_select);
++
++/*!
++ * function to de-select PRP-ENC as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++int prp_enc_deselect(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (cam) {
++ cam->enc_update_eba = NULL;
++ cam->enc_enable = NULL;
++ cam->enc_disable = NULL;
++ cam->enc_enable_csi = NULL;
++ cam->enc_disable_csi = NULL;
++ if (cam->rot_enc_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[0],
++ cam->rot_enc_bufs_vaddr[0],
++ cam->rot_enc_bufs[0]);
++ cam->rot_enc_bufs_vaddr[0] = NULL;
++ cam->rot_enc_bufs[0] = 0;
++ }
++ if (cam->rot_enc_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[1],
++ cam->rot_enc_bufs_vaddr[1],
++ cam->rot_enc_bufs[1]);
++ cam->rot_enc_bufs_vaddr[1] = NULL;
++ cam->rot_enc_bufs[1] = 0;
++ }
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(prp_enc_deselect);
++
++/*!
++ * Init the Encorder channels
++ *
++ * @return Error code indicating success or failure
++ */
++__init int prp_enc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit the Encorder channels
++ *
++ */
++void __exit prp_enc_exit(void)
++{
++}
++
++module_init(prp_enc_init);
++module_exit(prp_enc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP ENC Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ipu_prp_sw.h linux-openelec/drivers/media/platform/mxc/capture/ipu_prp_sw.h
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ipu_prp_sw.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ipu_prp_sw.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,43 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_prp_sw.h
++ *
++ * @brief This file contains the IPU PRP use case driver header.
++ *
++ * @ingroup IPU
++ */
++
++#ifndef _INCLUDE_IPU__PRP_SW_H_
++#define _INCLUDE_IPU__PRP_SW_H_
++
++int csi_enc_select(void *private);
++int csi_enc_deselect(void *private);
++int prp_enc_select(void *private);
++int prp_enc_deselect(void *private);
++#ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++int prp_vf_sdc_select(void *private);
++int prp_vf_sdc_deselect(void *private);
++int prp_vf_sdc_select_bg(void *private);
++int prp_vf_sdc_deselect_bg(void *private);
++#else
++int foreground_sdc_select(void *private);
++int foreground_sdc_deselect(void *private);
++int bg_overlay_sdc_select(void *private);
++int bg_overlay_sdc_deselect(void *private);
++#endif
++int prp_still_select(void *private);
++int prp_still_deselect(void *private);
++
++#endif
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc_bg.c linux-openelec/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc_bg.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc_bg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc_bg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,521 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_prp_vf_sdc_bg.c
++ *
++ * @brief IPU Use case for PRP-VF back-ground
++ *
++ * @ingroup IPU
++ */
++#include <linux/dma-mapping.h>
++#include <linux/fb.h>
++#include <linux/ipu.h>
++#include <linux/module.h>
++#include <mach/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++static int buffer_num;
++static int buffer_ready;
++static struct ipu_soc *disp_ipu;
++
++static void get_disp_ipu(cam_data *cam)
++{
++ if (cam->output > 2)
++ disp_ipu = ipu_get_soc(1); /* using DISP4 */
++ else
++ disp_ipu = ipu_get_soc(0);
++}
++
++/*
++ * Function definitions
++ */
++
++/*!
++ * SDC V-Sync callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prpvf_sdc_vsync_callback(int irq, void *dev_id)
++{
++ cam_data *cam = dev_id;
++ if (buffer_ready > 0) {
++ ipu_select_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ buffer_ready--;
++ }
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * VF EOF callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prpvf_vf_eof_callback(int irq, void *dev_id)
++{
++ cam_data *cam = dev_id;
++ pr_debug("buffer_ready %d buffer_num %d\n", buffer_ready, buffer_num);
++
++ ipu_select_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_INPUT_BUFFER, buffer_num);
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, buffer_num);
++ buffer_ready++;
++ return IRQ_HANDLED;
++}
++
++/*!
++ * prpvf_start - start the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int prpvf_start(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ ipu_channel_params_t vf;
++ u32 format;
++ u32 offset;
++ u32 bpp, size = 3;
++ int err = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (!cam) {
++ printk(KERN_ERR "private is NULL\n");
++ return -EIO;
++ }
++
++ if (cam->overlay_active == true) {
++ pr_debug("already start.\n");
++ return 0;
++ }
++
++ get_disp_ipu(cam);
++
++ format = cam->v4l2_fb.fmt.pixelformat;
++ if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_BGR24) {
++ bpp = 3, size = 3;
++ pr_info("BGR24\n");
++ } else if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_RGB565) {
++ bpp = 2, size = 2;
++ pr_info("RGB565\n");
++ } else if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_BGR32) {
++ bpp = 4, size = 4;
++ pr_info("BGR32\n");
++ } else {
++ printk(KERN_ERR
++ "unsupported fix format from the framebuffer.\n");
++ return -EINVAL;
++ }
++
++ offset = cam->v4l2_fb.fmt.bytesperline * cam->win.w.top +
++ size * cam->win.w.left;
++
++ if (cam->v4l2_fb.base == 0)
++ printk(KERN_ERR "invalid frame buffer address.\n");
++ else
++ offset += (u32) cam->v4l2_fb.base;
++
++ memset(&vf, 0, sizeof(ipu_channel_params_t));
++ ipu_csi_get_window_size(cam->ipu, &vf.csi_prp_vf_mem.in_width,
++ &vf.csi_prp_vf_mem.in_height, cam->csi);
++ vf.csi_prp_vf_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
++ vf.csi_prp_vf_mem.out_width = cam->win.w.width;
++ vf.csi_prp_vf_mem.out_height = cam->win.w.height;
++ vf.csi_prp_vf_mem.csi = cam->csi;
++ if (cam->vf_rotation >= IPU_ROTATE_90_RIGHT) {
++ vf.csi_prp_vf_mem.out_width = cam->win.w.height;
++ vf.csi_prp_vf_mem.out_height = cam->win.w.width;
++ }
++ vf.csi_prp_vf_mem.out_pixel_fmt = format;
++ size = cam->win.w.width * cam->win.w.height * size;
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ vf.csi_prp_vf_mem.mipi_en = true;
++ vf.csi_prp_vf_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ vf.csi_prp_vf_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ vf.csi_prp_vf_mem.mipi_en = false;
++ vf.csi_prp_vf_mem.mipi_vc = 0;
++ vf.csi_prp_vf_mem.mipi_id = 0;
++ }
++ } else {
++ vf.csi_prp_vf_mem.mipi_en = false;
++ vf.csi_prp_vf_mem.mipi_vc = 0;
++ vf.csi_prp_vf_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ err = ipu_init_channel(cam->ipu, CSI_PRP_VF_MEM, &vf);
++ if (err != 0)
++ goto out_4;
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0], cam->vf_bufs[0]);
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1], cam->vf_bufs[1]);
++ }
++ cam->vf_bufs_size[0] = PAGE_ALIGN(size);
++ cam->vf_bufs_vaddr[0] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[0],
++ &cam->vf_bufs[0],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[0] == NULL) {
++ printk(KERN_ERR "Error to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_3;
++ }
++ cam->vf_bufs_size[1] = PAGE_ALIGN(size);
++ cam->vf_bufs_vaddr[1] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[1],
++ &cam->vf_bufs[1],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[1] == NULL) {
++ printk(KERN_ERR "Error to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_3;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ format, vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ IPU_ROTATE_NONE,
++ cam->vf_bufs[0],
++ cam->vf_bufs[1],
++ 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error initializing CSI_PRP_VF_MEM\n");
++ goto out_3;
++ }
++ err = ipu_init_channel(cam->ipu, MEM_ROT_VF_MEM, NULL);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM channel\n");
++ goto out_3;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_INPUT_BUFFER,
++ format, vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ cam->vf_rotation,
++ cam->vf_bufs[0],
++ cam->vf_bufs[1],
++ 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM input buffer\n");
++ goto out_2;
++ }
++
++ if (cam->vf_rotation >= IPU_ROTATE_90_RIGHT) {
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ format,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ cam->overlay_fb->var.xres * bpp,
++ IPU_ROTATE_NONE,
++ offset, 0, 0, 0, 0);
++
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM output buffer\n");
++ goto out_2;
++ }
++ } else {
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ format,
++ vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ cam->overlay_fb->var.xres * bpp,
++ IPU_ROTATE_NONE,
++ offset, 0, 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM output buffer\n");
++ goto out_2;
++ }
++ }
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF,
++ prpvf_vf_eof_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR
++ "Error registering IPU_IRQ_PRP_VF_OUT_EOF irq.\n");
++ goto out_2;
++ }
++
++ ipu_clear_irq(disp_ipu, IPU_IRQ_BG_SF_END);
++ err = ipu_request_irq(disp_ipu, IPU_IRQ_BG_SF_END,
++ prpvf_sdc_vsync_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering IPU_IRQ_BG_SF_END irq.\n");
++ goto out_1;
++ }
++
++ ipu_enable_channel(cam->ipu, CSI_PRP_VF_MEM);
++ ipu_enable_channel(cam->ipu, MEM_ROT_VF_MEM);
++
++ buffer_num = 0;
++ buffer_ready = 0;
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM, IPU_OUTPUT_BUFFER, 0);
++
++ cam->overlay_active = true;
++ return err;
++
++out_1:
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF, NULL);
++out_2:
++ ipu_uninit_channel(cam->ipu, MEM_ROT_VF_MEM);
++out_3:
++ ipu_uninit_channel(cam->ipu, CSI_PRP_VF_MEM);
++out_4:
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0], cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1], cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[0],
++ cam->rot_vf_bufs_vaddr[0],
++ cam->rot_vf_bufs[0]);
++ cam->rot_vf_bufs_vaddr[0] = NULL;
++ cam->rot_vf_bufs[0] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[1],
++ cam->rot_vf_bufs_vaddr[1],
++ cam->rot_vf_bufs[1]);
++ cam->rot_vf_bufs_vaddr[1] = NULL;
++ cam->rot_vf_bufs[1] = 0;
++ }
++ return err;
++}
++
++/*!
++ * prpvf_stop - stop the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int prpvf_stop(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->overlay_active == false)
++ return 0;
++
++ ipu_free_irq(disp_ipu, IPU_IRQ_BG_SF_END, cam);
++
++ ipu_disable_channel(cam->ipu, CSI_PRP_VF_MEM, true);
++ ipu_disable_channel(cam->ipu, MEM_ROT_VF_MEM, true);
++ ipu_uninit_channel(cam->ipu, CSI_PRP_VF_MEM);
++ ipu_uninit_channel(cam->ipu, MEM_ROT_VF_MEM);
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0], cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1], cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[0],
++ cam->rot_vf_bufs_vaddr[0],
++ cam->rot_vf_bufs[0]);
++ cam->rot_vf_bufs_vaddr[0] = NULL;
++ cam->rot_vf_bufs[0] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[1],
++ cam->rot_vf_bufs_vaddr[1],
++ cam->rot_vf_bufs[1]);
++ cam->rot_vf_bufs_vaddr[1] = NULL;
++ cam->rot_vf_bufs[1] = 0;
++ }
++
++ buffer_num = 0;
++ buffer_ready = 0;
++ cam->overlay_active = false;
++ return 0;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_vf_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_vf_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ /* free csi eof irq firstly.
++ * when disable csi, wait for idmac eof.
++ * it requests eof irq again */
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select PRP-VF as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int prp_vf_sdc_select_bg(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->vf_start_sdc = prpvf_start;
++ cam->vf_stop_sdc = prpvf_stop;
++ cam->vf_enable_csi = prp_vf_enable_csi;
++ cam->vf_disable_csi = prp_vf_disable_csi;
++ cam->overlay_active = false;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(prp_vf_sdc_select_bg);
++
++/*!
++ * function to de-select PRP-VF as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int prp_vf_sdc_deselect_bg(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->vf_start_sdc = NULL;
++ cam->vf_stop_sdc = NULL;
++ cam->vf_enable_csi = NULL;
++ cam->vf_disable_csi = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(prp_vf_sdc_deselect_bg);
++
++/*!
++ * Init viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++__init int prp_vf_sdc_init_bg(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++void __exit prp_vf_sdc_exit_bg(void)
++{
++}
++
++module_init(prp_vf_sdc_init_bg);
++module_exit(prp_vf_sdc_exit_bg);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP VF SDC Backgroud Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc.c linux-openelec/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,582 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++/* * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_prp_vf_sdc.c
++ *
++ * @brief IPU Use case for PRP-VF
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/console.h>
++#include <linux/ipu.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <mach/hardware.h>
++#include <mach/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++static int buffer_num;
++static struct ipu_soc *disp_ipu;
++
++static void get_disp_ipu(cam_data *cam)
++{
++ if (cam->output > 2)
++ disp_ipu = ipu_get_soc(1); /* using DISP4 */
++ else
++ disp_ipu = ipu_get_soc(0);
++}
++
++static irqreturn_t prpvf_rot_eof_callback(int irq, void *dev_id)
++{
++ cam_data *cam = dev_id;
++ pr_debug("buffer_num %d\n", buffer_num);
++
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP) {
++ ipu_select_buffer(disp_ipu, MEM_FG_SYNC,
++ IPU_INPUT_BUFFER, buffer_num);
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++ ipu_select_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER, buffer_num);
++ } else {
++ ipu_select_buffer(disp_ipu, MEM_FG_SYNC,
++ IPU_INPUT_BUFFER, buffer_num);
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, buffer_num);
++ }
++ return IRQ_HANDLED;
++}
++/*
++ * Function definitions
++ */
++
++/*!
++ * prpvf_start - start the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int prpvf_start(void *private)
++{
++ struct fb_var_screeninfo fbvar;
++ struct fb_info *fbi = NULL;
++ cam_data *cam = (cam_data *) private;
++ ipu_channel_params_t vf;
++ u32 vf_out_format = 0;
++ u32 size = 2, temp = 0;
++ int err = 0, i = 0;
++ short *tmp, color;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (!cam) {
++ printk(KERN_ERR "private is NULL\n");
++ return -EIO;
++ }
++
++ if (cam->overlay_active == true) {
++ pr_debug("already started.\n");
++ return 0;
++ }
++
++ get_disp_ipu(cam);
++
++ for (i = 0; i < num_registered_fb; i++) {
++ char *idstr = registered_fb[i]->fix.id;
++ if (((strcmp(idstr, "DISP3 FG") == 0) && (cam->output < 3)) ||
++ ((strcmp(idstr, "DISP4 FG") == 0) && (cam->output >= 3))) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++
++ if (fbi == NULL) {
++ printk(KERN_ERR "DISP FG fb not found\n");
++ return -EPERM;
++ }
++
++ fbvar = fbi->var;
++
++ /* Store the overlay frame buffer's original std */
++ cam->fb_origin_std = fbvar.nonstd;
++
++ if (cam->devtype == IMX5_V4L2 || cam->devtype == IMX6_V4L2) {
++ /* Use DP to do CSC so that we can get better performance */
++ vf_out_format = IPU_PIX_FMT_UYVY;
++ fbvar.nonstd = vf_out_format;
++ color = 0x80;
++ } else {
++ vf_out_format = IPU_PIX_FMT_RGB565;
++ fbvar.nonstd = 0;
++ color = 0x0;
++ }
++
++ fbvar.bits_per_pixel = 16;
++ fbvar.xres = fbvar.xres_virtual = cam->win.w.width;
++ fbvar.yres = cam->win.w.height;
++ fbvar.yres_virtual = cam->win.w.height * 2;
++ fbvar.yoffset = 0;
++ fbvar.accel_flags = FB_ACCEL_DOUBLE_FLAG;
++ fbvar.activate |= FB_ACTIVATE_FORCE;
++ fb_set_var(fbi, &fbvar);
++
++ ipu_disp_set_window_pos(disp_ipu, MEM_FG_SYNC, cam->win.w.left,
++ cam->win.w.top);
++
++ /* Fill black color for framebuffer */
++ tmp = (short *) fbi->screen_base;
++ for (i = 0; i < (fbi->fix.line_length * fbi->var.yres)/2;
++ i++, tmp++)
++ *tmp = color;
++
++ console_lock();
++ fb_blank(fbi, FB_BLANK_UNBLANK);
++ console_unlock();
++
++ /* correct display ch buffer address */
++ ipu_update_channel_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER,
++ 0, fbi->fix.smem_start +
++ (fbi->fix.line_length * fbvar.yres));
++ ipu_update_channel_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER,
++ 1, fbi->fix.smem_start);
++
++ memset(&vf, 0, sizeof(ipu_channel_params_t));
++ ipu_csi_get_window_size(cam->ipu, &vf.csi_prp_vf_mem.in_width,
++ &vf.csi_prp_vf_mem.in_height, cam->csi);
++ vf.csi_prp_vf_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
++ vf.csi_prp_vf_mem.out_width = cam->win.w.width;
++ vf.csi_prp_vf_mem.out_height = cam->win.w.height;
++ vf.csi_prp_vf_mem.csi = cam->csi;
++ if (cam->vf_rotation >= IPU_ROTATE_90_RIGHT) {
++ vf.csi_prp_vf_mem.out_width = cam->win.w.height;
++ vf.csi_prp_vf_mem.out_height = cam->win.w.width;
++ }
++ vf.csi_prp_vf_mem.out_pixel_fmt = vf_out_format;
++ size = cam->win.w.width * cam->win.w.height * size;
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ vf.csi_prp_vf_mem.mipi_en = true;
++ vf.csi_prp_vf_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ vf.csi_prp_vf_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ vf.csi_prp_vf_mem.mipi_en = false;
++ vf.csi_prp_vf_mem.mipi_vc = 0;
++ vf.csi_prp_vf_mem.mipi_id = 0;
++ }
++ } else {
++ vf.csi_prp_vf_mem.mipi_en = false;
++ vf.csi_prp_vf_mem.mipi_vc = 0;
++ vf.csi_prp_vf_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ err = ipu_init_channel(cam->ipu, CSI_PRP_VF_MEM, &vf);
++ if (err != 0)
++ goto out_5;
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ }
++ cam->vf_bufs_size[0] = PAGE_ALIGN(size);
++ cam->vf_bufs_vaddr[0] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[0],
++ (dma_addr_t *) &
++ cam->vf_bufs[0],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[0] == NULL) {
++ printk(KERN_ERR "Error to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_4;
++ }
++ cam->vf_bufs_size[1] = PAGE_ALIGN(size);
++ cam->vf_bufs_vaddr[1] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[1],
++ (dma_addr_t *) &
++ cam->vf_bufs[1],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[1] == NULL) {
++ printk(KERN_ERR "Error to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_3;
++ }
++ pr_debug("vf_bufs %x %x\n", cam->vf_bufs[0], cam->vf_bufs[1]);
++
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP) {
++ err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ vf_out_format,
++ vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ IPU_ROTATE_NONE,
++ cam->vf_bufs[0], cam->vf_bufs[1],
++ 0, 0, 0);
++ if (err != 0)
++ goto out_3;
++
++ err = ipu_init_channel(cam->ipu, MEM_ROT_VF_MEM, NULL);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM channel\n");
++ goto out_3;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_INPUT_BUFFER,
++ vf_out_format,
++ vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ cam->vf_rotation,
++ cam->vf_bufs[0],
++ cam->vf_bufs[1],
++ 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM input buffer\n");
++ goto out_2;
++ }
++
++ if (cam->vf_rotation < IPU_ROTATE_90_RIGHT) {
++ temp = vf.csi_prp_vf_mem.out_width;
++ vf.csi_prp_vf_mem.out_width =
++ vf.csi_prp_vf_mem.out_height;
++ vf.csi_prp_vf_mem.out_height = temp;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ vf_out_format,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ IPU_ROTATE_NONE,
++ fbi->fix.smem_start +
++ (fbi->fix.line_length *
++ fbi->var.yres),
++ fbi->fix.smem_start, 0, 0, 0);
++
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM output buffer\n");
++ goto out_2;
++ }
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_PRP_VF_ROT_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_VF_ROT_OUT_EOF,
++ prpvf_rot_eof_callback,
++ 0, "Mxc Camera", cam);
++ if (err < 0) {
++ printk(KERN_ERR "Error request irq:IPU_IRQ_PRP_VF_ROT_OUT_EOF\n");
++ goto out_2;
++ }
++
++ err = ipu_link_channels(cam->ipu,
++ CSI_PRP_VF_MEM, MEM_ROT_VF_MEM);
++ if (err < 0) {
++ printk(KERN_ERR
++ "Error link CSI_PRP_VF_MEM-MEM_ROT_VF_MEM\n");
++ goto out_1;
++ }
++
++ ipu_enable_channel(cam->ipu, CSI_PRP_VF_MEM);
++ ipu_enable_channel(cam->ipu, MEM_ROT_VF_MEM);
++
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, 1);
++ ipu_select_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ } else {
++ err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ vf_out_format, cam->win.w.width,
++ cam->win.w.height,
++ cam->win.w.width,
++ cam->vf_rotation,
++ fbi->fix.smem_start +
++ (fbi->fix.line_length *
++ fbi->var.yres),
++ fbi->fix.smem_start, 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error initializing CSI_PRP_VF_MEM\n");
++ goto out_4;
++ }
++ ipu_clear_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF,
++ prpvf_rot_eof_callback,
++ 0, "Mxc Camera", cam);
++ if (err < 0) {
++ printk(KERN_ERR "Error request irq:IPU_IRQ_PRP_VF_OUT_EOF\n");
++ goto out_4;
++ }
++
++ ipu_enable_channel(cam->ipu, CSI_PRP_VF_MEM);
++
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ }
++
++ cam->overlay_active = true;
++ return err;
++
++out_1:
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF, NULL);
++out_2:
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP)
++ ipu_uninit_channel(cam->ipu, MEM_ROT_VF_MEM);
++out_3:
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++out_4:
++ ipu_uninit_channel(cam->ipu, CSI_PRP_VF_MEM);
++out_5:
++ return err;
++}
++
++/*!
++ * prpvf_stop - stop the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int prpvf_stop(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0, i = 0;
++ struct fb_info *fbi = NULL;
++ struct fb_var_screeninfo fbvar;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->overlay_active == false)
++ return 0;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ char *idstr = registered_fb[i]->fix.id;
++ if (((strcmp(idstr, "DISP3 FG") == 0) && (cam->output < 3)) ||
++ ((strcmp(idstr, "DISP4 FG") == 0) && (cam->output >= 3))) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++
++ if (fbi == NULL) {
++ printk(KERN_ERR "DISP FG fb not found\n");
++ return -EPERM;
++ }
++
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP) {
++ ipu_unlink_channels(cam->ipu, CSI_PRP_VF_MEM, MEM_ROT_VF_MEM);
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_ROT_OUT_EOF, cam);
++ }
++ buffer_num = 0;
++
++ ipu_disable_channel(cam->ipu, CSI_PRP_VF_MEM, true);
++
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP) {
++ ipu_disable_channel(cam->ipu, MEM_ROT_VF_MEM, true);
++ ipu_uninit_channel(cam->ipu, MEM_ROT_VF_MEM);
++ }
++ ipu_uninit_channel(cam->ipu, CSI_PRP_VF_MEM);
++
++ console_lock();
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ console_unlock();
++
++ /* Set the overlay frame buffer std to what it is used to be */
++ fbvar = fbi->var;
++ fbvar.accel_flags = FB_ACCEL_TRIPLE_FLAG;
++ fbvar.nonstd = cam->fb_origin_std;
++ fbvar.activate |= FB_ACTIVATE_FORCE;
++ fb_set_var(fbi, &fbvar);
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++
++ cam->overlay_active = false;
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_vf_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_vf_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ /* free csi eof irq firstly.
++ * when disable csi, wait for idmac eof.
++ * it requests eof irq again */
++ if (cam->vf_rotation < IPU_ROTATE_VERT_FLIP)
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select PRP-VF as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int prp_vf_sdc_select(void *private)
++{
++ cam_data *cam;
++ int err = 0;
++ if (private) {
++ cam = (cam_data *) private;
++ cam->vf_start_sdc = prpvf_start;
++ cam->vf_stop_sdc = prpvf_stop;
++ cam->vf_enable_csi = prp_vf_enable_csi;
++ cam->vf_disable_csi = prp_vf_disable_csi;
++ cam->overlay_active = false;
++ } else
++ err = -EIO;
++
++ return err;
++}
++EXPORT_SYMBOL(prp_vf_sdc_select);
++
++/*!
++ * function to de-select PRP-VF as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return int
++ */
++int prp_vf_sdc_deselect(void *private)
++{
++ cam_data *cam;
++
++ if (private) {
++ cam = (cam_data *) private;
++ cam->vf_start_sdc = NULL;
++ cam->vf_stop_sdc = NULL;
++ cam->vf_enable_csi = NULL;
++ cam->vf_disable_csi = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(prp_vf_sdc_deselect);
++
++/*!
++ * Init viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++__init int prp_vf_sdc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++void __exit prp_vf_sdc_exit(void)
++{
++}
++
++module_init(prp_vf_sdc_init);
++module_exit(prp_vf_sdc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP VF SDC Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ipu_still.c linux-openelec/drivers/media/platform/mxc/capture/ipu_still.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ipu_still.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ipu_still.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,268 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_still.c
++ *
++ * @brief IPU Use case for still image capture
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/module.h>
++#include <linux/semaphore.h>
++#include <linux/sched.h>
++#include <linux/ipu.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++static int callback_eof_flag;
++#ifndef CONFIG_MXC_IPU_V1
++static int buffer_num;
++#endif
++
++#ifdef CONFIG_MXC_IPU_V1
++static int callback_flag;
++/*
++ * Function definitions
++ */
++/*!
++ * CSI EOF callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prp_csi_eof_callback(int irq, void *dev_id)
++{
++	cam_data *cam = (cam_data *) dev_id;
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ callback_flag%2 ? 1 : 0);
++ if (callback_flag == 0)
++ ipu_enable_channel(cam->ipu, CSI_MEM);
++
++ callback_flag++;
++ return IRQ_HANDLED;
++}
++#endif
++
++/*!
++ * CSI callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prp_still_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ callback_eof_flag++;
++ if (callback_eof_flag < 5) {
++#ifndef CONFIG_MXC_IPU_V1
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++ ipu_select_buffer(cam->ipu, CSI_MEM,
++ IPU_OUTPUT_BUFFER, buffer_num);
++#endif
++ } else {
++ cam->still_counter++;
++ wake_up_interruptible(&cam->still_queue);
++ }
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * start csi->mem task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_still_start(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ u32 pixel_fmt;
++ int err;
++ ipu_channel_params_t params;
++
++ if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
++ pixel_fmt = IPU_PIX_FMT_YUV420P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12)
++ pixel_fmt = IPU_PIX_FMT_NV12;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P)
++ pixel_fmt = IPU_PIX_FMT_YUV422P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
++ pixel_fmt = IPU_PIX_FMT_UYVY;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
++ pixel_fmt = IPU_PIX_FMT_YUYV;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24)
++ pixel_fmt = IPU_PIX_FMT_BGR24;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
++ pixel_fmt = IPU_PIX_FMT_RGB24;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565)
++ pixel_fmt = IPU_PIX_FMT_RGB565;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32)
++ pixel_fmt = IPU_PIX_FMT_BGR32;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32)
++ pixel_fmt = IPU_PIX_FMT_RGB32;
++ else {
++ printk(KERN_ERR "format not supported\n");
++ return -EINVAL;
++ }
++
++ memset(&params, 0, sizeof(params));
++ err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
++ if (err != 0)
++ return err;
++
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ pixel_fmt, cam->v2f.fmt.pix.width,
++ cam->v2f.fmt.pix.height,
++ cam->v2f.fmt.pix.width, IPU_ROTATE_NONE,
++ cam->still_buf[0], cam->still_buf[1], 0,
++ 0, 0);
++ if (err != 0)
++ return err;
++
++#ifdef CONFIG_MXC_IPU_V1
++ ipu_clear_irq(IPU_IRQ_SENSOR_OUT_EOF);
++ err = ipu_request_irq(IPU_IRQ_SENSOR_OUT_EOF, prp_still_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering irq.\n");
++ return err;
++ }
++ callback_flag = 0;
++ callback_eof_flag = 0;
++ ipu_clear_irq(IPU_IRQ_SENSOR_EOF);
++ err = ipu_request_irq(IPU_IRQ_SENSOR_EOF, prp_csi_eof_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error IPU_IRQ_SENSOR_EOF\n");
++ return err;
++ }
++#else
++ callback_eof_flag = 0;
++ buffer_num = 0;
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
++ prp_still_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering irq.\n");
++ return err;
++ }
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 0);
++ ipu_enable_channel(cam->ipu, CSI_MEM);
++ ipu_enable_csi(cam->ipu, cam->csi);
++#endif
++
++ return err;
++}
++
++/*!
++ * stop csi->mem encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_still_stop(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++#ifdef CONFIG_MXC_IPU_V1
++ ipu_free_irq(IPU_IRQ_SENSOR_EOF, NULL);
++ ipu_free_irq(IPU_IRQ_SENSOR_OUT_EOF, cam);
++#else
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++#endif
++
++ ipu_disable_csi(cam->ipu, cam->csi);
++ ipu_disable_channel(cam->ipu, CSI_MEM, true);
++ ipu_uninit_channel(cam->ipu, CSI_MEM);
++
++ return err;
++}
++
++/*!
++ * function to select CSI_MEM as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++int prp_still_select(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->csi_start = prp_still_start;
++ cam->csi_stop = prp_still_stop;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(prp_still_select);
++
++/*!
++ * function to de-select CSI_MEM as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++int prp_still_deselect(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ err = prp_still_stop(cam);
++
++ if (cam) {
++ cam->csi_start = NULL;
++ cam->csi_stop = NULL;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(prp_still_deselect);
++
++/*!
++ * Init the Encoder channels
++ *
++ * @return Error code indicating success or failure
++ */
++__init int prp_still_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit the Encoder channels
++ *
++ */
++void __exit prp_still_exit(void)
++{
++}
++
++module_init(prp_still_init);
++module_exit(prp_still_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP STILL IMAGE Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/Kconfig linux-openelec/drivers/media/platform/mxc/capture/Kconfig
+--- linux-3.14.36/drivers/media/platform/mxc/capture/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,86 @@
++if VIDEO_MXC_CAPTURE
++
++menu "MXC Camera/V4L2 PRP Features support"
++config VIDEO_MXC_IPU_CAMERA
++ bool
++ depends on VIDEO_MXC_CAPTURE && MXC_IPU
++ default y
++
++config VIDEO_MXC_CSI_CAMERA
++ tristate "CSI camera support"
++ depends on VIDEO_MXC_CAPTURE && VIDEO_V4L2
++ ---help---
++ This is the video4linux2 capture driver based on CSI module.
++
++config MXC_CAMERA_OV5640
++ tristate "OmniVision ov5640 camera support"
++ depends on !VIDEO_MXC_EMMA_CAMERA && I2C
++ ---help---
++ If you plan to use the ov5640 Camera with your MXC system, say Y here.
++
++config MXC_CAMERA_OV5642
++ tristate "OmniVision ov5642 camera support"
++ depends on !VIDEO_MXC_EMMA_CAMERA && I2C
++ ---help---
++ If you plan to use the ov5642 Camera with your MXC system, say Y here.
++
++config MXC_CAMERA_OV5640_MIPI
++ tristate "OmniVision ov5640 camera support using mipi"
++ depends on !VIDEO_MXC_EMMA_CAMERA && I2C
++ ---help---
++ If you plan to use the ov5640 Camera with mipi interface in your MXC system, say Y here.
++
++config MXC_TVIN_ADV7180
++ tristate "Analog Device adv7180 TV Decoder Input support"
++ depends on !VIDEO_MXC_EMMA_CAMERA && I2C
++ ---help---
++ If you plan to use the adv7180 video decoder with your MXC system, say Y here.
++
++choice
++	prompt "Select Overlay Routing"
++ default MXC_IPU_DEVICE_QUEUE_SDC
++ depends on VIDEO_MXC_IPU_CAMERA && FB_MXC_SYNC_PANEL
++
++config MXC_IPU_DEVICE_QUEUE_SDC
++ tristate "Queue ipu device for overlay library"
++ depends on VIDEO_MXC_IPU_CAMERA
++ ---help---
++ Use case CSI->MEM->IPU DEVICE->SDC:
++	  Images from sensor will be first received in memory, then
++ queue to ipu device for processing if needed, and displaying
++ it on synchronous display with SDC use case.
++
++config MXC_IPU_PRP_VF_SDC
++ bool "Pre-Processor VF SDC library"
++ depends on VIDEO_MXC_IPU_CAMERA
++ ---help---
++ Use case PRP_VF_SDC:
++ Preprocessing image from smart sensor for viewfinder and
++ displaying it on synchronous display with SDC use case.
++ If SDC BG is selected, Rotation will not be supported.
++ CSI -> IC (PRP VF) -> MEM
++ MEM -> IC (ROT) -> MEM
++ MEM -> SDC (FG/BG)
++
++endchoice
++
++config MXC_IPU_PRP_ENC
++ tristate "Pre-processor Encoder library"
++ depends on VIDEO_MXC_IPU_CAMERA
++ default y
++ ---help---
++ Use case PRP_ENC:
++ Preprocessing image from smart sensor for encoder.
++ CSI -> IC (PRP ENC) -> MEM
++
++config MXC_IPU_CSI_ENC
++ tristate "IPU CSI Encoder library"
++ depends on VIDEO_MXC_IPU_CAMERA
++ default y
++ ---help---
++ Use case IPU_CSI_ENC:
++ Get raw image with CSI from smart sensor for encoder.
++ CSI -> MEM
++endmenu
++
++endif
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/Makefile linux-openelec/drivers/media/platform/mxc/capture/Makefile
+--- linux-3.14.36/drivers/media/platform/mxc/capture/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,21 @@
++obj-$(CONFIG_VIDEO_MXC_CSI_CAMERA) += fsl_csi.o csi_v4l2_capture.o
++
++ifeq ($(CONFIG_VIDEO_MXC_IPU_CAMERA),y)
++ obj-$(CONFIG_VIDEO_MXC_CAPTURE) += mxc_v4l2_capture.o
++ obj-$(CONFIG_MXC_IPU_PRP_VF_SDC) += ipu_prp_vf_sdc.o ipu_prp_vf_sdc_bg.o
++ obj-$(CONFIG_MXC_IPU_DEVICE_QUEUE_SDC) += ipu_fg_overlay_sdc.o ipu_bg_overlay_sdc.o
++ obj-$(CONFIG_MXC_IPU_PRP_ENC) += ipu_prp_enc.o ipu_still.o
++ obj-$(CONFIG_MXC_IPU_CSI_ENC) += ipu_csi_enc.o ipu_still.o
++endif
++
++ov5640_camera-objs := ov5640.o
++obj-$(CONFIG_MXC_CAMERA_OV5640) += ov5640_camera.o
++
++ov5642_camera-objs := ov5642.o
++obj-$(CONFIG_MXC_CAMERA_OV5642) += ov5642_camera.o
++
++ov5640_camera_mipi-objs := ov5640_mipi.o
++obj-$(CONFIG_MXC_CAMERA_OV5640_MIPI) += ov5640_camera_mipi.o
++
++adv7180_tvin-objs := adv7180.o
++obj-$(CONFIG_MXC_TVIN_ADV7180) += adv7180_tvin.o
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/mxc_v4l2_capture.c linux-openelec/drivers/media/platform/mxc/capture/mxc_v4l2_capture.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/mxc_v4l2_capture.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/mxc_v4l2_capture.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3102 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file drivers/media/video/mxc/capture/mxc_v4l2_capture.c
++ *
++ * @brief Mxc Video For Linux 2 driver
++ *
++ * @ingroup MXC_V4L2_CAPTURE
++ */
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/semaphore.h>
++#include <linux/pagemap.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++#include <linux/mxcfb.h>
++#include <linux/of_device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-int-device.h>
++#include <linux/fsl_devices.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++#define init_MUTEX(sem) sema_init(sem, 1)
++
++static struct platform_device_id imx_v4l2_devtype[] = {
++ {
++ .name = "v4l2-capture-imx5",
++ .driver_data = IMX5_V4L2,
++ }, {
++ .name = "v4l2-capture-imx6",
++ .driver_data = IMX6_V4L2,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_v4l2_devtype);
++
++static const struct of_device_id mxc_v4l2_dt_ids[] = {
++ {
++ .compatible = "fsl,imx6q-v4l2-capture",
++ .data = &imx_v4l2_devtype[IMX6_V4L2],
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(of, mxc_v4l2_dt_ids);
++
++static int video_nr = -1;
++
++/*! This data is used for the output to the display. */
++#define MXC_V4L2_CAPTURE_NUM_OUTPUTS 6
++#define MXC_V4L2_CAPTURE_NUM_INPUTS 2
++static struct v4l2_output mxc_capture_outputs[MXC_V4L2_CAPTURE_NUM_OUTPUTS] = {
++ {
++ .index = 0,
++ .name = "DISP3 BG",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 1,
++ .name = "DISP3 BG - DI1",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 2,
++ .name = "DISP3 FG",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 3,
++ .name = "DISP4 BG",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 4,
++ .name = "DISP4 BG - DI1",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 5,
++ .name = "DISP4 FG",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++};
++
++static struct v4l2_input mxc_capture_inputs[MXC_V4L2_CAPTURE_NUM_INPUTS] = {
++ {
++ .index = 0,
++ .name = "CSI IC MEM",
++ .type = V4L2_INPUT_TYPE_CAMERA,
++ .audioset = 0,
++ .tuner = 0,
++ .std = V4L2_STD_UNKNOWN,
++ .status = 0,
++ },
++ {
++ .index = 1,
++ .name = "CSI MEM",
++ .type = V4L2_INPUT_TYPE_CAMERA,
++ .audioset = 0,
++ .tuner = 0,
++ .std = V4L2_STD_UNKNOWN,
++ .status = V4L2_IN_ST_NO_POWER,
++ },
++};
++
++/*! List of TV input video formats supported. The video formats is corresponding
++ * to the v4l2_id in video_fmt_t.
++ * Currently, only PAL and NTSC is supported. Needs to be expanded in the
++ * future.
++ */
++typedef enum {
++ TV_NTSC = 0, /*!< Locked on (M) NTSC video signal. */
++ TV_PAL, /*!< (B, G, H, I, N)PAL video signal. */
++ TV_NOT_LOCKED, /*!< Not locked on a signal. */
++} video_fmt_idx;
++
++/*! Number of video standards supported (including 'not locked' signal). */
++#define TV_STD_MAX (TV_NOT_LOCKED + 1)
++
++/*! Video format structure. */
++typedef struct {
++ int v4l2_id; /*!< Video for linux ID. */
++ char name[16]; /*!< Name (e.g., "NTSC", "PAL", etc.) */
++ u16 raw_width; /*!< Raw width. */
++ u16 raw_height; /*!< Raw height. */
++ u16 active_width; /*!< Active width. */
++ u16 active_height; /*!< Active height. */
++ u16 active_top; /*!< Active top. */
++ u16 active_left; /*!< Active left. */
++} video_fmt_t;
++
++/*!
++ * Description of video formats supported.
++ *
++ * PAL: raw=720x625, active=720x576.
++ * NTSC: raw=720x525, active=720x480.
++ */
++static video_fmt_t video_fmts[] = {
++ { /*! NTSC */
++ .v4l2_id = V4L2_STD_NTSC,
++ .name = "NTSC",
++ .raw_width = 720, /* SENS_FRM_WIDTH */
++ .raw_height = 525, /* SENS_FRM_HEIGHT */
++ .active_width = 720, /* ACT_FRM_WIDTH */
++ .active_height = 480, /* ACT_FRM_HEIGHT */
++ .active_top = 13,
++ .active_left = 0,
++ },
++ { /*! (B, G, H, I, N) PAL */
++ .v4l2_id = V4L2_STD_PAL,
++ .name = "PAL",
++ .raw_width = 720,
++ .raw_height = 625,
++ .active_width = 720,
++ .active_height = 576,
++ .active_top = 0,
++ .active_left = 0,
++ },
++ { /*! Unlocked standard */
++ .v4l2_id = V4L2_STD_ALL,
++ .name = "Autodetect",
++ .raw_width = 720,
++ .raw_height = 625,
++ .active_width = 720,
++ .active_height = 576,
++ .active_top = 0,
++ .active_left = 0,
++ },
++};
++
++/*!* Standard index of TV. */
++static video_fmt_idx video_index = TV_NOT_LOCKED;
++
++static int mxc_v4l2_master_attach(struct v4l2_int_device *slave);
++static void mxc_v4l2_master_detach(struct v4l2_int_device *slave);
++static int start_preview(cam_data *cam);
++static int stop_preview(cam_data *cam);
++
++/*! Information about this driver. */
++static struct v4l2_int_master mxc_v4l2_master = {
++ .attach = mxc_v4l2_master_attach,
++ .detach = mxc_v4l2_master_detach,
++};
++
++/***************************************************************************
++ * Functions for handling Frame buffers.
++ **************************************************************************/
++
++/*!
++ * Free frame buffers
++ *
++ * @param cam Structure cam_data *
++ *
++ * @return status 0 success.
++ */
++static int mxc_free_frame_buf(cam_data *cam)
++{
++ int i;
++
++ pr_debug("MVC: In mxc_free_frame_buf\n");
++
++ for (i = 0; i < FRAME_NUM; i++) {
++ if (cam->frame[i].vaddress != 0) {
++ dma_free_coherent(0, cam->frame[i].buffer.length,
++ cam->frame[i].vaddress,
++ cam->frame[i].paddress);
++ cam->frame[i].vaddress = 0;
++ }
++ }
++
++ return 0;
++}
++
++/*!
++ * Allocate frame buffers
++ *
++ * @param cam Structure cam_data*
++ * @param count int number of buffer need to allocated
++ *
++ * @return status -0 Successfully allocated a buffer, -ENOBUFS failed.
++ */
++static int mxc_allocate_frame_buf(cam_data *cam, int count)
++{
++ int i;
++
++ pr_debug("In MVC:mxc_allocate_frame_buf - size=%d\n",
++ cam->v2f.fmt.pix.sizeimage);
++
++ for (i = 0; i < count; i++) {
++ cam->frame[i].vaddress =
++ dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->frame[i].paddress,
++ GFP_DMA | GFP_KERNEL);
++ if (cam->frame[i].vaddress == 0) {
++ pr_err("ERROR: v4l2 capture: "
++ "mxc_allocate_frame_buf failed.\n");
++ mxc_free_frame_buf(cam);
++ return -ENOBUFS;
++ }
++ cam->frame[i].buffer.index = i;
++ cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++ cam->frame[i].buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cam->frame[i].buffer.length =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->frame[i].buffer.memory = V4L2_MEMORY_MMAP;
++ cam->frame[i].buffer.m.offset = cam->frame[i].paddress;
++ cam->frame[i].index = i;
++ }
++
++ return 0;
++}
++
++/*!
++ * Free frame buffers status
++ *
++ * @param cam Structure cam_data *
++ *
++ * @return none
++ */
++static void mxc_free_frames(cam_data *cam)
++{
++ int i;
++
++ pr_debug("In MVC:mxc_free_frames\n");
++
++ for (i = 0; i < FRAME_NUM; i++)
++ cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++
++ cam->enc_counter = 0;
++ INIT_LIST_HEAD(&cam->ready_q);
++ INIT_LIST_HEAD(&cam->working_q);
++ INIT_LIST_HEAD(&cam->done_q);
++}
++
++/*!
++ * Return the buffer status
++ *
++ * @param cam Structure cam_data *
++ * @param buf Structure v4l2_buffer *
++ *
++ * @return status 0 success, EINVAL failed.
++ */
++static int mxc_v4l2_buffer_status(cam_data *cam, struct v4l2_buffer *buf)
++{
++ pr_debug("In MVC:mxc_v4l2_buffer_status\n");
++
++ if (buf->index < 0 || buf->index >= FRAME_NUM) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l2_buffer_status buffers "
++ "not allocated\n");
++ return -EINVAL;
++ }
++
++ memcpy(buf, &(cam->frame[buf->index].buffer), sizeof(*buf));
++ return 0;
++}
++
++static int mxc_v4l2_release_bufs(cam_data *cam)
++{
++ pr_debug("In MVC:mxc_v4l2_release_bufs\n");
++ return 0;
++}
++
++static int mxc_v4l2_prepare_bufs(cam_data *cam, struct v4l2_buffer *buf)
++{
++ pr_debug("In MVC:mxc_v4l2_prepare_bufs\n");
++
++ if (buf->index < 0 || buf->index >= FRAME_NUM || buf->length <
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l2_prepare_bufs buffers "
++ "not allocated,index=%d, length=%d\n", buf->index,
++ buf->length);
++ return -EINVAL;
++ }
++
++ cam->frame[buf->index].buffer.index = buf->index;
++ cam->frame[buf->index].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++ cam->frame[buf->index].buffer.length = buf->length;
++ cam->frame[buf->index].buffer.m.offset = cam->frame[buf->index].paddress
++ = buf->m.offset;
++ cam->frame[buf->index].buffer.type = buf->type;
++ cam->frame[buf->index].buffer.memory = V4L2_MEMORY_USERPTR;
++ cam->frame[buf->index].index = buf->index;
++
++ return 0;
++}
++
++/***************************************************************************
++ * Functions for handling the video stream.
++ **************************************************************************/
++
++/*!
++ * Indicates whether the palette is supported.
++ *
++ * @param palette V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_BGR24 or V4L2_PIX_FMT_BGR32
++ *
++ * @return 0 if failed
++ */
++static inline int valid_mode(u32 palette)
++{
++ return ((palette == V4L2_PIX_FMT_RGB565) ||
++ (palette == V4L2_PIX_FMT_BGR24) ||
++ (palette == V4L2_PIX_FMT_RGB24) ||
++ (palette == V4L2_PIX_FMT_BGR32) ||
++ (palette == V4L2_PIX_FMT_RGB32) ||
++ (palette == V4L2_PIX_FMT_YUV422P) ||
++ (palette == V4L2_PIX_FMT_UYVY) ||
++ (palette == V4L2_PIX_FMT_YUYV) ||
++ (palette == V4L2_PIX_FMT_YUV420) ||
++ (palette == V4L2_PIX_FMT_YVU420) ||
++ (palette == V4L2_PIX_FMT_NV12));
++}
++
++/*!
++ * Start the encoder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int mxc_streamon(cam_data *cam)
++{
++ struct mxc_v4l_frame *frame;
++ unsigned long lock_flags;
++ int err = 0;
++
++ pr_debug("In MVC:mxc_streamon\n");
++
++ if (NULL == cam) {
++ pr_err("ERROR! cam parameter is NULL\n");
++ return -1;
++ }
++
++ if (cam->capture_on) {
++ pr_err("ERROR: v4l2 capture: Capture stream has been turned "
++ " on\n");
++ return -1;
++ }
++
++ if (list_empty(&cam->ready_q)) {
++ pr_err("ERROR: v4l2 capture: mxc_streamon buffer has not been "
++ "queued yet\n");
++ return -EINVAL;
++ }
++ if (cam->enc_update_eba &&
++ cam->ready_q.prev == cam->ready_q.next) {
++ pr_err("ERROR: v4l2 capture: mxc_streamon buffer need "
++ "ping pong at least two buffers\n");
++ return -EINVAL;
++ }
++
++ cam->capture_pid = current->pid;
++
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ if (cam->enc_enable) {
++ err = cam->enc_enable(cam);
++ if (err != 0)
++ return err;
++ }
++
++ spin_lock_irqsave(&cam->queue_int_lock, lock_flags);
++ cam->ping_pong_csi = 0;
++ cam->local_buf_num = 0;
++ if (cam->enc_update_eba) {
++ frame =
++ list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&frame->queue, &cam->working_q);
++ frame->ipu_buf_num = cam->ping_pong_csi;
++ err = cam->enc_update_eba(cam->ipu, frame->buffer.m.offset,
++ &cam->ping_pong_csi);
++
++ frame =
++ list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&frame->queue, &cam->working_q);
++ frame->ipu_buf_num = cam->ping_pong_csi;
++ err |= cam->enc_update_eba(cam->ipu, frame->buffer.m.offset,
++ &cam->ping_pong_csi);
++ spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
++ } else {
++ spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
++ return -EINVAL;
++ }
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ if (cam->enc_enable_csi) {
++ err = cam->enc_enable_csi(cam);
++ if (err != 0)
++ return err;
++ }
++
++ cam->capture_on = true;
++
++ return err;
++}
++
++/*!
++ * Shut down the encoder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int mxc_streamoff(cam_data *cam)
++{
++ int err = 0;
++
++ pr_debug("In MVC:mxc_streamoff\n");
++
++ if (cam->capture_on == false)
++ return 0;
++
++ /* For both CSI--MEM and CSI--IC--MEM
++ * 1. wait for idmac eof
++ * 2. disable csi first
++ * 3. disable idmac
++ * 4. disable smfc (CSI--MEM channel)
++ */
++ if (mxc_capture_inputs[cam->current_input].name != NULL) {
++ if (cam->enc_disable_csi) {
++ err = cam->enc_disable_csi(cam);
++ if (err != 0)
++ return err;
++ }
++ if (cam->enc_disable) {
++ err = cam->enc_disable(cam);
++ if (err != 0)
++ return err;
++ }
++ }
++
++ mxc_free_frames(cam);
++ mxc_capture_inputs[cam->current_input].status |= V4L2_IN_ST_NO_POWER;
++ cam->capture_on = false;
++ return err;
++}
++
++/*!
++ * Valid and adjust the overlay window size, position
++ *
++ * @param cam structure cam_data *
++ * @param win struct v4l2_window *
++ *
++ * @return 0
++ */
++static int verify_preview(cam_data *cam, struct v4l2_window *win)
++{
++ int i = 0, width_bound = 0, height_bound = 0;
++ int *width, *height;
++ unsigned int ipu_ch = CHAN_NONE;
++ struct fb_info *bg_fbi = NULL, *fbi = NULL;
++ bool foregound_fb = false;
++ mm_segment_t old_fs;
++
++ pr_debug("In MVC: verify_preview\n");
++
++ do {
++ fbi = (struct fb_info *)registered_fb[i];
++ if (fbi == NULL) {
++ pr_err("ERROR: verify_preview frame buffer NULL.\n");
++ return -1;
++ }
++
++ /* Which DI supports 2 layers? */
++ if (((strncmp(fbi->fix.id, "DISP3 BG", 8) == 0) &&
++ (cam->output < 3)) ||
++ ((strncmp(fbi->fix.id, "DISP4 BG", 8) == 0) &&
++ (cam->output >= 3))) {
++ if (fbi->fbops->fb_ioctl) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ fbi->fbops->fb_ioctl(fbi, MXCFB_GET_FB_IPU_CHAN,
++ (unsigned long)&ipu_ch);
++ set_fs(old_fs);
++ }
++ if (ipu_ch == MEM_BG_SYNC) {
++ bg_fbi = fbi;
++ pr_debug("Found background frame buffer.\n");
++ }
++ }
++
++ /* Found the frame buffer to preview on. */
++ if (strcmp(fbi->fix.id,
++ mxc_capture_outputs[cam->output].name) == 0) {
++ if (((strcmp(fbi->fix.id, "DISP3 FG") == 0) &&
++ (cam->output < 3)) ||
++ ((strcmp(fbi->fix.id, "DISP4 FG") == 0) &&
++ (cam->output >= 3)))
++ foregound_fb = true;
++
++ cam->overlay_fb = fbi;
++ break;
++ }
++ } while (++i < FB_MAX);
++
++ if (foregound_fb) {
++ width_bound = bg_fbi->var.xres;
++ height_bound = bg_fbi->var.yres;
++
++ if (win->w.width + win->w.left > bg_fbi->var.xres ||
++ win->w.height + win->w.top > bg_fbi->var.yres) {
++ pr_err("ERROR: FG window position exceeds.\n");
++ return -1;
++ }
++ } else {
++ /* 4 bytes alignment for BG */
++ width_bound = cam->overlay_fb->var.xres;
++ height_bound = cam->overlay_fb->var.yres;
++
++ if (cam->overlay_fb->var.bits_per_pixel == 24)
++ win->w.left -= win->w.left % 4;
++ else if (cam->overlay_fb->var.bits_per_pixel == 16)
++ win->w.left -= win->w.left % 2;
++
++ if (win->w.width + win->w.left > cam->overlay_fb->var.xres)
++ win->w.width = cam->overlay_fb->var.xres - win->w.left;
++ if (win->w.height + win->w.top > cam->overlay_fb->var.yres)
++ win->w.height = cam->overlay_fb->var.yres - win->w.top;
++ }
++
++ /* stride line limitation */
++ win->w.height -= win->w.height % 8;
++ win->w.width -= win->w.width % 8;
++
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ height = &win->w.width;
++ width = &win->w.height;
++ } else {
++ width = &win->w.width;
++ height = &win->w.height;
++ }
++
++ if (*width == 0 || *height == 0) {
++ pr_err("ERROR: v4l2 capture: width or height"
++ " too small.\n");
++ return -EINVAL;
++ }
++
++ if ((cam->crop_bounds.width / *width > 8) ||
++ ((cam->crop_bounds.width / *width == 8) &&
++ (cam->crop_bounds.width % *width))) {
++ *width = cam->crop_bounds.width / 8;
++ if (*width % 8)
++ *width += 8 - *width % 8;
++ if (*width + win->w.left > width_bound) {
++ pr_err("ERROR: v4l2 capture: width exceeds "
++ "resize limit.\n");
++ return -1;
++ }
++ pr_err("ERROR: v4l2 capture: width exceeds limit. "
++ "Resize to %d.\n",
++ *width);
++ }
++
++ if ((cam->crop_bounds.height / *height > 8) ||
++ ((cam->crop_bounds.height / *height == 8) &&
++ (cam->crop_bounds.height % *height))) {
++ *height = cam->crop_bounds.height / 8;
++ if (*height % 8)
++ *height += 8 - *height % 8;
++ if (*height + win->w.top > height_bound) {
++ pr_err("ERROR: v4l2 capture: height exceeds "
++ "resize limit.\n");
++ return -1;
++ }
++ pr_err("ERROR: v4l2 capture: height exceeds limit "
++ "resize to %d.\n",
++ *height);
++ }
++
++ return 0;
++}
++
++/*!
++ * start the viewfinder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int start_preview(cam_data *cam)
++{
++ int err = 0;
++
++ pr_debug("MVC: start_preview\n");
++
++ if (cam->v4l2_fb.flags == V4L2_FBUF_FLAG_OVERLAY)
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ err = prp_vf_sdc_select(cam);
++ #else
++ err = foreground_sdc_select(cam);
++ #endif
++ else if (cam->v4l2_fb.flags == V4L2_FBUF_FLAG_PRIMARY)
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ err = prp_vf_sdc_select_bg(cam);
++ #else
++ err = bg_overlay_sdc_select(cam);
++ #endif
++ if (err != 0)
++ return err;
++
++ if (cam->vf_start_sdc) {
++ err = cam->vf_start_sdc(cam);
++ if (err != 0)
++ return err;
++ }
++
++ if (cam->vf_enable_csi)
++ err = cam->vf_enable_csi(cam);
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ return err;
++}
++
++/*!
++ * shut down the viewfinder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int stop_preview(cam_data *cam)
++{
++ int err = 0;
++
++ if (cam->vf_disable_csi) {
++ err = cam->vf_disable_csi(cam);
++ if (err != 0)
++ return err;
++ }
++
++ if (cam->vf_stop_sdc) {
++ err = cam->vf_stop_sdc(cam);
++ if (err != 0)
++ return err;
++ }
++
++ if (cam->v4l2_fb.flags == V4L2_FBUF_FLAG_OVERLAY)
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ err = prp_vf_sdc_deselect(cam);
++ #else
++ err = foreground_sdc_deselect(cam);
++ #endif
++ else if (cam->v4l2_fb.flags == V4L2_FBUF_FLAG_PRIMARY)
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ err = prp_vf_sdc_deselect_bg(cam);
++ #else
++ err = bg_overlay_sdc_deselect(cam);
++ #endif
++
++ return err;
++}
++
++/***************************************************************************
++ * VIDIOC Functions.
++ **************************************************************************/
++
++/*!
++ * V4L2 - mxc_v4l2_g_fmt function
++ *
++ * @param cam structure cam_data *
++ *
++ * @param f structure v4l2_format *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_g_fmt(cam_data *cam, struct v4l2_format *f)
++{
++ int retval = 0;
++
++ pr_debug("In MVC: mxc_v4l2_g_fmt type=%d\n", f->type);
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ f->fmt.pix = cam->v2f.fmt.pix;
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
++ f->fmt.win = cam->win;
++ break;
++ default:
++ pr_debug(" type is invalid\n");
++ retval = -EINVAL;
++ }
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ return retval;
++}
++
++/*!
++ * V4L2 - mxc_v4l2_s_fmt function
++ *
++ * @param cam structure cam_data *
++ *
++ * @param f structure v4l2_format *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_s_fmt(cam_data *cam, struct v4l2_format *f)
++{
++ int retval = 0;
++ int size = 0;
++ int bytesperline = 0;
++ int *width, *height;
++
++ pr_debug("In MVC: mxc_v4l2_s_fmt\n");
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type=V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ if (!valid_mode(f->fmt.pix.pixelformat)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l2_s_fmt: format "
++ "not supported\n");
++ return -EINVAL;
++ }
++
++ /*
++ * Force the capture window resolution to be crop bounds
++ * for CSI MEM input mode.
++ */
++ if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI MEM") == 0) {
++ f->fmt.pix.width = cam->crop_current.width;
++ f->fmt.pix.height = cam->crop_current.height;
++ }
++
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ height = &f->fmt.pix.width;
++ width = &f->fmt.pix.height;
++ } else {
++ width = &f->fmt.pix.width;
++ height = &f->fmt.pix.height;
++ }
++
++ /* stride line limitation */
++ *width -= *width % 8;
++ *height -= *height % 8;
++
++ if (*width == 0 || *height == 0) {
++ pr_err("ERROR: v4l2 capture: width or height"
++ " too small.\n");
++ return -EINVAL;
++ }
++
++ if ((cam->crop_current.width / *width > 8) ||
++ ((cam->crop_current.width / *width == 8) &&
++ (cam->crop_current.width % *width))) {
++ *width = cam->crop_current.width / 8;
++ if (*width % 8)
++ *width += 8 - *width % 8;
++ pr_err("ERROR: v4l2 capture: width exceeds limit "
++ "resize to %d.\n",
++ *width);
++ }
++
++ if ((cam->crop_current.height / *height > 8) ||
++ ((cam->crop_current.height / *height == 8) &&
++ (cam->crop_current.height % *height))) {
++ *height = cam->crop_current.height / 8;
++ if (*height % 8)
++ *height += 8 - *height % 8;
++ pr_err("ERROR: v4l2 capture: height exceeds limit "
++ "resize to %d.\n",
++ *height);
++ }
++
++ switch (f->fmt.pix.pixelformat) {
++ case V4L2_PIX_FMT_RGB565:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_BGR24:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3;
++ bytesperline = f->fmt.pix.width * 3;
++ break;
++ case V4L2_PIX_FMT_RGB24:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3;
++ bytesperline = f->fmt.pix.width * 3;
++ break;
++ case V4L2_PIX_FMT_BGR32:
++ size = f->fmt.pix.width * f->fmt.pix.height * 4;
++ bytesperline = f->fmt.pix.width * 4;
++ break;
++ case V4L2_PIX_FMT_RGB32:
++ size = f->fmt.pix.width * f->fmt.pix.height * 4;
++ bytesperline = f->fmt.pix.width * 4;
++ break;
++ case V4L2_PIX_FMT_YUV422P:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ bytesperline = f->fmt.pix.width;
++ break;
++ case V4L2_PIX_FMT_UYVY:
++ case V4L2_PIX_FMT_YUYV:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_YUV420:
++ case V4L2_PIX_FMT_YVU420:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3 / 2;
++ bytesperline = f->fmt.pix.width;
++ break;
++ case V4L2_PIX_FMT_NV12:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3 / 2;
++ bytesperline = f->fmt.pix.width;
++ break;
++ default:
++ break;
++ }
++
++ if (f->fmt.pix.bytesperline < bytesperline)
++ f->fmt.pix.bytesperline = bytesperline;
++ else
++ bytesperline = f->fmt.pix.bytesperline;
++
++ if (f->fmt.pix.sizeimage < size)
++ f->fmt.pix.sizeimage = size;
++ else
++ size = f->fmt.pix.sizeimage;
++
++ cam->v2f.fmt.pix = f->fmt.pix;
++
++ if (cam->v2f.fmt.pix.priv != 0) {
++ if (copy_from_user(&cam->offset,
++ (void *)cam->v2f.fmt.pix.priv,
++ sizeof(cam->offset))) {
++ retval = -EFAULT;
++ break;
++ }
++ }
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ pr_debug(" type=V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
++ retval = verify_preview(cam, &f->fmt.win);
++ cam->win = f->fmt.win;
++ break;
++ default:
++ retval = -EINVAL;
++ }
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ return retval;
++}
++
++/*!
++ * get control param
++ *
++ * @param cam structure cam_data *
++ *
++ * @param c structure v4l2_control *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_g_ctrl(cam_data *cam, struct v4l2_control *c)
++{
++ int status = 0;
++
++ pr_debug("In MVC:mxc_v4l2_g_ctrl\n");
++
++ /* probably don't need to store the values that can be retrieved,
++ * locally, but they are for now. */
++ switch (c->id) {
++ case V4L2_CID_HFLIP:
++ /* This is handled in the ipu. */
++ if (cam->rotation == IPU_ROTATE_HORIZ_FLIP)
++ c->value = 1;
++ break;
++ case V4L2_CID_VFLIP:
++ /* This is handled in the ipu. */
++ if (cam->rotation == IPU_ROTATE_VERT_FLIP)
++ c->value = 1;
++ break;
++ case V4L2_CID_MXC_ROT:
++ /* This is handled in the ipu. */
++ c->value = cam->rotation;
++ break;
++ case V4L2_CID_BRIGHTNESS:
++ if (cam->sensor) {
++ c->value = cam->bright;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->bright = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_HUE:
++ if (cam->sensor) {
++ c->value = cam->hue;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->hue = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_CONTRAST:
++ if (cam->sensor) {
++ c->value = cam->contrast;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->contrast = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_SATURATION:
++ if (cam->sensor) {
++ c->value = cam->saturation;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->saturation = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_RED_BALANCE:
++ if (cam->sensor) {
++ c->value = cam->red;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->red = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ if (cam->sensor) {
++ c->value = cam->blue;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->blue = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_BLACK_LEVEL:
++ if (cam->sensor) {
++ c->value = cam->ae_mode;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->ae_mode = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ default:
++ pr_err("ERROR: v4l2 capture: unsupported ioctrl!\n");
++ }
++
++ return status;
++}
++
++/*!
++ * V4L2 - set_control function
++ * V4L2_CID_PRIVATE_BASE is the extention for IPU preprocessing.
++ * 0 for normal operation
++ * 1 for vertical flip
++ * 2 for horizontal flip
++ * 3 for horizontal and vertical flip
++ * 4 for 90 degree rotation
++ * @param cam structure cam_data *
++ *
++ * @param c structure v4l2_control *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_s_ctrl(cam_data *cam, struct v4l2_control *c)
++{
++ int i, ret = 0;
++ int tmp_rotation = IPU_ROTATE_NONE;
++ struct sensor_data *sensor_data;
++
++ pr_debug("In MVC:mxc_v4l2_s_ctrl\n");
++
++ switch (c->id) {
++ case V4L2_CID_HFLIP:
++ /* This is done by the IPU */
++ if (c->value == 1) {
++ if ((cam->rotation != IPU_ROTATE_VERT_FLIP) &&
++ (cam->rotation != IPU_ROTATE_180))
++ cam->rotation = IPU_ROTATE_HORIZ_FLIP;
++ else
++ cam->rotation = IPU_ROTATE_180;
++ } else {
++ if (cam->rotation == IPU_ROTATE_HORIZ_FLIP)
++ cam->rotation = IPU_ROTATE_NONE;
++ if (cam->rotation == IPU_ROTATE_180)
++ cam->rotation = IPU_ROTATE_VERT_FLIP;
++ }
++ break;
++ case V4L2_CID_VFLIP:
++ /* This is done by the IPU */
++ if (c->value == 1) {
++ if ((cam->rotation != IPU_ROTATE_HORIZ_FLIP) &&
++ (cam->rotation != IPU_ROTATE_180))
++ cam->rotation = IPU_ROTATE_VERT_FLIP;
++ else
++ cam->rotation = IPU_ROTATE_180;
++ } else {
++ if (cam->rotation == IPU_ROTATE_VERT_FLIP)
++ cam->rotation = IPU_ROTATE_NONE;
++ if (cam->rotation == IPU_ROTATE_180)
++ cam->rotation = IPU_ROTATE_HORIZ_FLIP;
++ }
++ break;
++ case V4L2_CID_MXC_ROT:
++ case V4L2_CID_MXC_VF_ROT:
++ /* This is done by the IPU */
++ switch (c->value) {
++ case V4L2_MXC_ROTATE_NONE:
++ tmp_rotation = IPU_ROTATE_NONE;
++ break;
++ case V4L2_MXC_ROTATE_VERT_FLIP:
++ tmp_rotation = IPU_ROTATE_VERT_FLIP;
++ break;
++ case V4L2_MXC_ROTATE_HORIZ_FLIP:
++ tmp_rotation = IPU_ROTATE_HORIZ_FLIP;
++ break;
++ case V4L2_MXC_ROTATE_180:
++ tmp_rotation = IPU_ROTATE_180;
++ break;
++ case V4L2_MXC_ROTATE_90_RIGHT:
++ tmp_rotation = IPU_ROTATE_90_RIGHT;
++ break;
++ case V4L2_MXC_ROTATE_90_RIGHT_VFLIP:
++ tmp_rotation = IPU_ROTATE_90_RIGHT_VFLIP;
++ break;
++ case V4L2_MXC_ROTATE_90_RIGHT_HFLIP:
++ tmp_rotation = IPU_ROTATE_90_RIGHT_HFLIP;
++ break;
++ case V4L2_MXC_ROTATE_90_LEFT:
++ tmp_rotation = IPU_ROTATE_90_LEFT;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ if (c->id == V4L2_CID_MXC_VF_ROT)
++ cam->vf_rotation = tmp_rotation;
++ else
++ cam->rotation = tmp_rotation;
++ #else
++ cam->rotation = tmp_rotation;
++ #endif
++
++ break;
++ case V4L2_CID_HUE:
++ if (cam->sensor) {
++ cam->hue = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_CONTRAST:
++ if (cam->sensor) {
++ cam->contrast = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_BRIGHTNESS:
++ if (cam->sensor) {
++ cam->bright = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_SATURATION:
++ if (cam->sensor) {
++ cam->saturation = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_RED_BALANCE:
++ if (cam->sensor) {
++ cam->red = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ if (cam->sensor) {
++ cam->blue = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_EXPOSURE:
++ if (cam->sensor) {
++ cam->ae_mode = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_MXC_FLASH:
++#ifdef CONFIG_MXC_IPU_V1
++ ipu_csi_flash_strobe(true);
++#endif
++ break;
++ case V4L2_CID_MXC_SWITCH_CAM:
++ if (cam->sensor == cam->all_sensors[c->value])
++ break;
++
++ /* power down other cameraes before enable new one */
++ for (i = 0; i < cam->sensor_index; i++) {
++ if (i != c->value) {
++ vidioc_int_dev_exit(cam->all_sensors[i]);
++ vidioc_int_s_power(cam->all_sensors[i], 0);
++ if (cam->mclk_on[cam->mclk_source]) {
++ ipu_csi_enable_mclk_if(cam->ipu,
++ CSI_MCLK_I2C,
++ cam->mclk_source,
++ false, false);
++ cam->mclk_on[cam->mclk_source] =
++ false;
++ }
++ }
++ }
++ sensor_data = cam->all_sensors[c->value]->priv;
++ if (sensor_data->io_init)
++ sensor_data->io_init();
++ cam->sensor = cam->all_sensors[c->value];
++ cam->mclk_source = sensor_data->mclk_source;
++ ipu_csi_enable_mclk_if(cam->ipu, CSI_MCLK_I2C,
++ cam->mclk_source, true, true);
++ cam->mclk_on[cam->mclk_source] = true;
++ vidioc_int_s_power(cam->sensor, 1);
++ vidioc_int_dev_init(cam->sensor);
++ break;
++ default:
++ pr_debug(" default case\n");
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * V4L2 - mxc_v4l2_s_param function
++ * Allows setting of capturemode and frame rate.
++ *
++ * @param cam structure cam_data *
++ * @param parm structure v4l2_streamparm *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_s_param(cam_data *cam, struct v4l2_streamparm *parm)
++{
++ struct v4l2_ifparm ifparm;
++ struct v4l2_format cam_fmt;
++ struct v4l2_streamparm currentparm;
++ ipu_csi_signal_cfg_t csi_param;
++ u32 current_fps, parm_fps;
++ int err = 0;
++
++ pr_debug("In mxc_v4l2_s_param\n");
++
++ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ pr_err(KERN_ERR "mxc_v4l2_s_param invalid type\n");
++ return -EINVAL;
++ }
++
++ /* Stop the viewfinder */
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ currentparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++
++ /* First check that this device can support the changes requested. */
++ err = vidioc_int_g_parm(cam->sensor, &currentparm);
++ if (err) {
++ pr_err("%s: vidioc_int_g_parm returned an error %d\n",
++ __func__, err);
++ goto exit;
++ }
++
++ current_fps = currentparm.parm.capture.timeperframe.denominator
++ / currentparm.parm.capture.timeperframe.numerator;
++ parm_fps = parm->parm.capture.timeperframe.denominator
++ / parm->parm.capture.timeperframe.numerator;
++
++ pr_debug(" Current capabilities are %x\n",
++ currentparm.parm.capture.capability);
++ pr_debug(" Current capturemode is %d change to %d\n",
++ currentparm.parm.capture.capturemode,
++ parm->parm.capture.capturemode);
++ pr_debug(" Current framerate is %d change to %d\n",
++ current_fps, parm_fps);
++
++ /* This will change any camera settings needed. */
++ err = vidioc_int_s_parm(cam->sensor, parm);
++ if (err) {
++ pr_err("%s: vidioc_int_s_parm returned an error %d\n",
++ __func__, err);
++ goto exit;
++ }
++
++ /* If resolution changed, need to re-program the CSI */
++ /* Get new values. */
++ vidioc_int_g_ifparm(cam->sensor, &ifparm);
++
++ csi_param.data_width = 0;
++ csi_param.clk_mode = 0;
++ csi_param.ext_vsync = 0;
++ csi_param.Vsync_pol = 0;
++ csi_param.Hsync_pol = 0;
++ csi_param.pixclk_pol = 0;
++ csi_param.data_pol = 0;
++ csi_param.sens_clksrc = 0;
++ csi_param.pack_tight = 0;
++ csi_param.force_eof = 0;
++ csi_param.data_en_pol = 0;
++ csi_param.data_fmt = 0;
++ csi_param.csi = cam->csi;
++ csi_param.mclk = 0;
++
++ pr_debug(" clock_curr=mclk=%d\n", ifparm.u.bt656.clock_curr);
++ if (ifparm.u.bt656.clock_curr == 0)
++ csi_param.clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
++ else
++ csi_param.clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
++
++ csi_param.pixclk_pol = ifparm.u.bt656.latch_clk_inv;
++
++ if (ifparm.u.bt656.mode == V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT) {
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
++ } else if (ifparm.u.bt656.mode
++ == V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT) {
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_10;
++ } else {
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
++ }
++
++ csi_param.Vsync_pol = ifparm.u.bt656.nobt_vs_inv;
++ csi_param.Hsync_pol = ifparm.u.bt656.nobt_hs_inv;
++ csi_param.ext_vsync = ifparm.u.bt656.bt_sync_correct;
++
++ /* if the capturemode changed, the size bounds will have changed. */
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++ pr_debug(" g_fmt_cap returns widthxheight of input as %d x %d\n",
++ cam_fmt.fmt.pix.width, cam_fmt.fmt.pix.height);
++
++ csi_param.data_fmt = cam_fmt.fmt.pix.pixelformat;
++
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++
++ /*
++ * Set the default current cropped resolution to be the same with
++ * the cropping boundary(except for tvin module).
++ */
++ if (cam->device_type != 1) {
++ cam->crop_current.width = cam->crop_bounds.width;
++ cam->crop_current.height = cam->crop_bounds.height;
++ }
++
++ /* This essentially loses the data at the left and bottom of the image
++ * giving a digital zoom image, if crop_current is less than the full
++ * size of the image. */
++ ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
++ cam->crop_current.height, cam->csi);
++ ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
++ cam->crop_current.top,
++ cam->csi);
++ ipu_csi_init_interface(cam->ipu, cam->crop_bounds.width,
++ cam->crop_bounds.height,
++ cam_fmt.fmt.pix.pixelformat, csi_param);
++
++
++exit:
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ return err;
++}
++
++/*!
++ * V4L2 - mxc_v4l2_s_std function
++ *
++ * Sets the TV standard to be used.
++ *
++ * @param cam structure cam_data *
++ * @param parm structure v4l2_streamparm *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_s_std(cam_data *cam, v4l2_std_id e)
++{
++ pr_debug("In mxc_v4l2_s_std %Lx\n", e);
++
++ if (e == V4L2_STD_PAL) {
++ pr_debug(" Setting standard to PAL %Lx\n", V4L2_STD_PAL);
++ cam->standard.id = V4L2_STD_PAL;
++ video_index = TV_PAL;
++ } else if (e == V4L2_STD_NTSC) {
++ pr_debug(" Setting standard to NTSC %Lx\n",
++ V4L2_STD_NTSC);
++ /* Get rid of the white dot line in NTSC signal input */
++ cam->standard.id = V4L2_STD_NTSC;
++ video_index = TV_NTSC;
++ } else {
++ cam->standard.id = V4L2_STD_ALL;
++ video_index = TV_NOT_LOCKED;
++ pr_err("ERROR: unrecognized std! %Lx (PAL=%Lx, NTSC=%Lx\n",
++ e, V4L2_STD_PAL, V4L2_STD_NTSC);
++ }
++
++ cam->standard.index = video_index;
++ strcpy(cam->standard.name, video_fmts[video_index].name);
++ cam->crop_bounds.width = video_fmts[video_index].raw_width;
++ cam->crop_bounds.height = video_fmts[video_index].raw_height;
++ cam->crop_current.width = video_fmts[video_index].active_width;
++ cam->crop_current.height = video_fmts[video_index].active_height;
++ cam->crop_current.top = video_fmts[video_index].active_top;
++ cam->crop_current.left = video_fmts[video_index].active_left;
++
++ return 0;
++}
++
++/*!
++ * V4L2 - mxc_v4l2_g_std function
++ *
++ * Gets the TV standard from the TV input device.
++ *
++ * @param cam structure cam_data *
++ *
++ * @param e structure v4l2_streamparm *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_g_std(cam_data *cam, v4l2_std_id *e)
++{
++ struct v4l2_format tv_fmt;
++
++ pr_debug("In mxc_v4l2_g_std\n");
++
++ if (cam->device_type == 1) {
++ /* Use this function to get what the TV-In device detects the
++ * format to be. pixelformat is used to return the std value
++ * since the interface has no vidioc_g_std.*/
++ tv_fmt.type = V4L2_BUF_TYPE_PRIVATE;
++ vidioc_int_g_fmt_cap(cam->sensor, &tv_fmt);
++
++ /* If the TV-in automatically detects the standard, then if it
++ * changes, the settings need to change. */
++ if (cam->standard_autodetect) {
++ if (cam->standard.id != tv_fmt.fmt.pix.pixelformat) {
++ pr_debug("MVC: mxc_v4l2_g_std: "
++ "Changing standard\n");
++ mxc_v4l2_s_std(cam, tv_fmt.fmt.pix.pixelformat);
++ }
++ }
++
++ *e = tv_fmt.fmt.pix.pixelformat;
++ }
++
++ return 0;
++}
++
++/*!
++ * Dequeue one V4L capture buffer
++ *
++ * @param cam structure cam_data *
++ * @param buf structure v4l2_buffer *
++ *
++ * @return status 0 success, EINVAL invalid frame number,
++ * ETIME timeout, ERESTARTSYS interrupted by user
++ */
++static int mxc_v4l_dqueue(cam_data *cam, struct v4l2_buffer *buf)
++{
++ int retval = 0;
++ struct mxc_v4l_frame *frame;
++ unsigned long lock_flags;
++
++ pr_debug("In MVC:mxc_v4l_dqueue\n");
++
++ if (!wait_event_interruptible_timeout(cam->enc_queue,
++ cam->enc_counter != 0, 10 * HZ)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue timeout "
++ "enc_counter %x\n",
++ cam->enc_counter);
++ return -ETIME;
++ } else if (signal_pending(current)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue() "
++ "interrupt received\n");
++ return -ERESTARTSYS;
++ }
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EBUSY;
++
++ spin_lock_irqsave(&cam->dqueue_int_lock, lock_flags);
++ cam->enc_counter--;
++
++ frame = list_entry(cam->done_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->done_q.next);
++ if (frame->buffer.flags & V4L2_BUF_FLAG_DONE) {
++ frame->buffer.flags &= ~V4L2_BUF_FLAG_DONE;
++ } else if (frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
++ "Buffer not filled.\n");
++ frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
++ retval = -EINVAL;
++ } else if ((frame->buffer.flags & 0x7) == V4L2_BUF_FLAG_MAPPED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
++ "Buffer not queued.\n");
++ retval = -EINVAL;
++ }
++
++ cam->frame[frame->index].buffer.field = cam->device_type ?
++ V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE;
++
++ buf->bytesused = cam->v2f.fmt.pix.sizeimage;
++ buf->index = frame->index;
++ buf->flags = frame->buffer.flags;
++ buf->m = cam->frame[frame->index].buffer.m;
++ buf->timestamp = cam->frame[frame->index].buffer.timestamp;
++ buf->field = cam->frame[frame->index].buffer.field;
++ spin_unlock_irqrestore(&cam->dqueue_int_lock, lock_flags);
++
++ up(&cam->busy_lock);
++ return retval;
++}
++
++/*!
++ * V4L interface - open function
++ *
++ * @param file structure file *
++ *
++ * @return status 0 success, ENODEV invalid device instance,
++ * ENODEV timeout, ERESTARTSYS interrupted by user
++ */
++static int mxc_v4l_open(struct file *file)
++{
++ struct v4l2_ifparm ifparm;
++ struct v4l2_format cam_fmt;
++ ipu_csi_signal_cfg_t csi_param;
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ int err = 0;
++ struct sensor_data *sensor;
++
++ pr_debug("\nIn MVC: mxc_v4l_open\n");
++ pr_debug(" device name is %s\n", dev->name);
++
++ if (!cam) {
++ pr_err("ERROR: v4l2 capture: Internal error, "
++ "cam_data not found!\n");
++ return -EBADF;
++ }
++
++ if (cam->sensor == NULL ||
++ cam->sensor->type != v4l2_int_type_slave) {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ return -EAGAIN;
++ }
++
++ sensor = cam->sensor->priv;
++ if (!sensor) {
++ pr_err("%s: Internal error, sensor_data is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ down(&cam->busy_lock);
++ err = 0;
++ if (signal_pending(current))
++ goto oops;
++
++ if (cam->open_count++ == 0) {
++ wait_event_interruptible(cam->power_queue,
++ cam->low_power == false);
++
++ if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_CSI_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
++ err = csi_enc_select(cam);
++#endif
++ } else if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI IC MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_PRP_ENC_MODULE)
++ err = prp_enc_select(cam);
++#endif
++ }
++
++ cam->enc_counter = 0;
++ INIT_LIST_HEAD(&cam->ready_q);
++ INIT_LIST_HEAD(&cam->working_q);
++ INIT_LIST_HEAD(&cam->done_q);
++
++ vidioc_int_g_ifparm(cam->sensor, &ifparm);
++
++ csi_param.sens_clksrc = 0;
++
++ csi_param.clk_mode = 0;
++ csi_param.data_pol = 0;
++ csi_param.ext_vsync = 0;
++
++ csi_param.pack_tight = 0;
++ csi_param.force_eof = 0;
++ csi_param.data_en_pol = 0;
++
++ csi_param.mclk = ifparm.u.bt656.clock_curr;
++
++ csi_param.pixclk_pol = ifparm.u.bt656.latch_clk_inv;
++
++ if (ifparm.u.bt656.mode
++ == V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT)
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
++ else if (ifparm.u.bt656.mode
++ == V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT)
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_10;
++ else
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
++
++
++ csi_param.Vsync_pol = ifparm.u.bt656.nobt_vs_inv;
++ csi_param.Hsync_pol = ifparm.u.bt656.nobt_hs_inv;
++
++ csi_param.csi = cam->csi;
++
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++
++ /* Reset the sizes. Needed to prevent carryover of last
++ * operation.*/
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++
++ /* This also is the max crop size for this device. */
++ cam->crop_defrect.top = cam->crop_defrect.left = 0;
++ cam->crop_defrect.width = cam_fmt.fmt.pix.width;
++ cam->crop_defrect.height = cam_fmt.fmt.pix.height;
++
++ /* At this point, this is also the current image size. */
++ cam->crop_current.top = cam->crop_current.left = 0;
++ cam->crop_current.width = cam_fmt.fmt.pix.width;
++ cam->crop_current.height = cam_fmt.fmt.pix.height;
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ csi_param.data_fmt = cam_fmt.fmt.pix.pixelformat;
++ pr_debug("On Open: Input to ipu size is %d x %d\n",
++ cam_fmt.fmt.pix.width, cam_fmt.fmt.pix.height);
++ ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
++ cam->crop_current.height,
++ cam->csi);
++ ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
++ cam->crop_current.top,
++ cam->csi);
++ ipu_csi_init_interface(cam->ipu, cam->crop_bounds.width,
++ cam->crop_bounds.height,
++ cam_fmt.fmt.pix.pixelformat,
++ csi_param);
++ clk_prepare_enable(sensor->sensor_clk);
++ vidioc_int_s_power(cam->sensor, 1);
++ vidioc_int_init(cam->sensor);
++ vidioc_int_dev_init(cam->sensor);
++ }
++
++ file->private_data = dev;
++
++oops:
++ up(&cam->busy_lock);
++ return err;
++}
++
++/*!
++ * V4L interface - close function
++ *
++ * @param file struct file *
++ *
++ * @return 0 success
++ */
++static int mxc_v4l_close(struct file *file)
++{
++ struct video_device *dev = video_devdata(file);
++ int err = 0;
++ cam_data *cam = video_get_drvdata(dev);
++ struct sensor_data *sensor;
++ pr_debug("In MVC:mxc_v4l_close\n");
++
++ if (!cam) {
++ pr_err("ERROR: v4l2 capture: Internal error, "
++ "cam_data not found!\n");
++ return -EBADF;
++ }
++
++ if (!cam->sensor) {
++ pr_err("%s: Internal error, camera is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ sensor = cam->sensor->priv;
++ if (!sensor) {
++ pr_err("%s: Internal error, sensor_data is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ down(&cam->busy_lock);
++
++ /* for the case somebody hit the ctrl C */
++ if (cam->overlay_pid == current->pid && cam->overlay_on) {
++ err = stop_preview(cam);
++ cam->overlay_on = false;
++ }
++ if (cam->capture_pid == current->pid) {
++ err |= mxc_streamoff(cam);
++ wake_up_interruptible(&cam->enc_queue);
++ }
++
++ if (--cam->open_count == 0) {
++ vidioc_int_s_power(cam->sensor, 0);
++ clk_disable_unprepare(sensor->sensor_clk);
++ wait_event_interruptible(cam->power_queue,
++ cam->low_power == false);
++ pr_debug("mxc_v4l_close: release resource\n");
++
++ if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_CSI_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
++ err |= csi_enc_deselect(cam);
++#endif
++ } else if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI IC MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_PRP_ENC_MODULE)
++ err |= prp_enc_deselect(cam);
++#endif
++ }
++
++ mxc_free_frame_buf(cam);
++ file->private_data = NULL;
++
++ /* capture off */
++ wake_up_interruptible(&cam->enc_queue);
++ mxc_free_frames(cam);
++ cam->enc_counter++;
++ }
++
++ up(&cam->busy_lock);
++
++ return err;
++}
++
++#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC) || \
++ defined(CONFIG_MXC_IPU_PRP_ENC_MODULE) || \
++ defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
++/*
++ * V4L interface - read function
++ *
++ * @param file struct file *
++ * @param read buf char *
++ * @param count size_t
++ * @param ppos structure loff_t *
++ *
++ * @return bytes read
++ */
++static ssize_t mxc_v4l_read(struct file *file, char *buf, size_t count,
++ loff_t *ppos)
++{
++ int err = 0;
++ u8 *v_address[2];
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ /* Stop the viewfinder */
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ v_address[0] = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->still_buf[0],
++ GFP_DMA | GFP_KERNEL);
++
++ v_address[1] = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->still_buf[1],
++ GFP_DMA | GFP_KERNEL);
++
++ if (!v_address[0] || !v_address[1]) {
++ err = -ENOBUFS;
++ goto exit0;
++ }
++
++ err = prp_still_select(cam);
++ if (err != 0) {
++ err = -EIO;
++ goto exit0;
++ }
++
++ cam->still_counter = 0;
++ err = cam->csi_start(cam);
++ if (err != 0) {
++ err = -EIO;
++ goto exit1;
++ }
++
++ if (!wait_event_interruptible_timeout(cam->still_queue,
++ cam->still_counter != 0,
++ 10 * HZ)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_read timeout counter %x\n",
++ cam->still_counter);
++ err = -ETIME;
++ goto exit1;
++ }
++ err = copy_to_user(buf, v_address[1], cam->v2f.fmt.pix.sizeimage);
++
++exit1:
++ prp_still_deselect(cam);
++
++exit0:
++ if (v_address[0] != 0)
++ dma_free_coherent(0, cam->v2f.fmt.pix.sizeimage, v_address[0],
++ cam->still_buf[0]);
++ if (v_address[1] != 0)
++ dma_free_coherent(0, cam->v2f.fmt.pix.sizeimage, v_address[1],
++ cam->still_buf[1]);
++
++ cam->still_buf[0] = cam->still_buf[1] = 0;
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ up(&cam->busy_lock);
++ if (err < 0)
++ return err;
++
++ return cam->v2f.fmt.pix.sizeimage - err;
++}
++#endif
++
++/*!
++ * V4L interface - ioctl function
++ *
++ * @param file struct file*
++ *
++ * @param ioctlnr unsigned int
++ *
++ * @param arg void*
++ *
++ * @return 0 success, ENODEV for invalid device instance,
++ * -1 for other errors.
++ */
++static long mxc_v4l_do_ioctl(struct file *file,
++ unsigned int ioctlnr, void *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ int retval = 0;
++ unsigned long lock_flags;
++
++ pr_debug("In MVC: mxc_v4l_do_ioctl %x\n", ioctlnr);
++ wait_event_interruptible(cam->power_queue, cam->low_power == false);
++ /* make this _really_ smp-safe */
++ if (ioctlnr != VIDIOC_DQBUF)
++ if (down_interruptible(&cam->busy_lock))
++ return -EBUSY;
++
++ switch (ioctlnr) {
++ /*!
++ * V4l2 VIDIOC_QUERYCAP ioctl
++ */
++ case VIDIOC_QUERYCAP: {
++ struct v4l2_capability *cap = arg;
++ pr_debug(" case VIDIOC_QUERYCAP\n");
++ strcpy(cap->driver, "mxc_v4l2");
++ cap->version = KERNEL_VERSION(0, 1, 11);
++ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
++ V4L2_CAP_VIDEO_OVERLAY |
++ V4L2_CAP_STREAMING |
++ V4L2_CAP_READWRITE;
++ cap->card[0] = '\0';
++ cap->bus_info[0] = '\0';
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_FMT ioctl
++ */
++ case VIDIOC_G_FMT: {
++ struct v4l2_format *gf = arg;
++ pr_debug(" case VIDIOC_G_FMT\n");
++ retval = mxc_v4l2_g_fmt(cam, gf);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_FMT ioctl
++ */
++ case VIDIOC_S_FMT: {
++ struct v4l2_format *sf = arg;
++ pr_debug(" case VIDIOC_S_FMT\n");
++ retval = mxc_v4l2_s_fmt(cam, sf);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_REQBUFS ioctl
++ */
++ case VIDIOC_REQBUFS: {
++ struct v4l2_requestbuffers *req = arg;
++ pr_debug(" case VIDIOC_REQBUFS\n");
++
++ if (req->count > FRAME_NUM) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
++ "not enough buffers\n");
++ req->count = FRAME_NUM;
++ }
++
++ if ((req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
++ "wrong buffer type\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ mxc_streamoff(cam);
++ if (req->memory & V4L2_MEMORY_MMAP) {
++ mxc_free_frame_buf(cam);
++ retval = mxc_allocate_frame_buf(cam, req->count);
++ }
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_QUERYBUF ioctl
++ */
++ case VIDIOC_QUERYBUF: {
++ struct v4l2_buffer *buf = arg;
++ int index = buf->index;
++ pr_debug(" case VIDIOC_QUERYBUF\n");
++
++ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ pr_err("ERROR: v4l2 capture: "
++ "VIDIOC_QUERYBUFS: "
++ "wrong buffer type\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (buf->memory & V4L2_MEMORY_MMAP) {
++ memset(buf, 0, sizeof(buf));
++ buf->index = index;
++ }
++
++ down(&cam->param_lock);
++ if (buf->memory & V4L2_MEMORY_USERPTR) {
++ mxc_v4l2_release_bufs(cam);
++ retval = mxc_v4l2_prepare_bufs(cam, buf);
++ }
++
++ if (buf->memory & V4L2_MEMORY_MMAP)
++ retval = mxc_v4l2_buffer_status(cam, buf);
++ up(&cam->param_lock);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_QBUF ioctl
++ */
++ case VIDIOC_QBUF: {
++ struct v4l2_buffer *buf = arg;
++ int index = buf->index;
++ pr_debug(" case VIDIOC_QBUF\n");
++
++ spin_lock_irqsave(&cam->queue_int_lock, lock_flags);
++ if ((cam->frame[index].buffer.flags & 0x7) ==
++ V4L2_BUF_FLAG_MAPPED) {
++ cam->frame[index].buffer.flags |=
++ V4L2_BUF_FLAG_QUEUED;
++ list_add_tail(&cam->frame[index].queue,
++ &cam->ready_q);
++ } else if (cam->frame[index].buffer.
++ flags & V4L2_BUF_FLAG_QUEUED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
++ "buffer already queued\n");
++ retval = -EINVAL;
++ } else if (cam->frame[index].buffer.
++ flags & V4L2_BUF_FLAG_DONE) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
++ "overwrite done buffer.\n");
++ cam->frame[index].buffer.flags &=
++ ~V4L2_BUF_FLAG_DONE;
++ cam->frame[index].buffer.flags |=
++ V4L2_BUF_FLAG_QUEUED;
++ retval = -EINVAL;
++ }
++
++ buf->flags = cam->frame[index].buffer.flags;
++ spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_DQBUF ioctl
++ */
++ case VIDIOC_DQBUF: {
++ struct v4l2_buffer *buf = arg;
++ pr_debug(" case VIDIOC_DQBUF\n");
++
++ if ((cam->enc_counter == 0) &&
++ (file->f_flags & O_NONBLOCK)) {
++ retval = -EAGAIN;
++ break;
++ }
++
++ retval = mxc_v4l_dqueue(cam, buf);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_STREAMON ioctl
++ */
++ case VIDIOC_STREAMON: {
++ pr_debug(" case VIDIOC_STREAMON\n");
++ retval = mxc_streamon(cam);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_STREAMOFF ioctl
++ */
++ case VIDIOC_STREAMOFF: {
++ pr_debug(" case VIDIOC_STREAMOFF\n");
++ retval = mxc_streamoff(cam);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_CTRL ioctl
++ */
++ case VIDIOC_G_CTRL: {
++ pr_debug(" case VIDIOC_G_CTRL\n");
++ retval = mxc_v4l2_g_ctrl(cam, arg);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_CTRL ioctl
++ */
++ case VIDIOC_S_CTRL: {
++ pr_debug(" case VIDIOC_S_CTRL\n");
++ retval = mxc_v4l2_s_ctrl(cam, arg);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_CROPCAP ioctl
++ */
++ case VIDIOC_CROPCAP: {
++ struct v4l2_cropcap *cap = arg;
++ pr_debug(" case VIDIOC_CROPCAP\n");
++ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
++ cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
++ retval = -EINVAL;
++ break;
++ }
++ cap->bounds = cam->crop_bounds;
++ cap->defrect = cam->crop_defrect;
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_CROP ioctl
++ */
++ case VIDIOC_G_CROP: {
++ struct v4l2_crop *crop = arg;
++ pr_debug(" case VIDIOC_G_CROP\n");
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
++ crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
++ retval = -EINVAL;
++ break;
++ }
++ crop->c = cam->crop_current;
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_CROP ioctl
++ */
++ case VIDIOC_S_CROP: {
++ struct v4l2_crop *crop = arg;
++ struct v4l2_rect *b = &cam->crop_bounds;
++ pr_debug(" case VIDIOC_S_CROP\n");
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
++ crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
++ retval = -EINVAL;
++ break;
++ }
++
++ crop->c.top = (crop->c.top < b->top) ? b->top
++ : crop->c.top;
++ if (crop->c.top > b->top + b->height)
++ crop->c.top = b->top + b->height - 1;
++ if (crop->c.height > b->top + b->height - crop->c.top)
++ crop->c.height =
++ b->top + b->height - crop->c.top;
++
++ crop->c.left = (crop->c.left < b->left) ? b->left
++ : crop->c.left;
++ if (crop->c.left > b->left + b->width)
++ crop->c.left = b->left + b->width - 1;
++ if (crop->c.width > b->left - crop->c.left + b->width)
++ crop->c.width =
++ b->left - crop->c.left + b->width;
++
++ crop->c.width -= crop->c.width % 8;
++ crop->c.left -= crop->c.left % 4;
++ cam->crop_current = crop->c;
++
++ pr_debug(" Cropping Input to ipu size %d x %d\n",
++ cam->crop_current.width,
++ cam->crop_current.height);
++ ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
++ cam->crop_current.height,
++ cam->csi);
++ ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
++ cam->crop_current.top,
++ cam->csi);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_OVERLAY ioctl
++ */
++ case VIDIOC_OVERLAY: {
++ int *on = arg;
++ pr_debug(" VIDIOC_OVERLAY on=%d\n", *on);
++ if (*on) {
++ cam->overlay_on = true;
++ cam->overlay_pid = current->pid;
++ retval = start_preview(cam);
++ }
++ if (!*on) {
++ retval = stop_preview(cam);
++ cam->overlay_on = false;
++ }
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_FBUF ioctl
++ */
++ case VIDIOC_G_FBUF: {
++ struct v4l2_framebuffer *fb = arg;
++ pr_debug(" case VIDIOC_G_FBUF\n");
++ *fb = cam->v4l2_fb;
++ fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY;
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_FBUF ioctl
++ */
++ case VIDIOC_S_FBUF: {
++ struct v4l2_framebuffer *fb = arg;
++ pr_debug(" case VIDIOC_S_FBUF\n");
++ cam->v4l2_fb = *fb;
++ break;
++ }
++
++ case VIDIOC_G_PARM: {
++ struct v4l2_streamparm *parm = arg;
++ pr_debug(" case VIDIOC_G_PARM\n");
++ if (cam->sensor)
++ retval = vidioc_int_g_parm(cam->sensor, parm);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++
++ case VIDIOC_S_PARM: {
++ struct v4l2_streamparm *parm = arg;
++ pr_debug(" case VIDIOC_S_PARM\n");
++ if (cam->sensor)
++ retval = mxc_v4l2_s_param(cam, parm);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++
++ /* linux v4l2 bug, kernel c0485619 user c0405619 */
++ case VIDIOC_ENUMSTD: {
++ struct v4l2_standard *e = arg;
++ pr_debug(" case VIDIOC_ENUMSTD\n");
++ *e = cam->standard;
++ break;
++ }
++
++ case VIDIOC_G_STD: {
++ v4l2_std_id *e = arg;
++ pr_debug(" case VIDIOC_G_STD\n");
++ if (cam->sensor)
++ retval = mxc_v4l2_g_std(cam, e);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++
++ case VIDIOC_S_STD: {
++ v4l2_std_id *e = arg;
++ pr_debug(" case VIDIOC_S_STD\n");
++ retval = mxc_v4l2_s_std(cam, *e);
++
++ break;
++ }
++
++ case VIDIOC_ENUMOUTPUT: {
++ struct v4l2_output *output = arg;
++ pr_debug(" case VIDIOC_ENUMOUTPUT\n");
++ if (output->index >= MXC_V4L2_CAPTURE_NUM_OUTPUTS) {
++ retval = -EINVAL;
++ break;
++ }
++ *output = mxc_capture_outputs[output->index];
++
++ break;
++ }
++ case VIDIOC_G_OUTPUT: {
++ int *p_output_num = arg;
++ pr_debug(" case VIDIOC_G_OUTPUT\n");
++ *p_output_num = cam->output;
++ break;
++ }
++
++ case VIDIOC_S_OUTPUT: {
++ int *p_output_num = arg;
++ pr_debug(" case VIDIOC_S_OUTPUT\n");
++ if (*p_output_num >= MXC_V4L2_CAPTURE_NUM_OUTPUTS) {
++ retval = -EINVAL;
++ break;
++ }
++ cam->output = *p_output_num;
++ break;
++ }
++
++ case VIDIOC_ENUMINPUT: {
++ struct v4l2_input *input = arg;
++ pr_debug(" case VIDIOC_ENUMINPUT\n");
++ if (input->index >= MXC_V4L2_CAPTURE_NUM_INPUTS) {
++ retval = -EINVAL;
++ break;
++ }
++ *input = mxc_capture_inputs[input->index];
++ break;
++ }
++
++ case VIDIOC_G_INPUT: {
++ int *index = arg;
++ pr_debug(" case VIDIOC_G_INPUT\n");
++ *index = cam->current_input;
++ break;
++ }
++
++ case VIDIOC_S_INPUT: {
++ int *index = arg;
++ pr_debug(" case VIDIOC_S_INPUT\n");
++ if (*index >= MXC_V4L2_CAPTURE_NUM_INPUTS) {
++ retval = -EINVAL;
++ break;
++ }
++
++ if (*index == cam->current_input)
++ break;
++
++ if ((mxc_capture_inputs[cam->current_input].status &
++ V4L2_IN_ST_NO_POWER) == 0) {
++ retval = mxc_streamoff(cam);
++ if (retval)
++ break;
++ mxc_capture_inputs[cam->current_input].status |=
++ V4L2_IN_ST_NO_POWER;
++ }
++
++ if (strcmp(mxc_capture_inputs[*index].name, "CSI MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_CSI_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
++ retval = csi_enc_select(cam);
++ if (retval)
++ break;
++#endif
++ } else if (strcmp(mxc_capture_inputs[*index].name,
++ "CSI IC MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_PRP_ENC_MODULE)
++ retval = prp_enc_select(cam);
++ if (retval)
++ break;
++#endif
++ }
++
++ mxc_capture_inputs[*index].status &= ~V4L2_IN_ST_NO_POWER;
++ cam->current_input = *index;
++ break;
++ }
++ case VIDIOC_ENUM_FMT: {
++ struct v4l2_fmtdesc *f = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_fmt_cap(cam->sensor, f);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_ENUM_FRAMESIZES: {
++ struct v4l2_frmsizeenum *fsize = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_framesizes(cam->sensor, fsize);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_DBG_G_CHIP_IDENT: {
++ struct v4l2_dbg_chip_ident *p = arg;
++ p->ident = V4L2_IDENT_NONE;
++ p->revision = 0;
++ if (cam->sensor)
++ retval = vidioc_int_g_chip_ident(cam->sensor, (int *)p);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_TRY_FMT:
++ case VIDIOC_QUERYCTRL:
++ case VIDIOC_G_TUNER:
++ case VIDIOC_S_TUNER:
++ case VIDIOC_G_FREQUENCY:
++ case VIDIOC_S_FREQUENCY:
++ default:
++ pr_debug(" case default or not supported\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (ioctlnr != VIDIOC_DQBUF)
++ up(&cam->busy_lock);
++ return retval;
++}
++
++/*
++ * V4L interface - ioctl function
++ *
++ * @return None
++ */
++static long mxc_v4l_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ pr_debug("In MVC:mxc_v4l_ioctl\n");
++ return video_usercopy(file, cmd, arg, mxc_v4l_do_ioctl);
++}
++
++/*!
++ * V4L interface - mmap function
++ *
++ * @param file structure file *
++ *
++ * @param vma structure vm_area_struct *
++ *
++ * @return status 0 Success, EINTR busy lock error, ENOBUFS remap_page error
++ */
++static int mxc_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct video_device *dev = video_devdata(file);
++ unsigned long size;
++ int res = 0;
++ cam_data *cam = video_get_drvdata(dev);
++
++ pr_debug("In MVC:mxc_mmap\n");
++ pr_debug(" pgoff=0x%lx, start=0x%lx, end=0x%lx\n",
++ vma->vm_pgoff, vma->vm_start, vma->vm_end);
++
++ /* make this _really_ smp-safe */
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ size = vma->vm_end - vma->vm_start;
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ vma->vm_pgoff, size, vma->vm_page_prot)) {
++ pr_err("ERROR: v4l2 capture: mxc_mmap: "
++ "remap_pfn_range failed\n");
++ res = -ENOBUFS;
++ goto mxc_mmap_exit;
++ }
++
++ vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
++
++mxc_mmap_exit:
++ up(&cam->busy_lock);
++ return res;
++}
++
++/*!
++ * V4L interface - poll function
++ *
++ * @param file structure file *
++ *
++ * @param wait structure poll_table_struct *
++ *
++ * @return status POLLIN | POLLRDNORM
++ */
++static unsigned int mxc_poll(struct file *file, struct poll_table_struct *wait)
++{
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ wait_queue_head_t *queue = NULL;
++ int res = POLLIN | POLLRDNORM;
++
++ pr_debug("In MVC:mxc_poll\n");
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ queue = &cam->enc_queue;
++ poll_wait(file, queue, wait);
++
++ up(&cam->busy_lock);
++
++ return res;
++}
++
++/*!
++ * This structure defines the functions to be called in this driver.
++ */
++static struct v4l2_file_operations mxc_v4l_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_v4l_open,
++ .release = mxc_v4l_close,
++ .read = mxc_v4l_read,
++ .ioctl = mxc_v4l_ioctl,
++ .mmap = mxc_mmap,
++ .poll = mxc_poll,
++};
++
++static struct video_device mxc_v4l_template = {
++ .name = "Mxc Camera",
++ .fops = &mxc_v4l_fops,
++ .release = video_device_release,
++};
++
++/*!
++ * This function can be used to release any platform data on closing.
++ */
++static void camera_platform_release(struct device *device)
++{
++}
++
++/*!
++ * Camera V4l2 callback function.
++ *
++ * @param mask u32
++ *
++ * @param dev void device structure
++ *
++ * @return status
++ */
++static void camera_callback(u32 mask, void *dev)
++{
++ struct mxc_v4l_frame *done_frame;
++ struct mxc_v4l_frame *ready_frame;
++ struct timeval cur_time;
++
++ cam_data *cam = (cam_data *) dev;
++ if (cam == NULL)
++ return;
++
++ pr_debug("In MVC:camera_callback\n");
++
++ spin_lock(&cam->queue_int_lock);
++ spin_lock(&cam->dqueue_int_lock);
++ if (!list_empty(&cam->working_q)) {
++ do_gettimeofday(&cur_time);
++
++ done_frame = list_entry(cam->working_q.next,
++ struct mxc_v4l_frame,
++ queue);
++
++ if (done_frame->ipu_buf_num != cam->local_buf_num)
++ goto next;
++
++ /*
++ * Set the current time to done frame buffer's
++ * timestamp. Users can use this information to judge
++ * the frame's usage.
++ */
++ done_frame->buffer.timestamp = cur_time;
++
++ if (done_frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
++ done_frame->buffer.flags |= V4L2_BUF_FLAG_DONE;
++ done_frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
++
++ /* Added to the done queue */
++ list_del(cam->working_q.next);
++ list_add_tail(&done_frame->queue, &cam->done_q);
++
++ /* Wake up the queue */
++ cam->enc_counter++;
++ wake_up_interruptible(&cam->enc_queue);
++ } else
++ pr_err("ERROR: v4l2 capture: camera_callback: "
++ "buffer not queued\n");
++ }
++
++next:
++ if (!list_empty(&cam->ready_q)) {
++ ready_frame = list_entry(cam->ready_q.next,
++ struct mxc_v4l_frame,
++ queue);
++ if (cam->enc_update_eba)
++ if (cam->enc_update_eba(cam->ipu,
++ ready_frame->buffer.m.offset,
++ &cam->ping_pong_csi) == 0) {
++ list_del(cam->ready_q.next);
++ list_add_tail(&ready_frame->queue,
++ &cam->working_q);
++ ready_frame->ipu_buf_num = cam->local_buf_num;
++ }
++ } else {
++ if (cam->enc_update_eba)
++ cam->enc_update_eba(
++ cam->ipu, cam->dummy_frame.buffer.m.offset,
++ &cam->ping_pong_csi);
++ }
++
++ cam->local_buf_num = (cam->local_buf_num == 0) ? 1 : 0;
++ spin_unlock(&cam->dqueue_int_lock);
++ spin_unlock(&cam->queue_int_lock);
++
++ return;
++}
++
++/*!
++ * initialize cam_data structure
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int init_camera_struct(cam_data *cam, struct platform_device *pdev)
++{
++ const struct of_device_id *of_id =
++ of_match_device(mxc_v4l2_dt_ids, &pdev->dev);
++ struct device_node *np = pdev->dev.of_node;
++ int ipu_id, csi_id, mclk_source;
++ int ret = 0;
++
++ pr_debug("In MVC: init_camera_struct\n");
++
++ ret = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (ret) {
++ dev_err(&pdev->dev, "ipu_id missing or invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(np, "csi_id", &csi_id);
++ if (ret) {
++ dev_err(&pdev->dev, "csi_id missing or invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(np, "mclk_source", &mclk_source);
++ if (ret) {
++ dev_err(&pdev->dev, "sensor mclk missing or invalid\n");
++ return ret;
++ }
++
++ /* Default everything to 0 */
++ memset(cam, 0, sizeof(cam_data));
++
++ /* get devtype to distinguish if the cpu is imx5 or imx6
++ * IMX5_V4L2 specify the cpu is imx5
++ * IMX6_V4L2 specify the cpu is imx6q or imx6sdl
++ */
++ if (of_id)
++ pdev->id_entry = of_id->data;
++ cam->devtype = pdev->id_entry->driver_data;
++
++ cam->ipu = ipu_get_soc(ipu_id);
++ if (cam->ipu == NULL) {
++ pr_err("ERROR: v4l2 capture: failed to get ipu\n");
++ return -EINVAL;
++ } else if (cam->ipu == ERR_PTR(-ENODEV)) {
++ pr_err("ERROR: v4l2 capture: get invalid ipu\n");
++ return -ENODEV;
++ }
++
++ init_MUTEX(&cam->param_lock);
++ init_MUTEX(&cam->busy_lock);
++
++ cam->video_dev = video_device_alloc();
++ if (cam->video_dev == NULL)
++ return -ENODEV;
++
++ *(cam->video_dev) = mxc_v4l_template;
++
++ video_set_drvdata(cam->video_dev, cam);
++ dev_set_drvdata(&pdev->dev, (void *)cam);
++ cam->video_dev->minor = -1;
++
++ init_waitqueue_head(&cam->enc_queue);
++ init_waitqueue_head(&cam->still_queue);
++
++ /* setup cropping */
++ cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = 640;
++ cam->crop_bounds.top = 0;
++ cam->crop_bounds.height = 480;
++ cam->crop_current = cam->crop_defrect = cam->crop_bounds;
++ ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
++ cam->crop_current.height, cam->csi);
++ ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
++ cam->crop_current.top, cam->csi);
++ cam->streamparm.parm.capture.capturemode = 0;
++
++ cam->standard.index = 0;
++ cam->standard.id = V4L2_STD_UNKNOWN;
++ cam->standard.frameperiod.denominator = 30;
++ cam->standard.frameperiod.numerator = 1;
++ cam->standard.framelines = 480;
++ cam->standard_autodetect = true;
++ cam->streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cam->streamparm.parm.capture.timeperframe = cam->standard.frameperiod;
++ cam->streamparm.parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
++ cam->overlay_on = false;
++ cam->capture_on = false;
++ cam->v4l2_fb.flags = V4L2_FBUF_FLAG_OVERLAY;
++
++ cam->v2f.fmt.pix.sizeimage = 352 * 288 * 3 / 2;
++ cam->v2f.fmt.pix.bytesperline = 288 * 3 / 2;
++ cam->v2f.fmt.pix.width = 288;
++ cam->v2f.fmt.pix.height = 352;
++ cam->v2f.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
++ cam->win.w.width = 160;
++ cam->win.w.height = 160;
++ cam->win.w.left = 0;
++ cam->win.w.top = 0;
++
++ cam->ipu_id = ipu_id;
++ cam->csi = csi_id;
++ cam->mclk_source = mclk_source;
++ cam->mclk_on[cam->mclk_source] = false;
++
++ cam->enc_callback = camera_callback;
++ init_waitqueue_head(&cam->power_queue);
++ spin_lock_init(&cam->queue_int_lock);
++ spin_lock_init(&cam->dqueue_int_lock);
++
++ cam->self = kmalloc(sizeof(struct v4l2_int_device), GFP_KERNEL);
++ cam->self->module = THIS_MODULE;
++ sprintf(cam->self->name, "mxc_v4l2_cap%d", cam->csi);
++ cam->self->type = v4l2_int_type_master;
++ cam->self->u.master = &mxc_v4l2_master;
++
++ return 0;
++}
++
++static ssize_t show_streaming(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct video_device *video_dev = container_of(dev,
++ struct video_device, dev);
++ cam_data *cam = video_get_drvdata(video_dev);
++
++ if (cam->capture_on)
++ return sprintf(buf, "stream on\n");
++ else
++ return sprintf(buf, "stream off\n");
++}
++static DEVICE_ATTR(fsl_v4l2_capture_property, S_IRUGO, show_streaming, NULL);
++
++static ssize_t show_overlay(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct video_device *video_dev = container_of(dev,
++ struct video_device, dev);
++ cam_data *cam = video_get_drvdata(video_dev);
++
++ if (cam->overlay_on)
++ return sprintf(buf, "overlay on\n");
++ else
++ return sprintf(buf, "overlay off\n");
++}
++static DEVICE_ATTR(fsl_v4l2_overlay_property, S_IRUGO, show_overlay, NULL);
++
++static ssize_t show_csi(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct video_device *video_dev = container_of(dev,
++ struct video_device, dev);
++ cam_data *cam = video_get_drvdata(video_dev);
++
++ return sprintf(buf, "ipu%d_csi%d\n", cam->ipu_id, cam->csi);
++}
++static DEVICE_ATTR(fsl_csi_property, S_IRUGO, show_csi, NULL);
++
++/*!
++ * This function is called to probe the devices if registered.
++ *
++ * @param pdev the device structure used to give information on which device
++ * to probe
++ *
++ * @return The function returns 0 on success and -1 on failure.
++ */
++static int mxc_v4l2_probe(struct platform_device *pdev)
++{
++ /* Create cam and initialize it. */
++ cam_data *cam = kmalloc(sizeof(cam_data), GFP_KERNEL);
++ if (cam == NULL) {
++ pr_err("ERROR: v4l2 capture: failed to register camera\n");
++ return -1;
++ }
++
++ init_camera_struct(cam, pdev);
++ pdev->dev.release = camera_platform_release;
++
++ /* Set up the v4l2 device and register it*/
++ cam->self->priv = cam;
++ v4l2_int_device_register(cam->self);
++
++ /* register v4l video device */
++ if (video_register_device(cam->video_dev, VFL_TYPE_GRABBER, video_nr)
++ == -1) {
++ kfree(cam);
++ cam = NULL;
++ pr_err("ERROR: v4l2 capture: video_register_device failed\n");
++ return -1;
++ }
++ pr_debug(" Video device registered: %s #%d\n",
++ cam->video_dev->name, cam->video_dev->minor);
++
++ if (device_create_file(&cam->video_dev->dev,
++ &dev_attr_fsl_v4l2_capture_property))
++ dev_err(&pdev->dev, "Error on creating sysfs file"
++ " for capture\n");
++
++ if (device_create_file(&cam->video_dev->dev,
++ &dev_attr_fsl_v4l2_overlay_property))
++ dev_err(&pdev->dev, "Error on creating sysfs file"
++ " for overlay\n");
++
++ if (device_create_file(&cam->video_dev->dev,
++ &dev_attr_fsl_csi_property))
++ dev_err(&pdev->dev, "Error on creating sysfs file"
++ " for csi number\n");
++
++ return 0;
++}
++
++/*!
++ * This function is called to remove the devices when device unregistered.
++ *
++ * @param pdev the device structure used to give information on which device
++ * to remove
++ *
++ * @return The function returns 0 on success and -1 on failure.
++ */
++static int mxc_v4l2_remove(struct platform_device *pdev)
++{
++ cam_data *cam = (cam_data *)platform_get_drvdata(pdev);
++ if (cam->open_count) {
++ pr_err("ERROR: v4l2 capture:camera open "
++ "-- setting ops to NULL\n");
++ return -EBUSY;
++ } else {
++ device_remove_file(&cam->video_dev->dev,
++ &dev_attr_fsl_v4l2_capture_property);
++ device_remove_file(&cam->video_dev->dev,
++ &dev_attr_fsl_v4l2_overlay_property);
++ device_remove_file(&cam->video_dev->dev,
++ &dev_attr_fsl_csi_property);
++
++ pr_info("V4L2 freeing image input device\n");
++ v4l2_int_device_unregister(cam->self);
++ video_unregister_device(cam->video_dev);
++
++ mxc_free_frame_buf(cam);
++ kfree(cam);
++ }
++
++ pr_info("V4L2 unregistering video\n");
++ return 0;
++}
++
++/*!
++ * This function is called to put the sensor in a low power state.
++ * Refer to the document driver-model/driver.txt in the kernel source tree
++ * for more information.
++ *
++ * @param pdev the device structure used to give information on which I2C
++ * to suspend
++ * @param state the power state the device is entering
++ *
++ * @return The function returns 0 on success and -1 on failure.
++ */
++static int mxc_v4l2_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ cam_data *cam = platform_get_drvdata(pdev);
++
++ pr_debug("In MVC:mxc_v4l2_suspend\n");
++
++ if (cam == NULL)
++ return -1;
++
++ down(&cam->busy_lock);
++
++ cam->low_power = true;
++
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++ if ((cam->capture_on == true) && cam->enc_disable)
++ cam->enc_disable(cam);
++
++ if (cam->sensor && cam->open_count) {
++ if (cam->mclk_on[cam->mclk_source]) {
++ ipu_csi_enable_mclk_if(cam->ipu, CSI_MCLK_I2C,
++ cam->mclk_source,
++ false, false);
++ cam->mclk_on[cam->mclk_source] = false;
++ }
++ vidioc_int_s_power(cam->sensor, 0);
++ }
++
++ up(&cam->busy_lock);
++
++ return 0;
++}
++
++/*!
++ * This function is called to bring the sensor back from a low power state.
++ * Refer to the document driver-model/driver.txt in the kernel source tree
++ * for more information.
++ *
++ * @param pdev the device structure
++ *
++ * @return The function returns 0 on success and -1 on failure
++ */
++static int mxc_v4l2_resume(struct platform_device *pdev)
++{
++ cam_data *cam = platform_get_drvdata(pdev);
++
++ pr_debug("In MVC:mxc_v4l2_resume\n");
++
++ if (cam == NULL)
++ return -1;
++
++ down(&cam->busy_lock);
++
++ cam->low_power = false;
++ wake_up_interruptible(&cam->power_queue);
++
++ if (cam->sensor && cam->open_count) {
++ vidioc_int_s_power(cam->sensor, 1);
++
++ if (!cam->mclk_on[cam->mclk_source]) {
++ ipu_csi_enable_mclk_if(cam->ipu, CSI_MCLK_I2C,
++ cam->mclk_source,
++ true, true);
++ cam->mclk_on[cam->mclk_source] = true;
++ }
++ }
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++ if (cam->capture_on == true)
++ mxc_streamon(cam);
++
++ up(&cam->busy_lock);
++
++ return 0;
++}
++
++/*!
++ * This structure contains pointers to the power management callback functions.
++ */
++static struct platform_driver mxc_v4l2_driver = {
++ .driver = {
++ .name = "mxc_v4l2_capture",
++ .owner = THIS_MODULE,
++ .of_match_table = mxc_v4l2_dt_ids,
++ },
++ .id_table = imx_v4l2_devtype,
++ .probe = mxc_v4l2_probe,
++ .remove = mxc_v4l2_remove,
++ .suspend = mxc_v4l2_suspend,
++ .resume = mxc_v4l2_resume,
++ .shutdown = NULL,
++};
++
++/*!
++ * Initializes the camera driver.
++ */
++static int mxc_v4l2_master_attach(struct v4l2_int_device *slave)
++{
++ cam_data *cam = slave->u.slave->master->priv;
++ struct v4l2_format cam_fmt;
++ int i;
++ struct sensor_data *sdata = slave->priv;
++
++ pr_debug("In MVC: mxc_v4l2_master_attach\n");
++ pr_debug(" slave.name = %s\n", slave->name);
++ pr_debug(" master.name = %s\n", slave->u.slave->master->name);
++
++ if (slave == NULL) {
++ pr_err("ERROR: v4l2 capture: slave parameter not valid.\n");
++ return -1;
++ }
++
++ if (sdata->csi != cam->csi) {
++ pr_debug("%s: csi doesn't match\n", __func__);
++ return -1;
++ }
++
++ cam->sensor = slave;
++
++ if (cam->sensor_index < MXC_SENSOR_NUM) {
++ cam->all_sensors[cam->sensor_index] = slave;
++ cam->sensor_index++;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave number exceeds the maximum.\n");
++ return -1;
++ }
++
++ for (i = 0; i < cam->sensor_index; i++) {
++ vidioc_int_dev_exit(cam->all_sensors[i]);
++ vidioc_int_s_power(cam->all_sensors[i], 0);
++ }
++
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++
++ /* Used to detect TV in (type 1) vs. camera (type 0)*/
++ cam->device_type = cam_fmt.fmt.pix.priv;
++
++ /* Set the input size to the ipu for this device */
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++
++ /* This also is the max crop size for this device. */
++ cam->crop_defrect.top = cam->crop_defrect.left = 0;
++ cam->crop_defrect.width = cam_fmt.fmt.pix.width;
++ cam->crop_defrect.height = cam_fmt.fmt.pix.height;
++
++ /* At this point, this is also the current image size. */
++ cam->crop_current.top = cam->crop_current.left = 0;
++ cam->crop_current.width = cam_fmt.fmt.pix.width;
++ cam->crop_current.height = cam_fmt.fmt.pix.height;
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ return 0;
++}
++
++/*!
++ * Disconnects the camera driver.
++ */
++static void mxc_v4l2_master_detach(struct v4l2_int_device *slave)
++{
++ unsigned int i;
++ cam_data *cam = slave->u.slave->master->priv;
++
++ pr_debug("In MVC:mxc_v4l2_master_detach\n");
++
++ if (cam->sensor_index > 1) {
++ for (i = 0; i < cam->sensor_index; i++) {
++ if (cam->all_sensors[i] != slave)
++ continue;
++ /* Move all the sensors behind this
++ * sensor one step forward
++ */
++ for (; i <= MXC_SENSOR_NUM - 2; i++)
++ cam->all_sensors[i] = cam->all_sensors[i+1];
++ break;
++ }
++ /* Point current sensor to the last one */
++ cam->sensor = cam->all_sensors[cam->sensor_index - 2];
++ } else
++ cam->sensor = NULL;
++
++ cam->sensor_index--;
++ vidioc_int_dev_exit(slave);
++}
++
++/*!
++ * Entry point for the V4L2
++ *
++ * @return Error code indicating success or failure
++ */
++static __init int camera_init(void)
++{
++ u8 err = 0;
++
++ pr_debug("In MVC:camera_init\n");
++
++ /* Register the device driver structure. */
++ err = platform_driver_register(&mxc_v4l2_driver);
++ if (err != 0) {
++ pr_err("ERROR: v4l2 capture:camera_init: "
++ "platform_driver_register failed.\n");
++ return err;
++ }
++
++ return err;
++}
++
++/*!
++ * Exit and cleanup for the V4L2
++ */
++static void __exit camera_exit(void)
++{
++ pr_debug("In MVC: camera_exit\n");
++
++ platform_driver_unregister(&mxc_v4l2_driver);
++}
++
++module_init(camera_init);
++module_exit(camera_exit);
++
++module_param(video_nr, int, 0444);
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("V4L2 capture driver for Mxc based cameras");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("video");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/mxc_v4l2_capture.h linux-openelec/drivers/media/platform/mxc/capture/mxc_v4l2_capture.h
+--- linux-3.14.36/drivers/media/platform/mxc/capture/mxc_v4l2_capture.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/mxc_v4l2_capture.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,260 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup MXC_V4L2_CAPTURE MXC V4L2 Video Capture Driver
++ */
++/*!
++ * @file mxc_v4l2_capture.h
++ *
++ * @brief mxc V4L2 capture device API Header file
++ *
++ * It include all the defines for frame operations, also three structure defines
++ * use case ops structure, common v4l2 driver structure and frame structure.
++ *
++ * @ingroup MXC_V4L2_CAPTURE
++ */
++#ifndef __MXC_V4L2_CAPTURE_H__
++#define __MXC_V4L2_CAPTURE_H__
++
++#include <linux/uaccess.h>
++#include <linux/list.h>
++#include <linux/mxc_v4l2.h>
++#include <linux/completion.h>
++#include <linux/dmaengine.h>
++#include <linux/pxp_dma.h>
++#include <linux/ipu-v3.h>
++#include <linux/platform_data/dma-imx.h>
++
++#include <media/v4l2-dev.h>
++#include <media/v4l2-int-device.h>
++
++
++#define FRAME_NUM 10
++#define MXC_SENSOR_NUM 2
++
++enum imx_v4l2_devtype {
++ IMX5_V4L2,
++ IMX6_V4L2,
++};
++
++/*!
++ * v4l2 frame structure.
++ */
++struct mxc_v4l_frame {
++ u32 paddress;
++ void *vaddress;
++ int count;
++ int width;
++ int height;
++
++ struct v4l2_buffer buffer;
++ struct list_head queue;
++ int index;
++ union {
++ int ipu_buf_num;
++ int csi_buf_num;
++ };
++};
++
++/* Only for old version. Will go away soon. */
++typedef struct {
++ u8 clk_mode;
++ u8 ext_vsync;
++ u8 Vsync_pol;
++ u8 Hsync_pol;
++ u8 pixclk_pol;
++ u8 data_pol;
++ u8 data_width;
++ u8 pack_tight;
++ u8 force_eof;
++ u8 data_en_pol;
++ u16 width;
++ u16 height;
++ u32 pixel_fmt;
++ u32 mclk;
++ u16 active_width;
++ u16 active_height;
++} sensor_interface;
++
++/* Sensor control function */
++/* Only for old version. Will go away soon. */
++struct camera_sensor {
++ void (*set_color) (int bright, int saturation, int red, int green,
++ int blue);
++ void (*get_color) (int *bright, int *saturation, int *red, int *green,
++ int *blue);
++ void (*set_ae_mode) (int ae_mode);
++ void (*get_ae_mode) (int *ae_mode);
++ sensor_interface *(*config) (int *frame_rate, int high_quality);
++ sensor_interface *(*reset) (void);
++ void (*get_std) (v4l2_std_id *std);
++ void (*set_std) (v4l2_std_id std);
++ unsigned int csi;
++};
++
++/*!
++ * common v4l2 driver structure.
++ */
++typedef struct _cam_data {
++ struct video_device *video_dev;
++ int device_type;
++
++ /* semaphore guard against SMP multithreading */
++ struct semaphore busy_lock;
++
++ int open_count;
++
++ /* params lock for this camera */
++ struct semaphore param_lock;
++
++ /* Encoder */
++ struct list_head ready_q;
++ struct list_head done_q;
++ struct list_head working_q;
++ int ping_pong_csi;
++ spinlock_t queue_int_lock;
++ spinlock_t dqueue_int_lock;
++ struct mxc_v4l_frame frame[FRAME_NUM];
++ struct mxc_v4l_frame dummy_frame;
++ wait_queue_head_t enc_queue;
++ int enc_counter;
++ dma_addr_t rot_enc_bufs[2];
++ void *rot_enc_bufs_vaddr[2];
++ int rot_enc_buf_size[2];
++ enum v4l2_buf_type type;
++
++ /* still image capture */
++ wait_queue_head_t still_queue;
++ int still_counter;
++ dma_addr_t still_buf[2];
++ void *still_buf_vaddr;
++
++ /* overlay */
++ struct v4l2_window win;
++ struct v4l2_framebuffer v4l2_fb;
++ dma_addr_t vf_bufs[2];
++ void *vf_bufs_vaddr[2];
++ int vf_bufs_size[2];
++ dma_addr_t rot_vf_bufs[2];
++ void *rot_vf_bufs_vaddr[2];
++ int rot_vf_buf_size[2];
++ bool overlay_active;
++ int output;
++ struct fb_info *overlay_fb;
++ int fb_origin_std;
++ struct work_struct csi_work_struct;
++
++ /* v4l2 format */
++ struct v4l2_format v2f;
++ int rotation; /* for IPUv1 and IPUv3, this means encoder rotation */
++ int vf_rotation; /* viewfinder rotation only for IPUv1 and IPUv3 */
++ struct v4l2_mxc_offset offset;
++
++ /* V4l2 control bit */
++ int bright;
++ int hue;
++ int contrast;
++ int saturation;
++ int red;
++ int green;
++ int blue;
++ int ae_mode;
++
++ /* standard */
++ struct v4l2_streamparm streamparm;
++ struct v4l2_standard standard;
++ bool standard_autodetect;
++
++ /* crop */
++ struct v4l2_rect crop_bounds;
++ struct v4l2_rect crop_defrect;
++ struct v4l2_rect crop_current;
++
++ int (*enc_update_eba) (struct ipu_soc *ipu, dma_addr_t eba,
++ int *bufferNum);
++ int (*enc_enable) (void *private);
++ int (*enc_disable) (void *private);
++ int (*enc_enable_csi) (void *private);
++ int (*enc_disable_csi) (void *private);
++ void (*enc_callback) (u32 mask, void *dev);
++ int (*vf_start_adc) (void *private);
++ int (*vf_stop_adc) (void *private);
++ int (*vf_start_sdc) (void *private);
++ int (*vf_stop_sdc) (void *private);
++ int (*vf_enable_csi) (void *private);
++ int (*vf_disable_csi) (void *private);
++ int (*csi_start) (void *private);
++ int (*csi_stop) (void *private);
++
++ /* misc status flag */
++ bool overlay_on;
++ bool capture_on;
++ int overlay_pid;
++ int capture_pid;
++ bool low_power;
++ wait_queue_head_t power_queue;
++ unsigned int ipu_id;
++ unsigned int csi;
++ u8 mclk_source;
++ bool mclk_on[2]; /* two mclk sources at most now */
++ int current_input;
++
++ int local_buf_num;
++
++ /* camera sensor interface */
++ struct camera_sensor *cam_sensor; /* old version */
++ struct v4l2_int_device *all_sensors[MXC_SENSOR_NUM];
++ struct v4l2_int_device *sensor;
++ struct v4l2_int_device *self;
++ int sensor_index;
++ void *ipu;
++ enum imx_v4l2_devtype devtype;
++
++ /* v4l2 buf elements related to PxP DMA */
++ struct completion pxp_tx_cmpl;
++ struct pxp_channel *pxp_chan;
++ struct pxp_config_data pxp_conf;
++ struct dma_async_tx_descriptor *txd;
++ dma_cookie_t cookie;
++ struct scatterlist sg[2];
++} cam_data;
++
++struct sensor_data {
++ const struct ov5642_platform_data *platform_data;
++ struct v4l2_int_device *v4l2_int_device;
++ struct i2c_client *i2c_client;
++ struct v4l2_pix_format pix;
++ struct v4l2_captureparm streamcap;
++ bool on;
++
++ /* control settings */
++ int brightness;
++ int hue;
++ int contrast;
++ int saturation;
++ int red;
++ int green;
++ int blue;
++ int ae_mode;
++
++ u32 mclk;
++ u8 mclk_source;
++ struct clk *sensor_clk;
++ int csi;
++
++ void (*io_init)(void);
++};
++
++void set_mclk_rate(uint32_t *p_mclk_freq, uint32_t csi);
++#endif /* __MXC_V4L2_CAPTURE_H__ */
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ov5640.c linux-openelec/drivers/media/platform/mxc/capture/ov5640.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ov5640.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ov5640.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1951 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_device.h>
++#include <linux/of_gpio.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/regulator/consumer.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-int-device.h>
++#include "mxc_v4l2_capture.h"
++
++#define OV5640_VOLTAGE_ANALOG 2800000
++#define OV5640_VOLTAGE_DIGITAL_CORE 1500000
++#define OV5640_VOLTAGE_DIGITAL_IO 1800000
++
++#define MIN_FPS 15
++#define MAX_FPS 30
++#define DEFAULT_FPS 30
++
++#define OV5640_XCLK_MIN 6000000
++#define OV5640_XCLK_MAX 24000000
++
++#define OV5640_CHIP_ID_HIGH_BYTE 0x300A
++#define OV5640_CHIP_ID_LOW_BYTE 0x300B
++
++enum ov5640_mode {
++ ov5640_mode_MIN = 0,
++ ov5640_mode_VGA_640_480 = 0,
++ ov5640_mode_QVGA_320_240 = 1,
++ ov5640_mode_NTSC_720_480 = 2,
++ ov5640_mode_PAL_720_576 = 3,
++ ov5640_mode_720P_1280_720 = 4,
++ ov5640_mode_1080P_1920_1080 = 5,
++ ov5640_mode_QSXGA_2592_1944 = 6,
++ ov5640_mode_QCIF_176_144 = 7,
++ ov5640_mode_XGA_1024_768 = 8,
++ ov5640_mode_MAX = 8
++};
++
++enum ov5640_frame_rate {
++ ov5640_15_fps,
++ ov5640_30_fps
++};
++
++static int ov5640_framerates[] = {
++ [ov5640_15_fps] = 15,
++ [ov5640_30_fps] = 30,
++};
++
++struct reg_value {
++ u16 u16RegAddr;
++ u8 u8Val;
++ u8 u8Mask;
++ u32 u32Delay_ms;
++};
++
++struct ov5640_mode_info {
++ enum ov5640_mode mode;
++ u32 width;
++ u32 height;
++ struct reg_value *init_data_ptr;
++ u32 init_data_size;
++};
++
++/*!
++ * Maintains the information on the current state of the sesor.
++ */
++static struct sensor_data ov5640_data;
++static int pwn_gpio, rst_gpio;
++static int prev_sysclk;
++static int AE_Target = 52, night_mode;
++static int prev_HTS;
++static int AE_high, AE_low;
++
++static struct reg_value ov5640_global_init_setting[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3103, 0x03, 0, 0}, {0x3017, 0xff, 0, 0}, {0x3018, 0xff, 0, 0},
++ {0x3034, 0x1a, 0, 0}, {0x3037, 0x13, 0, 0}, {0x3108, 0x01, 0, 0},
++ {0x3630, 0x36, 0, 0}, {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0},
++ {0x3633, 0x12, 0, 0}, {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0},
++ {0x3703, 0x5a, 0, 0}, {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0},
++ {0x370b, 0x60, 0, 0}, {0x3705, 0x1a, 0, 0}, {0x3905, 0x02, 0, 0},
++ {0x3906, 0x10, 0, 0}, {0x3901, 0x0a, 0, 0}, {0x3731, 0x12, 0, 0},
++ {0x3600, 0x08, 0, 0}, {0x3601, 0x33, 0, 0}, {0x302d, 0x60, 0, 0},
++ {0x3620, 0x52, 0, 0}, {0x371b, 0x20, 0, 0}, {0x471c, 0x50, 0, 0},
++ {0x3a13, 0x43, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3635, 0x13, 0, 0}, {0x3636, 0x03, 0, 0}, {0x3634, 0x40, 0, 0},
++ {0x3622, 0x01, 0, 0}, {0x3c01, 0x34, 0, 0}, {0x3c04, 0x28, 0, 0},
++ {0x3c05, 0x98, 0, 0}, {0x3c06, 0x00, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3c08, 0x00, 0, 0}, {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0},
++ {0x3c0b, 0x40, 0, 0}, {0x3810, 0x00, 0, 0}, {0x3811, 0x10, 0, 0},
++ {0x3812, 0x00, 0, 0}, {0x3708, 0x64, 0, 0}, {0x4001, 0x02, 0, 0},
++ {0x4005, 0x1a, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3004, 0xff, 0, 0},
++ {0x300e, 0x58, 0, 0}, {0x302e, 0x00, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x440e, 0x00, 0, 0}, {0x5000, 0xa7, 0, 0},
++ {0x3008, 0x02, 0, 0},
++};
++
++static struct reg_value ov5640_init_setting_30fps_VGA[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3103, 0x03, 0, 0}, {0x3017, 0xff, 0, 0}, {0x3018, 0xff, 0, 0},
++ {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0}, {0x3036, 0x46, 0, 0},
++ {0x3037, 0x13, 0, 0}, {0x3108, 0x01, 0, 0}, {0x3630, 0x36, 0, 0},
++ {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
++ {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
++ {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
++ {0x3705, 0x1a, 0, 0}, {0x3905, 0x02, 0, 0}, {0x3906, 0x10, 0, 0},
++ {0x3901, 0x0a, 0, 0}, {0x3731, 0x12, 0, 0}, {0x3600, 0x08, 0, 0},
++ {0x3601, 0x33, 0, 0}, {0x302d, 0x60, 0, 0}, {0x3620, 0x52, 0, 0},
++ {0x371b, 0x20, 0, 0}, {0x471c, 0x50, 0, 0}, {0x3a13, 0x43, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0xf8, 0, 0}, {0x3635, 0x13, 0, 0},
++ {0x3636, 0x03, 0, 0}, {0x3634, 0x40, 0, 0}, {0x3622, 0x01, 0, 0},
++ {0x3c01, 0x34, 0, 0}, {0x3c04, 0x28, 0, 0}, {0x3c05, 0x98, 0, 0},
++ {0x3c06, 0x00, 0, 0}, {0x3c07, 0x08, 0, 0}, {0x3c08, 0x00, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x300e, 0x58, 0, 0}, {0x302e, 0x00, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0},
++ {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5000, 0xa7, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0xf2, 0, 0},
++ {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0}, {0x5187, 0x09, 0, 0},
++ {0x5188, 0x09, 0, 0}, {0x5189, 0x88, 0, 0}, {0x518a, 0x54, 0, 0},
++ {0x518b, 0xee, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x50, 0, 0},
++ {0x518e, 0x34, 0, 0}, {0x518f, 0x6b, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x6c, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x09, 0, 0},
++ {0x519d, 0x2b, 0, 0}, {0x519e, 0x38, 0, 0}, {0x5381, 0x1e, 0, 0},
++ {0x5382, 0x5b, 0, 0}, {0x5383, 0x08, 0, 0}, {0x5384, 0x0a, 0, 0},
++ {0x5385, 0x7e, 0, 0}, {0x5386, 0x88, 0, 0}, {0x5387, 0x7c, 0, 0},
++ {0x5388, 0x6c, 0, 0}, {0x5389, 0x10, 0, 0}, {0x538a, 0x01, 0, 0},
++ {0x538b, 0x98, 0, 0}, {0x5300, 0x08, 0, 0}, {0x5301, 0x30, 0, 0},
++ {0x5302, 0x10, 0, 0}, {0x5303, 0x00, 0, 0}, {0x5304, 0x08, 0, 0},
++ {0x5305, 0x30, 0, 0}, {0x5306, 0x08, 0, 0}, {0x5307, 0x16, 0, 0},
++ {0x5309, 0x08, 0, 0}, {0x530a, 0x30, 0, 0}, {0x530b, 0x04, 0, 0},
++ {0x530c, 0x06, 0, 0}, {0x5480, 0x01, 0, 0}, {0x5481, 0x08, 0, 0},
++ {0x5482, 0x14, 0, 0}, {0x5483, 0x28, 0, 0}, {0x5484, 0x51, 0, 0},
++ {0x5485, 0x65, 0, 0}, {0x5486, 0x71, 0, 0}, {0x5487, 0x7d, 0, 0},
++ {0x5488, 0x87, 0, 0}, {0x5489, 0x91, 0, 0}, {0x548a, 0x9a, 0, 0},
++ {0x548b, 0xaa, 0, 0}, {0x548c, 0xb8, 0, 0}, {0x548d, 0xcd, 0, 0},
++ {0x548e, 0xdd, 0, 0}, {0x548f, 0xea, 0, 0}, {0x5490, 0x1d, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x10, 0, 0},
++ {0x5589, 0x10, 0, 0}, {0x558a, 0x00, 0, 0}, {0x558b, 0xf8, 0, 0},
++ {0x5800, 0x23, 0, 0}, {0x5801, 0x14, 0, 0}, {0x5802, 0x0f, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x12, 0, 0}, {0x5805, 0x26, 0, 0},
++ {0x5806, 0x0c, 0, 0}, {0x5807, 0x08, 0, 0}, {0x5808, 0x05, 0, 0},
++ {0x5809, 0x05, 0, 0}, {0x580a, 0x08, 0, 0}, {0x580b, 0x0d, 0, 0},
++ {0x580c, 0x08, 0, 0}, {0x580d, 0x03, 0, 0}, {0x580e, 0x00, 0, 0},
++ {0x580f, 0x00, 0, 0}, {0x5810, 0x03, 0, 0}, {0x5811, 0x09, 0, 0},
++ {0x5812, 0x07, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x00, 0, 0},
++ {0x5815, 0x01, 0, 0}, {0x5816, 0x03, 0, 0}, {0x5817, 0x08, 0, 0},
++ {0x5818, 0x0d, 0, 0}, {0x5819, 0x08, 0, 0}, {0x581a, 0x05, 0, 0},
++ {0x581b, 0x06, 0, 0}, {0x581c, 0x08, 0, 0}, {0x581d, 0x0e, 0, 0},
++ {0x581e, 0x29, 0, 0}, {0x581f, 0x17, 0, 0}, {0x5820, 0x11, 0, 0},
++ {0x5821, 0x11, 0, 0}, {0x5822, 0x15, 0, 0}, {0x5823, 0x28, 0, 0},
++ {0x5824, 0x46, 0, 0}, {0x5825, 0x26, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x26, 0, 0}, {0x5828, 0x64, 0, 0}, {0x5829, 0x26, 0, 0},
++ {0x582a, 0x24, 0, 0}, {0x582b, 0x22, 0, 0}, {0x582c, 0x24, 0, 0},
++ {0x582d, 0x24, 0, 0}, {0x582e, 0x06, 0, 0}, {0x582f, 0x22, 0, 0},
++ {0x5830, 0x40, 0, 0}, {0x5831, 0x42, 0, 0}, {0x5832, 0x24, 0, 0},
++ {0x5833, 0x26, 0, 0}, {0x5834, 0x24, 0, 0}, {0x5835, 0x22, 0, 0},
++ {0x5836, 0x22, 0, 0}, {0x5837, 0x26, 0, 0}, {0x5838, 0x44, 0, 0},
++ {0x5839, 0x24, 0, 0}, {0x583a, 0x26, 0, 0}, {0x583b, 0x28, 0, 0},
++ {0x583c, 0x42, 0, 0}, {0x583d, 0xce, 0, 0}, {0x5025, 0x00, 0, 0},
++ {0x3a0f, 0x30, 0, 0}, {0x3a10, 0x28, 0, 0}, {0x3a1b, 0x30, 0, 0},
++ {0x3a1e, 0x26, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x14, 0, 0},
++ {0x3008, 0x02, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0}, {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0}, {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0},
++ {0x380a, 0x00, 0, 0}, {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0},
++ {0x380a, 0x00, 0, 0}, {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0},
++ {0x3807, 0xd4, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0},
++ {0x3807, 0xd4, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x60, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x09, 0, 0}, {0x3805, 0x7e, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0},
++ {0x380a, 0x02, 0, 0}, {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_PAL_720_576[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x60, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x09, 0, 0}, {0x3805, 0x7e, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0},
++ {0x380a, 0x02, 0, 0}, {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x69, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3709, 0x52, 0, 0},
++ {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0}, {0x3a03, 0xe0, 0, 0},
++ {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe0, 0, 0}, {0x4004, 0x02, 0, 0},
++ {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x4837, 0x16, 0, 0}, {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
++ {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
++ {0x3035, 0x41, 0, 0}, {0x3036, 0x69, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3709, 0x52, 0, 0},
++ {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0}, {0x3a03, 0xe0, 0, 0},
++ {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe0, 0, 0}, {0x4004, 0x02, 0, 0},
++ {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x4837, 0x16, 0, 0}, {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
++ {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0},
++ {0x380a, 0x00, 0, 0}, {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0},
++ {0x380a, 0x00, 0, 0}, {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0},
++ {0x380a, 0x03, 0, 0}, {0x380b, 0x00, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x01, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x69, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0},
++ {0x380a, 0x03, 0, 0}, {0x380b, 0x00, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x01, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++
++static struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
++ {0x3c07, 0x07, 0, 0}, {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0},
++ {0x3814, 0x11, 0, 0}, {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0xee, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x05, 0, 0},
++ {0x3807, 0xc3, 0, 0}, {0x3808, 0x07, 0, 0}, {0x3809, 0x80, 0, 0},
++ {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0}, {0x380c, 0x0b, 0, 0},
++ {0x380d, 0x1c, 0, 0}, {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0},
++ {0x3813, 0x04, 0, 0}, {0x3618, 0x04, 0, 0}, {0x3612, 0x2b, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x07, 0, 0},
++ {0x3a03, 0xae, 0, 0}, {0x3a14, 0x07, 0, 0}, {0x3a15, 0xae, 0, 0},
++ {0x4004, 0x06, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x02, 0, 0}, {0x4407, 0x0c, 0, 0}, {0x460b, 0x37, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x4837, 0x2c, 0, 0}, {0x3824, 0x01, 0, 0},
++ {0x5001, 0x83, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x69, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QSXGA_2592_1944[] = {
++ {0x3c07, 0x07, 0, 0}, {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0},
++ {0x3814, 0x11, 0, 0}, {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9f, 0, 0}, {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0},
++ {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0},
++ {0x380d, 0x1c, 0, 0}, {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0},
++ {0x3813, 0x04, 0, 0}, {0x3618, 0x04, 0, 0}, {0x3612, 0x2b, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x07, 0, 0},
++ {0x3a03, 0xae, 0, 0}, {0x3a14, 0x07, 0, 0}, {0x3a15, 0xae, 0, 0},
++ {0x4004, 0x06, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x02, 0, 0}, {0x4407, 0x0c, 0, 0}, {0x460b, 0x37, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x4837, 0x2c, 0, 0}, {0x3824, 0x01, 0, 0},
++ {0x5001, 0x83, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x69, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct ov5640_mode_info ov5640_mode_info_data[2][ov5640_mode_MAX + 1] = {
++ {
++ {ov5640_mode_VGA_640_480, 640, 480,
++ ov5640_setting_15fps_VGA_640_480,
++ ARRAY_SIZE(ov5640_setting_15fps_VGA_640_480)},
++ {ov5640_mode_QVGA_320_240, 320, 240,
++ ov5640_setting_15fps_QVGA_320_240,
++ ARRAY_SIZE(ov5640_setting_15fps_QVGA_320_240)},
++ {ov5640_mode_NTSC_720_480, 720, 480,
++ ov5640_setting_15fps_NTSC_720_480,
++ ARRAY_SIZE(ov5640_setting_15fps_NTSC_720_480)},
++ {ov5640_mode_PAL_720_576, 720, 576,
++ ov5640_setting_15fps_PAL_720_576,
++ ARRAY_SIZE(ov5640_setting_15fps_PAL_720_576)},
++ {ov5640_mode_720P_1280_720, 1280, 720,
++ ov5640_setting_15fps_720P_1280_720,
++ ARRAY_SIZE(ov5640_setting_15fps_720P_1280_720)},
++ {ov5640_mode_1080P_1920_1080, 1920, 1080,
++ ov5640_setting_15fps_1080P_1920_1080,
++ ARRAY_SIZE(ov5640_setting_15fps_1080P_1920_1080)},
++ {ov5640_mode_QSXGA_2592_1944, 2592, 1944,
++ ov5640_setting_15fps_QSXGA_2592_1944,
++ ARRAY_SIZE(ov5640_setting_15fps_QSXGA_2592_1944)},
++ {ov5640_mode_QCIF_176_144, 176, 144,
++ ov5640_setting_15fps_QCIF_176_144,
++ ARRAY_SIZE(ov5640_setting_15fps_QCIF_176_144)},
++ {ov5640_mode_XGA_1024_768, 1024, 768,
++ ov5640_setting_15fps_XGA_1024_768,
++ ARRAY_SIZE(ov5640_setting_15fps_XGA_1024_768)},
++ },
++ {
++ {ov5640_mode_VGA_640_480, 640, 480,
++ ov5640_setting_30fps_VGA_640_480,
++ ARRAY_SIZE(ov5640_setting_30fps_VGA_640_480)},
++ {ov5640_mode_QVGA_320_240, 320, 240,
++ ov5640_setting_30fps_QVGA_320_240,
++ ARRAY_SIZE(ov5640_setting_30fps_QVGA_320_240)},
++ {ov5640_mode_NTSC_720_480, 720, 480,
++ ov5640_setting_30fps_NTSC_720_480,
++ ARRAY_SIZE(ov5640_setting_30fps_NTSC_720_480)},
++ {ov5640_mode_PAL_720_576, 720, 576,
++ ov5640_setting_30fps_PAL_720_576,
++ ARRAY_SIZE(ov5640_setting_30fps_PAL_720_576)},
++ {ov5640_mode_720P_1280_720, 1280, 720,
++ ov5640_setting_30fps_720P_1280_720,
++ ARRAY_SIZE(ov5640_setting_30fps_720P_1280_720)},
++ {ov5640_mode_1080P_1920_1080, 0, 0, NULL, 0},
++ {ov5640_mode_QSXGA_2592_1944, 0, 0, NULL, 0},
++ {ov5640_mode_QCIF_176_144, 176, 144,
++ ov5640_setting_30fps_QCIF_176_144,
++ ARRAY_SIZE(ov5640_setting_30fps_QCIF_176_144)},
++ {ov5640_mode_XGA_1024_768, 1024, 768,
++ ov5640_setting_30fps_XGA_1024_768,
++ ARRAY_SIZE(ov5640_setting_30fps_XGA_1024_768)},
++ },
++};
++
++static struct regulator *io_regulator;
++static struct regulator *core_regulator;
++static struct regulator *analog_regulator;
++
++static int ov5640_probe(struct i2c_client *adapter,
++ const struct i2c_device_id *device_id);
++static int ov5640_remove(struct i2c_client *client);
++
++static s32 ov5640_read_reg(u16 reg, u8 *val);
++static s32 ov5640_write_reg(u16 reg, u8 val);
++
++static const struct i2c_device_id ov5640_id[] = {
++ {"ov5640", 0},
++ {"ov564x", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, ov5640_id);
++
++static struct i2c_driver ov5640_i2c_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "ov5640",
++ },
++ .probe = ov5640_probe,
++ .remove = ov5640_remove,
++ .id_table = ov5640_id,
++};
++
++static inline void ov5640_power_down(int enable)
++{
++ gpio_set_value(pwn_gpio, enable);
++
++ msleep(2);
++}
++
++static inline void ov5640_reset(void)
++{
++ /* camera reset */
++ gpio_set_value(rst_gpio, 1);
++
++ /* camera power down */
++ gpio_set_value(pwn_gpio, 1);
++ msleep(5);
++ gpio_set_value(pwn_gpio, 0);
++ msleep(5);
++ gpio_set_value(rst_gpio, 0);
++ msleep(1);
++ gpio_set_value(rst_gpio, 1);
++ msleep(5);
++ gpio_set_value(pwn_gpio, 1);
++}
++
++static int ov5640_regulator_enable(struct device *dev)
++{
++ int ret = 0;
++
++ io_regulator = devm_regulator_get(dev, "DOVDD");
++ if (!IS_ERR(io_regulator)) {
++ regulator_set_voltage(io_regulator,
++ OV5640_VOLTAGE_DIGITAL_IO,
++ OV5640_VOLTAGE_DIGITAL_IO);
++ ret = regulator_enable(io_regulator);
++ if (ret) {
++ dev_err(dev, "set io voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set io voltage ok\n");
++ }
++ } else {
++ io_regulator = NULL;
++ dev_warn(dev, "cannot get io voltage\n");
++ }
++
++ core_regulator = devm_regulator_get(dev, "DVDD");
++ if (!IS_ERR(core_regulator)) {
++ regulator_set_voltage(core_regulator,
++ OV5640_VOLTAGE_DIGITAL_CORE,
++ OV5640_VOLTAGE_DIGITAL_CORE);
++ ret = regulator_enable(core_regulator);
++ if (ret) {
++ dev_err(dev, "set core voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set core voltage ok\n");
++ }
++ } else {
++ core_regulator = NULL;
++ dev_warn(dev, "cannot get core voltage\n");
++ }
++
++ analog_regulator = devm_regulator_get(dev, "AVDD");
++ if (!IS_ERR(analog_regulator)) {
++ regulator_set_voltage(analog_regulator,
++ OV5640_VOLTAGE_ANALOG,
++ OV5640_VOLTAGE_ANALOG);
++ ret = regulator_enable(analog_regulator);
++ if (ret) {
++ dev_err(dev, "set analog voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set analog voltage ok\n");
++ }
++ } else {
++ analog_regulator = NULL;
++ dev_warn(dev, "cannot get analog voltage\n");
++ }
++
++ return ret;
++}
++
++static s32 ov5640_write_reg(u16 reg, u8 val)
++{
++ u8 au8Buf[3] = {0};
++
++ au8Buf[0] = reg >> 8;
++ au8Buf[1] = reg & 0xff;
++ au8Buf[2] = val;
++
++ if (i2c_master_send(ov5640_data.i2c_client, au8Buf, 3) < 0) {
++ pr_err("%s:write reg error:reg=%x,val=%x\n",
++ __func__, reg, val);
++ return -1;
++ }
++
++ return 0;
++}
++
++static s32 ov5640_read_reg(u16 reg, u8 *val)
++{
++ u8 au8RegBuf[2] = {0};
++ u8 u8RdVal = 0;
++
++ au8RegBuf[0] = reg >> 8;
++ au8RegBuf[1] = reg & 0xff;
++
++ if (2 != i2c_master_send(ov5640_data.i2c_client, au8RegBuf, 2)) {
++ pr_err("%s:write reg error:reg=%x\n",
++ __func__, reg);
++ return -1;
++ }
++
++ if (1 != i2c_master_recv(ov5640_data.i2c_client, &u8RdVal, 1)) {
++ pr_err("%s:read reg error:reg=%x,val=%x\n",
++ __func__, reg, u8RdVal);
++ return -1;
++ }
++
++ *val = u8RdVal;
++
++ return u8RdVal;
++}
++
++static void ov5640_soft_reset(void)
++{
++ /* sysclk from pad */
++ ov5640_write_reg(0x3103, 0x11);
++
++ /* software reset */
++ ov5640_write_reg(0x3008, 0x82);
++
++ /* delay at least 5ms */
++ msleep(10);
++}
++
++/* set sensor driver capability
++ * 0x302c[7:6] - strength
++ 00 - 1x
++ 01 - 2x
++ 10 - 3x
++ 11 - 4x
++ */
++static int ov5640_driver_capability(int strength)
++{
++ u8 temp = 0;
++
++ if (strength > 4 || strength < 1) {
++ pr_err("The valid driver capability of ov5640 is 1x~4x\n");
++ return -EINVAL;
++ }
++
++ ov5640_read_reg(0x302c, &temp);
++
++ temp &= ~0xc0; /* clear [7:6] */
++ temp |= ((strength - 1) << 6); /* set [7:6] */
++
++ ov5640_write_reg(0x302c, temp);
++
++ return 0;
++}
++
++/* calculate sysclk */
++static int ov5640_get_sysclk(void)
++{
++ int xvclk = ov5640_data.mclk / 10000;
++ int sysclk;
++ int temp1, temp2;
++ int Multiplier, PreDiv, VCO, SysDiv, Pll_rdiv, Bit_div2x, sclk_rdiv;
++ int sclk_rdiv_map[] = {1, 2, 4, 8};
++ u8 regval = 0;
++
++ temp1 = ov5640_read_reg(0x3034, &regval);
++ temp2 = temp1 & 0x0f;
++ if (temp2 == 8 || temp2 == 10) {
++ Bit_div2x = temp2 / 2;
++ } else {
++ pr_err("ov5640: unsupported bit mode %d\n", temp2);
++ return -1;
++ }
++
++ temp1 = ov5640_read_reg(0x3035, &regval);
++ SysDiv = temp1 >> 4;
++ if (SysDiv == 0)
++ SysDiv = 16;
++
++ temp1 = ov5640_read_reg(0x3036, &regval);
++ Multiplier = temp1;
++ temp1 = ov5640_read_reg(0x3037, &regval);
++ PreDiv = temp1 & 0x0f;
++ Pll_rdiv = ((temp1 >> 4) & 0x01) + 1;
++
++ temp1 = ov5640_read_reg(0x3108, &regval);
++ temp2 = temp1 & 0x03;
++
++ sclk_rdiv = sclk_rdiv_map[temp2];
++ VCO = xvclk * Multiplier / PreDiv;
++ sysclk = VCO / SysDiv / Pll_rdiv * 2 / Bit_div2x / sclk_rdiv;
++
++ return sysclk;
++}
++
++/* read HTS from register settings */
++static int ov5640_get_HTS(void)
++{
++ int HTS;
++ u8 temp = 0;
++
++ HTS = ov5640_read_reg(0x380c, &temp);
++ HTS = (HTS<<8) + ov5640_read_reg(0x380d, &temp);
++ return HTS;
++}
++
++/* read VTS from register settings */
++static int ov5640_get_VTS(void)
++{
++ int VTS;
++ u8 temp = 0;
++
++ VTS = ov5640_read_reg(0x380e, &temp);
++ VTS = (VTS<<8) + ov5640_read_reg(0x380f, &temp);
++
++ return VTS;
++}
++
++/* write VTS to registers */
++static int ov5640_set_VTS(int VTS)
++{
++ int temp;
++
++ temp = VTS & 0xff;
++ ov5640_write_reg(0x380f, temp);
++
++ temp = VTS>>8;
++ ov5640_write_reg(0x380e, temp);
++ return 0;
++}
++
++/* read shutter, in number of line period */
++static int ov5640_get_shutter(void)
++{
++ int shutter;
++ u8 regval;
++
++ shutter = (ov5640_read_reg(0x03500, &regval) & 0x0f);
++
++ shutter = (shutter<<8) + ov5640_read_reg(0x3501, &regval);
++ shutter = (shutter<<4) + (ov5640_read_reg(0x3502, &regval)>>4);
++
++ return shutter;
++}
++
++/* write shutter, in number of line period */
++static int ov5640_set_shutter(int shutter)
++{
++ int temp;
++
++ shutter = shutter & 0xffff;
++ temp = shutter & 0x0f;
++ temp = temp<<4;
++ ov5640_write_reg(0x3502, temp);
++
++ temp = shutter & 0xfff;
++ temp = temp>>4;
++ ov5640_write_reg(0x3501, temp);
++
++ temp = shutter>>12;
++ ov5640_write_reg(0x3500, temp);
++
++ return 0;
++}
++
++/* read gain, 16 = 1x */
++static int ov5640_get_gain16(void)
++{
++ int gain16;
++ u8 regval;
++
++ gain16 = ov5640_read_reg(0x350a, &regval) & 0x03;
++ gain16 = (gain16<<8) + ov5640_read_reg(0x350b, &regval);
++
++ return gain16;
++}
++
++/* write gain, 16 = 1x */
++static int ov5640_set_gain16(int gain16)
++{
++ int temp;
++
++ gain16 = gain16 & 0x3ff;
++ temp = gain16 & 0xff;
++
++ ov5640_write_reg(0x350b, temp);
++ temp = gain16>>8;
++
++ ov5640_write_reg(0x350a, temp);
++ return 0;
++}
++
++/* get banding filter value */
++static int ov5640_get_light_freq(void)
++{
++ int temp, temp1, light_frequency;
++ u8 regval;
++
++ temp = ov5640_read_reg(0x3c01, &regval);
++ if (temp & 0x80) {
++ /* manual */
++ temp1 = ov5640_read_reg(0x3c00, &regval);
++ if (temp1 & 0x04) {
++ /* 50Hz */
++ light_frequency = 50;
++ } else {
++ /* 60Hz */
++ light_frequency = 60;
++ }
++ } else {
++ /* auto */
++ temp1 = ov5640_read_reg(0x3c0c, &regval);
++ if (temp1 & 0x01) {
++ /* 50Hz */
++ light_frequency = 50;
++ } else {
++ /* 60Hz */
++ light_frequency = 60;
++ }
++ }
++
++ return light_frequency;
++}
++
++static void ov5640_set_bandingfilter(void)
++{
++ int prev_VTS;
++ int band_step60, max_band60, band_step50, max_band50;
++
++ /* read preview PCLK */
++ prev_sysclk = ov5640_get_sysclk();
++
++ /* read preview HTS */
++ prev_HTS = ov5640_get_HTS();
++
++ /* read preview VTS */
++ prev_VTS = ov5640_get_VTS();
++
++ /* calculate banding filter */
++ /* 60Hz */
++ band_step60 = prev_sysclk * 100/prev_HTS * 100/120;
++ ov5640_write_reg(0x3a0a, (band_step60 >> 8));
++ ov5640_write_reg(0x3a0b, (band_step60 & 0xff));
++
++ max_band60 = (int)((prev_VTS-4)/band_step60);
++ ov5640_write_reg(0x3a0d, max_band60);
++
++ /* 50Hz */
++ band_step50 = prev_sysclk * 100/prev_HTS;
++ ov5640_write_reg(0x3a08, (band_step50 >> 8));
++ ov5640_write_reg(0x3a09, (band_step50 & 0xff));
++
++ max_band50 = (int)((prev_VTS-4)/band_step50);
++ ov5640_write_reg(0x3a0e, max_band50);
++}
++
++/* stable in high */
++static int ov5640_set_AE_target(int target)
++{
++ int fast_high, fast_low;
++
++ AE_low = target * 23 / 25; /* 0.92 */
++ AE_high = target * 27 / 25; /* 1.08 */
++ fast_high = AE_high << 1;
++
++ if (fast_high > 255)
++ fast_high = 255;
++ fast_low = AE_low >> 1;
++
++ ov5640_write_reg(0x3a0f, AE_high);
++ ov5640_write_reg(0x3a10, AE_low);
++ ov5640_write_reg(0x3a1b, AE_high);
++ ov5640_write_reg(0x3a1e, AE_low);
++ ov5640_write_reg(0x3a11, fast_high);
++ ov5640_write_reg(0x3a1f, fast_low);
++
++ return 0;
++}
++
++/* enable = 0 to turn off night mode
++ enable = 1 to turn on night mode */
++static int ov5640_set_night_mode(int enable)
++{
++ u8 mode;
++
++ ov5640_read_reg(0x3a00, &mode);
++
++ if (enable) {
++ /* night mode on */
++ mode |= 0x04;
++ ov5640_write_reg(0x3a00, mode);
++ } else {
++ /* night mode off */
++ mode &= 0xfb;
++ ov5640_write_reg(0x3a00, mode);
++ }
++
++ return 0;
++}
++
++/* enable = 0 to turn off AEC/AGC
++ enable = 1 to turn on AEC/AGC */
++void ov5640_turn_on_AE_AG(int enable)
++{
++ u8 ae_ag_ctrl;
++
++ ov5640_read_reg(0x3503, &ae_ag_ctrl);
++ if (enable) {
++ /* turn on auto AE/AG */
++ ae_ag_ctrl = ae_ag_ctrl & ~(0x03);
++ } else {
++ /* turn off AE/AG */
++ ae_ag_ctrl = ae_ag_ctrl | 0x03;
++ }
++ ov5640_write_reg(0x3503, ae_ag_ctrl);
++}
++
++/* download ov5640 settings to sensor through i2c */
++static int ov5640_download_firmware(struct reg_value *pModeSetting, s32 ArySize)
++{
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int i, retval = 0;
++
++ for (i = 0; i < ArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++
++ if (Mask) {
++ retval = ov5640_read_reg(RegAddr, &RegVal);
++ if (retval < 0)
++ goto err;
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5640_write_reg(RegAddr, Val);
++ if (retval < 0)
++ goto err;
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++
++static int ov5640_init_mode(void)
++{
++ struct reg_value *pModeSetting = NULL;
++ int ArySize = 0, retval = 0;
++
++ ov5640_soft_reset();
++
++ pModeSetting = ov5640_global_init_setting;
++ ArySize = ARRAY_SIZE(ov5640_global_init_setting);
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ pModeSetting = ov5640_init_setting_30fps_VGA;
++ ArySize = ARRAY_SIZE(ov5640_init_setting_30fps_VGA);
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ /* change driver capability to 2x according to validation board.
++ * if the image is not stable, please increase the driver strength.
++ */
++ ov5640_driver_capability(2);
++ ov5640_set_bandingfilter();
++ ov5640_set_AE_target(AE_Target);
++ ov5640_set_night_mode(night_mode);
++
++ /* skip 9 vysnc: start capture at 10th vsync */
++ msleep(300);
++
++ /* turn off night mode */
++ night_mode = 0;
++ ov5640_data.pix.width = 640;
++ ov5640_data.pix.height = 480;
++err:
++ return retval;
++}
++
++/* change to or back to subsampling mode set the mode directly
++ * image size below 1280 * 960 is subsampling mode */
++static int ov5640_change_mode_direct(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ int retval = 0;
++
++ if (mode > ov5640_mode_MAX || mode < ov5640_mode_MIN) {
++ pr_err("Wrong ov5640 mode detected!\n");
++ return -1;
++ }
++
++ pModeSetting = ov5640_mode_info_data[frame_rate][mode].init_data_ptr;
++ ArySize =
++ ov5640_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5640_data.pix.width = ov5640_mode_info_data[frame_rate][mode].width;
++ ov5640_data.pix.height = ov5640_mode_info_data[frame_rate][mode].height;
++
++ if (ov5640_data.pix.width == 0 || ov5640_data.pix.height == 0 ||
++ pModeSetting == NULL || ArySize == 0)
++ return -EINVAL;
++
++ /* set ov5640 to subsampling mode */
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++
++ /* turn on AE AG for subsampling mode, in case the firmware didn't */
++ ov5640_turn_on_AE_AG(1);
++
++ /* calculate banding filter */
++ ov5640_set_bandingfilter();
++
++ /* set AE target */
++ ov5640_set_AE_target(AE_Target);
++
++ /* update night mode setting */
++ ov5640_set_night_mode(night_mode);
++
++ /* skip 9 vysnc: start capture at 10th vsync */
++ if (mode == ov5640_mode_XGA_1024_768 && frame_rate == ov5640_30_fps) {
++ pr_warning("ov5640: actual frame rate of XGA is 22.5fps\n");
++ /* 1/22.5 * 9*/
++ msleep(400);
++ return retval;
++ }
++
++ if (frame_rate == ov5640_15_fps) {
++ /* 1/15 * 9*/
++ msleep(600);
++ } else if (frame_rate == ov5640_30_fps) {
++ /* 1/30 * 9*/
++ msleep(300);
++ }
++
++ return retval;
++}
++
++/* change to scaling mode go through exposure calucation
++ * image size above 1280 * 960 is scaling mode */
++static int ov5640_change_mode_exposure_calc(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ int prev_shutter, prev_gain16, average;
++ int cap_shutter, cap_gain16;
++ int cap_sysclk, cap_HTS, cap_VTS;
++ int light_freq, cap_bandfilt, cap_maxband;
++ long cap_gain16_shutter;
++ u8 temp;
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ int retval = 0;
++
++ /* check if the input mode and frame rate is valid */
++ pModeSetting =
++ ov5640_mode_info_data[frame_rate][mode].init_data_ptr;
++ ArySize =
++ ov5640_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5640_data.pix.width =
++ ov5640_mode_info_data[frame_rate][mode].width;
++ ov5640_data.pix.height =
++ ov5640_mode_info_data[frame_rate][mode].height;
++
++ if (ov5640_data.pix.width == 0 || ov5640_data.pix.height == 0 ||
++ pModeSetting == NULL || ArySize == 0)
++ return -EINVAL;
++
++ /* read preview shutter */
++ prev_shutter = ov5640_get_shutter();
++
++ /* read preview gain */
++ prev_gain16 = ov5640_get_gain16();
++
++ /* get average */
++ average = ov5640_read_reg(0x56a1, &temp);
++
++ /* turn off night mode for capture */
++ ov5640_set_night_mode(0);
++
++ /* turn off overlay */
++ ov5640_write_reg(0x3022, 0x06);
++
++ /* Write capture setting */
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ /* turn off AE AG when capture image. */
++ ov5640_turn_on_AE_AG(0);
++
++ /* read capture VTS */
++ cap_VTS = ov5640_get_VTS();
++ cap_HTS = ov5640_get_HTS();
++ cap_sysclk = ov5640_get_sysclk();
++
++ /* calculate capture banding filter */
++ light_freq = ov5640_get_light_freq();
++ if (light_freq == 60) {
++ /* 60Hz */
++ cap_bandfilt = cap_sysclk * 100 / cap_HTS * 100 / 120;
++ } else {
++ /* 50Hz */
++ cap_bandfilt = cap_sysclk * 100 / cap_HTS;
++ }
++ cap_maxband = (int)((cap_VTS - 4)/cap_bandfilt);
++ /* calculate capture shutter/gain16 */
++ if (average > AE_low && average < AE_high) {
++ /* in stable range */
++ cap_gain16_shutter =
++ prev_gain16 * prev_shutter * cap_sysclk/prev_sysclk *
++ prev_HTS/cap_HTS * AE_Target / average;
++ } else {
++ cap_gain16_shutter =
++ prev_gain16 * prev_shutter * cap_sysclk/prev_sysclk *
++ prev_HTS/cap_HTS;
++ }
++
++ /* gain to shutter */
++ if (cap_gain16_shutter < (cap_bandfilt * 16)) {
++ /* shutter < 1/100 */
++ cap_shutter = cap_gain16_shutter/16;
++ if (cap_shutter < 1)
++ cap_shutter = 1;
++ cap_gain16 = cap_gain16_shutter/cap_shutter;
++ if (cap_gain16 < 16)
++ cap_gain16 = 16;
++ } else {
++ if (cap_gain16_shutter > (cap_bandfilt*cap_maxband*16)) {
++ /* exposure reach max */
++ cap_shutter = cap_bandfilt*cap_maxband;
++ cap_gain16 = cap_gain16_shutter / cap_shutter;
++ } else {
++ /* 1/100 < cap_shutter =< max, cap_shutter = n/100 */
++ cap_shutter =
++ ((int)(cap_gain16_shutter/16/cap_bandfilt))
++ * cap_bandfilt;
++ cap_gain16 = cap_gain16_shutter / cap_shutter;
++ }
++ }
++
++ /* write capture gain */
++ ov5640_set_gain16(cap_gain16);
++
++ /* write capture shutter */
++ if (cap_shutter > (cap_VTS - 4)) {
++ cap_VTS = cap_shutter + 4;
++ ov5640_set_VTS(cap_VTS);
++ }
++
++ ov5640_set_shutter(cap_shutter);
++
++ /* skip 2 vysnc: start capture at 3rd vsync
++ * frame rate of QSXGA and 1080P is 7.5fps: 1/7.5 * 2
++ */
++ pr_warning("ov5640: the actual frame rate of %s is 7.5fps\n",
++ mode == ov5640_mode_1080P_1920_1080 ? "1080P" : "QSXGA");
++ msleep(267);
++err:
++ return retval;
++}
++
++static int ov5640_change_mode(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ int retval = 0;
++
++ if (mode > ov5640_mode_MAX || mode < ov5640_mode_MIN) {
++ pr_err("Wrong ov5640 mode detected!\n");
++ return -1;
++ }
++
++ if (mode == ov5640_mode_1080P_1920_1080 ||
++ mode == ov5640_mode_QSXGA_2592_1944) {
++ /* change to scaling mode go through exposure calucation
++ * image size above 1280 * 960 is scaling mode */
++ retval = ov5640_change_mode_exposure_calc(frame_rate, mode);
++ } else {
++ /* change back to subsampling modem download firmware directly
++ * image size below 1280 * 960 is subsampling mode */
++ retval = ov5640_change_mode_direct(frame_rate, mode);
++ }
++
++ return retval;
++}
++
++/* --------------- IOCTL functions from v4l2_int_ioctl_desc --------------- */
++
++static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
++{
++ if (s == NULL) {
++ pr_err(" ERROR!! no slave device set!\n");
++ return -1;
++ }
++
++ memset(p, 0, sizeof(*p));
++ p->u.bt656.clock_curr = ov5640_data.mclk;
++ pr_debug(" clock_curr=mclk=%d\n", ov5640_data.mclk);
++ p->if_type = V4L2_IF_TYPE_BT656;
++ p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
++ p->u.bt656.clock_min = OV5640_XCLK_MIN;
++ p->u.bt656.clock_max = OV5640_XCLK_MAX;
++ p->u.bt656.bt_sync_correct = 1; /* Indicate external vsync */
++
++ return 0;
++}
++
++/*!
++ * ioctl_s_power - V4L2 sensor interface handler for VIDIOC_S_POWER ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @on: indicates power mode (on or off)
++ *
++ * Turns the power on or off, depending on the value of on and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_power(struct v4l2_int_device *s, int on)
++{
++ struct sensor_data *sensor = s->priv;
++
++ if (on && !sensor->on) {
++ if (io_regulator)
++ if (regulator_enable(io_regulator) != 0)
++ return -EIO;
++ if (core_regulator)
++ if (regulator_enable(core_regulator) != 0)
++ return -EIO;
++ if (analog_regulator)
++ if (regulator_enable(analog_regulator) != 0)
++ return -EIO;
++ /* Make sure power on */
++ ov5640_power_down(0);
++ } else if (!on && sensor->on) {
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++ if (core_regulator)
++ regulator_disable(core_regulator);
++ if (io_regulator)
++ regulator_disable(io_regulator);
++
++ ov5640_power_down(1);
++}
++
++ sensor->on = on;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
++ *
++ * Returns the sensor's video CAPTURE parameters.
++ */
++static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_captureparm *cparm = &a->parm.capture;
++ int ret = 0;
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cparm->capability = sensor->streamcap.capability;
++ cparm->timeperframe = sensor->streamcap.timeperframe;
++ cparm->capturemode = sensor->streamcap.capturemode;
++ ret = 0;
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
++ *
++ * Configures the sensor to use the input parameters, if possible. If
++ * not possible, reverts to the old parameters and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
++ u32 tgt_fps; /* target frames per secound */
++ enum ov5640_frame_rate frame_rate;
++ int ret = 0;
++
++ /* Make sure power on */
++ ov5640_power_down(0);
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ /* Check that the new frame rate is allowed. */
++ if ((timeperframe->numerator == 0) ||
++ (timeperframe->denominator == 0)) {
++ timeperframe->denominator = DEFAULT_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps > MAX_FPS) {
++ timeperframe->denominator = MAX_FPS;
++ timeperframe->numerator = 1;
++ } else if (tgt_fps < MIN_FPS) {
++ timeperframe->denominator = MIN_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ /* Actual frame rate we use */
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5640_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5640_30_fps;
++ else {
++ pr_err(" The camera frame rate is not supported!\n");
++ return -EINVAL;
++ }
++
++ ret = ov5640_change_mode(frame_rate,
++ a->parm.capture.capturemode);
++ if (ret < 0)
++ return ret;
++
++ sensor->streamcap.timeperframe = *timeperframe;
++ sensor->streamcap.capturemode = a->parm.capture.capturemode;
++
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ pr_debug(" type is not " \
++ "V4L2_BUF_TYPE_VIDEO_CAPTURE but %d\n",
++ a->type);
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
++ * @s: pointer to standard V4L2 device structure
++ * @f: pointer to standard V4L2 v4l2_format structure
++ *
++ * Returns the sensor's current pixel format in the v4l2_format
++ * parameter.
++ */
++static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
++{
++ struct sensor_data *sensor = s->priv;
++
++ f->fmt.pix = sensor->pix;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control's current
++ * value from the video_control[] array. Otherwise, returns -EINVAL
++ * if the control is not supported.
++ */
++static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int ret = 0;
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ vc->value = ov5640_data.brightness;
++ break;
++ case V4L2_CID_HUE:
++ vc->value = ov5640_data.hue;
++ break;
++ case V4L2_CID_CONTRAST:
++ vc->value = ov5640_data.contrast;
++ break;
++ case V4L2_CID_SATURATION:
++ vc->value = ov5640_data.saturation;
++ break;
++ case V4L2_CID_RED_BALANCE:
++ vc->value = ov5640_data.red;
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ vc->value = ov5640_data.blue;
++ break;
++ case V4L2_CID_EXPOSURE:
++ vc->value = ov5640_data.ae_mode;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
++ *
++ * If the requested control is supported, sets the control's current
++ * value in HW (and updates the video_control[] array). Otherwise,
++ * returns -EINVAL if the control is not supported.
++ */
++static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int retval = 0;
++
++ pr_debug("In ov5640:ioctl_s_ctrl %d\n",
++ vc->id);
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ break;
++ case V4L2_CID_CONTRAST:
++ break;
++ case V4L2_CID_SATURATION:
++ break;
++ case V4L2_CID_HUE:
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_RED_BALANCE:
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ break;
++ case V4L2_CID_GAMMA:
++ break;
++ case V4L2_CID_EXPOSURE:
++ break;
++ case V4L2_CID_AUTOGAIN:
++ break;
++ case V4L2_CID_GAIN:
++ break;
++ case V4L2_CID_HFLIP:
++ break;
++ case V4L2_CID_VFLIP:
++ break;
++ default:
++ retval = -EPERM;
++ break;
++ }
++
++ return retval;
++}
++
++/*!
++ * ioctl_enum_framesizes - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMESIZES ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_framesizes(struct v4l2_int_device *s,
++ struct v4l2_frmsizeenum *fsize)
++{
++ if (fsize->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ fsize->pixel_format = ov5640_data.pix.pixelformat;
++ fsize->discrete.width =
++ max(ov5640_mode_info_data[0][fsize->index].width,
++ ov5640_mode_info_data[1][fsize->index].width);
++ fsize->discrete.height =
++ max(ov5640_mode_info_data[0][fsize->index].height,
++ ov5640_mode_info_data[1][fsize->index].height);
++ return 0;
++}
++
++/*!
++ * ioctl_enum_frameintervals - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMEINTERVALS ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fival: standard V4L2 VIDIOC_ENUM_FRAMEINTERVALS ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_frameintervals(struct v4l2_int_device *s,
++ struct v4l2_frmivalenum *fival)
++{
++ int i, j, count;
++
++ if (fival->index < 0 || fival->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ if (fival->width == 0 || fival->height == 0 ||
++ fival->pixel_format == 0) {
++ pr_warning("Please assign pixelformat, width and height.\n");
++ return -EINVAL;
++ }
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++
++ count = 0;
++ for (i = 0; i < ARRAY_SIZE(ov5640_mode_info_data); i++) {
++ for (j = 0; j < (ov5640_mode_MAX + 1); j++) {
++ if (fival->pixel_format == ov5640_data.pix.pixelformat
++ && fival->width == ov5640_mode_info_data[i][j].width
++ && fival->height == ov5640_mode_info_data[i][j].height
++ && ov5640_mode_info_data[i][j].init_data_ptr != NULL) {
++ count++;
++ }
++ if (fival->index == (count - 1)) {
++ fival->discrete.denominator =
++ ov5640_framerates[i];
++ return 0;
++ }
++ }
++ }
++
++ return -EINVAL;
++}
++
++/*!
++ * ioctl_g_chip_ident - V4L2 sensor interface handler for
++ * VIDIOC_DBG_G_CHIP_IDENT ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @id: pointer to int
++ *
++ * Return 0.
++ */
++static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
++{
++ ((struct v4l2_dbg_chip_ident *)id)->match.type =
++ V4L2_CHIP_MATCH_I2C_DRIVER;
++ strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name, "ov5640_camera");
++
++ return 0;
++}
++
++/*!
++ * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
++ * @s: pointer to standard V4L2 device structure
++ */
++static int ioctl_init(struct v4l2_int_device *s)
++{
++
++ return 0;
++}
++
++/*!
++ * ioctl_enum_fmt_cap - V4L2 sensor interface handler for VIDIOC_ENUM_FMT
++ * @s: pointer to standard V4L2 device structure
++ * @fmt: pointer to standard V4L2 fmt description structure
++ *
++ * Return 0.
++ */
++static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
++ struct v4l2_fmtdesc *fmt)
++{
++ if (fmt->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ fmt->pixelformat = ov5640_data.pix.pixelformat;
++
++ return 0;
++}
++
++/*!
++ * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Initialise the device when slave attaches to the master.
++ */
++static int ioctl_dev_init(struct v4l2_int_device *s)
++{
++ struct sensor_data *sensor = s->priv;
++ u32 tgt_xclk; /* target xclk */
++ u32 tgt_fps; /* target frames per secound */
++ enum ov5640_frame_rate frame_rate;
++ int ret;
++
++ ov5640_data.on = true;
++
++ /* mclk */
++ tgt_xclk = ov5640_data.mclk;
++ tgt_xclk = min(tgt_xclk, (u32)OV5640_XCLK_MAX);
++ tgt_xclk = max(tgt_xclk, (u32)OV5640_XCLK_MIN);
++ ov5640_data.mclk = tgt_xclk;
++
++ pr_debug(" Setting mclk to %d MHz\n", tgt_xclk / 1000000);
++ clk_set_rate(ov5640_data.sensor_clk, ov5640_data.mclk);
++
++ /* Default camera frame rate is set in probe */
++ tgt_fps = sensor->streamcap.timeperframe.denominator /
++ sensor->streamcap.timeperframe.numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5640_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5640_30_fps;
++ else
++ return -EINVAL; /* Only support 15fps or 30fps now. */
++
++ ret = ov5640_init_mode();
++ return ret;
++}
++
++/*!
++ * ioctl_dev_exit - V4L2 sensor interface handler for vidioc_int_dev_exit_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Delinitialise the device when slave detaches to the master.
++ */
++static int ioctl_dev_exit(struct v4l2_int_device *s)
++{
++ return 0;
++}
++
++/*!
++ * This structure defines all the ioctls for this module and links them to the
++ * enumeration.
++ */
++static struct v4l2_int_ioctl_desc ov5640_ioctl_desc[] = {
++ { vidioc_int_dev_init_num,
++ (v4l2_int_ioctl_func *)ioctl_dev_init },
++ { vidioc_int_dev_exit_num,
++ ioctl_dev_exit},
++ { vidioc_int_s_power_num,
++ (v4l2_int_ioctl_func *)ioctl_s_power },
++ { vidioc_int_g_ifparm_num,
++ (v4l2_int_ioctl_func *)ioctl_g_ifparm },
++ { vidioc_int_init_num,
++ (v4l2_int_ioctl_func *)ioctl_init },
++ { vidioc_int_enum_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap },
++ { vidioc_int_g_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_g_fmt_cap },
++ { vidioc_int_g_parm_num,
++ (v4l2_int_ioctl_func *)ioctl_g_parm },
++ { vidioc_int_s_parm_num,
++ (v4l2_int_ioctl_func *)ioctl_s_parm },
++ { vidioc_int_g_ctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_g_ctrl },
++ { vidioc_int_s_ctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_s_ctrl },
++ { vidioc_int_enum_framesizes_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_framesizes },
++ { vidioc_int_enum_frameintervals_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_frameintervals },
++ { vidioc_int_g_chip_ident_num,
++ (v4l2_int_ioctl_func *)ioctl_g_chip_ident },
++};
++
++static struct v4l2_int_slave ov5640_slave = {
++ .ioctls = ov5640_ioctl_desc,
++ .num_ioctls = ARRAY_SIZE(ov5640_ioctl_desc),
++};
++
++static struct v4l2_int_device ov5640_int_device = {
++ .module = THIS_MODULE,
++ .name = "ov5640",
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &ov5640_slave,
++ },
++};
++
++/*!
++ * ov5640 I2C probe function
++ *
++ * @param adapter struct i2c_adapter *
++ * @return Error code indicating success or failure
++ */
++static int ov5640_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct pinctrl *pinctrl;
++ struct device *dev = &client->dev;
++ int retval;
++ u8 chip_id_high, chip_id_low;
++
++ /* ov5640 pinctrl */
++ pinctrl = devm_pinctrl_get_select_default(dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(dev, "setup pinctrl failed\n");
++ return PTR_ERR(pinctrl);
++ }
++
++ /* request power down pin */
++ pwn_gpio = of_get_named_gpio(dev->of_node, "pwn-gpios", 0);
++ if (!gpio_is_valid(pwn_gpio)) {
++ dev_err(dev, "no sensor pwdn pin available\n");
++ return -ENODEV;
++ }
++ retval = devm_gpio_request_one(dev, pwn_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5640_pwdn");
++ if (retval < 0)
++ return retval;
++
++ /* request reset pin */
++ rst_gpio = of_get_named_gpio(dev->of_node, "rst-gpios", 0);
++ if (!gpio_is_valid(rst_gpio)) {
++ dev_err(dev, "no sensor reset pin available\n");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, rst_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5640_reset");
++ if (retval < 0)
++ return retval;
++
++ /* Set initial values for the sensor struct. */
++ memset(&ov5640_data, 0, sizeof(ov5640_data));
++ ov5640_data.sensor_clk = devm_clk_get(dev, "csi_mclk");
++ if (IS_ERR(ov5640_data.sensor_clk)) {
++ dev_err(dev, "get mclk failed\n");
++ return PTR_ERR(ov5640_data.sensor_clk);
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk",
++ &ov5640_data.mclk);
++ if (retval) {
++ dev_err(dev, "mclk frequency is invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk_source",
++ (u32 *) &(ov5640_data.mclk_source));
++ if (retval) {
++ dev_err(dev, "mclk_source invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "csi_id",
++ &(ov5640_data.csi));
++ if (retval) {
++ dev_err(dev, "csi_id invalid\n");
++ return retval;
++ }
++
++ clk_prepare_enable(ov5640_data.sensor_clk);
++
++ ov5640_data.io_init = ov5640_reset;
++ ov5640_data.i2c_client = client;
++ ov5640_data.pix.pixelformat = V4L2_PIX_FMT_YUYV;
++ ov5640_data.pix.width = 640;
++ ov5640_data.pix.height = 480;
++ ov5640_data.streamcap.capability = V4L2_MODE_HIGHQUALITY |
++ V4L2_CAP_TIMEPERFRAME;
++ ov5640_data.streamcap.capturemode = 0;
++ ov5640_data.streamcap.timeperframe.denominator = DEFAULT_FPS;
++ ov5640_data.streamcap.timeperframe.numerator = 1;
++
++ ov5640_regulator_enable(&client->dev);
++
++ ov5640_reset();
++
++ ov5640_power_down(0);
++
++ retval = ov5640_read_reg(OV5640_CHIP_ID_HIGH_BYTE, &chip_id_high);
++ if (retval < 0 || chip_id_high != 0x56) {
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++ pr_warning("camera ov5640 is not found\n");
++ return -ENODEV;
++ }
++ retval = ov5640_read_reg(OV5640_CHIP_ID_LOW_BYTE, &chip_id_low);
++ if (retval < 0 || chip_id_low != 0x40) {
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++ pr_warning("camera ov5640 is not found\n");
++ return -ENODEV;
++ }
++
++ ov5640_power_down(1);
++
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++
++ ov5640_int_device.priv = &ov5640_data;
++ retval = v4l2_int_device_register(&ov5640_int_device);
++
++ pr_info("camera ov5640 is found\n");
++ return retval;
++}
++
++/*!
++ * ov5640 I2C detach function
++ *
++ * @param client struct i2c_client *
++ * @return Error code indicating success or failure
++ */
++static int ov5640_remove(struct i2c_client *client)
++{
++ v4l2_int_device_unregister(&ov5640_int_device);
++
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++
++ if (core_regulator)
++ regulator_disable(core_regulator);
++
++ if (io_regulator)
++ regulator_disable(io_regulator);
++
++ return 0;
++}
++
++module_i2c_driver(ov5640_i2c_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("OV5640 Camera Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION("1.0");
++MODULE_ALIAS("CSI");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ov5640_mipi.c linux-openelec/drivers/media/platform/mxc/capture/ov5640_mipi.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ov5640_mipi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ov5640_mipi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2104 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/clk.h>
++#include <linux/of_device.h>
++#include <linux/i2c.h>
++#include <linux/of_gpio.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/regulator/consumer.h>
++#include <linux/fsl_devices.h>
++#include <linux/mipi_csi2.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-int-device.h>
++#include "mxc_v4l2_capture.h"
++
++#define OV5640_VOLTAGE_ANALOG 2800000
++#define OV5640_VOLTAGE_DIGITAL_CORE 1500000
++#define OV5640_VOLTAGE_DIGITAL_IO 1800000
++
++#define MIN_FPS 15
++#define MAX_FPS 30
++#define DEFAULT_FPS 30
++
++#define OV5640_XCLK_MIN 6000000
++#define OV5640_XCLK_MAX 24000000
++
++#define OV5640_CHIP_ID_HIGH_BYTE 0x300A
++#define OV5640_CHIP_ID_LOW_BYTE 0x300B
++
++enum ov5640_mode {
++ ov5640_mode_MIN = 0,
++ ov5640_mode_VGA_640_480 = 0,
++ ov5640_mode_QVGA_320_240 = 1,
++ ov5640_mode_NTSC_720_480 = 2,
++ ov5640_mode_PAL_720_576 = 3,
++ ov5640_mode_720P_1280_720 = 4,
++ ov5640_mode_1080P_1920_1080 = 5,
++ ov5640_mode_QSXGA_2592_1944 = 6,
++ ov5640_mode_QCIF_176_144 = 7,
++ ov5640_mode_XGA_1024_768 = 8,
++ ov5640_mode_MAX = 8,
++ ov5640_mode_INIT = 0xff, /*only for sensor init*/
++};
++
++enum ov5640_frame_rate {
++ ov5640_15_fps,
++ ov5640_30_fps
++};
++
++/* image size under 1280 * 960 are SUBSAMPLING
++ * image size upper 1280 * 960 are SCALING
++ */
++enum ov5640_downsize_mode {
++ SUBSAMPLING,
++ SCALING,
++};
++
++struct reg_value {
++ u16 u16RegAddr;
++ u8 u8Val;
++ u8 u8Mask;
++ u32 u32Delay_ms;
++};
++
++struct ov5640_mode_info {
++ enum ov5640_mode mode;
++ enum ov5640_downsize_mode dn_mode;
++ u32 width;
++ u32 height;
++ struct reg_value *init_data_ptr;
++ u32 init_data_size;
++};
++
++/*!
++ * Maintains the information on the current state of the sesor.
++ */
++static struct sensor_data ov5640_data;
++static int pwn_gpio, rst_gpio;
++
++static struct reg_value ov5640_init_setting_30fps_VGA[] = {
++
++ {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
++ {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
++ {0x3034, 0x18, 0, 0}, {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0},
++ {0x3037, 0x13, 0, 0}, {0x3108, 0x01, 0, 0}, {0x3630, 0x36, 0, 0},
++ {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
++ {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
++ {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
++ {0x3705, 0x1a, 0, 0}, {0x3905, 0x02, 0, 0}, {0x3906, 0x10, 0, 0},
++ {0x3901, 0x0a, 0, 0}, {0x3731, 0x12, 0, 0}, {0x3600, 0x08, 0, 0},
++ {0x3601, 0x33, 0, 0}, {0x302d, 0x60, 0, 0}, {0x3620, 0x52, 0, 0},
++ {0x371b, 0x20, 0, 0}, {0x471c, 0x50, 0, 0}, {0x3a13, 0x43, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0xf8, 0, 0}, {0x3635, 0x13, 0, 0},
++ {0x3636, 0x03, 0, 0}, {0x3634, 0x40, 0, 0}, {0x3622, 0x01, 0, 0},
++ {0x3c01, 0xa4, 0, 0}, {0x3c04, 0x28, 0, 0}, {0x3c05, 0x98, 0, 0},
++ {0x3c06, 0x00, 0, 0}, {0x3c07, 0x08, 0, 0}, {0x3c08, 0x00, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x300e, 0x45, 0, 0}, {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0},
++ {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x4837, 0x0a, 0, 0}, {0x4800, 0x04, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
++ {0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
++ {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
++ {0x5187, 0x09, 0, 0}, {0x5188, 0x09, 0, 0}, {0x5189, 0x88, 0, 0},
++ {0x518a, 0x54, 0, 0}, {0x518b, 0xee, 0, 0}, {0x518c, 0xb2, 0, 0},
++ {0x518d, 0x50, 0, 0}, {0x518e, 0x34, 0, 0}, {0x518f, 0x6b, 0, 0},
++ {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0},
++ {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0},
++ {0x5199, 0x6c, 0, 0}, {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0},
++ {0x519c, 0x09, 0, 0}, {0x519d, 0x2b, 0, 0}, {0x519e, 0x38, 0, 0},
++ {0x5381, 0x1e, 0, 0}, {0x5382, 0x5b, 0, 0}, {0x5383, 0x08, 0, 0},
++ {0x5384, 0x0a, 0, 0}, {0x5385, 0x7e, 0, 0}, {0x5386, 0x88, 0, 0},
++ {0x5387, 0x7c, 0, 0}, {0x5388, 0x6c, 0, 0}, {0x5389, 0x10, 0, 0},
++ {0x538a, 0x01, 0, 0}, {0x538b, 0x98, 0, 0}, {0x5300, 0x08, 0, 0},
++ {0x5301, 0x30, 0, 0}, {0x5302, 0x10, 0, 0}, {0x5303, 0x00, 0, 0},
++ {0x5304, 0x08, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x08, 0, 0},
++ {0x5307, 0x16, 0, 0}, {0x5309, 0x08, 0, 0}, {0x530a, 0x30, 0, 0},
++ {0x530b, 0x04, 0, 0}, {0x530c, 0x06, 0, 0}, {0x5480, 0x01, 0, 0},
++ {0x5481, 0x08, 0, 0}, {0x5482, 0x14, 0, 0}, {0x5483, 0x28, 0, 0},
++ {0x5484, 0x51, 0, 0}, {0x5485, 0x65, 0, 0}, {0x5486, 0x71, 0, 0},
++ {0x5487, 0x7d, 0, 0}, {0x5488, 0x87, 0, 0}, {0x5489, 0x91, 0, 0},
++ {0x548a, 0x9a, 0, 0}, {0x548b, 0xaa, 0, 0}, {0x548c, 0xb8, 0, 0},
++ {0x548d, 0xcd, 0, 0}, {0x548e, 0xdd, 0, 0}, {0x548f, 0xea, 0, 0},
++ {0x5490, 0x1d, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x10, 0, 0}, {0x5589, 0x10, 0, 0}, {0x558a, 0x00, 0, 0},
++ {0x558b, 0xf8, 0, 0}, {0x5800, 0x23, 0, 0}, {0x5801, 0x14, 0, 0},
++ {0x5802, 0x0f, 0, 0}, {0x5803, 0x0f, 0, 0}, {0x5804, 0x12, 0, 0},
++ {0x5805, 0x26, 0, 0}, {0x5806, 0x0c, 0, 0}, {0x5807, 0x08, 0, 0},
++ {0x5808, 0x05, 0, 0}, {0x5809, 0x05, 0, 0}, {0x580a, 0x08, 0, 0},
++ {0x580b, 0x0d, 0, 0}, {0x580c, 0x08, 0, 0}, {0x580d, 0x03, 0, 0},
++ {0x580e, 0x00, 0, 0}, {0x580f, 0x00, 0, 0}, {0x5810, 0x03, 0, 0},
++ {0x5811, 0x09, 0, 0}, {0x5812, 0x07, 0, 0}, {0x5813, 0x03, 0, 0},
++ {0x5814, 0x00, 0, 0}, {0x5815, 0x01, 0, 0}, {0x5816, 0x03, 0, 0},
++ {0x5817, 0x08, 0, 0}, {0x5818, 0x0d, 0, 0}, {0x5819, 0x08, 0, 0},
++ {0x581a, 0x05, 0, 0}, {0x581b, 0x06, 0, 0}, {0x581c, 0x08, 0, 0},
++ {0x581d, 0x0e, 0, 0}, {0x581e, 0x29, 0, 0}, {0x581f, 0x17, 0, 0},
++ {0x5820, 0x11, 0, 0}, {0x5821, 0x11, 0, 0}, {0x5822, 0x15, 0, 0},
++ {0x5823, 0x28, 0, 0}, {0x5824, 0x46, 0, 0}, {0x5825, 0x26, 0, 0},
++ {0x5826, 0x08, 0, 0}, {0x5827, 0x26, 0, 0}, {0x5828, 0x64, 0, 0},
++ {0x5829, 0x26, 0, 0}, {0x582a, 0x24, 0, 0}, {0x582b, 0x22, 0, 0},
++ {0x582c, 0x24, 0, 0}, {0x582d, 0x24, 0, 0}, {0x582e, 0x06, 0, 0},
++ {0x582f, 0x22, 0, 0}, {0x5830, 0x40, 0, 0}, {0x5831, 0x42, 0, 0},
++ {0x5832, 0x24, 0, 0}, {0x5833, 0x26, 0, 0}, {0x5834, 0x24, 0, 0},
++ {0x5835, 0x22, 0, 0}, {0x5836, 0x22, 0, 0}, {0x5837, 0x26, 0, 0},
++ {0x5838, 0x44, 0, 0}, {0x5839, 0x24, 0, 0}, {0x583a, 0x26, 0, 0},
++ {0x583b, 0x28, 0, 0}, {0x583c, 0x42, 0, 0}, {0x583d, 0xce, 0, 0},
++ {0x5025, 0x00, 0, 0}, {0x3a0f, 0x30, 0, 0}, {0x3a10, 0x28, 0, 0},
++ {0x3a1b, 0x30, 0, 0}, {0x3a1e, 0x26, 0, 0}, {0x3a11, 0x60, 0, 0},
++ {0x3a1f, 0x14, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3c00, 0x04, 0, 300},
++};
++
++static struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
++
++ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x04, 0, 0}, {0x380f, 0x38, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x0e, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
++
++ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x04, 0, 0}, {0x380f, 0x38, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x0e, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0},
++ {0x380b, 0x00, 0, 0}, {0x3035, 0x12, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3808, 0x04, 0, 0},
++ {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0}, {0x380b, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
++ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
++ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++static struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
++ {0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
++ {0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_PAL_720_576[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
++ {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
++ {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
++ {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x02, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0}, {0x4005, 0x1a, 0, 0},
++ {0x3008, 0x02, 0, 0}, {0x3503, 0, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
++ {0x3035, 0x41, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
++ {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
++ {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
++ {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x02, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_1080P_1920_1080[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
++ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
++ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
++ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
++ {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
++ {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0}, {0x3808, 0x07, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0},
++ {0x380c, 0x09, 0, 0}, {0x380d, 0xc4, 0, 0}, {0x380e, 0x04, 0, 0},
++ {0x380f, 0x60, 0, 0}, {0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
++ {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
++ {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
++ {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
++ {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
++ {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
++ {0x3503, 0, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
++ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
++ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
++ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x54, 0, 1}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
++ {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
++ {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0}, {0x3808, 0x07, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0},
++ {0x380c, 0x09, 0, 0}, {0x380d, 0xc4, 0, 0}, {0x380e, 0x04, 0, 0},
++ {0x380f, 0x60, 0, 0}, {0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
++ {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
++ {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
++ {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
++ {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
++ {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3503, 0, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QSXGA_2592_1944[] = {
++ {0x4202, 0x0f, 0, 0}, /* stream off the sensor */
++ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, /*disable flip*/
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
++ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
++ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
++ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 70},
++ {0x4202, 0x00, 0, 0}, /* stream on the sensor */
++};
++
++static struct ov5640_mode_info ov5640_mode_info_data[2][ov5640_mode_MAX + 1] = {
++ {
++ {ov5640_mode_VGA_640_480, SUBSAMPLING, 640, 480,
++ ov5640_setting_15fps_VGA_640_480,
++ ARRAY_SIZE(ov5640_setting_15fps_VGA_640_480)},
++ {ov5640_mode_QVGA_320_240, SUBSAMPLING, 320, 240,
++ ov5640_setting_15fps_QVGA_320_240,
++ ARRAY_SIZE(ov5640_setting_15fps_QVGA_320_240)},
++ {ov5640_mode_NTSC_720_480, SUBSAMPLING, 720, 480,
++ ov5640_setting_15fps_NTSC_720_480,
++ ARRAY_SIZE(ov5640_setting_15fps_NTSC_720_480)},
++ {ov5640_mode_PAL_720_576, SUBSAMPLING, 720, 576,
++ ov5640_setting_15fps_PAL_720_576,
++ ARRAY_SIZE(ov5640_setting_15fps_PAL_720_576)},
++ {ov5640_mode_720P_1280_720, SUBSAMPLING, 1280, 720,
++ ov5640_setting_15fps_720P_1280_720,
++ ARRAY_SIZE(ov5640_setting_15fps_720P_1280_720)},
++ {ov5640_mode_1080P_1920_1080, SCALING, 1920, 1080,
++ ov5640_setting_15fps_1080P_1920_1080,
++ ARRAY_SIZE(ov5640_setting_15fps_1080P_1920_1080)},
++ {ov5640_mode_QSXGA_2592_1944, SCALING, 2592, 1944,
++ ov5640_setting_15fps_QSXGA_2592_1944,
++ ARRAY_SIZE(ov5640_setting_15fps_QSXGA_2592_1944)},
++ {ov5640_mode_QCIF_176_144, SUBSAMPLING, 176, 144,
++ ov5640_setting_15fps_QCIF_176_144,
++ ARRAY_SIZE(ov5640_setting_15fps_QCIF_176_144)},
++ {ov5640_mode_XGA_1024_768, SUBSAMPLING, 1024, 768,
++ ov5640_setting_15fps_XGA_1024_768,
++ ARRAY_SIZE(ov5640_setting_15fps_XGA_1024_768)},
++ },
++ {
++ {ov5640_mode_VGA_640_480, SUBSAMPLING, 640, 480,
++ ov5640_setting_30fps_VGA_640_480,
++ ARRAY_SIZE(ov5640_setting_30fps_VGA_640_480)},
++ {ov5640_mode_QVGA_320_240, SUBSAMPLING, 320, 240,
++ ov5640_setting_30fps_QVGA_320_240,
++ ARRAY_SIZE(ov5640_setting_30fps_QVGA_320_240)},
++ {ov5640_mode_NTSC_720_480, SUBSAMPLING, 720, 480,
++ ov5640_setting_30fps_NTSC_720_480,
++ ARRAY_SIZE(ov5640_setting_30fps_NTSC_720_480)},
++ {ov5640_mode_PAL_720_576, SUBSAMPLING, 720, 576,
++ ov5640_setting_30fps_PAL_720_576,
++ ARRAY_SIZE(ov5640_setting_30fps_PAL_720_576)},
++ {ov5640_mode_720P_1280_720, SUBSAMPLING, 1280, 720,
++ ov5640_setting_30fps_720P_1280_720,
++ ARRAY_SIZE(ov5640_setting_30fps_720P_1280_720)},
++ {ov5640_mode_1080P_1920_1080, SCALING, 1920, 1080,
++ ov5640_setting_30fps_1080P_1920_1080,
++ ARRAY_SIZE(ov5640_setting_30fps_1080P_1920_1080)},
++ {ov5640_mode_QSXGA_2592_1944, -1, 0, 0, NULL, 0},
++ {ov5640_mode_QCIF_176_144, SUBSAMPLING, 176, 144,
++ ov5640_setting_30fps_QCIF_176_144,
++ ARRAY_SIZE(ov5640_setting_30fps_QCIF_176_144)},
++ {ov5640_mode_XGA_1024_768, SUBSAMPLING, 1024, 768,
++ ov5640_setting_30fps_XGA_1024_768,
++ ARRAY_SIZE(ov5640_setting_30fps_XGA_1024_768)},
++ },
++};
++
++static struct regulator *io_regulator;
++static struct regulator *core_regulator;
++static struct regulator *analog_regulator;
++static struct regulator *gpo_regulator;
++
++static int ov5640_probe(struct i2c_client *adapter,
++ const struct i2c_device_id *device_id);
++static int ov5640_remove(struct i2c_client *client);
++
++static s32 ov5640_read_reg(u16 reg, u8 *val);
++static s32 ov5640_write_reg(u16 reg, u8 val);
++
++static const struct i2c_device_id ov5640_id[] = {
++ {"ov5640_mipi", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, ov5640_id);
++
++static struct i2c_driver ov5640_i2c_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "ov5640_mipi",
++ },
++ .probe = ov5640_probe,
++ .remove = ov5640_remove,
++ .id_table = ov5640_id,
++};
++
++static void ov5640_standby(s32 enable)
++{
++ if (enable)
++ gpio_set_value(pwn_gpio, 1);
++ else
++ gpio_set_value(pwn_gpio, 0);
++
++ msleep(2);
++}
++
++static void ov5640_reset(void)
++{
++ /* camera reset */
++ gpio_set_value(rst_gpio, 1);
++
++ /* camera power dowmn */
++ gpio_set_value(pwn_gpio, 1);
++ msleep(5);
++
++ gpio_set_value(pwn_gpio, 0);
++ msleep(5);
++
++ gpio_set_value(rst_gpio, 0);
++ msleep(1);
++
++ gpio_set_value(rst_gpio, 1);
++ msleep(5);
++
++ gpio_set_value(pwn_gpio, 1);
++}
++
++static int ov5640_power_on(struct device *dev)
++{
++ int ret = 0;
++
++ io_regulator = devm_regulator_get(dev, "DOVDD");
++ if (!IS_ERR(io_regulator)) {
++ regulator_set_voltage(io_regulator,
++ OV5640_VOLTAGE_DIGITAL_IO,
++ OV5640_VOLTAGE_DIGITAL_IO);
++ ret = regulator_enable(io_regulator);
++ if (ret) {
++ pr_err("%s:io set voltage error\n", __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:io set voltage ok\n", __func__);
++ }
++ } else {
++ pr_err("%s: cannot get io voltage error\n", __func__);
++ io_regulator = NULL;
++ }
++
++ core_regulator = devm_regulator_get(dev, "DVDD");
++ if (!IS_ERR(core_regulator)) {
++ regulator_set_voltage(core_regulator,
++ OV5640_VOLTAGE_DIGITAL_CORE,
++ OV5640_VOLTAGE_DIGITAL_CORE);
++ ret = regulator_enable(core_regulator);
++ if (ret) {
++ pr_err("%s:core set voltage error\n", __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:core set voltage ok\n", __func__);
++ }
++ } else {
++ core_regulator = NULL;
++ pr_err("%s: cannot get core voltage error\n", __func__);
++ }
++
++ analog_regulator = devm_regulator_get(dev, "AVDD");
++ if (!IS_ERR(analog_regulator)) {
++ regulator_set_voltage(analog_regulator,
++ OV5640_VOLTAGE_ANALOG,
++ OV5640_VOLTAGE_ANALOG);
++ ret = regulator_enable(analog_regulator);
++ if (ret) {
++ pr_err("%s:analog set voltage error\n",
++ __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:analog set voltage ok\n", __func__);
++ }
++ } else {
++ analog_regulator = NULL;
++ pr_err("%s: cannot get analog voltage error\n", __func__);
++ }
++
++ return ret;
++}
++
++static s32 ov5640_write_reg(u16 reg, u8 val)
++{
++ u8 au8Buf[3] = {0};
++
++ au8Buf[0] = reg >> 8;
++ au8Buf[1] = reg & 0xff;
++ au8Buf[2] = val;
++
++ if (i2c_master_send(ov5640_data.i2c_client, au8Buf, 3) < 0) {
++ pr_err("%s:write reg error:reg=%x,val=%x\n",
++ __func__, reg, val);
++ return -1;
++ }
++
++ return 0;
++}
++
++static s32 ov5640_read_reg(u16 reg, u8 *val)
++{
++ u8 au8RegBuf[2] = {0};
++ u8 u8RdVal = 0;
++
++ au8RegBuf[0] = reg >> 8;
++ au8RegBuf[1] = reg & 0xff;
++
++ if (2 != i2c_master_send(ov5640_data.i2c_client, au8RegBuf, 2)) {
++ pr_err("%s:write reg error:reg=%x\n",
++ __func__, reg);
++ return -1;
++ }
++
++ if (1 != i2c_master_recv(ov5640_data.i2c_client, &u8RdVal, 1)) {
++ pr_err("%s:read reg error:reg=%x,val=%x\n",
++ __func__, reg, u8RdVal);
++ return -1;
++ }
++
++ *val = u8RdVal;
++
++ return u8RdVal;
++}
++
++static int prev_sysclk, prev_HTS;
++static int AE_low, AE_high, AE_Target = 52;
++
++void OV5640_stream_on(void)
++{
++ ov5640_write_reg(0x4202, 0x00);
++}
++
++void OV5640_stream_off(void)
++{
++ ov5640_write_reg(0x4202, 0x0f);
++}
++
++
++int OV5640_get_sysclk(void)
++{
++ /* calculate sysclk */
++ int xvclk = ov5640_data.mclk / 10000;
++ int temp1, temp2;
++ int Multiplier, PreDiv, VCO, SysDiv, Pll_rdiv;
++ int Bit_div2x = 1, sclk_rdiv, sysclk;
++ u8 temp;
++
++ int sclk_rdiv_map[] = {1, 2, 4, 8};
++
++ temp1 = ov5640_read_reg(0x3034, &temp);
++ temp2 = temp1 & 0x0f;
++ if (temp2 == 8 || temp2 == 10)
++ Bit_div2x = temp2 / 2;
++
++ temp1 = ov5640_read_reg(0x3035, &temp);
++ SysDiv = temp1>>4;
++ if (SysDiv == 0)
++ SysDiv = 16;
++
++ temp1 = ov5640_read_reg(0x3036, &temp);
++ Multiplier = temp1;
++
++ temp1 = ov5640_read_reg(0x3037, &temp);
++ PreDiv = temp1 & 0x0f;
++ Pll_rdiv = ((temp1 >> 4) & 0x01) + 1;
++
++ temp1 = ov5640_read_reg(0x3108, &temp);
++ temp2 = temp1 & 0x03;
++ sclk_rdiv = sclk_rdiv_map[temp2];
++
++ VCO = xvclk * Multiplier / PreDiv;
++
++ sysclk = VCO / SysDiv / Pll_rdiv * 2 / Bit_div2x / sclk_rdiv;
++
++ return sysclk;
++}
++
++void OV5640_set_night_mode(void)
++{
++ /* read HTS from register settings */
++ u8 mode;
++
++ ov5640_read_reg(0x3a00, &mode);
++ mode &= 0xfb;
++ ov5640_write_reg(0x3a00, mode);
++}
++
++int OV5640_get_HTS(void)
++{
++ /* read HTS from register settings */
++ int HTS;
++ u8 temp;
++
++ HTS = ov5640_read_reg(0x380c, &temp);
++ HTS = (HTS<<8) + ov5640_read_reg(0x380d, &temp);
++
++ return HTS;
++}
++
++int OV5640_get_VTS(void)
++{
++ /* read VTS from register settings */
++ int VTS;
++ u8 temp;
++
++ /* total vertical size[15:8] high byte */
++ VTS = ov5640_read_reg(0x380e, &temp);
++
++ VTS = (VTS<<8) + ov5640_read_reg(0x380f, &temp);
++
++ return VTS;
++}
++
++int OV5640_set_VTS(int VTS)
++{
++ /* write VTS to registers */
++ int temp;
++
++ temp = VTS & 0xff;
++ ov5640_write_reg(0x380f, temp);
++
++ temp = VTS>>8;
++ ov5640_write_reg(0x380e, temp);
++
++ return 0;
++}
++
++int OV5640_get_shutter(void)
++{
++ /* read shutter, in number of line period */
++ int shutter;
++ u8 temp;
++
++ shutter = (ov5640_read_reg(0x03500, &temp) & 0x0f);
++ shutter = (shutter<<8) + ov5640_read_reg(0x3501, &temp);
++ shutter = (shutter<<4) + (ov5640_read_reg(0x3502, &temp)>>4);
++
++ return shutter;
++}
++
++int OV5640_set_shutter(int shutter)
++{
++ /* write shutter, in number of line period */
++ int temp;
++
++ shutter = shutter & 0xffff;
++
++ temp = shutter & 0x0f;
++ temp = temp<<4;
++ ov5640_write_reg(0x3502, temp);
++
++ temp = shutter & 0xfff;
++ temp = temp>>4;
++ ov5640_write_reg(0x3501, temp);
++
++ temp = shutter>>12;
++ ov5640_write_reg(0x3500, temp);
++
++ return 0;
++}
++
++int OV5640_get_gain16(void)
++{
++ /* read gain, 16 = 1x */
++ int gain16;
++ u8 temp;
++
++ gain16 = ov5640_read_reg(0x350a, &temp) & 0x03;
++ gain16 = (gain16<<8) + ov5640_read_reg(0x350b, &temp);
++
++ return gain16;
++}
++
++int OV5640_set_gain16(int gain16)
++{
++ /* write gain, 16 = 1x */
++ u8 temp;
++ gain16 = gain16 & 0x3ff;
++
++ temp = gain16 & 0xff;
++ ov5640_write_reg(0x350b, temp);
++
++ temp = gain16>>8;
++ ov5640_write_reg(0x350a, temp);
++
++ return 0;
++}
++
++int OV5640_get_light_freq(void)
++{
++ /* get banding filter value */
++ int temp, temp1, light_freq = 0;
++ u8 tmp;
++
++ temp = ov5640_read_reg(0x3c01, &tmp);
++
++ if (temp & 0x80) {
++ /* manual */
++ temp1 = ov5640_read_reg(0x3c00, &tmp);
++ if (temp1 & 0x04) {
++ /* 50Hz */
++ light_freq = 50;
++ } else {
++ /* 60Hz */
++ light_freq = 60;
++ }
++ } else {
++ /* auto */
++ temp1 = ov5640_read_reg(0x3c0c, &tmp);
++ if (temp1 & 0x01) {
++ /* 50Hz */
++ light_freq = 50;
++ } else {
++ /* 60Hz */
++ }
++ }
++ return light_freq;
++}
++
++void OV5640_set_bandingfilter(void)
++{
++ int prev_VTS;
++ int band_step60, max_band60, band_step50, max_band50;
++
++ /* read preview PCLK */
++ prev_sysclk = OV5640_get_sysclk();
++ /* read preview HTS */
++ prev_HTS = OV5640_get_HTS();
++
++ /* read preview VTS */
++ prev_VTS = OV5640_get_VTS();
++
++ /* calculate banding filter */
++ /* 60Hz */
++ band_step60 = prev_sysclk * 100/prev_HTS * 100/120;
++ ov5640_write_reg(0x3a0a, (band_step60 >> 8));
++ ov5640_write_reg(0x3a0b, (band_step60 & 0xff));
++
++ max_band60 = (int)((prev_VTS-4)/band_step60);
++ ov5640_write_reg(0x3a0d, max_band60);
++
++ /* 50Hz */
++ band_step50 = prev_sysclk * 100/prev_HTS;
++ ov5640_write_reg(0x3a08, (band_step50 >> 8));
++ ov5640_write_reg(0x3a09, (band_step50 & 0xff));
++
++ max_band50 = (int)((prev_VTS-4)/band_step50);
++ ov5640_write_reg(0x3a0e, max_band50);
++}
++
++int OV5640_set_AE_target(int target)
++{
++ /* stable in high */
++ int fast_high, fast_low;
++ AE_low = target * 23 / 25; /* 0.92 */
++ AE_high = target * 27 / 25; /* 1.08 */
++
++ fast_high = AE_high<<1;
++ if (fast_high > 255)
++ fast_high = 255;
++
++ fast_low = AE_low >> 1;
++
++ ov5640_write_reg(0x3a0f, AE_high);
++ ov5640_write_reg(0x3a10, AE_low);
++ ov5640_write_reg(0x3a1b, AE_high);
++ ov5640_write_reg(0x3a1e, AE_low);
++ ov5640_write_reg(0x3a11, fast_high);
++ ov5640_write_reg(0x3a1f, fast_low);
++
++ return 0;
++}
++
++void OV5640_turn_on_AE_AG(int enable)
++{
++ u8 ae_ag_ctrl;
++
++ ov5640_read_reg(0x3503, &ae_ag_ctrl);
++ if (enable) {
++ /* turn on auto AE/AG */
++ ae_ag_ctrl = ae_ag_ctrl & ~(0x03);
++ } else {
++ /* turn off AE/AG */
++ ae_ag_ctrl = ae_ag_ctrl | 0x03;
++ }
++ ov5640_write_reg(0x3503, ae_ag_ctrl);
++}
++
++bool binning_on(void)
++{
++ u8 temp;
++ ov5640_read_reg(0x3821, &temp);
++ temp &= 0xfe;
++ if (temp)
++ return true;
++ else
++ return false;
++}
++
++static void ov5640_set_virtual_channel(int channel)
++{
++ u8 channel_id;
++
++ ov5640_read_reg(0x4814, &channel_id);
++ channel_id &= ~(3 << 6);
++ ov5640_write_reg(0x4814, channel_id | (channel << 6));
++}
++
++/* download ov5640 settings to sensor through i2c */
++static int ov5640_download_firmware(struct reg_value *pModeSetting, s32 ArySize)
++{
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int i, retval = 0;
++
++ for (i = 0; i < ArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++
++ if (Mask) {
++ retval = ov5640_read_reg(RegAddr, &RegVal);
++ if (retval < 0)
++ goto err;
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5640_write_reg(RegAddr, Val);
++ if (retval < 0)
++ goto err;
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++
++/* sensor changes between scaling and subsampling
++ * go through exposure calcualtion
++ */
++static int ov5640_change_mode_exposure_calc(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ u8 average;
++ int prev_shutter, prev_gain16;
++ int cap_shutter, cap_gain16;
++ int cap_sysclk, cap_HTS, cap_VTS;
++ int light_freq, cap_bandfilt, cap_maxband;
++ long cap_gain16_shutter;
++ int retval = 0;
++
++ /* check if the input mode and frame rate is valid */
++ pModeSetting =
++ ov5640_mode_info_data[frame_rate][mode].init_data_ptr;
++ ArySize =
++ ov5640_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5640_data.pix.width =
++ ov5640_mode_info_data[frame_rate][mode].width;
++ ov5640_data.pix.height =
++ ov5640_mode_info_data[frame_rate][mode].height;
++
++ if (ov5640_data.pix.width == 0 || ov5640_data.pix.height == 0 ||
++ pModeSetting == NULL || ArySize == 0)
++ return -EINVAL;
++
++ /* auto focus */
++ /* OV5640_auto_focus();//if no af function, just skip it */
++
++ /* turn off AE/AG */
++ OV5640_turn_on_AE_AG(0);
++
++ /* read preview shutter */
++ prev_shutter = OV5640_get_shutter();
++ if ((binning_on()) && (mode != ov5640_mode_720P_1280_720)
++ && (mode != ov5640_mode_1080P_1920_1080))
++ prev_shutter *= 2;
++
++ /* read preview gain */
++ prev_gain16 = OV5640_get_gain16();
++
++ /* get average */
++ ov5640_read_reg(0x56a1, &average);
++
++ /* turn off night mode for capture */
++ OV5640_set_night_mode();
++
++ /* turn off overlay */
++ /* ov5640_write_reg(0x3022, 0x06);//if no af function, just skip it */
++
++ OV5640_stream_off();
++
++ /* Write capture setting */
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ /* read capture VTS */
++ cap_VTS = OV5640_get_VTS();
++ cap_HTS = OV5640_get_HTS();
++ cap_sysclk = OV5640_get_sysclk();
++
++ /* calculate capture banding filter */
++ light_freq = OV5640_get_light_freq();
++ if (light_freq == 60) {
++ /* 60Hz */
++ cap_bandfilt = cap_sysclk * 100 / cap_HTS * 100 / 120;
++ } else {
++ /* 50Hz */
++ cap_bandfilt = cap_sysclk * 100 / cap_HTS;
++ }
++ cap_maxband = (int)((cap_VTS - 4)/cap_bandfilt);
++
++ /* calculate capture shutter/gain16 */
++ if (average > AE_low && average < AE_high) {
++ /* in stable range */
++ cap_gain16_shutter =
++ prev_gain16 * prev_shutter * cap_sysclk/prev_sysclk
++ * prev_HTS/cap_HTS * AE_Target / average;
++ } else {
++ cap_gain16_shutter =
++ prev_gain16 * prev_shutter * cap_sysclk/prev_sysclk
++ * prev_HTS/cap_HTS;
++ }
++
++ /* gain to shutter */
++ if (cap_gain16_shutter < (cap_bandfilt * 16)) {
++ /* shutter < 1/100 */
++ cap_shutter = cap_gain16_shutter/16;
++ if (cap_shutter < 1)
++ cap_shutter = 1;
++
++ cap_gain16 = cap_gain16_shutter/cap_shutter;
++ if (cap_gain16 < 16)
++ cap_gain16 = 16;
++ } else {
++ if (cap_gain16_shutter >
++ (cap_bandfilt * cap_maxband * 16)) {
++ /* exposure reach max */
++ cap_shutter = cap_bandfilt * cap_maxband;
++ cap_gain16 = cap_gain16_shutter / cap_shutter;
++ } else {
++ /* 1/100 < (cap_shutter = n/100) =< max */
++ cap_shutter =
++ ((int) (cap_gain16_shutter/16 / cap_bandfilt))
++ *cap_bandfilt;
++ cap_gain16 = cap_gain16_shutter / cap_shutter;
++ }
++ }
++
++ /* write capture gain */
++ OV5640_set_gain16(cap_gain16);
++
++ /* write capture shutter */
++ if (cap_shutter > (cap_VTS - 4)) {
++ cap_VTS = cap_shutter + 4;
++ OV5640_set_VTS(cap_VTS);
++ }
++ OV5640_set_shutter(cap_shutter);
++
++ OV5640_stream_on();
++
++err:
++ return retval;
++}
++
++/* if sensor changes inside scaling or subsampling
++ * change mode directly
++ * */
++static int ov5640_change_mode_direct(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ int retval = 0;
++
++ /* check if the input mode and frame rate is valid */
++ pModeSetting =
++ ov5640_mode_info_data[frame_rate][mode].init_data_ptr;
++ ArySize =
++ ov5640_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5640_data.pix.width =
++ ov5640_mode_info_data[frame_rate][mode].width;
++ ov5640_data.pix.height =
++ ov5640_mode_info_data[frame_rate][mode].height;
++
++ if (ov5640_data.pix.width == 0 || ov5640_data.pix.height == 0 ||
++ pModeSetting == NULL || ArySize == 0)
++ return -EINVAL;
++
++ /* turn off AE/AG */
++ OV5640_turn_on_AE_AG(0);
++
++ OV5640_stream_off();
++
++ /* Write capture setting */
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ OV5640_stream_on();
++
++ OV5640_turn_on_AE_AG(1);
++
++err:
++ return retval;
++}
++
++static int ov5640_init_mode(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode, enum ov5640_mode orig_mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ int retval = 0;
++ void *mipi_csi2_info;
++ u32 mipi_reg, msec_wait4stable = 0;
++ enum ov5640_downsize_mode dn_mode, orig_dn_mode;
++
++ if ((mode > ov5640_mode_MAX || mode < ov5640_mode_MIN)
++ && (mode != ov5640_mode_INIT)) {
++ pr_err("Wrong ov5640 mode detected!\n");
++ return -1;
++ }
++
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ /* initial mipi dphy */
++ if (!mipi_csi2_info) {
++ printk(KERN_ERR "%s() in %s: Fail to get mipi_csi2_info!\n",
++ __func__, __FILE__);
++ return -1;
++ }
++
++ if (!mipi_csi2_get_status(mipi_csi2_info))
++ mipi_csi2_enable(mipi_csi2_info);
++
++ if (!mipi_csi2_get_status(mipi_csi2_info)) {
++ pr_err("Can not enable mipi csi2 driver!\n");
++ return -1;
++ }
++
++ mipi_csi2_set_lanes(mipi_csi2_info);
++
++ /*Only reset MIPI CSI2 HW at sensor initialize*/
++ if (mode == ov5640_mode_INIT)
++ mipi_csi2_reset(mipi_csi2_info);
++
++ if (ov5640_data.pix.pixelformat == V4L2_PIX_FMT_UYVY)
++ mipi_csi2_set_datatype(mipi_csi2_info, MIPI_DT_YUV422);
++ else if (ov5640_data.pix.pixelformat == V4L2_PIX_FMT_RGB565)
++ mipi_csi2_set_datatype(mipi_csi2_info, MIPI_DT_RGB565);
++ else
++ pr_err("currently this sensor format can not be supported!\n");
++
++ dn_mode = ov5640_mode_info_data[frame_rate][mode].dn_mode;
++ orig_dn_mode = ov5640_mode_info_data[frame_rate][orig_mode].dn_mode;
++ if (mode == ov5640_mode_INIT) {
++ pModeSetting = ov5640_init_setting_30fps_VGA;
++ ArySize = ARRAY_SIZE(ov5640_init_setting_30fps_VGA);
++
++ ov5640_data.pix.width = 640;
++ ov5640_data.pix.height = 480;
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ pModeSetting = ov5640_setting_30fps_VGA_640_480;
++ ArySize = ARRAY_SIZE(ov5640_setting_30fps_VGA_640_480);
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ } else if ((dn_mode == SUBSAMPLING && orig_dn_mode == SCALING) ||
++ (dn_mode == SCALING && orig_dn_mode == SUBSAMPLING)) {
++ /* change between subsampling and scaling
++ * go through exposure calucation */
++ retval = ov5640_change_mode_exposure_calc(frame_rate, mode);
++ } else {
++ /* change inside subsampling or scaling
++ * download firmware directly */
++ retval = ov5640_change_mode_direct(frame_rate, mode);
++ }
++
++ if (retval < 0)
++ goto err;
++
++ OV5640_set_AE_target(AE_Target);
++ OV5640_get_light_freq();
++ OV5640_set_bandingfilter();
++ ov5640_set_virtual_channel(ov5640_data.csi);
++
++ /* add delay to wait for sensor stable */
++ if (mode == ov5640_mode_QSXGA_2592_1944) {
++ /* dump the first two frames: 1/7.5*2
++ * the frame rate of QSXGA is 7.5fps */
++ msec_wait4stable = 267;
++ } else if (frame_rate == ov5640_15_fps) {
++ /* dump the first nine frames: 1/15*9 */
++ msec_wait4stable = 600;
++ } else if (frame_rate == ov5640_30_fps) {
++ /* dump the first nine frames: 1/30*9 */
++ msec_wait4stable = 300;
++ }
++ msleep(msec_wait4stable);
++
++ if (mipi_csi2_info) {
++ unsigned int i;
++
++ i = 0;
++
++ /* wait for mipi sensor ready */
++ mipi_reg = mipi_csi2_dphy_status(mipi_csi2_info);
++ while ((mipi_reg == 0x200) && (i < 10)) {
++ mipi_reg = mipi_csi2_dphy_status(mipi_csi2_info);
++ i++;
++ msleep(10);
++ }
++
++ if (i >= 10) {
++ pr_err("mipi csi2 can not receive sensor clk!\n");
++ return -1;
++ }
++
++ i = 0;
++
++ /* wait for mipi stable */
++ mipi_reg = mipi_csi2_get_error1(mipi_csi2_info);
++ while ((mipi_reg != 0x0) && (i < 10)) {
++ mipi_reg = mipi_csi2_get_error1(mipi_csi2_info);
++ i++;
++ msleep(10);
++ }
++
++ if (i >= 10) {
++ pr_err("mipi csi2 can not reveive data correctly!\n");
++ return -1;
++ }
++ }
++err:
++ return retval;
++}
++
++/* --------------- IOCTL functions from v4l2_int_ioctl_desc --------------- */
++
++static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
++{
++ if (s == NULL) {
++ pr_err(" ERROR!! no slave device set!\n");
++ return -1;
++ }
++
++ memset(p, 0, sizeof(*p));
++ p->u.bt656.clock_curr = ov5640_data.mclk;
++ pr_debug(" clock_curr=mclk=%d\n", ov5640_data.mclk);
++ p->if_type = V4L2_IF_TYPE_BT656;
++ p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
++ p->u.bt656.clock_min = OV5640_XCLK_MIN;
++ p->u.bt656.clock_max = OV5640_XCLK_MAX;
++ p->u.bt656.bt_sync_correct = 1; /* Indicate external vsync */
++
++ return 0;
++}
++
++/*!
++ * ioctl_s_power - V4L2 sensor interface handler for VIDIOC_S_POWER ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @on: indicates power mode (on or off)
++ *
++ * Turns the power on or off, depending on the value of on and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_power(struct v4l2_int_device *s, int on)
++{
++ struct sensor_data *sensor = s->priv;
++
++ if (on && !sensor->on) {
++ if (io_regulator)
++ if (regulator_enable(io_regulator) != 0)
++ return -EIO;
++ if (core_regulator)
++ if (regulator_enable(core_regulator) != 0)
++ return -EIO;
++ if (gpo_regulator)
++ if (regulator_enable(gpo_regulator) != 0)
++ return -EIO;
++ if (analog_regulator)
++ if (regulator_enable(analog_regulator) != 0)
++ return -EIO;
++ /* Make sure power on */
++ ov5640_standby(0);
++ } else if (!on && sensor->on) {
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++ if (core_regulator)
++ regulator_disable(core_regulator);
++ if (io_regulator)
++ regulator_disable(io_regulator);
++ if (gpo_regulator)
++ regulator_disable(gpo_regulator);
++
++ ov5640_standby(1);
++ }
++
++ sensor->on = on;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
++ *
++ * Returns the sensor's video CAPTURE parameters.
++ */
++static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_captureparm *cparm = &a->parm.capture;
++ int ret = 0;
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cparm->capability = sensor->streamcap.capability;
++ cparm->timeperframe = sensor->streamcap.timeperframe;
++ cparm->capturemode = sensor->streamcap.capturemode;
++ ret = 0;
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
++ *
++ * Configures the sensor to use the input parameters, if possible. If
++ * not possible, reverts to the old parameters and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
++	u32 tgt_fps;	/* target frames per second */
++ enum ov5640_frame_rate frame_rate;
++ enum ov5640_mode orig_mode;
++ int ret = 0;
++
++ /* Make sure power on */
++ ov5640_standby(0);
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ /* Check that the new frame rate is allowed. */
++ if ((timeperframe->numerator == 0) ||
++ (timeperframe->denominator == 0)) {
++ timeperframe->denominator = DEFAULT_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps > MAX_FPS) {
++ timeperframe->denominator = MAX_FPS;
++ timeperframe->numerator = 1;
++ } else if (tgt_fps < MIN_FPS) {
++ timeperframe->denominator = MIN_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ /* Actual frame rate we use */
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5640_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5640_30_fps;
++ else {
++ pr_err(" The camera frame rate is not supported!\n");
++ return -EINVAL;
++ }
++
++ orig_mode = sensor->streamcap.capturemode;
++ ret = ov5640_init_mode(frame_rate,
++ (u32)a->parm.capture.capturemode, orig_mode);
++ if (ret < 0)
++ return ret;
++
++ sensor->streamcap.timeperframe = *timeperframe;
++ sensor->streamcap.capturemode =
++ (u32)a->parm.capture.capturemode;
++
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ pr_debug(" type is not " \
++ "V4L2_BUF_TYPE_VIDEO_CAPTURE but %d\n",
++ a->type);
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
++ * @s: pointer to standard V4L2 device structure
++ * @f: pointer to standard V4L2 v4l2_format structure
++ *
++ * Returns the sensor's current pixel format in the v4l2_format
++ * parameter.
++ */
++static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
++{
++ struct sensor_data *sensor = s->priv;
++
++ f->fmt.pix = sensor->pix;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control's current
++ * value from the video_control[] array. Otherwise, returns -EINVAL
++ * if the control is not supported.
++ */
++static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int ret = 0;
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ vc->value = ov5640_data.brightness;
++ break;
++ case V4L2_CID_HUE:
++ vc->value = ov5640_data.hue;
++ break;
++ case V4L2_CID_CONTRAST:
++ vc->value = ov5640_data.contrast;
++ break;
++ case V4L2_CID_SATURATION:
++ vc->value = ov5640_data.saturation;
++ break;
++ case V4L2_CID_RED_BALANCE:
++ vc->value = ov5640_data.red;
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ vc->value = ov5640_data.blue;
++ break;
++ case V4L2_CID_EXPOSURE:
++ vc->value = ov5640_data.ae_mode;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
++ *
++ * If the requested control is supported, sets the control's current
++ * value in HW (and updates the video_control[] array). Otherwise,
++ * returns -EINVAL if the control is not supported.
++ */
++static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int retval = 0;
++
++ pr_debug("In ov5640:ioctl_s_ctrl %d\n",
++ vc->id);
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ break;
++ case V4L2_CID_CONTRAST:
++ break;
++ case V4L2_CID_SATURATION:
++ break;
++ case V4L2_CID_HUE:
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_RED_BALANCE:
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ break;
++ case V4L2_CID_GAMMA:
++ break;
++ case V4L2_CID_EXPOSURE:
++ break;
++ case V4L2_CID_AUTOGAIN:
++ break;
++ case V4L2_CID_GAIN:
++ break;
++ case V4L2_CID_HFLIP:
++ break;
++ case V4L2_CID_VFLIP:
++ break;
++ default:
++ retval = -EPERM;
++ break;
++ }
++
++ return retval;
++}
++
++/*!
++ * ioctl_enum_framesizes - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMESIZES ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_framesizes(struct v4l2_int_device *s,
++ struct v4l2_frmsizeenum *fsize)
++{
++ if (fsize->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ fsize->pixel_format = ov5640_data.pix.pixelformat;
++ fsize->discrete.width =
++ max(ov5640_mode_info_data[0][fsize->index].width,
++ ov5640_mode_info_data[1][fsize->index].width);
++ fsize->discrete.height =
++ max(ov5640_mode_info_data[0][fsize->index].height,
++ ov5640_mode_info_data[1][fsize->index].height);
++ return 0;
++}
++
++/*!
++ * ioctl_g_chip_ident - V4L2 sensor interface handler for
++ * VIDIOC_DBG_G_CHIP_IDENT ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @id: pointer to int
++ *
++ * Return 0.
++ */
++static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
++{
++ ((struct v4l2_dbg_chip_ident *)id)->match.type =
++ V4L2_CHIP_MATCH_I2C_DRIVER;
++ strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name,
++ "ov5640_mipi_camera");
++
++ return 0;
++}
++
++/*!
++ * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
++ * @s: pointer to standard V4L2 device structure
++ */
++static int ioctl_init(struct v4l2_int_device *s)
++{
++
++ return 0;
++}
++
++/*!
++ * ioctl_enum_fmt_cap - V4L2 sensor interface handler for VIDIOC_ENUM_FMT
++ * @s: pointer to standard V4L2 device structure
++ * @fmt: pointer to standard V4L2 fmt description structure
++ *
++ * Return 0.
++ */
++static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
++ struct v4l2_fmtdesc *fmt)
++{
++ if (fmt->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ fmt->pixelformat = ov5640_data.pix.pixelformat;
++
++ return 0;
++}
++
++/*!
++ * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Initialise the device when the slave attaches to the master.
++ */
++static int ioctl_dev_init(struct v4l2_int_device *s)
++{
++ struct sensor_data *sensor = s->priv;
++ u32 tgt_xclk; /* target xclk */
++	u32 tgt_fps;	/* target frames per second */
++ int ret;
++ enum ov5640_frame_rate frame_rate;
++ void *mipi_csi2_info;
++
++ ov5640_data.on = true;
++
++ /* mclk */
++ tgt_xclk = ov5640_data.mclk;
++ tgt_xclk = min(tgt_xclk, (u32)OV5640_XCLK_MAX);
++ tgt_xclk = max(tgt_xclk, (u32)OV5640_XCLK_MIN);
++ ov5640_data.mclk = tgt_xclk;
++
++ pr_debug(" Setting mclk to %d MHz\n", tgt_xclk / 1000000);
++
++ /* Default camera frame rate is set in probe */
++ tgt_fps = sensor->streamcap.timeperframe.denominator /
++ sensor->streamcap.timeperframe.numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5640_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5640_30_fps;
++ else
++ return -EINVAL; /* Only support 15fps or 30fps now. */
++
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ /* enable mipi csi2 */
++ if (mipi_csi2_info)
++ mipi_csi2_enable(mipi_csi2_info);
++ else {
++ printk(KERN_ERR "%s() in %s: Fail to get mipi_csi2_info!\n",
++ __func__, __FILE__);
++ return -EPERM;
++ }
++
++ ret = ov5640_init_mode(frame_rate, ov5640_mode_INIT, ov5640_mode_INIT);
++
++ return ret;
++}
++
++/*!
++ * ioctl_dev_exit - V4L2 sensor interface handler for vidioc_int_dev_exit_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Deinitialise the device when the slave detaches from the master.
++ */
++static int ioctl_dev_exit(struct v4l2_int_device *s)
++{
++ void *mipi_csi2_info;
++
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ /* disable mipi csi2 */
++ if (mipi_csi2_info)
++ if (mipi_csi2_get_status(mipi_csi2_info))
++ mipi_csi2_disable(mipi_csi2_info);
++
++ return 0;
++}
++
++/*!
++ * This structure defines all the ioctls for this module and links them to the
++ * enumeration.
++ */
++static struct v4l2_int_ioctl_desc ov5640_ioctl_desc[] = {
++ {vidioc_int_dev_init_num, (v4l2_int_ioctl_func *) ioctl_dev_init},
++ {vidioc_int_dev_exit_num, ioctl_dev_exit},
++ {vidioc_int_s_power_num, (v4l2_int_ioctl_func *) ioctl_s_power},
++ {vidioc_int_g_ifparm_num, (v4l2_int_ioctl_func *) ioctl_g_ifparm},
++/* {vidioc_int_g_needs_reset_num,
++ (v4l2_int_ioctl_func *)ioctl_g_needs_reset}, */
++/* {vidioc_int_reset_num, (v4l2_int_ioctl_func *)ioctl_reset}, */
++ {vidioc_int_init_num, (v4l2_int_ioctl_func *) ioctl_init},
++ {vidioc_int_enum_fmt_cap_num,
++ (v4l2_int_ioctl_func *) ioctl_enum_fmt_cap},
++/* {vidioc_int_try_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_try_fmt_cap}, */
++ {vidioc_int_g_fmt_cap_num, (v4l2_int_ioctl_func *) ioctl_g_fmt_cap},
++/* {vidioc_int_s_fmt_cap_num, (v4l2_int_ioctl_func *) ioctl_s_fmt_cap}, */
++ {vidioc_int_g_parm_num, (v4l2_int_ioctl_func *) ioctl_g_parm},
++ {vidioc_int_s_parm_num, (v4l2_int_ioctl_func *) ioctl_s_parm},
++/* {vidioc_int_queryctrl_num, (v4l2_int_ioctl_func *)ioctl_queryctrl}, */
++ {vidioc_int_g_ctrl_num, (v4l2_int_ioctl_func *) ioctl_g_ctrl},
++ {vidioc_int_s_ctrl_num, (v4l2_int_ioctl_func *) ioctl_s_ctrl},
++ {vidioc_int_enum_framesizes_num,
++ (v4l2_int_ioctl_func *) ioctl_enum_framesizes},
++ {vidioc_int_g_chip_ident_num,
++ (v4l2_int_ioctl_func *) ioctl_g_chip_ident},
++};
++
++static struct v4l2_int_slave ov5640_slave = {
++ .ioctls = ov5640_ioctl_desc,
++ .num_ioctls = ARRAY_SIZE(ov5640_ioctl_desc),
++};
++
++static struct v4l2_int_device ov5640_int_device = {
++ .module = THIS_MODULE,
++ .name = "ov5640",
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &ov5640_slave,
++ },
++};
++
++/*!
++ * ov5640 I2C probe function
++ *
++ * @param adapter struct i2c_adapter *
++ * @return Error code indicating success or failure
++ */
++static int ov5640_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct device *dev = &client->dev;
++ int retval;
++ u8 chip_id_high, chip_id_low;
++
++ /* request power down pin */
++ pwn_gpio = of_get_named_gpio(dev->of_node, "pwn-gpios", 0);
++ if (!gpio_is_valid(pwn_gpio)) {
++ dev_warn(dev, "no sensor pwdn pin available");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, pwn_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5640_mipi_pwdn");
++ if (retval < 0)
++ return retval;
++
++ /* request reset pin */
++ rst_gpio = of_get_named_gpio(dev->of_node, "rst-gpios", 0);
++ if (!gpio_is_valid(rst_gpio)) {
++ dev_warn(dev, "no sensor reset pin available");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, rst_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5640_mipi_reset");
++ if (retval < 0)
++ return retval;
++
++ /* Set initial values for the sensor struct. */
++ memset(&ov5640_data, 0, sizeof(ov5640_data));
++ ov5640_data.sensor_clk = devm_clk_get(dev, "csi_mclk");
++ if (IS_ERR(ov5640_data.sensor_clk)) {
++ /* assuming clock enabled by default */
++ ov5640_data.sensor_clk = NULL;
++ dev_err(dev, "clock-frequency missing or invalid\n");
++ return PTR_ERR(ov5640_data.sensor_clk);
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk",
++ &(ov5640_data.mclk));
++ if (retval) {
++ dev_err(dev, "mclk missing or invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk_source",
++ (u32 *) &(ov5640_data.mclk_source));
++ if (retval) {
++ dev_err(dev, "mclk_source missing or invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "csi_id",
++ &(ov5640_data.csi));
++ if (retval) {
++ dev_err(dev, "csi id missing or invalid\n");
++ return retval;
++ }
++
++ clk_prepare_enable(ov5640_data.sensor_clk);
++
++ ov5640_data.io_init = ov5640_reset;
++ ov5640_data.i2c_client = client;
++ ov5640_data.pix.pixelformat = V4L2_PIX_FMT_UYVY;
++ ov5640_data.pix.width = 640;
++ ov5640_data.pix.height = 480;
++ ov5640_data.streamcap.capability = V4L2_MODE_HIGHQUALITY |
++ V4L2_CAP_TIMEPERFRAME;
++ ov5640_data.streamcap.capturemode = 0;
++ ov5640_data.streamcap.timeperframe.denominator = DEFAULT_FPS;
++ ov5640_data.streamcap.timeperframe.numerator = 1;
++
++ ov5640_power_on(dev);
++
++ ov5640_reset();
++
++ ov5640_standby(0);
++
++ retval = ov5640_read_reg(OV5640_CHIP_ID_HIGH_BYTE, &chip_id_high);
++ if (retval < 0 || chip_id_high != 0x56) {
++ pr_warning("camera ov5640_mipi is not found\n");
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++ return -ENODEV;
++ }
++ retval = ov5640_read_reg(OV5640_CHIP_ID_LOW_BYTE, &chip_id_low);
++ if (retval < 0 || chip_id_low != 0x40) {
++ pr_warning("camera ov5640_mipi is not found\n");
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++ return -ENODEV;
++ }
++
++ ov5640_standby(1);
++
++ ov5640_int_device.priv = &ov5640_data;
++ retval = v4l2_int_device_register(&ov5640_int_device);
++
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++
++ pr_info("camera ov5640_mipi is found\n");
++ return retval;
++}
++
++/*!
++ * ov5640 I2C detach function
++ *
++ * @param client struct i2c_client *
++ * @return Error code indicating success or failure
++ */
++static int ov5640_remove(struct i2c_client *client)
++{
++ v4l2_int_device_unregister(&ov5640_int_device);
++
++ if (gpo_regulator)
++ regulator_disable(gpo_regulator);
++
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++
++ if (core_regulator)
++ regulator_disable(core_regulator);
++
++ if (io_regulator)
++ regulator_disable(io_regulator);
++
++ return 0;
++}
++
++/*!
++ * ov5640 init function
++ * Called by insmod ov5640_camera.ko.
++ *
++ * @return Error code indicating success or failure
++ */
++static __init int ov5640_init(void)
++{
++ u8 err;
++
++ err = i2c_add_driver(&ov5640_i2c_driver);
++ if (err != 0)
++ pr_err("%s:driver registration failed, error=%d\n",
++ __func__, err);
++
++ return err;
++}
++
++/*!
++ * OV5640 cleanup function
++ * Called on rmmod ov5640_camera.ko
++ *
++ * @return Error code indicating success or failure
++ */
++static void __exit ov5640_clean(void)
++{
++ i2c_del_driver(&ov5640_i2c_driver);
++}
++
++module_init(ov5640_init);
++module_exit(ov5640_clean);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("OV5640 MIPI Camera Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION("1.0");
++MODULE_ALIAS("CSI");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/capture/ov5642.c linux-openelec/drivers/media/platform/mxc/capture/ov5642.c
+--- linux-3.14.36/drivers/media/platform/mxc/capture/ov5642.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/capture/ov5642.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,4252 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/clk.h>
++#include <linux/of_device.h>
++#include <linux/i2c.h>
++#include <linux/of_gpio.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/regulator/consumer.h>
++#include <linux/fsl_devices.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-int-device.h>
++#include "mxc_v4l2_capture.h"
++
++#define OV5642_VOLTAGE_ANALOG 2800000
++#define OV5642_VOLTAGE_DIGITAL_CORE 1500000
++#define OV5642_VOLTAGE_DIGITAL_IO 1800000
++
++#define MIN_FPS 15
++#define MAX_FPS 30
++#define DEFAULT_FPS 30
++
++#define OV5642_XCLK_MIN 6000000
++#define OV5642_XCLK_MAX 24000000
++
++#define OV5642_CHIP_ID_HIGH_BYTE 0x300A
++#define OV5642_CHIP_ID_LOW_BYTE 0x300B
++
++enum ov5642_mode {
++ ov5642_mode_MIN = 0,
++ ov5642_mode_VGA_640_480 = 0,
++ ov5642_mode_QVGA_320_240 = 1,
++ ov5642_mode_NTSC_720_480 = 2,
++ ov5642_mode_PAL_720_576 = 3,
++ ov5642_mode_720P_1280_720 = 4,
++ ov5642_mode_1080P_1920_1080 = 5,
++ ov5642_mode_QSXGA_2592_1944 = 6,
++ ov5642_mode_QCIF_176_144 = 7,
++ ov5642_mode_XGA_1024_768 = 8,
++ ov5642_mode_MAX = 8
++};
++
++enum ov5642_frame_rate {
++ ov5642_15_fps,
++ ov5642_30_fps
++};
++
++static int ov5642_framerates[] = {
++ [ov5642_15_fps] = 15,
++ [ov5642_30_fps] = 30,
++};
++
++struct reg_value {
++ u16 u16RegAddr;
++ u8 u8Val;
++ u8 u8Mask;
++ u32 u32Delay_ms;
++};
++
++struct ov5642_mode_info {
++ enum ov5642_mode mode;
++ u32 width;
++ u32 height;
++ struct reg_value *init_data_ptr;
++ u32 init_data_size;
++};
++
++/*!
++ * Maintains the information on the current state of the sensor.
++ */
++static struct sensor_data ov5642_data;
++static int pwn_gpio, rst_gpio;
++
++static struct reg_value ov5642_rot_none_VGA[] = {
++ {0x3818, 0xc1, 0x00, 0x00}, {0x3621, 0x87, 0x00, 0x00},
++};
++
++static struct reg_value ov5642_rot_vert_flip_VGA[] = {
++ {0x3818, 0x20, 0xbf, 0x00}, {0x3621, 0x20, 0xff, 0x00},
++};
++
++static struct reg_value ov5642_rot_horiz_flip_VGA[] = {
++ {0x3818, 0x81, 0x00, 0x01}, {0x3621, 0xa7, 0x00, 0x00},
++};
++
++static struct reg_value ov5642_rot_180_VGA[] = {
++ {0x3818, 0x60, 0xff, 0x00}, {0x3621, 0x00, 0xdf, 0x00},
++};
++
++
++static struct reg_value ov5642_rot_none_FULL[] = {
++ {0x3818, 0xc0, 0x00, 0x00}, {0x3621, 0x09, 0x00, 0x00},
++};
++
++static struct reg_value ov5642_rot_vert_flip_FULL[] = {
++ {0x3818, 0x20, 0xbf, 0x01}, {0x3621, 0x20, 0xff, 0x00},
++};
++
++static struct reg_value ov5642_rot_horiz_flip_FULL[] = {
++ {0x3818, 0x80, 0x00, 0x01}, {0x3621, 0x29, 0x00, 0x00},
++};
++
++static struct reg_value ov5642_rot_180_FULL[] = {
++ {0x3818, 0x60, 0xff, 0x00}, {0x3621, 0x00, 0xdf, 0x00},
++};
++
++
++static struct reg_value ov5642_initial_setting[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c00, 0x04, 0, 0}, {0x3c01, 0x80, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0},
++ {0x5182, 0x00, 0, 0}, {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5001, 0xff, 0, 0}, {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0},
++ {0x5505, 0x7f, 0, 0}, {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0},
++ {0x4610, 0x00, 0, 0}, {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0},
++ {0x350b, 0x00, 0, 0}, {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x0b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 300},
++};
++
++static struct reg_value ov5642_setting_15fps_QCIF_176_144[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0x90, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_QCIF_176_144[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x10, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0x90, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_QSXGA_2592_1944[] = {
++ {0x3503, 0x07, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0},
++ {0x3002, 0x00, 0, 0}, {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0},
++ {0x3005, 0xff, 0, 0}, {0x3006, 0xff, 0, 0}, {0x3007, 0x3f, 0, 0},
++ {0x3011, 0x08, 0, 0}, {0x3010, 0x10, 0, 0}, {0x3818, 0xc0, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3602, 0xe4, 0, 0}, {0x3612, 0xac, 0, 0}, {0x3613, 0x44, 0, 0},
++ {0x3622, 0x60, 0, 0}, {0x3623, 0x22, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3705, 0xda, 0, 0}, {0x370a, 0x80, 0, 0}, {0x3801, 0x95, 0, 0},
++ {0x3803, 0x0e, 0, 0}, {0x3804, 0x0a, 0, 0}, {0x3805, 0x20, 0, 0},
++ {0x3806, 0x07, 0, 0}, {0x3807, 0x98, 0, 0}, {0x3808, 0x0a, 0, 0},
++ {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3815, 0x44, 0, 0},
++ {0x3824, 0x11, 0, 0}, {0x3825, 0xac, 0, 0}, {0x3827, 0x0c, 0, 0},
++ {0x3a00, 0x78, 0, 0}, {0x3a0d, 0x10, 0, 0}, {0x3a0e, 0x0d, 0, 0},
++ {0x5682, 0x0a, 0, 0}, {0x5683, 0x20, 0, 0}, {0x5686, 0x07, 0, 0},
++ {0x5687, 0x98, 0, 0}, {0x5001, 0xff, 0, 0}, {0x589b, 0x00, 0, 0},
++ {0x589a, 0xc0, 0, 0}, {0x4407, 0x04, 0, 0}, {0x3008, 0x02, 0, 0},
++ {0x460b, 0x37, 0, 0}, {0x460c, 0x22, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x471c, 0xd0, 0, 0}, {0x3815, 0x01, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3819, 0x80, 0, 0},
++ {0x5002, 0xe0, 0, 0}, {0x530a, 0x01, 0, 0}, {0x530d, 0x10, 0, 0},
++ {0x530c, 0x04, 0, 0}, {0x5312, 0x20, 0, 0}, {0x5282, 0x01, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x3012, 0x00, 0, 0},
++};
++
++
++static struct reg_value ov5642_setting_VGA_2_QVGA[] = {
++ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0xf0, 0, 0}, {0x3815, 0x04, 0, 0},
++};
++
++static struct reg_value ov5642_setting_QSXGA_2_VGA[] = {
++ {0x3503, 0x00, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0},
++ {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0},
++ {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x3818, 0xc1, 0, 0}, {0x3621, 0x87, 0, 0},
++ {0x350c, 0x03, 0, 0}, {0x350d, 0xe8, 0, 0}, {0x3602, 0xfc, 0, 0},
++ {0x3612, 0xff, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3622, 0x60, 0, 0},
++ {0x3623, 0x01, 0, 0}, {0x3604, 0x48, 0, 0}, {0x3705, 0xdb, 0, 0},
++ {0x370a, 0x81, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x3806, 0x03, 0, 0},
++ {0x3807, 0xc0, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3810, 0x40, 0, 0}, {0x3815, 0x04, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3825, 0xb4, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0},
++ {0x5001, 0xff, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x4407, 0x0c, 0, 0}, {0x3008, 0x02, 0, 0}, {0x460b, 0x37, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x471d, 0x05, 0, 0}, {0x4713, 0x02, 0, 0},
++ {0x471c, 0xd0, 0, 0}, {0x3815, 0x04, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x3002, 0x5c, 0, 0}, {0x3819, 0x80, 0, 0}, {0x5002, 0xe0, 0, 0},
++ {0x530a, 0x01, 0, 0}, {0x530d, 0x0c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x5312, 0x40, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x3012, 0x02, 0, 0}, {0x3010, 0x00, 0, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_VGA_640_480[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_VGA_640_480[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++
++static struct reg_value ov5642_setting_30fps_XGA_1024_768[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0},
++ {0x380b, 0x00, 0, 0}, {0x3815, 0x02, 0, 0}, {0x302c, 0x60, 0x60, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_XGA_1024_768[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0},
++ {0x380b, 0x00, 0, 0}, {0x3815, 0x02, 0, 0}, {0x302c, 0x60, 0x60, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_QVGA_320_240[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3808, 0x01, 0, 0},
++ {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0}, {0x380b, 0xf0, 0, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_NTSC_720_480[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0xd0, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x3c, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0x58, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0x58, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x302c, 0x60, 0x60, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_PAL_720_576[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0xd0, 0, 0}, {0x380a, 0x02, 0, 0}, {0x380b, 0x40, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xd8, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x3c, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x04, 0, 0}, {0x3805, 0xb0, 0, 0}, {0x5682, 0x04, 0, 0},
++ {0x5683, 0xb0, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0x58, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0x58, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x302c, 0x60, 0x60, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_720P_1280_720[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x00, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3030, 0x2b, 0, 0},
++ {0x3011, 0x08, 0, 0}, {0x3010, 0x10, 0, 0}, {0x3604, 0x60, 0, 0},
++ {0x3622, 0x60, 0, 0}, {0x3621, 0x09, 0, 0}, {0x3709, 0x00, 0, 0},
++ {0x4000, 0x21, 0, 0}, {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0},
++ {0x3605, 0x04, 0, 0}, {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0},
++ {0x300d, 0x22, 0, 0}, {0x3623, 0x22, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5500, 0x0a, 0, 0},
++ {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0}, {0x5080, 0x08, 0, 0},
++ {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x4708, 0x06, 0, 0}, {0x370c, 0xa0, 0, 0}, {0x3808, 0x0a, 0, 0},
++ {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x5687, 0x94, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x5001, 0xcf, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x4300, 0x30, 0, 0}, {0x460b, 0x35, 0, 0}, {0x471d, 0x00, 0, 0},
++ {0x3002, 0x0c, 0, 0}, {0x3002, 0x00, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x471c, 0x50, 0, 0}, {0x4721, 0x02, 0, 0}, {0x4402, 0x90, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x3815, 0x44, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3818, 0xc8, 0, 0}, {0x3801, 0x88, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x04, 0, 0}, {0x3a13, 0x30, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0},
++ {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0}, {0x3a0d, 0x08, 0, 0},
++ {0x3a0e, 0x06, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x32, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x32, 0, 0}, {0x3a11, 0x80, 0, 0},
++ {0x3a1f, 0x20, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x09, 0, 0}, {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0},
++ {0x3a0b, 0xd0, 0, 0}, {0x3a0d, 0x10, 0, 0}, {0x3a0e, 0x0d, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x589b, 0x00, 0, 0},
++ {0x589a, 0xc0, 0, 0}, {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0},
++ {0x401c, 0x06, 0, 0}, {0x3825, 0xac, 0, 0}, {0x3827, 0x0c, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3503, 0x07, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x350c, 0x02, 0, 0}, {0x350d, 0xe4, 0, 0}, {0x3621, 0xc9, 0, 0},
++ {0x370a, 0x81, 0, 0}, {0x3803, 0x08, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x3806, 0x02, 0, 0}, {0x3807, 0xd0, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x08, 0, 0}, {0x380d, 0x72, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0xc0, 0, 0},
++ {0x3818, 0xc9, 0, 0}, {0x381c, 0x10, 0, 0}, {0x381d, 0xa0, 0, 0},
++ {0x381e, 0x05, 0, 0}, {0x381f, 0xb0, 0, 0}, {0x3820, 0x00, 0, 0},
++ {0x3821, 0x00, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3a08, 0x1b, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x17, 0, 0}, {0x3a0b, 0x20, 0, 0},
++ {0x3a0d, 0x02, 0, 0}, {0x3a0e, 0x01, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0}, {0x5686, 0x02, 0, 0},
++ {0x5687, 0xcc, 0, 0}, {0x5001, 0x7f, 0, 0}, {0x589b, 0x06, 0, 0},
++ {0x589a, 0xc5, 0, 0}, {0x3503, 0x00, 0, 0}, {0x3010, 0x10, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x460b, 0x37, 0, 0}, {0x471c, 0xd0, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x3815, 0x01, 0, 0}, {0x3818, 0x00, 0x08, 0},
++ {0x501f, 0x00, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3002, 0x1c, 0, 0},
++ {0x3819, 0x80, 0, 0}, {0x5002, 0xe0, 0, 0}, {0x3010, 0x30, 0, 0},
++ {0x3a08, 0x06, 0, 0}, {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x05, 0, 0},
++ {0x3a0b, 0x50, 0, 0}, {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x07, 0, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_720P_1280_720[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x00, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3030, 0x2b, 0, 0},
++ {0x3011, 0x08, 0, 0}, {0x3010, 0x10, 0, 0}, {0x3604, 0x60, 0, 0},
++ {0x3622, 0x60, 0, 0}, {0x3621, 0x09, 0, 0}, {0x3709, 0x00, 0, 0},
++ {0x4000, 0x21, 0, 0}, {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0},
++ {0x3605, 0x04, 0, 0}, {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0},
++ {0x300d, 0x22, 0, 0}, {0x3623, 0x22, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5500, 0x0a, 0, 0},
++ {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0}, {0x5080, 0x08, 0, 0},
++ {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x4708, 0x06, 0, 0}, {0x370c, 0xa0, 0, 0}, {0x3808, 0x0a, 0, 0},
++ {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x5687, 0x94, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x5001, 0xcf, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x4300, 0x30, 0, 0}, {0x460b, 0x35, 0, 0}, {0x471d, 0x00, 0, 0},
++ {0x3002, 0x0c, 0, 0}, {0x3002, 0x00, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x471c, 0x50, 0, 0}, {0x4721, 0x02, 0, 0}, {0x4402, 0x90, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x3815, 0x44, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3818, 0xc8, 0, 0}, {0x3801, 0x88, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x04, 0, 0}, {0x3a13, 0x30, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0},
++ {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0}, {0x3a0d, 0x08, 0, 0},
++ {0x3a0e, 0x06, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x32, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x32, 0, 0}, {0x3a11, 0x80, 0, 0},
++ {0x3a1f, 0x20, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x09, 0, 0}, {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0},
++ {0x3a0b, 0xd0, 0, 0}, {0x3a0d, 0x10, 0, 0}, {0x3a0e, 0x0d, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x589b, 0x00, 0, 0},
++ {0x589a, 0xc0, 0, 0}, {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0},
++ {0x401c, 0x06, 0, 0}, {0x3825, 0xac, 0, 0}, {0x3827, 0x0c, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3503, 0x07, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x350c, 0x02, 0, 0}, {0x350d, 0xe4, 0, 0}, {0x3621, 0xc9, 0, 0},
++ {0x370a, 0x81, 0, 0}, {0x3803, 0x08, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x3806, 0x02, 0, 0}, {0x3807, 0xd0, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x08, 0, 0}, {0x380d, 0x72, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0xc0, 0, 0},
++ {0x3818, 0xc9, 0, 0}, {0x381c, 0x10, 0, 0}, {0x381d, 0xa0, 0, 0},
++ {0x381e, 0x05, 0, 0}, {0x381f, 0xb0, 0, 0}, {0x3820, 0x00, 0, 0},
++ {0x3821, 0x00, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3a08, 0x1b, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x17, 0, 0}, {0x3a0b, 0x20, 0, 0},
++ {0x3a0d, 0x02, 0, 0}, {0x3a0e, 0x01, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0}, {0x5686, 0x02, 0, 0},
++ {0x5687, 0xcc, 0, 0}, {0x5001, 0x7f, 0, 0}, {0x589b, 0x06, 0, 0},
++ {0x589a, 0xc5, 0, 0}, {0x3503, 0x00, 0, 0}, {0x3010, 0x10, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x460b, 0x37, 0, 0}, {0x471c, 0xd0, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x3815, 0x01, 0, 0}, {0x3818, 0x00, 0x08, 0},
++ {0x501f, 0x00, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3002, 0x1c, 0, 0},
++ {0x3819, 0x80, 0, 0}, {0x5002, 0xe0, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_1080P_1920_1080[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x00, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3030, 0x2b, 0, 0},
++ {0x3011, 0x08, 0, 0}, {0x3010, 0x10, 0, 0}, {0x3604, 0x60, 0, 0},
++ {0x3622, 0x60, 0, 0}, {0x3621, 0x09, 0, 0}, {0x3709, 0x00, 0, 0},
++ {0x4000, 0x21, 0, 0}, {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0},
++ {0x3605, 0x04, 0, 0}, {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0},
++ {0x300d, 0x22, 0, 0}, {0x3623, 0x22, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5500, 0x0a, 0, 0},
++ {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0}, {0x5080, 0x08, 0, 0},
++ {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x4708, 0x06, 0, 0}, {0x370c, 0xa0, 0, 0}, {0x3808, 0x0a, 0, 0},
++ {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x5687, 0x94, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x5001, 0xcf, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x4300, 0x30, 0, 0}, {0x460b, 0x35, 0, 0}, {0x471d, 0x00, 0, 0},
++ {0x3002, 0x0c, 0, 0}, {0x3002, 0x00, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x471c, 0x50, 0, 0}, {0x4721, 0x02, 0, 0}, {0x4402, 0x90, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x3815, 0x44, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3818, 0xc8, 0, 0}, {0x3801, 0x88, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x04, 0, 0}, {0x3a13, 0x30, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0},
++ {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0}, {0x3a0d, 0x08, 0, 0},
++ {0x3a0e, 0x06, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x32, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x32, 0, 0}, {0x3a11, 0x80, 0, 0},
++ {0x3a1f, 0x20, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x09, 0, 0}, {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0},
++ {0x3a0b, 0xd0, 0, 0}, {0x3a0d, 0x10, 0, 0}, {0x3a0e, 0x0d, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x589b, 0x00, 0, 0},
++ {0x589a, 0xc0, 0, 0}, {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0},
++ {0x401c, 0x06, 0, 0}, {0x3825, 0xac, 0, 0}, {0x3827, 0x0c, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3503, 0x07, 0, 0}, {0x3011, 0x07, 0, 0},
++ {0x350c, 0x04, 0, 0}, {0x350d, 0x58, 0, 0}, {0x3801, 0x8a, 0, 0},
++ {0x3803, 0x0a, 0, 0}, {0x3804, 0x07, 0, 0}, {0x3805, 0x80, 0, 0},
++ {0x3806, 0x04, 0, 0}, {0x3807, 0x39, 0, 0}, {0x3808, 0x07, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0},
++ {0x380c, 0x09, 0, 0}, {0x380d, 0xd6, 0, 0}, {0x380e, 0x04, 0, 0},
++ {0x380f, 0x58, 0, 0}, {0x381c, 0x11, 0, 0}, {0x381d, 0xba, 0, 0},
++ {0x381e, 0x04, 0, 0}, {0x381f, 0x48, 0, 0}, {0x3820, 0x04, 0, 0},
++ {0x3821, 0x18, 0, 0}, {0x3a08, 0x14, 0, 0}, {0x3a09, 0xe0, 0, 0},
++ {0x3a0a, 0x11, 0, 0}, {0x3a0b, 0x60, 0, 0}, {0x3a0d, 0x04, 0, 0},
++ {0x3a0e, 0x03, 0, 0}, {0x5682, 0x07, 0, 0}, {0x5683, 0x60, 0, 0},
++ {0x5686, 0x04, 0, 0}, {0x5687, 0x1c, 0, 0}, {0x5001, 0x7f, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x3010, 0x10, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x460b, 0x37, 0, 0}, {0x471c, 0xd0, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x3815, 0x01, 0, 0}, {0x3818, 0x00, 0x08, 0}, {0x501f, 0x00, 0, 0},
++ {0x4300, 0x30, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3819, 0x80, 0, 0},
++ {0x5002, 0xe0, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_QVGA_320_240[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0xf0, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_NTSC_720_480[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3824, 0x11, 0, 0}, {0x3825, 0xb4, 0, 0}, {0x3826, 0x00, 0, 0},
++ {0x3827, 0x3d, 0, 0}, {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0xd0, 0, 0}, {0x380A, 0x01, 0, 0}, {0x380B, 0xe0, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x3806, 0x03, 0, 0},
++ {0x3807, 0x55, 0, 0}, {0x5686, 0x03, 0, 0}, {0x5687, 0x55, 0, 0},
++ {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_PAL_720_576[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3824, 0x11, 0, 0}, {0x3825, 0xdc, 0, 0}, {0x3826, 0x00, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0xd0, 0, 0}, {0x380A, 0x02, 0, 0}, {0x380B, 0x40, 0, 0},
++ {0x3804, 0x04, 0, 0}, {0x3805, 0xb0, 0, 0}, {0x3806, 0x03, 0, 0},
++ {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0}, {0x5687, 0xc0, 0, 0},
++ {0x5682, 0x04, 0, 0}, {0x5683, 0xb0, 0, 0},
++};
++
++static struct ov5642_mode_info ov5642_mode_info_data[2][ov5642_mode_MAX + 1] = {
++ {
++ {ov5642_mode_VGA_640_480, 640, 480,
++ ov5642_setting_15fps_VGA_640_480,
++ ARRAY_SIZE(ov5642_setting_15fps_VGA_640_480)},
++ {ov5642_mode_QVGA_320_240, 320, 240,
++ ov5642_setting_15fps_QVGA_320_240,
++ ARRAY_SIZE(ov5642_setting_15fps_QVGA_320_240)},
++ {ov5642_mode_NTSC_720_480, 720, 480,
++ ov5642_setting_15fps_NTSC_720_480,
++ ARRAY_SIZE(ov5642_setting_15fps_NTSC_720_480)},
++ {ov5642_mode_PAL_720_576, 720, 576,
++ ov5642_setting_15fps_PAL_720_576,
++ ARRAY_SIZE(ov5642_setting_15fps_PAL_720_576)},
++ {ov5642_mode_720P_1280_720, 1280, 720,
++ ov5642_setting_15fps_720P_1280_720,
++ ARRAY_SIZE(ov5642_setting_15fps_720P_1280_720)},
++ {ov5642_mode_1080P_1920_1080, 1920, 1080,
++ ov5642_setting_15fps_1080P_1920_1080,
++ ARRAY_SIZE(ov5642_setting_15fps_1080P_1920_1080)},
++ {ov5642_mode_QSXGA_2592_1944, 2592, 1944,
++ ov5642_setting_15fps_QSXGA_2592_1944,
++ ARRAY_SIZE(ov5642_setting_15fps_QSXGA_2592_1944)},
++ {ov5642_mode_QCIF_176_144, 176, 144,
++ ov5642_setting_15fps_QCIF_176_144,
++ ARRAY_SIZE(ov5642_setting_15fps_QCIF_176_144)},
++ {ov5642_mode_XGA_1024_768, 1024, 768,
++ ov5642_setting_15fps_XGA_1024_768,
++ ARRAY_SIZE(ov5642_setting_15fps_XGA_1024_768)},
++ },
++ {
++ {ov5642_mode_VGA_640_480, 640, 480,
++ ov5642_setting_30fps_VGA_640_480,
++ ARRAY_SIZE(ov5642_setting_30fps_VGA_640_480)},
++ {ov5642_mode_QVGA_320_240, 320, 240,
++ ov5642_setting_30fps_QVGA_320_240,
++ ARRAY_SIZE(ov5642_setting_30fps_QVGA_320_240)},
++ {ov5642_mode_NTSC_720_480, 720, 480,
++ ov5642_setting_30fps_NTSC_720_480,
++ ARRAY_SIZE(ov5642_setting_30fps_NTSC_720_480)},
++ {ov5642_mode_PAL_720_576, 720, 576,
++ ov5642_setting_30fps_PAL_720_576,
++ ARRAY_SIZE(ov5642_setting_30fps_PAL_720_576)},
++ {ov5642_mode_720P_1280_720, 1280, 720,
++ ov5642_setting_30fps_720P_1280_720,
++ ARRAY_SIZE(ov5642_setting_30fps_720P_1280_720)},
++ {ov5642_mode_1080P_1920_1080, 0, 0, NULL, 0},
++ {ov5642_mode_QSXGA_2592_1944, 0, 0, NULL, 0},
++ {ov5642_mode_QCIF_176_144, 176, 144,
++ ov5642_setting_30fps_QCIF_176_144,
++ ARRAY_SIZE(ov5642_setting_30fps_QCIF_176_144)},
++ {ov5642_mode_XGA_1024_768, 1024, 768,
++ ov5642_setting_30fps_XGA_1024_768,
++ ARRAY_SIZE(ov5642_setting_30fps_XGA_1024_768)},
++ },
++};
++
++static struct regulator *io_regulator;
++static struct regulator *core_regulator;
++static struct regulator *analog_regulator;
++static struct regulator *gpo_regulator;
++
++static int ov5642_probe(struct i2c_client *adapter,
++ const struct i2c_device_id *device_id);
++static int ov5642_remove(struct i2c_client *client);
++
++static s32 ov5642_read_reg(u16 reg, u8 *val);
++static s32 ov5642_write_reg(u16 reg, u8 val);
++
++static const struct i2c_device_id ov5642_id[] = {
++ {"ov5642", 0},
++ {"ov564x", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, ov5642_id);
++
++static struct i2c_driver ov5642_i2c_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "ov5642",
++ },
++ .probe = ov5642_probe,
++ .remove = ov5642_remove,
++ .id_table = ov5642_id,
++};
++
++static void ov5642_standby(s32 enable)
++{
++ if (enable)
++ gpio_set_value(pwn_gpio, 1);
++ else
++ gpio_set_value(pwn_gpio, 0);
++
++ msleep(2);
++}
++
++static void ov5642_reset(void)
++{
++ /* camera reset */
++ gpio_set_value(rst_gpio, 1);
++
++ /* camera power down */
++ gpio_set_value(pwn_gpio, 1);
++ msleep(5);
++
++ gpio_set_value(pwn_gpio, 0);
++ msleep(5);
++
++ gpio_set_value(rst_gpio, 0);
++ msleep(1);
++
++ gpio_set_value(rst_gpio, 1);
++ msleep(5);
++
++ gpio_set_value(pwn_gpio, 1);
++}
++
++static int ov5642_power_on(struct device *dev)
++{
++ int ret = 0;
++
++ io_regulator = devm_regulator_get(dev, "DOVDD");
++ if (!IS_ERR(io_regulator)) {
++ regulator_set_voltage(io_regulator,
++ OV5642_VOLTAGE_DIGITAL_IO,
++ OV5642_VOLTAGE_DIGITAL_IO);
++ ret = regulator_enable(io_regulator);
++ if (ret) {
++ pr_err("%s:io set voltage error\n", __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:io set voltage ok\n", __func__);
++ }
++ } else {
++ pr_err("%s: cannot get io voltage error\n", __func__);
++ io_regulator = NULL;
++ }
++
++ core_regulator = devm_regulator_get(dev, "DVDD");
++ if (!IS_ERR(core_regulator)) {
++ regulator_set_voltage(core_regulator,
++ OV5642_VOLTAGE_DIGITAL_CORE,
++ OV5642_VOLTAGE_DIGITAL_CORE);
++ ret = regulator_enable(core_regulator);
++ if (ret) {
++ pr_err("%s:core set voltage error\n", __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:core set voltage ok\n", __func__);
++ }
++ } else {
++ core_regulator = NULL;
++ pr_err("%s: cannot get core voltage error\n", __func__);
++ }
++
++ analog_regulator = devm_regulator_get(dev, "AVDD");
++ if (!IS_ERR(analog_regulator)) {
++ regulator_set_voltage(analog_regulator,
++ OV5642_VOLTAGE_ANALOG,
++ OV5642_VOLTAGE_ANALOG);
++ ret = regulator_enable(analog_regulator);
++ if (ret) {
++ pr_err("%s:analog set voltage error\n",
++ __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:analog set voltage ok\n", __func__);
++ }
++ } else {
++ analog_regulator = NULL;
++ pr_err("%s: cannot get analog voltage error\n", __func__);
++ }
++
++ return ret;
++}
++
++static s32 ov5642_write_reg(u16 reg, u8 val)
++{
++ u8 au8Buf[3] = {0};
++
++ au8Buf[0] = reg >> 8;
++ au8Buf[1] = reg & 0xff;
++ au8Buf[2] = val;
++
++ if (i2c_master_send(ov5642_data.i2c_client, au8Buf, 3) < 0) {
++ pr_err("%s:write reg error:reg=%x,val=%x\n",
++ __func__, reg, val);
++ return -1;
++ }
++
++ return 0;
++}
++
++static s32 ov5642_read_reg(u16 reg, u8 *val)
++{
++ u8 au8RegBuf[2] = {0};
++ u8 u8RdVal = 0;
++
++ au8RegBuf[0] = reg >> 8;
++ au8RegBuf[1] = reg & 0xff;
++
++ if (2 != i2c_master_send(ov5642_data.i2c_client, au8RegBuf, 2)) {
++ pr_err("%s:write reg error:reg=%x\n",
++ __func__, reg);
++ return -1;
++ }
++
++ if (1 != i2c_master_recv(ov5642_data.i2c_client, &u8RdVal, 1)) {
++ pr_err("%s:read reg error:reg=%x,val=%x\n",
++ __func__, reg, u8RdVal);
++ return -1;
++ }
++
++ *val = u8RdVal;
++
++ return u8RdVal;
++}
++
++static int ov5642_set_rot_mode(struct reg_value *rot_mode)
++{
++ s32 i = 0;
++ s32 iModeSettingArySize = 2;
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int retval = 0;
++ for (i = 0; i < iModeSettingArySize; ++i, ++rot_mode) {
++ Delay_ms = rot_mode->u32Delay_ms;
++ RegAddr = rot_mode->u16RegAddr;
++ Val = rot_mode->u8Val;
++ Mask = rot_mode->u8Mask;
++
++ if (Mask) {
++ retval = ov5642_read_reg(RegAddr, &RegVal);
++ if (retval < 0) {
++ pr_err("%s, read reg 0x%x failed\n",
++ __func__, RegAddr);
++ goto err;
++ }
++
++ Val |= RegVal;
++ Val &= Mask;
++ }
++
++ retval = ov5642_write_reg(RegAddr, Val);
++ if (retval < 0) {
++ pr_err("%s, write reg 0x%x failed\n",
++ __func__, RegAddr);
++ goto err;
++ }
++
++ if (Delay_ms)
++ mdelay(Delay_ms);
++ }
++err:
++ return retval;
++}
++static int ov5642_init_mode(enum ov5642_frame_rate frame_rate,
++ enum ov5642_mode mode);
++static int ov5642_write_snapshot_para(enum ov5642_frame_rate frame_rate,
++ enum ov5642_mode mode);
++static int ov5642_change_mode(enum ov5642_frame_rate new_frame_rate,
++ enum ov5642_frame_rate old_frame_rate,
++ enum ov5642_mode new_mode,
++ enum ov5642_mode orig_mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 i = 0;
++ s32 iModeSettingArySize = 0;
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int retval = 0;
++
++ if (new_mode > ov5642_mode_MAX || new_mode < ov5642_mode_MIN) {
++ pr_err("Wrong ov5642 mode detected!\n");
++ return -1;
++ }
++
++ if ((new_frame_rate == old_frame_rate) &&
++ (new_mode == ov5642_mode_VGA_640_480) &&
++ (orig_mode == ov5642_mode_QSXGA_2592_1944)) {
++ pModeSetting = ov5642_setting_QSXGA_2_VGA;
++ iModeSettingArySize = ARRAY_SIZE(ov5642_setting_QSXGA_2_VGA);
++ ov5642_data.pix.width = 640;
++ ov5642_data.pix.height = 480;
++ } else if ((new_frame_rate == old_frame_rate) &&
++ (new_mode == ov5642_mode_QVGA_320_240) &&
++ (orig_mode == ov5642_mode_VGA_640_480)) {
++ pModeSetting = ov5642_setting_VGA_2_QVGA;
++ iModeSettingArySize = ARRAY_SIZE(ov5642_setting_VGA_2_QVGA);
++ ov5642_data.pix.width = 320;
++ ov5642_data.pix.height = 240;
++ } else {
++ retval = ov5642_write_snapshot_para(new_frame_rate, new_mode);
++ goto err;
++ }
++
++ if (ov5642_data.pix.width == 0 || ov5642_data.pix.height == 0 ||
++ pModeSetting == NULL || iModeSettingArySize == 0)
++ return -EINVAL;
++
++ for (i = 0; i < iModeSettingArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++
++ if (Mask) {
++ retval = ov5642_read_reg(RegAddr, &RegVal);
++ if (retval < 0) {
++ pr_err("read reg error addr=0x%x", RegAddr);
++ goto err;
++ }
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5642_write_reg(RegAddr, Val);
++ if (retval < 0) {
++ pr_err("write reg error addr=0x%x", RegAddr);
++ goto err;
++ }
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++static int ov5642_init_mode(enum ov5642_frame_rate frame_rate,
++ enum ov5642_mode mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 i = 0;
++ s32 iModeSettingArySize = 0;
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int retval = 0;
++
++ if (mode > ov5642_mode_MAX || mode < ov5642_mode_MIN) {
++ pr_err("Wrong ov5642 mode detected!\n");
++ return -1;
++ }
++
++ pModeSetting = ov5642_mode_info_data[frame_rate][mode].init_data_ptr;
++ iModeSettingArySize =
++ ov5642_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5642_data.pix.width = ov5642_mode_info_data[frame_rate][mode].width;
++ ov5642_data.pix.height = ov5642_mode_info_data[frame_rate][mode].height;
++
++ if (ov5642_data.pix.width == 0 || ov5642_data.pix.height == 0 ||
++ pModeSetting == NULL || iModeSettingArySize == 0)
++ return -EINVAL;
++
++ for (i = 0; i < iModeSettingArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++
++ if (Mask) {
++ retval = ov5642_read_reg(RegAddr, &RegVal);
++ if (retval < 0) {
++ pr_err("read reg error addr=0x%x", RegAddr);
++ goto err;
++ }
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5642_write_reg(RegAddr, Val);
++ if (retval < 0) {
++ pr_err("write reg error addr=0x%x", RegAddr);
++ goto err;
++ }
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++
++static int ov5642_write_snapshot_para(enum ov5642_frame_rate frame_rate,
++ enum ov5642_mode mode)
++{
++ int ret = 0;
++ bool m_60Hz = false;
++ u16 cap_frame_rate = 50;
++ u16 g_prev_frame_rate = 225;
++
++ u8 ev_low, ev_mid, ev_high;
++ u8 ret_l, ret_m, ret_h, gain, lines_10ms;
++ u16 ulcap_ev, icap_gain, prev_maxlines;
++ u32 ulcap_ev_gain, cap_maxlines, g_prev_ev;
++
++ ov5642_write_reg(0x3503, 0x07);
++
++ ret_h = ret_m = ret_l = 0;
++ g_prev_ev = 0;
++ ov5642_read_reg(0x3500, &ret_h);
++ ov5642_read_reg(0x3501, &ret_m);
++ ov5642_read_reg(0x3502, &ret_l);
++ g_prev_ev = (ret_h << 12) + (ret_m << 4) + (ret_l >> 4);
++
++ ret_h = ret_m = ret_l = 0;
++ prev_maxlines = 0;
++ ov5642_read_reg(0x380e, &ret_h);
++ ov5642_read_reg(0x380f, &ret_l);
++ prev_maxlines = (ret_h << 8) + ret_l;
++ /*Read back AGC Gain for preview*/
++ gain = 0;
++ ov5642_read_reg(0x350b, &gain);
++
++ ret = ov5642_init_mode(frame_rate, mode);
++ if (ret < 0)
++ return ret;
++
++ ret_h = ret_m = ret_l = 0;
++ ov5642_read_reg(0x380e, &ret_h);
++ ov5642_read_reg(0x380f, &ret_l);
++ cap_maxlines = (ret_h << 8) + ret_l;
++ if (m_60Hz == true)
++ lines_10ms = cap_frame_rate * cap_maxlines/12000;
++ else
++ lines_10ms = cap_frame_rate * cap_maxlines/10000;
++
++ if (prev_maxlines == 0)
++ prev_maxlines = 1;
++
++ ulcap_ev = (g_prev_ev*(cap_frame_rate)*(cap_maxlines))/
++ (((prev_maxlines)*(g_prev_frame_rate)));
++ icap_gain = (gain & 0x0f) + 16;
++ if (gain & 0x10)
++ icap_gain = icap_gain << 1;
++
++ if (gain & 0x20)
++ icap_gain = icap_gain << 1;
++
++ if (gain & 0x40)
++ icap_gain = icap_gain << 1;
++
++ if (gain & 0x80)
++ icap_gain = icap_gain << 1;
++
++ ulcap_ev_gain = 2 * ulcap_ev * icap_gain;
++
++ if (ulcap_ev_gain < cap_maxlines*16) {
++ ulcap_ev = ulcap_ev_gain/16;
++ if (ulcap_ev > lines_10ms) {
++ ulcap_ev /= lines_10ms;
++ ulcap_ev *= lines_10ms;
++ }
++ } else
++ ulcap_ev = cap_maxlines;
++
++ if (ulcap_ev == 0)
++ ulcap_ev = 1;
++
++ icap_gain = (ulcap_ev_gain*2/ulcap_ev + 1)/2;
++ ev_low = ((unsigned char)ulcap_ev)<<4;
++ ev_mid = (unsigned char)(ulcap_ev >> 4) & 0xff;
++ ev_high = (unsigned char)(ulcap_ev >> 12);
++
++ gain = 0;
++ if (icap_gain > 31) {
++ gain |= 0x10;
++ icap_gain = icap_gain >> 1;
++ }
++ if (icap_gain > 31) {
++ gain |= 0x20;
++ icap_gain = icap_gain >> 1;
++ }
++ if (icap_gain > 31) {
++ gain |= 0x40;
++ icap_gain = icap_gain >> 1;
++ }
++ if (icap_gain > 31) {
++ gain |= 0x80;
++ icap_gain = icap_gain >> 1;
++ }
++ if (icap_gain > 16)
++ gain |= ((icap_gain - 16) & 0x0f);
++
++ if (gain == 0x10)
++ gain = 0x11;
++
++ ov5642_write_reg(0x350b, gain);
++ ov5642_write_reg(0x3502, ev_low);
++ ov5642_write_reg(0x3501, ev_mid);
++ ov5642_write_reg(0x3500, ev_high);
++ msleep(500);
++
++ return ret ;
++}
++
++
++/* --------------- IOCTL functions from v4l2_int_ioctl_desc --------------- */
++
++static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
++{
++ if (s == NULL) {
++ pr_err(" ERROR!! no slave device set!\n");
++ return -1;
++ }
++
++ memset(p, 0, sizeof(*p));
++ p->u.bt656.clock_curr = ov5642_data.mclk;
++ pr_debug(" clock_curr=mclk=%d\n", ov5642_data.mclk);
++ p->if_type = V4L2_IF_TYPE_BT656;
++ p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
++ p->u.bt656.clock_min = OV5642_XCLK_MIN;
++ p->u.bt656.clock_max = OV5642_XCLK_MAX;
++ p->u.bt656.bt_sync_correct = 1; /* Indicate external vsync */
++
++ return 0;
++}
++
++/*!
++ * ioctl_s_power - V4L2 sensor interface handler for VIDIOC_S_POWER ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @on: indicates power mode (on or off)
++ *
++ * Turns the power on or off, depending on the value of on and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_power(struct v4l2_int_device *s, int on)
++{
++ struct sensor_data *sensor = s->priv;
++
++ if (on && !sensor->on) {
++ if (io_regulator)
++ if (regulator_enable(io_regulator) != 0)
++ return -EIO;
++ if (core_regulator)
++ if (regulator_enable(core_regulator) != 0)
++ return -EIO;
++ if (gpo_regulator)
++ if (regulator_enable(gpo_regulator) != 0)
++ return -EIO;
++ if (analog_regulator)
++ if (regulator_enable(analog_regulator) != 0)
++ return -EIO;
++ /* Make sure power on */
++ ov5642_standby(0);
++ } else if (!on && sensor->on) {
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++ if (core_regulator)
++ regulator_disable(core_regulator);
++ if (io_regulator)
++ regulator_disable(io_regulator);
++ if (gpo_regulator)
++ regulator_disable(gpo_regulator);
++
++ ov5642_standby(1);
++ }
++
++ sensor->on = on;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
++ *
++ * Returns the sensor's video CAPTURE parameters.
++ */
++static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_captureparm *cparm = &a->parm.capture;
++ int ret = 0;
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cparm->capability = sensor->streamcap.capability;
++ cparm->timeperframe = sensor->streamcap.timeperframe;
++ cparm->capturemode = sensor->streamcap.capturemode;
++ ret = 0;
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
++ *
++ * Configures the sensor to use the input parameters, if possible. If
++ * not possible, reverts to the old parameters and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
++ u32 tgt_fps, old_fps; /* target frames per secound */
++ enum ov5642_frame_rate new_frame_rate, old_frame_rate;
++ int ret = 0;
++
++ /* Make sure power on */
++ ov5642_standby(0);
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ /* Check that the new frame rate is allowed. */
++ if ((timeperframe->numerator == 0) ||
++ (timeperframe->denominator == 0)) {
++ timeperframe->denominator = DEFAULT_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps > MAX_FPS) {
++ timeperframe->denominator = MAX_FPS;
++ timeperframe->numerator = 1;
++ } else if (tgt_fps < MIN_FPS) {
++ timeperframe->denominator = MIN_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ /* Actual frame rate we use */
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps == 15)
++ new_frame_rate = ov5642_15_fps;
++ else if (tgt_fps == 30)
++ new_frame_rate = ov5642_30_fps;
++ else {
++ pr_err(" The camera frame rate is not supported!\n");
++ return -EINVAL;
++ }
++
++ if (sensor->streamcap.timeperframe.numerator != 0)
++ old_fps = sensor->streamcap.timeperframe.denominator /
++ sensor->streamcap.timeperframe.numerator;
++ else
++ old_fps = 30;
++
++ if (old_fps == 15)
++ old_frame_rate = ov5642_15_fps;
++ else if (old_fps == 30)
++ old_frame_rate = ov5642_30_fps;
++ else {
++ pr_warning(" No valid frame rate set!\n");
++ old_frame_rate = ov5642_30_fps;
++ }
++
++ ret = ov5642_change_mode(new_frame_rate, old_frame_rate,
++ a->parm.capture.capturemode,
++ sensor->streamcap.capturemode);
++ if (ret < 0)
++ return ret;
++
++ sensor->streamcap.timeperframe = *timeperframe;
++ sensor->streamcap.capturemode =
++ (u32)a->parm.capture.capturemode;
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ pr_debug(" type is not " \
++ "V4L2_BUF_TYPE_VIDEO_CAPTURE but %d\n",
++ a->type);
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
++ * @s: pointer to standard V4L2 device structure
++ * @f: pointer to standard V4L2 v4l2_format structure
++ *
++ * Returns the sensor's current pixel format in the v4l2_format
++ * parameter.
++ */
++static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
++{
++ struct sensor_data *sensor = s->priv;
++
++ f->fmt.pix = sensor->pix;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control's current
++ * value from the video_control[] array. Otherwise, returns -EINVAL
++ * if the control is not supported.
++ */
++static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int ret = 0;
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ vc->value = ov5642_data.brightness;
++ break;
++ case V4L2_CID_HUE:
++ vc->value = ov5642_data.hue;
++ break;
++ case V4L2_CID_CONTRAST:
++ vc->value = ov5642_data.contrast;
++ break;
++ case V4L2_CID_SATURATION:
++ vc->value = ov5642_data.saturation;
++ break;
++ case V4L2_CID_RED_BALANCE:
++ vc->value = ov5642_data.red;
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ vc->value = ov5642_data.blue;
++ break;
++ case V4L2_CID_EXPOSURE:
++ vc->value = ov5642_data.ae_mode;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
++ *
++ * If the requested control is supported, sets the control's current
++ * value in HW (and updates the video_control[] array). Otherwise,
++ * returns -EINVAL if the control is not supported.
++ */
++static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int retval = 0;
++ struct sensor_data *sensor = s->priv;
++ __u32 captureMode = sensor->streamcap.capturemode;
++ struct reg_value *rot_mode = NULL;
++
++ pr_debug("In ov5642:ioctl_s_ctrl %d\n",
++ vc->id);
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ break;
++ case V4L2_CID_CONTRAST:
++ break;
++ case V4L2_CID_SATURATION:
++ break;
++ case V4L2_CID_HUE:
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_RED_BALANCE:
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ break;
++ case V4L2_CID_GAMMA:
++ break;
++ case V4L2_CID_EXPOSURE:
++ break;
++ case V4L2_CID_AUTOGAIN:
++ break;
++ case V4L2_CID_GAIN:
++ break;
++ case V4L2_CID_HFLIP:
++ break;
++ case V4L2_CID_VFLIP:
++ break;
++ case V4L2_CID_MXC_ROT:
++ case V4L2_CID_MXC_VF_ROT:
++ switch (vc->value) {
++ case V4L2_MXC_ROTATE_NONE:
++ if (captureMode == ov5642_mode_QSXGA_2592_1944)
++ rot_mode = ov5642_rot_none_FULL;
++ else
++ rot_mode = ov5642_rot_none_VGA;
++
++ if (ov5642_set_rot_mode(rot_mode))
++ retval = -EPERM;
++ break;
++ case V4L2_MXC_ROTATE_VERT_FLIP:
++ if (captureMode == ov5642_mode_QSXGA_2592_1944)
++ rot_mode = ov5642_rot_vert_flip_FULL;
++ else
++ rot_mode = ov5642_rot_vert_flip_VGA ;
++
++ if (ov5642_set_rot_mode(rot_mode))
++ retval = -EPERM;
++ break;
++ case V4L2_MXC_ROTATE_HORIZ_FLIP:
++ if (captureMode == ov5642_mode_QSXGA_2592_1944)
++ rot_mode = ov5642_rot_horiz_flip_FULL;
++ else
++ rot_mode = ov5642_rot_horiz_flip_VGA;
++
++ if (ov5642_set_rot_mode(rot_mode))
++ retval = -EPERM;
++ break;
++ case V4L2_MXC_ROTATE_180:
++ if (captureMode == ov5642_mode_QSXGA_2592_1944)
++ rot_mode = ov5642_rot_180_FULL;
++ else
++ rot_mode = ov5642_rot_180_VGA;
++
++ if (ov5642_set_rot_mode(rot_mode))
++ retval = -EPERM;
++ break;
++ default:
++ retval = -EPERM;
++ break;
++ }
++ break;
++ default:
++ retval = -EPERM;
++ break;
++ }
++
++ return retval;
++}
++
++/*!
++ * ioctl_enum_framesizes - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMESIZES ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_framesizes(struct v4l2_int_device *s,
++ struct v4l2_frmsizeenum *fsize)
++{
++ if (fsize->index > ov5642_mode_MAX)
++ return -EINVAL;
++
++ fsize->pixel_format = ov5642_data.pix.pixelformat;
++ fsize->discrete.width =
++ max(ov5642_mode_info_data[0][fsize->index].width,
++ ov5642_mode_info_data[1][fsize->index].width);
++ fsize->discrete.height =
++ max(ov5642_mode_info_data[0][fsize->index].height,
++ ov5642_mode_info_data[1][fsize->index].height);
++ return 0;
++}
++
++/*!
++ * ioctl_enum_frameintervals - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMEINTERVALS ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fival: standard V4L2 VIDIOC_ENUM_FRAMEINTERVALS ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_frameintervals(struct v4l2_int_device *s,
++ struct v4l2_frmivalenum *fival)
++{
++ int i, j, count;
++
++ if (fival->index < 0 || fival->index > ov5642_mode_MAX)
++ return -EINVAL;
++
++ if (fival->pixel_format == 0 || fival->width == 0 ||
++ fival->height == 0) {
++ pr_warning("Please assign pixelformat, width and height.\n");
++ return -EINVAL;
++ }
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++
++ count = 0;
++ for (i = 0; i < ARRAY_SIZE(ov5642_mode_info_data); i++) {
++ for (j = 0; j < (ov5642_mode_MAX + 1); j++) {
++ if (fival->pixel_format == ov5642_data.pix.pixelformat
++ && fival->width == ov5642_mode_info_data[i][j].width
++ && fival->height == ov5642_mode_info_data[i][j].height
++ && ov5642_mode_info_data[i][j].init_data_ptr != NULL) {
++ count++;
++ }
++ if (fival->index == (count - 1)) {
++ fival->discrete.denominator =
++ ov5642_framerates[i];
++ return 0;
++ }
++ }
++ }
++
++ return -EINVAL;
++}
++
++/*!
++ * ioctl_g_chip_ident - V4L2 sensor interface handler for
++ * VIDIOC_DBG_G_CHIP_IDENT ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @id: pointer to int
++ *
++ * Return 0.
++ */
++static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
++{
++ ((struct v4l2_dbg_chip_ident *)id)->match.type =
++ V4L2_CHIP_MATCH_I2C_DRIVER;
++ strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name, "ov5642_camera");
++
++ return 0;
++}
++
++/*!
++ * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
++ * @s: pointer to standard V4L2 device structure
++ */
++static int ioctl_init(struct v4l2_int_device *s)
++{
++
++ return 0;
++}
++
++/*!
++ * ioctl_enum_fmt_cap - V4L2 sensor interface handler for VIDIOC_ENUM_FMT
++ * @s: pointer to standard V4L2 device structure
++ * @fmt: pointer to standard V4L2 fmt description structure
++ *
++ * Return 0.
++ */
++static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
++ struct v4l2_fmtdesc *fmt)
++{
++ if (fmt->index > 0) /* only 1 pixelformat support so far */
++ return -EINVAL;
++
++ fmt->pixelformat = ov5642_data.pix.pixelformat;
++
++ return 0;
++}
++
++/*!
++ * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Initialise the device when slave attaches to the master.
++ */
++static int ioctl_dev_init(struct v4l2_int_device *s)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 i = 0;
++ s32 iModeSettingArySize = 0;
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int retval = 0;
++
++ struct sensor_data *sensor = s->priv;
++ u32 tgt_xclk; /* target xclk */
++ u32 tgt_fps; /* target frames per secound */
++ enum ov5642_frame_rate frame_rate;
++
++ ov5642_data.on = true;
++
++ /* mclk */
++ tgt_xclk = ov5642_data.mclk;
++ tgt_xclk = min(tgt_xclk, (u32)OV5642_XCLK_MAX);
++ tgt_xclk = max(tgt_xclk, (u32)OV5642_XCLK_MIN);
++ ov5642_data.mclk = tgt_xclk;
++
++ pr_debug(" Setting mclk to %d MHz\n", tgt_xclk / 1000000);
++
++ /* Default camera frame rate is set in probe */
++ tgt_fps = sensor->streamcap.timeperframe.denominator /
++ sensor->streamcap.timeperframe.numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5642_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5642_30_fps;
++ else
++ return -EINVAL; /* Only support 15fps or 30fps now. */
++
++ pModeSetting = ov5642_initial_setting;
++ iModeSettingArySize = ARRAY_SIZE(ov5642_initial_setting);
++
++ for (i = 0; i < iModeSettingArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++ if (Mask) {
++ retval = ov5642_read_reg(RegAddr, &RegVal);
++ if (retval < 0)
++ goto err;
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5642_write_reg(RegAddr, Val);
++ if (retval < 0)
++ goto err;
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++
++/*!
++ * ioctl_dev_exit - V4L2 sensor interface handler for vidioc_int_dev_exit_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Delinitialise the device when slave detaches to the master.
++ */
++static int ioctl_dev_exit(struct v4l2_int_device *s)
++{
++ return 0;
++}
++
++/*!
++ * This structure defines all the ioctls for this module and links them to the
++ * enumeration.
++ */
++static struct v4l2_int_ioctl_desc ov5642_ioctl_desc[] = {
++ { vidioc_int_dev_init_num,
++ (v4l2_int_ioctl_func *)ioctl_dev_init },
++ { vidioc_int_dev_exit_num, ioctl_dev_exit},
++ { vidioc_int_s_power_num,
++ (v4l2_int_ioctl_func *)ioctl_s_power },
++ { vidioc_int_g_ifparm_num,
++ (v4l2_int_ioctl_func *)ioctl_g_ifparm },
++/* { vidioc_int_g_needs_reset_num,
++ (v4l2_int_ioctl_func *)ioctl_g_needs_reset }, */
++/* { vidioc_int_reset_num,
++ (v4l2_int_ioctl_func *)ioctl_reset }, */
++ { vidioc_int_init_num,
++ (v4l2_int_ioctl_func *)ioctl_init },
++ { vidioc_int_enum_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap },
++/* { vidioc_int_try_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_try_fmt_cap }, */
++ { vidioc_int_g_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_g_fmt_cap },
++/* { vidioc_int_s_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_s_fmt_cap }, */
++ { vidioc_int_g_parm_num,
++ (v4l2_int_ioctl_func *)ioctl_g_parm },
++ { vidioc_int_s_parm_num,
++ (v4l2_int_ioctl_func *)ioctl_s_parm },
++/* { vidioc_int_queryctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_queryctrl }, */
++ { vidioc_int_g_ctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_g_ctrl },
++ { vidioc_int_s_ctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_s_ctrl },
++ { vidioc_int_enum_framesizes_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_framesizes },
++ { vidioc_int_enum_frameintervals_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_frameintervals },
++ { vidioc_int_g_chip_ident_num,
++ (v4l2_int_ioctl_func *)ioctl_g_chip_ident },
++};
++
++static struct v4l2_int_slave ov5642_slave = {
++ .ioctls = ov5642_ioctl_desc,
++ .num_ioctls = ARRAY_SIZE(ov5642_ioctl_desc),
++};
++
++static struct v4l2_int_device ov5642_int_device = {
++ .module = THIS_MODULE,
++ .name = "ov5642",
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &ov5642_slave,
++ },
++};
++
++/*!
++ * ov5642 I2C probe function
++ *
++ * @param adapter struct i2c_adapter *
++ * @return Error code indicating success or failure
++ */
++static int ov5642_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct pinctrl *pinctrl;
++ struct device *dev = &client->dev;
++ int retval;
++ u8 chip_id_high, chip_id_low;
++
++ /* ov5642 pinctrl */
++ pinctrl = devm_pinctrl_get_select_default(dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(dev, "ov5642 setup pinctrl failed!");
++ return PTR_ERR(pinctrl);
++ }
++
++ /* request power down pin */
++ pwn_gpio = of_get_named_gpio(dev->of_node, "pwn-gpios", 0);
++ if (!gpio_is_valid(pwn_gpio)) {
++ dev_warn(dev, "no sensor pwdn pin available");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, pwn_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5642_pwdn");
++ if (retval < 0)
++ return retval;
++
++ /* request reset pin */
++ rst_gpio = of_get_named_gpio(dev->of_node, "rst-gpios", 0);
++ if (!gpio_is_valid(rst_gpio)) {
++ dev_warn(dev, "no sensor reset pin available");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, rst_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5642_reset");
++ if (retval < 0)
++ return retval;
++
++ /* Set initial values for the sensor struct. */
++ memset(&ov5642_data, 0, sizeof(ov5642_data));
++ ov5642_data.sensor_clk = devm_clk_get(dev, "csi_mclk");
++ if (IS_ERR(ov5642_data.sensor_clk)) {
++ /* assuming clock enabled by default */
++ ov5642_data.sensor_clk = NULL;
++ dev_err(dev, "clock-frequency missing or invalid\n");
++ return PTR_ERR(ov5642_data.sensor_clk);
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk",
++ (u32 *) &(ov5642_data.mclk));
++ if (retval) {
++ dev_err(dev, "mclk missing or invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk_source",
++ (u32 *) &(ov5642_data.mclk_source));
++ if (retval) {
++ dev_err(dev, "mclk_source missing or invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "csi_id",
++ &(ov5642_data.csi));
++ if (retval) {
++ dev_err(dev, "csi_id missing or invalid\n");
++ return retval;
++ }
++
++ clk_prepare_enable(ov5642_data.sensor_clk);
++
++ ov5642_data.io_init = ov5642_reset;
++ ov5642_data.i2c_client = client;
++ ov5642_data.pix.pixelformat = V4L2_PIX_FMT_YUYV;
++ ov5642_data.pix.width = 640;
++ ov5642_data.pix.height = 480;
++ ov5642_data.streamcap.capability = V4L2_MODE_HIGHQUALITY |
++ V4L2_CAP_TIMEPERFRAME;
++ ov5642_data.streamcap.capturemode = 0;
++ ov5642_data.streamcap.timeperframe.denominator = DEFAULT_FPS;
++ ov5642_data.streamcap.timeperframe.numerator = 1;
++
++ ov5642_power_on(&client->dev);
++
++ ov5642_reset();
++
++ ov5642_standby(0);
++
++ retval = ov5642_read_reg(OV5642_CHIP_ID_HIGH_BYTE, &chip_id_high);
++ if (retval < 0 || chip_id_high != 0x56) {
++ pr_warning("camera ov5642 is not found\n");
++ clk_disable_unprepare(ov5642_data.sensor_clk);
++ return -ENODEV;
++ }
++ retval = ov5642_read_reg(OV5642_CHIP_ID_LOW_BYTE, &chip_id_low);
++ if (retval < 0 || chip_id_low != 0x42) {
++ pr_warning("camera ov5642 is not found\n");
++ clk_disable_unprepare(ov5642_data.sensor_clk);
++ return -ENODEV;
++ }
++
++ ov5642_standby(1);
++
++ ov5642_int_device.priv = &ov5642_data;
++ retval = v4l2_int_device_register(&ov5642_int_device);
++
++ clk_disable_unprepare(ov5642_data.sensor_clk);
++
++ pr_info("camera ov5642 is found\n");
++ return retval;
++}
++
++/*!
++ * ov5642 I2C detach function
++ *
++ * @param client struct i2c_client *
++ * @return Error code indicating success or failure
++ */
++static int ov5642_remove(struct i2c_client *client)
++{
++ v4l2_int_device_unregister(&ov5642_int_device);
++
++ if (gpo_regulator)
++ regulator_disable(gpo_regulator);
++
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++
++ if (core_regulator)
++ regulator_disable(core_regulator);
++
++ if (io_regulator)
++ regulator_disable(io_regulator);
++
++ return 0;
++}
++
++/*!
++ * ov5642 init function
++ * Called by insmod ov5642_camera.ko.
++ *
++ * @return Error code indicating success or failure
++ */
++static __init int ov5642_init(void)
++{
++ u8 err;
++
++ err = i2c_add_driver(&ov5642_i2c_driver);
++ if (err != 0)
++ pr_err("%s:driver registration failed, error=%d\n",
++ __func__, err);
++
++ return err;
++}
++
++/*!
++ * OV5642 cleanup function
++ * Called on rmmod ov5642_camera.ko
++ *
++ * @return Error code indicating success or failure
++ */
++static void __exit ov5642_clean(void)
++{
++ i2c_del_driver(&ov5642_i2c_driver);
++}
++
++module_init(ov5642_init);
++module_exit(ov5642_clean);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("OV5642 Camera Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION("1.0");
++MODULE_ALIAS("CSI");
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/output/Kconfig linux-openelec/drivers/media/platform/mxc/output/Kconfig
+--- linux-3.14.36/drivers/media/platform/mxc/output/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/output/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,5 @@
++config VIDEO_MXC_IPU_OUTPUT
++ tristate "IPU v4l2 output support"
++ depends on VIDEO_MXC_OUTPUT && MXC_IPU
++ ---help---
++ This is the video4linux2 driver for IPU post processing video output.
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/output/Makefile linux-openelec/drivers/media/platform/mxc/output/Makefile
+--- linux-3.14.36/drivers/media/platform/mxc/output/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/output/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1 @@
++obj-$(CONFIG_VIDEO_MXC_IPU_OUTPUT) += mxc_vout.o
+diff -Nur linux-3.14.36/drivers/media/platform/mxc/output/mxc_vout.c linux-openelec/drivers/media/platform/mxc/output/mxc_vout.c
+--- linux-3.14.36/drivers/media/platform/mxc/output/mxc_vout.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/platform/mxc/output/mxc_vout.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2265 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/console.h>
++#include <linux/dma-mapping.h>
++#include <linux/init.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/mxc_v4l2.h>
++#include <linux/platform_device.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/videodev2.h>
++#include <linux/vmalloc.h>
++
++#include <media/videobuf-dma-contig.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-ioctl.h>
++
++#define UYVY_BLACK (0x00800080)
++#define RGB_BLACK (0x0)
++#define UV_BLACK (0x80)
++#define Y_BLACK (0x0)
++
++#define MAX_FB_NUM 6
++#define FB_BUFS 3
++#define VDOA_FB_BUFS (FB_BUFS - 1)
++#define VALID_HEIGHT_1080P (1080)
++#define FRAME_HEIGHT_1080P (1088)
++#define FRAME_WIDTH_1080P (1920)
++#define CHECK_TILED_1080P_DISPLAY(vout) \
++ ((((vout)->task.input.format == IPU_PIX_FMT_TILED_NV12) || \
++ ((vout)->task.input.format == IPU_PIX_FMT_TILED_NV12F)) &&\
++ ((vout)->task.input.width == FRAME_WIDTH_1080P) && \
++ ((vout)->task.input.height == FRAME_HEIGHT_1080P) && \
++ ((vout)->task.input.crop.w == FRAME_WIDTH_1080P) && \
++ (((vout)->task.input.crop.h == FRAME_HEIGHT_1080P) || \
++ ((vout)->task.input.crop.h == VALID_HEIGHT_1080P)) && \
++ ((vout)->task.output.width == FRAME_WIDTH_1080P) && \
++ ((vout)->task.output.height == VALID_HEIGHT_1080P) && \
++ ((vout)->task.output.crop.w == FRAME_WIDTH_1080P) && \
++ ((vout)->task.output.crop.h == VALID_HEIGHT_1080P))
++#define CHECK_TILED_1080P_STREAM(vout) \
++ ((((vout)->task.input.format == IPU_PIX_FMT_TILED_NV12) || \
++ ((vout)->task.input.format == IPU_PIX_FMT_TILED_NV12F)) &&\
++ ((vout)->task.input.width == FRAME_WIDTH_1080P) && \
++ ((vout)->task.input.crop.w == FRAME_WIDTH_1080P) && \
++ ((vout)->task.input.height == FRAME_HEIGHT_1080P) && \
++ ((vout)->task.input.crop.h == FRAME_HEIGHT_1080P))
++#define IS_PLANAR_PIXEL_FORMAT(format) \
++ (format == IPU_PIX_FMT_NV12 || \
++ format == IPU_PIX_FMT_YUV420P2 || \
++ format == IPU_PIX_FMT_YUV420P || \
++ format == IPU_PIX_FMT_YVU420P || \
++ format == IPU_PIX_FMT_YUV422P || \
++ format == IPU_PIX_FMT_YVU422P || \
++ format == IPU_PIX_FMT_YUV444P)
++
++#define NSEC_PER_FRAME_30FPS (33333333)
++
++struct mxc_vout_fb {
++ char *name;
++ int ipu_id;
++ struct v4l2_rect crop_bounds;
++ unsigned int disp_fmt;
++ bool disp_support_csc;
++ bool disp_support_windows;
++};
++
++struct dma_mem {
++ void *vaddr;
++ dma_addr_t paddr;
++ size_t size;
++};
++
++struct mxc_vout_output {
++ int open_cnt;
++ struct fb_info *fbi;
++ unsigned long fb_smem_start;
++ unsigned long fb_smem_len;
++ struct video_device *vfd;
++ struct mutex mutex;
++ struct mutex task_lock;
++ enum v4l2_buf_type type;
++
++ struct videobuf_queue vbq;
++ spinlock_t vbq_lock;
++
++ struct list_head queue_list;
++ struct list_head active_list;
++
++ struct v4l2_rect crop_bounds;
++ unsigned int disp_fmt;
++ struct mxcfb_pos win_pos;
++ bool disp_support_windows;
++ bool disp_support_csc;
++
++ bool fmt_init;
++ bool release;
++ bool linear_bypass_pp;
++ bool vdoa_1080p;
++ bool tiled_bypass_pp;
++ struct v4l2_rect in_rect;
++ struct ipu_task task;
++ struct ipu_task vdoa_task;
++ struct dma_mem vdoa_work;
++ struct dma_mem vdoa_output[VDOA_FB_BUFS];
++
++ bool timer_stop;
++ struct hrtimer timer;
++ struct workqueue_struct *v4l_wq;
++ struct work_struct disp_work;
++ unsigned long frame_count;
++ unsigned long vdi_frame_cnt;
++ ktime_t start_ktime;
++
++ int ctrl_rotate;
++ int ctrl_vflip;
++ int ctrl_hflip;
++
++ dma_addr_t disp_bufs[FB_BUFS];
++
++ struct videobuf_buffer *pre1_vb;
++ struct videobuf_buffer *pre2_vb;
++};
++
++struct mxc_vout_dev {
++ struct device *dev;
++ struct v4l2_device v4l2_dev;
++ struct mxc_vout_output *out[MAX_FB_NUM];
++ int out_num;
++};
++
++/* Driver Configuration macros */
++#define VOUT_NAME "mxc_vout"
++
++/* Variables configurable through module params*/
++static int debug;
++static int vdi_rate_double;
++static int video_nr = 16;
++
++/* Module parameters */
++module_param(video_nr, int, S_IRUGO);
++MODULE_PARM_DESC(video_nr, "video device numbers");
++module_param(debug, int, 0600);
++MODULE_PARM_DESC(debug, "Debug level (0-1)");
++module_param(vdi_rate_double, int, 0600);
++MODULE_PARM_DESC(vdi_rate_double, "vdi frame rate double on/off");
++
++static const struct v4l2_fmtdesc mxc_formats[] = {
++ {
++ .description = "RGB565",
++ .pixelformat = V4L2_PIX_FMT_RGB565,
++ },
++ {
++ .description = "BGR24",
++ .pixelformat = V4L2_PIX_FMT_BGR24,
++ },
++ {
++ .description = "RGB24",
++ .pixelformat = V4L2_PIX_FMT_RGB24,
++ },
++ {
++ .description = "RGB32",
++ .pixelformat = V4L2_PIX_FMT_RGB32,
++ },
++ {
++ .description = "BGR32",
++ .pixelformat = V4L2_PIX_FMT_BGR32,
++ },
++ {
++ .description = "NV12",
++ .pixelformat = V4L2_PIX_FMT_NV12,
++ },
++ {
++ .description = "UYVY",
++ .pixelformat = V4L2_PIX_FMT_UYVY,
++ },
++ {
++ .description = "YUYV",
++ .pixelformat = V4L2_PIX_FMT_YUYV,
++ },
++ {
++ .description = "YUV422 planar",
++ .pixelformat = V4L2_PIX_FMT_YUV422P,
++ },
++ {
++ .description = "YUV444",
++ .pixelformat = V4L2_PIX_FMT_YUV444,
++ },
++ {
++ .description = "YUV420",
++ .pixelformat = V4L2_PIX_FMT_YUV420,
++ },
++ {
++ .description = "YVU420",
++ .pixelformat = V4L2_PIX_FMT_YVU420,
++ },
++ {
++ .description = "TILED NV12P",
++ .pixelformat = IPU_PIX_FMT_TILED_NV12,
++ },
++ {
++ .description = "TILED NV12F",
++ .pixelformat = IPU_PIX_FMT_TILED_NV12F,
++ },
++ {
++ .description = "YUV444 planar",
++ .pixelformat = IPU_PIX_FMT_YUV444P,
++ },
++};
++
++#define NUM_MXC_VOUT_FORMATS (ARRAY_SIZE(mxc_formats))
++
++#define DEF_INPUT_WIDTH 320
++#define DEF_INPUT_HEIGHT 240
++
++static int mxc_vidioc_streamoff(struct file *file, void *fh,
++ enum v4l2_buf_type i);
++
++static struct mxc_vout_fb g_fb_setting[MAX_FB_NUM];
++static int config_disp_output(struct mxc_vout_output *vout);
++static void release_disp_output(struct mxc_vout_output *vout);
++
++static unsigned int get_frame_size(struct mxc_vout_output *vout)
++{
++ unsigned int size;
++
++ if (IPU_PIX_FMT_TILED_NV12 == vout->task.input.format)
++ size = TILED_NV12_FRAME_SIZE(vout->task.input.width,
++ vout->task.input.height);
++ else if (IPU_PIX_FMT_TILED_NV12F == vout->task.input.format) {
++ size = TILED_NV12_FRAME_SIZE(vout->task.input.width,
++ vout->task.input.height/2);
++ size *= 2;
++ } else
++ size = vout->task.input.width * vout->task.input.height *
++ fmt_to_bpp(vout->task.input.format)/8;
++
++ return size;
++}
++
++static void free_dma_buf(struct mxc_vout_output *vout, struct dma_mem *buf)
++{
++ dma_free_coherent(vout->vbq.dev, buf->size, buf->vaddr, buf->paddr);
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "free dma size:0x%x, paddr:0x%x\n",
++ buf->size, buf->paddr);
++ memset(buf, 0, sizeof(*buf));
++}
++
++static int alloc_dma_buf(struct mxc_vout_output *vout, struct dma_mem *buf)
++{
++
++ buf->vaddr = dma_alloc_coherent(vout->vbq.dev, buf->size, &buf->paddr,
++ GFP_DMA | GFP_KERNEL);
++ if (!buf->vaddr) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "cannot get dma buf size:0x%x\n", buf->size);
++ return -ENOMEM;
++ }
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "alloc dma buf size:0x%x, paddr:0x%x\n", buf->size, buf->paddr);
++ return 0;
++}
++
++static ipu_channel_t get_ipu_channel(struct fb_info *fbi)
++{
++ ipu_channel_t ipu_ch = CHAN_NONE;
++ mm_segment_t old_fs;
++
++ if (fbi->fbops->fb_ioctl) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ fbi->fbops->fb_ioctl(fbi, MXCFB_GET_FB_IPU_CHAN,
++ (unsigned long)&ipu_ch);
++ set_fs(old_fs);
++ }
++
++ return ipu_ch;
++}
++
++static unsigned int get_ipu_fmt(struct fb_info *fbi)
++{
++ mm_segment_t old_fs;
++ unsigned int fb_fmt;
++
++ if (fbi->fbops->fb_ioctl) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ fbi->fbops->fb_ioctl(fbi, MXCFB_GET_DIFMT,
++ (unsigned long)&fb_fmt);
++ set_fs(old_fs);
++ }
++
++ return fb_fmt;
++}
++
++static void update_display_setting(void)
++{
++ int i;
++ struct fb_info *fbi;
++ struct v4l2_rect bg_crop_bounds[2];
++
++ for (i = 0; i < num_registered_fb; i++) {
++ fbi = registered_fb[i];
++
++ memset(&g_fb_setting[i], 0, sizeof(struct mxc_vout_fb));
++
++ if (!strncmp(fbi->fix.id, "DISP3", 5))
++ g_fb_setting[i].ipu_id = 0;
++ else
++ g_fb_setting[i].ipu_id = 1;
++
++ g_fb_setting[i].name = fbi->fix.id;
++ g_fb_setting[i].crop_bounds.left = 0;
++ g_fb_setting[i].crop_bounds.top = 0;
++ g_fb_setting[i].crop_bounds.width = fbi->var.xres;
++ g_fb_setting[i].crop_bounds.height = fbi->var.yres;
++ g_fb_setting[i].disp_fmt = get_ipu_fmt(fbi);
++
++ if (get_ipu_channel(fbi) == MEM_BG_SYNC) {
++ bg_crop_bounds[g_fb_setting[i].ipu_id] =
++ g_fb_setting[i].crop_bounds;
++ g_fb_setting[i].disp_support_csc = true;
++ } else if (get_ipu_channel(fbi) == MEM_FG_SYNC) {
++ g_fb_setting[i].disp_support_csc = true;
++ g_fb_setting[i].disp_support_windows = true;
++ }
++ }
++
++ for (i = 0; i < num_registered_fb; i++) {
++ fbi = registered_fb[i];
++
++ if (get_ipu_channel(fbi) == MEM_FG_SYNC)
++ g_fb_setting[i].crop_bounds =
++ bg_crop_bounds[g_fb_setting[i].ipu_id];
++ }
++}
++
++/* called after g_fb_setting filled by update_display_setting */
++static int update_setting_from_fbi(struct mxc_vout_output *vout,
++ struct fb_info *fbi)
++{
++ int i;
++ bool found = false;
++
++ for (i = 0; i < MAX_FB_NUM; i++) {
++ if (g_fb_setting[i].name) {
++ if (!strcmp(fbi->fix.id, g_fb_setting[i].name)) {
++ vout->crop_bounds = g_fb_setting[i].crop_bounds;
++ vout->disp_fmt = g_fb_setting[i].disp_fmt;
++ vout->disp_support_csc =
++ g_fb_setting[i].disp_support_csc;
++ vout->disp_support_windows =
++ g_fb_setting[i].disp_support_windows;
++ found = true;
++ break;
++ }
++ }
++ }
++
++ if (!found) {
++ v4l2_err(vout->vfd->v4l2_dev, "can not find output\n");
++ return -EINVAL;
++ }
++ strlcpy(vout->vfd->name, fbi->fix.id, sizeof(vout->vfd->name));
++
++ memset(&vout->task, 0, sizeof(struct ipu_task));
++
++ vout->task.input.width = DEF_INPUT_WIDTH;
++ vout->task.input.height = DEF_INPUT_HEIGHT;
++ vout->task.input.crop.pos.x = 0;
++ vout->task.input.crop.pos.y = 0;
++ vout->task.input.crop.w = DEF_INPUT_WIDTH;
++ vout->task.input.crop.h = DEF_INPUT_HEIGHT;
++
++ vout->task.output.width = vout->crop_bounds.width;
++ vout->task.output.height = vout->crop_bounds.height;
++ vout->task.output.crop.pos.x = 0;
++ vout->task.output.crop.pos.y = 0;
++ vout->task.output.crop.w = vout->crop_bounds.width;
++ vout->task.output.crop.h = vout->crop_bounds.height;
++ if (colorspaceofpixel(vout->disp_fmt) == YUV_CS)
++ vout->task.output.format = IPU_PIX_FMT_UYVY;
++ else
++ vout->task.output.format = IPU_PIX_FMT_RGB565;
++
++ return 0;
++}
++
++static inline unsigned long get_jiffies(struct timeval *t)
++{
++ struct timeval cur;
++
++ if (t->tv_usec >= 1000000) {
++ t->tv_sec += t->tv_usec / 1000000;
++ t->tv_usec = t->tv_usec % 1000000;
++ }
++
++ do_gettimeofday(&cur);
++ if ((t->tv_sec < cur.tv_sec)
++ || ((t->tv_sec == cur.tv_sec) && (t->tv_usec < cur.tv_usec)))
++ return jiffies;
++
++ if (t->tv_usec < cur.tv_usec) {
++ cur.tv_sec = t->tv_sec - cur.tv_sec - 1;
++ cur.tv_usec = t->tv_usec + 1000000 - cur.tv_usec;
++ } else {
++ cur.tv_sec = t->tv_sec - cur.tv_sec;
++ cur.tv_usec = t->tv_usec - cur.tv_usec;
++ }
++
++ return jiffies + timeval_to_jiffies(&cur);
++}
++
++static bool deinterlace_3_field(struct mxc_vout_output *vout)
++{
++ return (vout->task.input.deinterlace.enable &&
++ (vout->task.input.deinterlace.motion != HIGH_MOTION));
++}
++
++static int set_field_fmt(struct mxc_vout_output *vout, enum v4l2_field field)
++{
++ struct ipu_deinterlace *deinterlace = &vout->task.input.deinterlace;
++
++ switch (field) {
++ /* Images are in progressive format, not interlaced */
++ case V4L2_FIELD_NONE:
++ case V4L2_FIELD_ANY:
++ deinterlace->enable = false;
++ deinterlace->field_fmt = 0;
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev, "Progressive frame.\n");
++ break;
++ case V4L2_FIELD_INTERLACED_TB:
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "Enable deinterlace TB.\n");
++ deinterlace->enable = true;
++ deinterlace->field_fmt = IPU_DEINTERLACE_FIELD_TOP;
++ break;
++ case V4L2_FIELD_INTERLACED_BT:
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "Enable deinterlace BT.\n");
++ deinterlace->enable = true;
++ deinterlace->field_fmt = IPU_DEINTERLACE_FIELD_BOTTOM;
++ break;
++ default:
++ v4l2_err(vout->vfd->v4l2_dev,
++ "field format:%d not supported yet!\n", field);
++ return -EINVAL;
++ }
++
++ if (IPU_PIX_FMT_TILED_NV12F == vout->task.input.format) {
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "tiled fmt enable deinterlace.\n");
++ deinterlace->enable = true;
++ }
++
++ if (deinterlace->enable && vdi_rate_double)
++ deinterlace->field_fmt |= IPU_DEINTERLACE_RATE_EN;
++
++ return 0;
++}
++
++static bool is_pp_bypass(struct mxc_vout_output *vout)
++{
++ if ((IPU_PIX_FMT_TILED_NV12 == vout->task.input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == vout->task.input.format))
++ return false;
++ if ((vout->task.input.width == vout->task.output.width) &&
++ (vout->task.input.height == vout->task.output.height) &&
++ (vout->task.input.crop.w == vout->task.output.crop.w) &&
++ (vout->task.input.crop.h == vout->task.output.crop.h) &&
++ (vout->task.output.rotate < IPU_ROTATE_HORIZ_FLIP) &&
++ !vout->task.input.deinterlace.enable) {
++ if (vout->disp_support_csc)
++ return true;
++ else if (!need_csc(vout->task.input.format, vout->disp_fmt))
++ return true;
++ /*
++ * input crop show to full output which can show based on
++ * xres_virtual/yres_virtual
++ */
++ } else if ((vout->task.input.crop.w == vout->task.output.crop.w) &&
++ (vout->task.output.crop.w == vout->task.output.width) &&
++ (vout->task.input.crop.h == vout->task.output.crop.h) &&
++ (vout->task.output.crop.h ==
++ vout->task.output.height) &&
++ (vout->task.output.rotate < IPU_ROTATE_HORIZ_FLIP) &&
++ !vout->task.input.deinterlace.enable) {
++ if (vout->disp_support_csc)
++ return true;
++ else if (!need_csc(vout->task.input.format, vout->disp_fmt))
++ return true;
++ }
++ return false;
++}
++
++static void setup_buf_timer(struct mxc_vout_output *vout,
++ struct videobuf_buffer *vb)
++{
++ ktime_t expiry_time, now;
++
++ /* if timestamp is 0, then default to 30fps */
++ if ((vb->ts.tv_sec == 0) && (vb->ts.tv_usec == 0))
++ expiry_time = ktime_add_ns(vout->start_ktime,
++ NSEC_PER_FRAME_30FPS * vout->frame_count);
++ else
++ expiry_time = timeval_to_ktime(vb->ts);
++
++ now = hrtimer_cb_get_time(&vout->timer);
++ if ((now.tv64 > expiry_time.tv64)) {
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "warning: timer timeout already expired.\n");
++ expiry_time = now;
++ }
++
++ hrtimer_start(&vout->timer, expiry_time, HRTIMER_MODE_ABS);
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev, "timer handler next "
++ "schedule: %lldnsecs\n", expiry_time.tv64);
++}
++
++static int show_buf(struct mxc_vout_output *vout, int idx,
++ struct ipu_pos *ipos)
++{
++ struct fb_info *fbi = vout->fbi;
++ struct fb_var_screeninfo var;
++ int ret;
++ u32 fb_base = 0;
++
++ memcpy(&var, &fbi->var, sizeof(var));
++
++ if (vout->linear_bypass_pp || vout->tiled_bypass_pp) {
++ /*
++ * crack fb base
++ * NOTE: should not do other fb operation during v4l2
++ */
++ console_lock();
++ fb_base = fbi->fix.smem_start;
++ fbi->fix.smem_start = vout->task.output.paddr;
++ fbi->var.yoffset = ipos->y + 1;
++ var.xoffset = ipos->x;
++ var.yoffset = ipos->y;
++ var.vmode |= FB_VMODE_YWRAP;
++ ret = fb_pan_display(fbi, &var);
++ fbi->fix.smem_start = fb_base;
++ console_unlock();
++ } else {
++ console_lock();
++ var.yoffset = idx * fbi->var.yres;
++ var.vmode &= ~FB_VMODE_YWRAP;
++ ret = fb_pan_display(fbi, &var);
++ console_unlock();
++ }
++
++ return ret;
++}
++
++static void disp_work_func(struct work_struct *work)
++{
++ struct mxc_vout_output *vout =
++ container_of(work, struct mxc_vout_output, disp_work);
++ struct videobuf_queue *q = &vout->vbq;
++ struct videobuf_buffer *vb, *vb_next = NULL;
++ unsigned long flags = 0;
++ struct ipu_pos ipos;
++ int ret = 0;
++ u32 in_fmt = 0;
++ u32 vdi_cnt = 0;
++ u32 vdi_frame;
++ u32 index = 0;
++ u32 ocrop_h = 0;
++ u32 o_height = 0;
++ u32 tiled_interlaced = 0;
++ bool tiled_fmt = false;
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev, "disp work begin one frame\n");
++
++ spin_lock_irqsave(q->irqlock, flags);
++
++ if (list_empty(&vout->active_list)) {
++ v4l2_warn(vout->vfd->v4l2_dev,
++ "no entry in active_list, should not be here\n");
++ spin_unlock_irqrestore(q->irqlock, flags);
++ return;
++ }
++
++ vb = list_first_entry(&vout->active_list,
++ struct videobuf_buffer, queue);
++ ret = set_field_fmt(vout, vb->field);
++ if (ret < 0) {
++ spin_unlock_irqrestore(q->irqlock, flags);
++ return;
++ }
++ if (deinterlace_3_field(vout)) {
++ if (list_is_singular(&vout->active_list)) {
++ if (list_empty(&vout->queue_list)) {
++ vout->timer_stop = true;
++ spin_unlock_irqrestore(q->irqlock, flags);
++ v4l2_warn(vout->vfd->v4l2_dev,
++ "no enough entry for 3 fields "
++ "deinterlacer\n");
++ return;
++ }
++
++ /*
++ * We need to use the next vb even if it is
++ * not on the active list.
++ */
++ vb_next = list_first_entry(&vout->queue_list,
++ struct videobuf_buffer, queue);
++ } else
++ vb_next = list_first_entry(vout->active_list.next,
++ struct videobuf_buffer, queue);
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "cur field_fmt:%d, next field_fmt:%d.\n",
++ vb->field, vb_next->field);
++ /* repeat the last field during field format changing */
++ if ((vb->field != vb_next->field) &&
++ (vb_next->field != V4L2_FIELD_NONE))
++ vb_next = vb;
++ }
++
++ spin_unlock_irqrestore(q->irqlock, flags);
++
++vdi_frame_rate_double:
++ mutex_lock(&vout->task_lock);
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "v4l2 frame_cnt:%ld, vb_field:%d, fmt:%d\n",
++ vout->frame_count, vb->field,
++ vout->task.input.deinterlace.field_fmt);
++ if (vb->memory == V4L2_MEMORY_USERPTR)
++ vout->task.input.paddr = vb->baddr;
++ else
++ vout->task.input.paddr = videobuf_to_dma_contig(vb);
++
++ if (vout->task.input.deinterlace.field_fmt & IPU_DEINTERLACE_RATE_EN)
++ index = vout->vdi_frame_cnt % FB_BUFS;
++ else
++ index = vout->frame_count % FB_BUFS;
++ if (vout->linear_bypass_pp) {
++ vout->task.output.paddr = vout->task.input.paddr;
++ ipos.x = vout->task.input.crop.pos.x;
++ ipos.y = vout->task.input.crop.pos.y;
++ } else {
++ if (deinterlace_3_field(vout)) {
++ if (vb->memory == V4L2_MEMORY_USERPTR)
++ vout->task.input.paddr_n = vb_next->baddr;
++ else
++ vout->task.input.paddr_n =
++ videobuf_to_dma_contig(vb_next);
++ }
++ vout->task.output.paddr = vout->disp_bufs[index];
++ if (vout->vdoa_1080p) {
++ o_height = vout->task.output.height;
++ ocrop_h = vout->task.output.crop.h;
++ vout->task.output.height = FRAME_HEIGHT_1080P;
++ vout->task.output.crop.h = FRAME_HEIGHT_1080P;
++ }
++ tiled_fmt =
++ (IPU_PIX_FMT_TILED_NV12 == vout->task.input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == vout->task.input.format);
++ if (vout->tiled_bypass_pp) {
++ ipos.x = vout->task.input.crop.pos.x;
++ ipos.y = vout->task.input.crop.pos.y;
++ } else if (tiled_fmt) {
++ vout->vdoa_task.input.paddr = vout->task.input.paddr;
++ if (deinterlace_3_field(vout))
++ vout->vdoa_task.input.paddr_n =
++ vout->task.input.paddr_n;
++ vout->vdoa_task.output.paddr = vout->vdoa_work.paddr;
++ ret = ipu_queue_task(&vout->vdoa_task);
++ if (ret < 0) {
++ mutex_unlock(&vout->task_lock);
++ goto err;
++ }
++ vout->task.input.paddr = vout->vdoa_task.output.paddr;
++ in_fmt = vout->task.input.format;
++ vout->task.input.format = vout->vdoa_task.output.format;
++ if (vout->task.input.deinterlace.enable) {
++ tiled_interlaced = 1;
++ vout->task.input.deinterlace.enable = 0;
++ }
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "tiled queue task\n");
++ }
++ ret = ipu_queue_task(&vout->task);
++ if ((!vout->tiled_bypass_pp) && tiled_fmt)
++ vout->task.input.format = in_fmt;
++ if (tiled_interlaced)
++ vout->task.input.deinterlace.enable = 1;
++ if (ret < 0) {
++ mutex_unlock(&vout->task_lock);
++ goto err;
++ }
++ if (vout->vdoa_1080p) {
++ vout->task.output.crop.h = ocrop_h;
++ vout->task.output.height = o_height;
++ }
++ }
++
++ mutex_unlock(&vout->task_lock);
++
++ ret = show_buf(vout, index, &ipos);
++ if (ret < 0)
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "show buf with ret %d\n", ret);
++
++ if (vout->task.input.deinterlace.field_fmt & IPU_DEINTERLACE_RATE_EN) {
++ vdi_frame = vout->task.input.deinterlace.field_fmt
++ & IPU_DEINTERLACE_RATE_FRAME1;
++ if (vdi_frame)
++ vout->task.input.deinterlace.field_fmt &=
++ ~IPU_DEINTERLACE_RATE_FRAME1;
++ else
++ vout->task.input.deinterlace.field_fmt |=
++ IPU_DEINTERLACE_RATE_FRAME1;
++ vout->vdi_frame_cnt++;
++ vdi_cnt++;
++ if (vdi_cnt < IPU_DEINTERLACE_MAX_FRAME)
++ goto vdi_frame_rate_double;
++ }
++ spin_lock_irqsave(q->irqlock, flags);
++
++ list_del(&vb->queue);
++
++ /*
++ * The videobuf before the last one has been shown. Set
++ * VIDEOBUF_DONE state here to avoid tearing issue in ic bypass
++ * case, which makes sure a buffer being shown will not be
++ * dequeued to be overwritten. It also brings side-effect that
++ * the last 2 buffers can not be dequeued correctly, apps need
++ * to take care of it.
++ */
++ if (vout->pre2_vb) {
++ vout->pre2_vb->state = VIDEOBUF_DONE;
++ wake_up_interruptible(&vout->pre2_vb->done);
++ vout->pre2_vb = NULL;
++ }
++
++ if (vout->linear_bypass_pp) {
++ vout->pre2_vb = vout->pre1_vb;
++ vout->pre1_vb = vb;
++ } else {
++ if (vout->pre1_vb) {
++ vout->pre1_vb->state = VIDEOBUF_DONE;
++ wake_up_interruptible(&vout->pre1_vb->done);
++ vout->pre1_vb = NULL;
++ }
++ vb->state = VIDEOBUF_DONE;
++ wake_up_interruptible(&vb->done);
++ }
++
++ vout->frame_count++;
++
++ /* pick next queue buf to setup timer */
++ if (list_empty(&vout->queue_list))
++ vout->timer_stop = true;
++ else {
++ vb = list_first_entry(&vout->queue_list,
++ struct videobuf_buffer, queue);
++ setup_buf_timer(vout, vb);
++ }
++
++ spin_unlock_irqrestore(q->irqlock, flags);
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev, "disp work finish one frame\n");
++
++ return;
++err:
++ v4l2_err(vout->vfd->v4l2_dev, "display work fail ret = %d\n", ret);
++ vout->timer_stop = true;
++ vb->state = VIDEOBUF_ERROR;
++ return;
++}
++
++static enum hrtimer_restart mxc_vout_timer_handler(struct hrtimer *timer)
++{
++ struct mxc_vout_output *vout = container_of(timer,
++ struct mxc_vout_output,
++ timer);
++ struct videobuf_queue *q = &vout->vbq;
++ struct videobuf_buffer *vb;
++ unsigned long flags = 0;
++
++ spin_lock_irqsave(q->irqlock, flags);
++
++ /*
++ * put first queued entry into active, if previous entry did not
++ * finish, setup current entry's timer again.
++ */
++ if (list_empty(&vout->queue_list)) {
++ spin_unlock_irqrestore(q->irqlock, flags);
++ return HRTIMER_NORESTART;
++ }
++
++ /* move videobuf from queued list to active list */
++ vb = list_first_entry(&vout->queue_list,
++ struct videobuf_buffer, queue);
++ list_del(&vb->queue);
++ list_add_tail(&vb->queue, &vout->active_list);
++
++ if (queue_work(vout->v4l_wq, &vout->disp_work) == 0) {
++ v4l2_warn(vout->vfd->v4l2_dev,
++ "disp work was in queue already, queue buf again next time\n");
++ list_del(&vb->queue);
++ list_add(&vb->queue, &vout->queue_list);
++ spin_unlock_irqrestore(q->irqlock, flags);
++ return HRTIMER_NORESTART;
++ }
++
++ vb->state = VIDEOBUF_ACTIVE;
++
++ spin_unlock_irqrestore(q->irqlock, flags);
++
++ return HRTIMER_NORESTART;
++}
++
++/* Video buffer call backs */
++
++/*
++ * Buffer setup function is called by videobuf layer when REQBUF ioctl is
++ * called. This is used to setup buffers and return size and count of
++ * buffers allocated. After the call to this buffer, videobuf layer will
++ * setup buffer queue depending on the size and count of buffers
++ */
++static int mxc_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
++ unsigned int *size)
++{
++ struct mxc_vout_output *vout = q->priv_data;
++ unsigned int frame_size;
++
++ if (!vout)
++ return -EINVAL;
++
++ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
++ return -EINVAL;
++
++ frame_size = get_frame_size(vout);
++ *size = PAGE_ALIGN(frame_size);
++
++ return 0;
++}
++
++/*
++ * This function will be called when VIDIOC_QBUF ioctl is called.
++ * It prepare buffers before give out for the display. This function
++ * converts user space virtual address into physical address if userptr memory
++ * exchange mechanism is used.
++ */
++static int mxc_vout_buffer_prepare(struct videobuf_queue *q,
++ struct videobuf_buffer *vb,
++ enum v4l2_field field)
++{
++ vb->state = VIDEOBUF_PREPARED;
++ return 0;
++}
++
++/*
++ * Buffer queue funtion will be called from the videobuf layer when _QBUF
++ * ioctl is called. It is used to enqueue buffer, which is ready to be
++ * displayed.
++ * This function is protected by q->irqlock.
++ */
++static void mxc_vout_buffer_queue(struct videobuf_queue *q,
++ struct videobuf_buffer *vb)
++{
++ struct mxc_vout_output *vout = q->priv_data;
++ struct videobuf_buffer *active_vb;
++
++ list_add_tail(&vb->queue, &vout->queue_list);
++ vb->state = VIDEOBUF_QUEUED;
++
++ if (vout->timer_stop) {
++ if (deinterlace_3_field(vout) &&
++ !list_empty(&vout->active_list)) {
++ active_vb = list_first_entry(&vout->active_list,
++ struct videobuf_buffer, queue);
++ setup_buf_timer(vout, active_vb);
++ } else {
++ setup_buf_timer(vout, vb);
++ }
++ vout->timer_stop = false;
++ }
++}
++
++/*
++ * Buffer release function is called from videobuf layer to release buffer
++ * which are already allocated
++ */
++static void mxc_vout_buffer_release(struct videobuf_queue *q,
++ struct videobuf_buffer *vb)
++{
++ vb->state = VIDEOBUF_NEEDS_INIT;
++}
++
++static int mxc_vout_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ int ret;
++ struct mxc_vout_output *vout = file->private_data;
++
++ if (!vout)
++ return -ENODEV;
++
++ ret = videobuf_mmap_mapper(&vout->vbq, vma);
++ if (ret < 0)
++ v4l2_err(vout->vfd->v4l2_dev,
++ "offset invalid [offset=0x%lx]\n",
++ (vma->vm_pgoff << PAGE_SHIFT));
++
++ return ret;
++}
++
++static int mxc_vout_release(struct file *file)
++{
++ unsigned int ret = 0;
++ struct videobuf_queue *q;
++ struct mxc_vout_output *vout = file->private_data;
++
++ if (!vout)
++ return 0;
++
++ if (--vout->open_cnt == 0) {
++ q = &vout->vbq;
++ if (q->streaming)
++ mxc_vidioc_streamoff(file, vout, vout->type);
++ else {
++ release_disp_output(vout);
++ videobuf_queue_cancel(q);
++ }
++ destroy_workqueue(vout->v4l_wq);
++ ret = videobuf_mmap_free(q);
++ }
++
++ return ret;
++}
++
++static int mxc_vout_open(struct file *file)
++{
++ struct mxc_vout_output *vout = NULL;
++ int ret = 0;
++
++ vout = video_drvdata(file);
++
++ if (vout == NULL)
++ return -ENODEV;
++
++ if (vout->open_cnt++ == 0) {
++ vout->ctrl_rotate = 0;
++ vout->ctrl_vflip = 0;
++ vout->ctrl_hflip = 0;
++ update_display_setting();
++ ret = update_setting_from_fbi(vout, vout->fbi);
++ if (ret < 0)
++ goto err;
++
++ vout->v4l_wq = create_singlethread_workqueue("v4l2q");
++ if (!vout->v4l_wq) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "Could not create work queue\n");
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ INIT_WORK(&vout->disp_work, disp_work_func);
++
++ INIT_LIST_HEAD(&vout->queue_list);
++ INIT_LIST_HEAD(&vout->active_list);
++
++ vout->fmt_init = false;
++ vout->frame_count = 0;
++ vout->vdi_frame_cnt = 0;
++
++ vout->win_pos.x = 0;
++ vout->win_pos.y = 0;
++ vout->release = true;
++ }
++
++ file->private_data = vout;
++
++err:
++ return ret;
++}
++
++/*
++ * V4L2 ioctls
++ */
++static int mxc_vidioc_querycap(struct file *file, void *fh,
++ struct v4l2_capability *cap)
++{
++ struct mxc_vout_output *vout = fh;
++
++ strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
++ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
++ cap->bus_info[0] = '\0';
++ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
++
++ return 0;
++}
++
++static int mxc_vidioc_enum_fmt_vid_out(struct file *file, void *fh,
++ struct v4l2_fmtdesc *fmt)
++{
++ if (fmt->index >= NUM_MXC_VOUT_FORMATS)
++ return -EINVAL;
++
++ strlcpy(fmt->description, mxc_formats[fmt->index].description,
++ sizeof(fmt->description));
++ fmt->pixelformat = mxc_formats[fmt->index].pixelformat;
++
++ return 0;
++}
++
++static int mxc_vidioc_g_fmt_vid_out(struct file *file, void *fh,
++ struct v4l2_format *f)
++{
++ struct mxc_vout_output *vout = fh;
++ struct v4l2_rect rect;
++
++ f->fmt.pix.width = vout->task.input.width;
++ f->fmt.pix.height = vout->task.input.height;
++ f->fmt.pix.pixelformat = vout->task.input.format;
++ f->fmt.pix.sizeimage = get_frame_size(vout);
++
++ if (f->fmt.pix.priv) {
++ rect.left = vout->task.input.crop.pos.x;
++ rect.top = vout->task.input.crop.pos.y;
++ rect.width = vout->task.input.crop.w;
++ rect.height = vout->task.input.crop.h;
++ if (copy_to_user((void __user *)f->fmt.pix.priv,
++ &rect, sizeof(rect)))
++ return -EFAULT;
++ }
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "frame_size:0x%x, pix_fmt:0x%x\n",
++ f->fmt.pix.sizeimage,
++ vout->task.input.format);
++
++ return 0;
++}
++
++static inline int ipu_try_task(struct mxc_vout_output *vout)
++{
++ int ret;
++ struct ipu_task *task = &vout->task;
++
++again:
++ ret = ipu_check_task(task);
++ if (ret != IPU_CHECK_OK) {
++ if (ret > IPU_CHECK_ERR_MIN) {
++ if (ret == IPU_CHECK_ERR_SPLIT_INPUTW_OVER ||
++ ret == IPU_CHECK_ERR_W_DOWNSIZE_OVER) {
++ task->input.crop.w -= 8;
++ goto again;
++ }
++ if (ret == IPU_CHECK_ERR_SPLIT_INPUTH_OVER ||
++ ret == IPU_CHECK_ERR_H_DOWNSIZE_OVER) {
++ task->input.crop.h -= 8;
++ goto again;
++ }
++ if (ret == IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER) {
++ if (vout->disp_support_windows) {
++ task->output.width -= 8;
++ task->output.crop.w =
++ task->output.width;
++ } else
++ task->output.crop.w -= 8;
++ goto again;
++ }
++ if (ret == IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER) {
++ if (vout->disp_support_windows) {
++ task->output.height -= 8;
++ task->output.crop.h =
++ task->output.height;
++ } else
++ task->output.crop.h -= 8;
++ goto again;
++ }
++ ret = -EINVAL;
++ }
++ } else
++ ret = 0;
++
++ return ret;
++}
++
++static inline int vdoaipu_try_task(struct mxc_vout_output *vout)
++{
++ int ret;
++ int is_1080p_stream;
++ size_t size;
++ struct ipu_task *ipu_task = &vout->task;
++ struct ipu_crop *icrop = &ipu_task->input.crop;
++ struct ipu_task *vdoa_task = &vout->vdoa_task;
++ u32 deinterlace = 0;
++ u32 in_fmt;
++
++ if (vout->task.input.deinterlace.enable)
++ deinterlace = 1;
++
++ memset(vdoa_task, 0, sizeof(*vdoa_task));
++ vdoa_task->output.format = IPU_PIX_FMT_NV12;
++ memcpy(&vdoa_task->input, &ipu_task->input,
++ sizeof(ipu_task->input));
++ if ((icrop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (icrop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN)) {
++ vdoa_task->input.crop.w =
++ ALIGN(icrop->w, IPU_PIX_FMT_TILED_NV12_MBALIGN);
++ vdoa_task->input.crop.h =
++ ALIGN(icrop->h, IPU_PIX_FMT_TILED_NV12_MBALIGN);
++ }
++ vdoa_task->output.width = vdoa_task->input.crop.w;
++ vdoa_task->output.height = vdoa_task->input.crop.h;
++ vdoa_task->output.crop.w = vdoa_task->input.crop.w;
++ vdoa_task->output.crop.h = vdoa_task->input.crop.h;
++
++ size = PAGE_ALIGN(vdoa_task->input.crop.w *
++ vdoa_task->input.crop.h *
++ fmt_to_bpp(vdoa_task->output.format)/8);
++ if (size > vout->vdoa_work.size) {
++ if (vout->vdoa_work.vaddr)
++ free_dma_buf(vout, &vout->vdoa_work);
++ vout->vdoa_work.size = size;
++ ret = alloc_dma_buf(vout, &vout->vdoa_work);
++ if (ret < 0)
++ return ret;
++ }
++ ret = ipu_check_task(vdoa_task);
++ if (ret != IPU_CHECK_OK)
++ return -EINVAL;
++
++ is_1080p_stream = CHECK_TILED_1080P_STREAM(vout);
++ if (is_1080p_stream)
++ ipu_task->input.crop.h = VALID_HEIGHT_1080P;
++ in_fmt = ipu_task->input.format;
++ ipu_task->input.format = vdoa_task->output.format;
++ ipu_task->input.height = vdoa_task->output.height;
++ ipu_task->input.width = vdoa_task->output.width;
++ if (deinterlace)
++ ipu_task->input.deinterlace.enable = 0;
++ ret = ipu_try_task(vout);
++ if (deinterlace)
++ ipu_task->input.deinterlace.enable = 1;
++ ipu_task->input.format = in_fmt;
++
++ return ret;
++}
++
++static int mxc_vout_try_task(struct mxc_vout_output *vout)
++{
++ int ret = 0;
++ struct ipu_output *output = &vout->task.output;
++ struct ipu_input *input = &vout->task.input;
++ struct ipu_crop *crop = &input->crop;
++ u32 o_height = 0;
++ u32 ocrop_h = 0;
++ bool tiled_fmt = false;
++ bool tiled_need_pp = false;
++
++ vout->vdoa_1080p = CHECK_TILED_1080P_DISPLAY(vout);
++ if (vout->vdoa_1080p) {
++ input->crop.h = FRAME_HEIGHT_1080P;
++ o_height = output->height;
++ ocrop_h = output->crop.h;
++ output->height = FRAME_HEIGHT_1080P;
++ output->crop.h = FRAME_HEIGHT_1080P;
++ }
++
++ if ((IPU_PIX_FMT_TILED_NV12 == input->format) ||
++ (IPU_PIX_FMT_TILED_NV12F == input->format)) {
++ if ((input->width % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (input->height % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (crop->pos.x % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (crop->pos.y % IPU_PIX_FMT_TILED_NV12_MBALIGN)) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "ERR: tiled fmt needs 16 pixel align.\n");
++ return -EINVAL;
++ }
++ if ((crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ tiled_need_pp = true;
++ } else {
++ crop->w -= crop->w % 8;
++ crop->h -= crop->h % 8;
++ }
++ /* assume task.output already set by S_CROP */
++ vout->linear_bypass_pp = is_pp_bypass(vout);
++ if (vout->linear_bypass_pp) {
++ v4l2_info(vout->vfd->v4l2_dev, "Bypass IC.\n");
++ output->format = input->format;
++ } else {
++ /* if need CSC, choose IPU-DP or IPU_IC do it */
++ if (vout->disp_support_csc) {
++ if (colorspaceofpixel(input->format) == YUV_CS)
++ output->format = IPU_PIX_FMT_UYVY;
++ else
++ output->format = IPU_PIX_FMT_RGB565;
++ } else {
++ if (colorspaceofpixel(vout->disp_fmt) == YUV_CS)
++ output->format = IPU_PIX_FMT_UYVY;
++ else
++ output->format = IPU_PIX_FMT_RGB565;
++ }
++
++ vout->tiled_bypass_pp = false;
++ if ((IPU_PIX_FMT_TILED_NV12 == input->format) ||
++ (IPU_PIX_FMT_TILED_NV12F == input->format)) {
++ /* check resize/rotate/flip, or csc task */
++ if (!(tiled_need_pp ||
++ (IPU_ROTATE_NONE != output->rotate) ||
++ (input->crop.w != output->crop.w) ||
++ (input->crop.h != output->crop.h) ||
++ (!vout->disp_support_csc &&
++ (colorspaceofpixel(vout->disp_fmt) == RGB_CS)))
++ ) {
++ /* IC bypass */
++ output->format = IPU_PIX_FMT_NV12;
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "tiled bypass pp\n");
++ vout->tiled_bypass_pp = true;
++ }
++ tiled_fmt = true;
++ }
++
++ if ((!vout->tiled_bypass_pp) && tiled_fmt)
++ ret = vdoaipu_try_task(vout);
++ else
++ ret = ipu_try_task(vout);
++ }
++
++ if (vout->vdoa_1080p) {
++ output->height = o_height;
++ output->crop.h = ocrop_h;
++ }
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "icrop.w:%u, icrop.h:%u, iw:%u, ih:%u,"
++ "ocrop.w:%u, ocrop.h:%u, ow:%u, oh:%u\n",
++ input->crop.w, input->crop.h,
++ input->width, input->height,
++ output->crop.w, output->crop.h,
++ output->width, output->height);
++ return ret;
++}
++
++static int mxc_vout_try_format(struct mxc_vout_output *vout,
++ struct v4l2_format *f)
++{
++ int ret = 0;
++ struct v4l2_rect rect;
++
++ if ((f->fmt.pix.field != V4L2_FIELD_NONE) &&
++ (IPU_PIX_FMT_TILED_NV12 == vout->task.input.format)) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "progressive tiled fmt should used V4L2_FIELD_NONE!\n");
++ return -EINVAL;
++ }
++
++ if (f->fmt.pix.priv && copy_from_user(&rect,
++ (void __user *)f->fmt.pix.priv, sizeof(rect)))
++ return -EFAULT;
++
++ vout->task.input.width = f->fmt.pix.width;
++ vout->task.input.height = f->fmt.pix.height;
++ vout->task.input.format = f->fmt.pix.pixelformat;
++
++ ret = set_field_fmt(vout, f->fmt.pix.field);
++ if (ret < 0)
++ return ret;
++
++ if (f->fmt.pix.priv) {
++ vout->task.input.crop.pos.x = rect.left;
++ vout->task.input.crop.pos.y = rect.top;
++ vout->task.input.crop.w = rect.width;
++ vout->task.input.crop.h = rect.height;
++ } else {
++ vout->task.input.crop.pos.x = 0;
++ vout->task.input.crop.pos.y = 0;
++ vout->task.input.crop.w = f->fmt.pix.width;
++ vout->task.input.crop.h = f->fmt.pix.height;
++ }
++ memcpy(&vout->in_rect, &vout->task.input.crop, sizeof(vout->in_rect));
++
++ ret = mxc_vout_try_task(vout);
++ if (!ret) {
++ if (f->fmt.pix.priv) {
++ rect.width = vout->task.input.crop.w;
++ rect.height = vout->task.input.crop.h;
++ if (copy_to_user((void __user *)f->fmt.pix.priv,
++ &rect, sizeof(rect)))
++ ret = -EFAULT;
++ } else {
++ f->fmt.pix.width = vout->task.input.crop.w;
++ f->fmt.pix.height = vout->task.input.crop.h;
++ }
++ }
++
++ return ret;
++}
++
++static bool mxc_vout_need_fb_reconfig(struct mxc_vout_output *vout,
++ struct mxc_vout_output *pre_vout)
++{
++ if (!vout->vbq.streaming)
++ return false;
++
++ if (vout->tiled_bypass_pp)
++ return true;
++
++ if (vout->linear_bypass_pp != pre_vout->linear_bypass_pp)
++ return true;
++
++ /* cropped output resolution or format are changed */
++ if (vout->task.output.format != pre_vout->task.output.format ||
++ vout->task.output.crop.w != pre_vout->task.output.crop.w ||
++ vout->task.output.crop.h != pre_vout->task.output.crop.h)
++ return true;
++
++ /* overlay: window position or resolution are changed */
++ if (vout->disp_support_windows &&
++ (vout->win_pos.x != pre_vout->win_pos.x ||
++ vout->win_pos.y != pre_vout->win_pos.y ||
++ vout->task.output.width != pre_vout->task.output.width ||
++ vout->task.output.height != pre_vout->task.output.height))
++ return true;
++
++ /* background: cropped position is changed */
++ if (!vout->disp_support_windows &&
++ (vout->task.output.crop.pos.x !=
++ pre_vout->task.output.crop.pos.x ||
++ vout->task.output.crop.pos.y !=
++ pre_vout->task.output.crop.pos.y))
++ return true;
++
++ return false;
++}
++
++static int mxc_vidioc_s_fmt_vid_out(struct file *file, void *fh,
++ struct v4l2_format *f)
++{
++ struct mxc_vout_output *vout = fh;
++ int ret = 0;
++
++ if (vout->vbq.streaming)
++ return -EBUSY;
++
++ mutex_lock(&vout->task_lock);
++ ret = mxc_vout_try_format(vout, f);
++ if (ret >= 0)
++ vout->fmt_init = true;
++ mutex_unlock(&vout->task_lock);
++
++ return ret;
++}
++
++static int mxc_vidioc_cropcap(struct file *file, void *fh,
++ struct v4l2_cropcap *cropcap)
++{
++ struct mxc_vout_output *vout = fh;
++
++ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ return -EINVAL;
++
++ cropcap->bounds = vout->crop_bounds;
++ cropcap->defrect = vout->crop_bounds;
++
++ return 0;
++}
++
++static int mxc_vidioc_g_crop(struct file *file, void *fh,
++ struct v4l2_crop *crop)
++{
++ struct mxc_vout_output *vout = fh;
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ return -EINVAL;
++
++ if (vout->disp_support_windows) {
++ crop->c.left = vout->win_pos.x;
++ crop->c.top = vout->win_pos.y;
++ crop->c.width = vout->task.output.width;
++ crop->c.height = vout->task.output.height;
++ } else {
++ if (vout->task.output.crop.w && vout->task.output.crop.h) {
++ crop->c.left = vout->task.output.crop.pos.x;
++ crop->c.top = vout->task.output.crop.pos.y;
++ crop->c.width = vout->task.output.crop.w;
++ crop->c.height = vout->task.output.crop.h;
++ } else {
++ crop->c.left = 0;
++ crop->c.top = 0;
++ crop->c.width = vout->task.output.width;
++ crop->c.height = vout->task.output.height;
++ }
++ }
++
++ return 0;
++}
++
++static int mxc_vidioc_s_crop(struct file *file, void *fh,
++ const struct v4l2_crop *crop)
++{
++ struct mxc_vout_output *vout = fh, *pre_vout;
++ struct v4l2_rect *b = &vout->crop_bounds;
++ struct v4l2_crop fix_up_crop;
++ int ret = 0;
++
++ memcpy(&fix_up_crop, crop, sizeof(*crop));
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ return -EINVAL;
++
++ if (crop->c.width < 0 || crop->c.height < 0)
++ return -EINVAL;
++
++ if (crop->c.width == 0)
++ fix_up_crop.c.width = b->width - b->left;
++ if (crop->c.height == 0)
++ fix_up_crop.c.height = b->height - b->top;
++
++ if (crop->c.top < b->top)
++ fix_up_crop.c.top = b->top;
++ if (crop->c.top >= b->top + b->height)
++ fix_up_crop.c.top = b->top + b->height - 1;
++ if (crop->c.height > b->top - crop->c.top + b->height)
++ fix_up_crop.c.height =
++ b->top - fix_up_crop.c.top + b->height;
++
++ if (crop->c.left < b->left)
++ fix_up_crop.c.left = b->left;
++ if (crop->c.left >= b->left + b->width)
++ fix_up_crop.c.left = b->left + b->width - 1;
++ if (crop->c.width > b->left - crop->c.left + b->width)
++ fix_up_crop.c.width =
++ b->left - fix_up_crop.c.left + b->width;
++
++ /* stride line limitation */
++ fix_up_crop.c.height -= fix_up_crop.c.height % 8;
++ fix_up_crop.c.width -= fix_up_crop.c.width % 8;
++ if ((fix_up_crop.c.width <= 0) || (fix_up_crop.c.height <= 0) ||
++ ((fix_up_crop.c.left + fix_up_crop.c.width) >
++ (b->left + b->width)) ||
++ ((fix_up_crop.c.top + fix_up_crop.c.height) >
++ (b->top + b->height))) {
++ v4l2_err(vout->vfd->v4l2_dev, "s_crop err: %d, %d, %d, %d",
++ fix_up_crop.c.left, fix_up_crop.c.top,
++ fix_up_crop.c.width, fix_up_crop.c.height);
++ return -EINVAL;
++ }
++
++ /* the same setting, return */
++ if (vout->disp_support_windows) {
++ if ((vout->win_pos.x == fix_up_crop.c.left) &&
++ (vout->win_pos.y == fix_up_crop.c.top) &&
++ (vout->task.output.crop.w == fix_up_crop.c.width) &&
++ (vout->task.output.crop.h == fix_up_crop.c.height))
++ return 0;
++ } else {
++ if ((vout->task.output.crop.pos.x == fix_up_crop.c.left) &&
++ (vout->task.output.crop.pos.y == fix_up_crop.c.top) &&
++ (vout->task.output.crop.w == fix_up_crop.c.width) &&
++ (vout->task.output.crop.h == fix_up_crop.c.height))
++ return 0;
++ }
++
++ pre_vout = vmalloc(sizeof(*pre_vout));
++ if (!pre_vout)
++ return -ENOMEM;
++
++ /* wait current work finish */
++ if (vout->vbq.streaming)
++ flush_workqueue(vout->v4l_wq);
++
++ mutex_lock(&vout->task_lock);
++
++ memcpy(pre_vout, vout, sizeof(*vout));
++
++ if (vout->disp_support_windows) {
++ vout->task.output.crop.pos.x = 0;
++ vout->task.output.crop.pos.y = 0;
++ vout->win_pos.x = fix_up_crop.c.left;
++ vout->win_pos.y = fix_up_crop.c.top;
++ vout->task.output.width = fix_up_crop.c.width;
++ vout->task.output.height = fix_up_crop.c.height;
++ } else {
++ vout->task.output.crop.pos.x = fix_up_crop.c.left;
++ vout->task.output.crop.pos.y = fix_up_crop.c.top;
++ }
++
++ vout->task.output.crop.w = fix_up_crop.c.width;
++ vout->task.output.crop.h = fix_up_crop.c.height;
++
++ /*
++ * must S_CROP before S_FMT, for fist time S_CROP, will not check
++ * ipu task, it will check in S_FMT, after S_FMT, S_CROP should
++ * check ipu task too.
++ */
++ if (vout->fmt_init) {
++ memcpy(&vout->task.input.crop, &vout->in_rect,
++ sizeof(vout->in_rect));
++ ret = mxc_vout_try_task(vout);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "vout check task failed\n");
++ memcpy(vout, pre_vout, sizeof(*vout));
++ goto done;
++ }
++
++ if (mxc_vout_need_fb_reconfig(vout, pre_vout)) {
++ ret = config_disp_output(vout);
++ if (ret < 0)
++ v4l2_err(vout->vfd->v4l2_dev,
++ "Config display output failed\n");
++ }
++ }
++
++done:
++ vfree(pre_vout);
++ mutex_unlock(&vout->task_lock);
++
++ return ret;
++}
++
++static int mxc_vidioc_queryctrl(struct file *file, void *fh,
++ struct v4l2_queryctrl *ctrl)
++{
++ int ret = 0;
++
++ switch (ctrl->id) {
++ case V4L2_CID_ROTATE:
++ ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
++ break;
++ case V4L2_CID_VFLIP:
++ ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
++ break;
++ case V4L2_CID_HFLIP:
++ ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
++ break;
++ case V4L2_CID_MXC_MOTION:
++ ret = v4l2_ctrl_query_fill(ctrl, 0, 2, 1, 0);
++ break;
++ default:
++ ctrl->name[0] = '\0';
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static int mxc_vidioc_g_ctrl(struct file *file, void *fh,
++ struct v4l2_control *ctrl)
++{
++ int ret = 0;
++ struct mxc_vout_output *vout = fh;
++
++ switch (ctrl->id) {
++ case V4L2_CID_ROTATE:
++ ctrl->value = vout->ctrl_rotate;
++ break;
++ case V4L2_CID_VFLIP:
++ ctrl->value = vout->ctrl_vflip;
++ break;
++ case V4L2_CID_HFLIP:
++ ctrl->value = vout->ctrl_hflip;
++ break;
++ case V4L2_CID_MXC_MOTION:
++ if (vout->task.input.deinterlace.enable)
++ ctrl->value = vout->task.input.deinterlace.motion;
++ else
++ ctrl->value = 0;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static void setup_task_rotation(struct mxc_vout_output *vout)
++{
++ if (vout->ctrl_rotate == 0) {
++ if (vout->ctrl_vflip && vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_180;
++ else if (vout->ctrl_vflip)
++ vout->task.output.rotate = IPU_ROTATE_VERT_FLIP;
++ else if (vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_HORIZ_FLIP;
++ else
++ vout->task.output.rotate = IPU_ROTATE_NONE;
++ } else if (vout->ctrl_rotate == 90) {
++ if (vout->ctrl_vflip && vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_90_LEFT;
++ else if (vout->ctrl_vflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT_VFLIP;
++ else if (vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT_HFLIP;
++ else
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT;
++ } else if (vout->ctrl_rotate == 180) {
++ if (vout->ctrl_vflip && vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_NONE;
++ else if (vout->ctrl_vflip)
++ vout->task.output.rotate = IPU_ROTATE_HORIZ_FLIP;
++ else if (vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_VERT_FLIP;
++ else
++ vout->task.output.rotate = IPU_ROTATE_180;
++ } else if (vout->ctrl_rotate == 270) {
++ if (vout->ctrl_vflip && vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT;
++ else if (vout->ctrl_vflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT_HFLIP;
++ else if (vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT_VFLIP;
++ else
++ vout->task.output.rotate = IPU_ROTATE_90_LEFT;
++ }
++}
++
++static int mxc_vidioc_s_ctrl(struct file *file, void *fh,
++ struct v4l2_control *ctrl)
++{
++ int ret = 0;
++ struct mxc_vout_output *vout = fh, *pre_vout;
++
++ pre_vout = vmalloc(sizeof(*pre_vout));
++ if (!pre_vout)
++ return -ENOMEM;
++
++ /* wait current work finish */
++ if (vout->vbq.streaming)
++ flush_workqueue(vout->v4l_wq);
++
++ mutex_lock(&vout->task_lock);
++
++ memcpy(pre_vout, vout, sizeof(*vout));
++
++ switch (ctrl->id) {
++ case V4L2_CID_ROTATE:
++ {
++ vout->ctrl_rotate = (ctrl->value/90) * 90;
++ if (vout->ctrl_rotate > 270)
++ vout->ctrl_rotate = 270;
++ setup_task_rotation(vout);
++ break;
++ }
++ case V4L2_CID_VFLIP:
++ {
++ vout->ctrl_vflip = ctrl->value;
++ setup_task_rotation(vout);
++ break;
++ }
++ case V4L2_CID_HFLIP:
++ {
++ vout->ctrl_hflip = ctrl->value;
++ setup_task_rotation(vout);
++ break;
++ }
++ case V4L2_CID_MXC_MOTION:
++ {
++ vout->task.input.deinterlace.motion = ctrl->value;
++ break;
++ }
++ default:
++ ret = -EINVAL;
++ goto done;
++ }
++
++ if (vout->fmt_init) {
++ memcpy(&vout->task.input.crop, &vout->in_rect,
++ sizeof(vout->in_rect));
++ ret = mxc_vout_try_task(vout);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "vout check task failed\n");
++ memcpy(vout, pre_vout, sizeof(*vout));
++ goto done;
++ }
++
++ if (mxc_vout_need_fb_reconfig(vout, pre_vout)) {
++ ret = config_disp_output(vout);
++ if (ret < 0)
++ v4l2_err(vout->vfd->v4l2_dev,
++ "Config display output failed\n");
++ }
++ }
++
++done:
++ vfree(pre_vout);
++ mutex_unlock(&vout->task_lock);
++
++ return ret;
++}
++
++static int mxc_vidioc_reqbufs(struct file *file, void *fh,
++ struct v4l2_requestbuffers *req)
++{
++ int ret = 0;
++ struct mxc_vout_output *vout = fh;
++ struct videobuf_queue *q = &vout->vbq;
++
++ if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ return -EINVAL;
++
++ /* should not be here after streaming, videobuf_reqbufs will control */
++ mutex_lock(&vout->task_lock);
++
++ ret = videobuf_reqbufs(q, req);
++
++ mutex_unlock(&vout->task_lock);
++ return ret;
++}
++
++static int mxc_vidioc_querybuf(struct file *file, void *fh,
++ struct v4l2_buffer *b)
++{
++ int ret;
++ struct mxc_vout_output *vout = fh;
++
++ ret = videobuf_querybuf(&vout->vbq, b);
++ if (!ret) {
++ /* return physical address */
++ struct videobuf_buffer *vb = vout->vbq.bufs[b->index];
++ if (b->flags & V4L2_BUF_FLAG_MAPPED)
++ b->m.offset = videobuf_to_dma_contig(vb);
++ }
++
++ return ret;
++}
++
++static int mxc_vidioc_qbuf(struct file *file, void *fh,
++ struct v4l2_buffer *buffer)
++{
++ struct mxc_vout_output *vout = fh;
++
++ return videobuf_qbuf(&vout->vbq, buffer);
++}
++
++static int mxc_vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
++{
++ struct mxc_vout_output *vout = fh;
++
++ if (!vout->vbq.streaming)
++ return -EINVAL;
++
++ if (file->f_flags & O_NONBLOCK)
++ return videobuf_dqbuf(&vout->vbq, (struct v4l2_buffer *)b, 1);
++ else
++ return videobuf_dqbuf(&vout->vbq, (struct v4l2_buffer *)b, 0);
++}
++
++static int set_window_position(struct mxc_vout_output *vout,
++ struct mxcfb_pos *pos)
++{
++ struct fb_info *fbi = vout->fbi;
++ mm_segment_t old_fs;
++ int ret = 0;
++
++ if (vout->disp_support_windows) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ ret = fbi->fbops->fb_ioctl(fbi, MXCFB_SET_OVERLAY_POS,
++ (unsigned long)pos);
++ set_fs(old_fs);
++ }
++
++ return ret;
++}
++
++static int config_disp_output(struct mxc_vout_output *vout)
++{
++ struct dma_mem *buf = NULL;
++ struct fb_info *fbi = vout->fbi;
++ struct fb_var_screeninfo var;
++ struct mxcfb_pos pos;
++ int i, fb_num, ret;
++ u32 fb_base;
++ u32 size;
++ u32 display_buf_size;
++ u32 *pixel = NULL;
++ u32 color;
++ int j;
++
++ memcpy(&var, &fbi->var, sizeof(var));
++ fb_base = fbi->fix.smem_start;
++
++ var.xres = vout->task.output.width;
++ var.yres = vout->task.output.height;
++ if (vout->linear_bypass_pp || vout->tiled_bypass_pp) {
++ fb_num = 1;
++ /* input crop */
++ if (vout->task.input.width > vout->task.output.width)
++ var.xres_virtual = vout->task.input.width;
++ else
++ var.xres_virtual = var.xres;
++ if (vout->task.input.height > vout->task.output.height)
++ var.yres_virtual = vout->task.input.height;
++ else
++ var.yres_virtual = var.yres;
++ var.rotate = vout->task.output.rotate;
++ var.vmode |= FB_VMODE_YWRAP;
++ } else {
++ fb_num = FB_BUFS;
++ var.xres_virtual = var.xres;
++ var.yres_virtual = fb_num * var.yres;
++ var.vmode &= ~FB_VMODE_YWRAP;
++ }
++ var.bits_per_pixel = fmt_to_bpp(vout->task.output.format);
++ var.nonstd = vout->task.output.format;
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "set display fb to %d %d\n",
++ var.xres, var.yres);
++
++ /*
++ * To setup the overlay fb from scratch without
++ * the last time overlay fb position or resolution's
++ * impact, we take the following steps:
++ * - blank fb
++ * - set fb position to the starting point
++ * - reconfigure fb
++ * - set fb position to a specific point
++ * - unblank fb
++ * This procedure applies to non-overlay fbs as well.
++ */
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++ pos.x = 0;
++ pos.y = 0;
++ ret = set_window_position(vout, &pos);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev, "failed to set fb position "
++ "to starting point\n");
++ return ret;
++ }
++
++ /* Init display channel through fb API */
++ var.yoffset = 0;
++ var.activate |= FB_ACTIVATE_FORCE;
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ ret = fb_set_var(fbi, &var);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "ERR:%s fb_set_var ret:%d\n", __func__, ret);
++ return ret;
++ }
++
++ ret = set_window_position(vout, &vout->win_pos);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev, "failed to set fb position\n");
++ return ret;
++ }
++
++ if (vout->linear_bypass_pp || vout->tiled_bypass_pp)
++ display_buf_size = fbi->fix.line_length * fbi->var.yres_virtual;
++ else
++ display_buf_size = fbi->fix.line_length * fbi->var.yres;
++ for (i = 0; i < fb_num; i++)
++ vout->disp_bufs[i] = fbi->fix.smem_start + i * display_buf_size;
++ if (vout->tiled_bypass_pp) {
++ size = PAGE_ALIGN(vout->task.input.crop.w *
++ vout->task.input.crop.h *
++ fmt_to_bpp(vout->task.output.format)/8);
++ if (size > vout->vdoa_output[0].size) {
++ for (i = 0; i < VDOA_FB_BUFS; i++) {
++ buf = &vout->vdoa_output[i];
++ if (buf->vaddr)
++ free_dma_buf(vout, buf);
++ buf->size = size;
++ ret = alloc_dma_buf(vout, buf);
++ if (ret < 0)
++ goto err;
++ }
++ }
++ for (i = fb_num; i < (fb_num + VDOA_FB_BUFS); i++)
++ vout->disp_bufs[i] =
++ vout->vdoa_output[i - fb_num].paddr;
++ }
++ vout->fb_smem_len = fbi->fix.smem_len;
++ vout->fb_smem_start = fbi->fix.smem_start;
++ if (fb_base != fbi->fix.smem_start) {
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "realloc fb mem size:0x%x@0x%lx,old paddr @0x%x\n",
++ fbi->fix.smem_len, fbi->fix.smem_start, fb_base);
++ }
++
++ /* fill black when video config changed */
++ color = colorspaceofpixel(vout->task.output.format) == YUV_CS ?
++ UYVY_BLACK : RGB_BLACK;
++ if (IS_PLANAR_PIXEL_FORMAT(vout->task.output.format)) {
++ size = display_buf_size * 8 /
++ fmt_to_bpp(vout->task.output.format);
++ memset(fbi->screen_base, Y_BLACK, size);
++ memset(fbi->screen_base + size, UV_BLACK,
++ display_buf_size - size);
++ } else {
++ pixel = (u32 *)fbi->screen_base;
++ for (i = 0; i < (display_buf_size >> 2); i++)
++ *pixel++ = color;
++ }
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ ret = fb_blank(fbi, FB_BLANK_UNBLANK);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++ vout->release = false;
++
++ return ret;
++err:
++ for (j = i - 1; j >= 0; j--) {
++ buf = &vout->vdoa_output[j];
++ if (buf->vaddr)
++ free_dma_buf(vout, buf);
++ }
++ return ret;
++}
++
++static inline void wait_for_vsync(struct mxc_vout_output *vout)
++{
++ struct fb_info *fbi = vout->fbi;
++ mm_segment_t old_fs;
++
++ if (fbi->fbops->fb_ioctl) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ fbi->fbops->fb_ioctl(fbi, MXCFB_WAIT_FOR_VSYNC,
++ (unsigned long)NULL);
++ set_fs(old_fs);
++ }
++
++ return;
++}
++
++static void release_disp_output(struct mxc_vout_output *vout)
++{
++ struct fb_info *fbi = vout->fbi;
++ struct mxcfb_pos pos;
++
++ if (vout->release)
++ return;
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++ /* restore pos to 0,0 avoid fb pan display hang? */
++ pos.x = 0;
++ pos.y = 0;
++ set_window_position(vout, &pos);
++
++ if (get_ipu_channel(fbi) == MEM_BG_SYNC) {
++ console_lock();
++ fbi->fix.smem_start = vout->disp_bufs[0];
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_blank(fbi, FB_BLANK_UNBLANK);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++ }
++
++ vout->release = true;
++}
++
++static int mxc_vidioc_streamon(struct file *file, void *fh,
++ enum v4l2_buf_type i)
++{
++ struct mxc_vout_output *vout = fh;
++ struct videobuf_queue *q = &vout->vbq;
++ int ret;
++
++ if (q->streaming) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "video output already run\n");
++ ret = -EBUSY;
++ goto done;
++ }
++
++ if (deinterlace_3_field(vout) && list_is_singular(&q->stream)) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "deinterlacing: need queue 2 frame before streamon\n");
++ ret = -EINVAL;
++ goto done;
++ }
++
++ ret = config_disp_output(vout);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "Config display output failed\n");
++ goto done;
++ }
++
++ hrtimer_init(&vout->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
++ vout->timer.function = mxc_vout_timer_handler;
++ vout->timer_stop = true;
++
++ vout->start_ktime = hrtimer_cb_get_time(&vout->timer);
++
++ vout->pre1_vb = NULL;
++ vout->pre2_vb = NULL;
++
++ ret = videobuf_streamon(q);
++done:
++ return ret;
++}
++
++static int mxc_vidioc_streamoff(struct file *file, void *fh,
++ enum v4l2_buf_type i)
++{
++ struct mxc_vout_output *vout = fh;
++ struct videobuf_queue *q = &vout->vbq;
++ int ret = 0;
++
++ if (q->streaming) {
++ flush_workqueue(vout->v4l_wq);
++
++ hrtimer_cancel(&vout->timer);
++
++ /*
++ * Wait for 2 vsyncs to make sure
++ * frames are drained on triple
++ * buffer.
++ */
++ wait_for_vsync(vout);
++ wait_for_vsync(vout);
++
++ release_disp_output(vout);
++
++ ret = videobuf_streamoff(&vout->vbq);
++ }
++ INIT_LIST_HEAD(&vout->queue_list);
++ INIT_LIST_HEAD(&vout->active_list);
++
++ return ret;
++}
++
++static const struct v4l2_ioctl_ops mxc_vout_ioctl_ops = {
++ .vidioc_querycap = mxc_vidioc_querycap,
++ .vidioc_enum_fmt_vid_out = mxc_vidioc_enum_fmt_vid_out,
++ .vidioc_g_fmt_vid_out = mxc_vidioc_g_fmt_vid_out,
++ .vidioc_s_fmt_vid_out = mxc_vidioc_s_fmt_vid_out,
++ .vidioc_cropcap = mxc_vidioc_cropcap,
++ .vidioc_g_crop = mxc_vidioc_g_crop,
++ .vidioc_s_crop = mxc_vidioc_s_crop,
++ .vidioc_queryctrl = mxc_vidioc_queryctrl,
++ .vidioc_g_ctrl = mxc_vidioc_g_ctrl,
++ .vidioc_s_ctrl = mxc_vidioc_s_ctrl,
++ .vidioc_reqbufs = mxc_vidioc_reqbufs,
++ .vidioc_querybuf = mxc_vidioc_querybuf,
++ .vidioc_qbuf = mxc_vidioc_qbuf,
++ .vidioc_dqbuf = mxc_vidioc_dqbuf,
++ .vidioc_streamon = mxc_vidioc_streamon,
++ .vidioc_streamoff = mxc_vidioc_streamoff,
++};
++
++static const struct v4l2_file_operations mxc_vout_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = video_ioctl2,
++ .mmap = mxc_vout_mmap,
++ .open = mxc_vout_open,
++ .release = mxc_vout_release,
++};
++
++static struct video_device mxc_vout_template = {
++ .name = "MXC Video Output",
++ .fops = &mxc_vout_fops,
++ .ioctl_ops = &mxc_vout_ioctl_ops,
++ .release = video_device_release,
++};
++
++static struct videobuf_queue_ops mxc_vout_vbq_ops = {
++ .buf_setup = mxc_vout_buffer_setup,
++ .buf_prepare = mxc_vout_buffer_prepare,
++ .buf_release = mxc_vout_buffer_release,
++ .buf_queue = mxc_vout_buffer_queue,
++};
++
++static void mxc_vout_free_output(struct mxc_vout_dev *dev)
++{
++ int i;
++ int j;
++ struct mxc_vout_output *vout;
++ struct video_device *vfd;
++
++ for (i = 0; i < dev->out_num; i++) {
++ vout = dev->out[i];
++ vfd = vout->vfd;
++ if (vout->vdoa_work.vaddr)
++ free_dma_buf(vout, &vout->vdoa_work);
++ for (j = 0; j < VDOA_FB_BUFS; j++) {
++ if (vout->vdoa_output[j].vaddr)
++ free_dma_buf(vout, &vout->vdoa_output[j]);
++ }
++ if (vfd) {
++ if (!video_is_registered(vfd))
++ video_device_release(vfd);
++ else
++ video_unregister_device(vfd);
++ }
++ kfree(vout);
++ }
++}
++
++static int mxc_vout_setup_output(struct mxc_vout_dev *dev)
++{
++ struct videobuf_queue *q;
++ struct fb_info *fbi;
++ struct mxc_vout_output *vout;
++ int i, ret = 0;
++
++ update_display_setting();
++
++ /* all output/overlay based on fb */
++ for (i = 0; i < num_registered_fb; i++) {
++ fbi = registered_fb[i];
++
++ vout = kzalloc(sizeof(struct mxc_vout_output), GFP_KERNEL);
++ if (!vout) {
++ ret = -ENOMEM;
++ break;
++ }
++
++ dev->out[dev->out_num] = vout;
++ dev->out_num++;
++
++ vout->fbi = fbi;
++ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
++ vout->vfd = video_device_alloc();
++ if (!vout->vfd) {
++ ret = -ENOMEM;
++ break;
++ }
++
++ *vout->vfd = mxc_vout_template;
++ vout->vfd->debug = debug;
++ vout->vfd->v4l2_dev = &dev->v4l2_dev;
++ vout->vfd->lock = &vout->mutex;
++ vout->vfd->vfl_dir = VFL_DIR_TX;
++
++ mutex_init(&vout->mutex);
++ mutex_init(&vout->task_lock);
++
++ strlcpy(vout->vfd->name, fbi->fix.id, sizeof(vout->vfd->name));
++
++ video_set_drvdata(vout->vfd, vout);
++
++ if (video_register_device(vout->vfd,
++ VFL_TYPE_GRABBER, video_nr + i) < 0) {
++ ret = -ENODEV;
++ break;
++ }
++
++ q = &vout->vbq;
++ q->dev = dev->dev;
++ spin_lock_init(&vout->vbq_lock);
++ videobuf_queue_dma_contig_init(q, &mxc_vout_vbq_ops, q->dev,
++ &vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
++ sizeof(struct videobuf_buffer), vout, NULL);
++
++ v4l2_info(vout->vfd->v4l2_dev, "V4L2 device registered as %s\n",
++ video_device_node_name(vout->vfd));
++
++ }
++
++ return ret;
++}
++
++static int mxc_vout_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct mxc_vout_dev *dev;
++
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev)
++ return -ENOMEM;
++
++ dev->dev = &pdev->dev;
++ dev->dev->dma_mask = kmalloc(sizeof(*dev->dev->dma_mask), GFP_KERNEL);
++ *dev->dev->dma_mask = DMA_BIT_MASK(32);
++ dev->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ ret = v4l2_device_register(dev->dev, &dev->v4l2_dev);
++ if (ret) {
++ dev_err(dev->dev, "v4l2_device_register failed\n");
++ goto free_dev;
++ }
++
++ ret = mxc_vout_setup_output(dev);
++ if (ret < 0)
++ goto rel_vdev;
++
++ return 0;
++
++rel_vdev:
++ mxc_vout_free_output(dev);
++ v4l2_device_unregister(&dev->v4l2_dev);
++free_dev:
++ kfree(dev);
++ return ret;
++}
++
++static int mxc_vout_remove(struct platform_device *pdev)
++{
++ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
++ struct mxc_vout_dev *dev = container_of(v4l2_dev, struct
++ mxc_vout_dev, v4l2_dev);
++
++ mxc_vout_free_output(dev);
++ v4l2_device_unregister(v4l2_dev);
++ kfree(dev);
++ return 0;
++}
++
++static const struct of_device_id mxc_v4l2_dt_ids[] = {
++ { .compatible = "fsl,mxc_v4l2_output", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_vout_driver = {
++ .driver = {
++ .name = "mxc_v4l2_output",
++ .of_match_table = mxc_v4l2_dt_ids,
++ },
++ .probe = mxc_vout_probe,
++ .remove = mxc_vout_remove,
++};
++
++static int __init mxc_vout_init(void)
++{
++ if (platform_driver_register(&mxc_vout_driver) != 0) {
++ printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n");
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void mxc_vout_cleanup(void)
++{
++ platform_driver_unregister(&mxc_vout_driver);
++}
++
++module_init(mxc_vout_init);
++module_exit(mxc_vout_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("V4L2-driver for MXC video output");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/rc/imon.c linux-openelec/drivers/media/rc/imon.c
+--- linux-3.14.36/drivers/media/rc/imon.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/rc/imon.c 2015-07-24 18:03:30.068842002 -0500
+@@ -1344,6 +1344,17 @@
+ }
+ } else {
+ /*
++ * For users without stabilized, just ignore any value getting
++ * to close to the diagonal.
++ */
++ if ((abs(rel_y) < 2 && abs(rel_x) < 2) ||
++ abs(abs(rel_y) - abs(rel_x)) < 2 ) {
++ spin_lock_irqsave(&ictx->kc_lock, flags);
++ ictx->kc = KEY_UNKNOWN;
++ spin_unlock_irqrestore(&ictx->kc_lock, flags);
++ return;
++ }
++ /*
+ * Hack alert: instead of using keycodes, we have
+ * to use hard-coded scancodes here...
+ */
+diff -Nur linux-3.14.36/drivers/media/rc/ir-rc6-decoder.c linux-openelec/drivers/media/rc/ir-rc6-decoder.c
+--- linux-3.14.36/drivers/media/rc/ir-rc6-decoder.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/rc/ir-rc6-decoder.c 2015-07-24 18:03:30.040842002 -0500
+@@ -39,7 +39,6 @@
+ #define RC6_STARTBIT_MASK 0x08 /* for the header bits */
+ #define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
+ #define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */
+-#define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */
+ #ifndef CHAR_BIT
+ #define CHAR_BIT 8 /* Normally in <limits.h> */
+ #endif
+@@ -244,9 +243,8 @@
+ }
+
+ scancode = data->body;
+- if (data->count == RC6_6A_32_NBITS &&
+- (scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
+- /* MCE RC */
++ if (data->count == RC6_6A_32_NBITS) {
++ /* MCE compatible RC */
+ toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 1 : 0;
+ scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
+ } else {
+diff -Nur linux-3.14.36/drivers/media/rc/ir-rc6-decoder.c.orig linux-openelec/drivers/media/rc/ir-rc6-decoder.c.orig
+--- linux-3.14.36/drivers/media/rc/ir-rc6-decoder.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/rc/ir-rc6-decoder.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,300 @@
++/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol
++ *
++ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include "rc-core-priv.h"
++#include <linux/module.h>
++
++/*
++ * This decoder currently supports:
++ * RC6-0-16 (standard toggle bit in header)
++ * RC6-6A-20 (no toggle bit)
++ * RC6-6A-24 (no toggle bit)
++ * RC6-6A-32 (MCE version with toggle bit in body)
++ */
++
++#define RC6_UNIT 444444 /* nanosecs */
++#define RC6_HEADER_NBITS 4 /* not including toggle bit */
++#define RC6_0_NBITS 16
++#define RC6_6A_32_NBITS 32
++#define RC6_6A_NBITS 128 /* Variable 8..128 */
++#define RC6_PREFIX_PULSE (6 * RC6_UNIT)
++#define RC6_PREFIX_SPACE (2 * RC6_UNIT)
++#define RC6_BIT_START (1 * RC6_UNIT)
++#define RC6_BIT_END (1 * RC6_UNIT)
++#define RC6_TOGGLE_START (2 * RC6_UNIT)
++#define RC6_TOGGLE_END (2 * RC6_UNIT)
++#define RC6_SUFFIX_SPACE (6 * RC6_UNIT)
++#define RC6_MODE_MASK 0x07 /* for the header bits */
++#define RC6_STARTBIT_MASK 0x08 /* for the header bits */
++#define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
++#define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */
++#define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */
++#ifndef CHAR_BIT
++#define CHAR_BIT 8 /* Normally in <limits.h> */
++#endif
++
++enum rc6_mode {
++ RC6_MODE_0,
++ RC6_MODE_6A,
++ RC6_MODE_UNKNOWN,
++};
++
++enum rc6_state {
++ STATE_INACTIVE,
++ STATE_PREFIX_SPACE,
++ STATE_HEADER_BIT_START,
++ STATE_HEADER_BIT_END,
++ STATE_TOGGLE_START,
++ STATE_TOGGLE_END,
++ STATE_BODY_BIT_START,
++ STATE_BODY_BIT_END,
++ STATE_FINISHED,
++};
++
++static enum rc6_mode rc6_mode(struct rc6_dec *data)
++{
++ switch (data->header & RC6_MODE_MASK) {
++ case 0:
++ return RC6_MODE_0;
++ case 6:
++ if (!data->toggle)
++ return RC6_MODE_6A;
++ /* fall through */
++ default:
++ return RC6_MODE_UNKNOWN;
++ }
++}
++
++/**
++ * ir_rc6_decode() - Decode one RC6 pulse or space
++ * @dev: the struct rc_dev descriptor of the device
++ * @ev: the struct ir_raw_event descriptor of the pulse/space
++ *
++ * This function returns -EINVAL if the pulse violates the state machine
++ */
++static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
++{
++ struct rc6_dec *data = &dev->raw->rc6;
++ u32 scancode;
++ u8 toggle;
++
++ if (!(dev->enabled_protocols &
++ (RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
++ RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE)))
++ return 0;
++
++ if (!is_timing_event(ev)) {
++ if (ev.reset)
++ data->state = STATE_INACTIVE;
++ return 0;
++ }
++
++ if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
++ goto out;
++
++again:
++ IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n",
++ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
++
++ if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
++ return 0;
++
++ switch (data->state) {
++
++ case STATE_INACTIVE:
++ if (!ev.pulse)
++ break;
++
++ /* Note: larger margin on first pulse since each RC6_UNIT
++ is quite short and some hardware takes some time to
++ adjust to the signal */
++ if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT))
++ break;
++
++ data->state = STATE_PREFIX_SPACE;
++ data->count = 0;
++ return 0;
++
++ case STATE_PREFIX_SPACE:
++ if (ev.pulse)
++ break;
++
++ if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2))
++ break;
++
++ data->state = STATE_HEADER_BIT_START;
++ data->header = 0;
++ return 0;
++
++ case STATE_HEADER_BIT_START:
++ if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
++ break;
++
++ data->header <<= 1;
++ if (ev.pulse)
++ data->header |= 1;
++ data->count++;
++ data->state = STATE_HEADER_BIT_END;
++ return 0;
++
++ case STATE_HEADER_BIT_END:
++ if (!is_transition(&ev, &dev->raw->prev_ev))
++ break;
++
++ if (data->count == RC6_HEADER_NBITS)
++ data->state = STATE_TOGGLE_START;
++ else
++ data->state = STATE_HEADER_BIT_START;
++
++ decrease_duration(&ev, RC6_BIT_END);
++ goto again;
++
++ case STATE_TOGGLE_START:
++ if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2))
++ break;
++
++ data->toggle = ev.pulse;
++ data->state = STATE_TOGGLE_END;
++ return 0;
++
++ case STATE_TOGGLE_END:
++ if (!is_transition(&ev, &dev->raw->prev_ev) ||
++ !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2))
++ break;
++
++ if (!(data->header & RC6_STARTBIT_MASK)) {
++ IR_dprintk(1, "RC6 invalid start bit\n");
++ break;
++ }
++
++ data->state = STATE_BODY_BIT_START;
++ decrease_duration(&ev, RC6_TOGGLE_END);
++ data->count = 0;
++ data->body = 0;
++
++ switch (rc6_mode(data)) {
++ case RC6_MODE_0:
++ data->wanted_bits = RC6_0_NBITS;
++ break;
++ case RC6_MODE_6A:
++ data->wanted_bits = RC6_6A_NBITS;
++ break;
++ default:
++ IR_dprintk(1, "RC6 unknown mode\n");
++ goto out;
++ }
++ goto again;
++
++ case STATE_BODY_BIT_START:
++ if (eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) {
++ /* Discard LSB's that won't fit in data->body */
++ if (data->count++ < CHAR_BIT * sizeof data->body) {
++ data->body <<= 1;
++ if (ev.pulse)
++ data->body |= 1;
++ }
++ data->state = STATE_BODY_BIT_END;
++ return 0;
++ } else if (RC6_MODE_6A == rc6_mode(data) && !ev.pulse &&
++ geq_margin(ev.duration, RC6_SUFFIX_SPACE, RC6_UNIT / 2)) {
++ data->state = STATE_FINISHED;
++ goto again;
++ }
++ break;
++
++ case STATE_BODY_BIT_END:
++ if (!is_transition(&ev, &dev->raw->prev_ev))
++ break;
++
++ if (data->count == data->wanted_bits)
++ data->state = STATE_FINISHED;
++ else
++ data->state = STATE_BODY_BIT_START;
++
++ decrease_duration(&ev, RC6_BIT_END);
++ goto again;
++
++ case STATE_FINISHED:
++ if (ev.pulse)
++ break;
++
++ switch (rc6_mode(data)) {
++ case RC6_MODE_0:
++ scancode = data->body;
++ toggle = data->toggle;
++ IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n",
++ scancode, toggle);
++ break;
++ case RC6_MODE_6A:
++ if (data->count > CHAR_BIT * sizeof data->body) {
++ IR_dprintk(1, "RC6 too many (%u) data bits\n",
++ data->count);
++ goto out;
++ }
++
++ scancode = data->body;
++ if (data->count == RC6_6A_32_NBITS &&
++ (scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
++ /* MCE RC */
++ toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 1 : 0;
++ scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
++ } else {
++ toggle = 0;
++ }
++ IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n",
++ scancode, toggle);
++ break;
++ default:
++ IR_dprintk(1, "RC6 unknown mode\n");
++ goto out;
++ }
++
++ rc_keydown(dev, scancode, toggle);
++ data->state = STATE_INACTIVE;
++ return 0;
++ }
++
++out:
++ IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n",
++ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
++ data->state = STATE_INACTIVE;
++ return -EINVAL;
++}
++
++static struct ir_raw_handler rc6_handler = {
++ .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
++ RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
++ RC_BIT_RC6_MCE,
++ .decode = ir_rc6_decode,
++};
++
++static int __init ir_rc6_decode_init(void)
++{
++ ir_raw_handler_register(&rc6_handler);
++
++ printk(KERN_INFO "IR RC6 protocol handler initialized\n");
++ return 0;
++}
++
++static void __exit ir_rc6_decode_exit(void)
++{
++ ir_raw_handler_unregister(&rc6_handler);
++}
++
++module_init(ir_rc6_decode_init);
++module_exit(ir_rc6_decode_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
++MODULE_DESCRIPTION("RC6 IR protocol decoder");
+diff -Nur linux-3.14.36/drivers/media/rc/keymaps/Makefile linux-openelec/drivers/media/rc/keymaps/Makefile
+--- linux-3.14.36/drivers/media/rc/keymaps/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/rc/keymaps/Makefile 2015-07-24 18:03:30.128842002 -0500
+@@ -28,6 +28,7 @@
+ rc-dm1105-nec.o \
+ rc-dntv-live-dvb-t.o \
+ rc-dntv-live-dvbt-pro.o \
++ rc-dvbsky.o \
+ rc-em-terratec.o \
+ rc-encore-enltv2.o \
+ rc-encore-enltv.o \
+diff -Nur linux-3.14.36/drivers/media/rc/keymaps/rc-dvbsky.c linux-openelec/drivers/media/rc/keymaps/rc-dvbsky.c
+--- linux-3.14.36/drivers/media/rc/keymaps/rc-dvbsky.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/rc/keymaps/rc-dvbsky.c 2015-07-24 18:03:30.128842002 -0500
+@@ -0,0 +1,78 @@
++/* rc-dvbsky.c - Keytable for Dvbsky Remote Controllers
++ *
++ * keymap imported from ir-keymaps.c
++ *
++ *
++ * Copyright (c) 2010-2012 by Nibble Max <nibble.max@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <media/rc-map.h>
++#include <linux/module.h>
++/*
++ * This table contains the complete RC5 code, instead of just the data part
++ */
++
++static struct rc_map_table rc5_dvbsky[] = {
++ { 0x0000, KEY_0 },
++ { 0x0001, KEY_1 },
++ { 0x0002, KEY_2 },
++ { 0x0003, KEY_3 },
++ { 0x0004, KEY_4 },
++ { 0x0005, KEY_5 },
++ { 0x0006, KEY_6 },
++ { 0x0007, KEY_7 },
++ { 0x0008, KEY_8 },
++ { 0x0009, KEY_9 },
++ { 0x000a, KEY_MUTE },
++ { 0x000d, KEY_OK },
++ { 0x000b, KEY_STOP },
++ { 0x000c, KEY_EXIT },
++ { 0x000e, KEY_CAMERA }, /*Snap shot*/
++ { 0x000f, KEY_SUBTITLE }, /*PIP*/
++ { 0x0010, KEY_VOLUMEUP },
++ { 0x0011, KEY_VOLUMEDOWN },
++ { 0x0012, KEY_FAVORITES },
++ { 0x0013, KEY_LIST }, /*Info*/
++ { 0x0016, KEY_PAUSE },
++ { 0x0017, KEY_PLAY },
++ { 0x001f, KEY_RECORD },
++ { 0x0020, KEY_CHANNELDOWN },
++ { 0x0021, KEY_CHANNELUP },
++ { 0x0025, KEY_POWER2 },
++ { 0x0026, KEY_REWIND },
++ { 0x0027, KEY_FASTFORWARD },
++ { 0x0029, KEY_LAST },
++ { 0x002b, KEY_MENU },
++ { 0x002c, KEY_EPG },
++ { 0x002d, KEY_ZOOM },
++};
++
++static struct rc_map_list rc5_dvbsky_map = {
++ .map = {
++ .scan = rc5_dvbsky,
++ .size = ARRAY_SIZE(rc5_dvbsky),
++ .rc_type = RC_TYPE_RC5,
++ .name = RC_MAP_DVBSKY,
++ }
++};
++
++static int __init init_rc_map_rc5_dvbsky(void)
++{
++ return rc_map_register(&rc5_dvbsky_map);
++}
++
++static void __exit exit_rc_map_rc5_dvbsky(void)
++{
++ rc_map_unregister(&rc5_dvbsky_map);
++}
++
++module_init(init_rc_map_rc5_dvbsky)
++module_exit(exit_rc_map_rc5_dvbsky)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Nibble Max <nibble.max@gmail.com>");
+diff -Nur linux-3.14.36/drivers/media/rc/mceusb.c linux-openelec/drivers/media/rc/mceusb.c
+--- linux-3.14.36/drivers/media/rc/mceusb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/rc/mceusb.c 2015-07-24 18:03:30.372842002 -0500
+@@ -200,6 +200,7 @@
+ #define VENDOR_CONEXANT 0x0572
+ #define VENDOR_TWISTEDMELON 0x2596
+ #define VENDOR_HAUPPAUGE 0x2040
++#define VENDOR_ADAPTEC 0x03f3
+
+ enum mceusb_model_type {
+ MCE_GEN2 = 0, /* Most boards */
+@@ -316,6 +317,9 @@
+ /* SMK/I-O Data GV-MC7/RCKIT Receiver */
+ { USB_DEVICE(VENDOR_SMK, 0x0353),
+ .driver_info = MCE_GEN2_NO_TX },
++ /* SMK Manufacturing, Inc. Receiver */
++ { USB_DEVICE(VENDOR_SMK, 0x0357),
++ .driver_info = MCE_GEN2_NO_TX },
+ /* Tatung eHome Infrared Transceiver */
+ { USB_DEVICE(VENDOR_TATUNG, 0x9150) },
+ /* Shuttle eHome Infrared Transceiver */
+@@ -358,6 +362,8 @@
+ { USB_DEVICE(VENDOR_FORMOSA, 0xe015) },
+ /* Formosa21 / eHome Infrared Receiver */
+ { USB_DEVICE(VENDOR_FORMOSA, 0xe016) },
++ /* Formosa21 / eHome Infrared Receiver */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe042) },
+ /* Formosa aim / Trust MCE Infrared Receiver */
+ { USB_DEVICE(VENDOR_FORMOSA, 0xe017),
+ .driver_info = MCE_GEN2_NO_TX },
+@@ -409,6 +415,8 @@
+ /* Hauppauge WINTV-HVR-HVR 930C-HD - based on cx231xx */
+ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb130),
+ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
++ /* Adaptec / HP eHome Receiver */
++ { USB_DEVICE(VENDOR_ADAPTEC, 0x0094) },
+ /* Terminating entry */
+ { }
+ };
+@@ -753,11 +761,18 @@
+ }
+
+ /* outbound data */
+- pipe = usb_sndintpipe(ir->usbdev,
+- ir->usb_ep_out->bEndpointAddress);
+- usb_fill_int_urb(async_urb, ir->usbdev, pipe,
+- async_buf, size, mce_async_callback,
+- ir, ir->usb_ep_out->bInterval);
++ if (usb_endpoint_xfer_int(ir->usb_ep_out)) {
++ pipe = usb_sndintpipe(ir->usbdev,
++ ir->usb_ep_out->bEndpointAddress);
++ usb_fill_int_urb(async_urb, ir->usbdev, pipe, async_buf,
++ size, mce_async_callback, ir,
++ ir->usb_ep_out->bInterval);
++ } else {
++ pipe = usb_sndbulkpipe(ir->usbdev,
++ ir->usb_ep_out->bEndpointAddress);
++ usb_fill_bulk_urb(async_urb, ir->usbdev, pipe, async_buf,
++ size, mce_async_callback, ir);
++ }
+ memcpy(async_buf, data, size);
+
+ } else if (urb_type == MCEUSB_RX) {
+@@ -1275,34 +1290,26 @@
+ for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
+ ep = &idesc->endpoint[i].desc;
+
+- if ((ep_in == NULL)
+- && ((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+- == USB_DIR_IN)
+- && (((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+- == USB_ENDPOINT_XFER_BULK)
+- || ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+- == USB_ENDPOINT_XFER_INT))) {
+-
+- ep_in = ep;
+- ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
+- ep_in->bInterval = 1;
+- mce_dbg(&intf->dev, "acceptable inbound endpoint "
+- "found\n");
++ if (ep_in == NULL) {
++ if (usb_endpoint_is_bulk_in(ep)) {
++ ep_in = ep;
++ mce_dbg(&intf->dev, "acceptable bulk inbound endpoint found\n");
++ } else if (usb_endpoint_is_int_in(ep)) {
++ ep_in = ep;
++ ep_in->bInterval = 1;
++ mce_dbg(&intf->dev, "acceptable interrupt inbound endpoint found\n");
++ }
+ }
+
+- if ((ep_out == NULL)
+- && ((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+- == USB_DIR_OUT)
+- && (((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+- == USB_ENDPOINT_XFER_BULK)
+- || ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+- == USB_ENDPOINT_XFER_INT))) {
+-
+- ep_out = ep;
+- ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
+- ep_out->bInterval = 1;
+- mce_dbg(&intf->dev, "acceptable outbound endpoint "
+- "found\n");
++ if (ep_out == NULL) {
++ if (usb_endpoint_is_bulk_out(ep)) {
++ ep_out = ep;
++ mce_dbg(&intf->dev, "acceptable bulk outbound endpoint found\n");
++ } else if (usb_endpoint_is_int_out(ep)) {
++ ep_out = ep;
++ ep_out->bInterval = 1;
++ mce_dbg(&intf->dev, "acceptable interrupt outbound endpoint found\n");
++ }
+ }
+ }
+ if (ep_in == NULL) {
+@@ -1310,7 +1317,11 @@
+ return -ENODEV;
+ }
+
+- pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress);
++ if (usb_endpoint_xfer_int(ep_in)) {
++ pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress);
++ } else {
++ pipe = usb_rcvbulkpipe(dev, ep_in->bEndpointAddress);
++ }
+ maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+
+ ir = kzalloc(sizeof(struct mceusb_dev), GFP_KERNEL);
+diff -Nur linux-3.14.36/drivers/media/rc/mceusb.c.orig linux-openelec/drivers/media/rc/mceusb.c.orig
+--- linux-3.14.36/drivers/media/rc/mceusb.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/rc/mceusb.c.orig 2015-07-24 18:03:29.988842002 -0500
+@@ -0,0 +1,1467 @@
++/*
++ * Driver for USB Windows Media Center Ed. eHome Infrared Transceivers
++ *
++ * Copyright (c) 2010-2011, Jarod Wilson <jarod@redhat.com>
++ *
++ * Based on the original lirc_mceusb and lirc_mceusb2 drivers, by Dan
++ * Conti, Martin Blatter and Daniel Melander, the latter of which was
++ * in turn also based on the lirc_atiusb driver by Paul Miller. The
++ * two mce drivers were merged into one by Jarod Wilson, with transmit
++ * support for the 1st-gen device added primarily by Patrick Calhoun,
++ * with a bit of tweaks by Jarod. Debugging improvements and proper
++ * support for what appears to be 3rd-gen hardware added by Jarod.
++ * Initial port from lirc driver to ir-core drivery by Jarod, based
++ * partially on a port to an earlier proposed IR infrastructure by
++ * Jon Smirl, which included enhancements and simplifications to the
++ * incoming IR buffer parsing routines.
++ *
++ * Updated in July of 2011 with the aid of Microsoft's official
++ * remote/transceiver requirements and specification document, found at
++ * download.microsoft.com, title
++ * Windows-Media-Center-RC-IR-Collection-Green-Button-Specification-03-08-2011-V2.pdf
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#include <linux/device.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/usb.h>
++#include <linux/usb/input.h>
++#include <linux/pm_wakeup.h>
++#include <media/rc-core.h>
++
++#define DRIVER_VERSION "1.92"
++#define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>"
++#define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
++ "device driver"
++#define DRIVER_NAME "mceusb"
++
++#define USB_BUFLEN 32 /* USB reception buffer length */
++#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
++#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
++
++/* MCE constants */
++#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
++#define MCE_TIME_UNIT 50 /* Approx 50us resolution */
++#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
++#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
++#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
++#define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
++#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
++#define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
++#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */
++#define MCE_PULSE_MASK 0x7f /* Pulse mask */
++#define MCE_MAX_PULSE_LENGTH 0x7f /* Longest transmittable pulse symbol */
++
++/*
++ * The interface between the host and the IR hardware is command-response
++ * based. All commands and responses have a consistent format, where a lead
++ * byte always identifies the type of data following it. The lead byte has
++ * a port value in the 3 highest bits and a length value in the 5 lowest
++ * bits.
++ *
++ * The length field is overloaded, with a value of 11111 indicating that the
++ * following byte is a command or response code, and the length of the entire
++ * message is determined by the code. If the length field is not 11111, then
++ * it specifies the number of bytes of port data that follow.
++ */
++#define MCE_CMD 0x1f
++#define MCE_PORT_IR 0x4 /* (0x4 << 5) | MCE_CMD = 0x9f */
++#define MCE_PORT_SYS 0x7 /* (0x7 << 5) | MCE_CMD = 0xff */
++#define MCE_PORT_SER 0x6 /* 0xc0 thru 0xdf flush & 0x1f bytes */
++#define MCE_PORT_MASK 0xe0 /* Mask out command bits */
++
++/* Command port headers */
++#define MCE_CMD_PORT_IR 0x9f /* IR-related cmd/rsp */
++#define MCE_CMD_PORT_SYS 0xff /* System (non-IR) device cmd/rsp */
++
++/* Commands that set device state (2-4 bytes in length) */
++#define MCE_CMD_RESET 0xfe /* Reset device, 2 bytes */
++#define MCE_CMD_RESUME 0xaa /* Resume device after error, 2 bytes */
++#define MCE_CMD_SETIRCFS 0x06 /* Set tx carrier, 4 bytes */
++#define MCE_CMD_SETIRTIMEOUT 0x0c /* Set timeout, 4 bytes */
++#define MCE_CMD_SETIRTXPORTS 0x08 /* Set tx ports, 3 bytes */
++#define MCE_CMD_SETIRRXPORTEN 0x14 /* Set rx ports, 3 bytes */
++#define MCE_CMD_FLASHLED 0x23 /* Flash receiver LED, 2 bytes */
++
++/* Commands that query device state (all 2 bytes, unless noted) */
++#define MCE_CMD_GETIRCFS 0x07 /* Get carrier */
++#define MCE_CMD_GETIRTIMEOUT 0x0d /* Get timeout */
++#define MCE_CMD_GETIRTXPORTS 0x13 /* Get tx ports */
++#define MCE_CMD_GETIRRXPORTEN 0x15 /* Get rx ports */
++#define MCE_CMD_GETPORTSTATUS 0x11 /* Get tx port status, 3 bytes */
++#define MCE_CMD_GETIRNUMPORTS 0x16 /* Get number of ports */
++#define MCE_CMD_GETWAKESOURCE 0x17 /* Get wake source */
++#define MCE_CMD_GETEMVER 0x22 /* Get emulator interface version */
++#define MCE_CMD_GETDEVDETAILS 0x21 /* Get device details (em ver2 only) */
++#define MCE_CMD_GETWAKESUPPORT 0x20 /* Get wake details (em ver2 only) */
++#define MCE_CMD_GETWAKEVERSION 0x18 /* Get wake pattern (em ver2 only) */
++
++/* Misc commands */
++#define MCE_CMD_NOP 0xff /* No operation */
++
++/* Responses to commands (non-error cases) */
++#define MCE_RSP_EQIRCFS 0x06 /* tx carrier, 4 bytes */
++#define MCE_RSP_EQIRTIMEOUT 0x0c /* rx timeout, 4 bytes */
++#define MCE_RSP_GETWAKESOURCE 0x17 /* wake source, 3 bytes */
++#define MCE_RSP_EQIRTXPORTS 0x08 /* tx port mask, 3 bytes */
++#define MCE_RSP_EQIRRXPORTEN 0x14 /* rx port mask, 3 bytes */
++#define MCE_RSP_GETPORTSTATUS 0x11 /* tx port status, 7 bytes */
++#define MCE_RSP_EQIRRXCFCNT 0x15 /* rx carrier count, 4 bytes */
++#define MCE_RSP_EQIRNUMPORTS 0x16 /* number of ports, 4 bytes */
++#define MCE_RSP_EQWAKESUPPORT 0x20 /* wake capabilities, 3 bytes */
++#define MCE_RSP_EQWAKEVERSION 0x18 /* wake pattern details, 6 bytes */
++#define MCE_RSP_EQDEVDETAILS 0x21 /* device capabilities, 3 bytes */
++#define MCE_RSP_EQEMVER 0x22 /* emulator interface ver, 3 bytes */
++#define MCE_RSP_FLASHLED 0x23 /* success flashing LED, 2 bytes */
++
++/* Responses to error cases, must send MCE_CMD_RESUME to clear them */
++#define MCE_RSP_CMD_ILLEGAL 0xfe /* illegal command for port, 2 bytes */
++#define MCE_RSP_TX_TIMEOUT 0x81 /* tx timed out, 2 bytes */
++
++/* Misc commands/responses not defined in the MCE remote/transceiver spec */
++#define MCE_CMD_SIG_END 0x01 /* End of signal */
++#define MCE_CMD_PING 0x03 /* Ping device */
++#define MCE_CMD_UNKNOWN 0x04 /* Unknown */
++#define MCE_CMD_UNKNOWN2 0x05 /* Unknown */
++#define MCE_CMD_UNKNOWN3 0x09 /* Unknown */
++#define MCE_CMD_UNKNOWN4 0x0a /* Unknown */
++#define MCE_CMD_G_REVISION 0x0b /* Get hw/sw revision */
++#define MCE_CMD_UNKNOWN5 0x0e /* Unknown */
++#define MCE_CMD_UNKNOWN6 0x0f /* Unknown */
++#define MCE_CMD_UNKNOWN8 0x19 /* Unknown */
++#define MCE_CMD_UNKNOWN9 0x1b /* Unknown */
++#define MCE_CMD_NULL 0x00 /* These show up various places... */
++
++/* if buf[i] & MCE_PORT_MASK == 0x80 and buf[i] != MCE_CMD_PORT_IR,
++ * then we're looking at a raw IR data sample */
++#define MCE_COMMAND_IRDATA 0x80
++#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
++
++/* module parameters */
++#ifdef CONFIG_USB_DEBUG
++static bool debug = 1;
++#else
++static bool debug;
++#endif
++
++#define mce_dbg(dev, fmt, ...) \
++ do { \
++ if (debug) \
++ dev_info(dev, fmt, ## __VA_ARGS__); \
++ } while (0)
++
++/* general constants */
++#define SEND_FLAG_IN_PROGRESS 1
++#define SEND_FLAG_COMPLETE 2
++#define RECV_FLAG_IN_PROGRESS 3
++#define RECV_FLAG_COMPLETE 4
++
++#define MCEUSB_RX 1
++#define MCEUSB_TX 2
++
++#define VENDOR_PHILIPS 0x0471
++#define VENDOR_SMK 0x0609
++#define VENDOR_TATUNG 0x1460
++#define VENDOR_GATEWAY 0x107b
++#define VENDOR_SHUTTLE 0x1308
++#define VENDOR_SHUTTLE2 0x051c
++#define VENDOR_MITSUMI 0x03ee
++#define VENDOR_TOPSEED 0x1784
++#define VENDOR_RICAVISION 0x179d
++#define VENDOR_ITRON 0x195d
++#define VENDOR_FIC 0x1509
++#define VENDOR_LG 0x043e
++#define VENDOR_MICROSOFT 0x045e
++#define VENDOR_FORMOSA 0x147a
++#define VENDOR_FINTEK 0x1934
++#define VENDOR_PINNACLE 0x2304
++#define VENDOR_ECS 0x1019
++#define VENDOR_WISTRON 0x0fb8
++#define VENDOR_COMPRO 0x185b
++#define VENDOR_NORTHSTAR 0x04eb
++#define VENDOR_REALTEK 0x0bda
++#define VENDOR_TIVO 0x105a
++#define VENDOR_CONEXANT 0x0572
++#define VENDOR_TWISTEDMELON 0x2596
++#define VENDOR_HAUPPAUGE 0x2040
++#define VENDOR_ADAPTEC 0x03f3
++
++enum mceusb_model_type {
++ MCE_GEN2 = 0, /* Most boards */
++ MCE_GEN1,
++ MCE_GEN3,
++ MCE_GEN2_TX_INV,
++ POLARIS_EVK,
++ CX_HYBRID_TV,
++ MULTIFUNCTION,
++ TIVO_KIT,
++ MCE_GEN2_NO_TX,
++ HAUPPAUGE_CX_HYBRID_TV,
++};
++
++struct mceusb_model {
++ u32 mce_gen1:1;
++ u32 mce_gen2:1;
++ u32 mce_gen3:1;
++ u32 tx_mask_normal:1;
++ u32 no_tx:1;
++
++ int ir_intfnum;
++
++ const char *rc_map; /* Allow specify a per-board map */
++ const char *name; /* per-board name */
++};
++
++static const struct mceusb_model mceusb_model[] = {
++ [MCE_GEN1] = {
++ .mce_gen1 = 1,
++ .tx_mask_normal = 1,
++ },
++ [MCE_GEN2] = {
++ .mce_gen2 = 1,
++ },
++ [MCE_GEN2_NO_TX] = {
++ .mce_gen2 = 1,
++ .no_tx = 1,
++ },
++ [MCE_GEN2_TX_INV] = {
++ .mce_gen2 = 1,
++ .tx_mask_normal = 1,
++ },
++ [MCE_GEN3] = {
++ .mce_gen3 = 1,
++ .tx_mask_normal = 1,
++ },
++ [POLARIS_EVK] = {
++ /*
++ * In fact, the EVK is shipped without
++ * remotes, but we should have something handy,
++ * to allow testing it
++ */
++ .rc_map = RC_MAP_HAUPPAUGE,
++ .name = "Conexant Hybrid TV (cx231xx) MCE IR",
++ },
++ [CX_HYBRID_TV] = {
++ .no_tx = 1, /* tx isn't wired up at all */
++ .name = "Conexant Hybrid TV (cx231xx) MCE IR",
++ },
++ [HAUPPAUGE_CX_HYBRID_TV] = {
++ .rc_map = RC_MAP_HAUPPAUGE,
++ .no_tx = 1, /* eeprom says it has no tx */
++ .name = "Conexant Hybrid TV (cx231xx) MCE IR no TX",
++ },
++ [MULTIFUNCTION] = {
++ .mce_gen2 = 1,
++ .ir_intfnum = 2,
++ },
++ [TIVO_KIT] = {
++ .mce_gen2 = 1,
++ .rc_map = RC_MAP_TIVO,
++ },
++};
++
++static struct usb_device_id mceusb_dev_table[] = {
++ /* Original Microsoft MCE IR Transceiver (often HP-branded) */
++ { USB_DEVICE(VENDOR_MICROSOFT, 0x006d),
++ .driver_info = MCE_GEN1 },
++ /* Philips Infrared Transceiver - Sahara branded */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x0608) },
++ /* Philips Infrared Transceiver - HP branded */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x060c),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* Philips SRM5100 */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x060d) },
++ /* Philips Infrared Transceiver - Omaura */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x060f) },
++ /* Philips Infrared Transceiver - Spinel plus */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x0613) },
++ /* Philips eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x0815) },
++ /* Philips/Spinel plus IR transceiver for ASUS */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
++ /* Philips/Spinel plus IR transceiver for ASUS */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
++ /* Philips IR transceiver (Dell branded) */
++ { USB_DEVICE(VENDOR_PHILIPS, 0x2093),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* Realtek MCE IR Receiver and card reader */
++ { USB_DEVICE(VENDOR_REALTEK, 0x0161),
++ .driver_info = MULTIFUNCTION },
++ /* SMK/Toshiba G83C0004D410 */
++ { USB_DEVICE(VENDOR_SMK, 0x031d),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* SMK eHome Infrared Transceiver (Sony VAIO) */
++ { USB_DEVICE(VENDOR_SMK, 0x0322),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* bundled with Hauppauge PVR-150 */
++ { USB_DEVICE(VENDOR_SMK, 0x0334),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* SMK eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_SMK, 0x0338) },
++ /* SMK/I-O Data GV-MC7/RCKIT Receiver */
++ { USB_DEVICE(VENDOR_SMK, 0x0353),
++ .driver_info = MCE_GEN2_NO_TX },
++ /* Tatung eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_TATUNG, 0x9150) },
++ /* Shuttle eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_SHUTTLE, 0xc001) },
++ /* Shuttle eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_SHUTTLE2, 0xc001) },
++ /* Gateway eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_GATEWAY, 0x3009) },
++ /* Mitsumi */
++ { USB_DEVICE(VENDOR_MITSUMI, 0x2501) },
++ /* Topseed eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_TOPSEED, 0x0001),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* Topseed HP eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_TOPSEED, 0x0006),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* Topseed eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_TOPSEED, 0x0007),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* Topseed eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_TOPSEED, 0x0008),
++ .driver_info = MCE_GEN3 },
++ /* Topseed eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_TOPSEED, 0x000a),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* Topseed eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_TOPSEED, 0x0011),
++ .driver_info = MCE_GEN3 },
++ /* Ricavision internal Infrared Transceiver */
++ { USB_DEVICE(VENDOR_RICAVISION, 0x0010) },
++ /* Itron ione Libra Q-11 */
++ { USB_DEVICE(VENDOR_ITRON, 0x7002) },
++ /* FIC eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_FIC, 0x9242) },
++ /* LG eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_LG, 0x9803) },
++ /* Microsoft MCE Infrared Transceiver */
++ { USB_DEVICE(VENDOR_MICROSOFT, 0x00a0) },
++ /* Formosa eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe015) },
++ /* Formosa21 / eHome Infrared Receiver */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe016) },
++ /* Formosa21 / eHome Infrared Receiver */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe042) },
++ /* Formosa aim / Trust MCE Infrared Receiver */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe017),
++ .driver_info = MCE_GEN2_NO_TX },
++ /* Formosa Industrial Computing / Beanbag Emulation Device */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe018) },
++ /* Formosa21 / eHome Infrared Receiver */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe03a) },
++ /* Formosa Industrial Computing AIM IR605/A */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe03c) },
++ /* Formosa Industrial Computing */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe03e) },
++ /* Formosa Industrial Computing */
++ { USB_DEVICE(VENDOR_FORMOSA, 0xe042) },
++ /* Fintek eHome Infrared Transceiver (HP branded) */
++ { USB_DEVICE(VENDOR_FINTEK, 0x5168),
++ .driver_info = MCE_GEN2_TX_INV },
++ /* Fintek eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_FINTEK, 0x0602) },
++ /* Fintek eHome Infrared Transceiver (in the AOpen MP45) */
++ { USB_DEVICE(VENDOR_FINTEK, 0x0702) },
++ /* Pinnacle Remote Kit */
++ { USB_DEVICE(VENDOR_PINNACLE, 0x0225),
++ .driver_info = MCE_GEN3 },
++ /* Elitegroup Computer Systems IR */
++ { USB_DEVICE(VENDOR_ECS, 0x0f38) },
++ /* Wistron Corp. eHome Infrared Receiver */
++ { USB_DEVICE(VENDOR_WISTRON, 0x0002) },
++ /* Compro K100 */
++ { USB_DEVICE(VENDOR_COMPRO, 0x3020) },
++ /* Compro K100 v2 */
++ { USB_DEVICE(VENDOR_COMPRO, 0x3082) },
++ /* Northstar Systems, Inc. eHome Infrared Transceiver */
++ { USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) },
++ /* TiVo PC IR Receiver */
++ { USB_DEVICE(VENDOR_TIVO, 0x2000),
++ .driver_info = TIVO_KIT },
++ /* Conexant Hybrid TV "Shelby" Polaris SDK */
++ { USB_DEVICE(VENDOR_CONEXANT, 0x58a1),
++ .driver_info = POLARIS_EVK },
++ /* Conexant Hybrid TV RDU253S Polaris */
++ { USB_DEVICE(VENDOR_CONEXANT, 0x58a5),
++ .driver_info = CX_HYBRID_TV },
++ /* Twisted Melon Inc. - Manta Mini Receiver */
++ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8008) },
++ /* Twisted Melon Inc. - Manta Pico Receiver */
++ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8016) },
++ /* Twisted Melon Inc. - Manta Transceiver */
++ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8042) },
++ /* Hauppauge WINTV-HVR-HVR 930C-HD - based on cx231xx */
++ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb130),
++ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
++ /* Adaptec / HP eHome Receiver */
++ { USB_DEVICE(VENDOR_ADAPTEC, 0x0094) },
++ /* Terminating entry */
++ { }
++};
++
++/* data structure for each usb transceiver */
++struct mceusb_dev {
++ /* ir-core bits */
++ struct rc_dev *rc;
++
++ /* optional features we can enable */
++ bool carrier_report_enabled;
++ bool learning_enabled;
++
++ /* core device bits */
++ struct device *dev;
++
++ /* usb */
++ struct usb_device *usbdev;
++ struct urb *urb_in;
++ struct usb_endpoint_descriptor *usb_ep_out;
++
++ /* buffers and dma */
++ unsigned char *buf_in;
++ unsigned int len_in;
++ dma_addr_t dma_in;
++
++ enum {
++ CMD_HEADER = 0,
++ SUBCMD,
++ CMD_DATA,
++ PARSE_IRDATA,
++ } parser_state;
++
++ u8 cmd, rem; /* Remaining IR data bytes in packet */
++
++ struct {
++ u32 connected:1;
++ u32 tx_mask_normal:1;
++ u32 microsoft_gen1:1;
++ u32 no_tx:1;
++ } flags;
++
++ /* transmit support */
++ int send_flags;
++ u32 carrier;
++ unsigned char tx_mask;
++
++ char name[128];
++ char phys[64];
++ enum mceusb_model_type model;
++
++ bool need_reset; /* flag to issue a device resume cmd */
++ u8 emver; /* emulator interface version */
++ u8 num_txports; /* number of transmit ports */
++ u8 num_rxports; /* number of receive sensors */
++ u8 txports_cabled; /* bitmask of transmitters with cable */
++ u8 rxports_active; /* bitmask of active receive sensors */
++};
++
++/* MCE Device Command Strings, generally a port and command pair */
++static char DEVICE_RESUME[] = {MCE_CMD_NULL, MCE_CMD_PORT_SYS,
++ MCE_CMD_RESUME};
++static char GET_REVISION[] = {MCE_CMD_PORT_SYS, MCE_CMD_G_REVISION};
++static char GET_EMVER[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETEMVER};
++static char GET_WAKEVERSION[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETWAKEVERSION};
++static char FLASH_LED[] = {MCE_CMD_PORT_SYS, MCE_CMD_FLASHLED};
++static char GET_UNKNOWN2[] = {MCE_CMD_PORT_IR, MCE_CMD_UNKNOWN2};
++static char GET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRCFS};
++static char GET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTIMEOUT};
++static char GET_NUM_PORTS[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRNUMPORTS};
++static char GET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTXPORTS};
++static char GET_RX_SENSOR[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRRXPORTEN};
++/* sub in desired values in lower byte or bytes for full command */
++/* FIXME: make use of these for transmit.
++static char SET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR,
++ MCE_CMD_SETIRCFS, 0x00, 0x00};
++static char SET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00};
++static char SET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR,
++ MCE_CMD_SETIRTIMEOUT, 0x00, 0x00};
++static char SET_RX_SENSOR[] = {MCE_CMD_PORT_IR,
++ MCE_RSP_EQIRRXPORTEN, 0x00};
++*/
++
++static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
++{
++ int datasize = 0;
++
++ switch (cmd) {
++ case MCE_CMD_NULL:
++ if (subcmd == MCE_CMD_PORT_SYS)
++ datasize = 1;
++ break;
++ case MCE_CMD_PORT_SYS:
++ switch (subcmd) {
++ case MCE_RSP_GETPORTSTATUS:
++ datasize = 5;
++ break;
++ case MCE_RSP_EQWAKEVERSION:
++ datasize = 4;
++ break;
++ case MCE_CMD_G_REVISION:
++ datasize = 2;
++ break;
++ case MCE_RSP_EQWAKESUPPORT:
++ case MCE_RSP_GETWAKESOURCE:
++ case MCE_RSP_EQDEVDETAILS:
++ case MCE_RSP_EQEMVER:
++ datasize = 1;
++ break;
++ }
++ case MCE_CMD_PORT_IR:
++ switch (subcmd) {
++ case MCE_CMD_UNKNOWN:
++ case MCE_RSP_EQIRCFS:
++ case MCE_RSP_EQIRTIMEOUT:
++ case MCE_RSP_EQIRRXCFCNT:
++ case MCE_RSP_EQIRNUMPORTS:
++ datasize = 2;
++ break;
++ case MCE_CMD_SIG_END:
++ case MCE_RSP_EQIRTXPORTS:
++ case MCE_RSP_EQIRRXPORTEN:
++ datasize = 1;
++ break;
++ }
++ }
++ return datasize;
++}
++
++static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
++ int offset, int len, bool out)
++{
++ char codes[USB_BUFLEN * 3 + 1];
++ char inout[9];
++ u8 cmd, subcmd, data1, data2, data3, data4;
++ struct device *dev = ir->dev;
++ int i, start, skip = 0;
++ u32 carrier, period;
++
++ if (!debug)
++ return;
++
++ /* skip meaningless 0xb1 0x60 header bytes on orig receiver */
++ if (ir->flags.microsoft_gen1 && !out && !offset)
++ skip = 2;
++
++ if (len <= skip)
++ return;
++
++ for (i = 0; i < len && i < USB_BUFLEN; i++)
++ snprintf(codes + i * 3, 4, "%02x ", buf[i + offset] & 0xff);
++
++ dev_info(dev, "%sx data: %s(length=%d)\n",
++ (out ? "t" : "r"), codes, len);
++
++ if (out)
++ strcpy(inout, "Request\0");
++ else
++ strcpy(inout, "Got\0");
++
++ start = offset + skip;
++ cmd = buf[start] & 0xff;
++ subcmd = buf[start + 1] & 0xff;
++ data1 = buf[start + 2] & 0xff;
++ data2 = buf[start + 3] & 0xff;
++ data3 = buf[start + 4] & 0xff;
++ data4 = buf[start + 5] & 0xff;
++
++ switch (cmd) {
++ case MCE_CMD_NULL:
++ if (subcmd == MCE_CMD_NULL)
++ break;
++ if ((subcmd == MCE_CMD_PORT_SYS) &&
++ (data1 == MCE_CMD_RESUME))
++ dev_info(dev, "Device resume requested\n");
++ else
++ dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
++ cmd, subcmd);
++ break;
++ case MCE_CMD_PORT_SYS:
++ switch (subcmd) {
++ case MCE_RSP_EQEMVER:
++ if (!out)
++ dev_info(dev, "Emulator interface version %x\n",
++ data1);
++ break;
++ case MCE_CMD_G_REVISION:
++ if (len == 2)
++ dev_info(dev, "Get hw/sw rev?\n");
++ else
++ dev_info(dev, "hw/sw rev 0x%02x 0x%02x "
++ "0x%02x 0x%02x\n", data1, data2,
++ buf[start + 4], buf[start + 5]);
++ break;
++ case MCE_CMD_RESUME:
++ dev_info(dev, "Device resume requested\n");
++ break;
++ case MCE_RSP_CMD_ILLEGAL:
++ dev_info(dev, "Illegal PORT_SYS command\n");
++ break;
++ case MCE_RSP_EQWAKEVERSION:
++ if (!out)
++ dev_info(dev, "Wake version, proto: 0x%02x, "
++ "payload: 0x%02x, address: 0x%02x, "
++ "version: 0x%02x\n",
++ data1, data2, data3, data4);
++ break;
++ case MCE_RSP_GETPORTSTATUS:
++ if (!out)
++ /* We use data1 + 1 here, to match hw labels */
++ dev_info(dev, "TX port %d: blaster is%s connected\n",
++ data1 + 1, data4 ? " not" : "");
++ break;
++ case MCE_CMD_FLASHLED:
++ dev_info(dev, "Attempting to flash LED\n");
++ break;
++ default:
++ dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
++ cmd, subcmd);
++ break;
++ }
++ break;
++ case MCE_CMD_PORT_IR:
++ switch (subcmd) {
++ case MCE_CMD_SIG_END:
++ dev_info(dev, "End of signal\n");
++ break;
++ case MCE_CMD_PING:
++ dev_info(dev, "Ping\n");
++ break;
++ case MCE_CMD_UNKNOWN:
++ dev_info(dev, "Resp to 9f 05 of 0x%02x 0x%02x\n",
++ data1, data2);
++ break;
++ case MCE_RSP_EQIRCFS:
++ period = DIV_ROUND_CLOSEST(
++ (1U << data1 * 2) * (data2 + 1), 10);
++ if (!period)
++ break;
++ carrier = (1000 * 1000) / period;
++ dev_info(dev, "%s carrier of %u Hz (period %uus)\n",
++ inout, carrier, period);
++ break;
++ case MCE_CMD_GETIRCFS:
++ dev_info(dev, "Get carrier mode and freq\n");
++ break;
++ case MCE_RSP_EQIRTXPORTS:
++ dev_info(dev, "%s transmit blaster mask of 0x%02x\n",
++ inout, data1);
++ break;
++ case MCE_RSP_EQIRTIMEOUT:
++ /* value is in units of 50us, so x*50/1000 ms */
++ period = ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000;
++ dev_info(dev, "%s receive timeout of %d ms\n",
++ inout, period);
++ break;
++ case MCE_CMD_GETIRTIMEOUT:
++ dev_info(dev, "Get receive timeout\n");
++ break;
++ case MCE_CMD_GETIRTXPORTS:
++ dev_info(dev, "Get transmit blaster mask\n");
++ break;
++ case MCE_RSP_EQIRRXPORTEN:
++ dev_info(dev, "%s %s-range receive sensor in use\n",
++ inout, data1 == 0x02 ? "short" : "long");
++ break;
++ case MCE_CMD_GETIRRXPORTEN:
++ /* aka MCE_RSP_EQIRRXCFCNT */
++ if (out)
++ dev_info(dev, "Get receive sensor\n");
++ else if (ir->learning_enabled)
++ dev_info(dev, "RX pulse count: %d\n",
++ ((data1 << 8) | data2));
++ break;
++ case MCE_RSP_EQIRNUMPORTS:
++ if (out)
++ break;
++ dev_info(dev, "Num TX ports: %x, num RX ports: %x\n",
++ data1, data2);
++ break;
++ case MCE_RSP_CMD_ILLEGAL:
++ dev_info(dev, "Illegal PORT_IR command\n");
++ break;
++ default:
++ dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
++ cmd, subcmd);
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++
++ if (cmd == MCE_IRDATA_TRAILER)
++ dev_info(dev, "End of raw IR data\n");
++ else if ((cmd != MCE_CMD_PORT_IR) &&
++ ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
++ dev_info(dev, "Raw IR data, %d pulse/space samples\n", ir->rem);
++}
++
++static void mce_async_callback(struct urb *urb)
++{
++ struct mceusb_dev *ir;
++ int len;
++
++ if (!urb)
++ return;
++
++ ir = urb->context;
++ if (ir) {
++ len = urb->actual_length;
++
++ mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
++ }
++
++ /* the transfer buffer and urb were allocated in mce_request_packet */
++ kfree(urb->transfer_buffer);
++ usb_free_urb(urb);
++}
++
++/* request incoming or send outgoing usb packet - used to initialize remote */
++static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
++ int size, int urb_type)
++{
++ int res, pipe;
++ struct urb *async_urb;
++ struct device *dev = ir->dev;
++ unsigned char *async_buf;
++
++ if (urb_type == MCEUSB_TX) {
++ async_urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (unlikely(!async_urb)) {
++ dev_err(dev, "Error, couldn't allocate urb!\n");
++ return;
++ }
++
++ async_buf = kzalloc(size, GFP_KERNEL);
++ if (!async_buf) {
++ dev_err(dev, "Error, couldn't allocate buf!\n");
++ usb_free_urb(async_urb);
++ return;
++ }
++
++ /* outbound data */
++ pipe = usb_sndintpipe(ir->usbdev,
++ ir->usb_ep_out->bEndpointAddress);
++ usb_fill_int_urb(async_urb, ir->usbdev, pipe,
++ async_buf, size, mce_async_callback,
++ ir, ir->usb_ep_out->bInterval);
++ memcpy(async_buf, data, size);
++
++ } else if (urb_type == MCEUSB_RX) {
++ /* standard request */
++ async_urb = ir->urb_in;
++ ir->send_flags = RECV_FLAG_IN_PROGRESS;
++
++ } else {
++ dev_err(dev, "Error! Unknown urb type %d\n", urb_type);
++ return;
++ }
++
++ mce_dbg(dev, "receive request called (size=%#x)\n", size);
++
++ async_urb->transfer_buffer_length = size;
++ async_urb->dev = ir->usbdev;
++
++ res = usb_submit_urb(async_urb, GFP_ATOMIC);
++ if (res) {
++ mce_dbg(dev, "receive request FAILED! (res=%d)\n", res);
++ return;
++ }
++ mce_dbg(dev, "receive request complete (res=%d)\n", res);
++}
++
++static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
++{
++ int rsize = sizeof(DEVICE_RESUME);
++
++ if (ir->need_reset) {
++ ir->need_reset = false;
++ mce_request_packet(ir, DEVICE_RESUME, rsize, MCEUSB_TX);
++ msleep(10);
++ }
++
++ mce_request_packet(ir, data, size, MCEUSB_TX);
++ msleep(10);
++}
++
++static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
++{
++ mce_request_packet(ir, NULL, size, MCEUSB_RX);
++}
++
++/* Send data out the IR blaster port(s) */
++static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
++{
++ struct mceusb_dev *ir = dev->priv;
++ int i, length, ret = 0;
++ int cmdcount = 0;
++ unsigned char cmdbuf[MCE_CMDBUF_SIZE];
++
++ /* MCE tx init header */
++ cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
++ cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
++ cmdbuf[cmdcount++] = ir->tx_mask;
++
++ /* Send the set TX ports command */
++ mce_async_out(ir, cmdbuf, cmdcount);
++ cmdcount = 0;
++
++ /* Generate mce packet data */
++ for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
++ txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
++
++ do { /* loop to support long pulses/spaces > 127*50us=6.35ms */
++
++ /* Insert mce packet header every 4th entry */
++ if ((cmdcount < MCE_CMDBUF_SIZE) &&
++ (cmdcount % MCE_CODE_LENGTH) == 0)
++ cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
++
++ /* Insert mce packet data */
++ if (cmdcount < MCE_CMDBUF_SIZE)
++ cmdbuf[cmdcount++] =
++ (txbuf[i] < MCE_PULSE_BIT ?
++ txbuf[i] : MCE_MAX_PULSE_LENGTH) |
++ (i & 1 ? 0x00 : MCE_PULSE_BIT);
++ else {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) &&
++ (txbuf[i] -= MCE_MAX_PULSE_LENGTH));
++ }
++
++ /* Check if we have room for the empty packet at the end */
++ if (cmdcount >= MCE_CMDBUF_SIZE) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* Fix packet length in last header */
++ length = cmdcount % MCE_CODE_LENGTH;
++ cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
++
++ /* All mce commands end with an empty packet (0x80) */
++ cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
++
++ /* Transmit the command to the mce device */
++ mce_async_out(ir, cmdbuf, cmdcount);
++
++out:
++ return ret ? ret : count;
++}
++
++/* Sets active IR outputs -- mce devices typically have two */
++static int mceusb_set_tx_mask(struct rc_dev *dev, u32 mask)
++{
++ struct mceusb_dev *ir = dev->priv;
++
++ if (ir->flags.tx_mask_normal)
++ ir->tx_mask = mask;
++ else
++ ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ?
++ mask ^ MCE_DEFAULT_TX_MASK : mask) << 1;
++
++ return 0;
++}
++
++/* Sets the send carrier frequency and mode */
++static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
++{
++ struct mceusb_dev *ir = dev->priv;
++ int clk = 10000000;
++ int prescaler = 0, divisor = 0;
++ unsigned char cmdbuf[4] = { MCE_CMD_PORT_IR,
++ MCE_CMD_SETIRCFS, 0x00, 0x00 };
++
++ /* Carrier has changed */
++ if (ir->carrier != carrier) {
++
++ if (carrier == 0) {
++ ir->carrier = carrier;
++ cmdbuf[2] = MCE_CMD_SIG_END;
++ cmdbuf[3] = MCE_IRDATA_TRAILER;
++ mce_dbg(ir->dev, "%s: disabling carrier "
++ "modulation\n", __func__);
++ mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ return carrier;
++ }
++
++ for (prescaler = 0; prescaler < 4; ++prescaler) {
++ divisor = (clk >> (2 * prescaler)) / carrier;
++ if (divisor <= 0xff) {
++ ir->carrier = carrier;
++ cmdbuf[2] = prescaler;
++ cmdbuf[3] = divisor;
++ mce_dbg(ir->dev, "%s: requesting %u HZ "
++ "carrier\n", __func__, carrier);
++
++ /* Transmit new carrier to mce device */
++ mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ return carrier;
++ }
++ }
++
++ return -EINVAL;
++
++ }
++
++ return carrier;
++}
++
++/*
++ * We don't do anything but print debug spew for many of the command bits
++ * we receive from the hardware, but some of them are useful information
++ * we want to store so that we can use them.
++ */
++static void mceusb_handle_command(struct mceusb_dev *ir, int index)
++{
++ u8 hi = ir->buf_in[index + 1] & 0xff;
++ u8 lo = ir->buf_in[index + 2] & 0xff;
++
++ switch (ir->buf_in[index]) {
++ /* the one and only 5-byte return value command */
++ case MCE_RSP_GETPORTSTATUS:
++ if ((ir->buf_in[index + 4] & 0xff) == 0x00)
++ ir->txports_cabled |= 1 << hi;
++ break;
++
++ /* 2-byte return value commands */
++ case MCE_RSP_EQIRTIMEOUT:
++ ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
++ break;
++ case MCE_RSP_EQIRNUMPORTS:
++ ir->num_txports = hi;
++ ir->num_rxports = lo;
++ break;
++
++ /* 1-byte return value commands */
++ case MCE_RSP_EQEMVER:
++ ir->emver = hi;
++ break;
++ case MCE_RSP_EQIRTXPORTS:
++ ir->tx_mask = hi;
++ break;
++ case MCE_RSP_EQIRRXPORTEN:
++ ir->learning_enabled = ((hi & 0x02) == 0x02);
++ ir->rxports_active = hi;
++ break;
++ case MCE_RSP_CMD_ILLEGAL:
++ ir->need_reset = true;
++ break;
++ default:
++ break;
++ }
++}
++
++static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
++{
++ DEFINE_IR_RAW_EVENT(rawir);
++ bool event = false;
++ int i = 0;
++
++ /* skip meaningless 0xb1 0x60 header bytes on orig receiver */
++ if (ir->flags.microsoft_gen1)
++ i = 2;
++
++ /* if there's no data, just return now */
++ if (buf_len <= i)
++ return;
++
++ for (; i < buf_len; i++) {
++ switch (ir->parser_state) {
++ case SUBCMD:
++ ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
++ mceusb_dev_printdata(ir, ir->buf_in, i - 1,
++ ir->rem + 2, false);
++ mceusb_handle_command(ir, i);
++ ir->parser_state = CMD_DATA;
++ break;
++ case PARSE_IRDATA:
++ ir->rem--;
++ init_ir_raw_event(&rawir);
++ rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
++ rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
++ * US_TO_NS(MCE_TIME_UNIT);
++
++ mce_dbg(ir->dev, "Storing %s with duration %d\n",
++ rawir.pulse ? "pulse" : "space",
++ rawir.duration);
++
++ if (ir_raw_event_store_with_filter(ir->rc, &rawir))
++ event = true;
++ break;
++ case CMD_DATA:
++ ir->rem--;
++ break;
++ case CMD_HEADER:
++ /* decode mce packets of the form (84),AA,BB,CC,DD */
++ /* IR data packets can span USB messages - rem */
++ ir->cmd = ir->buf_in[i];
++ if ((ir->cmd == MCE_CMD_PORT_IR) ||
++ ((ir->cmd & MCE_PORT_MASK) !=
++ MCE_COMMAND_IRDATA)) {
++ ir->parser_state = SUBCMD;
++ continue;
++ }
++ ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
++ mceusb_dev_printdata(ir, ir->buf_in,
++ i, ir->rem + 1, false);
++ if (ir->rem)
++ ir->parser_state = PARSE_IRDATA;
++ else
++ ir_raw_event_reset(ir->rc);
++ break;
++ }
++
++ if (ir->parser_state != CMD_HEADER && !ir->rem)
++ ir->parser_state = CMD_HEADER;
++ }
++ if (event) {
++ mce_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
++ ir_raw_event_handle(ir->rc);
++ }
++}
++
++static void mceusb_dev_recv(struct urb *urb)
++{
++ struct mceusb_dev *ir;
++ int buf_len;
++
++ if (!urb)
++ return;
++
++ ir = urb->context;
++ if (!ir) {
++ usb_unlink_urb(urb);
++ return;
++ }
++
++ buf_len = urb->actual_length;
++
++ if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
++ ir->send_flags = SEND_FLAG_COMPLETE;
++ mce_dbg(ir->dev, "setup answer received %d bytes\n",
++ buf_len);
++ }
++
++ switch (urb->status) {
++ /* success */
++ case 0:
++ mceusb_process_ir_data(ir, buf_len);
++ break;
++
++ case -ECONNRESET:
++ case -ENOENT:
++ case -ESHUTDOWN:
++ usb_unlink_urb(urb);
++ return;
++
++ case -EPIPE:
++ default:
++ mce_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
++ break;
++ }
++
++ usb_submit_urb(urb, GFP_ATOMIC);
++}
++
++static void mceusb_get_emulator_version(struct mceusb_dev *ir)
++{
++ /* If we get no reply or an illegal command reply, its ver 1, says MS */
++ ir->emver = 1;
++ mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
++}
++
++static void mceusb_gen1_init(struct mceusb_dev *ir)
++{
++ int ret;
++ struct device *dev = ir->dev;
++ char *data;
++
++ data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL);
++ if (!data) {
++ dev_err(dev, "%s: memory allocation failed!\n", __func__);
++ return;
++ }
++
++ /*
++ * This is a strange one. Windows issues a set address to the device
++ * on the receive control pipe and expect a certain value pair back
++ */
++ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
++ USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
++ data, USB_CTRL_MSG_SZ, HZ * 3);
++ mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
++ mce_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
++ __func__, data[0], data[1]);
++
++ /* set feature: bit rate 38400 bps */
++ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
++ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
++ 0xc04e, 0x0000, NULL, 0, HZ * 3);
++
++ mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
++
++ /* bRequest 4: set char length to 8 bits */
++ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
++ 4, USB_TYPE_VENDOR,
++ 0x0808, 0x0000, NULL, 0, HZ * 3);
++ mce_dbg(dev, "%s - retB = %d\n", __func__, ret);
++
++ /* bRequest 2: set handshaking to use DTR/DSR */
++ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
++ 2, USB_TYPE_VENDOR,
++ 0x0000, 0x0100, NULL, 0, HZ * 3);
++ mce_dbg(dev, "%s - retC = %d\n", __func__, ret);
++
++ /* device resume */
++ mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
++
++ /* get hw/sw revision? */
++ mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
++
++ kfree(data);
++}
++
++static void mceusb_gen2_init(struct mceusb_dev *ir)
++{
++ /* device resume */
++ mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
++
++ /* get wake version (protocol, key, address) */
++ mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
++
++ /* unknown what this one actually returns... */
++ mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
++}
++
++static void mceusb_get_parameters(struct mceusb_dev *ir)
++{
++ int i;
++ unsigned char cmdbuf[3] = { MCE_CMD_PORT_SYS,
++ MCE_CMD_GETPORTSTATUS, 0x00 };
++
++ /* defaults, if the hardware doesn't support querying */
++ ir->num_txports = 2;
++ ir->num_rxports = 2;
++
++ /* get number of tx and rx ports */
++ mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
++
++ /* get the carrier and frequency */
++ mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
++
++ if (ir->num_txports && !ir->flags.no_tx)
++ /* get the transmitter bitmask */
++ mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
++
++ /* get receiver timeout value */
++ mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
++
++ /* get receiver sensor setting */
++ mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
++
++ for (i = 0; i < ir->num_txports; i++) {
++ cmdbuf[2] = i;
++ mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ }
++}
++
++static void mceusb_flash_led(struct mceusb_dev *ir)
++{
++ if (ir->emver < 2)
++ return;
++
++ mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
++}
++
++static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
++{
++ struct device *dev = ir->dev;
++ struct rc_dev *rc;
++ int ret;
++
++ rc = rc_allocate_device();
++ if (!rc) {
++ dev_err(dev, "remote dev allocation failed\n");
++ goto out;
++ }
++
++ snprintf(ir->name, sizeof(ir->name), "%s (%04x:%04x)",
++ mceusb_model[ir->model].name ?
++ mceusb_model[ir->model].name :
++ "Media Center Ed. eHome Infrared Remote Transceiver",
++ le16_to_cpu(ir->usbdev->descriptor.idVendor),
++ le16_to_cpu(ir->usbdev->descriptor.idProduct));
++
++ usb_make_path(ir->usbdev, ir->phys, sizeof(ir->phys));
++
++ rc->input_name = ir->name;
++ rc->input_phys = ir->phys;
++ usb_to_input_id(ir->usbdev, &rc->input_id);
++ rc->dev.parent = dev;
++ rc->priv = ir;
++ rc->driver_type = RC_DRIVER_IR_RAW;
++ rc->allowed_protos = RC_BIT_ALL;
++ rc->timeout = MS_TO_NS(100);
++ if (!ir->flags.no_tx) {
++ rc->s_tx_mask = mceusb_set_tx_mask;
++ rc->s_tx_carrier = mceusb_set_tx_carrier;
++ rc->tx_ir = mceusb_tx_ir;
++ }
++ rc->driver_name = DRIVER_NAME;
++ rc->map_name = mceusb_model[ir->model].rc_map ?
++ mceusb_model[ir->model].rc_map : RC_MAP_RC6_MCE;
++
++ ret = rc_register_device(rc);
++ if (ret < 0) {
++ dev_err(dev, "remote dev registration failed\n");
++ goto out;
++ }
++
++ return rc;
++
++out:
++ rc_free_device(rc);
++ return NULL;
++}
++
++static int mceusb_dev_probe(struct usb_interface *intf,
++ const struct usb_device_id *id)
++{
++ struct usb_device *dev = interface_to_usbdev(intf);
++ struct usb_host_interface *idesc;
++ struct usb_endpoint_descriptor *ep = NULL;
++ struct usb_endpoint_descriptor *ep_in = NULL;
++ struct usb_endpoint_descriptor *ep_out = NULL;
++ struct mceusb_dev *ir = NULL;
++ int pipe, maxp, i;
++ char buf[63], name[128] = "";
++ enum mceusb_model_type model = id->driver_info;
++ bool is_gen3;
++ bool is_microsoft_gen1;
++ bool tx_mask_normal;
++ int ir_intfnum;
++
++ mce_dbg(&intf->dev, "%s called\n", __func__);
++
++ idesc = intf->cur_altsetting;
++
++ is_gen3 = mceusb_model[model].mce_gen3;
++ is_microsoft_gen1 = mceusb_model[model].mce_gen1;
++ tx_mask_normal = mceusb_model[model].tx_mask_normal;
++ ir_intfnum = mceusb_model[model].ir_intfnum;
++
++ /* There are multi-function devices with non-IR interfaces */
++ if (idesc->desc.bInterfaceNumber != ir_intfnum)
++ return -ENODEV;
++
++ /* step through the endpoints to find first bulk in and out endpoint */
++ for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
++ ep = &idesc->endpoint[i].desc;
++
++ if ((ep_in == NULL)
++ && ((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
++ == USB_DIR_IN)
++ && (((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
++ == USB_ENDPOINT_XFER_BULK)
++ || ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
++ == USB_ENDPOINT_XFER_INT))) {
++
++ ep_in = ep;
++ ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
++ ep_in->bInterval = 1;
++ mce_dbg(&intf->dev, "acceptable inbound endpoint "
++ "found\n");
++ }
++
++ if ((ep_out == NULL)
++ && ((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
++ == USB_DIR_OUT)
++ && (((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
++ == USB_ENDPOINT_XFER_BULK)
++ || ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
++ == USB_ENDPOINT_XFER_INT))) {
++
++ ep_out = ep;
++ ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
++ ep_out->bInterval = 1;
++ mce_dbg(&intf->dev, "acceptable outbound endpoint "
++ "found\n");
++ }
++ }
++ if (ep_in == NULL) {
++ mce_dbg(&intf->dev, "inbound and/or endpoint not found\n");
++ return -ENODEV;
++ }
++
++ pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress);
++ maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
++
++ ir = kzalloc(sizeof(struct mceusb_dev), GFP_KERNEL);
++ if (!ir)
++ goto mem_alloc_fail;
++
++ ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_ATOMIC, &ir->dma_in);
++ if (!ir->buf_in)
++ goto buf_in_alloc_fail;
++
++ ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
++ if (!ir->urb_in)
++ goto urb_in_alloc_fail;
++
++ ir->usbdev = dev;
++ ir->dev = &intf->dev;
++ ir->len_in = maxp;
++ ir->flags.microsoft_gen1 = is_microsoft_gen1;
++ ir->flags.tx_mask_normal = tx_mask_normal;
++ ir->flags.no_tx = mceusb_model[model].no_tx;
++ ir->model = model;
++
++ /* Saving usb interface data for use by the transmitter routine */
++ ir->usb_ep_out = ep_out;
++
++ if (dev->descriptor.iManufacturer
++ && usb_string(dev, dev->descriptor.iManufacturer,
++ buf, sizeof(buf)) > 0)
++ strlcpy(name, buf, sizeof(name));
++ if (dev->descriptor.iProduct
++ && usb_string(dev, dev->descriptor.iProduct,
++ buf, sizeof(buf)) > 0)
++ snprintf(name + strlen(name), sizeof(name) - strlen(name),
++ " %s", buf);
++
++ ir->rc = mceusb_init_rc_dev(ir);
++ if (!ir->rc)
++ goto rc_dev_fail;
++
++ /* wire up inbound data handler */
++ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
++ mceusb_dev_recv, ir, ep_in->bInterval);
++ ir->urb_in->transfer_dma = ir->dma_in;
++ ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++
++ /* flush buffers on the device */
++ mce_dbg(&intf->dev, "Flushing receive buffers\n");
++ mce_flush_rx_buffer(ir, maxp);
++
++ /* figure out which firmware/emulator version this hardware has */
++ mceusb_get_emulator_version(ir);
++
++ /* initialize device */
++ if (ir->flags.microsoft_gen1)
++ mceusb_gen1_init(ir);
++ else if (!is_gen3)
++ mceusb_gen2_init(ir);
++
++ mceusb_get_parameters(ir);
++
++ mceusb_flash_led(ir);
++
++ if (!ir->flags.no_tx)
++ mceusb_set_tx_mask(ir->rc, MCE_DEFAULT_TX_MASK);
++
++ usb_set_intfdata(intf, ir);
++
++ /* enable wake via this device */
++ device_set_wakeup_capable(ir->dev, true);
++ device_set_wakeup_enable(ir->dev, true);
++
++ dev_info(&intf->dev, "Registered %s with mce emulator interface "
++ "version %x\n", name, ir->emver);
++ dev_info(&intf->dev, "%x tx ports (0x%x cabled) and "
++ "%x rx sensors (0x%x active)\n",
++ ir->num_txports, ir->txports_cabled,
++ ir->num_rxports, ir->rxports_active);
++
++ return 0;
++
++ /* Error-handling path */
++rc_dev_fail:
++ usb_free_urb(ir->urb_in);
++urb_in_alloc_fail:
++ usb_free_coherent(dev, maxp, ir->buf_in, ir->dma_in);
++buf_in_alloc_fail:
++ kfree(ir);
++mem_alloc_fail:
++ dev_err(&intf->dev, "%s: device setup failed!\n", __func__);
++
++ return -ENOMEM;
++}
++
++
++static void mceusb_dev_disconnect(struct usb_interface *intf)
++{
++ struct usb_device *dev = interface_to_usbdev(intf);
++ struct mceusb_dev *ir = usb_get_intfdata(intf);
++
++ usb_set_intfdata(intf, NULL);
++
++ if (!ir)
++ return;
++
++ ir->usbdev = NULL;
++ rc_unregister_device(ir->rc);
++ usb_kill_urb(ir->urb_in);
++ usb_free_urb(ir->urb_in);
++ usb_free_coherent(dev, ir->len_in, ir->buf_in, ir->dma_in);
++
++ kfree(ir);
++}
++
++static int mceusb_dev_suspend(struct usb_interface *intf, pm_message_t message)
++{
++ struct mceusb_dev *ir = usb_get_intfdata(intf);
++ dev_info(ir->dev, "suspend\n");
++ usb_kill_urb(ir->urb_in);
++ return 0;
++}
++
++static int mceusb_dev_resume(struct usb_interface *intf)
++{
++ struct mceusb_dev *ir = usb_get_intfdata(intf);
++ dev_info(ir->dev, "resume\n");
++ if (usb_submit_urb(ir->urb_in, GFP_ATOMIC))
++ return -EIO;
++ return 0;
++}
++
++static struct usb_driver mceusb_dev_driver = {
++ .name = DRIVER_NAME,
++ .probe = mceusb_dev_probe,
++ .disconnect = mceusb_dev_disconnect,
++ .suspend = mceusb_dev_suspend,
++ .resume = mceusb_dev_resume,
++ .reset_resume = mceusb_dev_resume,
++ .id_table = mceusb_dev_table
++};
++
++module_usb_driver(mceusb_dev_driver);
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(usb, mceusb_dev_table);
++
++module_param(debug, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(debug, "Debug enabled or not");
+diff -Nur linux-3.14.36/drivers/media/tuners/Kconfig linux-openelec/drivers/media/tuners/Kconfig
+--- linux-3.14.36/drivers/media/tuners/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/tuners/Kconfig 2015-07-24 18:03:30.148842002 -0500
+@@ -242,4 +242,11 @@
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Rafael Micro R820T silicon tuner driver.
++
++config MEDIA_TUNER_SI2157
++ tristate "Silicon Labs Si2157 silicon tuner"
++ depends on MEDIA_SUPPORT && I2C
++ default m if !MEDIA_SUBDRV_AUTOSELECT
++ help
++ Si2157 silicon tuner driver.
+ endmenu
+diff -Nur linux-3.14.36/drivers/media/tuners/Makefile linux-openelec/drivers/media/tuners/Makefile
+--- linux-3.14.36/drivers/media/tuners/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/tuners/Makefile 2015-07-24 18:03:30.148842002 -0500
+@@ -37,6 +37,7 @@
+ obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
+ obj-$(CONFIG_MEDIA_TUNER_IT913X) += tuner_it913x.o
+ obj-$(CONFIG_MEDIA_TUNER_R820T) += r820t.o
++obj-$(CONFIG_MEDIA_TUNER_SI2157) += si2157.o
+
+ ccflags-y += -I$(srctree)/drivers/media/dvb-core
+ ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+diff -Nur linux-3.14.36/drivers/media/tuners/si2157.c linux-openelec/drivers/media/tuners/si2157.c
+--- linux-3.14.36/drivers/media/tuners/si2157.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/tuners/si2157.c 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,417 @@
++/*
++ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
++ *
++ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include "si2157_priv.h"
++
++static const struct dvb_tuner_ops si2157_ops;
++
++/* execute firmware command */
++static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
++{
++ int ret;
++ unsigned long timeout;
++
++ mutex_lock(&s->i2c_mutex);
++
++ if (cmd->wlen) {
++ /* write cmd and args for firmware */
++ ret = i2c_master_send(s->client, cmd->args, cmd->wlen);
++ if (ret < 0) {
++ goto err_mutex_unlock;
++ } else if (ret != cmd->wlen) {
++ ret = -EREMOTEIO;
++ goto err_mutex_unlock;
++ }
++ }
++
++ if (cmd->rlen) {
++ /* wait cmd execution terminate */
++ #define TIMEOUT 80
++ timeout = jiffies + msecs_to_jiffies(TIMEOUT);
++ while (!time_after(jiffies, timeout)) {
++ ret = i2c_master_recv(s->client, cmd->args, cmd->rlen);
++ if (ret < 0) {
++ goto err_mutex_unlock;
++ } else if (ret != cmd->rlen) {
++ ret = -EREMOTEIO;
++ goto err_mutex_unlock;
++ }
++
++ /* firmware ready? */
++ if ((cmd->args[0] >> 7) & 0x01)
++ break;
++ }
++
++ dev_dbg(&s->client->dev, "cmd execution took %d ms\n",
++ jiffies_to_msecs(jiffies) -
++ (jiffies_to_msecs(timeout) - TIMEOUT));
++
++ if (!((cmd->args[0] >> 7) & 0x01)) {
++ ret = -ETIMEDOUT;
++ goto err_mutex_unlock;
++ }
++ }
++
++ ret = 0;
++
++err_mutex_unlock:
++ mutex_unlock(&s->i2c_mutex);
++ if (ret)
++ goto err;
++
++ return 0;
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2157_init(struct dvb_frontend *fe)
++{
++ struct si2157 *s = fe->tuner_priv;
++ int ret, len, remaining;
++ struct si2157_cmd cmd;
++ const struct firmware *fw = NULL;
++ u8 *fw_file;
++ unsigned int chip_id;
++
++ dev_dbg(&s->client->dev, "\n");
++
++ if (s->fw_loaded)
++ goto warm;
++
++ /* power up */
++ if (s->chiptype == SI2157_CHIPTYPE_SI2146) {
++ memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
++ cmd.wlen = 9;
++ } else {
++ memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15);
++ cmd.wlen = 15;
++ }
++ cmd.rlen = 1;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ /* query chip revision */
++ memcpy(cmd.args, "\x02", 1);
++ cmd.wlen = 1;
++ cmd.rlen = 13;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 | cmd.args[3] << 8 |
++ cmd.args[4] << 0;
++
++ #define SI2158_A20 ('A' << 24 | 58 << 16 | '2' << 8 | '0' << 0)
++ #define SI2148_A20 ('A' << 24 | 48 << 16 | '2' << 8 | '0' << 0)
++ #define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
++ #define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0)
++ #define SI2146_A10 ('A' << 24 | 46 << 16 | '1' << 8 | '0' << 0)
++
++ switch (chip_id) {
++ case SI2158_A20:
++ case SI2148_A20:
++ fw_file = SI2158_A20_FIRMWARE;
++ break;
++ case SI2157_A30:
++ case SI2147_A30:
++ case SI2146_A10:
++ goto skip_fw_download;
++ default:
++ dev_err(&s->client->dev,
++ "unknown chip version Si21%d-%c%c%c\n",
++ cmd.args[2], cmd.args[1],
++ cmd.args[3], cmd.args[4]);
++ ret = -EINVAL;
++ goto err;
++ }
++
++ /* cold state - try to download firmware */
++ dev_info(&s->client->dev, "found a '%s' in cold state\n",
++ si2157_ops.info.name);
++
++ /* request the firmware, this will block and timeout */
++ ret = request_firmware(&fw, fw_file, &s->client->dev);
++ if (ret) {
++ dev_err(&s->client->dev, "firmware file '%s' not found\n",
++ fw_file);
++ goto err;
++ }
++
++ /* firmware should be n chunks of 17 bytes */
++ if (fw->size % 17 != 0) {
++ dev_err(&s->client->dev, "firmware file '%s' is invalid\n",
++ fw_file);
++ ret = -EINVAL;
++ goto fw_release_exit;
++ }
++
++ dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
++ fw_file);
++
++ for (remaining = fw->size; remaining > 0; remaining -= 17) {
++ len = fw->data[fw->size - remaining];
++ memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
++ cmd.wlen = len;
++ cmd.rlen = 1;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret) {
++ dev_err(&s->client->dev,
++ "firmware download failed=%d\n",
++ ret);
++ goto fw_release_exit;
++ }
++ }
++
++ release_firmware(fw);
++ fw = NULL;
++
++skip_fw_download:
++ /* reboot the tuner with new firmware? */
++ memcpy(cmd.args, "\x01\x01", 2);
++ cmd.wlen = 2;
++ cmd.rlen = 1;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ s->fw_loaded = true;
++
++warm:
++ s->active = true;
++ return 0;
++
++fw_release_exit:
++ release_firmware(fw);
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2157_sleep(struct dvb_frontend *fe)
++{
++ struct si2157 *s = fe->tuner_priv;
++ int ret;
++ struct si2157_cmd cmd;
++
++ dev_dbg(&s->client->dev, "\n");
++
++ s->active = false;
++
++ /* standby */
++ memcpy(cmd.args, "\x16\x00", 2);
++ cmd.wlen = 2;
++ cmd.rlen = 1;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ return 0;
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2157_set_params(struct dvb_frontend *fe)
++{
++ struct si2157 *s = fe->tuner_priv;
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ int ret;
++ struct si2157_cmd cmd;
++ u8 bandwidth, delivery_system;
++
++ dev_dbg(&s->client->dev,
++ "delivery_system=%d frequency=%u bandwidth_hz=%u\n",
++ c->delivery_system, c->frequency,
++ c->bandwidth_hz);
++
++ if (!s->active) {
++ ret = -EAGAIN;
++ goto err;
++ }
++
++ if (c->bandwidth_hz <= 6000000)
++ bandwidth = 0x06;
++ else if (c->bandwidth_hz <= 7000000)
++ bandwidth = 0x07;
++ else if (c->bandwidth_hz <= 8000000)
++ bandwidth = 0x08;
++ else
++ bandwidth = 0x0f;
++
++ switch (c->delivery_system) {
++ case SYS_ATSC:
++ delivery_system = 0x00;
++ break;
++ case SYS_DVBC_ANNEX_B:
++ delivery_system = 0x10;
++ break;
++ case SYS_DVBT:
++ case SYS_DVBT2: /* it seems DVB-T and DVB-T2 both are 0x20 here */
++ delivery_system = 0x20;
++ break;
++ case SYS_DVBC_ANNEX_A:
++ delivery_system = 0x30;
++ break;
++ default:
++ ret = -EINVAL;
++ goto err;
++ }
++
++ memcpy(cmd.args, "\x14\x00\x03\x07\x00\x00", 6);
++ cmd.args[4] = delivery_system | bandwidth;
++ if (s->inversion)
++ cmd.args[5] = 0x01;
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ if (s->chiptype == SI2157_CHIPTYPE_SI2146)
++ memcpy(cmd.args, "\x14\x00\x02\x07\x00\x01", 6);
++ else
++ memcpy(cmd.args, "\x14\x00\x02\x07\x01\x00", 6);
++ cmd.wlen = 6;
++ cmd.rlen = 4;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ /* set frequency */
++ memcpy(cmd.args, "\x41\x00\x00\x00\x00\x00\x00\x00", 8);
++ cmd.args[4] = (c->frequency >> 0) & 0xff;
++ cmd.args[5] = (c->frequency >> 8) & 0xff;
++ cmd.args[6] = (c->frequency >> 16) & 0xff;
++ cmd.args[7] = (c->frequency >> 24) & 0xff;
++ cmd.wlen = 8;
++ cmd.rlen = 1;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ return 0;
++err:
++ dev_dbg(&s->client->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
++{
++ *frequency = 5000000; /* default value of property 0x0706 */
++ return 0;
++}
++
++static const struct dvb_tuner_ops si2157_ops = {
++ .info = {
++ .name = "Silicon Labs Si2146/2147/2148/2157/2158",
++ .frequency_min = 110000000,
++ .frequency_max = 862000000,
++ },
++
++ .init = si2157_init,
++ .sleep = si2157_sleep,
++ .set_params = si2157_set_params,
++ .get_if_frequency = si2157_get_if_frequency,
++};
++
++static int si2157_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct si2157_config *cfg = client->dev.platform_data;
++ struct dvb_frontend *fe = cfg->fe;
++ struct si2157 *s;
++ struct si2157_cmd cmd;
++ int ret;
++
++ s = kzalloc(sizeof(struct si2157), GFP_KERNEL);
++ if (!s) {
++ ret = -ENOMEM;
++ dev_err(&client->dev, "kzalloc() failed\n");
++ goto err;
++ }
++
++ s->client = client;
++ s->fe = cfg->fe;
++ s->inversion = cfg->inversion;
++ s->fw_loaded = false;
++ s->chiptype = (u8)id->driver_data;
++ mutex_init(&s->i2c_mutex);
++
++ /* check if the tuner is there */
++ cmd.wlen = 0;
++ cmd.rlen = 1;
++ ret = si2157_cmd_execute(s, &cmd);
++ if (ret)
++ goto err;
++
++ fe->tuner_priv = s;
++ memcpy(&fe->ops.tuner_ops, &si2157_ops,
++ sizeof(struct dvb_tuner_ops));
++
++ i2c_set_clientdata(client, s);
++
++ dev_info(&s->client->dev,
++ "Silicon Labs %s successfully attached\n",
++ s->chiptype == SI2157_CHIPTYPE_SI2146 ?
++ "Si2146" : "Si2147/2148/2157/2158");
++
++ return 0;
++err:
++ dev_dbg(&client->dev, "failed=%d\n", ret);
++ kfree(s);
++
++ return ret;
++}
++
++static int si2157_remove(struct i2c_client *client)
++{
++ struct si2157 *s = i2c_get_clientdata(client);
++ struct dvb_frontend *fe = s->fe;
++
++ dev_dbg(&client->dev, "\n");
++
++ memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
++ fe->tuner_priv = NULL;
++ kfree(s);
++
++ return 0;
++}
++
++static const struct i2c_device_id si2157_id[] = {
++ {"si2157", 0},
++ {"si2146", 1},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, si2157_id);
++
++static struct i2c_driver si2157_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "si2157",
++ },
++ .probe = si2157_probe,
++ .remove = si2157_remove,
++ .id_table = si2157_id,
++};
++
++module_i2c_driver(si2157_driver);
++
++MODULE_DESCRIPTION("Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver");
++MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
++MODULE_LICENSE("GPL");
++MODULE_FIRMWARE(SI2158_A20_FIRMWARE);
+diff -Nur linux-3.14.36/drivers/media/tuners/si2157.h linux-openelec/drivers/media/tuners/si2157.h
+--- linux-3.14.36/drivers/media/tuners/si2157.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/tuners/si2157.h 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,39 @@
++/*
++ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
++ *
++ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef SI2157_H
++#define SI2157_H
++
++#include <linux/kconfig.h>
++#include "dvb_frontend.h"
++
++/*
++ * I2C address
++ * 0x60
++ */
++struct si2157_config {
++ /*
++ * frontend
++ */
++ struct dvb_frontend *fe;
++
++ /*
++ * Spectral Inversion
++ */
++ bool inversion;
++};
++
++#endif
+diff -Nur linux-3.14.36/drivers/media/tuners/si2157_priv.h linux-openelec/drivers/media/tuners/si2157_priv.h
+--- linux-3.14.36/drivers/media/tuners/si2157_priv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/tuners/si2157_priv.h 2015-07-24 18:03:30.148842002 -0500
+@@ -0,0 +1,47 @@
++/*
++ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
++ *
++ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef SI2157_PRIV_H
++#define SI2157_PRIV_H
++
++#include <linux/firmware.h>
++#include "si2157.h"
++
++/* state struct */
++struct si2157 {
++ struct mutex i2c_mutex;
++ struct i2c_client *client;
++ struct dvb_frontend *fe;
++ bool active;
++ bool fw_loaded;
++ bool inversion;
++ u8 chiptype;
++};
++
++#define SI2157_CHIPTYPE_SI2157 0
++#define SI2157_CHIPTYPE_SI2146 1
++
++/* firmware command struct */
++#define SI2157_ARGLEN 30
++struct si2157_cmd {
++ u8 args[SI2157_ARGLEN];
++ unsigned wlen;
++ unsigned rlen;
++};
++
++#define SI2158_A20_FIRMWARE "dvb-tuner-si2158-a20-01.fw"
++
++#endif
+diff -Nur linux-3.14.36/drivers/media/usb/dvb-usb/dw2102.c linux-openelec/drivers/media/usb/dvb-usb/dw2102.c
+--- linux-3.14.36/drivers/media/usb/dvb-usb/dw2102.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/dvb-usb/dw2102.c 2015-07-24 18:03:30.140842002 -0500
+@@ -1109,6 +1109,7 @@
+ static struct cxd2820r_config cxd2820r_config = {
+ .i2c_address = 0x6c, /* (0xd8 >> 1) */
+ .ts_mode = 0x38,
++ .ts_clock_inv = 1,
+ };
+
+ static struct tda18271_config tda18271_config = {
+@@ -1387,20 +1388,27 @@
+
+ static int t220_frontend_attach(struct dvb_usb_adapter *d)
+ {
+- u8 obuf[3] = { 0xe, 0x80, 0 };
++ u8 obuf[3] = { 0xe, 0x87, 0 };
+ u8 ibuf[] = { 0 };
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ obuf[0] = 0xe;
+- obuf[1] = 0x83;
++ obuf[1] = 0x86;
++ obuf[2] = 1;
++
++ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
++ err("command 0x0e transfer failed.");
++
++ obuf[0] = 0xe;
++ obuf[1] = 0x80;
+ obuf[2] = 0;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+- msleep(100);
++ msleep(50);
+
+ obuf[0] = 0xe;
+ obuf[1] = 0x80;
+diff -Nur linux-3.14.36/drivers/media/usb/dvb-usb/pctv452e.c linux-openelec/drivers/media/usb/dvb-usb/pctv452e.c
+--- linux-3.14.36/drivers/media/usb/dvb-usb/pctv452e.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/dvb-usb/pctv452e.c 2015-07-24 18:03:30.140842002 -0500
+@@ -995,11 +995,11 @@
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_ISOC,
+- .count = 7,
++ .count = 4,
+ .endpoint = 0x02,
+ .u = {
+ .isoc = {
+- .framesperurb = 4,
++ .framesperurb = 64,
+ .framesize = 940,
+ .interval = 1
+ }
+diff -Nur linux-3.14.36/drivers/media/usb/dvb-usb-v2/dvbsky.c linux-openelec/drivers/media/usb/dvb-usb-v2/dvbsky.c
+--- linux-3.14.36/drivers/media/usb/dvb-usb-v2/dvbsky.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/usb/dvb-usb-v2/dvbsky.c 2015-07-24 18:03:30.196842002 -0500
+@@ -0,0 +1,845 @@
++/*
++ * Driver for DVBSky USB2.0 receiver
++ *
++ * Copyright (C) 2013 Max nibble <nibble.max@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include "dvb_usb.h"
++#include "m88ds3103.h"
++#include "m88ts2022.h"
++#include "sp2.h"
++#include "si2168.h"
++#include "si2157.h"
++
++#define DVBSKY_MSG_DELAY 0/*2000*/
++#define DVBSKY_BUF_LEN 64
++
++static int dvb_usb_dvbsky_disable_rc;
++module_param_named(disable_rc, dvb_usb_dvbsky_disable_rc, int, 0644);
++MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver.");
++
++DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
++
++struct dvbsky_state {
++ struct mutex stream_mutex;
++ u8 ibuf[DVBSKY_BUF_LEN];
++ u8 obuf[DVBSKY_BUF_LEN];
++ u8 last_lock;
++ struct i2c_client *i2c_client_demod;
++ struct i2c_client *i2c_client_tuner;
++ struct i2c_client *i2c_client_ci;
++
++ /* fe hook functions*/
++ int (*fe_set_voltage)(struct dvb_frontend *fe,
++ fe_sec_voltage_t voltage);
++ int (*fe_read_status)(struct dvb_frontend *fe,
++ fe_status_t *status);
++};
++
++static int dvbsky_usb_generic_rw(struct dvb_usb_device *d,
++ u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
++{
++ int ret;
++ struct dvbsky_state *state = d_to_priv(d);
++
++ mutex_lock(&d->usb_mutex);
++ if (wlen != 0)
++ memcpy(state->obuf, wbuf, wlen);
++
++ ret = dvb_usbv2_generic_rw_locked(d, state->obuf, wlen,
++ state->ibuf, rlen);
++
++ if (!ret && (rlen != 0))
++ memcpy(rbuf, state->ibuf, rlen);
++
++ mutex_unlock(&d->usb_mutex);
++ return ret;
++}
++
++static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff)
++{
++ struct dvbsky_state *state = d_to_priv(d);
++ int ret;
++ u8 obuf_pre[3] = { 0x37, 0, 0 };
++ u8 obuf_post[3] = { 0x36, 3, 0 };
++
++ mutex_lock(&state->stream_mutex);
++ ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0);
++ if (!ret && onoff) {
++ msleep(20);
++ ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0);
++ }
++ mutex_unlock(&state->stream_mutex);
++ return ret;
++}
++
++static int dvbsky_streaming_ctrl(struct dvb_frontend *fe, int onoff)
++{
++ struct dvb_usb_device *d = fe_to_d(fe);
++
++ return dvbsky_stream_ctrl(d, (onoff == 0) ? 0 : 1);
++}
++
++/* GPIO */
++static int dvbsky_gpio_ctrl(struct dvb_usb_device *d, u8 gport, u8 value)
++{
++ int ret;
++ u8 obuf[3], ibuf[2];
++
++ obuf[0] = 0x0e;
++ obuf[1] = gport;
++ obuf[2] = value;
++ ret = dvbsky_usb_generic_rw(d, obuf, 3, ibuf, 1);
++ if (ret)
++ dev_err(&d->udev->dev, "failed=%d\n", ret);
++ return ret;
++}
++
++/* I2C */
++static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
++ int num)
++{
++ struct dvb_usb_device *d = i2c_get_adapdata(adap);
++ int ret = 0;
++ u8 ibuf[64], obuf[64];
++
++ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
++ return -EAGAIN;
++
++ if (num > 2) {
++ dev_err(&d->udev->dev,
++ "too many i2c messages[%d], max 2.", num);
++ ret = -EOPNOTSUPP;
++ goto i2c_error;
++ }
++
++ if (num == 1) {
++ if (msg[0].len > 60) {
++ dev_err(&d->udev->dev,
++ "too many i2c bytes[%d], max 60.",
++ msg[0].len);
++ ret = -EOPNOTSUPP;
++ goto i2c_error;
++ }
++ if (msg[0].flags & I2C_M_RD) {
++ /* single read */
++ obuf[0] = 0x09;
++ obuf[1] = 0;
++ obuf[2] = msg[0].len;
++ obuf[3] = msg[0].addr;
++ ret = dvbsky_usb_generic_rw(d, obuf, 4,
++ ibuf, msg[0].len + 1);
++ if (ret)
++ dev_err(&d->udev->dev, "failed=%d\n", ret);
++ if (!ret)
++ memcpy(msg[0].buf, &ibuf[1], msg[0].len);
++ } else {
++ /* write */
++ obuf[0] = 0x08;
++ obuf[1] = msg[0].addr;
++ obuf[2] = msg[0].len;
++ memcpy(&obuf[3], msg[0].buf, msg[0].len);
++ ret = dvbsky_usb_generic_rw(d, obuf,
++ msg[0].len + 3, ibuf, 1);
++ if (ret)
++ dev_err(&d->udev->dev, "failed=%d\n", ret);
++ }
++ } else {
++ if ((msg[0].len > 60) || (msg[1].len > 60)) {
++ dev_err(&d->udev->dev,
++ "too many i2c bytes[w-%d][r-%d], max 60.",
++ msg[0].len, msg[1].len);
++ ret = -EOPNOTSUPP;
++ goto i2c_error;
++ }
++ /* write then read */
++ obuf[0] = 0x09;
++ obuf[1] = msg[0].len;
++ obuf[2] = msg[1].len;
++ obuf[3] = msg[0].addr;
++ memcpy(&obuf[4], msg[0].buf, msg[0].len);
++ ret = dvbsky_usb_generic_rw(d, obuf,
++ msg[0].len + 4, ibuf, msg[1].len + 1);
++ if (ret)
++ dev_err(&d->udev->dev, "failed=%d\n", ret);
++
++ if (!ret)
++ memcpy(msg[1].buf, &ibuf[1], msg[1].len);
++ }
++i2c_error:
++ mutex_unlock(&d->i2c_mutex);
++ return (ret) ? ret : num;
++}
++
++static u32 dvbsky_i2c_func(struct i2c_adapter *adapter)
++{
++ return I2C_FUNC_I2C;
++}
++
++static struct i2c_algorithm dvbsky_i2c_algo = {
++ .master_xfer = dvbsky_i2c_xfer,
++ .functionality = dvbsky_i2c_func,
++};
++
++#if IS_ENABLED(CONFIG_RC_CORE)
++static int dvbsky_rc_query(struct dvb_usb_device *d)
++{
++ return 0;
++}
++
++static int dvbsky_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
++{
++ if (dvb_usb_dvbsky_disable_rc) {
++ rc->map_name = NULL;
++ return 0;
++ }
++
++ rc->allowed_protos = RC_BIT_RC5;
++ rc->query = dvbsky_rc_query;
++ rc->interval = 300;
++ return 0;
++}
++#else
++ #define dvbsky_get_rc_config NULL
++#endif
++
++static int dvbsky_usb_set_voltage(struct dvb_frontend *fe,
++ fe_sec_voltage_t voltage)
++{
++ struct dvb_usb_device *d = fe_to_d(fe);
++ struct dvbsky_state *state = d_to_priv(d);
++ u8 value;
++
++ if (voltage == SEC_VOLTAGE_OFF)
++ value = 0;
++ else
++ value = 1;
++ dvbsky_gpio_ctrl(d, 0x80, value);
++
++ return state->fe_set_voltage(fe, voltage);
++}
++
++static int dvbsky_read_mac_addr(struct dvb_usb_adapter *adap, u8 mac[6])
++{
++ struct dvb_usb_device *d = adap_to_d(adap);
++ u8 obuf[] = { 0x1e, 0x00 };
++ u8 ibuf[6] = { 0 };
++ struct i2c_msg msg[] = {
++ {
++ .addr = 0x51,
++ .flags = 0,
++ .buf = obuf,
++ .len = 2,
++ }, {
++ .addr = 0x51,
++ .flags = I2C_M_RD,
++ .buf = ibuf,
++ .len = 6,
++ }
++ };
++
++ if (i2c_transfer(&d->i2c_adap, msg, 2) == 2)
++ memcpy(mac, ibuf, 6);
++
++ return 0;
++}
++
++static int dvbsky_usb_read_status(struct dvb_frontend *fe, fe_status_t *status)
++{
++ struct dvb_usb_device *d = fe_to_d(fe);
++ struct dvbsky_state *state = d_to_priv(d);
++ int ret;
++
++ ret = state->fe_read_status(fe, status);
++
++ /* it need resync slave fifo when signal change from unlock to lock.*/
++ if ((*status & FE_HAS_LOCK) && (!state->last_lock))
++ dvbsky_stream_ctrl(d, 1);
++
++ state->last_lock = (*status & FE_HAS_LOCK) ? 1 : 0;
++ return ret;
++}
++
++static const struct m88ds3103_config dvbsky_s960_m88ds3103_config = {
++ .i2c_addr = 0x68,
++ .clock = 27000000,
++ .i2c_wr_max = 33,
++ .clock_out = 0,
++ .ts_mode = M88DS3103_TS_CI,
++ .ts_clk = 16000,
++ .ts_clk_pol = 0,
++ .agc = 0x99,
++ .lnb_hv_pol = 1,
++ .lnb_en_pol = 1,
++};
++
++static int dvbsky_s960_attach(struct dvb_usb_adapter *adap)
++{
++ struct dvbsky_state *state = adap_to_priv(adap);
++ struct dvb_usb_device *d = adap_to_d(adap);
++ int ret = 0;
++ /* demod I2C adapter */
++ struct i2c_adapter *i2c_adapter;
++ struct i2c_client *client;
++ struct i2c_board_info info;
++ struct m88ts2022_config m88ts2022_config = {
++ .clock = 27000000,
++ };
++ memset(&info, 0, sizeof(struct i2c_board_info));
++
++ /* attach demod */
++ adap->fe[0] = dvb_attach(m88ds3103_attach,
++ &dvbsky_s960_m88ds3103_config,
++ &d->i2c_adap,
++ &i2c_adapter);
++ if (!adap->fe[0]) {
++ dev_err(&d->udev->dev, "dvbsky_s960_attach fail.\n");
++ ret = -ENODEV;
++ goto fail_attach;
++ }
++
++ /* attach tuner */
++ m88ts2022_config.fe = adap->fe[0];
++ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
++ info.addr = 0x60;
++ info.platform_data = &m88ts2022_config;
++ request_module("m88ts2022");
++ client = i2c_new_device(i2c_adapter, &info);
++ if (client == NULL || client->dev.driver == NULL) {
++ dvb_frontend_detach(adap->fe[0]);
++ ret = -ENODEV;
++ goto fail_attach;
++ }
++
++ if (!try_module_get(client->dev.driver->owner)) {
++ i2c_unregister_device(client);
++ dvb_frontend_detach(adap->fe[0]);
++ ret = -ENODEV;
++ goto fail_attach;
++ }
++
++ /* delegate signal strength measurement to tuner */
++ adap->fe[0]->ops.read_signal_strength =
++ adap->fe[0]->ops.tuner_ops.get_rf_strength;
++
++ /* hook fe: need to resync the slave fifo when signal locks. */
++ state->fe_read_status = adap->fe[0]->ops.read_status;
++ adap->fe[0]->ops.read_status = dvbsky_usb_read_status;
++
++ /* hook fe: LNB off/on is control by Cypress usb chip. */
++ state->fe_set_voltage = adap->fe[0]->ops.set_voltage;
++ adap->fe[0]->ops.set_voltage = dvbsky_usb_set_voltage;
++
++ state->i2c_client_tuner = client;
++
++fail_attach:
++ return ret;
++}
++
++static int dvbsky_usb_ci_set_voltage(struct dvb_frontend *fe,
++ fe_sec_voltage_t voltage)
++{
++ struct dvb_usb_device *d = fe_to_d(fe);
++ struct dvbsky_state *state = d_to_priv(d);
++ u8 value;
++
++ if (voltage == SEC_VOLTAGE_OFF)
++ value = 0;
++ else
++ value = 1;
++ dvbsky_gpio_ctrl(d, 0x00, value);
++
++ return state->fe_set_voltage(fe, voltage);
++}
++
++static int dvbsky_ci_ctrl(void *priv, u8 read, int addr,
++ u8 data, int *mem)
++{
++ struct dvb_usb_device *d = priv;
++ int ret = 0;
++ u8 command[4], respond[2], command_size, respond_size;
++
++ command[1] = (u8)((addr >> 8) & 0xff); /*high part of address*/
++ command[2] = (u8)(addr & 0xff); /*low part of address*/
++ if (read) {
++ command[0] = 0x71;
++ command_size = 3;
++ respond_size = 2;
++ } else {
++ command[0] = 0x70;
++ command[3] = data;
++ command_size = 4;
++ respond_size = 1;
++ }
++ ret = dvbsky_usb_generic_rw(d, command, command_size,
++ respond, respond_size);
++ if (ret)
++ goto err;
++ if (read)
++ *mem = respond[1];
++ return ret;
++err:
++ dev_err(&d->udev->dev, "ci control failed=%d\n", ret);
++ return ret;
++}
++
++static const struct m88ds3103_config dvbsky_s960c_m88ds3103_config = {
++ .i2c_addr = 0x68,
++ .clock = 27000000,
++ .i2c_wr_max = 33,
++ .clock_out = 0,
++ .ts_mode = M88DS3103_TS_CI,
++ .agc = 0x99,
++};
++
++static int dvbsky_s960c_attach(struct dvb_usb_adapter *adap)
++{
++ struct dvbsky_state *state = adap_to_priv(adap);
++ struct dvb_usb_device *d = adap_to_d(adap);
++ int ret = 0;
++ /* demod I2C adapter */
++ struct i2c_adapter *i2c_adapter;
++ struct i2c_client *client_tuner, *client_ci;
++ struct i2c_board_info info;
++ struct sp2_config sp2_config;
++ struct m88ts2022_config m88ts2022_config = {
++ .clock = 27000000,
++ };
++ memset(&info, 0, sizeof(struct i2c_board_info));
++
++ /* attach demod */
++ adap->fe[0] = dvb_attach(m88ds3103_attach,
++ &dvbsky_s960c_m88ds3103_config,
++ &d->i2c_adap,
++ &i2c_adapter);
++ if (!adap->fe[0]) {
++ dev_err(&d->udev->dev, "dvbsky_s960ci_attach fail.\n");
++ ret = -ENODEV;
++ goto fail_attach;
++ }
++
++ /* attach tuner */
++ m88ts2022_config.fe = adap->fe[0];
++ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
++ info.addr = 0x60;
++ info.platform_data = &m88ts2022_config;
++ request_module("m88ts2022");
++ client_tuner = i2c_new_device(i2c_adapter, &info);
++ if (client_tuner == NULL || client_tuner->dev.driver == NULL) {
++ ret = -ENODEV;
++ goto fail_tuner_device;
++ }
++
++ if (!try_module_get(client_tuner->dev.driver->owner)) {
++ ret = -ENODEV;
++ goto fail_tuner_module;
++ }
++
++ /* attach ci controller */
++ memset(&sp2_config, 0, sizeof(sp2_config));
++ sp2_config.dvb_adap = &adap->dvb_adap;
++ sp2_config.priv = d;
++ sp2_config.ci_control = dvbsky_ci_ctrl;
++ memset(&info, 0, sizeof(struct i2c_board_info));
++ strlcpy(info.type, "sp2", I2C_NAME_SIZE);
++ info.addr = 0x40;
++ info.platform_data = &sp2_config;
++ request_module("sp2");
++ client_ci = i2c_new_device(&d->i2c_adap, &info);
++ if (client_ci == NULL || client_ci->dev.driver == NULL) {
++ ret = -ENODEV;
++ goto fail_ci_device;
++ }
++
++ if (!try_module_get(client_ci->dev.driver->owner)) {
++ ret = -ENODEV;
++ goto fail_ci_module;
++ }
++
++ /* delegate signal strength measurement to tuner */
++ adap->fe[0]->ops.read_signal_strength =
++ adap->fe[0]->ops.tuner_ops.get_rf_strength;
++
++ /* hook fe: need to resync the slave fifo when signal locks. */
++ state->fe_read_status = adap->fe[0]->ops.read_status;
++ adap->fe[0]->ops.read_status = dvbsky_usb_read_status;
++
++ /* hook fe: LNB off/on is control by Cypress usb chip. */
++ state->fe_set_voltage = adap->fe[0]->ops.set_voltage;
++ adap->fe[0]->ops.set_voltage = dvbsky_usb_ci_set_voltage;
++
++ state->i2c_client_tuner = client_tuner;
++ state->i2c_client_ci = client_ci;
++ return ret;
++fail_ci_module:
++ i2c_unregister_device(client_ci);
++fail_ci_device:
++ module_put(client_tuner->dev.driver->owner);
++fail_tuner_module:
++ i2c_unregister_device(client_tuner);
++fail_tuner_device:
++ dvb_frontend_detach(adap->fe[0]);
++fail_attach:
++ return ret;
++}
++
++static int dvbsky_t680c_attach(struct dvb_usb_adapter *adap)
++{
++ struct dvbsky_state *state = adap_to_priv(adap);
++ struct dvb_usb_device *d = adap_to_d(adap);
++ int ret = 0;
++ struct i2c_adapter *i2c_adapter;
++ struct i2c_client *client_demod, *client_tuner, *client_ci;
++ struct i2c_board_info info;
++ struct si2168_config si2168_config;
++ struct si2157_config si2157_config;
++ struct sp2_config sp2_config;
++
++ /* attach demod */
++ memset(&si2168_config, 0, sizeof(si2168_config));
++ si2168_config.i2c_adapter = &i2c_adapter;
++ si2168_config.fe = &adap->fe[0];
++ si2168_config.ts_mode = SI2168_TS_PARALLEL;
++ memset(&info, 0, sizeof(struct i2c_board_info));
++ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
++ info.addr = 0x64;
++ info.platform_data = &si2168_config;
++
++ request_module(info.type);
++ client_demod = i2c_new_device(&d->i2c_adap, &info);
++ if (client_demod == NULL ||
++ client_demod->dev.driver == NULL)
++ goto fail_demod_device;
++ if (!try_module_get(client_demod->dev.driver->owner))
++ goto fail_demod_module;
++
++ /* attach tuner */
++ memset(&si2157_config, 0, sizeof(si2157_config));
++ si2157_config.fe = adap->fe[0];
++ memset(&info, 0, sizeof(struct i2c_board_info));
++ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
++ info.addr = 0x60;
++ info.platform_data = &si2157_config;
++
++ request_module(info.type);
++ client_tuner = i2c_new_device(i2c_adapter, &info);
++ if (client_tuner == NULL ||
++ client_tuner->dev.driver == NULL)
++ goto fail_tuner_device;
++ if (!try_module_get(client_tuner->dev.driver->owner))
++ goto fail_tuner_module;
++
++ /* attach ci controller */
++ memset(&sp2_config, 0, sizeof(sp2_config));
++ sp2_config.dvb_adap = &adap->dvb_adap;
++ sp2_config.priv = d;
++ sp2_config.ci_control = dvbsky_ci_ctrl;
++ memset(&info, 0, sizeof(struct i2c_board_info));
++ strlcpy(info.type, "sp2", I2C_NAME_SIZE);
++ info.addr = 0x40;
++ info.platform_data = &sp2_config;
++
++ request_module(info.type);
++ client_ci = i2c_new_device(&d->i2c_adap, &info);
++
++ if (client_ci == NULL || client_ci->dev.driver == NULL)
++ goto fail_ci_device;
++
++ if (!try_module_get(client_ci->dev.driver->owner))
++ goto fail_ci_module;
++
++ state->i2c_client_demod = client_demod;
++ state->i2c_client_tuner = client_tuner;
++ state->i2c_client_ci = client_ci;
++ return ret;
++fail_ci_module:
++ i2c_unregister_device(client_ci);
++fail_ci_device:
++ module_put(client_tuner->dev.driver->owner);
++fail_tuner_module:
++ i2c_unregister_device(client_tuner);
++fail_tuner_device:
++ module_put(client_demod->dev.driver->owner);
++fail_demod_module:
++ i2c_unregister_device(client_demod);
++fail_demod_device:
++ ret = -ENODEV;
++ return ret;
++}
++
++static int dvbsky_t330_attach(struct dvb_usb_adapter *adap)
++{
++ struct dvbsky_state *state = adap_to_priv(adap);
++ struct dvb_usb_device *d = adap_to_d(adap);
++ int ret = 0;
++ struct i2c_adapter *i2c_adapter;
++ struct i2c_client *client_demod, *client_tuner;
++ struct i2c_board_info info;
++ struct si2168_config si2168_config;
++ struct si2157_config si2157_config;
++
++ /* attach demod */
++ memset(&si2168_config, 0, sizeof(si2168_config));
++ si2168_config.i2c_adapter = &i2c_adapter;
++ si2168_config.fe = &adap->fe[0];
++ si2168_config.ts_mode = SI2168_TS_PARALLEL | 0x40;
++ memset(&info, 0, sizeof(struct i2c_board_info));
++ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
++ info.addr = 0x64;
++ info.platform_data = &si2168_config;
++
++ request_module(info.type);
++ client_demod = i2c_new_device(&d->i2c_adap, &info);
++ if (client_demod == NULL ||
++ client_demod->dev.driver == NULL)
++ goto fail_demod_device;
++ if (!try_module_get(client_demod->dev.driver->owner))
++ goto fail_demod_module;
++
++ /* attach tuner */
++ memset(&si2157_config, 0, sizeof(si2157_config));
++ si2157_config.fe = adap->fe[0];
++ memset(&info, 0, sizeof(struct i2c_board_info));
++ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
++ info.addr = 0x60;
++ info.platform_data = &si2157_config;
++
++ request_module(info.type);
++ client_tuner = i2c_new_device(i2c_adapter, &info);
++ if (client_tuner == NULL ||
++ client_tuner->dev.driver == NULL)
++ goto fail_tuner_device;
++ if (!try_module_get(client_tuner->dev.driver->owner))
++ goto fail_tuner_module;
++
++ state->i2c_client_demod = client_demod;
++ state->i2c_client_tuner = client_tuner;
++ return ret;
++fail_tuner_module:
++ i2c_unregister_device(client_tuner);
++fail_tuner_device:
++ module_put(client_demod->dev.driver->owner);
++fail_demod_module:
++ i2c_unregister_device(client_demod);
++fail_demod_device:
++ ret = -ENODEV;
++ return ret;
++}
++
++static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name)
++{
++ dvbsky_gpio_ctrl(d, 0x04, 1);
++ msleep(20);
++ dvbsky_gpio_ctrl(d, 0x83, 0);
++ dvbsky_gpio_ctrl(d, 0xc0, 1);
++ msleep(100);
++ dvbsky_gpio_ctrl(d, 0x83, 1);
++ dvbsky_gpio_ctrl(d, 0xc0, 0);
++ msleep(50);
++
++ return WARM;
++}
++
++static int dvbsky_init(struct dvb_usb_device *d)
++{
++ struct dvbsky_state *state = d_to_priv(d);
++
++ /* use default interface */
++ /*
++ ret = usb_set_interface(d->udev, 0, 0);
++ if (ret)
++ return ret;
++ */
++ mutex_init(&state->stream_mutex);
++
++ state->last_lock = 0;
++
++ return 0;
++}
++
++static void dvbsky_exit(struct dvb_usb_device *d)
++{
++ struct dvbsky_state *state = d_to_priv(d);
++ struct i2c_client *client;
++
++ client = state->i2c_client_tuner;
++ /* remove I2C tuner */
++ if (client) {
++ module_put(client->dev.driver->owner);
++ i2c_unregister_device(client);
++ }
++ client = state->i2c_client_demod;
++ /* remove I2C demod */
++ if (client) {
++ module_put(client->dev.driver->owner);
++ i2c_unregister_device(client);
++ }
++ client = state->i2c_client_ci;
++ /* remove I2C ci */
++ if (client) {
++ module_put(client->dev.driver->owner);
++ i2c_unregister_device(client);
++ }
++}
++
++/* DVB USB Driver stuff */
++static struct dvb_usb_device_properties dvbsky_s960_props = {
++ .driver_name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ .adapter_nr = adapter_nr,
++ .size_of_priv = sizeof(struct dvbsky_state),
++
++ .generic_bulk_ctrl_endpoint = 0x01,
++ .generic_bulk_ctrl_endpoint_response = 0x81,
++ .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
++
++ .i2c_algo = &dvbsky_i2c_algo,
++ .frontend_attach = dvbsky_s960_attach,
++ .init = dvbsky_init,
++ .get_rc_config = dvbsky_get_rc_config,
++ .streaming_ctrl = dvbsky_streaming_ctrl,
++ .identify_state = dvbsky_identify_state,
++ .exit = dvbsky_exit,
++ .read_mac_address = dvbsky_read_mac_addr,
++
++ .num_adapters = 1,
++ .adapter = {
++ {
++ .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
++ }
++ }
++};
++
++static struct dvb_usb_device_properties dvbsky_s960c_props = {
++ .driver_name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ .adapter_nr = adapter_nr,
++ .size_of_priv = sizeof(struct dvbsky_state),
++
++ .generic_bulk_ctrl_endpoint = 0x01,
++ .generic_bulk_ctrl_endpoint_response = 0x81,
++ .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
++
++ .i2c_algo = &dvbsky_i2c_algo,
++ .frontend_attach = dvbsky_s960c_attach,
++ .init = dvbsky_init,
++ .get_rc_config = dvbsky_get_rc_config,
++ .streaming_ctrl = dvbsky_streaming_ctrl,
++ .identify_state = dvbsky_identify_state,
++ .exit = dvbsky_exit,
++ .read_mac_address = dvbsky_read_mac_addr,
++
++ .num_adapters = 1,
++ .adapter = {
++ {
++ .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
++ }
++ }
++};
++
++static struct dvb_usb_device_properties dvbsky_t680c_props = {
++ .driver_name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ .adapter_nr = adapter_nr,
++ .size_of_priv = sizeof(struct dvbsky_state),
++
++ .generic_bulk_ctrl_endpoint = 0x01,
++ .generic_bulk_ctrl_endpoint_response = 0x81,
++ .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
++
++ .i2c_algo = &dvbsky_i2c_algo,
++ .frontend_attach = dvbsky_t680c_attach,
++ .init = dvbsky_init,
++ .get_rc_config = dvbsky_get_rc_config,
++ .streaming_ctrl = dvbsky_streaming_ctrl,
++ .identify_state = dvbsky_identify_state,
++ .exit = dvbsky_exit,
++ .read_mac_address = dvbsky_read_mac_addr,
++
++ .num_adapters = 1,
++ .adapter = {
++ {
++ .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
++ }
++ }
++};
++
++static struct dvb_usb_device_properties dvbsky_t330_props = {
++ .driver_name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ .adapter_nr = adapter_nr,
++ .size_of_priv = sizeof(struct dvbsky_state),
++
++ .generic_bulk_ctrl_endpoint = 0x01,
++ .generic_bulk_ctrl_endpoint_response = 0x81,
++ .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
++
++ .i2c_algo = &dvbsky_i2c_algo,
++ .frontend_attach = dvbsky_t330_attach,
++ .init = dvbsky_init,
++ .get_rc_config = dvbsky_get_rc_config,
++ .streaming_ctrl = dvbsky_streaming_ctrl,
++ .identify_state = dvbsky_identify_state,
++ .exit = dvbsky_exit,
++ .read_mac_address = dvbsky_read_mac_addr,
++
++ .num_adapters = 1,
++ .adapter = {
++ {
++ .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
++ }
++ }
++};
++
++static const struct usb_device_id dvbsky_id_table[] = {
++ { DVB_USB_DEVICE(0x0572, 0x6831,
++ &dvbsky_s960_props, "DVBSky S960/S860", RC_MAP_DVBSKY) },
++ { DVB_USB_DEVICE(0x0572, 0x960c,
++ &dvbsky_s960c_props, "DVBSky S960CI", RC_MAP_DVBSKY) },
++ { DVB_USB_DEVICE(0x0572, 0x680c,
++ &dvbsky_t680c_props, "DVBSky T680CI", RC_MAP_DVBSKY) },
++ { DVB_USB_DEVICE(0x0572, 0x0320,
++ &dvbsky_t330_props, "DVBSky T330", RC_MAP_DVBSKY) },
++ { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
++ USB_PID_TECHNOTREND_TVSTICK_CT2_4400,
++ &dvbsky_t330_props, "TechnoTrend TVStick CT2-4400",
++ RC_MAP_TT_1500) },
++ { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
++ USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI,
++ &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI",
++ RC_MAP_TT_1500) },
++ { }
++};
++MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
++
++static struct usb_driver dvbsky_usb_driver = {
++ .name = KBUILD_MODNAME,
++ .id_table = dvbsky_id_table,
++ .probe = dvb_usbv2_probe,
++ .disconnect = dvb_usbv2_disconnect,
++ .suspend = dvb_usbv2_suspend,
++ .resume = dvb_usbv2_resume,
++ .reset_resume = dvb_usbv2_reset_resume,
++ .no_dynamic_id = 1,
++ .soft_unbind = 1,
++};
++
++module_usb_driver(dvbsky_usb_driver);
++
++MODULE_AUTHOR("Max nibble <nibble.max@gmail.com>");
++MODULE_DESCRIPTION("Driver for DVBSky USB");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/media/usb/dvb-usb-v2/Kconfig linux-openelec/drivers/media/usb/dvb-usb-v2/Kconfig
+--- linux-3.14.36/drivers/media/usb/dvb-usb-v2/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/dvb-usb-v2/Kconfig 2015-07-24 18:03:30.152842002 -0500
+@@ -147,3 +147,12 @@
+ help
+ Say Y here to support the Realtek RTL28xxU DVB USB receiver.
+
++config DVB_USB_DVBSKY
++ tristate "DVBSky USB2.0 support"
++ depends on DVB_USB_V2
++ select DVB_DVBSKY_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
++ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
++ select DVB_SP2 if MEDIA_SUBDRV_AUTOSELECT
++ select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
++ help
++ Say Y here to support the USB receivers from DVBSky.
+diff -Nur linux-3.14.36/drivers/media/usb/dvb-usb-v2/Makefile linux-openelec/drivers/media/usb/dvb-usb-v2/Makefile
+--- linux-3.14.36/drivers/media/usb/dvb-usb-v2/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/dvb-usb-v2/Makefile 2015-07-24 18:03:30.128842002 -0500
+@@ -40,6 +40,9 @@
+ dvb-usb-rtl28xxu-objs := rtl28xxu.o
+ obj-$(CONFIG_DVB_USB_RTL28XXU) += dvb-usb-rtl28xxu.o
+
++dvb-usb-dvbsky-objs := dvbsky.o
++obj-$(CONFIG_DVB_USB_DVBSKY) += dvb-usb-dvbsky.o
++
+ ccflags-y += -I$(srctree)/drivers/media/dvb-core
+ ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+ ccflags-y += -I$(srctree)/drivers/media/tuners
+diff -Nur linux-3.14.36/drivers/media/usb/dvb-usb-v2/rtl28xxu.c linux-openelec/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+--- linux-3.14.36/drivers/media/usb/dvb-usb-v2/rtl28xxu.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/dvb-usb-v2/rtl28xxu.c 2015-07-24 18:03:30.060842002 -0500
+@@ -1441,6 +1441,8 @@
+ &rtl2832u_props, "Sveon STV20", NULL) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV27,
+ &rtl2832u_props, "Sveon STV27", NULL) },
++ { DVB_USB_DEVICE(USB_VID_GTEK, 0xa803,
++ &rtl2832u_props, "Realtek RTL2832U reference design", NULL) },
+
+ /* RTL2832P devices: */
+ { DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
+diff -Nur linux-3.14.36/drivers/media/usb/em28xx/em28xx-cards.c linux-openelec/drivers/media/usb/em28xx/em28xx-cards.c
+--- linux-3.14.36/drivers/media/usb/em28xx/em28xx-cards.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/em28xx/em28xx-cards.c 2015-07-24 18:03:30.156842002 -0500
+@@ -448,6 +448,18 @@
+ { -1, -1, -1, -1},
+ };
+
++static struct em28xx_reg_seq pctv_292e[] = {
++ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0},
++ {0x0d, 0xff, 0xff, 950},
++ {EM2874_R80_GPIO_P0_CTRL, 0xbd, 0xff, 100},
++ {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 410},
++ {EM2874_R80_GPIO_P0_CTRL, 0x7d, 0xff, 300},
++ {EM2874_R80_GPIO_P0_CTRL, 0x7c, 0xff, 60},
++ {0x0d, 0x42, 0xff, 50},
++ {EM2874_R5F_TS_ENABLE, 0x85, 0xff, 0},
++ {-1, -1, -1, -1},
++};
++
+ /*
+ * Button definitions
+ */
+@@ -2157,6 +2169,17 @@
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
+ },
++ /* 2013:025f PCTV tripleStick (292e).
++ * Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2157 */
++ [EM28178_BOARD_PCTV_292E] = {
++ .name = "PCTV tripleStick (292e)",
++ .def_i2c_bus = 1,
++ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ,
++ .tuner_type = TUNER_ABSENT,
++ .tuner_gpio = pctv_292e,
++ .has_dvb = 1,
++ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
++ },
+ };
+ EXPORT_SYMBOL_GPL(em28xx_boards);
+
+@@ -2330,6 +2353,8 @@
+ .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE },
+ { USB_DEVICE(0x2013, 0x0258),
+ .driver_info = EM28178_BOARD_PCTV_461E },
++ { USB_DEVICE(0x2013, 0x025f),
++ .driver_info = EM28178_BOARD_PCTV_292E },
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, em28xx_id_table);
+diff -Nur linux-3.14.36/drivers/media/usb/em28xx/em28xx-dvb.c linux-openelec/drivers/media/usb/em28xx/em28xx-dvb.c
+--- linux-3.14.36/drivers/media/usb/em28xx/em28xx-dvb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/em28xx/em28xx-dvb.c 2015-07-24 18:03:30.156842002 -0500
+@@ -53,6 +53,8 @@
+ #include "mb86a20s.h"
+ #include "m88ds3103.h"
+ #include "m88ts2022.h"
++#include "si2168.h"
++#include "si2157.h"
+
+ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+ MODULE_LICENSE("GPL");
+@@ -91,6 +93,7 @@
+ struct semaphore pll_mutex;
+ bool dont_attach_fe1;
+ int lna_gpio;
++ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+ };
+
+@@ -719,6 +722,21 @@
+ #endif
+ }
+
++static int em28xx_pctv_292e_set_lna(struct dvb_frontend *fe)
++{
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
++ struct em28xx *dev = i2c_bus->dev;
++ u8 lna;
++
++ if (c->lna == 1)
++ lna = 0x01;
++ else
++ lna = 0x00;
++
++ return em28xx_write_reg_bits(dev, EM2874_R80_GPIO_P0_CTRL, lna, 0x01);
++}
++
+ static int em28xx_mt352_terratec_xs_init(struct dvb_frontend *fe)
+ {
+ /* Values extracted from a USB trace of the Terratec Windows driver */
+@@ -814,7 +832,9 @@
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+- .ts_mode = M88DS3103_TS_PARALLEL_16,
++ .ts_mode = M88DS3103_TS_PARALLEL,
++ .ts_clk = 16000,
++ .ts_clk_pol = 1,
+ .agc = 0x99,
+ };
+
+@@ -1413,6 +1433,66 @@
+ }
+ }
+ break;
++ case EM28178_BOARD_PCTV_292E:
++ {
++ struct i2c_adapter *adapter;
++ struct i2c_client *client;
++ struct i2c_board_info info;
++ struct si2168_config si2168_config;
++ struct si2157_config si2157_config;
++
++ /* attach demod */
++ memset(&si2168_config, 0, sizeof(si2168_config));
++ si2168_config.i2c_adapter = &adapter;
++ si2168_config.fe = &dvb->fe[0];
++ si2168_config.ts_mode = SI2168_TS_PARALLEL;
++ memset(&info, 0, sizeof(struct i2c_board_info));
++ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
++ info.addr = 0x64;
++ info.platform_data = &si2168_config;
++ request_module(info.type);
++ client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);
++ if (client == NULL || client->dev.driver == NULL) {
++ result = -ENODEV;
++ goto out_free;
++ }
++
++ if (!try_module_get(client->dev.driver->owner)) {
++ i2c_unregister_device(client);
++ result = -ENODEV;
++ goto out_free;
++ }
++
++ dvb->i2c_client_demod = client;
++
++ /* attach tuner */
++ memset(&si2157_config, 0, sizeof(si2157_config));
++ si2157_config.fe = dvb->fe[0];
++ memset(&info, 0, sizeof(struct i2c_board_info));
++ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
++ info.addr = 0x60;
++ info.platform_data = &si2157_config;
++ request_module(info.type);
++ client = i2c_new_device(adapter, &info);
++ if (client == NULL || client->dev.driver == NULL) {
++ module_put(dvb->i2c_client_demod->dev.driver->owner);
++ i2c_unregister_device(dvb->i2c_client_demod);
++ result = -ENODEV;
++ goto out_free;
++ }
++
++ if (!try_module_get(client->dev.driver->owner)) {
++ i2c_unregister_device(client);
++ module_put(dvb->i2c_client_demod->dev.driver->owner);
++ i2c_unregister_device(dvb->i2c_client_demod);
++ result = -ENODEV;
++ goto out_free;
++ }
++
++ dvb->i2c_client_tuner = client;
++ dvb->fe[0]->ops.set_lna = em28xx_pctv_292e_set_lna;
++ }
++ break;
+ default:
+ em28xx_errdev("/2: The frontend of your DVB/ATSC card"
+ " isn't supported yet\n");
+@@ -1485,6 +1565,10 @@
+ }
+
+ i2c_release_client(dvb->i2c_client_tuner);
++ /* remove I2C demod */
++ if (dvb->i2c_client_demod) {
++ i2c_unregister_device(dvb->i2c_client_demod);
++ }
+ em28xx_unregister_dvb(dvb);
+ kfree(dvb);
+ dev->dvb = NULL;
+diff -Nur linux-3.14.36/drivers/media/usb/em28xx/em28xx-dvb.c.orig linux-openelec/drivers/media/usb/em28xx/em28xx-dvb.c.orig
+--- linux-3.14.36/drivers/media/usb/em28xx/em28xx-dvb.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/media/usb/em28xx/em28xx-dvb.c.orig 2015-07-24 18:03:30.152842002 -0500
+@@ -0,0 +1,1516 @@
++/*
++ DVB device driver for em28xx
++
++ (c) 2008-2011 Mauro Carvalho Chehab <mchehab@infradead.org>
++
++ (c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com>
++ - Fixes for the driver to properly work with HVR-950
++ - Fixes for the driver to properly work with Pinnacle PCTV HD Pro Stick
++ - Fixes for the driver to properly work with AMD ATI TV Wonder HD 600
++
++ (c) 2008 Aidan Thornton <makosoft@googlemail.com>
++
++ (c) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
++
++ Based on cx88-dvb, saa7134-dvb and videobuf-dvb originally written by:
++ (c) 2004, 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
++ (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License.
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/usb.h>
++
++#include "em28xx.h"
++#include <media/v4l2-common.h>
++#include <dvb_demux.h>
++#include <dvb_net.h>
++#include <dmxdev.h>
++#include <media/tuner.h>
++#include "tuner-simple.h"
++#include <linux/gpio.h>
++
++#include "lgdt330x.h"
++#include "lgdt3305.h"
++#include "zl10353.h"
++#include "s5h1409.h"
++#include "mt352.h"
++#include "mt352_priv.h" /* FIXME */
++#include "tda1002x.h"
++#include "tda18271.h"
++#include "s921.h"
++#include "drxd.h"
++#include "cxd2820r.h"
++#include "tda18271c2dd.h"
++#include "drxk.h"
++#include "tda10071.h"
++#include "a8293.h"
++#include "qt1010.h"
++#include "mb86a20s.h"
++#include "m88ds3103.h"
++#include "m88ts2022.h"
++
++MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface");
++MODULE_VERSION(EM28XX_VERSION);
++
++
++static unsigned int debug;
++module_param(debug, int, 0644);
++MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
++
++DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
++
++#define dprintk(level, fmt, arg...) do { \
++if (debug >= level) \
++ printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \
++} while (0)
++
++struct em28xx_dvb {
++ struct dvb_frontend *fe[2];
++
++ /* feed count management */
++ struct mutex lock;
++ int nfeeds;
++
++ /* general boilerplate stuff */
++ struct dvb_adapter adapter;
++ struct dvb_demux demux;
++ struct dmxdev dmxdev;
++ struct dmx_frontend fe_hw;
++ struct dmx_frontend fe_mem;
++ struct dvb_net net;
++
++ /* Due to DRX-K - probably need changes */
++ int (*gate_ctrl)(struct dvb_frontend *, int);
++ struct semaphore pll_mutex;
++ bool dont_attach_fe1;
++ int lna_gpio;
++ struct i2c_client *i2c_client_tuner;
++};
++
++
++static inline void print_err_status(struct em28xx *dev,
++ int packet, int status)
++{
++ char *errmsg = "Unknown";
++
++ switch (status) {
++ case -ENOENT:
++ errmsg = "unlinked synchronuously";
++ break;
++ case -ECONNRESET:
++ errmsg = "unlinked asynchronuously";
++ break;
++ case -ENOSR:
++ errmsg = "Buffer error (overrun)";
++ break;
++ case -EPIPE:
++ errmsg = "Stalled (device not responding)";
++ break;
++ case -EOVERFLOW:
++ errmsg = "Babble (bad cable?)";
++ break;
++ case -EPROTO:
++ errmsg = "Bit-stuff error (bad cable?)";
++ break;
++ case -EILSEQ:
++ errmsg = "CRC/Timeout (could be anything)";
++ break;
++ case -ETIME:
++ errmsg = "Device does not respond";
++ break;
++ }
++ if (packet < 0) {
++ dprintk(1, "URB status %d [%s].\n", status, errmsg);
++ } else {
++ dprintk(1, "URB packet %d, status %d [%s].\n",
++ packet, status, errmsg);
++ }
++}
++
++static inline int em28xx_dvb_urb_data_copy(struct em28xx *dev, struct urb *urb)
++{
++ int xfer_bulk, num_packets, i;
++
++ if (!dev)
++ return 0;
++
++ if (dev->disconnected)
++ return 0;
++
++ if (urb->status < 0)
++ print_err_status(dev, -1, urb->status);
++
++ xfer_bulk = usb_pipebulk(urb->pipe);
++
++ if (xfer_bulk) /* bulk */
++ num_packets = 1;
++ else /* isoc */
++ num_packets = urb->number_of_packets;
++
++ for (i = 0; i < num_packets; i++) {
++ if (xfer_bulk) {
++ if (urb->status < 0) {
++ print_err_status(dev, i, urb->status);
++ if (urb->status != -EPROTO)
++ continue;
++ }
++ dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
++ urb->actual_length);
++ } else {
++ if (urb->iso_frame_desc[i].status < 0) {
++ print_err_status(dev, i,
++ urb->iso_frame_desc[i].status);
++ if (urb->iso_frame_desc[i].status != -EPROTO)
++ continue;
++ }
++ dvb_dmx_swfilter(&dev->dvb->demux,
++ urb->transfer_buffer +
++ urb->iso_frame_desc[i].offset,
++ urb->iso_frame_desc[i].actual_length);
++ }
++ }
++
++ return 0;
++}
++
++static int em28xx_start_streaming(struct em28xx_dvb *dvb)
++{
++ int rc;
++ struct em28xx_i2c_bus *i2c_bus = dvb->adapter.priv;
++ struct em28xx *dev = i2c_bus->dev;
++ int dvb_max_packet_size, packet_multiplier, dvb_alt;
++
++ if (dev->dvb_xfer_bulk) {
++ if (!dev->dvb_ep_bulk)
++ return -ENODEV;
++ dvb_max_packet_size = 512; /* USB 2.0 spec */
++ packet_multiplier = EM28XX_DVB_BULK_PACKET_MULTIPLIER;
++ dvb_alt = 0;
++ } else { /* isoc */
++ if (!dev->dvb_ep_isoc)
++ return -ENODEV;
++ dvb_max_packet_size = dev->dvb_max_pkt_size_isoc;
++ if (dvb_max_packet_size < 0)
++ return dvb_max_packet_size;
++ packet_multiplier = EM28XX_DVB_NUM_ISOC_PACKETS;
++ dvb_alt = dev->dvb_alt_isoc;
++ }
++
++ usb_set_interface(dev->udev, dev->ifnum, dvb_alt);
++ rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
++ if (rc < 0)
++ return rc;
++
++ dprintk(1, "Using %d buffers each with %d x %d bytes\n",
++ EM28XX_DVB_NUM_BUFS,
++ packet_multiplier,
++ dvb_max_packet_size);
++
++ return em28xx_init_usb_xfer(dev, EM28XX_DIGITAL_MODE,
++ dev->dvb_xfer_bulk,
++ EM28XX_DVB_NUM_BUFS,
++ dvb_max_packet_size,
++ packet_multiplier,
++ em28xx_dvb_urb_data_copy);
++}
++
++static int em28xx_stop_streaming(struct em28xx_dvb *dvb)
++{
++ struct em28xx_i2c_bus *i2c_bus = dvb->adapter.priv;
++ struct em28xx *dev = i2c_bus->dev;
++
++ em28xx_stop_urbs(dev);
++
++ return 0;
++}
++
++static int em28xx_start_feed(struct dvb_demux_feed *feed)
++{
++ struct dvb_demux *demux = feed->demux;
++ struct em28xx_dvb *dvb = demux->priv;
++ int rc, ret;
++
++ if (!demux->dmx.frontend)
++ return -EINVAL;
++
++ mutex_lock(&dvb->lock);
++ dvb->nfeeds++;
++ rc = dvb->nfeeds;
++
++ if (dvb->nfeeds == 1) {
++ ret = em28xx_start_streaming(dvb);
++ if (ret < 0)
++ rc = ret;
++ }
++
++ mutex_unlock(&dvb->lock);
++ return rc;
++}
++
++static int em28xx_stop_feed(struct dvb_demux_feed *feed)
++{
++ struct dvb_demux *demux = feed->demux;
++ struct em28xx_dvb *dvb = demux->priv;
++ int err = 0;
++
++ mutex_lock(&dvb->lock);
++ dvb->nfeeds--;
++
++ if (0 == dvb->nfeeds)
++ err = em28xx_stop_streaming(dvb);
++
++ mutex_unlock(&dvb->lock);
++ return err;
++}
++
++
++
++/* ------------------------------------------------------------------ */
++static int em28xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
++{
++ struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
++ struct em28xx *dev = i2c_bus->dev;
++
++ if (acquire)
++ return em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
++ else
++ return em28xx_set_mode(dev, EM28XX_SUSPEND);
++}
++
++/* ------------------------------------------------------------------ */
++
++static struct lgdt330x_config em2880_lgdt3303_dev = {
++ .demod_address = 0x0e,
++ .demod_chip = LGDT3303,
++};
++
++static struct lgdt3305_config em2870_lgdt3304_dev = {
++ .i2c_addr = 0x0e,
++ .demod_chip = LGDT3304,
++ .spectral_inversion = 1,
++ .deny_i2c_rptr = 1,
++ .mpeg_mode = LGDT3305_MPEG_PARALLEL,
++ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
++ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
++ .vsb_if_khz = 3250,
++ .qam_if_khz = 4000,
++};
++
++static struct lgdt3305_config em2874_lgdt3305_dev = {
++ .i2c_addr = 0x0e,
++ .demod_chip = LGDT3305,
++ .spectral_inversion = 1,
++ .deny_i2c_rptr = 0,
++ .mpeg_mode = LGDT3305_MPEG_SERIAL,
++ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
++ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
++ .vsb_if_khz = 3250,
++ .qam_if_khz = 4000,
++};
++
++static struct s921_config sharp_isdbt = {
++ .demod_address = 0x30 >> 1
++};
++
++static struct zl10353_config em28xx_zl10353_with_xc3028 = {
++ .demod_address = (0x1e >> 1),
++ .no_tuner = 1,
++ .parallel_ts = 1,
++ .if2 = 45600,
++};
++
++static struct s5h1409_config em28xx_s5h1409_with_xc3028 = {
++ .demod_address = 0x32 >> 1,
++ .output_mode = S5H1409_PARALLEL_OUTPUT,
++ .gpio = S5H1409_GPIO_OFF,
++ .inversion = S5H1409_INVERSION_OFF,
++ .status_mode = S5H1409_DEMODLOCKING,
++ .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
++};
++
++static struct tda18271_std_map kworld_a340_std_map = {
++ .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 0,
++ .if_lvl = 1, .rfagc_top = 0x37, },
++ .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 1,
++ .if_lvl = 1, .rfagc_top = 0x37, },
++};
++
++static struct tda18271_config kworld_a340_config = {
++ .std_map = &kworld_a340_std_map,
++};
++
++static struct tda18271_config kworld_ub435q_v2_config = {
++ .std_map = &kworld_a340_std_map,
++ .gate = TDA18271_GATE_DIGITAL,
++};
++
++static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = {
++ .demod_address = (0x1e >> 1),
++ .no_tuner = 1,
++ .disable_i2c_gate_ctrl = 1,
++ .parallel_ts = 1,
++ .if2 = 45600,
++};
++
++static struct drxd_config em28xx_drxd = {
++ .demod_address = 0x70,
++ .demod_revision = 0xa2,
++ .pll_type = DRXD_PLL_NONE,
++ .clock = 12000,
++ .insert_rs_byte = 1,
++ .IF = 42800000,
++ .disable_i2c_gate_ctrl = 1,
++};
++
++static struct drxk_config terratec_h5_drxk = {
++ .adr = 0x29,
++ .single_master = 1,
++ .no_i2c_bridge = 1,
++ .microcode_name = "dvb-usb-terratec-h5-drxk.fw",
++ .qam_demod_parameter_count = 2,
++};
++
++static struct drxk_config hauppauge_930c_drxk = {
++ .adr = 0x29,
++ .single_master = 1,
++ .no_i2c_bridge = 1,
++ .microcode_name = "dvb-usb-hauppauge-hvr930c-drxk.fw",
++ .chunk_size = 56,
++ .qam_demod_parameter_count = 2,
++};
++
++static struct drxk_config terratec_htc_stick_drxk = {
++ .adr = 0x29,
++ .single_master = 1,
++ .no_i2c_bridge = 1,
++ .microcode_name = "dvb-usb-terratec-htc-stick-drxk.fw",
++ .chunk_size = 54,
++ .qam_demod_parameter_count = 2,
++ /* Required for the antenna_gpio to disable LNA. */
++ .antenna_dvbt = true,
++ /* The windows driver uses the same. This will disable LNA. */
++ .antenna_gpio = 0x6,
++};
++
++static struct drxk_config maxmedia_ub425_tc_drxk = {
++ .adr = 0x29,
++ .single_master = 1,
++ .no_i2c_bridge = 1,
++ .microcode_name = "dvb-demod-drxk-01.fw",
++ .chunk_size = 62,
++ .qam_demod_parameter_count = 2,
++};
++
++static struct drxk_config pctv_520e_drxk = {
++ .adr = 0x29,
++ .single_master = 1,
++ .microcode_name = "dvb-demod-drxk-pctv.fw",
++ .qam_demod_parameter_count = 2,
++ .chunk_size = 58,
++ .antenna_dvbt = true, /* disable LNA */
++ .antenna_gpio = (1 << 2), /* disable LNA */
++};
++
++static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
++{
++ struct em28xx_dvb *dvb = fe->sec_priv;
++ int status;
++
++ if (!dvb)
++ return -EINVAL;
++
++ if (enable) {
++ down(&dvb->pll_mutex);
++ status = dvb->gate_ctrl(fe, 1);
++ } else {
++ status = dvb->gate_ctrl(fe, 0);
++ up(&dvb->pll_mutex);
++ }
++ return status;
++}
++
++static void hauppauge_hvr930c_init(struct em28xx *dev)
++{
++ int i;
++
++ struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
++ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65},
++ {EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32},
++ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8},
++ { -1, -1, -1, -1},
++ };
++ struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
++ {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01},
++ {EM2874_R80_GPIO_P0_CTRL, 0xaf, 0xff, 0x65},
++ {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x76},
++ {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01},
++ {EM2874_R80_GPIO_P0_CTRL, 0xcf, 0xff, 0x0b},
++ {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x40},
++
++ {EM2874_R80_GPIO_P0_CTRL, 0xcf, 0xff, 0x65},
++ {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x65},
++ {EM2874_R80_GPIO_P0_CTRL, 0xcf, 0xff, 0x0b},
++ {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x65},
++
++ { -1, -1, -1, -1},
++ };
++
++ struct {
++ unsigned char r[4];
++ int len;
++ } regs[] = {
++ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
++ {{ 0x01, 0x02 }, 2},
++ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
++ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
++ {{ 0x04, 0x00 }, 2},
++ {{ 0x00, 0x04 }, 2},
++ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
++ {{ 0x04, 0x14 }, 2},
++ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
++ };
++
++ em28xx_gpio_set(dev, hauppauge_hvr930c_init);
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
++ msleep(10);
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
++ msleep(10);
++
++ dev->i2c_client[dev->def_i2c_bus].addr = 0x82 >> 1;
++
++ for (i = 0; i < ARRAY_SIZE(regs); i++)
++ i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], regs[i].r, regs[i].len);
++ em28xx_gpio_set(dev, hauppauge_hvr930c_end);
++
++ msleep(100);
++
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
++ msleep(30);
++
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x45);
++ msleep(10);
++
++}
++
++static void terratec_h5_init(struct em28xx *dev)
++{
++ int i;
++ struct em28xx_reg_seq terratec_h5_init[] = {
++ {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
++ {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
++ {EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
++ {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
++ { -1, -1, -1, -1},
++ };
++ struct em28xx_reg_seq terratec_h5_end[] = {
++ {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
++ {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
++ {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
++ { -1, -1, -1, -1},
++ };
++ struct {
++ unsigned char r[4];
++ int len;
++ } regs[] = {
++ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
++ {{ 0x01, 0x02 }, 2},
++ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
++ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
++ {{ 0x04, 0x00 }, 2},
++ {{ 0x00, 0x04 }, 2},
++ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
++ {{ 0x04, 0x14 }, 2},
++ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
++ };
++
++ em28xx_gpio_set(dev, terratec_h5_init);
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
++ msleep(10);
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x45);
++ msleep(10);
++
++ dev->i2c_client[dev->def_i2c_bus].addr = 0x82 >> 1;
++
++ for (i = 0; i < ARRAY_SIZE(regs); i++)
++ i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], regs[i].r, regs[i].len);
++ em28xx_gpio_set(dev, terratec_h5_end);
++};
++
++static void terratec_htc_stick_init(struct em28xx *dev)
++{
++ int i;
++
++ /*
++ * GPIO configuration:
++ * 0xff: unknown (does not affect DVB-T).
++ * 0xf6: DRX-K (demodulator).
++ * 0xe6: unknown (does not affect DVB-T).
++ * 0xb6: unknown (does not affect DVB-T).
++ */
++ struct em28xx_reg_seq terratec_htc_stick_init[] = {
++ {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
++ {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
++ {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50},
++ {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
++ { -1, -1, -1, -1},
++ };
++ struct em28xx_reg_seq terratec_htc_stick_end[] = {
++ {EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
++ {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
++ { -1, -1, -1, -1},
++ };
++
++ /*
++ * Init the analog decoder (not yet supported), but
++ * it's probably still a good idea.
++ */
++ struct {
++ unsigned char r[4];
++ int len;
++ } regs[] = {
++ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
++ {{ 0x01, 0x02 }, 2},
++ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
++ };
++
++ em28xx_gpio_set(dev, terratec_htc_stick_init);
++
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
++ msleep(10);
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
++ msleep(10);
++
++ dev->i2c_client[dev->def_i2c_bus].addr = 0x82 >> 1;
++
++ for (i = 0; i < ARRAY_SIZE(regs); i++)
++ i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], regs[i].r, regs[i].len);
++
++ em28xx_gpio_set(dev, terratec_htc_stick_end);
++};
++
++static void terratec_htc_usb_xs_init(struct em28xx *dev)
++{
++ int i;
++
++ struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
++ {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
++ {EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100},
++ {EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50},
++ {EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
++ { -1, -1, -1, -1},
++ };
++ struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
++ {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100},
++ {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
++ {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
++ { -1, -1, -1, -1},
++ };
++
++ /*
++ * Init the analog decoder (not yet supported), but
++ * it's probably still a good idea.
++ */
++ struct {
++ unsigned char r[4];
++ int len;
++ } regs[] = {
++ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
++ {{ 0x01, 0x02 }, 2},
++ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
++ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
++ {{ 0x04, 0x00 }, 2},
++ {{ 0x00, 0x04 }, 2},
++ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
++ {{ 0x04, 0x14 }, 2},
++ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
++ };
++
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
++
++ em28xx_gpio_set(dev, terratec_htc_usb_xs_init);
++
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
++ msleep(10);
++ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
++ msleep(10);
++
++ dev->i2c_client[dev->def_i2c_bus].addr = 0x82 >> 1;
++
++ for (i = 0; i < ARRAY_SIZE(regs); i++)
++ i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], regs[i].r, regs[i].len);
++
++ em28xx_gpio_set(dev, terratec_htc_usb_xs_end);
++};
++
++static void pctv_520e_init(struct em28xx *dev)
++{
++ /*
++ * Init AVF4910B analog decoder. Looks like I2C traffic to
++ * digital demodulator and tuner are routed via AVF4910B.
++ */
++ int i;
++ struct {
++ unsigned char r[4];
++ int len;
++ } regs[] = {
++ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
++ {{ 0x01, 0x02 }, 2},
++ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
++ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
++ {{ 0x01, 0x00 }, 2},
++ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
++ };
++
++ dev->i2c_client[dev->def_i2c_bus].addr = 0x82 >> 1; /* 0x41 */
++
++ for (i = 0; i < ARRAY_SIZE(regs); i++)
++ i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], regs[i].r, regs[i].len);
++};
++
++static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
++{
++ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
++ struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
++ struct em28xx *dev = i2c_bus->dev;
++#ifdef CONFIG_GPIOLIB
++ struct em28xx_dvb *dvb = dev->dvb;
++ int ret;
++ unsigned long flags;
++
++ if (c->lna == 1)
++ flags = GPIOF_OUT_INIT_HIGH; /* enable LNA */
++ else
++ flags = GPIOF_OUT_INIT_LOW; /* disable LNA */
++
++ ret = gpio_request_one(dvb->lna_gpio, flags, NULL);
++ if (ret)
++ em28xx_errdev("gpio request failed %d\n", ret);
++ else
++ gpio_free(dvb->lna_gpio);
++
++ return ret;
++#else
++ dev_warn(&dev->udev->dev, "%s: LNA control is disabled (lna=%u)\n",
++ KBUILD_MODNAME, c->lna);
++ return 0;
++#endif
++}
++
++static int em28xx_mt352_terratec_xs_init(struct dvb_frontend *fe)
++{
++ /* Values extracted from a USB trace of the Terratec Windows driver */
++ static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x2c };
++ static u8 reset[] = { RESET, 0x80 };
++ static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
++ static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0xa0 };
++ static u8 input_freq_cfg[] = { INPUT_FREQ_1, 0x31, 0xb8 };
++ static u8 rs_err_cfg[] = { RS_ERR_PER_1, 0x00, 0x4d };
++ static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
++ static u8 trl_nom_cfg[] = { TRL_NOMINAL_RATE_1, 0x64, 0x00 };
++ static u8 tps_given_cfg[] = { TPS_GIVEN_1, 0x40, 0x80, 0x50 };
++ static u8 tuner_go[] = { TUNER_GO, 0x01};
++
++ mt352_write(fe, clock_config, sizeof(clock_config));
++ udelay(200);
++ mt352_write(fe, reset, sizeof(reset));
++ mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
++ mt352_write(fe, agc_cfg, sizeof(agc_cfg));
++ mt352_write(fe, input_freq_cfg, sizeof(input_freq_cfg));
++ mt352_write(fe, rs_err_cfg, sizeof(rs_err_cfg));
++ mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
++ mt352_write(fe, trl_nom_cfg, sizeof(trl_nom_cfg));
++ mt352_write(fe, tps_given_cfg, sizeof(tps_given_cfg));
++ mt352_write(fe, tuner_go, sizeof(tuner_go));
++ return 0;
++}
++
++static struct mt352_config terratec_xs_mt352_cfg = {
++ .demod_address = (0x1e >> 1),
++ .no_tuner = 1,
++ .if2 = 45600,
++ .demod_init = em28xx_mt352_terratec_xs_init,
++};
++
++static struct tda10023_config em28xx_tda10023_config = {
++ .demod_address = 0x0c,
++ .invert = 1,
++};
++
++static struct cxd2820r_config em28xx_cxd2820r_config = {
++ .i2c_address = (0xd8 >> 1),
++ .ts_mode = CXD2820R_TS_SERIAL,
++};
++
++static struct tda18271_config em28xx_cxd2820r_tda18271_config = {
++ .output_opt = TDA18271_OUTPUT_LT_OFF,
++ .gate = TDA18271_GATE_DIGITAL,
++};
++
++static const struct tda10071_config em28xx_tda10071_config = {
++ .demod_i2c_addr = 0x55, /* (0xaa >> 1) */
++ .tuner_i2c_addr = 0x14,
++ .i2c_wr_max = 64,
++ .ts_mode = TDA10071_TS_SERIAL,
++ .spec_inv = 0,
++ .xtal = 40444000, /* 40.444 MHz */
++ .pll_multiplier = 20,
++};
++
++static const struct a8293_config em28xx_a8293_config = {
++ .i2c_addr = 0x08, /* (0x10 >> 1) */
++};
++
++static struct zl10353_config em28xx_zl10353_no_i2c_gate_dev = {
++ .demod_address = (0x1e >> 1),
++ .disable_i2c_gate_ctrl = 1,
++ .no_tuner = 1,
++ .parallel_ts = 1,
++};
++static struct qt1010_config em28xx_qt1010_config = {
++ .i2c_address = 0x62
++};
++
++static const struct mb86a20s_config c3tech_duo_mb86a20s_config = {
++ .demod_address = 0x10,
++ .is_serial = true,
++};
++
++static struct tda18271_std_map mb86a20s_tda18271_config = {
++ .dvbt_6 = { .if_freq = 4000, .agc_mode = 3, .std = 4,
++ .if_lvl = 1, .rfagc_top = 0x37, },
++};
++
++static struct tda18271_config c3tech_duo_tda18271_config = {
++ .std_map = &mb86a20s_tda18271_config,
++ .gate = TDA18271_GATE_DIGITAL,
++ .small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
++};
++
++static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
++ .i2c_addr = 0x68,
++ .clock = 27000000,
++ .i2c_wr_max = 33,
++ .clock_out = 0,
++ .ts_mode = M88DS3103_TS_PARALLEL,
++ .ts_clk = 16000,
++ .ts_clk_pol = 1,
++ .agc = 0x99,
++};
++
++/* ------------------------------------------------------------------ */
++
++static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
++{
++ struct dvb_frontend *fe;
++ struct xc2028_config cfg;
++ struct xc2028_ctrl ctl;
++
++ memset(&cfg, 0, sizeof(cfg));
++ cfg.i2c_adap = &dev->i2c_adap[dev->def_i2c_bus];
++ cfg.i2c_addr = addr;
++
++ memset(&ctl, 0, sizeof(ctl));
++ em28xx_setup_xc3028(dev, &ctl);
++ cfg.ctrl = &ctl;
++
++ if (!dev->dvb->fe[0]) {
++ em28xx_errdev("/2: dvb frontend not attached. "
++ "Can't attach xc3028\n");
++ return -EINVAL;
++ }
++
++ fe = dvb_attach(xc2028_attach, dev->dvb->fe[0], &cfg);
++ if (!fe) {
++ em28xx_errdev("/2: xc3028 attach failed\n");
++ dvb_frontend_detach(dev->dvb->fe[0]);
++ dev->dvb->fe[0] = NULL;
++ return -EINVAL;
++ }
++
++ em28xx_info("%s/2: xc3028 attached\n", dev->name);
++
++ return 0;
++}
++
++/* ------------------------------------------------------------------ */
++
++static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
++ struct em28xx *dev, struct device *device)
++{
++ int result;
++
++ mutex_init(&dvb->lock);
++
++ /* register adapter */
++ result = dvb_register_adapter(&dvb->adapter, dev->name, module, device,
++ adapter_nr);
++ if (result < 0) {
++ printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
++ dev->name, result);
++ goto fail_adapter;
++ }
++
++ /* Ensure all frontends negotiate bus access */
++ dvb->fe[0]->ops.ts_bus_ctrl = em28xx_dvb_bus_ctrl;
++ if (dvb->fe[1])
++ dvb->fe[1]->ops.ts_bus_ctrl = em28xx_dvb_bus_ctrl;
++
++ dvb->adapter.priv = &dev->i2c_bus[dev->def_i2c_bus];
++
++ /* register frontend */
++ result = dvb_register_frontend(&dvb->adapter, dvb->fe[0]);
++ if (result < 0) {
++ printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
++ dev->name, result);
++ goto fail_frontend0;
++ }
++
++ /* register 2nd frontend */
++ if (dvb->fe[1]) {
++ result = dvb_register_frontend(&dvb->adapter, dvb->fe[1]);
++ if (result < 0) {
++ printk(KERN_WARNING "%s: 2nd dvb_register_frontend failed (errno = %d)\n",
++ dev->name, result);
++ goto fail_frontend1;
++ }
++ }
++
++ /* register demux stuff */
++ dvb->demux.dmx.capabilities =
++ DMX_TS_FILTERING | DMX_SECTION_FILTERING |
++ DMX_MEMORY_BASED_FILTERING;
++ dvb->demux.priv = dvb;
++ dvb->demux.filternum = 256;
++ dvb->demux.feednum = 256;
++ dvb->demux.start_feed = em28xx_start_feed;
++ dvb->demux.stop_feed = em28xx_stop_feed;
++
++ result = dvb_dmx_init(&dvb->demux);
++ if (result < 0) {
++ printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
++ dev->name, result);
++ goto fail_dmx;
++ }
++
++ dvb->dmxdev.filternum = 256;
++ dvb->dmxdev.demux = &dvb->demux.dmx;
++ dvb->dmxdev.capabilities = 0;
++ result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
++ if (result < 0) {
++ printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
++ dev->name, result);
++ goto fail_dmxdev;
++ }
++
++ dvb->fe_hw.source = DMX_FRONTEND_0;
++ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
++ if (result < 0) {
++ printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
++ dev->name, result);
++ goto fail_fe_hw;
++ }
++
++ dvb->fe_mem.source = DMX_MEMORY_FE;
++ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
++ if (result < 0) {
++ printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
++ dev->name, result);
++ goto fail_fe_mem;
++ }
++
++ result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
++ if (result < 0) {
++ printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
++ dev->name, result);
++ goto fail_fe_conn;
++ }
++
++ /* register network adapter */
++ dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
++ return 0;
++
++fail_fe_conn:
++ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
++fail_fe_mem:
++ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
++fail_fe_hw:
++ dvb_dmxdev_release(&dvb->dmxdev);
++fail_dmxdev:
++ dvb_dmx_release(&dvb->demux);
++fail_dmx:
++ if (dvb->fe[1])
++ dvb_unregister_frontend(dvb->fe[1]);
++ dvb_unregister_frontend(dvb->fe[0]);
++fail_frontend1:
++ if (dvb->fe[1])
++ dvb_frontend_detach(dvb->fe[1]);
++fail_frontend0:
++ dvb_frontend_detach(dvb->fe[0]);
++ dvb_unregister_adapter(&dvb->adapter);
++fail_adapter:
++ return result;
++}
++
++static void em28xx_unregister_dvb(struct em28xx_dvb *dvb)
++{
++ dvb_net_release(&dvb->net);
++ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
++ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
++ dvb_dmxdev_release(&dvb->dmxdev);
++ dvb_dmx_release(&dvb->demux);
++ if (dvb->fe[1])
++ dvb_unregister_frontend(dvb->fe[1]);
++ dvb_unregister_frontend(dvb->fe[0]);
++ if (dvb->fe[1] && !dvb->dont_attach_fe1)
++ dvb_frontend_detach(dvb->fe[1]);
++ dvb_frontend_detach(dvb->fe[0]);
++ dvb_unregister_adapter(&dvb->adapter);
++}
++
++static int em28xx_dvb_init(struct em28xx *dev)
++{
++ int result = 0, mfe_shared = 0;
++ struct em28xx_dvb *dvb;
++
++ if (dev->is_audio_only) {
++ /* Shouldn't initialize IR for this interface */
++ return 0;
++ }
++
++ if (!dev->board.has_dvb) {
++ /* This device does not support the extension */
++ return 0;
++ }
++
++ em28xx_info("Binding DVB extension\n");
++
++ dvb = kzalloc(sizeof(struct em28xx_dvb), GFP_KERNEL);
++
++ if (dvb == NULL) {
++ em28xx_info("em28xx_dvb: memory allocation failed\n");
++ return -ENOMEM;
++ }
++ dev->dvb = dvb;
++ dvb->fe[0] = dvb->fe[1] = NULL;
++
++ /* pre-allocate DVB usb transfer buffers */
++ if (dev->dvb_xfer_bulk) {
++ result = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
++ dev->dvb_xfer_bulk,
++ EM28XX_DVB_NUM_BUFS,
++ 512,
++ EM28XX_DVB_BULK_PACKET_MULTIPLIER);
++ } else {
++ result = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
++ dev->dvb_xfer_bulk,
++ EM28XX_DVB_NUM_BUFS,
++ dev->dvb_max_pkt_size_isoc,
++ EM28XX_DVB_NUM_ISOC_PACKETS);
++ }
++ if (result) {
++ em28xx_errdev("em28xx_dvb: failed to pre-allocate USB transfer buffers for DVB.\n");
++ kfree(dvb);
++ dev->dvb = NULL;
++ return result;
++ }
++
++ mutex_lock(&dev->lock);
++ em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
++ /* init frontend */
++ switch (dev->model) {
++ case EM2874_BOARD_LEADERSHIP_ISDBT:
++ dvb->fe[0] = dvb_attach(s921_attach,
++ &sharp_isdbt, &dev->i2c_adap[dev->def_i2c_bus]);
++
++ if (!dvb->fe[0]) {
++ result = -EINVAL;
++ goto out_free;
++ }
++
++ break;
++ case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
++ case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
++ case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
++ case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
++ dvb->fe[0] = dvb_attach(lgdt330x_attach,
++ &em2880_lgdt3303_dev,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (em28xx_attach_xc3028(0x61, dev) < 0) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM2880_BOARD_KWORLD_DVB_310U:
++ dvb->fe[0] = dvb_attach(zl10353_attach,
++ &em28xx_zl10353_with_xc3028,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (em28xx_attach_xc3028(0x61, dev) < 0) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
++ case EM2882_BOARD_TERRATEC_HYBRID_XS:
++ case EM2880_BOARD_EMPIRE_DUAL_TV:
++ dvb->fe[0] = dvb_attach(zl10353_attach,
++ &em28xx_zl10353_xc3028_no_i2c_gate,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (em28xx_attach_xc3028(0x61, dev) < 0) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM2880_BOARD_TERRATEC_HYBRID_XS:
++ case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
++ case EM2881_BOARD_PINNACLE_HYBRID_PRO:
++ case EM2882_BOARD_DIKOM_DK300:
++ case EM2882_BOARD_KWORLD_VS_DVBT:
++ dvb->fe[0] = dvb_attach(zl10353_attach,
++ &em28xx_zl10353_xc3028_no_i2c_gate,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (dvb->fe[0] == NULL) {
++ /* This board could have either a zl10353 or a mt352.
++ If the chip id isn't for zl10353, try mt352 */
++ dvb->fe[0] = dvb_attach(mt352_attach,
++ &terratec_xs_mt352_cfg,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ }
++
++ if (em28xx_attach_xc3028(0x61, dev) < 0) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM2870_BOARD_KWORLD_355U:
++ dvb->fe[0] = dvb_attach(zl10353_attach,
++ &em28xx_zl10353_no_i2c_gate_dev,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (dvb->fe[0] != NULL)
++ dvb_attach(qt1010_attach, dvb->fe[0],
++ &dev->i2c_adap[dev->def_i2c_bus], &em28xx_qt1010_config);
++ break;
++ case EM2883_BOARD_KWORLD_HYBRID_330U:
++ case EM2882_BOARD_EVGA_INDTUBE:
++ dvb->fe[0] = dvb_attach(s5h1409_attach,
++ &em28xx_s5h1409_with_xc3028,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (em28xx_attach_xc3028(0x61, dev) < 0) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM2882_BOARD_KWORLD_ATSC_315U:
++ dvb->fe[0] = dvb_attach(lgdt330x_attach,
++ &em2880_lgdt3303_dev,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (dvb->fe[0] != NULL) {
++ if (!dvb_attach(simple_tuner_attach, dvb->fe[0],
++ &dev->i2c_adap[dev->def_i2c_bus], 0x61, TUNER_THOMSON_DTT761X)) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ }
++ break;
++ case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
++ case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
++ dvb->fe[0] = dvb_attach(drxd_attach, &em28xx_drxd, NULL,
++ &dev->i2c_adap[dev->def_i2c_bus], &dev->udev->dev);
++ if (em28xx_attach_xc3028(0x61, dev) < 0) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM2870_BOARD_REDDO_DVB_C_USB_BOX:
++ /* Philips CU1216L NIM (Philips TDA10023 + Infineon TUA6034) */
++ dvb->fe[0] = dvb_attach(tda10023_attach,
++ &em28xx_tda10023_config,
++ &dev->i2c_adap[dev->def_i2c_bus], 0x48);
++ if (dvb->fe[0]) {
++ if (!dvb_attach(simple_tuner_attach, dvb->fe[0],
++ &dev->i2c_adap[dev->def_i2c_bus], 0x60, TUNER_PHILIPS_CU1216L)) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ }
++ break;
++ case EM2870_BOARD_KWORLD_A340:
++ dvb->fe[0] = dvb_attach(lgdt3305_attach,
++ &em2870_lgdt3304_dev,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (dvb->fe[0] != NULL)
++ dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
++ &dev->i2c_adap[dev->def_i2c_bus], &kworld_a340_config);
++ break;
++ case EM28174_BOARD_PCTV_290E:
++ /* set default GPIO0 for LNA, used if GPIOLIB is undefined */
++ dvb->lna_gpio = CXD2820R_GPIO_E | CXD2820R_GPIO_O |
++ CXD2820R_GPIO_L;
++ dvb->fe[0] = dvb_attach(cxd2820r_attach,
++ &em28xx_cxd2820r_config,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &dvb->lna_gpio);
++ if (dvb->fe[0]) {
++ /* FE 0 attach tuner */
++ if (!dvb_attach(tda18271_attach,
++ dvb->fe[0],
++ 0x60,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &em28xx_cxd2820r_tda18271_config)) {
++
++ dvb_frontend_detach(dvb->fe[0]);
++ result = -EINVAL;
++ goto out_free;
++ }
++
++#ifdef CONFIG_GPIOLIB
++ /* enable LNA for DVB-T, DVB-T2 and DVB-C */
++ result = gpio_request_one(dvb->lna_gpio,
++ GPIOF_OUT_INIT_LOW, NULL);
++ if (result)
++ em28xx_errdev("gpio request failed %d\n",
++ result);
++ else
++ gpio_free(dvb->lna_gpio);
++
++ result = 0; /* continue even set LNA fails */
++#endif
++ dvb->fe[0]->ops.set_lna = em28xx_pctv_290e_set_lna;
++ }
++
++ break;
++ case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C:
++ {
++ struct xc5000_config cfg;
++ hauppauge_hvr930c_init(dev);
++
++ dvb->fe[0] = dvb_attach(drxk_attach,
++ &hauppauge_930c_drxk, &dev->i2c_adap[dev->def_i2c_bus]);
++ if (!dvb->fe[0]) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ /* FIXME: do we need a pll semaphore? */
++ dvb->fe[0]->sec_priv = dvb;
++ sema_init(&dvb->pll_mutex, 1);
++ dvb->gate_ctrl = dvb->fe[0]->ops.i2c_gate_ctrl;
++ dvb->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
++
++ /* Attach xc5000 */
++ memset(&cfg, 0, sizeof(cfg));
++ cfg.i2c_address = 0x61;
++ cfg.if_khz = 4000;
++
++ if (dvb->fe[0]->ops.i2c_gate_ctrl)
++ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 1);
++ if (!dvb_attach(xc5000_attach, dvb->fe[0], &dev->i2c_adap[dev->def_i2c_bus],
++ &cfg)) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ if (dvb->fe[0]->ops.i2c_gate_ctrl)
++ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 0);
++
++ break;
++ }
++ case EM2884_BOARD_TERRATEC_H5:
++ terratec_h5_init(dev);
++
++ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_h5_drxk, &dev->i2c_adap[dev->def_i2c_bus]);
++ if (!dvb->fe[0]) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ /* FIXME: do we need a pll semaphore? */
++ dvb->fe[0]->sec_priv = dvb;
++ sema_init(&dvb->pll_mutex, 1);
++ dvb->gate_ctrl = dvb->fe[0]->ops.i2c_gate_ctrl;
++ dvb->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
++
++ /* Attach tda18271 to DVB-C frontend */
++ if (dvb->fe[0]->ops.i2c_gate_ctrl)
++ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 1);
++ if (!dvb_attach(tda18271c2dd_attach, dvb->fe[0], &dev->i2c_adap[dev->def_i2c_bus], 0x60)) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ if (dvb->fe[0]->ops.i2c_gate_ctrl)
++ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 0);
++
++ break;
++ case EM2884_BOARD_C3TECH_DIGITAL_DUO:
++ dvb->fe[0] = dvb_attach(mb86a20s_attach,
++ &c3tech_duo_mb86a20s_config,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (dvb->fe[0] != NULL)
++ dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &c3tech_duo_tda18271_config);
++ break;
++ case EM28174_BOARD_PCTV_460E:
++ /* attach demod */
++ dvb->fe[0] = dvb_attach(tda10071_attach,
++ &em28xx_tda10071_config, &dev->i2c_adap[dev->def_i2c_bus]);
++
++ /* attach SEC */
++ if (dvb->fe[0])
++ dvb_attach(a8293_attach, dvb->fe[0], &dev->i2c_adap[dev->def_i2c_bus],
++ &em28xx_a8293_config);
++ break;
++ case EM2874_BOARD_DELOCK_61959:
++ case EM2874_BOARD_MAXMEDIA_UB425_TC:
++ /* attach demodulator */
++ dvb->fe[0] = dvb_attach(drxk_attach, &maxmedia_ub425_tc_drxk,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++
++ if (dvb->fe[0]) {
++ /* disable I2C-gate */
++ dvb->fe[0]->ops.i2c_gate_ctrl = NULL;
++
++ /* attach tuner */
++ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &em28xx_cxd2820r_tda18271_config)) {
++ dvb_frontend_detach(dvb->fe[0]);
++ result = -EINVAL;
++ goto out_free;
++ }
++ }
++ break;
++ case EM2884_BOARD_PCTV_510E:
++ case EM2884_BOARD_PCTV_520E:
++ pctv_520e_init(dev);
++
++ /* attach demodulator */
++ dvb->fe[0] = dvb_attach(drxk_attach, &pctv_520e_drxk,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++
++ if (dvb->fe[0]) {
++ /* attach tuner */
++ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &em28xx_cxd2820r_tda18271_config)) {
++ dvb_frontend_detach(dvb->fe[0]);
++ result = -EINVAL;
++ goto out_free;
++ }
++ }
++ break;
++ case EM2884_BOARD_CINERGY_HTC_STICK:
++ terratec_htc_stick_init(dev);
++
++ /* attach demodulator */
++ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_htc_stick_drxk,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (!dvb->fe[0]) {
++ result = -EINVAL;
++ goto out_free;
++ }
++
++ /* Attach the demodulator. */
++ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &em28xx_cxd2820r_tda18271_config)) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM2884_BOARD_TERRATEC_HTC_USB_XS:
++ terratec_htc_usb_xs_init(dev);
++
++ /* attach demodulator */
++ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_htc_stick_drxk,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (!dvb->fe[0]) {
++ result = -EINVAL;
++ goto out_free;
++ }
++
++ /* Attach the demodulator. */
++ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &em28xx_cxd2820r_tda18271_config)) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM2874_BOARD_KWORLD_UB435Q_V2:
++ dvb->fe[0] = dvb_attach(lgdt3305_attach,
++ &em2874_lgdt3305_dev,
++ &dev->i2c_adap[dev->def_i2c_bus]);
++ if (!dvb->fe[0]) {
++ result = -EINVAL;
++ goto out_free;
++ }
++
++ /* Attach the demodulator. */
++ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &kworld_ub435q_v2_config)) {
++ result = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case EM28178_BOARD_PCTV_461E:
++ {
++ /* demod I2C adapter */
++ struct i2c_adapter *i2c_adapter;
++ struct i2c_board_info info;
++ struct m88ts2022_config m88ts2022_config = {
++ .clock = 27000000,
++ };
++ memset(&info, 0, sizeof(struct i2c_board_info));
++
++ /* attach demod */
++ dvb->fe[0] = dvb_attach(m88ds3103_attach,
++ &pctv_461e_m88ds3103_config,
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &i2c_adapter);
++ if (dvb->fe[0] == NULL) {
++ result = -ENODEV;
++ goto out_free;
++ }
++
++ /* attach tuner */
++ m88ts2022_config.fe = dvb->fe[0];
++ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
++ info.addr = 0x60;
++ info.platform_data = &m88ts2022_config;
++ request_module("m88ts2022");
++ dvb->i2c_client_tuner = i2c_new_device(i2c_adapter, &info);
++
++ /* delegate signal strength measurement to tuner */
++ dvb->fe[0]->ops.read_signal_strength =
++ dvb->fe[0]->ops.tuner_ops.get_rf_strength;
++
++ /* attach SEC */
++ if (!dvb_attach(a8293_attach, dvb->fe[0],
++ &dev->i2c_adap[dev->def_i2c_bus],
++ &em28xx_a8293_config)) {
++ dvb_frontend_detach(dvb->fe[0]);
++ result = -ENODEV;
++ goto out_free;
++ }
++ }
++ break;
++ default:
++ em28xx_errdev("/2: The frontend of your DVB/ATSC card"
++ " isn't supported yet\n");
++ break;
++ }
++ if (NULL == dvb->fe[0]) {
++ em28xx_errdev("/2: frontend initialization failed\n");
++ result = -EINVAL;
++ goto out_free;
++ }
++ /* define general-purpose callback pointer */
++ dvb->fe[0]->callback = em28xx_tuner_callback;
++ if (dvb->fe[1])
++ dvb->fe[1]->callback = em28xx_tuner_callback;
++
++ /* register everything */
++ result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);
++
++ if (result < 0)
++ goto out_free;
++
++ /* MFE lock */
++ dvb->adapter.mfe_shared = mfe_shared;
++
++ em28xx_info("DVB extension successfully initialized\n");
++ret:
++ em28xx_set_mode(dev, EM28XX_SUSPEND);
++ mutex_unlock(&dev->lock);
++ return result;
++
++out_free:
++ kfree(dvb);
++ dev->dvb = NULL;
++ goto ret;
++}
++
++static inline void prevent_sleep(struct dvb_frontend_ops *ops)
++{
++ ops->set_voltage = NULL;
++ ops->sleep = NULL;
++ ops->tuner_ops.sleep = NULL;
++}
++
++static int em28xx_dvb_fini(struct em28xx *dev)
++{
++ if (dev->is_audio_only) {
++ /* Shouldn't initialize IR for this interface */
++ return 0;
++ }
++
++ if (!dev->board.has_dvb) {
++ /* This device does not support the extension */
++ return 0;
++ }
++
++ em28xx_info("Closing DVB extension\n");
++
++ if (dev->dvb) {
++ struct em28xx_dvb *dvb = dev->dvb;
++
++ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
++
++ if (dev->disconnected) {
++ /* We cannot tell the device to sleep
++ * once it has been unplugged. */
++ if (dvb->fe[0])
++ prevent_sleep(&dvb->fe[0]->ops);
++ if (dvb->fe[1])
++ prevent_sleep(&dvb->fe[1]->ops);
++ }
++
++ i2c_release_client(dvb->i2c_client_tuner);
++ em28xx_unregister_dvb(dvb);
++ kfree(dvb);
++ dev->dvb = NULL;
++ }
++
++ return 0;
++}
++
++static struct em28xx_ops dvb_ops = {
++ .id = EM28XX_DVB,
++ .name = "Em28xx dvb Extension",
++ .init = em28xx_dvb_init,
++ .fini = em28xx_dvb_fini,
++};
++
++static int __init em28xx_dvb_register(void)
++{
++ return em28xx_register_extension(&dvb_ops);
++}
++
++static void __exit em28xx_dvb_unregister(void)
++{
++ em28xx_unregister_extension(&dvb_ops);
++}
++
++module_init(em28xx_dvb_register);
++module_exit(em28xx_dvb_unregister);
+diff -Nur linux-3.14.36/drivers/media/usb/em28xx/em28xx.h linux-openelec/drivers/media/usb/em28xx/em28xx.h
+--- linux-3.14.36/drivers/media/usb/em28xx/em28xx.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/em28xx/em28xx.h 2015-07-24 18:03:30.156842002 -0500
+@@ -137,6 +137,7 @@
+ #define EM2874_BOARD_KWORLD_UB435Q_V2 90
+ #define EM2765_BOARD_SPEEDLINK_VAD_LAPLACE 91
+ #define EM28178_BOARD_PCTV_461E 92
++#define EM28178_BOARD_PCTV_292E 94
+
+ /* Limits minimum and default number of buffers */
+ #define EM28XX_MIN_BUF 4
+diff -Nur linux-3.14.36/drivers/media/usb/em28xx/Kconfig linux-openelec/drivers/media/usb/em28xx/Kconfig
+--- linux-3.14.36/drivers/media/usb/em28xx/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/usb/em28xx/Kconfig 2015-07-24 18:03:30.156842002 -0500
+@@ -55,6 +55,8 @@
+ select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
++ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
++ select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ ---help---
+ This adds support for DVB cards based on the
+ Empiatech em28xx chips.
+diff -Nur linux-3.14.36/drivers/media/v4l2-core/videobuf2-dma-contig.c linux-openelec/drivers/media/v4l2-core/videobuf2-dma-contig.c
+--- linux-3.14.36/drivers/media/v4l2-core/videobuf2-dma-contig.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/v4l2-core/videobuf2-dma-contig.c 2015-05-06 12:05:42.000000000 -0500
+@@ -719,7 +719,7 @@
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+- if (IS_ERR_OR_NULL(sgt)) {
++ if (IS_ERR(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+diff -Nur linux-3.14.36/drivers/media/v4l2-core/videobuf-dma-contig.c linux-openelec/drivers/media/v4l2-core/videobuf-dma-contig.c
+--- linux-3.14.36/drivers/media/v4l2-core/videobuf-dma-contig.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/media/v4l2-core/videobuf-dma-contig.c 2015-05-06 12:05:42.000000000 -0500
+@@ -304,7 +304,7 @@
+
+ /* Try to remap memory */
+ size = vma->vm_end - vma->vm_start;
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ retval = vm_iomap_memory(vma, vma->vm_start, size);
+ if (retval) {
+ dev_err(q->dev, "mmap: remap failed with error %d. ",
+diff -Nur linux-3.14.36/drivers/mfd/ab8500-core.c linux-openelec/drivers/mfd/ab8500-core.c
+--- linux-3.14.36/drivers/mfd/ab8500-core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mfd/ab8500-core.c 2015-05-06 12:05:42.000000000 -0500
+@@ -592,7 +592,7 @@
+
+ /* If ->irq_base is zero this will give a linear mapping */
+ ab8500->domain = irq_domain_add_simple(NULL,
+- num_irqs, ab8500->irq_base,
++ num_irqs, 0,
+ &ab8500_irq_ops, ab8500);
+
+ if (!ab8500->domain) {
+@@ -1583,14 +1583,13 @@
+ if (!ab8500)
+ return -ENOMEM;
+
+- if (plat)
+- ab8500->irq_base = plat->irq_base;
+-
+ ab8500->dev = &pdev->dev;
+
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+- if (!resource)
++ if (!resource) {
++ dev_err(&pdev->dev, "no IRQ resource\n");
+ return -ENODEV;
++ }
+
+ ab8500->irq = resource->start;
+
+@@ -1612,8 +1611,10 @@
+ else {
+ ret = get_register_interruptible(ab8500, AB8500_MISC,
+ AB8500_IC_NAME_REG, &value);
+- if (ret < 0)
++ if (ret < 0) {
++ dev_err(&pdev->dev, "could not probe HW\n");
+ return ret;
++ }
+
+ ab8500->version = value;
+ }
+@@ -1759,30 +1760,30 @@
+ if (is_ab9540(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
+ ARRAY_SIZE(ab9540_devs), NULL,
+- ab8500->irq_base, ab8500->domain);
++ 0, ab8500->domain);
+ else if (is_ab8540(ab8500)) {
+ ret = mfd_add_devices(ab8500->dev, 0, ab8540_devs,
+ ARRAY_SIZE(ab8540_devs), NULL,
+- ab8500->irq_base, NULL);
++ 0, ab8500->domain);
+ if (ret)
+ return ret;
+
+ if (is_ab8540_1p2_or_earlier(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab8540_cut1_devs,
+ ARRAY_SIZE(ab8540_cut1_devs), NULL,
+- ab8500->irq_base, NULL);
++ 0, ab8500->domain);
+ else /* ab8540 >= cut2 */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8540_cut2_devs,
+ ARRAY_SIZE(ab8540_cut2_devs), NULL,
+- ab8500->irq_base, NULL);
++ 0, ab8500->domain);
+ } else if (is_ab8505(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab8505_devs,
+ ARRAY_SIZE(ab8505_devs), NULL,
+- ab8500->irq_base, ab8500->domain);
++ 0, ab8500->domain);
+ else
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
+ ARRAY_SIZE(ab8500_devs), NULL,
+- ab8500->irq_base, ab8500->domain);
++ 0, ab8500->domain);
+ if (ret)
+ return ret;
+
+@@ -1790,7 +1791,7 @@
+ /* Add battery management devices */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+ ARRAY_SIZE(ab8500_bm_devs), NULL,
+- ab8500->irq_base, ab8500->domain);
++ 0, ab8500->domain);
+ if (ret)
+ dev_err(ab8500->dev, "error adding bm devices\n");
+ }
+diff -Nur linux-3.14.36/drivers/mfd/db8500-prcmu.c linux-openelec/drivers/mfd/db8500-prcmu.c
+--- linux-3.14.36/drivers/mfd/db8500-prcmu.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mfd/db8500-prcmu.c 2015-05-06 12:05:42.000000000 -0500
+@@ -25,6 +25,7 @@
+ #include <linux/bitops.h>
+ #include <linux/fs.h>
+ #include <linux/of.h>
++#include <linux/of_irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/uaccess.h>
+ #include <linux/mfd/core.h>
+@@ -2678,16 +2679,12 @@
+ .xlate = irq_domain_xlate_twocell,
+ };
+
+-static int db8500_irq_init(struct device_node *np, int irq_base)
++static int db8500_irq_init(struct device_node *np)
+ {
+ int i;
+
+- /* In the device tree case, just take some IRQs */
+- if (np)
+- irq_base = 0;
+-
+ db8500_irq_domain = irq_domain_add_simple(
+- np, NUM_PRCMU_WAKEUPS, irq_base,
++ np, NUM_PRCMU_WAKEUPS, 0,
+ &db8500_irq_ops, NULL);
+
+ if (!db8500_irq_domain) {
+@@ -3114,10 +3111,10 @@
+ }
+
+ static int db8500_prcmu_register_ab8500(struct device *parent,
+- struct ab8500_platform_data *pdata,
+- int irq)
++ struct ab8500_platform_data *pdata)
+ {
+- struct resource ab8500_resource = DEFINE_RES_IRQ(irq);
++ struct device_node *np;
++ struct resource ab8500_resource;
+ struct mfd_cell ab8500_cell = {
+ .name = "ab8500-core",
+ .of_compatible = "stericsson,ab8500",
+@@ -3128,6 +3125,20 @@
+ .num_resources = 1,
+ };
+
++ if (!parent->of_node)
++ return -ENODEV;
++
++ /* Look up the device node, sneak the IRQ out of it */
++ for_each_child_of_node(parent->of_node, np) {
++ if (of_device_is_compatible(np, ab8500_cell.of_compatible))
++ break;
++ }
++ if (!np) {
++ dev_info(parent, "could not find AB8500 node in the device tree\n");
++ return -ENODEV;
++ }
++ of_irq_to_resource_table(np, &ab8500_resource, 1);
++
+ return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL);
+ }
+
+@@ -3180,7 +3191,7 @@
+ goto no_irq_return;
+ }
+
+- db8500_irq_init(np, pdata->irq_base);
++ db8500_irq_init(np);
+
+ prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+
+@@ -3205,8 +3216,7 @@
+ }
+ }
+
+- err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata,
+- pdata->ab_irq);
++ err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata);
+ if (err) {
+ mfd_remove_devices(&pdev->dev);
+ pr_err("prcmu: Failed to add ab8500 subdevice\n");
+diff -Nur linux-3.14.36/drivers/mfd/Kconfig linux-openelec/drivers/mfd/Kconfig
+--- linux-3.14.36/drivers/mfd/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mfd/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -163,6 +163,14 @@
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
++config MFD_MXC_HDMI
++ tristate "Freescale HDMI Core"
++ select MFD_CORE
++ help
++ This is the core driver for the Freescale i.MX6 on-chip HDMI.
++ This MFD driver connects with the video and audio drivers for HDMI.
++
++
+ config MFD_MC13XXX
+ tristate
+ depends on (SPI_MASTER || I2C)
+@@ -1226,3 +1234,4 @@
+ help
+ Platform configuration infrastructure for the ARM Ltd.
+ Versatile Express.
++
+diff -Nur linux-3.14.36/drivers/mfd/Makefile linux-openelec/drivers/mfd/Makefile
+--- linux-3.14.36/drivers/mfd/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mfd/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -166,3 +166,4 @@
+ obj-$(CONFIG_MFD_AS3711) += as3711.o
+ obj-$(CONFIG_MFD_AS3722) += as3722.o
+ obj-$(CONFIG_MFD_STW481X) += stw481x.o
++obj-$(CONFIG_MFD_MXC_HDMI) += mxc-hdmi-core.o
+diff -Nur linux-3.14.36/drivers/mfd/mxc-hdmi-core.c linux-openelec/drivers/mfd/mxc-hdmi-core.c
+--- linux-3.14.36/drivers/mfd/mxc-hdmi-core.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mfd/mxc-hdmi-core.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,798 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++#include <linux/spinlock.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++
++#include <linux/platform_device.h>
++#include <linux/regulator/machine.h>
++#include <asm/mach-types.h>
++
++#include <video/mxc_hdmi.h>
++#include <linux/ipu-v3.h>
++#include <video/mxc_edid.h>
++#include "../mxc/ipu3/ipu_prv.h"
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/of_device.h>
++#include <linux/mod_devicetable.h>
++
++struct mxc_hdmi_data {
++ struct platform_device *pdev;
++ unsigned long __iomem *reg_base;
++ unsigned long reg_phys_base;
++ struct device *dev;
++};
++
++static void __iomem *hdmi_base;
++static struct clk *isfr_clk;
++static struct clk *iahb_clk;
++static spinlock_t irq_spinlock;
++static spinlock_t edid_spinlock;
++static unsigned int sample_rate;
++static unsigned long pixel_clk_rate;
++static struct clk *pixel_clk;
++static int hdmi_ratio;
++int mxc_hdmi_ipu_id;
++int mxc_hdmi_disp_id;
++static int hdmi_core_edid_status;
++static struct mxc_edid_cfg hdmi_core_edid_cfg;
++static int hdmi_core_init;
++static unsigned int hdmi_dma_running;
++static struct snd_pcm_substream *hdmi_audio_stream_playback;
++static unsigned int hdmi_cable_state;
++static unsigned int hdmi_blank_state;
++static unsigned int hdmi_abort_state;
++static spinlock_t hdmi_audio_lock, hdmi_blank_state_lock, hdmi_cable_state_lock;
++
++void hdmi_set_dvi_mode(unsigned int state)
++{
++ if (state) {
++ mxc_hdmi_abort_stream();
++ hdmi_cec_stop_device();
++ } else {
++ hdmi_cec_start_device();
++ }
++}
++EXPORT_SYMBOL(hdmi_set_dvi_mode);
++
++unsigned int hdmi_set_cable_state(unsigned int state)
++{
++ unsigned long flags;
++ struct snd_pcm_substream *substream = hdmi_audio_stream_playback;
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags);
++ hdmi_cable_state = state;
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);
++
++ if (check_hdmi_state() && substream && hdmi_abort_state) {
++ hdmi_abort_state = 0;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(hdmi_set_cable_state);
++
++unsigned int hdmi_set_blank_state(unsigned int state)
++{
++ unsigned long flags;
++ struct snd_pcm_substream *substream = hdmi_audio_stream_playback;
++
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags);
++ hdmi_blank_state = state;
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);
++
++ if (check_hdmi_state() && substream && hdmi_abort_state) {
++ hdmi_abort_state = 0;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(hdmi_set_blank_state);
++
++static void hdmi_audio_abort_stream(struct snd_pcm_substream *substream)
++{
++ unsigned long flags;
++
++ snd_pcm_stream_lock_irqsave(substream, flags);
++
++ if (snd_pcm_running(substream)) {
++ hdmi_abort_state = 1;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
++ }
++
++ snd_pcm_stream_unlock_irqrestore(substream, flags);
++}
++
++int mxc_hdmi_abort_stream(void)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ if (hdmi_audio_stream_playback)
++ hdmi_audio_abort_stream(hdmi_audio_stream_playback);
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_hdmi_abort_stream);
++
++int check_hdmi_state(void)
++{
++ unsigned long flags1, flags2;
++ unsigned int ret;
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags1);
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags2);
++
++ ret = hdmi_cable_state && hdmi_blank_state;
++
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags2);
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags1);
++
++ return ret;
++}
++EXPORT_SYMBOL(check_hdmi_state);
++
++int mxc_hdmi_register_audio(struct snd_pcm_substream *substream)
++{
++ unsigned long flags, flags1;
++ int ret = 0;
++
++ snd_pcm_stream_lock_irqsave(substream, flags);
++
++ if (substream && check_hdmi_state()) {
++ spin_lock_irqsave(&hdmi_audio_lock, flags1);
++ if (hdmi_audio_stream_playback) {
++ pr_err("%s unconsist hdmi auido stream!\n", __func__);
++ ret = -EINVAL;
++ }
++ hdmi_audio_stream_playback = substream;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags1);
++ } else
++ ret = -EINVAL;
++
++ snd_pcm_stream_unlock_irqrestore(substream, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(mxc_hdmi_register_audio);
++
++void mxc_hdmi_unregister_audio(struct snd_pcm_substream *substream)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ hdmi_audio_stream_playback = NULL;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++}
++EXPORT_SYMBOL(mxc_hdmi_unregister_audio);
++
++u8 hdmi_readb(unsigned int reg)
++{
++ u8 value;
++
++ value = __raw_readb(hdmi_base + reg);
++
++ return value;
++}
++EXPORT_SYMBOL(hdmi_readb);
++
++#ifdef DEBUG
++static bool overflow_lo;
++static bool overflow_hi;
++
++bool hdmi_check_overflow(void)
++{
++ u8 val, lo, hi;
++
++ val = hdmi_readb(HDMI_IH_FC_STAT2);
++ lo = (val & HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW) != 0;
++ hi = (val & HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW) != 0;
++
++ if ((lo != overflow_lo) || (hi != overflow_hi)) {
++ pr_debug("%s LowPriority=%d HighPriority=%d <=======================\n",
++ __func__, lo, hi);
++ overflow_lo = lo;
++ overflow_hi = hi;
++ return true;
++ }
++ return false;
++}
++#else
++bool hdmi_check_overflow(void)
++{
++ return false;
++}
++#endif
++EXPORT_SYMBOL(hdmi_check_overflow);
++
++void hdmi_writeb(u8 value, unsigned int reg)
++{
++ hdmi_check_overflow();
++ __raw_writeb(value, hdmi_base + reg);
++ hdmi_check_overflow();
++}
++EXPORT_SYMBOL(hdmi_writeb);
++
++void hdmi_mask_writeb(u8 data, unsigned int reg, u8 shift, u8 mask)
++{
++ u8 value = hdmi_readb(reg) & ~mask;
++ value |= (data << shift) & mask;
++ hdmi_writeb(value, reg);
++}
++EXPORT_SYMBOL(hdmi_mask_writeb);
++
++unsigned int hdmi_read4(unsigned int reg)
++{
++ /* read a four byte address from registers */
++ return (hdmi_readb(reg + 3) << 24) |
++ (hdmi_readb(reg + 2) << 16) |
++ (hdmi_readb(reg + 1) << 8) |
++ hdmi_readb(reg);
++}
++EXPORT_SYMBOL(hdmi_read4);
++
++void hdmi_write4(unsigned int value, unsigned int reg)
++{
++ /* write a four byte address to hdmi regs */
++ hdmi_writeb(value & 0xff, reg);
++ hdmi_writeb((value >> 8) & 0xff, reg + 1);
++ hdmi_writeb((value >> 16) & 0xff, reg + 2);
++ hdmi_writeb((value >> 24) & 0xff, reg + 3);
++}
++EXPORT_SYMBOL(hdmi_write4);
++
++static void initialize_hdmi_ih_mutes(void)
++{
++ u8 ih_mute;
++
++ /*
++ * Boot up defaults are:
++ * HDMI_IH_MUTE = 0x03 (disabled)
++ * HDMI_IH_MUTE_* = 0x00 (enabled)
++ */
++
++ /* Disable top level interrupt bits in HDMI block */
++ ih_mute = hdmi_readb(HDMI_IH_MUTE) |
++ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT;
++
++ hdmi_writeb(ih_mute, HDMI_IH_MUTE);
++
++ /* by default mask all interrupts */
++ hdmi_writeb(0xff, HDMI_VP_MASK);
++ hdmi_writeb(0xff, HDMI_FC_MASK0);
++ hdmi_writeb(0xff, HDMI_FC_MASK1);
++ hdmi_writeb(0xff, HDMI_FC_MASK2);
++ hdmi_writeb(0xff, HDMI_PHY_MASK0);
++ hdmi_writeb(0xff, HDMI_PHY_I2CM_INT_ADDR);
++ hdmi_writeb(0xff, HDMI_PHY_I2CM_CTLINT_ADDR);
++ hdmi_writeb(0xff, HDMI_AUD_INT);
++ hdmi_writeb(0xff, HDMI_AUD_SPDIFINT);
++ hdmi_writeb(0xff, HDMI_AUD_HBR_MASK);
++ hdmi_writeb(0xff, HDMI_GP_MASK);
++ hdmi_writeb(0xff, HDMI_A_APIINTMSK);
++ hdmi_writeb(0xff, HDMI_CEC_MASK);
++ hdmi_writeb(0xff, HDMI_I2CM_INT);
++ hdmi_writeb(0xff, HDMI_I2CM_CTLINT);
++
++ /* Disable interrupts in the IH_MUTE_* registers */
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT1);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT2);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AS_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_PHY_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_I2CM_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_VP_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_I2CMPHY_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++
++ /* Enable top level interrupt bits in HDMI block */
++ ih_mute &= ~(HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT);
++ hdmi_writeb(ih_mute, HDMI_IH_MUTE);
++}
++
++static void hdmi_set_clock_regenerator_n(unsigned int value)
++{
++ u8 val;
++
++ if (!hdmi_dma_running) {
++ hdmi_writeb(value & 0xff, HDMI_AUD_N1);
++ hdmi_writeb(0, HDMI_AUD_N2);
++ hdmi_writeb(0, HDMI_AUD_N3);
++ }
++
++ hdmi_writeb(value & 0xff, HDMI_AUD_N1);
++ hdmi_writeb((value >> 8) & 0xff, HDMI_AUD_N2);
++ hdmi_writeb((value >> 16) & 0x0f, HDMI_AUD_N3);
++
++ /* nshift factor = 0 */
++ val = hdmi_readb(HDMI_AUD_CTS3);
++ val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK;
++ hdmi_writeb(val, HDMI_AUD_CTS3);
++}
++
++static void hdmi_set_clock_regenerator_cts(unsigned int cts)
++{
++ u8 val;
++
++ if (!hdmi_dma_running) {
++ hdmi_writeb(cts & 0xff, HDMI_AUD_CTS1);
++ hdmi_writeb(0, HDMI_AUD_CTS2);
++ hdmi_writeb(0, HDMI_AUD_CTS3);
++ }
++
++ /* Must be set/cleared first */
++ val = hdmi_readb(HDMI_AUD_CTS3);
++ val &= ~HDMI_AUD_CTS3_CTS_MANUAL;
++ hdmi_writeb(val, HDMI_AUD_CTS3);
++
++ hdmi_writeb(cts & 0xff, HDMI_AUD_CTS1);
++ hdmi_writeb((cts >> 8) & 0xff, HDMI_AUD_CTS2);
++ hdmi_writeb(((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
++ HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
++}
++
++static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
++ unsigned int ratio)
++{
++ unsigned int n = (128 * freq) / 1000;
++
++ switch (freq) {
++ case 32000:
++ if (pixel_clk == 25174000)
++ n = (ratio == 150) ? 9152 : 4576;
++ else if (pixel_clk == 27020000)
++ n = (ratio == 150) ? 8192 : 4096;
++ else if (pixel_clk == 74170000 || pixel_clk == 148350000)
++ n = 11648;
++ else if (pixel_clk == 297000000)
++ n = (ratio == 150) ? 6144 : 3072;
++ else
++ n = 4096;
++ break;
++
++ case 44100:
++ if (pixel_clk == 25174000)
++ n = 7007;
++ else if (pixel_clk == 74170000)
++ n = 17836;
++ else if (pixel_clk == 148350000)
++ n = (ratio == 150) ? 17836 : 8918;
++ else if (pixel_clk == 297000000)
++ n = (ratio == 150) ? 9408 : 4704;
++ else
++ n = 6272;
++ break;
++
++ case 48000:
++ if (pixel_clk == 25174000)
++ n = (ratio == 150) ? 9152 : 6864;
++ else if (pixel_clk == 27020000)
++ n = (ratio == 150) ? 8192 : 6144;
++ else if (pixel_clk == 74170000)
++ n = 11648;
++ else if (pixel_clk == 148350000)
++ n = (ratio == 150) ? 11648 : 5824;
++ else if (pixel_clk == 297000000)
++ n = (ratio == 150) ? 10240 : 5120;
++ else
++ n = 6144;
++ break;
++
++ case 88200:
++ n = hdmi_compute_n(44100, pixel_clk, ratio) * 2;
++ break;
++
++ case 96000:
++ n = hdmi_compute_n(48000, pixel_clk, ratio) * 2;
++ break;
++
++ case 176400:
++ n = hdmi_compute_n(44100, pixel_clk, ratio) * 4;
++ break;
++
++ case 192000:
++ n = hdmi_compute_n(48000, pixel_clk, ratio) * 4;
++ break;
++
++ default:
++ break;
++ }
++
++ return n;
++}
++
++static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
++ unsigned int ratio)
++{
++ unsigned int cts = 0;
++ switch (freq) {
++ case 32000:
++ if (pixel_clk == 297000000) {
++ cts = 222750;
++ break;
++ } else if (pixel_clk == 25174000) {
++ cts = 28125;
++ break;
++ }
++ case 48000:
++ case 96000:
++ case 192000:
++ switch (pixel_clk) {
++ case 25200000:
++ case 27000000:
++ case 54000000:
++ case 74250000:
++ case 148500000:
++ cts = pixel_clk / 1000;
++ break;
++ case 297000000:
++ cts = 247500;
++ break;
++ case 25174000:
++ cts = 28125l;
++ break;
++ /*
++ * All other TMDS clocks are not supported by
++ * DWC_hdmi_tx. The TMDS clocks divided or
++ * multiplied by 1,001 coefficients are not
++ * supported.
++ */
++ default:
++ break;
++ }
++ break;
++ case 44100:
++ case 88200:
++ case 176400:
++ switch (pixel_clk) {
++ case 25200000:
++ cts = 28000;
++ break;
++ case 25174000:
++ cts = 31250;
++ break;
++ case 27000000:
++ cts = 30000;
++ break;
++ case 54000000:
++ cts = 60000;
++ break;
++ case 74250000:
++ cts = 82500;
++ break;
++ case 148500000:
++ cts = 165000;
++ break;
++ case 297000000:
++ cts = 247500;
++ break;
++ default:
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++ if (ratio == 100)
++ return cts;
++ else
++ return (cts * ratio) / 100;
++}
++
++static void hdmi_set_clk_regenerator(void)
++{
++ unsigned int clk_n, clk_cts;
++
++ clk_n = hdmi_compute_n(sample_rate, pixel_clk_rate, hdmi_ratio);
++ clk_cts = hdmi_compute_cts(sample_rate, pixel_clk_rate, hdmi_ratio);
++
++ if (clk_cts == 0) {
++ pr_debug("%s: pixel clock not supported: %d\n",
++ __func__, (int)pixel_clk_rate);
++ return;
++ }
++
++ pr_debug("%s: samplerate=%d ratio=%d pixelclk=%d N=%d cts=%d\n",
++ __func__, sample_rate, hdmi_ratio, (int)pixel_clk_rate,
++ clk_n, clk_cts);
++
++ hdmi_set_clock_regenerator_cts(clk_cts);
++ hdmi_set_clock_regenerator_n(clk_n);
++}
++
++static int hdmi_core_get_of_property(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ int ipu_id, disp_id;
++
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++
++ mxc_hdmi_ipu_id = ipu_id;
++ mxc_hdmi_disp_id = disp_id;
++
++ return err;
++}
++
++/* Need to run this before phy is enabled the first time to prevent
++ * overflow condition in HDMI_IH_FC_STAT2 */
++void hdmi_init_clk_regenerator(void)
++{
++ if (pixel_clk_rate == 0) {
++ pixel_clk_rate = 74250000;
++ hdmi_set_clk_regenerator();
++ }
++}
++EXPORT_SYMBOL(hdmi_init_clk_regenerator);
++
++void hdmi_clk_regenerator_update_pixel_clock(u32 pixclock)
++{
++
++ /* Translate pixel clock in ps (pico seconds) to Hz */
++ pixel_clk_rate = PICOS2KHZ(pixclock) * 1000UL;
++ hdmi_set_clk_regenerator();
++}
++EXPORT_SYMBOL(hdmi_clk_regenerator_update_pixel_clock);
++
++void hdmi_set_dma_mode(unsigned int dma_running)
++{
++ hdmi_dma_running = dma_running;
++ hdmi_set_clk_regenerator();
++}
++EXPORT_SYMBOL(hdmi_set_dma_mode);
++
++void hdmi_set_sample_rate(unsigned int rate)
++{
++ sample_rate = rate;
++}
++EXPORT_SYMBOL(hdmi_set_sample_rate);
++
++void hdmi_set_edid_cfg(int edid_status, struct mxc_edid_cfg *cfg)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&edid_spinlock, flags);
++ hdmi_core_edid_status = edid_status;
++ memcpy(&hdmi_core_edid_cfg, cfg, sizeof(struct mxc_edid_cfg));
++ spin_unlock_irqrestore(&edid_spinlock, flags);
++}
++EXPORT_SYMBOL(hdmi_set_edid_cfg);
++
++int hdmi_get_edid_cfg(struct mxc_edid_cfg *cfg)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&edid_spinlock, flags);
++ memcpy(cfg, &hdmi_core_edid_cfg, sizeof(struct mxc_edid_cfg));
++ spin_unlock_irqrestore(&edid_spinlock, flags);
++
++ return hdmi_core_edid_status;
++}
++EXPORT_SYMBOL(hdmi_get_edid_cfg);
++
++void hdmi_set_registered(int registered)
++{
++ hdmi_core_init = registered;
++}
++EXPORT_SYMBOL(hdmi_set_registered);
++
++int hdmi_get_registered(void)
++{
++ return hdmi_core_init;
++}
++EXPORT_SYMBOL(hdmi_get_registered);
++
++static int mxc_hdmi_core_probe(struct platform_device *pdev)
++{
++ struct mxc_hdmi_data *hdmi_data;
++ struct resource *res;
++ unsigned long flags;
++ int ret = 0;
++
++#ifdef DEBUG
++ overflow_lo = false;
++ overflow_hi = false;
++#endif
++
++ hdmi_core_init = 0;
++ hdmi_dma_running = 0;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -ENOENT;
++
++ ret = hdmi_core_get_of_property(pdev);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get hdmi of property fail\n");
++ return -ENOENT;
++ }
++
++ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(struct mxc_hdmi_data), GFP_KERNEL);
++ if (!hdmi_data) {
++ dev_err(&pdev->dev, "Couldn't allocate mxc hdmi mfd device\n");
++ return -ENOMEM;
++ }
++ hdmi_data->pdev = pdev;
++
++ pixel_clk = NULL;
++ sample_rate = 48000;
++ pixel_clk_rate = 0;
++ hdmi_ratio = 100;
++
++ spin_lock_init(&irq_spinlock);
++ spin_lock_init(&edid_spinlock);
++
++
++ spin_lock_init(&hdmi_cable_state_lock);
++ spin_lock_init(&hdmi_blank_state_lock);
++ spin_lock_init(&hdmi_audio_lock);
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags);
++ hdmi_cable_state = 0;
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);
++
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags);
++ hdmi_blank_state = 0;
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);
++
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ hdmi_audio_stream_playback = NULL;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++
++ isfr_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_isfr");
++ if (IS_ERR(isfr_clk)) {
++ ret = PTR_ERR(isfr_clk);
++ dev_err(&hdmi_data->pdev->dev,
++ "Unable to get HDMI isfr clk: %d\n", ret);
++ goto eclkg;
++ }
++
++ ret = clk_prepare_enable(isfr_clk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
++ goto eclke;
++ }
++
++ pr_debug("%s isfr_clk:%d\n", __func__,
++ (int)clk_get_rate(isfr_clk));
++
++ iahb_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_iahb");
++ if (IS_ERR(iahb_clk)) {
++ ret = PTR_ERR(iahb_clk);
++ dev_err(&hdmi_data->pdev->dev,
++ "Unable to get HDMI iahb clk: %d\n", ret);
++ goto eclkg2;
++ }
++
++ ret = clk_prepare_enable(iahb_clk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
++ goto eclke2;
++ }
++
++ hdmi_data->reg_phys_base = res->start;
++ if (!request_mem_region(res->start, resource_size(res),
++ dev_name(&pdev->dev))) {
++ dev_err(&pdev->dev, "request_mem_region failed\n");
++ ret = -EBUSY;
++ goto emem;
++ }
++
++ hdmi_data->reg_base = ioremap(res->start, resource_size(res));
++ if (!hdmi_data->reg_base) {
++ dev_err(&pdev->dev, "ioremap failed\n");
++ ret = -ENOMEM;
++ goto eirq;
++ }
++ hdmi_base = hdmi_data->reg_base;
++
++ pr_debug("\n%s hdmi hw base = 0x%08x\n\n", __func__, (int)res->start);
++
++ initialize_hdmi_ih_mutes();
++
++ /* Disable HDMI clocks until video/audio sub-drivers are initialized */
++ clk_disable_unprepare(isfr_clk);
++ clk_disable_unprepare(iahb_clk);
++
++ /* Replace platform data coming in with a local struct */
++ platform_set_drvdata(pdev, hdmi_data);
++
++ return ret;
++
++eirq:
++ release_mem_region(res->start, resource_size(res));
++emem:
++ clk_disable_unprepare(iahb_clk);
++eclke2:
++ clk_put(iahb_clk);
++eclkg2:
++ clk_disable_unprepare(isfr_clk);
++eclke:
++ clk_put(isfr_clk);
++eclkg:
++ return ret;
++}
++
++
++static int __exit mxc_hdmi_core_remove(struct platform_device *pdev)
++{
++ struct mxc_hdmi_data *hdmi_data = platform_get_drvdata(pdev);
++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ iounmap(hdmi_data->reg_base);
++ release_mem_region(res->start, resource_size(res));
++
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx6q-hdmi-core", },
++ { .compatible = "fsl,imx6dl-hdmi-core", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_hdmi_core_driver = {
++ .driver = {
++ .name = "mxc_hdmi_core",
++ .of_match_table = imx_hdmi_dt_ids,
++ .owner = THIS_MODULE,
++ },
++ .remove = __exit_p(mxc_hdmi_core_remove),
++};
++
++static int __init mxc_hdmi_core_init(void)
++{
++ return platform_driver_probe(&mxc_hdmi_core_driver,
++ mxc_hdmi_core_probe);
++}
++
++static void __exit mxc_hdmi_core_exit(void)
++{
++ platform_driver_unregister(&mxc_hdmi_core_driver);
++}
++
++subsys_initcall(mxc_hdmi_core_init);
++module_exit(mxc_hdmi_core_exit);
++
++MODULE_DESCRIPTION("Core driver for Freescale i.Mx on-chip HDMI");
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/mfd/si476x-cmd.c linux-openelec/drivers/mfd/si476x-cmd.c
+--- linux-3.14.36/drivers/mfd/si476x-cmd.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mfd/si476x-cmd.c 2015-05-06 12:05:42.000000000 -0500
+@@ -303,13 +303,13 @@
+ * possible racing conditions when working in polling mode */
+ atomic_set(&core->cts, 0);
+
+- /* if (unlikely(command == CMD_POWER_DOWN) */
+- if (!wait_event_timeout(core->command,
+- atomic_read(&core->cts),
+- usecs_to_jiffies(usecs) + 1))
+- dev_warn(&core->client->dev,
+- "(%s) [CMD 0x%02x] Answer timeout.\n",
+- __func__, command);
++ if (!(command == CMD_POWER_DOWN))
++ if (!wait_event_timeout(core->command,
++ atomic_read(&core->cts),
++ usecs_to_jiffies(usecs) + 1))
++ dev_warn(&core->client->dev,
++ "(%s) [CMD 0x%02x] Answer timeout.\n",
++ __func__, command);
+
+ /*
+ When working in polling mode, for some reason the tuner will
+diff -Nur linux-3.14.36/drivers/mfd/si476x-i2c.c linux-openelec/drivers/mfd/si476x-i2c.c
+--- linux-3.14.36/drivers/mfd/si476x-i2c.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mfd/si476x-i2c.c 2015-05-06 12:05:42.000000000 -0500
+@@ -303,7 +303,7 @@
+ */
+ udelay(100);
+
+- err = si476x_core_start(core, false);
++ err = si476x_core_start(core, true);
+ if (err < 0)
+ goto disable_regulators;
+
+@@ -312,7 +312,7 @@
+
+ case SI476X_POWER_DOWN:
+ core->power_state = next_state;
+- err = si476x_core_stop(core, false);
++ err = si476x_core_stop(core, true);
+ if (err < 0)
+ core->power_state = SI476X_POWER_INCONSISTENT;
+ disable_regulators:
+@@ -740,8 +740,15 @@
+ memcpy(&core->pinmux, &pdata->pinmux,
+ sizeof(struct si476x_pinmux));
+ } else {
+- dev_err(&client->dev, "No platform data provided\n");
+- return -EINVAL;
++ dev_warn(&client->dev, "Using default platform data.\n");
++ core->power_up_parameters.xcload = 0x28;
++ core->power_up_parameters.func = SI476X_FUNC_FM_RECEIVER;
++ core->power_up_parameters.freq = SI476X_FREQ_37P209375_MHZ;
++ core->diversity_mode = SI476X_PHDIV_DISABLED;
++ core->pinmux.dclk = SI476X_DCLK_DAUDIO;
++ core->pinmux.dfs = SI476X_DFS_DAUDIO;
++ core->pinmux.dout = SI476X_DOUT_I2S_OUTPUT;
++ core->pinmux.xout = SI476X_XOUT_TRISTATE;
+ }
+
+ core->supplies[0].supply = "vd";
+@@ -799,6 +806,10 @@
+
+ core->chip_id = id->driver_data;
+
++ /* Power down si476x first */
++ core->power_state = SI476X_POWER_UP_FULL;
++ si476x_core_set_power_state(core, SI476X_POWER_DOWN);
++
+ rval = si476x_core_get_revision_info(core);
+ if (rval < 0) {
+ rval = -ENODEV;
+diff -Nur linux-3.14.36/drivers/mfd/si476x-prop.c linux-openelec/drivers/mfd/si476x-prop.c
+--- linux-3.14.36/drivers/mfd/si476x-prop.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mfd/si476x-prop.c 2015-05-06 12:05:42.000000000 -0500
+@@ -217,15 +217,36 @@
+ return 0;
+ }
+
++static bool si476x_core_regmap_volatile_register(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case SI476X_PROP_DIGITAL_IO_OUTPUT_SAMPLE_RATE:
++ case SI476X_PROP_DIGITAL_IO_OUTPUT_FORMAT:
++ return false;
++ default:
++ return true;
++ }
++
++ return true;
++}
++
++/* These two register is used by the codec, so add reg_default here */
++static struct reg_default si476x_core_reg[] = {
++ { 0x202, 0xBB80 },
++ { 0x203, 0x1700 },
++};
+
+ static const struct regmap_config si476x_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .max_register = 0x4003,
++ .reg_defaults = si476x_core_reg,
++ .num_reg_defaults = ARRAY_SIZE(si476x_core_reg),
+
+ .writeable_reg = si476x_core_regmap_writable_register,
+ .readable_reg = si476x_core_regmap_readable_register,
++ .volatile_reg = si476x_core_regmap_volatile_register,
+
+ .reg_read = si476x_core_regmap_read,
+ .reg_write = si476x_core_regmap_write,
+diff -Nur linux-3.14.36/drivers/misc/sram.c linux-openelec/drivers/misc/sram.c
+--- linux-3.14.36/drivers/misc/sram.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/misc/sram.c 2015-05-06 12:05:42.000000000 -0500
+@@ -29,7 +29,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/genalloc.h>
+
+-#define SRAM_GRANULARITY 32
++#define SRAM_GRANULARITY 4096
+
+ struct sram_dev {
+ struct gen_pool *pool;
+diff -Nur linux-3.14.36/drivers/mmc/core/core.c linux-openelec/drivers/mmc/core/core.c
+--- linux-3.14.36/drivers/mmc/core/core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/core/core.c 2015-05-06 12:05:42.000000000 -0500
+@@ -13,11 +13,13 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/clk.h>
+ #include <linux/completion.h>
+ #include <linux/device.h>
+ #include <linux/delay.h>
+ #include <linux/pagemap.h>
+ #include <linux/err.h>
++#include <linux/gpio.h>
+ #include <linux/leds.h>
+ #include <linux/scatterlist.h>
+ #include <linux/log2.h>
+@@ -1519,6 +1521,43 @@
+ mmc_host_clk_release(host);
+ }
+
++static void mmc_card_power_up(struct mmc_host *host)
++{
++ int i;
++ struct gpio_desc **gds = host->card_reset_gpios;
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
++ if (gds[i]) {
++ dev_dbg(host->parent, "Asserting reset line %d", i);
++ gpiod_set_value(gds[i], 1);
++ }
++ }
++
++ if (host->card_regulator) {
++ dev_dbg(host->parent, "Enabling external regulator");
++ if (regulator_enable(host->card_regulator))
++ dev_err(host->parent, "Failed to enable external regulator");
++ }
++
++ if (host->card_clk) {
++ dev_dbg(host->parent, "Enabling external clock");
++ clk_prepare_enable(host->card_clk);
++ }
++
++ /* 2ms delay to let clocks and power settle */
++ mmc_delay(20);
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
++ if (gds[i]) {
++ dev_dbg(host->parent, "Deasserting reset line %d", i);
++ gpiod_set_value(gds[i], 0);
++ }
++ }
++
++ /* 2ms delay to after reset release */
++ mmc_delay(20);
++}
++
+ /*
+ * Apply power to the MMC stack. This is a two-stage process.
+ * First, we enable power to the card without the clock running.
+@@ -1535,6 +1574,9 @@
+ if (host->ios.power_mode == MMC_POWER_ON)
+ return;
+
++ /* Power up the card/module first, if needed */
++ mmc_card_power_up(host);
++
+ mmc_host_clk_hold(host);
+
+ host->ios.vdd = fls(ocr) - 1;
+diff -Nur linux-3.14.36/drivers/mmc/core/host.c linux-openelec/drivers/mmc/core/host.c
+--- linux-3.14.36/drivers/mmc/core/host.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/core/host.c 2015-05-06 12:05:42.000000000 -0500
+@@ -12,14 +12,18 @@
+ * MMC host class device management
+ */
+
++#include <linux/kernel.h>
++#include <linux/clk.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/idr.h>
+ #include <linux/of.h>
+ #include <linux/of_gpio.h>
+ #include <linux/pagemap.h>
+ #include <linux/export.h>
+ #include <linux/leds.h>
++#include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
+ #include <linux/suspend.h>
+
+@@ -439,6 +443,66 @@
+
+ EXPORT_SYMBOL(mmc_of_parse);
+
++static int mmc_of_parse_child(struct mmc_host *host)
++{
++ struct device_node *np;
++ struct clk *clk;
++ int i;
++
++ if (!host->parent || !host->parent->of_node)
++ return 0;
++
++ np = host->parent->of_node;
++
++ host->card_regulator = regulator_get(host->parent, "card-external-vcc");
++ if (IS_ERR(host->card_regulator)) {
++ if (PTR_ERR(host->card_regulator) == -EPROBE_DEFER)
++ return PTR_ERR(host->card_regulator);
++ host->card_regulator = NULL;
++ }
++
++ /* Parse card power/reset/clock control */
++ if (of_find_property(np, "card-reset-gpios", NULL)) {
++ struct gpio_desc *gpd;
++ int level = 0;
++
++ /*
++ * If the regulator is enabled, then we can hold the
++ * card in reset with an active high resets. Otherwise,
++ * hold the resets low.
++ */
++ if (host->card_regulator && regulator_is_enabled(host->card_regulator))
++ level = 1;
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
++ gpd = devm_gpiod_get_index(host->parent, "card-reset", i);
++ if (IS_ERR(gpd)) {
++ if (PTR_ERR(gpd) == -EPROBE_DEFER)
++ return PTR_ERR(gpd);
++ break;
++ }
++ gpiod_direction_output(gpd, gpiod_is_active_low(gpd) | level);
++ host->card_reset_gpios[i] = gpd;
++ }
++
++ gpd = devm_gpiod_get_index(host->parent, "card-reset", ARRAY_SIZE(host->card_reset_gpios));
++ if (!IS_ERR(gpd)) {
++ dev_warn(host->parent, "More reset gpios than we can handle");
++ gpiod_put(gpd);
++ }
++ }
++
++ clk = of_clk_get_by_name(np, "card_ext_clock");
++ if (IS_ERR(clk)) {
++ if (PTR_ERR(clk) == -EPROBE_DEFER)
++ return PTR_ERR(clk);
++ clk = NULL;
++ }
++ host->card_clk = clk;
++
++ return 0;
++}
++
+ /**
+ * mmc_alloc_host - initialise the per-host structure.
+ * @extra: sizeof private data structure
+@@ -518,6 +582,10 @@
+ {
+ int err;
+
++ err = mmc_of_parse_child(host);
++ if (err)
++ return err;
++
+ WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
+ !host->ops->enable_sdio_irq);
+
+diff -Nur linux-3.14.36/drivers/mmc/core/mmc.c linux-openelec/drivers/mmc/core/mmc.c
+--- linux-3.14.36/drivers/mmc/core/mmc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/core/mmc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -317,6 +317,11 @@
+ mmc_card_set_blockaddr(card);
+ }
+
++ card->ext_csd.boot_info = ext_csd[EXT_CSD_BOOT_INFO];
++ card->ext_csd.boot_config = ext_csd[EXT_CSD_PART_CONFIG];
++ card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT];
++ card->ext_csd.boot_bus_width = ext_csd[EXT_CSD_BOOT_BUS_WIDTH];
++
+ card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
+ mmc_select_card_type(card);
+
+@@ -655,6 +660,372 @@
+ return err;
+ }
+
++static ssize_t mmc_boot_info_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ char *boot_partition[8] = {
++ "Device not boot enabled",
++ "Boot partition 1 enabled",
++ "Boot partition 2 enabled",
++ "Reserved",
++ "Reserved",
++ "Reserved",
++ "Reserved",
++ "User area enabled for boot"};
++
++ char *bus_width[4] = {
++ "x1 (sdr) or x4 (ddr) bus width in boot operation mode",
++ "x4 (sdr/ddr) bus width in boot operation mode",
++ "x8 (sdr/ddr) bus width in boot operation mode",
++ "Reserved"};
++
++ char *boot_mode[4] = {
++ "Use single data rate + backward compatible timings in boot operation",
++ "Use single data rate + high speed timings in boot operation mode",
++ "Use dual data rate in boot operation",
++ "Reserved"};
++
++ int partition;
++ int width;
++ int mode;
++ int err;
++ u8 *ext_csd = NULL;
++ struct mmc_card *card = container_of(dev, struct mmc_card, dev);
++
++ /* read it again because user may change it */
++ mmc_claim_host(card->host);
++ err = mmc_get_ext_csd(card, &ext_csd);
++ mmc_release_host(card->host);
++ if (err || !ext_csd) {
++ pr_err("%s: failed to get ext_csd, err=%d\n",
++ mmc_hostname(card->host),
++ err);
++ return err;
++ }
++
++ mmc_read_ext_csd(card, ext_csd);
++ mmc_free_ext_csd(ext_csd);
++
++ partition = (card->ext_csd.boot_config >> 3) & 0x7;
++ width = card->ext_csd.boot_bus_width & 0x3;
++ mode = (card->ext_csd.boot_bus_width >> 3) & 0x3;
++
++ return sprintf(buf,
++ "boot_info:0x%02x;\n"
++ " ALT_BOOT_MODE:%x - %s\n"
++ " DDR_BOOT_MODE:%x - %s\n"
++ " HS_BOOTMODE:%x - %s\n"
++ "boot_size:%04dKB\n"
++ "boot_partition:0x%02x;\n"
++ " BOOT_ACK:%x - %s\n"
++ " BOOT_PARTITION-ENABLE: %x - %s\n"
++ "boot_bus:0x%02x\n"
++ " BOOT_MODE:%x - %s\n"
++ " RESET_BOOT_BUS_WIDTH:%x - %s\n"
++ " BOOT_BUS_WIDTH:%x - %s\n",
++
++ card->ext_csd.boot_info,
++ !!(card->ext_csd.boot_info & 0x1),
++ (card->ext_csd.boot_info & 0x1) ?
++ "Supports alternate boot method" :
++ "Does not support alternate boot method",
++ !!(card->ext_csd.boot_info & 0x2),
++ (card->ext_csd.boot_info & 0x2) ?
++ "Supports alternate dual data rate during boot" :
++ "Does not support dual data rate during boot",
++ !!(card->ext_csd.boot_info & 0x4),
++ (card->ext_csd.boot_info & 0x4) ?
++ "Supports high speed timing during boot" :
++ "Does not support high speed timing during boot",
++
++ card->ext_csd.boot_size * 128,
++
++ card->ext_csd.boot_config,
++ !!(card->ext_csd.boot_config & 0x40),
++ (card->ext_csd.boot_config & 0x40) ?
++ "Boot acknowledge sent during boot operation" :
++ "No boot acknowledge sent",
++ partition,
++ boot_partition[partition],
++
++ card->ext_csd.boot_bus_width,
++ mode,
++ boot_mode[mode],
++ !!(card->ext_csd.boot_bus_width & 0x4),
++ (card->ext_csd.boot_bus_width & 0x4) ?
++ "Retain boot bus width and boot mode after boot operation" :
++ "Reset bus width to x1, single data rate and backward"
++ "compatible timings after boot operation",
++ width,
++ bus_width[width]);
++}
++
++/* set up boot partitions */
++static ssize_t
++setup_boot_partitions(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err, busy = 0;
++ u32 part;
++ u8 *ext_csd, boot_config;
++ struct mmc_command cmd;
++ struct mmc_card *card = container_of(dev, struct mmc_card, dev);
++
++ BUG_ON(!card);
++
++ sscanf(buf, "%d\n", &part);
++
++ if (card->csd.mmca_vsn < CSD_SPEC_VER_4) {
++ pr_err("%s: invalid mmc version" \
++ " mmc version is below version 4!)\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++ /* it's a normal SD/MMC but user request to configure boot partition */
++ if (card->ext_csd.boot_size <= 0) {
++ pr_err("%s: fail to send SWITCH command to card " \
++ "to update boot_config of the EXT_CSD!\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++ /*
++ * partition must be -
++ * 0 - user area
++ * 1 - boot partition 1
++ * 2 - boot partition 2
++ * DO NOT switch the partitions that used to be accessed
++ * in OS layer HERE
++ */
++ if (part & EXT_CSD_BOOT_PARTITION_ACCESS_MASK) {
++ pr_err("%s: DO NOT switch the partitions that used to be\n" \
++ " accessed in OS layer HERE. please following the\n" \
++ " guidance of Documentation/mmc/mmc-dev-parts.txt.\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++ ext_csd = kmalloc(512, GFP_KERNEL);
++ if (!ext_csd) {
++ pr_err("%s: could not allocate a buffer to " \
++ "receive the ext_csd.\n", mmc_hostname(card->host));
++ return -ENOMEM;
++ }
++
++ mmc_claim_host(card->host);
++ err = mmc_send_ext_csd(card, ext_csd);
++ if (err) {
++ pr_err("%s: unable to read EXT_CSD.\n",
++ mmc_hostname(card->host));
++ goto err_rtn;
++ }
++
++ /* enable the boot partition in boot mode */
++ /* boot enable be -
++ * 0x00 - disable boot enable.
++ * 0x08 - boot partition 1 is enabled for boot.
++ * 0x10 - boot partition 2 is enabled for boot.
++ * 0x38 - User area is enabled for boot.
++ */
++ switch (part & EXT_CSD_BOOT_PARTITION_ENABLE_MASK) {
++ case 0:
++ boot_config = (ext_csd[EXT_CSD_PART_CONFIG]
++ & ~EXT_CSD_BOOT_PARTITION_ENABLE_MASK
++ & ~EXT_CSD_BOOT_ACK_ENABLE);
++ break;
++ case EXT_CSD_BOOT_PARTITION_PART1:
++ boot_config = ((ext_csd[EXT_CSD_PART_CONFIG]
++ & ~EXT_CSD_BOOT_PARTITION_ENABLE_MASK)
++ | EXT_CSD_BOOT_PARTITION_PART1
++ | EXT_CSD_BOOT_ACK_ENABLE);
++ break;
++ case EXT_CSD_BOOT_PARTITION_PART2:
++ boot_config = ((ext_csd[EXT_CSD_PART_CONFIG]
++ & ~EXT_CSD_BOOT_PARTITION_ENABLE_MASK)
++ | EXT_CSD_BOOT_PARTITION_PART2
++ | EXT_CSD_BOOT_ACK_ENABLE);
++ break;
++ case EXT_CSD_BOOT_PARTITION_ENABLE_MASK:
++ boot_config = ((ext_csd[EXT_CSD_PART_CONFIG]
++ | EXT_CSD_BOOT_PARTITION_ENABLE_MASK)
++ & ~EXT_CSD_BOOT_ACK_ENABLE);
++ break;
++ default:
++ pr_err("%s: wrong boot config parameter" \
++ " 00 (disable boot), 08 (enable boot1)," \
++ "16 (enable boot2), 56 (User area)\n",
++ mmc_hostname(card->host));
++ err = -EINVAL;
++ goto err_rtn;
++ }
++
++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
++ EXT_CSD_PART_CONFIG, boot_config, card->ext_csd.part_time);
++ if (err) {
++ pr_err("%s: fail to send SWITCH command to card " \
++ "to update boot_config of the EXT_CSD!\n",
++ mmc_hostname(card->host));
++ goto err_rtn;
++ }
++
++ /* waiting for the card to finish the busy state */
++ do {
++ memset(&cmd, 0, sizeof(struct mmc_command));
++
++ cmd.opcode = MMC_SEND_STATUS;
++ cmd.arg = card->rca << 16;
++ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
++
++ err = mmc_wait_for_cmd(card->host, &cmd, 0);
++ if (err || busy > 100) {
++ pr_err("%s: failed to wait for" \
++ "the busy state to end.\n",
++ mmc_hostname(card->host));
++ break;
++ }
++
++ if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
++ pr_info("%s: card is in busy state" \
++ "pls wait for busy state to end.\n",
++ mmc_hostname(card->host));
++ }
++ busy++;
++ } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
++
++ /* Now check whether it works */
++ err = mmc_send_ext_csd(card, ext_csd);
++ if (err) {
++ pr_err("%s: %d unable to re-read EXT_CSD.\n",
++ mmc_hostname(card->host), err);
++ goto err_rtn;
++ }
++
++ card->ext_csd.boot_config = ext_csd[EXT_CSD_PART_CONFIG];
++
++err_rtn:
++ mmc_release_host(card->host);
++ kfree(ext_csd);
++ if (err)
++ return err;
++ else
++ return count;
++}
++
++/* configure the boot bus */
++static ssize_t
++setup_boot_bus(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err, busy = 0;
++ u32 boot_bus, new_bus;
++ u8 *ext_csd;
++ struct mmc_command cmd;
++ struct mmc_card *card = container_of(dev, struct mmc_card, dev);
++
++ BUG_ON(!card);
++
++ sscanf(buf, "%d\n", &boot_bus);
++
++ if (card->csd.mmca_vsn < CSD_SPEC_VER_4) {
++ pr_err("%s: invalid mmc version" \
++ " mmc version is below version 4!)\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++ /* it's a normal SD/MMC but user request to configure boot bus */
++ if (card->ext_csd.boot_size <= 0) {
++ pr_err("%s: this is a normal SD/MMC card" \
++ " but you request to configure boot bus !\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++ ext_csd = kmalloc(512, GFP_KERNEL);
++ if (!ext_csd) {
++ pr_err("%s: could not allocate a buffer to " \
++ "receive the ext_csd.\n", mmc_hostname(card->host));
++ return -ENOMEM;
++ }
++
++ mmc_claim_host(card->host);
++ err = mmc_send_ext_csd(card, ext_csd);
++ if (err) {
++ pr_err("%s: unable to read EXT_CSD.\n",
++ mmc_hostname(card->host));
++ goto err_rtn;
++ }
++
++ /* Configure the boot bus width when boot partition is enabled */
++ if (((boot_bus & EXT_CSD_BOOT_BUS_WIDTH_MODE_MASK) >> 3) > 2
++ || (boot_bus & EXT_CSD_BOOT_BUS_WIDTH_WIDTH_MASK) > 2
++ || (boot_bus & ~EXT_CSD_BOOT_BUS_WIDTH_MASK) > 0) {
++ pr_err("%s: Invalid inputs!\n",
++ mmc_hostname(card->host));
++ err = -EINVAL;
++ goto err_rtn;
++ }
++
++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
++ EXT_CSD_BOOT_BUS_WIDTH, boot_bus, card->ext_csd.part_time);
++ if (err) {
++ pr_err("%s: fail to send SWITCH command to card " \
++ "to update boot_config of the EXT_CSD!\n",
++ mmc_hostname(card->host));
++ goto err_rtn;
++ }
++
++ /* waiting for the card to finish the busy state */
++ do {
++ memset(&cmd, 0, sizeof(struct mmc_command));
++
++ cmd.opcode = MMC_SEND_STATUS;
++ cmd.arg = card->rca << 16;
++ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
++
++ err = mmc_wait_for_cmd(card->host, &cmd, 0);
++ if (err || busy > 100) {
++ pr_err("%s: failed to wait for" \
++ "the busy state to end.\n",
++ mmc_hostname(card->host));
++ break;
++ }
++
++ if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
++ pr_info("%s: card is in busy state" \
++ "pls wait for busy state to end.\n",
++ mmc_hostname(card->host));
++ }
++ busy++;
++ } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
++
++ /* Now check whether it works */
++ err = mmc_send_ext_csd(card, ext_csd);
++ if (err) {
++ pr_err("%s: %d unable to re-read EXT_CSD.\n",
++ mmc_hostname(card->host), err);
++ goto err_rtn;
++ }
++
++ new_bus = ext_csd[EXT_CSD_BOOT_BUS_WIDTH];
++ if (boot_bus != new_bus) {
++ pr_err("%s: after SWITCH, current boot bus mode %d" \
++ " is not same as requested bus mode %d!\n",
++ mmc_hostname(card->host), new_bus, boot_bus);
++ goto err_rtn;
++ }
++ card->ext_csd.boot_bus_width = ext_csd[EXT_CSD_BOOT_BUS_WIDTH];
++
++err_rtn:
++ mmc_release_host(card->host);
++ mmc_free_ext_csd(ext_csd);
++ if (err)
++ return err;
++ else
++ return count;
++}
++
+ MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
+ card->raw_cid[2], card->raw_cid[3]);
+ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
+@@ -674,6 +1045,9 @@
+ MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
+ MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+ MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
++DEVICE_ATTR(boot_info, S_IRUGO, mmc_boot_info_show, NULL);
++DEVICE_ATTR(boot_config, S_IWUGO, NULL, setup_boot_partitions);
++DEVICE_ATTR(boot_bus_config, S_IWUGO, NULL, setup_boot_bus);
+
+ static struct attribute *mmc_std_attrs[] = {
+ &dev_attr_cid.attr,
+@@ -692,6 +1066,9 @@
+ &dev_attr_enhanced_area_size.attr,
+ &dev_attr_raw_rpmb_size_mult.attr,
+ &dev_attr_rel_sectors.attr,
++ &dev_attr_boot_info.attr,
++ &dev_attr_boot_config.attr,
++ &dev_attr_boot_bus_config.attr,
+ NULL,
+ };
+
+diff -Nur linux-3.14.36/drivers/mmc/core/sdio_irq.c linux-openelec/drivers/mmc/core/sdio_irq.c
+--- linux-3.14.36/drivers/mmc/core/sdio_irq.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/core/sdio_irq.c 2015-05-06 12:05:42.000000000 -0500
+@@ -90,6 +90,15 @@
+ return ret;
+ }
+
++void sdio_run_irqs(struct mmc_host *host)
++{
++ mmc_claim_host(host);
++ host->sdio_irq_pending = true;
++ process_sdio_pending_irqs(host);
++ mmc_release_host(host);
++}
++EXPORT_SYMBOL_GPL(sdio_run_irqs);
++
+ static int sdio_irq_thread(void *_host)
+ {
+ struct mmc_host *host = _host;
+@@ -189,14 +198,20 @@
+ WARN_ON(!host->claimed);
+
+ if (!host->sdio_irqs++) {
+- atomic_set(&host->sdio_irq_thread_abort, 0);
+- host->sdio_irq_thread =
+- kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
+- mmc_hostname(host));
+- if (IS_ERR(host->sdio_irq_thread)) {
+- int err = PTR_ERR(host->sdio_irq_thread);
+- host->sdio_irqs--;
+- return err;
++ if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) {
++ atomic_set(&host->sdio_irq_thread_abort, 0);
++ host->sdio_irq_thread =
++ kthread_run(sdio_irq_thread, host,
++ "ksdioirqd/%s", mmc_hostname(host));
++ if (IS_ERR(host->sdio_irq_thread)) {
++ int err = PTR_ERR(host->sdio_irq_thread);
++ host->sdio_irqs--;
++ return err;
++ }
++ } else {
++ mmc_host_clk_hold(host);
++ host->ops->enable_sdio_irq(host, 1);
++ mmc_host_clk_release(host);
+ }
+ }
+
+@@ -211,8 +226,14 @@
+ BUG_ON(host->sdio_irqs < 1);
+
+ if (!--host->sdio_irqs) {
+- atomic_set(&host->sdio_irq_thread_abort, 1);
+- kthread_stop(host->sdio_irq_thread);
++ if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) {
++ atomic_set(&host->sdio_irq_thread_abort, 1);
++ kthread_stop(host->sdio_irq_thread);
++ } else {
++ mmc_host_clk_hold(host);
++ host->ops->enable_sdio_irq(host, 0);
++ mmc_host_clk_release(host);
++ }
+ }
+
+ return 0;
+diff -Nur linux-3.14.36/drivers/mmc/host/dw_mmc.c linux-openelec/drivers/mmc/host/dw_mmc.c
+--- linux-3.14.36/drivers/mmc/host/dw_mmc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/dw_mmc.c 2015-07-24 18:03:29.264842002 -0500
+@@ -2147,6 +2147,8 @@
+ if (!mmc)
+ return -ENOMEM;
+
++ mmc_of_parse(mmc);
++
+ slot = mmc_priv(mmc);
+ slot->id = id;
+ slot->mmc = mmc;
+diff -Nur linux-3.14.36/drivers/mmc/host/Kconfig linux-openelec/drivers/mmc/host/Kconfig
+--- linux-3.14.36/drivers/mmc/host/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -25,8 +25,7 @@
+ If unsure, say N.
+
+ config MMC_SDHCI
+- tristate "Secure Digital Host Controller Interface support"
+- depends on HAS_DMA
++ tristate
+ help
+ This selects the generic Secure Digital Host Controller Interface.
+ It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
+@@ -59,7 +58,8 @@
+
+ config MMC_SDHCI_PCI
+ tristate "SDHCI support on PCI bus"
+- depends on MMC_SDHCI && PCI
++ depends on PCI && HAS_DMA
++ select MMC_SDHCI
+ help
+ This selects the PCI Secure Digital Host Controller Interface.
+ Most controllers found today are PCI devices.
+@@ -83,7 +83,8 @@
+
+ config MMC_SDHCI_ACPI
+ tristate "SDHCI support for ACPI enumerated SDHCI controllers"
+- depends on MMC_SDHCI && ACPI
++ depends on ACPI && HAS_DMA
++ select MMC_SDHCI
+ help
+ This selects support for ACPI enumerated SDHCI controllers,
+ identified by ACPI Compatibility ID PNP0D40 or specific
+@@ -94,8 +95,8 @@
+ If unsure, say N.
+
+ config MMC_SDHCI_PLTFM
+- tristate "SDHCI platform and OF driver helper"
+- depends on MMC_SDHCI
++ tristate
++ select MMC_SDHCI
+ help
+ This selects the common helper functions support for Secure Digital
+ Host Controller Interface based platform and OF drivers.
+@@ -106,8 +107,8 @@
+
+ config MMC_SDHCI_OF_ARASAN
+ tristate "SDHCI OF support for the Arasan SDHCI controllers"
+- depends on MMC_SDHCI_PLTFM
+- depends on OF
++ depends on OF && HAS_DMA
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Arasan Secure Digital Host Controller Interface
+ (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
+@@ -118,9 +119,9 @@
+
+ config MMC_SDHCI_OF_ESDHC
+ tristate "SDHCI OF support for the Freescale eSDHC controller"
+- depends on MMC_SDHCI_PLTFM
+- depends on PPC_OF
++ depends on PPC_OF && HAS_DMA
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Freescale eSDHC controller support.
+
+@@ -130,9 +131,9 @@
+
+ config MMC_SDHCI_OF_HLWD
+ tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+- depends on MMC_SDHCI_PLTFM
+- depends on PPC_OF
++ depends on PPC_OF && HAS_DMA
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ found in the "Hollywood" chipset of the Nintendo Wii video game
+@@ -144,8 +145,8 @@
+
+ config MMC_SDHCI_CNS3XXX
+ tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
+- depends on ARCH_CNS3XXX
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_CNS3XXX && HAS_DMA
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the SDHCI support for CNS3xxx System-on-Chip devices.
+
+@@ -155,9 +156,9 @@
+
+ config MMC_SDHCI_ESDHC_IMX
+ tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
+- depends on ARCH_MXC
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_MXC && HAS_DMA
+ select MMC_SDHCI_IO_ACCESSORS
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Freescale eSDHC/uSDHC controller support
+ found on i.MX25, i.MX35 i.MX5x and i.MX6x.
+@@ -168,9 +169,9 @@
+
+ config MMC_SDHCI_DOVE
+ tristate "SDHCI support on Marvell's Dove SoC"
+- depends on ARCH_DOVE
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_DOVE && HAS_DMA
+ select MMC_SDHCI_IO_ACCESSORS
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface in
+ Marvell's Dove SoC.
+@@ -181,9 +182,9 @@
+
+ config MMC_SDHCI_TEGRA
+ tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+- depends on ARCH_TEGRA
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_TEGRA && HAS_DMA
+ select MMC_SDHCI_IO_ACCESSORS
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Tegra SD/MMC controller. If you have a Tegra
+ platform with SD or MMC devices, say Y or M here.
+@@ -192,7 +193,8 @@
+
+ config MMC_SDHCI_S3C
+ tristate "SDHCI support on Samsung S3C SoC"
+- depends on MMC_SDHCI && PLAT_SAMSUNG
++ depends on PLAT_SAMSUNG && HAS_DMA
++ select MMC_SDHCI
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referrered to as the HSMMC block in some of the Samsung S3C
+@@ -204,8 +206,8 @@
+
+ config MMC_SDHCI_SIRF
+ tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
+- depends on ARCH_SIRF
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_SIRF && HAS_DMA
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the SDHCI support for SiRF System-on-Chip devices.
+
+@@ -215,8 +217,7 @@
+
+ config MMC_SDHCI_PXAV3
+ tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
+- depends on CLKDEV_LOOKUP
+- select MMC_SDHCI
++ depends on CLKDEV_LOOKUP && HAS_DMA
+ select MMC_SDHCI_PLTFM
+ default CPU_MMP2
+ help
+@@ -228,8 +229,7 @@
+
+ config MMC_SDHCI_PXAV2
+ tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
+- depends on CLKDEV_LOOKUP
+- select MMC_SDHCI
++ depends on CLKDEV_LOOKUP && HAS_DMA
+ select MMC_SDHCI_PLTFM
+ default CPU_PXA910
+ help
+@@ -241,7 +241,8 @@
+
+ config MMC_SDHCI_SPEAR
+ tristate "SDHCI support on ST SPEAr platform"
+- depends on MMC_SDHCI && PLAT_SPEAR
++ depends on PLAT_SPEAR && HAS_DMA
++ select MMC_SDHCI
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referrered to as the HSMMC block in some of the ST SPEAR range
+@@ -263,7 +264,7 @@
+
+ config MMC_SDHCI_BCM_KONA
+ tristate "SDHCI support on Broadcom KONA platform"
+- depends on ARCH_BCM
++ depends on ARCH_BCM && HAS_DMA
+ select MMC_SDHCI_PLTFM
+ help
+ This selects the Broadcom Kona Secure Digital Host Controller
+@@ -274,9 +275,9 @@
+
+ config MMC_SDHCI_BCM2835
+ tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
+- depends on ARCH_BCM2835
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_BCM2835 && HAS_DMA
+ select MMC_SDHCI_IO_ACCESSORS
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the BCM2835 SD/MMC controller. If you have a BCM2835
+ platform with SD or MMC devices, say Y or M here.
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-acpi.c linux-openelec/drivers/mmc/host/sdhci-acpi.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-acpi.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-acpi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -101,11 +101,19 @@
+ }
+
+ static const struct sdhci_ops sdhci_acpi_ops_dflt = {
++ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_acpi_enable_dma,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_ops sdhci_acpi_ops_int = {
++ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_acpi_enable_dma,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_acpi_int_hw_reset,
+ };
+
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-bcm2835.c linux-openelec/drivers/mmc/host/sdhci-bcm2835.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-bcm2835.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-bcm2835.c 2015-05-06 12:05:42.000000000 -0500
+@@ -131,8 +131,12 @@
+ .read_l = bcm2835_sdhci_readl,
+ .read_w = bcm2835_sdhci_readw,
+ .read_b = bcm2835_sdhci_readb,
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = bcm2835_sdhci_get_min_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-bcm-kona.c linux-openelec/drivers/mmc/host/sdhci-bcm-kona.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-bcm-kona.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-bcm-kona.c 2015-05-06 12:05:42.000000000 -0500
+@@ -205,9 +205,13 @@
+ }
+
+ static struct sdhci_ops sdhci_bcm_kona_ops = {
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_bcm_kona_get_max_clk,
+ .get_timeout_clock = sdhci_bcm_kona_get_timeout_clock,
+ .platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .card_event = sdhci_bcm_kona_card_event,
+ };
+
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci.c linux-openelec/drivers/mmc/host/sdhci.c
+--- linux-3.14.36/drivers/mmc/host/sdhci.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci.c 2015-07-24 18:03:29.304842002 -0500
+@@ -44,6 +44,8 @@
+
+ #define MAX_TUNING_LOOP 40
+
++#define ADMA_SIZE ((128 * 2 + 1) * 4)
++
+ static unsigned int debug_quirks = 0;
+ static unsigned int debug_quirks2;
+
+@@ -131,43 +133,28 @@
+ * *
+ \*****************************************************************************/
+
+-static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
+-{
+- u32 ier;
+-
+- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
+- ier &= ~clear;
+- ier |= set;
+- sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+- sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+-}
+-
+-static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
+-{
+- sdhci_clear_set_irqs(host, 0, irqs);
+-}
+-
+-static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
+-{
+- sdhci_clear_set_irqs(host, irqs, 0);
+-}
+-
+ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
+ {
+- u32 present, irqs;
++ u32 present;
++ int gpio_cd = mmc_gpio_get_cd(host->mmc);
+
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+- (host->mmc->caps & MMC_CAP_NONREMOVABLE))
++ (host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
++ !IS_ERR_VALUE(gpio_cd))
+ return;
+
+- present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+- SDHCI_CARD_PRESENT;
+- irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
++ if (enable) {
++ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT;
+
+- if (enable)
+- sdhci_unmask_irqs(host, irqs);
+- else
+- sdhci_mask_irqs(host, irqs);
++ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
++ SDHCI_INT_CARD_INSERT;
++ } else {
++ host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
++ }
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+ static void sdhci_enable_card_detection(struct sdhci_host *host)
+@@ -180,22 +167,9 @@
+ sdhci_set_card_detection(host, false);
+ }
+
+-static void sdhci_reset(struct sdhci_host *host, u8 mask)
++void sdhci_reset(struct sdhci_host *host, u8 mask)
+ {
+ unsigned long timeout;
+- u32 uninitialized_var(ier);
+-
+- if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
+- if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
+- SDHCI_CARD_PRESENT))
+- return;
+- }
+-
+- if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
+- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
+-
+- if (host->ops->platform_reset_enter)
+- host->ops->platform_reset_enter(host, mask);
+
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+
+@@ -220,16 +194,27 @@
+ timeout--;
+ mdelay(1);
+ }
++}
++EXPORT_SYMBOL_GPL(sdhci_reset);
+
+- if (host->ops->platform_reset_exit)
+- host->ops->platform_reset_exit(host, mask);
++static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
++{
++ if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
++ if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT))
++ return;
++ }
+
+- if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
+- sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
++ host->ops->reset(host, mask);
+
+- if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+- if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
+- host->ops->enable_dma(host);
++ if (mask & SDHCI_RESET_ALL) {
++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
++ if (host->ops->enable_dma)
++ host->ops->enable_dma(host);
++ }
++
++ /* Resetting the controller clears many */
++ host->preset_enabled = false;
+ }
+ }
+
+@@ -238,15 +223,18 @@
+ static void sdhci_init(struct sdhci_host *host, int soft)
+ {
+ if (soft)
+- sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
+ else
+- sdhci_reset(host, SDHCI_RESET_ALL);
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+- sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
+- SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+- SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
+- SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
+- SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
++ host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
++ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
++ SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
++ SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
++ SDHCI_INT_RESPONSE;
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ if (soft) {
+ /* force clock reconfiguration */
+@@ -502,11 +490,6 @@
+ else
+ direction = DMA_TO_DEVICE;
+
+- /*
+- * The ADMA descriptor table is mapped further down as we
+- * need to fill it with data first.
+- */
+-
+ host->align_addr = dma_map_single(mmc_dev(host->mmc),
+ host->align_buffer, 128 * 4, direction);
+ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
+@@ -567,7 +550,7 @@
+ * If this triggers then we have a calculation bug
+ * somewhere. :/
+ */
+- WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
++ WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
+ }
+
+ if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
+@@ -595,17 +578,8 @@
+ host->align_addr, 128 * 4, direction);
+ }
+
+- host->adma_addr = dma_map_single(mmc_dev(host->mmc),
+- host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+- if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
+- goto unmap_entries;
+- BUG_ON(host->adma_addr & 0x3);
+-
+ return 0;
+
+-unmap_entries:
+- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+- data->sg_len, direction);
+ unmap_align:
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+ 128 * 4, direction);
+@@ -623,19 +597,25 @@
+ u8 *align;
+ char *buffer;
+ unsigned long flags;
++ bool has_unaligned;
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+
+- dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
+- (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+-
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+ 128 * 4, direction);
+
+- if (data->flags & MMC_DATA_READ) {
++ /* Do a quick scan of the SG list for any unaligned mappings */
++ has_unaligned = false;
++ for_each_sg(data->sg, sg, host->sg_count, i)
++ if (sg_dma_address(sg) & 3) {
++ has_unaligned = true;
++ break;
++ }
++
++ if (has_unaligned && data->flags & MMC_DATA_READ) {
+ dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
+
+@@ -721,9 +701,12 @@
+ u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
+
+ if (host->flags & SDHCI_REQ_USE_DMA)
+- sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
++ host->ier = (host->ier & ~pio_irqs) | dma_irqs;
+ else
+- sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
++ host->ier = (host->ier & ~dma_irqs) | pio_irqs;
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+@@ -976,8 +959,8 @@
+ * upon error conditions.
+ */
+ if (data->error) {
+- sdhci_reset(host, SDHCI_RESET_CMD);
+- sdhci_reset(host, SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ }
+
+ sdhci_send_command(host, data->stop);
+@@ -1107,24 +1090,23 @@
+
+ static u16 sdhci_get_preset_value(struct sdhci_host *host)
+ {
+- u16 ctrl, preset = 0;
+-
+- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ u16 preset = 0;
+
+- switch (ctrl & SDHCI_CTRL_UHS_MASK) {
+- case SDHCI_CTRL_UHS_SDR12:
++ switch (host->timing) {
++ case MMC_TIMING_UHS_SDR12:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ break;
+- case SDHCI_CTRL_UHS_SDR25:
++ case MMC_TIMING_UHS_SDR25:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
+ break;
+- case SDHCI_CTRL_UHS_SDR50:
++ case MMC_TIMING_UHS_SDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
+ break;
+- case SDHCI_CTRL_UHS_SDR104:
++ case MMC_TIMING_UHS_SDR104:
++ case MMC_TIMING_MMC_HS200:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
+ break;
+- case SDHCI_CTRL_UHS_DDR50:
++ case MMC_TIMING_UHS_DDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
+ break;
+ default:
+@@ -1136,32 +1118,22 @@
+ return preset;
+ }
+
+-static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
++void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+ int div = 0; /* Initialized for compiler warning */
+ int real_div = div, clk_mul = 1;
+ u16 clk = 0;
+ unsigned long timeout;
+
+- if (clock && clock == host->clock)
+- return;
+-
+ host->mmc->actual_clock = 0;
+
+- if (host->ops->set_clock) {
+- host->ops->set_clock(host, clock);
+- if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
+- return;
+- }
+-
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+- goto out;
++ return;
+
+ if (host->version >= SDHCI_SPEC_300) {
+- if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
+- SDHCI_CTRL_PRESET_VAL_ENABLE) {
++ if (host->preset_enabled) {
+ u16 pre_val;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+@@ -1247,26 +1219,16 @@
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+-
+-out:
+- host->clock = clock;
+-}
+-
+-static inline void sdhci_update_clock(struct sdhci_host *host)
+-{
+- unsigned int clock;
+-
+- clock = host->clock;
+- host->clock = 0;
+- sdhci_set_clock(host, clock);
+ }
++EXPORT_SYMBOL_GPL(sdhci_set_clock);
+
+-static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
++static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++ unsigned short vdd)
+ {
+ u8 pwr = 0;
+
+- if (power != (unsigned short)-1) {
+- switch (1 << power) {
++ if (mode != MMC_POWER_OFF) {
++ switch (1 << vdd) {
+ case MMC_VDD_165_195:
+ pwr = SDHCI_POWER_180;
+ break;
+@@ -1284,7 +1246,7 @@
+ }
+
+ if (host->pwr == pwr)
+- return -1;
++ return;
+
+ host->pwr = pwr;
+
+@@ -1292,38 +1254,43 @@
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+ sdhci_runtime_pm_bus_off(host);
+- return 0;
+- }
+-
+- /*
+- * Spec says that we should clear the power reg before setting
+- * a new value. Some controllers don't seem to like this though.
+- */
+- if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+- sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++ vdd = 0;
++ } else {
++ /*
++ * Spec says that we should clear the power reg before setting
++ * a new value. Some controllers don't seem to like this though.
++ */
++ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
++ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+- /*
+- * At least the Marvell CaFe chip gets confused if we set the voltage
+- * and set turn on power at the same time, so set the voltage first.
+- */
+- if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
++ /*
++ * At least the Marvell CaFe chip gets confused if we set the
++ * voltage and set turn on power at the same time, so set the
++ * voltage first.
++ */
++ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
++ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+- pwr |= SDHCI_POWER_ON;
++ pwr |= SDHCI_POWER_ON;
+
+- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
++ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+- if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+- sdhci_runtime_pm_bus_on(host);
++ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
++ sdhci_runtime_pm_bus_on(host);
+
+- /*
+- * Some controllers need an extra 10ms delay of 10ms before they
+- * can apply clock after applying power
+- */
+- if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+- mdelay(10);
++ /*
++ * Some controllers need an extra 10ms delay of 10ms before
++ * they can apply clock after applying power
++ */
++ if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
++ mdelay(10);
++ }
+
+- return power;
++ if (host->vmmc) {
++ spin_unlock_irq(&host->lock);
++ mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd);
++ spin_lock_irq(&host->lock);
++ }
+ }
+
+ /*****************************************************************************\
+@@ -1428,10 +1395,52 @@
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
++void sdhci_set_bus_width(struct sdhci_host *host, int width)
++{
++ u8 ctrl;
++
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++ if (width == MMC_BUS_WIDTH_8) {
++ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ if (host->version >= SDHCI_SPEC_300)
++ ctrl |= SDHCI_CTRL_8BITBUS;
++ } else {
++ if (host->version >= SDHCI_SPEC_300)
++ ctrl &= ~SDHCI_CTRL_8BITBUS;
++ if (width == MMC_BUS_WIDTH_4)
++ ctrl |= SDHCI_CTRL_4BITBUS;
++ else
++ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ }
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++}
++EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
++
++void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
++{
++ u16 ctrl_2;
++
++ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ /* Select Bus Speed Mode for host */
++ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
++ if ((timing == MMC_TIMING_MMC_HS200) ||
++ (timing == MMC_TIMING_UHS_SDR104))
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
++ else if (timing == MMC_TIMING_UHS_SDR12)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
++ else if (timing == MMC_TIMING_UHS_SDR25)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
++ else if (timing == MMC_TIMING_UHS_SDR50)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
++ else if (timing == MMC_TIMING_UHS_DDR50)
++ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
++ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
++}
++EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
++
+ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+ {
+ unsigned long flags;
+- int vdd_bit = -1;
+ u8 ctrl;
+
+ spin_lock_irqsave(&host->lock, flags);
+@@ -1457,45 +1466,17 @@
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
+ sdhci_enable_preset_value(host, false);
+
+- sdhci_set_clock(host, ios->clock);
+-
+- if (ios->power_mode == MMC_POWER_OFF)
+- vdd_bit = sdhci_set_power(host, -1);
+- else
+- vdd_bit = sdhci_set_power(host, ios->vdd);
+-
+- if (host->vmmc && vdd_bit != -1) {
+- spin_unlock_irqrestore(&host->lock, flags);
+- mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
+- spin_lock_irqsave(&host->lock, flags);
++ if (!ios->clock || ios->clock != host->clock) {
++ host->ops->set_clock(host, ios->clock);
++ host->clock = ios->clock;
+ }
+
++ sdhci_set_power(host, ios->power_mode, ios->vdd);
++
+ if (host->ops->platform_send_init_74_clocks)
+ host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+
+- /*
+- * If your platform has 8-bit width support but is not a v3 controller,
+- * or if it requires special setup code, you should implement that in
+- * platform_bus_width().
+- */
+- if (host->ops->platform_bus_width) {
+- host->ops->platform_bus_width(host, ios->bus_width);
+- } else {
+- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+- if (ios->bus_width == MMC_BUS_WIDTH_8) {
+- ctrl &= ~SDHCI_CTRL_4BITBUS;
+- if (host->version >= SDHCI_SPEC_300)
+- ctrl |= SDHCI_CTRL_8BITBUS;
+- } else {
+- if (host->version >= SDHCI_SPEC_300)
+- ctrl &= ~SDHCI_CTRL_8BITBUS;
+- if (ios->bus_width == MMC_BUS_WIDTH_4)
+- ctrl |= SDHCI_CTRL_4BITBUS;
+- else
+- ctrl &= ~SDHCI_CTRL_4BITBUS;
+- }
+- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+- }
++ host->ops->set_bus_width(host, ios->bus_width);
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+@@ -1517,13 +1498,13 @@
+ (ios->timing == MMC_TIMING_UHS_SDR25))
+ ctrl |= SDHCI_CTRL_HISPD;
+
+- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+- if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
++ if (!host->preset_enabled) {
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ /*
+ * We only need to set Driver Strength if the
+ * preset value enable is not set.
+ */
++ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
+ if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
+ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
+@@ -1547,34 +1528,16 @@
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+ /* Re-enable SD Clock */
+- sdhci_update_clock(host);
++ host->ops->set_clock(host, host->clock);
+ }
+
+-
+ /* Reset SD Clock Enable */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+- if (host->ops->set_uhs_signaling)
+- host->ops->set_uhs_signaling(host, ios->timing);
+- else {
+- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+- /* Select Bus Speed Mode for host */
+- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+- if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+- (ios->timing == MMC_TIMING_UHS_SDR104))
+- ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+- else if (ios->timing == MMC_TIMING_UHS_SDR12)
+- ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+- else if (ios->timing == MMC_TIMING_UHS_SDR25)
+- ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+- else if (ios->timing == MMC_TIMING_UHS_SDR50)
+- ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+- else if (ios->timing == MMC_TIMING_UHS_DDR50)
+- ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+- sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+- }
++ host->ops->set_uhs_signaling(host, ios->timing);
++ host->timing = ios->timing;
+
+ if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+ ((ios->timing == MMC_TIMING_UHS_SDR12) ||
+@@ -1590,8 +1553,7 @@
+ >> SDHCI_PRESET_DRV_SHIFT;
+ }
+
+- /* Re-enable SD Clock */
+- sdhci_update_clock(host);
++ host->ops->set_clock(host, host->clock);
+ } else
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+@@ -1601,7 +1563,7 @@
+ * it on each ios seems to solve the problem.
+ */
+ if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+- sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+@@ -1710,24 +1672,16 @@
+
+ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+ {
+- if (host->flags & SDHCI_DEVICE_DEAD)
+- goto out;
+-
+- if (enable)
+- host->flags |= SDHCI_SDIO_IRQ_ENABLED;
+- else
+- host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
+-
+- /* SDIO IRQ will be enabled as appropriate in runtime resume */
+- if (host->runtime_suspended)
+- goto out;
++ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
++ if (enable)
++ host->ier |= SDHCI_INT_CARD_INT;
++ else
++ host->ier &= ~SDHCI_INT_CARD_INT;
+
+- if (enable)
+- sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
+- else
+- sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
+-out:
+- mmiowb();
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++ mmiowb();
++ }
+ }
+
+ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+@@ -1735,9 +1689,18 @@
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
++ sdhci_runtime_pm_get(host);
++
+ spin_lock_irqsave(&host->lock, flags);
++ if (enable)
++ host->flags |= SDHCI_SDIO_IRQ_ENABLED;
++ else
++ host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
++
+ sdhci_enable_sdio_irq_nolock(host, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
++
++ sdhci_runtime_pm_put(host);
+ }
+
+ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+@@ -1856,22 +1819,16 @@
+
+ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ {
+- struct sdhci_host *host;
++ struct sdhci_host *host = mmc_priv(mmc);
+ u16 ctrl;
+- u32 ier;
+ int tuning_loop_counter = MAX_TUNING_LOOP;
+ unsigned long timeout;
+ int err = 0;
+- bool requires_tuning_nonuhs = false;
+ unsigned long flags;
+
+- host = mmc_priv(mmc);
+-
+ sdhci_runtime_pm_get(host);
+ spin_lock_irqsave(&host->lock, flags);
+
+- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+-
+ /*
+ * The Host Controller needs tuning only in case of SDR104 mode
+ * and for SDR50 mode when Use Tuning for SDR50 is set in the
+@@ -1879,15 +1836,18 @@
+ * If the Host Controller supports the HS200 mode then the
+ * tuning function has to be executed.
+ */
+- if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+- (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
+- host->flags & SDHCI_SDR104_NEEDS_TUNING))
+- requires_tuning_nonuhs = true;
+-
+- if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
+- requires_tuning_nonuhs)
+- ctrl |= SDHCI_CTRL_EXEC_TUNING;
+- else {
++ switch (host->timing) {
++ case MMC_TIMING_MMC_HS200:
++ case MMC_TIMING_UHS_SDR104:
++ break;
++
++ case MMC_TIMING_UHS_SDR50:
++ if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
++ host->flags & SDHCI_SDR104_NEEDS_TUNING)
++ break;
++ /* FALLTHROUGH */
++
++ default:
+ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_runtime_pm_put(host);
+ return 0;
+@@ -1900,6 +1860,8 @@
+ return err;
+ }
+
++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ /*
+@@ -1912,8 +1874,8 @@
+ * to make sure we don't hit a controller bug, we _only_
+ * enable Buffer Read Ready interrupt here.
+ */
+- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
+- sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
++ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
++ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+
+ /*
+ * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
+@@ -2046,7 +2008,8 @@
+ if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
+ err = 0;
+
+- sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_runtime_pm_put(host);
+
+@@ -2056,26 +2019,30 @@
+
+ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
+ {
+- u16 ctrl;
+-
+ /* Host Controller v3.00 defines preset value registers */
+ if (host->version < SDHCI_SPEC_300)
+ return;
+
+- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+-
+ /*
+ * We only enable or disable Preset Value if they are not already
+ * enabled or disabled respectively. Otherwise, we bail out.
+ */
+- if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+- ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
+- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+- host->flags |= SDHCI_PV_ENABLED;
+- } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+- ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
++ if (host->preset_enabled != enable) {
++ u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++
++ if (enable)
++ ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
++ else
++ ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
++
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+- host->flags &= ~SDHCI_PV_ENABLED;
++
++ if (enable)
++ host->flags |= SDHCI_PV_ENABLED;
++ else
++ host->flags &= ~SDHCI_PV_ENABLED;
++
++ host->preset_enabled = enable;
+ }
+ }
+
+@@ -2100,8 +2067,8 @@
+ pr_err("%s: Resetting controller.\n",
+ mmc_hostname(host->mmc));
+
+- sdhci_reset(host, SDHCI_RESET_CMD);
+- sdhci_reset(host, SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+@@ -2129,15 +2096,6 @@
+ * *
+ \*****************************************************************************/
+
+-static void sdhci_tasklet_card(unsigned long param)
+-{
+- struct sdhci_host *host = (struct sdhci_host*)param;
+-
+- sdhci_card_event(host->mmc);
+-
+- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+-}
+-
+ static void sdhci_tasklet_finish(unsigned long param)
+ {
+ struct sdhci_host *host;
+@@ -2174,12 +2132,12 @@
+ /* Some controllers need this kick or reset won't work here */
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
+ /* This is to force an update */
+- sdhci_update_clock(host);
++ host->ops->set_clock(host, host->clock);
+
+ /* Spec says we should do both at the same time, but Ricoh
+ controllers do not like that. */
+- sdhci_reset(host, SDHCI_RESET_CMD);
+- sdhci_reset(host, SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ }
+
+ host->mrq = NULL;
+@@ -2429,14 +2387,14 @@
+
+ static irqreturn_t sdhci_irq(int irq, void *dev_id)
+ {
+- irqreturn_t result;
++ irqreturn_t result = IRQ_NONE;
+ struct sdhci_host *host = dev_id;
+- u32 intmask, unexpected = 0;
+- int cardint = 0, max_loops = 16;
++ u32 intmask, mask, unexpected = 0;
++ int max_loops = 16;
+
+ spin_lock(&host->lock);
+
+- if (host->runtime_suspended) {
++ if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
+ spin_unlock(&host->lock);
+ pr_warning("%s: got irq while runtime suspended\n",
+ mmc_hostname(host->mmc));
+@@ -2444,88 +2402,81 @@
+ }
+
+ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+-
+ if (!intmask || intmask == 0xffffffff) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
+-again:
+- DBG("*** %s got interrupt: 0x%08x\n",
+- mmc_hostname(host->mmc), intmask);
+-
+- if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+- u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+- SDHCI_CARD_PRESENT;
+-
+- /*
+- * There is a observation on i.mx esdhc. INSERT bit will be
+- * immediately set again when it gets cleared, if a card is
+- * inserted. We have to mask the irq to prevent interrupt
+- * storm which will freeze the system. And the REMOVE gets
+- * the same situation.
+- *
+- * More testing are needed here to ensure it works for other
+- * platforms though.
+- */
+- sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
+- SDHCI_INT_CARD_REMOVE);
+- sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
+- SDHCI_INT_CARD_INSERT);
+-
+- sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
+- SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+- intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+- tasklet_schedule(&host->card_tasklet);
+- }
+-
+- if (intmask & SDHCI_INT_CMD_MASK) {
+- sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
+- SDHCI_INT_STATUS);
+- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+- }
++ do {
++ /* Clear selected interrupts. */
++ mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
++ SDHCI_INT_BUS_POWER);
++ sdhci_writel(host, mask, SDHCI_INT_STATUS);
++
++ DBG("*** %s got interrupt: 0x%08x\n",
++ mmc_hostname(host->mmc), intmask);
++
++ if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
++ u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT;
+
+- if (intmask & SDHCI_INT_DATA_MASK) {
+- sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
+- SDHCI_INT_STATUS);
+- sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+- }
++ /*
++ * There is a observation on i.mx esdhc. INSERT
++ * bit will be immediately set again when it gets
++ * cleared, if a card is inserted. We have to mask
++ * the irq to prevent interrupt storm which will
++ * freeze the system. And the REMOVE gets the
++ * same situation.
++ *
++ * More testing are needed here to ensure it works
++ * for other platforms though.
++ */
++ host->ier &= ~(SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE);
++ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
++ SDHCI_INT_CARD_INSERT;
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+- intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
++ sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+
+- intmask &= ~SDHCI_INT_ERROR;
++ host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE);
++ result = IRQ_WAKE_THREAD;
++ }
+
+- if (intmask & SDHCI_INT_BUS_POWER) {
+- pr_err("%s: Card is consuming too much power!\n",
+- mmc_hostname(host->mmc));
+- sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
+- }
++ if (intmask & SDHCI_INT_CMD_MASK)
++ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+
+- intmask &= ~SDHCI_INT_BUS_POWER;
++ if (intmask & SDHCI_INT_DATA_MASK)
++ sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+
+- if (intmask & SDHCI_INT_CARD_INT)
+- cardint = 1;
++ if (intmask & SDHCI_INT_BUS_POWER)
++ pr_err("%s: Card is consuming too much power!\n",
++ mmc_hostname(host->mmc));
+
+- intmask &= ~SDHCI_INT_CARD_INT;
++ if (intmask & SDHCI_INT_CARD_INT) {
++ sdhci_enable_sdio_irq_nolock(host, false);
++ host->thread_isr |= SDHCI_INT_CARD_INT;
++ result = IRQ_WAKE_THREAD;
++ }
+
+- if (intmask) {
+- unexpected |= intmask;
+- sdhci_writel(host, intmask, SDHCI_INT_STATUS);
+- }
++ intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
++ SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
++ SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
++ SDHCI_INT_CARD_INT);
+
+- result = IRQ_HANDLED;
++ if (intmask) {
++ unexpected |= intmask;
++ sdhci_writel(host, intmask, SDHCI_INT_STATUS);
++ }
+
+- intmask = sdhci_readl(host, SDHCI_INT_STATUS);
++ if (result == IRQ_NONE)
++ result = IRQ_HANDLED;
+
+- /*
+- * If we know we'll call the driver to signal SDIO IRQ, disregard
+- * further indications of Card Interrupt in the status to avoid a
+- * needless loop.
+- */
+- if (cardint)
+- intmask &= ~SDHCI_INT_CARD_INT;
+- if (intmask && --max_loops)
+- goto again;
++ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
++ } while (intmask && --max_loops);
+ out:
+ spin_unlock(&host->lock);
+
+@@ -2534,15 +2485,38 @@
+ mmc_hostname(host->mmc), unexpected);
+ sdhci_dumpregs(host);
+ }
+- /*
+- * We have to delay this as it calls back into the driver.
+- */
+- if (cardint && host->mmc->sdio_irqs)
+- mmc_signal_sdio_irq(host->mmc);
+
+ return result;
+ }
+
++static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
++{
++ struct sdhci_host *host = dev_id;
++ unsigned long flags;
++ u32 isr;
++
++ spin_lock_irqsave(&host->lock, flags);
++ isr = host->thread_isr;
++ host->thread_isr = 0;
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
++ sdhci_card_event(host->mmc);
++ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
++ }
++
++ if (isr & SDHCI_INT_CARD_INT) {
++ sdio_run_irqs(host->mmc);
++
++ spin_lock_irqsave(&host->lock, flags);
++ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
++ sdhci_enable_sdio_irq_nolock(host, true);
++ spin_unlock_irqrestore(&host->lock, flags);
++ }
++
++ return isr ? IRQ_HANDLED : IRQ_NONE;
++}
++
+ /*****************************************************************************\
+ * *
+ * Suspend/resume *
+@@ -2552,6 +2526,7 @@
+ #ifdef CONFIG_PM
+ void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+ {
++ int gpio_cd = mmc_gpio_get_cd(host->mmc);
+ u8 val;
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+ | SDHCI_WAKE_ON_INT;
+@@ -2559,7 +2534,8 @@
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val |= mask ;
+ /* Avoid fake wake up */
+- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
++ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION ||
++ !IS_ERR_VALUE(gpio_cd))
+ val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+ }
+@@ -2579,9 +2555,6 @@
+
+ int sdhci_suspend_host(struct sdhci_host *host)
+ {
+- if (host->ops->platform_suspend)
+- host->ops->platform_suspend(host);
+-
+ sdhci_disable_card_detection(host);
+
+ /* Disable tuning since we are suspending */
+@@ -2591,7 +2564,9 @@
+ }
+
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
++ host->ier = 0;
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+ } else {
+ sdhci_enable_irq_wakeups(host);
+@@ -2612,8 +2587,9 @@
+ }
+
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+- mmc_hostname(host->mmc), host);
++ ret = request_threaded_irq(host->irq, sdhci_irq,
++ sdhci_thread_irq, IRQF_SHARED,
++ mmc_hostname(host->mmc), host);
+ if (ret)
+ return ret;
+ } else {
+@@ -2635,9 +2611,6 @@
+
+ sdhci_enable_card_detection(host);
+
+- if (host->ops->platform_resume)
+- host->ops->platform_resume(host);
+-
+ /* Set the re-tuning expiration flag */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER)
+ host->flags |= SDHCI_NEEDS_RETUNING;
+@@ -2689,10 +2662,12 @@
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
++ host->ier &= SDHCI_INT_CARD_INT;
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+- synchronize_irq(host->irq);
++ synchronize_hardirq(host->irq);
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+@@ -2736,7 +2711,7 @@
+ host->runtime_suspended = false;
+
+ /* Enable SDIO IRQ */
+- if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
++ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ sdhci_enable_sdio_irq_nolock(host, true);
+
+ /* Enable Card Detection */
+@@ -2795,7 +2770,7 @@
+ if (debug_quirks2)
+ host->quirks2 = debug_quirks2;
+
+- sdhci_reset(host, SDHCI_RESET_ALL);
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (host->version & SDHCI_SPEC_VER_MASK)
+@@ -2855,15 +2830,29 @@
+ * (128) and potentially one alignment transfer for
+ * each of those entries.
+ */
+- host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
++ host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc),
++ ADMA_SIZE, &host->adma_addr,
++ GFP_KERNEL);
+ host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
+ if (!host->adma_desc || !host->align_buffer) {
+- kfree(host->adma_desc);
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
+ kfree(host->align_buffer);
+ pr_warning("%s: Unable to allocate ADMA "
+ "buffers. Falling back to standard DMA.\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
++ host->adma_desc = NULL;
++ host->align_buffer = NULL;
++ } else if (host->adma_addr & 3) {
++ pr_warning("%s: unable to allocate aligned ADMA descriptor\n",
++ mmc_hostname(mmc));
++ host->flags &= ~SDHCI_USE_ADMA;
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
++ kfree(host->align_buffer);
++ host->adma_desc = NULL;
++ host->align_buffer = NULL;
+ }
+ }
+
+@@ -2945,9 +2934,22 @@
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
+ host->timeout_clk = mmc->f_max / 1000;
+
+- mmc->max_discard_to = (1 << 27) / host->timeout_clk;
++ if (host->quirks2 & SDHCI_QUIRK2_NOSTD_TIMEOUT_COUNTER) {
++ if (host->ops->get_max_timeout_counter) {
++ mmc->max_discard_to =
++ host->ops->get_max_timeout_counter(host)
++ / host->timeout_clk;
++ } else {
++ pr_err("%s: Hardware doesn't specify max timeout "
++ "counter\n", mmc_hostname(mmc));
++ return -ENODEV;
++ }
++ } else {
++ mmc->max_discard_to = (1 << 27) / host->timeout_clk;
++ }
+
+ mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
++ mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD;
+
+ if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
+ host->flags |= SDHCI_AUTO_CMD12;
+@@ -3218,8 +3220,6 @@
+ /*
+ * Init tasklets.
+ */
+- tasklet_init(&host->card_tasklet,
+- sdhci_tasklet_card, (unsigned long)host);
+ tasklet_init(&host->finish_tasklet,
+ sdhci_tasklet_finish, (unsigned long)host);
+
+@@ -3236,8 +3236,8 @@
+
+ sdhci_init(host, 0);
+
+- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+- mmc_hostname(mmc), host);
++ ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
++ IRQF_SHARED, mmc_hostname(mmc), host);
+ if (ret) {
+ pr_err("%s: Failed to request IRQ %d: %d\n",
+ mmc_hostname(mmc), host->irq, ret);
+@@ -3279,12 +3279,12 @@
+
+ #ifdef SDHCI_USE_LEDS_CLASS
+ reset:
+- sdhci_reset(host, SDHCI_RESET_ALL);
+- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+ #endif
+ untasklet:
+- tasklet_kill(&host->card_tasklet);
+ tasklet_kill(&host->finish_tasklet);
+
+ return ret;
+@@ -3321,14 +3321,14 @@
+ #endif
+
+ if (!dead)
+- sdhci_reset(host, SDHCI_RESET_ALL);
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+
+ del_timer_sync(&host->timer);
+
+- tasklet_kill(&host->card_tasklet);
+ tasklet_kill(&host->finish_tasklet);
+
+ if (host->vmmc) {
+@@ -3341,7 +3341,9 @@
+ regulator_put(host->vqmmc);
+ }
+
+- kfree(host->adma_desc);
++ if (host->adma_desc)
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
+ kfree(host->align_buffer);
+
+ host->adma_desc = NULL;
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-cns3xxx.c linux-openelec/drivers/mmc/host/sdhci-cns3xxx.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-cns3xxx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-cns3xxx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -30,13 +30,12 @@
+ u16 clk;
+ unsigned long timeout;
+
+- if (clock == host->clock)
+- return;
++ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+- goto out;
++ return;
+
+ while (host->max_clk / div > clock) {
+ /*
+@@ -75,13 +74,14 @@
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+-out:
+- host->clock = clock;
+ }
+
+ static const struct sdhci_ops sdhci_cns3xxx_ops = {
+ .get_max_clock = sdhci_cns3xxx_get_max_clk,
+ .set_clock = sdhci_cns3xxx_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
+@@ -90,8 +90,7 @@
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+- SDHCI_QUIRK_NONSTANDARD_CLOCK,
++ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ };
+
+ static int sdhci_cns3xxx_probe(struct platform_device *pdev)
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci.c.orig linux-openelec/drivers/mmc/host/sdhci.c.orig
+--- linux-3.14.36/drivers/mmc/host/sdhci.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mmc/host/sdhci.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3388 @@
++/*
++ * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
++ *
++ * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * Thanks to the following companies for their support:
++ *
++ * - JMicron (hardware and technical support)
++ */
++
++#include <linux/delay.h>
++#include <linux/highmem.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/scatterlist.h>
++#include <linux/regulator/consumer.h>
++#include <linux/pm_runtime.h>
++
++#include <linux/leds.h>
++
++#include <linux/mmc/mmc.h>
++#include <linux/mmc/host.h>
++#include <linux/mmc/card.h>
++#include <linux/mmc/slot-gpio.h>
++
++#include "sdhci.h"
++
++#define DRIVER_NAME "sdhci"
++
++#define DBG(f, x...) \
++ pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
++
++#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
++ defined(CONFIG_MMC_SDHCI_MODULE))
++#define SDHCI_USE_LEDS_CLASS
++#endif
++
++#define MAX_TUNING_LOOP 40
++
++#define ADMA_SIZE ((128 * 2 + 1) * 4)
++
++static unsigned int debug_quirks = 0;
++static unsigned int debug_quirks2;
++
++static void sdhci_finish_data(struct sdhci_host *);
++
++static void sdhci_finish_command(struct sdhci_host *);
++static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
++static void sdhci_tuning_timer(unsigned long data);
++static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
++
++#ifdef CONFIG_PM_RUNTIME
++static int sdhci_runtime_pm_get(struct sdhci_host *host);
++static int sdhci_runtime_pm_put(struct sdhci_host *host);
++static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
++static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
++#else
++static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
++{
++ return 0;
++}
++static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
++{
++ return 0;
++}
++static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
++{
++}
++static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
++{
++}
++#endif
++
++static void sdhci_dumpregs(struct sdhci_host *host)
++{
++ pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
++ mmc_hostname(host->mmc));
++
++ pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
++ sdhci_readl(host, SDHCI_DMA_ADDRESS),
++ sdhci_readw(host, SDHCI_HOST_VERSION));
++ pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
++ sdhci_readw(host, SDHCI_BLOCK_SIZE),
++ sdhci_readw(host, SDHCI_BLOCK_COUNT));
++ pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
++ sdhci_readl(host, SDHCI_ARGUMENT),
++ sdhci_readw(host, SDHCI_TRANSFER_MODE));
++ pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
++ sdhci_readl(host, SDHCI_PRESENT_STATE),
++ sdhci_readb(host, SDHCI_HOST_CONTROL));
++ pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
++ sdhci_readb(host, SDHCI_POWER_CONTROL),
++ sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
++ pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
++ sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
++ sdhci_readw(host, SDHCI_CLOCK_CONTROL));
++ pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
++ sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
++ sdhci_readl(host, SDHCI_INT_STATUS));
++ pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
++ sdhci_readl(host, SDHCI_INT_ENABLE),
++ sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
++ pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
++ sdhci_readw(host, SDHCI_ACMD12_ERR),
++ sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
++ pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
++ sdhci_readl(host, SDHCI_CAPABILITIES),
++ sdhci_readl(host, SDHCI_CAPABILITIES_1));
++ pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
++ sdhci_readw(host, SDHCI_COMMAND),
++ sdhci_readl(host, SDHCI_MAX_CURRENT));
++ pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
++ sdhci_readw(host, SDHCI_HOST_CONTROL2));
++
++ if (host->flags & SDHCI_USE_ADMA)
++ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
++ readl(host->ioaddr + SDHCI_ADMA_ERROR),
++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
++
++ pr_debug(DRIVER_NAME ": ===========================================\n");
++}
++
++/*****************************************************************************\
++ * *
++ * Low level functions *
++ * *
++\*****************************************************************************/
++
++static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
++{
++ u32 present;
++ int gpio_cd = mmc_gpio_get_cd(host->mmc);
++
++ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
++ (host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
++ !IS_ERR_VALUE(gpio_cd))
++ return;
++
++ if (enable) {
++ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT;
++
++ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
++ SDHCI_INT_CARD_INSERT;
++ } else {
++ host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
++ }
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++}
++
++static void sdhci_enable_card_detection(struct sdhci_host *host)
++{
++ sdhci_set_card_detection(host, true);
++}
++
++static void sdhci_disable_card_detection(struct sdhci_host *host)
++{
++ sdhci_set_card_detection(host, false);
++}
++
++void sdhci_reset(struct sdhci_host *host, u8 mask)
++{
++ unsigned long timeout;
++
++ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
++
++ if (mask & SDHCI_RESET_ALL) {
++ host->clock = 0;
++ /* Reset-all turns off SD Bus Power */
++ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
++ sdhci_runtime_pm_bus_off(host);
++ }
++
++ /* Wait max 100 ms */
++ timeout = 100;
++
++ /* hw clears the bit when it's done */
++ while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
++ if (timeout == 0) {
++ pr_err("%s: Reset 0x%x never completed.\n",
++ mmc_hostname(host->mmc), (int)mask);
++ sdhci_dumpregs(host);
++ return;
++ }
++ timeout--;
++ mdelay(1);
++ }
++}
++EXPORT_SYMBOL_GPL(sdhci_reset);
++
++static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
++{
++ if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
++ if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT))
++ return;
++ }
++
++ host->ops->reset(host, mask);
++
++ if (mask & SDHCI_RESET_ALL) {
++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
++ if (host->ops->enable_dma)
++ host->ops->enable_dma(host);
++ }
++
++ /* Resetting the controller clears many */
++ host->preset_enabled = false;
++ }
++}
++
++static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
++
++static void sdhci_init(struct sdhci_host *host, int soft)
++{
++ if (soft)
++ sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
++ else
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
++
++ host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
++ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
++ SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
++ SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
++ SDHCI_INT_RESPONSE;
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++
++ if (soft) {
++ /* force clock reconfiguration */
++ host->clock = 0;
++ sdhci_set_ios(host->mmc, &host->mmc->ios);
++ }
++}
++
++static void sdhci_reinit(struct sdhci_host *host)
++{
++ sdhci_init(host, 0);
++ /*
++ * Retuning stuffs are affected by different cards inserted and only
++ * applicable to UHS-I cards. So reset these fields to their initial
++ * value when card is removed.
++ */
++ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
++ host->flags &= ~SDHCI_USING_RETUNING_TIMER;
++
++ del_timer_sync(&host->tuning_timer);
++ host->flags &= ~SDHCI_NEEDS_RETUNING;
++ host->mmc->max_blk_count =
++ (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
++ }
++ sdhci_enable_card_detection(host);
++}
++
++static void sdhci_activate_led(struct sdhci_host *host)
++{
++ u8 ctrl;
++
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++ ctrl |= SDHCI_CTRL_LED;
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++}
++
++static void sdhci_deactivate_led(struct sdhci_host *host)
++{
++ u8 ctrl;
++
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++ ctrl &= ~SDHCI_CTRL_LED;
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++}
++
++#ifdef SDHCI_USE_LEDS_CLASS
++static void sdhci_led_control(struct led_classdev *led,
++ enum led_brightness brightness)
++{
++ struct sdhci_host *host = container_of(led, struct sdhci_host, led);
++ unsigned long flags;
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ if (host->runtime_suspended)
++ goto out;
++
++ if (brightness == LED_OFF)
++ sdhci_deactivate_led(host);
++ else
++ sdhci_activate_led(host);
++out:
++ spin_unlock_irqrestore(&host->lock, flags);
++}
++#endif
++
++/*****************************************************************************\
++ * *
++ * Core functions *
++ * *
++\*****************************************************************************/
++
++static void sdhci_read_block_pio(struct sdhci_host *host)
++{
++ unsigned long flags;
++ size_t blksize, len, chunk;
++ u32 uninitialized_var(scratch);
++ u8 *buf;
++
++ DBG("PIO reading\n");
++
++ blksize = host->data->blksz;
++ chunk = 0;
++
++ local_irq_save(flags);
++
++ while (blksize) {
++ if (!sg_miter_next(&host->sg_miter))
++ BUG();
++
++ len = min(host->sg_miter.length, blksize);
++
++ blksize -= len;
++ host->sg_miter.consumed = len;
++
++ buf = host->sg_miter.addr;
++
++ while (len) {
++ if (chunk == 0) {
++ scratch = sdhci_readl(host, SDHCI_BUFFER);
++ chunk = 4;
++ }
++
++ *buf = scratch & 0xFF;
++
++ buf++;
++ scratch >>= 8;
++ chunk--;
++ len--;
++ }
++ }
++
++ sg_miter_stop(&host->sg_miter);
++
++ local_irq_restore(flags);
++}
++
++static void sdhci_write_block_pio(struct sdhci_host *host)
++{
++ unsigned long flags;
++ size_t blksize, len, chunk;
++ u32 scratch;
++ u8 *buf;
++
++ DBG("PIO writing\n");
++
++ blksize = host->data->blksz;
++ chunk = 0;
++ scratch = 0;
++
++ local_irq_save(flags);
++
++ while (blksize) {
++ if (!sg_miter_next(&host->sg_miter))
++ BUG();
++
++ len = min(host->sg_miter.length, blksize);
++
++ blksize -= len;
++ host->sg_miter.consumed = len;
++
++ buf = host->sg_miter.addr;
++
++ while (len) {
++ scratch |= (u32)*buf << (chunk * 8);
++
++ buf++;
++ chunk++;
++ len--;
++
++ if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
++ sdhci_writel(host, scratch, SDHCI_BUFFER);
++ chunk = 0;
++ scratch = 0;
++ }
++ }
++ }
++
++ sg_miter_stop(&host->sg_miter);
++
++ local_irq_restore(flags);
++}
++
++static void sdhci_transfer_pio(struct sdhci_host *host)
++{
++ u32 mask;
++
++ BUG_ON(!host->data);
++
++ if (host->blocks == 0)
++ return;
++
++ if (host->data->flags & MMC_DATA_READ)
++ mask = SDHCI_DATA_AVAILABLE;
++ else
++ mask = SDHCI_SPACE_AVAILABLE;
++
++ /*
++ * Some controllers (JMicron JMB38x) mess up the buffer bits
++ * for transfers < 4 bytes. As long as it is just one block,
++ * we can ignore the bits.
++ */
++ if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
++ (host->data->blocks == 1))
++ mask = ~0;
++
++ while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
++ if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
++ udelay(100);
++
++ if (host->data->flags & MMC_DATA_READ)
++ sdhci_read_block_pio(host);
++ else
++ sdhci_write_block_pio(host);
++
++ host->blocks--;
++ if (host->blocks == 0)
++ break;
++ }
++
++ DBG("PIO transfer complete.\n");
++}
++
++static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
++{
++ local_irq_save(*flags);
++ return kmap_atomic(sg_page(sg)) + sg->offset;
++}
++
++static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
++{
++ kunmap_atomic(buffer);
++ local_irq_restore(*flags);
++}
++
++static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
++{
++ __le32 *dataddr = (__le32 __force *)(desc + 4);
++ __le16 *cmdlen = (__le16 __force *)desc;
++
++ /* SDHCI specification says ADMA descriptors should be 4 byte
++ * aligned, so using 16 or 32bit operations should be safe. */
++
++ cmdlen[0] = cpu_to_le16(cmd);
++ cmdlen[1] = cpu_to_le16(len);
++
++ dataddr[0] = cpu_to_le32(addr);
++}
++
++static int sdhci_adma_table_pre(struct sdhci_host *host,
++ struct mmc_data *data)
++{
++ int direction;
++
++ u8 *desc;
++ u8 *align;
++ dma_addr_t addr;
++ dma_addr_t align_addr;
++ int len, offset;
++
++ struct scatterlist *sg;
++ int i;
++ char *buffer;
++ unsigned long flags;
++
++ /*
++ * The spec does not specify endianness of descriptor table.
++ * We currently guess that it is LE.
++ */
++
++ if (data->flags & MMC_DATA_READ)
++ direction = DMA_FROM_DEVICE;
++ else
++ direction = DMA_TO_DEVICE;
++
++ host->align_addr = dma_map_single(mmc_dev(host->mmc),
++ host->align_buffer, 128 * 4, direction);
++ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
++ goto fail;
++ BUG_ON(host->align_addr & 0x3);
++
++ host->sg_count = dma_map_sg(mmc_dev(host->mmc),
++ data->sg, data->sg_len, direction);
++ if (host->sg_count == 0)
++ goto unmap_align;
++
++ desc = host->adma_desc;
++ align = host->align_buffer;
++
++ align_addr = host->align_addr;
++
++ for_each_sg(data->sg, sg, host->sg_count, i) {
++ addr = sg_dma_address(sg);
++ len = sg_dma_len(sg);
++
++ /*
++ * The SDHCI specification states that ADMA
++ * addresses must be 32-bit aligned. If they
++ * aren't, then we use a bounce buffer for
++ * the (up to three) bytes that screw up the
++ * alignment.
++ */
++ offset = (4 - (addr & 0x3)) & 0x3;
++ if (offset) {
++ if (data->flags & MMC_DATA_WRITE) {
++ buffer = sdhci_kmap_atomic(sg, &flags);
++ WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
++ memcpy(align, buffer, offset);
++ sdhci_kunmap_atomic(buffer, &flags);
++ }
++
++ /* tran, valid */
++ sdhci_set_adma_desc(desc, align_addr, offset, 0x21);
++
++ BUG_ON(offset > 65536);
++
++ align += 4;
++ align_addr += 4;
++
++ desc += 8;
++
++ addr += offset;
++ len -= offset;
++ }
++
++ BUG_ON(len > 65536);
++
++ /* tran, valid */
++ sdhci_set_adma_desc(desc, addr, len, 0x21);
++ desc += 8;
++
++ /*
++ * If this triggers then we have a calculation bug
++ * somewhere. :/
++ */
++ WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
++ }
++
++ if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
++ /*
++ * Mark the last descriptor as the terminating descriptor
++ */
++ if (desc != host->adma_desc) {
++ desc -= 8;
++ desc[0] |= 0x2; /* end */
++ }
++ } else {
++ /*
++ * Add a terminating entry.
++ */
++
++ /* nop, end, valid */
++ sdhci_set_adma_desc(desc, 0, 0, 0x3);
++ }
++
++ /*
++ * Resync align buffer as we might have changed it.
++ */
++ if (data->flags & MMC_DATA_WRITE) {
++ dma_sync_single_for_device(mmc_dev(host->mmc),
++ host->align_addr, 128 * 4, direction);
++ }
++
++ return 0;
++
++unmap_align:
++ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
++ 128 * 4, direction);
++fail:
++ return -EINVAL;
++}
++
++static void sdhci_adma_table_post(struct sdhci_host *host,
++ struct mmc_data *data)
++{
++ int direction;
++
++ struct scatterlist *sg;
++ int i, size;
++ u8 *align;
++ char *buffer;
++ unsigned long flags;
++ bool has_unaligned;
++
++ if (data->flags & MMC_DATA_READ)
++ direction = DMA_FROM_DEVICE;
++ else
++ direction = DMA_TO_DEVICE;
++
++ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
++ 128 * 4, direction);
++
++ /* Do a quick scan of the SG list for any unaligned mappings */
++ has_unaligned = false;
++ for_each_sg(data->sg, sg, host->sg_count, i)
++ if (sg_dma_address(sg) & 3) {
++ has_unaligned = true;
++ break;
++ }
++
++ if (has_unaligned && data->flags & MMC_DATA_READ) {
++ dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
++ data->sg_len, direction);
++
++ align = host->align_buffer;
++
++ for_each_sg(data->sg, sg, host->sg_count, i) {
++ if (sg_dma_address(sg) & 0x3) {
++ size = 4 - (sg_dma_address(sg) & 0x3);
++
++ buffer = sdhci_kmap_atomic(sg, &flags);
++ WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
++ memcpy(buffer, align, size);
++ sdhci_kunmap_atomic(buffer, &flags);
++
++ align += 4;
++ }
++ }
++ }
++
++ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
++ data->sg_len, direction);
++}
++
++static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
++{
++ u8 count;
++ struct mmc_data *data = cmd->data;
++ unsigned target_timeout, current_timeout;
++
++ /*
++ * If the host controller provides us with an incorrect timeout
++ * value, just skip the check and use 0xE. The hardware may take
++ * longer to time out, but that's much better than having a too-short
++ * timeout value.
++ */
++ if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
++ return 0xE;
++
++ /* Unspecified timeout, assume max */
++ if (!data && !cmd->cmd_timeout_ms)
++ return 0xE;
++
++ /* timeout in us */
++ if (!data)
++ target_timeout = cmd->cmd_timeout_ms * 1000;
++ else {
++ target_timeout = data->timeout_ns / 1000;
++ if (host->clock)
++ target_timeout += data->timeout_clks / host->clock;
++ }
++
++ /*
++ * Figure out needed cycles.
++ * We do this in steps in order to fit inside a 32 bit int.
++ * The first step is the minimum timeout, which will have a
++ * minimum resolution of 6 bits:
++ * (1) 2^13*1000 > 2^22,
++ * (2) host->timeout_clk < 2^16
++ * =>
++ * (1) / (2) > 2^6
++ */
++ count = 0;
++ current_timeout = (1 << 13) * 1000 / host->timeout_clk;
++ while (current_timeout < target_timeout) {
++ count++;
++ current_timeout <<= 1;
++ if (count >= 0xF)
++ break;
++ }
++
++ if (count >= 0xF) {
++ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
++ mmc_hostname(host->mmc), count, cmd->opcode);
++ count = 0xE;
++ }
++
++ return count;
++}
++
++static void sdhci_set_transfer_irqs(struct sdhci_host *host)
++{
++ u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
++ u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
++
++ if (host->flags & SDHCI_REQ_USE_DMA)
++ host->ier = (host->ier & ~pio_irqs) | dma_irqs;
++ else
++ host->ier = (host->ier & ~dma_irqs) | pio_irqs;
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++}
++
++static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
++{
++ u8 count;
++ u8 ctrl;
++ struct mmc_data *data = cmd->data;
++ int ret;
++
++ WARN_ON(host->data);
++
++ if (data || (cmd->flags & MMC_RSP_BUSY)) {
++ count = sdhci_calc_timeout(host, cmd);
++ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
++ }
++
++ if (!data)
++ return;
++
++ /* Sanity checks */
++ BUG_ON(data->blksz * data->blocks > 524288);
++ BUG_ON(data->blksz > host->mmc->max_blk_size);
++ BUG_ON(data->blocks > 65535);
++
++ host->data = data;
++ host->data_early = 0;
++ host->data->bytes_xfered = 0;
++
++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
++ host->flags |= SDHCI_REQ_USE_DMA;
++
++ /*
++ * FIXME: This doesn't account for merging when mapping the
++ * scatterlist.
++ */
++ if (host->flags & SDHCI_REQ_USE_DMA) {
++ int broken, i;
++ struct scatterlist *sg;
++
++ broken = 0;
++ if (host->flags & SDHCI_USE_ADMA) {
++ if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
++ broken = 1;
++ } else {
++ if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
++ broken = 1;
++ }
++
++ if (unlikely(broken)) {
++ for_each_sg(data->sg, sg, data->sg_len, i) {
++ if (sg->length & 0x3) {
++ DBG("Reverting to PIO because of "
++ "transfer size (%d)\n",
++ sg->length);
++ host->flags &= ~SDHCI_REQ_USE_DMA;
++ break;
++ }
++ }
++ }
++ }
++
++ /*
++ * The assumption here being that alignment is the same after
++ * translation to device address space.
++ */
++ if (host->flags & SDHCI_REQ_USE_DMA) {
++ int broken, i;
++ struct scatterlist *sg;
++
++ broken = 0;
++ if (host->flags & SDHCI_USE_ADMA) {
++ /*
++ * As we use 3 byte chunks to work around
++ * alignment problems, we need to check this
++ * quirk.
++ */
++ if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
++ broken = 1;
++ } else {
++ if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
++ broken = 1;
++ }
++
++ if (unlikely(broken)) {
++ for_each_sg(data->sg, sg, data->sg_len, i) {
++ if (sg->offset & 0x3) {
++ DBG("Reverting to PIO because of "
++ "bad alignment\n");
++ host->flags &= ~SDHCI_REQ_USE_DMA;
++ break;
++ }
++ }
++ }
++ }
++
++ if (host->flags & SDHCI_REQ_USE_DMA) {
++ if (host->flags & SDHCI_USE_ADMA) {
++ ret = sdhci_adma_table_pre(host, data);
++ if (ret) {
++ /*
++ * This only happens when someone fed
++ * us an invalid request.
++ */
++ WARN_ON(1);
++ host->flags &= ~SDHCI_REQ_USE_DMA;
++ } else {
++ sdhci_writel(host, host->adma_addr,
++ SDHCI_ADMA_ADDRESS);
++ }
++ } else {
++ int sg_cnt;
++
++ sg_cnt = dma_map_sg(mmc_dev(host->mmc),
++ data->sg, data->sg_len,
++ (data->flags & MMC_DATA_READ) ?
++ DMA_FROM_DEVICE :
++ DMA_TO_DEVICE);
++ if (sg_cnt == 0) {
++ /*
++ * This only happens when someone fed
++ * us an invalid request.
++ */
++ WARN_ON(1);
++ host->flags &= ~SDHCI_REQ_USE_DMA;
++ } else {
++ WARN_ON(sg_cnt != 1);
++ sdhci_writel(host, sg_dma_address(data->sg),
++ SDHCI_DMA_ADDRESS);
++ }
++ }
++ }
++
++ /*
++ * Always adjust the DMA selection as some controllers
++ * (e.g. JMicron) can't do PIO properly when the selection
++ * is ADMA.
++ */
++ if (host->version >= SDHCI_SPEC_200) {
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++ ctrl &= ~SDHCI_CTRL_DMA_MASK;
++ if ((host->flags & SDHCI_REQ_USE_DMA) &&
++ (host->flags & SDHCI_USE_ADMA))
++ ctrl |= SDHCI_CTRL_ADMA32;
++ else
++ ctrl |= SDHCI_CTRL_SDMA;
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++ }
++
++ if (!(host->flags & SDHCI_REQ_USE_DMA)) {
++ int flags;
++
++ flags = SG_MITER_ATOMIC;
++ if (host->data->flags & MMC_DATA_READ)
++ flags |= SG_MITER_TO_SG;
++ else
++ flags |= SG_MITER_FROM_SG;
++ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
++ host->blocks = data->blocks;
++ }
++
++ sdhci_set_transfer_irqs(host);
++
++ /* Set the DMA boundary value and block size */
++ sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
++ data->blksz), SDHCI_BLOCK_SIZE);
++ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
++}
++
++static void sdhci_set_transfer_mode(struct sdhci_host *host,
++ struct mmc_command *cmd)
++{
++ u16 mode;
++ struct mmc_data *data = cmd->data;
++
++ if (data == NULL) {
++ /* clear Auto CMD settings for no data CMDs */
++ mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
++ sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
++ SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
++ return;
++ }
++
++ WARN_ON(!host->data);
++
++ mode = SDHCI_TRNS_BLK_CNT_EN;
++ if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
++ mode |= SDHCI_TRNS_MULTI;
++ /*
++ * If we are sending CMD23, CMD12 never gets sent
++ * on successful completion (so no Auto-CMD12).
++ */
++ if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
++ mode |= SDHCI_TRNS_AUTO_CMD12;
++ else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
++ mode |= SDHCI_TRNS_AUTO_CMD23;
++ sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
++ }
++ }
++
++ if (data->flags & MMC_DATA_READ)
++ mode |= SDHCI_TRNS_READ;
++ if (host->flags & SDHCI_REQ_USE_DMA)
++ mode |= SDHCI_TRNS_DMA;
++
++ sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
++}
++
++static void sdhci_finish_data(struct sdhci_host *host)
++{
++ struct mmc_data *data;
++
++ BUG_ON(!host->data);
++
++ data = host->data;
++ host->data = NULL;
++
++ if (host->flags & SDHCI_REQ_USE_DMA) {
++ if (host->flags & SDHCI_USE_ADMA)
++ sdhci_adma_table_post(host, data);
++ else {
++ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
++ data->sg_len, (data->flags & MMC_DATA_READ) ?
++ DMA_FROM_DEVICE : DMA_TO_DEVICE);
++ }
++ }
++
++ /*
++ * The specification states that the block count register must
++ * be updated, but it does not specify at what point in the
++ * data flow. That makes the register entirely useless to read
++ * back so we have to assume that nothing made it to the card
++ * in the event of an error.
++ */
++ if (data->error)
++ data->bytes_xfered = 0;
++ else
++ data->bytes_xfered = data->blksz * data->blocks;
++
++ /*
++ * Need to send CMD12 if -
++ * a) open-ended multiblock transfer (no CMD23)
++ * b) error in multiblock transfer
++ */
++ if (data->stop &&
++ (data->error ||
++ !host->mrq->sbc)) {
++
++ /*
++ * The controller needs a reset of internal state machines
++ * upon error conditions.
++ */
++ if (data->error) {
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
++ }
++
++ sdhci_send_command(host, data->stop);
++ } else
++ tasklet_schedule(&host->finish_tasklet);
++}
++
++void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
++{
++ int flags;
++ u32 mask;
++ unsigned long timeout;
++
++ WARN_ON(host->cmd);
++
++ /* Wait max 10 ms */
++ timeout = 10;
++
++ mask = SDHCI_CMD_INHIBIT;
++ if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
++ mask |= SDHCI_DATA_INHIBIT;
++
++ /* We shouldn't wait for data inihibit for stop commands, even
++ though they might use busy signaling */
++ if (host->mrq->data && (cmd == host->mrq->data->stop))
++ mask &= ~SDHCI_DATA_INHIBIT;
++
++ while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
++ if (timeout == 0) {
++ pr_err("%s: Controller never released "
++ "inhibit bit(s).\n", mmc_hostname(host->mmc));
++ sdhci_dumpregs(host);
++ cmd->error = -EIO;
++ tasklet_schedule(&host->finish_tasklet);
++ return;
++ }
++ timeout--;
++ mdelay(1);
++ }
++
++ timeout = jiffies;
++ if (!cmd->data && cmd->cmd_timeout_ms > 9000)
++ timeout += DIV_ROUND_UP(cmd->cmd_timeout_ms, 1000) * HZ + HZ;
++ else
++ timeout += 10 * HZ;
++ mod_timer(&host->timer, timeout);
++
++ host->cmd = cmd;
++
++ sdhci_prepare_data(host, cmd);
++
++ sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
++
++ sdhci_set_transfer_mode(host, cmd);
++
++ if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
++ pr_err("%s: Unsupported response type!\n",
++ mmc_hostname(host->mmc));
++ cmd->error = -EINVAL;
++ tasklet_schedule(&host->finish_tasklet);
++ return;
++ }
++
++ if (!(cmd->flags & MMC_RSP_PRESENT))
++ flags = SDHCI_CMD_RESP_NONE;
++ else if (cmd->flags & MMC_RSP_136)
++ flags = SDHCI_CMD_RESP_LONG;
++ else if (cmd->flags & MMC_RSP_BUSY)
++ flags = SDHCI_CMD_RESP_SHORT_BUSY;
++ else
++ flags = SDHCI_CMD_RESP_SHORT;
++
++ if (cmd->flags & MMC_RSP_CRC)
++ flags |= SDHCI_CMD_CRC;
++ if (cmd->flags & MMC_RSP_OPCODE)
++ flags |= SDHCI_CMD_INDEX;
++
++ /* CMD19 is special in that the Data Present Select should be set */
++ if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
++ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
++ flags |= SDHCI_CMD_DATA;
++
++ sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
++}
++EXPORT_SYMBOL_GPL(sdhci_send_command);
++
++static void sdhci_finish_command(struct sdhci_host *host)
++{
++ int i;
++
++ BUG_ON(host->cmd == NULL);
++
++ if (host->cmd->flags & MMC_RSP_PRESENT) {
++ if (host->cmd->flags & MMC_RSP_136) {
++ /* CRC is stripped so we need to do some shifting. */
++ for (i = 0;i < 4;i++) {
++ host->cmd->resp[i] = sdhci_readl(host,
++ SDHCI_RESPONSE + (3-i)*4) << 8;
++ if (i != 3)
++ host->cmd->resp[i] |=
++ sdhci_readb(host,
++ SDHCI_RESPONSE + (3-i)*4-1);
++ }
++ } else {
++ host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
++ }
++ }
++
++ host->cmd->error = 0;
++
++ /* Finished CMD23, now send actual command. */
++ if (host->cmd == host->mrq->sbc) {
++ host->cmd = NULL;
++ sdhci_send_command(host, host->mrq->cmd);
++ } else {
++
++ /* Processed actual command. */
++ if (host->data && host->data_early)
++ sdhci_finish_data(host);
++
++ if (!host->cmd->data)
++ tasklet_schedule(&host->finish_tasklet);
++
++ host->cmd = NULL;
++ }
++}
++
++static u16 sdhci_get_preset_value(struct sdhci_host *host)
++{
++ u16 preset = 0;
++
++ switch (host->timing) {
++ case MMC_TIMING_UHS_SDR12:
++ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
++ break;
++ case MMC_TIMING_UHS_SDR25:
++ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
++ break;
++ case MMC_TIMING_UHS_SDR50:
++ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
++ break;
++ case MMC_TIMING_UHS_SDR104:
++ case MMC_TIMING_MMC_HS200:
++ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
++ break;
++ case MMC_TIMING_UHS_DDR50:
++ preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
++ break;
++ default:
++ pr_warn("%s: Invalid UHS-I mode selected\n",
++ mmc_hostname(host->mmc));
++ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
++ break;
++ }
++ return preset;
++}
++
++void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
++{
++ int div = 0; /* Initialized for compiler warning */
++ int real_div = div, clk_mul = 1;
++ u16 clk = 0;
++ unsigned long timeout;
++
++ host->mmc->actual_clock = 0;
++
++ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
++
++ if (clock == 0)
++ return;
++
++ if (host->version >= SDHCI_SPEC_300) {
++ if (host->preset_enabled) {
++ u16 pre_val;
++
++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++ pre_val = sdhci_get_preset_value(host);
++ div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
++ >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
++ if (host->clk_mul &&
++ (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
++ clk = SDHCI_PROG_CLOCK_MODE;
++ real_div = div + 1;
++ clk_mul = host->clk_mul;
++ } else {
++ real_div = max_t(int, 1, div << 1);
++ }
++ goto clock_set;
++ }
++
++ /*
++ * Check if the Host Controller supports Programmable Clock
++ * Mode.
++ */
++ if (host->clk_mul) {
++ for (div = 1; div <= 1024; div++) {
++ if ((host->max_clk * host->clk_mul / div)
++ <= clock)
++ break;
++ }
++ /*
++ * Set Programmable Clock Mode in the Clock
++ * Control register.
++ */
++ clk = SDHCI_PROG_CLOCK_MODE;
++ real_div = div;
++ clk_mul = host->clk_mul;
++ div--;
++ } else {
++ /* Version 3.00 divisors must be a multiple of 2. */
++ if (host->max_clk <= clock)
++ div = 1;
++ else {
++ for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
++ div += 2) {
++ if ((host->max_clk / div) <= clock)
++ break;
++ }
++ }
++ real_div = div;
++ div >>= 1;
++ }
++ } else {
++ /* Version 2.00 divisors must be a power of 2. */
++ for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
++ if ((host->max_clk / div) <= clock)
++ break;
++ }
++ real_div = div;
++ div >>= 1;
++ }
++
++clock_set:
++ if (real_div)
++ host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
++
++ clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
++ clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
++ << SDHCI_DIVIDER_HI_SHIFT;
++ clk |= SDHCI_CLOCK_INT_EN;
++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
++
++ /* Wait max 20 ms */
++ timeout = 20;
++ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
++ & SDHCI_CLOCK_INT_STABLE)) {
++ if (timeout == 0) {
++ pr_err("%s: Internal clock never "
++ "stabilised.\n", mmc_hostname(host->mmc));
++ sdhci_dumpregs(host);
++ return;
++ }
++ timeout--;
++ mdelay(1);
++ }
++
++ clk |= SDHCI_CLOCK_CARD_EN;
++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
++}
++EXPORT_SYMBOL_GPL(sdhci_set_clock);
++
++static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++ unsigned short vdd)
++{
++ u8 pwr = 0;
++
++ if (mode != MMC_POWER_OFF) {
++ switch (1 << vdd) {
++ case MMC_VDD_165_195:
++ pwr = SDHCI_POWER_180;
++ break;
++ case MMC_VDD_29_30:
++ case MMC_VDD_30_31:
++ pwr = SDHCI_POWER_300;
++ break;
++ case MMC_VDD_32_33:
++ case MMC_VDD_33_34:
++ pwr = SDHCI_POWER_330;
++ break;
++ default:
++ BUG();
++ }
++ }
++
++ if (host->pwr == pwr)
++ return;
++
++ host->pwr = pwr;
++
++ if (pwr == 0) {
++ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
++ sdhci_runtime_pm_bus_off(host);
++ vdd = 0;
++ } else {
++ /*
++ * Spec says that we should clear the power reg before setting
++ * a new value. Some controllers don't seem to like this though.
++ */
++ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
++ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++
++ /*
++ * At least the Marvell CaFe chip gets confused if we set the
++ * voltage and set turn on power at the same time, so set the
++ * voltage first.
++ */
++ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
++ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
++
++ pwr |= SDHCI_POWER_ON;
++
++ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
++
++ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
++ sdhci_runtime_pm_bus_on(host);
++
++ /*
++ * Some controllers need an extra 10ms delay of 10ms before
++ * they can apply clock after applying power
++ */
++ if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
++ mdelay(10);
++ }
++
++ if (host->vmmc) {
++ spin_unlock_irq(&host->lock);
++ mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd);
++ spin_lock_irq(&host->lock);
++ }
++}
++
++/*****************************************************************************\
++ * *
++ * MMC callbacks *
++ * *
++\*****************************************************************************/
++
++static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
++{
++ struct sdhci_host *host;
++ int present;
++ unsigned long flags;
++ u32 tuning_opcode;
++
++ host = mmc_priv(mmc);
++
++ sdhci_runtime_pm_get(host);
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ WARN_ON(host->mrq != NULL);
++
++#ifndef SDHCI_USE_LEDS_CLASS
++ sdhci_activate_led(host);
++#endif
++
++ /*
++ * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
++ * requests if Auto-CMD12 is enabled.
++ */
++ if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
++ if (mrq->stop) {
++ mrq->data->stop = NULL;
++ mrq->stop = NULL;
++ }
++ }
++
++ host->mrq = mrq;
++
++ /*
++ * Firstly check card presence from cd-gpio. The return could
++ * be one of the following possibilities:
++ * negative: cd-gpio is not available
++ * zero: cd-gpio is used, and card is removed
++ * one: cd-gpio is used, and card is present
++ */
++ present = mmc_gpio_get_cd(host->mmc);
++ if (present < 0) {
++ /* If polling, assume that the card is always present. */
++ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
++ present = 1;
++ else
++ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT;
++ }
++
++ if (!present || host->flags & SDHCI_DEVICE_DEAD) {
++ host->mrq->cmd->error = -ENOMEDIUM;
++ tasklet_schedule(&host->finish_tasklet);
++ } else {
++ u32 present_state;
++
++ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
++ /*
++ * Check if the re-tuning timer has already expired and there
++ * is no on-going data transfer. If so, we need to execute
++ * tuning procedure before sending command.
++ */
++ if ((host->flags & SDHCI_NEEDS_RETUNING) &&
++ !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
++ if (mmc->card) {
++ /* eMMC uses cmd21 but sd and sdio use cmd19 */
++ tuning_opcode =
++ mmc->card->type == MMC_TYPE_MMC ?
++ MMC_SEND_TUNING_BLOCK_HS200 :
++ MMC_SEND_TUNING_BLOCK;
++
++ /* Here we need to set the host->mrq to NULL,
++ * in case the pending finish_tasklet
++ * finishes it incorrectly.
++ */
++ host->mrq = NULL;
++
++ spin_unlock_irqrestore(&host->lock, flags);
++ sdhci_execute_tuning(mmc, tuning_opcode);
++ spin_lock_irqsave(&host->lock, flags);
++
++ /* Restore original mmc_request structure */
++ host->mrq = mrq;
++ }
++ }
++
++ if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
++ sdhci_send_command(host, mrq->sbc);
++ else
++ sdhci_send_command(host, mrq->cmd);
++ }
++
++ mmiowb();
++ spin_unlock_irqrestore(&host->lock, flags);
++}
++
++void sdhci_set_bus_width(struct sdhci_host *host, int width)
++{
++ u8 ctrl;
++
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++ if (width == MMC_BUS_WIDTH_8) {
++ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ if (host->version >= SDHCI_SPEC_300)
++ ctrl |= SDHCI_CTRL_8BITBUS;
++ } else {
++ if (host->version >= SDHCI_SPEC_300)
++ ctrl &= ~SDHCI_CTRL_8BITBUS;
++ if (width == MMC_BUS_WIDTH_4)
++ ctrl |= SDHCI_CTRL_4BITBUS;
++ else
++ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ }
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++}
++EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
++
++void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
++{
++ u16 ctrl_2;
++
++ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ /* Select Bus Speed Mode for host */
++ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
++ if ((timing == MMC_TIMING_MMC_HS200) ||
++ (timing == MMC_TIMING_UHS_SDR104))
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
++ else if (timing == MMC_TIMING_UHS_SDR12)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
++ else if (timing == MMC_TIMING_UHS_SDR25)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
++ else if (timing == MMC_TIMING_UHS_SDR50)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
++ else if (timing == MMC_TIMING_UHS_DDR50)
++ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
++ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
++}
++EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
++
++static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
++{
++ unsigned long flags;
++ u8 ctrl;
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ if (host->flags & SDHCI_DEVICE_DEAD) {
++ spin_unlock_irqrestore(&host->lock, flags);
++ if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
++ mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
++ return;
++ }
++
++ /*
++ * Reset the chip on each power off.
++ * Should clear out any weird states.
++ */
++ if (ios->power_mode == MMC_POWER_OFF) {
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
++ sdhci_reinit(host);
++ }
++
++ if (host->version >= SDHCI_SPEC_300 &&
++ (ios->power_mode == MMC_POWER_UP) &&
++ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
++ sdhci_enable_preset_value(host, false);
++
++ if (!ios->clock || ios->clock != host->clock) {
++ host->ops->set_clock(host, ios->clock);
++ host->clock = ios->clock;
++ }
++
++ sdhci_set_power(host, ios->power_mode, ios->vdd);
++
++ if (host->ops->platform_send_init_74_clocks)
++ host->ops->platform_send_init_74_clocks(host, ios->power_mode);
++
++ host->ops->set_bus_width(host, ios->bus_width);
++
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++
++ if ((ios->timing == MMC_TIMING_SD_HS ||
++ ios->timing == MMC_TIMING_MMC_HS)
++ && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
++ ctrl |= SDHCI_CTRL_HISPD;
++ else
++ ctrl &= ~SDHCI_CTRL_HISPD;
++
++ if (host->version >= SDHCI_SPEC_300) {
++ u16 clk, ctrl_2;
++
++ /* In case of UHS-I modes, set High Speed Enable */
++ if ((ios->timing == MMC_TIMING_MMC_HS200) ||
++ (ios->timing == MMC_TIMING_UHS_SDR50) ||
++ (ios->timing == MMC_TIMING_UHS_SDR104) ||
++ (ios->timing == MMC_TIMING_UHS_DDR50) ||
++ (ios->timing == MMC_TIMING_UHS_SDR25))
++ ctrl |= SDHCI_CTRL_HISPD;
++
++ if (!host->preset_enabled) {
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++ /*
++ * We only need to set Driver Strength if the
++ * preset value enable is not set.
++ */
++ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
++ if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
++ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
++ else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
++ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
++
++ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
++ } else {
++ /*
++ * According to SDHC Spec v3.00, if the Preset Value
++ * Enable in the Host Control 2 register is set, we
++ * need to reset SD Clock Enable before changing High
++ * Speed Enable to avoid generating clock gliches.
++ */
++
++ /* Reset SD Clock Enable */
++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++ clk &= ~SDHCI_CLOCK_CARD_EN;
++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
++
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++
++ /* Re-enable SD Clock */
++ host->ops->set_clock(host, host->clock);
++ }
++
++ /* Reset SD Clock Enable */
++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++ clk &= ~SDHCI_CLOCK_CARD_EN;
++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
++
++ host->ops->set_uhs_signaling(host, ios->timing);
++ host->timing = ios->timing;
++
++ if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
++ ((ios->timing == MMC_TIMING_UHS_SDR12) ||
++ (ios->timing == MMC_TIMING_UHS_SDR25) ||
++ (ios->timing == MMC_TIMING_UHS_SDR50) ||
++ (ios->timing == MMC_TIMING_UHS_SDR104) ||
++ (ios->timing == MMC_TIMING_UHS_DDR50))) {
++ u16 preset;
++
++ sdhci_enable_preset_value(host, true);
++ preset = sdhci_get_preset_value(host);
++ ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
++ >> SDHCI_PRESET_DRV_SHIFT;
++ }
++
++ host->ops->set_clock(host, host->clock);
++ } else
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++
++ /*
++ * Some (ENE) controllers go apeshit on some ios operation,
++ * signalling timeout and CRC errors even on CMD0. Resetting
++ * it on each ios seems to solve the problem.
++ */
++ if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
++ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
++
++ mmiowb();
++ spin_unlock_irqrestore(&host->lock, flags);
++}
++
++static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++
++ sdhci_runtime_pm_get(host);
++ sdhci_do_set_ios(host, ios);
++ sdhci_runtime_pm_put(host);
++}
++
++static int sdhci_do_get_cd(struct sdhci_host *host)
++{
++ int gpio_cd = mmc_gpio_get_cd(host->mmc);
++
++ if (host->flags & SDHCI_DEVICE_DEAD)
++ return 0;
++
++ /* If polling/nonremovable, assume that the card is always present. */
++ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
++ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
++ return 1;
++
++ /* Try slot gpio detect */
++ if (!IS_ERR_VALUE(gpio_cd))
++ return !!gpio_cd;
++
++ /* Host native card detect */
++ return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
++}
++
++static int sdhci_get_cd(struct mmc_host *mmc)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ int ret;
++
++ sdhci_runtime_pm_get(host);
++ ret = sdhci_do_get_cd(host);
++ sdhci_runtime_pm_put(host);
++ return ret;
++}
++
++static int sdhci_check_ro(struct sdhci_host *host)
++{
++ unsigned long flags;
++ int is_readonly;
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ if (host->flags & SDHCI_DEVICE_DEAD)
++ is_readonly = 0;
++ else if (host->ops->get_ro)
++ is_readonly = host->ops->get_ro(host);
++ else
++ is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
++ & SDHCI_WRITE_PROTECT);
++
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ /* This quirk needs to be replaced by a callback-function later */
++ return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
++ !is_readonly : is_readonly;
++}
++
++#define SAMPLE_COUNT 5
++
++static int sdhci_do_get_ro(struct sdhci_host *host)
++{
++ int i, ro_count;
++
++ if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
++ return sdhci_check_ro(host);
++
++ ro_count = 0;
++ for (i = 0; i < SAMPLE_COUNT; i++) {
++ if (sdhci_check_ro(host)) {
++ if (++ro_count > SAMPLE_COUNT / 2)
++ return 1;
++ }
++ msleep(30);
++ }
++ return 0;
++}
++
++static void sdhci_hw_reset(struct mmc_host *mmc)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++
++ if (host->ops && host->ops->hw_reset)
++ host->ops->hw_reset(host);
++}
++
++static int sdhci_get_ro(struct mmc_host *mmc)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ int ret;
++
++ sdhci_runtime_pm_get(host);
++ ret = sdhci_do_get_ro(host);
++ sdhci_runtime_pm_put(host);
++ return ret;
++}
++
++static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
++{
++ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
++ if (enable)
++ host->ier |= SDHCI_INT_CARD_INT;
++ else
++ host->ier &= ~SDHCI_INT_CARD_INT;
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++ mmiowb();
++ }
++}
++
++static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ unsigned long flags;
++
++ sdhci_runtime_pm_get(host);
++
++ spin_lock_irqsave(&host->lock, flags);
++ if (enable)
++ host->flags |= SDHCI_SDIO_IRQ_ENABLED;
++ else
++ host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
++
++ sdhci_enable_sdio_irq_nolock(host, enable);
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ sdhci_runtime_pm_put(host);
++}
++
++static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
++ struct mmc_ios *ios)
++{
++ u16 ctrl;
++ int ret;
++
++ /*
++ * Signal Voltage Switching is only applicable for Host Controllers
++ * v3.00 and above.
++ */
++ if (host->version < SDHCI_SPEC_300)
++ return 0;
++
++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++
++ switch (ios->signal_voltage) {
++ case MMC_SIGNAL_VOLTAGE_330:
++ /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
++ ctrl &= ~SDHCI_CTRL_VDD_180;
++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
++
++ if (host->vqmmc) {
++ ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
++ if (ret) {
++ pr_warning("%s: Switching to 3.3V signalling voltage "
++ " failed\n", mmc_hostname(host->mmc));
++ return -EIO;
++ }
++ }
++ /* Wait for 5ms */
++ usleep_range(5000, 5500);
++
++ /* 3.3V regulator output should be stable within 5 ms */
++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ if (!(ctrl & SDHCI_CTRL_VDD_180))
++ return 0;
++
++ pr_warning("%s: 3.3V regulator output did not became stable\n",
++ mmc_hostname(host->mmc));
++
++ return -EAGAIN;
++ case MMC_SIGNAL_VOLTAGE_180:
++ if (host->vqmmc) {
++ ret = regulator_set_voltage(host->vqmmc,
++ 1700000, 1950000);
++ if (ret) {
++ pr_warning("%s: Switching to 1.8V signalling voltage "
++ " failed\n", mmc_hostname(host->mmc));
++ return -EIO;
++ }
++ }
++
++ /*
++ * Enable 1.8V Signal Enable in the Host Control2
++ * register
++ */
++ ctrl |= SDHCI_CTRL_VDD_180;
++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
++
++ /* Wait for 5ms */
++ usleep_range(5000, 5500);
++
++ /* 1.8V regulator output should be stable within 5 ms */
++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ if (ctrl & SDHCI_CTRL_VDD_180)
++ return 0;
++
++ pr_warning("%s: 1.8V regulator output did not became stable\n",
++ mmc_hostname(host->mmc));
++
++ return -EAGAIN;
++ case MMC_SIGNAL_VOLTAGE_120:
++ if (host->vqmmc) {
++ ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000);
++ if (ret) {
++ pr_warning("%s: Switching to 1.2V signalling voltage "
++ " failed\n", mmc_hostname(host->mmc));
++ return -EIO;
++ }
++ }
++ return 0;
++ default:
++ /* No signal voltage switch required */
++ return 0;
++ }
++}
++
++static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
++ struct mmc_ios *ios)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ int err;
++
++ if (host->version < SDHCI_SPEC_300)
++ return 0;
++ sdhci_runtime_pm_get(host);
++ err = sdhci_do_start_signal_voltage_switch(host, ios);
++ sdhci_runtime_pm_put(host);
++ return err;
++}
++
++static int sdhci_card_busy(struct mmc_host *mmc)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ u32 present_state;
++
++ sdhci_runtime_pm_get(host);
++ /* Check whether DAT[3:0] is 0000 */
++ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
++ sdhci_runtime_pm_put(host);
++
++ return !(present_state & SDHCI_DATA_LVL_MASK);
++}
++
++static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ u16 ctrl;
++ int tuning_loop_counter = MAX_TUNING_LOOP;
++ unsigned long timeout;
++ int err = 0;
++ unsigned long flags;
++
++ sdhci_runtime_pm_get(host);
++ spin_lock_irqsave(&host->lock, flags);
++
++ /*
++ * The Host Controller needs tuning only in case of SDR104 mode
++ * and for SDR50 mode when Use Tuning for SDR50 is set in the
++ * Capabilities register.
++ * If the Host Controller supports the HS200 mode then the
++ * tuning function has to be executed.
++ */
++ switch (host->timing) {
++ case MMC_TIMING_MMC_HS200:
++ case MMC_TIMING_UHS_SDR104:
++ break;
++
++ case MMC_TIMING_UHS_SDR50:
++ if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
++ host->flags & SDHCI_SDR104_NEEDS_TUNING)
++ break;
++ /* FALLTHROUGH */
++
++ default:
++ spin_unlock_irqrestore(&host->lock, flags);
++ sdhci_runtime_pm_put(host);
++ return 0;
++ }
++
++ if (host->ops->platform_execute_tuning) {
++ spin_unlock_irqrestore(&host->lock, flags);
++ err = host->ops->platform_execute_tuning(host, opcode);
++ sdhci_runtime_pm_put(host);
++ return err;
++ }
++
++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ ctrl |= SDHCI_CTRL_EXEC_TUNING;
++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
++
++ /*
++ * As per the Host Controller spec v3.00, tuning command
++ * generates Buffer Read Ready interrupt, so enable that.
++ *
++ * Note: The spec clearly says that when tuning sequence
++ * is being performed, the controller does not generate
++ * interrupts other than Buffer Read Ready interrupt. But
++ * to make sure we don't hit a controller bug, we _only_
++ * enable Buffer Read Ready interrupt here.
++ */
++ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
++ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
++
++ /*
++ * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
++ * of loops reaches 40 times or a timeout of 150ms occurs.
++ */
++ timeout = 150;
++ do {
++ struct mmc_command cmd = {0};
++ struct mmc_request mrq = {NULL};
++
++ if (!tuning_loop_counter && !timeout)
++ break;
++
++ cmd.opcode = opcode;
++ cmd.arg = 0;
++ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
++ cmd.retries = 0;
++ cmd.data = NULL;
++ cmd.error = 0;
++
++ mrq.cmd = &cmd;
++ host->mrq = &mrq;
++
++ /*
++ * In response to CMD19, the card sends 64 bytes of tuning
++ * block to the Host Controller. So we set the block size
++ * to 64 here.
++ */
++ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
++ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
++ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
++ SDHCI_BLOCK_SIZE);
++ else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
++ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
++ SDHCI_BLOCK_SIZE);
++ } else {
++ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
++ SDHCI_BLOCK_SIZE);
++ }
++
++ /*
++ * The tuning block is sent by the card to the host controller.
++ * So we set the TRNS_READ bit in the Transfer Mode register.
++ * This also takes care of setting DMA Enable and Multi Block
++ * Select in the same register to 0.
++ */
++ sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
++
++ sdhci_send_command(host, &cmd);
++
++ host->cmd = NULL;
++ host->mrq = NULL;
++
++ spin_unlock_irqrestore(&host->lock, flags);
++ /* Wait for Buffer Read Ready interrupt */
++ wait_event_interruptible_timeout(host->buf_ready_int,
++ (host->tuning_done == 1),
++ msecs_to_jiffies(50));
++ spin_lock_irqsave(&host->lock, flags);
++
++ if (!host->tuning_done) {
++ pr_info(DRIVER_NAME ": Timeout waiting for "
++ "Buffer Read Ready interrupt during tuning "
++ "procedure, falling back to fixed sampling "
++ "clock\n");
++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
++ ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
++
++ err = -EIO;
++ goto out;
++ }
++
++ host->tuning_done = 0;
++
++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ tuning_loop_counter--;
++ timeout--;
++ mdelay(1);
++ } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
++
++ /*
++ * The Host Driver has exhausted the maximum number of loops allowed,
++ * so use fixed sampling frequency.
++ */
++ if (!tuning_loop_counter || !timeout) {
++ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
++ err = -EIO;
++ } else {
++ if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
++ pr_info(DRIVER_NAME ": Tuning procedure"
++ " failed, falling back to fixed sampling"
++ " clock\n");
++ err = -EIO;
++ }
++ }
++
++out:
++ /*
++ * If this is the very first time we are here, we start the retuning
++ * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
++ * flag won't be set, we check this condition before actually starting
++ * the timer.
++ */
++ if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
++ (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
++ host->flags |= SDHCI_USING_RETUNING_TIMER;
++ mod_timer(&host->tuning_timer, jiffies +
++ host->tuning_count * HZ);
++ /* Tuning mode 1 limits the maximum data length to 4MB */
++ mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
++ } else {
++ host->flags &= ~SDHCI_NEEDS_RETUNING;
++ /* Reload the new initial value for timer */
++ if (host->tuning_mode == SDHCI_TUNING_MODE_1)
++ mod_timer(&host->tuning_timer, jiffies +
++ host->tuning_count * HZ);
++ }
++
++ /*
++ * In case tuning fails, host controllers which support re-tuning can
++ * try tuning again at a later time, when the re-tuning timer expires.
++ * So for these controllers, we return 0. Since there might be other
++ * controllers who do not have this capability, we return error for
++ * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
++ * a retuning timer to do the retuning for the card.
++ */
++ if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
++ err = 0;
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++ spin_unlock_irqrestore(&host->lock, flags);
++ sdhci_runtime_pm_put(host);
++
++ return err;
++}
++
++
++static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
++{
++ /* Host Controller v3.00 defines preset value registers */
++ if (host->version < SDHCI_SPEC_300)
++ return;
++
++ /*
++ * We only enable or disable Preset Value if they are not already
++ * enabled or disabled respectively. Otherwise, we bail out.
++ */
++ if (host->preset_enabled != enable) {
++ u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++
++ if (enable)
++ ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
++ else
++ ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
++
++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
++
++ if (enable)
++ host->flags |= SDHCI_PV_ENABLED;
++ else
++ host->flags &= ~SDHCI_PV_ENABLED;
++
++ host->preset_enabled = enable;
++ }
++}
++
++static void sdhci_card_event(struct mmc_host *mmc)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ unsigned long flags;
++
++ /* First check if client has provided their own card event */
++ if (host->ops->card_event)
++ host->ops->card_event(host);
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ /* Check host->mrq first in case we are runtime suspended */
++ if (host->mrq && !sdhci_do_get_cd(host)) {
++ pr_err("%s: Card removed during transfer!\n",
++ mmc_hostname(host->mmc));
++ pr_err("%s: Resetting controller.\n",
++ mmc_hostname(host->mmc));
++
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
++
++ host->mrq->cmd->error = -ENOMEDIUM;
++ tasklet_schedule(&host->finish_tasklet);
++ }
++
++ spin_unlock_irqrestore(&host->lock, flags);
++}
++
++static const struct mmc_host_ops sdhci_ops = {
++ .request = sdhci_request,
++ .set_ios = sdhci_set_ios,
++ .get_cd = sdhci_get_cd,
++ .get_ro = sdhci_get_ro,
++ .hw_reset = sdhci_hw_reset,
++ .enable_sdio_irq = sdhci_enable_sdio_irq,
++ .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
++ .execute_tuning = sdhci_execute_tuning,
++ .card_event = sdhci_card_event,
++ .card_busy = sdhci_card_busy,
++};
++
++/*****************************************************************************\
++ * *
++ * Tasklets *
++ * *
++\*****************************************************************************/
++
++static void sdhci_tasklet_finish(unsigned long param)
++{
++ struct sdhci_host *host;
++ unsigned long flags;
++ struct mmc_request *mrq;
++
++ host = (struct sdhci_host*)param;
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ /*
++ * If this tasklet gets rescheduled while running, it will
++ * be run again afterwards but without any active request.
++ */
++ if (!host->mrq) {
++ spin_unlock_irqrestore(&host->lock, flags);
++ return;
++ }
++
++ del_timer(&host->timer);
++
++ mrq = host->mrq;
++
++ /*
++ * The controller needs a reset of internal state machines
++ * upon error conditions.
++ */
++ if (!(host->flags & SDHCI_DEVICE_DEAD) &&
++ ((mrq->cmd && mrq->cmd->error) ||
++ (mrq->data && (mrq->data->error ||
++ (mrq->data->stop && mrq->data->stop->error))) ||
++ (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
++
++ /* Some controllers need this kick or reset won't work here */
++ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
++ /* This is to force an update */
++ host->ops->set_clock(host, host->clock);
++
++ /* Spec says we should do both at the same time, but Ricoh
++ controllers do not like that. */
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
++ }
++
++ host->mrq = NULL;
++ host->cmd = NULL;
++ host->data = NULL;
++
++#ifndef SDHCI_USE_LEDS_CLASS
++ sdhci_deactivate_led(host);
++#endif
++
++ mmiowb();
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ mmc_request_done(host->mmc, mrq);
++ sdhci_runtime_pm_put(host);
++}
++
++static void sdhci_timeout_timer(unsigned long data)
++{
++ struct sdhci_host *host;
++ unsigned long flags;
++
++ host = (struct sdhci_host*)data;
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ if (host->mrq) {
++ pr_err("%s: Timeout waiting for hardware "
++ "interrupt.\n", mmc_hostname(host->mmc));
++ sdhci_dumpregs(host);
++
++ if (host->data) {
++ host->data->error = -ETIMEDOUT;
++ sdhci_finish_data(host);
++ } else {
++ if (host->cmd)
++ host->cmd->error = -ETIMEDOUT;
++ else
++ host->mrq->cmd->error = -ETIMEDOUT;
++
++ tasklet_schedule(&host->finish_tasklet);
++ }
++ }
++
++ mmiowb();
++ spin_unlock_irqrestore(&host->lock, flags);
++}
++
++static void sdhci_tuning_timer(unsigned long data)
++{
++ struct sdhci_host *host;
++ unsigned long flags;
++
++ host = (struct sdhci_host *)data;
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ host->flags |= SDHCI_NEEDS_RETUNING;
++
++ spin_unlock_irqrestore(&host->lock, flags);
++}
++
++/*****************************************************************************\
++ * *
++ * Interrupt handling *
++ * *
++\*****************************************************************************/
++
++static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
++{
++ BUG_ON(intmask == 0);
++
++ if (!host->cmd) {
++ pr_err("%s: Got command interrupt 0x%08x even "
++ "though no command operation was in progress.\n",
++ mmc_hostname(host->mmc), (unsigned)intmask);
++ sdhci_dumpregs(host);
++ return;
++ }
++
++ if (intmask & SDHCI_INT_TIMEOUT)
++ host->cmd->error = -ETIMEDOUT;
++ else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
++ SDHCI_INT_INDEX))
++ host->cmd->error = -EILSEQ;
++
++ if (host->cmd->error) {
++ tasklet_schedule(&host->finish_tasklet);
++ return;
++ }
++
++ /*
++ * The host can send and interrupt when the busy state has
++ * ended, allowing us to wait without wasting CPU cycles.
++ * Unfortunately this is overloaded on the "data complete"
++ * interrupt, so we need to take some care when handling
++ * it.
++ *
++ * Note: The 1.0 specification is a bit ambiguous about this
++ * feature so there might be some problems with older
++ * controllers.
++ */
++ if (host->cmd->flags & MMC_RSP_BUSY) {
++ if (host->cmd->data)
++ DBG("Cannot wait for busy signal when also "
++ "doing a data transfer");
++ else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
++ return;
++
++ /* The controller does not support the end-of-busy IRQ,
++ * fall through and take the SDHCI_INT_RESPONSE */
++ }
++
++ if (intmask & SDHCI_INT_RESPONSE)
++ sdhci_finish_command(host);
++}
++
++#ifdef CONFIG_MMC_DEBUG
++static void sdhci_show_adma_error(struct sdhci_host *host)
++{
++ const char *name = mmc_hostname(host->mmc);
++ u8 *desc = host->adma_desc;
++ __le32 *dma;
++ __le16 *len;
++ u8 attr;
++
++ sdhci_dumpregs(host);
++
++ while (true) {
++ dma = (__le32 *)(desc + 4);
++ len = (__le16 *)(desc + 2);
++ attr = *desc;
++
++ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
++ name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
++
++ desc += 8;
++
++ if (attr & 2)
++ break;
++ }
++}
++#else
++static void sdhci_show_adma_error(struct sdhci_host *host) { }
++#endif
++
++static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
++{
++ u32 command;
++ BUG_ON(intmask == 0);
++
++ /* CMD19 generates _only_ Buffer Read Ready interrupt */
++ if (intmask & SDHCI_INT_DATA_AVAIL) {
++ command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
++ if (command == MMC_SEND_TUNING_BLOCK ||
++ command == MMC_SEND_TUNING_BLOCK_HS200) {
++ host->tuning_done = 1;
++ wake_up(&host->buf_ready_int);
++ return;
++ }
++ }
++
++ if (!host->data) {
++ /*
++ * The "data complete" interrupt is also used to
++ * indicate that a busy state has ended. See comment
++ * above in sdhci_cmd_irq().
++ */
++ if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
++ if (intmask & SDHCI_INT_DATA_END) {
++ sdhci_finish_command(host);
++ return;
++ }
++ }
++
++ pr_err("%s: Got data interrupt 0x%08x even "
++ "though no data operation was in progress.\n",
++ mmc_hostname(host->mmc), (unsigned)intmask);
++ sdhci_dumpregs(host);
++
++ return;
++ }
++
++ if (intmask & SDHCI_INT_DATA_TIMEOUT)
++ host->data->error = -ETIMEDOUT;
++ else if (intmask & SDHCI_INT_DATA_END_BIT)
++ host->data->error = -EILSEQ;
++ else if ((intmask & SDHCI_INT_DATA_CRC) &&
++ SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
++ != MMC_BUS_TEST_R)
++ host->data->error = -EILSEQ;
++ else if (intmask & SDHCI_INT_ADMA_ERROR) {
++ pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
++ sdhci_show_adma_error(host);
++ host->data->error = -EIO;
++ if (host->ops->adma_workaround)
++ host->ops->adma_workaround(host, intmask);
++ }
++
++ if (host->data->error)
++ sdhci_finish_data(host);
++ else {
++ if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
++ sdhci_transfer_pio(host);
++
++ /*
++ * We currently don't do anything fancy with DMA
++ * boundaries, but as we can't disable the feature
++ * we need to at least restart the transfer.
++ *
++ * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
++ * should return a valid address to continue from, but as
++ * some controllers are faulty, don't trust them.
++ */
++ if (intmask & SDHCI_INT_DMA_END) {
++ u32 dmastart, dmanow;
++ dmastart = sg_dma_address(host->data->sg);
++ dmanow = dmastart + host->data->bytes_xfered;
++ /*
++ * Force update to the next DMA block boundary.
++ */
++ dmanow = (dmanow &
++ ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
++ SDHCI_DEFAULT_BOUNDARY_SIZE;
++ host->data->bytes_xfered = dmanow - dmastart;
++ DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
++ " next 0x%08x\n",
++ mmc_hostname(host->mmc), dmastart,
++ host->data->bytes_xfered, dmanow);
++ sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
++ }
++
++ if (intmask & SDHCI_INT_DATA_END) {
++ if (host->cmd) {
++ /*
++ * Data managed to finish before the
++ * command completed. Make sure we do
++ * things in the proper order.
++ */
++ host->data_early = 1;
++ } else {
++ sdhci_finish_data(host);
++ }
++ }
++ }
++}
++
++static irqreturn_t sdhci_irq(int irq, void *dev_id)
++{
++ irqreturn_t result = IRQ_NONE;
++ struct sdhci_host *host = dev_id;
++ u32 intmask, mask, unexpected = 0;
++ int max_loops = 16;
++
++ spin_lock(&host->lock);
++
++ if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
++ spin_unlock(&host->lock);
++ pr_warning("%s: got irq while runtime suspended\n",
++ mmc_hostname(host->mmc));
++ return IRQ_HANDLED;
++ }
++
++ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
++ if (!intmask || intmask == 0xffffffff) {
++ result = IRQ_NONE;
++ goto out;
++ }
++
++ do {
++ /* Clear selected interrupts. */
++ mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
++ SDHCI_INT_BUS_POWER);
++ sdhci_writel(host, mask, SDHCI_INT_STATUS);
++
++ DBG("*** %s got interrupt: 0x%08x\n",
++ mmc_hostname(host->mmc), intmask);
++
++ if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
++ u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT;
++
++ /*
++ * There is a observation on i.mx esdhc. INSERT
++ * bit will be immediately set again when it gets
++ * cleared, if a card is inserted. We have to mask
++ * the irq to prevent interrupt storm which will
++ * freeze the system. And the REMOVE gets the
++ * same situation.
++ *
++ * More testing are needed here to ensure it works
++ * for other platforms though.
++ */
++ host->ier &= ~(SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE);
++ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
++ SDHCI_INT_CARD_INSERT;
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++
++ sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
++
++ host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE);
++ result = IRQ_WAKE_THREAD;
++ }
++
++ if (intmask & SDHCI_INT_CMD_MASK)
++ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
++
++ if (intmask & SDHCI_INT_DATA_MASK)
++ sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
++
++ if (intmask & SDHCI_INT_BUS_POWER)
++ pr_err("%s: Card is consuming too much power!\n",
++ mmc_hostname(host->mmc));
++
++ if (intmask & SDHCI_INT_CARD_INT) {
++ sdhci_enable_sdio_irq_nolock(host, false);
++ host->thread_isr |= SDHCI_INT_CARD_INT;
++ result = IRQ_WAKE_THREAD;
++ }
++
++ intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
++ SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
++ SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
++ SDHCI_INT_CARD_INT);
++
++ if (intmask) {
++ unexpected |= intmask;
++ sdhci_writel(host, intmask, SDHCI_INT_STATUS);
++ }
++
++ if (result == IRQ_NONE)
++ result = IRQ_HANDLED;
++
++ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
++ } while (intmask && --max_loops);
++out:
++ spin_unlock(&host->lock);
++
++ if (unexpected) {
++ pr_err("%s: Unexpected interrupt 0x%08x.\n",
++ mmc_hostname(host->mmc), unexpected);
++ sdhci_dumpregs(host);
++ }
++
++ return result;
++}
++
++static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
++{
++ struct sdhci_host *host = dev_id;
++ unsigned long flags;
++ u32 isr;
++
++ spin_lock_irqsave(&host->lock, flags);
++ isr = host->thread_isr;
++ host->thread_isr = 0;
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
++ sdhci_card_event(host->mmc);
++ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
++ }
++
++ if (isr & SDHCI_INT_CARD_INT) {
++ sdio_run_irqs(host->mmc);
++
++ spin_lock_irqsave(&host->lock, flags);
++ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
++ sdhci_enable_sdio_irq_nolock(host, true);
++ spin_unlock_irqrestore(&host->lock, flags);
++ }
++
++ return isr ? IRQ_HANDLED : IRQ_NONE;
++}
++
++/*****************************************************************************\
++ * *
++ * Suspend/resume *
++ * *
++\*****************************************************************************/
++
++#ifdef CONFIG_PM
++void sdhci_enable_irq_wakeups(struct sdhci_host *host)
++{
++ int gpio_cd = mmc_gpio_get_cd(host->mmc);
++ u8 val;
++ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
++ | SDHCI_WAKE_ON_INT;
++
++ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
++ val |= mask ;
++ /* Avoid fake wake up */
++ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION ||
++ !IS_ERR_VALUE(gpio_cd))
++ val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
++ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
++}
++EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
++
++void sdhci_disable_irq_wakeups(struct sdhci_host *host)
++{
++ u8 val;
++ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
++ | SDHCI_WAKE_ON_INT;
++
++ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
++ val &= ~mask;
++ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
++}
++EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
++
++int sdhci_suspend_host(struct sdhci_host *host)
++{
++ sdhci_disable_card_detection(host);
++
++ /* Disable tuning since we are suspending */
++ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
++ del_timer_sync(&host->tuning_timer);
++ host->flags &= ~SDHCI_NEEDS_RETUNING;
++ }
++
++ if (!device_may_wakeup(mmc_dev(host->mmc))) {
++ host->ier = 0;
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
++ free_irq(host->irq, host);
++ } else {
++ sdhci_enable_irq_wakeups(host);
++ enable_irq_wake(host->irq);
++ }
++ return 0;
++}
++
++EXPORT_SYMBOL_GPL(sdhci_suspend_host);
++
++int sdhci_resume_host(struct sdhci_host *host)
++{
++ int ret = 0;
++
++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
++ if (host->ops->enable_dma)
++ host->ops->enable_dma(host);
++ }
++
++ if (!device_may_wakeup(mmc_dev(host->mmc))) {
++ ret = request_threaded_irq(host->irq, sdhci_irq,
++ sdhci_thread_irq, IRQF_SHARED,
++ mmc_hostname(host->mmc), host);
++ if (ret)
++ return ret;
++ } else {
++ sdhci_disable_irq_wakeups(host);
++ disable_irq_wake(host->irq);
++ }
++
++ if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
++ (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
++ /* Card keeps power but host controller does not */
++ sdhci_init(host, 0);
++ host->pwr = 0;
++ host->clock = 0;
++ sdhci_do_set_ios(host, &host->mmc->ios);
++ } else {
++ sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
++ mmiowb();
++ }
++
++ sdhci_enable_card_detection(host);
++
++ /* Set the re-tuning expiration flag */
++ if (host->flags & SDHCI_USING_RETUNING_TIMER)
++ host->flags |= SDHCI_NEEDS_RETUNING;
++
++ return ret;
++}
++
++EXPORT_SYMBOL_GPL(sdhci_resume_host);
++#endif /* CONFIG_PM */
++
++#ifdef CONFIG_PM_RUNTIME
++
++static int sdhci_runtime_pm_get(struct sdhci_host *host)
++{
++ return pm_runtime_get_sync(host->mmc->parent);
++}
++
++static int sdhci_runtime_pm_put(struct sdhci_host *host)
++{
++ pm_runtime_mark_last_busy(host->mmc->parent);
++ return pm_runtime_put_autosuspend(host->mmc->parent);
++}
++
++static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
++{
++ if (host->runtime_suspended || host->bus_on)
++ return;
++ host->bus_on = true;
++ pm_runtime_get_noresume(host->mmc->parent);
++}
++
++static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
++{
++ if (host->runtime_suspended || !host->bus_on)
++ return;
++ host->bus_on = false;
++ pm_runtime_put_noidle(host->mmc->parent);
++}
++
++int sdhci_runtime_suspend_host(struct sdhci_host *host)
++{
++ unsigned long flags;
++ int ret = 0;
++
++ /* Disable tuning since we are suspending */
++ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
++ del_timer_sync(&host->tuning_timer);
++ host->flags &= ~SDHCI_NEEDS_RETUNING;
++ }
++
++ spin_lock_irqsave(&host->lock, flags);
++ host->ier &= SDHCI_INT_CARD_INT;
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ synchronize_hardirq(host->irq);
++
++ spin_lock_irqsave(&host->lock, flags);
++ host->runtime_suspended = true;
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
++
++int sdhci_runtime_resume_host(struct sdhci_host *host)
++{
++ unsigned long flags;
++ int ret = 0, host_flags = host->flags;
++
++ if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
++ if (host->ops->enable_dma)
++ host->ops->enable_dma(host);
++ }
++
++ sdhci_init(host, 0);
++
++ /* Force clock and power re-program */
++ host->pwr = 0;
++ host->clock = 0;
++ sdhci_do_set_ios(host, &host->mmc->ios);
++
++ sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
++ if ((host_flags & SDHCI_PV_ENABLED) &&
++ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
++ spin_lock_irqsave(&host->lock, flags);
++ sdhci_enable_preset_value(host, true);
++ spin_unlock_irqrestore(&host->lock, flags);
++ }
++
++ /* Set the re-tuning expiration flag */
++ if (host->flags & SDHCI_USING_RETUNING_TIMER)
++ host->flags |= SDHCI_NEEDS_RETUNING;
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ host->runtime_suspended = false;
++
++ /* Enable SDIO IRQ */
++ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
++ sdhci_enable_sdio_irq_nolock(host, true);
++
++ /* Enable Card Detection */
++ sdhci_enable_card_detection(host);
++
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
++
++#endif
++
++/*****************************************************************************\
++ * *
++ * Device allocation/registration *
++ * *
++\*****************************************************************************/
++
++struct sdhci_host *sdhci_alloc_host(struct device *dev,
++ size_t priv_size)
++{
++ struct mmc_host *mmc;
++ struct sdhci_host *host;
++
++ WARN_ON(dev == NULL);
++
++ mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
++ if (!mmc)
++ return ERR_PTR(-ENOMEM);
++
++ host = mmc_priv(mmc);
++ host->mmc = mmc;
++
++ return host;
++}
++
++EXPORT_SYMBOL_GPL(sdhci_alloc_host);
++
++int sdhci_add_host(struct sdhci_host *host)
++{
++ struct mmc_host *mmc;
++ u32 caps[2] = {0, 0};
++ u32 max_current_caps;
++ unsigned int ocr_avail;
++ int ret;
++
++ WARN_ON(host == NULL);
++ if (host == NULL)
++ return -EINVAL;
++
++ mmc = host->mmc;
++
++ if (debug_quirks)
++ host->quirks = debug_quirks;
++ if (debug_quirks2)
++ host->quirks2 = debug_quirks2;
++
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
++
++ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
++ host->version = (host->version & SDHCI_SPEC_VER_MASK)
++ >> SDHCI_SPEC_VER_SHIFT;
++ if (host->version > SDHCI_SPEC_300) {
++ pr_err("%s: Unknown controller version (%d). "
++ "You may experience problems.\n", mmc_hostname(mmc),
++ host->version);
++ }
++
++ caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
++ sdhci_readl(host, SDHCI_CAPABILITIES);
++
++ if (host->version >= SDHCI_SPEC_300)
++ caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
++ host->caps1 :
++ sdhci_readl(host, SDHCI_CAPABILITIES_1);
++
++ if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
++ host->flags |= SDHCI_USE_SDMA;
++ else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
++ DBG("Controller doesn't have SDMA capability\n");
++ else
++ host->flags |= SDHCI_USE_SDMA;
++
++ if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
++ (host->flags & SDHCI_USE_SDMA)) {
++ DBG("Disabling DMA as it is marked broken\n");
++ host->flags &= ~SDHCI_USE_SDMA;
++ }
++
++ if ((host->version >= SDHCI_SPEC_200) &&
++ (caps[0] & SDHCI_CAN_DO_ADMA2))
++ host->flags |= SDHCI_USE_ADMA;
++
++ if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
++ (host->flags & SDHCI_USE_ADMA)) {
++ DBG("Disabling ADMA as it is marked broken\n");
++ host->flags &= ~SDHCI_USE_ADMA;
++ }
++
++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
++ if (host->ops->enable_dma) {
++ if (host->ops->enable_dma(host)) {
++ pr_warning("%s: No suitable DMA "
++ "available. Falling back to PIO.\n",
++ mmc_hostname(mmc));
++ host->flags &=
++ ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
++ }
++ }
++ }
++
++ if (host->flags & SDHCI_USE_ADMA) {
++ /*
++ * We need to allocate descriptors for all sg entries
++ * (128) and potentially one alignment transfer for
++ * each of those entries.
++ */
++ host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc),
++ ADMA_SIZE, &host->adma_addr,
++ GFP_KERNEL);
++ host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
++ if (!host->adma_desc || !host->align_buffer) {
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
++ kfree(host->align_buffer);
++ pr_warning("%s: Unable to allocate ADMA "
++ "buffers. Falling back to standard DMA.\n",
++ mmc_hostname(mmc));
++ host->flags &= ~SDHCI_USE_ADMA;
++ host->adma_desc = NULL;
++ host->align_buffer = NULL;
++ } else if (host->adma_addr & 3) {
++ pr_warning("%s: unable to allocate aligned ADMA descriptor\n",
++ mmc_hostname(mmc));
++ host->flags &= ~SDHCI_USE_ADMA;
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
++ kfree(host->align_buffer);
++ host->adma_desc = NULL;
++ host->align_buffer = NULL;
++ }
++ }
++
++ /*
++ * If we use DMA, then it's up to the caller to set the DMA
++ * mask, but PIO does not need the hw shim so we set a new
++ * mask here in that case.
++ */
++ if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
++ host->dma_mask = DMA_BIT_MASK(64);
++ mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
++ }
++
++ if (host->version >= SDHCI_SPEC_300)
++ host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
++ >> SDHCI_CLOCK_BASE_SHIFT;
++ else
++ host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
++ >> SDHCI_CLOCK_BASE_SHIFT;
++
++ host->max_clk *= 1000000;
++ if (host->max_clk == 0 || host->quirks &
++ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
++ if (!host->ops->get_max_clock) {
++ pr_err("%s: Hardware doesn't specify base clock "
++ "frequency.\n", mmc_hostname(mmc));
++ return -ENODEV;
++ }
++ host->max_clk = host->ops->get_max_clock(host);
++ }
++
++ /*
++ * In case of Host Controller v3.00, find out whether clock
++ * multiplier is supported.
++ */
++ host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
++ SDHCI_CLOCK_MUL_SHIFT;
++
++ /*
++ * In case the value in Clock Multiplier is 0, then programmable
++ * clock mode is not supported, otherwise the actual clock
++ * multiplier is one more than the value of Clock Multiplier
++ * in the Capabilities Register.
++ */
++ if (host->clk_mul)
++ host->clk_mul += 1;
++
++ /*
++ * Set host parameters.
++ */
++ mmc->ops = &sdhci_ops;
++ mmc->f_max = host->max_clk;
++ if (host->ops->get_min_clock)
++ mmc->f_min = host->ops->get_min_clock(host);
++ else if (host->version >= SDHCI_SPEC_300) {
++ if (host->clk_mul) {
++ mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
++ mmc->f_max = host->max_clk * host->clk_mul;
++ } else
++ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
++ } else
++ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
++
++ host->timeout_clk =
++ (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
++ if (host->timeout_clk == 0) {
++ if (host->ops->get_timeout_clock) {
++ host->timeout_clk = host->ops->get_timeout_clock(host);
++ } else if (!(host->quirks &
++ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
++ pr_err("%s: Hardware doesn't specify timeout clock "
++ "frequency.\n", mmc_hostname(mmc));
++ return -ENODEV;
++ }
++ }
++ if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
++ host->timeout_clk *= 1000;
++
++ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
++ host->timeout_clk = mmc->f_max / 1000;
++
++ if (host->quirks2 & SDHCI_QUIRK2_NOSTD_TIMEOUT_COUNTER) {
++ if (host->ops->get_max_timeout_counter) {
++ mmc->max_discard_to =
++ host->ops->get_max_timeout_counter(host)
++ / host->timeout_clk;
++ } else {
++ pr_err("%s: Hardware doesn't specify max timeout "
++ "counter\n", mmc_hostname(mmc));
++ return -ENODEV;
++ }
++ } else {
++ mmc->max_discard_to = (1 << 27) / host->timeout_clk;
++ }
++
++ mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
++ mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD;
++
++ if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
++ host->flags |= SDHCI_AUTO_CMD12;
++
++ /* Auto-CMD23 stuff only works in ADMA or PIO. */
++ if ((host->version >= SDHCI_SPEC_300) &&
++ ((host->flags & SDHCI_USE_ADMA) ||
++ !(host->flags & SDHCI_USE_SDMA))) {
++ host->flags |= SDHCI_AUTO_CMD23;
++ DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
++ } else {
++ DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
++ }
++
++ /*
++ * A controller may support 8-bit width, but the board itself
++ * might not have the pins brought out. Boards that support
++ * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
++ * their platform code before calling sdhci_add_host(), and we
++ * won't assume 8-bit width for hosts without that CAP.
++ */
++ if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
++ mmc->caps |= MMC_CAP_4_BIT_DATA;
++
++ if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
++ mmc->caps &= ~MMC_CAP_CMD23;
++
++ if (caps[0] & SDHCI_CAN_DO_HISPD)
++ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
++
++ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
++ !(host->mmc->caps & MMC_CAP_NONREMOVABLE))
++ mmc->caps |= MMC_CAP_NEEDS_POLL;
++
++ /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
++ host->vqmmc = regulator_get_optional(mmc_dev(mmc), "vqmmc");
++ if (IS_ERR_OR_NULL(host->vqmmc)) {
++ if (PTR_ERR(host->vqmmc) < 0) {
++ pr_info("%s: no vqmmc regulator found\n",
++ mmc_hostname(mmc));
++ host->vqmmc = NULL;
++ }
++ } else {
++ ret = regulator_enable(host->vqmmc);
++ if (!regulator_is_supported_voltage(host->vqmmc, 1700000,
++ 1950000))
++ caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
++ SDHCI_SUPPORT_SDR50 |
++ SDHCI_SUPPORT_DDR50);
++ if (ret) {
++ pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
++ mmc_hostname(mmc), ret);
++ host->vqmmc = NULL;
++ }
++ }
++
++ if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
++ caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
++ SDHCI_SUPPORT_DDR50);
++
++ /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
++ if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
++ SDHCI_SUPPORT_DDR50))
++ mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
++
++ /* SDR104 supports also implies SDR50 support */
++ if (caps[1] & SDHCI_SUPPORT_SDR104) {
++ mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
++ /* SD3.0: SDR104 is supported so (for eMMC) the caps2
++ * field can be promoted to support HS200.
++ */
++ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
++ mmc->caps2 |= MMC_CAP2_HS200;
++ } else if (caps[1] & SDHCI_SUPPORT_SDR50)
++ mmc->caps |= MMC_CAP_UHS_SDR50;
++
++ if (caps[1] & SDHCI_SUPPORT_DDR50)
++ mmc->caps |= MMC_CAP_UHS_DDR50;
++
++ /* Does the host need tuning for SDR50? */
++ if (caps[1] & SDHCI_USE_SDR50_TUNING)
++ host->flags |= SDHCI_SDR50_NEEDS_TUNING;
++
++ /* Does the host need tuning for SDR104 / HS200? */
++ if (mmc->caps2 & MMC_CAP2_HS200)
++ host->flags |= SDHCI_SDR104_NEEDS_TUNING;
++
++ /* Driver Type(s) (A, C, D) supported by the host */
++ if (caps[1] & SDHCI_DRIVER_TYPE_A)
++ mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
++ if (caps[1] & SDHCI_DRIVER_TYPE_C)
++ mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
++ if (caps[1] & SDHCI_DRIVER_TYPE_D)
++ mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
++
++ /* Initial value for re-tuning timer count */
++ host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
++ SDHCI_RETUNING_TIMER_COUNT_SHIFT;
++
++ /*
++ * In case Re-tuning Timer is not disabled, the actual value of
++ * re-tuning timer will be 2 ^ (n - 1).
++ */
++ if (host->tuning_count)
++ host->tuning_count = 1 << (host->tuning_count - 1);
++
++ /* Re-tuning mode supported by the Host Controller */
++ host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
++ SDHCI_RETUNING_MODE_SHIFT;
++
++ ocr_avail = 0;
++
++ host->vmmc = regulator_get_optional(mmc_dev(mmc), "vmmc");
++ if (IS_ERR_OR_NULL(host->vmmc)) {
++ if (PTR_ERR(host->vmmc) < 0) {
++ pr_info("%s: no vmmc regulator found\n",
++ mmc_hostname(mmc));
++ host->vmmc = NULL;
++ }
++ }
++
++#ifdef CONFIG_REGULATOR
++ /*
++ * Voltage range check makes sense only if regulator reports
++ * any voltage value.
++ */
++ if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) {
++ ret = regulator_is_supported_voltage(host->vmmc, 2700000,
++ 3600000);
++ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
++ caps[0] &= ~SDHCI_CAN_VDD_330;
++ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300)))
++ caps[0] &= ~SDHCI_CAN_VDD_300;
++ ret = regulator_is_supported_voltage(host->vmmc, 1700000,
++ 1950000);
++ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180)))
++ caps[0] &= ~SDHCI_CAN_VDD_180;
++ }
++#endif /* CONFIG_REGULATOR */
++
++ /*
++ * According to SD Host Controller spec v3.00, if the Host System
++ * can afford more than 150mA, Host Driver should set XPC to 1. Also
++ * the value is meaningful only if Voltage Support in the Capabilities
++ * register is set. The actual current value is 4 times the register
++ * value.
++ */
++ max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
++ if (!max_current_caps && host->vmmc) {
++ u32 curr = regulator_get_current_limit(host->vmmc);
++ if (curr > 0) {
++
++ /* convert to SDHCI_MAX_CURRENT format */
++ curr = curr/1000; /* convert to mA */
++ curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
++
++ curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
++ max_current_caps =
++ (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
++ (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
++ (curr << SDHCI_MAX_CURRENT_180_SHIFT);
++ }
++ }
++
++ if (caps[0] & SDHCI_CAN_VDD_330) {
++ ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
++
++ mmc->max_current_330 = ((max_current_caps &
++ SDHCI_MAX_CURRENT_330_MASK) >>
++ SDHCI_MAX_CURRENT_330_SHIFT) *
++ SDHCI_MAX_CURRENT_MULTIPLIER;
++ }
++ if (caps[0] & SDHCI_CAN_VDD_300) {
++ ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
++
++ mmc->max_current_300 = ((max_current_caps &
++ SDHCI_MAX_CURRENT_300_MASK) >>
++ SDHCI_MAX_CURRENT_300_SHIFT) *
++ SDHCI_MAX_CURRENT_MULTIPLIER;
++ }
++ if (caps[0] & SDHCI_CAN_VDD_180) {
++ ocr_avail |= MMC_VDD_165_195;
++
++ mmc->max_current_180 = ((max_current_caps &
++ SDHCI_MAX_CURRENT_180_MASK) >>
++ SDHCI_MAX_CURRENT_180_SHIFT) *
++ SDHCI_MAX_CURRENT_MULTIPLIER;
++ }
++
++ if (host->ocr_mask)
++ ocr_avail = host->ocr_mask;
++
++ mmc->ocr_avail = ocr_avail;
++ mmc->ocr_avail_sdio = ocr_avail;
++ if (host->ocr_avail_sdio)
++ mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
++ mmc->ocr_avail_sd = ocr_avail;
++ if (host->ocr_avail_sd)
++ mmc->ocr_avail_sd &= host->ocr_avail_sd;
++ else /* normal SD controllers don't support 1.8V */
++ mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
++ mmc->ocr_avail_mmc = ocr_avail;
++ if (host->ocr_avail_mmc)
++ mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
++
++ if (mmc->ocr_avail == 0) {
++ pr_err("%s: Hardware doesn't report any "
++ "support voltages.\n", mmc_hostname(mmc));
++ return -ENODEV;
++ }
++
++ spin_lock_init(&host->lock);
++
++ /*
++ * Maximum number of segments. Depends on if the hardware
++ * can do scatter/gather or not.
++ */
++ if (host->flags & SDHCI_USE_ADMA)
++ mmc->max_segs = 128;
++ else if (host->flags & SDHCI_USE_SDMA)
++ mmc->max_segs = 1;
++ else /* PIO */
++ mmc->max_segs = 128;
++
++ /*
++ * Maximum number of sectors in one transfer. Limited by DMA boundary
++ * size (512KiB).
++ */
++ mmc->max_req_size = 524288;
++
++ /*
++ * Maximum segment size. Could be one segment with the maximum number
++ * of bytes. When doing hardware scatter/gather, each entry cannot
++ * be larger than 64 KiB though.
++ */
++ if (host->flags & SDHCI_USE_ADMA) {
++ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
++ mmc->max_seg_size = 65535;
++ else
++ mmc->max_seg_size = 65536;
++ } else {
++ mmc->max_seg_size = mmc->max_req_size;
++ }
++
++ /*
++ * Maximum block size. This varies from controller to controller and
++ * is specified in the capabilities register.
++ */
++ if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
++ mmc->max_blk_size = 2;
++ } else {
++ mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
++ SDHCI_MAX_BLOCK_SHIFT;
++ if (mmc->max_blk_size >= 3) {
++ pr_warning("%s: Invalid maximum block size, "
++ "assuming 512 bytes\n", mmc_hostname(mmc));
++ mmc->max_blk_size = 0;
++ }
++ }
++
++ mmc->max_blk_size = 512 << mmc->max_blk_size;
++
++ /*
++ * Maximum block count.
++ */
++ mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
++
++ /*
++ * Init tasklets.
++ */
++ tasklet_init(&host->finish_tasklet,
++ sdhci_tasklet_finish, (unsigned long)host);
++
++ setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
++
++ if (host->version >= SDHCI_SPEC_300) {
++ init_waitqueue_head(&host->buf_ready_int);
++
++ /* Initialize re-tuning timer */
++ init_timer(&host->tuning_timer);
++ host->tuning_timer.data = (unsigned long)host;
++ host->tuning_timer.function = sdhci_tuning_timer;
++ }
++
++ sdhci_init(host, 0);
++
++ ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
++ IRQF_SHARED, mmc_hostname(mmc), host);
++ if (ret) {
++ pr_err("%s: Failed to request IRQ %d: %d\n",
++ mmc_hostname(mmc), host->irq, ret);
++ goto untasklet;
++ }
++
++#ifdef CONFIG_MMC_DEBUG
++ sdhci_dumpregs(host);
++#endif
++
++#ifdef SDHCI_USE_LEDS_CLASS
++ snprintf(host->led_name, sizeof(host->led_name),
++ "%s::", mmc_hostname(mmc));
++ host->led.name = host->led_name;
++ host->led.brightness = LED_OFF;
++ host->led.default_trigger = mmc_hostname(mmc);
++ host->led.brightness_set = sdhci_led_control;
++
++ ret = led_classdev_register(mmc_dev(mmc), &host->led);
++ if (ret) {
++ pr_err("%s: Failed to register LED device: %d\n",
++ mmc_hostname(mmc), ret);
++ goto reset;
++ }
++#endif
++
++ mmiowb();
++
++ mmc_add_host(mmc);
++
++ pr_info("%s: SDHCI controller on %s [%s] using %s\n",
++ mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
++ (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
++ (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
++
++ sdhci_enable_card_detection(host);
++
++ return 0;
++
++#ifdef SDHCI_USE_LEDS_CLASS
++reset:
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
++ free_irq(host->irq, host);
++#endif
++untasklet:
++ tasklet_kill(&host->finish_tasklet);
++
++ return ret;
++}
++
++EXPORT_SYMBOL_GPL(sdhci_add_host);
++
++void sdhci_remove_host(struct sdhci_host *host, int dead)
++{
++ unsigned long flags;
++
++ if (dead) {
++ spin_lock_irqsave(&host->lock, flags);
++
++ host->flags |= SDHCI_DEVICE_DEAD;
++
++ if (host->mrq) {
++ pr_err("%s: Controller removed during "
++ " transfer!\n", mmc_hostname(host->mmc));
++
++ host->mrq->cmd->error = -ENOMEDIUM;
++ tasklet_schedule(&host->finish_tasklet);
++ }
++
++ spin_unlock_irqrestore(&host->lock, flags);
++ }
++
++ sdhci_disable_card_detection(host);
++
++ mmc_remove_host(host->mmc);
++
++#ifdef SDHCI_USE_LEDS_CLASS
++ led_classdev_unregister(&host->led);
++#endif
++
++ if (!dead)
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
++
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
++ free_irq(host->irq, host);
++
++ del_timer_sync(&host->timer);
++
++ tasklet_kill(&host->finish_tasklet);
++
++ if (host->vmmc) {
++ regulator_disable(host->vmmc);
++ regulator_put(host->vmmc);
++ }
++
++ if (host->vqmmc) {
++ regulator_disable(host->vqmmc);
++ regulator_put(host->vqmmc);
++ }
++
++ if (host->adma_desc)
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
++ kfree(host->align_buffer);
++
++ host->adma_desc = NULL;
++ host->align_buffer = NULL;
++}
++
++EXPORT_SYMBOL_GPL(sdhci_remove_host);
++
++void sdhci_free_host(struct sdhci_host *host)
++{
++ mmc_free_host(host->mmc);
++}
++
++EXPORT_SYMBOL_GPL(sdhci_free_host);
++
++/*****************************************************************************\
++ * *
++ * Driver init/exit *
++ * *
++\*****************************************************************************/
++
++static int __init sdhci_drv_init(void)
++{
++ pr_info(DRIVER_NAME
++ ": Secure Digital Host Controller Interface driver\n");
++ pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
++
++ return 0;
++}
++
++static void __exit sdhci_drv_exit(void)
++{
++}
++
++module_init(sdhci_drv_init);
++module_exit(sdhci_drv_exit);
++
++module_param(debug_quirks, uint, 0444);
++module_param(debug_quirks2, uint, 0444);
++
++MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
++MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
++MODULE_LICENSE("GPL");
++
++MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
++MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-dove.c linux-openelec/drivers/mmc/host/sdhci-dove.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-dove.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-dove.c 2015-05-06 12:05:42.000000000 -0500
+@@ -86,6 +86,10 @@
+ static const struct sdhci_ops sdhci_dove_ops = {
+ .read_w = sdhci_dove_readw,
+ .read_l = sdhci_dove_readl,
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_dove_pdata = {
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-esdhc.h linux-openelec/drivers/mmc/host/sdhci-esdhc.h
+--- linux-3.14.36/drivers/mmc/host/sdhci-esdhc.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-esdhc.h 2015-05-06 12:05:42.000000000 -0500
+@@ -20,12 +20,11 @@
+
+ #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
+ SDHCI_QUIRK_NO_BUSY_IRQ | \
+- SDHCI_QUIRK_NONSTANDARD_CLOCK | \
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
+- SDHCI_QUIRK_PIO_NEEDS_DELAY | \
+- SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
++ SDHCI_QUIRK_PIO_NEEDS_DELAY)
+
+ #define ESDHC_SYSTEM_CONTROL 0x2c
++#define ESDHC_SYS_CTRL_RSTA (1 << 24)
+ #define ESDHC_CLOCK_MASK 0x0000fff0
+ #define ESDHC_PREDIV_SHIFT 8
+ #define ESDHC_DIVIDER_SHIFT 4
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-esdhc-imx.c linux-openelec/drivers/mmc/host/sdhci-esdhc-imx.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-esdhc-imx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-esdhc-imx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -11,6 +11,7 @@
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
++#include <linux/busfreq-imx6.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+ #include <linux/err.h>
+@@ -114,6 +115,10 @@
+ #define ESDHC_FLAG_STD_TUNING BIT(5)
+ /* The IP has SDHCI_CAPABILITIES_1 register */
+ #define ESDHC_FLAG_HAVE_CAP1 BIT(6)
++/* The IP has errata ERR004536 */
++#define ESDHC_FLAG_ERR004536 BIT(7)
++/* need request bus freq during low power */
++#define ESDHC_FLAG_BUSFREQ BIT(8)
+
+ struct esdhc_soc_data {
+ u32 flags;
+@@ -141,7 +146,8 @@
+
+ static struct esdhc_soc_data usdhc_imx6sl_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+- | ESDHC_FLAG_HAVE_CAP1,
++ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
++ | ESDHC_FLAG_BUSFREQ,
+ };
+
+ struct pltfm_imx_data {
+@@ -160,7 +166,6 @@
+ MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
+ WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
+ } multiblock_status;
+- u32 uhs_mode;
+ u32 is_ddr;
+ };
+
+@@ -382,7 +387,6 @@
+ if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
+ ret |= SDHCI_CTRL_TUNED_CLK;
+
+- ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK);
+ ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+
+ return ret;
+@@ -429,7 +433,6 @@
+ else
+ new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
+ writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+- imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK;
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK)
+@@ -600,12 +603,14 @@
+ u32 temp, val;
+
+ if (clock == 0) {
++ host->mmc->actual_clock = 0;
++
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+- goto out;
++ return;
+ }
+
+ if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
+@@ -645,8 +650,6 @@
+ }
+
+ mdelay(1);
+-out:
+- host->clock = clock;
+ }
+
+ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+@@ -668,7 +671,7 @@
+ return -ENOSYS;
+ }
+
+-static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
++static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u32 ctrl;
+
+@@ -686,17 +689,56 @@
+
+ esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
+ SDHCI_HOST_CONTROL);
++}
+
+- return 0;
++static void esdhc_tuning_reset(struct sdhci_host *host, u32 rst_bits)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct pltfm_imx_data *imx_data = pltfm_host->priv;
++ u32 timeout;
++ u32 reg;
++
++ reg = readl(host->ioaddr + ESDHC_SYSTEM_CONTROL);
++ reg |= rst_bits;
++ writel(reg, host->ioaddr + ESDHC_SYSTEM_CONTROL);
++
++ /* Wait for max 100ms */
++ timeout = 100;
++
++ /* hw clears the bit when it's done */
++ while (readl(host->ioaddr + ESDHC_SYSTEM_CONTROL) & rst_bits) {
++ if (timeout == 0) {
++ dev_err(mmc_dev(host->mmc),
++ "Reset never completes!\n");
++ return;
++ }
++ timeout--;
++ mdelay(1);
++ }
++
++ /*
++ * The RSTA, reset all, on usdhc will not clear following regs:
++ * > SDHCI_MIX_CTRL
++ * > SDHCI_TUNE_CTRL_STATUS
++ *
++ * Do it manually here.
++ */
++ if ((rst_bits & ESDHC_SYS_CTRL_RSTA) && is_imx6q_usdhc(imx_data)) {
++ writel(0, host->ioaddr + ESDHC_MIX_CTRL);
++ writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
++ /* FIXME: delay for clear tuning status or some cards may not work */
++ mdelay(1);
++ }
+ }
+
+ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
+ {
+ u32 reg;
+
+- /* FIXME: delay a bit for card to be ready for next tuning due to errors */
+- mdelay(1);
++ /* reset controller before tuning or it may fail on some cards */
++ esdhc_tuning_reset(host, ESDHC_SYS_CTRL_RSTA);
+
++ /* This is balanced by the runtime put in sdhci_tasklet_finish */
+ pm_runtime_get_sync(host->mmc->parent);
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
+@@ -713,13 +755,12 @@
+ complete(&mrq->completion);
+ }
+
+-static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
++static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
++ struct scatterlist *sg)
+ {
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {NULL};
+ struct mmc_data data = {0};
+- struct scatterlist sg;
+- char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+@@ -728,11 +769,9 @@
+ data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+- data.sg = &sg;
++ data.sg = sg;
+ data.sg_len = 1;
+
+- sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern));
+-
+ mrq.cmd = &cmd;
+ mrq.cmd->mrq = &mrq;
+ mrq.data = &data;
+@@ -742,14 +781,12 @@
+ mrq.done = esdhc_request_done;
+ init_completion(&(mrq.completion));
+
+- disable_irq(host->irq);
+- spin_lock(&host->lock);
++ spin_lock_irq(&host->lock);
+ host->mrq = &mrq;
+
+ sdhci_send_command(host, mrq.cmd);
+
+- spin_unlock(&host->lock);
+- enable_irq(host->irq);
++ spin_unlock_irq(&host->lock);
+
+ wait_for_completion(&mrq.completion);
+
+@@ -772,13 +809,21 @@
+
+ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
+ {
++ struct scatterlist sg;
++ char *tuning_pattern;
+ int min, max, avg, ret;
+
++ tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL);
++ if (!tuning_pattern)
++ return -ENOMEM;
++
++ sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN);
++
+ /* find the mininum delay first which can pass tuning */
+ min = ESDHC_TUNE_CTRL_MIN;
+ while (min < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, min);
+- if (!esdhc_send_tuning_cmd(host, opcode))
++ if (!esdhc_send_tuning_cmd(host, opcode, &sg))
+ break;
+ min += ESDHC_TUNE_CTRL_STEP;
+ }
+@@ -787,7 +832,7 @@
+ max = min + ESDHC_TUNE_CTRL_STEP;
+ while (max < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, max);
+- if (esdhc_send_tuning_cmd(host, opcode)) {
++ if (esdhc_send_tuning_cmd(host, opcode, &sg)) {
+ max -= ESDHC_TUNE_CTRL_STEP;
+ break;
+ }
+@@ -797,9 +842,11 @@
+ /* use average delay to get the best timing */
+ avg = (min + max) / 2;
+ esdhc_prepare_tuning(host, avg);
+- ret = esdhc_send_tuning_cmd(host, opcode);
++ ret = esdhc_send_tuning_cmd(host, opcode, &sg);
+ esdhc_post_tuning(host);
+
++ kfree(tuning_pattern);
++
+ dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
+ ret ? "failed" : "passed", avg, ret);
+
+@@ -837,28 +884,20 @@
+ return pinctrl_select_state(imx_data->pinctrl, pinctrl);
+ }
+
+-static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
++static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+- switch (uhs) {
++ switch (timing) {
+ case MMC_TIMING_UHS_SDR12:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12;
+- break;
+ case MMC_TIMING_UHS_SDR25:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25;
+- break;
+ case MMC_TIMING_UHS_SDR50:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
+- break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50;
+ writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
+ ESDHC_MIX_CTRL_DDREN,
+ host->ioaddr + ESDHC_MIX_CTRL);
+@@ -875,7 +914,20 @@
+ break;
+ }
+
+- return esdhc_change_pinstate(host, uhs);
++ esdhc_change_pinstate(host, timing);
++}
++
++static void esdhc_reset(struct sdhci_host *host, u8 mask)
++{
++ sdhci_reset(host, mask);
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++}
++
++static unsigned int esdhc_get_max_timeout_counter(struct sdhci_host *host)
++{
++ return 1 << 28;
+ }
+
+ static struct sdhci_ops sdhci_esdhc_ops = {
+@@ -888,8 +940,9 @@
+ .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_min_clock = esdhc_pltfm_get_min_clock,
+ .get_ro = esdhc_pltfm_get_ro,
+- .platform_bus_width = esdhc_pltfm_bus_width,
++ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .set_uhs_signaling = esdhc_set_uhs_signaling,
++ .reset = esdhc_reset,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+@@ -906,6 +959,7 @@
+ struct esdhc_platform_data *boarddata)
+ {
+ struct device_node *np = pdev->dev.of_node;
++ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ if (!np)
+ return -ENODEV;
+@@ -939,6 +993,12 @@
+ if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
+ boarddata->delay_line = 0;
+
++ if (of_find_property(np, "keep-power-in-suspend", NULL))
++ host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
++
++ if (of_find_property(np, "enable-sdio-wakeup", NULL))
++ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
++
+ return 0;
+ }
+ #else
+@@ -994,6 +1054,9 @@
+ goto free_sdhci;
+ }
+
++ if (imx_data->socdata->flags & ESDHC_FLAG_BUSFREQ)
++ request_bus_freq(BUS_FREQ_HIGH);
++
+ pltfm_host->clk = imx_data->clk_per;
+ pltfm_host->clock = clk_get_rate(pltfm_host->clk);
+ clk_prepare_enable(imx_data->clk_per);
+@@ -1027,8 +1090,17 @@
+ */
+ if (esdhc_is_usdhc(imx_data)) {
+ writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
+- host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
++ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++ SDHCI_QUIRK2_NOSTD_TIMEOUT_COUNTER;
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
++
++ /*
++ * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
++ * TO1.1, it's harmless for MX6SL
++ */
++ writel(readl(host->ioaddr + 0x6c) | BIT(7), host->ioaddr + 0x6c);
++ sdhci_esdhc_ops.get_max_timeout_counter =
++ esdhc_get_max_timeout_counter;
+ }
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+@@ -1040,6 +1112,9 @@
+ ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
+ host->ioaddr + ESDHC_TUNING_CTRL);
+
++ if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
++ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
++
+ boarddata = &imx_data->boarddata;
+ if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
+ if (!host->mmc->parent->platform_data) {
+@@ -1116,6 +1191,10 @@
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
++ if (host->mmc->pm_caps & MMC_PM_KEEP_POWER &&
++ host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ)
++ device_init_wakeup(&pdev->dev, 1);
++
+ err = sdhci_add_host(host);
+ if (err)
+ goto disable_clk;
+@@ -1132,6 +1211,8 @@
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
++ if (imx_data->socdata->flags & ESDHC_FLAG_BUSFREQ)
++ release_bus_freq(BUS_FREQ_HIGH);
+ free_sdhci:
+ sdhci_pltfm_free(pdev);
+ return err;
+@@ -1170,10 +1251,15 @@
+
+ ret = sdhci_runtime_suspend_host(host);
+
+- clk_disable_unprepare(imx_data->clk_per);
+- clk_disable_unprepare(imx_data->clk_ipg);
++ if (!sdhci_sdio_irq_enabled(host)) {
++ clk_disable_unprepare(imx_data->clk_per);
++ clk_disable_unprepare(imx_data->clk_ipg);
++ }
+ clk_disable_unprepare(imx_data->clk_ahb);
+
++ if (imx_data->socdata->flags & ESDHC_FLAG_BUSFREQ)
++ release_bus_freq(BUS_FREQ_HIGH);
++
+ return ret;
+ }
+
+@@ -1183,8 +1269,10 @@
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+- clk_prepare_enable(imx_data->clk_per);
+- clk_prepare_enable(imx_data->clk_ipg);
++ if (!sdhci_sdio_irq_enabled(host)) {
++ clk_prepare_enable(imx_data->clk_per);
++ clk_prepare_enable(imx_data->clk_ipg);
++ }
+ clk_prepare_enable(imx_data->clk_ahb);
+
+ return sdhci_runtime_resume_host(host);
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci.h linux-openelec/drivers/mmc/host/sdhci.h
+--- linux-3.14.36/drivers/mmc/host/sdhci.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci.h 2015-05-06 12:05:42.000000000 -0500
+@@ -281,18 +281,15 @@
+ unsigned int (*get_max_clock)(struct sdhci_host *host);
+ unsigned int (*get_min_clock)(struct sdhci_host *host);
+ unsigned int (*get_timeout_clock)(struct sdhci_host *host);
+- int (*platform_bus_width)(struct sdhci_host *host,
+- int width);
++ unsigned int (*get_max_timeout_counter)(struct sdhci_host *host);
++ void (*set_bus_width)(struct sdhci_host *host, int width);
+ void (*platform_send_init_74_clocks)(struct sdhci_host *host,
+ u8 power_mode);
+ unsigned int (*get_ro)(struct sdhci_host *host);
+- void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
+- void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
++ void (*reset)(struct sdhci_host *host, u8 mask);
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+- int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
++ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*hw_reset)(struct sdhci_host *host);
+- void (*platform_suspend)(struct sdhci_host *host);
+- void (*platform_resume)(struct sdhci_host *host);
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*platform_init)(struct sdhci_host *host);
+ void (*card_event)(struct sdhci_host *host);
+@@ -397,6 +394,16 @@
+ extern void sdhci_send_command(struct sdhci_host *host,
+ struct mmc_command *cmd);
+
++static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
++{
++ return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
++}
++
++void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
++void sdhci_set_bus_width(struct sdhci_host *host, int width);
++void sdhci_reset(struct sdhci_host *host, u8 mask);
++void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
++
+ #ifdef CONFIG_PM
+ extern int sdhci_suspend_host(struct sdhci_host *host);
+ extern int sdhci_resume_host(struct sdhci_host *host);
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-of-arasan.c linux-openelec/drivers/mmc/host/sdhci-of-arasan.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-of-arasan.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-of-arasan.c 2015-05-06 12:05:42.000000000 -0500
+@@ -52,8 +52,12 @@
+ }
+
+ static struct sdhci_ops sdhci_arasan_ops = {
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_arasan_get_timeout_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static struct sdhci_pltfm_data sdhci_arasan_pdata = {
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-of-esdhc.c linux-openelec/drivers/mmc/host/sdhci-of-esdhc.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-of-esdhc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-of-esdhc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -199,13 +199,14 @@
+
+ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+-
+ int pre_div = 2;
+ int div = 1;
+ u32 temp;
+
++ host->mmc->actual_clock = 0;
++
+ if (clock == 0)
+- goto out;
++ return;
+
+ /* Workaround to reduce the clock frequency for p1010 esdhc */
+ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
+@@ -238,24 +239,8 @@
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ mdelay(1);
+-out:
+- host->clock = clock;
+ }
+
+-#ifdef CONFIG_PM
+-static u32 esdhc_proctl;
+-static void esdhc_of_suspend(struct sdhci_host *host)
+-{
+- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
+-}
+-
+-static void esdhc_of_resume(struct sdhci_host *host)
+-{
+- esdhc_of_enable_dma(host);
+- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
+-}
+-#endif
+-
+ static void esdhc_of_platform_init(struct sdhci_host *host)
+ {
+ u32 vvn;
+@@ -269,7 +254,7 @@
+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+ }
+
+-static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
++static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u32 ctrl;
+
+@@ -289,8 +274,6 @@
+
+ clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
+ ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
+-
+- return 0;
+ }
+
+ static const struct sdhci_ops sdhci_esdhc_ops = {
+@@ -305,13 +288,46 @@
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
+ .platform_init = esdhc_of_platform_init,
+-#ifdef CONFIG_PM
+- .platform_suspend = esdhc_of_suspend,
+- .platform_resume = esdhc_of_resume,
+-#endif
+ .adma_workaround = esdhci_of_adma_workaround,
+- .platform_bus_width = esdhc_pltfm_bus_width,
++ .set_bus_width = esdhc_pltfm_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
++};
++
++#ifdef CONFIG_PM
++
++static u32 esdhc_proctl;
++static int esdhc_of_suspend(struct device *dev)
++{
++ struct sdhci_host *host = dev_get_drvdata(dev);
++
++ esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
++
++ return sdhci_suspend_host(host);
++}
++
++static int esdhc_of_resume(struct device *dev)
++{
++ struct sdhci_host *host = dev_get_drvdata(dev);
++ int ret = sdhci_resume_host(host);
++
++ if (ret == 0) {
++ /* Isn't this already done by sdhci_resume_host() ? --rmk */
++ esdhc_of_enable_dma(host);
++ sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
++ }
++
++ return ret;
++}
++
++static const struct dev_pm_ops esdhc_pmops = {
++	.suspend = esdhc_of_suspend,
++	.resume = esdhc_of_resume,
+ };
++#define ESDHC_PMOPS (&esdhc_pmops)
++#else
++#define ESDHC_PMOPS NULL
++#endif
+
+ static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
+ /*
+@@ -374,7 +390,7 @@
+ .name = "sdhci-esdhc",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_esdhc_of_match,
+- .pm = SDHCI_PLTFM_PMOPS,
++ .pm = ESDHC_PMOPS,
+ },
+ .probe = sdhci_esdhc_probe,
+ .remove = sdhci_esdhc_remove,
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-of-hlwd.c linux-openelec/drivers/mmc/host/sdhci-of-hlwd.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-of-hlwd.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-of-hlwd.c 2015-05-06 12:05:42.000000000 -0500
+@@ -58,6 +58,10 @@
+ .write_l = sdhci_hlwd_writel,
+ .write_w = sdhci_hlwd_writew,
+ .write_b = sdhci_hlwd_writeb,
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-pci.c linux-openelec/drivers/mmc/host/sdhci-pci.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-pci.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-pci.c 2015-07-24 18:03:28.796842002 -0500
+@@ -1023,7 +1023,7 @@
+ return 0;
+ }
+
+-static int sdhci_pci_bus_width(struct sdhci_host *host, int width)
++static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u8 ctrl;
+
+@@ -1044,8 +1044,6 @@
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+-
+- return 0;
+ }
+
+ static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
+@@ -1072,8 +1070,11 @@
+ }
+
+ static const struct sdhci_ops sdhci_pci_ops = {
++ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+- .platform_bus_width = sdhci_pci_bus_width,
++ .set_bus_width = sdhci_pci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ };
+
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-pltfm.c linux-openelec/drivers/mmc/host/sdhci-pltfm.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-pltfm.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-pltfm.c 2015-05-06 12:05:42.000000000 -0500
+@@ -45,6 +45,10 @@
+ EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
+
+ static const struct sdhci_ops sdhci_pltfm_ops = {
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ #ifdef CONFIG_OF
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-pxav2.c linux-openelec/drivers/mmc/host/sdhci-pxav2.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-pxav2.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-pxav2.c 2015-05-06 12:05:42.000000000 -0500
+@@ -51,11 +51,13 @@
+ #define MMC_CARD 0x1000
+ #define MMC_WIDTH 0x0100
+
+-static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
++static void pxav2_reset(struct sdhci_host *host, u8 mask)
+ {
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
++ sdhci_reset(host, mask);
++
+ if (mask == SDHCI_RESET_ALL) {
+ u16 tmp = 0;
+
+@@ -88,7 +90,7 @@
+ }
+ }
+
+-static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
++static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u8 ctrl;
+ u16 tmp;
+@@ -107,14 +109,14 @@
+ }
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+ writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+-
+- return 0;
+ }
+
+ static const struct sdhci_ops pxav2_sdhci_ops = {
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+- .platform_reset_exit = pxav2_set_private_registers,
+- .platform_bus_width = pxav2_mmc_set_width,
++ .set_bus_width = pxav2_mmc_set_bus_width,
++ .reset = pxav2_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ #ifdef CONFIG_OF
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-pxav3.c linux-openelec/drivers/mmc/host/sdhci-pxav3.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-pxav3.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-pxav3.c 2015-07-24 18:03:29.688842002 -0500
+@@ -57,11 +57,13 @@
+ #define SDCE_MISC_INT (1<<2)
+ #define SDCE_MISC_INT_EN (1<<1)
+
+-static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
++static void pxav3_reset(struct sdhci_host *host, u8 mask)
+ {
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
++ sdhci_reset(host, mask);
++
+ if (mask == SDHCI_RESET_ALL) {
+ /*
+ * tune timing of read data/command when crc error happen
+@@ -129,7 +131,7 @@
+ pxa->power_mode = power_mode;
+ }
+
+-static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
++static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+ {
+ u16 ctrl_2;
+
+@@ -163,15 +165,16 @@
+ dev_dbg(mmc_dev(host->mmc),
+ "%s uhs = %d, ctrl_2 = %04X\n",
+ __func__, uhs, ctrl_2);
+-
+- return 0;
+ }
+
+ static const struct sdhci_ops pxav3_sdhci_ops = {
+- .platform_reset_exit = pxav3_set_private_registers,
++ .set_clock = sdhci_set_clock,
+ .set_uhs_signaling = pxav3_set_uhs_signaling,
+ .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = pxav3_reset,
++	/* .set_uhs_signaling already set to pxav3_set_uhs_signaling above */
+ };
+
+ static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-pxav3.c.orig linux-openelec/drivers/mmc/host/sdhci-pxav3.c.orig
+--- linux-3.14.36/drivers/mmc/host/sdhci-pxav3.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mmc/host/sdhci-pxav3.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,446 @@
++/*
++ * Copyright (C) 2010 Marvell International Ltd.
++ * Zhangfei Gao <zhangfei.gao@marvell.com>
++ * Kevin Wang <dwang4@marvell.com>
++ * Mingwei Wang <mwwang@marvell.com>
++ * Philip Rakity <prakity@marvell.com>
++ * Mark Brown <markb@marvell.com>
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/gpio.h>
++#include <linux/mmc/card.h>
++#include <linux/mmc/host.h>
++#include <linux/mmc/slot-gpio.h>
++#include <linux/platform_data/pxa_sdhci.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/of_gpio.h>
++#include <linux/pm.h>
++#include <linux/pm_runtime.h>
++
++#include "sdhci.h"
++#include "sdhci-pltfm.h"
++
++#define PXAV3_RPM_DELAY_MS 50
++
++#define SD_CLOCK_BURST_SIZE_SETUP 0x10A
++#define SDCLK_SEL 0x100
++#define SDCLK_DELAY_SHIFT 9
++#define SDCLK_DELAY_MASK 0x1f
++
++#define SD_CFG_FIFO_PARAM 0x100
++#define SDCFG_GEN_PAD_CLK_ON (1<<6)
++#define SDCFG_GEN_PAD_CLK_CNT_MASK 0xFF
++#define SDCFG_GEN_PAD_CLK_CNT_SHIFT 24
++
++#define SD_SPI_MODE 0x108
++#define SD_CE_ATA_1 0x10C
++
++#define SD_CE_ATA_2 0x10E
++#define SDCE_MISC_INT (1<<2)
++#define SDCE_MISC_INT_EN (1<<1)
++
++static void pxav3_reset(struct sdhci_host *host, u8 mask)
++{
++ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
++ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
++
++ sdhci_reset(host, mask);
++
++ if (mask == SDHCI_RESET_ALL) {
++ /*
++ * tune timing of read data/command when crc error happen
++ * no performance impact
++ */
++ if (pdata && 0 != pdata->clk_delay_cycles) {
++ u16 tmp;
++
++ tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
++ tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
++ << SDCLK_DELAY_SHIFT;
++ tmp |= SDCLK_SEL;
++ writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
++ }
++ }
++}
++
++#define MAX_WAIT_COUNT 5
++static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_pxa *pxa = pltfm_host->priv;
++ u16 tmp;
++ int count;
++
++ if (pxa->power_mode == MMC_POWER_UP
++ && power_mode == MMC_POWER_ON) {
++
++ dev_dbg(mmc_dev(host->mmc),
++ "%s: slot->power_mode = %d,"
++ "ios->power_mode = %d\n",
++ __func__,
++ pxa->power_mode,
++ power_mode);
++
++ /* set we want notice of when 74 clocks are sent */
++ tmp = readw(host->ioaddr + SD_CE_ATA_2);
++ tmp |= SDCE_MISC_INT_EN;
++ writew(tmp, host->ioaddr + SD_CE_ATA_2);
++
++ /* start sending the 74 clocks */
++ tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM);
++ tmp |= SDCFG_GEN_PAD_CLK_ON;
++ writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM);
++
++ /* slowest speed is about 100KHz or 10usec per clock */
++ udelay(740);
++ count = 0;
++
++ while (count++ < MAX_WAIT_COUNT) {
++ if ((readw(host->ioaddr + SD_CE_ATA_2)
++ & SDCE_MISC_INT) == 0)
++ break;
++ udelay(10);
++ }
++
++ if (count == MAX_WAIT_COUNT)
++ dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n");
++
++ /* clear the interrupt bit if posted */
++ tmp = readw(host->ioaddr + SD_CE_ATA_2);
++ tmp |= SDCE_MISC_INT;
++ writew(tmp, host->ioaddr + SD_CE_ATA_2);
++ }
++ pxa->power_mode = power_mode;
++}
++
++static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
++{
++ u16 ctrl_2;
++
++ /*
++ * Set V18_EN -- UHS modes do not work without this.
++ * does not change signaling voltage
++ */
++ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++
++ /* Select Bus Speed Mode for host */
++ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
++ switch (uhs) {
++ case MMC_TIMING_UHS_SDR12:
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
++ break;
++ case MMC_TIMING_UHS_SDR25:
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
++ break;
++ case MMC_TIMING_UHS_SDR50:
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
++ break;
++ case MMC_TIMING_UHS_SDR104:
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
++ break;
++ case MMC_TIMING_UHS_DDR50:
++ ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
++ break;
++ }
++
++ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
++ dev_dbg(mmc_dev(host->mmc),
++ "%s uhs = %d, ctrl_2 = %04X\n",
++ __func__, uhs, ctrl_2);
++}
++
++static const struct sdhci_ops pxav3_sdhci_ops = {
++ .set_clock = sdhci_set_clock,
++ .set_uhs_signaling = pxav3_set_uhs_signaling,
++ .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
++ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = pxav3_reset,
++	/* .set_uhs_signaling already set to pxav3_set_uhs_signaling above */
++};
++
++static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
++ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
++ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
++ | SDHCI_QUIRK_32BIT_ADMA_SIZE
++ | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
++ .ops = &pxav3_sdhci_ops,
++};
++
++#ifdef CONFIG_OF
++static const struct of_device_id sdhci_pxav3_of_match[] = {
++ {
++ .compatible = "mrvl,pxav3-mmc",
++ },
++ {},
++};
++MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match);
++
++static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
++{
++ struct sdhci_pxa_platdata *pdata;
++ struct device_node *np = dev->of_node;
++ u32 clk_delay_cycles;
++
++ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
++ if (!pdata)
++ return NULL;
++
++ of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
++ if (clk_delay_cycles > 0)
++ pdata->clk_delay_cycles = clk_delay_cycles;
++
++ return pdata;
++}
++#else
++static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
++{
++ return NULL;
++}
++#endif
++
++static int sdhci_pxav3_probe(struct platform_device *pdev)
++{
++ struct sdhci_pltfm_host *pltfm_host;
++ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
++ struct device *dev = &pdev->dev;
++ struct sdhci_host *host = NULL;
++ struct sdhci_pxa *pxa = NULL;
++ const struct of_device_id *match;
++
++ int ret;
++ struct clk *clk;
++
++ pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
++ if (!pxa)
++ return -ENOMEM;
++
++ host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0);
++ if (IS_ERR(host)) {
++ kfree(pxa);
++ return PTR_ERR(host);
++ }
++ pltfm_host = sdhci_priv(host);
++ pltfm_host->priv = pxa;
++
++ clk = clk_get(dev, NULL);
++ if (IS_ERR(clk)) {
++ dev_err(dev, "failed to get io clock\n");
++ ret = PTR_ERR(clk);
++ goto err_clk_get;
++ }
++ pltfm_host->clk = clk;
++ clk_prepare_enable(clk);
++
++ /* enable 1/8V DDR capable */
++ host->mmc->caps |= MMC_CAP_1_8V_DDR;
++
++ match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
++ if (match) {
++ ret = mmc_of_parse(host->mmc);
++ if (ret)
++ goto err_of_parse;
++ sdhci_get_of_property(pdev);
++ pdata = pxav3_get_mmc_pdata(dev);
++ } else if (pdata) {
++ /* on-chip device */
++ if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
++ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
++
++ /* If slot design supports 8 bit data, indicate this to MMC. */
++ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
++ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
++
++ if (pdata->quirks)
++ host->quirks |= pdata->quirks;
++ if (pdata->quirks2)
++ host->quirks2 |= pdata->quirks2;
++ if (pdata->host_caps)
++ host->mmc->caps |= pdata->host_caps;
++ if (pdata->host_caps2)
++ host->mmc->caps2 |= pdata->host_caps2;
++ if (pdata->pm_caps)
++ host->mmc->pm_caps |= pdata->pm_caps;
++
++ if (gpio_is_valid(pdata->ext_cd_gpio)) {
++ ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio,
++ 0);
++ if (ret) {
++ dev_err(mmc_dev(host->mmc),
++ "failed to allocate card detect gpio\n");
++ goto err_cd_req;
++ }
++ }
++ }
++
++ pm_runtime_enable(&pdev->dev);
++ pm_runtime_get_sync(&pdev->dev);
++ pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
++ pm_runtime_use_autosuspend(&pdev->dev);
++ pm_suspend_ignore_children(&pdev->dev, 1);
++
++ ret = sdhci_add_host(host);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to add host\n");
++ goto err_add_host;
++ }
++
++ platform_set_drvdata(pdev, host);
++
++ if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) {
++ device_init_wakeup(&pdev->dev, 1);
++ host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
++ } else {
++ device_init_wakeup(&pdev->dev, 0);
++ }
++
++ pm_runtime_put_autosuspend(&pdev->dev);
++
++ return 0;
++
++err_of_parse:
++err_cd_req:
++err_add_host:
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ clk_disable_unprepare(clk);
++ clk_put(clk);
++err_clk_get:
++ sdhci_pltfm_free(pdev);
++ kfree(pxa);
++ return ret;
++}
++
++static int sdhci_pxav3_remove(struct platform_device *pdev)
++{
++ struct sdhci_host *host = platform_get_drvdata(pdev);
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_pxa *pxa = pltfm_host->priv;
++
++ pm_runtime_get_sync(&pdev->dev);
++ sdhci_remove_host(host, 1);
++ pm_runtime_disable(&pdev->dev);
++
++ clk_disable_unprepare(pltfm_host->clk);
++ clk_put(pltfm_host->clk);
++
++ sdhci_pltfm_free(pdev);
++ kfree(pxa);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int sdhci_pxav3_suspend(struct device *dev)
++{
++ int ret;
++ struct sdhci_host *host = dev_get_drvdata(dev);
++
++ pm_runtime_get_sync(dev);
++ ret = sdhci_suspend_host(host);
++ pm_runtime_mark_last_busy(dev);
++ pm_runtime_put_autosuspend(dev);
++
++ return ret;
++}
++
++static int sdhci_pxav3_resume(struct device *dev)
++{
++ int ret;
++ struct sdhci_host *host = dev_get_drvdata(dev);
++
++ pm_runtime_get_sync(dev);
++ ret = sdhci_resume_host(host);
++ pm_runtime_mark_last_busy(dev);
++ pm_runtime_put_autosuspend(dev);
++
++ return ret;
++}
++#endif
++
++#ifdef CONFIG_PM_RUNTIME
++static int sdhci_pxav3_runtime_suspend(struct device *dev)
++{
++ struct sdhci_host *host = dev_get_drvdata(dev);
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ unsigned long flags;
++
++ if (pltfm_host->clk) {
++ spin_lock_irqsave(&host->lock, flags);
++ host->runtime_suspended = true;
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ clk_disable_unprepare(pltfm_host->clk);
++ }
++
++ return 0;
++}
++
++static int sdhci_pxav3_runtime_resume(struct device *dev)
++{
++ struct sdhci_host *host = dev_get_drvdata(dev);
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ unsigned long flags;
++
++ if (pltfm_host->clk) {
++ clk_prepare_enable(pltfm_host->clk);
++
++ spin_lock_irqsave(&host->lock, flags);
++ host->runtime_suspended = false;
++ spin_unlock_irqrestore(&host->lock, flags);
++ }
++
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_PM
++static const struct dev_pm_ops sdhci_pxav3_pmops = {
++ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
++ SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend,
++ sdhci_pxav3_runtime_resume, NULL)
++};
++
++#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops)
++
++#else
++#define SDHCI_PXAV3_PMOPS NULL
++#endif
++
++static struct platform_driver sdhci_pxav3_driver = {
++ .driver = {
++ .name = "sdhci-pxav3",
++#ifdef CONFIG_OF
++ .of_match_table = sdhci_pxav3_of_match,
++#endif
++ .owner = THIS_MODULE,
++ .pm = SDHCI_PXAV3_PMOPS,
++ },
++ .probe = sdhci_pxav3_probe,
++ .remove = sdhci_pxav3_remove,
++};
++
++module_platform_driver(sdhci_pxav3_driver);
++
++MODULE_DESCRIPTION("SDHCI driver for pxav3");
++MODULE_AUTHOR("Marvell International Ltd.");
++MODULE_LICENSE("GPL v2");
++
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-s3c.c linux-openelec/drivers/mmc/host/sdhci-s3c.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-s3c.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-s3c.c 2015-05-06 12:05:42.000000000 -0500
+@@ -57,6 +57,8 @@
+
+ struct clk *clk_io;
+ struct clk *clk_bus[MAX_BUS_CLK];
++
++ bool no_divider;
+ };
+
+ /**
+@@ -69,6 +71,7 @@
+ */
+ struct sdhci_s3c_drv_data {
+ unsigned int sdhci_quirks;
++ bool no_divider;
+ };
+
+ static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
+@@ -153,7 +156,7 @@
+ * If controller uses a non-standard clock division, find the best clock
+ * speed possible with selected clock source and skip the division.
+ */
+- if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
++ if (ourhost->no_divider) {
+ rate = clk_round_rate(clksrc, wanted);
+ return wanted - rate;
+ }
+@@ -188,9 +191,13 @@
+ int src;
+ u32 ctrl;
+
++ host->mmc->actual_clock = 0;
++
+ /* don't bother if the clock is going off. */
+- if (clock == 0)
++ if (clock == 0) {
++ sdhci_set_clock(host, clock);
+ return;
++ }
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ delta = sdhci_s3c_consider_clock(ourhost, src, clock);
+@@ -240,6 +247,8 @@
+ if (clock < 25 * 1000000)
+ ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2);
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3);
++
++ sdhci_set_clock(host, clock);
+ }
+
+ /**
+@@ -296,10 +305,11 @@
+ unsigned long timeout;
+ u16 clk = 0;
+
++ host->mmc->actual_clock = 0;
++
+ /* If the clock is going off, set to 0 at clock control register */
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+- host->clock = clock;
+ return;
+ }
+
+@@ -307,8 +317,6 @@
+
+ clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+
+- host->clock = clock;
+-
+ clk = SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+@@ -330,14 +338,14 @@
+ }
+
+ /**
+- * sdhci_s3c_platform_bus_width - support 8bit buswidth
++ * sdhci_s3c_set_bus_width - support 8bit buswidth
+ * @host: The SDHCI host being queried
+ * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
+ *
+ * We have 8-bit width support but is not a v3 controller.
+ * So we add platform_bus_width() and support 8bit width.
+ */
+-static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width)
++static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u8 ctrl;
+
+@@ -359,15 +367,15 @@
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+-
+- return 0;
+ }
+
+ static struct sdhci_ops sdhci_s3c_ops = {
+ .get_max_clock = sdhci_s3c_get_max_clk,
+ .set_clock = sdhci_s3c_set_clock,
+ .get_min_clock = sdhci_s3c_get_min_clock,
+- .platform_bus_width = sdhci_s3c_platform_bus_width,
++ .set_bus_width = sdhci_s3c_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
+@@ -617,8 +625,10 @@
+ /* Setup quirks for the controller */
+ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+ host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
+- if (drv_data)
++ if (drv_data) {
+ host->quirks |= drv_data->sdhci_quirks;
++ sc->no_divider = drv_data->no_divider;
++ }
+
+ #ifndef CONFIG_MMC_SDHCI_S3C_DMA
+
+@@ -667,7 +677,7 @@
+ * If controller does not have internal clock divider,
+ * we can use overriding functions instead of default.
+ */
+- if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
++ if (sc->no_divider) {
+ sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+ sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+ sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+@@ -813,7 +823,7 @@
+
+ #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
+ static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+- .sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK,
++ .no_divider = true,
+ };
+ #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
+ #else
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-sirf.c linux-openelec/drivers/mmc/host/sdhci-sirf.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-sirf.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-sirf.c 2015-05-06 12:05:42.000000000 -0500
+@@ -28,7 +28,11 @@
+ }
+
+ static struct sdhci_ops sdhci_sirf_ops = {
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_sirf_get_max_clk,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static struct sdhci_pltfm_data sdhci_sirf_pdata = {
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-spear.c linux-openelec/drivers/mmc/host/sdhci-spear.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-spear.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-spear.c 2015-05-06 12:05:42.000000000 -0500
+@@ -37,7 +37,10 @@
+
+ /* sdhci ops */
+ static const struct sdhci_ops sdhci_pltfm_ops = {
+- /* Nothing to do for now. */
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ /* gpio card detection interrupt handler */
+diff -Nur linux-3.14.36/drivers/mmc/host/sdhci-tegra.c linux-openelec/drivers/mmc/host/sdhci-tegra.c
+--- linux-3.14.36/drivers/mmc/host/sdhci-tegra.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mmc/host/sdhci-tegra.c 2015-05-06 12:05:42.000000000 -0500
+@@ -48,19 +48,6 @@
+ int power_gpio;
+ };
+
+-static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
+-{
+- u32 val;
+-
+- if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+- /* Use wp_gpio here instead? */
+- val = readl(host->ioaddr + reg);
+- return val | SDHCI_WRITE_PROTECT;
+- }
+-
+- return readl(host->ioaddr + reg);
+-}
+-
+ static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+@@ -108,12 +95,14 @@
+ return mmc_gpio_get_ro(host->mmc);
+ }
+
+-static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
++static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
++ sdhci_reset(host, mask);
++
+ if (!(mask & SDHCI_RESET_ALL))
+ return;
+
+@@ -127,7 +116,7 @@
+ }
+ }
+
+-static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
++static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
+ {
+ u32 ctrl;
+
+@@ -144,16 +133,16 @@
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+- return 0;
+ }
+
+ static const struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+- .read_l = tegra_sdhci_readl,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+- .platform_bus_width = tegra_sdhci_buswidth,
+- .platform_reset_exit = tegra_sdhci_reset_exit,
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = tegra_sdhci_set_bus_width,
++ .reset = tegra_sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
+diff -Nur linux-3.14.36/drivers/mtd/chips/cfi_cmdset_0002.c linux-openelec/drivers/mtd/chips/cfi_cmdset_0002.c
+--- linux-3.14.36/drivers/mtd/chips/cfi_cmdset_0002.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mtd/chips/cfi_cmdset_0002.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1058,17 +1058,13 @@
+
+ #define UDELAY(map, chip, adr, usec) \
+ do { \
+- mutex_unlock(&chip->mutex); \
+ cfi_udelay(usec); \
+- mutex_lock(&chip->mutex); \
+ } while (0)
+
+ #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+ do { \
+- mutex_unlock(&chip->mutex); \
+ INVALIDATE_CACHED_RANGE(map, adr, len); \
+ cfi_udelay(usec); \
+- mutex_lock(&chip->mutex); \
+ } while (0)
+
+ #endif
+diff -Nur linux-3.14.36/drivers/mtd/ubi/build.c linux-openelec/drivers/mtd/ubi/build.c
+--- linux-3.14.36/drivers/mtd/ubi/build.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/mtd/ubi/build.c 2015-05-06 12:05:42.000000000 -0500
+@@ -640,7 +640,7 @@
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
+- if (ubi->mtd->numeraseregions != 0) {
++ if (ubi->mtd->numeraseregions > 1) {
+ /*
+ * Some flashes have several erase regions. Different regions
+ * may have different eraseblock size and other
+diff -Nur linux-3.14.36/drivers/mxc/asrc/Kconfig linux-openelec/drivers/mxc/asrc/Kconfig
+--- linux-3.14.36/drivers/mxc/asrc/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/asrc/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,14 @@
++#
++# ASRC configuration
++#
++
++menu "MXC Asynchronous Sample Rate Converter support"
++
++config MXC_ASRC
++ tristate "ASRC support"
++ depends on SOC_IMX35 || SOC_IMX53 || SOC_IMX6Q
++ select SND_SOC_FSL_ASRC
++ ---help---
++ Say Y to get the ASRC service.
++
++endmenu
+diff -Nur linux-3.14.36/drivers/mxc/asrc/Makefile linux-openelec/drivers/mxc/asrc/Makefile
+--- linux-3.14.36/drivers/mxc/asrc/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/asrc/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,4 @@
++#
++# Makefile for the kernel Asynchronous Sample Rate Converter driver
++#
++obj-$(CONFIG_MXC_ASRC) += mxc_asrc.o
+diff -Nur linux-3.14.36/drivers/mxc/asrc/mxc_asrc.c linux-openelec/drivers/mxc/asrc/mxc_asrc.c
+--- linux-3.14.36/drivers/mxc/asrc/mxc_asrc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/asrc/mxc_asrc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1957 @@
++/*
++ * Freescale Asynchronous Sample Rate Converter (ASRC) driver
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#include <linux/clk.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/regmap.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/pagemap.h>
++#include <linux/interrupt.h>
++#include <linux/miscdevice.h>
++#include <linux/dma-mapping.h>
++#include <linux/of_platform.h>
++#include <linux/platform_data/dma-imx.h>
++
++#include <linux/mxc_asrc.h>
++
++#define ASRC_PROC_PATH "driver/asrc"
++
++#define ASRC_RATIO_DECIMAL_DEPTH 26
++
++#define pair_err(fmt, ...) \
++ dev_err(asrc->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
++
++#define pair_dbg(fmt, ...) \
++ dev_dbg(asrc->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
++
++DEFINE_SPINLOCK(data_lock);
++DEFINE_SPINLOCK(pair_lock);
++
++/* Sample rates are aligned with that defined in pcm.h file */
++static const unsigned char asrc_process_table[][8][2] = {
++ /* 32kHz 44.1kHz 48kHz 64kHz 88.2kHz 96kHz 176kHz 192kHz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 5512Hz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 8kHz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 11025Hz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 16kHz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 22050Hz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0},}, /* 32kHz */
++ {{0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0},}, /* 44.1kHz */
++ {{0, 2}, {0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0},}, /* 48kHz */
++ {{1, 2}, {0, 2}, {0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0},}, /* 64kHz */
++ {{1, 2}, {1, 2}, {1, 2}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1},}, /* 88.2kHz */
++ {{1, 2}, {1, 2}, {1, 2}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1},}, /* 96kHz */
++ {{2, 2}, {2, 2}, {2, 2}, {2, 1}, {2, 1}, {2, 1}, {2, 1}, {2, 1},}, /* 176kHz */
++ {{2, 2}, {2, 2}, {2, 2}, {2, 1}, {2, 1}, {2, 1}, {2, 1}, {2, 1},}, /* 192kHz */
++};
++
++static struct asrc_data *asrc;
++
++/*
++ * The following tables map the relationship between asrc_inclk/asrc_outclk in
++ * mxc_asrc.h and the registers of ASRCSR
++ */
++static unsigned char input_clk_map_v1[] = {
++ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
++};
++
++static unsigned char output_clk_map_v1[] = {
++ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
++};
++
++/* V2 uses the same map for input and output */
++static unsigned char input_clk_map_v2[] = {
++/* 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 0x8 0x9 0xa 0xb 0xc 0xd 0xe 0xf */
++ 0x0, 0x1, 0x2, 0x7, 0x4, 0x5, 0x6, 0x3, 0x8, 0x9, 0xa, 0xb, 0xc, 0xf, 0xe, 0xd,
++};
++
++static unsigned char output_clk_map_v2[] = {
++/* 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 0x8 0x9 0xa 0xb 0xc 0xd 0xe 0xf */
++ 0x8, 0x9, 0xa, 0x7, 0xc, 0x5, 0x6, 0xb, 0x0, 0x1, 0x2, 0x3, 0x4, 0xf, 0xe, 0xd,
++};
++
++static unsigned char *input_clk_map, *output_clk_map;
++
++enum mxc_asrc_type {
++ IMX35_ASRC,
++ IMX53_ASRC,
++};
++
++static const struct platform_device_id mxc_asrc_devtype[] = {
++ {
++ .name = "imx35-asrc",
++ .driver_data = IMX35_ASRC,
++ }, {
++ .name = "imx53-asrc",
++ .driver_data = IMX53_ASRC,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, mxc_asrc_devtype);
++
++static const struct of_device_id fsl_asrc_ids[] = {
++ {
++ .compatible = "fsl,imx35-asrc",
++ .data = &mxc_asrc_devtype[IMX35_ASRC],
++ }, {
++ .compatible = "fsl,imx53-asrc",
++ .data = &mxc_asrc_devtype[IMX53_ASRC],
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(of, fsl_asrc_ids);
++
++
++#ifdef DEBUG
++u32 asrc_reg[] = {
++ REG_ASRCTR,
++ REG_ASRIER,
++ REG_ASRCNCR,
++ REG_ASRCFG,
++ REG_ASRCSR,
++ REG_ASRCDR1,
++ REG_ASRCDR2,
++ REG_ASRSTR,
++ REG_ASRRA,
++ REG_ASRRB,
++ REG_ASRRC,
++ REG_ASRPM1,
++ REG_ASRPM2,
++ REG_ASRPM3,
++ REG_ASRPM4,
++ REG_ASRPM5,
++ REG_ASRTFR1,
++ REG_ASRCCR,
++ REG_ASRIDRHA,
++ REG_ASRIDRLA,
++ REG_ASRIDRHB,
++ REG_ASRIDRLB,
++ REG_ASRIDRHC,
++ REG_ASRIDRLC,
++ REG_ASR76K,
++ REG_ASR56K,
++ REG_ASRMCRA,
++ REG_ASRFSTA,
++ REG_ASRMCRB,
++ REG_ASRFSTB,
++ REG_ASRMCRC,
++ REG_ASRFSTC,
++ REG_ASRMCR1A,
++ REG_ASRMCR1B,
++ REG_ASRMCR1C,
++};
++
++static void dump_regs(void)
++{
++ u32 reg, val;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(asrc_reg); i++) {
++ reg = asrc_reg[i];
++ regmap_read(asrc->regmap, reg, &val);
++ dev_dbg(asrc->dev, "REG addr=0x%x val=0x%x\n", reg, val);
++ }
++}
++#else
++static void dump_regs(void) {}
++#endif
++
++/* Only used for Ideal Ratio mode */
++static int asrc_set_clock_ratio(enum asrc_pair_index index,
++ int inrate, int outrate)
++{
++ unsigned long val = 0;
++ int integ, i;
++
++ if (outrate == 0) {
++ dev_err(asrc->dev, "wrong output sample rate: %d\n", outrate);
++ return -EINVAL;
++ }
++
++ /* Formula: r = (1 << ASRC_RATIO_DECIMAL_DEPTH) / outrate * inrate; */
++ for (integ = 0; inrate >= outrate; integ++)
++ inrate -= outrate;
++
++ val |= (integ << ASRC_RATIO_DECIMAL_DEPTH);
++
++ for (i = 1; i <= ASRC_RATIO_DECIMAL_DEPTH; i++) {
++ if ((inrate * 2) >= outrate) {
++ val |= (1 << (ASRC_RATIO_DECIMAL_DEPTH - i));
++ inrate = inrate * 2 - outrate;
++ } else
++ inrate = inrate << 1;
++
++ if (inrate == 0)
++ break;
++ }
++
++ regmap_write(asrc->regmap, REG_ASRIDRL(index), val);
++ regmap_write(asrc->regmap, REG_ASRIDRH(index), (val >> 24));
++
++ return 0;
++}
++
++/* Corresponding to asrc_process_table */
++static int supported_input_rate[] = {
++ 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200,
++ 96000, 176400, 192000,
++};
++
++static int supported_output_rate[] = {
++ 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000,
++};
++
++static int asrc_set_process_configuration(enum asrc_pair_index index,
++ int inrate, int outrate)
++{
++ int in, out;
++
++ for (in = 0; in < ARRAY_SIZE(supported_input_rate); in++) {
++ if (inrate == supported_input_rate[in])
++ break;
++ }
++
++ if (in == ARRAY_SIZE(supported_input_rate)) {
++ dev_err(asrc->dev, "unsupported input sample rate: %d\n", in);
++ return -EINVAL;
++ }
++
++ for (out = 0; out < ARRAY_SIZE(supported_output_rate); out++) {
++ if (outrate == supported_output_rate[out])
++ break;
++ }
++
++ if (out == ARRAY_SIZE(supported_output_rate)) {
++ dev_err(asrc->dev, "unsupported output sample rate: %d\n", out);
++ return -EINVAL;
++ }
++
++ regmap_update_bits(asrc->regmap, REG_ASRCFG,
++ ASRCFG_PREMODx_MASK(index) | ASRCFG_POSTMODx_MASK(index),
++ ASRCFG_PREMOD(index, asrc_process_table[in][out][0]) |
++ ASRCFG_POSTMOD(index, asrc_process_table[in][out][1]));
++
++ return 0;
++}
++
++static int asrc_get_asrck_clock_divider(int samplerate)
++{
++ unsigned int prescaler, divider, ratio, ra, i;
++ unsigned long bitclk;
++
++ if (samplerate == 0) {
++ dev_err(asrc->dev, "invalid sample rate: %d\n", samplerate);
++ return -EINVAL;
++ }
++
++ bitclk = clk_get_rate(asrc->asrc_clk);
++
++ ra = bitclk / samplerate;
++ ratio = ra;
++
++ /* Calculate the prescaler */
++ for (i = 0; ratio > 8; i++)
++ ratio >>= 1;
++
++ prescaler = i;
++
++ /* Calculate the divider */
++ divider = i ? (((ra + (1 << (i - 1)) - 1) >> i) - 1) : (ra - 1);
++
++ /* The totally divider is (2 ^ prescaler) * divider */
++ return (divider << ASRCDRx_AxCPx_WIDTH) + prescaler;
++}
++
++int asrc_req_pair(int chn_num, enum asrc_pair_index *index)
++{
++ int imax = 0, busy = 0, i, ret = 0;
++ unsigned long lock_flags;
++ struct asrc_pair *pair;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ for (i = ASRC_PAIR_A; i < ASRC_PAIR_MAX_NUM; i++) {
++ pair = &asrc->asrc_pair[i];
++ if (chn_num > pair->chn_max) {
++ imax++;
++ continue;
++ } else if (pair->active) {
++ busy++;
++ continue;
++ }
++ /* Save the current qualified pair */
++ *index = i;
++
++ /* Check if this pair is a perfect one */
++ if (chn_num == pair->chn_max)
++ break;
++ }
++
++ if (imax == ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "no pair could afford required channel number\n");
++ ret = -EINVAL;
++ } else if (busy == ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "all pairs are busy now\n");
++ ret = -EBUSY;
++ } else if (busy + imax >= ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "all affordable pairs are busy now\n");
++ ret = -EBUSY;
++ } else {
++ pair = &asrc->asrc_pair[*index];
++ pair->chn_num = chn_num;
++ pair->active = 1;
++ }
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ if (!ret) {
++ clk_enable(asrc->asrc_clk);
++ clk_prepare_enable(asrc->dma_clk);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(asrc_req_pair);
++
++void asrc_release_pair(enum asrc_pair_index index)
++{
++ struct asrc_pair *pair = &asrc->asrc_pair[index];
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ pair->active = 0;
++ pair->overload_error = 0;
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ /* Disable PAIR */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEx_MASK(index), 0);
++}
++EXPORT_SYMBOL(asrc_release_pair);
++
++int asrc_config_pair(struct asrc_config *config)
++{
++ u32 inrate = config->input_sample_rate, indiv;
++ u32 outrate = config->output_sample_rate, outdiv;
++ int ret, channels, index = config->pair;
++ unsigned long lock_flags;
++
++ /* Set the channel number */
++ spin_lock_irqsave(&data_lock, lock_flags);
++ asrc->asrc_pair[index].chn_num = config->channel_num;
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ if (asrc->channel_bits > 3)
++ channels = config->channel_num;
++ else
++ channels = (config->channel_num + 1) / 2;
++
++ /* Update channel number of current pair */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(index, asrc->channel_bits),
++ ASRCNCR_ANCx_set(index, channels, asrc->channel_bits));
++
++ /* Set the clock source */
++ regmap_update_bits(asrc->regmap, REG_ASRCSR,
++ ASRCSR_AICSx_MASK(index) | ASRCSR_AOCSx_MASK(index),
++ ASRCSR_AICS(index, input_clk_map[config->inclk]) |
++ ASRCSR_AOCS(index, output_clk_map[config->outclk]));
++
++ /* Default setting: Automatic selection for processing mode */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ATSx_MASK(index), ASRCTR_ATS(index));
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_USRx_MASK(index), 0);
++
++ /* Default Input Clock Divider Setting */
++ switch (config->inclk & ASRCSR_AxCSx_MASK) {
++ case INCLK_SPDIF_RX:
++ indiv = ASRC_PRESCALER_SPDIF_RX;
++ break;
++ case INCLK_SPDIF_TX:
++ indiv = ASRC_PRESCALER_SPDIF_TX;
++ break;
++ case INCLK_ASRCK1_CLK:
++ indiv = asrc_get_asrck_clock_divider(inrate);
++ break;
++ default:
++ switch (config->input_word_width) {
++ case ASRC_WIDTH_16_BIT:
++ indiv = ASRC_PRESCALER_I2S_16BIT;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ indiv = ASRC_PRESCALER_I2S_24BIT;
++ break;
++ default:
++ pair_err("unsupported input word width %d\n",
++ config->input_word_width);
++ return -EINVAL;
++ }
++ break;
++ }
++
++ /* Default Output Clock Divider Setting */
++ switch (config->outclk & ASRCSR_AxCSx_MASK) {
++ case OUTCLK_SPDIF_RX:
++ outdiv = ASRC_PRESCALER_SPDIF_RX;
++ break;
++ case OUTCLK_SPDIF_TX:
++ outdiv = ASRC_PRESCALER_SPDIF_TX;
++ break;
++ case OUTCLK_ASRCK1_CLK:
++ if ((config->inclk & ASRCSR_AxCSx_MASK) == INCLK_NONE)
++ outdiv = ASRC_PRESCALER_IDEAL_RATIO;
++ else
++ outdiv = asrc_get_asrck_clock_divider(outrate);
++ break;
++ default:
++ switch (config->output_word_width) {
++ case ASRC_WIDTH_16_BIT:
++ outdiv = ASRC_PRESCALER_I2S_16BIT;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ outdiv = ASRC_PRESCALER_I2S_24BIT;
++ break;
++ default:
++ pair_err("unsupported output word width %d\n",
++ config->input_word_width);
++ return -EINVAL;
++ }
++ break;
++ }
++
++ /* indiv and outdiv'd include prescaler's value, so add its MASK too */
++ regmap_update_bits(asrc->regmap, REG_ASRCDR(index),
++ ASRCDRx_AOCPx_MASK(index) | ASRCDRx_AICPx_MASK(index) |
++ ASRCDRx_AOCDx_MASK(index) | ASRCDRx_AICDx_MASK(index),
++ ASRCDRx_AOCP(index, outdiv) | ASRCDRx_AICP(index, indiv));
++
++ /* Check whether ideal ratio is a must */
++ switch (config->inclk & ASRCSR_AxCSx_MASK) {
++ case INCLK_NONE:
++ /* Clear ASTSx bit to use ideal ratio */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ATSx_MASK(index), 0);
++
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_IDRx_MASK(index) | ASRCTR_USRx_MASK(index),
++ ASRCTR_IDR(index) | ASRCTR_USR(index));
++
++ ret = asrc_set_clock_ratio(index, inrate, outrate);
++ if (ret)
++ return ret;
++
++ ret = asrc_set_process_configuration(index, inrate, outrate);
++ if (ret)
++ return ret;
++
++ break;
++ case INCLK_ASRCK1_CLK:
++ /* This case and default are both remained for v1 */
++ if (inrate == 44100 || inrate == 88200) {
++ pair_err("unsupported sample rate %d by selected clock\n",
++ inrate);
++ return -EINVAL;
++ }
++ break;
++ default:
++ if ((config->outclk & ASRCSR_AxCSx_MASK) != OUTCLK_ASRCK1_CLK)
++ break;
++
++ if (outrate == 44100 || outrate == 88200) {
++ pair_err("unsupported sample rate %d by selected clock\n",
++ outrate);
++ return -EINVAL;
++ }
++ break;
++ }
++
++ /* Config input and output wordwidth */
++ if (config->output_word_width == ASRC_WIDTH_8_BIT) {
++ pair_err("unsupported wordwidth for output: 8bit\n");
++ pair_err("output only support: 16bit or 24bit\n");
++ return -EINVAL;
++ }
++
++ regmap_update_bits(asrc->regmap, REG_ASRMCR1(index),
++ ASRMCR1x_OW16_MASK | ASRMCR1x_IWD_MASK,
++ ASRMCR1x_OW16(config->output_word_width) |
++ ASRMCR1x_IWD(config->input_word_width));
++
++ /* Enable BUFFER STALL */
++ regmap_update_bits(asrc->regmap, REG_ASRMCR(index),
++ ASRMCRx_BUFSTALLx_MASK, ASRMCRx_BUFSTALLx);
++
++ /* Set Threshold for input and output FIFO */
++ return asrc_set_watermark(index, ASRC_INPUTFIFO_THRESHOLD,
++ ASRC_INPUTFIFO_THRESHOLD);
++}
++EXPORT_SYMBOL(asrc_config_pair);
++
++int asrc_set_watermark(enum asrc_pair_index index, u32 in_wm, u32 out_wm)
++{
++ if (in_wm > ASRC_FIFO_THRESHOLD_MAX || out_wm > ASRC_FIFO_THRESHOLD_MAX) {
++ pair_err("invalid watermark!\n");
++ return -EINVAL;
++ }
++
++ return regmap_update_bits(asrc->regmap, REG_ASRMCR(index),
++ ASRMCRx_EXTTHRSHx_MASK | ASRMCRx_INFIFO_THRESHOLD_MASK |
++ ASRMCRx_OUTFIFO_THRESHOLD_MASK,
++ ASRMCRx_EXTTHRSHx | ASRMCRx_INFIFO_THRESHOLD(in_wm) |
++ ASRMCRx_OUTFIFO_THRESHOLD(out_wm));
++}
++EXPORT_SYMBOL(asrc_set_watermark);
++
++void asrc_start_conv(enum asrc_pair_index index)
++{
++ int reg, retry, channels, i;
++
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ASRCEx_MASK(index), ASRCTR_ASRCE(index));
++
++ /* Wait for status of initialization */
++ for (retry = 10, reg = 0; !reg && retry; --retry) {
++ udelay(5);
++ regmap_read(asrc->regmap, REG_ASRCFG, &reg);
++ reg &= ASRCFG_INIRQx_MASK(index);
++ }
++
++ /* Set the input fifo to ASRC STALL level */
++ regmap_read(asrc->regmap, REG_ASRCNCR, &reg);
++ channels = ASRCNCR_ANCx_get(index, reg, asrc->channel_bits);
++ for (i = 0; i < channels * 4; i++)
++ regmap_write(asrc->regmap, REG_ASRDI(index), 0);
++
++ /* Overload Interrupt Enable */
++ regmap_write(asrc->regmap, REG_ASRIER, ASRIER_AOLIE);
++}
++EXPORT_SYMBOL(asrc_start_conv);
++
++void asrc_stop_conv(enum asrc_pair_index index)
++{
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEx_MASK(index), 0);
++}
++EXPORT_SYMBOL(asrc_stop_conv);
++
++void asrc_finish_conv(enum asrc_pair_index index)
++{
++ clk_disable_unprepare(asrc->dma_clk);
++ clk_disable(asrc->asrc_clk);
++}
++EXPORT_SYMBOL(asrc_finish_conv);
++
++#define SET_OVERLOAD_ERR(index, err, msg) \
++ do { \
++ asrc->asrc_pair[index].overload_error |= err; \
++ pair_dbg(msg); \
++ } while (0)
++
++static irqreturn_t asrc_isr(int irq, void *dev_id)
++{
++ enum asrc_pair_index index;
++ u32 status;
++
++ regmap_read(asrc->regmap, REG_ASRSTR, &status);
++
++ for (index = ASRC_PAIR_A; index < ASRC_PAIR_MAX_NUM; index++) {
++ if (asrc->asrc_pair[index].active == 0)
++ continue;
++ if (status & ASRSTR_ATQOL)
++ SET_OVERLOAD_ERR(index, ASRC_TASK_Q_OVERLOAD,
++ "Task Queue FIFO overload");
++ if (status & ASRSTR_AOOL(index))
++ SET_OVERLOAD_ERR(index, ASRC_OUTPUT_TASK_OVERLOAD,
++ "Output Task Overload");
++ if (status & ASRSTR_AIOL(index))
++ SET_OVERLOAD_ERR(index, ASRC_INPUT_TASK_OVERLOAD,
++ "Input Task Overload");
++ if (status & ASRSTR_AODO(index))
++ SET_OVERLOAD_ERR(index, ASRC_OUTPUT_BUFFER_OVERFLOW,
++ "Output Data Buffer has overflowed");
++ if (status & ASRSTR_AIDU(index))
++ SET_OVERLOAD_ERR(index, ASRC_INPUT_BUFFER_UNDERRUN,
++ "Input Data Buffer has underflowed");
++ }
++
++ /* Clean overload error */
++ regmap_write(asrc->regmap, REG_ASRSTR, ASRSTR_AOLE);
++
++ return IRQ_HANDLED;
++}
++
++void asrc_get_status(struct asrc_status_flags *flags)
++{
++ enum asrc_pair_index index = flags->index;
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ flags->overload_error = asrc->asrc_pair[index].overload_error;
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++}
++EXPORT_SYMBOL(asrc_get_status);
++
++u32 asrc_get_per_addr(enum asrc_pair_index index, bool in)
++{
++ return asrc->paddr + (in ? REG_ASRDI(index) : REG_ASRDO(index));
++}
++EXPORT_SYMBOL(asrc_get_per_addr);
++
++static int mxc_init_asrc(void)
++{
++ /* Halt ASRC internal FP when input FIFO needs data for pair A, B, C */
++ regmap_write(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEN);
++
++ /* Disable interrupt by default */
++ regmap_write(asrc->regmap, REG_ASRIER, 0x0);
++
++ /* Default 2: 6: 2 channel assignment */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_A, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_A, 2, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_B, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_B, 6, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_C, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_C, 2, asrc->channel_bits));
++
++ /* Parameter Registers recommended settings */
++ regmap_write(asrc->regmap, REG_ASRPM1, 0x7fffff);
++ regmap_write(asrc->regmap, REG_ASRPM2, 0x255555);
++ regmap_write(asrc->regmap, REG_ASRPM3, 0xff7280);
++ regmap_write(asrc->regmap, REG_ASRPM4, 0xff7280);
++ regmap_write(asrc->regmap, REG_ASRPM5, 0xff7280);
++
++ /* Base address for task queue FIFO. Set to 0x7C */
++ regmap_update_bits(asrc->regmap, REG_ASRTFR1,
++ ASRTFR1_TF_BASE_MASK, ASRTFR1_TF_BASE(0xfc));
++
++ /* Set the processing clock for 76KHz, 133M */
++ regmap_write(asrc->regmap, REG_ASR76K, 0x06D6);
++
++ /* Set the processing clock for 56KHz, 133M */
++ return regmap_write(asrc->regmap, REG_ASR56K, 0x0947);
++}
++
++#define ASRC_xPUT_DMA_CALLBACK(in) \
++ ((in) ? asrc_input_dma_callback : asrc_output_dma_callback)
++
++static void asrc_input_dma_callback(void *data)
++{
++ struct asrc_pair_params *params = (struct asrc_pair_params *)data;
++
++ dma_unmap_sg(NULL, params->input_sg, params->input_sg_nodes,
++ DMA_MEM_TO_DEV);
++
++ complete(&params->input_complete);
++
++ schedule_work(&params->task_output_work);
++}
++
++static void asrc_output_dma_callback(void *data)
++{
++ struct asrc_pair_params *params = (struct asrc_pair_params *)data;
++
++ dma_unmap_sg(NULL, params->output_sg, params->output_sg_nodes,
++ DMA_DEV_TO_MEM);
++
++ complete(&params->output_complete);
++}
++
++static unsigned int asrc_get_output_FIFO_size(enum asrc_pair_index index)
++{
++ u32 val;
++
++ regmap_read(asrc->regmap, REG_ASRFST(index), &val);
++
++ val &= ASRFSTx_OUTPUT_FIFO_MASK;
++
++ return val >> ASRFSTx_OUTPUT_FIFO_SHIFT;
++}
++
++static u32 asrc_read_one_from_output_FIFO(enum asrc_pair_index index)
++{
++ u32 val;
++
++ regmap_read(asrc->regmap, REG_ASRDO(index), &val);
++
++ return val;
++}
++
++static void asrc_read_output_FIFO(struct asrc_pair_params *params)
++{
++ u32 *reg24 = params->output_last_period.dma_vaddr;
++ u16 *reg16 = params->output_last_period.dma_vaddr;
++ enum asrc_pair_index index = params->index;
++ u32 i, j, reg, size, t_size;
++ bool bit24 = false;
++
++ if (params->output_word_width == ASRC_WIDTH_24_BIT)
++ bit24 = true;
++
++ t_size = 0;
++ do {
++ size = asrc_get_output_FIFO_size(index);
++ for (i = 0; i < size; i++) {
++ for (j = 0; j < params->channel_nums; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ if (bit24) {
++ *(reg24) = reg;
++ reg24++;
++ } else {
++ *(reg16) = (u16)reg;
++ reg16++;
++ }
++ }
++ }
++ t_size += size;
++ } while (size);
++
++ if (t_size > params->last_period_sample)
++ t_size = params->last_period_sample;
++
++ params->output_last_period.length = t_size * params->channel_nums * 2;
++ if (bit24)
++ params->output_last_period.length *= 2;
++}
++
++static void asrc_output_task_worker(struct work_struct *w)
++{
++ struct asrc_pair_params *params =
++ container_of(w, struct asrc_pair_params, task_output_work);
++ enum asrc_pair_index index = params->index;
++ unsigned long lock_flags;
++
++ if (!wait_for_completion_interruptible_timeout(&params->output_complete, HZ / 10)) {
++ pair_err("output dma task timeout\n");
++ return;
++ }
++
++ init_completion(&params->output_complete);
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ if (!params->pair_hold) {
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++ return;
++ }
++ asrc_read_output_FIFO(params);
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ complete(&params->lastperiod_complete);
++}
++
++static void mxc_free_dma_buf(struct asrc_pair_params *params)
++{
++ if (params->input_dma_total.dma_vaddr != NULL) {
++ kfree(params->input_dma_total.dma_vaddr);
++ params->input_dma_total.dma_vaddr = NULL;
++ }
++
++ if (params->output_dma_total.dma_vaddr != NULL) {
++ kfree(params->output_dma_total.dma_vaddr);
++ params->output_dma_total.dma_vaddr = NULL;
++ }
++
++ if (params->output_last_period.dma_vaddr) {
++ dma_free_coherent(asrc->dev, 1024 * params->last_period_sample,
++ params->output_last_period.dma_vaddr,
++ params->output_last_period.dma_paddr);
++ params->output_last_period.dma_vaddr = NULL;
++ }
++}
++
++static int mxc_allocate_dma_buf(struct asrc_pair_params *params)
++{
++ struct dma_block *input_a, *output_a, *last_period;
++ enum asrc_pair_index index = params->index;
++
++ input_a = &params->input_dma_total;
++ output_a = &params->output_dma_total;
++ last_period = &params->output_last_period;
++
++ input_a->dma_vaddr = kzalloc(input_a->length, GFP_KERNEL);
++ if (!input_a->dma_vaddr) {
++ pair_err("failed to allocate input dma buffer\n");
++ goto exit;
++ }
++ input_a->dma_paddr = virt_to_dma(NULL, input_a->dma_vaddr);
++
++ output_a->dma_vaddr = kzalloc(output_a->length, GFP_KERNEL);
++ if (!output_a->dma_vaddr) {
++ pair_err("failed to allocate output dma buffer\n");
++ goto exit;
++ }
++ output_a->dma_paddr = virt_to_dma(NULL, output_a->dma_vaddr);
++
++ last_period->dma_vaddr = dma_alloc_coherent(asrc->dev,
++ 1024 * params->last_period_sample,
++ &last_period->dma_paddr, GFP_KERNEL);
++ if (!last_period->dma_vaddr) {
++ pair_err("failed to allocate last period buffer\n");
++ goto exit;
++ }
++
++ return 0;
++
++exit:
++ mxc_free_dma_buf(params);
++
++ return -ENOBUFS;
++}
++
++static struct dma_chan *imx_asrc_get_dma_channel(enum asrc_pair_index index, bool in)
++{
++ char name[4];
++
++ sprintf(name, "%cx%c", in ? 'r' : 't', index + 'a');
++
++ return dma_request_slave_channel(asrc->dev, name);
++}
++
++static int imx_asrc_dma_config(struct asrc_pair_params *params,
++ struct dma_chan *chan, u32 dma_addr,
++ void *buf_addr, u32 buf_len, bool in,
++ enum asrc_word_width word_width)
++{
++ enum asrc_pair_index index = params->index;
++ struct dma_async_tx_descriptor *desc;
++ struct dma_slave_config slave_config;
++ enum dma_slave_buswidth buswidth;
++ struct scatterlist *sg;
++ unsigned int sg_nent, i;
++ int ret;
++
++ if (in) {
++ sg = params->input_sg;
++ sg_nent = params->input_sg_nodes;
++ desc = params->desc_in;
++ } else {
++ sg = params->output_sg;
++ sg_nent = params->output_sg_nodes;
++ desc = params->desc_out;
++ }
++
++ switch (word_width) {
++ case ASRC_WIDTH_16_BIT:
++ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ break;
++ default:
++ pair_err("invalid word width\n");
++ return -EINVAL;
++ }
++
++ slave_config.dma_request0 = 0;
++ slave_config.dma_request1 = 0;
++
++ if (in) {
++ slave_config.direction = DMA_MEM_TO_DEV;
++ slave_config.dst_addr = dma_addr;
++ slave_config.dst_addr_width = buswidth;
++ slave_config.dst_maxburst =
++ params->input_wm * params->channel_nums / buswidth;
++ } else {
++ slave_config.direction = DMA_DEV_TO_MEM;
++ slave_config.src_addr = dma_addr;
++ slave_config.src_addr_width = buswidth;
++ slave_config.src_maxburst =
++ params->output_wm * params->channel_nums / buswidth;
++ }
++ ret = dmaengine_slave_config(chan, &slave_config);
++ if (ret) {
++ pair_err("failed to config dmaengine for %sput task: %d\n",
++ in ? "in" : "out", ret);
++ return -EINVAL;
++ }
++
++ sg_init_table(sg, sg_nent);
++ switch (sg_nent) {
++ case 1:
++ sg_init_one(sg, buf_addr, buf_len);
++ break;
++ case 2:
++ case 3:
++ case 4:
++ for (i = 0; i < (sg_nent - 1); i++)
++ sg_set_buf(&sg[i], buf_addr + i * ASRC_MAX_BUFFER_SIZE,
++ ASRC_MAX_BUFFER_SIZE);
++
++ sg_set_buf(&sg[i], buf_addr + i * ASRC_MAX_BUFFER_SIZE,
++ buf_len - ASRC_MAX_BUFFER_SIZE * i);
++ break;
++ default:
++ pair_err("invalid input DMA nodes number: %d\n", sg_nent);
++ return -EINVAL;
++ }
++
++ ret = dma_map_sg(NULL, sg, sg_nent, slave_config.direction);
++ if (ret != sg_nent) {
++ pair_err("failed to map dma sg for %sput task\n",
++ in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ desc = dmaengine_prep_slave_sg(chan, sg, sg_nent,
++ slave_config.direction, DMA_PREP_INTERRUPT);
++ if (!desc) {
++ pair_err("failed to prepare slave sg for %sput task\n",
++ in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ if (in) {
++ params->desc_in = desc;
++ params->desc_in->callback = asrc_input_dma_callback;
++ } else {
++ params->desc_out = desc;
++ params->desc_out->callback = asrc_output_dma_callback;
++ }
++
++ desc->callback = ASRC_xPUT_DMA_CALLBACK(in);
++ desc->callback_param = params;
++
++ return 0;
++}
++
++static int mxc_asrc_prepare_io_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf, bool in)
++{
++ enum asrc_pair_index index = params->index;
++ struct dma_chan *dma_channel;
++ enum asrc_word_width width;
++ unsigned int *dma_len, *sg_nodes, buf_len, wm;
++ void __user *buf_vaddr;
++ void *dma_vaddr;
++ u32 word_size, fifo_addr;
++
++ if (in) {
++ dma_channel = params->input_dma_channel;
++ dma_vaddr = params->input_dma_total.dma_vaddr;
++ dma_len = &params->input_dma_total.length;
++ width = params->input_word_width;
++ sg_nodes = &params->input_sg_nodes;
++ wm = params->input_wm;
++ buf_vaddr = (void __user *)pbuf->input_buffer_vaddr;
++ buf_len = pbuf->input_buffer_length;
++ } else {
++ dma_channel = params->output_dma_channel;
++ dma_vaddr = params->output_dma_total.dma_vaddr;
++ dma_len = &params->output_dma_total.length;
++ width = params->output_word_width;
++ sg_nodes = &params->output_sg_nodes;
++ wm = params->last_period_sample;
++ buf_vaddr = (void __user *)pbuf->output_buffer_vaddr;
++ buf_len = pbuf->output_buffer_length;
++ }
++
++ switch (width) {
++ case ASRC_WIDTH_24_BIT:
++ word_size = 4;
++ break;
++ case ASRC_WIDTH_16_BIT:
++ case ASRC_WIDTH_8_BIT:
++ word_size = 2;
++ break;
++ default:
++ pair_err("invalid %sput word size!\n", in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ if (buf_len < word_size * params->channel_nums * wm) {
++ pair_err("%sput buffer size[%d] is too small!\n",
++ in ? "in" : "out", buf_len);
++ return -EINVAL;
++ }
++
++ /* Copy origin data into input buffer */
++ if (in && copy_from_user(dma_vaddr, buf_vaddr, buf_len))
++ return -EFAULT;
++
++ *dma_len = buf_len;
++ if (!in)
++ *dma_len -= wm * word_size * params->channel_nums;
++
++ *sg_nodes = *dma_len / ASRC_MAX_BUFFER_SIZE + 1;
++
++ fifo_addr = asrc_get_per_addr(params->index, in);
++
++ return imx_asrc_dma_config(params, dma_channel, fifo_addr, dma_vaddr,
++ *dma_len, in, width);
++}
++
++static int mxc_asrc_prepare_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf)
++{
++ enum asrc_pair_index index = params->index;
++ int ret;
++
++ ret = mxc_asrc_prepare_io_buffer(params, pbuf, true);
++ if (ret) {
++ pair_err("failed to prepare input buffer: %d\n", ret);
++ return ret;
++ }
++
++ ret = mxc_asrc_prepare_io_buffer(params, pbuf, false);
++ if (ret) {
++ pair_err("failed to prepare output buffer: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++int mxc_asrc_process_io_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf, bool in)
++{
++ void *last_vaddr = params->output_last_period.dma_vaddr;
++ unsigned int *last_len = &params->output_last_period.length;
++ enum asrc_pair_index index = params->index;
++ unsigned int dma_len, *buf_len;
++ struct completion *complete;
++ void __user *buf_vaddr;
++ void *dma_vaddr;
++
++ if (in) {
++ dma_vaddr = params->input_dma_total.dma_vaddr;
++ dma_len = params->input_dma_total.length;
++ buf_len = &pbuf->input_buffer_length;
++ complete = &params->input_complete;
++ buf_vaddr = (void __user *)pbuf->input_buffer_vaddr;
++ } else {
++ dma_vaddr = params->output_dma_total.dma_vaddr;
++ dma_len = params->output_dma_total.length;
++ buf_len = &pbuf->output_buffer_length;
++ complete = &params->lastperiod_complete;
++ buf_vaddr = (void __user *)pbuf->output_buffer_vaddr;
++ }
++
++ if (!wait_for_completion_interruptible_timeout(complete, 10 * HZ)) {
++ pair_err("%s task timeout\n", in ? "input dma" : "last period");
++ return -ETIME;
++ } else if (signal_pending(current)) {
++ pair_err("%sput task forcibly aborted\n", in ? "in" : "out");
++ return -ERESTARTSYS;
++ }
++
++ init_completion(complete);
++
++ *buf_len = dma_len;
++
++ /* Only output need return data to user space */
++ if (!in) {
++ if (copy_to_user(buf_vaddr, dma_vaddr, dma_len))
++ return -EFAULT;
++
++ *buf_len += *last_len;
++
++ if (copy_to_user(buf_vaddr + dma_len, last_vaddr, *last_len))
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++int mxc_asrc_process_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf)
++{
++ enum asrc_pair_index index = params->index;
++ int ret;
++
++ ret = mxc_asrc_process_io_buffer(params, pbuf, true);
++ if (ret) {
++ pair_err("failed to process input buffer: %d\n", ret);
++ return ret;
++ }
++
++ ret = mxc_asrc_process_io_buffer(params, pbuf, false);
++ if (ret) {
++ pair_err("failed to process output buffer: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++#ifdef ASRC_POLLING_WITHOUT_DMA
++static void asrc_write_one_to_input_FIFO(enum asrc_pair_index index, u32 val)
++{
++ regmap_write(asrc->regmap, REG_ASRDI(index), val);
++}
++
++/* THIS FUNCTION ONLY EXISTS FOR DEBUGGING AND ONLY SUPPORTS TWO CHANNELS */
++static void asrc_polling_debug(struct asrc_pair_params *params)
++{
++ enum asrc_pair_index index = params->index;
++ u32 *in24 = params->input_dma_total.dma_vaddr;
++ u32 dma_len = params->input_dma_total.length / (params->channel_nums * 4);
++ u32 size, i, j, t_size, reg;
++ u32 *reg24 = params->output_dma_total.dma_vaddr;
++
++ t_size = 0;
++
++ for (i = 0; i < dma_len; ) {
++ for (j = 0; j < 2; j++) {
++ asrc_write_one_to_input_FIFO(index, *in24);
++ in24++;
++ asrc_write_one_to_input_FIFO(index, *in24);
++ in24++;
++ i++;
++ }
++ udelay(50);
++ udelay(50 * params->output_sample_rate / params->input_sample_rate);
++
++ size = asrc_get_output_FIFO_size(index);
++ for (j = 0; j < size; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ }
++ t_size += size;
++ }
++
++ mdelay(1);
++ size = asrc_get_output_FIFO_size(index);
++ for (j = 0; j < size; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ }
++ t_size += size;
++
++ params->output_dma_total.length = t_size * params->channel_nums * 4;
++ params->output_last_period.length = 0;
++
++ dma_unmap_sg(NULL, params->input_sg, params->input_sg_nodes,
++ DMA_MEM_TO_DEV);
++ dma_unmap_sg(NULL, params->output_sg, params->output_sg_nodes,
++ DMA_DEV_TO_MEM);
++
++ complete(&params->input_complete);
++ complete(&params->lastperiod_complete);
++}
++#else
++static void mxc_asrc_submit_dma(struct asrc_pair_params *params)
++{
++ enum asrc_pair_index index = params->index;
++ u32 size = asrc_get_output_FIFO_size(params->index);
++ int i, j;
++
++ /* Read all data in OUTPUT FIFO */
++ while (size) {
++ for (j = 0; j < size; j++)
++ for (i = 0; i < params->channel_nums; i++)
++ asrc_read_one_from_output_FIFO(index);
++ /* Fetch the data every 100us */
++ udelay(100);
++
++ size = asrc_get_output_FIFO_size(index);
++ }
++
++ /* Submit dma request */
++ dmaengine_submit(params->desc_in);
++ dma_async_issue_pending(params->desc_in->chan);
++
++ dmaengine_submit(params->desc_out);
++ dma_async_issue_pending(params->desc_out->chan);
++
++ /*
++ * Clear dma request during the stall state of ASRC:
++ * During STALL state, the remaining in input fifo would never be
++ * smaller than the input threshold while the output fifo would not
++ * be bigger than output one. Thus the dma request would be cleared.
++ */
++ asrc_set_watermark(index, ASRC_FIFO_THRESHOLD_MIN, ASRC_FIFO_THRESHOLD_MAX);
++
++ /* Update the real input threshold to raise dma request */
++ asrc_set_watermark(index, params->input_wm, params->output_wm);
++}
++#endif
++
++static long asrc_ioctl_req_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ struct asrc_req req;
++ long ret;
++
++ ret = copy_from_user(&req, user, sizeof(req));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get req from user space: %ld\n", ret);
++ return ret;
++ }
++
++ ret = asrc_req_pair(req.chn_num, &req.index);
++ if (ret) {
++ dev_err(asrc->dev, "failed to request pair: %ld\n", ret);
++ return ret;
++ }
++
++ params->pair_hold = 1;
++ params->index = req.index;
++ params->channel_nums = req.chn_num;
++
++ ret = copy_to_user(user, &req, sizeof(req));
++ if (ret) {
++ dev_err(asrc->dev, "failed to send req to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_config_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ struct asrc_config config;
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&config, user, sizeof(config));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get config from user space: %ld\n", ret);
++ return ret;
++ }
++
++ index = config.pair;
++
++ ret = asrc_config_pair(&config);
++ if (ret) {
++ pair_err("failed to config pair: %ld\n", ret);
++ return ret;
++ }
++
++ params->input_wm = 4;
++ params->output_wm = 2;
++
++ ret = asrc_set_watermark(index, params->input_wm, params->output_wm);
++ if (ret)
++ return ret;
++
++ params->output_buffer_size = config.dma_buffer_size;
++ params->input_buffer_size = config.dma_buffer_size;
++ if (config.buffer_num > ASRC_DMA_BUFFER_NUM)
++ params->buffer_num = ASRC_DMA_BUFFER_NUM;
++ else
++ params->buffer_num = config.buffer_num;
++
++ params->input_dma_total.length = ASRC_DMA_BUFFER_SIZE;
++ params->output_dma_total.length = ASRC_DMA_BUFFER_SIZE;
++
++ params->input_word_width = config.input_word_width;
++ params->output_word_width = config.output_word_width;
++
++ params->input_sample_rate = config.input_sample_rate;
++ params->output_sample_rate = config.output_sample_rate;
++
++ params->last_period_sample = ASRC_OUTPUT_LAST_SAMPLE_DEFAULT;
++
++ ret = mxc_allocate_dma_buf(params);
++ if (ret) {
++ pair_err("failed to allocate dma buffer: %ld\n", ret);
++ return ret;
++ }
++
++ /* Request DMA channel for both input and output */
++ params->input_dma_channel = imx_asrc_get_dma_channel(index, true);
++ if (params->input_dma_channel == NULL) {
++ pair_err("failed to request input task dma channel\n");
++ return -EBUSY;
++ }
++
++ params->output_dma_channel = imx_asrc_get_dma_channel(index, false);
++ if (params->output_dma_channel == NULL) {
++ pair_err("failed to request output task dma channel\n");
++ return -EBUSY;
++ }
++
++ init_completion(&params->input_complete);
++ init_completion(&params->output_complete);
++ init_completion(&params->lastperiod_complete);
++
++ /* Add work struct to receive last period of output data */
++ INIT_WORK(&params->task_output_work, asrc_output_task_worker);
++
++ ret = copy_to_user(user, &config, sizeof(config));
++ if (ret) {
++ pair_err("failed to send config to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_release_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ unsigned long lock_flags;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++ /* index might be not valid due to some application failure. */
++ if (index < 0)
++ return -EINVAL;
++
++ params->asrc_active = 0;
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ params->pair_hold = 0;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ if (params->input_dma_channel)
++ dma_release_channel(params->input_dma_channel);
++ if (params->output_dma_channel)
++ dma_release_channel(params->output_dma_channel);
++ mxc_free_dma_buf(params);
++ asrc_release_pair(index);
++ asrc_finish_conv(index);
++
++ return 0;
++}
++
++static long asrc_ioctl_convert(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ struct asrc_convert_buffer buf;
++ long ret;
++
++ ret = copy_from_user(&buf, user, sizeof(buf));
++ if (ret) {
++ pair_err("failed to get buf from user space: %ld\n", ret);
++ return ret;
++ }
++
++ ret = mxc_asrc_prepare_buffer(params, &buf);
++ if (ret) {
++ pair_err("failed to prepare buffer: %ld\n", ret);
++ return ret;
++ }
++
++#ifdef ASRC_POLLING_WITHOUT_DMA
++ asrc_polling_debug(params);
++#else
++ mxc_asrc_submit_dma(params);
++#endif
++
++ ret = mxc_asrc_process_buffer(params, &buf);
++ if (ret) {
++ pair_err("failed to process buffer: %ld\n", ret);
++ return ret;
++ }
++
++ ret = copy_to_user(user, &buf, sizeof(buf));
++ if (ret) {
++ pair_err("failed to send buf to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_start_conv(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++ params->asrc_active = 1;
++ asrc_start_conv(index);
++
++ return 0;
++}
++
++static long asrc_ioctl_stop_conv(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++ dmaengine_terminate_all(params->input_dma_channel);
++ dmaengine_terminate_all(params->output_dma_channel);
++
++ asrc_stop_conv(index);
++ params->asrc_active = 0;
++
++ return 0;
++}
++
++static long asrc_ioctl_status(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ struct asrc_status_flags flags;
++ long ret;
++
++ ret = copy_from_user(&flags, user, sizeof(flags));
++ if (ret) {
++ pair_err("failed to get flags from user space: %ld\n", ret);
++ return ret;
++ }
++
++ asrc_get_status(&flags);
++
++ ret = copy_to_user(user, &flags, sizeof(flags));
++ if (ret) {
++ pair_err("failed to send flags to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_flush(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ init_completion(&params->input_complete);
++ init_completion(&params->output_complete);
++ init_completion(&params->lastperiod_complete);
++
++ /* Release DMA and request again */
++ dma_release_channel(params->input_dma_channel);
++ dma_release_channel(params->output_dma_channel);
++
++ params->input_dma_channel = imx_asrc_get_dma_channel(index, true);
++ if (params->input_dma_channel == NULL) {
++ pair_err("failed to request input task dma channel\n");
++ return -EBUSY;
++ }
++
++ params->output_dma_channel = imx_asrc_get_dma_channel(index, false);
++ if (params->output_dma_channel == NULL) {
++ pair_err("failed to request output task dma channel\n");
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct asrc_pair_params *params = file->private_data;
++ void __user *user = (void __user *)arg;
++ long ret = 0;
++
++ switch (cmd) {
++ case ASRC_REQ_PAIR:
++ ret = asrc_ioctl_req_pair(params, user);
++ break;
++ case ASRC_CONFIG_PAIR:
++ ret = asrc_ioctl_config_pair(params, user);
++ break;
++ case ASRC_RELEASE_PAIR:
++ ret = asrc_ioctl_release_pair(params, user);
++ break;
++ case ASRC_CONVERT:
++ ret = asrc_ioctl_convert(params, user);
++ break;
++ case ASRC_START_CONV:
++ ret = asrc_ioctl_start_conv(params, user);
++ dump_regs();
++ break;
++ case ASRC_STOP_CONV:
++ ret = asrc_ioctl_stop_conv(params, user);
++ break;
++ case ASRC_STATUS:
++ ret = asrc_ioctl_status(params, user);
++ break;
++ case ASRC_FLUSH:
++ ret = asrc_ioctl_flush(params, user);
++ break;
++ default:
++ dev_err(asrc->dev, "invalid ioctl cmd!\n");
++ break;
++ }
++
++ return ret;
++}
++
++static int mxc_asrc_open(struct inode *inode, struct file *file)
++{
++ struct asrc_pair_params *params;
++ int ret = 0;
++
++ ret = signal_pending(current);
++ if (ret) {
++ dev_err(asrc->dev, "current process has a signal pending\n");
++ return ret;
++ }
++
++ params = kzalloc(sizeof(struct asrc_pair_params), GFP_KERNEL);
++ if (params == NULL) {
++ dev_err(asrc->dev, "failed to allocate pair_params\n");
++ return -ENOBUFS;
++ }
++
++ file->private_data = params;
++
++ return ret;
++}
++
++static int mxc_asrc_close(struct inode *inode, struct file *file)
++{
++ struct asrc_pair_params *params;
++ unsigned long lock_flags;
++
++ params = file->private_data;
++
++ if (!params)
++ return 0;
++
++ if (params->asrc_active) {
++ params->asrc_active = 0;
++
++ dmaengine_terminate_all(params->input_dma_channel);
++ dmaengine_terminate_all(params->output_dma_channel);
++
++ asrc_stop_conv(params->index);
++
++ complete(&params->input_complete);
++ complete(&params->output_complete);
++ complete(&params->lastperiod_complete);
++ }
++
++ if (params->pair_hold) {
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ params->pair_hold = 0;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ if (params->input_dma_channel)
++ dma_release_channel(params->input_dma_channel);
++ if (params->output_dma_channel)
++ dma_release_channel(params->output_dma_channel);
++
++ mxc_free_dma_buf(params);
++
++ asrc_release_pair(params->index);
++ asrc_finish_conv(params->index);
++ }
++
++ kfree(params);
++ file->private_data = NULL;
++
++ return 0;
++}
++
++static int mxc_asrc_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret;
++
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ size, vma->vm_page_prot);
++ if (ret) {
++ dev_err(asrc->dev, "failed to memory map!\n");
++ return ret;
++ }
++
++ vma->vm_flags &= ~VM_IO;
++
++ return ret;
++}
++
++static const struct file_operations asrc_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = asrc_ioctl,
++ .mmap = mxc_asrc_mmap,
++ .open = mxc_asrc_open,
++ .release = mxc_asrc_close,
++};
++
++static struct miscdevice asrc_miscdev = {
++ .name = "mxc_asrc",
++ .fops = &asrc_fops,
++ .minor = MISC_DYNAMIC_MINOR,
++};
++
++static int asrc_read_proc_attr(struct file *file, char __user *buf,
++ size_t count, loff_t *off)
++{
++ char tmpbuf[80];
++ int len = 0;
++ u32 reg;
++
++ if (*off)
++ return 0;
++
++ regmap_read(asrc->regmap, REG_ASRCNCR, &reg);
++
++ len += sprintf(tmpbuf, "ANCA: %d\nANCB: %d\nANCC: %d\n",
++ ASRCNCR_ANCx_get(ASRC_PAIR_A, reg, asrc->channel_bits),
++ ASRCNCR_ANCx_get(ASRC_PAIR_B, reg, asrc->channel_bits),
++ ASRCNCR_ANCx_get(ASRC_PAIR_C, reg, asrc->channel_bits));
++
++ if (len > count)
++ return 0;
++
++ if (copy_to_user(buf, &tmpbuf, len))
++ return -EFAULT;
++
++ *off += len;
++
++ return len;
++}
++
++#define ASRC_MAX_PROC_BUFFER_SIZE 63
++
++static int asrc_write_proc_attr(struct file *file, const char __user *buffer,
++ size_t count, loff_t *data)
++{
++ char buf[ASRC_MAX_PROC_BUFFER_SIZE];
++ int na, nb, nc;
++ int total;
++
++ if (count > ASRC_MAX_PROC_BUFFER_SIZE) {
++ dev_err(asrc->dev, "proc write: the input string was too long\n");
++ return -EINVAL;
++ }
++
++ if (copy_from_user(buf, buffer, count)) {
++ dev_err(asrc->dev, "proc write: failed to copy buffer from user\n");
++ return -EFAULT;
++ }
++
++ sscanf(buf, "ANCA: %d\nANCB: %d\nANCC: %d", &na, &nb, &nc);
++
++ total = asrc->channel_bits > 3 ? 10 : 5;
++
++ if (na + nb + nc > total) {
++ dev_err(asrc->dev, "don't surpass %d for total\n", total);
++ return -EINVAL;
++ } else if (na % 2 != 0 || nb % 2 != 0 || nc % 2 != 0) {
++ dev_err(asrc->dev, "please set an even number for each pair\n");
++ return -EINVAL;
++ } else if (na < 0 || nb < 0 || nc < 0) {
++ dev_err(asrc->dev, "please set an positive number for each pair\n");
++ return -EINVAL;
++ }
++
++
++ asrc->asrc_pair[ASRC_PAIR_A].chn_max = na;
++ asrc->asrc_pair[ASRC_PAIR_B].chn_max = nb;
++ asrc->asrc_pair[ASRC_PAIR_C].chn_max = nc;
++
++ /* Update channel number settings */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_A, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_A, na, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_B, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_B, nb, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_C, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_C, nc, asrc->channel_bits));
++
++ return count;
++}
++
++static const struct file_operations asrc_proc_fops = {
++ .read = asrc_read_proc_attr,
++ .write = asrc_write_proc_attr,
++};
++
++static void asrc_proc_create(void)
++{
++ struct proc_dir_entry *proc_attr;
++
++ asrc->proc_asrc = proc_mkdir(ASRC_PROC_PATH, NULL);
++ if (!asrc->proc_asrc) {
++ dev_err(asrc->dev, "failed to create proc entry %s\n", ASRC_PROC_PATH);
++ return;
++ }
++
++ proc_attr = proc_create("ChSettings", S_IFREG | S_IRUGO | S_IWUSR,
++ asrc->proc_asrc, &asrc_proc_fops);
++ if (!proc_attr) {
++ remove_proc_entry(ASRC_PROC_PATH, NULL);
++ dev_err(asrc->dev, "failed to create proc attribute entry\n");
++ }
++}
++
++static void asrc_proc_remove(void)
++{
++ remove_proc_entry("ChSettings", asrc->proc_asrc);
++ remove_proc_entry(ASRC_PROC_PATH, NULL);
++}
++
++
++static bool asrc_readable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case REG_ASRCTR:
++ case REG_ASRIER:
++ case REG_ASRCNCR:
++ case REG_ASRCFG:
++ case REG_ASRCSR:
++ case REG_ASRCDR1:
++ case REG_ASRCDR2:
++ case REG_ASRSTR:
++ case REG_ASRPM1:
++ case REG_ASRPM2:
++ case REG_ASRPM3:
++ case REG_ASRPM4:
++ case REG_ASRPM5:
++ case REG_ASRTFR1:
++ case REG_ASRCCR:
++ case REG_ASRDOA:
++ case REG_ASRDOB:
++ case REG_ASRDOC:
++ case REG_ASRIDRHA:
++ case REG_ASRIDRLA:
++ case REG_ASRIDRHB:
++ case REG_ASRIDRLB:
++ case REG_ASRIDRHC:
++ case REG_ASRIDRLC:
++ case REG_ASR76K:
++ case REG_ASR56K:
++ case REG_ASRMCRA:
++ case REG_ASRFSTA:
++ case REG_ASRMCRB:
++ case REG_ASRFSTB:
++ case REG_ASRMCRC:
++ case REG_ASRFSTC:
++ case REG_ASRMCR1A:
++ case REG_ASRMCR1B:
++ case REG_ASRMCR1C:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool asrc_writeable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case REG_ASRCTR:
++ case REG_ASRIER:
++ case REG_ASRCNCR:
++ case REG_ASRCFG:
++ case REG_ASRCSR:
++ case REG_ASRCDR1:
++ case REG_ASRCDR2:
++ case REG_ASRSTR:
++ case REG_ASRPM1:
++ case REG_ASRPM2:
++ case REG_ASRPM3:
++ case REG_ASRPM4:
++ case REG_ASRPM5:
++ case REG_ASRTFR1:
++ case REG_ASRCCR:
++ case REG_ASRDIA:
++ case REG_ASRDIB:
++ case REG_ASRDIC:
++ case REG_ASRIDRHA:
++ case REG_ASRIDRLA:
++ case REG_ASRIDRHB:
++ case REG_ASRIDRLB:
++ case REG_ASRIDRHC:
++ case REG_ASRIDRLC:
++ case REG_ASR76K:
++ case REG_ASR56K:
++ case REG_ASRMCRA:
++ case REG_ASRMCRB:
++ case REG_ASRMCRC:
++ case REG_ASRMCR1A:
++ case REG_ASRMCR1B:
++ case REG_ASRMCR1C:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static struct regmap_config asrc_regmap_config = {
++ .reg_bits = 32,
++ .reg_stride = 4,
++ .val_bits = 32,
++
++ .max_register = REG_ASRMCR1C,
++ .readable_reg = asrc_readable_reg,
++ .writeable_reg = asrc_writeable_reg,
++};
++
++static int mxc_asrc_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *of_id = of_match_device(fsl_asrc_ids, &pdev->dev);
++ struct device_node *np = pdev->dev.of_node;
++ enum mxc_asrc_type devtype;
++ struct resource *res;
++ void __iomem *regs;
++ int ret;
++
++ /* Check if the device is existed */
++ if (!np)
++ return -ENODEV;
++
++ asrc = devm_kzalloc(&pdev->dev, sizeof(struct asrc_data), GFP_KERNEL);
++ if (!asrc)
++ return -ENOMEM;
++
++ if (of_id) {
++ const struct platform_device_id *id_entry = of_id->data;
++ devtype = id_entry->driver_data;
++ } else {
++ devtype = pdev->id_entry->driver_data;
++ }
++
++ asrc->dev = &pdev->dev;
++ asrc->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ asrc->asrc_pair[ASRC_PAIR_A].chn_max = 2;
++ asrc->asrc_pair[ASRC_PAIR_B].chn_max = 6;
++ asrc->asrc_pair[ASRC_PAIR_C].chn_max = 2;
++ asrc->asrc_pair[ASRC_PAIR_A].overload_error = 0;
++ asrc->asrc_pair[ASRC_PAIR_B].overload_error = 0;
++ asrc->asrc_pair[ASRC_PAIR_C].overload_error = 0;
++
++ /* Map the address */
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (IS_ERR(res)) {
++ dev_err(&pdev->dev, "could not determine device resources\n");
++ return PTR_ERR(res);
++ }
++
++ regs = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(regs)) {
++ dev_err(&pdev->dev, "could not map device resources\n");
++ return PTR_ERR(regs);
++ }
++ asrc->paddr = res->start;
++
++ /* Register regmap and let it prepare core clock */
++ asrc->regmap = devm_regmap_init_mmio_clk(&pdev->dev,
++ "core", regs, &asrc_regmap_config);
++ if (IS_ERR(asrc->regmap)) {
++ dev_err(&pdev->dev, "regmap init failed\n");
++ return PTR_ERR(asrc->regmap);
++ }
++
++ asrc->irq = platform_get_irq(pdev, 0);
++ if (asrc->irq == NO_IRQ) {
++ dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
++ return asrc->irq;
++ }
++
++ ret = devm_request_irq(&pdev->dev, asrc->irq, asrc_isr, 0, np->name, NULL);
++ if (ret) {
++ dev_err(&pdev->dev, "could not claim irq %u: %d\n", asrc->irq, ret);
++ return ret;
++ }
++
++ asrc->asrc_clk = devm_clk_get(&pdev->dev, "core");
++ if (IS_ERR(asrc->asrc_clk)) {
++ dev_err(&pdev->dev, "failed to get core clock\n");
++ return PTR_ERR(asrc->asrc_clk);
++ }
++
++ asrc->dma_clk = devm_clk_get(&pdev->dev, "dma");
++ if (IS_ERR(asrc->dma_clk)) {
++ dev_err(&pdev->dev, "failed to get dma script clock\n");
++ return PTR_ERR(asrc->dma_clk);
++ }
++
++ switch (devtype) {
++ case IMX35_ASRC:
++ asrc->channel_bits = 3;
++ input_clk_map = input_clk_map_v1;
++ output_clk_map = output_clk_map_v1;
++ break;
++ case IMX53_ASRC:
++ asrc->channel_bits = 4;
++ input_clk_map = input_clk_map_v2;
++ output_clk_map = output_clk_map_v2;
++ break;
++ default:
++ dev_err(&pdev->dev, "unsupported device type\n");
++ return -EINVAL;
++ }
++
++ ret = misc_register(&asrc_miscdev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register char device %d\n", ret);
++ return ret;
++ }
++
++ asrc_proc_create();
++
++ ret = mxc_init_asrc();
++ if (ret) {
++ dev_err(&pdev->dev, "failed to init asrc %d\n", ret);
++ goto err_misc;
++ }
++
++ dev_info(&pdev->dev, "mxc_asrc registered\n");
++
++ return ret;
++
++err_misc:
++ misc_deregister(&asrc_miscdev);
++
++ return ret;
++}
++
++static int mxc_asrc_remove(struct platform_device *pdev)
++{
++ asrc_proc_remove();
++ misc_deregister(&asrc_miscdev);
++
++ return 0;
++}
++
++static struct platform_driver mxc_asrc_driver = {
++ .driver = {
++ .name = "mxc_asrc",
++ .of_match_table = fsl_asrc_ids,
++ },
++ .probe = mxc_asrc_probe,
++ .remove = mxc_asrc_remove,
++};
++
++module_platform_driver(mxc_asrc_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Asynchronous Sample Rate Converter");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:mxc_asrc");
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/Kconfig linux-openelec/drivers/mxc/gpu-viv/Kconfig
+--- linux-3.14.36/drivers/mxc/gpu-viv/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,20 @@
++menu "MXC Vivante GPU support"
++ depends on SOC_IMX6Q
++
++config MXC_GPU_VIV
++ tristate "MXC Vivante GPU support"
++ ---help---
++ Say Y to get the GPU driver support.
++choice
++ prompt "Galcore Version"
++ default MXC_GPU_VIV_V5
++
++config MXC_GPU_VIV_V5
++ bool "Galcore Version 5.x"
++
++config MXC_GPU_VIV_V4
++ bool "Galcore Version 4.x"
++
++endchoice
++
++endmenu
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c linux-openelec/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,932 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_InitializeInfo
++**
++** Initialize architecture dependent command buffer information.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ /* Reset interrupts. */
++ Command->info.feBufferInt = -1;
++ Command->info.tsOverflowInt = -1;
++
++ /* Set command buffer attributes. */
++ Command->info.addressAlignment = 64;
++ Command->info.commandAlignment = 8;
++
++ /* Determine command alignment address mask. */
++ Command->info.addressMask = ((((gctUINT32) (Command->info.addressAlignment - 1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0 ) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Query the number of bytes needed by the STATE command. */
++ gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++ Command, 0x0, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.stateCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RESTART command. */
++ gcmkERR_BREAK(gckVGCOMMAND_RestartCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.restartCommandSize
++ ));
++
++ /* Query the number of bytes needed by the FETCH command. */
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.fetchCommandSize
++ ));
++
++ /* Query the number of bytes needed by the CALL command. */
++ gcmkERR_BREAK(gckVGCOMMAND_CallCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.callCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RETURN command. */
++ gcmkERR_BREAK(gckVGCOMMAND_ReturnCommand(
++ Command, gcvNULL,
++ &Command->info.returnCommandSize
++ ));
++
++ /* Query the number of bytes needed by the EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, gcvNULL, gcvBLOCK_PIXEL, -1,
++ &Command->info.eventCommandSize
++ ));
++
++ /* Query the number of bytes needed by the END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command, gcvNULL, -1,
++ &Command->info.endCommandSize
++ ));
++
++ /* Determine the tail reserve size. */
++ Command->info.staticTailSize = gcmMAX(
++ Command->info.fetchCommandSize,
++ gcmMAX(
++ Command->info.returnCommandSize,
++ Command->info.endCommandSize
++ )
++ );
++
++ /* Determine the maximum tail size. */
++ Command->info.dynamicTailSize
++ = Command->info.staticTailSize
++ + Command->info.eventCommandSize * gcvBLOCK_COUNT;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_StateCommand
++**
++** Append a STATE command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctUINT32 Pipe
++** Harwdare destination pipe.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** STATE command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 Address
++** Starting register address of the state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT32 Count
++** Number of states in state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the STATE command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the STATE command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Pipe=0x%x Logical=0x%x Address=0x%x Count=0x%x Bytes = 0x%x",
++ Command, Pipe, Logical, Address, Count, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) ((gctUINT32) (Pipe) & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LOAD_STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_RestartCommand
++**
++** Form a RESTART command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RESTART command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this RESTART
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this RESTART command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RESTART command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RESTART command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++ gctUINT32 beginEndMark;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Determine Begin/End flag. */
++ beginEndMark = (FetchCount > 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)));
++
++ /* Append RESTART. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x9 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)))
++ | beginEndMark;
++
++ buffer[1]
++ = FetchAddress;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RESTART command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_FetchCommand
++**
++** Form a FETCH command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** FETCH command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this FETCH
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this FETCH command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the FETCH command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the FETCH command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append FETCH. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x5 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the FETCH command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LINK. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_CallCommand
++**
++** Append a CALL command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** CALL command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this CALL
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this CALL command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the CALL command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the CALL command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append CALL. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x6 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CALL command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_ReturnCommand
++**
++** Append a RETURN command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RETURN command at or gcvNULL to query the size of the command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RETURN command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RETURN command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Bytes = 0x%x",
++ Command, Logical, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append RETURN. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x7 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RETURN command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EventCommand
++**
++** Form an EVENT command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** EVENT command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gceBLOCK Block
++** Block that will generate the interrupt.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Block=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, Block, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ typedef struct _gcsEVENTSTATES
++ {
++ /* Chips before VG21 use these values. */
++ gctUINT eventFromFE;
++ gctUINT eventFromPE;
++
++ /* VG21 chips and later use SOURCE field. */
++ gctUINT eventSource;
++ }
++ gcsEVENTSTATES;
++
++ static gcsEVENTSTATES states[] =
++ {
++ /* gcvBLOCK_COMMAND */
++ {
++ (gctUINT)~0,
++ (gctUINT)~0,
++ (gctUINT)~0
++ },
++
++ /* gcvBLOCK_TESSELLATOR */
++ {
++ 0x0,
++ 0x1,
++ 0x10
++ },
++
++ /* gcvBLOCK_TESSELLATOR2 */
++ {
++ 0x0,
++ 0x1,
++ 0x12
++ },
++
++ /* gcvBLOCK_TESSELLATOR3 */
++ {
++ 0x0,
++ 0x1,
++ 0x14
++ },
++
++ /* gcvBLOCK_RASTER */
++ {
++ 0x0,
++ 0x1,
++ 0x07,
++ },
++
++ /* gcvBLOCK_VG */
++ {
++ 0x0,
++ 0x1,
++ 0x0F
++ },
++
++ /* gcvBLOCK_VG2 */
++ {
++ 0x0,
++ 0x1,
++ 0x11
++ },
++
++ /* gcvBLOCK_VG3 */
++ {
++ 0x0,
++ 0x1,
++ 0x13
++ },
++
++ /* gcvBLOCK_PIXEL */
++ {
++ 0x0,
++ 0x1,
++ 0x07
++ },
++ };
++
++ /* Verify block ID. */
++ gcmkVERIFY_ARGUMENT(gcmIS_VALID_INDEX(Block, states));
++
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++
++ /* Determine chip version. */
++ if (Command->vg21)
++ {
++ /* Get the event source for the block. */
++ gctUINT eventSource = states[Block].eventSource;
++
++ /* Supported? */
++ if (eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) ((gctUINT32) (eventSource) & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ else
++ {
++ /* Get the event source for the block. */
++ gctUINT eventFromFE = states[Block].eventFromFE;
++ gctUINT eventFromPE = states[Block].eventFromPE;
++
++ /* Supported? */
++ if (eventFromFE == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (eventFromFE) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (eventFromPE) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Make sure the events are directly supported for the block. */
++ if (states[Block].eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine event source. */
++ if (Block == gcvBLOCK_COMMAND)
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++ else
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT and END commands. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EndCommand
++**
++** Form an END command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** END command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append END. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR memory;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ memory = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ memory[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ memory[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append END. */
++ memory[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT and END commands. */
++ *Bytes = 16;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h linux-openelec/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,319 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_command_vg_h_
++#define __gc_hal_kernel_hardware_command_vg_h_
++
++/******************************************************************************\
++******************* Task and Interrupt Management Structures. ******************
++\******************************************************************************/
++
++/* Task storage header. */
++typedef struct _gcsTASK_STORAGE * gcsTASK_STORAGE_PTR;
++typedef struct _gcsTASK_STORAGE
++{
++ /* Next allocated storage buffer. */
++ gcsTASK_STORAGE_PTR next;
++}
++gcsTASK_STORAGE;
++
++/* Task container header. */
++typedef struct _gcsTASK_CONTAINER * gcsTASK_CONTAINER_PTR;
++typedef struct _gcsTASK_CONTAINER
++{
++ /* The number of tasks left to be processed in the container. */
++ gctINT referenceCount;
++
++ /* Size of the buffer. */
++ gctUINT size;
++
++ /* Link to the previous and the next allocated containers. */
++ gcsTASK_CONTAINER_PTR allocPrev;
++ gcsTASK_CONTAINER_PTR allocNext;
++
++ /* Link to the previous and the next containers in the free list. */
++ gcsTASK_CONTAINER_PTR freePrev;
++ gcsTASK_CONTAINER_PTR freeNext;
++}
++gcsTASK_CONTAINER;
++
++/* Kernel space task master table entry. */
++typedef struct _gcsBLOCK_TASK_ENTRY * gcsBLOCK_TASK_ENTRY_PTR;
++typedef struct _gcsBLOCK_TASK_ENTRY
++{
++ /* Pointer to the current task container for the block. */
++ gcsTASK_CONTAINER_PTR container;
++
++ /* Pointer to the current task data within the container. */
++ gcsTASK_HEADER_PTR task;
++
++ /* Pointer to the last link task within the container. */
++ gcsTASK_LINK_PTR link;
++
++ /* Number of interrupts allocated for this block. */
++ gctUINT interruptCount;
++
++ /* The index of the current interrupt. */
++ gctUINT interruptIndex;
++
++ /* Interrupt semaphore. */
++ gctSEMAPHORE interruptSemaphore;
++
++ /* Interrupt value array. */
++ gctINT32 interruptArray[32];
++}
++gcsBLOCK_TASK_ENTRY;
++
++
++/******************************************************************************\
++********************* Command Queue Management Structures. *********************
++\******************************************************************************/
++
++/* Command queue kernel element pointer. */
++typedef struct _gcsKERNEL_CMDQUEUE * gcsKERNEL_CMDQUEUE_PTR;
++
++/* Command queue object handler function type. */
++typedef gceSTATUS (* gctOBJECT_HANDLER) (
++ gckVGKERNEL Kernel,
++ gcsKERNEL_CMDQUEUE_PTR Entry
++ );
++
++/* Command queue kernel element. */
++typedef struct _gcsKERNEL_CMDQUEUE
++{
++ /* The number of buffers in the queue. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Pointer to the object handler function. */
++ gctOBJECT_HANDLER handler;
++}
++gcsKERNEL_CMDQUEUE;
++
++/* Command queue header. */
++typedef struct _gcsKERNEL_QUEUE_HEADER * gcsKERNEL_QUEUE_HEADER_PTR;
++typedef struct _gcsKERNEL_QUEUE_HEADER
++{
++ /* The size of the buffer in bytes. */
++ gctUINT size;
++
++ /* The number of pending entries to be processed. */
++ volatile gctUINT pending;
++
++ /* The current command queue entry. */
++ gcsKERNEL_CMDQUEUE_PTR currentEntry;
++
++ /* Next buffer. */
++ gcsKERNEL_QUEUE_HEADER_PTR next;
++}
++gcsKERNEL_QUEUE_HEADER;
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++/* gckVGCOMMAND object. */
++struct _gckVGCOMMAND
++{
++ /***************************************************************************
++ ** Object data and pointers.
++ */
++
++ gcsOBJECT object;
++ gckVGKERNEL kernel;
++ gckOS os;
++ gckVGHARDWARE hardware;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++
++ /***************************************************************************
++ ** Enable command queue dumping.
++ */
++
++ gctBOOL enableDumping;
++
++
++ /***************************************************************************
++ ** Bus Error interrupt.
++ */
++
++ gctINT32 busErrorInt;
++
++
++ /***************************************************************************
++ ** Command buffer information.
++ */
++
++ gcsCOMMAND_BUFFER_INFO info;
++
++
++ /***************************************************************************
++ ** Synchronization objects.
++ */
++
++ gctPOINTER queueMutex;
++ gctPOINTER taskMutex;
++ gctPOINTER commitMutex;
++
++
++ /***************************************************************************
++ ** Task management.
++ */
++
++ /* The head of the storage buffer linked list. */
++ gcsTASK_STORAGE_PTR taskStorage;
++
++ /* Allocation size. */
++ gctUINT taskStorageGranularity;
++ gctUINT taskStorageUsable;
++
++ /* The free container list. */
++ gcsTASK_CONTAINER_PTR taskFreeHead;
++ gcsTASK_CONTAINER_PTR taskFreeTail;
++
++ /* Task table */
++ gcsBLOCK_TASK_ENTRY taskTable[gcvBLOCK_COUNT];
++
++
++ /***************************************************************************
++ ** Command queue.
++ */
++
++ /* Pointer to the allocated queue memory. */
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++
++ /* Pointer to the current available queue from which new queue entries
++ will be allocated. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* If different from queueHead, points to the command queue which is
++ currently being executed by the hardware. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++
++ /* Points to the queue to merge the tail with when the tail is processed. */
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++
++ /* Queue overflow counter. */
++ gctUINT queueOverflow;
++
++
++ /***************************************************************************
++ ** Context.
++ */
++
++ /* Context counter used for unique ID. */
++ gctUINT64 contextCounter;
++
++ /* Current context ID. */
++ gctUINT64 currentContext;
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++ gctINT32 powerStallInt;
++ gcsCMDBUFFER_PTR powerStallBuffer;
++ gctSIGNAL powerStallSignal;
++
++};
++
++/******************************************************************************\
++************************ gckVGCOMMAND Object Internal API. ***********************
++\******************************************************************************/
++
++/* Initialize architecture dependent command buffer information. */
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ );
++
++/* Form a STATE command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a RESTART command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a FETCH command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a CALL command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a RETURN command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form an EVENT command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form an END command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++#endif /* __gc_hal_kernel_hardware_command_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c linux-openelec/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2114 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++ gcvPOWER_FLAG_NOP = 1 << 11,
++}
++gcePOWER_FLAGS;
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckOS Os
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ /* Read register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00104,
++ 0x00000000));
++
++ /* Wait for clock being stable. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32 * ChipFeatures,
++ OUT gctUINT32 * ChipMinorFeatures,
++ OUT gctUINT32 * ChipMinorFeatures2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 chipIdentity;
++
++ do
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG, 0x00018, &chipIdentity));
++
++ /* Special case for older graphic cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ *ChipModel = gcv500;
++ *ChipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00020,
++ (gctUINT32 *) ChipModel));
++
++ /* Read CHIP_REV register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00024,
++ ChipRevision));
++ }
++
++ /* Read chip feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x0001C, ChipFeatures
++ ));
++
++ /* Read chip minor feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00034, ChipMinorFeatures
++ ));
++
++ /* Read chip minor feature register #2. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00074, ChipMinorFeatures2
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_VERBOSE,
++ "ChipModel=0x%08X\n"
++ "ChipRevision=0x%08X\n"
++ "ChipFeatures=0x%08X\n"
++ "ChipMinorFeatures=0x%08X\n"
++ "ChipMinorFeatures2=0x%08X\n",
++ *ChipModel,
++ *ChipRevision,
++ *ChipFeatures,
++ *ChipMinorFeatures,
++ *ChipMinorFeatures2
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_VGPowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckVGHARDWARE hardware = (gckVGHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++/******************************************************************************\
++****************************** gckVGHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Construct
++**
++** Construct a new gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** OUTPUT:
++**
++** gckVGHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckVGHARDWARE
++** object.
++*/
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ )
++{
++ gckVGHARDWARE hardware = gcvNULL;
++ gceSTATUS status;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x ", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ status = _ResetGPU(Os);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ /* Identify the hardware. */
++ gcmkERR_BREAK(_IdentifyHardware(Os,
++ &chipModel, &chipRevision,
++ &chipFeatures, &chipMinorFeatures, &chipMinorFeatures2
++ ));
++
++ /* Allocate the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckVGHARDWARE), (gctPOINTER *) &hardware
++ ));
++
++ /* Initialize the gckVGHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++
++ /* Set chip identity. */
++ hardware->chipModel = chipModel;
++ hardware->chipRevision = chipRevision;
++ hardware->chipFeatures = chipFeatures;
++ hardware->chipMinorFeatures = chipMinorFeatures;
++ hardware->chipMinorFeatures2 = chipMinorFeatures2;
++
++ hardware->powerMutex = gcvNULL;
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->chipPowerStateGlobal = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTime = 0;
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _VGPowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ /* Determine whether FE 2.0 is present. */
++ hardware->fe20 = ((((gctUINT32) (hardware->chipFeatures)) >> (0 ? 28:28) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))));
++
++ /* Determine whether VG 2.0 is present. */
++ hardware->vg20 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 13:13) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))));
++
++ /* Determine whether VG 2.1 is present. */
++ hardware->vg21 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++
++ /* Set default event mask. */
++ hardware->eventMask = 0xFFFFFFFF;
++
++ gcmkERR_BREAK(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++ /* Set fast clear to auto. */
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(hardware, -1));
++
++ gcmkERR_BREAK(gckOS_CreateMutex(Os, &hardware->powerMutex));
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Return pointer to the gckVGHARDWARE object. */
++ *Hardware = hardware;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++#if gcdPOWEROFF_TIMEOUT
++ if (hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ if (hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Free(Os, hardware));
++ }
++
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvFALSE, gcvFALSE));
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Destroy
++**
++** Destroy an gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ if (Hardware->powerMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(
++ Hardware->os, Hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ if (Hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++ }
++
++ /* Free the object. */
++ status = gckOS_Free(Hardware->os, Hardware);
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, no horizontal pixel per tile will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, no vertical pixel per tile will be returned.
++*/
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x InternalSize=0x%x InternalBaseAddress=0x%x InternalAlignment=0x%x"
++ "ExternalSize=0x%x ExternalBaseAddress=0x%x ExternalAlignment=0x%x HorizontalTileSize=0x%x VerticalTileSize=0x%x",
++ Hardware, InternalSize, InternalBaseAddress, InternalAlignment,
++ ExternalSize, ExternalBaseAddress, ExternalAlignment, HorizontalTileSize, VerticalTileSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gceCHIPMODEL * ChipModel
++** If 'ChipModel' is not gcvNULL, the variable it points to will
++** receive the model of the chip.
++**
++** gctUINT32 * ChipRevision
++** If 'ChipRevision' is not gcvNULL, the variable it points to will
++** receive the revision of the chip.
++**
++** gctUINT32 * ChipFeatures
++** If 'ChipFeatures' is not gcvNULL, the variable it points to will
++** receive the feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures
++** If 'ChipMinorFeatures' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures2
++** If 'ChipMinorFeatures2' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures2
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x ChipModel=0x%x ChipRevision=0x%x ChipFeatures = 0x%x ChipMinorFeatures = 0x%x ChipMinorFeatures2 = 0x%x",
++ Hardware, ChipModel, ChipRevision, ChipFeatures, ChipMinorFeatures, ChipMinorFeatures2);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Return chip model. */
++ if (ChipModel != gcvNULL)
++ {
++ *ChipModel = Hardware->chipModel;
++ }
++
++ /* Return revision number. */
++ if (ChipRevision != gcvNULL)
++ {
++ *ChipRevision = Hardware->chipRevision;
++ }
++
++ /* Return feature set. */
++ if (ChipFeatures != gcvNULL)
++ {
++ gctUINT32 features = Hardware->chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 since it did not have this *\
++ \* bit. */
++ if ((Hardware->chipModel == gcv500)
++ && (Hardware->chipRevision == 0)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ /* Mark 2D pipe as available for GC300 since it did not have this *\
++ \* bit. */
++ if (Hardware->chipModel == gcv300)
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ *ChipFeatures = features;
++ }
++
++ /* Return minor feature set. */
++ if (ChipMinorFeatures != gcvNULL)
++ {
++ *ChipMinorFeatures = Hardware->chipMinorFeatures;
++ }
++
++ /* Return minor feature set #2. */
++ if (ChipMinorFeatures2 != gcvNULL)
++ {
++ *ChipMinorFeatures2 = Hardware->chipMinorFeatures2;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertFormat
++**
++** Convert an API format to hardware parameters.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gceSURF_FORMAT Format
++** API format to convert.
++**
++** OUTPUT:
++**
++** gctUINT32 * BitsPerPixel
++** Pointer to a variable that will hold the number of bits per pixel.
++**
++** gctUINT32 * BytesPerTile
++** Pointer to a variable that will hold the number of bytes per tile.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ )
++{
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bytesPerTile;
++
++ gcmkHEADER_ARG("Hardware=0x%x Format=0x%x BitsPerPixel=0x%x BytesPerTile = 0x%x",
++ Hardware, Format, BitsPerPixel, BytesPerTile);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Dispatch on format. */
++ switch (Format)
++ {
++ case gcvSURF_A1:
++ case gcvSURF_L1:
++ /* 1-bpp format. */
++ bitsPerPixel = 1;
++ bytesPerTile = (1 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_A4:
++ /* 4-bpp format. */
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_INDEX8:
++ case gcvSURF_A8:
++ case gcvSURF_L8:
++ /* 8-bpp format. */
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_YV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_NV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ /* 4444 variations. */
++ case gcvSURF_X4R4G4B4:
++ case gcvSURF_A4R4G4B4:
++ case gcvSURF_R4G4B4X4:
++ case gcvSURF_R4G4B4A4:
++ case gcvSURF_B4G4R4X4:
++ case gcvSURF_B4G4R4A4:
++ case gcvSURF_X4B4G4R4:
++ case gcvSURF_A4B4G4R4:
++
++ /* 1555 variations. */
++ case gcvSURF_X1R5G5B5:
++ case gcvSURF_A1R5G5B5:
++ case gcvSURF_R5G5B5X1:
++ case gcvSURF_R5G5B5A1:
++ case gcvSURF_X1B5G5R5:
++ case gcvSURF_A1B5G5R5:
++ case gcvSURF_B5G5R5X1:
++ case gcvSURF_B5G5R5A1:
++
++ /* 565 variations. */
++ case gcvSURF_R5G6B5:
++ case gcvSURF_B5G6R5:
++
++ case gcvSURF_A8L8:
++ case gcvSURF_YUY2:
++ case gcvSURF_UYVY:
++ case gcvSURF_D16:
++ /* 16-bpp format. */
++ bitsPerPixel = 16;
++ bytesPerTile = (16 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_X8R8G8B8:
++ case gcvSURF_A8R8G8B8:
++ case gcvSURF_X8B8G8R8:
++ case gcvSURF_A8B8G8R8:
++ case gcvSURF_R8G8B8X8:
++ case gcvSURF_R8G8B8A8:
++ case gcvSURF_B8G8R8X8:
++ case gcvSURF_B8G8R8A8:
++ case gcvSURF_D32:
++ /* 32-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_D24S8:
++ /* 24-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT1:
++ case gcvSURF_ETC1:
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT2:
++ case gcvSURF_DXT3:
++ case gcvSURF_DXT4:
++ case gcvSURF_DXT5:
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ default:
++ /* Invalid format. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Set the result. */
++ if (BitsPerPixel != gcvNULL)
++ {
++ * BitsPerPixel = bitsPerPixel;
++ }
++
++ if (BytesPerTile != gcvNULL)
++ {
++ * BytesPerTile = bytesPerTile;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Pool=0x%x Offset = 0x%x",
++ Hardware, Address, Pool, Offset);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 1:0)) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x2:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = ((((gctUINT32) (Address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address of the command buffer.
++**
++** gctSIZE_T Count
++** Number of command-sized data units to be executed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Count=0x%x",
++ Hardware, Address, Count);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ /* Enable all events. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00014,
++ Hardware->eventMask
++ ));
++
++ if (Hardware->fe20)
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00500,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00504,
++ Count
++ ));
++ }
++ else
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00654,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00658,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_AlignToTile
++**
++** Align the specified width and height to tile boundaries.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** gceSURF_TYPE Type
++** Type of alignment.
++**
++** gctUINT32 * Width
++** Pointer to the width to be aligned. If 'Width' is gcvNULL, no width
++** will be aligned.
++**
++** gctUINT32 * Height
++** Pointer to the height to be aligned. If 'Height' is gcvNULL, no height
++** will be aligned.
++**
++** OUTPUT:
++**
++** gctUINT32 * Width
++** Pointer to a variable that will receive the aligned width.
++**
++** gctUINT32 * Height
++** Pointer to a variable that will receive the aligned height.
++*/
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32 * Width,
++ IN OUT gctUINT32 * Height
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Type=0x%x Width=0x%x Height=0x%x",
++ Hardware, Type, Width, Height);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Width != gcvNULL)
++ {
++ /* Align the width. */
++ *Width = gcmALIGN(*Width, (Type == gcvSURF_TEXTURE) ? 4 : 16);
++ }
++
++ if (Height != gcvNULL)
++ {
++ /* Special case for VG images. */
++ if ((*Height == 0) && (Type == gcvSURF_IMAGE))
++ {
++ *Height = 4;
++ }
++ else
++ {
++ /* Align the height. */
++ *Height = gcmALIGN(*Height, 4);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Address=0x%x",
++ Hardware, Logical, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ /* Convert logical address into a physical address. */
++ gcmkERR_BREAK(gckOS_GetPhysicalAddress(
++ Hardware->os, Logical, &address
++ ));
++
++ /* Return hardware specific address. */
++ *Address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QuerySystemMemory
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++** gckVGHARDWARE Harwdare
++** Pointer to an gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++** Poinetr to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x SystemSize=0x%x SystemBaseAddress=0x%x",
++ Hardware, SystemSize, SystemBaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = (gctSIZE_T)(1 << 31);
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x",
++ Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ do
++ {
++ /* Convert the logical address into an hardware address. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertLogical(Hardware, Logical, &address) );
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00400,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00404,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00408,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x0040C,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00410,
++ gcmkFIXADDRESS(address)) );
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT32_PTR buffer;
++
++ /* Create a shortcut to the command buffer object. */
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &commandBuffer, (gctPOINTER *) &buffer
++ ));
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Index=0x%x Offset=0x%x Address=0x%x",
++ Hardware, Index, Offset, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ address = (Index << 12) | Offset;
++
++ /* Set virtual type. */
++ address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Set the result. */
++ *Address = address;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x Data=0x%x", Hardware, Data);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ /* Read register and return. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, Data);
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ )
++{
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ if (!(((((gctUINT32) (Hardware->chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ return gcvSTATUS_OK;
++ }
++
++ do
++ {
++ if (Enable == -1)
++ {
++ Enable = (Hardware->chipModel > gcv500) ||
++ ((Hardware->chipModel == gcv500) && (Hardware->chipRevision >= 3));
++ }
++
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ &debug));
++
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++#ifdef AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION)));
++#endif
++
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ debug));
++
++ Hardware->allowFastClear = Enable;
++
++ status = gcvFALSE;
++ }
++ while (gcvFALSE);
++
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x IDs=0x%x", Hardware, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IDs != gcvNULL);
++
++ /* Read AQIntrAcknowledge register. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00010,
++ IDs);
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS _CommandStall(
++ gckVGHARDWARE Hardware)
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gctUINT32_PTR buffer;
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &command->powerStallBuffer,
++ (gctPOINTER *) &buffer
++ ));
++
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ command, buffer, gcvBLOCK_PIXEL,
++ command->powerStallInt, gcvNULL));
++
++ gcmkERR_BREAK(gckVGCOMMAND_Execute(
++ command,
++ command->powerStallBuffer
++ ));
++
++ /* Wait the signal. */
++ gcmkERR_BREAK(gckOS_WaitSignal(
++ command->os,
++ command->powerStallSignal,
++ gcdGPU_TIMEOUT));
++
++
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag/*, clock*/;
++
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL commitMutex = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++
++ gctBOOL broadcast = gcvFALSE;
++ gctUINT32 process, thread;
++ gctBOOL global = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_NOP,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_NOP,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d",
++ State);
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON and note we are inside recovery. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ /* Acquire the power mutex. */
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE)
++ {
++ /* gcvPOWER_IDLE_BROADCAST is from IST,
++ ** so waiting here will cause deadlock,
++ ** if lock holder call gckCOMMAND_Stall() */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++ /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ /*clock = clocks[State];*/
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime is pushed forward, give up.*/
++ if (isAfter
++ /* Expect a transition start from IDLE. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* internal power control */
++ if (!global)
++ {
++ if (Hardware->chipPowerStateGlobal == gcvPOWER_OFF)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os,
++ command->commitMutex,
++ gcvINFINITE
++ ));
++
++ commitMutex = gcvTRUE;
++
++ gcmkONERROR(_CommandStall(Hardware));
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ }
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState)
++ {
++ }
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ gcmkONERROR(gckVGHARDWARE_SetMMU(Hardware, Hardware->kernel->mmu->pageTableLogical));
++
++ /* Force the command queue to reload the next context. */
++ command->currentContext = 0;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ gcvCORE_VG,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ }
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++ if (global)
++ {
++ /* Save the new power state. */
++ Hardware->chipPowerStateGlobal = State;
++ }
++
++ if (commitMutex)
++ {
++ /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ /* Reset power off time */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOfftimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ if (commitMutex)
++ {
++ /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++ /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL PowerManagement
++** Power Management State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++#if gcdPOWEROFF_TIMEOUT
++ Hardware->powerOffTimeout = Timeout;
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++#if gcdPOWEROFF_TIMEOUT
++ *Timeout = Hardware->powerOffTimeout;
++#endif
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, &idle));
++
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 8:8)) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 10:10)) & ((gctUINT32) ((((1 ? 10:10) - (0 ? 10:10) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 10:10) - (0 ? 10:10) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ *IsIdle = gcvTRUE;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h linux-openelec/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,75 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_vg_h_
++#define __gc_hal_kernel_hardware_vg_h_
++
++/* gckHARDWARE object. */
++struct _gckVGHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckKERNEL object. */
++ gckVGKERNEL kernel;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Chip characteristics. */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++ gctBOOL allowFastClear;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++ /* Event mask. */
++ gctUINT32 eventMask;
++
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gceCHIPPOWERSTATE chipPowerStateGlobal;
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++ gctPOINTER pageTableDirty;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctBOOL powerManagement;
++};
++
++#endif /* __gc_hal_kernel_hardware_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c linux-openelec/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1735 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++#include "gc_hal_kernel_buffer.h"
++
++/******************************************************************************\
++******************************** Debugging Macro *******************************
++\******************************************************************************/
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++
++/******************************************************************************\
++************************** Context State Buffer Helpers ************************
++\******************************************************************************/
++
++#define _STATE(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT(reg, count) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT_OFFSET(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_MIRROR_COUNT(reg, mirror, count) \
++ _StateMirror(\
++ Context, \
++ reg ## _Address >> 2, \
++ count, \
++ mirror ## _Address >> 2 \
++ )
++
++#define _STATE_HINT(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_HINT_BLOCK(reg, block, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + (block << reg ## _BLK), \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_X(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvTRUE, gcvFALSE \
++ )
++
++#define _CLOSE_RANGE() \
++ _TerminateStateBlock(Context, index)
++
++#define _ENABLE(reg, field) \
++ do \
++ { \
++ if (gcmVERIFYFIELDVALUE(data, reg, MASK_ ## field, ENABLED)) \
++ { \
++ enable |= gcmFIELDMASK(reg, field); \
++ } \
++ } \
++ while (gcvFALSE)
++
++#define _BLOCK_COUNT(reg) \
++ ((reg ## _Count) >> (reg ## _BLK))
++
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++#define gcdSTATE_MASK \
++ (((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))))
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_TerminateStateBlock(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T align;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ /* Flush the current state block; make sure no pairing with the states
++ to follow happens. */
++ if (align && (buffer != gcvNULL))
++ {
++ buffer[Index] = 0xDEADDEAD;
++ }
++
++ /* Reset last address. */
++ Context->lastAddress = ~0U;
++
++ /* Return alignment requirement. */
++ return align;
++}
++#endif
++
++
++static gctSIZE_T
++_FlushPipe(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Flush the current pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Flushing 3D pipe takes 6 slots. */
++ return 6;
++}
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_SemaphoreStall(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Semaphore/stall takes 4 slots. */
++ return 4;
++}
++#endif
++
++static gctSIZE_T
++_SwitchPipe(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer
++ = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++ }
++
++ return 2;
++}
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_State(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gctUINT32 Address,
++ IN gctUINT32 Value,
++ IN gctSIZE_T Size,
++ IN gctBOOL FixedPoint,
++ IN gctBOOL Hinted
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T align, i;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ if ((buffer == gcvNULL) && (Address + Size > Context->stateCount))
++ {
++ /* Determine maximum state. */
++ Context->stateCount = Address + Size;
++ }
++
++ /* Do we need a new entry? */
++ if ((Address != Context->lastAddress) || (FixedPoint != Context->lastFixed))
++ {
++ if (buffer != gcvNULL)
++ {
++ if (align)
++ {
++ /* Add filler. */
++ buffer[Index++] = 0xDEADDEAD;
++ }
++
++ /* LoadState(Address, Count). */
++ gcmkASSERT((Index & 1) == 0);
++
++ if (FixedPoint)
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++ else
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ /* Walk all the states. */
++ for (i = 0; i < Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + 1 + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = Index + 1 + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Save information for this LoadState. */
++ Context->lastIndex = Index;
++ Context->lastAddress = Address + Size;
++ Context->lastSize = Size;
++ Context->lastFixed = FixedPoint;
++
++ /* Return size for load state. */
++ return align + 1 + Size;
++ }
++
++ /* Append this state to the previous one. */
++ if (buffer != gcvNULL)
++ {
++ /* Update last load state. */
++ buffer[Context->lastIndex] =
++ ((((gctUINT32) (buffer[Context->lastIndex])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Context->lastSize + Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Walk all the states. */
++ for (i = 0; i < Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = Index + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Update last address and size. */
++ Context->lastAddress += Size;
++ Context->lastSize += Size;
++
++ /* Return number of slots required. */
++ return Size;
++}
++
++static gctSIZE_T
++_StateMirror(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Size,
++ IN gctUINT32 AddressMirror
++ )
++{
++ gctSIZE_T i;
++
++ /* Process when buffer is set. */
++ if (Context->buffer != gcvNULL)
++ {
++ /* Walk all states. */
++ for (i = 0; i < Size; i++)
++ {
++ /* Copy the mapping address. */
++ Context->map[Address + i].index =
++ Context->map[AddressMirror + i].index;
++ }
++ }
++
++ /* Return the number of required maps. */
++ return Size;
++}
++#endif
++
++static gceSTATUS
++_InitializeContextBuffer(
++ IN gckCONTEXT Context
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T index;
++
++#if !defined(VIVANTE_NO_3D)
++ gctUINT i;
++ gctUINT vertexUniforms, fragmentUniforms;
++ gctUINT fe2vsCount;
++ gctBOOL halti0;
++#endif
++
++ /* Reset the buffer index. */
++ index = 0;
++
++ /* Reset the last state address. */
++ Context->lastAddress = ~0U;
++
++ /* Get the buffer pointer. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++
++ /**************************************************************************/
++ /* Build 2D states. *******************************************************/
++
++
++#if !defined(VIVANTE_NO_3D)
++ /**************************************************************************/
++ /* Build 3D states. *******************************************************/
++ halti0 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) );
++
++ /* Query shader support. */
++ gcmkVERIFY_OK(gckHARDWARE_QueryShaderCaps(
++ Context->hardware, &vertexUniforms, &fragmentUniforms, gcvNULL));
++
++ /* Store the 3D entry index. */
++ Context->entryOffset3D = index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Current context pointer. */
++#if gcdDEBUG
++ index += _State(Context, index, 0x03850 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ index += _FlushPipe(Context, index, gcvPIPE_3D);
++
++ /* Global states. */
++ index += _State(Context, index, 0x03814 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03818 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0381C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03820 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03828 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0382C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03834 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0384C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Front End states. */
++ fe2vsCount = 12;
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++ index += _State(Context, index, 0x00600 >> 2, 0x00000000, fe2vsCount, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x00644 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00648 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0064C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00650 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00680 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x006A0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0067C >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x006C0 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00700 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00740 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00780 >> 2, 0x3F800000, 16, gcvFALSE, gcvFALSE);
++
++ /* Vertex Shader states. */
++ index += _State(Context, index, 0x00800 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00804 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00808 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0080C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00810 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00820 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00830 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ if (Context->hardware->identity.instructionCount <= 256)
++ {
++ index += _State(Context, index, 0x04000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x05000 >> 2, 0x00000000, vertexUniforms * 4, gcvFALSE, gcvFALSE);
++
++ /* Primitive Assembly states. */
++ index += _State(Context, index, 0x00A00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A0C >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A10 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A1C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A28 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A2C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A30 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A40 >> 2, 0x00000000, 10, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A38 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A3C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A80 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A84 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A8C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Setup states. */
++ index += _State(Context, index, 0x00C00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C08 >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C0C >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C1C >> 2, 0x42000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C20 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C24 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++
++ /* Raster states. */
++ index += _State(Context, index, 0x00E00 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E10 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E40 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E08 >> 2, 0x00000031, 1, gcvFALSE, gcvFALSE);
++
++ /* Pixel Shader states. */
++ index += _State(Context, index, 0x01000 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01004 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0100C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01010 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01018 >> 2, 0x01000000, 1, gcvFALSE, gcvFALSE);
++ if (Context->hardware->identity.instructionCount <= 256)
++ {
++ index += _State(Context, index, 0x06000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x07000 >> 2, 0x00000000, fragmentUniforms * 4, gcvFALSE, gcvFALSE);
++
++ /* Texture states. */
++ index += _State(Context, index, 0x02000 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02040 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02080 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x020C0 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02100 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02140 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02180 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x021C0 >> 2, 0x00321000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02200 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02240 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x02400 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02440 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02480 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x024C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02500 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02540 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02580 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x025C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02600 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02640 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02680 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x026C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02700 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02740 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ))
++ {
++ gctUINT texBlockCount;
++
++ /* New texture block. */
++ index += _State(Context, index, 0x10000 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10080 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10100 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10180 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10200 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10280 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ for (i = 0; i < 256 / 16; i += 1)
++ {
++ index += _State(Context, index, (0x02C00 >> 2) + i * 16, 0x00000000, 14, gcvFALSE, gcvFALSE);
++ }
++ index += _State(Context, index, 0x10300 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10380 >> 2, 0x00321000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10400 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10480 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 15:15)) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x12000 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x12400 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ }
++
++ if ((Context->hardware->identity.chipModel == gcv2000)
++ && (Context->hardware->identity.chipRevision == 0x5108))
++ {
++ texBlockCount = 12;
++ }
++ else
++ {
++ texBlockCount = ((512) >> (4));
++ }
++ for (i = 0; i < texBlockCount; i += 1)
++ {
++ index += _State(Context, index, (0x10800 >> 2) + (i << 4), 0x00000000, 14, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ /* YUV. */
++ index += _State(Context, index, 0x01678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0167C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01680 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01684 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01688 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0168C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01690 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01694 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01698 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0169C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Thread walker states. */
++ index += _State(Context, index, 0x00900 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00904 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00908 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0090C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00910 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00914 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00918 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0091C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00924 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (Context->hardware->identity.instructionCount > 1024)
++ {
++ /* New Shader instruction memory. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00860 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x20000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ else if (Context->hardware->identity.instructionCount > 256)
++ {
++ /* New Shader instruction memory. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* VX instruction memory. */
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x0C000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ _StateMirror(Context, 0x08000 >> 2, Context->hardware->identity.instructionCount << 2 , 0x0C000 >> 2);
++ }
++
++ /* Store the index of the "XD" entry. */
++ Context->entryOffsetXDFrom3D = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Pixel Engine states. */
++ index += _State(Context, index, 0x01400 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01404 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01408 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0140C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01414 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01418 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0141C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01420 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01424 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01428 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0142C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01434 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01454 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01458 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0145C >> 2, 0x00000010, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A8 >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014AC >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A4 >> 2, 0x000E400C, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01580 >> 2, 0x00000000, 3, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Composition states. */
++ index += _State(Context, index, 0x03008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (Context->hardware->identity.pixelPipes == 1)
++ {
++ index += _State(Context, index, 0x01460 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01430 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01410 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x01460 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ for (i = 0; i < 2; i++)
++ {
++ index += _State(Context, index, (0x01500 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ if (Context->hardware->identity.pixelPipes > 1 || halti0)
++ {
++ index += _State(Context, index, (0x01480 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ /* Resolve states. */
++ index += _State(Context, index, 0x01604 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01608 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0160C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01610 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01614 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01620 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01630 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01640 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0163C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (Context->hardware->identity.pixelPipes > 1)
++ {
++ index += _State(Context, index, (0x016C0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, (0x016E0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01700 >> 2, 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvFALSE);
++ }
++
++ /* Tile status. */
++ index += _State(Context, index, 0x01654 >> 2, 0x00200000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x01658 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0165C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01660 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01664 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01668 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0166C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A4 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x016AC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01720 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01740 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01760 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Semaphore/stall. */
++ index += _SemaphoreStall(Context, index);
++#endif
++
++ /**************************************************************************/
++ /* Link to another address. ***********************************************/
++
++ Context->linkIndex3D = index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++ /* Store the end of the context buffer. */
++ Context->bufferSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /**************************************************************************/
++ /* Pipe switch for the case where neither 2D nor 3D are used. *************/
++
++ /* Store the 3D entry index. */
++ Context->entryOffsetXDFrom2D = index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Store the location of the link. */
++ Context->linkIndexXD = index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++
++ /**************************************************************************/
++ /* Save size for buffer. **************************************************/
++
++ Context->totalSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_DestroyContext(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ if (Context != gcvNULL)
++ {
++ gcsCONTEXT_PTR bufferHead;
++
++ /* Free context buffers. */
++ for (bufferHead = Context->buffer; Context->buffer != gcvNULL;)
++ {
++ /* Get a shortcut to the current buffer. */
++ gcsCONTEXT_PTR buffer = Context->buffer;
++
++ /* Get the next buffer. */
++ gcsCONTEXT_PTR next = buffer->next;
++
++ /* Last item? */
++ if (next == bufferHead)
++ {
++ next = gcvNULL;
++ }
++
++ /* Destroy the signal. */
++ if (buffer->signal != gcvNULL)
++ {
++ gcmkONERROR(gckOS_DestroySignal(
++ Context->os, buffer->signal
++ ));
++
++ buffer->signal = gcvNULL;
++ }
++
++ /* Free state delta map. */
++ if (buffer->logical != gcvNULL)
++ {
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkONERROR(gckEVENT_DestroyVirtualCommandBuffer(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++
++#else
++ gcmkONERROR(gckEVENT_FreeContiguousMemory(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++#endif
++
++ buffer->logical = gcvNULL;
++ }
++
++ /* Free context buffer. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, buffer));
++
++ /* Remove from the list. */
++ Context->buffer = next;
++ }
++
++#if gcdSECURE_USER
++ /* Free the hint array. */
++ if (Context->hint != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->hint));
++ }
++#endif
++ /* Free record array copy. */
++ if (Context->recordArray != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArray));
++ }
++
++ /* Free the state mapping. */
++ if (Context->map != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->map));
++ }
++
++ /* Mark the gckCONTEXT object as unknown. */
++ Context->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCONTEXT object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context));
++ }
++
++OnError:
++ return status;
++}
++
++
++/******************************************************************************\
++**************************** Context Management API ****************************
++\******************************************************************************/
++
++/******************************************************************************\
++**
++** gckCONTEXT_Construct
++**
++** Construct a new gckCONTEXT object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gckHARDWARE Hardware
++** Pointer to gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable thet will receive the gckCONTEXT object
++** pointer.
++*/
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gctSIZE_T allocationSize;
++ gctUINT i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%08X Hardware=0x%08X", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++
++
++ /**************************************************************************/
++ /* Allocate and initialize basic fields of gckCONTEXT. ********************/
++
++ /* The context object size. */
++ allocationSize = gcmSIZEOF(struct _gckCONTEXT);
++
++ /* Allocate the object. */
++ gcmkONERROR(gckOS_Allocate(
++ Os, allocationSize, &pointer
++ ));
++
++ context = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(context, allocationSize));
++
++ /* Initialize the gckCONTEXT object. */
++ context->object.type = gcvOBJ_CONTEXT;
++ context->os = Os;
++ context->hardware = Hardware;
++
++
++#if defined(VIVANTE_NO_3D)
++ context->entryPipe = gcvPIPE_2D;
++ context->exitPipe = gcvPIPE_2D;
++#elif gcdCMD_NO_2D_CONTEXT
++ context->entryPipe = gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#else
++ context->entryPipe
++ = (((((gctUINT32) (context->hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) )
++ ? gcvPIPE_2D
++ : gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#endif
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Hardware,
++ &context->alignment,
++ &context->reservedHead,
++ &context->reservedTail
++ ));
++
++ /* Mark the context as dirty to force loading of the entire state table
++ the first time. */
++ context->dirty = gcvTRUE;
++
++
++ /**************************************************************************/
++ /* Get the size of the context buffer. ************************************/
++
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++
++ /**************************************************************************/
++ /* Compute the size of the record array. **********************************/
++
++ context->recordArraySize
++ = gcmSIZEOF(gcsSTATE_DELTA_RECORD) * context->stateCount;
++
++
++ if (context->stateCount > 0)
++ {
++ /**************************************************************************/
++ /* Allocate and reset the state mapping table. ****************************/
++
++ /* Allocate the state mapping table. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsSTATE_MAP) * context->stateCount,
++ &pointer
++ ));
++
++ context->map = pointer;
++
++ /* Zero the state mapping table. */
++ gcmkONERROR(gckOS_ZeroMemory(
++ context->map, gcmSIZEOF(gcsSTATE_MAP) * context->stateCount
++ ));
++
++
++ /**************************************************************************/
++ /* Allocate the hint array. ***********************************************/
++
++#if gcdSECURE_USER
++ /* Allocate hints. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gctBOOL) * context->stateCount,
++ &pointer
++ ));
++
++ context->hint = pointer;
++#endif
++ }
++
++ /**************************************************************************/
++ /* Allocate the context and state delta buffers. **************************/
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i += 1)
++ {
++ /* Allocate a context buffer. */
++ gcsCONTEXT_PTR buffer;
++
++ /* Allocate the context buffer structure. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsCONTEXT),
++ &pointer
++ ));
++
++ buffer = pointer;
++
++ /* Reset the context buffer structure. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ buffer, gcmSIZEOF(gcsCONTEXT)
++ ));
++
++ /* Append to the list. */
++ if (context->buffer == gcvNULL)
++ {
++ buffer->next = buffer;
++ context->buffer = buffer;
++ }
++ else
++ {
++ buffer->next = context->buffer->next;
++ context->buffer->next = buffer;
++ }
++
++ /* Set the number of delta in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ buffer->num = i;
++#endif
++
++ /* Create the busy signal. */
++ gcmkONERROR(gckOS_CreateSignal(
++ Os, gcvFALSE, &buffer->signal
++ ));
++
++ /* Set the signal, buffer is currently not busy. */
++ gcmkONERROR(gckOS_Signal(
++ Os, buffer->signal, gcvTRUE
++ ));
++
++ /* Create a new physical context buffer. */
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
++ context->hardware->kernel,
++ gcvFALSE,
++ &context->totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++#else
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Os,
++ gcvFALSE,
++ &context->totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++#endif
++
++ buffer->logical = pointer;
++
++ /* Set gckEVENT object pointer. */
++ buffer->eventObj = Hardware->kernel->eventObj;
++
++ /* Set the pointers to the LINK commands. */
++ if (context->linkIndex2D != 0)
++ {
++ buffer->link2D = &buffer->logical[context->linkIndex2D];
++ }
++
++ if (context->linkIndex3D != 0)
++ {
++ buffer->link3D = &buffer->logical[context->linkIndex3D];
++ }
++
++ if (context->linkIndexXD != 0)
++ {
++ gctPOINTER xdLink;
++ gctUINT8_PTR xdEntryLogical;
++ gctSIZE_T xdEntrySize;
++ gctSIZE_T linkBytes;
++
++ /* Determine LINK parameters. */
++ xdLink
++ = &buffer->logical[context->linkIndexXD];
++
++ xdEntryLogical
++ = (gctUINT8_PTR) buffer->logical
++ + context->entryOffsetXDFrom3D;
++
++ xdEntrySize
++ = context->bufferSize
++ - context->entryOffsetXDFrom3D;
++
++ /* Query LINK size. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware, gcvNULL, gcvNULL, 0, &linkBytes
++ ));
++
++ /* Generate a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware,
++ xdLink,
++ xdEntryLogical,
++ xdEntrySize,
++ &linkBytes
++ ));
++ }
++ }
++
++
++ /**************************************************************************/
++ /* Initialize the context buffers. ****************************************/
++
++ /* Initialize the current context buffer. */
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++ /* Make all created contexts equal. */
++ {
++ gcsCONTEXT_PTR currContext, tempContext;
++
++ /* Set the current context buffer. */
++ currContext = context->buffer;
++
++ /* Get the next context buffer. */
++ tempContext = currContext->next;
++
++ /* Loop through all buffers. */
++ while (tempContext != currContext)
++ {
++ if (tempContext == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ /* Copy the current context. */
++ gckOS_MemCopy(
++ tempContext->logical,
++ currContext->logical,
++ context->totalSize
++ );
++
++ /* Get the next context buffer. */
++ tempContext = tempContext->next;
++ }
++ }
++
++ /* Return pointer to the gckCONTEXT object. */
++ *Context = context;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%08X", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back on error. */
++ gcmkVERIFY_OK(_DestroyContext(context));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Destroy
++**
++** Destroy a gckCONTEXT object.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++** Pointer to an gckCONTEXT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%08X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Destroy the context and all related objects. */
++ status = _DestroyContext(Context);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Update
++**
++** Merge all pending state delta buffers into the current context buffer.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++** Pointer to an gckCONTEXT object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSTATE_DELTA _stateDelta;
++ gckKERNEL kernel;
++ gcsCONTEXT_PTR buffer;
++ gcsSTATE_MAP_PTR map;
++ gctBOOL needCopy = gcvFALSE;
++ gcsSTATE_DELTA_PTR nDelta;
++ gcsSTATE_DELTA_PTR uDelta = gcvNULL;
++ gcsSTATE_DELTA_PTR kDelta = gcvNULL;
++ gcsSTATE_DELTA_RECORD_PTR record;
++ gcsSTATE_DELTA_RECORD_PTR recordArray = gcvNULL;
++ gctUINT elementCount;
++ gctUINT address;
++ gctUINT32 mask;
++ gctUINT32 data;
++ gctUINT index;
++ gctUINT i, j;
++
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++
++ gcmkHEADER_ARG(
++ "Context=0x%08X ProcessID=%d StateDelta=0x%08X",
++ Context, ProcessID, StateDelta
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Get a shortcut to the kernel object. */
++ kernel = Context->hardware->kernel;
++
++ /* Check wehther we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Context->os, ProcessID, &needCopy));
++
++ /* Allocate the copy buffer for the user record array. */
++ if (needCopy && (Context->recordArray == gcvNULL))
++ {
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &Context->recordArray
++ ));
++ }
++
++ /* Get the current context buffer. */
++ buffer = Context->buffer;
++
++ /* Wait until the context buffer becomes available; this will
++ also reset the signal and mark the buffer as busy. */
++ gcmkONERROR(gckOS_WaitSignal(
++ Context->os, buffer->signal, gcvINFINITE
++ ));
++
++#if gcdSECURE_USER
++ /* Get the cache form the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE) && 1 && !defined(VIVANTE_NO_3D)
++ /* Update current context token. */
++ buffer->logical[Context->map[0x0E14].index]
++ = gcmPTR2INT(Context);
++#endif
++
++ /* Are there any pending deltas? */
++ if (buffer->deltaCount != 0)
++ {
++ /* Get the state map. */
++ map = Context->map;
++
++ /* Get the first delta item. */
++ uDelta = buffer->delta;
++
++ /* Reset the vertex stream count. */
++ elementCount = 0;
++
++ /* Merge all pending deltas. */
++ for (i = 0; i < buffer->deltaCount; i += 1)
++ {
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ Context->recordArray,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++
++ /* Merge all pending states. */
++ for (j = 0; j < kDelta->recordCount; j += 1)
++ {
++ if (j >= Context->stateCount)
++ {
++ break;
++ }
++
++ /* Get the current state record. */
++ record = &recordArray[j];
++
++ /* Get the state address. */
++ address = record->address;
++
++ /* Make sure the state is a part of the mapping table. */
++ if (address >= Context->stateCount)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++
++ continue;
++ }
++
++ /* Get the state index. */
++ index = map[address].index;
++
++ /* Skip the state if not mapped. */
++ if (index == 0)
++ {
++#if gcdDEBUG
++ if ((address != 0x0594)
++ && (address != 0x0E00)
++ && (address != 0x0E03)
++ )
++ {
++#endif
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++#if gcdDEBUG
++ }
++#endif
++ continue;
++ }
++
++ /* Get the data mask. */
++ mask = record->mask;
++
++ /* Masked states that are being completly reset or regular states. */
++ if ((mask == 0) || (mask == ~0U))
++ {
++ /* Get the new data value. */
++ data = record->data;
++
++ /* Process special states. */
++ if (address == 0x0595)
++ {
++ /* Force auto-disable to be disabled. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13)));
++ }
++
++#if gcdSECURE_USER
++ /* Do we need to convert the logical address? */
++ if (Context->hint[address])
++ {
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) &data
++ ));
++ }
++#endif
++
++ /* Set new data. */
++ buffer->logical[index] = data;
++ }
++
++ /* Masked states that are being set partially. */
++ else
++ {
++ buffer->logical[index]
++ = (~mask & buffer->logical[index])
++ | (mask & record->data);
++ }
++ }
++
++ /* Get the element count. */
++ if (kDelta->elementCount != 0)
++ {
++ elementCount = kDelta->elementCount;
++ }
++
++ /* Dereference delta. */
++ kDelta->refCount -= 1;
++ gcmkASSERT(kDelta->refCount >= 0);
++
++ /* Get the next state delta. */
++ nDelta = gcmUINT64_TO_PTR(kDelta->next);
++
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Update the user delta pointer. */
++ uDelta = nDelta;
++ }
++
++ /* Hardware disables all input streams when the stream 0 is programmed,
++ it then reenables those streams that were explicitely programmed by
++ the software. Because of this we cannot program the entire array of
++ values, otherwise we'll get all streams reenabled, but rather program
++ only those that are actully needed by the software. */
++ if (elementCount != 0)
++ {
++ gctUINT base;
++ gctUINT nopCount;
++ gctUINT32_PTR nop;
++ gctUINT fe2vsCount = 12;
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++
++ /* Determine the base index of the vertex stream array. */
++ base = map[0x0180].index;
++
++ /* Set the proper state count. */
++ buffer->logical[base - 1]
++ = ((((gctUINT32) (buffer->logical[base - 1])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (elementCount ) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine the number of NOP commands. */
++ nopCount
++ = (fe2vsCount / 2)
++ - (elementCount / 2);
++
++ /* Determine the location of the first NOP. */
++ nop = &buffer->logical[base + (elementCount | 1)];
++
++ /* Fill the unused space with NOPs. */
++ for (i = 0; i < nopCount; i += 1)
++ {
++ if (nop >= buffer->logical + Context->totalSize)
++ {
++ break;
++ }
++
++ /* Generate a NOP command. */
++ *nop = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ /* Advance. */
++ nop += 2;
++ }
++ }
++
++ /* Reset pending deltas. */
++ buffer->deltaCount = 0;
++ buffer->delta = gcvNULL;
++ }
++
++ /* Set state delta user pointer. */
++ uDelta = StateDelta;
++
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* State delta cannot be attached to anything yet. */
++ if (kDelta->refCount != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): kDelta->refCount = %d (has to be 0).\n",
++ __FUNCTION__, __LINE__,
++ kDelta->refCount
++ );
++ }
++
++ /* Attach to all contexts. */
++ buffer = Context->buffer;
++
++ do
++ {
++ /* Attach to the context if nothing is attached yet. If a delta
++ is allready attached, all we need to do is to increment
++ the number of deltas in the context. */
++ if (buffer->delta == gcvNULL)
++ {
++ buffer->delta = uDelta;
++ }
++
++ /* Update reference count. */
++ kDelta->refCount += 1;
++
++ /* Update counters. */
++ buffer->deltaCount += 1;
++
++ /* Get the next context buffer. */
++ buffer = buffer->next;
++
++ if (buffer == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++ }
++ while (Context->buffer != buffer);
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Schedule an event to mark the context buffer as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ buffer->eventObj, buffer->signal, gcvKERNEL_PIXEL
++ ));
++
++ /* Advance to the next context buffer. */
++ Context->buffer = buffer->next;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Get access to the state records. */
++ if (kDelta != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++
++ /* Close access to the current state delta. */
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h linux-openelec/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,157 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_context_h_
++#define __gc_hal_kernel_context_h_
++
++#include "gc_hal_kernel_buffer.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Maps state locations within the context buffer. */
++typedef struct _gcsSTATE_MAP * gcsSTATE_MAP_PTR;
++typedef struct _gcsSTATE_MAP
++{
++ /* Index of the state in the context buffer. */
++ gctUINT index;
++
++ /* State mask. */
++ gctUINT32 mask;
++}
++gcsSTATE_MAP;
++
++/* Context buffer. */
++typedef struct _gcsCONTEXT * gcsCONTEXT_PTR;
++typedef struct _gcsCONTEXT
++{
++ /* For debugging: the number of context buffer in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Context busy signal. */
++ gctSIGNAL signal;
++
++ /* Physical address of the context buffer. */
++ gctPHYS_ADDR physical;
++
++ /* Logical address of the context buffer. */
++ gctUINT32_PTR logical;
++
++ /* Pointer to the LINK commands. */
++ gctPOINTER link2D;
++ gctPOINTER link3D;
++
++ /* The number of pending state deltas. */
++ gctUINT deltaCount;
++
++ /* Pointer to the first delta to be applied. */
++ gcsSTATE_DELTA_PTR delta;
++
++ /* Next context buffer. */
++ gcsCONTEXT_PTR next;
++}
++gcsCONTEXT;
++
++/* gckCONTEXT structure that hold the current context. */
++struct _gckCONTEXT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Command buffer alignment. */
++ gctSIZE_T alignment;
++ gctSIZE_T reservedHead;
++ gctSIZE_T reservedTail;
++
++ /* Context buffer metrics. */
++ gctSIZE_T stateCount;
++ gctSIZE_T totalSize;
++ gctSIZE_T bufferSize;
++ gctUINT32 linkIndex2D;
++ gctUINT32 linkIndex3D;
++ gctUINT32 linkIndexXD;
++ gctUINT32 entryOffset3D;
++ gctUINT32 entryOffsetXDFrom2D;
++ gctUINT32 entryOffsetXDFrom3D;
++
++ /* Dirty flags. */
++ gctBOOL dirty;
++ gctBOOL dirty2D;
++ gctBOOL dirty3D;
++ gcsCONTEXT_PTR dirtyBuffer;
++
++ /* State mapping. */
++ gcsSTATE_MAP_PTR map;
++
++ /* List of context buffers. */
++ gcsCONTEXT_PTR buffer;
++
++ /* A copy of the user record array. */
++ gctUINT recordArraySize;
++ gcsSTATE_DELTA_RECORD_PTR recordArray;
++
++ /* Requested pipe select for context. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Variables used for building state buffer. */
++ gctUINT32 lastAddress;
++ gctSIZE_T lastSize;
++ gctUINT32 lastIndex;
++ gctBOOL lastFixed;
++
++ /* Hint array. */
++#if gcdSECURE_USER
++ gctBOOL_PTR hint;
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gcsPROFILER_COUNTERS latestProfiler;
++ gcsPROFILER_COUNTERS histroyProfiler;
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++#endif
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_context_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c linux-openelec/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,7280 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#if VIVANTE_PROFILER_CONTEXT
++#include "gc_hal_kernel_context.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef struct _gcsiDEBUG_REGISTERS * gcsiDEBUG_REGISTERS_PTR;
++typedef struct _gcsiDEBUG_REGISTERS
++{
++ gctSTRING module;
++ gctUINT index;
++ gctUINT shift;
++ gctUINT data;
++ gctUINT count;
++ gctUINT32 signature;
++}
++gcsiDEBUG_REGISTERS;
++
++extern int gpu3DMinClock;
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gceSTATUS status;
++
++ gctUINT32 chipIdentity;
++
++ gctUINT32 streamCount = 0;
++ gctUINT32 registerMax = 0;
++ gctUINT32 threadCount = 0;
++ gctUINT32 shaderCoreCount = 0;
++ gctUINT32 vertexCacheSize = 0;
++ gctUINT32 vertexOutputBufferSize = 0;
++ gctUINT32 pixelPipes = 0;
++ gctUINT32 instructionCount = 0;
++ gctUINT32 numConstants = 0;
++ gctUINT32 bufferSize = 0;
++ gctUINT32 varyingsCount = 0;
++ gctBOOL useHZ;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /***************************************************************************
++ ** Get chip ID and revision.
++ */
++
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00018,
++ &chipIdentity));
++
++ /* Special case for older graphic cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ Identity->chipModel = gcv500;
++ Identity->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00020,
++ (gctUINT32_PTR) &Identity->chipModel));
++
++ /* !!!! HACK ALERT !!!! */
++ /* Because people change device IDs without letting software know
++ ** about it - here is the hack to make it all look the same. Only
++ ** for GC400 family. Next time - TELL ME!!! */
++ if (((Identity->chipModel & 0xFF00) == 0x0400)
++ && (Identity->chipModel != 0x0420))
++ {
++ Identity->chipModel = (gceCHIPMODEL) (Identity->chipModel & 0x0400);
++ }
++
++ /* Read CHIP_REV register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00024,
++ &Identity->chipRevision));
++
++ if ((Identity->chipModel == gcv300)
++ && (Identity->chipRevision == 0x2201)
++ )
++ {
++ gctUINT32 chipDate;
++ gctUINT32 chipTime;
++
++ /* Read date and time registers. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00028,
++ &chipDate));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0002C,
++ &chipTime));
++
++ if ((chipDate == 0x20080814) && (chipTime == 0x12051100))
++ {
++ /* This IP has an ECO; put the correct revision in it. */
++ Identity->chipRevision = 0x1051;
++ }
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipModel=%X",
++ Identity->chipModel);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipRevision=%X",
++ Identity->chipRevision);
++
++
++ /***************************************************************************
++ ** Get chip features.
++ */
++
++ /* Read chip feature register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0001C,
++ &Identity->chipFeatures));
++
++#ifndef VIVANTE_NO_3D
++ /* Disable fast clear on GC700. */
++ if (Identity->chipModel == gcv700)
++ {
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++#endif
++
++ if (((Identity->chipModel == gcv500) && (Identity->chipRevision < 2))
++ || ((Identity->chipModel == gcv300) && (Identity->chipRevision < 0x2000))
++ )
++ {
++ /* GC500 rev 1.x and GC300 rev < 2.0 doesn't have these registers. */
++ Identity->chipMinorFeatures = 0;
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ }
++ else
++ {
++ /* Read chip minor feature register #0. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00034,
++ &Identity->chipMinorFeatures));
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))))
++ )
++ {
++ /* Read chip minor featuress register #1. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00074,
++ &Identity->chipMinorFeatures1));
++
++ /* Read chip minor featuress register #2. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00084,
++ &Identity->chipMinorFeatures2));
++
++ /*Identity->chipMinorFeatures2 &= ~(0x1 << 3);*/
++
++ /* Read chip minor featuress register #1. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00088,
++ &Identity->chipMinorFeatures3));
++
++ /*The BG2 chip has no compression supertiled, and the bit of GCMinorFeature3BugFixes15 is n/a*/
++ if(Identity->chipModel == gcv1000 && Identity->chipRevision == 0x5036)
++ {
++ Identity->chipMinorFeatures3
++ = ((((gctUINT32) (Identity->chipMinorFeatures3)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ Identity->chipMinorFeatures3
++ = ((((gctUINT32) (Identity->chipMinorFeatures3)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++ }
++
++ /* Read chip minor featuress register #4. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00094,
++ &Identity->chipMinorFeatures4));
++ }
++ else
++ {
++ /* Chip doesn't has minor features register #1 or 2 or 3 or 4. */
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ }
++ }
++
++ /* Get the Supertile layout in the hardware. */
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))))
++ {
++ Identity->superTileMode = 2;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))))
++ {
++ Identity->superTileMode = 1;
++ }
++ else
++ {
++ Identity->superTileMode = 0;
++ }
++
++ /* Exception for GC1000, revision 5035 & GC800, revision 4612 */
++ if (((Identity->chipModel == gcv1000) && ((Identity->chipRevision == 0x5035)
++ || (Identity->chipRevision == 0x5036)
++ || (Identity->chipRevision == 0x5037)))
++ || ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4612))
++ || ((Identity->chipModel == gcv860) && (Identity->chipRevision == 0x4647)))
++ {
++ Identity->superTileMode = 1;
++ }
++
++ if (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5245)
++ {
++ useHZ = ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))));
++ }
++ else
++ {
++ useHZ = gcvFALSE;
++ }
++
++ if (useHZ)
++ {
++ /* Disable EZ. */
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ }
++
++ /* Disable HZ when EZ is present for older chips. */
++ else if (!((((gctUINT32) (Identity->chipFeatures)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))))
++ {
++ /* Disable HIERARCHICAL_Z. */
++ Identity->chipMinorFeatures
++ = ((((gctUINT32) (Identity->chipMinorFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++ }
++
++ /* Disable rectangle primitive when chip is gc880_5_1_0_rc6*/
++ if ((Identity->chipModel == gcv880) && (Identity->chipRevision == 0x5106))
++ {
++ /* Disable rectangle primitive. */
++ Identity->chipMinorFeatures2
++ = ((((gctUINT32) (Identity->chipMinorFeatures2)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ if ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4605))
++ {
++ /* Correct feature bit: RTL does not have such feature. */
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipFeatures=0x%08X",
++ Identity->chipFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures=0x%08X",
++ Identity->chipMinorFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures1=0x%08X",
++ Identity->chipMinorFeatures1);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures2=0x%08X",
++ Identity->chipMinorFeatures2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures3=0x%08X",
++ Identity->chipMinorFeatures3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures4=0x%08X",
++ Identity->chipMinorFeatures4);
++
++ /***************************************************************************
++ ** Get chip specs.
++ */
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ gctUINT32 specs, specs2, specs3;
++
++ /* Read gcChipSpecs register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00048,
++ &specs));
++
++ /* Extract the fields. */
++ streamCount = (((((gctUINT32) (specs)) >> (0 ? 3:0)) & ((gctUINT32) ((((1 ? 3:0) - (0 ? 3:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:0) - (0 ? 3:0) + 1)))))) );
++ registerMax = (((((gctUINT32) (specs)) >> (0 ? 7:4)) & ((gctUINT32) ((((1 ? 7:4) - (0 ? 7:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:4) - (0 ? 7:4) + 1)))))) );
++ threadCount = (((((gctUINT32) (specs)) >> (0 ? 11:8)) & ((gctUINT32) ((((1 ? 11:8) - (0 ? 11:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:8) - (0 ? 11:8) + 1)))))) );
++ shaderCoreCount = (((((gctUINT32) (specs)) >> (0 ? 24:20)) & ((gctUINT32) ((((1 ? 24:20) - (0 ? 24:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:20) - (0 ? 24:20) + 1)))))) );
++ vertexCacheSize = (((((gctUINT32) (specs)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ vertexOutputBufferSize = (((((gctUINT32) (specs)) >> (0 ? 31:28)) & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1)))))) );
++ pixelPipes = (((((gctUINT32) (specs)) >> (0 ? 27:25)) & ((gctUINT32) ((((1 ? 27:25) - (0 ? 27:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:25) - (0 ? 27:25) + 1)))))) );
++
++ /* Read gcChipSpecs2 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00080,
++ &specs2));
++
++ instructionCount = (((((gctUINT32) (specs2)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ numConstants = (((((gctUINT32) (specs2)) >> (0 ? 31:16)) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1)))))) );
++ bufferSize = (((((gctUINT32) (specs2)) >> (0 ? 7:0)) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1)))))) );
++
++ /* Read gcChipSpecs3 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0008C,
++ &specs3));
++
++ varyingsCount = (((((gctUINT32) (specs3)) >> (0 ? 8:4)) & ((gctUINT32) ((((1 ? 8:4) - (0 ? 8:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:4) - (0 ? 8:4) + 1)))))) );
++ }
++
++ /* Get the number of pixel pipes. */
++ Identity->pixelPipes = gcmMAX(pixelPipes, 1);
++
++ /* Get the stream count. */
++ Identity->streamCount = (streamCount != 0)
++ ? streamCount
++ : (Identity->chipModel >= gcv1000) ? 4 : 1;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: streamCount=%u%s",
++ Identity->streamCount,
++ (streamCount == 0) ? " (default)" : "");
++
++ /* Get the vertex output buffer size. */
++ Identity->vertexOutputBufferSize = (vertexOutputBufferSize != 0)
++ ? 1 << vertexOutputBufferSize
++ : (Identity->chipModel == gcv400)
++ ? (Identity->chipRevision < 0x4000) ? 512
++ : (Identity->chipRevision < 0x4200) ? 256
++ : 128
++ : (Identity->chipModel == gcv530)
++ ? (Identity->chipRevision < 0x4200) ? 512
++ : 128
++ : 512;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexOutputBufferSize=%u%s",
++ Identity->vertexOutputBufferSize,
++ (vertexOutputBufferSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of threads. */
++ Identity->threadCount = (threadCount != 0)
++ ? 1 << threadCount
++ : (Identity->chipModel == gcv400) ? 64
++ : (Identity->chipModel == gcv500) ? 128
++ : (Identity->chipModel == gcv530) ? 128
++ : 256;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: threadCount=%u%s",
++ Identity->threadCount,
++ (threadCount == 0) ? " (default)" : "");
++
++ /* Get the number of shader cores. */
++ Identity->shaderCoreCount = (shaderCoreCount != 0)
++ ? shaderCoreCount
++ : (Identity->chipModel >= gcv1000) ? 2
++ : 1;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: shaderCoreCount=%u%s",
++ Identity->shaderCoreCount,
++ (shaderCoreCount == 0) ? " (default)" : "");
++
++ /* Get the vertex cache size. */
++ Identity->vertexCacheSize = (vertexCacheSize != 0)
++ ? vertexCacheSize
++ : 8;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexCacheSize=%u%s",
++ Identity->vertexCacheSize,
++ (vertexCacheSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of temporary registers. */
++ Identity->registerMax = (registerMax != 0)
++ /* Maximum of registerMax/4 registers are accessible to 1 shader */
++ ? 1 << registerMax
++ : (Identity->chipModel == gcv400) ? 32
++ : 64;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: registerMax=%u%s",
++ Identity->registerMax,
++ (registerMax == 0) ? " (default)" : "");
++
++ /* Get the instruction count. */
++ Identity->instructionCount = (instructionCount == 0) ? 256
++ : (instructionCount == 1) ? 1024
++ : (instructionCount == 2) ? 2048
++ : (instructionCount == 0xFF) ? 512
++ : 256;
++
++ if (Identity->instructionCount == 256)
++ {
++ if ((Identity->chipModel == gcv2000 && Identity->chipRevision == 0x5108)
++ || Identity->chipModel == gcv880)
++ {
++ Identity->instructionCount = 512;
++ }
++ }
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ Identity->instructionCount = 512;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: instructionCount=%u%s",
++ Identity->instructionCount,
++ (instructionCount == 0) ? " (default)" : "");
++
++ /* Get the number of constants. */
++ Identity->numConstants = (numConstants == 0) ? 168 : numConstants;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: numConstants=%u%s",
++ Identity->numConstants,
++ (numConstants == 0) ? " (default)" : "");
++
++ /* Get the buffer size. */
++ Identity->bufferSize = bufferSize;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: bufferSize=%u%s",
++ Identity->bufferSize,
++ (bufferSize == 0) ? " (default)" : "");
++
++
++ if (varyingsCount != 0)
++ {
++ /* Bug 4480. */
++ /*Identity->varyingsCount = varyingsCount;*/
++ Identity->varyingsCount = 12;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures1)) >> (0 ? 23:23) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))))
++ {
++ Identity->varyingsCount = 12;
++ }
++ else
++ {
++ Identity->varyingsCount = 8;
++ }
++
++ /* For some cores, it consumes two varying for position, so the max varying vectors should minus one. */
++ if ((Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5222) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5208) ||
++ ((Identity->chipModel == gcv2100 || Identity->chipModel == gcv2000) && Identity->chipRevision == 0x5108) ||
++ (Identity->chipModel == gcv880 && (Identity->chipRevision == 0x5107 || Identity->chipRevision == 0x5106)))
++ {
++ Identity->varyingsCount -= 1;
++ }
++
++ Identity->chip2DControl = 0;
++ if (Identity->chipModel == gcv320)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x0002C,
++ &data));
++
++ if ((data != 33956864) &&
++ ((Identity->chipRevision == 0x5007) ||
++ (Identity->chipRevision == 0x5220)))
++ {
++ Identity->chip2DControl |= 0xFF &
++ (Identity->chipRevision == 0x5220 ? 8 :
++ (Identity->chipRevision == 0x5007 ? 12 : 0));
++ }
++
++ if (Identity->chipRevision == 0x5007)
++ {
++ /* Disable splitting rectangle. */
++ Identity->chip2DControl |= 0x100;
++
++ /* Enable 2D Flush. */
++ Identity->chip2DControl |= 0x200;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_PowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckHARDWARE hardware = (gckHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++static gceSTATUS
++_VerifyDMA(
++ IN gckOS Os,
++ IN gceCORE Core,
++ gctUINT32_PTR Address1,
++ gctUINT32_PTR Address2,
++ gctUINT32_PTR State1,
++ gctUINT32_PTR State2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State1));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address1));
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State2));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address2));
++
++ if (*Address1 != *Address2)
++ {
++ break;
++ }
++
++ if (*State1 != *State2)
++ {
++ break;
++ }
++ }
++
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_DumpDebugRegisters(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gcsiDEBUG_REGISTERS_PTR Descriptor
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 select;
++ gctUINT32 data = 0;
++ gctUINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Descriptor=0x%X", Os, Descriptor);
++
++ gcmkPRINT_N(4, " %s debug registers:\n", Descriptor->module);
++
++ for (i = 0; i < Descriptor->count; i += 1)
++ {
++ select = i << Descriptor->shift;
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ gcmkPRINT_N(12, " [0x%02X] 0x%08X\n", i, data);
++ }
++
++ select = 0xF << Descriptor->shift;
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ if (data == Descriptor->signature)
++ {
++ break;
++ }
++ }
++
++ if (i == 500)
++ {
++ gcmkPRINT_N(4, " failed to obtain the signature (read 0x%08X).\n", data);
++ }
++ else
++ {
++ gcmkPRINT_N(8, " signature = 0x%08X (%d read attempt(s))\n", data, i + 1);
++ }
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IsGPUPresent(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ control));
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Hardware->os,
++ Hardware->core,
++ &identity));
++
++ /* Check if these are the same values as saved before. */
++ if ((Hardware->identity.chipModel != identity.chipModel)
++ || (Hardware->identity.chipRevision != identity.chipRevision)
++ || (Hardware->identity.chipFeatures != identity.chipFeatures)
++ || (Hardware->identity.chipMinorFeatures != identity.chipMinorFeatures)
++ || (Hardware->identity.chipMinorFeatures1 != identity.chipMinorFeatures1)
++ || (Hardware->identity.chipMinorFeatures2 != identity.chipMinorFeatures2)
++ )
++ {
++ gcmkPRINT("[galcore]: GPU is not present.");
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++_FlushCache(
++ gckHARDWARE Hardware,
++ gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes, requested;
++ gctPOINTER buffer;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(Command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(Command, requested));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++/******************************************************************************\
++****************************** gckHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHARDWARE_Construct
++**
++** Construct a new gckHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** OUTPUT:
++**
++** gckHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckHARDWARE
++** object.
++*/
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware = gcvNULL;
++ gctUINT16 data = 0xff00;
++ gctUINT32 axi_ot;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ /* Enable the GPU. */
++ gcmkONERROR(gckOS_SetGPUPower(Os, Core, gcvTRUE, gcvTRUE));
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Allocate the gckHARDWARE object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckHARDWARE),
++ &pointer));
++
++ hardware = (gckHARDWARE) pointer;
++
++ /* Initialize the gckHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++ hardware->core = Core;
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Os, Core, &hardware->identity));
++
++ /* Determine the hardware type */
++ switch (hardware->identity.chipModel)
++ {
++ case gcv350:
++ case gcv355:
++ hardware->type = gcvHARDWARE_VG;
++ break;
++
++ case gcv300:
++ case gcv320:
++ case gcv420:
++ hardware->type = gcvHARDWARE_2D;
++ /*set outstanding limit*/
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ break;
++
++ default:
++ hardware->type = gcvHARDWARE_3D;
++ if(hardware->identity.chipModel == gcv880)
++ {
++ /*set outstanding limit*/
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ }
++
++ if ((((((gctUINT32) (hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ))
++ {
++ hardware->type = (gceHARDWARE_TYPE) (hardware->type | gcvHARDWARE_2D);
++ }
++ }
++
++ hardware->powerBaseAddress
++ = ((hardware->identity.chipModel == gcv300)
++ && (hardware->identity.chipRevision < 0x2000))
++ ? 0x0100
++ : 0x0000;
++
++ /* _ResetGPU need powerBaseAddress. */
++ status = _ResetGPU(hardware, Os, Core);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ hardware->powerMutex = gcvNULL;
++
++ hardware->mmuVersion
++ = (((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 28:28)) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) );
++
++ /* Determine whether bug fixes #1 are present. */
++ hardware->extraEventStates = ((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
++ /* Check if big endian */
++ hardware->bigEndian = (*(gctUINT8 *)&data == 0xff);
++
++ /* Initialize the fast clear. */
++ gcmkONERROR(gckHARDWARE_SetFastClear(hardware, -1, -1));
++
++#if !gcdENABLE_128B_MERGE
++
++ if (((((gctUINT32) (hardware->identity.chipMinorFeatures2)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ /* 128B merge is turned on by default. Disable it. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00558, 0));
++ }
++
++#endif
++
++ /* Set power state to ON. */
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++ hardware->lastWaitLink = ~0U;
++ hardware->globalSemaphore = gcvNULL;
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ hardware->powerOnFscaleVal = 64;
++#endif
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &hardware->powerMutex));
++ gcmkONERROR(gckOS_CreateSemaphore(Os, &hardware->globalSemaphore));
++ hardware->startIsr = gcvNULL;
++ hardware->stopIsr = gcvNULL;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _PowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++#if gcdLINK_QUEUE_SIZE
++ hardware->linkQueue.front = 0;
++ hardware->linkQueue.rear = 0;
++ hardware->linkQueue.count = 0;
++#endif
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Disable profiler by default */
++ hardware->gpuProfiler = gcvFALSE;
++
++ /* Return pointer to the gckHARDWARE object. */
++ *Hardware = hardware;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Hardware=0x%x", *Hardware);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, Core, gcvFALSE, gcvFALSE));
++
++ if (hardware->globalSemaphore != gcvNULL)
++ {
++ /* Destroy the global semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Os,
++ hardware->globalSemaphore));
++ }
++
++ if (hardware->powerMutex != gcvNULL)
++ {
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ if (hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, hardware));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Destroy
++**
++** Destroy an gckHARDWARE object.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Destroy the power semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Hardware->os, Hardware->powerMutex));
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Hardware->os, Hardware));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_GetType
++**
++** Get the hardware type.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gceHARDWARE_TYPE * Type
++** Pointer to a variable that receives the type of hardware object.
++*/
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkVERIFY_ARGUMENT(Type != gcvNULL);
++
++ *Type = Hardware->type;
++
++ gcmkFOOTER_ARG("*Type=%d", *Type);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_InitializeHardware
++**
++** Initialize the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++ gctUINT32 chipRev;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the chip revision register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00024,
++ &chipRev));
++
++ if (chipRev != Hardware->identity.chipRevision)
++ {
++ /* Chip is not there! */
++ gcmkONERROR(gcvSTATUS_CONTEXT_LOSSED);
++ }
++
++ /* Disable isolate GPU bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)))));
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ /* Enable debug register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ /* Reset memory counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ ~0U));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++
++ /* Get the system's physical base address. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Program the base addresses. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0041C,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00418,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00428,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00420,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00424,
++ baseAddress));
++
++#if !VIVANTE_PROFILER
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ /* Enable clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ if ((Hardware->identity.chipRevision == 0x4301)
++ || (Hardware->identity.chipRevision == 0x4302)
++ )
++ {
++ /* Disable stall module level clock gating for 4.3.0.1 and 4.3.0.2
++ ** revisions. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++
++#ifndef VIVANTE_NO_3D
++ /* Disable PE clock gating on revs < 5.0 when HZ is present without a
++ ** bug fix. */
++ if ((Hardware->identity.chipRevision < 0x5000)
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))))
++ )
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable PE clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++#endif
++ }
++#endif
++
++ /* Special workaround for this core
++ ** Make sure pulse eater kicks in only when SH is idle */
++ if (Hardware->identity.chipModel == gcv4000 &&
++ Hardware->identity.chipRevision == 0x5208)
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)))));
++ }
++
++ if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) == gcvFALSE)
++ || (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) && (Hardware->identity.chipRevision < 0x5422))
++ )
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15)));
++
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++ /* Special workaround for this core
++ ** Make sure FE and TX are on different buses */
++ if ((Hardware->identity.chipModel == gcv2000)
++ && (Hardware->identity.chipRevision == 0x5108))
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ &data));
++
++ /* Set FE bus to one, TX bus to zero */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ data));
++ }
++
++ /* Test if MMU is initialized. */
++ if ((Hardware->kernel != gcvNULL)
++ && (Hardware->kernel->mmu != gcvNULL)
++ )
++ {
++ /* Reset MMU. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Hardware,
++ Hardware->kernel->mmu->pageTableLogical));
++ }
++ }
++
++ if (Hardware->identity.chipModel >= gcv400
++ && Hardware->identity.chipModel != gcv420
++ && (((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 15:15) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) != gcvTRUE)
++ )
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable PA clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++#if gcdHZ_L2_DISALBE
++ /* Disable HZ-L2. */
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) == gcvTRUE ||
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) == gcvTRUE)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++#endif
++
++ /* Limit 2D outstanding request. */
++ if(Hardware->identity.chipModel == gcv880)
++ {
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00414, axi_ot));
++ }
++
++ if (Hardware->identity.chip2DControl & 0xFF)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (Hardware->identity.chip2DControl & 0xFF) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++ /* Update GPU AXI cache attribute. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00008,
++ 0x00002200));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++** Pointer to a variable that will hold the hardware's alignment for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++** Pointer to a variable that will hold the hardware's alignment for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, no horizontal pixel per tile will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, no vertical pixel per tile will be returned.
++*/
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*InternalSize=%lu *InternalBaseAddress=0x%08x "
++ "*InternalAlignment=0x%08x *ExternalSize=%lu "
++ "*ExternalBaseAddress=0x%08x *ExtenalAlignment=0x%08x "
++ "*HorizontalTileSize=%u *VerticalTileSize=%u",
++ gcmOPT_VALUE(InternalSize),
++ gcmOPT_VALUE(InternalBaseAddress),
++ gcmOPT_VALUE(InternalAlignment),
++ gcmOPT_VALUE(ExternalSize),
++ gcmOPT_VALUE(ExternalBaseAddress),
++ gcmOPT_VALUE(ExternalAlignment),
++ gcmOPT_VALUE(HorizontalTileSize),
++ gcmOPT_VALUE(VerticalTileSize));
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++** Pointer to the identity structure.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gctUINT32 features;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Identity != gcvNULL);
++
++ /* Return chip model and revision. */
++ Identity->chipModel = Hardware->identity.chipModel;
++ Identity->chipRevision = Hardware->identity.chipRevision;
++
++ /* Return feature set. */
++ features = Hardware->identity.chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Override fast clear by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ if ((((((gctUINT32) (features)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ))
++ {
++ /* Override compression by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (Hardware->allowCompression) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 through GC500.2 and GC300,
++ ** since they did not have this bit. */
++ if (((Hardware->identity.chipModel == gcv500) && (Hardware->identity.chipRevision <= 2))
++ || (Hardware->identity.chipModel == gcv300)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ Identity->chipFeatures = features;
++
++ /* Return minor features. */
++ Identity->chipMinorFeatures = Hardware->identity.chipMinorFeatures;
++ Identity->chipMinorFeatures1 = Hardware->identity.chipMinorFeatures1;
++ Identity->chipMinorFeatures2 = Hardware->identity.chipMinorFeatures2;
++ Identity->chipMinorFeatures3 = Hardware->identity.chipMinorFeatures3;
++ Identity->chipMinorFeatures4 = Hardware->identity.chipMinorFeatures4;
++
++ /* Return chip specs. */
++ Identity->streamCount = Hardware->identity.streamCount;
++ Identity->registerMax = Hardware->identity.registerMax;
++ Identity->threadCount = Hardware->identity.threadCount;
++ Identity->shaderCoreCount = Hardware->identity.shaderCoreCount;
++ Identity->vertexCacheSize = Hardware->identity.vertexCacheSize;
++ Identity->vertexOutputBufferSize = Hardware->identity.vertexOutputBufferSize;
++ Identity->pixelPipes = Hardware->identity.pixelPipes;
++ Identity->instructionCount = Hardware->identity.instructionCount;
++ Identity->numConstants = Hardware->identity.numConstants;
++ Identity->bufferSize = Hardware->identity.bufferSize;
++ Identity->varyingsCount = Hardware->identity.varyingsCount;
++ Identity->superTileMode = Hardware->identity.superTileMode;
++ Identity->chip2DControl = Hardware->identity.chip2DControl;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Addres=0x%08x", Hardware, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 31:31)) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x1:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = (((((gctUINT32) (Address)) >> (0 ? 30:0)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1)))))) );
++ }
++ else
++ {
++ *Pool = gcvPOOL_SYSTEM;
++ *Offset = Address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Pool=%d *Offset=0x%08x", *Pool, *Offset);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of command buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes for the prefetch unit (until after the first LINK).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Physical,
++ IN gctBOOL PhysicalAddresses,
++#endif
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0, control;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Bytes=%lu",
++ Hardware, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++#ifdef __QNXNTO__
++ if (PhysicalAddresses && (Hardware->mmuVersion == 0))
++ {
++ /* Convert physical into hardware specific address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertPhysical(Hardware, Physical, &address));
++ }
++ else
++ {
++#endif
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, Logical, &address));
++#ifdef __QNXNTO__
++ }
++#endif
++
++ /* Enable all events. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00014, ~0U));
++
++ /* Write address register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00654, address));
++
++ /* Build control register. */
++ control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ /* Set big endian */
++ if (Hardware->bigEndian)
++ {
++ control |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20)));
++ }
++
++ /* Write control register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Started command buffer @ 0x%08x",
++ address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_WaitLink
++**
++** Append a WAIT/LINK command sequence at the specified location in the command
++** queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT/LINK command sequence at or gcvNULL just to query the size of the
++** WAIT/LINK command sequence.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT/LINK command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the WAIT/LINK command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++** gctUINT32 * WaitOffset
++** Pointer to a variable that will receive the offset of the WAIT command
++** from the specified logcial pointer.
++** If 'WaitOffset' is gcvNULL nothing will be returned.
++**
++** gctSIZE_T * WaitSize
++** Pointer to a variable that will receive the number of bytes used by
++** the WAIT command. If 'LinkSize' is gcvNULL nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctSIZE_T * WaitSize
++ )
++{
++ static const gctUINT waitCount = 200;
++
++ gceSTATUS status;
++ gctUINT32 address;
++ gctUINT32_PTR logical;
++ gctSIZE_T bytes;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x *Bytes=%lu",
++ Hardware, Logical, Offset, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical != gcvNULL) || (Bytes != gcvNULL));
++
++ /* Compute number of bytes required. */
++#if gcd6000_SUPPORT
++ bytes = gcmALIGN(Offset + 96, 8) - Offset;
++#else
++ bytes = gcmALIGN(Offset + 16, 8) - Offset;
++#endif
++
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (logical != gcvNULL)
++ {
++ /* Not enough space? */
++ if (*Bytes < bytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, &address));
++
++ /* Store the WAIT/LINK address. */
++ Hardware->lastWaitLink = address;
++
++ /* Append WAIT(count). */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (waitCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcd6000_SUPPORT
++ /* Send FE-PE sempahore token. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Send FE-PE stall token. */
++ logical[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /*************************************************************/
++ /* Enable chip ID 0. */
++ logical[6] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (1 << 0);
++
++ /* Send semaphore from FE to ChipID 1. */
++ logical[8] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[9] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /* Send semaphore from FE to ChipID 1. */
++ logical[10] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[11] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /*************************************************************/
++ /* Enable chip ID 1. */
++ logical[12] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (1 << 1);
++
++ /* Send semaphore from FE to ChipID 1. */
++ logical[14] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[15] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /* Wait for semaphore from ChipID 0. */
++ logical[16] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[17] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /*************************************************************/
++ /* Enable all chips. */
++ logical[18] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (0xFFFF);
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[20]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[21]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append LINK(2, address). */
++ logical[22]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[23] = address;
++#else
++ /* Append LINK(2, address). */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 8, address, bytes
++ );
++#endif
++
++ if (WaitOffset != gcvNULL)
++ {
++ /* Return the offset pointer to WAIT command. */
++ *WaitOffset = 0;
++ }
++
++ if (WaitSize != gcvNULL)
++ {
++ /* Return number of bytes used by the WAIT command. */
++ *WaitSize = 8;
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT/LINK command
++ ** sequence. */
++ *Bytes = bytes;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *WaitOffset=0x%x *WaitSize=%lu",
++ gcmOPT_VALUE(Bytes), gcmOPT_VALUE(WaitOffset),
++ gcmOPT_VALUE(WaitSize));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_End
++**
++** Append an END command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** END command at or gcvNULL just to query the size of the END command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append END. */
++ logical[0] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: END", Logical);
++
++ /* Make sure the CPU writes out the data to memory. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Nop
++**
++** Append a NOP command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** NOP command at or gcvNULL just to query the size of the NOP command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the NOP command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the NOP command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append NOP. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: NOP", Logical);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the NOP command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Wait
++**
++** Append a WAIT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT command at or gcvNULL just to query the size of the WAIT command.
++**
++** gctUINT32 Count
++** Number of cycles to wait.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the NOP command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Wait(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Count,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Count=%u *Bytes=%lu",
++ Hardware, Logical, Count, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append WAIT. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 address;
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Hardware, logical, &address
++ ));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, Count
++ );
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Event
++**
++** Append an EVENT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the EVENT command at or gcvNULL just to query the size of the EVENT
++** command.
++**
++** gctUINT8 Event
++** Event ID to program.
++**
++** gceKERNEL_WHERE FromWhere
++** Location of the pipe to send the event.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the EVENT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT size;
++ gctUINT32 destination = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Event=%u FromWhere=%d *Bytes=%lu",
++ Hardware, Logical, Event, FromWhere, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++ gcmkVERIFY_ARGUMENT(Event < 32);
++
++ /* Determine the size of the command. */
++
++ size = (Hardware->extraEventStates && (FromWhere == gcvKERNEL_PIXEL))
++ ? gcmALIGN(8 + (1 + 5) * 4, 8) /* EVENT + 5 STATES */
++ : 8;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < size)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ switch (FromWhere)
++ {
++ case gcvKERNEL_COMMAND:
++ /* From command processor. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ break;
++
++ case gcvKERNEL_PIXEL:
++ /* From pixel engine. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Append EVENT(Event, destiantion). */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = ((((gctUINT32) (destination)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (Event) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ /* Make sure the event ID gets written out before GPU can access it. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 phys;
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &phys);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: EVENT %d", phys, Event);
++ }
++#endif
++
++ /* Append the extra states. These are needed for the chips that do not
++ ** support back-to-back events due to the async interface. The extra
++ ** states add the necessary delay to ensure that event IDs do not
++ ** collide. */
++ if (size > 8)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0100) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++ logical[3] = 0;
++ logical[4] = 0;
++ logical[5] = 0;
++ logical[6] = 0;
++ logical[7] = 0;
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT command. */
++ *Bytes = size;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_PipeSelect
++**
++** Append a PIPESELECT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the PIPESELECT command at or gcvNULL just to query the size of the
++** PIPESELECT command.
++**
++** gcePIPE_SELECT Pipe
++** Pipe value to select.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the PIPESELECT command.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the PIPESELECT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Pipe=%d *Bytes=%lu",
++ Hardware, Logical, Pipe, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Append a PipeSelect. */
++ if (Logical != gcvNULL)
++ {
++ gctUINT32 flush, stall;
++
++ if (*Bytes < 32)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ flush = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++
++ stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1]
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++ /* LoadState(AQSempahore, 1), stall. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: SEMAPHORE 0x%x", logical + 2, stall);
++
++ /* Stall, stall. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ logical[5] = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: STALL 0x%x", logical + 4, stall);
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ logical[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[7] = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: PIPE %d", logical + 6, Pipe);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the PIPESELECT command. */
++ *Bytes = 32;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Link
++**
++** Append a LINK command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the LINK command at or gcvNULL just to query the size of the LINK
++** command.
++**
++** gctPOINTER FetchAddress
++** Logical address of destination of LINK.
++**
++** gctSIZE_T FetchSize
++** Number of bytes in destination of LINK.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the LINK command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the LINK command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER FetchAddress,
++ IN gctSIZE_T FetchSize,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctUINT32 address;
++ gctUINT32 link;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x FetchAddress=0x%x FetchSize=%lu "
++ "*Bytes=%lu",
++ Hardware, Logical, FetchAddress, FetchSize,
++ gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical address to hardware address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, FetchAddress, &address));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical + 1, address));
++
++ /* Make sure the address got written before the LINK command. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++ /* Compute number of 64-byte aligned bytes to fetch. */
++ bytes = gcmALIGN(address + FetchSize, 8) - address;
++
++ /* Append LINK(bytes / 8), FetchAddress. */
++ link = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical, link));
++
++ /* Memory barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical));
++
++#if gcdLINK_QUEUE_SIZE && gcdVIRTUAL_COMMAND_BUFFER
++ if (address >= 0x80000000)
++ {
++ gckLINKQUEUE_Enqueue(&Hardware->linkQueue, address, address + bytes);
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_UpdateQueueTail
++**
++** Update the tail of the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the start of the command queue.
++**
++** gctUINT32 Offset
++** Offset into the command queue of the tail (last command).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x",
++ Hardware, Logical, Offset);
++
++ /* Verify the hardware. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Force a barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ /* Notify gckKERNEL object of change. */
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ status = gckKERNEL_GetGPUAddress(Hardware->kernel, Logical, Address);
++
++ if (status == gcvSTATUS_INVALID_ADDRESS)
++#endif
++ {
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertPhysical
++**
++** Convert a physical address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPHYS_ADDR Physical
++** Physical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ConvertPhysical(
++ IN gckHARDWARE Hardware,
++ IN gctPHYS_ADDR Physical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x", Hardware, Physical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ address = gcmPTR2INT(Physical);
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkVERIFY_OK(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Interrupt
++**
++** Process an interrupt.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL InterruptValid
++** If gcvTRUE, this function will read the interrupt acknowledge
++** register, stores the data, and return whether or not the interrupt
++** is ours or not. If gcvFALSE, this functions will read the interrupt
++** acknowledge register and combine it with any stored value to handle
++** the event notifications.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL InterruptValid
++ )
++{
++ gckEVENT eventObj;
++ gctUINT32 data;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x InterruptValid=%d", Hardware, InterruptValid);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Extract gckEVENT object. */
++ eventObj = Hardware->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ if (InterruptValid)
++ {
++ /* Read AQIntrAcknowledge register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++
++ if (data == 0)
++ {
++ /* Not our interrupt. */
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ }
++ else
++ {
++ /* Inform gckEVENT of the interrupt. */
++ status = gckEVENT_Interrupt(eventObj, data);
++ }
++ }
++ else
++ {
++ /* Handle events. */
++ status = gckEVENT_Notify(eventObj, 0);
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryCommandBuffer
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Alignment
++** Pointer to a variable receiving the alignment for each command.
++**
++** gctSIZE_T * ReservedHead
++** Pointer to a variable receiving the number of reserved bytes at the
++** head of each command buffer.
++**
++** gctSIZE_T * ReservedTail
++** Pointer to a variable receiving the number of bytes reserved at the
++** tail of each command buffer.
++*/
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * Alignment,
++ OUT gctSIZE_T * ReservedHead,
++ OUT gctSIZE_T * ReservedTail
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Alignment != gcvNULL)
++ {
++ /* Align every 8 bytes. */
++ *Alignment = 8;
++ }
++
++ if (ReservedHead != gcvNULL)
++ {
++ /* Reserve space for SelectPipe(). */
++ *ReservedHead = 32;
++ }
++
++ if (ReservedTail != gcvNULL)
++ {
++ /* Reserve space for Link(). */
++ *ReservedTail = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Alignment=%lu *ReservedHead=%lu *ReservedTail=%lu",
++ gcmOPT_VALUE(Alignment), gcmOPT_VALUE(ReservedHead),
++ gcmOPT_VALUE(ReservedTail));
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QuerySystemMemory
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++** Poinetr to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = 1U << 31;
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*SystemSize=%lu *SystemBaseAddress=%lu",
++ gcmOPT_VALUE(SystemSize), gcmOPT_VALUE(SystemBaseAddress));
++ return gcvSTATUS_OK;
++}
++
++#ifndef VIVANTE_NO_3D
++/*******************************************************************************
++**
++** gckHARDWARE_QueryShaderCaps
++**
++** Query the shader capabilities.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT * VertexUniforms
++** Pointer to a variable receiving the number of uniforms in the vertex
++** shader.
++**
++** gctUINT * FragmentUniforms
++** Pointer to a variable receiving the number of uniforms in the
++** fragment shader.
++**
++** gctUINT * Varyings
++** Pointer to a variable receiving the maimum number of varyings.
++*/
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ )
++{
++ gctUINT32 vsConstMax;
++ gctUINT32 psConstMax;
++
++ gcmkHEADER_ARG("Hardware=0x%x VertexUniforms=0x%x "
++ "FragmentUniforms=0x%x Varyings=0x%x",
++ Hardware, VertexUniforms,
++ FragmentUniforms, Varyings);
++
++ if ((Hardware->identity.chipModel == gcv2000)
++ && (Hardware->identity.chipRevision == 0x5118))
++ {
++ vsConstMax = 256;
++ psConstMax = 64;
++ }
++ else if (Hardware->identity.numConstants > 256)
++ {
++ vsConstMax = 256;
++ psConstMax = 256;
++ }
++ else if (Hardware->identity.numConstants == 256)
++ {
++ vsConstMax = 256;
++ psConstMax = 256;
++ }
++ else
++ {
++ vsConstMax = 168;
++ psConstMax = 64;
++ }
++
++ if (VertexUniforms != gcvNULL)
++ {
++ *VertexUniforms = vsConstMax;
++ }
++
++ if (FragmentUniforms != gcvNULL)
++ {
++ *FragmentUniforms = psConstMax;
++ }
++
++ if (Varyings != gcvNULL)
++ {
++ /* Return the shader varyings count. */
++ *Varyings = Hardware->identity.varyingsCount;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert the logical address into an hardware address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, Logical, &address));
++
++ /* Also get the base address - we need a real physical address. */
++ gcmkONERROR(
++ gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setting page table to 0x%08X",
++ address + baseAddress);
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00400,
++ address + baseAddress));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00410,
++ address + baseAddress));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00404,
++ address + baseAddress));
++
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00408,
++ address + baseAddress));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0040C,
++ address + baseAddress));
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctSIZE_T bufferSize;
++ gctBOOL commitEntered = gcvFALSE;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 flushSize;
++ gctUINT32 count;
++ gctUINT32 physical;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ /* Flush the memory controller. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, 8, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(gckCOMMAND_Execute(command, 8));
++ }
++ else
++ {
++ flushSize = 16 * 4;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, flushSize, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ count = (bufferSize - flushSize + 7) >> 3;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(command->os, buffer, &physical));
++
++ /* Flush cache. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[7]
++ = physical + 8 * gcmSIZEOF(gctUINT32);
++
++ /* Flush MMU cache. */
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9]
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[11]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[12]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[13]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[14]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[15]
++ = physical + flushSize;
++
++ gcmkONERROR(gckCOMMAND_Execute(command, flushSize));
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ commitEntered = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Hardware->kernel->command,
++ gcvFALSE));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMUv2
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMUv2(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Enable,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gctUINT32 config, address;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctSIZE_T bufferSize;
++ gctBOOL commitEntered = gcvFALSE;
++ gctPOINTER pointer = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL config2D;
++ gctSIZE_T configSize;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d", Hardware, Enable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ config2D = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_2D);
++
++ configSize = 4 * 4;
++
++ if (config2D)
++ {
++ configSize +=
++ /* Pipe Select. */
++ 4 * 4
++ /* Configure MMU States. */
++ + 4 * 4;
++ }
++
++ /* Convert logical address into physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, MtlbAddress, &config));
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, SafeAddress, &address));
++
++ if (address & 0x3F)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ switch (Mode)
++ {
++ case gcvMMU_MODE_1K:
++ if (config & 0x3FF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ case gcvMMU_MODE_4K:
++ if (config & 0xFFF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
++ commitEntered = gcvTRUE;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, configSize, &pointer, &bufferSize
++ ));
++
++ buffer = pointer;
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1] = config;
++
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[3] = address;
++
++ if (config2D)
++ {
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[5] = 0x1;
++
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[7] = config;
++
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9] = address;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[11] = 0x0;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setup MMU: config=%08x, Safe Address=%08x\n.", config, address);
++
++ gcmkONERROR(gckCOMMAND_Execute(command, configSize));
++
++ if (FromPower == gcvFALSE)
++ {
++ /* Acquire global semaphore to suspend power management until MMU
++ ** is enabled. And acquired it before gckCOMMAND_ExitCommit to
++ ** make sure GPU keeps ON. */
++ gcmkONERROR(
++ gckOS_AcquireSemaphore(Hardware->os, Hardware->globalSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
++ commitEntered = gcvFALSE;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "call gckCOMMAND_Stall to make sure the config is done.\n ");
++
++ gcmkONERROR(gckCOMMAND_Stall(command, FromPower));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Enable MMU through GCREG_MMU_CONTROL.");
++
++ /* Enable MMU. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0018C,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Enable) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ if (FromPower == gcvFALSE)
++ {
++ /* Relase global semaphore. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, Hardware->globalSemaphore));
++
++ acquired = gcvFALSE;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "call gckCOMMAND_Stall to check MMU available.\n");
++
++ gcmkONERROR(gckCOMMAND_Stall(command, FromPower));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "The MMU is available.\n");
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Hardware->kernel->command,
++ FromPower));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, Hardware->globalSemaphore));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving te hardware address.
++*/
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Index=%u Offset=%u", Hardware, Index, Offset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ *Address = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (Offset | (Index << 12)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle = 0;
++ gctINT retry, poll, pollCount;
++
++ gcmkHEADER_ARG("Hardware=0x%x Wait=%d", Hardware, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++
++ /* If we have to wait, try 100 polls per millisecond. */
++ pollCount = Wait ? 100 : 1;
++
++ /* At most, try for 1 second. */
++ for (retry = 0; retry < 1000; ++retry)
++ {
++ /* If we have to wait, try 100 polls per millisecond. */
++ for (poll = pollCount; poll > 0; --poll)
++ {
++ /* Read register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* See if we have to wait for FE idle. */
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* FE is idle. */
++ break;
++ }
++ }
++
++ /* Check if we need to wait for FE and FE is busy. */
++ if (Wait && !(((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Wait a little. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "%s: Waiting for idle: 0x%08X",
++ __FUNCTION__, idle);
++
++ gcmkVERIFY_OK(gckOS_Delay(Hardware->os, 1));
++ }
++ else
++ {
++ break;
++ }
++ }
++
++ /* Return idle to caller. */
++ *Data = idle;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32 pipe;
++ gctUINT32 flush = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++ gctBOOL fcFlushStall;
++ gctUINT32 reserveBytes = 8;
++
++ gcmkHEADER_ARG("Hardware=0x%x Flush=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Flush, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get current pipe. */
++ pipe = Hardware->kernel->command->pipeSelect;
++
++ fcFlushStall
++ = ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 31:31) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))))
++ && (Flush == gcvFLUSH_ALL)
++ ;
++
++ if (fcFlushStall)
++ {
++ reserveBytes += 8;
++ }
++
++ /* Flush 3D color cache. */
++ if ((Flush & gcvFLUSH_COLOR) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ /* Flush 3D depth cache. */
++ if ((Flush & gcvFLUSH_DEPTH) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Flush 3D texture cache. */
++ if ((Flush & gcvFLUSH_TEXTURE) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++
++ /* Flush 2D cache. */
++ if ((Flush & gcvFLUSH_2D) && (pipe == 0x1))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ }
++
++ /* See if there is a valid flush. */
++ if (flush == 0)
++ {
++ if (Bytes != gcvNULL)
++ {
++ /* No bytes required. */
++ *Bytes = 0;
++ }
++ }
++
++ else
++ {
++ /* Copy to command queue. */
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < reserveBytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append LOAD_STATE to AQFlush. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++ if (fcFlushStall)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[3] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical + 3, logical[3]);
++ }
++
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* bytes required. */
++ *Bytes = reserveBytes;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d Compression=%d",
++ Hardware, Enable, Compression);
++
++ /* Only process if fast clear is available. */
++ if ((((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ if (Enable == -1)
++ {
++ /* Determine automatic value for fast clear. */
++ Enable = ((Hardware->identity.chipModel != gcv500)
++ || (Hardware->identity.chipRevision >= 3)
++ ) ? 1 : 0;
++ }
++
++ if (Compression == -1)
++ {
++ /* Determine automatic value for compression. */
++ Compression = Enable
++ & (((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) );
++ }
++
++ /* Read AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &debug));
++
++ /* Set fast clear bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++ if (
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) ||
++ (Hardware->identity.chipModel >= gcv4000))
++ {
++ /* Set compression bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21))) | (((gctUINT32) ((gctUINT32) (Compression == 0) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21)));
++ }
++
++ /* Write back AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ debug));
++
++ /* Store fast clear and comprersison flags. */
++ Hardware->allowFastClear = Enable;
++ Hardware->allowCompression = Compression;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "FastClear=%d Compression=%d", Enable, Compression);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++}
++gcePOWER_FLAGS;
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++static gctCONST_STRING
++_PowerEnum(gceCHIPPOWERSTATE State)
++{
++ const gctCONST_STRING states[] =
++ {
++ gcmSTRING(gcvPOWER_ON),
++ gcmSTRING(gcvPOWER_OFF),
++ gcmSTRING(gcvPOWER_IDLE),
++ gcmSTRING(gcvPOWER_SUSPEND),
++ gcmSTRING(gcvPOWER_SUSPEND_ATPOWERON),
++ gcmSTRING(gcvPOWER_OFF_ATPOWERON),
++ gcmSTRING(gcvPOWER_IDLE_BROADCAST),
++ gcmSTRING(gcvPOWER_SUSPEND_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_RECOVERY),
++ gcmSTRING(gcvPOWER_ON_AUTO)
++ };
++
++ if ((State >= gcvPOWER_ON) && (State <= gcvPOWER_ON_AUTO))
++ {
++ return states[State - gcvPOWER_ON];
++ }
++
++ return "unknown";
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag, clock;
++ gctPOINTER buffer;
++ gctSIZE_T bytes, requested;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL broadcast = gcvFALSE;
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++ gctUINT32 process, thread;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL commandStarted = gcvFALSE;
++ gctBOOL isrStarted = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++ gctBOOL global = gcvFALSE;
++ gctBOOL globalAcquired = gcvFALSE;
++ gctBOOL configMmu = gcvFALSE;
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_RELEASE,
++ /* OFF */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
++
++ /* Clocks. */
++ static const gctUINT clocks[4] =
++ {
++ /* gcvPOWER_ON */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (64) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_OFF */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_IDLE */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_SUSPEND */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++ };
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d(%s)",
++ State, _PowerEnum(State));
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON and note we are inside recovery. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ /* Before we grab locks see if this is actually a needed change */
++ if (State == Hardware->chipPowerState)
++ return gcvSTATUS_OK;
++
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Called from IST,
++ ** so waiting here will cause deadlock,
++ ** if lock holder call gckCOMMAND_Stall() */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++#if gcdPOWEROFF_TIMEOUT
++ else if(State == gcvPOWER_OFF && timeout == gcvTRUE)
++ {
++ /*
++ ** try to aqcuire the mutex with more milliseconds,
++ ** flush_delayed_work should be running with timeout,
++ ** so waiting here will cause deadlock */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, gcdPOWEROFF_TIMEOUT);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gckOS_Print("GPU Timer deadlock, exit by timeout!!!!\n");
++
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ }
++#endif
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++ /* Get time until mtuex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ clock = clocks[State];
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (State == gcvPOWER_ON)
++ {
++ clock = ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (Hardware->powerOnFscaleVal) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)));
++ }
++#endif
++
++ if (State == gcvPOWER_SUSPEND && Hardware->chipPowerState == gcvPOWER_OFF && broadcast)
++ {
++#if gcdPOWER_SUSNPEND_WHEN_IDLE
++ /* Do nothing */
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++#else
++ /* Clock should be on when switch power from off to suspend */
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) ;
++#endif
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime is pushed forward, give up.*/
++ if (isAfter
++ /* Expect a transition start from IDLE or SUSPEND. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Power Off GPU[%d] at %u [supposed to be at %u]",
++ Hardware->core, currentTime, Hardware->powerOffTime);
++ }
++
++ if (State == gcvPOWER_ON || State == gcvPOWER_OFF)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOfftimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* If this is an internal power management, we have to check if we can grab
++ ** the global power semaphore. If we cannot, we have to wait until the
++ ** external world changes power management. */
++ if (!global)
++ {
++ /* Try to acquire the global semaphore. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Called from thread routine which should NEVER sleep.*/
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Release the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Releasing the power mutex.");
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Wait for the semaphore. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Waiting for global semaphore.");
++ gcmkONERROR(gckOS_AcquireSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvTRUE;
++
++ /* Acquire the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Reacquiring the power mutex.");
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ mutexAcquired = gcvTRUE;
++
++ /* chipPowerState may be changed by external world during the time
++ ** we give up powerMutex, so updating flag now is necessary. */
++ flag = flags[Hardware->chipPowerState][State];
++
++ if (flag == 0)
++ {
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Error. */
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore again. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ else
++ {
++ if (State == gcvPOWER_OFF || State == gcvPOWER_SUSPEND || State == gcvPOWER_IDLE)
++ {
++ /* Acquire the global semaphore if it has not been acquired. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_OK)
++ {
++ globalAcquired = gcvTRUE;
++ }
++ else if (status != gcvSTATUS_TIMEOUT)
++ {
++ /* Other errors. */
++ gcmkONERROR(status);
++ }
++ /* Ignore gcvSTATUS_TIMEOUT and leave globalAcquired as gcvFALSE.
++ ** gcvSTATUS_TIMEOUT means global semaphore has already
++ ** been acquired before this operation, so even if we fail,
++ ** we should not release it in our error handling. It should be
++ ** released by the next successful global gcvPOWER_ON. */
++ }
++
++ /* Global power management can't be aborted, so sync with
++ ** proceeding last commit. */
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ for (;;)
++ {
++ /* Check if GPU is present and awake. */
++ status = _IsGPUPresent(Hardware);
++
++ /* Check if the GPU is not responding. */
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Turn off the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvFALSE, gcvFALSE));
++
++ Hardware->clockState = gcvFALSE;
++ Hardware->powerState = gcvFALSE;
++
++ /* Wait a little. */
++ gckOS_Delay(os, 1);
++
++ /* Turn on the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ /* We need to initialize the hardware and start the command
++ * processor. */
++ flag |= gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_START;
++ }
++ else
++ {
++ /* Test for error. */
++ gcmkONERROR(status);
++
++ /* Break out of loop. */
++ break;
++ }
++ }
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ gctBOOL idle;
++ gctINT32 atomValue;
++
++ /* For global operation, all pending commits have already been
++ ** blocked by globalSemaphore or powerSemaphore.*/
++ if (!global)
++ {
++ /* Check commit atom. */
++ gcmkONERROR(gckOS_AtomGet(os, command->atomCommit, &atomValue));
++
++ if (atomValue > 0)
++ {
++ /* Commits are pending - abort power management. */
++ status = broadcast ? gcvSTATUS_CHIP_NOT_READY
++ : gcvSTATUS_MORE_DATA;
++ goto OnError;
++ }
++ }
++
++ if (broadcast)
++ {
++ /* Check for idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Hardware, &idle));
++
++ if (!idle)
++ {
++ status = gcvSTATUS_CHIP_NOT_READY;
++ goto OnError;
++ }
++ }
++
++ else
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvTRUE));
++ commitEntered = gcvTRUE;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ commitEntered = gcvFALSE;
++
++ /* Wait to finish all commands. */
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));
++ }
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ /* Stop the Isr. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++ }
++
++ /* Flush Cache before Power Off. */
++ if (flag & gcvPOWER_FLAG_POWER_OFF)
++ {
++ if (Hardware->clockState == gcvFALSE)
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ gcvTRUE,
++ gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++
++ if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE)
++ {
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clocks[0]));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clocks[0])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++ }
++
++ gcmkONERROR(gckCOMMAND_Start(command));
++
++ gcmkONERROR(_FlushCache(Hardware, command));
++
++ gckOS_Delay(gcvNULL, 1);
++
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ flag |= gcvPOWER_FLAG_CLOCK_OFF;
++ }
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState
++ /* Don't touch clock control if dynamic frequency scaling is available. */
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE
++ )
++ {
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ if (Hardware->identity.chipModel == gcv4000
++ && Hardware->identity.chipRevision == 0x5208)
++ {
++ clock &= ~2U;
++ }
++ }
++
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(Hardware,
++ Hardware->allowFastClear,
++ Hardware->allowCompression));
++
++ /* Force the command queue to reload the next context. */
++ command->currContext = gcvNULL;
++
++ /* Need to config mmu after command start. */
++ configMmu = gcvTRUE;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ /* Start the command processor. */
++ gcmkONERROR(gckCOMMAND_Start(command));
++ commandStarted = gcvTRUE;
++
++ if (Hardware->startIsr)
++ {
++ /* Start the Isr. */
++ gcmkONERROR(Hardware->startIsr(Hardware->isrContext, Hardware->core));
++ isrStarted = gcvTRUE;
++ }
++
++ /* Set NEW MMU. */
++ if (Hardware->mmuVersion != 0 && configMmu)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ Hardware,
++ gcvTRUE,
++ Hardware->kernel->mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Hardware->kernel->mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvTRUE
++ ));
++ }
++ }
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++
++ if (global)
++ {
++ /* Verify global semaphore has been acquired already before
++ ** we release it.
++ ** If it was acquired, gckOS_TryAcquireSemaphore will return
++ ** gcvSTATUS_TIMEOUT and we release it. Otherwise, global
++ ** semaphore will be acquried now, but it still is released
++ ** immediately. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status != gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++#if gcdDVFS
++ if (State == gcvPOWER_ON && Hardware->kernel->dvfs)
++ {
++ gckDVFS_Start(Hardware->kernel->dvfs);
++ }
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commandStarted)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Stop(command, gcvFALSE));
++ }
++
++ if (isrStarted)
++ {
++ gcmkVERIFY_OK(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ }
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (globalAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++ /* Return the statue. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL PowerManagement
++** Power Mangement State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetGpuProfiler
++**
++** Configure GPU profiler function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL GpuProfiler
++** GOU Profiler State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->gpuProfiler = GpuProfiler;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ )
++{
++ gceSTATUS status;
++ gctUINT32 clock;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x FscaleValue=%d", Hardware, FscaleValue);
++
++ gcmkVERIFY_ARGUMENT(FscaleValue > 0 && FscaleValue <= 64);
++
++ gcmkONERROR(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ Hardware->powerOnFscaleVal = FscaleValue;
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (FscaleValue) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ /* Restore all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ )
++{
++ *FscaleValue = Hardware->powerOnFscaleVal;
++ if ((gpu3DMinClock > 0) && (gpu3DMinClock <= 64) && (Hardware->core == gcvCORE_MAJOR))
++ *MinFscaleValue = gpu3DMinClock;
++ else
++ *MinFscaleValue = 1;
++ *MaxFscaleValue = 64;
++
++ return gcvSTATUS_OK;
++}
++
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle, address;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++ /* Test if address is inside the last WAIT/LINK sequence. */
++ if ((address >= Hardware->lastWaitLink)
++ && (address <= Hardware->lastWaitLink + 16)
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ *IsIdle = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ *IsIdle = gcvFALSE;
++ }
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** Handy macros that will help in reading those debug registers.
++*/
++
++#define gcmkREAD_DEBUG_REGISTER(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &profiler->data))
++
++#define gcmkREAD_DEBUG_REGISTER_N(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &data))
++
++#define gcmkRESET_DEBUG_REGISTER(control, block) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 15))); \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 0)))
++
++/*******************************************************************************
++**
++** gckHARDWARE_ProfileEngine2D
++**
++** Read the profile registers available in the 2D engine and sets them in the
++** profile. The function will also reset the pixelsRendered counter every time.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OPTIONAL gcs2D_PROFILE_PTR Profile
++** Pointer to a gcs2D_Profile structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ )
++{
++ gceSTATUS status;
++ gcs2D_PROFILE_PTR profiler = Profile;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Profile != gcvNULL)
++ {
++ /* Read the cycle count. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &Profile->cycleCount));
++
++ /* Read pixels rendered by 2D engine. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &profiler->pixelsRendered));
++
++ /* Reset counter. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++ /* Walk through all avaiable pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ if(Reset){
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));}
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));}
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));}
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));}
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));}
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));}
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));}
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++#define gcmkUPDATE_PROFILE_DATA(data) \
++ profilerHistroy->data += profiler->data
++
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = Hardware->kernel->command;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Acquire the context sequnence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os, command->mutexContextSeq, gcvINFINITE
++ ));
++
++ /* Read the counters. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ profiler, &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ if (Reset)
++ {
++ /* Reset counters. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os, command->mutexContextSeq
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = &Context->latestProfiler;
++ gcsPROFILER_COUNTERS * profilerHistroy = &Context->histroyProfiler;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 temp;
++ gctBOOL needResetShader = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x Context=0x%x", Hardware, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ chipModel = Hardware->identity.chipModel;
++ chipRevision = Hardware->identity.chipRevision;
++ if (chipModel == gcv2000 || (chipModel == gcv2100 && chipRevision == 0x5118))
++ {
++ needResetShader = gcvTRUE;
++ }
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuTotalCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuIdleCyclesCounter);
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++ /* Walk through all avaiable pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++ gcmkUPDATE_PROFILE_DATA(gpuTotalRead64BytesPerFrame);
++ gcmkUPDATE_PROFILE_DATA(gpuTotalWrite64BytesPerFrame);
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_depth_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_depth_pipe);
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++
++
++
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->ps_inst_counter;
++ profiler->ps_inst_counter -= Context->prevPSInstCount;
++ Context->prevPSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(ps_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_pixel_counter;
++ profiler->rendered_pixel_counter -= Context->prevPSPixelCount;
++ Context->prevPSPixelCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_pixel_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vs_inst_counter;
++ profiler->vs_inst_counter -= Context->prevVSInstCount;
++ Context->prevVSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vs_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_vertice_counter;
++ profiler->rendered_vertice_counter -= Context->prevVSVertexCount;
++ Context->prevVSVertexCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_vertice_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_branch_inst_counter;
++ profiler->vtx_branch_inst_counter -= Context->prevVSBranchInstCount;
++ Context->prevVSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_texld_inst_counter;
++ profiler->vtx_texld_inst_counter -= Context->prevVSTexInstCount;
++ Context->prevVSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_branch_inst_counter;
++ profiler->pxl_branch_inst_counter -= Context->prevPSBranchInstCount;
++ Context->prevPSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_texld_inst_counter;
++ profiler->pxl_texld_inst_counter -= Context->prevPSTexInstCount;
++ Context->prevPSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_vtx_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_output_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_depth_clipped_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_trivial_rejected_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_culled_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_triangle_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_lines_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_pixel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_quad_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_quad_count_after_early_z);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_primitive_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_pipe_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_prefetch_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_bilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_trilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_discarded_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_in_8B_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_hit_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_IP);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_write_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_read_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_data_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ Hardware->powerBaseAddress +
++ 0x00104,
++ 0x00000000));
++
++ control = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable pulse-eater. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Wait for clock being stable. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctUINT32 process, thread;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->identity.chipRevision < 0x4600)
++ {
++ /* Not supported - we need the isolation bit. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ status = gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread))
++ {
++ /* No way to recovery from a error in power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ mutexAcquired = gcvTRUE;
++ }
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(
++ gckOS_AcquireSemaphore(Hardware->os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if ((Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_IDLE)
++ )
++ {
++ /* Stop the command processor. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvTRUE));
++ }
++
++ /* Stop isr, we will start it again when power on GPU. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++
++ /* Hardware reset. */
++ status = gckOS_ResetGPU(Hardware->os, Hardware->core);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Soft reset. */
++ gcmkONERROR(_ResetGPU(Hardware, Hardware->os, Hardware->core));
++ }
++
++ /* Force an OFF to ON power switch. */
++ Hardware->chipPowerState = gcvPOWER_OFF;
++
++ gcmkONERROR(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the power management semaphore. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++ }
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Test if we have a new Memory Controller. */
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))))
++ {
++ /* No base address required. */
++ *BaseAddress = 0;
++ }
++ else
++ {
++ /* Get the base address from the OS. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, BaseAddress));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ )
++{
++ gctBOOL need = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x State=0x%08x", Hardware, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(NeedBase != gcvNULL);
++
++ /* Make sure this is a load state. */
++ if (((((gctUINT32) (State)) >> (0 ? 31:27) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))))
++ {
++#ifndef VIVANTE_NO_3D
++ /* Get the state address. */
++ switch ((((((gctUINT32) (State)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) ))
++ {
++ case 0x0596:
++ case 0x0597:
++ case 0x0599:
++ case 0x059A:
++ case 0x05A9:
++ /* These states need a TRUE physical address. */
++ need = gcvTRUE;
++ break;
++ }
++#else
++ /* 2D addresses don't need a base address. */
++#endif
++ }
++
++ /* Return the flag. */
++ *NeedBase = need;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedBase=%d", *NeedBase);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Hardware=0x%x, StartIsr=0x%x, StopIsr=0x%x, Context=0x%x",
++ Hardware, StartIsr, StopIsr, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (StartIsr == gcvNULL ||
++ StopIsr == gcvNULL ||
++ Context == gcvNULL)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ Hardware->startIsr = StartIsr;
++ Hardware->stopIsr = StopIsr;
++ Hardware->isrContext = Context;
++
++ /* Success. */
++ gcmkFOOTER();
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Compose
++**
++** Start a composition.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gceSTATUS status;
++ gctUINT32_PTR triggerState;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x Logical=0x%x"
++ " Offset=%d Size=%d EventID=%d",
++ Hardware, Physical, Logical, Offset, Size, EventID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(((Size + 8) & 63) == 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Program the trigger state. */
++ triggerState = (gctUINT32_PTR) ((gctUINT8_PTR) Logical + Offset + Size);
++ triggerState[0] = 0x0C03;
++ triggerState[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16))) | (((gctUINT32) ((gctUINT32) (EventID) & ((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16)))
++ ;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Hardware->os, ProcessID, gcvNULL,
++ Physical, Logical, Offset + Size
++ ));
++#endif
++
++ /* Start composition. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os, Hardware->core, 0x00554,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ /* Return the status. */
++ return gcvSTATUS_NOT_SUPPORTED;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_IsFeatureAvailable
++**
++** Verifies whether the specified feature is available in hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceFEATURE Feature
++** Feature to be verified.
++*/
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ )
++{
++ gctBOOL available;
++
++ gcmkHEADER_ARG("Hardware=0x%x Feature=%d", Hardware, Feature);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Only features needed by common kernel logic added here. */
++ switch (Feature)
++ {
++ case gcvFEATURE_END_EVENT:
++ /*available = gcmVERIFYFIELDVALUE(Hardware->identity.chipMinorFeatures2,
++ GC_MINOR_FEATURES2, END_EVENT, AVAILABLE
++ );*/
++ available = gcvFALSE;
++ break;
++ case gcvFEATURE_MC20:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))));
++ break;
++ case gcvFEATURE_DYNAMIC_FREQUENCY_SCALING:
++ /* This feature doesn't apply for 2D cores. */
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 14:14) & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_2D:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_3D:
++#ifndef VIVANTE_NO_3D
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++#else
++ available = gcvFALSE;
++#endif
++ break;
++
++ case gcvFEATURE_HALTI2:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ default:
++ gcmkFATAL("Invalid feature has been requested.");
++ available = gcvFALSE;
++ }
++
++ /* Return result. */
++ gcmkFOOTER_ARG("%d", available ? gcvSTATUS_TRUE : gcvSTATUS_OK);
++ return available ? gcvSTATUS_TRUE : gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpMMUException
++**
++** Dump the MMU debug info on an MMU exception.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ )
++{
++#if !gcdPOWER_SUSNPEND_WHEN_IDLE && !gcdPOWEROFF_TIMEOUT
++ gctUINT32 mmu, mmuStatus, address, i;
++#if gcdDEBUG
++ gctUINT32 mtlb, stlb, offset;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkPRINT("GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ Hardware->core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** MMU ERROR DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00188,
++ &mmuStatus));
++
++ gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus);
++
++ for (i = 0; i < 4; i += 1)
++ {
++ mmu = mmuStatus & 0xF;
++ mmuStatus >>= 4;
++
++ if (mmu == 0)
++ {
++ continue;
++ }
++
++ switch (mmu)
++ {
++ case 1:
++ gcmkPRINT(" MMU%d: slave not present\n", i);
++ break;
++
++ case 2:
++ gcmkPRINT(" MMU%d: page not present\n", i);
++ break;
++
++ case 3:
++ gcmkPRINT(" MMU%d: write violation\n", i);
++ break;
++
++ default:
++ gcmkPRINT(" MMU%d: unknown state\n", i);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00190 + i * 4,
++ &address));
++
++ mtlb = (address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++ offset = address & gcdMMU_OFFSET_4K_MASK;
++
++ gcmkPRINT(" MMU%d: exception address = 0x%08X\n", i, address);
++
++ gcmkPRINT(" MTLB entry = %d\n", mtlb);
++
++ gcmkPRINT(" STLB entry = %d\n", stlb);
++
++ gcmkPRINT(" Offset = 0x%08X (%d)\n", offset, offset);
++
++ gckMMU_DumpPageTableEntry(Hardware->kernel->mmu, address);
++
++ }
++
++ gcmkFOOTER_NO();
++#else
++ /* If clock could be off automatically, we can't read mmu debug
++ ** register here; build driver with gcdPOWER_SUSPEND_WHEN_IDLE = 0
++ ** and gcdPOWEROFF_TIMEOUT = 0 to make it safe to read mmu register. */
++ gcmkPRINT("[galcore] %s(%d): MMU Exception!", __FUNCTION__, __LINE__);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpGPUState
++**
++** Dump the GPU debug registers.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ )
++{
++ static gctCONST_STRING _cmdState[] =
++ {
++ "PAR_IDLE_ST", "PAR_DEC_ST", "PAR_ADR0_ST", "PAR_LOAD0_ST",
++ "PAR_ADR1_ST", "PAR_LOAD1_ST", "PAR_3DADR_ST", "PAR_3DCMD_ST",
++ "PAR_3DCNTL_ST", "PAR_3DIDXCNTL_ST", "PAR_INITREQDMA_ST",
++ "PAR_DRAWIDX_ST", "PAR_DRAW_ST", "PAR_2DRECT0_ST", "PAR_2DRECT1_ST",
++ "PAR_2DDATA0_ST", "PAR_2DDATA1_ST", "PAR_WAITFIFO_ST", "PAR_WAIT_ST",
++ "PAR_LINK_ST", "PAR_END_ST", "PAR_STALL_ST"
++ };
++
++ static gctCONST_STRING _cmdDmaState[] =
++ {
++ "CMD_IDLE_ST", "CMD_START_ST", "CMD_REQ_ST", "CMD_END_ST"
++ };
++
++ static gctCONST_STRING _cmdFetState[] =
++ {
++ "FET_IDLE_ST", "FET_RAMVALID_ST", "FET_VALID_ST"
++ };
++
++ static gctCONST_STRING _reqDmaState[] =
++ {
++ "REQ_IDLE_ST", "REQ_WAITIDX_ST", "REQ_CAL_ST"
++ };
++
++ static gctCONST_STRING _calState[] =
++ {
++ "CAL_IDLE_ST", "CAL_LDADR_ST", "CAL_IDXCALC_ST"
++ };
++
++ static gctCONST_STRING _veReqState[] =
++ {
++ "VER_IDLE_ST", "VER_CKCACHE_ST", "VER_MISS_ST"
++ };
++
++ static gcsiDEBUG_REGISTERS _dbgRegs[] =
++ {
++ { "RA", 0x474, 16, 0x448, 16, 0x12344321 },
++ { "TX", 0x474, 24, 0x44C, 16, 0x12211221 },
++ { "FE", 0x470, 0, 0x450, 16, 0xBABEF00D },
++ { "PE", 0x470, 16, 0x454, 16, 0xBABEF00D },
++ { "DE", 0x470, 8, 0x458, 16, 0xBABEF00D },
++ { "SH", 0x470, 24, 0x45C, 16, 0xDEADBEEF },
++ { "PA", 0x474, 0, 0x460, 16, 0x0000AAAA },
++ { "SE", 0x474, 8, 0x464, 16, 0x5E5E5E5E },
++ { "MC", 0x478, 0, 0x468, 16, 0x12345678 },
++ { "HI", 0x478, 8, 0x46C, 16, 0xAAAAAAAA }
++ };
++
++ static gctUINT32 _otherRegs[] =
++ {
++ 0x040, 0x044, 0x04C, 0x050, 0x054, 0x058, 0x05C, 0x060,
++ 0x43c, 0x440, 0x444, 0x414,
++ };
++
++ gceSTATUS status;
++ gckKERNEL kernel;
++ gctUINT32 idle, axi;
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++ gctUINT32 dmaLow, dmaHigh;
++ gctUINT32 cmdState, cmdDmaState, cmdFetState;
++ gctUINT32 dmaReqState, calState, veReqState;
++ gctUINT i;
++ gctUINT pipe, pixelPipes;
++ gctUINT32 control, oldControl;
++ gckOS os = Hardware->os;
++ gceCORE core = Hardware->core;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ kernel = Hardware->kernel;
++
++ gcmkPRINT_N(12, "GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ pixelPipes = Hardware->identity.pixelPipes
++ ? Hardware->identity.pixelPipes
++ : 1;
++
++ /* Reset register values. */
++ idle = axi =
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 =
++ dmaLow = dmaHigh = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkONERROR(_VerifyDMA(
++ os, core, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++ cmdState = dmaState2 & 0x1F;
++ cmdDmaState = (dmaState2 >> 8) & 0x03;
++ cmdFetState = (dmaState2 >> 10) & 0x03;
++ dmaReqState = (dmaState2 >> 12) & 0x03;
++ calState = (dmaState2 >> 14) & 0x03;
++ veReqState = (dmaState2 >> 16) & 0x03;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x004, &idle));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00C, &axi));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x668, &dmaLow));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x66C, &dmaHigh));
++
++ gcmkPRINT_N(0, "**************************\n");
++ gcmkPRINT_N(0, "*** GPU STATE DUMP ***\n");
++ gcmkPRINT_N(0, "**************************\n");
++
++ gcmkPRINT_N(4, " axi = 0x%08X\n", axi);
++
++ gcmkPRINT_N(4, " idle = 0x%08X\n", idle);
++ if ((idle & 0x00000001) == 0) gcmkPRINT_N(0, " FE not idle\n");
++ if ((idle & 0x00000002) == 0) gcmkPRINT_N(0, " DE not idle\n");
++ if ((idle & 0x00000004) == 0) gcmkPRINT_N(0, " PE not idle\n");
++ if ((idle & 0x00000008) == 0) gcmkPRINT_N(0, " SH not idle\n");
++ if ((idle & 0x00000010) == 0) gcmkPRINT_N(0, " PA not idle\n");
++ if ((idle & 0x00000020) == 0) gcmkPRINT_N(0, " SE not idle\n");
++ if ((idle & 0x00000040) == 0) gcmkPRINT_N(0, " RA not idle\n");
++ if ((idle & 0x00000080) == 0) gcmkPRINT_N(0, " TX not idle\n");
++ if ((idle & 0x00000100) == 0) gcmkPRINT_N(0, " VG not idle\n");
++ if ((idle & 0x00000200) == 0) gcmkPRINT_N(0, " IM not idle\n");
++ if ((idle & 0x00000400) == 0) gcmkPRINT_N(0, " FP not idle\n");
++ if ((idle & 0x00000800) == 0) gcmkPRINT_N(0, " TS not idle\n");
++ if ((idle & 0x80000000) != 0) gcmkPRINT_N(0, " AXI low power mode\n");
++
++ if (
++ (dmaAddress1 == dmaAddress2)
++ && (dmaState1 == dmaState2)
++ )
++ {
++ gcmkPRINT_N(0, " DMA appears to be stuck at this address:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ }
++ else
++ {
++ if (dmaAddress1 == dmaAddress2)
++ {
++ gcmkPRINT_N(0, " DMA address is constant, but state is changing:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState2);
++ }
++ else
++ {
++ gcmkPRINT_N(0, " DMA is running; known addresses are:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress2);
++ }
++ }
++ gcmkPRINT_N(4, " dmaLow = 0x%08X\n", dmaLow);
++ gcmkPRINT_N(4, " dmaHigh = 0x%08X\n", dmaHigh);
++ gcmkPRINT_N(4, " dmaState = 0x%08X\n", dmaState2);
++ gcmkPRINT_N(8, " command state = %d (%s)\n", cmdState, _cmdState [cmdState]);
++ gcmkPRINT_N(8, " command DMA state = %d (%s)\n", cmdDmaState, _cmdDmaState[cmdDmaState]);
++ gcmkPRINT_N(8, " command fetch state = %d (%s)\n", cmdFetState, _cmdFetState[cmdFetState]);
++ gcmkPRINT_N(8, " DMA request state = %d (%s)\n", dmaReqState, _reqDmaState[dmaReqState]);
++ gcmkPRINT_N(8, " cal state = %d (%s)\n", calState, _calState [calState]);
++ gcmkPRINT_N(8, " VE request state = %d (%s)\n", veReqState, _veReqState [veReqState]);
++
++ /* Record control. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &oldControl);
++
++ for (pipe = 0; pipe < pixelPipes; pipe++)
++ {
++ gcmkPRINT_N(4, " Debug registers of pipe[%d]:\n", pipe);
++
++ /* Switch pipe. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &control);
++ control &= ~(0xF << 20);
++ control |= (pipe << 20);
++ gckOS_WriteRegisterEx(os, core, 0x0, control);
++
++ for (i = 0; i < gcmCOUNTOF(_dbgRegs); i += 1)
++ {
++ gcmkONERROR(_DumpDebugRegisters(os, core, &_dbgRegs[i]));
++ }
++
++ gcmkPRINT_N(0, " Other Registers:\n");
++ for (i = 0; i < gcmCOUNTOF(_otherRegs); i += 1)
++ {
++ gctUINT32 read;
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, _otherRegs[i], &read));
++ gcmkPRINT_N(12, " [0x%04X] 0x%08X\n", _otherRegs[i], read);
++ }
++ }
++
++ if (kernel->hardware->identity.chipFeatures & (1 << 4))
++ {
++ gctUINT32 read0, read1, write;
++
++ read0 = read1 = write = 0;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x43C, &read0));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x440, &read1));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x444, &write));
++
++ gcmkPRINT_N(4, " read0 = 0x%08X\n", read0);
++ gcmkPRINT_N(4, " read1 = 0x%08X\n", read1);
++ gcmkPRINT_N(4, " write = 0x%08X\n", write);
++ }
++
++ /* Restore control. */
++ gckOS_WriteRegisterEx(os, core, 0x0, oldControl);
++
++ /* dump stack. */
++ gckOS_DumpCallStack(os);
++
++OnError:
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++#if gcdFRAME_DB
++static gceSTATUS
++gckHARDWARE_ReadPerformanceRegister(
++ IN gckHARDWARE Hardware,
++ IN gctUINT PerformanceAddress,
++ IN gctUINT IndexAddress,
++ IN gctUINT IndexShift,
++ IN gctUINT Index,
++ OUT gctUINT32_PTR Value
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x PerformanceAddress=0x%x IndexAddress=0x%x "
++ "IndexShift=%u Index=%u",
++ Hardware, PerformanceAddress, IndexAddress, IndexShift,
++ Index);
++
++ /* Write the index. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ IndexAddress,
++ Index << IndexShift));
++
++ /* Read the register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ PerformanceAddress,
++ Value));
++
++ /* Test for reset. */
++ if (Index == 15)
++ {
++ /* Index another register to get out of reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, IndexAddress, 0));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=0x%x", *Value);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ )
++{
++ gceSTATUS status;
++ gctUINT i, clock;
++ gcsHAL_FRAME_INFO info;
++#if gcdFRAME_DB_RESET
++ gctUINT reset;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Get profile tick. */
++ gcmkONERROR(gckOS_GetProfileTick(&info.ticks));
++
++ /* Read SH counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 4,
++ &info.shaderCycles));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 9,
++ &info.vsInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 12,
++ &info.vsTextureCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 7,
++ &info.psInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 14,
++ &info.psTextureCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read PA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 3,
++ &info.vertexCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 4,
++ &info.primitiveCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 7,
++ &info.rejectedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 8,
++ &info.culledPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 6,
++ &info.clippedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 5,
++ &info.outPrimitives));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 15,
++ &reset));
++#endif
++
++ /* Read RA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 3,
++ &info.inPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 11,
++ &info.culledQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 1,
++ &info.totalQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 2,
++ &info.quadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 0,
++ &info.totalPixelCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Read TX counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 0,
++ &info.bilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 1,
++ &info.trilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 8,
++ &info.txHitCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 9,
++ &info.txMissCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 6,
++ &info.txBytes8));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* Read cycle registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &info.cycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &info.idleCycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &info.mcCycles[i]));
++
++ /* Read bandwidth registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0005C,
++ &info.readRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &info.readBytes8[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00050,
++ &info.writeRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &info.writeBytes8[i]));
++
++ /* Read PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 0,
++ &info.colorKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 2,
++ &info.colorDrawn[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 1,
++ &info.depthKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 3,
++ &info.depthDrawn[i]));
++ }
++
++ /* Zero out remaining reserved counters. */
++ for (; i < 8; ++i)
++ {
++ info.readBytes8[i] = 0;
++ info.writeBytes8[i] = 0;
++ info.cycles[i] = 0;
++ info.idleCycles[i] = 0;
++ info.mcCycles[i] = 0;
++ info.readRequests[i] = 0;
++ info.writeRequests[i] = 0;
++ info.colorKilled[i] = 0;
++ info.colorDrawn[i] = 0;
++ info.depthKilled[i] = 0;
++ info.depthDrawn[i] = 0;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset cycle and bandwidth counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 1));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ 0));
++
++#if gcdFRAME_DB_RESET
++ /* Reset PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Copy to user. */
++ gcmkONERROR(gckOS_CopyToUserData(Hardware->os,
++ &info,
++ FrameInfo,
++ gcmSIZEOF(info)));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if gcdDVFS
++#define READ_FROM_EATER1 0
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ )
++{
++ gctUINT32 debug1;
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Load != gcvNULL);
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00110,
++ Load));
++#if READ_FROM_EATER1
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00134,
++ Load));
++#endif
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00114,
++ &debug1));
++
++ /* Patch result of 0x110 with result of 0x114. */
++ if ((debug1 & 0xFF) == 1)
++ {
++ *Load &= ~0xFF;
++ *Load |= 1;
++ }
++
++ if (((debug1 & 0xFF00) >> 8) == 1)
++ {
++ *Load &= ~(0xFF << 8);
++ *Load |= 1 << 8;
++ }
++
++ if (((debug1 & 0xFF0000) >> 16) == 1)
++ {
++ *Load &= ~(0xFF << 16);
++ *Load |= 1 << 16;
++ }
++
++ if (((debug1 & 0xFF000000) >> 24) == 1)
++ {
++ *Load &= ~(0xFF << 24);
++ *Load |= 1 << 24;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 Frequency
++ )
++{
++ gceSTATUS status;
++ gctUINT32 period;
++ gctUINT32 eater;
++
++#if READ_FROM_EATER1
++ gctUINT32 period1;
++ gctUINT32 eater1;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%X Frequency=%d", Hardware, Frequency);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ period = 0;
++
++ while((64 << period) < (gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000) )
++ {
++ period++;
++ }
++
++#if READ_FROM_EATER1
++ /*
++ * Period = F * 1000 * 1000 / (60 * 16 * 1024);
++ */
++ period1 = Frequency * 6250 / 6114;
++#endif
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ /* Get current configure. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &eater));
++
++ /* Change period. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (eater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (period) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))));
++
++#if READ_FROM_EATER1
++ /* Config eater1. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ &eater1));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ ((((gctUINT32) (eater1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))) | (((gctUINT32) ((gctUINT32) (period1) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16)))));
++#endif
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 data;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "DVFS Configure=0x%X",
++ data);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h linux-openelec/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,136 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_h_
++#define __gc_hal_kernel_hardware_h_
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_hardware_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* gckHARDWARE object. */
++struct _gckHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gctKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Pointer to gctOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Chip characteristics. */
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctBOOL allowFastClear;
++ gctBOOL allowCompression;
++ gctUINT32 powerBaseAddress;
++ gctBOOL extraEventStates;
++
++ /* Big endian */
++ gctBOOL bigEndian;
++
++ /* Chip status */
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gctUINT32 lastWaitLink;
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER globalSemaphore;
++
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++
++ gctUINT32 mmuVersion;
++
++ /* Type */
++ gceHARDWARE_TYPE type;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctPOINTER pageTableDirty;
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ /* FSCALE_VAL when gcvPOWER_ON. */
++ gctUINT32 powerOnFscaleVal;
++#endif
++
++#if gcdLINK_QUEUE_SIZE
++ struct _gckLINKQUEUE linkQueue;
++#endif
++
++ gctBOOL powerManagement;
++ gctBOOL gpuProfiler;
++};
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ );
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ );
++
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_hardware_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/config linux-openelec/drivers/mxc/gpu-viv/v4/config
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/config 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/config 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,38 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++ARCH_TYPE ?= arm
++SDK_DIR ?= $(AQROOT)/build/sdk
++USE_3D_VG ?= 1
++FORCE_ALL_VIDEO_MEMORY_CACHED ?= 0
++NONPAGED_MEMORY_CACHEABLE ?= 0
++NONPAGED_MEMORY_BUFFERABLE ?= 1
++CACHE_FUNCTION_UNIMPLEMENTED ?= 0
++VIVANTE_ENABLE_VG ?= 1
++NO_USER_DIRECT_ACCESS_FROM_KERNEL ?= 1
++VIVANTE_NO_3D ?= 0
++ENABLE_OUTER_CACHE_PATCH ?= 1
++USE_BANK_ALIGNMENT ?= 1
++BANK_BIT_START ?= 13
++BANK_BIT_END ?= 15
++BANK_CHANNEL_BIT ?= 12
++ENABLE_GPU_CLOCK_BY_DRIVER = 1
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3967 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/*******************************************************************************
++***** Version Signature *******************************************************/
++
++#define _gcmTXT2STR(t) #t
++#define gcmTXT2STR(t) _gcmTXT2STR(t)
++const char * _VERSION = "\n\0$VERSION$"
++ gcmTXT2STR(gcvVERSION_MAJOR) "."
++ gcmTXT2STR(gcvVERSION_MINOR) "."
++ gcmTXT2STR(gcvVERSION_PATCH) ":"
++ gcmTXT2STR(gcvVERSION_BUILD) "$\n";
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define gcmDEFINE2TEXT(d) #d
++gctCONST_STRING _DispatchText[] =
++{
++ gcmDEFINE2TEXT(gcvHAL_QUERY_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_IDENTITY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_LOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNLOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_EVENT_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_USER_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_DATA),
++ gcmDEFINE2TEXT(gcvHAL_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_STALL),
++ gcmDEFINE2TEXT(gcvHAL_READ_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_GET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_SET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS),
++#if VIVANTE_PROFILER_PERDRAW
++ gcmDEFINE2TEXT(gcvHAL_READ_PROFILER_REGISTER_SETTING),
++#endif
++ gcmDEFINE2TEXT(gcvHAL_PROFILE_REGISTERS_2D),
++ gcmDEFINE2TEXT(gcvHAL_SET_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_GET_BASE_ADDRESS),
++ gcmDEFINE2TEXT(gcvHAL_SET_IDLE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_KERNEL_SETTINGS),
++ gcmDEFINE2TEXT(gcvHAL_RESET),
++ gcmDEFINE2TEXT(gcvHAL_MAP_PHYSICAL),
++ gcmDEFINE2TEXT(gcvHAL_DEBUG),
++ gcmDEFINE2TEXT(gcvHAL_CACHE),
++ gcmDEFINE2TEXT(gcvHAL_TIMESTAMP),
++ gcmDEFINE2TEXT(gcvHAL_DATABASE),
++ gcmDEFINE2TEXT(gcvHAL_VERSION),
++ gcmDEFINE2TEXT(gcvHAL_CHIP_INFO),
++ gcmDEFINE2TEXT(gcvHAL_ATTACH),
++ gcmDEFINE2TEXT(gcvHAL_DETACH)
++};
++#endif
++
++#if gcdENABLE_RECOVERY
++void
++_ResetFinishFunction(
++ gctPOINTER Data
++ )
++{
++ gckKERNEL kernel = (gckKERNEL)Data;
++
++ gckOS_AtomSet(kernel->os, kernel->resetAtom, 0);
++}
++#endif
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** IN gckDB SharedDB,
++** Pointer to a shared DB.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ )
++{
++ gckKERNEL kernel = gcvNULL;
++ gceSTATUS status;
++ gctSIZE_T i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ /* Allocate the gckKERNEL object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckKERNEL),
++ &pointer));
++
++ kernel = pointer;
++
++ /* Zero the object pointers. */
++ kernel->hardware = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->eventObj = gcvNULL;
++ kernel->mmu = gcvNULL;
++#if gcdDVFS
++ kernel->dvfs = gcvNULL;
++#endif
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->core = Core;
++
++
++ if (SharedDB == gcvNULL)
++ {
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckDB),
++ &pointer));
++
++ kernel->db = pointer;
++ kernel->dbCreated = gcvTRUE;
++ kernel->db->freeDatabase = gcvNULL;
++ kernel->db->freeRecord = gcvNULL;
++ kernel->db->dbMutex = gcvNULL;
++ kernel->db->lastDatabase = gcvNULL;
++ kernel->db->idleTime = 0;
++ kernel->db->lastIdle = 0;
++ kernel->db->lastSlowdown = 0;
++
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ kernel->db->db[i] = gcvNULL;
++ }
++
++ /* Construct a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->dbMutex));
++
++ /* Construct a id-pointer database. */
++ gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->pointerDatabase));
++
++ /* Construct a id-pointer database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->pointerDatabaseMutex));
++ }
++ else
++ {
++ kernel->db = SharedDB;
++ kernel->dbCreated = gcvFALSE;
++ }
++
++ for (i = 0; i < gcmCOUNTOF(kernel->timers); ++i)
++ {
++ kernel->timers[i].startTime = 0;
++ kernel->timers[i].stopTime = 0;
++ }
++
++ kernel->timeOut = gcdGPU_TIMEOUT;
++
++ /* Save context. */
++ kernel->context = Context;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ kernel->virtualBufferHead =
++ kernel->virtualBufferTail = gcvNULL;
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Os, (gctPOINTER)&kernel->virtualBufferLock));
++#endif
++
++ /* Construct atom holding number of clients. */
++ kernel->atomClients = gcvNULL;
++ gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients));
++
++#if gcdENABLE_VG
++ kernel->vg = gcvNULL;
++
++ if (Core == gcvCORE_VG)
++ {
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckVGKERNEL_Construct(Os, Context, kernel, &kernel->vg));
++ }
++ else
++#endif
++ {
++ /* Construct the gckHARDWARE object. */
++ gcmkONERROR(
++ gckHARDWARE_Construct(Os, kernel->core, &kernel->hardware));
++
++ /* Set pointer to gckKERNEL object in gckHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ /* Initialize the hardware. */
++ gcmkONERROR(
++ gckHARDWARE_InitializeHardware(kernel->hardware));
++
++ /* Construct the gckCOMMAND object. */
++ gcmkONERROR(
++ gckCOMMAND_Construct(kernel, &kernel->command));
++
++ /* Construct the gckEVENT object. */
++ gcmkONERROR(
++ gckEVENT_Construct(kernel, &kernel->eventObj));
++
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));
++
++#if gcdENABLE_RECOVERY
++ gcmkONERROR(
++ gckOS_AtomConstruct(Os, &kernel->resetAtom));
++
++ gcmkVERIFY_OK(
++ gckOS_CreateTimer(Os,
++ (gctTIMERFUNCTION)_ResetFinishFunction,
++ (gctPOINTER)kernel,
++ &kernel->resetFlagClearTimer));
++ kernel->resetTimeStamp = 0;
++#endif
++
++#if gcdDVFS
++ if (gckHARDWARE_IsFeatureAvailable(kernel->hardware,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING))
++ {
++ gcmkONERROR(gckDVFS_Construct(kernel->hardware, &kernel->dvfs));
++ gcmkONERROR(gckDVFS_Start(kernel->dvfs));
++ }
++#endif
++ }
++
++ spin_lock_init(&kernel->irq_lock);
++
++#if VIVANTE_PROFILER
++ /* Initialize profile setting */
++ kernel->profileEnable = gcvFALSE;
++ kernel->profileCleanRegister = gcvTRUE;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkONERROR(gckOS_CreateSyncTimeline(Os, &kernel->timeline));
++#endif
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (kernel != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Core != gcvCORE_VG)
++#endif
++ {
++ if (kernel->eventObj != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_Destroy(kernel->eventObj));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(kernel->hardware->os,
++ kernel->hardware->core,
++ gcvFALSE,
++ gcvFALSE));
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(kernel->hardware));
++ }
++ }
++
++ if (kernel->atomClients != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->atomClients));
++ }
++
++#if gcdENABLE_RECOVERY
++ if (kernel->resetAtom != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->resetAtom));
++ }
++
++ if (kernel->resetFlagClearTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, kernel->resetFlagClearTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, kernel->resetFlagClearTimer));
++ }
++#endif
++
++ if (kernel->dbCreated && kernel->db != gcvNULL)
++ {
++ if (kernel->db->dbMutex != gcvNULL)
++ {
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->db->dbMutex));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel->db));
++ }
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ if (kernel->virtualBufferLock != gcvNULL)
++ {
++ /* Destroy the virtual command buffer mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->virtualBufferLock));
++ }
++#endif
++
++#if gcdDVFS
++ if (kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (kernel->timeline)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Os, kernel->timeline));
++ }
++#endif
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel));
++ }
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy an gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ )
++{
++ gctSIZE_T i;
++ gcsDATABASE_PTR database, databaseNext;
++ gcsDATABASE_RECORD_PTR record, recordNext;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->debugMutex));
++#endif
++
++ /* Destroy the database. */
++ if (Kernel->dbCreated)
++ {
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ if (Kernel->db->db[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyProcessDB(Kernel, Kernel->db->db[i]->processID));
++ }
++ }
++
++ /* Free all databases. */
++ for (database = Kernel->db->freeDatabase;
++ database != gcvNULL;
++ database = databaseNext)
++ {
++ databaseNext = database->next;
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, database));
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db->lastDatabase));
++ }
++
++ /* Free all database records. */
++ for (record = Kernel->db->freeRecord; record != gcvNULL; record = recordNext)
++ {
++ recordNext = record->next;
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->dbMutex));
++
++
++ /* Destroy id-pointer database. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->pointerDatabase));
++
++ /* Destroy id-pointer database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg)
++ {
++ gcmkVERIFY_OK(gckVGKERNEL_Destroy(Kernel->vg));
++ }
++ else
++#endif
++ {
++ /* Destroy the gckMMU object. */
++ gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu));
++
++ /* Destroy the gckCOMMNAND object. */
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command));
++
++ /* Destroy the gckEVENT object. */
++ gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->eventObj));
++
++ /* Destroy the gckHARDWARE object. */
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware));
++
++#if gcdENABLE_RECOVERY
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->resetAtom));
++
++ if (Kernel->resetFlagClearTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Kernel->os, Kernel->resetFlagClearTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Kernel->os, Kernel->resetFlagClearTimer));
++ }
++#endif
++ }
++
++ /* Detsroy the client atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients));
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->virtualBufferLock));
++#endif
++
++#if gcdDVFS
++ if (Kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(Kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(Kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Kernel->os, Kernel->timeline));
++#endif
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/oom.h>
++#include <linux/sched.h>
++#include <linux/notifier.h>
++
++extern struct task_struct *lowmem_deathpending;
++static unsigned long lowmem_deathpending_timeout;
++
++static int force_contiguous_lowmem_shrink(IN gckKERNEL Kernel)
++{
++ struct task_struct *p;
++ struct task_struct *selected = NULL;
++ int tasksize;
++ int ret = -1;
++ int min_adj = 0;
++ int selected_tasksize = 0;
++ int selected_oom_adj;
++ /*
++ * If we already have a death outstanding, then
++ * bail out right away; indicating to vmscan
++ * that we have nothing further to offer on
++ * this pass.
++ *
++ */
++ if (lowmem_deathpending &&
++ time_before_eq(jiffies, lowmem_deathpending_timeout))
++ return 0;
++ selected_oom_adj = min_adj;
++
++ read_lock(&tasklist_lock);
++ for_each_process(p) {
++ struct mm_struct *mm;
++ struct signal_struct *sig;
++ gcuDATABASE_INFO info;
++ int oom_adj;
++
++ task_lock(p);
++ mm = p->mm;
++ sig = p->signal;
++ if (!mm || !sig) {
++ task_unlock(p);
++ continue;
++ }
++ oom_adj = sig->oom_adj;
++ if (oom_adj < min_adj) {
++ task_unlock(p);
++ continue;
++ }
++
++ tasksize = 0;
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_VIDEO_MEMORY, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_CONTIGUOUS, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++
++ task_unlock(p);
++
++ if (tasksize <= 0)
++ continue;
++
++ gckOS_Print("<gpu> pid %d (%s), adj %d, size %d \n", p->pid, p->comm, oom_adj, tasksize);
++
++ if (selected) {
++ if (oom_adj < selected_oom_adj)
++ continue;
++ if (oom_adj == selected_oom_adj &&
++ tasksize <= selected_tasksize)
++ continue;
++ }
++ selected = p;
++ selected_tasksize = tasksize;
++ selected_oom_adj = oom_adj;
++ }
++ if (selected) {
++ gckOS_Print("<gpu> send sigkill to %d (%s), adj %d, size %d\n",
++ selected->pid, selected->comm,
++ selected_oom_adj, selected_tasksize);
++ lowmem_deathpending = selected;
++ lowmem_deathpending_timeout = jiffies + HZ;
++ force_sig(SIGKILL, selected);
++ ret = 0;
++ }
++ read_unlock(&tasklist_lock);
++ return ret;
++}
++
++#endif
++
++/*******************************************************************************
++**
++** _AllocateMemory
++**
++** Private function to walk all required memory pools to allocate the requested
++** amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++static gceSTATUS
++_AllocateMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++ gctINT loopCount;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctBOOL tileStatusInVirtual;
++ gctBOOL forceContiguous = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x *Pool=%d Bytes=%lu Alignment=%lu Type=%d",
++ Kernel, *Pool, Bytes, Alignment, Type);
++
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes != 0);
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++_AllocateMemory_Retry:
++#endif
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT_FORCE_CONTIGUOUS:
++ forceContiguous = gcvTRUE;
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_CONTIGUOUS:
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_DEFAULT_FORCE_CONTIGUOUS_CACHEABLE:
++ pool = gcvPOOL_CONTIGUOUS;
++ loopCount = 1;
++ forceContiguous = gcvTRUE;
++ break;
++
++ default:
++ loopCount = 1;
++ break;
++ }
++
++ while (loopCount-- > 0)
++ {
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkONERROR(
++ gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, &node));
++
++ /* Success. */
++ break;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++#if gcdCONTIGUOUS_SIZE_LIMIT
++ if (Bytes > gcdCONTIGUOUS_SIZE_LIMIT && forceContiguous == gcvFALSE)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ }
++ else
++#endif
++ {
++ /* Create a gcuVIDMEM_NODE from contiguous memory. */
++ status = gckVIDMEM_ConstructVirtual(Kernel, gcvTRUE, Bytes, &node);
++ }
++
++ if (gcmIS_SUCCESS(status) || forceContiguous == gcvTRUE)
++ {
++ /* Memory allocated. */
++ if(node && forceContiguous == gcvTRUE)
++ {
++ gctUINT32 physAddr=0;
++ gctUINT32 baseAddress = 0;
++
++ gcmkONERROR(
++ gckOS_LockPages(Kernel->os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ gcvFALSE,
++ &node->Virtual.logical,
++ &node->Virtual.pageCount));
++
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os,
++ node->Virtual.logical,
++ &physAddr));
++
++ gcmkONERROR(
++ gckOS_UnlockPages(Kernel->os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ node->Virtual.logical));
++
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++
++ gcmkASSERT(physAddr >= baseAddress);
++
++ /* Subtract baseAddress to get a GPU address used for programming. */
++ physAddr -= baseAddress;
++
++ if((physAddr & 0x80000000) || ((physAddr + Bytes) & 0x80000000))
++ {
++ gckOS_Print("gpu virtual memory 0x%x cannot be allocated in force contiguous request!\n", physAddr);
++
++ gcmkONERROR(gckVIDMEM_Free(node));
++
++ node = gcvNULL;
++ }
++ }
++
++ break;
++ }
++ }
++
++ else
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++#if gcdUSE_VIDMEM_PER_PID
++ gctUINT32 pid;
++ gckOS_GetProcessID(&pid);
++
++ status = gckKERNEL_GetVideoMemoryPoolPid(Kernel, pool, pid, &videoMemory);
++ if (status == gcvSTATUS_NOT_FOUND)
++ {
++ /* Create VidMem pool for this process. */
++ status = gckKERNEL_CreateVideoMemoryPoolPid(Kernel, pool, pid, &videoMemory);
++ }
++#else
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++#endif
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Allocate memory. */
++ status = gckVIDMEM_AllocateLinear(videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ &node);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Memory allocated. */
++ node->VidMem.pool = pool;
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++
++ else
++ if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++
++ else
++ if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to contiguous memory. */
++ pool = gcvPOOL_CONTIGUOUS;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++ tileStatusInVirtual =
++ gckHARDWARE_IsFeatureAvailable(Kernel->hardware,
++ gcvFEATURE_MC20);
++
++ if (Type == gcvSURF_TILE_STATUS && tileStatusInVirtual != gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Advance to virtual memory. */
++ pool = gcvPOOL_VIRTUAL;
++ }
++
++ else
++ {
++ /* Out of pools. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ if(forceContiguous == gcvTRUE)
++ {
++ if(force_contiguous_lowmem_shrink(Kernel) == 0)
++ {
++ /* Sleep 1 millisecond. */
++ gckOS_Delay(gcvNULL, 1);
++ goto _AllocateMemory_Retry;
++ }
++ }
++#endif
++ /* Nothing allocated. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Return node and pool used for allocation. */
++ *Node = node;
++ *Pool = pool;
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Pool=%d *Node=0x%x", *Pool, *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL FromUser
++** whether the call is from the user space.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctSIZE_T bytes;
++ gcuVIDMEM_NODE_PTR node;
++ gctBOOL locked = gcvFALSE;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctPOINTER logical = gcvNULL;
++ gctPOINTER info = gcvNULL;
++ gckCONTEXT context = gcvNULL;
++ gctUINT32 address;
++ gctUINT32 processID;
++ gckKERNEL kernel = Kernel;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gctBOOL asynchronous;
++ gctPOINTER paddr = gcvNULL;
++#if !USE_NEW_LINUX_SIGNAL
++ gctSIGNAL signal;
++#endif
++ gceSURF_TYPE type;
++
++ gcmkHEADER_ARG("Kernel=0x%x FromUser=%d Interface=0x%x",
++ Kernel, FromUser, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Dispatching command %d (%s)",
++ Interface->command, _DispatchText[Interface->command]);
++#endif
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_AcquireMutex(Kernel->os, Kernel->debugMutex, gcvINFINITE);
++#endif
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++#endif
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkONERROR(
++ gckOS_GetBaseAddress(Kernel->os,
++ &Interface->u.GetBaseAddress.baseAddress));
++ break;
++
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkONERROR(
++ gckHARDWARE_QueryChipIdentity(
++ Kernel->hardware,
++ &Interface->u.QueryChipIdentity));
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.MapMemory.physical);
++
++ /* Map memory. */
++ gcmkONERROR(
++ gckKERNEL_MapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes,
++ &logical));
++
++ Interface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ logical,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes));
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.UnmapMemory.physical);
++
++ /* Unmap memory. */
++ gcmkONERROR(
++ gckKERNEL_UnmapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.UnmapMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++ break;
++
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateNonPagedMemory.bytes;
++
++ /* Allocate non-paged memory. */
++ gcmkONERROR(
++ gckOS_AllocateNonPagedMemory(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateNonPagedMemory.bytes = bytes;
++ Interface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateNonPagedMemory.physical),
++ bytes));
++
++ break;
++
++ case gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER:
++#if gcdVIRTUAL_COMMAND_BUFFER
++ bytes = (gctSIZE_T) Interface->u.AllocateVirtualCommandBuffer.bytes;
++
++ gcmkONERROR(
++ gckKERNEL_AllocateVirtualCommandBuffer(
++ Kernel,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateVirtualCommandBuffer.bytes = bytes;
++ Interface->u.AllocateVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateVirtualCommandBuffer.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_COMMAND_BUFFER,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateVirtualCommandBuffer.physical),
++ bytes));
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Free non-paged memory. */
++ gcmkONERROR(
++ gckOS_FreeNonPagedMemory(Kernel->os,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical),
++ Interface->u.FreeNonPagedMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeNonPagedMemory.physical);
++
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateContiguousMemory.bytes;
++
++ /* Allocate contiguous memory. */
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateContiguousMemory.bytes = bytes;
++ Interface->u.AllocateContiguousMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateContiguousMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.AllocateContiguousMemory.logical),
++ &Interface->u.AllocateContiguousMemory.address));
++
++ gcmkVERIFY_OK(gckKERNEL_AddProcessDB(
++ Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateContiguousMemory.physical),
++ bytes));
++
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++
++ /* Free contiguous memory. */
++ gcmkONERROR(
++ gckOS_FreeContiguous(Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ Interface->u.FreeContiguousMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeContiguousMemory.physical);
++
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ type = Interface->u.AllocateLinearVideoMemory.type;
++
++ /* Allocate memory. */
++ gcmkONERROR(
++ _AllocateMemory(Kernel,
++ &Interface->u.AllocateLinearVideoMemory.pool,
++ Interface->u.AllocateLinearVideoMemory.bytes,
++ Interface->u.AllocateLinearVideoMemory.alignment,
++ Interface->u.AllocateLinearVideoMemory.type,
++ &node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ bytes = node->VidMem.bytes;
++ node->VidMem.type = type;
++
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_RESERVED,
++ node,
++ gcvNULL,
++ bytes));
++ }
++ else
++ {
++ bytes = node->Virtual.bytes;
++ node->Virtual.type = type;
++
++ if(node->Virtual.contiguous)
++ {
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node,
++ gcvNULL,
++ bytes));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node,
++ gcvNULL,
++ bytes));
++ }
++
++ }
++
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node,
++ gcvNULL,
++ bytes));
++
++ /* Get the node. */
++ Interface->u.AllocateLinearVideoMemory.node = gcmPTR_TO_UINT64(node);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.FreeVideoMemory.node);
++#ifdef __QNXNTO__
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM
++ && node->VidMem.logical != gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ node->VidMem.logical,
++ processID,
++ node->VidMem.bytes));
++ node->VidMem.logical = gcvNULL;
++ }
++#endif
++ /* Free video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Free(node));
++
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_RESERVED,
++ node));
++ }
++ else if(node->Virtual.contiguous)
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node));
++ }
++
++ break;
++
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node);
++
++ /* Lock video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Lock(Kernel,
++ node,
++ Interface->u.LockVideoMemory.cacheable,
++ &Interface->u.LockVideoMemory.address));
++
++ locked = gcvTRUE;
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Map video memory address into user space. */
++#ifdef __QNXNTO__
++ if (node->VidMem.logical == gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ processID,
++ node->VidMem.bytes,
++ &node->VidMem.logical));
++ }
++ gcmkASSERT(node->VidMem.logical != gcvNULL);
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical);
++#else
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ &logical));
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(logical);
++#endif
++ }
++ else
++ {
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++
++#if gcdSECURE_USER
++ /* Return logical address as physical address. */
++ Interface->u.LockVideoMemory.address =
++ Interface->u.LockVideoMemory.memory;
++#endif
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node,
++ gcvNULL,
++ 0));
++
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ /* Unlock video memory. */
++ node = gcmUINT64_TO_PTR(Interface->u.UnlockVideoMemory.node);
++
++#if gcdSECURE_USER
++ /* Save node information before it disappears. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Unlock(Kernel,
++ node,
++ Interface->u.UnlockVideoMemory.type,
++ &Interface->u.UnlockVideoMemory.asynchroneous));
++
++#if gcdSECURE_USER
++ /* Flush the translation cache for virtual surfaces. */
++ if (logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++ if (Interface->u.UnlockVideoMemory.asynchroneous == gcvFALSE)
++ {
++ /* There isn't a event to unlock this node, remove record now */
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node));
++ }
++ break;
++
++ case gcvHAL_EVENT_COMMIT:
++ /* Commit an event queue. */
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue)));
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID));
++ break;
++
++ case gcvHAL_STALL:
++ /* Stall the command queue. */
++ gcmkONERROR(gckCOMMAND_Stall(Kernel->command, gcvFALSE));
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkONERROR(
++ gckOS_MapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ Interface->u.MapUserMemory.physical,
++ (gctSIZE_T) Interface->u.MapUserMemory.size,
++ &info,
++ &Interface->u.MapUserMemory.address));
++
++ Interface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.MapUserMemory.info),
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ (gctSIZE_T) Interface->u.MapUserMemory.size));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ address = Interface->u.UnmapUserMemory.address;
++ info = gcmNAME_TO_PTR(Interface->u.UnmapUserMemory.info);
++
++ /* Unmap user memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) Interface->u.UnmapUserMemory.size,
++ info,
++ address));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ Interface->u.UnmapUserMemory.size));
++#endif
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.UnmapUserMemory.info)));
++
++ gcmRELEASE_NAME(Interface->u.UnmapUserMemory.info);
++
++ break;
++
++#if !USE_NEW_LINUX_SIGNAL
++ case gcvHAL_USER_SIGNAL:
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkONERROR(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkONERROR(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++#if gcdGPU_TIMEOUT
++ if (Interface->u.UserSignal.wait == gcvINFINITE)
++ {
++ gckHARDWARE hardware;
++ gctUINT32 timer = 0;
++
++ for(;;)
++ {
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ gcdGPU_ADVANCETIMER);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(
++ gckOS_SignalQueryHardware(Kernel->os,
++ (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id,
++ &hardware));
++
++ if (hardware)
++ {
++ /* This signal is bound to a hardware,
++ ** so the timeout is limited by Kernel->timeOut.
++ */
++ timer += gcdGPU_ADVANCETIMER;
++ }
++
++ if (timer >= Kernel->timeOut)
++ {
++ gcmkONERROR(
++ gckOS_Broadcast(Kernel->os,
++ hardware,
++ gcvBROADCAST_GPU_STUCK));
++
++ timer = 0;
++
++ /* If a few process try to reset GPU, only one
++ ** of them can do the real reset, other processes
++ ** still need to wait for this signal is triggered,
++ ** which menas reset is finished.
++ */
++ continue;
++ }
++ }
++ else
++ {
++ /* Bail out on other error. */
++ gcmkONERROR(status);
++
++ /* Wait for signal successfully. */
++ break;
++ }
++ }
++ }
++ else
++#endif
++ {
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++ }
++
++ break;
++
++ case gcvUSER_SIGNAL_MAP:
++ gcmkONERROR(
++ gckOS_MapSignal(Kernel->os,
++ (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id,
++ (gctHANDLE)(gctUINTPTR_T)processID,
++ &signal));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_UNMAP:
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ break;
++#endif
++
++ case gcvHAL_SET_POWER_MANAGEMENT_STATE:
++ /* Set the power management state. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(
++ Kernel->hardware,
++ Interface->u.SetPowerManagement.state));
++ break;
++
++ case gcvHAL_QUERY_POWER_MANAGEMENT_STATE:
++ /* Chip is not idle. */
++ Interface->u.QueryPowerManagement.isIdle = gcvFALSE;
++
++ /* Query the power management state. */
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
++ Kernel->hardware,
++ &Interface->u.QueryPowerManagement.state));
++
++ /* Query the idle state. */
++ gcmkONERROR(
++ gckHARDWARE_QueryIdle(Kernel->hardware,
++ &Interface->u.QueryPowerManagement.isIdle));
++ break;
++
++ case gcvHAL_READ_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE);
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Read a register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(
++ Kernel->os,
++ Kernel->core,
++ Interface->u.ReadRegisterData.address,
++ &Interface->u.ReadRegisterData.data));
++ }
++ else
++ {
++ /* Chip is in power-state. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ }
++#else
++ /* No access from user land to read registers. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_WRITE_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE);
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Write a register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Kernel->os,
++ Kernel->core,
++ Interface->u.WriteRegisterData.address,
++ Interface->u.WriteRegisterData.data));
++ }
++ else
++ {
++ /* Chip is in power-state. */
++ Interface->u.WriteRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ }
++#else
++ /* No access from user land to write registers. */
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_READ_ALL_PROFILE_REGISTERS:
++#if VIVANTE_PROFILER && VIVANTE_PROFILER_CONTEXT
++ /* Read profile data according to the context. */
++ gcmkONERROR(
++ gckHARDWARE_QueryContextProfile(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ gcmNAME_TO_PTR(Interface->u.RegisterProfileData.context),
++ &Interface->u.RegisterProfileData.counters));
++#elif VIVANTE_PROFILER
++ /* Read all 3D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_QueryProfileRegisters(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ &Interface->u.RegisterProfileData.counters));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_PROFILE_REGISTERS_2D:
++#if VIVANTE_PROFILER
++ /* Read all 2D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_ProfileEngine2D(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.RegisterProfileData2D.hwProfile2D)));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_GET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Get profile setting */
++ Interface->u.GetProfileSetting.enable = Kernel->profileEnable;
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++ case gcvHAL_SET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Set profile setting */
++ if(Kernel->hardware->gpuProfiler)
++ Kernel->profileEnable = Interface->u.SetProfileSetting.enable;
++ else
++ {
++ status = gcvSTATUS_NOT_SUPPORTED;
++ break;
++ }
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++
++#if VIVANTE_PROFILER_PERDRAW
++ case gcvHAL_READ_PROFILER_REGISTER_SETTING:
++ #if VIVANTE_PROFILER
++ Kernel->profileCleanRegister = Interface->u.SetProfilerRegisterClear.bclear;
++ #endif
++ status = gcvSTATUS_OK;
++ break;
++#endif
++
++ case gcvHAL_QUERY_KERNEL_SETTINGS:
++ /* Get kernel settings. */
++ gcmkONERROR(
++ gckKERNEL_QuerySettings(Kernel,
++ &Interface->u.QueryKernelSettings.settings));
++ break;
++
++ case gcvHAL_RESET:
++ /* Reset the hardware. */
++ gckKERNEL_Recovery(Kernel);
++ break;
++
++ case gcvHAL_DEBUG:
++ /* Set debug level and zones. */
++ if (Interface->u.Debug.set)
++ {
++ gckOS_SetDebugLevel(Interface->u.Debug.level);
++ gckOS_SetDebugZones(Interface->u.Debug.zones,
++ Interface->u.Debug.enable);
++ }
++
++ if (Interface->u.Debug.message[0] != '\0')
++ {
++ /* Print a message to the debugger. */
++ if (Interface->u.Debug.type == gcvMESSAGE_TEXT)
++ {
++ gckOS_CopyPrint(Interface->u.Debug.message);
++ }
++ else
++ {
++ gckOS_DumpBuffer(Kernel->os,
++ Interface->u.Debug.message,
++ Interface->u.Debug.messageSize,
++ gceDUMP_BUFFER_FROM_USER,
++ gcvTRUE);
++ }
++ }
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_DUMP_GPU_STATE:
++ /* Dump GPU state */
++ {
++ gceCHIPPOWERSTATE power;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ Interface->u.ReadRegisterData.data = 1;
++ gcmkVERIFY_OK(
++ gckHARDWARE_DumpGPUState(Kernel->hardware));
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkVERIFY_OK(
++ gckCOMMAND_DumpExecutingBuffer(Kernel->command));
++#endif
++ }
++ else
++ {
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ }
++ break;
++
++ case gcvHAL_DUMP_EVENT:
++ /* Dump GPU event */
++ gcmkVERIFY_OK(gckEVENT_Dump(Kernel->eventObj));
++
++ /* Dump Process DB. */
++ gcmkVERIFY_OK(gckKERNEL_DumpProcessDB(Kernel));
++ break;
++
++ case gcvHAL_CACHE:
++ node = gcmUINT64_TO_PTR(Interface->u.Cache.node);
++ if (node == gcvNULL)
++ {
++ /* FIXME Surface wrap some memory which is not allocated by us,
++ ** So we don't have physical address to handle outer cache, ignore it*/
++ status = gcvSTATUS_OK;
++ break;
++ }
++ else if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Video memory has no physical handles. */
++ physical = gcvNULL;
++ }
++ else
++ {
++ /* Grab physical handle. */
++ physical = node->Virtual.physical;
++ }
++
++ logical = gcmUINT64_TO_PTR(Interface->u.Cache.logical);
++ bytes = (gctSIZE_T) Interface->u.Cache.bytes;
++ switch(Interface->u.Cache.operation)
++ {
++ case gcvCACHE_FLUSH:
++ /* Clean and invalidate the cache. */
++ status = gckOS_CacheFlush(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_CLEAN:
++ /* Clean the cache. */
++ status = gckOS_CacheClean(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_INVALIDATE:
++ /* Invalidate the cache. */
++ status = gckOS_CacheInvalidate(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++
++ case gcvCACHE_MEMORY_BARRIER:
++ status = gckOS_MemoryBarrier(Kernel->os,
++ logical);
++ break;
++ default:
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ /* Check for invalid timer. */
++ if ((Interface->u.TimeStamp.timer >= gcmCOUNTOF(Kernel->timers))
++ || (Interface->u.TimeStamp.request != 2))
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Return timer results and reset timer. */
++ {
++ gcsTIMER_PTR timer = &(Kernel->timers[Interface->u.TimeStamp.timer]);
++ gctUINT64 timeDelta = 0;
++
++ if (timer->stopTime < timer->startTime )
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ timeDelta = timer->stopTime - timer->startTime;
++
++ /* Check truncation overflow. */
++ Interface->u.TimeStamp.timeDelta = (gctINT32) timeDelta;
++ /*bit0~bit30 is available*/
++ if (timeDelta>>31)
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ break;
++
++ case gcvHAL_DATABASE:
++ /* Query video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_VIDEO_MEMORY,
++ &Interface->u.Database.vidMem));
++
++ /* Query non-paged memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_NON_PAGED,
++ &Interface->u.Database.nonPaged));
++
++ /* Query contiguous memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_CONTIGUOUS,
++ &Interface->u.Database.contiguous));
++
++ /* Query GPU idle time. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_IDLE,
++ &Interface->u.Database.gpuIdle));
++ break;
++
++ case gcvHAL_VIDMEM_DATABASE:
++ /* Query reserved video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_RESERVED,
++ &Interface->u.VidMemDatabase.vidMemResv));
++
++ /* Query contiguous video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ &Interface->u.VidMemDatabase.vidMemCont));
++
++ /* Query virtual video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_VIRTUAL,
++ &Interface->u.VidMemDatabase.vidMemVirt));
++
++ break;
++
++ case gcvHAL_VERSION:
++ Interface->u.Version.major = gcvVERSION_MAJOR;
++ Interface->u.Version.minor = gcvVERSION_MINOR;
++ Interface->u.Version.patch = gcvVERSION_PATCH;
++ Interface->u.Version.build = gcvVERSION_BUILD;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "KERNEL version %d.%d.%d build %u %s %s",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH,
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME);
++#endif
++ break;
++
++ case gcvHAL_CHIP_INFO:
++ /* Only if not support multi-core */
++ Interface->u.ChipInfo.count = 1;
++ Interface->u.ChipInfo.types[0] = Kernel->hardware->type;
++ break;
++
++ case gcvHAL_ATTACH:
++ /* Attach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Attach(Kernel->command,
++ &context,
++ &bytes,
++ processID));
++
++ Interface->u.Attach.stateCount = bytes;
++ Interface->u.Attach.context = gcmPTR_TO_NAME(context);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Attach.context),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_DETACH:
++ /* Detach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Detach(Kernel->command,
++ gcmNAME_TO_PTR(Interface->u.Detach.context)));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Detach.context)));
++
++ gcmRELEASE_NAME(Interface->u.Detach.context);
++ break;
++
++ case gcvHAL_COMPOSE:
++ Interface->u.Compose.physical = gcmPTR_TO_UINT64(gcmNAME_TO_PTR(Interface->u.Compose.physical));
++ /* Start composition. */
++ gcmkONERROR(
++ gckEVENT_Compose(Kernel->eventObj,
++ &Interface->u.Compose));
++ break;
++
++ case gcvHAL_SET_TIMEOUT:
++ /* set timeOut value from user */
++ gckKERNEL_SetTimeOut(Kernel, Interface->u.SetTimeOut.timeOut);
++ break;
++
++#if gcdFRAME_DB
++ case gcvHAL_GET_FRAME_INFO:
++ gcmkONERROR(gckHARDWARE_GetFrameInfo(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.GetFrameInfo.frameInfo)));
++ break;
++#endif
++
++ case gcvHAL_GET_SHARED_INFO:
++ if (Interface->u.GetSharedInfo.data == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ else
++ {
++ gctUINT32 pid = Interface->u.GetSharedInfo.pid;
++ gctUINT32 dataId = Interface->u.GetSharedInfo.dataId;
++ gctSIZE_T bytes = Interface->u.GetSharedInfo.bytes;
++ gctPOINTER data = Interface->u.GetSharedInfo.data;
++ gcsDATABASE_RECORD record;
++
++ /* Find record. */
++ gcmkONERROR(
++ gckKERNEL_FindProcessDB(Kernel,
++ pid,
++ 0,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ &record));
++
++ /* Check memory size. */
++ if (bytes < record.bytes)
++ {
++ /* Insufficient memory to hold shared data. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Copy to user. */
++ status = gckOS_CopyToUserData(Kernel->os,
++ record.physical,
++ data,
++ record.bytes);
++
++ /*
++ * Remove from process db.
++ * Every time when shared info is taken, the record is erased in
++ * kernel side.
++ */
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ pid,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId)));
++ /* Free existed data. */
++ gcmkVERIFY_OK(
++ gckOS_FreeMemory(Kernel->os, record.physical));
++ }
++ break;
++
++ case gcvHAL_SET_SHARED_INFO:
++ {
++ gctUINT32 dataId = Interface->u.SetSharedInfo.dataId;
++ gctPOINTER data = Interface->u.SetSharedInfo.data;
++ gctUINT32 bytes = Interface->u.SetSharedInfo.bytes;
++ gctPOINTER memory = gcvNULL;
++ gcsDATABASE_RECORD record;
++
++ if (gcmIS_SUCCESS(gckKERNEL_FindProcessDB(Kernel,
++ processID,
++ 0,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ &record)))
++ {
++ /* Find a record with the same id. */
++ if (bytes != record.bytes)
++ {
++ /* Remove from process db. */
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId)));
++
++ /* Free existed data. */
++ gcmkVERIFY_OK(
++ gckOS_FreeMemory(Kernel->os, record.physical));
++ }
++ else
++ {
++ /* Re-use allocated memory. */
++ memory = record.physical;
++ }
++ }
++
++ if ((data == gcvNULL) || (bytes == 0))
++ {
++ /* Nothing to record. */
++ break;
++ }
++
++ if (bytes > 1024)
++ {
++ /* Limite data size. */
++ gcmkONERROR(gcvSTATUS_TOO_COMPLEX);
++ }
++
++ if (memory == gcvNULL)
++ {
++ /* Allocate memory for holding shared data. */
++ gcmkONERROR(
++ gckOS_AllocateMemory(Kernel->os, bytes, &memory));
++
++ /* Add to process db. */
++ status = gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ memory,
++ bytes);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Failed to add process db. Free allocated memory. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Kernel->os, memory));
++ break;
++ }
++ }
++
++ /* Copy shared data to kernel memory. */
++ gcmkONERROR(
++ gckOS_CopyFromUserData(Kernel->os,
++ memory,
++ data,
++ bytes));
++ }
++ break;
++
++ case gcvHAL_SET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_SetFscaleValue(Kernel->hardware,
++ Interface->u.SetFscaleValue.value);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++ case gcvHAL_GET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_GetFscaleValue(Kernel->hardware,
++ &Interface->u.GetFscaleValue.value,
++ &Interface->u.GetFscaleValue.minValue,
++ &Interface->u.GetFscaleValue.maxValue);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_QUERY_RESET_TIME_STAMP:
++#if gcdENABLE_RECOVERY
++ Interface->u.QueryResetTimeStamp.timeStamp = Kernel->resetTimeStamp;
++#else
++ Interface->u.QueryResetTimeStamp.timeStamp = 0;
++#endif
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ switch (Interface->u.SyncPoint.command)
++ {
++ case gcvSYNC_POINT_CREATE:
++ gcmkONERROR(gckOS_CreateSyncPoint(Kernel->os, &syncPoint));
++
++ Interface->u.SyncPoint.syncPoint = gcmPTR_TO_UINT64(syncPoint);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSYNC_POINT_DESTROY:
++ syncPoint = gcmUINT64_TO_PTR(Interface->u.SyncPoint.syncPoint);
++
++ gcmkONERROR(gckOS_DestroySyncPoint(Kernel->os, syncPoint));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++
++ case gcvHAL_CREATE_NATIVE_FENCE:
++ {
++ gctINT fenceFD;
++ gctSYNC_POINT syncPoint =
++ gcmUINT64_TO_PTR(Interface->u.CreateNativeFence.syncPoint);
++
++ gcmkONERROR(
++ gckOS_CreateNativeFence(Kernel->os,
++ Kernel->timeline,
++ syncPoint,
++ &fenceFD));
++
++ Interface->u.CreateNativeFence.fenceFD = fenceFD;
++ }
++ break;
++#endif
++
++ default:
++ /* Invalid command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++OnError:
++ /* Save status. */
++ Interface->status = status;
++
++ if (gcmIS_ERROR(status))
++ {
++ if (locked)
++ {
++ /* Roll back the lock. */
++ gcmkVERIFY_OK(
++ gckVIDMEM_Unlock(Kernel,
++ gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node),
++ gcvSURF_TYPE_UNKNOWN,
++ &asynchronous));
++
++ if (gcvTRUE == asynchronous)
++ {
++ /* Bottom Half */
++ gcmkVERIFY_OK(
++ gckVIDMEM_Unlock(Kernel,
++ gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node),
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++ }
++ }
++ }
++
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_ReleaseMutex(Kernel->os, Kernel->debugMutex);
++#endif
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AttachProcess
++**
++** Attach or detach a process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL Attach
++** gcvTRUE if a new process gets attached or gcFALSE when a process
++** gets detatched.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d", Kernel, Attach);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_AttachProcessEx(Kernel, Attach, processID));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AttachProcessEx
++**
++** Attach or detach a process with the given PID. Can be paired with gckKERNEL_AttachProcess
++** provided the programmer is aware of the consequences.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL Attach
++** gcvTRUE if a new process gets attached or gcFALSE when a process
++** gets detatched.
++**
++** gctUINT32 PID
++** PID of the process to attach or detach.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ )
++{
++ gceSTATUS status;
++ gctINT32 old;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d PID=%d", Kernel, Attach, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (Attach)
++ {
++ /* Increment the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomIncrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 0)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_FIRST_PROCESS));
++ }
++ }
++
++ if (Kernel->dbCreated)
++ {
++ /* Create the process database. */
++ gcmkONERROR(gckKERNEL_CreateProcessDB(Kernel, PID));
++ }
++ }
++ else
++ {
++ if (Kernel->dbCreated)
++ {
++ /* Clean up the process database. */
++ gcmkONERROR(gckKERNEL_DestroyProcessDB(Kernel, PID));
++
++ /* Save the last know process ID. */
++ Kernel->db->lastProcessID = PID;
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ status = gckEVENT_Submit(Kernel->eventObj, gcvTRUE, gcvFALSE);
++
++ if (status == gcvSTATUS_INTERRUPTED && Kernel->eventObj->submitTimer)
++ {
++ gcmkONERROR(gckOS_StartTimer(Kernel->os,
++ Kernel->eventObj->submitTimer,
++ 1));
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++ }
++
++ /* Decrement the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomDecrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 1)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ /* Last client detached, switch to SUSPEND power state. */
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_LAST_PROCESS));
++ }
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++ static gctBOOL baseAddressValid = gcvFALSE;
++ static gctUINT32 baseAddress;
++ gctBOOL needBase;
++ gcskLOGICAL_CACHE_PTR slot;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x *Data=0x%x",
++ Kernel, Cache, gcmOPT_POINTER(Data));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (!baseAddressValid)
++ {
++ /* Get base address. */
++ gcmkONERROR(gckHARDWARE_GetBaseAddress(Kernel->hardware, &baseAddress));
++
++ baseAddressValid = gcvTRUE;
++ }
++
++ /* Does this state load need a base address? */
++ gcmkONERROR(gckHARDWARE_NeedBaseAddress(Kernel->hardware,
++ ((gctUINT32_PTR) Data)[-1],
++ &needBase));
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ {
++ gcskLOGICAL_CACHE_PTR next;
++ gctINT i;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next, next = gcvNULL;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next
++ )
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ /* Use the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ {
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR next = gcvNULL;
++ gcskLOGICAL_CACHE_PTR oldestSlot = gcvNULL;
++ slot = gcvNULL;
++
++ if (Cache->cacheIndex != gcvNULL)
++ {
++ /* Walk the cache forwards. */
++ for (i = 1, slot = Cache->cacheIndex;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++
++ if (next == gcvNULL)
++ {
++ /* Walk the cache backwards. */
++ for (slot = Cache->cacheIndex->prev;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->prev)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ if (Cache->cacheFree != 0)
++ {
++ slot = &Cache->cache[Cache->cacheFree];
++ gcmkASSERT(slot->logical == gcvNULL);
++
++ ++ Cache->cacheFree;
++ if (Cache->cacheFree >= gcmCOUNTOF(Cache->cache))
++ {
++ Cache->cacheFree = 0;
++ }
++ }
++ else
++ {
++ /* Use the oldest cache slot. */
++ gcmkASSERT(oldestSlot != gcvNULL);
++ slot = oldestSlot;
++
++ /* Unlink from the chain. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append to the end. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Save time stamp. */
++ slot->stamp = ++ Cache->cacheStamp;
++
++ /* Save current slot for next lookup. */
++ Cache->cacheIndex = slot;
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ {
++ gctINT i;
++ gctUINT32 data = gcmPTR2INT(*Data);
++ gctUINT32 key, index;
++ gcskLOGICAL_CACHE_PTR hash;
++
++ /* Generate a hash key. */
++ key = (data >> 24) + (data >> 16) + (data >> 8) + data;
++ index = key % gcmCOUNTOF(Cache->hash);
++
++ /* Get the hash entry. */
++ hash = &Cache->hash[index];
++
++ for (slot = hash->nextHash, i = 0;
++ (slot != gcvNULL) && (i < gcdSECURE_CACHE_SLOTS);
++ slot = slot->nextHash, ++i
++ )
++ {
++ if (slot->logical == (*Data))
++ {
++ break;
++ }
++ }
++
++ if (slot == gcvNULL)
++ {
++ /* Grab from the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Unlink slot from any hash table it is part of. */
++ if (slot->prevHash != gcvNULL)
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++
++ if (hash->nextHash != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Hash Collision: logical=0x%x key=0x%08x",
++ *Data, key);
++ }
++
++ /* Insert the slot at the head of the hash list. */
++ slot->nextHash = hash->nextHash;
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot;
++ }
++ slot->prevHash = hash;
++ hash->nextHash = slot;
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ {
++ gctUINT32 index = (gcmPTR2INT(*Data) % gcdSECURE_CACHE_SLOTS) + 1;
++
++ /* Get cache slot. */
++ slot = &Cache->cache[index];
++
++ /* Check for cache miss. */
++ if (slot->logical != *Data)
++ {
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++ }
++#endif
++
++ /* Return DMA address. */
++ *Data = gcmINT2PTR(slot->dma + (needBase ? baseAddress : 0));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR slot;
++ gctUINT8_PTR ptr;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x Logical=0x%x Bytes=%lu",
++ Kernel, Cache, Logical, Bytes);
++
++ /* Do we need to flush the entire cache? */
++ if (Logical == gcvNULL)
++ {
++ /* Clear all cache slots. */
++ for (i = 1; i <= gcdSECURE_CACHE_SLOTS; ++i)
++ {
++ Cache->cache[i].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ Cache->cache[i].nextHash = gcvNULL;
++ Cache->cache[i].prevHash = gcvNULL;
++#endif
++}
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero the hash table. */
++ for (i = 0; i < gcmCOUNTOF(Cache->hash); ++i)
++ {
++ Cache->hash[i].nextHash = gcvNULL;
++ }
++#endif
++
++ /* Reset the cache functionality. */
++ Cache->cacheIndex = gcvNULL;
++ Cache->cacheFree = 1;
++ Cache->cacheStamp = 0;
++ }
++
++ else
++ {
++ gctUINT8_PTR low = (gctUINT8_PTR) Logical;
++ gctUINT8_PTR high = low + Bytes;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ gcskLOGICAL_CACHE_PTR next;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next
++ )
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ gcskLOGICAL_CACHE_PTR next;
++
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Test if this slot is the current slot. */
++ if (slot == Cache->cacheIndex)
++ {
++ /* Move to next or previous slot. */
++ Cache->cacheIndex = (slot->next->logical != gcvNULL)
++ ? slot->next
++ : (slot->prev->logical != gcvNULL)
++ ? slot->prev
++ : gcvNULL;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Insert slot to head of cache. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->stamp = 0;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ gctINT j;
++ gcskLOGICAL_CACHE_PTR hash, next;
++
++ /* Walk all hash tables. */
++ for (i = 0, hash = Cache->hash;
++ i < gcmCOUNTOF(Cache->hash);
++ ++i, ++hash)
++ {
++ /* Walk all slots in the hash. */
++ for (j = 0, slot = hash->nextHash;
++ (j < gcdSECURE_CACHE_SLOTS) && (slot != gcvNULL);
++ ++j, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot from hash table. */
++ if (slot->prevHash == hash)
++ {
++ hash->nextHash = slot->nextHash;
++ }
++ else
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->prevHash = gcvNULL;
++ slot->nextHash = gcvNULL;
++ }
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ gctUINT32 index;
++
++ /* Loop while inside the range. */
++ for (i = 1; (low < high) && (i <= gcdSECURE_CACHE_SLOTS); ++i)
++ {
++ /* Get index into cache for this range. */
++ index = (gcmPTR2INT(low) % gcdSECURE_CACHE_SLOTS) + 1;
++ slot = &Cache->cache[index];
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Remove entry from cache. */
++ slot->logical = gcvNULL;
++ }
++
++ /* Next block. */
++ low += gcdSECURE_CACHE_SLOTS;
++ }
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ )
++{
++#if gcdENABLE_RECOVERY
++#define gcdEVENT_MASK 0x3FFFFFFF
++ gceSTATUS status;
++ gckEVENT eventObj;
++ gckHARDWARE hardware;
++#if gcdSECURE_USER
++ gctUINT32 processID;
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gctUINT32 oldValue;
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Validate the arguemnts. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Grab gckEVENT object. */
++ eventObj = Kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++#if gcdSECURE_USER
++ /* Flush the secure mapping cache. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++ gcmkONERROR(gckKERNEL_FlushTranslationCache(Kernel, cache, gcvNULL, 0));
++#endif
++
++ gcmkONERROR(
++ gckOS_AtomicExchange(Kernel->os, Kernel->resetAtom, 1, &oldValue));
++
++ if (oldValue)
++ {
++ /* Some one else will recovery GPU. */
++ return gcvSTATUS_OK;
++ }
++
++ gcmkPRINT("[galcore]: GPU[%d] hang, automatic recovery.", Kernel->core);
++
++ /* Start a timer to clear reset flag, before timer is expired,
++ ** other recovery request is ignored. */
++ gcmkVERIFY_OK(
++ gckOS_StartTimer(Kernel->os,
++ Kernel->resetFlagClearTimer,
++ gcdGPU_TIMEOUT - 500));
++
++
++ /* Try issuing a soft reset for the GPU. */
++ status = gckHARDWARE_Reset(hardware);
++ if (status == gcvSTATUS_NOT_SUPPORTED)
++ {
++ /* Switch to OFF power. The next submit should return the GPU to ON
++ ** state. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(hardware,
++ gcvPOWER_OFF_RECOVERY));
++ }
++ else
++ {
++ /* Bail out on reset error. */
++ gcmkONERROR(status);
++ }
++
++ /* Handle all outstanding events now. */
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, gcdEVENT_MASK));
++#else
++ eventObj->pending = gcdEVENT_MASK;
++#endif
++ gcmkONERROR(gckEVENT_Notify(eventObj, 1));
++
++ /* Again in case more events got submitted. */
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, gcdEVENT_MASK));
++#else
++ eventObj->pending = gcdEVENT_MASK;
++#endif
++ gcmkONERROR(gckEVENT_Notify(eventObj, 2));
++
++ Kernel->resetTimeStamp++;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_OpenUserData
++**
++** Get access to the user data.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctPOINTER StaticStorage
++** Pointer to the kernel storage where the data is to be copied if
++** NeedCopy is gcvTRUE.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to the kernel pointer that will be pointing to the data.
++*/
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d StaticStorage=0x%08X "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, StaticStorage, UserPointer, Size, KernelPointer
++ );
++
++ /* Validate the arguemnts. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(!NeedCopy || (StaticStorage != gcvNULL));
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ if (NeedCopy)
++ {
++ /* Copy the user data to the static storage. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Kernel->os, StaticStorage, UserPointer, Size
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = StaticStorage;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map the user pointer. */
++ gcmkONERROR(gckOS_MapUserPointer(
++ Kernel->os, UserPointer, Size, &pointer
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = pointer;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_CloseUserData
++**
++** Release resources associated with the user data connection opened by
++** gckKERNEL_OpenUserData.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctBOOL FlushData
++** If gcvTRUE, the data is written back to the user.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Kernel pointer to the data.
++*/
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d FlushData=%d "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, FlushData, UserPointer, Size, KernelPointer
++ );
++
++ /* Validate the arguemnts. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Get a shortcut to the kernel pointer. */
++ pointer = * KernelPointer;
++
++ if (pointer != gcvNULL)
++ {
++ if (NeedCopy)
++ {
++ if (FlushData)
++ {
++ gcmkONERROR(gckOS_CopyToUserData(
++ Kernel->os, * KernelPointer, UserPointer, Size
++ ));
++ }
++ }
++ else
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Kernel->os,
++ UserPointer,
++ Size,
++ * KernelPointer
++ ));
++ }
++
++ /* Reset the kernel pointer. */
++ * KernelPointer = gcvNULL;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ )
++{
++ gcmkHEADER_ARG("Kernel=0x%x timeOut=%d", Kernel, timeOut);
++#if gcdGPU_TIMEOUT
++ Kernel->timeOut = timeOut;
++#endif
++ gcmkFOOTER_NO();
++}
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckOS os = Kernel->os;
++ gceSTATUS status;
++ gctPOINTER logical;
++ gctSIZE_T pageCount;
++ gctSIZE_T bytes = *Bytes;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(os,
++ sizeof(gckVIRTUAL_COMMAND_BUFFER),
++ (gctPOINTER)&buffer));
++
++ gcmkONERROR(gckOS_ZeroMemory(buffer, sizeof(gckVIRTUAL_COMMAND_BUFFER)));
++
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(os,
++ gcvFALSE,
++ bytes,
++ &buffer->physical));
++
++ if (InUserSpace)
++ {
++ gcmkONERROR(gckOS_LockPages(os,
++ buffer->physical,
++ bytes,
++ gcvFALSE,
++ &logical,
++ &pageCount));
++
++ *Logical =
++ buffer->userLogical = logical;
++ }
++ else
++ {
++ gcmkONERROR(
++ gckOS_CreateKernelVirtualMapping(buffer->physical,
++ &pageCount,
++ &logical));
++ *Logical =
++ buffer->kernelLogical = logical;
++ }
++
++ buffer->pageCount = pageCount;
++ buffer->kernel = Kernel;
++
++ gcmkONERROR(gckOS_GetProcessID(&buffer->pid));
++
++ gcmkONERROR(gckMMU_AllocatePages(Kernel->mmu,
++ pageCount,
++ &buffer->pageTable,
++ &buffer->gpuAddress));
++
++ gcmkONERROR(gckOS_MapPagesEx(os,
++ Kernel->core,
++ buffer->physical,
++ pageCount,
++ buffer->pageTable));
++
++ gcmkONERROR(gckMMU_Flush(Kernel->mmu));
++
++ *Physical = buffer;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "gpuAddress = %x pageCount = %d kernelLogical = %x userLogical=%x",
++ buffer->gpuAddress, buffer->pageCount,
++ buffer->kernelLogical, buffer->userLogical);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ if (Kernel->virtualBufferHead == gcvNULL)
++ {
++ Kernel->virtualBufferHead =
++ Kernel->virtualBufferTail = buffer;
++ }
++ else
++ {
++ buffer->prev = Kernel->virtualBufferTail;
++ Kernel->virtualBufferTail->next = buffer;
++ Kernel->virtualBufferTail = buffer;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (buffer->gpuAddress)
++ {
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu, buffer->pageTable, buffer->pageCount));
++ }
++
++ if (buffer->userLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_UnlockPages(os, buffer->physical, bytes, buffer->userLogical));
++ }
++
++ if (buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(buffer->kernelLogical));
++ }
++
++ if (buffer->physical)
++ {
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, bytes));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ gckOS os;
++ gckKERNEL kernel;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)Physical;
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(buffer != gcvNULL);
++
++ kernel = buffer->kernel;
++ os = kernel->os;
++
++ if (buffer->userLogical)
++ {
++ gcmkVERIFY_OK(gckOS_UnlockPages(os, buffer->physical, Bytes, Logical));
++ }
++ else
++ {
++ gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(Logical));
++ }
++
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(kernel->mmu, buffer->pageTable, buffer->pageCount));
++
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, Bytes));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, kernel->virtualBufferLock, gcvINFINITE));
++
++ if (buffer == kernel->virtualBufferHead)
++ {
++ if ((kernel->virtualBufferHead = buffer->next) == gcvNULL)
++ {
++ kernel->virtualBufferTail = gcvNULL;
++ }
++ }
++ else
++ {
++ buffer->prev->next = buffer->next;
++
++ if (buffer == kernel->virtualBufferTail)
++ {
++ kernel->virtualBufferTail = buffer->prev;
++ }
++ else
++ {
++ buffer->next->prev = buffer->prev;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, kernel->virtualBufferLock));
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctPOINTER start;
++ gctINT pid;
++
++ gcmkHEADER_ARG("Logical = %x", Logical);
++
++ gckOS_GetProcessID(&pid);
++
++ status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ /* Walk all command buffer. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ if (buffer->userLogical)
++ {
++ start = buffer->userLogical;
++ }
++ else
++ {
++ start = buffer->kernelLogical;
++ }
++
++ if (Logical >= start
++ && (Logical < (start + buffer->pageCount * 4096))
++ && pid == buffer->pid
++ )
++ {
++ * Address = buffer->gpuAddress + (Logical - start);
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ )
++{
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 start;
++ gceSTATUS status = gcvSTATUS_NOT_SUPPORTED;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ /* Walk all command buffers. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ start = (gctUINT32)buffer->gpuAddress;
++
++ if (GpuAddress >= start && GpuAddress < (start + buffer->pageCount * 4096))
++ {
++ /* Find a range matched. */
++ *Buffer = buffer;
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ return status;
++}
++#endif
++
++#if gcdLINK_QUEUE_SIZE
++static void
++gckLINKQUEUE_Dequeue(
++ IN gckLINKQUEUE LinkQueue
++ )
++{
++ gcmkASSERT(LinkQueue->count == gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count--;
++ LinkQueue->front = (LinkQueue->front + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ )
++{
++ if (LinkQueue->count == gcdLINK_QUEUE_SIZE)
++ {
++ gckLINKQUEUE_Dequeue(LinkQueue);
++ }
++
++ gcmkASSERT(LinkQueue->count < gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count++;
++
++ LinkQueue->data[LinkQueue->rear].start = start;
++ LinkQueue->data[LinkQueue->rear].end = end;
++
++ gcmkVERIFY_OK(
++ gckOS_GetProcessID(&LinkQueue->data[LinkQueue->rear].pid));
++
++ LinkQueue->rear = (LinkQueue->rear + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ )
++{
++ gcmkASSERT(Index >= 0 && Index < gcdLINK_QUEUE_SIZE);
++
++ *Data = &LinkQueue->data[(Index + LinkQueue->front) % gcdLINK_QUEUE_SIZE];
++}
++#endif
++
++/******************************************************************************\
++*************************** Pointer - ID translation ***************************
++\******************************************************************************/
++#define gcdID_TABLE_LENGTH 1024
++typedef struct _gcsINTEGERDB * gckINTEGERDB;
++typedef struct _gcsINTEGERDB
++{
++ gckOS os;
++ gctPOINTER* table;
++ gctPOINTER mutex;
++ gctUINT32 tableLen;
++ gctUINT32 currentID;
++ gctUINT32 unused;
++}
++gcsINTEGERDB;
++
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Allocate a database. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gcsINTEGERDB), (gctPOINTER *)&database));
++
++ gckOS_ZeroMemory(database, gcmSIZEOF(gcsINTEGERDB));
++
++ /* Allocate a pointer table. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH, (gctPOINTER *)&database->table));
++
++ gckOS_ZeroMemory(database->table, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH);
++
++ /* Allocate a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->mutex));
++
++ /* Initialize. */
++ database->currentID = 0;
++ database->unused = gcdID_TABLE_LENGTH;
++ database->os = Kernel->os;
++ database->tableLen = gcdID_TABLE_LENGTH;
++
++ *Database = database;
++
++ gcmkFOOTER_ARG("*Database=0x%08X", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Rollback. */
++ if (database)
++ {
++ if (database->table)
++ {
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++ }
++
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ )
++{
++ gckINTEGERDB database = Database;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Destroy pointer table. */
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++
++ /* Destroy database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->mutex));
++
++ /* Destroy database. */
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctUINT32 i, unused, currentID, tableLen;
++ gctPOINTER * table;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Pointer=0x%08X", Database, Pointer);
++
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (database->unused < 1)
++ {
++ /* Extend table. */
++ gcmkONERROR(
++ gckOS_Allocate(os,
++ gcmSIZEOF(gctPOINTER) * (database->tableLen + gcdID_TABLE_LENGTH),
++ (gctPOINTER *)&table));
++
++ gckOS_ZeroMemory(table + database->tableLen,
++ gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH);
++
++ /* Copy data from old table. */
++ gckOS_MemCopy(table,
++ database->table,
++ database->tableLen * gcmSIZEOF(gctPOINTER));
++
++ gcmkOS_SAFE_FREE(os, database->table);
++
++ /* Update databse with new allocated table. */
++ database->table = table;
++ database->currentID = database->tableLen;
++ database->tableLen += gcdID_TABLE_LENGTH;
++ database->unused += gcdID_TABLE_LENGTH;
++ }
++
++ table = database->table;
++ currentID = database->currentID;
++ tableLen = database->tableLen;
++ unused = database->unused;
++
++ /* Connect id with pointer. */
++ table[currentID] = Pointer;
++
++ *Id = currentID + 1;
++
++ /* Update the currentID. */
++ if (--unused > 0)
++ {
++ for (i = 0; i < tableLen; i++)
++ {
++ if (++currentID >= tableLen)
++ {
++ /* Wrap to the begin. */
++ currentID = 0;
++ }
++
++ if (table[currentID] == gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ database->table = table;
++ database->currentID = currentID;
++ database->tableLen = tableLen;
++ database->unused = unused;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_ARG("*Id=%d", *Id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ database->table[Id] = gcvNULL;
++
++ if (database->unused++ == 0)
++ {
++ database->currentID = Id;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctPOINTER pointer;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ pointer = database->table[Id];
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ if (pointer)
++ {
++ *Pointer = pointer;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ gcmkFOOTER_ARG("*Pointer=0x%08X", *Pointer);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gctUINT32 name;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Pointer=0x%X", Kernel, Pointer);
++
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(database, Pointer, &name));
++
++ gcmkFOOTER_ARG("name=%d", name);
++ return name;
++
++OnError:
++ gcmkFOOTER();
++ return 0;
++}
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer = gcvNULL;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name);
++
++ /* Lookup in database to get pointer. */
++ gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, &pointer));
++
++ gcmkFOOTER_ARG("pointer=0x%X", pointer);
++ return pointer;
++
++OnError:
++ gcmkFOOTER();
++ return gcvNULL;
++}
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=0x%X", Kernel, Name);
++
++ /* Free name if exists. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Name));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3042 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_context.h"
++
++#ifdef __QNXNTO__
++#include <sys/slog.h>
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _NewQueue
++**
++** Allocate a new command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** OUTPUT:
++**
++** gckCOMMAND Command
++** gckCOMMAND object has been updated with a new command queue.
++*/
++static gceSTATUS
++_NewQueue(
++ IN OUT gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctINT currentIndex, newIndex;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Switch to the next command buffer. */
++ currentIndex = Command->index;
++ newIndex = (currentIndex + 1) % gcdCOMMAND_QUEUES;
++
++ /* Wait for availability. */
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.waitsignal]");
++#endif
++
++ gcmkONERROR(gckOS_WaitSignal(
++ Command->os,
++ Command->queues[newIndex].signal,
++ gcvINFINITE
++ ));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ if (newIndex < currentIndex)
++ {
++ Command->wrapCount += 1;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 2 * 4,
++ "%s(%d): queue array wrapped around.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): total queue wrap arounds %d.\n",
++ __FUNCTION__, __LINE__, Command->wrapCount
++ );
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): switched to queue %d.\n",
++ __FUNCTION__, __LINE__, newIndex
++ );
++#endif
++
++ /* Update gckCOMMAND object with new command queue. */
++ Command->index = newIndex;
++ Command->newQueue = gcvTRUE;
++ Command->logical = Command->queues[newIndex].logical;
++ Command->offset = 0;
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(
++ Command->os,
++ Command->logical,
++ (gctUINT32 *) &Command->physical
++ ));
++
++ if (currentIndex != -1)
++ {
++ /* Mark the command queue as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ Command->kernel->eventObj,
++ Command->queues[currentIndex].signal,
++ gcvKERNEL_COMMAND
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("Command->index=%d", Command->index);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IncrementCommitAtom(
++ IN gckCOMMAND Command,
++ IN gctBOOL Increment
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctINT32 atomValue;
++ gctBOOL powerAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Grab the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, hardware->powerMutex, gcvINFINITE
++ ));
++ powerAcquired = gcvTRUE;
++
++ /* Increment the commit atom. */
++ if (Increment)
++ {
++ gcmkONERROR(gckOS_AtomIncrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AtomDecrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ powerAcquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerAcquired)
++ {
++ /* Release the power mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++_ProcessHints(
++ IN gckCOMMAND Command,
++ IN gctUINT32 ProcessID,
++ IN gcoCMDBUF CommandBuffer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gckKERNEL kernel;
++ gctBOOL needCopy = gcvFALSE;
++ gcskSECURE_CACHE_PTR cache;
++ gctUINT8_PTR commandBufferLogical;
++ gctUINT8_PTR hintedData;
++ gctUINT32_PTR hintArray;
++ gctUINT i, hintCount;
++
++ gcmkHEADER_ARG(
++ "Command=0x%08X ProcessID=%d CommandBuffer=0x%08X",
++ Command, ProcessID, CommandBuffer
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Reset state array pointer. */
++ hintArray = gcvNULL;
++
++ /* Get the kernel object. */
++ kernel = Command->kernel;
++
++ /* Get the cache form the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++
++ /* Determine the start of the command buffer. */
++ commandBufferLogical
++ = (gctUINT8_PTR) CommandBuffer->logical
++ + CommandBuffer->startOffset;
++
++ /* Determine the number of records in the state array. */
++ hintCount = CommandBuffer->hintArrayTail - CommandBuffer->hintArray;
++
++ /* Check wehther we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++ /* Get access to the state array. */
++ if (needCopy)
++ {
++ gctUINT copySize;
++
++ if (Command->hintArrayAllocated &&
++ (Command->hintArraySize < CommandBuffer->hintArraySize))
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArraySize = gcvFALSE;
++ }
++
++ if (!Command->hintArrayAllocated)
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_Allocate(
++ Command->os,
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ Command->hintArray = gcmPTR_TO_UINT64(pointer);
++ Command->hintArrayAllocated = gcvTRUE;
++ Command->hintArraySize = CommandBuffer->hintArraySize;
++ }
++
++ hintArray = gcmUINT64_TO_PTR(Command->hintArray);
++ copySize = hintCount * gcmSIZEOF(gctUINT32);
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ hintArray,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ copySize
++ ));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ hintArray = pointer;
++ }
++
++ /* Scan through the buffer. */
++ for (i = 0; i < hintCount; i += 1)
++ {
++ /* Determine the location of the hinted data. */
++ hintedData = commandBufferLogical + hintArray[i];
++
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) hintedData
++ ));
++ }
++
++OnError:
++ /* Get access to the state array. */
++ if (!needCopy && (hintArray != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ hintArray
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_FlushMMU(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckHARDWARE hardware = Command->kernel->hardware;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++ /* Page Table is upated, flush mmu before commit. */
++ gcmkONERROR(gckHARDWARE_FlushMMU(hardware));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++static void
++_DumpBuffer(
++ IN gctPOINTER Buffer,
++ IN gctUINT32 GpuAddress,
++ IN gctSIZE_T Size
++ )
++{
++ gctINT i, line, left;
++ gctUINT32_PTR data = Buffer;
++
++ line = Size / 32;
++ left = Size % 32;
++
++
++ for (i = 0; i < line; i++)
++ {
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]);
++ data += 8;
++ GpuAddress += 8 * 4;
++ }
++
++ switch(left)
++ {
++ case 28:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
++ break;
++ case 24:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5]);
++ break;
++ case 20:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4]);
++ break;
++ case 16:
++ gcmkPRINT("%X : %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3]);
++ break;
++ case 12:
++ gcmkPRINT("%X : %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2]);
++ break;
++ case 8:
++ gcmkPRINT("%X : %08X %08X ",
++ GpuAddress, data[0], data[1]);
++ break;
++ case 4:
++ gcmkPRINT("%X : %08X ",
++ GpuAddress, data[0]);
++ break;
++ default:
++ break;
++ }
++}
++
++static void
++_DumpKernelCommandBuffer(
++ IN gckCOMMAND Command
++)
++{
++ gctINT i;
++ gctUINT32 physical;
++ gctPOINTER entry;
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ entry = Command->queues[i].logical;
++
++ gckOS_GetPhysicalAddress(Command->os, entry, &physical);
++
++ gcmkPRINT("Kernel command buffer %d\n", i);
++
++ _DumpBuffer(entry, physical, Command->pageSize);
++ }
++}
++#endif
++
++/******************************************************************************\
++****************************** gckCOMMAND API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckCOMMAND_Construct
++**
++** Construct a new gckCOMMAND object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckCOMMAND * Command
++** Pointer to a variable that will hold the pointer to the gckCOMMAND
++** object.
++*/
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ )
++{
++ gckOS os;
++ gckCOMMAND command = gcvNULL;
++ gceSTATUS status;
++ gctINT i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ /* Extract the gckOS object. */
++ os = Kernel->os;
++
++ /* Allocate the gckCOMMAND structure. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckCOMMAND), &pointer));
++ command = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(command, gcmSIZEOF(struct _gckCOMMAND)));
++
++ /* Initialize the gckCOMMAND object.*/
++ command->object.type = gcvOBJ_COMMAND;
++ command->kernel = Kernel;
++ command->os = os;
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Kernel->hardware,
++ &command->alignment,
++ &command->reservedHead,
++ &command->reservedTail
++ ));
++
++ /* Create the command queue mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexQueue));
++
++ /* Create the context switching mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Create the context switching mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContextSeq));
++#endif
++
++ /* Create the power management semaphore. */
++ gcmkONERROR(gckOS_CreateSemaphore(os, &command->powerSemaphore));
++
++ /* Create the commit atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &command->atomCommit));
++
++ /* Get the page size from teh OS. */
++ gcmkONERROR(gckOS_GetPageSize(os, &command->pageSize));
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&command->kernelProcessID));
++
++ /* Set hardware to pipe 0. */
++ command->pipeSelect = gcvPIPE_INVALID;
++
++ /* Pre-allocate the command queues. */
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ os,
++ gcvFALSE,
++ &command->pageSize,
++ &command->queues[i].physical,
++ &command->queues[i].logical
++ ));
++
++ gcmkONERROR(gckOS_CreateSignal(
++ os, gcvFALSE, &command->queues[i].signal
++ ));
++
++ gcmkONERROR(gckOS_Signal(
++ os, command->queues[i].signal, gcvTRUE
++ ));
++ }
++
++ /* No command queue in use yet. */
++ command->index = -1;
++ command->logical = gcvNULL;
++ command->newQueue = gcvFALSE;
++
++ /* Command is not yet running. */
++ command->running = gcvFALSE;
++
++ /* Command queue is idle. */
++ command->idle = gcvTRUE;
++
++ /* Commit stamp is zero. */
++ command->commitStamp = 0;
++
++ /* END event signal not created. */
++ command->endEventSignal = gcvNULL;
++
++ /* Return pointer to the gckCOMMAND object. */
++ *Command = command;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Command=0x%x", *Command);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ if (command->atomCommit != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, command->atomCommit));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(os, command->powerSemaphore));
++ }
++
++ if (command->mutexContext != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContext));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (command->mutexContextSeq != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContextSeq));
++ }
++#endif
++
++ if (command->mutexQueue != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexQueue));
++ }
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ if (command->queues[i].signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ os, command->queues[i].signal
++ ));
++ }
++
++ if (command->queues[i].logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ os,
++ command->pageSize,
++ command->queues[i].physical,
++ command->queues[i].logical
++ ));
++ }
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, command));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Destroy
++**
++** Destroy an gckCOMMAND object.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Stop the command queue. */
++ gcmkVERIFY_OK(gckCOMMAND_Stop(Command, gcvFALSE));
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkASSERT(Command->queues[i].signal != gcvNULL);
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->queues[i].signal
++ ));
++
++ gcmkASSERT(Command->queues[i].logical != gcvNULL);
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ Command->os,
++ Command->pageSize,
++ Command->queues[i].physical,
++ Command->queues[i].logical
++ ));
++ }
++
++ /* END event signal. */
++ if (Command->endEventSignal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->endEventSignal
++ ));
++ }
++
++ /* Delete the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (Command->mutexContextSeq != gcvNULL)
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContextSeq));
++#endif
++
++ /* Delete the command queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexQueue));
++
++ /* Destroy the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Command->os, Command->powerSemaphore));
++
++ /* Destroy the commit atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Command->os, Command->atomCommit));
++
++#if gcdSECURE_USER
++ /* Free state array. */
++ if (Command->hintArrayAllocated)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArrayAllocated = gcvFALSE;
++ }
++#endif
++
++ /* Mark object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCOMMAND object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, Command));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_EnterCommit
++**
++** Acquire command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to destroy.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctBOOL atomIncremented = gcvFALSE;
++ gctBOOL semaAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (!FromPower)
++ {
++ /* Increment COMMIT atom to let power management know that a commit is
++ ** in progress. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvTRUE));
++ atomIncremented = gcvTRUE;
++
++ /* Notify the system the GPU has a commit. */
++ gcmkONERROR(gckOS_Broadcast(Command->os,
++ hardware,
++ gcvBROADCAST_GPU_COMMIT));
++
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(Command->os,
++ Command->powerSemaphore));
++ semaAcquired = gcvTRUE;
++ }
++
++ /* Grab the conmmand queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Command->os,
++ Command->mutexQueue,
++ gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (semaAcquired)
++ {
++ /* Release the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++ }
++
++ if (atomIncremented)
++ {
++ /* Decrement the commit atom. */
++ gcmkVERIFY_OK(_IncrementCommitAtom(
++ Command, gcvFALSE
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_ExitCommit
++**
++** Release command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to destroy.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue));
++
++ if (!FromPower)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(Command->os,
++ Command->powerSemaphore));
++
++ /* Decrement the commit atom. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvFALSE));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Start
++**
++** Start up the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to start.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitLinkBytes;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->running)
++ {
++ /* Command queue already running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (Command->logical == gcvNULL)
++ {
++ /* Start at beginning of a new queue. */
++ gcmkONERROR(_NewQueue(Command));
++ }
++
++ /* Start at beginning of page. */
++ Command->offset = 0;
++
++ /* Set abvailable number of bytes for WAIT/LINK command sequence. */
++ waitLinkBytes = Command->pageSize;
++
++ /* Append WAIT/LINK. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ Command->logical,
++ 0,
++ &waitLinkBytes,
++ &waitOffset,
++ &Command->waitSize
++ ));
++
++ Command->waitLogical = (gctUINT8_PTR) Command->logical + waitOffset;
++ Command->waitPhysical = (gctUINT8_PTR) Command->physical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->physical,
++ Command->logical,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Adjust offset. */
++ Command->offset = waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Enable command processor. */
++#ifdef __QNXNTO__
++ gcmkONERROR(gckHARDWARE_Execute(
++ hardware,
++ Command->logical,
++ Command->physical,
++ gcvTRUE,
++ waitLinkBytes
++ ));
++#else
++ gcmkONERROR(gckHARDWARE_Execute(
++ hardware,
++ Command->logical,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Command queue is running. */
++ Command->running = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stop
++**
++** Stop the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to stop.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ )
++{
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (!Command->running)
++ {
++ /* Command queue is not running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (gckHARDWARE_IsFeatureAvailable(hardware,
++ gcvFEATURE_END_EVENT) == gcvSTATUS_TRUE)
++ {
++ /* Allocate the signal. */
++ if (Command->endEventSignal == gcvNULL)
++ {
++ gcmkONERROR(gckOS_CreateSignal(Command->os,
++ gcvTRUE,
++ &Command->endEventSignal));
++ }
++
++ /* Append the END EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Stop(Command->kernel->eventObj,
++ Command->kernelProcessID,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->endEventSignal,
++ &Command->waitSize));
++ }
++ else
++ {
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ hardware, Command->waitLogical, &Command->waitSize
++ ));
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(Command->kernel->hardware,
++ Command->logical,
++ Command->offset));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ /* Wait for idle. */
++ gcmkONERROR(gckHARDWARE_GetIdle(hardware, !FromRecovery, &idle));
++ }
++
++ /* Command queue is no longer running. */
++ Command->running = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Commit
++**
++** Commit a command buffer to the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object.
++**
++** gcoCMDBUF CommandBuffer
++** Pointer to a gcoCMDBUF object.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL contextAcquired = gcvFALSE;
++ gckHARDWARE hardware;
++ gctBOOL needCopy = gcvFALSE;
++ gcsQUEUE_PTR eventRecord = gcvNULL;
++ gcsQUEUE _eventRecord;
++ gcsQUEUE_PTR nextEventRecord;
++ gctBOOL commandBufferMapped = gcvFALSE;
++ gcoCMDBUF commandBufferObject = gcvNULL;
++
++#if !gcdNULL_DRIVER
++ gcsCONTEXT_PTR contextBuffer;
++ struct _gcoCMDBUF _commandBufferObject;
++ gctPHYS_ADDR commandBufferPhysical;
++ gctUINT8_PTR commandBufferLogical;
++ gctUINT8_PTR commandBufferLink;
++ gctUINT commandBufferSize;
++ gctSIZE_T nopBytes;
++ gctSIZE_T pipeBytes;
++ gctSIZE_T linkBytes;
++ gctSIZE_T bytes;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR entryPhysical;
++#endif
++ gctPOINTER entryLogical;
++ gctSIZE_T entryBytes;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR exitPhysical;
++#endif
++ gctPOINTER exitLogical;
++ gctSIZE_T exitBytes;
++ gctPHYS_ADDR waitLinkPhysical;
++ gctPOINTER waitLinkLogical;
++ gctSIZE_T waitLinkBytes;
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitSize;
++
++#if gcdDUMP_COMMAND
++ gctPOINTER contextDumpLogical = gcvNULL;
++ gctSIZE_T contextDumpBytes = 0;
++ gctPOINTER bufferDumpLogical = gcvNULL;
++ gctSIZE_T bufferDumpBytes = 0;
++# endif
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gctBOOL sequenceAcquired = gcvFALSE;
++#endif
++
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG(
++ "Command=0x%x CommandBuffer=0x%x ProcessID=%d",
++ Command, CommandBuffer, ProcessID
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->kernel->core == gcvCORE_2D)
++ {
++ /* There is no context for 2D. */
++ Context = gcvNULL;
++ }
++
++ gcmkONERROR(_FlushMMU(Command));
++
++#if VIVANTE_PROFILER_CONTEXT
++ if((Command->kernel->hardware->gpuProfiler) && (Command->kernel->profileEnable))
++ {
++ /* Acquire the context sequnence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContextSeq, gcvINFINITE
++ ));
++ sequenceAcquired = gcvTRUE;
++ }
++#endif
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(Command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ contextAcquired = gcvTRUE;
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++
++ /* Check wehther we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++#if gcdNULL_DRIVER
++ /* Context switch required? */
++ if ((Context != gcvNULL) && (Command->currContext != Context))
++ {
++ /* Yes, merge in the deltas. */
++ gckCONTEXT_Update(Context, ProcessID, StateDelta);
++
++ /* Update the current context. */
++ Command->currContext = Context;
++ }
++#else
++ if (needCopy)
++ {
++ commandBufferObject = &_commandBufferObject;
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ commandBufferObject,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF)
++ ));
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ }
++ else
++ {
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ &pointer
++ ));
++
++ commandBufferObject = pointer;
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ commandBufferMapped = gcvTRUE;
++ }
++
++ /* Query the size of NOP command. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware, gcvNULL, &nopBytes
++ ));
++
++ /* Query the size of pipe select command sequence. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ hardware, gcvNULL, gcvPIPE_3D, &pipeBytes
++ ));
++
++ /* Query the size of LINK command. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware, gcvNULL, gcvNULL, 0, &linkBytes
++ ));
++
++ /* Compute the command buffer entry and the size. */
++ commandBufferLogical
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->startOffset;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Command->os,
++ commandBufferLogical,
++ (gctUINT32_PTR)&commandBufferPhysical
++ ));
++
++ commandBufferSize
++ = commandBufferObject->offset
++ + Command->reservedTail
++ - commandBufferObject->startOffset;
++
++ /* Get the current offset. */
++ offset = Command->offset;
++
++ /* Compute number of bytes left in current kernel command queue. */
++ bytes = Command->pageSize - offset;
++
++ /* Query the size of WAIT/LINK command sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ gcvNULL,
++ offset,
++ &waitLinkBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < waitLinkBytes)
++ {
++ /* No, create a new one. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Get the new current offset. */
++ offset = Command->offset;
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - offset;
++ gcmkASSERT(bytes >= waitLinkBytes);
++ }
++
++ /* Compute the location if WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + offset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + offset;
++
++ /* Context switch required? */
++ if (Context == gcvNULL)
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryBytes = commandBufferSize - offset;
++ }
++ else if (Command->currContext != Context)
++ {
++ /* Temporary disable context length oprimization. */
++ Context->dirty = gcvTRUE;
++
++ /* Get the current context buffer. */
++ contextBuffer = Context->buffer;
++
++ /* Yes, merge in the deltas. */
++ gcmkONERROR(gckCONTEXT_Update(Context, ProcessID, StateDelta));
++
++ /* Determine context entry and exit points. */
++ if (0)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D and 3D are used.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++
++ /* Mark context as not dirty. */
++ Context->dirty = gcvFALSE;
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D only command buffer.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++ /* Not using 2D. */
++ else
++ {
++ /* Mark 2D as dirty. */
++ Context->dirty2D = gcvTRUE;
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 3D only command buffer.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + pipeBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: "XD" command buffer - neither 2D nor 3D.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_3D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom3D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom3D;
++
++ entryBytes
++ = Context->bufferSize
++ - Context->entryOffsetXDFrom3D;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom2D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom2D;
++
++ entryBytes
++ = Context->totalSize
++ - Context->entryOffsetXDFrom2D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the context buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ entryPhysical,
++ entryLogical,
++ entryBytes
++ ));
++#endif
++
++ /* Update the current context. */
++ Command->currContext = Context;
++
++#if gcdDUMP_COMMAND
++ contextDumpLogical = entryLogical;
++ contextDumpBytes = entryBytes;
++#endif
++ }
++
++ /* Same context. */
++ else
++ {
++ /* Determine context entry and exit points. */
++ if (commandBufferObject->using2D && Context->dirty2D)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++ else
++ {
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + pipeBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryBytes = commandBufferSize - offset;
++ }
++ }
++ }
++
++#if gcdDUMP_COMMAND
++ bufferDumpLogical = commandBufferLogical + offset;
++ bufferDumpBytes = commandBufferSize - offset;
++#endif
++
++#if gcdSECURE_USER
++ /* Process user hints. */
++ gcmkONERROR(_ProcessHints(Command, ProcessID, commandBufferObject));
++#endif
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = Command->physical;
++#endif
++ exitLogical = Command->logical;
++ exitBytes = Command->offset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump to the new
++ WAIT/LINK command sequence. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = waitLinkPhysical;
++#endif
++ exitLogical = waitLinkLogical;
++ exitBytes = waitLinkBytes;
++ }
++
++ /* Add a new WAIT/LINK command sequence. When the command buffer which is
++ currently being scheduled is fully executed by the GPU, the FE will
++ jump to this WAIT/LINK sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ waitLinkLogical,
++ offset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitSize
++ ));
++
++ /* Compute the location if WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = (gctUINT8_PTR) waitLinkLogical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command queue cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ exitPhysical,
++ exitLogical,
++ exitBytes
++ ));
++#endif
++
++ /* Determine the location of the LINK command in the command buffer. */
++ commandBufferLink
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->offset;
++
++ /* Generate a LINK from the end of the command buffer being scheduled
++ back to the kernel command queue. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ commandBufferLink,
++ exitLogical,
++ exitBytes,
++ &linkBytes
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ ProcessID,
++ gcvNULL,
++ commandBufferPhysical,
++ commandBufferLogical,
++ commandBufferSize
++ ));
++#endif
++
++ /* Generate a LINK from the previous WAIT/LINK command sequence to the
++ entry determined above (either the context or the command buffer).
++ This LINK replaces the WAIT instruction from the previous WAIT/LINK
++ pair, therefore we use WAIT metrics for generation of this LINK.
++ This action will execute the entire sequence. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ Command->waitLogical,
++ entryLogical,
++ entryBytes,
++ &Command->waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ contextDumpLogical,
++ contextDumpBytes,
++ gceDUMP_BUFFER_CONTEXT,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ bufferDumpLogical,
++ bufferDumpBytes,
++ gceDUMP_BUFFER_USER,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ waitLinkLogical,
++ waitLinkBytes,
++ gceDUMP_BUFFER_WAITLINK,
++ gcvFALSE
++ );
++
++ /* Update the current pipe. */
++ Command->pipeSelect = commandBufferObject->exitPipe;
++
++ /* Update command queue offset. */
++ Command->offset += waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update address of last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitSize;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.commit]");
++#endif
++#endif /* gcdNULL_DRIVER */
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ contextAcquired = gcvFALSE;
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ commitEntered = gcvFALSE;
++
++#if VIVANTE_PROFILER_CONTEXT
++ if(sequenceAcquired)
++ {
++ gcmkONERROR(gckCOMMAND_Stall(Command, gcvTRUE));
++ if (Command->currContext)
++ {
++ gcmkONERROR(gckHARDWARE_UpdateContextProfile(
++ hardware,
++ Command->currContext));
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ sequenceAcquired = gcvFALSE;
++ }
++#endif
++
++ /* Loop while there are records in the queue. */
++ while (EventQueue != gcvNULL)
++ {
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ eventRecord = &_eventRecord;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os, eventRecord, EventQueue, gcmSIZEOF(gcsQUEUE)
++ ));
++ }
++ else
++ {
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ eventRecord = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(gckEVENT_AddList(
++ Command->kernel->eventObj, &eventRecord->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE
++ ));
++
++ /* Next record in the queue. */
++ nextEventRecord = gcmUINT64_TO_PTR(eventRecord->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os, EventQueue, gcmSIZEOF(gcsQUEUE), (gctPOINTER *) eventRecord
++ ));
++
++ eventRecord = gcvNULL;
++ }
++
++ EventQueue = nextEventRecord;
++ }
++
++ if (Command->kernel->eventObj->queueHead == gcvNULL
++ && Command->kernel->hardware->powerManagement == gcvTRUE
++ )
++ {
++ /* Commit done event by which work thread knows all jobs done. */
++ gcmkVERIFY_OK(
++ gckEVENT_CommitDone(Command->kernel->eventObj, gcvKERNEL_PIXEL));
++ }
++
++ /* Submit events. */
++ status = gckEVENT_Submit(Command->kernel->eventObj, gcvTRUE, gcvFALSE);
++
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkTRACE(
++ gcvLEVEL_INFO,
++ "%s(%d): Intterupted in gckEVENT_Submit",
++ __FUNCTION__, __LINE__
++ );
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++
++ commandBufferMapped = gcvFALSE;
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((eventRecord != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) eventRecord
++ ));
++ }
++
++ if (contextAcquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (sequenceAcquired)
++ {
++ /* Release the context sequence mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ }
++#endif
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Reserve
++**
++** Reserve space in the command queue. Also acquire the command queue mutex.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes previously reserved.
++**
++** OUTPUT:
++**
++** gctPOINTER * Buffer
++** Pointer to a variable that will receive the address of the reserved
++** space.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable that will receive the number of bytes
++** available in the command queue.
++*/
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctSIZE_T * BufferSize
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctSIZE_T requiredBytes;
++ gctUINT32 requestedAligned;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute aligned number of reuested bytes. */
++ requestedAligned = gcmALIGN(RequestedBytes, Command->alignment);
++
++ /* Another WAIT/LINK command sequence will have to be appended after
++ the requested area being reserved. Compute the number of bytes
++ required for WAIT/LINK at the location after the reserved area. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ gcvNULL,
++ Command->offset + requestedAligned,
++ &requiredBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Compute total number of bytes required. */
++ requiredBytes += requestedAligned;
++
++ /* Compute number of bytes available in command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < requiredBytes)
++ {
++ /* Create a new command queue. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Still not enough space? */
++ if (bytes < requiredBytes)
++ {
++ /* Rare case, not enough room in command queue. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++ }
++
++ /* Return pointer to empty slot command queue. */
++ *Buffer = (gctUINT8 *) Command->logical + Command->offset;
++
++ /* Return number of bytes left in command queue. */
++ *BufferSize = bytes;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Buffer=0x%x *BufferSize=%lu", *Buffer, *BufferSize);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Execute
++**
++** Execute a previously reserved command queue by appending a WAIT/LINK command
++** sequence after it and modifying the last WAIT into a LINK command. The
++** command FIFO mutex will be released whether this function succeeds or not.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes previously reserved.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes
++ )
++{
++ gceSTATUS status;
++
++ gctPHYS_ADDR waitLinkPhysical;
++ gctUINT8_PTR waitLinkLogical;
++ gctUINT32 waitLinkOffset;
++ gctSIZE_T waitLinkBytes;
++
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitBytes;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR execPhysical;
++#endif
++ gctPOINTER execLogical;
++ gctSIZE_T execBytes;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute offset for WAIT/LINK. */
++ waitLinkOffset = Command->offset + RequestedBytes;
++
++ /* Compute number of bytes left in command queue. */
++ waitLinkBytes = Command->pageSize - waitLinkOffset;
++
++ /* Compute the location if WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + waitLinkOffset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + waitLinkOffset;
++
++ /* Append WAIT/LINK in command queue. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ waitLinkLogical,
++ waitLinkOffset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitBytes
++ ));
++
++ /* Compute the location if WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = waitLinkLogical + waitOffset;
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = Command->physical;
++#endif
++ execLogical = Command->logical;
++ execBytes = waitLinkOffset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump directly to the
++ reserved area. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = (gctUINT8 *) Command->physical + Command->offset;
++#endif
++ execLogical = (gctUINT8 *) Command->logical + Command->offset;
++ execBytes = RequestedBytes + waitLinkBytes;
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ execPhysical,
++ execLogical,
++ execBytes
++ ));
++#endif
++
++ /* Convert the last WAIT into a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Command->kernel->hardware,
++ Command->waitLogical,
++ execLogical,
++ execBytes,
++ &Command->waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ execLogical,
++ execBytes,
++ gceDUMP_BUFFER_KERNEL,
++ gcvFALSE
++ );
++
++ /* Update the pointer to the last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitBytes;
++
++ /* Update the command queue. */
++ Command->offset += RequestedBytes + waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ Command->kernel->hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.execute]");
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stall
++**
++** The calling thread will be suspended until the command queue has been
++** completed.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++#if gcdNULL_DRIVER
++ /* Do nothing with infinite hardware. */
++ return gcvSTATUS_OK;
++#else
++ gckOS os;
++ gckHARDWARE hardware;
++ gckEVENT eventObject;
++ gceSTATUS status;
++ gctSIGNAL signal = gcvNULL;
++ gctUINT timer = 0;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Extract the gckOS object pointer. */
++ os = Command->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Extract the gckEVENT object pointer. */
++ eventObject = Command->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObject, gcvOBJ_EVENT);
++
++ /* Allocate the signal. */
++ gcmkONERROR(gckOS_CreateSignal(os, gcvTRUE, &signal));
++
++ /* Append the EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Signal(eventObject, signal, gcvKERNEL_PIXEL));
++
++ /* Submit the event queue. */
++ gcmkONERROR(gckEVENT_Submit(eventObject, gcvTRUE, FromPower));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.stall]");
++#endif
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ /* Error. */
++ goto OnError;
++ }
++
++ do
++ {
++ /* Wait for the signal. */
++ status = gckOS_WaitSignal(os, signal, gcdGPU_ADVANCETIMER);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT32 idle;
++
++ /* Read idle register. */
++ gcmkVERIFY_OK(gckHARDWARE_GetIdle(
++ hardware, gcvFALSE, &idle
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): idle=%08x",
++ __FUNCTION__, __LINE__, idle
++ );
++
++ gcmkONERROR(gckOS_MemoryBarrier(os, gcvNULL));
++
++#ifdef __QNXNTO__
++ gctUINT32 reg_cmdbuf_fetch;
++ gctUINT32 reg_intr;
++
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(
++ Command->kernel->hardware->os, Command->kernel->core, 0x0664, &reg_cmdbuf_fetch
++ ));
++
++ if (idle == 0x7FFFFFFE)
++ {
++ /*
++ * GPU is idle so there should not be pending interrupts.
++ * Just double check.
++ *
++ * Note that reading interrupt register clears it.
++ * That's why we don't read it in all cases.
++ */
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(
++ Command->kernel->hardware->os, Command->kernel->core, 0x10, &reg_intr
++ ));
++
++ slogf(
++ _SLOG_SETCODE(1, 0),
++ _SLOG_CRITICAL,
++ "GALcore: Stall timeout (idle = 0x%X, command buffer fetch = 0x%X, interrupt = 0x%X)",
++ idle, reg_cmdbuf_fetch, reg_intr
++ );
++ }
++ else
++ {
++ slogf(
++ _SLOG_SETCODE(1, 0),
++ _SLOG_CRITICAL,
++ "GALcore: Stall timeout (idle = 0x%X, command buffer fetch = 0x%X)",
++ idle, reg_cmdbuf_fetch
++ );
++ }
++#endif
++#endif
++ /* Advance timer. */
++ timer += gcdGPU_ADVANCETIMER;
++ }
++ else if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ }
++ while (gcmIS_ERROR(status)
++#if gcdGPU_TIMEOUT
++ && (timer < Command->kernel->timeOut)
++#endif
++ );
++
++ /* Bail out on timeout. */
++ if (gcmIS_ERROR(status))
++ {
++ /* Broadcast the stuck GPU. */
++ gcmkONERROR(gckOS_Broadcast(
++ os, hardware, gcvBROADCAST_GPU_STUCK
++ ));
++ }
++
++ /* Delete the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ /* Free the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Attach
++**
++** Attach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable that will receive a pointer to a new
++** gckCONTEXT object.
++**
++** gctSIZE_T * StateCount
++** Pointer to a variable that will receive the number of states
++** in the context buffer.
++*/
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Construct a gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Construct(
++ Command->os,
++ Command->kernel->hardware,
++ ProcessID,
++ Context
++ ));
++
++ /* Return the number of states in the context. */
++ * StateCount = (* Context)->stateCount;
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%x", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Detach
++**
++** Detach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x", Command, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Construct a gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Destroy(Context));
++
++ if (Command->currContext == Context)
++ {
++        /* Detach from gckCOMMAND object if the destroyed context is current context. */
++ Command->currContext = gcvNULL;
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++/*******************************************************************************
++**
++** gckCOMMAND_DumpExecutingBuffer
++**
++** Dump the command buffer which GPU is executing.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 gpuAddress;
++ gctSIZE_T pageCount;
++ gctPOINTER entry;
++ gckOS os = Command->os;
++ gckKERNEL kernel = Command->kernel;
++#if gcdLINK_QUEUE_SIZE
++ gctINT pid;
++ gctINT i, rear;
++ gctUINT32 start, end;
++ gctUINT32 dumpFront, dumpRear;
++ gckLINKQUEUE queue = &kernel->hardware->linkQueue;
++ gckLINKQUEUE queueMirror;
++ gctUINT32 bytes;
++ gckLINKDATA linkData;
++#endif
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("**** COMMAND BUF DUMP ****\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, kernel->core, 0x664, &gpuAddress));
++
++ gcmkPRINT("DMA Address 0x%08X", gpuAddress);
++
++#if gcdLINK_QUEUE_SIZE
++ /* Duplicate queue because it will be changed.*/
++ gcmkONERROR(gckOS_AllocateMemory(os,
++ sizeof(struct _gckLINKQUEUE),
++ (gctPOINTER *)&queueMirror));
++
++ gcmkONERROR(gckOS_MemCopy(queueMirror,
++ queue,
++ sizeof(struct _gckLINKQUEUE)));
++
++ /* If kernel command buffer link to a context buffer, then link to a user command
++ ** buffer, the second link will be in queue first, so we must fix this.
++ ** In Queue: C1 U1 U2 C2 U3 U4 U5 C3
++ ** Real: C1 X1 U1 C2 U2 U3 U4 C3 U5
++ ** Command buffer X1 which is after C1 is out of queue, so C1 is meaningless.
++ */
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Can't find it in virtual command buffer list, ignore it. */
++ continue;
++ }
++
++ if (buffer->kernelLogical)
++ {
++ /* It is a context buffer. */
++ if (i == 0)
++ {
++ /* The real command buffer is out, so clear this slot. */
++ linkData->start = 0;
++ linkData->end = 0;
++ linkData->pid = 0;
++ }
++ else
++ {
++ /* switch context buffer and command buffer. */
++ struct _gckLINKDATA tmp = *linkData;
++ gckLINKDATA linkDataPrevious;
++
++ gckLINKQUEUE_GetData(queueMirror, i - 1, &linkDataPrevious);
++ *linkData = *linkDataPrevious;
++ *linkDataPrevious = tmp;
++ }
++ }
++ }
++
++ /* Clear search result. */
++ dumpFront = dumpRear = gcvINFINITE;
++
++ gcmkPRINT("Link Stack:");
++
++ /* Search stuck address in link queue from rear. */
++ rear = gcdLINK_QUEUE_SIZE - 1;
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ start = linkData->start;
++ end = linkData->end;
++ pid = linkData->pid;
++
++ if (gpuAddress >= start && gpuAddress < end)
++ {
++ /* Find latest matched command buffer. */
++ gcmkPRINT(" %d, [%08X - %08X]", pid, start, end);
++
++            /* Initialize dump information. */
++ dumpFront = dumpRear = rear;
++ }
++
++ /* Advance to previous one. */
++ rear--;
++
++ if (dumpFront != gcvINFINITE)
++ {
++ break;
++ }
++ }
++
++ if (dumpFront == gcvINFINITE)
++ {
++ /* Can't find matched record in link queue, dump kernel command buffer. */
++ _DumpKernelCommandBuffer(Command);
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++ }
++
++ /* Search the last context buffer linked. */
++ while (rear >= 0)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ gcmkPRINT(" %d, [%08X - %08X]",
++ linkData->pid,
++ linkData->start,
++ linkData->end);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_SUCCESS(status) && buffer->kernelLogical)
++ {
++ /* Find a context buffer. */
++ dumpFront = rear;
++ break;
++ }
++
++ rear--;
++ }
++
++ /* Dump from last context buffer to last command buffer where hang happens. */
++ for (i = dumpFront; i <= dumpRear; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ /* Get gpu address of this command buffer. */
++ gpuAddress = linkData->start;
++ bytes = linkData->end - gpuAddress;
++
++ /* Get the whole buffer. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkPRINT("Buffer [%08X - %08X] is lost",
++ linkData->start,
++ linkData->end);
++ continue;
++ }
++
++ /* Get kernel logical for dump. */
++ if (buffer->kernelLogical)
++ {
++ /* Get kernel logical directly if it is a context buffer. */
++ entry = buffer->kernelLogical;
++ gcmkPRINT("Context Buffer:");
++ }
++ else
++ {
++            /* Make it accessible by kernel if it is a user command buffer. */
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(buffer->physical,
++ &pageCount,
++ &entry));
++ gcmkPRINT("User Command Buffer:");
++ }
++
++ /* Dump from the entry. */
++ _DumpBuffer(entry + (gpuAddress - buffer->gpuAddress), gpuAddress, bytes);
++
++        /* Release kernel logical address if necessary. */
++ if (!buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(entry));
++ }
++ }
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++#else
++ /* Without link queue information, we don't know the entry of last command
++ ** buffer, just dump the page where GPU stuck. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(buffer->physical, &pageCount, &entry));
++
++ if (entry)
++ {
++ gctUINT32 offset = gpuAddress - buffer->gpuAddress;
++ gctPOINTER entryDump = entry;
++
++            /* Dump one page. */
++ gctUINT32 bytes = 4096;
++
++ /* Align to page. */
++ offset &= 0xfffff000;
++
++ /* Kernel address of page where stall point stay. */
++ entryDump += offset;
++
++ /* Align to page. */
++ gpuAddress &= 0xfffff000;
++
++ gcmkPRINT("User Command Buffer:\n");
++ _DumpBuffer(entryDump, gpuAddress, bytes);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(entry));
++ }
++ else
++ {
++ _DumpKernelCommandBuffer(Command);
++ }
++
++ return gcvSTATUS_OK;
++#endif
++}
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command_vg.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3677 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++*********************************** Debugging **********************************
++\******************************************************************************/
++
++#define gcvDISABLE_TIMEOUT 1
++#define gcvDUMP_COMMAND_BUFFER 0
++#define gcvDUMP_COMMAND_LINES 0
++
++
++#if gcvDEBUG || defined(EMULATOR) || gcvDISABLE_TIMEOUT
++# define gcvQUEUE_TIMEOUT ~0
++#else
++# define gcvQUEUE_TIMEOUT 10
++#endif
++
++
++/******************************************************************************\
++********************************** Definitions *********************************
++\******************************************************************************/
++
++/* Minimum buffer size. */
++#define gcvMINUMUM_BUFFER \
++ gcmSIZEOF(gcsKERNEL_QUEUE_HEADER) + \
++ gcmSIZEOF(gcsKERNEL_CMDQUEUE) * 2
++
++#define gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ static gceSTATUS \
++ _EventHandler_##Block##_##Number( \
++ IN gckVGKERNEL Kernel \
++ )
++
++#define gcmDEFINE_INTERRUPT_HANDLER(Block, Number) \
++ gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ { \
++ return _EventHandler_Block( \
++ Kernel, \
++ &Kernel->command->taskTable[gcvBLOCK_##Block], \
++ gcvFALSE \
++ ); \
++ }
++
++#define gcmDEFINE_INTERRUPT_HANDLER_ENTRY(Block, Number) \
++ { gcvBLOCK_##Block, _EventHandler_##Block##_##Number }
++
++/* Block interrupt handling table entry. */
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER * gcsBLOCK_INTERRUPT_HANDLER_PTR;
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER
++{
++ gceBLOCK block;
++ gctINTERRUPT_HANDLER handler;
++}
++gcsBLOCK_INTERRUPT_HANDLER;
++
++/* Queue control functions. */
++typedef struct _gcsQUEUE_UPDATE_CONTROL * gcsQUEUE_UPDATE_CONTROL_PTR;
++typedef struct _gcsQUEUE_UPDATE_CONTROL
++{
++ gctOBJECT_HANDLER execute;
++ gctOBJECT_HANDLER update;
++ gctOBJECT_HANDLER lastExecute;
++ gctOBJECT_HANDLER lastUpdate;
++}
++gcsQUEUE_UPDATE_CONTROL;
++
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_FlushMMU(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckVGHARDWARE hardware = Command->hardware;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++        /* Page table is updated, flush MMU before commit. */
++ gcmkONERROR(gckVGHARDWARE_FlushMMU(hardware));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_WaitForIdle(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 idle;
++ gctUINT timeout = 0;
++
++ /* Loop while not idle. */
++ while (Queue->pending)
++ {
++ /* Did we reach the timeout limit? */
++ if (timeout == gcvQUEUE_TIMEOUT)
++ {
++ /* Hardware is probably dead... */
++ return gcvSTATUS_TIMEOUT;
++ }
++
++ /* Sleep for 100ms. */
++ gcmkERR_BREAK(gckOS_Delay(Command->os, 100));
++
++ /* Not the first loop? */
++ if (timeout > 0)
++ {
++ /* Read IDLE register. */
++ gcmkVERIFY_OK(gckVGHARDWARE_GetIdle(Command->hardware, &idle));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_COMMAND,
++ "%s: timeout, IDLE=%08X\n",
++ __FUNCTION__, idle
++ );
++ }
++
++ /* Increment the timeout counter. */
++ timeout += 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gctINT32
++_GetNextInterrupt(
++ IN gckVGCOMMAND Command,
++ IN gceBLOCK Block
++ )
++{
++ gctUINT index;
++ gcsBLOCK_TASK_ENTRY_PTR entry;
++ gctINT32 interrupt;
++
++ /* Get the block entry. */
++ entry = &Command->taskTable[Block];
++
++ /* Make sure we have initialized interrupts. */
++ gcmkASSERT(entry->interruptCount > 0);
++
++ /* Decrement the interrupt usage semaphore. */
++ gcmkVERIFY_OK(gckOS_DecrementSemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++
++ /* Get the value index. */
++ index = entry->interruptIndex;
++
++ /* Get the interrupt value. */
++ interrupt = entry->interruptArray[index];
++
++ /* Must be a valid value. */
++ gcmkASSERT((interrupt >= 0) && (interrupt <= 31));
++
++ /* Advance the index to the next value. */
++ index += 1;
++
++ /* Set the new index. */
++ entry->interruptIndex = (index == entry->interruptCount)
++ ? 0
++ : index;
++
++ /* Return interrupt value. */
++ return interrupt;
++}
++
++
++/******************************************************************************\
++***************************** Task Storage Management **************************
++\******************************************************************************/
++
++/* Minimum task buffer size. */
++#define gcvMIN_TASK_BUFFER \
++( \
++ gcmSIZEOF(gcsTASK_CONTAINER) + 128 \
++)
++
++/* Free list terminator. */
++#define gcvFREE_TASK_TERMINATOR \
++( \
++ (gcsTASK_CONTAINER_PTR) gcmINT2PTR(~0) \
++)
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------- Allocated Task Buffer List Management ------------------*/
++
++static void
++_InsertTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR AddAfter,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR addBefore;
++
++ /* Cannot add before the first buffer. */
++ gcmkASSERT(AddAfter != gcvNULL);
++
++ /* Create a shortcut to the next buffer. */
++ addBefore = AddAfter->allocNext;
++
++ /* Initialize the links. */
++ Buffer->allocPrev = AddAfter;
++ Buffer->allocNext = addBefore;
++
++ /* Link to the previous buffer. */
++ AddAfter->allocNext = Buffer;
++
++ /* Link to the next buffer. */
++ if (addBefore != gcvNULL)
++ {
++ addBefore->allocPrev = Buffer;
++ }
++}
++
++static void
++_RemoveTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++
++ /* Cannot remove the first buffer. */
++ gcmkASSERT(Buffer->allocPrev != gcvNULL);
++
++ /* Create shortcuts to the previous and next buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Tail buffer? */
++ if (next == gcvNULL)
++ {
++ /* Remove from the list. */
++ prev->allocNext = gcvNULL;
++ }
++
++ /* Buffer from the middle. */
++ else
++ {
++ prev->allocNext = next;
++ next->allocPrev = prev;
++ }
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*--------------------- Free Task Buffer List Management ---------------------*/
++
++static void
++_AppendToFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Cannot be a part of the free list already. */
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* First buffer to add? */
++ if (Command->taskFreeHead == gcvNULL)
++ {
++ /* Terminate the links. */
++ Buffer->freePrev = gcvFREE_TASK_TERMINATOR;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Initialize the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = Buffer;
++ }
++
++ /* Not the first, add after the tail. */
++ else
++ {
++ /* Initialize the new tail buffer. */
++ Buffer->freePrev = Command->taskFreeTail;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Add after the tail. */
++ Command->taskFreeTail->freeNext = Buffer;
++ Command->taskFreeTail = Buffer;
++ }
++}
++
++static void
++_RemoveFromFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Has to be a part of the free list. */
++ gcmkASSERT(Buffer->freePrev != gcvNULL);
++ gcmkASSERT(Buffer->freeNext != gcvNULL);
++
++ /* Head buffer? */
++ if (Buffer->freePrev == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Tail buffer as well? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Reset the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = gcvNULL;
++ }
++
++ /* No, just the head. */
++ else
++ {
++ /* Update the head. */
++ Command->taskFreeHead = Buffer->freeNext;
++
++ /* Terminate the next buffer. */
++ Command->taskFreeHead->freePrev = gcvFREE_TASK_TERMINATOR;
++ }
++ }
++
++ /* Not the head. */
++ else
++ {
++ /* Tail buffer? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Update the tail. */
++ Command->taskFreeTail = Buffer->freePrev;
++
++ /* Terminate the previous buffer. */
++ Command->taskFreeTail->freeNext = gcvFREE_TASK_TERMINATOR;
++ }
++
++ /* A buffer in the middle. */
++ else
++ {
++ /* Remove the buffer from the list. */
++ Buffer->freePrev->freeNext = Buffer->freeNext;
++ Buffer->freeNext->freePrev = Buffer->freePrev;
++ }
++ }
++
++ /* Reset free list pointers. */
++ Buffer->freePrev = gcvNULL;
++ Buffer->freeNext = gcvNULL;
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- Task Buffer Allocation --------------------------*/
++
++static void
++_SplitTaskBuffer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer,
++ IN gctUINT Size
++ )
++{
++ /* Determine the size of the new buffer. */
++ gctINT splitBufferSize = Buffer->size - Size;
++ gcmkASSERT(splitBufferSize >= 0);
++
++ /* Is the split buffer big enough to become a separate buffer? */
++ if (splitBufferSize >= gcvMIN_TASK_BUFFER)
++ {
++ /* Place the new path data. */
++ gcsTASK_CONTAINER_PTR splitBuffer = (gcsTASK_CONTAINER_PTR)
++ (
++ (gctUINT8_PTR) Buffer + Size
++ );
++
++ /* Set the trimmed buffer size. */
++ Buffer->size = Size;
++
++ /* Initialize the split buffer. */
++ splitBuffer->referenceCount = 0;
++ splitBuffer->size = splitBufferSize;
++ splitBuffer->freePrev = gcvNULL;
++ splitBuffer->freeNext = gcvNULL;
++
++ /* Link in. */
++ _InsertTaskBuffer(Buffer, splitBuffer);
++ _AppendToFreeList(Command, splitBuffer);
++ }
++}
++
++static gceSTATUS
++_AllocateTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ OUT gcsTASK_CONTAINER_PTR * Buffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x, Buffer ==0x%x", Command, Size, Buffer);
++
++ /* Verify arguments. */
++ gcmkVERIFY_ARGUMENT(Buffer != gcvNULL);
++
++ do
++ {
++ gcsTASK_STORAGE_PTR storage;
++ gcsTASK_CONTAINER_PTR buffer;
++
++ /* Adjust the size. */
++ Size += gcmSIZEOF(gcsTASK_CONTAINER);
++
++ /* Adjust the allocation size if not big enough. */
++ if (Size > Command->taskStorageUsable)
++ {
++ Command->taskStorageGranularity
++ = gcmALIGN(Size + gcmSIZEOF(gcsTASK_STORAGE), 1024);
++
++ Command->taskStorageUsable
++ = Command->taskStorageGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++ }
++
++ /* Is there a free buffer available? */
++ else if (Command->taskFreeHead != gcvNULL)
++ {
++ /* Set the initial free buffer. */
++ gcsTASK_CONTAINER_PTR buffer = Command->taskFreeHead;
++
++ do
++ {
++ /* Is the buffer big enough? */
++ if (buffer->size >= Size)
++ {
++ /* Remove the buffer from the free list. */
++ _RemoveFromFreeList(Command, buffer);
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++
++ /* Get the next free buffer. */
++ buffer = buffer->freeNext;
++ }
++ while (buffer != gcvFREE_TASK_TERMINATOR);
++ }
++
++ /* Allocate a container. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Command->os,
++ Command->taskStorageGranularity,
++ (gctPOINTER *) &storage
++ ));
++
++ /* Link in the storage buffer. */
++ storage->next = Command->taskStorage;
++ Command->taskStorage = storage;
++
++ /* Place the task buffer. */
++ buffer = (gcsTASK_CONTAINER_PTR) (storage + 1);
++
++ /* Determine the size of the buffer. */
++ buffer->size
++ = Command->taskStorageGranularity
++ - gcmSIZEOF(gcsTASK_STORAGE);
++
++ /* Initialize the task buffer. */
++ buffer->referenceCount = 0;
++ buffer->allocPrev = gcvNULL;
++ buffer->allocNext = gcvNULL;
++ buffer->freePrev = gcvNULL;
++ buffer->freeNext = gcvNULL;
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++static void
++_FreeTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++ gcsTASK_CONTAINER_PTR merged;
++
++ gctSIZE_T mergedSize;
++
++ /* Verify arguments. */
++ gcmkASSERT(Buffer != gcvNULL);
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* Get shortcuts to the previous and next path data buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Is the previous path data buffer already free? */
++ if (prev && prev->freeNext)
++ {
++ /* The previous path data buffer is the one that remains. */
++ merged = prev;
++
++ /* Is the next path data buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge all three path data buffers into the previous. */
++ mergedSize = prev->size + Buffer->size + next->size;
++
++ /* Remove the next path data buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++ }
++ else
++ {
++ /* Merge the current path data buffer into the previous. */
++ mergedSize = prev->size + Buffer->size;
++ }
++
++ /* Delete the current path data buffer. */
++ _RemoveTaskBuffer(Buffer);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++ else
++ {
++ /* The current path data buffer is the one that remains. */
++ merged = Buffer;
++
++ /* Is the next buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge the next into the current. */
++ mergedSize = Buffer->size + next->size;
++
++ /* Remove the next buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++
++ /* Add the current buffer into the free list. */
++ _AppendToFreeList(Command, merged);
++ }
++}
++
++gceSTATUS
++_RemoveRecordFromProcesDB(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_HEADER_PTR Task
++ )
++{
++ gcsTASK_PTR task = (gcsTASK_PTR)((gctUINT8_PTR)Task - sizeof(gcsTASK));
++ gcsTASK_FREE_VIDEO_MEMORY_PTR freeVideoMemory;
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR unlockVideoMemory;
++ gctINT pid;
++ gctUINT32 size;
++
++ /* Get the total size of all tasks. */
++ size = task->size;
++
++ gcmkVERIFY_OK(gckOS_GetProcessID((gctUINT32_PTR)&pid));
++
++ do
++ {
++ switch (Task->id)
++ {
++ case gcvTASK_FREE_VIDEO_MEMORY:
++ freeVideoMemory = (gcsTASK_FREE_VIDEO_MEMORY_PTR)Task;
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ gcvDB_VIDEO_MEMORY,
++ gcmUINT64_TO_PTR(freeVideoMemory->node)));
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_FREE_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(freeVideoMemory + 1);
++
++ break;
++ case gcvTASK_UNLOCK_VIDEO_MEMORY:
++ unlockVideoMemory = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR)Task;
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(unlockVideoMemory->node)));
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_UNLOCK_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(unlockVideoMemory + 1);
++
++ break;
++ default:
++ /* Skip the whole task. */
++ size = 0;
++ break;
++ }
++ }
++ while(size);
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++********************************* Task Scheduling ******************************
++\******************************************************************************/
++
++static gceSTATUS
++_ScheduleTasks(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable,
++ IN gctUINT8_PTR PreviousEnd
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gctINT block;
++ gcsTASK_CONTAINER_PTR container;
++ gcsTASK_MASTER_ENTRY_PTR userTaskEntry;
++ gcsBLOCK_TASK_ENTRY_PTR kernelTaskEntry;
++ gcsTASK_PTR userTask;
++ gctUINT8_PTR kernelTask;
++ gctINT32 interrupt;
++ gctUINT8_PTR eventCommand;
++
++ /* Nothing to schedule? */
++ if (TaskTable->size == 0)
++ {
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->taskMutex,
++ gcvINFINITE
++ ));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ do
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " number of tasks scheduled = %d\n"
++ " size of event data in bytes = %d\n",
++ TaskTable->count,
++ TaskTable->size
++ );
++
++ /* Allocate task buffer. */
++ gcmkERR_BREAK(_AllocateTaskContainer(
++ Command,
++ TaskTable->size,
++ &container
++ ));
++
++ /* Determine the task data pointer. */
++ kernelTask = (gctUINT8_PTR) (container + 1);
++
++ /* Initialize the reference count. */
++ container->referenceCount = TaskTable->count;
++
++ /* Process tasks. */
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " processing tasks for block %d\n",
++ block
++ );
++
++ /* Get the current kernel table entry. */
++ kernelTaskEntry = &Command->taskTable[block];
++
++ /* Are there tasks for the current block scheduled? */
++ if (kernelTaskEntry->container == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " first task container for the block added\n",
++ block
++ );
++
++ /* Nothing yet, set the container buffer pointer. */
++ kernelTaskEntry->container = container;
++ kernelTaskEntry->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Yes, append to the end. */
++ else
++ {
++ kernelTaskEntry->link->cotainer = container;
++ kernelTaskEntry->link->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Set initial task. */
++ userTask = userTaskEntry->head;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " copying user tasks over to the kernel\n"
++ );
++
++ /* Copy tasks. */
++ do
++ {
++ gcsTASK_HEADER_PTR taskHeader = (gcsTASK_HEADER_PTR) (userTask + 1);
++
++ gcmkVERIFY_OK(_RemoveRecordFromProcesDB(Command, taskHeader));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " task ID = %d, size = %d\n",
++ ((gcsTASK_HEADER_PTR) (userTask + 1))->id,
++ userTask->size
++ );
++
++#ifdef __QNXNTO__
++ if (taskHeader->id == gcvTASK_SIGNAL)
++ {
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->coid = TaskTable->coid;
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->rcvid = TaskTable->rcvid;
++ }
++#endif /* __QNXNTO__ */
++ /* Copy the task data. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ kernelTask, taskHeader, userTask->size
++ ));
++
++ /* Advance to the next task. */
++ kernelTask += userTask->size;
++ userTask = userTask->next;
++ }
++ while (userTask != gcvNULL);
++
++ /* Update link pointer in the header. */
++ kernelTaskEntry->link = (gcsTASK_LINK_PTR) kernelTask;
++
++ /* Initialize link task. */
++ kernelTaskEntry->link->id = gcvTASK_LINK;
++ kernelTaskEntry->link->cotainer = gcvNULL;
++ kernelTaskEntry->link->task = gcvNULL;
++
++ /* Advance the task data pointer. */
++ kernelTask += gcmSIZEOF(gcsTASK_LINK);
++ }
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->taskMutex
++ ));
++
++ /* Assign interrupts to the blocks. */
++ eventCommand = PreviousEnd;
++
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ /* Get the interrupt number. */
++ interrupt = _GetNextInterrupt(Command, block);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): block = %d interrupt = %d\n",
++ __FUNCTION__, __LINE__,
++ block, interrupt
++ );
++
++ /* Determine the command position. */
++ eventCommand -= Command->info.eventCommandSize;
++
++ /* Append an EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, eventCommand, block, interrupt, gcvNULL
++ ));
++ }
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++******************************** Memory Management *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_HardwareToKernel(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM memory;
++ gctUINT32 offset;
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++ gctUINT32 nodePhysical;
++#endif
++ status = gcvSTATUS_OK;
++ /* Assume a non-virtual node and get the pool manager object. */
++ memory = Node->VidMem.memory;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++ nodePhysical = memory->baseAddress
++ + Node->VidMem.offset
++ + Node->VidMem.alignment;
++
++ if (Node->VidMem.kernelVirtual == gcvNULL)
++ {
++ status = gckOS_MapPhysical(Os,
++ nodePhysical,
++ Node->VidMem.bytes,
++ (gctPOINTER *)&Node->VidMem.kernelVirtual);
++
++ if (gcmkIS_ERROR(status))
++ {
++ return status;
++ }
++ }
++
++ offset = Address - nodePhysical;
++ *KernelPointer = (gctPOINTER)((gctUINT8_PTR)Node->VidMem.kernelVirtual + offset);
++#else
++ /* Determine the header offset within the pool it is allocated in. */
++ offset = Address - memory->baseAddress;
++
++ /* Translate the offset into the kernel side pointer. */
++ status = gckOS_GetKernelLogicalEx(
++ Os,
++ gcvCORE_VG,
++ offset,
++ KernelPointer
++ );
++#endif
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_ConvertUserCommandBufferPointer(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR UserCommandBuffer,
++ OUT gcsCMDBUFFER_PTR * KernelCommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcsCMDBUFFER_PTR mappedUserCommandBuffer = gcvNULL;
++
++ do
++ {
++ gctUINT32 headerAddress;
++
++ /* Map the command buffer structure into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ (gctPOINTER *) &mappedUserCommandBuffer
++ ));
++
++ /* Determine the address of the header. */
++ headerAddress
++ = mappedUserCommandBuffer->address
++ - mappedUserCommandBuffer->bufferOffset;
++
++ /* Translate the logical address to the kernel space. */
++ gcmkERR_BREAK(_HardwareToKernel(
++ Command->os,
++ gcmUINT64_TO_PTR(mappedUserCommandBuffer->node),
++ headerAddress,
++ (gctPOINTER *) KernelCommandBuffer
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Unmap the user command buffer. */
++ if (mappedUserCommandBuffer != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_UnmapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ mappedUserCommandBuffer
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_AllocateLinear(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gcuVIDMEM_NODE_PTR * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status, last;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctUINT32 address = (gctUINT32)~0;
++
++ do
++ {
++ gcePOOL pool;
++ gctPOINTER logical;
++
++ /* Allocate from the system pool. */
++ pool = gcvPOOL_SYSTEM;
++
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Command->kernel->kernel, &pool,
++ Size, Alignment,
++ gcvSURF_TYPE_UNKNOWN,
++ &node
++ ));
++
++ /* Do not accept virtual pools for now because we don't handle the
++ kernel pointer translation at the moment. */
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ /* Lock the command buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Lock(
++ Command->kernel->kernel,
++ node,
++ gcvFALSE,
++ &address
++ ));
++
++ /* Translate the logical address to the kernel space. */
++ gcmkERR_BREAK(_HardwareToKernel(
++ Command->os,
++ node,
++ address,
++ &logical
++ ));
++
++ /* Set return values. */
++ * Node = node;
++ * Address = address;
++ * Logical = logical;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Unlock the command buffer. */
++ if (address != ~0)
++ {
++ gcmkCHECK_STATUS(gckVIDMEM_Unlock(
++ Command->kernel->kernel, node, gcvSURF_TYPE_UNKNOWN, gcvNULL
++ ));
++ }
++
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(gckVIDMEM_Free(
++ node
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_FreeLinear(
++ IN gckVGKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Unlock the linear buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Unlock(Kernel->kernel, Node, gcvSURF_TYPE_UNKNOWN, gcvNULL));
++
++ /* Free the linear buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Free(Node));
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++_AllocateCommandBuffer(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++
++ do
++ {
++ gctUINT alignedHeaderSize;
++ gctUINT requestedSize;
++ gctUINT allocationSize;
++ gctUINT32 address = 0;
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT8_PTR endCommand;
++
++ /* Determine the aligned header size. */
++ alignedHeaderSize
++ = gcmALIGN(gcmSIZEOF(gcsCMDBUFFER), Command->info.addressAlignment);
++
++ /* Align the requested size. */
++ requestedSize
++ = gcmALIGN(Size, Command->info.commandAlignment);
++
++ /* Determine the size of the buffer to allocate. */
++ allocationSize
++ = alignedHeaderSize
++ + requestedSize
++ + Command->info.staticTailSize;
++
++ /* Allocate the command buffer. */
++ gcmkERR_BREAK(_AllocateLinear(
++ Command,
++ allocationSize,
++ Command->info.addressAlignment,
++ &node,
++ &address,
++ (gctPOINTER *) &commandBuffer
++ ));
++
++ /* Initialize the structure. */
++ commandBuffer->completion = gcvVACANT_BUFFER;
++ commandBuffer->node = gcmPTR_TO_UINT64(node);
++ commandBuffer->address = address + alignedHeaderSize;
++ commandBuffer->bufferOffset = alignedHeaderSize;
++ commandBuffer->size = requestedSize;
++ commandBuffer->offset = requestedSize;
++ commandBuffer->nextAllocated = gcvNULL;
++ commandBuffer->nextSubBuffer = gcvNULL;
++
++ /* Determine the data count. */
++ commandBuffer->dataCount
++ = (requestedSize + Command->info.staticTailSize)
++ / Command->info.commandAlignment;
++
++ /* Determine the location of the END command. */
++ endCommand
++ = (gctUINT8_PTR) commandBuffer
++ + alignedHeaderSize
++ + requestedSize;
++
++ /* Append an END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ endCommand,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++
++ /* Set the return pointer. */
++ * CommandBuffer = commandBuffer;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(_FreeLinear(Command->kernel, node));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_FreeCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ /* Free the buffer. */
++ status = _FreeLinear(Kernel, gcmUINT64_TO_PTR(CommandBuffer->node));
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++****************************** TS Overflow Handler *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_TSOverflow(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** TS OVERFLOW ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++
++/******************************************************************************\
++****************************** Bus Error Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_BusError(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** BUS ERROR ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++****************************** Power Stall Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_PowerStall(
++ IN gckVGKERNEL Kernel
++ )
++{
++ /* Signal. */
++ return gckOS_Signal(
++ Kernel->os,
++ Kernel->command->powerStallSignal,
++ gcvTRUE);
++}
++
++/******************************************************************************\
++******************************** Task Routines *********************************
++\******************************************************************************/
++
++typedef gceSTATUS (* gctTASKROUTINE) (
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gctTASKROUTINE _taskRoutine[] =
++{
++ _TaskLink, /* gcvTASK_LINK */
++ _TaskCluster, /* gcvTASK_CLUSTER */
++ _TaskIncrement, /* gcvTASK_INCREMENT */
++ _TaskDecrement, /* gcvTASK_DECREMENT */
++ _TaskSignal, /* gcvTASK_SIGNAL */
++ _TaskLockdown, /* gcvTASK_LOCKDOWN */
++ _TaskUnlockVideoMemory, /* gcvTASK_UNLOCK_VIDEO_MEMORY */
++ _TaskFreeVideoMemory, /* gcvTASK_FREE_VIDEO_MEMORY */
++ _TaskFreeContiguousMemory, /* gcvTASK_FREE_CONTIGUOUS_MEMORY */
++ _TaskUnmapUserMemory, /* gcvTASK_UNMAP_USER_MEMORY */
++};
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ /* Cast the task pointer. */
++ gcsTASK_LINK_PTR task = (gcsTASK_LINK_PTR) TaskHeader->task;
++
++ /* Save the pointer to the container. */
++ gcsTASK_CONTAINER_PTR container = TaskHeader->container;
++
++ /* No more tasks in the list? */
++ if (task->task == gcvNULL)
++ {
++ /* Reset the entry. */
++ TaskHeader->container = gcvNULL;
++ TaskHeader->task = gcvNULL;
++ TaskHeader->link = gcvNULL;
++ }
++ else
++ {
++ /* Update the entry. */
++ TaskHeader->container = task->cotainer;
++ TaskHeader->task = task->task;
++ }
++
++ /* Decrement the task buffer reference. */
++ gcmkASSERT(container->referenceCount >= 0);
++ if (container->referenceCount == 0)
++ {
++ /* Free the container. */
++ _FreeTaskContainer(Command, container);
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ /* Cast the task pointer. */
++ gcsTASK_CLUSTER_PTR cluster = (gcsTASK_CLUSTER_PTR) TaskHeader->task;
++
++ /* Get the number of tasks. */
++ gctUINT taskCount = cluster->taskCount;
++
++ /* Advance to the next task. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (cluster + 1);
++
++ /* Perform all tasks in the cluster. */
++ while (taskCount)
++ {
++ /* Perform the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ Command,
++ TaskHeader
++ ));
++
++ /* Update the task count. */
++ taskCount -= 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_INCREMENT_PTR task = (gcsTASK_INCREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Increment data. */
++ (* logical) += 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_DECREMENT_PTR task = (gcsTASK_DECREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Decrement data. */
++ (* logical) -= 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_SIGNAL_PTR task = (gcsTASK_SIGNAL_PTR) TaskHeader->task;
++
++
++ /* Map the signal into kernel space. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->rcvid, task->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->process
++ ));
++#endif /* __QNXNTO__ */
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR userCounter = gcvNULL;
++ gctUINT32_PTR kernelCounter = gcvNULL;
++ gctSIGNAL signal = gcvNULL;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_LOCKDOWN_PTR task = (gcsTASK_LOCKDOWN_PTR) TaskHeader->task;
++
++ /* Convert physical addresses into logical. */
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->userCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &userCounter
++ ));
++
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->kernelCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &kernelCounter
++ ));
++
++ /* Update the kernel counter. */
++ (* kernelCounter) += 1;
++
++ /* Are the counters equal? */
++ if ((* userCounter) == (* kernelCounter))
++ {
++ /* Map the signal into kernel space. */
++ gcmkERR_BREAK(gckOS_MapSignal(
++ Command->os, task->signal, task->process, &signal
++ ));
++
++ if (signal == gcvNULL)
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, task->signal, gcvTRUE
++ ));
++ }
++ else
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, signal, gcvTRUE
++ ));
++ }
++ }
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Destroy the mapped signal. */
++ if (signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, signal
++ ));
++ }
++
++ /* Unmap the physical memory. */
++ if (kernelCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ kernelCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ if (userCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ userCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR task
++ = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Unlock video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Unlock(
++ Command->kernel->kernel,
++ gcmUINT64_TO_PTR(task->node),
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_VIDEO_MEMORY_PTR task
++ = (gcsTASK_FREE_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Free video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Free(gcmUINT64_TO_PTR(task->node)));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR task
++ = (gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR) TaskHeader->task;
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Command->os, task->physical, task->logical, task->bytes
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNMAP_USER_MEMORY_PTR task
++ = (gcsTASK_UNMAP_USER_MEMORY_PTR) TaskHeader->task;
++
++ /* Unmap the user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Command->os, gcvCORE_VG, task->memory, task->size, task->info, task->address
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++************ Hardware Block Interrupt Handlers For Scheduled Events ************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_Block(
++ IN gckVGKERNEL Kernel,
++ IN gcsBLOCK_TASK_ENTRY_PTR TaskHeader,
++ IN gctBOOL ProcessAll
++ )
++{
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskHeader=0x%x ProcessAll=0x%x", Kernel, TaskHeader, ProcessAll);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ gckVGCOMMAND command;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Increment the interrupt usage semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, TaskHeader->interruptSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->taskMutex,
++ gcvINFINITE
++ ));
++
++ /* Verify inputs. */
++ gcmkASSERT(TaskHeader != gcvNULL);
++ gcmkASSERT(TaskHeader->container != gcvNULL);
++ gcmkASSERT(TaskHeader->task != gcvNULL);
++ gcmkASSERT(TaskHeader->link != gcvNULL);
++
++ /* Process tasks. */
++ do
++ {
++ /* Process the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++ /* Is the next task a LINK? */
++ if (TaskHeader->task->id == gcvTASK_LINK)
++ {
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++ /* Done. */
++ break;
++ }
++ }
++ while (ProcessAll);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->taskMutex
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gcmDECLARE_INTERRUPT_HANDLER(COMMAND, 0)
++{
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x ", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++
++ do
++ {
++ gckVGCOMMAND command;
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT entryCount;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Get the current queue. */
++ queueTail = command->queueTail;
++
++ /* Get the current queue entry. */
++ entry = queueTail->currentEntry;
++
++ /* Get the number of entries in the queue. */
++ entryCount = queueTail->pending;
++
++ /* Process all entries. */
++ while (gcvTRUE)
++ {
++ /* Call post-execution function. */
++ status = entry->handler(Kernel, entry);
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: post action failed.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Executed the next buffer? */
++ if (status == gcvSTATUS_EXECUTED)
++ {
++ /* Update the queue. */
++ queueTail->pending = entryCount;
++ queueTail->currentEntry = entry;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++
++ /* Break out of the loop. */
++ break;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ entryCount -= 1;
++
++ /* Last entry? */
++ if (entryCount == 0)
++ {
++ /* Reset the queue to idle. */
++ queueTail->pending = 0;
++
++ /* Get a shortcut to the queue to merge with. */
++ mergeQueue = command->mergeQueue;
++
++ /* Merge the queues if necessary. */
++ if (mergeQueue != queueTail)
++ {
++ gcmkASSERT(mergeQueue < queueTail);
++ gcmkASSERT(mergeQueue->next == queueTail);
++
++ mergeQueue->size
++ += gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + queueTail->size;
++
++ mergeQueue->next = queueTail->next;
++ }
++
++ /* Advance to the next queue. */
++ queueTail = queueTail->next;
++
++ /* Did it wrap around? */
++ if (command->queue == queueTail)
++ {
++ /* Reset merge queue. */
++ command->mergeQueue = queueTail;
++ }
++
++ /* Set new queue. */
++ command->queueTail = queueTail;
++
++ /* Is the next queue scheduled? */
++ if (queueTail->pending > 0)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ status = gckVGHARDWARE_Execute(
++ command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: failed to start the next queue.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ }
++ else
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(
++ Kernel->command->hardware, gcvPOWER_IDLE_BROADCAST
++ );
++ }
++
++ /* Break out of the loop. */
++ break;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->queueMutex
++ ));
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/* Define standard block interrupt handlers. */
++gcmDEFINE_INTERRUPT_HANDLER(TESSELLATOR, 0)
++gcmDEFINE_INTERRUPT_HANDLER(VG, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 1)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 2)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 3)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 4)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 5)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 6)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 7)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 8)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 9)
++
++/* The entries in the array are arranged by event priority. */
++static gcsBLOCK_INTERRUPT_HANDLER _blockHandlers[] =
++{
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(TESSELLATOR, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(VG, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 1),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 2),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 3),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 4),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 5),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 6),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 7),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 8),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 9),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(COMMAND, 0),
++};
++
++
++/******************************************************************************\
++************************* Static Command Buffer Handlers ***********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Cast the command buffer header. */
++ commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++************************* Dynamic Command Buffer Handlers **********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++********************************* Other Handlers *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_FreeKernelCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ /* Free the command buffer. */
++ status = _FreeCommandBuffer(Kernel, Entry->commandBuffer);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++******************************* Queue Management *******************************
++\******************************************************************************/
++
++#if gcvDUMP_COMMAND_BUFFER
++static void
++_DumpCommandQueue(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR QueueHeader,
++ IN gctUINT EntryCount
++ )
++{
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT queueIndex;
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ static gctUINT arrayCount = 0;
++#endif
++
++ /* Is dumping enabled? */
++ if (!Command->enableDumping)
++ {
++ return;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "COMMAND QUEUE DUMP: %d entries\n", EntryCount
++ );
++#endif
++
++ /* Get the pointer to the first entry. */
++ entry = QueueHeader->currentEntry;
++
++ /* Iterate through the queue. */
++ for (queueIndex = 0; queueIndex < EntryCount; queueIndex += 1)
++ {
++ gcsCMDBUFFER_PTR buffer;
++ gctUINT bufferCount;
++ gctUINT bufferIndex;
++ gctUINT i, count;
++ gctUINT size;
++ gctUINT32_PTR data;
++
++#if gcvDUMP_COMMAND_LINES
++ gctUINT lineNumber;
++#endif
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "ENTRY %d\n", queueIndex
++ );
++#endif
++
++ /* Reset the count. */
++ bufferCount = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Update the count. */
++ bufferCount += 1;
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ if (bufferCount > 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER SET: %d buffers.\n",
++ bufferCount
++ );
++ }
++#endif
++
++ /* Reset the buffer index. */
++ bufferIndex = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Determine the size of the buffer. */
++ size = buffer->dataCount * Command->info.commandAlignment;
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ /* A single buffer? */
++ if (bufferCount == 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER: count=%d (0x%X), size=%d bytes @ %08X.\n",
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER %d: count=%d (0x%X), size=%d bytes @ %08X\n",
++ bufferIndex,
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++#endif
++
++ /* Determine the number of double words to print. */
++ count = size / 4;
++
++ /* Determine the buffer location. */
++ data = (gctUINT32_PTR)
++ (
++ (gctUINT8_PTR) buffer + buffer->bufferOffset
++ );
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "unsigned int _" gcvCOMMAND_BUFFER_NAME "_%d[] =\n",
++ arrayCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "{\n"
++ );
++
++ arrayCount += 1;
++#endif
++
++#if gcvDUMP_COMMAND_LINES
++ /* Reset the line number. */
++ lineNumber = 0;
++#endif
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ count -= 2;
++#endif
++
++ for (i = 0; i < count; i += 1)
++ {
++ if ((i % 8) == 0)
++ {
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\t");
++#else
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " ");
++#endif
++ }
++
++#if gcvDUMP_COMMAND_LINES
++ if (lineNumber == gcvDUMP_COMMAND_LINES)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " . . . . . . . . .\n");
++ break;
++ }
++#endif
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "0x%08X", data[i]);
++
++ if (i + 1 == count)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ if (((i + 1) % 8) == 0)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ",\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ", ");
++ }
++ }
++ }
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "};\n\n"
++ );
++#endif
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ bufferIndex += 1;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ }
++}
++#endif
++
++static gceSTATUS
++_LockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ OUT gcsKERNEL_CMDQUEUE_PTR * Entries,
++ OUT gctUINT_PTR EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* Get a shortcut to the head of the queue. */
++ queueHead = Command->queueHead;
++
++ /* Is the head buffer still being worked on? */
++ if (queueHead->pending)
++ {
++ /* Increment overflow count. */
++ Command->queueOverflow += 1;
++
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, queueHead));
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Determine the first queue entry. */
++ queueHead->currentEntry = (gcsKERNEL_CMDQUEUE_PTR)
++ (
++ (gctUINT8_PTR) queueHead + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ );
++
++ /* Set the pointer to the first entry. */
++ * Entries = queueHead->currentEntry;
++
++ /* Determine the number of available entries. */
++ * EntryCount = queueHead->size / gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UnlockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ IN gctUINT EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++#if !gcdENABLE_INFINITE_SPEED_HW
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++ gcsKERNEL_QUEUE_HEADER_PTR queueNext;
++ gctUINT queueSize;
++ gctUINT newSize;
++ gctUINT unusedSize;
++
++ /* Get shortcut to the head and to the tail of the queue. */
++ queueTail = Command->queueTail;
++ queueHead = Command->queueHead;
++
++ /* Dump the command buffer. */
++#if gcvDUMP_COMMAND_BUFFER
++ _DumpCommandQueue(Command, queueHead, EntryCount);
++#endif
++
++ /* Get a shortcut to the current queue size. */
++ queueSize = queueHead->size;
++
++ /* Determine the new queue size. */
++ newSize = EntryCount * gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++ gcmkASSERT(newSize <= queueSize);
++
++ /* Determine the size of the unused area. */
++ unusedSize = queueSize - newSize;
++
++ /* Is the unused area big enough to become a buffer? */
++ if (unusedSize >= gcvMINUMUM_BUFFER)
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR nextHead;
++
++ /* Place the new header. */
++ nextHead = (gcsKERNEL_QUEUE_HEADER_PTR)
++ (
++ (gctUINT8_PTR) queueHead
++ + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + newSize
++ );
++
++ /* Initialize the buffer. */
++ nextHead->size = unusedSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ nextHead->pending = 0;
++
++ /* Link the buffer in. */
++ nextHead->next = queueHead->next;
++ queueHead->next = nextHead;
++ queueNext = nextHead;
++
++ /* Update the size of the current buffer. */
++ queueHead->size = newSize;
++ }
++
++ /* Not big enough. */
++ else
++ {
++ /* Determine the next queue. */
++ queueNext = queueHead->next;
++ }
++
++ /* Mark the buffer as busy. */
++ queueHead->pending = EntryCount;
++
++ /* Advance to the next buffer. */
++ Command->queueHead = queueNext;
++
++ /* Start the command processor if the queue was empty. */
++ if (queueTail == queueHead)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++ }
++
++ /* The queue was not empty. */
++ else
++ {
++ /* Advance the merge buffer if needed. */
++ if (queueHead == Command->mergeQueue)
++ {
++ Command->mergeQueue = queueNext;
++ }
++ }
++#endif
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API Code *****************************
++\******************************************************************************/
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ )
++{
++ gceSTATUS status, last;
++ gckVGCOMMAND command = gcvNULL;
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++ gctUINT i, j;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskGranularity=0x%x QueueSize=0x%x Command=0x%x",
++ Kernel, TaskGranularity, QueueSize, Command);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(QueueSize >= gcvMINUMUM_BUFFER);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ do
++ {
++ /***********************************************************************
++ ** Generic object initialization.
++ */
++
++ /* Allocate the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGCOMMAND),
++ (gctPOINTER *) &command
++ ));
++
++ /* Initialize the object. */
++ command->object.type = gcvOBJ_COMMAND;
++
++ /* Set the object pointers. */
++ command->kernel = Kernel;
++ command->os = Kernel->os;
++ command->hardware = Kernel->hardware;
++
++ /* Reset pointers. */
++ command->queue = gcvNULL;
++ command->queueMutex = gcvNULL;
++ command->taskMutex = gcvNULL;
++ command->commitMutex = gcvNULL;
++
++ command->powerStallBuffer = gcvNULL;
++ command->powerStallSignal = gcvNULL;
++ command->powerSemaphore = gcvNULL;
++
++ /* Reset context states. */
++ command->contextCounter = 0;
++ command->currentContext = 0;
++
++ /* Enable command buffer dumping. */
++ command->enableDumping = gcvTRUE;
++
++ /* Set features. */
++ command->fe20 = Kernel->hardware->fe20;
++ command->vg20 = Kernel->hardware->vg20;
++ command->vg21 = Kernel->hardware->vg21;
++
++ /* Reset task table .*/
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ command->taskTable, gcmSIZEOF(command->taskTable)
++ ));
++
++ /* Query command buffer attributes. */
++ gcmkERR_BREAK(gckVGCOMMAND_InitializeInfo(command));
++
++ /* Create the control mutexes. */
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->queueMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->taskMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->commitMutex));
++
++ /* Create the power management semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphore(Kernel->os,
++ &command->powerSemaphore));
++
++ gcmkERR_BREAK(gckOS_CreateSignal(Kernel->os,
++ gcvFALSE, &command->powerStallSignal));
++
++ /***********************************************************************
++ ** Command queue initialization.
++ */
++
++ /* Allocate the command queue. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ QueueSize,
++ (gctPOINTER *) &command->queue
++ ));
++
++ /* Initialize the command queue. */
++ queue = command->queue;
++
++ queue->size = QueueSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ queue->pending = 0;
++ queue->next = queue;
++
++ command->queueHead =
++ command->queueTail =
++ command->mergeQueue = command->queue;
++
++ command->queueOverflow = 0;
++
++
++ /***********************************************************************
++ ** Enable TS overflow interrupt.
++ */
++
++ command->info.tsOverflowInt = 0;
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->info.tsOverflowInt,
++ _EventHandler_TSOverflow
++ ));
++
++ /* Mask out the interrupt. */
++ Kernel->hardware->eventMask &= ~(1 << command->info.tsOverflowInt);
++
++
++ /***********************************************************************
++ ** Enable Bus Error interrupt.
++ */
++
++ /* Hardwired to bit 31. */
++ command->busErrorInt = 31;
++
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->busErrorInt,
++ _EventHandler_BusError
++ ));
++
++
++ command->powerStallInt = 30;
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->powerStallInt,
++ _EventHandler_PowerStall
++ ));
++
++ /***********************************************************************
++ ** Task management initialization.
++ */
++
++ command->taskStorage = gcvNULL;
++ command->taskStorageGranularity = TaskGranularity;
++ command->taskStorageUsable = TaskGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++
++ command->taskFreeHead = gcvNULL;
++ command->taskFreeTail = gcvNULL;
++
++ /* Enable block handlers. */
++ for (i = 0; i < gcmCOUNTOF(_blockHandlers); i += 1)
++ {
++ /* Get the target hardware block. */
++ gceBLOCK block = _blockHandlers[i].block;
++
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[block];
++
++ /* Determine the interrupt value index. */
++ gctUINT index = entry->interruptCount;
++
++ /* Create the block semaphore. */
++ if (entry->interruptSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ command->os, &entry->interruptSemaphore
++ ));
++ }
++
++ /* Enable auto-detection. */
++ entry->interruptArray[index] = -1;
++
++ /* Enable interrupt for the block. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &entry->interruptArray[index],
++ _blockHandlers[i].handler
++ ));
++
++ /* Update the number of registered interrupts. */
++ entry->interruptCount += 1;
++
++ /* Increment the semaphore to allow the usage of the registered
++ interrupt. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Get the FE interrupt. */
++ command->info.feBufferInt
++ = command->taskTable[gcvBLOCK_COMMAND].interruptArray[0];
++
++ /* Return gckVGCOMMAND object pointer. */
++ *Command = command;
++
++ gcmkFOOTER_ARG("*Command=0x%x",*Command);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the task table entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[i];
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ for (j = 0; j < entry->interruptCount; j += 1)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[j] >= 0);
++ gcmkASSERT(entry->interruptArray[j] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ entry->interruptArray[j]
++ ));
++ }
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (command->info.tsOverflowInt != -1)
++ {
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->info.tsOverflowInt
++ ));
++ }
++
++ /* Delete the commit mutex. */
++ if (command->commitMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->commitMutex
++ ));
++ }
++
++ /* Delete the command queue mutex. */
++ if (command->taskMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->taskMutex
++ ));
++ }
++
++ /* Delete the command queue mutex. */
++ if (command->queueMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->queueMutex
++ ));
++ }
++
++ /* Delete the command queue. */
++ if (command->queue != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command->queue
++ ));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(
++ Kernel->os, command->powerSemaphore));
++ }
++
++ if (command->powerStallSignal != gcvNULL)
++ {
++ /* Create the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Kernel->os,
++ command->powerStallSignal));
++ }
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command
++ ));
++ }
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ OUT gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ gctUINT i;
++ gcsTASK_STORAGE_PTR nextStorage;
++
++ if (Command->queueHead != gcvNULL)
++ {
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, Command->queueHead));
++ }
++
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &Command->taskTable[i];
++
++ /* Determine the index of the last interrupt in the array. */
++ gctINT index = entry->interruptCount - 1;
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ while (index >= 0)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[index] >= 0);
++ gcmkASSERT(entry->interruptArray[index] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ entry->interruptArray[index]
++ ));
++
++ /* Update to the next interrupt. */
++ index -= 1;
++ entry->interruptCount -= 1;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (Command->info.tsOverflowInt != -1)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->info.tsOverflowInt
++ ));
++
++ Command->info.tsOverflowInt = -1;
++ }
++
++ /* Delete the commit mutex. */
++ if (Command->commitMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->commitMutex
++ ));
++
++ Command->commitMutex = gcvNULL;
++ }
++
++ /* Delete the command queue mutex. */
++ if (Command->taskMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->taskMutex
++ ));
++
++ Command->taskMutex = gcvNULL;
++ }
++
++ /* Delete the command queue mutex. */
++ if (Command->queueMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->queueMutex
++ ));
++
++ Command->queueMutex = gcvNULL;
++ }
++
++ if (Command->powerSemaphore != gcvNULL)
++ {
++ /* Destroy the power management semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++
++ if (Command->powerStallSignal != gcvNULL)
++ {
++ /* Create the power management semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySignal(
++ Command->os,
++ Command->powerStallSignal));
++ }
++
++ if (Command->queue != gcvNULL)
++ {
++ /* Delete the command queue. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->queue
++ ));
++ }
++
++ /* Destroy all allocated buffers. */
++ while (Command->taskStorage)
++ {
++ /* Copy the buffer pointer. */
++ nextStorage = Command->taskStorage->next;
++
++ /* Free the current container. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->taskStorage
++ ));
++
++ /* Advance to the next one. */
++ Command->taskStorage = nextStorage;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Mark the object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Free(Command->os, Command));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Restore the object type if failed. */
++ Command->object.type = gcvOBJ_COMMAND;
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Information=0x%x", Command, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Information != gcvNULL);
++
++ /* Copy the information. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ Information, &Command->info, sizeof(gcsCOMMAND_BUFFER_INFO)
++ ));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x CommandBuffer=0x%x Data=0x%x",
++ Command, Size, CommandBuffer, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ do
++ {
++ /* Allocate the buffer. */
++ gcmkERR_BREAK(_AllocateCommandBuffer(Command, Size, CommandBuffer));
++
++ /* Determine the data pointer. */
++ * Data = (gctUINT8_PTR) (*CommandBuffer) + (* CommandBuffer)->bufferOffset;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ /* Free command buffer. */
++ status = _FreeCommandBuffer(Command->kernel, CommandBuffer);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ do
++ {
++ gctUINT queueLength;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Set the buffer. */
++ kernelEntry->commandBuffer = CommandBuffer;
++ kernelEntry->handler = _FreeKernelCommandBuffer;
++
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, 1
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ )
++{
++ /*
++ The first buffer is executed through a direct gckVGHARDWARE_Execute call,
++ therefore only an update is needed after the execution is over. All
++ consequent buffers need to be executed upon the first update call from
++ the FE interrupt handler.
++ */
++
++ static gcsQUEUE_UPDATE_CONTROL _dynamicBuffer[] =
++ {
++ {
++ _UpdateDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ },
++ {
++ _ExecuteDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _ExecuteLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ }
++ };
++
++ static gcsQUEUE_UPDATE_CONTROL _staticBuffer[] =
++ {
++ {
++ _UpdateStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ },
++ {
++ _ExecuteStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _ExecuteLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ }
++ };
++
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x Queue=0x%x EntryCount=0x%x TaskTable=0x%x",
++ Command, Context, Queue, EntryCount, TaskTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++ gcmkVERIFY_ARGUMENT(EntryCount > 1);
++
++#ifdef __QNXNTO__
++ TaskTable->coid = Context->coid;
++ TaskTable->rcvid = Context->rcvid;
++#endif /* __QNXNTO__ */
++
++ do
++ {
++ gctBOOL haveFETasks;
++ gctUINT queueSize;
++ gcsVGCMDQUEUE_PTR mappedQueue;
++ gcsVGCMDQUEUE_PTR userEntry;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++ gcsQUEUE_UPDATE_CONTROL_PTR queueControl;
++ gctUINT currentLength;
++ gctUINT queueLength;
++ gctUINT entriesQueued;
++ gctUINT8_PTR previousEnd;
++ gctBOOL previousDynamic;
++ gctBOOL previousExecuted;
++ gctUINT controlIndex;
++
++ gcmkERR_BREAK(gckVGHARDWARE_SetPowerManagementState(
++ Command->hardware, gcvPOWER_ON_AUTO
++ ));
++
++ /* Acquire the power semaphore. */
++ gcmkERR_BREAK(gckOS_AcquireSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ status = gckOS_AcquireMutex(
++ Command->os,
++ Command->commitMutex,
++ gcvINFINITE
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ break;
++ }
++
++ do
++ {
++ gcmkERR_BREAK(_FlushMMU(Command));
++
++ /* Assign a context ID if not yet assigned. */
++ if (Context->id == 0)
++ {
++ /* Assign the next context number. */
++ Context->id = ++ Command->contextCounter;
++
++ /* See if we overflowed. */
++ if (Command->contextCounter == 0)
++ {
++ /* We actually did overflow, wow... */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++ }
++
++ /* The first entry in the queue is always the context buffer.
++ Verify whether the user context is the same as the current
++ context and if that's the case, skip the first entry. */
++ if (Context->id == Command->currentContext)
++ {
++ /* Same context as before, skip the first entry. */
++ EntryCount -= 1;
++ Queue += 1;
++
++ /* Set the signal to avoid user waiting. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->rcvid, Context->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->process
++ ));
++
++#endif /* __QNXNTO__ */
++
++ }
++ else
++ {
++ /* Different user context - keep the first entry.
++ Set the user context as the current one. */
++ Command->currentContext = Context->id;
++ }
++
++ /* Reset pointers. */
++ queueControl = gcvNULL;
++ previousEnd = gcvNULL;
++
++ /* Determine whether there are FE tasks to be performed. */
++ haveFETasks = (TaskTable->table[gcvBLOCK_COMMAND].head != gcvNULL);
++
++ /* Determine the size of the queue. */
++ queueSize = EntryCount * gcmSIZEOF(gcsVGCMDQUEUE);
++
++ /* Map the command queue into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ (gctPOINTER *) &mappedQueue
++ ));
++
++ /* Set the first entry. */
++ userEntry = mappedQueue;
++
++ /* Process the command queue. */
++ while (EntryCount)
++ {
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Determine the number of entries to process. */
++ currentLength = (queueLength < EntryCount)
++ ? queueLength
++ : EntryCount;
++
++ /* Update the number of the entries left to process. */
++ EntryCount -= currentLength;
++
++ /* Reset previous flags. */
++ previousDynamic = gcvFALSE;
++ previousExecuted = gcvFALSE;
++
++ /* Set the initial control index. */
++ controlIndex = 0;
++
++ /* Process entries. */
++ for (entriesQueued = 0; entriesQueued < currentLength; entriesQueued += 1)
++ {
++ /* Get the kernel pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = gcvNULL;
++ gcmkERR_BREAK(_ConvertUserCommandBufferPointer(
++ Command,
++ userEntry->commandBuffer,
++ &commandBuffer
++ ));
++
++ /* Is it a dynamic command buffer? */
++ if (userEntry->dynamic)
++ {
++ /* Select dynamic buffer control functions. */
++ queueControl = &_dynamicBuffer[controlIndex];
++ }
++
++ /* No, a static command buffer. */
++ else
++ {
++ /* Select static buffer control functions. */
++ queueControl = &_staticBuffer[controlIndex];
++ }
++
++ /* Set the command buffer pointer to the entry. */
++ kernelEntry->commandBuffer = commandBuffer;
++
++ /* If the previous entry was a dynamic command buffer,
++ link it to the current. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command,
++ previousEnd,
++ commandBuffer->address,
++ commandBuffer->dataCount,
++ gcvNULL
++ ));
++
++ /* The buffer will be auto-executed, only need to
++ update it after it has been executed. */
++ kernelEntry->handler = queueControl->update;
++
++ /* The buffer is only being updated. */
++ previousExecuted = gcvFALSE;
++ }
++ else
++ {
++ /* Set the buffer up for execution. */
++ kernelEntry->handler = queueControl->execute;
++
++ /* The buffer is being updated. */
++ previousExecuted = gcvTRUE;
++ }
++
++ /* The current buffer's END command becomes the last END. */
++ previousEnd
++ = ((gctUINT8_PTR) commandBuffer)
++ + commandBuffer->bufferOffset
++ + commandBuffer->dataCount * Command->info.commandAlignment
++ - Command->info.staticTailSize;
++
++ /* Update the last entry info. */
++ previousDynamic = userEntry->dynamic;
++
++ /* Advance entries. */
++ userEntry += 1;
++ kernelEntry += 1;
++
++ /* Update the control index. */
++ controlIndex = 1;
++ }
++
++ /* If the previous entry was a dynamic command buffer,
++ terminate it with an END. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ previousEnd,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++ }
++
++ /* Last buffer? */
++ if (EntryCount == 0)
++ {
++ /* Modify the last command buffer's routines to handle
++ tasks if any.*/
++ if (haveFETasks)
++ {
++ if (previousExecuted)
++ {
++ kernelEntry[-1].handler = queueControl->lastExecute;
++ }
++ else
++ {
++ kernelEntry[-1].handler = queueControl->lastUpdate;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++ /* Schedule tasks. */
++ gcmkERR_BREAK(_ScheduleTasks(Command, TaskTable, previousEnd));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++ }
++
++ /* Unlock and schedule the current queue for execution. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, currentLength
++ ));
++ }
++
++
++ /* Unmap the user command buffer. */
++ gcmkERR_BREAK(gckOS_UnmapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ mappedQueue
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ Command->os,
++ Command->commitMutex
++ ));
++
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_db.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_db.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_db.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_db.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1604 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_DATABASE
++
++/*******************************************************************************
++***** Private functions *******************************************************/
++
++#define _GetSlot(database, x) \
++ (gctUINT32)(((gcmPTR_TO_UINT64(x) >> 7) % gcmCOUNTOF(database->list)))
++
++/*******************************************************************************
++** gckKERNEL_NewDatabase
++**
++** Create a new database structure and insert it to the head of the hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++static gceSTATUS
++gckKERNEL_NewDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctBOOL acquired = gcvFALSE;
++ gctSIZE_T slot;
++ gcsDATABASE_PTR existingDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Walk the hash list. */
++ for (existingDatabase = Kernel->db->db[slot];
++ existingDatabase != gcvNULL;
++ existingDatabase = existingDatabase->next)
++ {
++ if (existingDatabase->processID == ProcessID)
++ {
++ /* One process can't be added twice. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++ }
++
++ if (Kernel->db->freeDatabase != gcvNULL)
++ {
++ /* Allocate a database from the free list. */
++ database = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = database->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate a new database from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE),
++ &pointer));
++
++ database = pointer;
++ }
++
++ /* Insert the database into the hash. */
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++
++ /* Save the hash slot. */
++ database->slot = slot;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindDatabase
++**
++** Find a database identified by a process ID and move it to the head of the
++** hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++static gceSTATUS
++gckKERNEL_FindDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database, previous;
++ gctSIZE_T slot;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d LastProcessID=%d",
++ Kernel, ProcessID, LastProcessID);
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Check whether we are getting the last known database. */
++ if (LastProcessID)
++ {
++ /* Use last database. */
++ database = Kernel->db->lastDatabase;
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ else
++ {
++ /* Walk the hash list. */
++ for (previous = gcvNULL, database = Kernel->db->db[slot];
++ database != gcvNULL;
++ database = database->next)
++ {
++ if (database->processID == ProcessID)
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = database;
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (previous != gcvNULL)
++ {
++ /* Move database to the head of the hash list. */
++ previous->next = database->next;
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteDatabase
++**
++** Remove a database from the hash list and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to the database structure to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++static gceSTATUS
++gckKERNEL_DeleteDatabase(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Check slot value. */
++ gcmkVERIFY_ARGUMENT(Database->slot < gcmCOUNTOF(Kernel->db->db));
++
++ if (Database->slot < gcmCOUNTOF(Kernel->db->db))
++ {
++ /* Check if database if the head of the hash list. */
++ if (Kernel->db->db[Database->slot] == Database)
++ {
++ /* Remove the database from the hash list. */
++ Kernel->db->db[Database->slot] = Database->next;
++ }
++ else
++ {
++ /* Walk the has list to find the database. */
++ for (database = Kernel->db->db[Database->slot];
++ database != gcvNULL;
++ database = database->next
++ )
++ {
++ /* Check if the next list entry is this database. */
++ if (database->next == Database)
++ {
++ /* Remove the database from the hash list. */
++ database->next = Database->next;
++ break;
++ }
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Ouch! Something got corrupted. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ /* Insert database to the free list. */
++ Kernel->db->lastDatabase->next = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = Kernel->db->lastDatabase;
++ }
++
++ /* Keep database as the last database. */
++ Kernel->db->lastDatabase = Database;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_NewRecord
++**
++** Create a new database record structure and insert it to the head of the
++** database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR * Record
++** Pointer to a variable receiving the database record structure
++** pointer on success.
++*/
++static gceSTATUS
++gckKERNEL_NewRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gctUINT32 Slot,
++ OUT gcsDATABASE_RECORD_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (Kernel->db->freeRecord != gcvNULL)
++ {
++ /* Allocate the record from the free list. */
++ record = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate the record from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE_RECORD),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Insert the record in the database. */
++ record->next = Database->list[Slot];
++ Database->list[Slot] = record;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the record. */
++ *Record = record;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", *Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteRecord
++**
++** Remove a database record from the database and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Data
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gctSIZE_T_PTR Bytes
++** Pointer to a variable that receives the size of the record deleted.
++** Can be gcvNULL if the size is not required.
++*/
++static gceSTATUS
++gckKERNEL_DeleteRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T_PTR Bytes OPTIONAL
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record, previous;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot], previous = gcvNULL;
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = record;
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return size of record. */
++ *Bytes = record->bytes;
++ }
++
++ /* Remove record from database. */
++ if (previous == gcvNULL)
++ {
++ Database->list[slot] = record->next;
++ }
++ else
++ {
++ previous->next = record->next;
++ }
++
++ /* Insert record in free list. */
++ record->next = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindRecord
++**
++** Find a database record from the database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Data
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gctSIZE_T_PTR Bytes
++** Pointer to a variable that receives the size of the record deleted.
++** Can be gcvNULL if the size is not required.
++*/
++static gceSTATUS
++gckKERNEL_FindRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot];
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Record != gcvNULL)
++ {
++ /* Return information of record. */
++ gcmkONERROR(
++ gckOS_MemCopy(Record, record, sizeof(gcsDATABASE_RECORD)));
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("Record=0x%x", Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++/*******************************************************************************
++***** Public API **************************************************************/
++
++/*******************************************************************************
++** gckKERNEL_CreateProcessDB
++**
++** Create a new process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database = gcvNULL;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Create a new database. */
++ gcmkONERROR(gckKERNEL_NewDatabase(Kernel, ProcessID, &database));
++
++ /* Initialize the database. */
++ database->processID = ProcessID;
++ database->vidMem.bytes = 0;
++ database->vidMem.maxBytes = 0;
++ database->vidMem.totalBytes = 0;
++ database->nonPaged.bytes = 0;
++ database->nonPaged.maxBytes = 0;
++ database->nonPaged.totalBytes = 0;
++ database->contiguous.bytes = 0;
++ database->contiguous.maxBytes = 0;
++ database->contiguous.totalBytes = 0;
++ database->mapMemory.bytes = 0;
++ database->mapMemory.maxBytes = 0;
++ database->mapMemory.totalBytes = 0;
++ database->mapUserMemory.bytes = 0;
++ database->mapUserMemory.maxBytes = 0;
++ database->mapUserMemory.totalBytes = 0;
++ database->vidMemResv.bytes = 0;
++ database->vidMemResv.maxBytes = 0;
++ database->vidMemResv.totalBytes = 0;
++ database->vidMemCont.bytes = 0;
++ database->vidMemCont.maxBytes = 0;
++ database->vidMemCont.totalBytes = 0;
++ database->vidMemVirt.bytes = 0;
++ database->vidMemVirt.maxBytes = 0;
++ database->vidMemVirt.totalBytes = 0;
++
++ for (i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++ database->list[i] = gcvNULL;
++ }
++
++#if gcdSECURE_USER
++ {
++ gctINT slot;
++ gcskSECURE_CACHE * cache = &database->cache;
++
++ /* Setup the linked list of cache nodes. */
++ for (slot = 1; slot <= gcdSECURE_CACHE_SLOTS; ++slot)
++ {
++ cache->cache[slot].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ cache->cache[slot].prev = &cache->cache[slot - 1];
++ cache->cache[slot].next = &cache->cache[slot + 1];
++# endif
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ cache->cache[slot].nextHash = gcvNULL;
++ cache->cache[slot].prevHash = gcvNULL;
++# endif
++ }
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Setup the head and tail of the cache. */
++ cache->cache[0].next = &cache->cache[1];
++ cache->cache[0].prev = &cache->cache[gcdSECURE_CACHE_SLOTS];
++ cache->cache[0].logical = gcvNULL;
++
++ /* Fix up the head and tail pointers. */
++ cache->cache[0].next->prev = &cache->cache[0];
++ cache->cache[0].prev->next = &cache->cache[0];
++# endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero out the hash table. */
++ for (slot = 0; slot < gcmCOUNTOF(cache->hash); ++slot)
++ {
++ cache->hash[slot].logical = gcvNULL;
++ cache->hash[slot].nextHash = gcvNULL;
++ }
++# endif
++
++ /* Initialize cache index. */
++ cache->cacheIndex = gcvNULL;
++ cache->cacheFree = 1;
++ cache->cacheStamp = 0;
++ }
++#endif
++
++ /* Reset idle timer. */
++ Kernel->db->lastIdle = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AddProcessDB
++**
++** Add a record to a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to add.
++**
++** gctPOINTER Pointer
++** Data of the record to add.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the record to add.
++**
++** gctSIZE_T Size
++** Size of the record to add.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++ gcsDATABASE_COUNTERS * count;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x "
++ "Physical=0x%x Size=%lu",
++ Kernel, ProcessID, Type, Pointer, Physical, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Special case the idle record. */
++ if (Type == gcvDB_IDLE)
++ {
++ gctUINT64 time;
++
++ /* Get the current profile time. */
++ gcmkONERROR(gckOS_GetProfileTick(&time));
++
++ if ((ProcessID == 0) && (Kernel->db->lastIdle != 0))
++ {
++ /* Out of idle, adjust time it was idle. */
++ Kernel->db->idleTime += time - Kernel->db->lastIdle;
++ Kernel->db->lastIdle = 0;
++ }
++ else if (ProcessID == 1)
++ {
++ /* Save current idle time. */
++ Kernel->db->lastIdle = time;
++ }
++
++#if gcdDYNAMIC_SPEED
++ {
++ /* Test for first call. */
++ if (Kernel->db->lastSlowdown == 0)
++ {
++ /* Save milliseconds. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ else
++ {
++ /* Compute ellapsed time in milliseconds. */
++ gctUINT delta = gckOS_ProfileToMS(time - Kernel->db->lastSlowdown);
++
++ /* Test for end of period. */
++ if (delta >= gcdDYNAMIC_SPEED)
++ {
++ /* Compute number of idle milliseconds. */
++ gctUINT idle = gckOS_ProfileToMS(
++ Kernel->db->idleTime - Kernel->db->lastSlowdownIdle);
++
++ /* Broadcast to slow down the GPU. */
++ gcmkONERROR(gckOS_BroadcastCalibrateSpeed(Kernel->os,
++ Kernel->hardware,
++ idle,
++ delta));
++
++ /* Save current time. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ }
++ }
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Create a new record in the database. */
++ gcmkONERROR(gckKERNEL_NewRecord(Kernel, database, _GetSlot(database, Pointer), &record));
++
++ /* Initialize the record. */
++ record->kernel = Kernel;
++ record->type = Type;
++ record->data = Pointer;
++ record->physical = Physical;
++ record->bytes = Size;
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ count = &database->vidMem;
++ break;
++
++ case gcvDB_NON_PAGED:
++ count = &database->nonPaged;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ count = &database->contiguous;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ count = &database->mapMemory;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ count = &database->mapUserMemory;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ count = &database->vidMemResv;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ count = &database->vidMemCont;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ count = &database->vidMemVirt;
++ break;
++
++ default:
++ count = gcvNULL;
++ break;
++ }
++
++ if (count != gcvNULL)
++ {
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_RemoveProcessDB
++**
++** Remove a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to remove.
++**
++** gctPOINTER Pointer
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctSIZE_T bytes = 0;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Delete the record. */
++ gcmkONERROR(
++ gckKERNEL_DeleteRecord(Kernel, database, Type, Pointer, &bytes));
++
++ /* Update counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ database->vidMem.bytes -= bytes;
++ break;
++
++ case gcvDB_NON_PAGED:
++ database->nonPaged.bytes -= bytes;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ database->contiguous.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ database->mapMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ database->mapUserMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ database->vidMemResv.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ database->vidMemCont.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ database->vidMemVirt.bytes -= bytes;
++ break;
++
++ default:
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindProcessDB
++**
++** Find a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to remove.
++**
++** gctPOINTER Pointer
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR Record
++** Copy of record.
++*/
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, ThreadID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Find the record. */
++ gcmkONERROR(
++ gckKERNEL_FindRecord(Kernel, database, Type, Pointer, Record));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DestroyProcessDB
++**
++** Destroy a process database. If the database contains any records, the data
++** inside those records will be deleted as well. This aids in the cleanup if
++** a process has died unexpectedly or has memory leaks.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record, next;
++ gctBOOL asynchronous;
++ gctPHYS_ADDR physical;
++ gcuVIDMEM_NODE_PTR node;
++ gckKERNEL kernel = Kernel;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): VidMem: total=%lu max=%lu",
++ ProcessID, database->vidMem.totalBytes,
++ database->vidMem.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): NonPaged: total=%lu max=%lu",
++ ProcessID, database->nonPaged.totalBytes,
++ database->nonPaged.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Contiguous: total=%lu max=%lu",
++ ProcessID, database->contiguous.totalBytes,
++ database->contiguous.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Idle time=%llu",
++ ProcessID, Kernel->db->idleTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapMemory.totalBytes,
++ database->mapMemory.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapUserMemory.totalBytes,
++ database->mapUserMemory.maxBytes);
++
++ if (database->list != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "Process %d has entries in its database:",
++ ProcessID);
++ }
++
++ for(i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++
++ /* Walk all records. */
++ for (record = database->list[i]; record != gcvNULL; record = next)
++ {
++ /* Next next record. */
++ next = record->next;
++
++ /* Dispatch on record type. */
++ switch (record->type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ /* Free the video memory. */
++ status = gckVIDMEM_Free(gcmUINT64_TO_PTR(record->data));
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_NON_PAGED:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the non paged memory. */
++ status = gckOS_FreeNonPagedMemory(Kernel->os,
++ record->bytes,
++ physical,
++ record->data);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: NON_PAGED 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ case gcvDB_COMMAND_BUFFER:
++ /* Free the command buffer. */
++ status = gckEVENT_DestroyVirtualCommandBuffer(record->kernel->eventObj,
++ record->bytes,
++ gcmNAME_TO_PTR(record->physical),
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: COMMAND_BUFFER 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++#endif
++
++ case gcvDB_CONTIGUOUS:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the contiguous memory. */
++ status = gckEVENT_FreeContiguousMemory(Kernel->eventObj,
++ record->bytes,
++ physical,
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTIGUOUS 0x%x bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_SIGNAL:
++#if USE_NEW_LINUX_SIGNAL
++ status = gcvSTATUS_NOT_SUPPORTED;
++#else
++ /* Free the user signal. */
++ status = gckOS_DestroyUserSignal(Kernel->os,
++ gcmPTR2INT(record->data));
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SIGNAL %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++
++ case gcvDB_VIDEO_MEMORY_LOCKED:
++ node = gcmUINT64_TO_PTR(record->data);
++ /* Unlock what we still locked */
++ status = gckVIDMEM_Unlock(record->kernel,
++ node,
++ gcvSURF_TYPE_UNKNOWN,
++ &asynchronous);
++
++ if (gcmIS_SUCCESS(status) && (gcvTRUE == asynchronous))
++ {
++ /* TODO: we maybe need to schedule a event here */
++ status = gckVIDMEM_Unlock(record->kernel,
++ node,
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL);
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY_LOCKED 0x%x (status=%d)",
++ node, status);
++ break;
++
++ case gcvDB_CONTEXT:
++ /* TODO: Free the context */
++ status = gckCOMMAND_Detach(Kernel->command, gcmNAME_TO_PTR(record->data));
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTEXT 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ /* Unmap memory. */
++ status = gckKERNEL_UnmapMemory(Kernel,
++ record->physical,
++ record->bytes,
++ record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP MEMORY %d (status=%d)",
++ gcmPTR2INT(record->data), status);
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ /* TODO: Unmap user memory. */
++ status = gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ record->physical,
++ record->bytes,
++ gcmNAME_TO_PTR(record->data),
++ 0);
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP USER MEMORY %d (status=%d)",
++ gcmPTR2INT(record->data), status);
++ break;
++
++ case gcvDB_SHARED_INFO:
++ status = gckOS_FreeMemory(Kernel->os, record->physical);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvDB_SYNC_POINT:
++ /* Free the user signal. */
++ status = gckOS_DestroySyncPoint(Kernel->os,
++ (gctSYNC_POINT) record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SYNC POINT %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++#endif
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ break;//Nothing to do
++
++ default:
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DATABASE,
++ "DB: Correcupted record=0x%08x type=%d",
++ record, record->type);
++ break;
++ }
++
++ /* Delete the record. */
++ gcmkONERROR(gckKERNEL_DeleteRecord(Kernel,
++ database,
++ record->type,
++ record->data,
++ gcvNULL));
++ }
++
++ }
++
++ /* Delete the database. */
++ gcmkONERROR(gckKERNEL_DeleteDatabase(Kernel, database));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_QueryProcessDB
++**
++** Query a process database for the current usage of a particular record type.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to query.
++**
++** OUTPUT:
++**
++** gcuDATABASE_INFO * Info
++** Pointer to a variable that receives the requested information.
++*/
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Info=0x%x",
++ Kernel, ProcessID, Type, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(Kernel, ProcessID, LastProcessID, &database));
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMem,
++ gcmSIZEOF(database->vidMem));
++ break;
++
++ case gcvDB_NON_PAGED:
++ gckOS_MemCopy(&Info->counters,
++ &database->nonPaged,
++ gcmSIZEOF(database->vidMem));
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ gckOS_MemCopy(&Info->counters,
++ &database->contiguous,
++ gcmSIZEOF(database->vidMem));
++ break;
++
++ case gcvDB_IDLE:
++ Info->time = Kernel->db->idleTime;
++ Kernel->db->idleTime = 0;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapMemory,
++ gcmSIZEOF(database->mapMemory));
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapUserMemory,
++ gcmSIZEOF(database->mapUserMemory));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemResv,
++ gcmSIZEOF(database->vidMemResv));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemCont,
++ gcmSIZEOF(database->vidMemCont));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemVirt,
++ gcmSIZEOF(database->vidMemVirt));
++ break;
++
++ default:
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++/*******************************************************************************
++** gckKERNEL_GetProcessDBCache
++**
++** Get teh secure cache from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** gcskSECURE_CACHE_PTR * Cache
++** Pointer to a variable that receives the secure cache pointer.
++*/
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Cache != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Return the pointer to the cache. */
++ *Cache = &database->cache;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Cache=0x%x", *Cache);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ )
++{
++ gcsDATABASE_PTR database;
++ gctINT i, pid;
++ gctUINT8 name[24];
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** PROCESS DB DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkPRINT_N(8, "%-8s%s\n", "PID", "NAME");
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ for (database = Kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ pid = database->processID;
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ gcmkPRINT_N(8, "%-8d%s\n", pid, name);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_debug.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_debug.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_debug.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_debug.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2559 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include <gc_hal_kernel_debug.h>
++
++/******************************************************************************\
++******************************** Debug Variables *******************************
++\******************************************************************************/
++
++static gceSTATUS _lastError = gcvSTATUS_OK;
++static gctUINT32 _debugLevel = gcvLEVEL_ERROR;
++/*
++_debugZones config value
++Please Reference define in gc_hal_base.h
++*/
++static gctUINT32 _debugZones = gcvZONE_NONE;
++
++/******************************************************************************\
++********************************* Debug Switches *******************************
++\******************************************************************************/
++
++/*
++ gcdBUFFERED_OUTPUT
++
++ When set to non-zero, all output is collected into a buffer with the
++ specified size. Once the buffer gets full, the debug buffer will be
++ printed to the console. gcdBUFFERED_SIZE determines the size of the buffer.
++*/
++#define gcdBUFFERED_OUTPUT 0
++
++/*
++ gcdBUFFERED_SIZE
++
++ When set to non-zero, all output is collected into a buffer with the
++ specified size. Once the buffer gets full, the debug buffer will be
++ printed to the console.
++*/
++#define gcdBUFFERED_SIZE (1024 * 1024 * 2)
++
++/*
++ gcdDMA_BUFFER_COUNT
++
++ If greater then zero, the debugger will attempt to find the command buffer
++ where DMA is currently executing and then print this buffer and
++ (gcdDMA_BUFFER_COUNT - 1) buffers before the current one. If set to zero
++ or the current buffer is not found, all buffers are printed.
++*/
++#define gcdDMA_BUFFER_COUNT 0
++
++/*
++ gcdTHREAD_BUFFERS
++
++ When greater then one, will accumulate messages from the specified number
++ of threads in separate output buffers.
++*/
++#define gcdTHREAD_BUFFERS 1
++
++/*
++ gcdENABLE_OVERFLOW
++
++ When set to non-zero, and the output buffer gets full, instead of being
++ printed, it will be allowed to overflow removing the oldest messages.
++*/
++#define gcdENABLE_OVERFLOW 1
++
++/*
++ gcdSHOW_LINE_NUMBER
++
++ When enabledm each print statement will be preceeded with the current
++ line number.
++*/
++#define gcdSHOW_LINE_NUMBER 0
++
++/*
++ gcdSHOW_PROCESS_ID
++
++ When enabledm each print statement will be preceeded with the current
++ process ID.
++*/
++#define gcdSHOW_PROCESS_ID 0
++
++/*
++ gcdSHOW_THREAD_ID
++
++ When enabledm each print statement will be preceeded with the current
++ thread ID.
++*/
++#define gcdSHOW_THREAD_ID 0
++
++/*
++ gcdSHOW_TIME
++
++ When enabled each print statement will be preceeded with the current
++ high-resolution time.
++*/
++#define gcdSHOW_TIME 0
++
++
++/******************************************************************************\
++****************************** Miscellaneous Macros ****************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmDBGASSERT(Expression, Format, Value) \
++ if (!(Expression)) \
++ { \
++ _DirectPrint( \
++ "*** gcmDBGASSERT ***************************\n" \
++ " function : %s\n" \
++ " line : %d\n" \
++ " expression : " #Expression "\n" \
++ " actual value : " Format "\n", \
++ __FUNCTION__, __LINE__, Value \
++ ); \
++ }
++#else
++# define gcmDBGASSERT(Expression, Format, Value)
++#endif
++
++#define gcmPTRALIGNMENT(Pointer, Alignemnt) \
++( \
++ gcmALIGN(gcmPTR2INT(Pointer), Alignemnt) - gcmPTR2INT(Pointer) \
++)
++
++#if gcdALIGNBYSIZE
++# define gcmISALIGNED(Offset, Alignment) \
++ (((Offset) & ((Alignment) - 1)) == 0)
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment) \
++ Pointer = (Type) gcmINT2PTR(gcmALIGN(gcmPTR2INT(Pointer), Alignment))
++#else
++# define gcmISALIGNED(Offset, Alignment) \
++ gcvTRUE
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment)
++#endif
++
++#define gcmALIGNSIZE(Offset, Size) \
++ ((Size - Offset) + Size)
++
++#define gcdHAVEPREFIX \
++( \
++ gcdSHOW_TIME \
++ || gcdSHOW_LINE_NUMBER \
++ || gcdSHOW_PROCESS_ID \
++ || gcdSHOW_THREAD_ID \
++)
++
++#if gcdHAVEPREFIX
++
++# define gcdOFFSET 0
++
++#if gcdSHOW_TIME
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdTIMESIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdTIMESIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdTIMEFORMAT "0x%016llX"
++# else
++# define gcdTIMEFORMAT ", 0x%016llX"
++# endif
++# else
++# define gcdTIMESIZE 0
++# define gcdTIMEFORMAT
++# endif
++
++#if gcdSHOW_LINE_NUMBER
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdNUMSIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdNUMSIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdNUMFORMAT "%8llu"
++# else
++# define gcdNUMFORMAT ", %8llu"
++# endif
++# else
++# define gcdNUMSIZE 0
++# define gcdNUMFORMAT
++# endif
++
++#if gcdSHOW_PROCESS_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdPIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPIDFORMAT "pid=%5d"
++# else
++# define gcdPIDFORMAT ", pid=%5d"
++# endif
++# else
++# define gcdPIDSIZE 0
++# define gcdPIDFORMAT
++# endif
++
++#if gcdSHOW_THREAD_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdTIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdTIDFORMAT "tid=%5d"
++# else
++# define gcdTIDFORMAT ", tid=%5d"
++# endif
++# else
++# define gcdTIDSIZE 0
++# define gcdTIDFORMAT
++# endif
++
++# define gcdPREFIX_SIZE \
++ ( \
++ gcdTIMESIZE \
++ + gcdNUMSIZE \
++ + gcdPIDSIZE \
++ + gcdTIDSIZE \
++ )
++
++ static const char * _prefixFormat =
++ "["
++ gcdTIMEFORMAT
++ gcdNUMFORMAT
++ gcdPIDFORMAT
++ gcdTIDFORMAT
++ "] ";
++
++#else
++
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPREFIX_SIZE 0
++
++#endif
++
++/* Assumed largest variable argument leader size. */
++#define gcdVARARG_LEADER gcmSIZEOF(gctUINT64)
++
++/* Alignnments. */
++#if gcdALIGNBYSIZE
++# define gcdPREFIX_ALIGNMENT gcdPREFIX_LEADER
++# define gcdVARARG_ALIGNMENT gcdVARARG_LEADER
++#else
++# define gcdPREFIX_ALIGNMENT 0
++# define gcdVARARG_ALIGNMENT 0
++#endif
++
++#if gcdBUFFERED_OUTPUT
++# define gcdOUTPUTPREFIX _AppendPrefix
++# define gcdOUTPUTSTRING _AppendString
++# define gcdOUTPUTCOPY _AppendCopy
++# define gcdOUTPUTBUFFER _AppendBuffer
++#else
++# define gcdOUTPUTPREFIX _PrintPrefix
++# define gcdOUTPUTSTRING _PrintString
++# define gcdOUTPUTCOPY _PrintString
++# define gcdOUTPUTBUFFER _PrintBuffer
++#endif
++
++/******************************************************************************\
++****************************** Private Structures ******************************
++\******************************************************************************/
++
++typedef enum _gceBUFITEM
++{
++ gceBUFITEM_NONE,
++ gcvBUFITEM_PREFIX,
++ gcvBUFITEM_STRING,
++ gcvBUFITEM_COPY,
++ gcvBUFITEM_BUFFER
++}
++gceBUFITEM;
++
++/* Common item head/buffer terminator. */
++typedef struct _gcsBUFITEM_HEAD * gcsBUFITEM_HEAD_PTR;
++typedef struct _gcsBUFITEM_HEAD
++{
++ gceBUFITEM type;
++}
++gcsBUFITEM_HEAD;
++
++/* String prefix (for ex. [ 1,tid=0x019A]) */
++typedef struct _gcsBUFITEM_PREFIX * gcsBUFITEM_PREFIX_PTR;
++typedef struct _gcsBUFITEM_PREFIX
++{
++ gceBUFITEM type;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_PREFIX;
++
++/* Buffered string. */
++typedef struct _gcsBUFITEM_STRING * gcsBUFITEM_STRING_PTR;
++typedef struct _gcsBUFITEM_STRING
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctCONST_STRING message;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_STRING;
++
++/* Buffered string (copy of the string is included with the record). */
++typedef struct _gcsBUFITEM_COPY * gcsBUFITEM_COPY_PTR;
++typedef struct _gcsBUFITEM_COPY
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_COPY;
++
++/* Memory buffer. */
++typedef struct _gcsBUFITEM_BUFFER * gcsBUFITEM_BUFFER_PTR;
++typedef struct _gcsBUFITEM_BUFFER
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gceDUMP_BUFFER bufferType;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ gctUINT32 dmaAddress;
++#endif
++
++ gctUINT dataSize;
++ gctUINT32 address;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_BUFFER;
++
++typedef struct _gcsBUFFERED_OUTPUT * gcsBUFFERED_OUTPUT_PTR;
++typedef struct _gcsBUFFERED_OUTPUT
++{
++#if gcdTHREAD_BUFFERS > 1
++ gctUINT32 threadID;
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ gctUINT64 lineNumber;
++#endif
++
++ gctINT indent;
++
++#if gcdBUFFERED_OUTPUT
++ gctINT start;
++ gctINT index;
++ gctINT count;
++ gctUINT8 buffer[gcdBUFFERED_SIZE];
++#endif
++
++ gcsBUFFERED_OUTPUT_PTR prev;
++ gcsBUFFERED_OUTPUT_PTR next;
++}
++gcsBUFFERED_OUTPUT;
++
++typedef gctUINT (* gcfPRINTSTRING) (
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++typedef gctINT (* gcfGETITEMSIZE) (
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++/******************************************************************************\
++******************************* Private Variables ******************************
++\******************************************************************************/
++
++static gcsBUFFERED_OUTPUT _outputBuffer[gcdTHREAD_BUFFERS];
++static gcsBUFFERED_OUTPUT_PTR _outputBufferHead = gcvNULL;
++static gcsBUFFERED_OUTPUT_PTR _outputBufferTail = gcvNULL;
++
++/******************************************************************************\
++****************************** Item Size Functions *****************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++static gctINT
++_GetTerminatorItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctINT
++_GetPrefixItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item = (gcsBUFITEM_PREFIX_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctINT
++_GetStringItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item = (gcsBUFITEM_STRING_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetCopyItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item = (gcsBUFITEM_COPY_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetBufferItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfGETITEMSIZE _itemSize[] =
++{
++ _GetTerminatorItemSize,
++ _GetPrefixItemSize,
++ _GetStringItemSize,
++ _GetCopyItemSize,
++ _GetBufferItemSize
++};
++#endif
++
++/******************************************************************************\
++******************************* Printing Functions *****************************
++\******************************************************************************/
++
++#if gcdDEBUG || gcdBUFFERED_OUTPUT
++static void
++_DirectPrint(
++ gctCONST_STRING Message,
++ ...
++ )
++{
++ gctINT len;
++ char buffer[768];
++ gctARGUMENTS arguments;
++
++ gcmkARGUMENTS_START(arguments, Message);
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), Message, arguments);
++ gcmkARGUMENTS_END(arguments);
++
++ buffer[len] = '\0';
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static int
++_AppendIndent(
++ IN gctINT Indent,
++ IN char * Buffer,
++ IN int BufferSize
++ )
++{
++ gctINT i;
++
++ gctINT len = 0;
++ gctINT indent = Indent % 40;
++
++ for (i = 0; i < indent; i += 1)
++ {
++ Buffer[len++] = ' ';
++ }
++
++ if (indent != Indent)
++ {
++ len += gcmkSPRINTF(
++ Buffer + len, BufferSize - len, " <%d> ", Indent
++ );
++
++ Buffer[len] = '\0';
++ }
++
++ return len;
++}
++
++#if gcdHAVEPREFIX
++static void
++_PrintPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Format the string. */
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, Data);
++ buffer[len] = '\0';
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static void
++_PrintString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Append the indent string. */
++ len = _AppendIndent(Indent, buffer, gcmSIZEOF(buffer));
++
++ /* Format the string. */
++ len += gcmkVSPRINTF(buffer + len, gcmSIZEOF(buffer) - len, Message, Data);
++ buffer[len] = '\0';
++
++ /* Add end-of-line if missing. */
++ if (buffer[len - 1] != '\n')
++ {
++ buffer[len++] = '\n';
++ buffer[len] = '\0';
++ }
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++
++static void
++_PrintBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++ static gctCONST_STRING _titleString[] =
++ {
++ "CONTEXT BUFFER",
++ "USER COMMAND BUFFER",
++ "KERNEL COMMAND BUFFER",
++ "LINK BUFFER",
++ "WAIT LINK BUFFER",
++ ""
++ };
++
++ static const gctINT COLUMN_COUNT = 8;
++
++ gctUINT i, count, column, address;
++ gctUINT32_PTR data;
++ gctCHAR buffer[768];
++ gctUINT indent, len;
++ gctBOOL command;
++
++ /* Append space for the prefix. */
++#if gcdHAVEPREFIX
++ indent = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, PrefixData);
++ buffer[indent] = '\0';
++#else
++ indent = 0;
++#endif
++
++ /* Append the indent string. */
++ indent += _AppendIndent(
++ Indent, buffer + indent, gcmSIZEOF(buffer) - indent
++ );
++
++ switch (Type)
++ {
++ case gceDUMP_BUFFER_CONTEXT:
++ case gceDUMP_BUFFER_USER:
++ case gceDUMP_BUFFER_KERNEL:
++ case gceDUMP_BUFFER_LINK:
++ case gceDUMP_BUFFER_WAITLINK:
++ /* Form and print the title string. */
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "%s%s\n", _titleString[Type],
++ ((DmaAddress >= Address) && (DmaAddress < Address + DataSize))
++ ? " (CURRENT)" : ""
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++
++ /* This is a command buffer. */
++ command = gcvTRUE;
++ break;
++
++ case gceDUMP_BUFFER_FROM_USER:
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++
++ /* No title. */
++ break;
++
++ default:
++ gcmDBGASSERT(gcvFALSE, "%s", "invalid buffer type");
++
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++ }
++
++ /* Overwrite the prefix with spaces. */
++ for (i = 0; i < indent; i += 1)
++ {
++ buffer[i] = ' ';
++ }
++
++ /* Form and print the opening string. */
++ if (command)
++ {
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "@[kernel.command %08X %08X\n", Address, DataSize
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++ }
++
++ /* Get initial address. */
++ address = Address;
++
++ /* Cast the data pointer. */
++ data = (gctUINT32_PTR) Data;
++
++ /* Compute the number of double words. */
++ count = DataSize / gcmSIZEOF(gctUINT32);
++
++ /* Print the buffer. */
++ for (i = 0, len = indent, column = 0; i < count; i += 1)
++ {
++ /* Append the address. */
++ if (column == 0)
++ {
++ len += gcmkSPRINTF(
++ buffer + len, gcmSIZEOF(buffer) - len, "0x%08X:", address
++ );
++ }
++
++ /* Append the data value. */
++ len += gcmkSPRINTF2(
++ buffer + len, gcmSIZEOF(buffer) - len, "%c%08X",
++ (address == DmaAddress)? '>' : ' ', data[i]
++ );
++
++ buffer[len] = '\0';
++
++ /* Update the address. */
++ address += gcmSIZEOF(gctUINT32);
++
++ /* Advance column count. */
++ column += 1;
++
++ /* End of line? */
++ if ((column % COLUMN_COUNT) == 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Reset. */
++ len = indent;
++ column = 0;
++ }
++ }
++
++ /* Print the last partial string. */
++ if (column != 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++ }
++
++ /* Form and print the opening string. */
++ if (command)
++ {
++ buffer[indent] = '\0';
++ gcmkSTRCAT(buffer, gcmSIZEOF(buffer), "] -- command\n");
++ gcmkOUTPUT_STRING(buffer);
++ }
++}
++
++#if gcdBUFFERED_OUTPUT
++static gctUINT
++_PrintNone(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctUINT
++_PrintPrefixWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_PREFIX_PTR) Item;
++
++ /* Print the message. */
++ _PrintPrefix(OutputBuffer, item->prefixData);
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctUINT
++_PrintStringWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_STRING_PTR) Item;
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, item->message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintCopyWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item;
++ gctCONST_STRING message;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_COPY_PTR) Item;
++
++ /* Determine the string pointer. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintBufferWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctPOINTER data;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Compute the data address. */
++ data = ((gctUINT8_PTR) item->prefixData) + gcdPREFIX_SIZE;
++
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, item->prefixData,
++ data, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, gcvNULL,
++ item + 1, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfPRINTSTRING _printArray[] =
++{
++ _PrintNone,
++ _PrintPrefixWrapper,
++ _PrintStringWrapper,
++ _PrintCopyWrapper,
++ _PrintBufferWrapper
++};
++#endif
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++static gcsBUFITEM_BUFFER_PTR
++_FindCurrentDMABuffer(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR dmaCurrent;
++
++ /* Reset the current buffer. */
++ dmaCurrent = gcvNULL;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ if ((DmaAddress >= buffer->address) &&
++ (DmaAddress < buffer->address + buffer->dataSize))
++ {
++ dmaCurrent = buffer;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Return result. */
++ return dmaCurrent;
++}
++
++static void
++_EnableAllDMABuffers(
++ void
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Enable the buffer. */
++ buffer->dmaAddress = ~0U;
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++}
++
++static void
++_EnableDMABuffers(
++ gctUINT32 DmaAddress,
++ gcsBUFITEM_BUFFER_PTR CurrentDMABuffer
++ )
++{
++ gctINT i, skip, index;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR buffers[gcdDMA_BUFFER_COUNT];
++
++ /* Reset buffer pointers. */
++ gckOS_ZeroMemory(buffers, gcmSIZEOF(buffers));
++
++ /* Set the current buffer index. */
++ index = -1;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items until the current DMA buffer is found. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ /* Advance the index. */
++ index = (index + 1) % gcdDMA_BUFFER_COUNT;
++
++ /* Add to the buffer array. */
++ buffers[index] = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Stop if this is the current DMA buffer. */
++ if ((gcsBUFITEM_BUFFER_PTR) item == CurrentDMABuffer)
++ {
++ break;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Enable the found buffers. */
++ gcmDBGASSERT(index != -1, "%d", index);
++
++ for (i = 0; i < gcdDMA_BUFFER_COUNT; i += 1)
++ {
++ if (buffers[index] == gcvNULL)
++ {
++ break;
++ }
++
++ buffers[index]->dmaAddress = DmaAddress;
++
++ index -= 1;
++
++ if (index == -1)
++ {
++ index = gcdDMA_BUFFER_COUNT - 1;
++ }
++ }
++}
++#endif
++
++static void
++_Flush(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ gcsBUFFERED_OUTPUT_PTR outputBuffer = _outputBufferHead;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ if ((outputBuffer != gcvNULL) && (outputBuffer->count != 0))
++ {
++ /* Find the current DMA buffer. */
++ gcsBUFITEM_BUFFER_PTR dmaCurrent = _FindCurrentDMABuffer(DmaAddress);
++
++ /* Was the current buffer found? */
++ if (dmaCurrent == gcvNULL)
++ {
++ /* No, print all buffers. */
++ _EnableAllDMABuffers();
++ }
++ else
++ {
++ /* Yes, enable only specified number of buffers. */
++ _EnableDMABuffers(DmaAddress, dmaCurrent);
++ }
++ }
++#endif
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->count != 0)
++ {
++ _DirectPrint("********************************************************************************\n");
++ _DirectPrint("FLUSHING DEBUG OUTPUT BUFFER (%d elements).\n", outputBuffer->count);
++ _DirectPrint("********************************************************************************\n");
++
++ item = (gcsBUFITEM_HEAD_PTR) &outputBuffer->buffer[outputBuffer->start];
++
++ for (i = 0; i < outputBuffer->count; i += 1)
++ {
++ skip = (* _printArray[item->type]) (outputBuffer, item);
++
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) outputBuffer->buffer;
++ }
++ }
++
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++}
++
++static gcsBUFITEM_HEAD_PTR
++_AllocateItem(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Size
++ )
++{
++ gctINT skip;
++ gcsBUFITEM_HEAD_PTR item, next;
++
++#if gcdENABLE_OVERFLOW
++ if (
++ (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ ||
++ (
++ (OutputBuffer->index < OutputBuffer->start) &&
++ (OutputBuffer->index + Size >= OutputBuffer->start)
++ )
++ )
++ {
++ if (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ if (OutputBuffer->index < OutputBuffer->start)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (item->type != gceBUFITEM_NONE)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++ }
++
++ OutputBuffer->start = 0;
++ }
++
++ OutputBuffer->index = 0;
++ }
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (OutputBuffer->start - OutputBuffer->index <= Size)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ OutputBuffer->start = 0;
++ break;
++ }
++ }
++ }
++#else
++ if (OutputBuffer->index + Size > gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ _DirectPrint("\nMessage buffer full; forcing message flush.\n\n");
++ _Flush(~0U);
++ }
++#endif
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->index];
++
++ OutputBuffer->index += Size;
++ OutputBuffer->count += 1;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + Size);
++ next->type = gceBUFITEM_NONE;
++
++ return item;
++}
++
++#if gcdALIGNBYSIZE
++static void
++_FreeExtraSpace(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Item,
++ IN gctINT ItemSize,
++ IN gctINT FreeSize
++ )
++{
++ gcsBUFITEM_HEAD_PTR next;
++
++ OutputBuffer->index -= FreeSize;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) Item + ItemSize);
++ next->type = gceBUFITEM_NONE;
++}
++#endif
++
++#if gcdHAVEPREFIX
++static void
++_AppendPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_PREFIX_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_PREFIX)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_PREFIX_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_PREFIX;
++ item->prefixData = prefixData;
++
++ /* Copy argument value. */
++ memcpy(prefixData, Data, gcdPREFIX_SIZE);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_PREFIX) + gcdPREFIX_SIZE + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++#endif
++
++static void
++_AppendString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_STRING_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_STRING)
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_STRING_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_STRING;
++ item->indent = Indent;
++ item->message = Message;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_STRING) + ArgumentSize + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendCopy(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_COPY_PTR item;
++ gctINT allocSize;
++ gctINT messageLength;
++ gctCONST_STRING message;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Get the length of the string. */
++ messageLength = strlen(Message) + 1;
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_COPY_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Determine the message placement. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) message + messageLength;
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ if (ArgumentSize == 0)
++ {
++ alignment = 0;
++ }
++ else
++ {
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++ }
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_COPY;
++ item->indent = Indent;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy the message. */
++ memcpy((gctPOINTER) message, Message, messageLength);
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT allocSize;
++ gctPOINTER data;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT
++ + DataSize;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++#if gcdALIGNBYSIZE
++ /* Align the data pointer as necessary. */
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->bufferType = Type;
++ item->dataSize = DataSize;
++ item->address = Address;
++ item->prefixData = prefixData;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ item->dmaAddress = DmaAddress;
++#endif
++
++ /* Copy prefix data. */
++ memcpy(prefixData, PrefixData, gcdPREFIX_SIZE);
++
++ /* Compute the data pointer. */
++ data = prefixData + gcdPREFIX_SIZE;
++
++ /* Copy argument value. */
++ memcpy(data, Data, DataSize);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + alignment
++ + DataSize;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++#else
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT size;
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ size = gcmSIZEOF(gcsBUFITEM_BUFFER) + DataSize;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, size);
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->dataSize = DataSize;
++ item->address = Address;
++
++ /* Copy argument value. */
++ memcpy(item + 1, Data, DataSize);
++#endif
++}
++#endif
++
++static gcmINLINE void
++_InitBuffers(
++ void
++ )
++{
++ int i;
++
++ if (_outputBufferHead == gcvNULL)
++ {
++ for (i = 0; i < gcdTHREAD_BUFFERS; i += 1)
++ {
++ if (_outputBufferTail == gcvNULL)
++ {
++ _outputBufferHead = &_outputBuffer[i];
++ }
++ else
++ {
++ _outputBufferTail->next = &_outputBuffer[i];
++ }
++
++#if gcdTHREAD_BUFFERS > 1
++ _outputBuffer[i].threadID = ~0U;
++#endif
++
++ _outputBuffer[i].prev = _outputBufferTail;
++ _outputBuffer[i].next = gcvNULL;
++
++ _outputBufferTail = &_outputBuffer[i];
++ }
++ }
++}
++
++static gcmINLINE gcsBUFFERED_OUTPUT_PTR
++_GetOutputBuffer(
++ void
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++
++#if gcdTHREAD_BUFFERS > 1
++ /* Get the current thread ID. */
++ gctUINT32 ThreadID = gcmkGETTHREADID();
++
++ /* Locate the output buffer for the thread. */
++ outputBuffer = _outputBufferHead;
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->threadID == ThreadID)
++ {
++ break;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++
++ /* No matching buffer found? */
++ if (outputBuffer == gcvNULL)
++ {
++ /* Get the tail for the buffer. */
++ outputBuffer = _outputBufferTail;
++
++ /* Move it to the head. */
++ _outputBufferTail = _outputBufferTail->prev;
++ _outputBufferTail->next = gcvNULL;
++
++ outputBuffer->prev = gcvNULL;
++ outputBuffer->next = _outputBufferHead;
++
++ _outputBufferHead->prev = outputBuffer;
++ _outputBufferHead = outputBuffer;
++
++ /* Reset the buffer. */
++ outputBuffer->threadID = ThreadID;
++#if gcdBUFFERED_OUTPUT
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++#endif
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber = 0;
++#endif
++ }
++#else
++ outputBuffer = _outputBufferHead;
++#endif
++
++ return outputBuffer;
++}
++
++static gcmINLINE int _GetArgumentSize(
++ IN gctCONST_STRING Message
++ )
++{
++ int i, count;
++
++ gcmDBGASSERT(Message != gcvNULL, "%p", Message);
++
++ for (i = 0, count = 0; Message[i]; i += 1)
++ {
++ if (Message[i] == '%')
++ {
++ count += 1;
++ }
++ }
++
++ return count * gcmSIZEOF(gctUINT32);
++}
++
++#if gcdHAVEPREFIX
++static void
++_InitPrefixData(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR data = (gctUINT8_PTR) Data;
++
++#if gcdSHOW_TIME
++ {
++ gctUINT64 time;
++ gckOS_GetProfileTick(&time);
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = time;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = OutputBuffer->lineNumber;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_PROCESS_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETPROCESSID();
++ data += gcmSIZEOF(gctUINT32);
++ }
++#endif
++
++#if gcdSHOW_THREAD_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETTHREADID();
++ }
++#endif
++}
++#endif
++
++static void
++_Print(
++ IN gctUINT ArgumentSize,
++ IN gctBOOL CopyMessage,
++ IN gctCONST_STRING Message,
++ IN gctARGUMENTS Arguments
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++ gcmkDECLARE_LOCK(lockHandle);
++
++ gcmkLOCKSECTION(lockHandle);
++
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Print prefix. */
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print the prefix. */
++ gcdOUTPUTPREFIX(outputBuffer, alignedPrefixData);
++ }
++#endif
++
++ /* Form the indent string. */
++ if (strncmp(Message, "--", 2) == 0)
++ {
++ outputBuffer->indent -= 2;
++ }
++
++ /* Print the message. */
++ if (CopyMessage)
++ {
++ gcdOUTPUTCOPY(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, * (gctPOINTER *) &Arguments
++ );
++ }
++ else
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, * (gctPOINTER *) &Arguments
++ );
++ }
++
++ /* Check increasing indent. */
++ if (strncmp(Message, "++", 2) == 0)
++ {
++ outputBuffer->indent += 2;
++ }
++
++ gcmkUNLOCKSECTION(lockHandle);
++}
++
++
++/******************************************************************************\
++********************************* Debug Macros *********************************
++\******************************************************************************/
++
++#ifdef __QNXNTO__
++
++extern volatile unsigned g_nQnxInIsrs;
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ if (atomic_add_value(&g_nQnxInIsrs, 1) == 0) \
++ { \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, __arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++ } \
++ atomic_sub(&g_nQnxInIsrs, 1); \
++}
++
++#else
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, __arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++}
++
++#endif
++
++/******************************************************************************\
++********************************** Debug Code **********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Print
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_PrintN
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyPrint
++**
++** Send a message to the debugger. If in buffered output mode, the entire
++** message will be copied into the buffer instead of using the pointer to
++** the string.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvTRUE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DumpBuffer
++**
++** Print the contents of the specified buffer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Buffer
++** Pointer to the buffer to print.
++**
++** gctUINT Size
++** Size of the buffer.
++**
++** gceDUMP_BUFFER Type
++** Buffer type.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ )
++{
++ gctUINT32 address;
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++ static gctBOOL userLocked;
++ gctCHAR *buffer = (gctCHAR*)Buffer;
++
++ gcmkDECLARE_LOCK(lockHandle);
++
++ /* Request lock when not coming from user,
++ or coming from user and not yet locked
++ and message is starting with @[. */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ if ((Size > 2)
++ && (buffer[0] == '@')
++ && (buffer[1] == '['))
++ {
++ /* Beginning of a user dump. */
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvTRUE;
++ }
++ /* Else, let it pass through. */
++ }
++ else
++ {
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++
++ if (Buffer != gcvNULL)
++ {
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Get the physical address of the buffer. */
++ if (Type != gceDUMP_BUFFER_FROM_USER)
++ {
++ gcmkVERIFY_OK(gckOS_GetPhysicalAddress(Os, Buffer, &address));
++ }
++ else
++ {
++ address = 0;
++ }
++
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print/schedule the buffer. */
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ alignedPrefixData, Buffer, address, Size, Type, 0
++ );
++ }
++#else
++ /* Print/schedule the buffer. */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Buffer, 0, gcvNULL
++ );
++ }
++ else
++ {
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ gcvNULL, Buffer, address, Size, Type, 0
++ );
++ }
++#endif
++ }
++
++ /* Unlock when not coming from user,
++ or coming from user and not yet locked. */
++ if (userLocked)
++ {
++ if ((Size > 4)
++ && (buffer[0] == ']')
++ && (buffer[1] == ' ')
++ && (buffer[2] == '-')
++ && (buffer[3] == '-'))
++ {
++ /* End of a user dump. */
++ gcmkUNLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++ /* Else, let it pass through, don't unlock. */
++ }
++ else
++ {
++ gcmkUNLOCKSECTION(lockHandle);
++ }
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTrace
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceN
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZone
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZoneN
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugBreak
++**
++** Break into the debugger.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugBreak(
++ void
++ )
++{
++ gckOS_DebugTrace(gcvLEVEL_ERROR, "%s(%d)", __FUNCTION__, __LINE__);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFatal
++**
++** Send a message to the debugger and break into the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmkPRINT_VERSION();
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++
++ /* Break into the debugger. */
++ gckOS_DebugBreak();
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevel
++**
++** Set the debug level.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ )
++{
++ _debugLevel = Level;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZone
++**
++** Set the debug zone.
++**
++** INPUT:
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ )
++{
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevelZone
++**
++** Set the debug level and zone.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ )
++{
++ _debugLevel = Level;
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZones
++**
++** Enable or disable debug zones.
++**
++** INPUT:
++**
++** gctUINT32 Zones
++** Debug zones to enable or disable.
++**
++** gctBOOL Enable
++** Set to gcvTRUE to enable the zones (or the Zones with the current
++** zones) or gcvFALSE to disable the specified Zones.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ )
++{
++ if (Enable)
++ {
++ /* Enable the zones. */
++ _debugZones |= Zones;
++ }
++ else
++ {
++ /* Disable the zones. */
++ _debugZones &= ~Zones;
++ }
++}
++
++/*******************************************************************************
++**
++** gckOS_Verify
++**
++** Called to verify the result of a function call.
++**
++** INPUT:
++**
++** gceSTATUS Status
++** Function call result.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ )
++{
++ _lastError = status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFlush
++**
++** Force messages to be flushed out.
++**
++** INPUT:
++**
++** gctCONST_STRING CallerName
++** Name of the caller function.
++**
++** gctUINT LineNumber
++** Line number of the caller.
++**
++** gctUINT32 DmaAddress
++** The current DMA address or ~0U to ignore.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ )
++{
++#if gcdBUFFERED_OUTPUT
++ _DirectPrint("\nFlush requested by %s(%d).\n\n", CallerName, LineNumber);
++ _Flush(DmaAddress);
++#endif
++}
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ )
++{
++ switch (status)
++ {
++ case gcvSTATUS_OK:
++ return "gcvSTATUS_OK";
++ case gcvSTATUS_TRUE:
++ return "gcvSTATUS_TRUE";
++ case gcvSTATUS_NO_MORE_DATA:
++ return "gcvSTATUS_NO_MORE_DATA";
++ case gcvSTATUS_CACHED:
++ return "gcvSTATUS_CACHED";
++ case gcvSTATUS_MIPMAP_TOO_LARGE:
++ return "gcvSTATUS_MIPMAP_TOO_LARGE";
++ case gcvSTATUS_NAME_NOT_FOUND:
++ return "gcvSTATUS_NAME_NOT_FOUND";
++ case gcvSTATUS_NOT_OUR_INTERRUPT:
++ return "gcvSTATUS_NOT_OUR_INTERRUPT";
++ case gcvSTATUS_MISMATCH:
++ return "gcvSTATUS_MISMATCH";
++ case gcvSTATUS_MIPMAP_TOO_SMALL:
++ return "gcvSTATUS_MIPMAP_TOO_SMALL";
++ case gcvSTATUS_LARGER:
++ return "gcvSTATUS_LARGER";
++ case gcvSTATUS_SMALLER:
++ return "gcvSTATUS_SMALLER";
++ case gcvSTATUS_CHIP_NOT_READY:
++ return "gcvSTATUS_CHIP_NOT_READY";
++ case gcvSTATUS_NEED_CONVERSION:
++ return "gcvSTATUS_NEED_CONVERSION";
++ case gcvSTATUS_SKIP:
++ return "gcvSTATUS_SKIP";
++ case gcvSTATUS_DATA_TOO_LARGE:
++ return "gcvSTATUS_DATA_TOO_LARGE";
++ case gcvSTATUS_INVALID_CONFIG:
++ return "gcvSTATUS_INVALID_CONFIG";
++ case gcvSTATUS_CHANGED:
++ return "gcvSTATUS_CHANGED";
++ case gcvSTATUS_NOT_SUPPORT_DITHER:
++ return "gcvSTATUS_NOT_SUPPORT_DITHER";
++
++ case gcvSTATUS_INVALID_ARGUMENT:
++ return "gcvSTATUS_INVALID_ARGUMENT";
++ case gcvSTATUS_INVALID_OBJECT:
++ return "gcvSTATUS_INVALID_OBJECT";
++ case gcvSTATUS_OUT_OF_MEMORY:
++ return "gcvSTATUS_OUT_OF_MEMORY";
++ case gcvSTATUS_MEMORY_LOCKED:
++ return "gcvSTATUS_MEMORY_LOCKED";
++ case gcvSTATUS_MEMORY_UNLOCKED:
++ return "gcvSTATUS_MEMORY_UNLOCKED";
++ case gcvSTATUS_HEAP_CORRUPTED:
++ return "gcvSTATUS_HEAP_CORRUPTED";
++ case gcvSTATUS_GENERIC_IO:
++ return "gcvSTATUS_GENERIC_IO";
++ case gcvSTATUS_INVALID_ADDRESS:
++ return "gcvSTATUS_INVALID_ADDRESS";
++ case gcvSTATUS_CONTEXT_LOSSED:
++ return "gcvSTATUS_CONTEXT_LOSSED";
++ case gcvSTATUS_TOO_COMPLEX:
++ return "gcvSTATUS_TOO_COMPLEX";
++ case gcvSTATUS_BUFFER_TOO_SMALL:
++ return "gcvSTATUS_BUFFER_TOO_SMALL";
++ case gcvSTATUS_INTERFACE_ERROR:
++ return "gcvSTATUS_INTERFACE_ERROR";
++ case gcvSTATUS_NOT_SUPPORTED:
++ return "gcvSTATUS_NOT_SUPPORTED";
++ case gcvSTATUS_MORE_DATA:
++ return "gcvSTATUS_MORE_DATA";
++ case gcvSTATUS_TIMEOUT:
++ return "gcvSTATUS_TIMEOUT";
++ case gcvSTATUS_OUT_OF_RESOURCES:
++ return "gcvSTATUS_OUT_OF_RESOURCES";
++ case gcvSTATUS_INVALID_DATA:
++ return "gcvSTATUS_INVALID_DATA";
++ case gcvSTATUS_INVALID_MIPMAP:
++ return "gcvSTATUS_INVALID_MIPMAP";
++ case gcvSTATUS_NOT_FOUND:
++ return "gcvSTATUS_NOT_FOUND";
++ case gcvSTATUS_NOT_ALIGNED:
++ return "gcvSTATUS_NOT_ALIGNED";
++ case gcvSTATUS_INVALID_REQUEST:
++ return "gcvSTATUS_INVALID_REQUEST";
++ case gcvSTATUS_GPU_NOT_RESPONDING:
++ return "gcvSTATUS_GPU_NOT_RESPONDING";
++ case gcvSTATUS_TIMER_OVERFLOW:
++ return "gcvSTATUS_TIMER_OVERFLOW";
++ case gcvSTATUS_VERSION_MISMATCH:
++ return "gcvSTATUS_VERSION_MISMATCH";
++ case gcvSTATUS_LOCKED:
++ return "gcvSTATUS_LOCKED";
++
++ /* Linker errors. */
++ case gcvSTATUS_GLOBAL_TYPE_MISMATCH:
++ return "gcvSTATUS_GLOBAL_TYPE_MISMATCH";
++ case gcvSTATUS_TOO_MANY_ATTRIBUTES:
++ return "gcvSTATUS_TOO_MANY_ATTRIBUTES";
++ case gcvSTATUS_TOO_MANY_UNIFORMS:
++ return "gcvSTATUS_TOO_MANY_UNIFORMS";
++ case gcvSTATUS_TOO_MANY_VARYINGS:
++ return "gcvSTATUS_TOO_MANY_VARYINGS";
++ case gcvSTATUS_UNDECLARED_VARYING:
++ return "gcvSTATUS_UNDECLARED_VARYING";
++ case gcvSTATUS_VARYING_TYPE_MISMATCH:
++ return "gcvSTATUS_VARYING_TYPE_MISMATCH";
++ case gcvSTATUS_MISSING_MAIN:
++ return "gcvSTATUS_MISSING_MAIN";
++ case gcvSTATUS_NAME_MISMATCH:
++ return "gcvSTATUS_NAME_MISMATCH";
++ case gcvSTATUS_INVALID_INDEX:
++ return "gcvSTATUS_INVALID_INDEX";
++ default:
++ return "nil";
++ }
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_event.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_event.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_event.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_event.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2898 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_buffer.h"
++
++#ifdef __QNXNTO__
++#include <atomic.h>
++#include "gc_hal_kernel_qnx.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_EVENT
++
++#define gcdEVENT_ALLOCATION_COUNT (4096 / gcmSIZEOF(gcsHAL_INTERFACE))
++#define gcdEVENT_MIN_THRESHOLD 4
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++static gceSTATUS
++gckEVENT_AllocateQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR * Queue
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++ /* Do we have free queues? */
++ if (Event->freeList == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Move one free queue from the free list. */
++ * Queue = Event->freeList;
++ Event->freeList = Event->freeList->next;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Queue=0x%x", gcmOPT_POINTER(Queue));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++ /* Move one free queue from the free list. */
++ Queue->next = Event->freeList;
++ Event->freeList = Queue;
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeRecord(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->freeEventMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Push the record on the free list. */
++ Record->next = Event->freeEventList;
++ Event->freeEventList = Record;
++ Event->freeEventCount += 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++gckEVENT_IsEmpty(
++ IN gckEVENT Event,
++ OUT gctBOOL_PTR IsEmpty
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T i;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(IsEmpty != gcvNULL);
++
++ /* Assume the event queue is empty. */
++ *IsEmpty = gcvTRUE;
++
++ /* Try acquiring the mutex. */
++ status = gckOS_AcquireMutex(Event->os, Event->eventQueueMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Timeout - queue is no longer empty. */
++ *IsEmpty = gcvFALSE;
++ }
++ else
++ {
++ /* Bail out on error. */
++ gcmkONERROR(status);
++
++ /* Walk the event queue. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ /* Check whether this event is in use. */
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* The event is in use, hence the queue is not empty. */
++ *IsEmpty = gcvFALSE;
++ break;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*IsEmpty=%d", gcmOPT_VALUE(IsEmpty));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_TryToIdleGPU(
++ IN gckEVENT Event
++)
++{
++ gceSTATUS status;
++ gctBOOL empty = gcvFALSE, idle = gcvFALSE;
++ gctBOOL powerLocked = gcvFALSE;
++ gckHARDWARE hardware;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Event->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Check whether the event queue is empty. */
++ gcmkONERROR(gckEVENT_IsEmpty(Event, &empty));
++
++ if (empty)
++ {
++ status = gckOS_AcquireMutex(hardware->os, hardware->powerMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ powerLocked = gcvTRUE;
++
++ /* Query whether the hardware is idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Event->kernel->hardware, &idle));
++
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ powerLocked = gcvFALSE;
++
++ if (idle)
++ {
++ /* Inform the system of idle GPU. */
++ gcmkONERROR(gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_IDLE));
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerLocked)
++ {
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ powerLocked = gcvFALSE;
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++__RemoveRecordFromProcessDB(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ while (Record != gcvNULL)
++ {
++ if (Record->info.command == gcvHAL_SIGNAL)
++ {
++ /* TODO: Find a better place to bind signal to hardware.*/
++ gcmkVERIFY_OK(gckOS_SignalSetHardware(Event->os,
++ gcmUINT64_TO_PTR(Record->info.u.Signal.signal),
++ Event->kernel->hardware));
++ }
++
++ if (Record->fromKernel)
++ {
++ /* No need to check db if event is from kernel. */
++ Record = Record->next;
++ continue;
++ }
++
++ switch (Record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Record->info.u.FreeNonPagedMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Record->info.u.FreeContiguousMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY,
++ gcmUINT64_TO_PTR(Record->info.u.FreeVideoMemory.node)));
++
++ {
++ gcuVIDMEM_NODE_PTR node = (gcuVIDMEM_NODE_PTR)(gcmUINT64_TO_PTR(Record->info.u.FreeVideoMemory.node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_RESERVED,
++ node));
++ }
++ else if(node->Virtual.contiguous)
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node));
++ }
++ else
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node));
++ }
++ }
++
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(Record->info.u.UnlockVideoMemory.node)));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Record->info.u.UnmapUserMemory.info)));
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_COMMAND_BUFFER,
++ gcmUINT64_TO_PTR(Record->info.u.FreeVirtualCommandBuffer.logical)));
++ break;
++
++ default:
++ break;
++ }
++
++ Record = Record->next;
++ }
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_SubmitTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckEVENT event = (gckEVENT)Data;
++ gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE));
++}
++
++/******************************************************************************\
++******************************* gckEVENT API Code *******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckEVENT_Construct
++**
++** Construct a new gckEVENT object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckEVENT * Event
++** Pointer to a variable that receives the gckEVENT object pointer.
++*/
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gckEVENT eventObj = gcvNULL;
++ int i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Event != gcvNULL);
++
++ /* Extract the pointer to the gckOS object. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate the gckEVENT object. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckEVENT), &pointer));
++
++ eventObj = pointer;
++
++ /* Reset the object. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(eventObj, gcmSIZEOF(struct _gckEVENT)));
++
++ /* Initialize the gckEVENT object. */
++ eventObj->object.type = gcvOBJ_EVENT;
++ eventObj->kernel = Kernel;
++ eventObj->os = os;
++
++ /* Create the mutexes. */
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventQueueMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->freeEventMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventListMutex));
++
++ /* Create a bunch of event reccords. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsEVENT), &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = eventObj->freeEventList;
++ eventObj->freeEventList = record;
++ eventObj->freeEventCount += 1;
++ }
++
++ /* Initialize the free list of event queues. */
++ for (i = 0; i < gcdREPO_LIST_COUNT; i += 1)
++ {
++ eventObj->repoList[i].next = eventObj->freeList;
++ eventObj->freeList = &eventObj->repoList[i];
++ }
++
++ /* Construct the atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->freeAtom));
++ gcmkONERROR(gckOS_AtomSet(os,
++ eventObj->freeAtom,
++ gcmCOUNTOF(eventObj->queues)));
++
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending));
++#endif
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(os,
++ _SubmitTimerFunction,
++ (gctPOINTER)eventObj,
++ &eventObj->submitTimer));
++
++ /* Return pointer to the gckEVENT object. */
++ *Event = eventObj;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Event=0x%x", *Event);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (eventObj != gcvNULL)
++ {
++ if (eventObj->eventQueueMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventQueueMutex));
++ }
++
++ if (eventObj->freeEventMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->freeEventMutex));
++ }
++
++ if (eventObj->eventListMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventListMutex));
++ }
++
++ while (eventObj->freeEventList != gcvNULL)
++ {
++ record = eventObj->freeEventList;
++ eventObj->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, record));
++ }
++
++ if (eventObj->freeAtom != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->freeAtom));
++ }
++
++#if gcdSMP
++ if (eventObj->pending != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending));
++ }
++#endif
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, eventObj));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Destroy
++**
++** Destroy an gckEVENT object.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_PTR record;
++ gcsEVENT_QUEUE_PTR queue;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ if (Event->submitTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Event->os, Event->submitTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Event->os, Event->submitTimer));
++ }
++
++ /* Delete the queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventQueueMutex));
++
++ /* Free all free events. */
++ while (Event->freeEventList != gcvNULL)
++ {
++ record = Event->freeEventList;
++ Event->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Delete the free mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->freeEventMutex));
++
++ /* Free all pending queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Free all pending events. */
++ while (queue->head != gcvNULL)
++ {
++ record = queue->head;
++ queue->head = record->next;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(record) + gcmSIZEOF(queue->source),
++ "Event record 0x%x is still pending for %d.",
++ record, queue->source
++ );
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead =
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkVERIFY_OK(gckEVENT_FreeQueue(Event, queue));
++ }
++
++ /* Delete the list mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventListMutex));
++
++ /* Delete the atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->freeAtom));
++
++#if gcdSMP
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending));
++#endif
++
++ /* Mark the gckEVENT object as unknown. */
++ Event->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckEVENT object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_GetEvent
++**
++** Reserve the next available hardware event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL Wait
++** Set to gcvTRUE to force the function to wait if no events are
++** immediately available.
++**
++** gceKERNEL_WHERE Source
++** Source of the event.
++**
++** OUTPUT:
++**
++** gctUINT8 * EventID
++** Reserved event ID.
++*/
++static gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gcsEVENT_PTR Head,
++ IN gceKERNEL_WHERE Source
++ )
++{
++ gctINT i, id;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT32 free;
++
++#if gcdGPU_TIMEOUT
++ gctUINT32 timer = 0;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x Head=%p Source=%d", Event, Head, Source);
++
++ while (gcvTRUE)
++ {
++ /* Grab the queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Walk through all events. */
++ id = Event->lastID;
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ gctINT nextID = gckMATH_ModuloInt((id + 1),
++ gcmCOUNTOF(Event->queues));
++
++ if (Event->queues[id].head == gcvNULL)
++ {
++ *EventID = (gctUINT8) id;
++
++ Event->lastID = (gctUINT8) nextID;
++
++ /* Save time stamp of event. */
++ Event->queues[id].stamp = ++(Event->stamp);
++ Event->queues[id].head = Head;
++ Event->queues[id].source = Source;
++
++ gcmkONERROR(gckOS_AtomDecrement(Event->os,
++ Event->freeAtom,
++ &free));
++#if gcdDYNAMIC_SPEED
++ if (free <= gcdDYNAMIC_EVENT_THRESHOLD)
++ {
++ gcmkONERROR(gckOS_BroadcastHurry(
++ Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD - free));
++ }
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os,
++ Event->eventQueueMutex));
++
++ /* Success. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(id),
++ "Using id=%d",
++ id
++ );
++
++ gcmkFOOTER_ARG("*EventID=%u", *EventID);
++ return gcvSTATUS_OK;
++ }
++
++ id = nextID;
++ }
++
++#if gcdDYNAMIC_SPEED
++ /* No free events, speed up the GPU right now! */
++ gcmkONERROR(gckOS_BroadcastHurry(Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD));
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Fail if wait is not requested. */
++ if (!Wait)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Delay a while. */
++ gcmkONERROR(gckOS_Delay(Event->os, 1));
++
++#if gcdGPU_TIMEOUT
++ /* Increment the wait timer. */
++ timer += 1;
++
++ if (timer == Event->kernel->timeOut)
++ {
++ /* Try to call any outstanding events. */
++ gcmkONERROR(gckHARDWARE_Interrupt(Event->kernel->hardware,
++ gcvTRUE));
++ }
++ else if (timer > Event->kernel->timeOut)
++ {
++ gcmkTRACE_N(
++ gcvLEVEL_ERROR,
++ gcmSIZEOF(gctCONST_STRING) + gcmSIZEOF(gctINT),
++ "%s(%d): no available events\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Bail out. */
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++#endif
++ }
++
++OnError:
++ if (acquired)
++ {
++ /* Release the queue mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AllocateRecord
++**
++** Allocate a record for the new event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL AllocateAllowed
++** State for allocation if out of free events.
++**
++** OUTPUT:
++**
++** gcsEVENT_PTR * Record
++** Allocated event record.
++*/
++gceSTATUS
++gckEVENT_AllocateRecord(
++ IN gckEVENT Event,
++ IN gctBOOL AllocateAllowed,
++ OUT gcsEVENT_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Event=0x%x AllocateAllowed=%d", Event, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->freeEventMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Test if we are below the allocation threshold. */
++ if ( (AllocateAllowed && (Event->freeEventCount < gcdEVENT_MIN_THRESHOLD)) ||
++ (Event->freeEventCount == 0) )
++ {
++ /* Allocate a bunch of records. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(Event->os,
++ gcmSIZEOF(gcsEVENT),
++ &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = Event->freeEventList;
++ Event->freeEventList = record;
++ Event->freeEventCount += 1;
++ }
++ }
++
++ *Record = Event->freeEventList;
++ Event->freeEventList = Event->freeEventList->next;
++ Event->freeEventCount -= 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", gcmOPT_POINTER(Record));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AddList
++**
++** Add a new event to the list of events.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_INTERFACE_PTR Interface
++** Pointer to the interface for the event to be added.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gctBOOL AllocateAllowed
++** State for allocation if out of free events.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record = gcvNULL;
++ gcsEVENT_QUEUE_PTR queue;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Interface=0x%x",
++ Event, Interface);
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, _GC_OBJ_ZONE,
++ "FromWhere=%d AllocateAllowed=%d",
++ FromWhere, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ /* Verify the event command. */
++ gcmkASSERT
++ ( (Interface->command == gcvHAL_FREE_NON_PAGED_MEMORY)
++ || (Interface->command == gcvHAL_FREE_CONTIGUOUS_MEMORY)
++ || (Interface->command == gcvHAL_FREE_VIDEO_MEMORY)
++ || (Interface->command == gcvHAL_WRITE_DATA)
++ || (Interface->command == gcvHAL_UNLOCK_VIDEO_MEMORY)
++ || (Interface->command == gcvHAL_SIGNAL)
++ || (Interface->command == gcvHAL_UNMAP_USER_MEMORY)
++ || (Interface->command == gcvHAL_TIMESTAMP)
++ || (Interface->command == gcvHAL_COMMIT_DONE)
++ || (Interface->command == gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER)
++ || (Interface->command == gcvHAL_SYNC_POINT)
++ );
++
++ /* Validate the source. */
++ if ((FromWhere != gcvKERNEL_COMMAND) && (FromWhere != gcvKERNEL_PIXEL))
++ {
++ /* Invalid argument. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Allocate a free record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, AllocateAllowed, &record));
++
++ /* Termninate the record. */
++ record->next = gcvNULL;
++
++ /* Record the committer. */
++ record->fromKernel = FromKernel;
++
++ /* Copy the event interface into the record. */
++ gckOS_MemCopy(&record->info, Interface, gcmSIZEOF(record->info));
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&record->processID));
++
++#ifdef __QNXNTO__
++ record->kernel = Event->kernel;
++#endif
++
++ gcmkONERROR(__RemoveRecordFromProcessDB(Event, record));
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->eventListMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Do we need to allocate a new queue? */
++ if ((Event->queueTail == gcvNULL) || (Event->queueTail->source < FromWhere))
++ {
++ /* Allocate a new queue. */
++ gcmkONERROR(gckEVENT_AllocateQueue(Event, &queue));
++
++ /* Initialize the queue. */
++ queue->source = FromWhere;
++ queue->head = gcvNULL;
++ queue->next = gcvNULL;
++
++ /* Attach it to the list of allocated queues. */
++ if (Event->queueTail == gcvNULL)
++ {
++ Event->queueHead =
++ Event->queueTail = queue;
++ }
++ else
++ {
++ Event->queueTail->next = queue;
++ Event->queueTail = queue;
++ }
++ }
++ else
++ {
++ queue = Event->queueTail;
++ }
++
++ /* Attach the record to the queue. */
++ if (queue->head == gcvNULL)
++ {
++ queue->head = record;
++ queue->tail = record;
++ }
++ else
++ {
++ queue->tail->next = record;
++ queue->tail = record;
++ }
++
++ /* Unmap user space logical address.
++ * Linux kernel does not support unmap the memory of other process any more since 3.5.
++ * Let's unmap memory of self process before submit the event to gpu.
++ * */
++ switch(Interface->command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical),
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++ break;
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++ break;
++ default:
++ break;
++ }
++
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Unlock
++**
++** Schedule an event to unlock virtual memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union that specifies the virtual memory
++** to unlock.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d Node=0x%x Type=%d",
++ Event, FromWhere, Node, Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Mark the event as an unlock. */
++ iface.command = gcvHAL_UNLOCK_VIDEO_MEMORY;
++ iface.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(Node);
++ iface.u.UnlockVideoMemory.type = Type;
++ iface.u.UnlockVideoMemory.asynchroneous = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeVideoMemory
++**
++** Schedule an event to free video memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcuVIDMEM_NODE_PTR VideoMemory
++** Pointer to a gcuVIDMEM_NODE object to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_FreeVideoMemory(
++ IN gckEVENT Event,
++ IN gcuVIDMEM_NODE_PTR VideoMemory,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x VideoMemory=0x%x FromWhere=%d",
++ Event, VideoMemory, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(VideoMemory != gcvNULL);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_VIDEO_MEMORY;
++ iface.u.FreeVideoMemory.node = gcmPTR_TO_UINT64(VideoMemory);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeNonPagedMemory
++**
++** Schedule an event to free non-paged memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of non-paged memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of non-paged memory to free.
++**
++** gctPOINTER Logical
++** Logical address of non-paged memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_NON_PAGED_MEMORY;
++ iface.u.FreeNonPagedMemory.bytes = Bytes;
++ iface.u.FreeNonPagedMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeNonPagedMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER;
++ iface.u.FreeVirtualCommandBuffer.bytes = Bytes;
++ iface.u.FreeVirtualCommandBuffer.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeContigiuousMemory
++**
++** Schedule an event to free contiguous memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of contiguous memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of contiguous memory to free.
++**
++** gctPOINTER Logical
++** Logical address of contiguous memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_CONTIGUOUS_MEMORY;
++ iface.u.FreeContiguousMemory.bytes = Bytes;
++ iface.u.FreeContiguousMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeContiguousMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Signal
++**
++** Schedule an event to trigger a signal.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x Signal=0x%x FromWhere=%d",
++ Event, Signal, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Mark the event as a signal. */
++ iface.command = gcvHAL_SIGNAL;
++ iface.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ iface.u.Signal.coid = 0;
++ iface.u.Signal.rcvid = 0;
++#endif
++ iface.u.Signal.auxSignal = 0;
++ iface.u.Signal.process = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_CommitDone
++**
++** Schedule an event to wake up work thread when commit is done by GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ iface.command = gcvHAL_COMMIT_DONE;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++/*******************************************************************************
++**
++** gckEVENT_Submit
++**
++** Submit the current event queue to the GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL Wait
++** Submit requires one vacant event; if Wait is set to not zero,
++** and there are no vacant events at this time, the function will
++** wait until an event becomes vacant so that submission of the
++** queue is successful.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gctUINT8 id = 0xFF;
++ gcsEVENT_QUEUE_PTR queue;
++ gctBOOL acquired = gcvFALSE;
++ gckCOMMAND command = gcvNULL;
++ gctBOOL commitEntered = gcvFALSE;
++#if !gcdNULL_DRIVER
++ gctSIZE_T bytes;
++ gctPOINTER buffer;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x Wait=%d", Event, Wait);
++
++ /* Get gckCOMMAND object. */
++ command = Event->kernel->command;
++
++ /* Are there event queues? */
++ if (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
++ commitEntered = gcvTRUE;
++
++ /* Process all queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the list mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventListMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Allocate an event ID. */
++ gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->head, queue->source));
++
++ /* Copy event list to event ID queue. */
++ Event->queues[id].head = queue->head;
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead = gcvNULL;
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkONERROR(gckEVENT_FreeQueue(Event, queue));
++
++ /* Release the list mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ acquired = gcvFALSE;
++
++#if gcdNULL_DRIVER
++ /* Notify immediately on infinite hardware. */
++ gcmkONERROR(gckEVENT_Interrupt(Event, 1 << id));
++
++ gcmkONERROR(gckEVENT_Notify(Event, 0));
++#else
++ /* Get the size of the hardware event. */
++ gcmkONERROR(gckHARDWARE_Event(Event->kernel->hardware,
++ gcvNULL,
++ id,
++ Event->queues[id].source,
++ &bytes));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ bytes,
++ &buffer,
++ &bytes));
++
++ /* Set the hardware event in the command queue. */
++ gcmkONERROR(gckHARDWARE_Event(Event->kernel->hardware,
++ buffer,
++ id,
++ Event->queues[id].source,
++ &bytes));
++
++ /* Execute the hardware event. */
++ gcmkONERROR(gckCOMMAND_Execute(command, bytes));
++#endif
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
++ commitEntered = gcvFALSE;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, FromPower));
++ }
++
++ if (acquired)
++ {
++ /* Need to unroll the mutex acquire. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (id != 0xFF)
++ {
++ /* Need to unroll the event allocation. */
++ Event->queues[id].head = gcvNULL;
++ }
++
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Broadcast GPU stuck. */
++ status = gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_STUCK);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Commit
++**
++** Commit an event queue from the user.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsQUEUE_PTR Queue
++** User event queue.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ )
++{
++ gceSTATUS status;
++ gcsQUEUE_PTR record = gcvNULL, next;
++ gctUINT32 processID;
++ gctBOOL needCopy = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Queue=0x%x", Event, Queue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Query if we need to copy the client data. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Event->os, processID, &needCopy));
++
++ /* Loop while there are records in the queue. */
++ while (Queue != gcvNULL)
++ {
++ gcsQUEUE queue;
++
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ record = &queue;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(Event->os,
++ record,
++ Queue,
++ gcmSIZEOF(gcsQUEUE)));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(
++ gckEVENT_AddList(Event, &record->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE));
++
++ /* Next record in the queue. */
++ next = gcmUINT64_TO_PTR(record->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ record = gcvNULL;
++ }
++
++ Queue = next;
++ }
++
++ /* Submit the event list. */
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++
++ /* Success */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((record != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Compose
++**
++** Schedule a composition event and start a composition.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_COMPOSE_PTR Info
++** Pointer to the composition structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ )
++{
++ gceSTATUS status;
++ gcsEVENT_PTR headRecord;
++ gcsEVENT_PTR tailRecord;
++ gcsEVENT_PTR tempRecord;
++ gctUINT8 id = 0xFF;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Event=0x%x Info=0x%x", Event, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ headRecord = tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->process;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->signal;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++
++ /* Allocate another record for user signal #1. */
++ if (gcmUINT64_TO_PTR(Info->userSignal1) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++ tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal1;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Allocate another record for user signal #2. */
++ if (gcmUINT64_TO_PTR(Info->userSignal2) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++ tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal2;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Allocate an event ID. */
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, headRecord, gcvKERNEL_PIXEL));
++
++ /* Start composition. */
++ gcmkONERROR(gckHARDWARE_Compose(
++ Event->kernel->hardware, processID,
++ gcmUINT64_TO_PTR(Info->physical), gcmUINT64_TO_PTR(Info->logical), Info->offset, Info->size, id
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Interrupt
++**
++** Called by the interrupt service routine to store the triggered interrupt
++** mask to be later processed by gckEVENT_Notify.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 Data
++** Mask for the 32 interrupts.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++ IN gctUINT32 Data
++ )
++{
++ unsigned long flags;
++ gcmkHEADER_ARG("Event=0x%x Data=0x%x", Event, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Combine current interrupt status with pending flags. */
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ gckOS_AtomSetMask(Event->pending, Data);
++#elif defined(__QNXNTO__)
++ atomic_set(&Event->pending, Data);
++#else
++ Event->pending |= Data;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Notify
++**
++** Process all triggered interrupts.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctINT i;
++ gcsEVENT_QUEUE * queue;
++ gctUINT mask = 0;
++ gctBOOL acquired = gcvFALSE;
++ gcuVIDMEM_NODE_PTR node;
++ gctPOINTER info;
++ gctSIGNAL signal;
++ gctUINT pending;
++ gckKERNEL kernel = Event->kernel;
++#if !gcdSMP
++ gctBOOL suspended = gcvFALSE;
++#endif
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctINT eventNumber = 0;
++#endif
++ gctINT32 free;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ unsigned long flags;
++
++ gcmkHEADER_ARG("Event=0x%x IDs=0x%x", Event, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ gcmDEBUG_ONLY(
++ if (IDs != 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++ for (;;)
++ {
++ gcsEVENT_PTR record;
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Get current interrupts. */
++ gckOS_AtomGet(Event->os, Event->pending, (gctINT32_PTR)&pending);
++#else
++ /* Get current interrupts. */
++ pending = Event->pending;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
++ if (pending & 0x80000000)
++ {
++ //gckOS_Print("!!!!!!!!!!!!! AXI BUS ERROR !!!!!!!!!!!!!\n");
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_EVENT, "AXI BUS ERROR");
++ pending &= 0x7FFFFFFF;
++ }
++
++ if (pending & 0x40000000)
++ {
++ gckHARDWARE_DumpMMUException(Event->kernel->hardware);
++
++ pending &= 0x3FFFFFFF;
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Pending interrupts 0x%x",
++ pending
++ );
++
++ if (pending == 0)
++ {
++ /* No more pending interrupts - done. */
++ break;
++ }
++
++ queue = gcvNULL;
++
++ /* Grab the mutex queue. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmDEBUG_ONLY(
++ if (IDs == 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++ /* Find the oldest pending interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (pending & (1 << i))
++ )
++ {
++ if ((queue == gcvNULL)
++ || (Event->queues[i].stamp < queue->stamp)
++ )
++ {
++ queue = &Event->queues[i];
++ mask = 1 << i;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ eventNumber = i;
++#endif
++ }
++ }
++ }
++
++ if (queue == gcvNULL)
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Interrupts 0x%x are not pending.",
++ pending
++ );
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Mark pending interrupts as handled. */
++ gckOS_AtomClearMask(Event->pending, pending);
++#elif defined(__QNXNTO__)
++ /* Mark pending interrupts as handled. */
++ atomic_clr((gctUINT32_PTR)&Event->pending, pending);
++#else
++ /* Mark pending interrupts as handled. */
++ Event->pending &= ~pending;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++ break;
++ }
++
++ /* Check whether there is a missed interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (Event->queues[i].stamp < queue->stamp)
++ && (Event->queues[i].source <= queue->source)
++ )
++ {
++ gcmkTRACE_N(
++ gcvLEVEL_ERROR,
++ gcmSIZEOF(i) + gcmSIZEOF(Event->queues[i].stamp),
++ "Event %d lost (stamp %llu)",
++ i, Event->queues[i].stamp
++ );
++
++ /* Use this event instead. */
++ queue = &Event->queues[i];
++ mask = 0;
++ }
++ }
++
++ if (mask != 0)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(eventNumber),
++ "Processing interrupt %d",
++ eventNumber
++ );
++#endif
++ }
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Mark pending interrupt as handled. */
++ gckOS_AtomClearMask(Event->pending, mask);
++#elif defined(__QNXNTO__)
++ /* Mark pending interrupt as handled. */
++ atomic_clr(&Event->pending, mask);
++#else
++ /* Mark pending interrupt as handled. */
++ Event->pending &= ~mask;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
++ /* We are in the notify loop. */
++ Event->inNotify = gcvTRUE;
++
++ /* We are in the notify loop. */
++ Event->inNotify = gcvTRUE;
++
++ /* Grab the event head. */
++ record = queue->head;
++
++ /* Now quickly clear its event list. */
++ queue->head = gcvNULL;
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Increase the number of free events. */
++ gcmkONERROR(gckOS_AtomIncrement(Event->os, Event->freeAtom, &free));
++
++ /* Walk all events for this interrupt. */
++ while (record != gcvNULL)
++ {
++ gcsEVENT_PTR recordNext;
++#ifndef __QNXNTO__
++ gctPOINTER logical;
++#endif
++#if gcdSECURE_USER
++ gctSIZE_T bytes;
++#endif
++
++ /* Grab next record. */
++ recordNext = record->next;
++
++#ifdef __QNXNTO__
++ /* Assign record->processID as the pid for this galcore thread.
++ * Used in OS calls like gckOS_UnlockMemory() which do not take a pid.
++ */
++ drv_thread_specific_key_assign(record->processID, 0, Event->kernel->core);
++#endif
++
++#if gcdSECURE_USER
++ /* Get the cache that belongs to this process. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Event->kernel,
++ record->processID,
++ &cache));
++#endif
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Processing event type: %d",
++ record->info.command
++ );
++
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_NON_PAGED_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical));
++
++ /* Free non-paged memory. */
++ status = gckOS_FreeNonPagedMemory(
++ Event->os,
++ (gctSIZE_T) record->info.u.FreeNonPagedMemory.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeNonPagedMemory.logical));
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->record.u.FreeNonPagedMemory.logical),
++ (gctSIZE_T) record->record.u.FreeNonPagedMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_CONTIGUOUS_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical));
++
++ /* Unmap the user memory. */
++ status = gckOS_FreeContiguous(
++ Event->os,
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) record->info.u.FreeContiguousMemory.bytes);
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->record.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) record->record.u.FreeContiguousMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeContiguousMemory.physical);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(record->info.u.FreeVideoMemory.node);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_VIDEO_MEMORY: 0x%x",
++ node);
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ /* Check if the VidMem object still exists. */
++ if (gckKERNEL_GetVideoMemoryPoolPid(record->kernel,
++ gcvPOOL_SYSTEM,
++ record->processID,
++ gcvNULL) == gcvSTATUS_NOT_FOUND)
++ {
++ /*printf("Vidmem not found for process:%d\n", queue->processID);*/
++ status = gcvSTATUS_OK;
++ break;
++ }
++#else
++ if ((node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ && (node->VidMem.logical != gcvNULL)
++ )
++ {
++ gcmkERR_BREAK(
++ gckKERNEL_UnmapVideoMemory(record->kernel,
++ node->VidMem.logical,
++ record->processID,
++ node->VidMem.bytes));
++ node->VidMem.logical = gcvNULL;
++ }
++#endif
++#endif
++
++ /* Free video memory. */
++ status =
++ gckVIDMEM_Free(node);
++
++ break;
++
++ case gcvHAL_WRITE_DATA:
++#ifndef __QNXNTO__
++ /* Convert physical into logical address. */
++ gcmkERR_BREAK(
++ gckOS_MapPhysical(Event->os,
++ record->info.u.WriteData.address,
++ gcmSIZEOF(gctUINT32),
++ &logical));
++
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ logical,
++ record->info.u.WriteData.data));
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(
++ gckOS_UnmapPhysical(Event->os,
++ logical,
++ gcmSIZEOF(gctUINT32)));
++#else
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ (gctPOINTER)
++ record->info.u.WriteData.address,
++ record->info.u.WriteData.data));
++#endif
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(record->info.u.UnlockVideoMemory.node);
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNLOCK_VIDEO_MEMORY: 0x%x",
++ node);
++
++ /* Save node information before it disappears. */
++#if gcdSECURE_USER
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock. */
++ status = gckVIDMEM_Unlock(
++ Event->kernel,
++ node,
++ record->info.u.UnlockVideoMemory.type,
++ gcvNULL);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status) && (logical != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++ break;
++
++ case gcvHAL_SIGNAL:
++ signal = gcmUINT64_TO_PTR(record->info.u.Signal.signal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_SIGNAL: 0x%x",
++ signal);
++
++#ifdef __QNXNTO__
++ if ((record->info.u.Signal.coid == 0)
++ && (record->info.u.Signal.rcvid == 0)
++ )
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ record->info.u.Signal.rcvid,
++ record->info.u.Signal.coid));
++ }
++#else
++ /* Set signal. */
++ if (gcmUINT64_TO_PTR(record->info.u.Signal.process) == gcvNULL)
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ gcmUINT64_TO_PTR(record->info.u.Signal.process)));
++ }
++
++ gcmkASSERT(record->info.u.Signal.auxSignal == 0);
++#endif
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ info = gcmNAME_TO_PTR(record->info.u.UnmapUserMemory.info);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNMAP_USER_MEMORY: 0x%x",
++ info);
++
++ /* Unmap the user memory. */
++ status = gckOS_UnmapUserMemory(
++ Event->os,
++ Event->kernel->core,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size,
++ info,
++ record->info.u.UnmapUserMemory.address);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size));
++ }
++#endif
++ gcmRELEASE_NAME(record->info.u.UnmapUserMemory.info);
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_TIMESTAMP: %d %d",
++ record->info.u.TimeStamp.timer,
++ record->info.u.TimeStamp.request);
++
++ /* Process the timestamp. */
++ switch (record->info.u.TimeStamp.request)
++ {
++ case 0:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ stopTime);
++ break;
++
++ case 1:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ startTime);
++ break;
++
++ default:
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.u.TimeStamp.request),
++ "Invalid timestamp request: %d",
++ record->info.u.TimeStamp.request
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyVirtualCommandBuffer(Event->kernel,
++ (gctSIZE_T) record->info.u.FreeVirtualCommandBuffer.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeVirtualCommandBuffer.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeVirtualCommandBuffer.logical)
++ ));
++ gcmRELEASE_NAME(record->info.u.FreeVirtualCommandBuffer.physical);
++ break;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ syncPoint = gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint);
++ status = gckOS_SignalSyncPoint(Event->os, syncPoint);
++ }
++ break;
++#endif
++
++ case gcvHAL_COMMIT_DONE:
++ break;
++
++ default:
++ /* Invalid argument. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Unknown event type: %d",
++ record->info.command
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Make sure there are no errors generated. */
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(status),
++ "Event produced status: %d(%s)",
++ status, gckOS_DebugStatus2Name(status));
++ }
++
++ /* Free the event. */
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++
++ /* Advance to next record. */
++ record = recordNext;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Handled interrupt 0x%x", mask);
++ }
++
++ if (IDs == 0)
++ {
++ gcmkONERROR(_TryToIdleGPU(Event));
++ }
++
++ /* We are out the notify loop. */
++ Event->inNotify = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++#if !gcdSMP
++ if (suspended)
++ {
++ /* Resume interrupts. */
++ gcmkVERIFY_OK(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ }
++#endif
++
++ /* We are out the notify loop. */
++ Event->inNotify = gcvFALSE;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_FreeProcess
++**
++** Free all events owned by a particular process ID.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 ProcessID
++** Process ID of the process to be freed up.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ )
++{
++ gctSIZE_T i;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record, next;
++ gceSTATUS status;
++ gcsEVENT_PTR deleteHead, deleteTail;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%d", Event, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Walk through all queues. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* Grab the event queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Grab the mutex head. */
++ record = Event->queues[i].head;
++ Event->queues[i].head = gcvNULL;
++ Event->queues[i].tail = gcvNULL;
++ deleteHead = gcvNULL;
++ deleteTail = gcvNULL;
++
++ while (record != gcvNULL)
++ {
++ next = record->next;
++ if (record->processID == ProcessID)
++ {
++ if (deleteHead == gcvNULL)
++ {
++ deleteHead = record;
++ }
++ else
++ {
++ deleteTail->next = record;
++ }
++
++ deleteTail = record;
++ }
++ else
++ {
++ if (Event->queues[i].head == gcvNULL)
++ {
++ Event->queues[i].head = record;
++ }
++ else
++ {
++ Event->queues[i].tail->next = record;
++ }
++
++ Event->queues[i].tail = record;
++ }
++
++ record->next = gcvNULL;
++ record = next;
++ }
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Loop through the entire list of events. */
++ for (record = deleteHead; record != gcvNULL; record = next)
++ {
++ /* Get the next event record. */
++ next = record->next;
++
++ /* Free the event record. */
++ gcmkONERROR(gckEVENT_FreeRecord(Event, record));
++ }
++ }
++ }
++
++ gcmkONERROR(_TryToIdleGPU(Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release the event queue mutex. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_Stop
++**
++** Stop the hardware using the End event mechanism.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctSIZE_T * waitSize
++ )
++{
++ gceSTATUS status;
++ /* gctSIZE_T waitSize;*/
++ gcsEVENT_PTR record;
++ gctUINT8 id = 0xFF;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%u Handle=0x%x Logical=0x%x "
++ "Signal=0x%x",
++ Event, ProcessID, Handle, Logical, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Submit the current event queue. */
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &record));
++
++ /* Initialize the record. */
++ record->next = gcvNULL;
++ record->processID = ProcessID;
++ record->info.command = gcvHAL_SIGNAL;
++ record->info.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ record->info.u.Signal.coid = 0;
++ record->info.u.Signal.rcvid = 0;
++#endif
++ record->info.u.Signal.auxSignal = 0;
++ record->info.u.Signal.process = 0;
++
++
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, record, gcvKERNEL_PIXEL));
++
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ Event->kernel->hardware, Logical, waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Event->os,
++ ProcessID,
++ gcvNULL,
++ Handle,
++ Logical,
++ *waitSize
++ ));
++#endif
++
++ /* Wait for the signal. */
++ gcmkONERROR(gckOS_WaitSignal(Event->os, Signal, gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_PrintRecord(
++ gcsEVENT_PTR record
++ )
++{
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_NON_PAGED_MEMORY");
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_CONTIGUOUS_MEMORY");
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_VIDEO_MEMORY");
++ break;
++
++ case gcvHAL_WRITE_DATA:
++ gcmkPRINT(" gcvHAL_WRITE_DATA");
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkPRINT(" gcvHAL_UNLOCK_VIDEO_MEMORY");
++ break;
++
++ case gcvHAL_SIGNAL:
++ gcmkPRINT(" gcvHAL_SIGNAL process=%d signal=0x%x",
++ record->info.u.Signal.process,
++ record->info.u.Signal.signal);
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkPRINT(" gcvHAL_UNMAP_USER_MEMORY");
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkPRINT(" gcvHAL_TIMESTAMP");
++ break;
++
++ case gcvHAL_COMMIT_DONE:
++ gcmkPRINT(" gcvHAL_COMMIT_DONE");
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkPRINT(" gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER logical=0x%08x",
++ record->info.u.FreeVirtualCommandBuffer.logical);
++ break;
++
++ default:
++ gcmkPRINT(" Illegal Event %d", record->info.command);
++ break;
++ }
++}
++
++/*******************************************************************************
++** gckEVENT_Dump
++**
++** Dump record in event queue when stuck happens.
++** No protection for the event queue.
++**/
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_QUEUE_PTR queueHead = Event->queueHead;
++ gcsEVENT_QUEUE_PTR queue;
++ gcsEVENT_PTR record = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** EVENT STATE DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++
++ gcmkPRINT(" Unsumbitted Event:");
++ while(queueHead)
++ {
++ queue = queueHead;
++ record = queueHead->head;
++
++ gcmkPRINT(" [%x]:", queue);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++
++ if (queueHead == Event->queueTail)
++ {
++ queueHead = gcvNULL;
++ }
++ else
++ {
++ queueHead = queueHead->next;
++ }
++ }
++
++ gcmkPRINT(" Untriggered Event:");
++ for (i = 0; i < 30; i++)
++ {
++ queue = &Event->queues[i];
++ record = queue->head;
++
++ gcmkPRINT(" [%d]:", i);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS gckEVENT_WaitEmpty(gckEVENT Event)
++{
++ gctBOOL isEmpty;
++
++ while (Event->inNotify || (gcmIS_SUCCESS(gckEVENT_IsEmpty(Event, &isEmpty)) && !isEmpty)) ;
++
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1011 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_h_
++#define __gc_hal_kernel_h_
++
++#include <linux/spinlock.h>
++
++#include "gc_hal.h"
++#include "gc_hal_kernel_hardware.h"
++#include "gc_hal_driver.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++/*******************************************************************************
++***** New MMU Defination *******************************************************/
++#define gcdMMU_MTLB_SHIFT 22
++#define gcdMMU_STLB_4K_SHIFT 12
++#define gcdMMU_STLB_64K_SHIFT 16
++
++#define gcdMMU_MTLB_BITS (32 - gcdMMU_MTLB_SHIFT)
++#define gcdMMU_PAGE_4K_BITS gcdMMU_STLB_4K_SHIFT
++#define gcdMMU_STLB_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_4K_BITS)
++#define gcdMMU_PAGE_64K_BITS gcdMMU_STLB_64K_SHIFT
++#define gcdMMU_STLB_64K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_64K_BITS)
++
++#define gcdMMU_MTLB_ENTRY_NUM (1 << gcdMMU_MTLB_BITS)
++#define gcdMMU_MTLB_SIZE (gcdMMU_MTLB_ENTRY_NUM << 2)
++#define gcdMMU_STLB_4K_ENTRY_NUM (1 << gcdMMU_STLB_4K_BITS)
++#define gcdMMU_STLB_4K_SIZE (gcdMMU_STLB_4K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_4K_SIZE (1 << gcdMMU_STLB_4K_SHIFT)
++#define gcdMMU_STLB_64K_ENTRY_NUM (1 << gcdMMU_STLB_64K_BITS)
++#define gcdMMU_STLB_64K_SIZE (gcdMMU_STLB_64K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_64K_SIZE (1 << gcdMMU_STLB_64K_SHIFT)
++
++#define gcdMMU_MTLB_MASK (~((1U << gcdMMU_MTLB_SHIFT)-1))
++#define gcdMMU_STLB_4K_MASK ((~0U << gcdMMU_STLB_4K_SHIFT) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_4K_MASK (gcdMMU_PAGE_4K_SIZE - 1)
++#define gcdMMU_STLB_64K_MASK ((~((1U << gcdMMU_STLB_64K_SHIFT)-1)) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_64K_MASK (gcdMMU_PAGE_64K_SIZE - 1)
++
++/* Page offset definitions. */
++#define gcdMMU_OFFSET_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_4K_BITS)
++#define gcdMMU_OFFSET_4K_MASK ((1U << gcdMMU_OFFSET_4K_BITS) - 1)
++#define gcdMMU_OFFSET_16K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_16K_BITS)
++#define gcdMMU_OFFSET_16K_MASK ((1U << gcdMMU_OFFSET_16K_BITS) - 1)
++
++/*******************************************************************************
++***** Process Secure Cache ****************************************************/
++
++#define gcdSECURE_CACHE_LRU 1
++#define gcdSECURE_CACHE_LINEAR 2
++#define gcdSECURE_CACHE_HASH 3
++#define gcdSECURE_CACHE_TABLE 4
++
++typedef struct _gcskLOGICAL_CACHE * gcskLOGICAL_CACHE_PTR;
++typedef struct _gcskLOGICAL_CACHE gcskLOGICAL_CACHE;
++struct _gcskLOGICAL_CACHE
++{
++ /* Logical address. */
++ gctPOINTER logical;
++
++ /* DMAable address. */
++ gctUINT32 dma;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Pointer to the previous and next hash tables. */
++ gcskLOGICAL_CACHE_PTR nextHash;
++ gcskLOGICAL_CACHE_PTR prevHash;
++#endif
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Pointer to the previous and next slot. */
++ gcskLOGICAL_CACHE_PTR next;
++ gcskLOGICAL_CACHE_PTR prev;
++#endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ /* Time stamp. */
++ gctUINT64 stamp;
++#endif
++};
++
++typedef struct _gcskSECURE_CACHE * gcskSECURE_CACHE_PTR;
++typedef struct _gcskSECURE_CACHE
++{
++ /* Cache memory. */
++ gcskLOGICAL_CACHE cache[1 + gcdSECURE_CACHE_SLOTS];
++
++ /* Last known index for LINEAR mode. */
++ gcskLOGICAL_CACHE_PTR cacheIndex;
++
++ /* Current free slot for LINEAR mode. */
++ gctUINT32 cacheFree;
++
++ /* Time stamp for LINEAR mode. */
++ gctUINT64 cacheStamp;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Hash table for HASH mode. */
++ gcskLOGICAL_CACHE hash[256];
++#endif
++}
++gcskSECURE_CACHE;
++
++/*******************************************************************************
++***** Process Database Management *********************************************/
++
++typedef enum _gceDATABASE_TYPE
++{
++ gcvDB_VIDEO_MEMORY = 1, /* Video memory created. */
++ gcvDB_COMMAND_BUFFER, /* Command Buffer. */
++ gcvDB_NON_PAGED, /* Non paged memory. */
++ gcvDB_CONTIGUOUS, /* Contiguous memory. */
++ gcvDB_SIGNAL, /* Signal. */
++ gcvDB_VIDEO_MEMORY_LOCKED, /* Video memory locked. */
++ gcvDB_CONTEXT, /* Context */
++ gcvDB_IDLE, /* GPU idle. */
++ gcvDB_MAP_MEMORY, /* Map memory */
++ gcvDB_SHARED_INFO, /* Private data */
++ gcvDB_MAP_USER_MEMORY, /* Map user memory */
++ gcvDB_SYNC_POINT, /* Sync point. */
++ gcvDB_VIDEO_MEMORY_RESERVED, /* Reserved video memory */
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS, /* Contiguous video memory */
++ gcvDB_VIDEO_MEMORY_VIRTUAL, /* Virtual video memory */
++}
++gceDATABASE_TYPE;
++
++typedef struct _gcsDATABASE_RECORD * gcsDATABASE_RECORD_PTR;
++typedef struct _gcsDATABASE_RECORD
++{
++ /* Pointer to kernel. */
++ gckKERNEL kernel;
++
++ /* Pointer to next database record. */
++ gcsDATABASE_RECORD_PTR next;
++
++ /* Type of record. */
++ gceDATABASE_TYPE type;
++
++ /* Data for record. */
++ gctPOINTER data;
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++}
++gcsDATABASE_RECORD;
++
++typedef struct _gcsDATABASE * gcsDATABASE_PTR;
++typedef struct _gcsDATABASE
++{
++ /* Pointer to next entry is hash list. */
++ gcsDATABASE_PTR next;
++ gctSIZE_T slot;
++
++ /* Process ID. */
++ gctUINT32 processID;
++
++ /* Sizes to query. */
++ gcsDATABASE_COUNTERS vidMem;
++ gcsDATABASE_COUNTERS nonPaged;
++ gcsDATABASE_COUNTERS contiguous;
++ gcsDATABASE_COUNTERS mapUserMemory;
++ gcsDATABASE_COUNTERS mapMemory;
++ gcsDATABASE_COUNTERS vidMemResv;
++ gcsDATABASE_COUNTERS vidMemCont;
++ gcsDATABASE_COUNTERS vidMemVirt;
++
++ /* Idle time management. */
++ gctUINT64 lastIdle;
++ gctUINT64 idle;
++
++ /* Pointer to database. */
++ gcsDATABASE_RECORD_PTR list[48];
++
++#if gcdSECURE_USER
++ /* Secure cache. */
++ gcskSECURE_CACHE cache;
++#endif
++
++ gctPOINTER handleDatabase;
++ gctPOINTER handleDatabaseMutex;
++}
++gcsDATABASE;
++
++/* Create a process database that will contain all its allocations. */
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Add a record to the process database. */
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ );
++
++/* Remove a record to the process database. */
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ );
++
++/* Destroy the process database. */
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Find a record to the process database. */
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ );
++
++/* Query the process database. */
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ );
++
++/* Dump the process database. */
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ );
++
++/* ID database */
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ );
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ );
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ );
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ );
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ );
++
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ );
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++#if gcdSECURE_USER
++/* Get secure cache from the process database. */
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ );
++#endif
++
++/*******************************************************************************
++********* Timer Management ****************************************************/
++typedef struct _gcsTIMER * gcsTIMER_PTR;
++typedef struct _gcsTIMER
++{
++ /* Start and Stop time holders. */
++ gctUINT64 startTime;
++ gctUINT64 stopTime;
++}
++gcsTIMER;
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckDB object. */
++struct _gckDB
++{
++ /* Database management. */
++ gcsDATABASE_PTR db[16];
++ gctPOINTER dbMutex;
++ gcsDATABASE_PTR freeDatabase;
++ gcsDATABASE_RECORD_PTR freeRecord;
++ gcsDATABASE_PTR lastDatabase;
++ gctUINT32 lastProcessID;
++ gctUINT64 lastIdle;
++ gctUINT64 idleTime;
++ gctUINT64 lastSlowdown;
++ gctUINT64 lastSlowdownIdle;
++ /* ID - Pointer database*/
++ gctPOINTER pointerDatabase;
++ gctPOINTER pointerDatabaseMutex;
++};
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++typedef struct _gckVIRTUAL_COMMAND_BUFFER * gckVIRTUAL_COMMAND_BUFFER_PTR;
++typedef struct _gckVIRTUAL_COMMAND_BUFFER
++{
++ gctPHYS_ADDR physical;
++ gctPOINTER userLogical;
++ gctPOINTER kernelLogical;
++ gctSIZE_T pageCount;
++ gctPOINTER pageTable;
++ gctUINT32 gpuAddress;
++ gctUINT pid;
++ gckVIRTUAL_COMMAND_BUFFER_PTR next;
++ gckVIRTUAL_COMMAND_BUFFER_PTR prev;
++ gckKERNEL kernel;
++}
++gckVIRTUAL_COMMAND_BUFFER;
++#endif
++
++/* gckKERNEL object. */
++struct _gckKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Pointer to gckCOMMAND object. */
++ gckCOMMAND command;
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckMMU mmu;
++
++ /* Arom holding number of clients. */
++ gctPOINTER atomClients;
++
++#if VIVANTE_PROFILER
++ /* Enable profiling */
++ gctBOOL profileEnable;
++
++ /* Clear profile register or not*/
++ gctBOOL profileCleanRegister;
++
++#endif
++
++#ifdef QNX_SINGLE_THREADED_DEBUGGING
++ gctPOINTER debugMutex;
++#endif
++
++ /* Database management. */
++ gckDB db;
++ gctBOOL dbCreated;
++
++#if gcdENABLE_RECOVERY
++ gctPOINTER resetFlagClearTimer;
++ gctPOINTER resetAtom;
++ gctUINT64 resetTimeStamp;
++#endif
++
++ /* Pointer to gckEVENT object. */
++ gcsTIMER timers[8];
++ gctUINT32 timeOut;
++
++#if gcdENABLE_VG
++ gckVGKERNEL vg;
++#endif
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferHead;
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferTail;
++ gctPOINTER virtualBufferLock;
++#endif
++
++#if gcdDVFS
++ gckDVFS dvfs;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gctHANDLE timeline;
++#endif
++
++ spinlock_t irq_lock;
++};
++
++struct _FrequencyHistory
++{
++ gctUINT32 frequency;
++ gctUINT32 count;
++};
++
++/* gckDVFS object. */
++struct _gckDVFS
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gctPOINTER timer;
++ gctUINT32 pollingTime;
++ gctBOOL stop;
++ gctUINT32 totalConfig;
++ gctUINT32 loads[8];
++ gctUINT8 currentScale;
++ struct _FrequencyHistory frequencyHistory[16];
++};
++
++/* gckCOMMAND object. */
++struct _gckCOMMAND
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to required object. */
++ gckKERNEL kernel;
++ gckOS os;
++
++ /* Number of bytes per page. */
++ gctSIZE_T pageSize;
++
++ /* Current pipe select. */
++ gcePIPE_SELECT pipeSelect;
++
++ /* Command queue running flag. */
++ gctBOOL running;
++
++ /* Idle flag and commit stamp. */
++ gctBOOL idle;
++ gctUINT64 commitStamp;
++
++ /* Command queue mutex. */
++ gctPOINTER mutexQueue;
++
++ /* Context switching mutex. */
++ gctPOINTER mutexContext;
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context sequence mutex. */
++ gctPOINTER mutexContextSeq;
++#endif
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++
++ /* Current command queue. */
++ struct _gcskCOMMAND_QUEUE
++ {
++ gctSIGNAL signal;
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ }
++ queues[gcdCOMMAND_QUEUES];
++
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ gctUINT32 offset;
++ gctINT index;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctUINT wrapCount;
++#endif
++
++ /* The command queue is new. */
++ gctBOOL newQueue;
++
++ /* Context management. */
++ gckCONTEXT currContext;
++
++ /* Pointer to last WAIT command. */
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctSIZE_T waitSize;
++
++ /* Command buffer alignment. */
++ gctSIZE_T alignment;
++ gctSIZE_T reservedHead;
++ gctSIZE_T reservedTail;
++
++ /* Commit counter. */
++ gctPOINTER atomCommit;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* End Event signal. */
++ gctSIGNAL endEventSignal;
++
++#if gcdSECURE_USER
++ /* Hint array copy buffer. */
++ gctBOOL hintArrayAllocated;
++ gctUINT hintArraySize;
++ gctUINT32_PTR hintArray;
++#endif
++};
++
++typedef struct _gcsEVENT * gcsEVENT_PTR;
++
++/* Structure holding one event to be processed. */
++typedef struct _gcsEVENT
++{
++ /* Pointer to next event in queue. */
++ gcsEVENT_PTR next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE info;
++
++ /* Process ID owning the event. */
++ gctUINT32 processID;
++
++#ifdef __QNXNTO__
++ /* Kernel. */
++ gckKERNEL kernel;
++#endif
++
++ gctBOOL fromKernel;
++}
++gcsEVENT;
++
++/* Structure holding a list of events to be processed by an interrupt. */
++typedef struct _gcsEVENT_QUEUE * gcsEVENT_QUEUE_PTR;
++typedef struct _gcsEVENT_QUEUE
++{
++ /* Time stamp. */
++ gctUINT64 stamp;
++
++ /* Source of the event. */
++ gceKERNEL_WHERE source;
++
++ /* Pointer to head of event queue. */
++ gcsEVENT_PTR head;
++
++ /* Pointer to tail of event queue. */
++ gcsEVENT_PTR tail;
++
++ /* Next list of events. */
++ gcsEVENT_QUEUE_PTR next;
++}
++gcsEVENT_QUEUE;
++
++/*
++ gcdREPO_LIST_COUNT defines the maximum number of event queues with different
++ hardware module sources that may coexist at the same time. Only two sources
++ are supported - gcvKERNEL_COMMAND and gcvKERNEL_PIXEL. gcvKERNEL_COMMAND
++ source is used only for managing the kernel command queue and is only issued
++ when the current command queue gets full. Since we commit event queues every
++ time we commit command buffers, in the worst case we can have up to three
++ pending event queues:
++ - gcvKERNEL_PIXEL
++ - gcvKERNEL_COMMAND (queue overflow)
++ - gcvKERNEL_PIXEL
++*/
++#define gcdREPO_LIST_COUNT 3
++
++/* gckEVENT object. */
++struct _gckEVENT
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to required objects. */
++ gckOS os;
++ gckKERNEL kernel;
++
++ /* Time stamp. */
++ gctUINT64 stamp;
++ gctUINT64 lastCommitStamp;
++
++ /* Queue mutex. */
++ gctPOINTER eventQueueMutex;
++
++ /* Array of event queues. */
++ gcsEVENT_QUEUE queues[30];
++ gctUINT8 lastID;
++ gctPOINTER freeAtom;
++
++ /* Pending events. */
++#if gcdSMP
++ gctPOINTER pending;
++#else
++ volatile gctUINT pending;
++#endif
++
++ /* List of free event structures and its mutex. */
++ gcsEVENT_PTR freeEventList;
++ gctSIZE_T freeEventCount;
++ gctPOINTER freeEventMutex;
++
++ /* Event queues. */
++ gcsEVENT_QUEUE_PTR queueHead;
++ gcsEVENT_QUEUE_PTR queueTail;
++ gcsEVENT_QUEUE_PTR freeList;
++ gcsEVENT_QUEUE repoList[gcdREPO_LIST_COUNT];
++ gctPOINTER eventListMutex;
++
++ gctPOINTER submitTimer;
++
++ volatile gctBOOL inNotify;
++};
++
++/* Free all events belonging to a process. */
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ );
++
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctSIZE_T * waitSize
++ );
++
++gceSTATUS
++gckEVENT_WaitEmpty(
++ IN gckEVENT Event
++ );
++
++/* gcuVIDMEM_NODE structure. */
++typedef union _gcuVIDMEM_NODE
++{
++ /* Allocated from gckVIDMEM. */
++ struct _gcsVIDMEM_NODE_VIDMEM
++ {
++ /* Owner of this node. */
++ gckVIDMEM memory;
++
++ /* Dual-linked list of nodes. */
++ gcuVIDMEM_NODE_PTR next;
++ gcuVIDMEM_NODE_PTR prev;
++
++ /* Dual linked list of free nodes. */
++ gcuVIDMEM_NODE_PTR nextFree;
++ gcuVIDMEM_NODE_PTR prevFree;
++
++ /* Information for this node. */
++ gctUINT32 offset;
++ gctSIZE_T bytes;
++ gctUINT32 alignment;
++
++#ifdef __QNXNTO__
++ /* Client/server vaddr (mapped using mmap_join). */
++ gctPOINTER logical;
++#endif
++
++ /* Locked counter. */
++ gctINT32 locked;
++
++ /* Memory pool. */
++ gcePOOL pool;
++ gctUINT32 physical;
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++ /* Prevent compositor from freeing until client unlocks. */
++ gctBOOL freePending;
++
++ /* */
++ gcsVIDMEM_NODE_SHARED_INFO sharedInfo;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ gctPOINTER kernelVirtual;
++#endif
++
++ /* Surface type. */
++ gceSURF_TYPE type;
++ }
++ VidMem;
++
++ /* Allocated from gckOS. */
++ struct _gcsVIDMEM_NODE_VIRTUAL
++ {
++ /* Pointer to gckKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Information for this node. */
++ /* Contiguously allocated? */
++ gctBOOL contiguous;
++ /* mdl record pointer... a kmalloc address. Process agnostic. */
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++ /* do_mmap_pgoff address... mapped per-process. */
++ gctPOINTER logical;
++
++ /* Page table information. */
++ /* Used only when node is not contiguous */
++ gctSIZE_T pageCount;
++
++ /* Used only when node is not contiguous */
++ gctPOINTER pageTables[gcdMAX_GPU_COUNT];
++ /* Pointer to gckKERNEL object who lock this. */
++ gckKERNEL lockKernels[gcdMAX_GPU_COUNT];
++ /* Actual physical address */
++ gctUINT32 addresses[gcdMAX_GPU_COUNT];
++
++ /* Mutex. */
++ gctPOINTER mutex;
++
++ /* Locked counter. */
++ gctINT32 lockeds[gcdMAX_GPU_COUNT];
++
++#ifdef __QNXNTO__
++ /* Single linked list of nodes. */
++ gcuVIDMEM_NODE_PTR next;
++
++ /* Unlock pending flag. */
++ gctBOOL unlockPendings[gcdMAX_GPU_COUNT];
++
++ /* Free pending flag. */
++ gctBOOL freePending;
++#endif
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++ /* Owner process sets freed to true
++ * when it trys to free a locked
++ * node */
++ gctBOOL freed;
++
++ /* */
++ gcsVIDMEM_NODE_SHARED_INFO sharedInfo;
++
++ /* Surface type. */
++ gceSURF_TYPE type;
++ }
++ Virtual;
++}
++gcuVIDMEM_NODE;
++
++/* gckVIDMEM object. */
++struct _gckVIDMEM
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Information for this video memory heap. */
++ gctUINT32 baseAddress;
++ gctSIZE_T bytes;
++ gctSIZE_T freeBytes;
++
++ /* Mapping for each type of surface. */
++ gctINT mapping[gcvSURF_NUM_TYPES];
++
++ /* Sentinel nodes for up to 8 banks. */
++ gcuVIDMEM_NODE sentinel[8];
++
++ /* Allocation threshold. */
++ gctSIZE_T threshold;
++
++ /* The heap mutex. */
++ gctPOINTER mutex;
++
++#if gcdUSE_VIDMEM_PER_PID
++ /* The Pid this VidMem belongs to. */
++ gctUINT32 pid;
++
++ struct _gckVIDMEM* next;
++#endif
++};
++
++/* gckMMU object. */
++struct _gckMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER pageTableMutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctUINT32_PTR pageTableLogical;
++ gctUINT32 pageTableEntries;
++
++ /* Master TLB information. */
++ gctSIZE_T mtlbSize;
++ gctPHYS_ADDR mtlbPhysical;
++ gctUINT32_PTR mtlbLogical;
++ gctUINT32 mtlbEntries;
++
++ /* Free entries. */
++ gctUINT32 heapList;
++ gctBOOL freeNodes;
++
++ gctPOINTER staticSTLB;
++ gctBOOL enabled;
++
++ gctUINT32 dynamicMappingStart;
++
++#ifdef __QNXNTO__
++ /* Single linked list of all allocated nodes. */
++ gctPOINTER nodeMutex;
++ gcuVIDMEM_NODE_PTR nodeList;
++#endif
++};
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gctPHYS_ADDR Physical,
++ OUT gctSIZE_T * PageCount,
++ OUT gctPOINTER * Logical
++ );
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ );
++#endif
++
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ );
++
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ );
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ );
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++
++/******************************************************************************\
++******************************* gckCONTEXT Object *******************************
++\******************************************************************************/
++
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ );
++
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ );
++
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ );
++
++#if gcdLINK_QUEUE_SIZE
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ );
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ );
++#endif
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_heap.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_heap.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_heap.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_heap.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,859 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/**
++** @file
++** gckHEAP object for kernel HAL layer. The heap implemented here is an arena-
++** based memory allocation. An arena-based memory heap allocates data quickly
++** from specified arenas and reduces memory fragmentation.
++**
++*/
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_HEAP
++
++/*******************************************************************************
++***** Structures ***************************************************************
++*******************************************************************************/
++
++#define gcdIN_USE ((gcskNODE_PTR) ~0)
++
++typedef struct _gcskNODE * gcskNODE_PTR;
++typedef struct _gcskNODE
++{
++ /* Number of byets in node. */
++ gctSIZE_T bytes;
++
++ /* Pointer to next free node, or gcvNULL to mark the node as freed, or
++ ** gcdIN_USE to mark the node as used. */
++ gcskNODE_PTR next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Time stamp of allocation. */
++ gctUINT64 timeStamp;
++#endif
++}
++gcskNODE;
++
++typedef struct _gcskHEAP * gcskHEAP_PTR;
++typedef struct _gcskHEAP
++{
++ /* Linked list. */
++ gcskHEAP_PTR next;
++ gcskHEAP_PTR prev;
++
++ /* Heap size. */
++ gctSIZE_T size;
++
++ /* Free list. */
++ gcskNODE_PTR freeList;
++}
++gcskHEAP;
++
++struct _gckHEAP
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to a gckOS object. */
++ gckOS os;
++
++ /* Locking mutex. */
++ gctPOINTER mutex;
++
++ /* Allocation parameters. */
++ gctSIZE_T allocationSize;
++
++ /* Heap list. */
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT64 timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Profile information. */
++ gctUINT32 allocCount;
++ gctUINT64 allocBytes;
++ gctUINT64 allocBytesMax;
++ gctUINT64 allocBytesTotal;
++ gctUINT32 heapCount;
++ gctUINT32 heapCountMax;
++ gctUINT64 heapMemory;
++ gctUINT64 heapMemoryMax;
++#endif
++};
++
++/*******************************************************************************
++***** Static Support Functions *************************************************
++*******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++static gctSIZE_T
++_DumpHeap(
++ IN gcskHEAP_PTR Heap
++ )
++{
++ gctPOINTER p;
++ gctSIZE_T leaked = 0;
++
++ /* Start at first node. */
++ for (p = Heap + 1;;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ /* Check if this is a used node. */
++ if (node->next == gcdIN_USE)
++ {
++ /* Print the leaking node. */
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_HEAP,
++ "Detected leaking: node=0x%x bytes=%lu timeStamp=%llu "
++ "(%08X %c%c%c%c)",
++ node, node->bytes, node->timeStamp,
++ ((gctUINT32_PTR) (node + 1))[0],
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[0]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[1]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[2]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[3]));
++
++ /* Add leaking byte count. */
++ leaked += node->bytes;
++ }
++
++ /* Test for end of heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ else
++ {
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++ }
++
++ /* Return the number of leaked bytes. */
++ return leaked;
++}
++#endif
++
++static gceSTATUS
++_CompactKernelHeap(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap, next;
++ gctPOINTER p;
++ gcskHEAP_PTR freeList = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = next)
++ {
++ gcskNODE_PTR lastFree = gcvNULL;
++
++ /* Zero out the free list. */
++ heap->freeList = gcvNULL;
++
++ /* Start at the first node. */
++ for (p = (gctUINT8_PTR) (heap + 1);;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size));
++
++ /* Test if this node not used. */
++ if (node->next != gcdIN_USE)
++ {
++ /* Test if this is the end of the heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ /* Test if this is the first free node. */
++ else if (lastFree == gcvNULL)
++ {
++ /* Initialize the free list. */
++ heap->freeList = node;
++ lastFree = node;
++ }
++
++ else
++ {
++ /* Test if this free node is contiguous with the previous
++ ** free node. */
++ if ((gctUINT8_PTR) lastFree + lastFree->bytes == p)
++ {
++ /* Just increase the size of the previous free node. */
++ lastFree->bytes += node->bytes;
++ }
++ else
++ {
++ /* Add to linked list. */
++ lastFree->next = node;
++ lastFree = node;
++ }
++ }
++ }
++
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++
++ /* Mark the end of the chain. */
++ if (lastFree != gcvNULL)
++ {
++ lastFree->next = gcvNULL;
++ }
++
++ /* Get next heap. */
++ next = heap->next;
++
++ /* Check if the entire heap is free. */
++ if ((heap->freeList != gcvNULL)
++ && (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE))
++ )
++ {
++ /* Remove the heap from the linked list. */
++ if (heap->prev == gcvNULL)
++ {
++ Heap->heap = next;
++ }
++ else
++ {
++ heap->prev->next = next;
++ }
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount -= 1;
++ Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP);
++#endif
++
++ /* Add this heap to the list of heaps that need to be freed. */
++ heap->next = freeList;
++ freeList = heap;
++ }
++ }
++
++ if (freeList != gcvNULL)
++ {
++ /* Release the mutex, remove any chance for a dead lock. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Free all heaps in the free list. */
++ for (heap = freeList; heap != gcvNULL; heap = next)
++ {
++ /* Get pointer to the next heap. */
++ next = heap->next;
++
++ /* Free the heap. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Freeing heap 0x%x (%lu bytes)",
++ heap, heap->size + gcmSIZEOF(gcskHEAP));
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Acquire the mutex again. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++***** gckHEAP API Code *********************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHEAP_Construct
++**
++** Construct a new gckHEAP object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIZE_T AllocationSize
++** Minimum size per arena.
++**
++** OUTPUT:
++**
++** gckHEAP * Heap
++** Pointer to a variable that will hold the pointer to the gckHEAP
++** object.
++*/
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ )
++{
++ gceSTATUS status;
++ gckHEAP heap = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x AllocationSize=%lu", Os, AllocationSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Heap != gcvNULL);
++
++ /* Allocate the gckHEAP object. */
++ gcmkONERROR(gckOS_AllocateMemory(Os,
++ gcmSIZEOF(struct _gckHEAP),
++ &pointer));
++
++ heap = pointer;
++
++ /* Initialize the gckHEAP object. */
++ heap->object.type = gcvOBJ_HEAP;
++ heap->os = Os;
++ heap->allocationSize = AllocationSize;
++ heap->heap = gcvNULL;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ heap->timeStamp = 0;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Zero the counters. */
++ heap->allocCount = 0;
++ heap->allocBytes = 0;
++ heap->allocBytesMax = 0;
++ heap->allocBytesTotal = 0;
++ heap->heapCount = 0;
++ heap->heapCountMax = 0;
++ heap->heapMemory = 0;
++ heap->heapMemoryMax = 0;
++#endif
++
++ /* Create the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &heap->mutex));
++
++ /* Return the pointer to the gckHEAP object. */
++ *Heap = heap;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Heap=0x%x", *Heap);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (heap != gcvNULL)
++ {
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Os, heap));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Destroy
++**
++** Destroy a gckHEAP object.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctSIZE_T leaked = 0;
++#endif
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ for (heap = Heap->heap; heap != gcvNULL; heap = Heap->heap)
++ {
++ /* Unlink heap from linked list. */
++ Heap->heap = heap->next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Check for leaked memory. */
++ leaked += _DumpHeap(heap);
++#endif
++
++ /* Free the heap. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Heap->os, Heap->mutex));
++
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, Heap));
++
++ /* Success. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkFOOTER_ARG("leaked=%lu", leaked);
++#else
++ gcmkFOOTER_NO();
++#endif
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Allocate
++**
++** Allocate data from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctSIZE_T Bytes
++** Number of byte to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the address of the allocated
++** memory.
++*/
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctBOOL acquired = gcvFALSE;
++ gcskHEAP_PTR heap;
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gcskNODE_PTR node, used, prevFree = gcvNULL;
++ gctPOINTER memory = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Determine number of bytes required for a node. */
++ bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Check if this allocation is bigger than the default allocation size. */
++ if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
++ {
++ /* Adjust allocation size. */
++ Heap->allocationSize = bytes * 2;
++ }
++
++ else if (Heap->heap != gcvNULL)
++ {
++ gctINT i;
++
++ /* 2 retries, since we might need to compact. */
++ for (i = 0; i < 2; ++i)
++ {
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
++ {
++ /* Check if this heap has enough bytes to hold the request. */
++ if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
++ {
++ prevFree = gcvNULL;
++
++ /* Walk the chain of free nodes. */
++ for (node = heap->freeList;
++ node != gcvNULL;
++ node = node->next
++ )
++ {
++ gcmkASSERT(node->next != gcdIN_USE);
++
++ /* Check if this free node has enough bytes. */
++ if (node->bytes >= bytes)
++ {
++ /* Use the node. */
++ goto UseNode;
++ }
++
++ /* Save current free node for linked list management. */
++ prevFree = node;
++ }
++ }
++ }
++
++ if (i == 0)
++ {
++ /* Compact the heap. */
++ gcmkVERIFY_OK(_CompactKernelHeap(Heap));
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "===== KERNEL HEAP =====");
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of allocations : %12u",
++ Heap->allocCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of bytes allocated : %12llu",
++ Heap->allocBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum allocation size : %12llu",
++ Heap->allocBytesMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Total number of bytes allocated : %12llu",
++ Heap->allocBytesTotal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of heaps : %12u",
++ Heap->heapCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Heap memory in bytes : %12llu",
++ Heap->heapMemory);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum number of heaps : %12u",
++ Heap->heapCountMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum heap memory in bytes : %12llu",
++ Heap->heapMemoryMax);
++#endif
++ }
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkONERROR(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ acquired = gcvFALSE;
++
++ /* Allocate a new heap. */
++ gcmkONERROR(
++ gckOS_AllocateMemory(Heap->os,
++ Heap->allocationSize,
++ &memory));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Allocated heap 0x%x (%lu bytes)",
++ memory, Heap->allocationSize);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Use the allocated memory as the heap. */
++ heap = (gcskHEAP_PTR) memory;
++
++ /* Insert this heap to the head of the chain. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap;
++ }
++ Heap->heap = heap;
++
++ /* Mark the end of the heap. */
++ node = (gcskNODE_PTR) ( (gctUINT8_PTR) heap
++ + Heap->allocationSize
++ - gcmSIZEOF(gcskNODE)
++ );
++ node->bytes = 0;
++ node->next = gcvNULL;
++
++ /* Create a free list. */
++ node = (gcskNODE_PTR) (heap + 1);
++ heap->freeList = node;
++
++ /* Initialize the free list. */
++ node->bytes = heap->size - gcmSIZEOF(gcskNODE);
++ node->next = gcvNULL;
++
++ /* No previous free. */
++ prevFree = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount += 1;
++ Heap->heapMemory += Heap->allocationSize;
++
++ if (Heap->heapCount > Heap->heapCountMax)
++ {
++ Heap->heapCountMax = Heap->heapCount;
++ }
++ if (Heap->heapMemory > Heap->heapMemoryMax)
++ {
++ Heap->heapMemoryMax = Heap->heapMemory;
++ }
++#endif
++
++UseNode:
++ /* Verify some stuff. */
++ gcmkASSERT(heap != gcvNULL);
++ gcmkASSERT(node != gcvNULL);
++ gcmkASSERT(node->bytes >= bytes);
++
++ if (heap->prev != gcvNULL)
++ {
++ /* Unlink the heap from the linked list. */
++ heap->prev->next = heap->next;
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++ /* Move the heap to the front of the list. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ Heap->heap = heap;
++ heap->next->prev = heap;
++ }
++
++ /* Check if there is enough free space left after usage for another free
++ ** node. */
++ if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
++ {
++ /* Allocated used space from the back of the free list. */
++ used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);
++
++ /* Adjust the number of free bytes. */
++ node->bytes -= bytes;
++ gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
++ }
++ else
++ {
++ /* Remove this free list from the chain. */
++ if (prevFree == gcvNULL)
++ {
++ heap->freeList = node->next;
++ }
++ else
++ {
++ prevFree->next = node->next;
++ }
++
++ /* Consume the entire free node. */
++ used = (gcskNODE_PTR) node;
++ bytes = node->bytes;
++ }
++
++ /* Mark node as used. */
++ used->bytes = bytes;
++ used->next = gcdIN_USE;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ used->timeStamp = ++Heap->timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocCount += 1;
++ Heap->allocBytes += bytes;
++ Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
++ Heap->allocBytesTotal += bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Return pointer to memory. */
++ *Memory = used + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++ }
++
++ if (memory != gcvNULL)
++ {
++ /* Free the heap memory. */
++ gckOS_FreeMemory(Heap->os, memory);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Free
++**
++** Free allocated memory from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctPOINTER Memory
++** Pointer to memory to free.
++**
++** OUTPUT:
++**
++** NOTHING.
++*/
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Memory
++ )
++{
++ gcskNODE_PTR node;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Heap=0x%x Memory=0x%x", Heap, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ /* Pointer to structure. */
++ node = (gcskNODE_PTR) Memory - 1;
++
++ /* Mark the node as freed. */
++ node->next = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocBytes -= node->bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++
++ /* Zero the counters. */
++ Heap->allocCount = 0;
++ Heap->allocBytes = 0;
++ Heap->allocBytesMax = 0;
++ Heap->allocBytesTotal = 0;
++ Heap->heapCount = 0;
++ Heap->heapCountMax = 0;
++ Heap->heapMemory = 0;
++ Heap->heapMemoryMax = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x Title=0x%x", Heap, Title);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Title != gcvNULL);
++
++ gcmkPRINT("");
++ gcmkPRINT("=====[ HEAP - %s ]=====", Title);
++ gcmkPRINT("Number of allocations : %12u", Heap->allocCount);
++ gcmkPRINT("Number of bytes allocated : %12llu", Heap->allocBytes);
++ gcmkPRINT("Maximum allocation size : %12llu", Heap->allocBytesMax);
++ gcmkPRINT("Total number of bytes allocated : %12llu", Heap->allocBytesTotal);
++ gcmkPRINT("Number of heaps : %12u", Heap->heapCount);
++ gcmkPRINT("Heap memory in bytes : %12llu", Heap->heapMemory);
++ gcmkPRINT("Maximum number of heaps : %12u", Heap->heapCountMax);
++ gcmkPRINT("Maximum heap memory in bytes : %12llu", Heap->heapMemoryMax);
++ gcmkPRINT("==============================================");
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif /* VIVANTE_PROFILER */
++
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_interrupt_vg.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_interrupt_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_interrupt_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_interrupt_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,877 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++/* Interrupt statistics will be accumulated if not zero. */
++#define gcmENABLE_INTERRUPT_STATISTICS 0
++
++#define _GC_OBJ_ZONE gcvZONE_INTERRUPT
++
++/* Object structure. */
++struct _gckVGINTERRUPT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* gckVGKERNEL pointer. */
++ gckVGKERNEL kernel;
++
++ /* gckOS pointer. */
++ gckOS os;
++
++ /* Interrupt handlers. */
++ gctINTERRUPT_HANDLER handlers[32];
++
++ /* Main interrupt handler thread. */
++ gctTHREAD handler;
++ gctBOOL terminate;
++
++ /* Interrupt FIFO. */
++ gctSEMAPHORE fifoValid;
++ gctUINT32 fifo[256];
++ gctUINT fifoItems;
++ gctUINT8 head;
++ gctUINT8 tail;
++
++ /* Interrupt statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT maxFifoItems;
++ gctUINT fifoOverflow;
++ gctUINT maxSimultaneous;
++ gctUINT multipleCount;
++#endif
++};
++
++
++/*******************************************************************************
++**
++** _ProcessInterrupt
++**
++** The interrupt processor.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt,
++ gctUINT_PTR TriggeredCount
++ )
++#else
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++ gctUINT i;
++
++ /* Advance to the next entry. */
++ Interrupt->tail += 1;
++ Interrupt->fifoItems -= 1;
++
++ /* Get the interrupt value. */
++ triggered = Interrupt->fifo[Interrupt->tail];
++ gcmkASSERT(triggered != 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: triggered=0x%08X\n",
++ __FUNCTION__,
++ triggered
++ );
++
++ /* Walk through all possible interrupts. */
++ for (i = 0; i < gcmSIZEOF(Interrupt->handlers); i += 1)
++ {
++ /* Test if interrupt happened. */
++ if ((triggered & 1) == 1)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (TriggeredCount != gcvNULL)
++ {
++ (* TriggeredCount) += 1;
++ }
++#endif
++
++ /* Make sure we have valid handler. */
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Interrupt %d isn't registered.\n",
++ __FUNCTION__, i
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: interrupt=%d\n",
++ __FUNCTION__,
++ i
++ );
++
++ /* Call the handler. */
++ status = Interrupt->handlers[i] (Interrupt->kernel);
++
++ if (gcmkIS_ERROR(status))
++ {
++ /* Failed to signal the semaphore. */
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Error %d incrementing the semaphore #%d.\n",
++ __FUNCTION__, status, i
++ );
++ }
++ }
++ }
++
++ /* Next interrupt. */
++ triggered >>= 1;
++
++ /* No more interrupts to handle? */
++ if (triggered == 0)
++ {
++ break;
++ }
++ }
++}
++
++
++/*******************************************************************************
++**
++** _MainInterruptHandler
++**
++** The main interrupt thread serves the interrupt FIFO and calls registered
++** handlers for the interrupts that occurred. The handlers are called in the
++** sequence interrupts occurred with the exception when multiple interrupts
++** occurred at the same time. In that case the handler calls are "sorted" by
++** the interrupt number therefore giving the interrupts with lower numbers
++** higher priority.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gctTHREADFUNCRESULT gctTHREADFUNCTYPE
++_MainInterruptHandler(
++ gctTHREADFUNCPARAMETER ThreadParameter
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT count;
++#endif
++
++ /* Cast the object. */
++ interrupt = (gckVGINTERRUPT) ThreadParameter;
++
++ /* Enter the loop. */
++ while (gcvTRUE)
++ {
++ /* Wait for an interrupt. */
++ status = gckOS_DecrementSemaphore(interrupt->os, interrupt->fifoValid);
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* System termination request? */
++ if (status == gcvSTATUS_TERMINATE)
++ {
++ break;
++ }
++
++ /* Driver is shutting down? */
++ if (interrupt->terminate)
++ {
++ break;
++ }
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Reset triggered count. */
++ count = 0;
++
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt, &count);
++
++ /* Update counters. */
++ if (count > interrupt->maxSimultaneous)
++ {
++ interrupt->maxSimultaneous = count;
++ }
++
++ if (count > 1)
++ {
++ interrupt->multipleCount += 1;
++ }
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt);
++#endif
++ }
++
++ return 0;
++}
++
++
++/*******************************************************************************
++**
++** _StartInterruptHandler / _StopInterruptHandler
++**
++** Main interrupt handler routine control.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gceSTATUS
++_StartInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status, last;
++
++ do
++ {
++ /* Objects must not be already created. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++ gcmkASSERT(Interrupt->handler == gcvNULL);
++
++ /* Reset the termination request. */
++ Interrupt->terminate = gcvFALSE;
++
++#if !gcdENABLE_INFINITE_SPEED_HW
++ /* Construct the fifo semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ Interrupt->os, &Interrupt->fifoValid
++ ));
++
++ /* Start the interrupt handler thread. */
++ gcmkERR_BREAK(gckOS_StartThread(
++ Interrupt->os,
++ _MainInterruptHandler,
++ Interrupt,
++ &Interrupt->handler
++ ));
++#endif
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (Interrupt->fifoValid != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ Interrupt->fifoValid = gcvNULL;
++ }
++
++ /* Return the status. */
++ return status;
++}
++
++static gceSTATUS
++_StopInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Does the thread exist? */
++ if (Interrupt->handler == gcvNULL)
++ {
++ /* The semaphore must be NULL as well. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* The semaphore must exist as well. */
++ gcmkASSERT(Interrupt->fifoValid != gcvNULL);
++
++ /* Set the termination request. */
++ Interrupt->terminate = gcvTRUE;
++
++ /* Unlock the thread. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Wait until the thread quits. */
++ gcmkERR_BREAK(gckOS_StopThread(
++ Interrupt->os,
++ Interrupt->handler
++ ));
++
++ /* Destroy the semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Reset handles. */
++ Interrupt->handler = gcvNULL;
++ Interrupt->fifoValid = gcvNULL;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++
++/******************************************************************************\
++***************************** Interrupt Object API *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Construct
++**
++** Construct an interrupt object.
++**
++** INPUT:
++**
++** Kernel
++** Pointer to the gckVGKERNEL object.
++**
++** OUTPUT:
++**
++** Interrupt
++** Pointer to the new gckVGINTERRUPT object.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interrupt=0x%x", Kernel, Interrupt);
++
++ /* Verify arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interrupt != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGINTERRUPT),
++ (gctPOINTER *) &interrupt
++ ));
++
++ /* Reset the object data. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ interrupt, gcmSIZEOF(struct _gckVGINTERRUPT)
++ ));
++
++ /* Initialize the object. */
++ interrupt->object.type = gcvOBJ_INTERRUPT;
++
++ /* Initialize the object pointers. */
++ interrupt->kernel = Kernel;
++ interrupt->os = Kernel->os;
++
++ /* Initialize the current FIFO position. */
++ interrupt->head = (gctUINT8)~0;
++ interrupt->tail = (gctUINT8)~0;
++
++ /* Start the thread. */
++ gcmkERR_BREAK(_StartInterruptHandler(interrupt));
++
++ /* Return interrupt object. */
++ *Interrupt = interrupt;
++
++ gcmkFOOTER_ARG("*Interrup=0x%x", *Interrupt);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (interrupt != gcvNULL)
++ {
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkVERIFY_OK(gckOS_Free(interrupt->os, interrupt));
++ }
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Destroy
++**
++** Destroy an interrupt object.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to the gckVGINTERRUPT object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ do
++ {
++ /* Stop the interrupt thread. */
++ gcmkERR_BREAK(_StopInterruptHandler(Interrupt));
++
++ /* Mark the object as unknown. */
++ Interrupt->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Free(Interrupt->os, Interrupt));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_DumpState
++**
++** Print the current state of the interrupt manager.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcvDEBUG
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ /* Print the header. */
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: INTERRUPT OBJECT STATUS\n",
++ __FUNCTION__
++ );
++
++ /* Print statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of FIFO items accumulated at a single time: %d\n",
++ Interrupt->maxFifoItems
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Interrupt FIFO overflow happened times: %d\n",
++ Interrupt->fifoOverflow
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of interrupts simultaneously generated: %d\n",
++ Interrupt->maxSimultaneous
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Number of times when there were multiple interrupts generated: %d\n",
++ Interrupt->multipleCount
++ );
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " The current number of entries in the FIFO: %d\n",
++ Interrupt->fifoItems
++ );
++
++ /* Print the FIFO contents. */
++ if (Interrupt->fifoItems != 0)
++ {
++ gctUINT8 index;
++ gctUINT8 last;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " FIFO current contents:\n"
++ );
++
++ /* Get the current pointers. */
++ index = Interrupt->tail;
++ last = Interrupt->head;
++
++ while (index != last)
++ {
++ /* Advance to the next entry. */
++ index += 1;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " %d: 0x%08X\n",
++ index, Interrupt->fifo[index]
++ );
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enable
++**
++** Enable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Pointer to the variable that holds the interrupt number to be
++** registered in range 0..31.
++** If the value is less then 0, gckVGINTERRUPT_Enable will attempt
++** to find an unused interrupt. If such interrupt is found, the number
++** will be assigned to the variable if the functuion call succeedes.
++**
++** Handler
++** Pointer to the handler to register for the interrupt.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ )
++{
++ gceSTATUS status;
++ gctINT32 i;
++
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x Handler=0x%x", Interrupt, Id, Handler);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Handler != gcvNULL);
++
++ do
++ {
++ /* See if we need to allocate an ID. */
++ if (*Id < 0)
++ {
++ /* Find the first unused interrupt handler. */
++ for (i = 0; i < gcmCOUNTOF(Interrupt->handlers); ++i)
++ {
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ break;
++ }
++ }
++
++ /* No unused innterrupts? */
++ if (i == gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++
++ /* Update the interrupt ID. */
++ *Id = i;
++ }
++
++ /* Make sure the ID is in range. */
++ else if (*Id >= gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Set interrupt handler. */
++ Interrupt->handlers[*Id] = Handler;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Disable
++**
++** Disable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Interrupt number to be disabled in range 0..31.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x", Interrupt, Id);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT((Id >= 0) && (Id < gcmCOUNTOF(Interrupt->handlers)));
++
++ /* Reset interrupt handler. */
++ Interrupt->handlers[Id] = gcvNULL;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enque
++**
++** Read the interrupt status register and put the value in the interrupt FIFO.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#ifndef __QNXNTO__
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ )
++#else
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++#ifdef __QNXNTO__
++ *Os = gcvNULL;
++ *Semaphore = gcvNULL;
++#endif
++
++ do
++ {
++ /* Read interrupt status register. */
++ gcmkERR_BREAK(gckVGHARDWARE_ReadInterrupt(
++ Interrupt->kernel->hardware, &triggered
++ ));
++
++ /* Mask out TS overflow interrupt */
++ triggered &= 0xfffffffe;
++
++ /* No interrupts to process? */
++ if (triggered == 0)
++ {
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ break;
++ }
++
++ /* FIFO overflow? */
++ if (Interrupt->fifoItems == gcmCOUNTOF(Interrupt->fifo))
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ Interrupt->fifoOverflow += 1;
++#endif
++
++ /* OR the interrupt with the last value in the FIFO. */
++ Interrupt->fifo[Interrupt->head] |= triggered;
++
++ /* Success (kind of). */
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ /* Advance to the next entry. */
++ Interrupt->head += 1;
++ Interrupt->fifoItems += 1;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (Interrupt->fifoItems > Interrupt->maxFifoItems)
++ {
++ Interrupt->maxFifoItems = Interrupt->fifoItems;
++ }
++#endif
++
++ /* Set the new value. */
++ Interrupt->fifo[Interrupt->head] = triggered;
++
++#ifndef __QNXNTO__
++ /* Increment the FIFO semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++#else
++ *Os = Interrupt->os;
++ *Semaphore = Interrupt->fifoValid;
++#endif
++
++ /* Windows kills our threads prematurely when the application
++ exists. Verify here that the thread is still alive. */
++ status = gckOS_VerifyThread(Interrupt->os, Interrupt->handler);
++
++ /* Has the thread been prematurely terminated? */
++ if (status != gcvSTATUS_OK)
++ {
++ /* Process all accumulated interrupts. */
++ while (Interrupt->head != Interrupt->tail)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt, gcvNULL);
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt);
++#endif
++ }
++
++ /* Set success. */
++ status = gcvSTATUS_OK;
++ }
++ }
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1982 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++typedef enum _gceMMU_TYPE
++{
++ gcvMMU_USED = (0 << 4),
++ gcvMMU_SINGLE = (1 << 4),
++ gcvMMU_FREE = (2 << 4),
++}
++gceMMU_TYPE;
++
++#define gcmENTRY_TYPE(x) (x & 0xF0)
++
++#define gcdMMU_TABLE_DUMP 0
++
++#define gcdUSE_MMU_EXCEPTION 0
++
++/*
++ gcdMMU_CLEAR_VALUE
++
++ The clear value for the entry of the old MMU.
++*/
++#ifndef gcdMMU_CLEAR_VALUE
++# define gcdMMU_CLEAR_VALUE 0x00000ABC
++#endif
++
++/* VIV: Start GPU address for gcvSURF_VERTEX. */
++#define gcdVERTEX_START (128 << 10)
++
++typedef struct _gcsMMU_STLB *gcsMMU_STLB_PTR;
++
++typedef struct _gcsMMU_STLB
++{
++ gctPHYS_ADDR physical;
++ gctUINT32_PTR logical;
++ gctSIZE_T size;
++ gctUINT32 physBase;
++ gctSIZE_T pageCount;
++ gctUINT32 mtlbIndex;
++ gctUINT32 mtlbEntryNum;
++ gcsMMU_STLB_PTR next;
++} gcsMMU_STLB;
++
++#if gcdSHARED_PAGETABLE
++typedef struct _gcsSharedPageTable * gcsSharedPageTable_PTR;
++typedef struct _gcsSharedPageTable
++{
++ /* Shared gckMMU object. */
++ gckMMU mmu;
++
++ /* Hardwares which use this shared pagetable. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores use this shared pagetable. */
++ gctUINT32 reference;
++}
++gcsSharedPageTable;
++
++static gcsSharedPageTable_PTR sharedPageTable = gcvNULL;
++#endif
++
++#if gcdMIRROR_PAGETABLE
++typedef struct _gcsMirrorPageTable * gcsMirrorPageTable_PTR;
++typedef struct _gcsMirrorPageTable
++{
++ /* gckMMU objects. */
++ gckMMU mmus[gcdMAX_GPU_COUNT];
++
++ /* Hardwares which use this shared pagetable. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores use this shared pagetable. */
++ gctUINT32 reference;
++}
++gcsMirrorPageTable;
++
++static gcsMirrorPageTable_PTR mirrorPageTable = gcvNULL;
++static gctPOINTER mirrorPageTableMutex = gcvNULL;
++#endif
++
++typedef struct _gcsDynamicSpaceNode * gcsDynamicSpaceNode_PTR;
++typedef struct _gcsDynamicSpaceNode
++{
++ gctUINT32 start;
++ gctINT32 entries;
++}
++gcsDynamicSpaceNode;
++
++static void
++_WritePageEntry(
++ IN gctUINT32_PTR PageEntry,
++ IN gctUINT32 EntryValue
++ )
++{
++ static gctUINT16 data = 0xff00;
++
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ *PageEntry = gcmSWAB32(EntryValue);
++ }
++ else
++ {
++ *PageEntry = EntryValue;
++ }
++}
++
++static gctUINT32
++_ReadPageEntry(
++ IN gctUINT32_PTR PageEntry
++ )
++{
++ static gctUINT16 data = 0xff00;
++ gctUINT32 entryValue;
++
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ entryValue = *PageEntry;
++ return gcmSWAB32(entryValue);
++ }
++ else
++ {
++ return *PageEntry;
++ }
++}
++
++static gceSTATUS
++_FillPageTable(
++ IN gctUINT32_PTR PageTable,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 EntryValue
++)
++{
++ gctUINT i;
++
++ for (i = 0; i < PageCount; i++)
++ {
++ _WritePageEntry(PageTable + i, EntryValue);
++ }
++
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_Link(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Next
++ )
++{
++ if (Index >= Mmu->pageTableEntries)
++ {
++ /* Just move heap pointer. */
++ Mmu->heapList = Next;
++ }
++ else
++ {
++ /* Address page table. */
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++
++ /* Dispatch on node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[Index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Set single index. */
++ _WritePageEntry(&pageTable[Index], (Next << 8) | gcvMMU_SINGLE);
++ break;
++
++ case gcvMMU_FREE:
++ /* Set index. */
++ _WritePageEntry(&pageTable[Index + 1], Next);
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", Index);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_AddFree(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Node,
++ IN gctUINT32 Count
++ )
++{
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++
++ if (Count == 1)
++ {
++ /* Initialize a single page node. */
++ _WritePageEntry(pageTable + Node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++ }
++ else
++ {
++ /* Initialize the node. */
++ _WritePageEntry(pageTable + Node + 0, (Count << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + Node + 1, ~0U);
++ }
++
++ /* Append the node. */
++ return _Link(Mmu, Index, Node);
++}
++
++static gceSTATUS
++_Collect(
++ IN gckMMU Mmu
++ )
++{
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++ gceSTATUS status;
++ gctUINT32 i, previous, start = 0, count = 0;
++
++ previous = Mmu->heapList = ~0U;
++ Mmu->freeNodes = gcvFALSE;
++
++ /* Walk the entire page table. */
++ for (i = 0; i < Mmu->pageTableEntries; ++i)
++ {
++ /* Dispatch based on type of page. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[i])))
++ {
++ case gcvMMU_USED:
++ /* Used page, so close any open node. */
++ if (count > 0)
++ {
++ /* Add the node. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++
++ /* Reset the node. */
++ previous = start;
++ count = 0;
++ }
++ break;
++
++ case gcvMMU_SINGLE:
++ /* Single free node. */
++ if (count++ == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* A free node. */
++ if (count == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++
++ /* Advance the count. */
++ count += _ReadPageEntry(&pageTable[i]) >> 8;
++
++ /* Advance the index into the page table. */
++ i += (_ReadPageEntry(&pageTable[i]) >> 8) - 1;
++ break;
++
++ default:
++ gcmkFATAL("MMU page table correcupted at index %u!", i);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* See if we have an open node left. */
++ if (count > 0)
++ {
++ /* Add the node to the list. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_MMU,
++ "Performed a garbage collection of the MMU heap.");
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the staus. */
++ return status;
++}
++
++static gctUINT32
++_SetPage(gctUINT32 PageAddress)
++{
++ return PageAddress
++ /* writable */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++}
++
++static gceSTATUS
++_FillFlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBase,
++ OUT gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcsMMU_STLB_PTR head = gcvNULL, pre = gcvNULL;
++ gctUINT32 start = PhysBase & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 end = (PhysBase + Size - 1) & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 mStart = start >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 mEnd = end >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 sStart = (start & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++ gctUINT32 sEnd = (end & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ while (mStart <= mEnd)
++ {
++ gcmkASSERT(mStart < gcdMMU_MTLB_ENTRY_NUM);
++ if (*(Mmu->mtlbLogical + mStart) == 0)
++ {
++ gcsMMU_STLB_PTR stlb;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 last = (mStart == mEnd) ? sEnd : (gcdMMU_STLB_64K_ENTRY_NUM - 1);
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os, sizeof(struct _gcsMMU_STLB), &pointer));
++ stlb = pointer;
++
++ stlb->mtlbEntryNum = 0;
++ stlb->next = gcvNULL;
++ stlb->physical = gcvNULL;
++ stlb->logical = gcvNULL;
++ stlb->size = gcdMMU_STLB_64K_SIZE;
++ stlb->pageCount = 0;
++
++ if (pre == gcvNULL)
++ {
++ pre = head = stlb;
++ }
++ else
++ {
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = stlb;
++ pre = stlb;
++ }
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &stlb->size,
++ &stlb->physical,
++ (gctPOINTER)&stlb->logical));
++
++ gcmkONERROR(gckOS_ZeroMemory(stlb->logical, stlb->size));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Mmu->os,
++ stlb->logical,
++ &stlb->physBase));
++
++ if (stlb->physBase & (gcdMMU_STLB_64K_SIZE - 1))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ _WritePageEntry(Mmu->mtlbLogical + mStart,
++ stlb->physBase
++ /* 64KB page size */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0)
++ );
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ mStart,
++ _ReadPageEntry(Mmu->mtlbLogical + mStart));
++#endif
++
++ stlb->mtlbIndex = mStart;
++ stlb->mtlbEntryNum = 1;
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): STLB: logical:%08x -> physical:%08x\n",
++ __FUNCTION__, __LINE__,
++ stlb->logical,
++ stlb->physBase);
++#endif
++
++ while (sStart <= last)
++ {
++ gcmkASSERT(!(start & gcdMMU_PAGE_64K_MASK));
++ _WritePageEntry(stlb->logical + sStart, _SetPage(start));
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert STLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ sStart,
++ _ReadPageEntry(stlb->logical + sStart));
++#endif
++ /* next page. */
++ start += gcdMMU_PAGE_64K_SIZE;
++ sStart++;
++ stlb->pageCount++;
++ }
++
++ sStart = 0;
++ ++mStart;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ }
++
++ /* Insert the stlb into staticSTLB. */
++ if (Mmu->staticSTLB == gcvNULL)
++ {
++ Mmu->staticSTLB = head;
++ }
++ else
++ {
++ gcmkASSERT(pre == gcvNULL);
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = Mmu->staticSTLB;
++ Mmu->staticSTLB = head;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Roll back. */
++ while (head != gcvNULL)
++ {
++ pre = head;
++ head = head->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_FindDynamicSpace(
++ IN gckMMU Mmu,
++ OUT gcsDynamicSpaceNode_PTR *Array,
++ OUT gctINT * Size
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer = gcvNULL;
++ gcsDynamicSpaceNode_PTR array = gcvNULL;
++ gctINT size = 0;
++ gctINT i = 0, nodeStart = -1, nodeEntries = 0;
++
++ /* Allocate memory for the array. */
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ gcmSIZEOF(*array) * (gcdMMU_MTLB_ENTRY_NUM / 2),
++ &pointer));
++
++ array = (gcsDynamicSpaceNode_PTR)pointer;
++
++ /* Loop all the entries. */
++ while (i < gcdMMU_MTLB_ENTRY_NUM)
++ {
++ if (!Mmu->mtlbLogical[i])
++ {
++ if (nodeStart < 0)
++ {
++ /* This is the first entry of the dynamic space. */
++ nodeStart = i;
++ nodeEntries = 1;
++ }
++ else
++ {
++ /* Other entries of the dynamic space. */
++ nodeEntries++;
++ }
++ }
++ else if (nodeStart >= 0)
++ {
++ /* Save the previous node. */
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++
++ /* Reset the start. */
++ nodeStart = -1;
++ nodeEntries = 0;
++ }
++
++ i++;
++ }
++
++ /* Save the previous node. */
++ if (nodeStart >= 0)
++ {
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++ }
++
++#if gcdMMU_TABLE_DUMP
++ for (i = 0; i < size; i++)
++ {
++ gckOS_Print("%s(%d): [%d]: start=%d, entries=%d.\n",
++ __FUNCTION__, __LINE__,
++ i,
++ array[i].start,
++ array[i].entries);
++ }
++#endif
++
++ *Array = array;
++ *Size = size;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (pointer != gcvNULL)
++ {
++ gckOS_Free(Mmu->os, pointer);
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_SetupDynamicSpace(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gcsDynamicSpaceNode_PTR nodeArray = gcvNULL;
++ gctINT i, nodeArraySize = 0;
++ gctUINT32 physical;
++ gctINT numEntries = 0;
++ gctUINT32_PTR pageTable;
++ gctBOOL acquired = gcvFALSE;
++
++ /* Find all the dynamic address space. */
++ gcmkONERROR(_FindDynamicSpace(Mmu, &nodeArray, &nodeArraySize));
++
++ /* TODO: We only use the largest one for now. */
++ for (i = 0; i < nodeArraySize; i++)
++ {
++ if (nodeArray[i].entries > numEntries)
++ {
++ Mmu->dynamicMappingStart = nodeArray[i].start;
++ numEntries = nodeArray[i].entries;
++ }
++ }
++
++ gckOS_Free(Mmu->os, (gctPOINTER)nodeArray);
++
++ Mmu->pageTableSize = numEntries * 4096;
++
++ Mmu->pageTableEntries = Mmu->pageTableSize / gcmSIZEOF(gctUINT32);
++
++ /* Construct Slave TLB. */
++ gcmkONERROR(gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &Mmu->pageTableSize,
++ &Mmu->pageTablePhysical,
++ (gctPOINTER)&Mmu->pageTableLogical));
++
++#if gcdUSE_MMU_EXCEPTION
++ gcmkONERROR(_FillPageTable(Mmu->pageTableLogical,
++ Mmu->pageTableEntries,
++ /* Enable exception */
++ 1 << 1));
++#else
++ /* Invalidate all entries. */
++ gcmkONERROR(gckOS_ZeroMemory(Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++#endif
++
++ /* Initilization. */
++ pageTable = Mmu->pageTableLogical;
++ _WritePageEntry(pageTable, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + 1, ~0U);
++ Mmu->heapList = 0;
++ Mmu->freeNodes = gcvFALSE;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(Mmu->os,
++ Mmu->pageTableLogical,
++ &physical));
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Map to Master TLB. */
++ for (i = (gctINT)Mmu->dynamicMappingStart;
++ i < (gctINT)Mmu->dynamicMappingStart + numEntries;
++ i++)
++ {
++ _WritePageEntry(Mmu->mtlbLogical + i,
++ physical
++ /* 4KB page size */
++ | (0 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0)
++ );
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ i,
++ _ReadPageEntry(Mmu->mtlbLogical + i));
++#endif
++ physical += gcdMMU_STLB_4K_SIZE;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (Mmu->pageTableLogical)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** _Construct
++**
++** Construct a new gckMMU object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckMMU * Mmu
++** Pointer to a variable that receives the gckMMU object pointer.
++*/
++gceSTATUS
++_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gckMMU mmu = gcvNULL;
++ gctUINT32_PTR pageTable;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=%lu", Kernel, MmuSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckMMU object. */
++ gcmkONERROR(gckOS_Allocate(os, sizeof(struct _gckMMU), &pointer));
++
++ mmu = pointer;
++
++ /* Initialize the gckMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++ mmu->pageTableMutex = gcvNULL;
++ mmu->pageTableLogical = gcvNULL;
++ mmu->mtlbLogical = gcvNULL;
++ mmu->staticSTLB = gcvNULL;
++ mmu->enabled = gcvFALSE;
++#ifdef __QNXNTO__
++ mmu->nodeList = gcvNULL;
++ mmu->nodeMutex = gcvNULL;
++#endif
++
++ /* Create the page table mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &mmu->pageTableMutex));
++
++#ifdef __QNXNTO__
++ /* Create the node list mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &mmu->nodeMutex));
++#endif
++
++ if (hardware->mmuVersion == 0)
++ {
++ mmu->pageTableSize = MmuSize;
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ &pointer));
++
++ mmu->pageTableLogical = pointer;
++
++ /* Compute number of entries in page table. */
++ mmu->pageTableEntries = mmu->pageTableSize / sizeof(gctUINT32);
++
++ /* Mark all pages as free. */
++ pageTable = mmu->pageTableLogical;
++
++#if gcdMMU_CLEAR_VALUE
++ _FillPageTable(pageTable, mmu->pageTableEntries, gcdMMU_CLEAR_VALUE);
++#endif
++
++ _WritePageEntry(pageTable, (mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + 1, ~0U);
++ mmu->heapList = 0;
++ mmu->freeNodes = gcvFALSE;
++
++ /* Set page table address. */
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(hardware, (gctPOINTER) mmu->pageTableLogical));
++ }
++ else
++ {
++ /* Allocate the 4K mode MTLB table. */
++ mmu->mtlbSize = gcdMMU_MTLB_SIZE + 64;
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->mtlbSize,
++ &mmu->mtlbPhysical,
++ &pointer));
++
++ mmu->mtlbLogical = pointer;
++
++ /* Invalid all the entries. */
++ gcmkONERROR(
++ gckOS_ZeroMemory(pointer, mmu->mtlbSize));
++ }
++
++ /* Return the gckMMU object pointer. */
++ *Mmu = mmu;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Mmu=0x%x", *Mmu);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (mmu != gcvNULL)
++ {
++ if (mmu->pageTableLogical != gcvNULL)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->pageTablePhysical,
++ (gctPOINTER) mmu->pageTableLogical,
++ mmu->pageTableSize));
++
++ }
++
++ if (mmu->mtlbLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->mtlbPhysical,
++ (gctPOINTER) mmu->mtlbLogical,
++ mmu->mtlbSize));
++ }
++
++ if (mmu->pageTableMutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, mmu->pageTableMutex));
++ }
++
++#ifdef __QNXNTO__
++ if (mmu->nodeMutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, mmu->nodeMutex));
++ }
++#endif
++
++ /* Mark the gckMMU object as unknown. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the allocates memory. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, mmu));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** _Destroy
++**
++** Destroy a gckMMU object.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#ifdef __QNXNTO__
++ gcuVIDMEM_NODE_PTR node, next;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++#ifdef __QNXNTO__
++ /* Free all associated virtual memory. */
++ for (node = Mmu->nodeList; node != gcvNULL; node = next)
++ {
++ next = node->Virtual.next;
++ gcmkVERIFY_OK(gckVIDMEM_Free(node));
++ }
++#endif
++
++ while (Mmu->staticSTLB != gcvNULL)
++ {
++ gcsMMU_STLB_PTR pre = Mmu->staticSTLB;
++ Mmu->staticSTLB = pre->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): clean MTLB[%d]\n",
++ __FUNCTION__, __LINE__,
++ pre->mtlbIndex);
++#endif
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (Mmu->hardware->mmuVersion != 0)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->mtlbPhysical,
++ (gctPOINTER) Mmu->mtlbLogical,
++ Mmu->mtlbSize));
++ }
++
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++
++#ifdef __QNXNTO__
++ /* Delete the node list mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->nodeMutex));
++#endif
++
++ /* Delete the page table mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->pageTableMutex));
++
++ /* Mark the gckMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckMMU object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, Mmu));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** _AdjstIndex
++**
++** Adjust the index from which we search for a usable node to make sure
++** index allocated is greater than Start.
++*/
++gceSTATUS
++_AdjustIndex(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 Start,
++ OUT gctUINT32 * IndexAdjusted
++ )
++{
++ gceSTATUS status;
++ gctUINT32 index = Index;
++ gctUINT32_PTR map = Mmu->pageTableLogical;
++
++ gcmkHEADER();
++
++ for (; index < Mmu->pageTableEntries;)
++ {
++ gctUINT32 result = 0;
++ gctUINT32 nodeSize = 0;
++
++ if (index >= Start)
++ {
++ break;
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ nodeSize = 1;
++ break;
++
++ case gcvMMU_FREE:
++ nodeSize = map[index] >> 8;
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ if (nodeSize > PageCount)
++ {
++ result = index + (nodeSize - PageCount);
++
++ if (result >= Start)
++ {
++ break;
++ }
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ index = map[index] >> 8;
++ break;
++
++ case gcvMMU_FREE:
++ index = map[index + 1];
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ *IndexAdjusted = index;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (sharedPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsSharedPageTable),
++ &pointer));
++ sharedPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(sharedPageTable,
++ sizeof(struct _gcsSharedPageTable)));
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, &sharedPageTable->mmu));
++ }
++ else if (Kernel->hardware->mmuVersion == 0)
++ {
++ /* Set page table address. */
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Kernel->hardware, (gctPOINTER) sharedPageTable->mmu->pageTableLogical));
++ }
++
++ *Mmu = sharedPageTable->mmu;
++
++ sharedPageTable->hardwares[sharedPageTable->reference] = Kernel->hardware;
++
++ sharedPageTable->reference++;
++
++ gcmkFOOTER_ARG("sharedPageTable->reference=%lu", sharedPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (sharedPageTable)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(gckMMU_Destroy(sharedPageTable->mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, sharedPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#elif gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (mirrorPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsMirrorPageTable),
++ &pointer));
++ mirrorPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(mirrorPageTable,
++ sizeof(struct _gcsMirrorPageTable)));
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Kernel->os, &mirrorPageTableMutex));
++ }
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, Mmu));
++
++ mirrorPageTable->mmus[mirrorPageTable->reference] = *Mmu;
++
++ mirrorPageTable->hardwares[mirrorPageTable->reference] = Kernel->hardware;
++
++ mirrorPageTable->reference++;
++
++ gcmkFOOTER_ARG("mirrorPageTable->reference=%lu", mirrorPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mirrorPageTable && mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, mirrorPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#else
++ return _Construct(Kernel, MmuSize, Mmu);
++#endif
++}
++
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ sharedPageTable->reference--;
++
++ if (sharedPageTable->reference == 0)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(_Destroy(Mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, sharedPageTable));
++ }
++
++ return gcvSTATUS_OK;
++#elif gcdMIRROR_PAGETABLE
++ mirrorPageTable->reference--;
++
++ if (mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTable));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTableMutex));
++ }
++
++ return _Destroy(Mmu);
++#else
++ return _Destroy(Mmu);
++#endif
++}
++
++/*******************************************************************************
++**
++** gckMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS
++_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gctUINT32 index = 0, previous = ~0U, left;
++ gctUINT32_PTR pageTable;
++ gctBOOL gotIt;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=%lu", Mmu, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ if (PageCount > Mmu->pageTableEntries)
++ {
++ gcmkPRINT("[galcore]: %s(%d): Run out of free page entry.",
++ __FUNCTION__, __LINE__);
++
++ /* Not enough pages avaiable. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ /* Cast pointer to page table. */
++ for (pageTable = Mmu->pageTableLogical, gotIt = gcvFALSE; !gotIt;)
++ {
++ index = Mmu->heapList;
++
++ if ((Mmu->hardware->mmuVersion == 0) && (Type == gcvSURF_VERTEX))
++ {
++ gcmkONERROR(_AdjustIndex(
++ Mmu,
++ index,
++ PageCount,
++ gcdVERTEX_START / gcmSIZEOF(gctUINT32),
++ &index
++ ));
++ }
++
++ /* Walk the heap list. */
++ for (; !gotIt && (index < Mmu->pageTableEntries);)
++ {
++ /* Check the node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Single odes are valid if we only need 1 page. */
++ if (PageCount == 1)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&pageTable[index]) >> 8;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* Test if the node has enough space. */
++ if (PageCount <= (_ReadPageEntry(&pageTable[index]) >> 8))
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&pageTable[index + 1]);
++ }
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ /* Test if we are out of memory. */
++ if (index >= Mmu->pageTableEntries)
++ {
++ if (Mmu->freeNodes)
++ {
++ /* Time to move out the trash! */
++ gcmkONERROR(_Collect(Mmu));
++ }
++ else
++ {
++ gcmkPRINT("[galcore]: %s(%d): Run out of free page entry.",
++ __FUNCTION__, __LINE__);
++
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ }
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Unlink single node from free list. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&pageTable[index]) >> 8));
++ break;
++
++ case gcvMMU_FREE:
++ /* Check how many pages will be left. */
++ left = (_ReadPageEntry(&pageTable[index]) >> 8) - PageCount;
++ switch (left)
++ {
++ case 0:
++ /* The entire node is consumed, just unlink it. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&pageTable[index + 1])));
++ break;
++
++ case 1:
++ /* One page will remain. Convert the node to a single node and
++ ** advance the index. */
++ _WritePageEntry(&pageTable[index], (_ReadPageEntry(&pageTable[index + 1]) << 8) | gcvMMU_SINGLE);
++ index ++;
++ break;
++
++ default:
++ /* Enough pages remain for a new node. However, we will just adjust
++ ** the size of the current node and advance the index. */
++ _WritePageEntry(&pageTable[index], (left << 8) | gcvMMU_FREE);
++ index += left;
++ break;
++ }
++ break;
++ }
++
++ /* Mark node as used. */
++ gcmkONERROR(_FillPageTable(&pageTable[index], PageCount, gcvMMU_USED));
++
++ /* Return pointer to page table. */
++ *PageTable = &pageTable[index];
++
++ /* Build virtual address. */
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(
++ gckHARDWARE_BuildVirtualAddress(Mmu->hardware, index, 0, &address));
++ }
++ else
++ {
++ gctUINT32 masterOffset = index / gcdMMU_STLB_4K_ENTRY_NUM
++ + Mmu->dynamicMappingStart;
++ gctUINT32 slaveOffset = index % gcdMMU_STLB_4K_ENTRY_NUM;
++
++ address = (masterOffset << gcdMMU_MTLB_SHIFT)
++ | (slaveOffset << gcdMMU_STLB_4K_SHIFT);
++ }
++
++ if (Address != gcvNULL)
++ {
++ *Address = address;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageTable=0x%x *Address=%08x",
++ *PageTable, gcmOPT_VALUE(Address));
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32_PTR pageTable;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=%lu",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ /* Convert the pointer. */
++ pageTable = (gctUINT32_PTR) PageTable;
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdMMU_CLEAR_VALUE
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ _FillPageTable(pageTable, PageCount, gcdMMU_CLEAR_VALUE);
++ }
++#endif
++
++ if (PageCount == 1)
++ {
++ /* Single page node. */
++ _WritePageEntry(pageTable,
++ (~((1U<<8)-1)) | gcvMMU_SINGLE
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ | 1 << 1
++#endif
++ );
++ }
++ else
++ {
++ /* Mark the node as free. */
++ _WritePageEntry(pageTable,
++ (PageCount << 8) | gcvMMU_FREE
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ | 1 << 1
++#endif
++ );
++ _WritePageEntry(pageTable + 1, ~0U);
++
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ gcmkVERIFY_OK(_FillPageTable(pageTable + 2, PageCount - 2, 1 << 1));
++#endif
++ }
++
++ /* We have free nodes. */
++ Mmu->freeNodes = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ return gckMMU_AllocatePagesEx(
++ Mmu, PageCount, gcvSURF_UNKNOWN, PageTable, Address);
++}
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pageTable;
++ gctUINT32 address;
++ gctINT i;
++ gckMMU mmu;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL allocated = gcvFALSE;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++ acquired = gcvTRUE;
++
++ /* Allocate page table for current MMU. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ if (Mmu == mirrorPageTable->mmus[i])
++ {
++ gcmkONERROR(_AllocatePages(Mmu, PageCount, Type, PageTable, Address));
++ allocated = gcvTRUE;
++ }
++ }
++
++ /* Allocate page table for other MMUs. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (Mmu != mmu)
++ {
++ gcmkONERROR(_AllocatePages(mmu, PageCount, Type, &pageTable, &address));
++ gcmkASSERT(address == *Address);
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ acquired = gcvFALSE;
++
++ return gcvSTATUS_OK;
++OnError:
++
++ if (allocated)
++ {
++ /* Page tables for multiple GPU always keep the same. So it is impossible
++ * the fist one allocates successfully but others fail.
++ */
++ gcmkASSERT(0);
++ }
++
++ if (acquired)
++ {
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ }
++
++ return status;
++#else
++ return _AllocatePages(Mmu, PageCount, Type, PageTable, Address);
++#endif
++}
++
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctINT i;
++ gctUINT32 offset;
++ gckMMU mmu;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++
++ gcmkVERIFY_OK(_FreePages(Mmu, PageTable, PageCount));
++
++ offset = (gctUINT32)PageTable - (gctUINT32)Mmu->pageTableLogical;
++
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ gcmkVERIFY_OK(_FreePages(mmu, mmu->pageTableLogical + offset/4, PageCount));
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++
++ return gcvSTATUS_OK;
++#else
++ return _FreePages(Mmu, PageTable, PageCount);
++#endif
++}
++
++gceSTATUS
++gckMMU_Enable(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize
++ )
++{
++ gceSTATUS status;
++#if gcdSHARED_PAGETABLE
++ gckHARDWARE hardware;
++ gctINT i;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++#if gcdSHARED_PAGETABLE
++ if (Mmu->enabled)
++ {
++ gcmkFOOTER_ARG("Status=%d", gcvSTATUS_SKIP);
++ return gcvSTATUS_SKIP;
++ }
++#endif
++
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ /* Success. */
++ gcmkFOOTER_ARG("Status=%d", gcvSTATUS_SKIP);
++ return gcvSTATUS_SKIP;
++ }
++ else
++ {
++ if (PhysSize != 0)
++ {
++ gcmkONERROR(_FillFlatMapping(
++ Mmu,
++ PhysBaseAddr,
++ PhysSize
++ ));
++ }
++
++ gcmkONERROR(_SetupDynamicSpace(Mmu));
++
++#if gcdSHARED_PAGETABLE
++ for(i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ hardware = sharedPageTable->hardwares[i];
++ if (hardware != gcvNULL)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ hardware,
++ gcvTRUE,
++ Mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvFALSE
++ ));
++ }
++ }
++#else
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ Mmu->hardware,
++ gcvTRUE,
++ Mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvFALSE
++ ));
++#endif
++
++ Mmu->enabled = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctUINT32_PTR pageEntry;
++ gctINT i;
++ gckMMU mmu;
++ gctUINT32 offset = (gctUINT32)PageEntry - (gctUINT32)Mmu->pageTableLogical;
++#endif
++
++ gctUINT32 data;
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ data = PageAddress;
++ }
++ else
++ {
++ data = _SetPage(PageAddress);
++ }
++
++ _WritePageEntry(PageEntry, data);
++
++#if gcdMIRROR_PAGETABLE
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ pageEntry = mmu->pageTableLogical + offset / 4;
++
++ if (mmu->hardware->mmuVersion == 0)
++ {
++ _WritePageEntry(pageEntry, PageAddress);
++ }
++ else
++ {
++ _WritePageEntry(pageEntry, _SetPage(PageAddress));
++ }
++ }
++
++ }
++#endif
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_InsertNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node)
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x Node=0x%x", Mmu, Node);
++
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ Node->Virtual.next = Mmu->nodeList;
++ Mmu->nodeList = Node;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_RemoveNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node)
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcuVIDMEM_NODE_PTR *iter;
++
++ gcmkHEADER_ARG("Mmu=0x%x Node=0x%x", Mmu, Node);
++
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ for (iter = &Mmu->nodeList; *iter; iter = &(*iter)->Virtual.next)
++ {
++ if (*iter == Node)
++ {
++ *iter = Node->Virtual.next;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckMMU Mmu,
++ IN gctUINT32 Pid
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcuVIDMEM_NODE_PTR curr, next;
++
++ gcmkHEADER_ARG("Kernel=0x%x, Mmu=0x%x Pid=%u", Kernel, Mmu, Pid);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ for (curr = Mmu->nodeList; curr != gcvNULL; curr = next)
++ {
++ next = curr->Virtual.next;
++
++ if (curr->Virtual.processID == Pid)
++ {
++ while (curr->Virtual.unlockPendings[Kernel->core] == 0 && curr->Virtual.lockeds[Kernel->core] > 0)
++ {
++ gcmkONERROR(gckVIDMEM_Unlock(Kernel, curr, gcvSURF_TYPE_UNKNOWN, gcvNULL));
++ }
++
++ gcmkVERIFY_OK(gckVIDMEM_Free(curr));
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu
++ )
++{
++ gckHARDWARE hardware;
++#if gcdSHARED_PAGETABLE
++ gctINT i;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ continue;
++ }
++#endif
++ hardware = sharedPageTable->hardwares[i];
++ if (hardware)
++ {
++ /* Notify cores who use this page table. */
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++ }
++ }
++#elif gcdMIRROR_PAGETABLE
++ gctINT i;
++ for (i = 0; i < mirrorPageTable->reference; i++)
++ {
++ hardware = mirrorPageTable->hardwares[i];
++
++ /* Notify cores who use this page table. */
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++ }
++#else
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ )
++{
++ gctUINT32_PTR pageTable;
++ gctUINT32 index;
++ gctUINT32 mtlb, stlb;
++
++ gcmkHEADER_ARG("Mmu=0x%08X Address=0x%08X", Mmu, Address);
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkASSERT(Mmu->hardware->mmuVersion > 0);
++
++ mtlb = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++
++ if (Address >= 0x80000000)
++ {
++ pageTable = Mmu->pageTableLogical;
++
++ index = (mtlb - Mmu->dynamicMappingStart)
++ * gcdMMU_STLB_4K_ENTRY_NUM
++ + stlb;
++
++ gcmkPRINT(" Page table entry = 0x%08X", _ReadPageEntry(pageTable + index));
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************
++****************************** T E S T C O D E ******************************
++******************************************************************************/
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu_vg.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,522 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++/*******************************************************************************
++**
++** gckVGMMU_Construct
++**
++** Construct a new gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGKERNEL Kernel
++** Pointer to an gckVGKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckVGMMU * Mmu
++** Pointer to a variable that receives the gckVGMMU object pointer.
++*/
++gceSTATUS gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckVGMMU * Mmu
++ )
++{
++ gckOS os;
++ gckVGHARDWARE hardware;
++ gceSTATUS status;
++ gckVGMMU mmu;
++ gctUINT32 * pageTable;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=0x%x Mmu=0x%x", Kernel, MmuSize, Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckVGHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckVGMMU object. */
++ status = gckOS_Allocate(os, sizeof(struct _gckVGMMU), (gctPOINTER *) &mmu);
++
++ if (status < 0)
++ {
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate gckVGMMU object.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Initialize the gckVGMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++
++ /* Create the mutex. */
++ status = gckOS_CreateMutex(os, &mmu->mutex);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Allocate the page table. */
++ mmu->pageTableSize = MmuSize;
++ status = gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ &mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Compute number of entries in page table. */
++ mmu->entryCount = mmu->pageTableSize / sizeof(gctUINT32);
++ mmu->entry = 0;
++
++ /* Mark the entire page table as available. */
++ pageTable = (gctUINT32 *) mmu->pageTableLogical;
++ for (i = 0; i < mmu->entryCount; i++)
++ {
++ pageTable[i] = (gctUINT32)~0;
++ }
++
++ /* Set page table address. */
++ status = gckVGHARDWARE_SetMMU(hardware, mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(mmu->os,
++ mmu->pageTablePhysical,
++ mmu->pageTableLogical,
++ mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not program page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Return the gckVGMMU object pointer. */
++ *Mmu = mmu;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u entries at %p.(0x%08X)\n",
++ __FUNCTION__, __LINE__,
++ mmu->entryCount,
++ mmu->pageTableLogical,
++ mmu->pageTablePhysical
++ );
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_Destroy
++**
++** Destroy a nAQMMU object.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->mutex));
++
++ /* Mark the gckVGMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGMMU object. */
++ gcmkVERIFY_OK(gckOS_Free(Mmu->os, Mmu));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 tail, index, i;
++ gctUINT32 * table;
++ gctBOOL allocated = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=0x%x PageTable=0x%x Address=0x%x",
++ Mmu, PageCount, PageTable, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ if (PageCount > Mmu->entryCount)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): page table too small for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ gcmkFOOTER_NO();
++ /* Not enough pages avaiable. */
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Grab the mutex. */
++ status = gckOS_AcquireMutex(Mmu->os, Mmu->mutex, gcvINFINITE);
++
++ if (status < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): could not acquire mutex.\n"
++ ,__FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Compute the tail for this allocation. */
++ tail = Mmu->entryCount - PageCount;
++
++ /* Walk all entries until we find enough slots. */
++ for (index = Mmu->entry; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++
++ if (!allocated)
++ {
++ if (status >= 0)
++ {
++ /* Walk all entries until we find enough slots. */
++ for (index = 0; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++ }
++ }
++
++ if (!allocated && (status >= 0))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): not enough free pages for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ /* Not enough empty slots available. */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ if (status >= 0)
++ {
++ /* Build virtual address. */
++ status = gckVGHARDWARE_BuildVirtualAddress(Mmu->hardware,
++ index,
++ 0,
++ Address);
++
++ if (status >= 0)
++ {
++ /* Update current entry into page table. */
++ Mmu->entry = index + PageCount;
++
++ /* Return pointer to page table. */
++ *PageTable = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): allocated %u pages at index %u (0x%08X) @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ index,
++ *Address,
++ *PageTable
++ );
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->mutex));
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32 * table;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=0x%x",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): freeing %u pages at index %u @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ ((gctUINT32 *) PageTable - (gctUINT32 *) Mmu->pageTableLogical),
++ PageTable
++ );
++
++ /* Convert pointer. */
++ table = (gctUINT32 *) PageTable;
++
++ /* Mark the page table entries as available. */
++ while (PageCount-- > 0)
++ {
++ *table++ = (gctUINT32)~0;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ *PageEntry = PageAddress;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ )
++{
++ gckVGHARDWARE hardware;
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_power.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_power.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_power.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_power.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,347 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_POWER
++
++/******************************************************************************\
++************************ Dynamic Voltage Frequency Setting *********************
++\******************************************************************************/
++#if gcdDVFS
++static gctUINT32
++_GetLoadHistory(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Select,
++ IN gctUINT32 Index
++)
++{
++ return Dvfs->loads[Index];
++}
++
++static void
++_IncreaseScale(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ if (Dvfs->currentScale < 32)
++ {
++ *Scale = Dvfs->currentScale + 8;
++ }
++ else
++ {
++ *Scale = Dvfs->currentScale + 8;
++ *Scale = gcmMIN(64, *Scale);
++ }
++}
++
++static void
++_RecordFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory *history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ if (history->frequency == 0)
++ {
++ history->frequency = Frequency;
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ history->count++;
++ }
++}
++
++static gctUINT32
++_GetFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory * history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ return history->count;
++ }
++
++ return 0;
++}
++
++static void
++_Policy(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ gctUINT8 load[4], nextLoad;
++ gctUINT8 scale;
++
++ /* Last 4 history. */
++ load[0] = (Load & 0xFF);
++ load[1] = (Load & 0xFF00) >> 8;
++ load[2] = (Load & 0xFF0000) >> 16;
++ load[3] = (Load & 0xFF000000) >> 24;
++
++ /* Determine target scale. */
++ if (load[0] > 54)
++ {
++ _IncreaseScale(Dvfs, Load, &scale);
++ }
++ else
++ {
++ nextLoad = (load[0] + load[1] + load[2] + load[3])/4;
++
++ scale = Dvfs->currentScale * (nextLoad) / 54;
++
++ scale = gcmMAX(1, scale);
++ scale = gcmMIN(64, scale);
++ }
++
++ Dvfs->totalConfig++;
++
++ Dvfs->loads[(load[0]-1)/8]++;
++
++ *Scale = scale;
++
++
++ if (Dvfs->totalConfig % 100 == 0)
++ {
++ gcmkPRINT("=======================================================");
++ gcmkPRINT("GPU Load: %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ 8, 16, 24, 32, 40, 48, 56, 64);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ _GetLoadHistory(Dvfs,2, 0),
++ _GetLoadHistory(Dvfs,2, 1),
++ _GetLoadHistory(Dvfs,2, 2),
++ _GetLoadHistory(Dvfs,2, 3),
++ _GetLoadHistory(Dvfs,2, 4),
++ _GetLoadHistory(Dvfs,2, 5),
++ _GetLoadHistory(Dvfs,2, 6),
++ _GetLoadHistory(Dvfs,2, 7)
++ );
++
++ gcmkPRINT("Frequency(MHz) %-8d %-8d %-8d %-8d %-8d",
++ 58, 120, 240, 360, 480);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d",
++ _GetFrequencyHistory(Dvfs, 58),
++ _GetFrequencyHistory(Dvfs,120),
++ _GetFrequencyHistory(Dvfs,240),
++ _GetFrequencyHistory(Dvfs,360),
++ _GetFrequencyHistory(Dvfs,480)
++ );
++ }
++}
++
++static void
++_TimerFunction(
++ gctPOINTER Data
++ )
++{
++ gceSTATUS status;
++ gckDVFS dvfs = (gckDVFS) Data;
++ gckHARDWARE hardware = dvfs->hardware;
++ gctUINT32 value;
++ gctUINT32 frequency;
++ gctUINT8 scale;
++ gctUINT32 t1, t2, consumed;
++
++ gckOS_GetTicks(&t1);
++
++ gcmkONERROR(gckHARDWARE_QueryLoad(hardware, &value));
++
++    /* Determine target scale. */
++ _Policy(dvfs, value, &scale);
++
++ /* Set frequency and voltage. */
++ gcmkONERROR(gckOS_SetGPUFrequency(hardware->os, hardware->core, scale));
++
++ /* Query real frequency. */
++ gcmkONERROR(
++ gckOS_QueryGPUFrequency(hardware->os,
++ hardware->core,
++ &frequency,
++ &dvfs->currentScale));
++
++ _RecordFrequencyHistory(dvfs, frequency);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER,
++ "Current frequency = %d",
++ frequency);
++
++ /* Set period. */
++ gcmkONERROR(gckHARDWARE_SetDVFSPeroid(hardware, frequency));
++
++OnError:
++ /* Determine next querying time. */
++ gckOS_GetTicks(&t2);
++
++ consumed = gcmMIN(((long)t2 - (long)t1), 5);
++
++ if (dvfs->stop == gcvFALSE)
++ {
++ gcmkVERIFY_OK(gckOS_StartTimer(hardware->os,
++ dvfs->timer,
++ dvfs->pollingTime - consumed));
++ }
++
++ return;
++}
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Dvfs
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer;
++ gckDVFS dvfs = gcvNULL;
++ gckOS os = Hardware->os;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ /* Allocate a gckDVFS manager. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckDVFS), &pointer));
++
++ gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckDVFS));
++
++ dvfs = pointer;
++
++ /* Initialization. */
++ dvfs->hardware = Hardware;
++ dvfs->pollingTime = gcdDVFS_POLLING_TIME;
++ dvfs->os = Hardware->os;
++ dvfs->currentScale = 64;
++
++ /* Create a polling timer. */
++ gcmkONERROR(gckOS_CreateTimer(os, _TimerFunction, pointer, &dvfs->timer));
++
++ /* Initialize frequency and voltage adjustment helper. */
++ gcmkONERROR(gckOS_PrepareGPUFrequency(os, Hardware->core));
++
++ /* Return result. */
++ *Dvfs = dvfs;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (dvfs)
++ {
++ if (dvfs->timer)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyTimer(os, dvfs->timer));
++ }
++
++ gcmkOS_SAFE_FREE(os, dvfs);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++    /* Deinitialize helper function. */
++ gcmkVERIFY_OK(gckOS_FinishGPUFrequency(Dvfs->os, Dvfs->hardware->core));
++
++ /* DestroyTimer. */
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Dvfs->os, Dvfs->timer));
++
++ gcmkOS_SAFE_FREE(Dvfs->os, Dvfs);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ gckHARDWARE_InitDVFS(Dvfs->hardware);
++
++ Dvfs->stop = gcvFALSE;
++
++ gckOS_StartTimer(Dvfs->os, Dvfs->timer, Dvfs->pollingTime);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ Dvfs->stop = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_precomp.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_precomp.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_precomp.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_precomp.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,29 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_precomp_h_
++#define __gc_hal_kernel_precomp_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++
++#endif /* __gc_hal_kernel_precomp_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,895 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define ENABLE_VG_TRY_VIRTUAL_MEMORY 0
++
++#define _GC_OBJ_ZONE gcvZONE_VG
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++gceSTATUS gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ )
++{
++ gceSTATUS status;
++ gckVGKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Os,
++ sizeof(struct _gckVGKERNEL),
++ (gctPOINTER *) &kernel
++ ));
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->context = Context;
++ kernel->hardware = gcvNULL;
++ kernel->interrupt = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->mmu = gcvNULL;
++ kernel->kernel = inKernel;
++
++ /* Construct the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckVGHARDWARE_Construct(
++ Os, &kernel->hardware
++ ));
++
++ /* Set pointer to gckKERNEL object in gckVGHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ /* Construct the gckVGINTERRUPT object. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Construct(
++ kernel, &kernel->interrupt
++ ));
++
++ /* Construct the gckVGCOMMAND object. */
++ gcmkERR_BREAK(gckVGCOMMAND_Construct(
++ kernel, gcmKB2BYTES(8), gcmKB2BYTES(2), &kernel->command
++ ));
++
++ /* Construct the gckVGMMU object. */
++ gcmkERR_BREAK(gckVGMMU_Construct(
++ kernel, gcmKB2BYTES(32), &kernel->mmu
++ ));
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (kernel != gcvNULL)
++ {
++ if (kernel->mmu != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGMMU_Destroy(kernel->mmu));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->interrupt != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGINTERRUPT_Destroy(kernel->interrupt));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGHARDWARE_Destroy(kernel->hardware));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(Os, kernel));
++ }
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy an gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ /* Destroy the gckVGMMU object. */
++ if (Kernel->mmu != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGMMU_Destroy(Kernel->mmu));
++ Kernel->mmu = gcvNULL;
++ }
++
++ /* Destroy the gckVGCOMMAND object. */
++ if (Kernel->command != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_Destroy(Kernel->command));
++ Kernel->command = gcvNULL;
++ }
++
++ /* Destroy the gckVGINTERRUPT object. */
++ if (Kernel->interrupt != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Destroy(Kernel->interrupt));
++ Kernel->interrupt = gcvNULL;
++ }
++
++ /* Destroy the gckVGHARDWARE object. */
++ if (Kernel->hardware != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_Destroy(Kernel->hardware));
++ Kernel->hardware = gcvNULL;
++ }
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Free(Kernel->os, Kernel));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_AllocateLinearMemory
++**
++** Function walks all required memory pools and allocates the requested
++** amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcePOOL * Pool
++** Pointer the desired memory pool.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctSIZE_T Alignment
++** Required buffer alignment.
++**
++** gceSURF_TYPE Type
++** Surface type.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to the actual pool where the memory was allocated.
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Allocated node.
++*/
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ break;
++
++ default:
++ break;
++ }
++
++ do
++ {
++ /* Verify the number of bytes to allocate. */
++ if (Bytes == 0)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkERR_BREAK(gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, Node));
++
++ /* Success. */
++ break;
++ }
++
++ else
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++
++ if (status == gcvSTATUS_OK)
++ {
++ if(*Pool == gcvPOOL_SYSTEM)
++ Type |= gcvSURF_VG;
++ /* Allocate memory. */
++ status = gckVIDMEM_AllocateLinear(videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ Node);
++
++ if (status == gcvSTATUS_OK)
++ {
++ /* Memory allocated. */
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++ else if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++ else if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to virtual memory. */
++#if ENABLE_VG_TRY_VIRTUAL_MEMORY
++ pool = gcvPOOL_VIRTUAL;
++#else
++ /*VG non-contiguous memory support is not ready yet, disable it temporary*/
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++#endif
++ }
++ else
++ {
++ /* Out of pools. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++ }
++ /* Loop only for multiple selection pools. */
++ while ((*Pool == gcvPOOL_DEFAULT)
++ || (*Pool == gcvPOOL_LOCAL)
++ || (*Pool == gcvPOOL_UNIFIED)
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Return pool used for allocation. */
++ *Pool = pool;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE * kernelInterface = Interface;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 processID;
++ gckKERNEL kernel = Kernel;
++ gctPOINTER info = gcvNULL;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctPOINTER logical = gcvNULL;
++ gctSIZE_T bytes = 0;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interface=0x%x ", Kernel, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkERR_BREAK(gckKERNEL_QueryVideoMemory(
++ Kernel, kernelInterface
++ ));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkERR_BREAK(gckVGHARDWARE_QueryChipIdentity(
++ Kernel->vg->hardware,
++ &kernelInterface->u.QueryChipIdentity.chipModel,
++ &kernelInterface->u.QueryChipIdentity.chipRevision,
++ &kernelInterface->u.QueryChipIdentity.chipFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures2
++ ));
++ break;
++
++ case gcvHAL_QUERY_COMMAND_BUFFER:
++ /* Query command buffer information. */
++ gcmkERR_BREAK(gckKERNEL_QueryCommandBuffer(
++ Kernel,
++ &kernelInterface->u.QueryCommandBuffer.information
++ ));
++ break;
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate non-paged memory. */
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free non-paged memory. */
++ gcmkERR_BREAK(gckOS_FreeNonPagedMemory(
++ Kernel->os,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate contiguous memory. */
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical),
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++ {
++ gctSIZE_T bytes;
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bits;
++
++ /* Align width and height to tiles. */
++ gcmkERR_BREAK(gckVGHARDWARE_AlignToTile(
++ Kernel->vg->hardware,
++ kernelInterface->u.AllocateVideoMemory.type,
++ &kernelInterface->u.AllocateVideoMemory.width,
++ &kernelInterface->u.AllocateVideoMemory.height
++ ));
++
++ /* Convert format into bytes per pixel and bytes per tile. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertFormat(
++ Kernel->vg->hardware,
++ kernelInterface->u.AllocateVideoMemory.format,
++ &bitsPerPixel,
++ gcvNULL
++ ));
++
++ /* Compute number of bits for the allocation. */
++ bits
++ = kernelInterface->u.AllocateVideoMemory.width
++ * kernelInterface->u.AllocateVideoMemory.height
++ * kernelInterface->u.AllocateVideoMemory.depth
++ * bitsPerPixel;
++
++ /* Compute number of bytes for the allocation. */
++ bytes = gcmALIGN(bits, 8) / 8;
++
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Kernel,
++ &kernelInterface->u.AllocateVideoMemory.pool,
++ bytes,
++ 64,
++ kernelInterface->u.AllocateVideoMemory.type,
++ &node
++ ));
++
++ kernelInterface->u.AllocateVideoMemory.node = gcmPTR_TO_UINT64(node);
++ }
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Kernel,
++ &kernelInterface->u.AllocateLinearVideoMemory.pool,
++ kernelInterface->u.AllocateLinearVideoMemory.bytes,
++ kernelInterface->u.AllocateLinearVideoMemory.alignment,
++ kernelInterface->u.AllocateLinearVideoMemory.type,
++ &node
++ ));
++
++ gcmkERR_BREAK(gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node,
++ gcvNULL,
++ kernelInterface->u.AllocateLinearVideoMemory.bytes
++ ));
++
++ kernelInterface->u.AllocateLinearVideoMemory.node = gcmPTR_TO_UINT64(node);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.FreeVideoMemory.node);
++#ifdef __QNXNTO__
++ /* Unmap the video memory */
++
++ if ((node->VidMem.memory->object.type == gcvOBJ_VIDMEM) &&
++ (node->VidMem.logical != gcvNULL))
++ {
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ node->VidMem.logical,
++ processID,
++ node->VidMem.bytes);
++ node->VidMem.logical = gcvNULL;
++ }
++#endif /* __QNXNTO__ */
++
++ /* Free video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Free(
++ node
++ ));
++
++ gcmkERR_BREAK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node
++ ));
++
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ /* Map memory. */
++ gcmkERR_BREAK(gckKERNEL_MapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ &logical
++ ));
++ kernelInterface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ /* Unmap memory. */
++ gcmkERR_BREAK(gckKERNEL_UnmapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapMemory.logical)
++ ));
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkERR_BREAK(gckOS_MapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapUserMemory.memory),
++ kernelInterface->u.MapUserMemory.physical,
++ (gctSIZE_T) kernelInterface->u.MapUserMemory.size,
++ &info,
++ &kernelInterface->u.MapUserMemory.address
++ ));
++
++ kernelInterface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ /* Unmap user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) kernelInterface->u.UnmapUserMemory.size,
++ gcmNAME_TO_PTR(kernelInterface->u.UnmapUserMemory.info),
++ kernelInterface->u.UnmapUserMemory.address
++ ));
++ gcmRELEASE_NAME(kernelInterface->u.UnmapUserMemory.info);
++ break;
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node);
++
++ /* Lock video memory. */
++ gcmkERR_BREAK(
++ gckVIDMEM_Lock(Kernel,
++ node,
++ gcvFALSE,
++ &Interface->u.LockVideoMemory.address));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Map video memory address into user space. */
++#ifdef __QNXNTO__
++ if (node->VidMem.logical == gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ processID,
++ node->VidMem.bytes,
++ &node->VidMem.logical));
++ }
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical);
++#else
++ gcmkERR_BREAK(
++ gckKERNEL_MapVideoMemoryEx(Kernel,
++ gcvCORE_VG,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ &logical));
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(logical);
++#endif
++ }
++ else
++ {
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++
++#if gcdSECURE_USER
++ /* Return logical address as physical address. */
++ Interface->u.LockVideoMemory.address =
++ (gctUINT32)(Interface->u.LockVideoMemory.memory);
++#endif
++ gcmkERR_BREAK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ /* Unlock video memory. */
++ node = gcmUINT64_TO_PTR(Interface->u.UnlockVideoMemory.node);
++
++#if gcdSECURE_USER
++ /* Save node information before it disappears. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock video memory. */
++ gcmkERR_BREAK(
++ gckVIDMEM_Unlock(Kernel,
++ node,
++ Interface->u.UnlockVideoMemory.type,
++ &Interface->u.UnlockVideoMemory.asynchroneous));
++
++#if gcdSECURE_USER
++ /* Flush the translation cache for virtual surfaces. */
++ if (logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++
++ if (Interface->u.UnlockVideoMemory.asynchroneous == gcvFALSE)
++ {
++            /* There isn't an event to unlock this node, remove the record now. */
++ gcmkERR_BREAK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node));
++ }
++
++ break;
++ case gcvHAL_USER_SIGNAL:
++#if !USE_NEW_LINUX_SIGNAL
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkERR_BREAK(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ /* Destroy the signal. */
++ gcmkERR_BREAK(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkERR_BREAK(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkERR_BREAK(gcvSTATUS_INVALID_ARGUMENT);
++ }
++#endif
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++ gcmkERR_BREAK(gckVGCOMMAND_Commit(
++ Kernel->vg->command,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.context),
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.queue),
++ kernelInterface->u.VGCommit.entryCount,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.taskTable)
++ ));
++ break;
++ case gcvHAL_VERSION:
++ kernelInterface->u.Version.major = gcvVERSION_MAJOR;
++ kernelInterface->u.Version.minor = gcvVERSION_MINOR;
++ kernelInterface->u.Version.patch = gcvVERSION_PATCH;
++ kernelInterface->u.Version.build = gcvVERSION_BUILD;
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkERR_BREAK(
++ gckOS_GetBaseAddress(Kernel->os,
++ &kernelInterface->u.GetBaseAddress.baseAddress));
++ break;
++ default:
++ /* Invalid command. */
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++OnError:
++ /* Save status. */
++ kernelInterface->status = status;
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryCommandBuffer
++**
++** Query command buffer attributes.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gcsCOMMAND_BUFFER_INFO_PTR Information
++** Pointer to the information structure to receive buffer attributes.
++*/
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=0x%x *Pool=0x%x",
++ Kernel, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get the information. */
++ status = gckVGCOMMAND_QueryCommandBuffer(Kernel->vg->command, Information);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,85 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_vg_h_
++#define __gc_hal_kernel_vg_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel_hardware.h"
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckKERNEL object. */
++struct _gckVGKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* Pointer to gckINTERRUPT object. */
++ gckVGINTERRUPT interrupt;
++
++ /* Pointer to gckCOMMAND object. */
++ gckVGCOMMAND command;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckVGMMU mmu;
++
++ gckKERNEL kernel;
++};
++
++/* gckMMU object. */
++struct _gckVGMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER mutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctPOINTER pageTableLogical;
++
++ /* Allocation index. */
++ gctUINT32 entryCount;
++ gctUINT32 entry;
++};
++
++#endif /* __gc_hal_kernel_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_video_memory.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_video_memory.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_video_memory.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_video_memory.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2264 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_VIDMEM
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _Split
++**
++** Split a node on the required byte boundary.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the node to split.
++**
++** gctSIZE_T Bytes
++** Number of bytes to keep in the node.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gctBOOL
++** gcvTRUE if the node was split successfully, or gcvFALSE if there is an
++** error.
++**
++*/
++static gctBOOL
++_Split(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctPOINTER pointer = gcvNULL;
++
++ /* Make sure the byte boundary makes sense. */
++ if ((Bytes <= 0) || (Bytes > Node->VidMem.bytes))
++ {
++ return gcvFALSE;
++ }
++
++ /* Allocate a new gcuVIDMEM_NODE object. */
++ if (gcmIS_ERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcuVIDMEM_NODE),
++ &pointer)))
++ {
++ /* Error. */
++ return gcvFALSE;
++ }
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE structure. */
++ node->VidMem.offset = Node->VidMem.offset + Bytes;
++ node->VidMem.bytes = Node->VidMem.bytes - Bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.locked = 0;
++ node->VidMem.memory = Node->VidMem.memory;
++ node->VidMem.pool = Node->VidMem.pool;
++ node->VidMem.physical = Node->VidMem.physical;
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ gcmkASSERT(Node->VidMem.physical != 0);
++ gcmkASSERT(Node->VidMem.logical != gcvNULL);
++ node->VidMem.processID = Node->VidMem.processID;
++ node->VidMem.physical = Node->VidMem.physical + Bytes;
++ node->VidMem.logical = Node->VidMem.logical + Bytes;
++#else
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++#endif
++
++ /* Insert node behind specified node. */
++ node->VidMem.next = Node->VidMem.next;
++ node->VidMem.prev = Node;
++ Node->VidMem.next = node->VidMem.next->VidMem.prev = node;
++
++ /* Insert free node behind specified node. */
++ node->VidMem.nextFree = Node->VidMem.nextFree;
++ node->VidMem.prevFree = Node;
++ Node->VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
++
++ /* Adjust size of specified node. */
++ Node->VidMem.bytes = Bytes;
++
++ /* Success. */
++ return gcvTRUE;
++}
++
++/*******************************************************************************
++**
++** _Merge
++**
++** Merge two adjacent nodes together.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the first of the two nodes to merge.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++*/
++static gceSTATUS
++_Merge(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gceSTATUS status;
++
++ /* Save pointer to next node. */
++ node = Node->VidMem.next;
++#if gcdUSE_VIDMEM_PER_PID
++ /* Check if the nodes are adjacent physically. */
++ if ( ((Node->VidMem.physical + Node->VidMem.bytes) != node->VidMem.physical) ||
++ ((Node->VidMem.logical + Node->VidMem.bytes) != node->VidMem.logical) )
++ {
++ /* Can't merge. */
++ return gcvSTATUS_OK;
++ }
++#else
++
++ /* This is a good time to make sure the heap is not corrupted. */
++ if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset)
++ {
++ /* Corrupted heap. */
++ gcmkASSERT(
++ Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++#endif
++
++ /* Adjust byte count. */
++ Node->VidMem.bytes += node->VidMem.bytes;
++
++ /* Unlink next node from linked list. */
++ Node->VidMem.next = node->VidMem.next;
++ Node->VidMem.nextFree = node->VidMem.nextFree;
++
++ Node->VidMem.next->VidMem.prev =
++ Node->VidMem.nextFree->VidMem.prevFree = Node;
++
++ /* Free next node. */
++ status = gcmkOS_SAFE_FREE(Os, node);
++ return status;
++}
++
++/******************************************************************************\
++******************************* gckVIDMEM API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVIDMEM_ConstructVirtual
++**
++** Construct a new gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T Bytes
++** Number of byte to allocate.
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
++*/
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("Kernel=0x%x Contiguous=%d Bytes=%lu", Kernel, Contiguous, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate an gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union for virtual memory. */
++ node->Virtual.kernel = Kernel;
++ node->Virtual.contiguous = Contiguous;
++ node->Virtual.logical = gcvNULL;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ node->Virtual.lockeds[i] = 0;
++ node->Virtual.pageTables[i] = gcvNULL;
++ node->Virtual.lockKernels[i] = gcvNULL;
++ }
++
++ node->Virtual.mutex = gcvNULL;
++
++ gcmkONERROR(gckOS_GetProcessID(&node->Virtual.processID));
++
++#ifdef __QNXNTO__
++ node->Virtual.next = gcvNULL;
++ node->Virtual.freePending = gcvFALSE;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ node->Virtual.unlockPendings[i] = gcvFALSE;
++ }
++#endif
++
++ node->Virtual.freed = gcvFALSE;
++
++ gcmkONERROR(gckOS_ZeroMemory(&node->Virtual.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
++
++ /* Create the mutex. */
++ gcmkONERROR(
++ gckOS_CreateMutex(os, &node->Virtual.mutex));
++
++ /* Allocate the virtual memory. */
++ gcmkONERROR(
++ gckOS_AllocatePagedMemoryEx(os,
++ node->Virtual.contiguous,
++ node->Virtual.bytes = Bytes,
++ &node->Virtual.physical));
++
++#ifdef __QNXNTO__
++ /* Register. */
++#if gcdENABLE_VG
++ if (Kernel->core != gcvCORE_VG)
++#endif
++ {
++ gckMMU_InsertNode(Kernel->mmu, node);
++ }
++#endif
++
++ /* Return pointer to the gcuVIDMEM_NODE union. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Created virtual node 0x%x for %u bytes @ 0x%x",
++ node, Bytes, node->Virtual.physical);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ if (node->Virtual.mutex != gcvNULL)
++ {
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->Virtual.mutex));
++ }
++
++ /* Free the structure. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_DestroyVirtual
++**
++** Destroy an gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gckOS os;
++ gctINT i;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
++
++ /* Extact the gckOS object pointer. */
++ os = Node->Virtual.kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++#ifdef __QNXNTO__
++ /* Unregister. */
++#if gcdENABLE_VG
++ if (Node->Virtual.kernel->core != gcvCORE_VG)
++#endif
++ {
++ gcmkVERIFY_OK(
++ gckMMU_RemoveNode(Node->Virtual.kernel->mmu, Node));
++ }
++#endif
++
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, Node->Virtual.mutex));
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Node->Virtual.pageTables[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ /* Free the pages. */
++ gcmkVERIFY_OK(gckVGMMU_FreePages(Node->Virtual.lockKernels[i]->vg->mmu,
++ Node->Virtual.pageTables[i],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ /* Free the pages. */
++ gcmkVERIFY_OK(gckMMU_FreePages(Node->Virtual.lockKernels[i]->mmu,
++ Node->Virtual.pageTables[i],
++ Node->Virtual.pageCount));
++ }
++ }
++ }
++
++ /* Delete the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, Node));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Construct
++**
++** Construct a new gckVIDMEM object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 BaseAddress
++** Base address for the video memory heap.
++**
++** gctSIZE_T Bytes
++** Number of bytes in the video memory heap.
++**
++** gctSIZE_T Threshold
++** Minimum number of bytes beyond am allocation before the node is
++** split. Can be used as a minimum alignment requirement.
++**
++** gctSIZE_T BankSize
++** Number of bytes per physical memory bank. Used by bank
++** optimization.
++**
++** OUTPUT:
++**
++** gckVIDMEM * Memory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object.
++*/
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T BankSize,
++ OUT gckVIDMEM * Memory
++ )
++{
++ gckVIDMEM memory = gcvNULL;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctINT i, banks = 0;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
++ "BankSize=%lu",
++ Os, BaseAddress, Bytes, Threshold, BankSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Allocate the gckVIDMEM object. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), &pointer));
++
++ memory = pointer;
++
++ /* Initialize the gckVIDMEM object. */
++ memory->object.type = gcvOBJ_VIDMEM;
++ memory->os = Os;
++
++ /* Set video memory heap information. */
++ memory->baseAddress = BaseAddress;
++ memory->bytes = Bytes;
++ memory->freeBytes = Bytes;
++ memory->threshold = Threshold;
++ memory->mutex = gcvNULL;
++#if gcdUSE_VIDMEM_PER_PID
++ gcmkONERROR(gckOS_GetProcessID(&memory->pid));
++#endif
++
++ BaseAddress = 0;
++
++ /* Walk all possible banks. */
++ for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
++ {
++ gctSIZE_T bytes;
++
++ if (BankSize == 0)
++ {
++ /* Use all bytes for the first bank. */
++ bytes = Bytes;
++ }
++ else
++ {
++ /* Compute number of bytes for this bank. */
++ bytes = gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress;
++
++ if (bytes > Bytes)
++ {
++ /* Make sure we don't exceed the total number of bytes. */
++ bytes = Bytes;
++ }
++ }
++
++ if (bytes == 0)
++ {
++ /* Mark heap is not used. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = gcvNULL;
++ continue;
++ }
++
++ /* Allocate one gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union. */
++ node->VidMem.memory = memory;
++
++ node->VidMem.next =
++ node->VidMem.prev =
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = &memory->sentinel[i];
++
++ node->VidMem.offset = BaseAddress;
++ node->VidMem.bytes = bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.physical = 0;
++ node->VidMem.pool = gcvPOOL_UNKNOWN;
++
++ node->VidMem.locked = 0;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ gcmkONERROR(gckOS_ZeroMemory(&node->VidMem.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
++
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ node->VidMem.processID = memory->pid;
++ node->VidMem.physical = memory->baseAddress + BaseAddress;
++ gcmkONERROR(gckOS_GetLogicalAddressProcess(Os,
++ node->VidMem.processID,
++ node->VidMem.physical,
++ &node->VidMem.logical));
++#else
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++#endif
++
++ /* Initialize the linked list of nodes. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = node;
++
++ /* Mark sentinel. */
++ memory->sentinel[i].VidMem.bytes = 0;
++
++ /* Adjust address for next bank. */
++ BaseAddress += bytes;
++ Bytes -= bytes;
++ banks ++;
++ }
++
++ /* Assign all the bank mappings. */
++ memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1;
++ memory->mapping[gcvSURF_BITMAP] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_DEPTH] = banks - 1;
++ memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TEXTURE] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_VERTEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_INDEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TILE_STATUS] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0;
++
++#if gcdENABLE_VG
++ memory->mapping[gcvSURF_IMAGE] = 0;
++ memory->mapping[gcvSURF_MASK] = 0;
++ memory->mapping[gcvSURF_SCISSOR] = 0;
++#endif
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] INDEX: bank %d",
++ memory->mapping[gcvSURF_INDEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] VERTEX: bank %d",
++ memory->mapping[gcvSURF_VERTEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TEXTURE: bank %d",
++ memory->mapping[gcvSURF_TEXTURE]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] RENDER_TARGET: bank %d",
++ memory->mapping[gcvSURF_RENDER_TARGET]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] DEPTH: bank %d",
++ memory->mapping[gcvSURF_DEPTH]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TILE_STATUS: bank %d",
++ memory->mapping[gcvSURF_TILE_STATUS]);
++
++ /* Allocate the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex));
++
++ /* Return pointer to the gckVIDMEM object. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (memory != gcvNULL)
++ {
++ if (memory->mutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex));
++ }
++
++ for (i = 0; i < banks; ++i)
++ {
++ /* Free the heap. */
++ gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory->sentinel[i].VidMem.next));
++ }
++
++ /* Free the object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Destroy
++**
++** Destroy an gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ )
++{
++ gcuVIDMEM_NODE_PTR node, next;
++ gctINT i;
++
++ gcmkHEADER_ARG("Memory=0x%x", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++
++ /* Walk all sentinels. */
++ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ /* Bail out of the heap is not used. */
++ if (Memory->sentinel[i].VidMem.next == gcvNULL)
++ {
++ break;
++ }
++
++ /* Walk all the nodes until we reach the sentinel. */
++ for (node = Memory->sentinel[i].VidMem.next;
++ node->VidMem.bytes != 0;
++ node = next)
++ {
++ /* Save pointer to the next node. */
++ next = node->VidMem.next;
++
++ /* Free the node. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, node));
++ }
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex));
++
++ /* Mark the object as unknown. */
++ Memory->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVIDMEM object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, Memory));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Allocate
++**
++** Allocate rectangular memory from the gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object.
++**
++** gctUINT Width
++** Width of rectangle to allocate. Make sure the width is properly
++** aligned.
++**
++** gctUINT Height
++** Height of rectangle to allocate. Make sure the height is properly
++** aligned.
++**
++** gctUINT Depth
++** Depth of rectangle to allocate. This equals to the number of
++** rectangles to allocate contiguously (i.e., for cubic maps and volume
++** textures).
++**
++** gctUINT BytesPerPixel
++** Number of bytes per pixel.
++**
++** gctUINT32 Alignment
++** Byte alignment for allocation.
++**
++** gceSURF_TYPE Type
++** Type of surface to allocate (use by bank optimization).
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that will hold the allocated memory node.
++*/
++gceSTATUS
++gckVIDMEM_Allocate(
++ IN gckVIDMEM Memory,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT BytesPerPixel,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gctSIZE_T bytes;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Memory=0x%x Width=%u Height=%u Depth=%u BytesPerPixel=%u "
++ "Alignment=%u Type=%d",
++ Memory, Width, Height, Depth, BytesPerPixel, Alignment,
++ Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++ gcmkVERIFY_ARGUMENT(Width > 0);
++ gcmkVERIFY_ARGUMENT(Height > 0);
++ gcmkVERIFY_ARGUMENT(Depth > 0);
++ gcmkVERIFY_ARGUMENT(BytesPerPixel > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Compute linear size. */
++ bytes = Width * Height * Depth * BytesPerPixel;
++
++ /* Allocate through linear function. */
++ gcmkONERROR(
++ gckVIDMEM_AllocateLinear(Memory, bytes, Alignment, Type, Node));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_BANK_ALIGNMENT
++
++#if !gcdBANK_BIT_START
++#error gcdBANK_BIT_START not defined.
++#endif
++
++#if !gcdBANK_BIT_END
++#error gcdBANK_BIT_END not defined.
++#endif
++/*******************************************************************************
++** _GetSurfaceBankAlignment
++**
++** Return the required offset alignment required to the make BaseAddress
++** aligned properly.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gcoOS object.
++**
++** gceSURF_TYPE Type
++** Type of allocation.
++**
++** gctUINT32 BaseAddress
++** Base address of current video memory node.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR AlignmentOffset
++** Pointer to a variable that will hold the number of bytes to skip in
++** the current video memory node in order to make the alignment bank
++** aligned.
++*/
++static gceSTATUS
++_GetSurfaceBankAlignment(
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 BaseAddress,
++ OUT gctUINT32_PTR AlignmentOffset
++ )
++{
++ gctUINT32 bank;
++ /* To retrieve the bank. */
++ static const gctUINT32 bankMask = (0xFFFFFFFF << gcdBANK_BIT_START)
++ ^ (0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ /* To retrieve the bank and all the lower bytes. */
++ static const gctUINT32 byteMask = ~(0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ gcmkHEADER_ARG("Type=%d BaseAddress=0x%x ", Type, BaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(AlignmentOffset != gcvNULL);
++
++ switch (Type)
++ {
++ case gcvSURF_RENDER_TARGET:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the first bank. */
++ *AlignmentOffset = (bank == 0) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + 0) - (BaseAddress & byteMask);
++ break;
++
++ case gcvSURF_DEPTH:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the third bank. */
++ *AlignmentOffset = (bank == 2) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + (2 << gcdBANK_BIT_START)) - (BaseAddress & byteMask);
++
++ /* Add a channel offset at the channel bit. */
++ *AlignmentOffset += (1 << gcdBANK_CHANNEL_BIT);
++ break;
++
++ default:
++ /* no alignment needed. */
++ *AlignmentOffset = 0;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("*AlignmentOffset=%u", *AlignmentOffset);
++ return gcvSTATUS_OK;
++}
++#endif
++
++static gcuVIDMEM_NODE_PTR
++_FindNode(
++ IN gckVIDMEM Memory,
++ IN gctINT Bank,
++ IN gctSIZE_T Bytes,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Alignment
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++
++#if gcdENABLE_BANK_ALIGNMENT
++ gctUINT32 bankAlignment;
++ gceSTATUS status;
++#endif
++
++ if (Memory->sentinel[Bank].VidMem.nextFree == gcvNULL)
++ {
++ /* No free nodes left. */
++ return gcvNULL;
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++ /* Walk all free nodes until we have one that is big enough or we have
++ ** reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++ gcmkONERROR(_GetSurfaceBankAlignment(
++ Type,
++ node->VidMem.memory->baseAddress + node->VidMem.offset,
++ &bankAlignment));
++
++ bankAlignment = gcmALIGN(bankAlignment, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0)
++ ? 0
++ : (*Alignment - (node->VidMem.offset % *Alignment));
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment + bankAlignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment + bankAlignment;
++ return node;
++ }
++ }
++#endif
++
++ /* Walk all free nodes until we have one that is big enough or we have
++ reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++
++ gctINT modulo = gckMATH_ModuloInt(node->VidMem.offset, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0) ? 0 : (*Alignment - modulo);
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment;
++ return node;
++ }
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++OnError:
++#endif
++ /* Not enough memory. */
++ return gcvNULL;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_AllocateLinear
++**
++** Allocate linear memory from the gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctUINT32 Alignment
++** Byte alignment for allocation.
++**
++** gceSURF_TYPE Type
++** Type of surface to allocate (use by bank optimization).
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that will hold the allocated memory node.
++*/
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++ gctINT bank, i;
++ gctBOOL acquired = gcvFALSE;
++#if gcdSMALL_BLOCK_SIZE
++ gctBOOL force_allocate = (Type == gcvSURF_TILE_STATUS) || (Type & gcvSURF_VG);
++#endif
++
++ gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
++ Memory, Bytes, Alignment, Type);
++
++ Type &= ~gcvSURF_VG;
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Type < gcvSURF_NUM_TYPES);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++#if !gcdUSE_VIDMEM_PER_PID
++
++ if (Bytes > Memory->freeBytes)
++ {
++ /* Not enough memory. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++#endif
++
++#if gcdSMALL_BLOCK_SIZE
++ if ((!force_allocate) && (Memory->freeBytes < (Memory->bytes/gcdRATIO_FOR_SMALL_MEMORY))
++ && (Bytes >= gcdSMALL_BLOCK_SIZE)
++ )
++ {
++ /* The left memory is for small memory.*/
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++#endif
++
++ /* Find the default bank for this surface type. */
++ gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
++ bank = Memory->mapping[Type];
++ alignment = Alignment;
++
++#if gcdUSE_VIDMEM_PER_PID
++ if (Bytes <= Memory->freeBytes)
++ {
++#endif
++ /* Find a free node in the default bank. */
++ node = _FindNode(Memory, bank, Bytes, Type, &alignment);
++
++ /* Out of memory? */
++ if (node == gcvNULL)
++ {
++ /* Walk all lower banks. */
++ for (i = bank - 1; i >= 0; --i)
++ {
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ /* Walk all upper banks. */
++ for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
++ {
++ /* Abort when we reach unused banks. */
++ break;
++ }
++
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++#if gcdUSE_VIDMEM_PER_PID
++ }
++#endif
++
++ if (node == gcvNULL)
++ {
++ /* Out of memory. */
++#if gcdUSE_VIDMEM_PER_PID
++ /* Allocate more memory from shared pool. */
++ gctSIZE_T bytes;
++ gctPHYS_ADDR physical_temp;
++ gctUINT32 physical;
++ gctPOINTER logical;
++
++ bytes = gcmALIGN(Bytes, gcdUSE_VIDMEM_PER_PID_SIZE);
++
++ gcmkONERROR(gckOS_AllocateContiguous(Memory->os,
++ gcvTRUE,
++ &bytes,
++ &physical_temp,
++ &logical));
++
++ /* physical address is returned as 0 for user space. workaround. */
++ if (physical_temp == gcvNULL)
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(Memory->os, logical, &physical));
++ }
++
++ /* Allocate one gcuVIDMEM_NODE union. */
++ gcmkONERROR(
++ gckOS_Allocate(Memory->os,
++ gcmSIZEOF(gcuVIDMEM_NODE),
++ (gctPOINTER *) &node));
++
++ /* Initialize gcuVIDMEM_NODE union. */
++ node->VidMem.memory = Memory;
++
++ node->VidMem.offset = 0;
++ node->VidMem.bytes = bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.physical = physical;
++ node->VidMem.pool = gcvPOOL_UNKNOWN;
++
++ node->VidMem.locked = 0;
++
++#ifdef __QNXNTO__
++ gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
++ node->VidMem.logical = logical;
++ gcmkASSERT(logical != gcvNULL);
++#endif
++
++ /* Insert node behind sentinel node. */
++ node->VidMem.next = Memory->sentinel[bank].VidMem.next;
++ node->VidMem.prev = &Memory->sentinel[bank];
++ Memory->sentinel[bank].VidMem.next = node->VidMem.next->VidMem.prev = node;
++
++ /* Insert free node behind sentinel node. */
++ node->VidMem.nextFree = Memory->sentinel[bank].VidMem.nextFree;
++ node->VidMem.prevFree = &Memory->sentinel[bank];
++ Memory->sentinel[bank].VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
++
++ Memory->freeBytes += bytes;
++#else
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++#endif
++ }
++
++ /* Do we have an alignment? */
++ if (alignment > 0)
++ {
++ /* Split the node so it is aligned. */
++ if (_Split(Memory->os, node, alignment))
++ {
++ /* Successful split, move to aligned node. */
++ node = node->VidMem.next;
++
++ /* Remove alignment. */
++ alignment = 0;
++ }
++ }
++
++ /* Do we have enough memory after the allocation to split it? */
++ if (node->VidMem.bytes - Bytes > Memory->threshold)
++ {
++ /* Adjust the node size. */
++ _Split(Memory->os, node, Bytes);
++ }
++
++ /* Remove the node from the free list. */
++ node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
++ node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = gcvNULL;
++
++ /* Fill in the information. */
++ node->VidMem.alignment = alignment;
++ node->VidMem.memory = Memory;
++#ifdef __QNXNTO__
++#if !gcdUSE_VIDMEM_PER_PID
++ node->VidMem.logical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
++#else
++ gcmkASSERT(node->VidMem.logical != gcvNULL);
++#endif
++#endif
++
++ /* Adjust the number of free bytes. */
++ Memory->freeBytes -= node->VidMem.bytes;
++
++ node->VidMem.freePending = gcvFALSE;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++
++ /* Return the pointer to the node. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Allocated %u bytes @ 0x%x [0x%08X]",
++ node->VidMem.bytes, node, node->VidMem.offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Free
++**
++** Free an allocated video memory node.
++**
++** INPUT:
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Free(
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gceSTATUS status;
++ gckKERNEL kernel = gcvNULL;
++ gckVIDMEM memory = gcvNULL;
++ gcuVIDMEM_NODE_PTR node;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gckOS os = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctINT32 i, totalLocked;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Node->VidMem.locked > 0)
++ {
++ /* Client still has a lock, defer free op 'till when lock reaches 0. */
++ Node->VidMem.freePending = gcvTRUE;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Node 0x%x is locked (%d)... deferring free.",
++ Node, Node->VidMem.locked);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract pointer to gckVIDMEM object owning the node. */
++ memory = Node->VidMem.memory;
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));
++
++ mutexAcquired = gcvTRUE;
++
++#ifdef __QNXNTO__
++#if !gcdUSE_VIDMEM_PER_PID
++ /* Reset. */
++ Node->VidMem.processID = 0;
++ Node->VidMem.logical = gcvNULL;
++#endif
++
++ /* Don't try to re-free an already freed node. */
++ if ((Node->VidMem.nextFree == gcvNULL)
++ && (Node->VidMem.prevFree == gcvNULL)
++ )
++#endif
++ {
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (Node->VidMem.kernelVirtual)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "%s(%d) Unmap %x from kernel space.",
++ __FUNCTION__, __LINE__,
++ Node->VidMem.kernelVirtual);
++
++ gcmkVERIFY_OK(
++ gckOS_UnmapPhysical(memory->os,
++ Node->VidMem.kernelVirtual,
++ Node->VidMem.bytes));
++
++ Node->VidMem.kernelVirtual = gcvNULL;
++ }
++#endif
++
++ /* Check if Node is already freed. */
++ if (Node->VidMem.nextFree)
++ {
++ /* Node is alread freed. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ /* Update the number of free bytes. */
++ memory->freeBytes += Node->VidMem.bytes;
++
++ /* Find the next free node. */
++ for (node = Node->VidMem.next;
++ node != gcvNULL && node->VidMem.nextFree == gcvNULL;
++ node = node->VidMem.next) ;
++
++ /* Insert this node in the free list. */
++ Node->VidMem.nextFree = node;
++ Node->VidMem.prevFree = node->VidMem.prevFree;
++
++ Node->VidMem.prevFree->VidMem.nextFree =
++ node->VidMem.prevFree = Node;
++
++ /* Is the next node a free node and not the sentinel? */
++ if ((Node->VidMem.next == Node->VidMem.nextFree)
++ && (Node->VidMem.next->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the next node. */
++ gcmkONERROR(_Merge(memory->os, node = Node));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++
++ /* Is the previous node a free node and not the sentinel? */
++ if ((Node->VidMem.prev == Node->VidMem.prevFree)
++ && (Node->VidMem.prev->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the previous node. */
++ gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Node 0x%x is freed.",
++ Node);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ /* Get gckKERNEL object. */
++ kernel = Node->Virtual.kernel;
++
++ /* Verify the gckKERNEL object pointer. */
++ gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);
++
++ /* Get the gckOS object pointer. */
++ os = kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Grab the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ totalLocked += Node->Virtual.lockeds[i];
++ }
++
++ if (totalLocked > 0)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
++ "gckVIDMEM_Free: Virtual node 0x%x is locked (%d)",
++ Node, totalLocked);
++
++ /* Set Flag */
++ Node->Virtual.freed = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++ else
++ {
++ /* Free the virtual memory. */
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(kernel->os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes));
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++
++ /* Destroy the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutexAcquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ memory->os, memory->mutex
++ ));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++#ifdef __QNXNTO__
++/*******************************************************************************
++**
++** gcoVIDMEM_FreeHandleMemory
++**
++** Free all allocated video memory nodes for a handle.
++**
++** INPUT:
++**
++** gcoVIDMEM Memory
++** Pointer to an gcoVIDMEM object..
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctUINT32 Pid
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcuVIDMEM_NODE_PTR node;
++ gctINT i;
++ gctUINT32 nodeCount = 0, byteCount = 0;
++ gctBOOL again;
++
++ gcmkHEADER_ARG("Kernel=0x%x, Memory=0x%x Pid=0x%u", Kernel, Memory, Pid);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++
++ gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ /* Walk all sentinels. */
++ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ /* Bail out of the heap if it is not used. */
++ if (Memory->sentinel[i].VidMem.next == gcvNULL)
++ {
++ break;
++ }
++
++ do
++ {
++ again = gcvFALSE;
++
++ /* Walk all the nodes until we reach the sentinel. */
++ for (node = Memory->sentinel[i].VidMem.next;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.next)
++ {
++ /* Free the node if it was allocated by Handle. */
++ if (node->VidMem.processID == Pid)
++ {
++ /* Unlock video memory. */
++ while (node->VidMem.locked > 0)
++ {
++ gckVIDMEM_Unlock(Kernel, node, gcvSURF_TYPE_UNKNOWN, gcvNULL);
++ }
++
++ nodeCount++;
++ byteCount += node->VidMem.bytes;
++
++ /* Free video memory. */
++ gcmkVERIFY_OK(gckVIDMEM_Free(node));
++
++ /*
++ * Freeing may cause a merge which will invalidate our iteration.
++ * Don't be clever, just restart.
++ */
++ again = gcvTRUE;
++
++ break;
++ }
++#if gcdUSE_VIDMEM_PER_PID
++ else
++ {
++ gcmkASSERT(node->VidMem.processID == Pid);
++ }
++#endif
++ }
++ }
++ while (again);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** _NeedVirtualMapping
++**
++** Whether setup GPU page table for video node.
++**
++** INPUT:
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** gceCORE Core
++** Id of current GPU.
++**
++** OUTPUT:
++** gctBOOL * NeedMapping
++** A pointer hold the result whether Node should be mapping.
++*/
++static gceSTATUS
++_NeedVirtualMapping(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gcuVIDMEM_NODE_PTR Node,
++ OUT gctBOOL * NeedMapping
++)
++{
++ gceSTATUS status;
++ gctUINT32 phys;
++ gctUINT32 end;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Node=0x%X", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(NeedMapping != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Core < gcdMAX_GPU_COUNT);
++
++ if (Node->Virtual.contiguous)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ *NeedMapping = gcvFALSE;
++ }
++ else
++#endif
++ {
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, Node->Virtual.logical, &phys));
++
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++
++ gcmkASSERT(phys >= baseAddress);
++
++ /* Subtract baseAddress to get a GPU address used for programming. */
++ phys -= baseAddress;
++
++ /* If part of region is belong to gcvPOOL_VIRTUAL,
++ ** whole region has to be mapped. */
++ end = phys + Node->Virtual.bytes - 1;
++
++ gcmkONERROR(gckHARDWARE_SplitMemory(
++ Kernel->hardware, end, &pool, &offset
++ ));
++
++ *NeedMapping = (pool == gcvPOOL_VIRTUAL);
++ }
++ }
++ else
++ {
++ *NeedMapping = gcvTRUE;
++ }
++
++ gcmkFOOTER_ARG("*NeedMapping=%d", *NeedMapping);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Lock
++**
++** Lock a video memory node and return its hardware specific address.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that will hold the hardware specific address.
++*/
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL locked = gcvFALSE;
++ gckOS os = gcvNULL;
++ gctBOOL needMapping;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Cacheable == gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Increment the lock count. */
++ Node->VidMem.locked ++;
++
++ /* Return the physical address of the node. */
++#if !gcdUSE_VIDMEM_PER_PID
++ *Address = Node->VidMem.memory->baseAddress
++ + Node->VidMem.offset
++ + Node->VidMem.alignment;
++#else
++ *Address = Node->VidMem.physical;
++#endif
++
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ /* Convert physical to GPU address for old mmu. */
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++ gcmkASSERT(*Address > baseAddress);
++ *Address -= baseAddress;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Locked node 0x%x (%d) @ 0x%08X",
++ Node,
++ Node->VidMem.locked,
++ *Address);
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++ /* Verify the gckKERNEL object pointer. */
++ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
++
++ /* Extract the gckOS object pointer. */
++ os = Node->Virtual.kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdPAGED_MEMORY_CACHEABLE
++ /* Force video memory cacheable. */
++ Cacheable = gcvTRUE;
++#endif
++
++ gcmkONERROR(
++ gckOS_LockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Cacheable,
++ &Node->Virtual.logical,
++ &Node->Virtual.pageCount));
++
++ /* Increment the lock count. */
++ if (Node->Virtual.lockeds[Kernel->core] ++ == 0)
++ {
++ /* Is this node pending for a final unlock? */
++#ifdef __QNXNTO__
++ if (!Node->Virtual.contiguous && Node->Virtual.unlockPendings[Kernel->core])
++ {
++ /* Make sure we have a page table. */
++ gcmkASSERT(Node->Virtual.pageTables[Kernel->core] != gcvNULL);
++
++ /* Remove pending unlock. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++ }
++
++ /* First lock - create a page table. */
++ gcmkASSERT(Node->Virtual.pageTables[Kernel->core] == gcvNULL);
++
++ /* Make sure we mark our node as not flushed. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++#endif
++
++ locked = gcvTRUE;
++
++ gcmkONERROR(_NeedVirtualMapping(Kernel, Kernel->core, Node, &needMapping));
++
++ if (needMapping == gcvFALSE)
++ {
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(gckVGHARDWARE_ConvertLogical(Kernel->vg->hardware,
++ Node->Virtual.logical,
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Kernel->hardware,
++ Node->Virtual.logical,
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ }
++ else
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckVGMMU_AllocatePages(Kernel->vg->mmu,
++ Node->Virtual.pageCount,
++ &Node->Virtual.pageTables[Kernel->core],
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckMMU_AllocatePagesEx(Kernel->mmu,
++ Node->Virtual.pageCount,
++ Node->Virtual.type,
++ &Node->Virtual.pageTables[Kernel->core],
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++
++ Node->Virtual.lockKernels[Kernel->core] = Kernel;
++
++ /* Map the pages. */
++#ifdef __QNXNTO__
++ gcmkONERROR(
++ gckOS_MapPagesEx(os,
++ Kernel->core,
++ Node->Virtual.physical,
++ Node->Virtual.logical,
++ Node->Virtual.pageCount,
++ Node->Virtual.pageTables[Kernel->core]));
++#else
++ gcmkONERROR(
++ gckOS_MapPagesEx(os,
++ Kernel->core,
++ Node->Virtual.physical,
++ Node->Virtual.pageCount,
++ Node->Virtual.pageTables[Kernel->core]));
++#endif
++
++#if gcdENABLE_VG
++ if (Kernel->core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Kernel->vg->mmu));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckMMU_Flush(Kernel->mmu));
++ }
++ }
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Mapped virtual node 0x%x to 0x%08X",
++ Node,
++ Node->Virtual.addresses[Kernel->core]);
++ }
++
++ /* Return hardware address. */
++ *Address = Node->Virtual.addresses[Kernel->core];
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (locked)
++ {
++ if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ Node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++
++ /* Unlock the pages. */
++ gcmkVERIFY_OK(
++ gckOS_UnlockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Node->Virtual.logical
++ ));
++
++ Node->Virtual.lockeds[Kernel->core]--;
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Unlock
++**
++** Unlock a video memory node.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a locked gcuVIDMEM_NODE union.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** gctBOOL * Asynchroneous
++** Pointer to a variable specifying whether the surface should be
++** unlocked asynchroneously or not.
++**
++** OUTPUT:
++**
++** gctBOOL * Asynchroneous
++** Pointer to a variable receiving the number of bytes used in the
++** command buffer specified by 'Commands'. If gcvNULL, there is no
++** command buffer.
++*/
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchroneous
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctPOINTER buffer;
++ gctSIZE_T requested, bufferSize;
++ gckCOMMAND command = gcvNULL;
++ gceKERNEL_FLUSH flush;
++ gckOS os = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL commitEntered = gcvFALSE;
++ gctINT32 i, totalLocked;
++
++ gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
++ Node, Type, gcmOPT_VALUE(Asynchroneous));
++
++ /* Verify the arguments. */
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Node->VidMem.locked <= 0)
++ {
++ /* The surface was not locked. */
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ /* Decrement the lock count. */
++ Node->VidMem.locked --;
++
++ if (Asynchroneous != gcvNULL)
++ {
++ /* No need for any events. */
++ *Asynchroneous = gcvFALSE;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unlocked node 0x%x (%d)",
++ Node,
++ Node->VidMem.locked);
++
++#ifdef __QNXNTO__
++ /* Unmap the video memory */
++ if ((Node->VidMem.locked == 0) && (Node->VidMem.logical != gcvNULL))
++ {
++ if (Kernel->core == gcvCORE_VG)
++ {
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ Node->VidMem.logical,
++ Node->VidMem.processID,
++ Node->VidMem.bytes);
++ Node->VidMem.logical = gcvNULL;
++ }
++ }
++#endif /* __QNXNTO__ */
++
++ if (Node->VidMem.freePending && (Node->VidMem.locked == 0))
++ {
++ /* Client has unlocked node previously attempted to be freed by compositor. Free now. */
++ Node->VidMem.freePending = gcvFALSE;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Deferred-freeing Node 0x%x.",
++ Node);
++ gcmkONERROR(gckVIDMEM_Free(Node));
++ }
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++ /* Verify the gckHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ /* Get the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Grab the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ if (Asynchroneous == gcvNULL)
++ {
++ if (Node->Virtual.lockeds[Kernel->core] == 0)
++ {
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ /* Decrement lock count. */
++ -- Node->Virtual.lockeds[Kernel->core];
++
++ /* See if we can unlock the resources. */
++ if (Node->Virtual.lockeds[Kernel->core] == 0)
++ {
++ /* Free the page table. */
++ if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(
++ gckMMU_FreePages(Kernel->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ /* Mark page table as freed. */
++ Node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++
++#ifdef __QNXNTO__
++ /* Mark node as unlocked. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++#endif
++ }
++
++ for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ totalLocked += Node->Virtual.lockeds[i];
++ }
++
++ if (totalLocked == 0)
++ {
++ /* Owner have already freed this node
++ ** and we are the last one to unlock, do
++ ** real free */
++ if (Node->Virtual.freed)
++ {
++ /* Free the virtual memory. */
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(Kernel->os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes));
++
++ /* Release mutex before node is destroyed */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++
++ acquired = gcvFALSE;
++
++ /* Destroy the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
++
++ /* Node has been destroyed, so we should not touch it any more */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unmapped virtual node 0x%x from 0x%08X",
++ Node, Node->Virtual.addresses[Kernel->core]);
++
++ }
++
++ else
++ {
++ /* If we need to unlock a node from virtual memory we have to be
++ ** very carefull. If the node is still inside the caches we
++ ** might get a bus error later if the cache line needs to be
++ ** replaced. So - we have to flush the caches before we do
++ ** anything. */
++
++ /* gckCommand_EnterCommit() can't be called in interrupt handler because
++ ** of a dead lock situation:
++ ** process call Command_Commit(), and acquire Command->mutexQueue in
++ ** gckCOMMAND_EnterCommit(). Then it will wait for a signal which depends
++ ** on interrupt handler to generate, if interrupt handler enter
++ ** gckCommand_EnterCommit(), process will never get the signal. */
++
++ /* So, flush cache when we still in process context, and then ask caller to
++ ** schedule a event. */
++
++ gcmkONERROR(
++ gckOS_UnlockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Node->Virtual.logical));
++
++ if (!Node->Virtual.contiguous
++ && (Node->Virtual.lockeds[Kernel->core] == 1)
++#if gcdENABLE_VG
++ && (Kernel->vg == gcvNULL)
++#endif
++ )
++ {
++ if (Type == gcvSURF_BITMAP)
++ {
++ /* Flush 2D cache. */
++ flush = gcvFLUSH_2D;
++ }
++ else if (Type == gcvSURF_RENDER_TARGET)
++ {
++ /* Flush color cache. */
++ flush = gcvFLUSH_COLOR;
++ }
++ else if (Type == gcvSURF_DEPTH)
++ {
++ /* Flush depth cache. */
++ flush = gcvFLUSH_DEPTH;
++ }
++ else
++ {
++ /* No flush required. */
++ flush = (gceKERNEL_FLUSH) 0;
++ }
++ if(hardware)
++ {
++ gcmkONERROR(
++ gckHARDWARE_Flush(hardware, flush, gcvNULL, &requested));
++
++ if (requested != 0)
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, requested, &buffer, &bufferSize
++ ));
++
++ gcmkONERROR(gckHARDWARE_Flush(
++ hardware, flush, buffer, &bufferSize
++ ));
++
++ /* Mark node as pending. */
++#ifdef __QNXNTO__
++ Node->Virtual.unlockPendings[Kernel->core] = gcvTRUE;
++#endif
++
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ commitEntered = gcvFALSE;
++ }
++ }
++ else
++ {
++ gckOS_Print("Hardware already is freed.\n");
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Scheduled unlock for virtual node 0x%x",
++ Node);
++
++ /* Schedule the surface to be unlocked. */
++ *Asynchroneous = gcvTRUE;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++
++ acquired = gcvFALSE;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_base.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_base.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_base.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_base.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3896 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_base_h_
++#define __gc_hal_base_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#include "gc_hal_dump.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckOS * gckOS;
++typedef struct _gcoHAL * gcoHAL;
++typedef struct _gcoOS * gcoOS;
++typedef struct _gco2D * gco2D;
++
++#ifndef VIVANTE_NO_3D
++typedef struct _gco3D * gco3D;
++#endif
++
++typedef struct _gcoSURF * gcoSURF;
++typedef struct _gcsSURF_INFO * gcsSURF_INFO_PTR;
++typedef struct _gcsSURF_NODE * gcsSURF_NODE_PTR;
++typedef struct _gcsSURF_FORMAT_INFO * gcsSURF_FORMAT_INFO_PTR;
++typedef struct _gcsPOINT * gcsPOINT_PTR;
++typedef struct _gcsSIZE * gcsSIZE_PTR;
++typedef struct _gcsRECT * gcsRECT_PTR;
++typedef struct _gcsBOUNDARY * gcsBOUNDARY_PTR;
++typedef struct _gcoDUMP * gcoDUMP;
++typedef struct _gcoHARDWARE * gcoHARDWARE;
++typedef union _gcuVIDMEM_NODE * gcuVIDMEM_NODE_PTR;
++
++typedef struct gcsATOM * gcsATOM_PTR;
++
++#if gcdENABLE_VG
++typedef struct _gcoVG * gcoVG;
++typedef struct _gcsCOMPLETION_SIGNAL * gcsCOMPLETION_SIGNAL_PTR;
++typedef struct _gcsCONTEXT_MAP * gcsCONTEXT_MAP_PTR;
++#else
++typedef void * gcoVG;
++#endif
++
++#if gcdSYNC
++typedef struct _gcoFENCE * gcoFENCE;
++typedef struct _gcsSYNC_CONTEXT * gcsSYNC_CONTEXT_PTR;
++#endif
++
++typedef struct _gcoOS_SymbolsList gcoOS_SymbolsList;
++
++/******************************************************************************\
++******************************* Process local storage *************************
++\******************************************************************************/
++typedef struct _gcsPLS * gcsPLS_PTR;
++
++typedef void (* gctPLS_DESTRUCTOR) (
++ gcsPLS_PTR
++ );
++
++typedef struct _gcsPLS
++{
++ /* Global objects. */
++ gcoOS os;
++ gcoHAL hal;
++
++ /* Internal memory pool. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctPOINTER internalLogical;
++
++ /* External memory pool. */
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctPOINTER externalLogical;
++
++ /* Contiguous memory pool. */
++ gctSIZE_T contiguousSize;
++ gctPHYS_ADDR contiguousPhysical;
++ gctPOINTER contiguousLogical;
++
++ /* EGL-specific process-wide objects. */
++ gctPOINTER eglDisplayInfo;
++ gctPOINTER eglSurfaceInfo;
++ gceSURF_FORMAT eglConfigFormat;
++
++ /* PorcessID of the constrcutor process */
++ gctUINT32 processID;
++#if gcdFORCE_GAL_LOAD_TWICE
++ /* ThreadID of the constrcutor process. */
++ gctSIZE_T threadID;
++ /* Flag for calling module destructor. */
++ gctBOOL exiting;
++#endif
++
++ /* Reference count for destructor. */
++ gcsATOM_PTR reference;
++ gctBOOL bKFS;
++#if gcdUSE_NPOT_PATCH
++ gctBOOL bNeedSupportNP2Texture;
++#endif
++
++ /* Destructor for eglDisplayInfo. */
++ gctPLS_DESTRUCTOR destructor;
++}
++gcsPLS;
++
++extern gcsPLS gcPLS;
++
++/******************************************************************************\
++******************************* Thread local storage *************************
++\******************************************************************************/
++
++typedef struct _gcsTLS * gcsTLS_PTR;
++
++typedef void (* gctTLS_DESTRUCTOR) (
++ gcsTLS_PTR
++ );
++
++typedef struct _gcsTLS
++{
++ gceHARDWARE_TYPE currentType;
++ gcoHARDWARE hardware;
++ /* Only for separated 3D and 2D */
++ gcoHARDWARE hardware2D;
++#if gcdENABLE_VG
++ gcoVGHARDWARE vg;
++ gcoVG engineVG;
++#endif /* gcdENABLE_VG */
++ gctPOINTER context;
++ gctTLS_DESTRUCTOR destructor;
++ gctBOOL ProcessExiting;
++
++#ifndef VIVANTE_NO_3D
++ gco3D engine3D;
++#endif
++#if gcdSYNC
++ gctBOOL fenceEnable;
++#endif
++ gco2D engine2D;
++ gctBOOL copied;
++
++#if gcdFORCE_GAL_LOAD_TWICE
++ /* libGAL.so handle */
++ gctHANDLE handle;
++#endif
++}
++gcsTLS;
++
++/******************************************************************************\
++********************************* Enumerations *********************************
++\******************************************************************************/
++
++typedef enum _gcePLS_VALUE
++{
++ gcePLS_VALUE_EGL_DISPLAY_INFO,
++ gcePLS_VALUE_EGL_SURFACE_INFO,
++ gcePLS_VALUE_EGL_CONFIG_FORMAT_INFO,
++ gcePLS_VALUE_EGL_DESTRUCTOR_INFO,
++}
++gcePLS_VALUE;
++
++/* Video memory pool type. */
++typedef enum _gcePOOL
++{
++ gcvPOOL_UNKNOWN = 0,
++ gcvPOOL_DEFAULT,
++ gcvPOOL_LOCAL,
++ gcvPOOL_LOCAL_INTERNAL,
++ gcvPOOL_LOCAL_EXTERNAL,
++ gcvPOOL_UNIFIED,
++ gcvPOOL_SYSTEM,
++ gcvPOOL_VIRTUAL,
++ gcvPOOL_USER,
++ gcvPOOL_CONTIGUOUS,
++ gcvPOOL_DEFAULT_FORCE_CONTIGUOUS,
++ gcvPOOL_DEFAULT_FORCE_CONTIGUOUS_CACHEABLE,
++
++ gcvPOOL_NUMBER_OF_POOLS
++}
++gcePOOL;
++
++#ifndef VIVANTE_NO_3D
++/* Blending functions. */
++typedef enum _gceBLEND_FUNCTION
++{
++ gcvBLEND_ZERO,
++ gcvBLEND_ONE,
++ gcvBLEND_SOURCE_COLOR,
++ gcvBLEND_INV_SOURCE_COLOR,
++ gcvBLEND_SOURCE_ALPHA,
++ gcvBLEND_INV_SOURCE_ALPHA,
++ gcvBLEND_TARGET_COLOR,
++ gcvBLEND_INV_TARGET_COLOR,
++ gcvBLEND_TARGET_ALPHA,
++ gcvBLEND_INV_TARGET_ALPHA,
++ gcvBLEND_SOURCE_ALPHA_SATURATE,
++ gcvBLEND_CONST_COLOR,
++ gcvBLEND_INV_CONST_COLOR,
++ gcvBLEND_CONST_ALPHA,
++ gcvBLEND_INV_CONST_ALPHA,
++}
++gceBLEND_FUNCTION;
++
++/* Blending modes. */
++typedef enum _gceBLEND_MODE
++{
++ gcvBLEND_ADD,
++ gcvBLEND_SUBTRACT,
++ gcvBLEND_REVERSE_SUBTRACT,
++ gcvBLEND_MIN,
++ gcvBLEND_MAX,
++}
++gceBLEND_MODE;
++
++/* API flags. */
++typedef enum _gceAPI
++{
++ gcvAPI_D3D = 0x1,
++ gcvAPI_OPENGL = 0x2,
++ gcvAPI_OPENVG = 0x3,
++ gcvAPI_OPENCL = 0x4,
++}
++gceAPI;
++
++/* Depth modes. */
++typedef enum _gceDEPTH_MODE
++{
++ gcvDEPTH_NONE,
++ gcvDEPTH_Z,
++ gcvDEPTH_W,
++}
++gceDEPTH_MODE;
++#endif /* VIVANTE_NO_3D */
++
++typedef enum _gceWHERE
++{
++ gcvWHERE_COMMAND,
++ gcvWHERE_RASTER,
++ gcvWHERE_PIXEL,
++}
++gceWHERE;
++
++typedef enum _gceHOW
++{
++ gcvHOW_SEMAPHORE = 0x1,
++ gcvHOW_STALL = 0x2,
++ gcvHOW_SEMAPHORE_STALL = 0x3,
++}
++gceHOW;
++
++typedef enum _gceSignalHandlerType
++{
++ gcvHANDLE_SIGFPE_WHEN_SIGNAL_CODE_IS_0 = 0x1,
++}
++gceSignalHandlerType;
++
++
++#if gcdENABLE_VG
++/* gcsHAL_Limits*/
++typedef struct _gcsHAL_LIMITS
++{
++ /* chip info */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 featureCount;
++ gctUINT32 *chipFeatures;
++
++ /* target caps */
++ gctUINT32 maxWidth;
++ gctUINT32 maxHeight;
++ gctUINT32 multiTargetCount;
++ gctUINT32 maxSamples;
++
++}gcsHAL_LIMITS;
++#endif
++
++/******************************************************************************\
++*********** Generic Memory Allocation Optimization Using Containers ************
++\******************************************************************************/
++
++/* Generic container definition. */
++typedef struct _gcsCONTAINER_LINK * gcsCONTAINER_LINK_PTR;
++typedef struct _gcsCONTAINER_LINK
++{
++ /* Points to the next container. */
++ gcsCONTAINER_LINK_PTR next;
++}
++gcsCONTAINER_LINK;
++
++typedef struct _gcsCONTAINER_RECORD * gcsCONTAINER_RECORD_PTR;
++typedef struct _gcsCONTAINER_RECORD
++{
++ gcsCONTAINER_RECORD_PTR prev;
++ gcsCONTAINER_RECORD_PTR next;
++}
++gcsCONTAINER_RECORD;
++
++typedef struct _gcsCONTAINER * gcsCONTAINER_PTR;
++typedef struct _gcsCONTAINER
++{
++ gctUINT containerSize;
++ gctUINT recordSize;
++ gctUINT recordCount;
++ gcsCONTAINER_LINK_PTR containers;
++ gcsCONTAINER_RECORD freeList;
++ gcsCONTAINER_RECORD allocList;
++}
++gcsCONTAINER;
++
++gceSTATUS
++gcsCONTAINER_Construct(
++ IN gcsCONTAINER_PTR Container,
++ gctUINT RecordsPerContainer,
++ gctUINT RecordSize
++ );
++
++gceSTATUS
++gcsCONTAINER_Destroy(
++ IN gcsCONTAINER_PTR Container
++ );
++
++gceSTATUS
++gcsCONTAINER_AllocateRecord(
++ IN gcsCONTAINER_PTR Container,
++ OUT gctPOINTER * Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeRecord(
++ IN gcsCONTAINER_PTR Container,
++ IN gctPOINTER Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeAll(
++ IN gcsCONTAINER_PTR Container
++ );
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++/* Construct a new gcoHAL object. */
++gceSTATUS
++gcoHAL_Construct(
++ IN gctPOINTER Context,
++ IN gcoOS Os,
++ OUT gcoHAL * Hal
++ );
++
++/* Destroy an gcoHAL object. */
++gceSTATUS
++gcoHAL_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Get pointer to gco2D object. */
++gceSTATUS
++gcoHAL_Get2DEngine(
++ IN gcoHAL Hal,
++ OUT gco2D * Engine
++ );
++
++gceSTATUS
++gcoHAL_SetFscaleValue(
++ IN gctUINT FscaleValue
++ );
++
++gceSTATUS
++gcoHAL_GetFscaleValue(
++ OUT gctUINT * FscaleValue,
++ OUT gctUINT * MinFscaleValue,
++ OUT gctUINT * MaxFscaleValue
++ );
++
++gceSTATUS
++gcoHAL_SetBltNP2Texture(
++ gctBOOL enable
++ );
++
++#ifndef VIVANTE_NO_3D
++/* Get pointer to gco3D object. */
++gceSTATUS
++gcoHAL_Get3DEngine(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++gceSTATUS
++gcoHAL_Query3DEngine(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++gceSTATUS
++gcoHAL_Set3DEngine(
++ IN gcoHAL Hal,
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gcoHAL_Get3DHardware(
++ IN gcoHAL Hal,
++ OUT gcoHARDWARE * Hardware
++ );
++
++gceSTATUS
++gcoHAL_Set3DHardware(
++ IN gcoHAL Hal,
++ IN gcoHARDWARE Hardware
++ );
++
++
++#endif /* VIVANTE_NO_3D */
++
++/* Verify whether the specified feature is available in hardware. */
++gceSTATUS
++gcoHAL_IsFeatureAvailable(
++ IN gcoHAL Hal,
++ IN gceFEATURE Feature
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gcoHAL_QueryChipIdentity(
++ IN gcoHAL Hal,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++/* Query the minor features of the hardware. */
++gceSTATUS gcoHAL_QueryChipMinorFeatures(
++ IN gcoHAL Hal,
++ OUT gctUINT32* NumFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++/* Query the amount of video memory. */
++gceSTATUS
++gcoHAL_QueryVideoMemory(
++ IN gcoHAL Hal,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Map video memory. */
++gceSTATUS
++gcoHAL_MapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap video memory. */
++gceSTATUS
++gcoHAL_UnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Schedule an unmap of a buffer mapped through its physical address. */
++gceSTATUS
++gcoHAL_ScheduleUnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoHAL_MapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR GPUAddress
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoHAL_UnmapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 GPUAddress
++ );
++
++/* Schedule an unmap of a user buffer using event mechanism. */
++gceSTATUS
++gcoHAL_ScheduleUnmapUserMemory(
++ IN gcoHAL Hal,
++ IN gctPOINTER Info,
++ IN gctSIZE_T Size,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory
++ );
++
++/* Commit the current command buffer. */
++gceSTATUS
++gcoHAL_Commit(
++ IN gcoHAL Hal,
++ IN gctBOOL Stall
++ );
++
++/* Query the tile capabilities. */
++gceSTATUS
++gcoHAL_QueryTiled(
++ IN gcoHAL Hal,
++ OUT gctINT32 * TileWidth2D,
++ OUT gctINT32 * TileHeight2D,
++ OUT gctINT32 * TileWidth3D,
++ OUT gctINT32 * TileHeight3D
++ );
++
++gceSTATUS
++gcoHAL_Compact(
++ IN gcoHAL Hal
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoHAL_ProfileStart(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_ProfileEnd(
++ IN gcoHAL Hal,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++/* Power Management */
++gceSTATUS
++gcoHAL_SetPowerManagementState(
++ IN gcoHAL Hal,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gcoHAL_QueryPowerManagementState(
++ IN gcoHAL Hal,
++ OUT gceCHIPPOWERSTATE *State
++ );
++
++/* Set the filter type for filter blit. */
++gceSTATUS
++gcoHAL_SetFilterType(
++ IN gcoHAL Hal,
++ IN gceFILTER_TYPE FilterType
++ );
++
++gceSTATUS
++gcoHAL_GetDump(
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++/* Call the kernel HAL layer. */
++gceSTATUS
++gcoHAL_Call(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++gceSTATUS
++gcoHAL_GetPatchID(
++ IN gcoHAL Hal,
++ OUT gcePATCH_ID * PatchID
++ );
++
++/* Schedule an event. */
++gceSTATUS
++gcoHAL_ScheduleEvent(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++/* Destroy a surface. */
++gceSTATUS
++gcoHAL_DestroySurface(
++ IN gcoHAL Hal,
++ IN gcoSURF Surface
++ );
++
++/* Request a start/stop timestamp. */
++gceSTATUS
++gcoHAL_SetTimer(
++ IN gcoHAL Hal,
++ IN gctUINT32 Index,
++ IN gctBOOL Start
++ );
++
++/* Get Time delta from a Timer in microseconds. */
++gceSTATUS
++gcoHAL_GetTimerTime(
++ IN gcoHAL Hal,
++ IN gctUINT32 Timer,
++ OUT gctINT32_PTR TimeDelta
++ );
++
++/* set timeout value. */
++gceSTATUS
++gcoHAL_SetTimeOut(
++ IN gcoHAL Hal,
++ IN gctUINT32 timeOut
++ );
++
++gceSTATUS
++gcoHAL_SetHardwareType(
++ IN gcoHAL Hal,
++ IN gceHARDWARE_TYPE HardwardType
++ );
++
++gceSTATUS
++gcoHAL_GetHardwareType(
++ IN gcoHAL Hal,
++ OUT gceHARDWARE_TYPE * HardwardType
++ );
++
++gceSTATUS
++gcoHAL_QueryChipCount(
++ IN gcoHAL Hal,
++ OUT gctINT32 * Count
++ );
++
++gceSTATUS
++gcoHAL_QuerySeparated3D2D(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_QuerySpecialHint(
++ IN gceSPECIAL_HINT Hint
++ );
++
++gceSTATUS
++gcoHAL_SetSpecialHintData(
++ IN gcoHARDWARE Hardware
++ );
++
++/* Get pointer to gcoVG object. */
++gceSTATUS
++gcoHAL_GetVGEngine(
++ IN gcoHAL Hal,
++ OUT gcoVG * Engine
++ );
++
++#if gcdENABLE_VG
++gceSTATUS
++gcoHAL_QueryChipLimits(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ OUT gcsHAL_LIMITS *Limits);
++
++gceSTATUS
++gcoHAL_QueryChipFeature(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ IN gceFEATURE Feature);
++
++#endif
++/******************************************************************************\
++********************************** gcoOS Object *********************************
++\******************************************************************************/
++
++/* Get PLS value for given key */
++gctPOINTER
++gcoOS_GetPLSValue(
++ IN gcePLS_VALUE key
++ );
++
++/* Set PLS value of a given key */
++void
++gcoOS_SetPLSValue(
++ IN gcePLS_VALUE key,
++ OUT gctPOINTER value
++ );
++
++/* Get access to the thread local storage. */
++gceSTATUS
++gcoOS_GetTLS(
++ OUT gcsTLS_PTR * TLS
++ );
++
++ /* Copy the TLS from a source thread. */
++ gceSTATUS gcoOS_CopyTLS(IN gcsTLS_PTR Source);
++
++/* Destroy the objects associated with the current thread. */
++void
++gcoOS_FreeThreadData(
++ IN gctBOOL ProcessExiting
++ );
++
++/* Construct a new gcoOS object. */
++gceSTATUS
++gcoOS_Construct(
++ IN gctPOINTER Context,
++ OUT gcoOS * Os
++ );
++
++/* Destroy an gcoOS object. */
++gceSTATUS
++gcoOS_Destroy(
++ IN gcoOS Os
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gcoOS_GetBaseAddress(
++ IN gcoOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gcoOS_Allocate(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Get allocated memory size. */
++gceSTATUS
++gcoOS_GetMemorySize(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gcoOS_Free(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoOS_AllocateMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoOS_FreeMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gcoOS_AllocateContiguous(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gcoOS_FreeContiguous(
++ IN gcoOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Allocate video memory. */
++gceSTATUS
++gcoOS_AllocateVideoMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN gctBOOL InCacheable,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * Physical,
++ OUT gctPOINTER * Logical,
++ OUT gctPOINTER * Handle
++ );
++
++/* Free video memory. */
++gceSTATUS
++gcoOS_FreeVideoMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Handle
++ );
++
++gceSTATUS
++gcoSURF_GetBankOffsetBytes(
++ IN gcoSURF Surfce,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 Stride,
++ IN gctUINT32_PTR Bytes
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemoryEx(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoOS_UnmapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/* Device I/O Control call to the kernel HAL layer. */
++gceSTATUS
++gcoOS_DeviceControl(
++ IN gcoOS Os,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ IN gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/* Allocate non paged memory. */
++gceSTATUS
++gcoOS_AllocateNonPagedMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non paged memory. */
++gceSTATUS
++gcoOS_FreeNonPagedMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++#define gcmOS_SAFE_FREE(os, mem) \
++ gcoOS_Free(os, mem); \
++ mem = gcvNULL
++
++#define gcmkOS_SAFE_FREE(os, mem) \
++ gckOS_Free(os, mem); \
++ mem = gcvNULL
++
++typedef enum _gceFILE_MODE
++{
++ gcvFILE_CREATE = 0,
++ gcvFILE_APPEND,
++ gcvFILE_READ,
++ gcvFILE_CREATETEXT,
++ gcvFILE_APPENDTEXT,
++ gcvFILE_READTEXT,
++}
++gceFILE_MODE;
++
++/* Open a file. */
++gceSTATUS
++gcoOS_Open(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ IN gceFILE_MODE Mode,
++ OUT gctFILE * File
++ );
++
++/* Close a file. */
++gceSTATUS
++gcoOS_Close(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Read data from a file. */
++gceSTATUS
++gcoOS_Read(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T * ByteRead
++ );
++
++/* Write data to a file. */
++gceSTATUS
++gcoOS_Write(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data to a file. */
++gceSTATUS
++gcoOS_Flush(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Close a file descriptor. */
++gceSTATUS
++gcoOS_CloseFD(
++ IN gcoOS Os,
++ IN gctINT FD
++ );
++
++/* Dup file descriptor to another. */
++gceSTATUS
++gcoOS_DupFD(
++ IN gcoOS Os,
++ IN gctINT FD,
++ OUT gctINT * FD2
++ );
++
++/* Create an endpoint for communication. */
++gceSTATUS
++gcoOS_Socket(
++ IN gcoOS Os,
++ IN gctINT Domain,
++ IN gctINT Type,
++ IN gctINT Protocol,
++ OUT gctINT *SockFd
++ );
++
++/* Close a socket. */
++gceSTATUS
++gcoOS_CloseSocket(
++ IN gcoOS Os,
++ IN gctINT SockFd
++ );
++
++/* Initiate a connection on a socket. */
++gceSTATUS
++gcoOS_Connect(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctCONST_POINTER HostName,
++ IN gctUINT Port);
++
++/* Shut down part of connection on a socket. */
++gceSTATUS
++gcoOS_Shutdown(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT How
++ );
++
++/* Send a message on a socket. */
++gceSTATUS
++gcoOS_Send(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data,
++ IN gctINT Flags
++ );
++
++/* Initiate a connection on a socket. */
++gceSTATUS
++gcoOS_WaitForSend(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT Seconds,
++ IN gctINT MicroSeconds);
++
++/* Get environment variable value. */
++gceSTATUS
++gcoOS_GetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ OUT gctSTRING * Value
++ );
++
++/* Set environment variable value. */
++gceSTATUS
++gcoOS_SetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ IN gctSTRING Value
++ );
++
++/* Get current working directory. */
++gceSTATUS
++gcoOS_GetCwd(
++ IN gcoOS Os,
++ IN gctINT SizeInBytes,
++ OUT gctSTRING Buffer
++ );
++
++/* Get file status info. */
++gceSTATUS
++gcoOS_Stat(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ OUT gctPOINTER Buffer
++ );
++
++typedef enum _gceFILE_WHENCE
++{
++ gcvFILE_SEEK_SET,
++ gcvFILE_SEEK_CUR,
++ gcvFILE_SEEK_END
++}
++gceFILE_WHENCE;
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_Seek(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Offset,
++ IN gceFILE_WHENCE Whence
++ );
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_SetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Position
++ );
++
++/* Get the current position of a file. */
++gceSTATUS
++gcoOS_GetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ OUT gctUINT32 * Position
++ );
++
++/* Same as strstr. */
++gceSTATUS
++gcoOS_StrStr(
++ IN gctCONST_STRING String,
++ IN gctCONST_STRING SubString,
++ OUT gctSTRING * Output
++ );
++
++/* Find the last occurance of a character inside a string. */
++gceSTATUS
++gcoOS_StrFindReverse(
++ IN gctCONST_STRING String,
++ IN gctINT8 Character,
++ OUT gctSTRING * Output
++ );
++
++gceSTATUS
++gcoOS_StrDup(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ OUT gctSTRING * Target
++ );
++
++/* Copy a string. */
++gceSTATUS
++gcoOS_StrCopySafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Append a string. */
++gceSTATUS
++gcoOS_StrCatSafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Compare two strings. */
++gceSTATUS
++gcoOS_StrCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2
++ );
++
++/* Compare characters of two strings. */
++gceSTATUS
++gcoOS_StrNCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2,
++ IN gctSIZE_T Count
++ );
++
++/* Convert string to float. */
++gceSTATUS
++gcoOS_StrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert hex string to integer. */
++gceSTATUS
++gcoOS_HexStrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++/* Convert hex string to float. */
++gceSTATUS
++gcoOS_HexStrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert string to integer. */
++gceSTATUS
++gcoOS_StrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++gceSTATUS
++gcoOS_MemCmp(
++ IN gctCONST_POINTER Memory1,
++ IN gctCONST_POINTER Memory2,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_PrintStrSafe(
++ OUT gctSTRING String,
++ IN gctSIZE_T StringSize,
++ IN OUT gctUINT * Offset,
++ IN gctCONST_STRING Format,
++ ...
++ );
++
++gceSTATUS
++gcoOS_LoadLibrary(
++ IN gcoOS Os,
++ IN gctCONST_STRING Library,
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeLibrary(
++ IN gcoOS Os,
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_GetProcAddress(
++ IN gcoOS Os,
++ IN gctHANDLE Handle,
++ IN gctCONST_STRING Name,
++ OUT gctPOINTER * Function
++ );
++
++gceSTATUS
++gcoOS_Compact(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_AddSignalHandler (
++ IN gceSignalHandlerType SignalHandlerType
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoOS_ProfileStart(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_ProfileEnd(
++ IN gcoOS Os,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_SetProfileSetting(
++ IN gcoOS Os,
++ IN gctBOOL Enable,
++ IN gctCONST_STRING FileName
++ );
++#endif
++
++gctBOOL
++gcoOS_IsNeededSupportNP2Texture(
++ IN gctCHAR* ProcName
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gcoOS_QueryVideoMemory(
++ IN gcoOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Detect if the process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByNamePid(
++ IN gctCONST_STRING Name,
++ IN gctHANDLE Pid
++ );
++
++/* Detect if the current process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByName(
++ IN gctCONST_STRING Name
++ );
++
++gceSTATUS
++gcoOS_DetectProcessByEncryptedName(
++ IN gctCONST_STRING Name
++ );
++
++#if defined(ANDROID)
++gceSTATUS
++gcoOS_DetectProgrameByEncryptedSymbols(
++ IN gcoOS_SymbolsList Symbols
++ );
++#endif
++
++/*----------------------------------------------------------------------------*/
++/*----- Atoms ----------------------------------------------------------------*/
++
++/* Construct an atom. */
++gceSTATUS
++gcoOS_AtomConstruct(
++ IN gcoOS Os,
++ OUT gcsATOM_PTR * Atom
++ );
++
++/* Destroy an atom. */
++gceSTATUS
++gcoOS_AtomDestroy(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom
++ );
++
++/* Increment an atom. */
++gceSTATUS
++gcoOS_AtomIncrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++/* Decrement an atom. */
++gceSTATUS
++gcoOS_AtomDecrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++gctHANDLE
++gcoOS_GetCurrentProcessID(
++ void
++ );
++
++gctHANDLE
++gcoOS_GetCurrentThreadID(
++ void
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Time -----------------------------------------------------------------*/
++
++/* Get the number of milliseconds since the system started. */
++gctUINT32
++gcoOS_GetTicks(
++ void
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gcoOS_GetTime(
++ gctUINT64_PTR Time
++ );
++
++/* Get CPU usage in microseconds. */
++gceSTATUS
++gcoOS_GetCPUTime(
++ gctUINT64_PTR CPUTime
++ );
++
++/* Get memory usage. */
++gceSTATUS
++gcoOS_GetMemoryUsage(
++ gctUINT32_PTR MaxRSS,
++ gctUINT32_PTR IxRSS,
++ gctUINT32_PTR IdRSS,
++ gctUINT32_PTR IsRSS
++ );
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gcoOS_Delay(
++ IN gcoOS Os,
++ IN gctUINT32 Delay
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Threads --------------------------------------------------------------*/
++
++#ifdef _WIN32
++/* Cannot include windows.h here becuase "near" and "far"
++ * which are used in gcsDEPTH_INFO, are defined to nothing in WinDef.h.
++ * So, use the real value of DWORD and WINAPI, instead.
++ * DWORD is unsigned long, and WINAPI is __stdcall.
++ * If these two are change in WinDef.h, the following two typdefs
++ * need to be changed, too.
++ */
++typedef unsigned long gctTHREAD_RETURN;
++typedef unsigned long (__stdcall * gcTHREAD_ROUTINE)(void * Argument);
++#else
++typedef void * gctTHREAD_RETURN;
++typedef void * (* gcTHREAD_ROUTINE)(void *);
++#endif
++
++/* Create a new thread. */
++gceSTATUS
++gcoOS_CreateThread(
++ IN gcoOS Os,
++ IN gcTHREAD_ROUTINE Worker,
++ IN gctPOINTER Argument,
++ OUT gctPOINTER * Thread
++ );
++
++/* Close a thread. */
++gceSTATUS
++gcoOS_CloseThread(
++ IN gcoOS Os,
++ IN gctPOINTER Thread
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Mutexes --------------------------------------------------------------*/
++
++/* Create a new mutex. */
++gceSTATUS
++gcoOS_CreateMutex(
++ IN gcoOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gcoOS_DeleteMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gcoOS_AcquireMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gcoOS_ReleaseMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Signals --------------------------------------------------------------*/
++
++/* Create a signal. */
++gceSTATUS
++gcoOS_CreateSignal(
++ IN gcoOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gcoOS_DestroySignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gcoOS_Signal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gcoOS_WaitSignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
++/* Map a signal from another process */
++gceSTATUS
++gcoOS_MapSignal(
++ IN gctSIGNAL RemoteSignal,
++ OUT gctSIGNAL * LocalSignal
++ );
++
++/* Unmap a signal mapped from another process */
++gceSTATUS
++gcoOS_UnmapSignal(
++ IN gctSIGNAL Signal
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Android Native Fence -------------------------------------------------*/
++
++/* Create sync point. */
++gceSTATUS
++gcoOS_CreateSyncPoint(
++ IN gcoOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++/* Destroy sync point. */
++gceSTATUS
++gcoOS_DestroySyncPoint(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++/* Create native fence. */
++gceSTATUS
++gcoOS_CreateNativeFence(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++/* Wait on native fence. */
++gceSTATUS
++gcoOS_WaitNativeFence(
++ IN gcoOS Os,
++ IN gctINT FenceFD,
++ IN gctUINT32 Timeout
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Memory Access and Cache ----------------------------------------------*/
++
++/* Write a register. */
++gceSTATUS
++gcoOS_WriteRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Read a register. */
++gceSTATUS
++gcoOS_ReadRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++gceSTATUS
++gcoOS_CacheClean(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheFlush(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheInvalidate(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_MemoryBarrier(
++ IN gcoOS Os,
++ IN gctPOINTER Logical
++ );
++
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ );
++
++gceSTATUS
++gcoOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gcoOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++#define _gcmPROFILE_INIT(prefix, freq, start) \
++ do { \
++ prefix ## OS_QueryProfileTickRate(&(freq)); \
++ prefix ## OS_GetProfileTick(&(start)); \
++ } while (gcvFALSE)
++
++#define _gcmPROFILE_QUERY(prefix, start, ticks) \
++ do { \
++ prefix ## OS_GetProfileTick(&(ticks)); \
++ (ticks) = ((ticks) > (start)) ? ((ticks) - (start)) \
++ : (~0ull - (start) + (ticks) + 1); \
++ } while (gcvFALSE)
++
++#if gcdENABLE_PROFILING
++# define gcmkPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gck, freq, start)
++# define gcmkPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gck, start, ticks)
++# define gcmPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gco, freq, start)
++# define gcmPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gco, start, ticks)
++# define gcmPROFILE_ONLY(x) x
++# define gcmPROFILE_ELSE(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ONLY(x) x
++# define gcmPROFILE_DECLARE_ELSE(x) typedef x
++#else
++# define gcmkPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmkPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_ELSE(x) x
++# define gcmPROFILE_DECLARE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ELSE(x) x
++#endif
++
++/*******************************************************************************
++** gcoMATH object
++*/
++
++#define gcdPI 3.14159265358979323846f
++
++/* Kernel. */
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++/* User. */
++gctUINT32
++gcoMATH_Log2in5dot5(
++ IN gctINT X
++ );
++
++
++gctFLOAT
++gcoMATH_UIntAsFloat(
++ IN gctUINT32 X
++ );
++
++gctUINT32
++gcoMATH_FloatAsUInt(
++ IN gctFLOAT X
++ );
++
++gctBOOL
++gcoMATH_CompareEqualF(
++ IN gctFLOAT X,
++ IN gctFLOAT Y
++ );
++
++gctUINT16
++gcoMATH_UInt8AsFloat16(
++ IN gctUINT8 X
++ );
++
++/******************************************************************************\
++**************************** Coordinate Structures *****************************
++\******************************************************************************/
++
++typedef struct _gcsPOINT
++{
++ gctINT32 x;
++ gctINT32 y;
++}
++gcsPOINT;
++
++typedef struct _gcsSIZE
++{
++ gctINT32 width;
++ gctINT32 height;
++}
++gcsSIZE;
++
++typedef struct _gcsRECT
++{
++ gctINT32 left;
++ gctINT32 top;
++ gctINT32 right;
++ gctINT32 bottom;
++}
++gcsRECT;
++
++typedef union _gcsPIXEL
++{
++ struct
++ {
++ gctFLOAT r, g, b, a;
++ gctFLOAT d, s;
++ } pf;
++
++ struct
++ {
++ gctINT32 r, g, b, a;
++ gctINT32 d, s;
++ } pi;
++
++ struct
++ {
++ gctUINT32 r, g, b, a;
++ gctUINT32 d, s;
++ } pui;
++
++} gcsPIXEL;
++
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- gcoSURF Common ------------------------------*/
++
++/* Color format classes. */
++typedef enum _gceFORMAT_CLASS
++{
++ gcvFORMAT_CLASS_RGBA = 4500,
++ gcvFORMAT_CLASS_YUV,
++ gcvFORMAT_CLASS_INDEX,
++ gcvFORMAT_CLASS_LUMINANCE,
++ gcvFORMAT_CLASS_BUMP,
++ gcvFORMAT_CLASS_DEPTH,
++}
++gceFORMAT_CLASS;
++
++/* Special enums for width field in gcsFORMAT_COMPONENT. */
++typedef enum _gceCOMPONENT_CONTROL
++{
++ gcvCOMPONENT_NOTPRESENT = 0x00,
++ gcvCOMPONENT_DONTCARE = 0x80,
++ gcvCOMPONENT_WIDTHMASK = 0x7F,
++ gcvCOMPONENT_ODD = 0x80
++}
++gceCOMPONENT_CONTROL;
++
++/* Color format component parameters. */
++typedef struct _gcsFORMAT_COMPONENT
++{
++ gctUINT8 start;
++ gctUINT8 width;
++}
++gcsFORMAT_COMPONENT;
++
++/* RGBA color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_RGBA
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT red;
++ gcsFORMAT_COMPONENT green;
++ gcsFORMAT_COMPONENT blue;
++}
++gcsFORMAT_CLASS_TYPE_RGBA;
++
++/* YUV color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_YUV
++{
++ gcsFORMAT_COMPONENT y;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT v;
++}
++gcsFORMAT_CLASS_TYPE_YUV;
++
++/* Index color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_INDEX
++{
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_INDEX;
++
++/* Luminance color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_LUMINANCE
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_LUMINANCE;
++
++/* Bump map color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_BUMP
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT l;
++ gcsFORMAT_COMPONENT v;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT q;
++ gcsFORMAT_COMPONENT w;
++}
++gcsFORMAT_CLASS_TYPE_BUMP;
++
++/* Depth and stencil format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_DEPTH
++{
++ gcsFORMAT_COMPONENT depth;
++ gcsFORMAT_COMPONENT stencil;
++}
++gcsFORMAT_CLASS_TYPE_DEPTH;
++
++/* Format parameters. */
++typedef struct _gcsSURF_FORMAT_INFO
++{
++ /* Format code and class. */
++ gceSURF_FORMAT format;
++ gceFORMAT_CLASS fmtClass;
++
++ /* The size of one pixel in bits. */
++ gctUINT8 bitsPerPixel;
++
++ /* Component swizzle. */
++ gceSURF_SWIZZLE swizzle;
++
++ /* Some formats have two neighbour pixels interleaved together. */
++ /* To describe such format, set the flag to 1 and add another */
++ /* like this one describing the odd pixel format. */
++ gctUINT8 interleaved;
++
++ /* Format components. */
++ union
++ {
++ gcsFORMAT_CLASS_TYPE_BUMP bump;
++ gcsFORMAT_CLASS_TYPE_RGBA rgba;
++ gcsFORMAT_CLASS_TYPE_YUV yuv;
++ gcsFORMAT_CLASS_TYPE_LUMINANCE lum;
++ gcsFORMAT_CLASS_TYPE_INDEX index;
++ gcsFORMAT_CLASS_TYPE_DEPTH depth;
++ } u;
++}
++gcsSURF_FORMAT_INFO;
++
++/* Frame buffer information. */
++typedef struct _gcsSURF_FRAMEBUFFER
++{
++ gctPOINTER logical;
++ gctUINT width, height;
++ gctINT stride;
++ gceSURF_FORMAT format;
++}
++gcsSURF_FRAMEBUFFER;
++
++typedef struct _gcsVIDMEM_NODE_SHARED_INFO
++{
++ gctBOOL tileStatusDisabled;
++ gcsPOINT SrcOrigin;
++ gcsPOINT DestOrigin;
++ gcsSIZE RectSize;
++ gctUINT32 clearValue;
++}
++gcsVIDMEM_NODE_SHARED_INFO;
++
++/* Generic pixel component descriptors. */
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XXX8;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XX8X;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_X8XX;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_8XXX;
++
++typedef enum _gceORIENTATION
++{
++ gcvORIENTATION_TOP_BOTTOM,
++ gcvORIENTATION_BOTTOM_TOP,
++}
++gceORIENTATION;
++
++
++/* Construct a new gcoSURF object. */
++gceSTATUS
++gcoSURF_Construct(
++ IN gcoHAL Hal,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++/* Destroy an gcoSURF object. */
++gceSTATUS
++gcoSURF_Destroy(
++ IN gcoSURF Surface
++ );
++
++/* Map user-allocated surface. */
++gceSTATUS
++gcoSURF_MapUserSurface(
++ IN gcoSURF Surface,
++ IN gctUINT Alignment,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Query vid mem node info. */
++gceSTATUS
++gcoSURF_QueryVidMemNode(
++ IN gcoSURF Surface,
++ OUT gctUINT64 * Node,
++ OUT gcePOOL * Pool,
++ OUT gctUINT_PTR Bytes
++ );
++
++/* Set the color type of the surface. */
++gceSTATUS
++gcoSURF_SetColorType(
++ IN gcoSURF Surface,
++ IN gceSURF_COLOR_TYPE ColorType
++ );
++
++/* Get the color type of the surface. */
++gceSTATUS
++gcoSURF_GetColorType(
++ IN gcoSURF Surface,
++ OUT gceSURF_COLOR_TYPE *ColorType
++ );
++
++/* Set the surface ration angle. */
++gceSTATUS
++gcoSURF_SetRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_SetPreRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_GetPreRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION *Rotation
++ );
++
++gceSTATUS
++gcoSURF_IsValid(
++ IN gcoSURF Surface
++ );
++
++#ifndef VIVANTE_NO_3D
++/* Verify and return the state of the tile status mechanism. */
++gceSTATUS
++gcoSURF_IsTileStatusSupported(
++ IN gcoSURF Surface
++ );
++
++/* Process tile status for the specified surface. */
++gceSTATUS
++gcoSURF_SetTileStatus(
++ IN gcoSURF Surface
++ );
++
++/* Enable tile status for the specified surface. */
++gceSTATUS
++gcoSURF_EnableTileStatus(
++ IN gcoSURF Surface
++ );
++
++/* Disable tile status for the specified surface. */
++gceSTATUS
++gcoSURF_DisableTileStatus(
++ IN gcoSURF Surface,
++ IN gctBOOL Decompress
++ );
++
++gceSTATUS
++gcoSURF_AlignResolveRect(
++ IN gcoSURF Surf,
++ IN gcsPOINT_PTR RectOrigin,
++ IN gcsPOINT_PTR RectSize,
++ OUT gcsPOINT_PTR AlignedOrigin,
++ OUT gcsPOINT_PTR AlignedSize
++ );
++#endif /* VIVANTE_NO_3D */
++
++/* Get surface size. */
++gceSTATUS
++gcoSURF_GetSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctUINT * Depth
++ );
++
++/* Get surface aligned sizes. */
++gceSTATUS
++gcoSURF_GetAlignedSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctINT * Stride
++ );
++
++/* Get alignments. */
++gceSTATUS
++gcoSURF_GetAlignment(
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT * AddressAlignment,
++ OUT gctUINT * XAlignment,
++ OUT gctUINT * YAlignment
++ );
++
++/* Get surface type and format. */
++gceSTATUS
++gcoSURF_GetFormat(
++ IN gcoSURF Surface,
++ OUT gceSURF_TYPE * Type,
++ OUT gceSURF_FORMAT * Format
++ );
++
++/* Get surface tiling. */
++gceSTATUS
++gcoSURF_GetTiling(
++ IN gcoSURF Surface,
++ OUT gceTILING * Tiling
++ );
++
++/* Lock the surface. */
++gceSTATUS
++gcoSURF_Lock(
++ IN gcoSURF Surface,
++ IN OUT gctUINT32 * Address,
++ IN OUT gctPOINTER * Memory
++ );
++
++/* Unlock the surface. */
++gceSTATUS
++gcoSURF_Unlock(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory
++ );
++
++/* Return pixel format parameters. */
++gceSTATUS
++gcoSURF_QueryFormat(
++ IN gceSURF_FORMAT Format,
++ OUT gcsSURF_FORMAT_INFO_PTR * Info
++ );
++
++/* Compute the color pixel mask. */
++gceSTATUS
++gcoSURF_ComputeColorMask(
++ IN gcsSURF_FORMAT_INFO_PTR Format,
++ OUT gctUINT32_PTR ColorMask
++ );
++
++/* Flush the surface. */
++gceSTATUS
++gcoSURF_Flush(
++ IN gcoSURF Surface
++ );
++
++/* Fill surface from it's tile status buffer. */
++gceSTATUS
++gcoSURF_FillFromTile(
++ IN gcoSURF Surface
++ );
++
++/* Check if surface needs a filler. */
++gceSTATUS gcoSURF_NeedFiller(IN gcoSURF Surface);
++
++/* Fill surface with a value. */
++gceSTATUS
++gcoSURF_Fill(
++ IN gcoSURF Surface,
++ IN gcsPOINT_PTR Origin,
++ IN gcsSIZE_PTR Size,
++ IN gctUINT32 Value,
++ IN gctUINT32 Mask
++ );
++
++/* Alpha blend two surfaces together. */
++gceSTATUS
++gcoSURF_Blend(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrig,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsSIZE_PTR Size,
++ IN gceSURF_BLEND_MODE Mode
++ );
++
++/* Create a new gcoSURF wrapper object. */
++gceSTATUS
++gcoSURF_ConstructWrapper(
++ IN gcoHAL Hal,
++ OUT gcoSURF * Surface
++ );
++
++/* Set the underlying buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Stride,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Set the underlying video buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetVideoBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Stride,
++ IN gctPOINTER *LogicalPlane1,
++ IN gctUINT32 *PhysicalPlane1
++ );
++
++/* Set the size of the surface in pixels and map the underlying buffer. */
++gceSTATUS
++gcoSURF_SetWindow(
++ IN gcoSURF Surface,
++ IN gctUINT X,
++ IN gctUINT Y,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Set width/height alignment of the surface directly and calculate stride/size. This is only for dri backend now. Please be careful before use. */
++gceSTATUS
++gcoSURF_SetAlignment(
++ IN gcoSURF Surface,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Increase reference count of the surface. */
++gceSTATUS
++gcoSURF_ReferenceSurface(
++ IN gcoSURF Surface
++ );
++
++/* Get surface reference count. */
++gceSTATUS
++gcoSURF_QueryReferenceCount(
++ IN gcoSURF Surface,
++ OUT gctINT32 * ReferenceCount
++ );
++
++/* Set surface orientation. */
++gceSTATUS
++gcoSURF_SetOrientation(
++ IN gcoSURF Surface,
++ IN gceORIENTATION Orientation
++ );
++
++/* Query surface orientation. */
++gceSTATUS
++gcoSURF_QueryOrientation(
++ IN gcoSURF Surface,
++ OUT gceORIENTATION * Orientation
++ );
++
++gceSTATUS
++gcoSURF_SetOffset(
++ IN gcoSURF Surface,
++ IN gctUINT Offset
++ );
++
++gceSTATUS
++gcoSURF_GetOffset(
++ IN gcoSURF Surface,
++ OUT gctUINT *Offset
++ );
++
++gceSTATUS
++gcoSURF_NODE_Cache(
++ IN gcsSURF_NODE_PTR Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Perform CPU cache operation on surface */
++gceSTATUS
++gcoSURF_CPUCacheOperation(
++ IN gcoSURF Surface,
++ IN gceCACHEOPERATION Operation
++ );
++
++
++gceSTATUS
++gcoSURF_SetLinearResolveAddress(
++ IN gcoSURF Surface,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory
++ );
++
++ gceSTATUS
++ gcoSURF_Swap(IN gcoSURF Surface1, IN gcoSURF Surface2);
++
++/******************************************************************************\
++********************************* gcoDUMP Object ********************************
++\******************************************************************************/
++
++/* Construct a new gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Construct(
++ IN gcoOS Os,
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++/* Destroy a gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Destroy(
++ IN gcoDUMP Dump
++ );
++
++/* Enable/disable dumping. */
++gceSTATUS
++gcoDUMP_Control(
++ IN gcoDUMP Dump,
++ IN gctSTRING FileName
++ );
++
++gceSTATUS
++gcoDUMP_IsEnabled(
++ IN gcoDUMP Dump,
++ OUT gctBOOL * Enabled
++ );
++
++/* Add surface. */
++gceSTATUS
++gcoDUMP_AddSurface(
++ IN gcoDUMP Dump,
++ IN gctINT32 Width,
++ IN gctINT32 Height,
++ IN gceSURF_FORMAT PixelFormat,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount
++ );
++
++/* Mark the beginning of a frame. */
++gceSTATUS
++gcoDUMP_FrameBegin(
++ IN gcoDUMP Dump
++ );
++
++/* Mark the end of a frame. */
++gceSTATUS
++gcoDUMP_FrameEnd(
++ IN gcoDUMP Dump
++ );
++
++/* Dump data. */
++gceSTATUS
++gcoDUMP_DumpData(
++ IN gcoDUMP Dump,
++ IN gceDUMP_TAG Type,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Delete an address. */
++gceSTATUS
++gcoDUMP_Delete(
++ IN gcoDUMP Dump,
++ IN gctUINT32 Address
++ );
++
++/* Enable dump or not. */
++gceSTATUS
++gcoDUMP_SetDumpFlag(
++ IN gctBOOL DumpState
++ );
++
++/******************************************************************************\
++******************************* gcsRECT Structure ******************************
++\******************************************************************************/
++
++/* Initialize rectangle structure. */
++gceSTATUS
++gcsRECT_Set(
++ OUT gcsRECT_PTR Rect,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Return the width of the rectangle. */
++gceSTATUS
++gcsRECT_Width(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Width
++ );
++
++/* Return the height of the rectangle. */
++gceSTATUS
++gcsRECT_Height(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Height
++ );
++
++/* Ensure that top left corner is to the left and above the right bottom. */
++gceSTATUS
++gcsRECT_Normalize(
++ IN OUT gcsRECT_PTR Rect
++ );
++
++/* Compare two rectangles. */
++gceSTATUS
++gcsRECT_IsEqual(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * Equal
++ );
++
++/* Compare the sizes of two rectangles. */
++gceSTATUS
++gcsRECT_IsOfEqualSize(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * EqualSize
++ );
++
++gceSTATUS
++gcsRECT_RelativeRotation(
++ IN gceSURF_ROTATION Orientation,
++ IN OUT gceSURF_ROTATION *Relation);
++
++gceSTATUS
++
++gcsRECT_Rotate(
++
++ IN OUT gcsRECT_PTR Rect,
++
++ IN gceSURF_ROTATION Rotation,
++
++ IN gceSURF_ROTATION toRotation,
++
++ IN gctINT32 SurfaceWidth,
++
++ IN gctINT32 SurfaceHeight
++
++ );
++
++/******************************************************************************\
++**************************** gcsBOUNDARY Structure *****************************
++\******************************************************************************/
++
++typedef struct _gcsBOUNDARY
++{
++ gctINT x;
++ gctINT y;
++ gctINT width;
++ gctINT height;
++}
++gcsBOUNDARY;
++
++/******************************************************************************\
++********************************* gcoHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gcoHEAP * gcoHEAP;
++
++/* Construct a new gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Construct(
++ IN gcoOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gcoHEAP * Heap
++ );
++
++/* Destroy an gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Destroy(
++ IN gcoHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoHEAP_Allocate(
++ IN gcoHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcoHEAP_GetMemorySize(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoHEAP_Free(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++#if (VIVANTE_PROFILER || gcdDEBUG)
++/* Profile the heap. */
++gceSTATUS
++gcoHEAP_ProfileStart(
++ IN gcoHEAP Heap
++ );
++
++gceSTATUS
++gcoHEAP_ProfileEnd(
++ IN gcoHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++
++/******************************************************************************\
++******************************* Debugging Macros *******************************
++\******************************************************************************/
++
++void
++gcoOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gcoOS_GetDebugLevel(
++ OUT gctUINT32_PTR DebugLevel
++ );
++
++void
++gcoOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_GetDebugZone(
++ IN gctUINT32 Zone,
++ OUT gctUINT32_PTR DebugZone
++ );
++
++void
++gcoOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gcoOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++gctFILE
++gcoOS_ReplaceDebugFile(
++ IN gctFILE fp
++ );
++
++/*******************************************************************************
++**
++** gcmFATAL
++**
++** Print a message to the debugger and execute a break point.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_FATAL)
++# define gcmFATAL gcoOS_DebugFatal
++# define gcmkFATAL gckOS_DebugFatal
++#elif gcdHAS_ELLIPSES
++# define gcmFATAL(...)
++# define gcmkFATAL(...)
++#else
++ gcmINLINE static void
++ __dummy_fatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmFATAL __dummy_fatal
++# define gcmkFATAL __dummy_fatal
++#endif
++
++#define gcmENUM2TEXT(e) case e: return #e
++
++/*******************************************************************************
++**
++** gcmTRACE
++**
++** Print a message to the debugfer if the correct level has been set. In
++** retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** level Level of message.
++** message Message.
++** ... Optional arguments.
++*/
++#define gcvLEVEL_NONE -1
++#define gcvLEVEL_ERROR 0
++#define gcvLEVEL_WARNING 1
++#define gcvLEVEL_INFO 2
++#define gcvLEVEL_VERBOSE 3
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE gcoOS_DebugTrace
++# define gcmkTRACE gckOS_DebugTrace
++# define gcmkTRACE_N gckOS_DebugTraceN
++#elif gcdHAS_ELLIPSES
++# define gcmTRACE(...)
++# define gcmkTRACE(...)
++# define gcmkTRACE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_n(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE __dummy_trace
++# define gcmkTRACE __dummy_trace
++# define gcmkTRACE_N __dummy_trace_n
++#endif
++
++/* Zones common for kernel and user. */
++#define gcvZONE_OS (1 << 0)
++#define gcvZONE_HARDWARE (1 << 1)
++#define gcvZONE_HEAP (1 << 2)
++#define gcvZONE_SIGNAL (1 << 27)
++
++/* Kernel zones. */
++#define gcvZONE_KERNEL (1 << 3)
++#define gcvZONE_VIDMEM (1 << 4)
++#define gcvZONE_COMMAND (1 << 5)
++#define gcvZONE_DRIVER (1 << 6)
++#define gcvZONE_CMODEL (1 << 7)
++#define gcvZONE_MMU (1 << 8)
++#define gcvZONE_EVENT (1 << 9)
++#define gcvZONE_DEVICE (1 << 10)
++#define gcvZONE_DATABASE (1 << 11)
++#define gcvZONE_INTERRUPT (1 << 12)
++#define gcvZONE_POWER (1 << 13)
++
++/* User zones. */
++#define gcvZONE_HAL (1 << 3)
++#define gcvZONE_BUFFER (1 << 4)
++#define gcvZONE_CONTEXT (1 << 5)
++#define gcvZONE_SURFACE (1 << 6)
++#define gcvZONE_INDEX (1 << 7)
++#define gcvZONE_STREAM (1 << 8)
++#define gcvZONE_TEXTURE (1 << 9)
++#define gcvZONE_2D (1 << 10)
++#define gcvZONE_3D (1 << 11)
++#define gcvZONE_COMPILER (1 << 12)
++#define gcvZONE_MEMORY (1 << 13)
++#define gcvZONE_STATE (1 << 14)
++#define gcvZONE_AUX (1 << 15)
++#define gcvZONE_VERTEX (1 << 16)
++#define gcvZONE_CL (1 << 17)
++#define gcvZONE_COMPOSITION (1 << 17)
++#define gcvZONE_VG (1 << 18)
++#define gcvZONE_IMAGE (1 << 19)
++#define gcvZONE_UTILITY (1 << 20)
++#define gcvZONE_PARAMETERS (1 << 21)
++
++/* API definitions. */
++#define gcvZONE_API_HAL (1 << 28)
++#define gcvZONE_API_EGL (2 << 28)
++#define gcvZONE_API_ES11 (3 << 28)
++#define gcvZONE_API_ES20 (4 << 28)
++#define gcvZONE_API_VG11 (5 << 28)
++#define gcvZONE_API_GL (6 << 28)
++#define gcvZONE_API_DFB (7 << 28)
++#define gcvZONE_API_GDI (8 << 28)
++#define gcvZONE_API_D3D (9 << 28)
++#define gcvZONE_API_ES30 (10 << 28)
++
++
++#define gcmZONE_GET_API(zone) ((zone) >> 28)
++/*Set gcdZONE_MASE like 0x0 | gcvZONE_API_EGL
++will enable print EGL module debug info*/
++#define gcdZONE_MASK 0x0FFFFFFF
++
++/* Handy zones. */
++#define gcvZONE_NONE 0
++#define gcvZONE_ALL 0x0FFFFFFF
++
++/*Dump API depth set 1 for API, 2 for API and API behavior*/
++#define gcvDUMP_API_DEPTH 1
++
++/*******************************************************************************
++**
++** gcmTRACE_ZONE
++**
++** Print a message to the debugger if the correct level and zone has been
++** set. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** Level Level of message.
++** Zone Zone of message.
++** Message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE_ZONE gcoOS_DebugTraceZone
++# define gcmkTRACE_ZONE gckOS_DebugTraceZone
++# define gcmkTRACE_ZONE_N gckOS_DebugTraceZoneN
++#elif gcdHAS_ELLIPSES
++# define gcmTRACE_ZONE(...)
++# define gcmkTRACE_ZONE(...)
++# define gcmkTRACE_ZONE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace_zone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_zone_n(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE_N __dummy_trace_zone_n
++#endif
++
++/*******************************************************************************
++**
++** gcmDEBUG_ONLY
++**
++** Execute a statement or function only in DEBUG mode.
++**
++** ARGUMENTS:
++**
++** f Statement or function to execute.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++# define gcmDEBUG_ONLY(f) f
++#else
++# define gcmDEBUG_ONLY(f)
++#endif
++
++/*******************************************************************************
++**
++** gcmSTACK_PUSH
++** gcmSTACK_POP
++** gcmSTACK_DUMP
++**
++** Push or pop a function with entry arguments on the trace stack.
++**
++** ARGUMENTS:
++**
++** Function Name of function.
++** Line Line number.
++** Text Optional text.
++** ... Optional arguments for text.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_STACK)
++ void
++ gcoOS_StackPush(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text,
++ ...
++ );
++ void
++ gcoOS_StackPop(
++ IN gctCONST_STRING Function
++ );
++ void
++ gcoOS_StackDump(
++ void
++ );
++# define gcmSTACK_PUSH gcoOS_StackPush
++# define gcmSTACK_POP gcoOS_StackPop
++# define gcmSTACK_DUMP gcoOS_StackDump
++#elif gcdHAS_ELLIPSES
++# define gcmSTACK_PUSH(...) do { } while (0)
++# define gcmSTACK_POP(Function) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++#else
++ gcmINLINE static void
++ __dummy_stack_push(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text, ...
++ )
++ {
++ }
++# define gcmSTACK_PUSH __dummy_stack_push
++# define gcmSTACK_POP(Function) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++#endif
++
++/******************************************************************************\
++******************************** Logging Macros ********************************
++\******************************************************************************/
++
++#define gcdHEADER_LEVEL gcvLEVEL_VERBOSE
++
++
++#if gcdENABLE_PROFILING
++void
++gcoOS_ProfileDB(
++ IN gctCONST_STRING Function,
++ IN OUT gctBOOL_PTR Initialized
++ );
++
++#define gcmHEADER() \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmHEADER_ARG(...) \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmFOOTER() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_ARG(...) \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(gcvNULL, gcvNULL)
++
++#else /* gcdENABLE_PROFILING */
++
++#if gcdHAS_ELLIPSES
++#define gcmHEADER() \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_header(void)
++ {
++ }
++# define gcmHEADER __dummy_header
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmHEADER_ARG(Text, ...) \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#else
++ gcmINLINE static void
++ __dummy_header_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmHEADER_ARG __dummy_header_arg
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmFOOTER() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmPROFILE_ONLY(gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d) [%llu,%llu]: status=%d(%s)", \
++ __FUNCTION__, __LINE__, \
++ __ticks__, __total__, \
++ status, gcoOS_DebugStatus2Name(status))); \
++ gcmPROFILE_ELSE(gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, \
++ status, gcoOS_DebugStatus2Name(status))); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer(void)
++ {
++ }
++# define gcmFOOTER __dummy_footer
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_no(void)
++ {
++ }
++# define gcmFOOTER_NO __dummy_footer_no
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_kill(void)
++ {
++ }
++# define gcmFOOTER_KILL __dummy_footer_kill
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmFOOTER_ARG(Text, ...) \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmFOOTER_ARG __dummy_footer_arg
++#endif
++
++#endif /* gcdENABLE_PROFILING */
++
++#if gcdHAS_ELLIPSES
++#define gcmkHEADER() \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_kheader(void)
++ {
++ }
++# define gcmkHEADER __dummy_kheader
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmkHEADER_ARG(Text, ...) \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#else
++ gcmINLINE static void
++ __dummy_kheader_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkHEADER_ARG __dummy_kheader_arg
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmkFOOTER() \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, status, gckOS_DebugStatus2Name(status)); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter(void)
++ {
++ }
++# define gcmkFOOTER __dummy_kfooter
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmkFOOTER_NO() \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_no(void)
++ {
++ }
++# define gcmkFOOTER_NO __dummy_kfooter_no
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmkFOOTER_ARG(Text, ...) \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, \
++ __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkFOOTER_ARG __dummy_kfooter_arg
++#endif
++
++#define gcmOPT_VALUE(ptr) (((ptr) == gcvNULL) ? 0 : *(ptr))
++#define gcmOPT_VALUE_INDEX(ptr, index) (((ptr) == gcvNULL) ? 0 : ptr[index])
++#define gcmOPT_POINTER(ptr) (((ptr) == gcvNULL) ? gcvNULL : *(ptr))
++#define gcmOPT_STRING(ptr) (((ptr) == gcvNULL) ? "(nil)" : (ptr))
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#define gcmPRINT gcoOS_Print
++#define gcmkPRINT gckOS_Print
++#define gcmkPRINT_N gckOS_PrintN
++
++#if gcdPRINT_VERSION
++# define gcmPRINT_VERSION() do { \
++ _gcmPRINT_VERSION(gcm); \
++ gcmSTACK_DUMP(); \
++ } while (0)
++# define gcmkPRINT_VERSION() _gcmPRINT_VERSION(gcmk)
++# define _gcmPRINT_VERSION(prefix) \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "Vivante HAL version %d.%d.%d build %d %s %s", \
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, \
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME )
++#else
++# define gcmPRINT_VERSION() do { gcmSTACK_DUMP(); } while (gcvFALSE)
++# define gcmkPRINT_VERSION() do { } while (gcvFALSE)
++#endif
++
++typedef enum _gceDUMP_BUFFER
++{
++ gceDUMP_BUFFER_CONTEXT,
++ gceDUMP_BUFFER_USER,
++ gceDUMP_BUFFER_KERNEL,
++ gceDUMP_BUFFER_LINK,
++ gceDUMP_BUFFER_WAITLINK,
++ gceDUMP_BUFFER_FROM_USER,
++}
++gceDUMP_BUFFER;
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ );
++
++#define gcmkDUMPBUFFER gckOS_DumpBuffer
++
++#if gcdDUMP_COMMAND
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage) \
++ gcmkDUMPBUFFER(Os, Buffer, Size, Type, CopyMessage)
++#else
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage)
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ );
++
++# define gcmkDEBUGFLUSH(DmaAddress) \
++ gckOS_DebugFlush(__FUNCTION__, __LINE__, DmaAddress)
++#else
++# define gcmkDEBUGFLUSH(DmaAddress)
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_FRAMERATE
++**
++** Print average frame rate
++**
++*/
++#if gcdDUMP_FRAMERATE
++ gceSTATUS
++ gcfDumpFrameRate(
++ void
++ );
++# define gcmDUMP_FRAMERATE gcfDumpFrameRate
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_FRAMERATE(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_frame_rate(
++ void
++ )
++ {
++ }
++# define gcmDUMP_FRAMERATE __dummy_dump_frame_rate
++#endif
++
++
++/*******************************************************************************
++**
++** gcmDUMP
++**
++** Print a dump message.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++#if gcdDUMP
++ gceSTATUS
++ gcfDump(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ ...
++ );
++# define gcmDUMP gcfDump
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP(...)
++#else
++ gcmINLINE static void
++ __dummy_dump(
++ IN gcoOS Os,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP __dummy_dump
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_DATA
++**
++** Add data to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++ gceSTATUS
++ gcfDumpData(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_DATA gcfDumpData
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_data(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_DATA __dummy_dump_data
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_BUFFER
++**
++** Print a buffer to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctUINT32 Physical
++** Physical address of buffer.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctUINT32 Offset
++** Offset into buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++gceSTATUS
++gcfDumpBuffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_BUFFER gcfDumpBuffer
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_BUFFER(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_buffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_BUFFER __dummy_dump_buffer
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API
++**
++** Print a dump message for a high level API prefixed by the function name.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++gceSTATUS gcfDumpApi(IN gctCONST_STRING String, ...);
++#if gcdDUMP_API
++# define gcmDUMP_API gcfDumpApi
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP_API __dummy_dump_api
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY
++**
++** Print an array of data.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Size.
++*/
++gceSTATUS gcfDumpArray(IN gctCONST_POINTER Data, IN gctUINT32 Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY gcfDumpArray
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_ARRAY(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY __dummy_dump_api_array
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY_TOKEN
++**
++** Print an array of data terminated by a token.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Termination.
++*/
++gceSTATUS gcfDumpArrayToken(IN gctCONST_POINTER Data, IN gctUINT32 Termination);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY_TOKEN gcfDumpArrayToken
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_ARRAY_TOKEN(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array_token(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Termination
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY_TOKEN __dummy_dump_api_array_token
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_DATA
++**
++** Print an array of bytes.
++**
++** ARGUMENTS:
++**
++** gctCONST_POINTER Pointer to array.
++** gctSIZE_T Size.
++*/
++gceSTATUS gcfDumpApiData(IN gctCONST_POINTER Data, IN gctSIZE_T Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_DATA gcfDumpApiData
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_data(
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Size
++ )
++ {
++ }
++# define gcmDUMP_API_DATA __dummy_dump_api_data
++#endif
++
++/*******************************************************************************
++**
++** gcmTRACE_RELEASE
++**
++** Print a message to the shader debugger.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++#define gcmTRACE_RELEASE gcoOS_DebugShaderTrace
++
++void
++gcoOS_DebugShaderTrace(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_SetDebugShaderFiles(
++ IN gctCONST_STRING VSFileName,
++ IN gctCONST_STRING FSFileName
++ );
++
++void
++gcoOS_SetDebugShaderFileType(
++ IN gctUINT32 ShaderType
++ );
++
++void
++gcoOS_EnableDebugBuffer(
++ IN gctBOOL Enable
++ );
++
++/*******************************************************************************
++**
++** gcmBREAK
++**
++** Break into the debugger. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** None.
++*/
++
++void
++gcoOS_DebugBreak(
++ void
++ );
++
++void
++gckOS_DebugBreak(
++ void
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_BREAK)
++# define gcmBREAK gcoOS_DebugBreak
++# define gcmkBREAK gckOS_DebugBreak
++#else
++# define gcmBREAK()
++# define gcmkBREAK()
++#endif
++
++/*******************************************************************************
++**
++** gcmASSERT
++**
++** Evaluate an expression and break into the debugger if the expression
++** evaluates to false. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define _gcmASSERT(prefix, exp) \
++ do \
++ { \
++ if (!(exp)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ASSERT at %s(%d)", \
++ __FUNCTION__, __LINE__); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "(%s)", #exp); \
++ prefix##BREAK(); \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmASSERT(exp) _gcmASSERT(gcm, exp)
++# define gcmkASSERT(exp) _gcmASSERT(gcmk, exp)
++#else
++# define gcmASSERT(exp)
++# define gcmkASSERT(exp)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY
++**
++** Verify if an expression returns true. If the expression does not
++** evaluates to true, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY(exp) gcmASSERT(exp)
++# define gcmkVERIFY(exp) gcmkASSERT(exp)
++#else
++# define gcmVERIFY(exp) exp
++# define gcmkVERIFY(exp) exp
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_OK
++**
++** Verify a fucntion returns gcvSTATUS_OK. If the function does not return
++** gcvSTATUS_OK, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++
++void
++gcoOS_Verify(
++ IN gceSTATUS status
++ );
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ gcoOS_Verify(verifyStatus); \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gcmASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++# define gcmkVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmkTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmkVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gckOS_Verify(verifyStatus); \
++ gcmkASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++#else
++# define gcmVERIFY_OK(func) func
++# define gcmkVERIFY_OK(func) func
++#endif
++
++gctCONST_STRING
++gcoOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++/*******************************************************************************
++**
++** gcmERR_BREAK
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_BREAK(func) _gcmERR_BREAK(gcm, func)
++#define gcmkERR_BREAK(func) _gcmkERR_BREAK(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmERR_RETURN
++**
++** Executes a return on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_RETURN(func) _gcmERR_RETURN(gcm, func)
++#define gcmkERR_RETURN(func) _gcmkERR_RETURN(gcmk, func)
++
++
++/*******************************************************************************
++**
++** gcmONERROR
++**
++** Jump to the error handler in case there is an error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmONERROR(func) _gcmONERROR(gcm, func)
++#define gcmkONERROR(func) _gcmkONERROR(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmVERIFY_LOCK
++**
++** Verifies whether the surface is locked.
++**
++** ARGUMENTS:
++**
++** surfaceInfo Pointer to the surface iniformational structure.
++*/
++#define gcmVERIFY_LOCK(surfaceInfo) \
++ if (!surfaceInfo->node.valid) \
++ { \
++ gcmONERROR(gcvSTATUS_MEMORY_UNLOCKED); \
++ } \
++
++/*******************************************************************************
++**
++** gcmVERIFY_NODE_LOCK
++**
++** Verifies whether the surface node is locked.
++**
++** ARGUMENTS:
++**
++** surfaceInfo Pointer to the surface iniformational structure.
++*/
++#define gcmVERIFY_NODE_LOCK(surfaceNode) \
++ if (!(surfaceNode)->valid) \
++ { \
++ status = gcvSTATUS_MEMORY_UNLOCKED; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmBADOBJECT_BREAK
++**
++** Executes a break statement on bad object.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#define gcmBADOBJECT_BREAK(obj, t) \
++ if ((obj == gcvNULL) \
++ || (((gcsOBJECT *)(obj))->type != t) \
++ ) \
++ { \
++ status = gcvSTATUS_INVALID_OBJECT; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmCHECK_STATUS
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gcoOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gckOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmCHECK_STATUS(func) _gcmCHECK_STATUS(gcm, func)
++#define gcmkCHECK_STATUS(func) _gcmkCHECK_STATUS(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT
++**
++** Assert if an argument does not apply to the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT(prefix, arg) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, #prefix "VERIFY_ARGUMENT failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); \
++ return gcvSTATUS_INVALID_ARGUMENT; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg)
++
++/*******************************************************************************
++**
++** gcmDEBUG_VERIFY_ARGUMENT
++**
++** Works just like gcmVERIFY_ARGUMENT, but is only valid in debug mode.
++** Use this to verify arguments inside non-public API functions.
++*/
++#if gcdDEBUG
++# define gcmDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg) _gcmkVERIFY_ARGUMENT(gcm, arg)
++#else
++# define gcmDEBUG_VERIFY_ARGUMENT(arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT_RETURN
++**
++** Assert if an argument does not apply to the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT_RETURN(prefix, arg, value) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "gcmVERIFY_ARGUMENT_RETURN failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("value=%d", value); \
++ return value; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcm, arg, value)
++# define gcmkVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcmk, arg, value)
++
++#define MAX_LOOP_COUNT 0x7FFFFFFF
++
++/******************************************************************************\
++****************************** User Debug Option ******************************
++\******************************************************************************/
++
++/* User option. */
++typedef enum _gceDEBUG_MSG
++{
++ gcvDEBUG_MSG_NONE,
++ gcvDEBUG_MSG_ERROR,
++ gcvDEBUG_MSG_WARNING
++}
++gceDEBUG_MSG;
++
++typedef struct _gcsUSER_DEBUG_OPTION
++{
++ gceDEBUG_MSG debugMsg;
++}
++gcsUSER_DEBUG_OPTION;
++
++gcsUSER_DEBUG_OPTION *
++gcGetUserDebugOption(
++ void
++ );
++
++struct _gcoOS_SymbolsList
++{
++ gcePATCH_ID patchId;
++ const char * symList[10];
++};
++
++#if gcdHAS_ELLIPSES
++#define gcmUSER_DEBUG_MSG(level, ...) \
++ do \
++ { \
++ if (level <= gcGetUserDebugOption()->debugMsg) \
++ { \
++ gcoOS_Print(__VA_ARGS__); \
++ } \
++ } while (gcvFALSE)
++
++#define gcmUSER_DEBUG_ERROR_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_ERROR, "Error: " __VA_ARGS__)
++#define gcmUSER_DEBUG_WARNING_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_WARNING, "Warring: " __VA_ARGS__)
++#else
++#define gcmUSER_DEBUG_MSG
++#define gcmUSER_DEBUG_ERROR_MSG
++#define gcmUSER_DEBUG_WARNING_MSG
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_base_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_compiler.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_compiler.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_compiler.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_compiler.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,4298 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++/*
++** Include file the defines the front- and back-end compilers, as well as the
++** objects they use.
++*/
++
++#ifndef __gc_hal_compiler_h_
++#define __gc_hal_compiler_h_
++
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_types.h"
++#include "gc_hal_engine.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#ifndef GC_ENABLE_LOADTIME_OPT
++#define GC_ENABLE_LOADTIME_OPT 1
++#endif
++
++#define TEMP_OPT_CONSTANT_TEXLD_COORD 0
++
++#define TEMP_SHADER_PATCH 1
++
++#define TEMP_INLINE_ALL_EXPANSION 1
++/******************************* IR VERSION ******************/
++#define gcdSL_IR_VERSION gcmCC('\0','\0','\0','\1')
++
++/******************************************************************************\
++|******************************* SHADER LANGUAGE ******************************|
++\******************************************************************************/
++
++ /* allocator/deallocator function pointer */
++typedef gceSTATUS (*gctAllocatorFunc)(
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++typedef gceSTATUS (*gctDeallocatorFunc)(
++ IN gctPOINTER Memory
++ );
++
++typedef gctBOOL (*compareFunc) (
++ IN void * data,
++ IN void * key
++ );
++
++typedef struct _gcsListNode gcsListNode;
++struct _gcsListNode
++{
++ gcsListNode * next;
++ void * data;
++};
++
++typedef struct _gcsAllocator
++{
++ gctAllocatorFunc allocate;
++ gctDeallocatorFunc deallocate;
++} gcsAllocator;
++
++/* simple map structure */
++typedef struct _SimpleMap SimpleMap;
++struct _SimpleMap
++{
++ gctUINT32 key;
++ gctUINT32 val;
++ SimpleMap *next;
++ gcsAllocator *allocator;
++
++};
++
++/* SimpleMap Operations */
++/* return -1 if not found, otherwise return the mapped value */
++gctUINT32
++gcSimpleMap_Find(
++ IN SimpleMap *Map,
++ IN gctUINT32 Key
++ );
++
++gceSTATUS
++gcSimpleMap_Destory(
++ IN SimpleMap * Map,
++ IN gcsAllocator * Allocator
++ );
++
++/* Add a pair <Key, Val> to the Map head, the user should be aware that the
++ * map pointer is always changed when adding a new node :
++ *
++ * gcSimpleMap_AddNode(&theMap, key, val, allocator);
++ *
++ */
++gceSTATUS
++gcSimpleMap_AddNode(
++ IN SimpleMap ** Map,
++ IN gctUINT32 Key,
++ IN gctUINT32 Val,
++ IN gcsAllocator * Allocator
++ );
++
++/* gcsList data structure and related operations */
++typedef struct _gcsList
++{
++ gcsListNode *head;
++ gcsListNode *tail;
++ gctINT count;
++ gcsAllocator *allocator;
++} gcsList;
++
++/* List operations */
++void
++gcList_Init(
++ IN gcsList *list,
++ IN gcsAllocator *allocator
++ );
++
++gceSTATUS
++gcList_CreateNode(
++ IN void * Data,
++ IN gctAllocatorFunc Allocator,
++ OUT gcsListNode ** ListNode
++ );
++
++gceSTATUS
++gcList_Clean(
++ IN gcsList * List,
++ IN gctBOOL FreeData
++ );
++
++gcsListNode *
++gcList_FindNode(
++ IN gcsList * List,
++ IN void * Key,
++ IN compareFunc compare
++ );
++
++gceSTATUS
++gcList_AddNode(
++ IN gcsList * List,
++ IN void * Data
++ );
++
++gceSTATUS
++gcList_RemoveNode(
++ IN gcsList * List,
++ IN gcsListNode * Node
++ );
++
++/* link list structure for code list */
++typedef gcsList gcsCodeList;
++typedef gcsCodeList * gctCodeList;
++typedef gcsListNode gcsCodeListNode;
++
++/* Possible shader language opcodes. */
++typedef enum _gcSL_OPCODE
++{
++ gcSL_NOP, /* 0x00 */
++ gcSL_MOV, /* 0x01 */
++ gcSL_SAT, /* 0x02 */
++ gcSL_DP3, /* 0x03 */
++ gcSL_DP4, /* 0x04 */
++ gcSL_ABS, /* 0x05 */
++ gcSL_JMP, /* 0x06 */
++ gcSL_ADD, /* 0x07 */
++ gcSL_MUL, /* 0x08 */
++ gcSL_RCP, /* 0x09 */
++ gcSL_SUB, /* 0x0A */
++ gcSL_KILL, /* 0x0B */
++ gcSL_TEXLD, /* 0x0C */
++ gcSL_CALL, /* 0x0D */
++ gcSL_RET, /* 0x0E */
++ gcSL_NORM, /* 0x0F */
++ gcSL_MAX, /* 0x10 */
++ gcSL_MIN, /* 0x11 */
++ gcSL_POW, /* 0x12 */
++ gcSL_RSQ, /* 0x13 */
++ gcSL_LOG, /* 0x14 */
++ gcSL_FRAC, /* 0x15 */
++ gcSL_FLOOR, /* 0x16 */
++ gcSL_CEIL, /* 0x17 */
++ gcSL_CROSS, /* 0x18 */
++ gcSL_TEXLDP, /* 0x19 */
++ gcSL_TEXBIAS, /* 0x1A */
++ gcSL_TEXGRAD, /* 0x1B */
++ gcSL_TEXLOD, /* 0x1C */
++ gcSL_SIN, /* 0x1D */
++ gcSL_COS, /* 0x1E */
++ gcSL_TAN, /* 0x1F */
++ gcSL_EXP, /* 0x20 */
++ gcSL_SIGN, /* 0x21 */
++ gcSL_STEP, /* 0x22 */
++ gcSL_SQRT, /* 0x23 */
++ gcSL_ACOS, /* 0x24 */
++ gcSL_ASIN, /* 0x25 */
++ gcSL_ATAN, /* 0x26 */
++ gcSL_SET, /* 0x27 */
++ gcSL_DSX, /* 0x28 */
++ gcSL_DSY, /* 0x29 */
++ gcSL_FWIDTH, /* 0x2A */
++ gcSL_DIV, /* 0x2B */
++ gcSL_MOD, /* 0x2C */
++ gcSL_AND_BITWISE, /* 0x2D */
++ gcSL_OR_BITWISE, /* 0x2E */
++ gcSL_XOR_BITWISE, /* 0x2F */
++ gcSL_NOT_BITWISE, /* 0x30 */
++ gcSL_LSHIFT, /* 0x31 */
++ gcSL_RSHIFT, /* 0x32 */
++ gcSL_ROTATE, /* 0x33 */
++ gcSL_BITSEL, /* 0x34 */
++ gcSL_LEADZERO, /* 0x35 */
++ gcSL_LOAD, /* 0x36 */
++ gcSL_STORE, /* 0x37 */
++ gcSL_BARRIER, /* 0x38 */
++ gcSL_STORE1, /* 0x39 */
++ gcSL_ATOMADD, /* 0x3A */
++ gcSL_ATOMSUB, /* 0x3B */
++ gcSL_ATOMXCHG, /* 0x3C */
++ gcSL_ATOMCMPXCHG, /* 0x3D */
++ gcSL_ATOMMIN, /* 0x3E */
++ gcSL_ATOMMAX, /* 0x3F */
++ gcSL_ATOMOR, /* 0x40 */
++ gcSL_ATOMAND, /* 0x41 */
++ gcSL_ATOMXOR, /* 0x42 */
++ /*gcSL_UNUSED, 0x43 */
++ /*gcSL_UNUSED, 0x44 */
++ /*gcSL_UNUSED, 0x45 */
++ /*gcSL_UNUSED, 0x46 */
++ /*gcSL_UNUSED, 0x47 */
++ /*gcSL_UNUSED, 0x48 */
++ /*gcSL_UNUSED, 0x49 */
++ /*gcSL_UNUSED, 0x4A */
++ /*gcSL_UNUSED, 0x4B */
++ /*gcSL_UNUSED, 0x4C */
++ /*gcSL_UNUSED, 0x4D */
++ /*gcSL_UNUSED, 0x4E */
++ /*gcSL_UNUSED, 0x4F */
++ /*gcSL_UNUSED, 0x50 */
++ /*gcSL_UNUSED, 0x51 */
++ /*gcSL_UNUSED, 0x52 */
++ gcSL_ADDLO = 0x53, /* 0x53 */ /* Float only. */
++ gcSL_MULLO, /* 0x54 */ /* Float only. */
++ gcSL_CONV, /* 0x55 */
++ gcSL_GETEXP, /* 0x56 */
++ gcSL_GETMANT, /* 0x57 */
++ gcSL_MULHI, /* 0x58 */ /* Integer only. */
++ gcSL_CMP, /* 0x59 */
++ gcSL_I2F, /* 0x5A */
++ gcSL_F2I, /* 0x5B */
++ gcSL_ADDSAT, /* 0x5C */ /* Integer only. */
++ gcSL_SUBSAT, /* 0x5D */ /* Integer only. */
++ gcSL_MULSAT, /* 0x5E */ /* Integer only. */
++ gcSL_DP2, /* 0x5F */
++ gcSL_MAXOPCODE
++}
++gcSL_OPCODE;
++
++typedef enum _gcSL_FORMAT
++{
++ gcSL_FLOAT = 0, /* 0 */
++ gcSL_INTEGER = 1, /* 1 */
++ gcSL_INT32 = 1, /* 1 */
++ gcSL_BOOLEAN = 2, /* 2 */
++ gcSL_UINT32 = 3, /* 3 */
++ gcSL_INT8, /* 4 */
++ gcSL_UINT8, /* 5 */
++ gcSL_INT16, /* 6 */
++ gcSL_UINT16, /* 7 */
++ gcSL_INT64, /* 8 */ /* Reserved for future enhancement. */
++ gcSL_UINT64, /* 9 */ /* Reserved for future enhancement. */
++ gcSL_INT128, /* 10 */ /* Reserved for future enhancement. */
++ gcSL_UINT128, /* 11 */ /* Reserved for future enhancement. */
++ gcSL_FLOAT16, /* 12 */
++ gcSL_FLOAT64, /* 13 */ /* Reserved for future enhancement. */
++ gcSL_FLOAT128, /* 14 */ /* Reserved for future enhancement. */
++}
++gcSL_FORMAT;
++
++/* Destination write enable bits. */
++typedef enum _gcSL_ENABLE
++{
++ gcSL_ENABLE_NONE = 0x0, /* none is enabled, error/uninitialized state */
++ gcSL_ENABLE_X = 0x1,
++ gcSL_ENABLE_Y = 0x2,
++ gcSL_ENABLE_Z = 0x4,
++ gcSL_ENABLE_W = 0x8,
++ /* Combinations. */
++ gcSL_ENABLE_XY = gcSL_ENABLE_X | gcSL_ENABLE_Y,
++ gcSL_ENABLE_XYZ = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_Z,
++ gcSL_ENABLE_XYZW = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_XYW = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_W,
++ gcSL_ENABLE_XZ = gcSL_ENABLE_X | gcSL_ENABLE_Z,
++ gcSL_ENABLE_XZW = gcSL_ENABLE_X | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_XW = gcSL_ENABLE_X | gcSL_ENABLE_W,
++ gcSL_ENABLE_YZ = gcSL_ENABLE_Y | gcSL_ENABLE_Z,
++ gcSL_ENABLE_YZW = gcSL_ENABLE_Y | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_YW = gcSL_ENABLE_Y | gcSL_ENABLE_W,
++ gcSL_ENABLE_ZW = gcSL_ENABLE_Z | gcSL_ENABLE_W,
++}
++gcSL_ENABLE;
++
++/* Possible indices. */
++typedef enum _gcSL_INDEXED
++{
++ gcSL_NOT_INDEXED, /* 0 */
++ gcSL_INDEXED_X, /* 1 */
++ gcSL_INDEXED_Y, /* 2 */
++ gcSL_INDEXED_Z, /* 3 */
++ gcSL_INDEXED_W, /* 4 */
++}
++gcSL_INDEXED;
++
++/* Opcode conditions. */
++typedef enum _gcSL_CONDITION
++{
++ gcSL_ALWAYS, /* 0x0 */
++ gcSL_NOT_EQUAL, /* 0x1 */
++ gcSL_LESS_OR_EQUAL, /* 0x2 */
++ gcSL_LESS, /* 0x3 */
++ gcSL_EQUAL, /* 0x4 */
++ gcSL_GREATER, /* 0x5 */
++ gcSL_GREATER_OR_EQUAL, /* 0x6 */
++ gcSL_AND, /* 0x7 */
++ gcSL_OR, /* 0x8 */
++ gcSL_XOR, /* 0x9 */
++ gcSL_NOT_ZERO, /* 0xA */
++}
++gcSL_CONDITION;
++
++/* Possible source operand types. */
++typedef enum _gcSL_TYPE
++{
++ gcSL_NONE, /* 0x0 */
++ gcSL_TEMP, /* 0x1 */
++ gcSL_ATTRIBUTE, /* 0x2 */
++ gcSL_UNIFORM, /* 0x3 */
++ gcSL_SAMPLER, /* 0x4 */
++ gcSL_CONSTANT, /* 0x5 */
++ gcSL_OUTPUT, /* 0x6 */
++ gcSL_PHYSICAL, /* 0x7 */
++}
++gcSL_TYPE;
++
++/* Swizzle generator macro. */
++#define gcmSWIZZLE(Component1, Component2, Component3, Component4) \
++( \
++ (gcSL_SWIZZLE_ ## Component1 << 0) | \
++ (gcSL_SWIZZLE_ ## Component2 << 2) | \
++ (gcSL_SWIZZLE_ ## Component3 << 4) | \
++ (gcSL_SWIZZLE_ ## Component4 << 6) \
++)
++
++#define gcmExtractSwizzle(Swizzle, Index) \
++ ((gcSL_SWIZZLE) ((((Swizzle) >> (Index * 2)) & 0x3)))
++
++#define gcmComposeSwizzle(SwizzleX, SwizzleY, SwizzleZ, SwizzleW) \
++( \
++ ((SwizzleX) << 0) | \
++ ((SwizzleY) << 2) | \
++ ((SwizzleZ) << 4) | \
++ ((SwizzleW) << 6) \
++)
++
++/* Possible swizzle values. */
++typedef enum _gcSL_SWIZZLE
++{
++ gcSL_SWIZZLE_X, /* 0x0 */
++ gcSL_SWIZZLE_Y, /* 0x1 */
++ gcSL_SWIZZLE_Z, /* 0x2 */
++ gcSL_SWIZZLE_W, /* 0x3 */
++ /* Combinations. */
++ gcSL_SWIZZLE_XXXX = gcmSWIZZLE(X, X, X, X),
++ gcSL_SWIZZLE_YYYY = gcmSWIZZLE(Y, Y, Y, Y),
++ gcSL_SWIZZLE_ZZZZ = gcmSWIZZLE(Z, Z, Z, Z),
++ gcSL_SWIZZLE_WWWW = gcmSWIZZLE(W, W, W, W),
++ gcSL_SWIZZLE_XYYY = gcmSWIZZLE(X, Y, Y, Y),
++ gcSL_SWIZZLE_XZZZ = gcmSWIZZLE(X, Z, Z, Z),
++ gcSL_SWIZZLE_XWWW = gcmSWIZZLE(X, W, W, W),
++ gcSL_SWIZZLE_YZZZ = gcmSWIZZLE(Y, Z, Z, Z),
++ gcSL_SWIZZLE_YWWW = gcmSWIZZLE(Y, W, W, W),
++ gcSL_SWIZZLE_ZWWW = gcmSWIZZLE(Z, W, W, W),
++ gcSL_SWIZZLE_XYZZ = gcmSWIZZLE(X, Y, Z, Z),
++ gcSL_SWIZZLE_XYWW = gcmSWIZZLE(X, Y, W, W),
++ gcSL_SWIZZLE_XZWW = gcmSWIZZLE(X, Z, W, W),
++ gcSL_SWIZZLE_YZWW = gcmSWIZZLE(Y, Z, W, W),
++ gcSL_SWIZZLE_XXYZ = gcmSWIZZLE(X, X, Y, Z),
++ gcSL_SWIZZLE_XYZW = gcmSWIZZLE(X, Y, Z, W),
++ gcSL_SWIZZLE_XYXY = gcmSWIZZLE(X, Y, X, Y),
++ gcSL_SWIZZLE_YYZZ = gcmSWIZZLE(Y, Y, Z, Z),
++ gcSL_SWIZZLE_YYWW = gcmSWIZZLE(Y, Y, W, W),
++ gcSL_SWIZZLE_ZZZW = gcmSWIZZLE(Z, Z, Z, W),
++ gcSL_SWIZZLE_XZZW = gcmSWIZZLE(X, Z, Z, W),
++ gcSL_SWIZZLE_YYZW = gcmSWIZZLE(Y, Y, Z, W),
++
++ gcSL_SWIZZLE_INVALID = 0x7FFFFFFF
++}
++gcSL_SWIZZLE;
++
++typedef enum _gcSL_COMPONENT
++{
++ gcSL_COMPONENT_X, /* 0x0 */
++ gcSL_COMPONENT_Y, /* 0x1 */
++ gcSL_COMPONENT_Z, /* 0x2 */
++ gcSL_COMPONENT_W, /* 0x3 */
++ gcSL_COMPONENT_COUNT /* 0x4 */
++} gcSL_COMPONENT;
++
++#define gcmIsComponentEnabled(Enable, Component) (((Enable) & (1 << (Component))) != 0)
++
++/******************************************************************************\
++|*********************************** SHADERS **********************************|
++\******************************************************************************/
++
++/* Shader types. */
++typedef enum _gcSHADER_KIND {
++ gcSHADER_TYPE_UNKNOWN = 0,
++ gcSHADER_TYPE_VERTEX,
++ gcSHADER_TYPE_FRAGMENT,
++ gcSHADER_TYPE_CL,
++ gcSHADER_TYPE_PRECOMPILED,
++ gcSHADER_KIND_COUNT
++} gcSHADER_KIND;
++
++typedef enum _gcGL_DRIVER_VERSION {
++ gcGL_DRIVER_ES11, /* OpenGL ES 1.1 */
++ gcGL_DRIVER_ES20, /* OpenGL ES 2.0 */
++ gcGL_DRIVER_ES30 /* OpenGL ES 3.0 */
++} gcGL_DRIVER_VERSION;
++
++/* gcSHADER objects. */
++typedef struct _gcSHADER * gcSHADER;
++typedef struct _gcATTRIBUTE * gcATTRIBUTE;
++typedef struct _gcUNIFORM * gcUNIFORM;
++typedef struct _gcOUTPUT * gcOUTPUT;
++typedef struct _gcsFUNCTION * gcFUNCTION;
++typedef struct _gcsKERNEL_FUNCTION * gcKERNEL_FUNCTION;
++typedef struct _gcsHINT * gcsHINT_PTR;
++typedef struct _gcSHADER_PROFILER * gcSHADER_PROFILER;
++typedef struct _gcVARIABLE * gcVARIABLE;
++
++struct _gcsHINT
++{
++ /* Numbr of data transfers for Vertex Shader output. */
++ gctUINT32 vsOutputCount;
++
++ /* Flag whether the VS has point size or not. */
++ gctBOOL vsHasPointSize;
++
++#if gcdUSE_WCLIP_PATCH
++ /* Flag whether the VS gl_position.z depends on gl_position.w
++ it's a hint for wclipping */
++ gctBOOL vsPositionZDependsOnW;
++#endif
++
++ gctBOOL clipW;
++
++ /* Flag whether or not the shader has a KILL instruction. */
++ gctBOOL hasKill;
++
++ /* Element count. */
++ gctUINT32 elementCount;
++
++ /* Component count. */
++ gctUINT32 componentCount;
++
++ /* Number of data transfers for Fragment Shader input. */
++ gctUINT32 fsInputCount;
++
++ /* Maximum number of temporary registers used in FS. */
++ gctUINT32 fsMaxTemp;
++
++ /* Maximum number of temporary registers used in VS. */
++ gctUINT32 vsMaxTemp;
++
++ /* Balance minimum. */
++ gctUINT32 balanceMin;
++
++ /* Balance maximum. */
++ gctUINT32 balanceMax;
++
++ /* Auto-shift balancing. */
++ gctBOOL autoShift;
++
++ /* Flag whether the PS outputs the depth value or not. */
++ gctBOOL psHasFragDepthOut;
++
++ /* Flag whether the ThreadWalker is in PS. */
++ gctBOOL threadWalkerInPS;
++
++ /* HW reg number for position of VS */
++ gctUINT32 hwRegNoOfSIVPos;
++
++#if gcdALPHA_KILL_IN_SHADER
++ /* States to set when alpha kill is enabled. */
++ gctUINT32 killStateAddress;
++ gctUINT32 alphaKillStateValue;
++ gctUINT32 colorKillStateValue;
++
++ /* Shader instructiuon. */
++ gctUINT32 killInstructionAddress;
++ gctUINT32 alphaKillInstruction[3];
++ gctUINT32 colorKillInstruction[3];
++#endif
++
++#if TEMP_SHADER_PATCH
++ gctUINT32 pachedShaderIdentifier;
++#endif
++};
++
++#if TEMP_SHADER_PATCH
++#define INVALID_SHADER_IDENTIFIER 0xFFFFFFFF
++#endif
++
++/* gcSHADER_TYPE enumeration. */
++typedef enum _gcSHADER_TYPE
++{
++ gcSHADER_FLOAT_X1 = 0, /* 0x00 */
++ gcSHADER_FLOAT_X2, /* 0x01 */
++ gcSHADER_FLOAT_X3, /* 0x02 */
++ gcSHADER_FLOAT_X4, /* 0x03 */
++ gcSHADER_FLOAT_2X2, /* 0x04 */
++ gcSHADER_FLOAT_3X3, /* 0x05 */
++ gcSHADER_FLOAT_4X4, /* 0x06 */
++ gcSHADER_BOOLEAN_X1, /* 0x07 */
++ gcSHADER_BOOLEAN_X2, /* 0x08 */
++ gcSHADER_BOOLEAN_X3, /* 0x09 */
++ gcSHADER_BOOLEAN_X4, /* 0x0A */
++ gcSHADER_INTEGER_X1, /* 0x0B */
++ gcSHADER_INTEGER_X2, /* 0x0C */
++ gcSHADER_INTEGER_X3, /* 0x0D */
++ gcSHADER_INTEGER_X4, /* 0x0E */
++ gcSHADER_SAMPLER_1D, /* 0x0F */
++ gcSHADER_SAMPLER_2D, /* 0x10 */
++ gcSHADER_SAMPLER_3D, /* 0x11 */
++ gcSHADER_SAMPLER_CUBIC, /* 0x12 */
++ gcSHADER_FIXED_X1, /* 0x13 */
++ gcSHADER_FIXED_X2, /* 0x14 */
++ gcSHADER_FIXED_X3, /* 0x15 */
++ gcSHADER_FIXED_X4, /* 0x16 */
++ gcSHADER_IMAGE_2D, /* 0x17 */ /* For OCL. */
++ gcSHADER_IMAGE_3D, /* 0x18 */ /* For OCL. */
++ gcSHADER_SAMPLER, /* 0x19 */ /* For OCL. */
++ gcSHADER_FLOAT_2X3, /* 0x1A */
++ gcSHADER_FLOAT_2X4, /* 0x1B */
++ gcSHADER_FLOAT_3X2, /* 0x1C */
++ gcSHADER_FLOAT_3X4, /* 0x1D */
++ gcSHADER_FLOAT_4X2, /* 0x1E */
++ gcSHADER_FLOAT_4X3, /* 0x1F */
++ gcSHADER_ISAMPLER_2D, /* 0x20 */
++ gcSHADER_ISAMPLER_3D, /* 0x21 */
++ gcSHADER_ISAMPLER_CUBIC, /* 0x22 */
++ gcSHADER_USAMPLER_2D, /* 0x23 */
++ gcSHADER_USAMPLER_3D, /* 0x24 */
++ gcSHADER_USAMPLER_CUBIC, /* 0x25 */
++ gcSHADER_SAMPLER_EXTERNAL_OES, /* 0x26 */
++
++ gcSHADER_UINT_X1, /* 0x27 */
++ gcSHADER_UINT_X2, /* 0x28 */
++ gcSHADER_UINT_X3, /* 0x29 */
++ gcSHADER_UINT_X4, /* 0x2A */
++
++ gcSHADER_UNKONWN_TYPE, /* do not add type after this */
++ gcSHADER_TYPE_COUNT /* must to change gcvShaderTypeInfo at the
++ * same time if you add any new type! */}
++gcSHADER_TYPE;
++
++typedef enum _gcSHADER_TYPE_KIND
++{
++ gceTK_UNKOWN,
++ gceTK_FLOAT,
++ gceTK_INT,
++ gceTK_UINT,
++ gceTK_BOOL,
++ gceTK_FIXED,
++ gceTK_SAMPLER,
++ gceTK_IMAGE,
++ gceTK_OTHER
++} gcSHADER_TYPE_KIND;
++
++typedef struct _gcSHADER_TYPEINFO
++{
++ gcSHADER_TYPE type; /* e.g. gcSHADER_FLOAT_2X4 */
++ gctINT components; /* e.g. 4 components */
++ gctINT rows; /* e.g. 2 rows */
++ gcSHADER_TYPE componentType; /* e.g. gcSHADER_FLOAT_X4 */
++ gcSHADER_TYPE_KIND kind; /* e.g. gceTK_FLOAT */
++ gctCONST_STRING name; /* e.g. "FLOAT_2X4" */
++} gcSHADER_TYPEINFO;
++
++extern gcSHADER_TYPEINFO gcvShaderTypeInfo[];
++
++#define gcmType_Comonents(Type) (gcvShaderTypeInfo[Type].components)
++#define gcmType_Rows(Type) (gcvShaderTypeInfo[Type].rows)
++#define gcmType_ComonentType(Type) (gcvShaderTypeInfo[Type].componentType)
++#define gcmType_Kind(Type) (gcvShaderTypeInfo[Type].kind)
++#define gcmType_Name(Type) (gcvShaderTypeInfo[Type].name)
++
++#define gcmType_isMatrix(type) (gcmType_Rows(type) > 1)
++
++typedef enum _gcSHADER_VAR_CATEGORY
++{
++ gcSHADER_VAR_CATEGORY_NORMAL = 0, /* primitive type and its array */
++ gcSHADER_VAR_CATEGORY_STRUCT = 1 /* structure */
++}
++gcSHADER_VAR_CATEGORY;
++
++typedef enum _gceTYPE_QUALIFIER
++{
++ gcvTYPE_QUALIFIER_NONE = 0x0, /* unqualified */
++ gcvTYPE_QUALIFIER_VOLATILE = 0x1, /* volatile */
++}gceTYPE_QUALIFIER;
++
++typedef gctUINT16 gctTYPE_QUALIFIER;
++
++#if GC_ENABLE_LOADTIME_OPT
++typedef struct _gcSHADER_TYPE_INFO
++{
++ gcSHADER_TYPE type; /* eg. gcSHADER_FLOAT_2X3 is the type */
++ gctCONST_STRING name; /* the name of the type: "gcSHADER_FLOAT_2X3" */
++ gcSHADER_TYPE baseType; /* its base type is gcSHADER_FLOAT_2 */
++ gctINT components; /* it has 2 components */
++ gctINT rows; /* and 3 rows */
++ gctINT size; /* the size in byte */
++} gcSHADER_TYPE_INFO;
++
++extern gcSHADER_TYPE_INFO shader_type_info[];
++
++enum gceLTCDumpOption {
++ gceLTC_DUMP_UNIFORM = 0x0001,
++ gceLTC_DUMP_EVALUATION = 0x0002,
++ gceLTC_DUMP_EXPESSION = 0x0004,
++ gceLTC_DUMP_COLLECTING = 0x0008,
++};
++
++gctBOOL gcDumpOption(gctINT Opt);
++
++#endif /* GC_ENABLE_LOADTIME_OPT */
++
++#define IS_MATRIX_TYPE(type) \
++ (((type >= gcSHADER_FLOAT_2X2) && (type <= gcSHADER_FLOAT_4X4)) || \
++ ((type >= gcSHADER_FLOAT_2X3) && (type <= gcSHADER_FLOAT_4X3)))
++
++/* gcSHADER_PRECISION enumeration. */
++typedef enum _gcSHADER_PRECISION
++{
++ gcSHADER_PRECISION_DEFAULT, /* 0x00 */
++ gcSHADER_PRECISION_HIGH, /* 0x01 */
++ gcSHADER_PRECISION_MEDIUM, /* 0x02 */
++ gcSHADER_PRECISION_LOW, /* 0x03 */
++}
++gcSHADER_PRECISION;
++
++/* Shader flags. */
++typedef enum _gceSHADER_FLAGS
++{
++ gcvSHADER_NO_OPTIMIZATION = 0x00,
++ gcvSHADER_DEAD_CODE = 0x01,
++ gcvSHADER_RESOURCE_USAGE = 0x02,
++ gcvSHADER_OPTIMIZER = 0x04,
++ gcvSHADER_USE_GL_Z = 0x08,
++ /*
++ The GC family of GPU cores model GC860 and under require the Z
++ to be from 0 <= z <= w.
++ However, OpenGL specifies the Z to be from -w <= z <= w. So we
++ have to a conversion here:
++
++ z = (z + w) / 2.
++
++ So here we append two instructions to the vertex shader.
++ */
++ gcvSHADER_USE_GL_POSITION = 0x10,
++ gcvSHADER_USE_GL_FACE = 0x20,
++ gcvSHADER_USE_GL_POINT_COORD = 0x40,
++ gcvSHADER_LOADTIME_OPTIMIZER = 0x80,
++#if gcdALPHA_KILL_IN_SHADER
++ gcvSHADER_USE_ALPHA_KILL = 0x100,
++#endif
++
++#if gcdPRE_ROTATION && (ANDROID_SDK_VERSION >= 14)
++ gcvSHADER_VS_PRE_ROTATION = 0x200,
++#endif
++
++#if TEMP_INLINE_ALL_EXPANSION
++ gcvSHADER_INLINE_ALL_EXPANSION = 0x400,
++#endif
++}
++gceSHADER_FLAGS;
++
++gceSTATUS
++gcSHADER_CheckClipW(
++ IN gctCONST_STRING VertexSource,
++ IN gctCONST_STRING FragmentSource,
++ OUT gctBOOL * clipW);
++
++/*******************************************************************************
++** gcSHADER_GetUniformVectorCount
++**
++** Get the number of vectors used by uniforms for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of vectors.
++*/
++gceSTATUS
++gcSHADER_GetUniformVectorCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcOptimizer Data Structures
++*******************************************************************************/
++typedef enum _gceSHADER_OPTIMIZATION
++{
++ /* No optimization. */
++ gcvOPTIMIZATION_NONE,
++
++ /* Flow graph construction. */
++ gcvOPTIMIZATION_CONSTRUCTION = 1 << 0,
++
++ /* Dead code elimination. */
++ gcvOPTIMIZATION_DEAD_CODE = 1 << 1,
++
++ /* Redundant move instruction elimination. */
++ gcvOPTIMIZATION_REDUNDANT_MOVE = 1 << 2,
++
++ /* Inline expansion. */
++ gcvOPTIMIZATION_INLINE_EXPANSION = 1 << 3,
++
++ /* Constant propagation. */
++ gcvOPTIMIZATION_CONSTANT_PROPAGATION = 1 << 4,
++
++ /* Redundant bounds/checking elimination. */
++ gcvOPTIMIZATION_REDUNDANT_CHECKING = 1 << 5,
++
++ /* Loop invariant movement. */
++ gcvOPTIMIZATION_LOOP_INVARIANT = 1 << 6,
++
++ /* Induction variable removal. */
++ gcvOPTIMIZATION_INDUCTION_VARIABLE = 1 << 7,
++
++ /* Common subexpression elimination. */
++ gcvOPTIMIZATION_COMMON_SUBEXPRESSION = 1 << 8,
++
++ /* Control flow/banch optimization. */
++ gcvOPTIMIZATION_CONTROL_FLOW = 1 << 9,
++
++ /* Vector component operation merge. */
++ gcvOPTIMIZATION_VECTOR_INSTRUCTION_MERGE = 1 << 10,
++
++ /* Algebra simplificaton. */
++ gcvOPTIMIZATION_ALGEBRAIC_SIMPLIFICATION = 1 << 11,
++
++ /* Pattern matching and replacing. */
++ gcvOPTIMIZATION_PATTERN_MATCHING = 1 << 12,
++
++ /* Interprocedural constant propagation. */
++ gcvOPTIMIZATION_IP_CONSTANT_PROPAGATION = 1 << 13,
++
++ /* Interprecedural register optimization. */
++ gcvOPTIMIZATION_IP_REGISTRATION = 1 << 14,
++
++ /* Optimization option number. */
++ gcvOPTIMIZATION_OPTION_NUMBER = 1 << 15,
++
++ /* Loadtime constant. */
++ gcvOPTIMIZATION_LOADTIME_CONSTANT = 1 << 16,
++
++ /* MAD instruction optimization. */
++ gcvOPTIMIZATION_MAD_INSTRUCTION = 1 << 17,
++
++ /* Special optimization for LOAD SW workaround. */
++ gcvOPTIMIZATION_LOAD_SW_WORKAROUND = 1 << 18,
++
++ /* move code into conditional block if possile */
++ gcvOPTIMIZATION_CONDITIONALIZE = 1 << 19,
++
++ /* expriemental: power optimization mode
++ 1. add extra dummy texld to tune performance
++ 2. insert NOP after high power instrucitons
++ 3. split high power vec3/vec4 instruciton to vec2/vec1 operation
++ 4. ...
++ */
++ gcvOPTIMIZATION_POWER_OPTIMIZATION = 1 << 20,
++
++ /* optimize varying packing */
++ gcvOPTIMIZATION_VARYINGPACKING = 1 << 22,
++
++#if TEMP_INLINE_ALL_EXPANSION
++ gcvOPTIMIZATION_INLINE_ALL_EXPANSION = 1 << 23,
++#endif
++
++ /* Full optimization. */
++ /* Note that gcvOPTIMIZATION_LOAD_SW_WORKAROUND is off. */
++ gcvOPTIMIZATION_FULL = 0x7FFFFFFF &
++ ~gcvOPTIMIZATION_LOAD_SW_WORKAROUND &
++ ~gcvOPTIMIZATION_INLINE_ALL_EXPANSION &
++ ~gcvOPTIMIZATION_POWER_OPTIMIZATION,
++
++ /* Optimization Unit Test flag. */
++ gcvOPTIMIZATION_UNIT_TEST = 1 << 31
++}
++gceSHADER_OPTIMIZATION;
++
++typedef enum _gceOPTIMIZATION_VaryingPaking
++{
++ gcvOPTIMIZATION_VARYINGPACKING_NONE = 0,
++ gcvOPTIMIZATION_VARYINGPACKING_NOSPLIT,
++ gcvOPTIMIZATION_VARYINGPACKING_SPLIT
++} gceOPTIMIZATION_VaryingPaking;
++
++typedef struct _gcOPTIMIZER_OPTION
++{
++ gceSHADER_OPTIMIZATION optFlags;
++
++ /* debug & dump options:
++
++ VC_OPTION=-DUMP:SRC:OPT|:OPTV|:CG|:CGV:|ALL|ALLV
++
++ SRC: dump shader source code
++ OPT: dump incoming and final IR
++ OPTV: dump result IR in each optimization phase
++ CG: dump generated machine code
++ CGV: dump BE tree and optimization detail
++
++ ALL = SRC|OPT|CG
++ ALLV = SRC|OPT|OPTV|CG|CGV
++ */
++ gctBOOL dumpShaderSource; /* dump shader source code */
++ gctBOOL dumpOptimizer; /* dump incoming and final IR */
++ gctBOOL dumpOptimizerVerbose; /* dump result IR in each optimization phase */
++ gctBOOL dumpBEGenertedCode; /* dump generated machine code */
++ gctBOOL dumpBEVerbose; /* dump BE tree and optimization detail */
++ gctBOOL dumpBEFinalIR; /* dump BE final IR */
++
++ /* Code generation */
++
++ /* Varying Packing:
++
++ VC_OPTION=-PACKVARYING:[0-2]|:T[-]m[,n]|:LshaderIdx,min,max
++
++ 0: turn off varying packing
++ 1: pack varyings, donot split any varying
++ 2: pack varyings, may split to make fully packed output
++
++ Tm: only packing shader pair which vertex shader id is m
++ Tm,n: only packing shader pair which vertex shader id
++ is in range of [m, n]
++ T-m: do not packing shader pair which vertex shader id is m
++ T-m,n: do not packing shader pair which vertex shader id
++ is in range of [m, n]
++
++ LshaderIdx,min,max : set load balance (min, max) for shaderIdx
++ if shaderIdx is -1, all shaders are impacted
++ newMin = origMin * (min/100.);
++ newMax = origMax * (max/100.);
++ */
++ gceOPTIMIZATION_VaryingPaking packVarying;
++ gctINT _triageStart;
++ gctINT _triageEnd;
++ gctINT _loadBalanceShaderIdx;
++ gctINT _loadBalanceMin;
++ gctINT _loadBalanceMax;
++
++ /* Do not generate immdeiate
++
++ VC_OPTION=-NOIMM
++
++ Force generate immediate even the machine model don't support it,
++ for testing purpose only
++
++ VC_OPTION=-FORCEIMM
++ */
++ gctBOOL noImmediate;
++ gctBOOL forceImmediate;
++
++ /* Power reduction mode options */
++ gctBOOL needPowerOptimization;
++
++ /* Patch TEXLD instruction by adding dummy texld
++ (can be used to tune GPU power usage):
++ for every TEXLD we seen, add n dummy TEXLD
++
++ it can be enabled by environment variable:
++
++ VC_OPTION=-PATCH_TEXLD:M:N
++
++ (for each M texld, add N dummy texld)
++ */
++ gctINT patchEveryTEXLDs;
++ gctINT patchDummyTEXLDs;
++
++ /* Insert NOP after high power consumption instructions
++
++ VC_OPTION="-INSERTNOP:MUL:MULLO:DP3:DP4:SEENTEXLD"
++ */
++ gctBOOL insertNOP;
++ gctBOOL insertNOPAfterMUL;
++ gctBOOL insertNOPAfterMULLO;
++ gctBOOL insertNOPAfterDP3;
++ gctBOOL insertNOPAfterDP4;
++ gctBOOL insertNOPOnlyWhenTexldSeen;
++
++ /* split MAD to MUL and ADD:
++
++ VC_OPTION=-SPLITMAD
++ */
++ gctBOOL splitMAD;
++
++ /* Convert vect3/vec4 operations to multiple vec2/vec1 operations
++
++ VC_OPTION=-SPLITVEC:MUL:MULLO:DP3:DP4
++ */
++ gctBOOL splitVec;
++ gctBOOL splitVec4MUL;
++ gctBOOL splitVec4MULLO;
++ gctBOOL splitVec4DP3;
++ gctBOOL splitVec4DP4;
++
++ /* turn/off features:
++
++ VC_OPTION=-F:n,[0|1]
++ Note: n must be decimal number
++ */
++ gctUINT featureBits;
++
++ /* inline level (default 2 at O1):
++
++ VC_OPTION=-INLINELEVEL:[0-3]
++ 0: no inline
++ 1: only inline the function only called once or small function
++ 2: inline functions be called less than 5 times or medium size function
++ 3: inline everything possible
++ */
++ gctUINT inlineLevel;
++} gcOPTIMIZER_OPTION;
++
++extern gcOPTIMIZER_OPTION theOptimizerOption;
++#define gcmGetOptimizerOption() gcGetOptimizerOption()
++
++#define gcmOPT_DUMP_SHADER_SRC() \
++ (gcmGetOptimizerOption()->dumpShaderSource != 0)
++#define gcmOPT_DUMP_OPTIMIZER() \
++ (gcmGetOptimizerOption()->dumpOptimizer != 0 || \
++ gcmOPT_DUMP_OPTIMIZER_VERBOSE() )
++#define gcmOPT_DUMP_OPTIMIZER_VERBOSE() \
++ (gcmGetOptimizerOption()->dumpOptimizerVerbose != 0)
++#define gcmOPT_DUMP_CODEGEN() \
++ (gcmGetOptimizerOption()->dumpBEGenertedCode != 0 || \
++ gcmOPT_DUMP_CODEGEN_VERBOSE() )
++#define gcmOPT_DUMP_CODEGEN_VERBOSE() \
++ (gcmGetOptimizerOption()->dumpBEVerbose != 0)
++#define gcmOPT_DUMP_FINAL_IR() \
++ (gcmGetOptimizerOption()->dumpBEFinalIR != 0)
++
++#define gcmOPT_SET_DUMP_SHADER_SRC(v) \
++ gcmGetOptimizerOption()->dumpShaderSource = (v)
++
++#define gcmOPT_PATCH_TEXLD() (gcmGetOptimizerOption()->patchDummyTEXLDs != 0)
++#define gcmOPT_INSERT_NOP() (gcmGetOptimizerOption()->insertNOP == gcvTRUE)
++#define gcmOPT_SPLITMAD() (gcmGetOptimizerOption()->splitMAD == gcvTRUE)
++#define gcmOPT_SPLITVEC() (gcmGetOptimizerOption()->splitVec == gcvTRUE)
++
++#define gcmOPT_NOIMMEDIATE() (gcmGetOptimizerOption()->noImmediate == gcvTRUE)
++#define gcmOPT_FORCEIMMEDIATE() (gcmGetOptimizerOption()->forceImmediate == gcvTRUE)
++
++#define gcmOPT_PACKVARYING() (gcmGetOptimizerOption()->packVarying)
++#define gcmOPT_PACKVARYING_triageStart() (gcmGetOptimizerOption()->_triageStart)
++#define gcmOPT_PACKVARYING_triageEnd() (gcmGetOptimizerOption()->_triageEnd)
++
++#define gcmOPT_INLINELEVEL() (gcmGetOptimizerOption()->inlineLevel)
++
++/* Setters */
++#define gcmOPT_SetPatchTexld(m,n) (gcmGetOptimizerOption()->patchEveryTEXLDs = (m),\
++ gcmGetOptimizerOption()->patchDummyTEXLDs = (n))
++#define gcmOPT_SetSplitVecMUL() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4MUL = gcvTRUE)
++#define gcmOPT_SetSplitVecMULLO() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4MULLO = gcvTRUE)
++#define gcmOPT_SetSplitVecDP3() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4DP3 = gcvTRUE)
++#define gcmOPT_SetSplitVecDP4() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4DP4 = gcvTRUE)
++
++#define gcmOPT_SetPackVarying(v) (gcmGetOptimizerOption()->packVarying = v)
++
++#define FB_LIVERANGE_FIX1 0x0001
++
++
++#define PredefinedDummySamplerId 8
++
++/* Function argument qualifier */
++typedef enum _gceINPUT_OUTPUT
++{
++ gcvFUNCTION_INPUT,
++ gcvFUNCTION_OUTPUT,
++ gcvFUNCTION_INOUT
++}
++gceINPUT_OUTPUT;
++
++/* Kernel function property flags. */
++typedef enum _gcePROPERTY_FLAGS
++{
++ gcvPROPERTY_REQD_WORK_GRP_SIZE = 0x01
++}
++gceKERNEL_FUNCTION_PROPERTY_FLAGS;
++
++/* Uniform flags. */
++typedef enum _gceUNIFORM_FLAGS
++{
++ gcvUNIFORM_KERNEL_ARG = 0x01,
++ gcvUNIFORM_KERNEL_ARG_LOCAL = 0x02,
++ gcvUNIFORM_KERNEL_ARG_SAMPLER = 0x04,
++ gcvUNIFORM_LOCAL_ADDRESS_SPACE = 0x08,
++ gcvUNIFORM_PRIVATE_ADDRESS_SPACE = 0x10,
++ gcvUNIFORM_CONSTANT_ADDRESS_SPACE = 0x20,
++ gcvUNIFORM_GLOBAL_SIZE = 0x40,
++ gcvUNIFORM_LOCAL_SIZE = 0x80,
++ gcvUNIFORM_NUM_GROUPS = 0x100,
++ gcvUNIFORM_GLOBAL_OFFSET = 0x200,
++ gcvUNIFORM_WORK_DIM = 0x400,
++ gcvUNIFORM_KERNEL_ARG_CONSTANT = 0x800,
++ gcvUNIFORM_KERNEL_ARG_LOCAL_MEM_SIZE = 0x1000,
++ gcvUNIFORM_KERNEL_ARG_PRIVATE = 0x2000,
++ gcvUNIFORM_LOADTIME_CONSTANT = 0x4000,
++ gcvUNIFORM_IS_ARRAY = 0x8000,
++}
++gceUNIFORM_FLAGS;
++
++#define gcdUNIFORM_KERNEL_ARG_MASK (gcvUNIFORM_KERNEL_ARG | \
++ gcvUNIFORM_KERNEL_ARG_LOCAL | \
++ gcvUNIFORM_KERNEL_ARG_SAMPLER | \
++ gcvUNIFORM_KERNEL_ARG_PRIVATE | \
++ gcvUNIFORM_KERNEL_ARG_CONSTANT)
++
++typedef enum _gceVARIABLE_UPDATE_FLAGS
++{
++ gcvVARIABLE_UPDATE_NOUPDATE = 0,
++ gcvVARIABLE_UPDATE_TEMPREG,
++ gcvVARIABLE_UPDATE_TYPE_QUALIFIER,
++}gceVARIABLE_UPDATE_FLAGS;
++
++typedef struct _gcMACHINE_INST
++{
++ gctUINT state0;
++ gctUINT state1;
++ gctUINT state2;
++ gctUINT state3;
++}gcMACHINE_INST, *gcMACHINE_INST_PTR;
++
++typedef struct _gcMACHINECODE
++{
++ gcMACHINE_INST_PTR pCode; /* machine code */
++ gctUINT instCount; /* 128-bit count */
++ gctUINT maxConstRegNo;
++ gctUINT maxTempRegNo;
++ gctUINT endPCOfMainRoutine;
++}gcMACHINECODE, *gcMACHINECODE_PTR;
++
++typedef enum NP2_ADDRESS_MODE
++{
++ NP2_ADDRESS_MODE_CLAMP = 0,
++ NP2_ADDRESS_MODE_REPEAT = 1,
++ NP2_ADDRESS_MODE_MIRROR = 2
++}NP2_ADDRESS_MODE;
++
++typedef struct _gcNPOT_PATCH_PARAM
++{
++ gctINT samplerSlot;
++ NP2_ADDRESS_MODE addressMode[3];
++ gctINT texDimension; /* 2 or 3 */
++}gcNPOT_PATCH_PARAM, *gcNPOT_PATCH_PARAM_PTR;
++
++typedef struct _gcZBIAS_PATCH_PARAM
++{
++ /* Driver uses this to program uniform that designating zbias */
++ gctINT uniformAddr;
++ gctINT channel;
++}gcZBIAS_PATCH_PARAM, *gcZBIAS_PATCH_PARAM_PTR;
++
++void
++gcGetOptionFromEnv(
++ IN OUT gcOPTIMIZER_OPTION * Option
++ );
++
++void
++gcSetOptimizerOption(
++ IN gceSHADER_FLAGS Flags
++ );
++
++gcOPTIMIZER_OPTION *
++gcGetOptimizerOption();
++
++/*******************************************************************************
++** gcSHADER_SetCompilerVersion
++**
++** Set the compiler version of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to gcSHADER object
++**
++** gctINT *Version
++** Pointer to a two word version
++*/
++gceSTATUS
++gcSHADER_SetCompilerVersion(
++ IN gcSHADER Shader,
++ IN gctUINT32 *Version
++ );
++
++/*******************************************************************************
++** gcSHADER_GetCompilerVersion
++**
++** Get the compiler version of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR *CompilerVersion.
++** Pointer to holder of returned compilerVersion pointer
++*/
++gceSTATUS
++gcSHADER_GetCompilerVersion(
++ IN gcSHADER Shader,
++ OUT gctUINT32_PTR *CompilerVersion
++ );
++
++/*******************************************************************************
++** gcSHADER_GetType
++**
++** Get the gcSHADER object's type.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctINT *Type.
++** Pointer to return shader type.
++*/
++gceSTATUS
++gcSHADER_GetType(
++ IN gcSHADER Shader,
++ OUT gctINT *Type
++ );
++
++gctUINT
++gcSHADER_NextId();
++/*******************************************************************************
++** gcSHADER_Construct
++********************************************************************************
++**
++** Construct a new gcSHADER object.
++**
++** INPUT:
++**
++** gcoOS Hal
++** Pointer to an gcoHAL object.
++**
++** gctINT ShaderType
++** Type of gcSHADER object to cerate. 'ShaderType' can be one of the
++** following:
++**
++** gcSHADER_TYPE_VERTEX Vertex shader.
++** gcSHADER_TYPE_FRAGMENT Fragment shader.
++**
++** OUTPUT:
++**
++** gcSHADER * Shader
++** Pointer to a variable receiving the gcSHADER object pointer.
++*/
++gceSTATUS
++gcSHADER_Construct(
++ IN gcoHAL Hal,
++ IN gctINT ShaderType,
++ OUT gcSHADER * Shader
++ );
++
++/*******************************************************************************
++** gcSHADER_Destroy
++********************************************************************************
++**
++** Destroy a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Destroy(
++ IN gcSHADER Shader
++ );
++
++/*******************************************************************************
++** gcSHADER_Copy
++********************************************************************************
++**
++** Copy a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSHADER Source
++** Pointer to a gcSHADER object that will be copied.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Copy(
++ IN gcSHADER Shader,
++ IN gcSHADER Source
++ );
++
++/*******************************************************************************
++** gcSHADER_LoadHeader
++**
++** Load a gcSHADER object from a binary buffer. The binary buffer is layed out
++** as follows:
++** // Six word header
++** // Signature, must be 'S','H','D','R'.
++** gctINT8 signature[4];
++** gctUINT32 binFileVersion;
++** gctUINT32 compilerVersion[2];
++** gctUINT32 gcSLVersion;
++** gctUINT32 binarySize;
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++** Shader type will be returned if type in shader object is not gcSHADER_TYPE_PRECOMPILED
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer containing the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++** nothing
++**
++*/
++gceSTATUS
++gcSHADER_LoadHeader(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize,
++ OUT gctUINT32 * ShaderVersion
++ );
++
++/*******************************************************************************
++** gcSHADER_LoadKernel
++**
++** Load a kernel function given by name into gcSHADER object
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSTRING KernelName
++** Pointer to a kernel function name
++**
++** OUTPUT:
++** nothing
++**
++*/
++gceSTATUS
++gcSHADER_LoadKernel(
++ IN gcSHADER Shader,
++ IN gctSTRING KernelName
++ );
++
++/*******************************************************************************
++** gcSHADER_Load
++********************************************************************************
++**
++** Load a gcSHADER object from a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer containg the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Load(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_Save
++********************************************************************************
++**
++** Save a gcSHADER object to a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer to be used as storage for the gcSHADER
++** object. If 'Buffer' is gcvNULL, the gcSHADER object will not be saved,
++** but the number of bytes required to hold the binary output for the
++** gcSHADER object will be returned.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable holding the number of bytes allocated in
++** 'Buffer'. Only valid if 'Buffer' is not gcvNULL.
++**
++** OUTPUT:
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable receiving the number of bytes required to hold
++** the binary form of the gcSHADER object.
++*/
++gceSTATUS
++gcSHADER_Save(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN OUT gctSIZE_T * BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_LoadEx
++********************************************************************************
++**
++** Load a gcSHADER object from a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer containg the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_LoadEx(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_SaveEx
++********************************************************************************
++**
++** Save a gcSHADER object to a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer to be used as storage for the gcSHADER
++** object. If 'Buffer' is gcvNULL, the gcSHADER object will not be saved,
++** but the number of bytes required to hold the binary output for the
++** gcSHADER object will be returned.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable holding the number of bytes allocated in
++** 'Buffer'. Only valid if 'Buffer' is not gcvNULL.
++**
++** OUTPUT:
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable receiving the number of bytes required to hold
++** the binary form of the gcSHADER object.
++*/
++gceSTATUS
++gcSHADER_SaveEx(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN OUT gctSIZE_T * BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateAttributes
++**
++** Reallocate an array of pointers to gcATTRIBUTE objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateAttributes(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddAttribute
++********************************************************************************
++**
++** Add an attribute to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the attribute to add.
++**
++** gcSHADER_TYPE Type
++** Type of the attribute to add.
++**
++** gctSIZE_T Length
++** Array length of the attribute to add. 'Length' must be at least 1.
++**
++** gctBOOL IsTexture
++** gcvTRUE if the attribute is used as a texture coordinate, gcvFALSE if not.
++**
++** OUTPUT:
++**
++** gcATTRIBUTE * Attribute
++** Pointer to a variable receiving the gcATTRIBUTE object pointer.
++*/
++gceSTATUS
++gcSHADER_AddAttribute(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctBOOL IsTexture,
++ OUT gcATTRIBUTE * Attribute
++ );
++
++/*******************************************************************************
++** gcSHADER_GetAttributeCount
++********************************************************************************
++**
++** Get the number of attributes for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of attributes.
++*/
++gceSTATUS
++gcSHADER_GetAttributeCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetAttribute
++********************************************************************************
++**
++** Get the gcATTRIBUTE object poniter for an indexed attribute for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of the attribute to retrieve.
++**
++** OUTPUT:
++**
++** gcATTRIBUTE * Attribute
++** Pointer to a variable receiving the gcATTRIBUTE object pointer.
++*/
++gceSTATUS
++gcSHADER_GetAttribute(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcATTRIBUTE * Attribute
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateUniforms
++**
++** Reallocate an array of pointers to gcUNIFORM objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateUniforms(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniform
++********************************************************************************
++**
++** Add an uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddUniform(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddPreRotationUniform
++********************************************************************************
++**
++** Add an uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** gctINT col
++** Which uniform.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddPreRotationUniform(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctINT col,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniformEx
++********************************************************************************
++**
++** Add an uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gcSHADER_PRECISION precision
++** Precision of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddUniformEx(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gcSHADER_PRECISION precision,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniformEx1
++********************************************************************************
++**
++** Add an uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gcSHADER_PRECISION precision
++** Precision of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** gcSHADER_VAR_CATEGORY varCategory
++** Variable category, normal or struct.
++**
++** gctUINT16 numStructureElement
++** If struct, its element number.
++**
++** gctINT16 parent
++** If struct, parent index in gcSHADER.variables.
++**
++** gctINT16 prevSibling
++** If struct, previous sibling index in gcSHADER.variables.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++**
++** gctINT16* ThisUniformIndex
++** Returned value about uniform index in gcSHADER.
++*/
++gceSTATUS
++gcSHADER_AddUniformEx1(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gcSHADER_PRECISION precision,
++ IN gctSIZE_T Length,
++ IN gctINT IsArray,
++ IN gcSHADER_VAR_CATEGORY varCategory,
++ IN gctUINT16 numStructureElement,
++ IN gctINT16 parent,
++ IN gctINT16 prevSibling,
++ OUT gctINT16* ThisUniformIndex,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetUniformCount
++********************************************************************************
++**
++** Get the number of uniforms for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of uniforms.
++*/
++gceSTATUS
++gcSHADER_GetUniformCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetPreRotationUniform
++********************************************************************************
++**
++** Get the preRotate Uniform.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gcUNIFORM ** pUniform
++** Pointer to a preRotation uniforms array.
++*/
++gceSTATUS
++gcSHADER_GetPreRotationUniform(
++ IN gcSHADER Shader,
++ OUT gcUNIFORM ** pUniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetUniform
++********************************************************************************
++**
++** Get the gcUNIFORM object pointer for an indexed uniform for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of the uniform to retrieve.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_GetUniform(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcUNIFORM * Uniform
++ );
++
++
++/*******************************************************************************
++** gcSHADER_GetUniformIndexingRange
++********************************************************************************
++**
++** Get the gcUNIFORM object pointer for an indexed uniform for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctINT uniformIndex
++** Index of the start uniform.
++**
++** gctINT offset
++** Offset to indexing.
++**
++** OUTPUT:
++**
++** gctINT * LastUniformIndex
++** Pointer to index of last uniform in indexing range.
++**
++** gctINT * OffsetUniformIndex
++** Pointer to index of uniform that indexing at offset.
++**
++** gctINT * DeviationInOffsetUniform
++** Pointer to offset in uniform picked up.
++*/
++gceSTATUS
++gcSHADER_GetUniformIndexingRange(
++ IN gcSHADER Shader,
++ IN gctINT uniformIndex,
++ IN gctINT offset,
++ OUT gctINT * LastUniformIndex,
++ OUT gctINT * OffsetUniformIndex,
++ OUT gctINT * DeviationInOffsetUniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetKernelFunction
++**
++** Get the gcKERNEL_FUNCTION object pointer for an indexed kernel function for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of kernel function to retrieve the name for.
++**
++** OUTPUT:
++**
++** gcKERNEL_FUNCTION * KernelFunction
++** Pointer to a variable receiving the gcKERNEL_FUNCTION object pointer.
++*/
++gceSTATUS
++gcSHADER_GetKernelFunction(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_GetKernelFunctionByName(
++ IN gcSHADER Shader,
++ IN gctSTRING KernelName,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++/*******************************************************************************
++** gcSHADER_GetKernelFunctionCount
++**
++** Get the number of kernel functions for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of kernel functions.
++*/
++gceSTATUS
++gcSHADER_GetKernelFunctionCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateOutputs
++**
++** Reallocate an array of pointers to gcOUTPUT objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateOutputs(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOutput
++********************************************************************************
++**
++** Add an output to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the output to add.
++**
++** gcSHADER_TYPE Type
++** Type of the output to add.
++**
++** gctSIZE_T Length
++** Array length of the output to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the output value.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOutput(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister
++ );
++
++gceSTATUS
++gcSHADER_AddOutputIndexed(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gctSIZE_T Index,
++ IN gctUINT16 TempIndex
++ );
++
++/*******************************************************************************
++** gcSHADER_GetOutputCount
++********************************************************************************
++**
++** Get the number of outputs for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of outputs.
++*/
++gceSTATUS
++gcSHADER_GetOutputCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetOutput
++********************************************************************************
++**
++** Get the gcOUTPUT object pointer for an indexed output for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of output to retrieve.
++**
++** OUTPUT:
++**
++** gcOUTPUT * Output
++** Pointer to a variable receiving the gcOUTPUT object pointer.
++*/
++gceSTATUS
++gcSHADER_GetOutput(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcOUTPUT * Output
++ );
++
++
++/*******************************************************************************
++** gcSHADER_GetOutputByName
++********************************************************************************
++**
++** Get the gcOUTPUT object pointer for this shader by output name.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSTRING name
++** Name of output to retrieve.
++**
++** gctSIZE_T nameLength
++** Length of name to retrieve
++**
++** OUTPUT:
++**
++** gcOUTPUT * Output
++** Pointer to a variable receiving the gcOUTPUT object pointer.
++*/
++gceSTATUS
++gcSHADER_GetOutputByName(
++ IN gcSHADER Shader,
++ IN gctSTRING name,
++ IN gctSIZE_T nameLength,
++ OUT gcOUTPUT * Output
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateVariables
++**
++** Reallocate an array of pointers to gcVARIABLE objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateVariables(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddVariable
++********************************************************************************
++**
++** Add a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the variable to add.
++**
++** gcSHADER_TYPE Type
++** Type of the variable to add.
++**
++** gctSIZE_T Length
++** Array length of the variable to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the variable value.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddVariable(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister
++ );
++
++
++/*******************************************************************************
++** gcSHADER_AddVariableEx
++********************************************************************************
++**
++** Add a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the variable to add.
++**
++** gcSHADER_TYPE Type
++** Type of the variable to add.
++**
++** gctSIZE_T Length
++** Array length of the variable to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the variable value.
++**
++** gcSHADER_VAR_CATEGORY varCategory
++** Variable category, normal or struct.
++**
++** gctUINT16 numStructureElement
++** If struct, its element number.
++**
++** gctINT16 parent
++** If struct, parent index in gcSHADER.variables.
++**
++** gctINT16 prevSibling
++** If struct, previous sibling index in gcSHADER.variables.
++**
++** OUTPUT:
++**
++** gctINT16* ThisVarIndex
++** Returned value about variable index in gcSHADER.
++*/
++gceSTATUS
++gcSHADER_AddVariableEx(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister,
++ IN gcSHADER_VAR_CATEGORY varCategory,
++ IN gctUINT16 numStructureElement,
++ IN gctINT16 parent,
++ IN gctINT16 prevSibling,
++ OUT gctINT16* ThisVarIndex
++ );
++
++/*******************************************************************************
++** gcSHADER_UpdateVariable
++********************************************************************************
++**
++** Update a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of variable to retrieve.
++**
++** gceVARIABLE_UPDATE_FLAGS flag
++** Flag which property of variable will be updated.
++**
++** gctUINT newValue
++** New value to update.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_UpdateVariable(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ IN gceVARIABLE_UPDATE_FLAGS flag,
++ IN gctUINT newValue
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariableCount
++********************************************************************************
++**
++** Get the number of variables for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of variables.
++*/
++gceSTATUS
++gcSHADER_GetVariableCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariable
++********************************************************************************
++**
++** Get the gcVARIABLE object pointer for an indexed variable for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of variable to retrieve.
++**
++** OUTPUT:
++**
++** gcVARIABLE * Variable
++** Pointer to a variable receiving the gcVARIABLE object pointer.
++*/
++gceSTATUS
++gcSHADER_GetVariable(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcVARIABLE * Variable
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariableIndexingRange
++********************************************************************************
++**
++** Get the gcVARIABLE indexing range.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcVARIABLE variable
++** Start variable.
++**
++** gctBOOL whole
++** Indicate whether maximum indexing range is queried
++**
++** OUTPUT:
++**
++** gctUINT *Start
++** Pointer to range start (temp register index).
++**
++** gctUINT *End
++** Pointer to range end (temp register index).
++*/
++gceSTATUS
++gcSHADER_GetVariableIndexingRange(
++ IN gcSHADER Shader,
++ IN gcVARIABLE variable,
++ IN gctBOOL whole,
++ OUT gctUINT *Start,
++ OUT gctUINT *End
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcode
++********************************************************************************
++**
++** Add an opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the target
++** of the opcode.
++**
++** gcSL_FORMAT Format
++** Format of the temporary register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcode(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddOpcode2(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeIndexed
++********************************************************************************
++**
++** Add an opcode to a gcSHADER object that writes to a dynamically indexed
++** target.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the
++** target of the opcode.
++**
++** gcSL_INDEXED Mode
++** Location of the dynamic index inside the temporary register. Valid
++** values can be:
++**
++** gcSL_INDEXED_X - Use x component of the temporary register.
++** gcSL_INDEXED_Y - Use y component of the temporary register.
++** gcSL_INDEXED_Z - Use z component of the temporary register.
++** gcSL_INDEXED_W - Use w component of the temporary register.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** gcSL_FORMAT Format
++** Format of the temporary register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionIndexed
++**
++** Add an opcode to a gcSHADER object that writes to a dynamically indexed
++** target.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition to check.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the
++** target of the opcode.
++**
++** gcSL_INDEXED Indexed
++** Location of the dynamic index inside the temporary register. Valid
++** values can be:
++**
++** gcSL_INDEXED_X - Use x component of the temporary register.
++** gcSL_INDEXED_Y - Use y component of the temporary register.
++** gcSL_INDEXED_Z - Use z component of the temporary register.
++** gcSL_INDEXED_W - Use w component of the temporary register.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_INDEXED Indexed,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditional
++********************************************************************************
++**
++** Add a conditional opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditional(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionalFormatted
++**
++** Add a conditional jump or call opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gcSL_FORMAT Format
++** Format of conditional operands
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionalFormatted(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gcSL_FORMAT Format,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionalFormattedEnable
++**
++** Add a conditional jump or call opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gcSL_FORMAT Format
++** Format of conditional operands
++**
++** gctUINT8 Enable
++** Write enable value for the target of the opcode.
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionalFormattedEnable(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gcSL_FORMAT Format,
++ IN gctUINT8 Enable,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddLabel
++********************************************************************************
++**
++** Define a label at the current instruction of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Label
++** Label to define.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddLabel(
++ IN gcSHADER Shader,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSource
++********************************************************************************
++**
++** Add a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_TYPE Type
++** Type of the source operand.
++**
++** gctUINT16 SourceIndex
++** Index of the source operand.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gcSL_FORMAT Format
++** Format of the source operand.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSource(
++ IN gcSHADER Shader,
++ IN gcSL_TYPE Type,
++ IN gctUINT16 SourceIndex,
++ IN gctUINT8 Swizzle,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceIndexed
++********************************************************************************
++**
++** Add a dynamically indexed source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_TYPE Type
++** Type of the source operand.
++**
++** gctUINT16 SourceIndex
++** Index of the source operand.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gcSL_INDEXED Mode
++** Addressing mode for the index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** gcSL_FORMAT Format
++** Format of the source operand.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_TYPE Type,
++ IN gctUINT16 SourceIndex,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceAttribute
++********************************************************************************
++**
++** Add an attribute as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the attribute in case the attribute is a matrix
++** or array.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceAttribute(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceAttributeIndexed
++********************************************************************************
++**
++** Add an indexed attribute as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the attribute in case the attribute is a matrix
++** or array.
++**
++** gcSL_INDEXED Mode
++** Addressing mode of the dynamic index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceAttributeIndexed(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceUniform
++********************************************************************************
++**
++** Add a uniform as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the uniform in case the uniform is a matrix or
++** array.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceUniform(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceUniformIndexed
++********************************************************************************
++**
++** Add an indexed uniform as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the uniform in case the uniform is a matrix or
++** array.
++**
++** gcSL_INDEXED Mode
++** Addressing mode of the dynamic index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceUniformIndexed(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++gceSTATUS
++gcSHADER_AddSourceSamplerIndexed(
++ IN gcSHADER Shader,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++gceSTATUS
++gcSHADER_AddSourceAttributeFormatted(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceAttributeIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceUniformFormatted(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceUniformIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceSamplerIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceConstant
++********************************************************************************
++**
++** Add a constant floating point value as a source operand to a gcSHADER
++** object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctFLOAT Constant
++** Floating point constant.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceConstant(
++ IN gcSHADER Shader,
++ IN gctFLOAT Constant
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceConstantFormatted
++********************************************************************************
++**
++** Add a constant value as a source operand to a gcSHADER
++** object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** void * Constant
++** Pointer to constant.
++**
++** gcSL_FORMAT Format
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceConstantFormatted(
++ IN gcSHADER Shader,
++ IN void *Constant,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_Pack
++********************************************************************************
++**
++** Pack a dynamically created gcSHADER object by trimming the allocated arrays
++** and resolving all the labeling.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Pack(
++ IN gcSHADER Shader
++ );
++
++/*******************************************************************************
++** gcSHADER_SetOptimizationOption
++********************************************************************************
++**
++** Set optimization option of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT OptimizationOption
++** Optimization option. Can be one of the following:
++**
++** 0 - No optimization.
++** 1 - Full optimization.
++** Other value - For optimizer testing.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_SetOptimizationOption(
++ IN gcSHADER Shader,
++ IN gctUINT OptimizationOption
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateFunctions
++**
++** Reallocate an array of pointers to gcFUNCTION objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateFunctions(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcSHADER_AddFunction(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ OUT gcFUNCTION * Function
++ );
++
++gceSTATUS
++gcSHADER_ReallocateKernelFunctions(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcSHADER_AddKernelFunction(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_BeginFunction(
++ IN gcSHADER Shader,
++ IN gcFUNCTION Function
++ );
++
++gceSTATUS
++gcSHADER_EndFunction(
++ IN gcSHADER Shader,
++ IN gcFUNCTION Function
++ );
++
++gceSTATUS
++gcSHADER_BeginKernelFunction(
++ IN gcSHADER Shader,
++ IN gcKERNEL_FUNCTION KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_EndKernelFunction(
++ IN gcSHADER Shader,
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T LocalMemorySize
++ );
++
++gceSTATUS
++gcSHADER_SetMaxKernelFunctionArgs(
++ IN gcSHADER Shader,
++ IN gctUINT32 MaxKernelFunctionArgs
++ );
++
++/*******************************************************************************
++** gcSHADER_SetConstantMemorySize
++**
++** Set the constant memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T ConstantMemorySize
++** Constant memory size in bytes
++**
++** gctCHAR *ConstantMemoryBuffer
++** Constant memory buffer
++*/
++gceSTATUS
++gcSHADER_SetConstantMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T ConstantMemorySize,
++ IN gctCHAR * ConstantMemoryBuffer
++ );
++
++/*******************************************************************************
++** gcSHADER_GetConstantMemorySize
++**
++** Get the constant memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * ConstantMemorySize
++** Pointer to a variable receiving constant memory size in bytes
++**
++** gctCHAR **ConstantMemoryBuffer.
++** Pointer to a variable for returned shader constant memory buffer.
++*/
++gceSTATUS
++gcSHADER_GetConstantMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * ConstantMemorySize,
++ OUT gctCHAR ** ConstantMemoryBuffer
++ );
++
++/*******************************************************************************
++** gcSHADER_SetPrivateMemorySize
++**
++** Set the private memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T PrivateMemorySize
++** Private memory size in bytes
++*/
++gceSTATUS
++gcSHADER_SetPrivateMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T PrivateMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_GetPrivateMemorySize
++**
++** Get the private memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * PrivateMemorySize
++** Pointer to a variable receiving private memory size in bytes
++*/
++gceSTATUS
++gcSHADER_GetPrivateMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * PrivateMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_SetLocalMemorySize
++**
++** Set the local memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T LocalMemorySize
++** Local memory size in bytes
++*/
++gceSTATUS
++gcSHADER_SetLocalMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T LocalMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_GetLocalMemorySize
++**
++** Get the local memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * LocalMemorySize
++** Pointer to a variable receiving local memory size in bytes
++*/
++gceSTATUS
++gcSHADER_GetLocalMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * LocalMemorySize
++ );
++
++
++/*******************************************************************************
++** gcSHADER_CheckValidity
++**
++** Check validity for a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++*/
++gceSTATUS
++gcSHADER_CheckValidity(
++ IN gcSHADER Shader
++ );
++
++#if gcdUSE_WCLIP_PATCH
++gceSTATUS
++gcATTRIBUTE_IsPosition(
++ IN gcATTRIBUTE Attribute,
++ OUT gctBOOL * IsPosition
++ );
++#endif
++
++/*******************************************************************************
++** gcATTRIBUTE_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the attribute. 'Type'
++** can be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** attribute was declared as an array. If the attribute was not
++** declared as an array, the array length will be 1. 'ArrayLength' can
++** be gcvNULL, in which case no array length will be returned.
++*/
++gceSTATUS
++gcATTRIBUTE_GetType(
++ IN gcATTRIBUTE Attribute,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcATTRIBUTE_GetName
++********************************************************************************
++**
++** Get the name of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the attribute name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the attribute name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcATTRIBUTE_GetName(
++ IN gcATTRIBUTE Attribute,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++/*******************************************************************************
++** gcATTRIBUTE_IsEnabled
++********************************************************************************
++**
++** Query the enabled state of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gctBOOL * Enabled
++** Pointer to a variable receiving the enabled state of the attribute.
++*/
++gceSTATUS
++gcATTRIBUTE_IsEnabled(
++ IN gcATTRIBUTE Attribute,
++ OUT gctBOOL * Enabled
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the uniform. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** uniform was declared as an array. If the uniform was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetType(
++ IN gcUNIFORM Uniform,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetTypeEx
++********************************************************************************
++**
++** Get the type and array length of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the uniform. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gcSHADER_PRECISION * Precision
++** Pointer to a variable receiving the precision of the uniform. 'Precision' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** uniform was declared as an array. If the uniform was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetTypeEx(
++ IN gcUNIFORM Uniform,
++ OUT gcSHADER_TYPE * Type,
++ OUT gcSHADER_PRECISION * Precision,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetFlags
++********************************************************************************
++**
++** Get the flags of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gceUNIFORM_FLAGS * Flags
++** Pointer to a variable receiving the flags of the uniform.
++**
++*/
++gceSTATUS
++gcUNIFORM_GetFlags(
++ IN gcUNIFORM Uniform,
++ OUT gceUNIFORM_FLAGS * Flags
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetFlags
++********************************************************************************
++**
++** Set the flags of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gceUNIFORM_FLAGS Flags
++** Flags of the uniform to be set.
++**
++** OUTPUT:
++** Nothing.
++**
++*/
++gceSTATUS
++gcUNIFORM_SetFlags(
++ IN gcUNIFORM Uniform,
++ IN gceUNIFORM_FLAGS Flags
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetName
++********************************************************************************
++**
++** Get the name of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the uniform name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the uniform name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetName(
++ IN gcUNIFORM Uniform,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetSampler
++********************************************************************************
++**
++** Get the physical sampler number for a sampler gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Sampler
++** Pointer to a variable receiving the physical sampler.
++*/
++gceSTATUS
++gcUNIFORM_GetSampler(
++ IN gcUNIFORM Uniform,
++ OUT gctUINT32 * Sampler
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetFormat
++**
++** Get the type and array length of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSL_FORMAT * Format
++** Pointer to a variable receiving the format of element of the uniform.
++** 'Type' can be gcvNULL, in which case no type will be returned.
++**
++** gctBOOL * IsPointer
++** Pointer to a variable receiving the state whether the uniform is a pointer.
++** 'IsPointer' can be gcvNULL, in which case no array length will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetFormat(
++ IN gcUNIFORM Uniform,
++ OUT gcSL_FORMAT * Format,
++ OUT gctBOOL * IsPointer
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetFormat
++**
++** Set the format and isPointer of a uniform.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gcSL_FORMAT Format
++** Format of element of the uniform shaderType.
++**
++** gctBOOL IsPointer
++** Whether the uniform is a pointer.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetFormat(
++ IN gcUNIFORM Uniform,
++ IN gcSL_FORMAT Format,
++ IN gctBOOL IsPointer
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValue
++********************************************************************************
++**
++** Set the value of a uniform in integer.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctINT * Value
++** Pointer to a buffer holding the integer values for the uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValue(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN const gctINT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValueX
++********************************************************************************
++**
++** Set the value of a uniform in fixed point.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctFIXED_POINT * Value
++** Pointer to a buffer holding the fixed point values for the uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValueX(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN gctFIXED_POINT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValueF
++********************************************************************************
++**
++** Set the value of a uniform in floating point.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctFLOAT * Value
++** Pointer to a buffer holding the floating point values for the
++** uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValueF(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN const gctFLOAT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_ProgramF
++**
++** Set the value of a uniform in floating point.
++**
++** INPUT:
++**
++** gctUINT32 Address
++** Address of Uniform.
++**
++** gctSIZE_T Row/Col
++**
++** const gctFLOAT * Value
++** Pointer to a buffer holding the floating point values for the
++** uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_ProgramF(
++ IN gctUINT32 Address,
++ IN gctSIZE_T Row,
++ IN gctSIZE_T Col,
++ IN const gctFLOAT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetModelViewProjMatrix
++********************************************************************************
++**
++** Get the value of uniform modelViewProjMatrix ID if present.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gctUINT
++gcUNIFORM_GetModelViewProjMatrix(
++ IN gcUNIFORM Uniform
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the output. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** output was declared as an array. If the output was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcOUTPUT_GetType(
++ IN gcOUTPUT Output,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetIndex
++********************************************************************************
++**
++** Get the index of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gctUINT * Index
++** Pointer to a variable receiving the temporary register index of the
++** output. 'Index' can be gcvNULL,. in which case no index will be
++** returned.
++*/
++gceSTATUS
++gcOUTPUT_GetIndex(
++ IN gcOUTPUT Output,
++ OUT gctUINT * Index
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetName
++********************************************************************************
++**
++** Get the name of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the output name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the output name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcOUTPUT_GetName(
++ IN gcOUTPUT Output,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++/*******************************************************************************
++*********************************************************** F U N C T I O N S **
++*******************************************************************************/
++
++/*******************************************************************************
++** gcFUNCTION_ReallocateArguments
++**
++** Reallocate an array of gcsFUNCTION_ARGUMENT objects.
++**
++** INPUT:
++**
++** gcFUNCTION Function
++** Pointer to a gcFUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcFUNCTION_ReallocateArguments(
++ IN gcFUNCTION Function,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcFUNCTION_AddArgument(
++ IN gcFUNCTION Function,
++ IN gctUINT16 TempIndex,
++ IN gctUINT8 Enable,
++ IN gctUINT8 Qualifier
++ );
++
++gceSTATUS
++gcFUNCTION_GetArgument(
++ IN gcFUNCTION Function,
++ IN gctUINT16 Index,
++ OUT gctUINT16_PTR Temp,
++ OUT gctUINT8_PTR Enable,
++ OUT gctUINT8_PTR Swizzle
++ );
++
++gceSTATUS
++gcFUNCTION_GetLabel(
++ IN gcFUNCTION Function,
++ OUT gctUINT_PTR Label
++ );
++
++/*******************************************************************************
++************************* K E R N E L P R O P E R T Y F U N C T I O N S **
++*******************************************************************************/
++/*******************************************************************************/
++gceSTATUS
++gcKERNEL_FUNCTION_AddKernelFunctionProperties(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctINT propertyType,
++ IN gctSIZE_T propertySize,
++ IN gctINT * values
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetPropertyCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetProperty(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gctSIZE_T * propertySize,
++ OUT gctINT * propertyType,
++ OUT gctINT * propertyValues
++ );
++
++
++/*******************************************************************************
++*******************************I M A G E S A M P L E R F U N C T I O N S **
++*******************************************************************************/
++/*******************************************************************************
++** gcKERNEL_FUNCTION_ReallocateImageSamplers
++**
++** Reallocate an array of pointers to image sampler pair.
++**
++** INPUT:
++**
++** gcKERNEL_FUNCTION KernelFunction
++** Pointer to a gcKERNEL_FUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateImageSamplers(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddImageSampler(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT8 ImageNum,
++ IN gctBOOL IsConstantSamplerType,
++ IN gctUINT32 SamplerType
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetImageSamplerCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetImageSampler(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gctUINT8 *ImageNum,
++ OUT gctBOOL *IsConstantSamplerType,
++ OUT gctUINT32 *SamplerType
++ );
++
++/*******************************************************************************
++*********************************************K E R N E L F U N C T I O N S **
++*******************************************************************************/
++
++/*******************************************************************************
++** gcKERNEL_FUNCTION_ReallocateArguments
++**
++** Reallocate an array of gcsFUNCTION_ARGUMENT objects.
++**
++** INPUT:
++**
++** gcKERNEL_FUNCTION Function
++** Pointer to a gcKERNEL_FUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateArguments(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddArgument(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctUINT16 TempIndex,
++ IN gctUINT8 Enable,
++ IN gctUINT8 Qualifier
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetArgument(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctUINT16 Index,
++ OUT gctUINT16_PTR Temp,
++ OUT gctUINT8_PTR Enable,
++ OUT gctUINT8_PTR Swizzle
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetLabel(
++ IN gcKERNEL_FUNCTION Function,
++ OUT gctUINT_PTR Label
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetName(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateUniformArguments(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddUniformArgument(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * UniformArgument
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetUniformArgumentCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetUniformArgument(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gcUNIFORM * UniformArgument
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_SetCodeEnd(
++ IN gcKERNEL_FUNCTION KernelFunction
++ );
++
++/*******************************************************************************
++** gcCompileShader
++********************************************************************************
++**
++** Compile a shader.
++**
++** INPUT:
++**
++** gcoOS Hal
++** Pointer to an gcoHAL object.
++**
++** gctINT ShaderType
++** Shader type to compile. Can be one of the following values:
++**
++** gcSHADER_TYPE_VERTEX
++** Compile a vertex shader.
++**
++** gcSHADER_TYPE_FRAGMENT
++** Compile a fragment shader.
++**
++** gctSIZE_T SourceSize
++** Size of the source buffer in bytes.
++**
++** gctCONST_STRING Source
++** Pointer to the buffer containing the shader source code.
++**
++** OUTPUT:
++**
++** gcSHADER * Binary
++** Pointer to a variable receiving the pointer to a gcSHADER object
++** containing the compiled shader code.
++**
++** gctSTRING * Log
++** Pointer to a variable receiving a string pointer containing the
++** compile log.
++*/
++gceSTATUS
++gcCompileShader(
++ IN gcoHAL Hal,
++ IN gctINT ShaderType,
++ IN gctSIZE_T SourceSize,
++ IN gctCONST_STRING Source,
++ OUT gcSHADER * Binary,
++ OUT gctSTRING * Log
++ );
++
++/*******************************************************************************
++** gcOptimizeShader
++********************************************************************************
++**
++** Optimize a shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object holding information about the compiled
++** shader.
++**
++** gctFILE LogFile
++** Pointer to an open FILE object.
++*/
++gceSTATUS
++gcOptimizeShader(
++ IN gcSHADER Shader,
++ IN gctFILE LogFile
++ );
++
++/*******************************************************************************
++** gcLinkShaders
++********************************************************************************
++**
++** Link two shaders and generate a hardware specific state buffer by compiling
++** the compiler generated code through the resource allocator and code
++** generator.
++**
++** INPUT:
++**
++** gcSHADER VertexShader
++** Pointer to a gcSHADER object holding information about the compiled
++** vertex shader.
++**
++** gcSHADER FragmentShader
++** Pointer to a gcSHADER object holding information about the compiled
++** fragment shader.
++**
++** gceSHADER_FLAGS Flags
++** Compiler flags. Can be any of the following:
++**
++** gcvSHADER_DEAD_CODE - Dead code elimination.
++** gcvSHADER_RESOURCE_USAGE - Resource usage optimization.
++** gcvSHADER_OPTIMIZER - Full optimization.
++** gcvSHADER_USE_GL_Z - Use OpenGL ES Z coordinate.
++** gcvSHADER_USE_GL_POSITION - Use OpenGL ES gl_Position.
++** gcvSHADER_USE_GL_FACE - Use OpenGL ES gl_FaceForward.
++**
++** OUTPUT:
++**
++** gctSIZE_T * StateBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'StateBuffer'.
++**
++** gctPOINTER * StateBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLinkShaders(
++ IN gcSHADER VertexShader,
++ IN gcSHADER FragmentShader,
++ IN gceSHADER_FLAGS Flags,
++ OUT gctSIZE_T * StateBufferSize,
++ OUT gctPOINTER * StateBuffer,
++ OUT gcsHINT_PTR * Hints,
++ OUT gcMACHINECODE_PTR *ppVsMachineCode,
++ OUT gcMACHINECODE_PTR *ppFsMachineCode
++ );
++
++/*******************************************************************************
++** gcLoadShaders
++********************************************************************************
++**
++** Load a pre-compiled and pre-linked shader program into the hardware.
++**
++** INPUT:
++**
++** gcoHAL Hal
++** Pointer to a gcoHAL object.
++**
++** gctSIZE_T StateBufferSize
++** The number of bytes in the 'StateBuffer'.
++**
++** gctPOINTER StateBuffer
++** Pointer to the states that make up the shader program.
++**
++** gcsHINT_PTR Hints
++** Pointer to a gcsHINT structure that contains information required
++** when loading the shader states.
++*/
++gceSTATUS
++gcLoadShaders(
++ IN gcoHAL Hal,
++ IN gctSIZE_T StateBufferSize,
++ IN gctPOINTER StateBuffer,
++ IN gcsHINT_PTR Hints
++ );
++
++gceSTATUS
++gcRecompileShaders(
++ IN gcoHAL Hal,
++ IN gcMACHINECODE_PTR pVsMachineCode,
++ IN gcMACHINECODE_PTR pPsMachineCode,
++ /*Recompile variables*/
++ IN OUT gctPOINTER *ppRecompileStateBuffer,
++ IN OUT gctSIZE_T *pRecompileStateBufferSize,
++ IN OUT gcsHINT_PTR *ppRecompileHints,
++ /* native state */
++ IN gctPOINTER pNativeStateBuffer,
++ IN gctSIZE_T nativeStateBufferSize,
++ IN gcsHINT_PTR pNativeHints,
++ /* npt info */
++ IN gctUINT32 Samplers,
++ IN gctUINT32 *SamplerWrapS,
++ IN gctUINT32 *SamplerWrapT
++ );
++
++gceSTATUS
++gcRecompileDepthBias(
++ IN gcoHAL Hal,
++ IN gcMACHINECODE_PTR pVsMachineCode,
++ /*Recompile variables*/
++ IN OUT gctPOINTER *ppRecompileStateBuffer,
++ IN OUT gctSIZE_T *pRecompileStateBufferSize,
++ IN OUT gcsHINT_PTR *ppRecompileHints,
++ /* native state */
++ IN gctPOINTER pNativeStateBuffer,
++ IN gctSIZE_T nativeStateBufferSize,
++ IN gcsHINT_PTR pNativeHints,
++ OUT gctINT * uniformAddr,
++ OUT gctINT * uniformChannel
++ );
++
++/*******************************************************************************
++** gcSaveProgram
++********************************************************************************
++**
++** Save pre-compiled shaders and pre-linked programs to a binary file.
++**
++** INPUT:
++**
++** gcSHADER VertexShader
++** Pointer to vertex shader object.
++**
++** gcSHADER FragmentShader
++** Pointer to fragment shader object.
++**
++** gctSIZE_T ProgramBufferSize
++** Number of bytes in 'ProgramBuffer'.
++**
++** gctPOINTER ProgramBuffer
++** Pointer to buffer containing the program states.
++**
++** gcsHINT_PTR Hints
++** Pointer to HINTS structure for program states.
++**
++** OUTPUT:
++**
++** gctPOINTER * Binary
++** Pointer to a variable receiving the binary data to be saved.
++**
++** gctSIZE_T * BinarySize
++** Pointer to a variable receiving the number of bytes inside 'Binary'.
++*/
++gceSTATUS
++gcSaveProgram(
++ IN gcSHADER VertexShader,
++ IN gcSHADER FragmentShader,
++ IN gctSIZE_T ProgramBufferSize,
++ IN gctPOINTER ProgramBuffer,
++ IN gcsHINT_PTR Hints,
++ OUT gctPOINTER * Binary,
++ OUT gctSIZE_T * BinarySize
++ );
++
++/*******************************************************************************
++** gcLoadProgram
++********************************************************************************
++**
++** Load pre-compiled shaders and pre-linked programs from a binary file.
++**
++** INPUT:
++**
++** gctPOINTER Binary
++** Pointer to the binary data loaded.
++**
++** gctSIZE_T BinarySize
++** Number of bytes in 'Binary'.
++**
++** OUTPUT:
++**
++** gcSHADER VertexShader
++** Pointer to a vertex shader object.
++**
++** gcSHADER FragmentShader
++** Pointer to a fragment shader object.
++**
++** gctSIZE_T * ProgramBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'ProgramBuffer'.
++**
++** gctPOINTER * ProgramBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLoadProgram(
++ IN gctPOINTER Binary,
++ IN gctSIZE_T BinarySize,
++ OUT gcSHADER VertexShader,
++ OUT gcSHADER FragmentShader,
++ OUT gctSIZE_T * ProgramBufferSize,
++ OUT gctPOINTER * ProgramBuffer,
++ OUT gcsHINT_PTR * Hints
++ );
++
++/*******************************************************************************
++** gcCompileKernel
++********************************************************************************
++**
++** Compile a OpenCL kernel shader.
++**
++** INPUT:
++**
++** gcoOS Hal
++** Pointer to an gcoHAL object.
++**
++** gctSIZE_T SourceSize
++** Size of the source buffer in bytes.
++**
++** gctCONST_STRING Source
++** Pointer to the buffer containing the shader source code.
++**
++** OUTPUT:
++**
++** gcSHADER * Binary
++** Pointer to a variable receiving the pointer to a gcSHADER object
++** containing the compiled shader code.
++**
++** gctSTRING * Log
++** Pointer to a variable receiving a string pointer containing the
++** compile log.
++*/
++gceSTATUS
++gcCompileKernel(
++ IN gcoHAL Hal,
++ IN gctSIZE_T SourceSize,
++ IN gctCONST_STRING Source,
++ IN gctCONST_STRING Options,
++ OUT gcSHADER * Binary,
++ OUT gctSTRING * Log
++ );
++
++/*******************************************************************************
++** gcLinkKernel
++********************************************************************************
++**
++** Link OpenCL kernel and generate a hardware specific state buffer by compiling
++** the compiler generated code through the resource allocator and code
++** generator.
++**
++** INPUT:
++**
++** gcSHADER Kernel
++** Pointer to a gcSHADER object holding information about the compiled
++** OpenCL kernel.
++**
++** gceSHADER_FLAGS Flags
++** Compiler flags. Can be any of the following:
++**
++** gcvSHADER_DEAD_CODE - Dead code elimination.
++** gcvSHADER_RESOURCE_USAGE - Resource usage optimization.
++** gcvSHADER_OPTIMIZER - Full optimization.
++** gcvSHADER_USE_GL_Z - Use OpenGL ES Z coordinate.
++** gcvSHADER_USE_GL_POSITION - Use OpenGL ES gl_Position.
++** gcvSHADER_USE_GL_FACE - Use OpenGL ES gl_FaceForward.
++**
++** OUTPUT:
++**
++** gctSIZE_T * StateBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'StateBuffer'.
++**
++** gctPOINTER * StateBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLinkKernel(
++ IN gcSHADER Kernel,
++ IN gceSHADER_FLAGS Flags,
++ OUT gctSIZE_T * StateBufferSize,
++ OUT gctPOINTER * StateBuffer,
++ OUT gcsHINT_PTR * Hints
++ );
++
++/*******************************************************************************
++** gcLoadKernel
++********************************************************************************
++**
++** Load a pre-compiled and pre-linked kernel program into the hardware.
++**
++** INPUT:
++**
++** gctSIZE_T StateBufferSize
++** The number of bytes in the 'StateBuffer'.
++**
++** gctPOINTER StateBuffer
++** Pointer to the states that make up the shader program.
++**
++** gcsHINT_PTR Hints
++** Pointer to a gcsHINT structure that contains information required
++** when loading the shader states.
++*/
++gceSTATUS
++gcLoadKernel(
++ IN gctSIZE_T StateBufferSize,
++ IN gctPOINTER StateBuffer,
++ IN gcsHINT_PTR Hints
++ );
++
++gceSTATUS
++gcInvokeThreadWalker(
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
++
++void
++gcTYPE_GetTypeInfo(
++ IN gcSHADER_TYPE Type,
++ OUT gctINT * Components,
++ OUT gctINT * Rows,
++ OUT gctCONST_STRING * Name
++ );
++
++gctBOOL
++gcOPT_doVaryingPackingForShader(
++ IN gcSHADER Shader
++ );
++
++gceSTATUS
++gcSHADER_PatchNPOTForMachineCode(
++ IN gcSHADER_KIND shaderType,
++ IN gcMACHINECODE_PTR pMachineCode,
++ IN gcNPOT_PATCH_PARAM_PTR pPatchParam,
++ IN gctUINT countOfPatchParam,
++ IN gctUINT hwSupportedInstCount,
++ OUT gctPOINTER* ppCmdBuffer,
++ OUT gctUINT32* pByteSizeOfCmdBuffer,
++ IN OUT gcsHINT_PTR pHints /* User needs copy original hints to this one, then passed this one in */
++ );
++
++gceSTATUS
++gcSHADER_PatchZBiasForMachineCodeVS(
++ IN gcMACHINECODE_PTR pMachineCode,
++ IN OUT gcZBIAS_PATCH_PARAM_PTR pPatchParam,
++ IN gctUINT hwSupportedInstCount,
++ OUT gctPOINTER* ppCmdBuffer,
++ OUT gctUINT32* pByteSizeOfCmdBuffer,
++ IN OUT gcsHINT_PTR pHints /* User needs copy original hints to this one, then passed this one in */
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_compiler_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1051 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_driver_h_
++#define __gc_hal_driver_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_driver_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++#define IOCTL_GCHAL_KERNEL_INTERFACE 30001
++#define IOCTL_GCHAL_TERMINATE 30002
++
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++typedef enum _gceHAL_COMMAND_CODES
++{
++ /* Generic query. */
++ gcvHAL_QUERY_VIDEO_MEMORY,
++ gcvHAL_QUERY_CHIP_IDENTITY,
++
++ /* Contiguous memory. */
++ gcvHAL_ALLOCATE_NON_PAGED_MEMORY,
++ gcvHAL_FREE_NON_PAGED_MEMORY,
++ gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY,
++ gcvHAL_FREE_CONTIGUOUS_MEMORY,
++
++ /* Video memory allocation. */
++ gcvHAL_ALLOCATE_VIDEO_MEMORY, /* Enforced alignment. */
++ gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY, /* No alignment. */
++ gcvHAL_FREE_VIDEO_MEMORY,
++
++ /* Physical-to-logical mapping. */
++ gcvHAL_MAP_MEMORY,
++ gcvHAL_UNMAP_MEMORY,
++
++ /* Logical-to-physical mapping. */
++ gcvHAL_MAP_USER_MEMORY,
++ gcvHAL_UNMAP_USER_MEMORY,
++
++ /* Surface lock/unlock. */
++ gcvHAL_LOCK_VIDEO_MEMORY,
++ gcvHAL_UNLOCK_VIDEO_MEMORY,
++
++ /* Event queue. */
++ gcvHAL_EVENT_COMMIT,
++
++ gcvHAL_USER_SIGNAL,
++ gcvHAL_SIGNAL,
++ gcvHAL_WRITE_DATA,
++
++ gcvHAL_COMMIT,
++ gcvHAL_STALL,
++
++ gcvHAL_READ_REGISTER,
++ gcvHAL_WRITE_REGISTER,
++
++ gcvHAL_GET_PROFILE_SETTING,
++ gcvHAL_SET_PROFILE_SETTING,
++
++ gcvHAL_READ_ALL_PROFILE_REGISTERS,
++ gcvHAL_PROFILE_REGISTERS_2D,
++#if VIVANTE_PROFILER_PERDRAW
++ gcvHAL_READ_PROFILER_REGISTER_SETTING,
++#endif
++
++ /* Power management. */
++ gcvHAL_SET_POWER_MANAGEMENT_STATE,
++ gcvHAL_QUERY_POWER_MANAGEMENT_STATE,
++
++ gcvHAL_GET_BASE_ADDRESS,
++
++ gcvHAL_SET_IDLE, /* reserved */
++
++ /* Queries. */
++ gcvHAL_QUERY_KERNEL_SETTINGS,
++
++ /* Reset. */
++ gcvHAL_RESET,
++
++ /* Map physical address into handle. */
++ gcvHAL_MAP_PHYSICAL,
++
++ /* Debugger stuff. */
++ gcvHAL_DEBUG,
++
++ /* Cache stuff. */
++ gcvHAL_CACHE,
++
++ /* TimeStamp */
++ gcvHAL_TIMESTAMP,
++
++ /* Database. */
++ gcvHAL_DATABASE,
++
++ /* Version. */
++ gcvHAL_VERSION,
++
++ /* Chip info */
++ gcvHAL_CHIP_INFO,
++
++ /* Process attaching/detaching. */
++ gcvHAL_ATTACH,
++ gcvHAL_DETACH,
++
++ /* Composition. */
++ gcvHAL_COMPOSE,
++
++ /* Set timeOut value */
++ gcvHAL_SET_TIMEOUT,
++
++ /* Frame database. */
++ gcvHAL_GET_FRAME_INFO,
++
++ /* Shared info for each process */
++ gcvHAL_GET_SHARED_INFO,
++ gcvHAL_SET_SHARED_INFO,
++ gcvHAL_QUERY_COMMAND_BUFFER,
++
++ gcvHAL_COMMIT_DONE,
++
++ /* GPU and event dump */
++ gcvHAL_DUMP_GPU_STATE,
++ gcvHAL_DUMP_EVENT,
++
++ /* Virtual command buffer. */
++ gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER,
++ gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER,
++
++ /* FSCALE_VAL. */
++ gcvHAL_SET_FSCALE_VALUE,
++ gcvHAL_GET_FSCALE_VALUE,
++
++ /* Reset time stamp. */
++ gcvHAL_QUERY_RESET_TIME_STAMP,
++
++ /* Sync point operations. */
++ gcvHAL_SYNC_POINT,
++
++ /* Create native fence and return its fd. */
++ gcvHAL_CREATE_NATIVE_FENCE,
++
++ /* Video memory database */
++ gcvHAL_VIDMEM_DATABASE,
++}
++gceHAL_COMMAND_CODES;
++
++/******************************************************************************\
++****************************** Interface Structure *****************************
++\******************************************************************************/
++
++#define gcdMAX_PROFILE_FILE_NAME 128
++
++/* Kernel settings. */
++typedef struct _gcsKERNEL_SETTINGS
++{
++ /* Used RealTime signal between kernel and user. */
++ gctINT signal;
++}
++gcsKERNEL_SETTINGS;
++
++
++/* gcvHAL_QUERY_CHIP_IDENTITY */
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY * gcsHAL_QUERY_CHIP_IDENTITY_PTR;
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY
++{
++
++ /* Chip model. */
++ gceCHIPMODEL chipModel;
++
++ /* Revision value.*/
++ gctUINT32 chipRevision;
++
++ /* Supported feature fields. */
++ gctUINT32 chipFeatures;
++
++ /* Supported minor feature fields. */
++ gctUINT32 chipMinorFeatures;
++
++ /* Supported minor feature 1 fields. */
++ gctUINT32 chipMinorFeatures1;
++
++ /* Supported minor feature 2 fields. */
++ gctUINT32 chipMinorFeatures2;
++
++ /* Supported minor feature 3 fields. */
++ gctUINT32 chipMinorFeatures3;
++
++ /* Supported minor feature 4 fields. */
++ gctUINT32 chipMinorFeatures4;
++
++ /* Number of streams supported. */
++ gctUINT32 streamCount;
++
++ /* Total number of temporary registers per thread. */
++ gctUINT32 registerMax;
++
++ /* Maximum number of threads. */
++ gctUINT32 threadCount;
++
++ /* Number of shader cores. */
++ gctUINT32 shaderCoreCount;
++
++ /* Size of the vertex cache. */
++ gctUINT32 vertexCacheSize;
++
++ /* Number of entries in the vertex output buffer. */
++ gctUINT32 vertexOutputBufferSize;
++
++ /* Number of pixel pipes. */
++ gctUINT32 pixelPipes;
++
++ /* Number of instructions. */
++ gctUINT32 instructionCount;
++
++ /* Number of constants. */
++ gctUINT32 numConstants;
++
++ /* Buffer size */
++ gctUINT32 bufferSize;
++
++ /* Number of varyings */
++ gctUINT32 varyingsCount;
++
++ /* Supertile layout style in hardware */
++ gctUINT32 superTileMode;
++
++ /* Special control bits for 2D chip. */
++ gctUINT32 chip2DControl;
++}
++gcsHAL_QUERY_CHIP_IDENTITY;
++
++/* gcvHAL_COMPOSE. */
++typedef struct _gcsHAL_COMPOSE * gcsHAL_COMPOSE_PTR;
++typedef struct _gcsHAL_COMPOSE
++{
++ /* Composition state buffer. */
++ gctUINT64 physical;
++ gctUINT64 logical;
++ gctUINT offset;
++ gctUINT size;
++
++ /* Composition end signal. */
++ gctUINT64 process;
++ gctUINT64 signal;
++
++ /* User signals. */
++ gctUINT64 userProcess;
++ gctUINT64 userSignal1;
++ gctUINT64 userSignal2;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. */
++ gctINT32 coid;
++
++ /* Set by server. */
++ gctINT32 rcvid;
++#endif
++}
++gcsHAL_COMPOSE;
++
++
++typedef struct _gcsHAL_INTERFACE
++{
++ /* Command code. */
++ gceHAL_COMMAND_CODES command;
++
++ /* Hardware type. */
++ gceHARDWARE_TYPE hardwareType;
++
++ /* Status value. */
++ gceSTATUS status;
++
++ /* Handle to this interface channel. */
++ gctUINT64 handle;
++
++ /* Pid of the client. */
++ gctUINT32 pid;
++
++ /* Union of command structures. */
++ union _u
++ {
++ /* gcvHAL_GET_BASE_ADDRESS */
++ struct _gcsHAL_GET_BASE_ADDRESS
++ {
++ /* Physical memory address of internal memory. */
++ OUT gctUINT32 baseAddress;
++ }
++ GetBaseAddress;
++
++ /* gcvHAL_QUERY_VIDEO_MEMORY */
++ struct _gcsHAL_QUERY_VIDEO_MEMORY
++ {
++ /* Physical memory address of internal memory. Just a name. */
++ OUT gctUINT32 internalPhysical;
++
++ /* Size in bytes of internal memory. */
++ OUT gctUINT64 internalSize;
++
++ /* Physical memory address of external memory. Just a name. */
++ OUT gctUINT32 externalPhysical;
++
++ /* Size in bytes of external memory.*/
++ OUT gctUINT64 externalSize;
++
++ /* Physical memory address of contiguous memory. Just a name. */
++ OUT gctUINT32 contiguousPhysical;
++
++ /* Size in bytes of contiguous memory.*/
++ OUT gctUINT64 contiguousSize;
++ }
++ QueryVideoMemory;
++
++ /* gcvHAL_QUERY_CHIP_IDENTITY */
++ gcsHAL_QUERY_CHIP_IDENTITY QueryChipIdentity;
++
++ /* gcvHAL_MAP_MEMORY */
++ struct _gcsHAL_MAP_MEMORY
++ {
++ /* Physical memory address to map. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to map. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory. */
++ OUT gctUINT64 logical;
++ }
++ MapMemory;
++
++ /* gcvHAL_UNMAP_MEMORY */
++ struct _gcsHAL_UNMAP_MEMORY
++ {
++ /* Physical memory address to unmap. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to unmap. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory to unmap. */
++ IN gctUINT64 logical;
++ }
++ UnmapMemory;
++
++ /* gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT bytes;
++
++ /* Buffer alignment. */
++ IN gctUINT alignment;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ OUT gctUINT64 node;
++ }
++ AllocateLinearVideoMemory;
++
++ /* gcvHAL_ALLOCATE_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_VIDEO_MEMORY
++ {
++ /* Width of rectangle to allocate. */
++ IN OUT gctUINT width;
++
++ /* Height of rectangle to allocate. */
++ IN OUT gctUINT height;
++
++ /* Depth of rectangle to allocate. */
++ IN gctUINT depth;
++
++ /* Format rectangle to allocate in gceSURF_FORMAT. */
++ IN gceSURF_FORMAT format;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ OUT gctUINT64 node;
++ }
++ AllocateVideoMemory;
++
++ /* gcvHAL_FREE_VIDEO_MEMORY */
++ struct _gcsHAL_FREE_VIDEO_MEMORY
++ {
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++#ifdef __QNXNTO__
++/* TODO: This is part of the unlock - why is it here? */
++ /* Mapped logical address to unmap in user space. */
++ OUT gctUINT64 memory;
++
++ /* Number of bytes to allocated. */
++ OUT gctUINT64 bytes;
++#endif
++ }
++ FreeVideoMemory;
++
++ /* gcvHAL_LOCK_VIDEO_MEMORY */
++ struct _gcsHAL_LOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory gcuVIDMEM_NODE gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++ /* Cache configuration. */
++ /* Only gcvPOOL_CONTIGUOUS and gcvPOOL_VIRUTAL
++ ** can be configured */
++ IN gctBOOL cacheable;
++
++ /* Hardware specific address. */
++ OUT gctUINT32 address;
++
++ /* Mapped logical address. */
++ OUT gctUINT64 memory;
++ }
++ LockVideoMemory;
++
++ /* gcvHAL_UNLOCK_VIDEO_MEMORY */
++ struct _gcsHAL_UNLOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++ /* Type of surface. */
++ IN gceSURF_TYPE type;
++
++ /* Flag to unlock surface asynchroneously. */
++ IN OUT gctBOOL asynchroneous;
++ }
++ UnlockVideoMemory;
++
++ /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */
++ struct _gcsHAL_ALLOCATE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateNonPagedMemory;
++
++ /* gcvHAL_FREE_NON_PAGED_MEMORY */
++ struct _gcsHAL_FREE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeNonPagedMemory;
++
++ /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */
++ struct _gcsHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateVirtualCommandBuffer;
++
++ /* gcvHAL_FREE_NON_PAGED_MEMORY */
++ struct _gcsHAL_FREE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeVirtualCommandBuffer;
++
++ /* gcvHAL_EVENT_COMMIT. */
++ struct _gcsHAL_EVENT_COMMIT
++ {
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++ }
++ Event;
++
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_COMMIT
++ {
++ /* Context buffer object gckCONTEXT. */
++ IN gctUINT64 context;
++
++ /* Command buffer gcoCMDBUF. */
++ IN gctUINT64 commandBuffer;
++
++ /* State delta buffer in gcsSTATE_DELTA. */
++ gctUINT64 delta;
++
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++ }
++ Commit;
++
++ /* gcvHAL_MAP_USER_MEMORY */
++ struct _gcsHAL_MAP_USER_MEMORY
++ {
++ /* Base address of user memory to map. */
++ IN gctUINT64 memory;
++
++ /* Physical address of user memory to map. */
++ IN gctUINT32 physical;
++
++ /* Size of user memory in bytes to map. */
++ IN gctUINT64 size;
++
++ /* Info record required by gcvHAL_UNMAP_USER_MEMORY. Just a name. */
++ OUT gctUINT32 info;
++
++ /* Physical address of mapped memory. */
++ OUT gctUINT32 address;
++ }
++ MapUserMemory;
++
++ /* gcvHAL_UNMAP_USER_MEMORY */
++ struct _gcsHAL_UNMAP_USER_MEMORY
++ {
++ /* Base address of user memory to unmap. */
++ IN gctUINT64 memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctUINT64 size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. Just a name. */
++ IN gctUINT32 info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++ }
++ UnmapUserMemory;
++#if !USE_NEW_LINUX_SIGNAL
++ /* gcsHAL_USER_SIGNAL */
++ struct _gcsHAL_USER_SIGNAL
++ {
++ /* Command. */
++ gceUSER_SIGNAL_COMMAND_CODES command;
++
++ /* Signal ID. */
++ IN OUT gctINT id;
++
++ /* Reset mode. */
++ IN gctBOOL manualReset;
++
++ /* Wait timedout. */
++ IN gctUINT32 wait;
++
++ /* State. */
++ IN gctBOOL state;
++ }
++ UserSignal;
++#endif
++
++ /* gcvHAL_SIGNAL. */
++ struct _gcsHAL_SIGNAL
++ {
++ /* Signal handle to signal gctSIGNAL. */
++ IN gctUINT64 signal;
++
++ /* Reserved gctSIGNAL. */
++ IN gctUINT64 auxSignal;
++
++ /* Process owning the signal gctHANDLE. */
++ IN gctUINT64 process;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. Set by client in gcoOS_CreateSignal. */
++ IN gctINT32 coid;
++
++ /* Set by server. */
++ IN gctINT32 rcvid;
++#endif
++ /* Event generated from where of pipeline */
++ IN gceKERNEL_WHERE fromWhere;
++ }
++ Signal;
++
++ /* gcvHAL_WRITE_DATA. */
++ struct _gcsHAL_WRITE_DATA
++ {
++ /* Address to write data to. */
++ IN gctUINT32 address;
++
++ /* Data to write. */
++ IN gctUINT32 data;
++ }
++ WriteData;
++
++ /* gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_ALLOCATE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Hardware address of allocation. */
++ OUT gctUINT32 address;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateContiguousMemory;
++
++ /* gcvHAL_FREE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_FREE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeContiguousMemory;
++
++ /* gcvHAL_READ_REGISTER */
++ struct _gcsHAL_READ_REGISTER
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++ /* Data read. */
++ OUT gctUINT32 data;
++ }
++ ReadRegisterData;
++
++ /* gcvHAL_WRITE_REGISTER */
++ struct _gcsHAL_WRITE_REGISTER
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++ /* Data read. */
++ IN gctUINT32 data;
++ }
++ WriteRegisterData;
++
++#if VIVANTE_PROFILER
++ /* gcvHAL_GET_PROFILE_SETTING */
++ struct _gcsHAL_GET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ OUT gctBOOL enable;
++
++ /* The profile file name */
++ OUT gctCHAR fileName[gcdMAX_PROFILE_FILE_NAME];
++ }
++ GetProfileSetting;
++
++ /* gcvHAL_SET_PROFILE_SETTING */
++ struct _gcsHAL_SET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ IN gctBOOL enable;
++
++ /* The profile file name */
++ IN gctCHAR fileName[gcdMAX_PROFILE_FILE_NAME];
++ }
++ SetProfileSetting;
++
++#if VIVANTE_PROFILER_PERDRAW
++ /* gcvHAL_READ_PROFILER_REGISTER_SETTING */
++ struct _gcsHAL_READ_PROFILER_REGISTER_SETTING
++ {
++ /*Should Clear Register*/
++ IN gctBOOL bclear;
++ }
++ SetProfilerRegisterClear;
++#endif
++
++ /* gcvHAL_READ_ALL_PROFILE_REGISTERS */
++ struct _gcsHAL_READ_ALL_PROFILE_REGISTERS
++ {
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++#endif
++ /* Data read. */
++ OUT gcsPROFILER_COUNTERS counters;
++ }
++ RegisterProfileData;
++
++ /* gcvHAL_PROFILE_REGISTERS_2D */
++ struct _gcsHAL_PROFILE_REGISTERS_2D
++ {
++ /* Data read in gcs2D_PROFILE. */
++ OUT gctUINT64 hwProfile2D;
++ }
++ RegisterProfileData2D;
++#endif
++ /* Power management. */
++ /* gcvHAL_SET_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_SET_POWER_MANAGEMENT
++ {
++ /* Data read. */
++ IN gceCHIPPOWERSTATE state;
++ }
++ SetPowerManagement;
++
++ /* gcvHAL_QUERY_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_QUERY_POWER_MANAGEMENT
++ {
++ /* Data read. */
++ OUT gceCHIPPOWERSTATE state;
++
++ /* Idle query. */
++ OUT gctBOOL isIdle;
++ }
++ QueryPowerManagement;
++
++ /* gcvHAL_QUERY_KERNEL_SETTINGS */
++ struct _gcsHAL_QUERY_KERNEL_SETTINGS
++ {
++ /* Settings.*/
++ OUT gcsKERNEL_SETTINGS settings;
++ }
++ QueryKernelSettings;
++
++ /* gcvHAL_MAP_PHYSICAL */
++ struct _gcsHAL_MAP_PHYSICAL
++ {
++ /* gcvTRUE to map, gcvFALSE to unmap. */
++ IN gctBOOL map;
++
++ /* Physical address. */
++ IN OUT gctUINT64 physical;
++ }
++ MapPhysical;
++
++ /* gcvHAL_DEBUG */
++ struct _gcsHAL_DEBUG
++ {
++ /* If gcvTRUE, set the debug information. */
++ IN gctBOOL set;
++ IN gctUINT32 level;
++ IN gctUINT32 zones;
++ IN gctBOOL enable;
++
++ IN gceDEBUG_MESSAGE_TYPE type;
++ IN gctUINT32 messageSize;
++
++ /* Message to print if not empty. */
++ IN gctCHAR message[80];
++ }
++ Debug;
++
++ /* gcvHAL_CACHE */
++ struct _gcsHAL_CACHE
++ {
++ IN gceCACHEOPERATION operation;
++ /* gctHANDLE */
++ IN gctUINT64 process;
++ IN gctUINT64 logical;
++ IN gctUINT64 bytes;
++ /* gcuVIDMEM_NODE_PTR */
++ IN gctUINT64 node;
++ }
++ Cache;
++
++ /* gcvHAL_TIMESTAMP */
++ struct _gcsHAL_TIMESTAMP
++ {
++ /* Timer select. */
++ IN gctUINT32 timer;
++
++ /* Timer request type (0-stop, 1-start, 2-send delta). */
++ IN gctUINT32 request;
++
++ /* Result of delta time in microseconds. */
++ OUT gctINT32 timeDelta;
++ }
++ TimeStamp;
++
++ /* gcvHAL_DATABASE */
++ struct _gcsHAL_DATABASE
++ {
++ /* Set to gcvTRUE if you want to query a particular process ID.
++ ** Set to gcvFALSE to query the last detached process. */
++ IN gctBOOL validProcessID;
++
++ /* Process ID to query. */
++ IN gctUINT32 processID;
++
++ /* Information. */
++ OUT gcuDATABASE_INFO vidMem;
++ OUT gcuDATABASE_INFO nonPaged;
++ OUT gcuDATABASE_INFO contiguous;
++ OUT gcuDATABASE_INFO gpuIdle;
++ }
++ Database;
++
++ /* gcvHAL_VIDMEM_DATABASE */
++ struct _gcsHAL_VIDMEM_DATABASE
++ {
++ /* Set to gcvTRUE if you want to query a particular process ID.
++ ** Set to gcvFALSE to query the last detached process. */
++ IN gctBOOL validProcessID;
++
++ /* Process ID to query. */
++ IN gctUINT32 processID;
++
++ /* Information. */
++ OUT gcuDATABASE_INFO vidMemResv;
++ OUT gcuDATABASE_INFO vidMemCont;
++ OUT gcuDATABASE_INFO vidMemVirt;
++ }
++ VidMemDatabase;
++
++ /* gcvHAL_VERSION */
++ struct _gcsHAL_VERSION
++ {
++ /* Major version: N.n.n. */
++ OUT gctINT32 major;
++
++ /* Minor version: n.N.n. */
++ OUT gctINT32 minor;
++
++ /* Patch version: n.n.N. */
++ OUT gctINT32 patch;
++
++ /* Build version. */
++ OUT gctUINT32 build;
++ }
++ Version;
++
++ /* gcvHAL_CHIP_INFO */
++ struct _gcsHAL_CHIP_INFO
++ {
++ /* Chip count. */
++ OUT gctINT32 count;
++
++ /* Chip types. */
++ OUT gceHARDWARE_TYPE types[gcdCHIP_COUNT];
++ }
++ ChipInfo;
++
++ /* gcvHAL_ATTACH */
++ struct _gcsHAL_ATTACH
++ {
++ /* Context buffer object gckCONTEXT. Just a name. */
++ OUT gctUINT32 context;
++
++ /* Number of states in the buffer. */
++ OUT gctUINT64 stateCount;
++ }
++ Attach;
++
++ /* gcvHAL_DETACH */
++ struct _gcsHAL_DETACH
++ {
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++ }
++ Detach;
++
++ /* gcvHAL_COMPOSE. */
++ gcsHAL_COMPOSE Compose;
++
++ /* gcvHAL_GET_FRAME_INFO. */
++ struct _gcsHAL_GET_FRAME_INFO
++ {
++ /* gcsHAL_FRAME_INFO* */
++ OUT gctUINT64 frameInfo;
++ }
++ GetFrameInfo;
++
++ /* gcvHAL_SET_TIME_OUT. */
++ struct _gcsHAL_SET_TIMEOUT
++ {
++ gctUINT32 timeOut;
++ }
++ SetTimeOut;
++
++#if gcdENABLE_VG
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_VGCOMMIT
++ {
++ /* Context buffer in gcsVGCONTEXT. */
++ IN gctUINT64 context;
++
++ /* Command queue in gcsVGCMDQUEUE. */
++ IN gctUINT64 queue;
++
++ /* Number of entries in the queue. */
++ IN gctUINT entryCount;
++
++ /* Task table in gcsTASK_MASTER_TABLE. */
++ IN gctUINT64 taskTable;
++ }
++ VGCommit;
++
++ /* gcvHAL_QUERY_COMMAND_BUFFER */
++ struct _gcsHAL_QUERY_COMMAND_BUFFER
++ {
++ /* Command buffer attributes. */
++ OUT gcsCOMMAND_BUFFER_INFO information;
++ }
++ QueryCommandBuffer;
++
++#endif
++
++ struct _gcsHAL_GET_SHARED_INFO
++ {
++ /* Process id. */
++ IN gctUINT32 pid;
++
++ /* Data id. */
++ IN gctUINT32 dataId;
++
++ /* Data size. */
++ IN gctSIZE_T bytes;
++
++ /* Pointer to save the shared data. */
++ OUT gctPOINTER data;
++ }
++ GetSharedInfo;
++
++ struct _gcsHAL_SET_SHARED_INFO
++ {
++ /* Data id. */
++ IN gctUINT32 dataId;
++
++ /* Data to be shared. */
++ IN gctPOINTER data;
++
++ /* Data size. */
++ IN gctSIZE_T bytes;
++ }
++ SetSharedInfo;
++
++ struct _gcsHAL_SET_FSCALE_VALUE
++ {
++ IN gctUINT value;
++ }
++ SetFscaleValue;
++
++ struct _gcsHAL_GET_FSCALE_VALUE
++ {
++ OUT gctUINT value;
++ OUT gctUINT minValue;
++ OUT gctUINT maxValue;
++ }
++ GetFscaleValue;
++
++ struct _gcsHAL_QUERY_RESET_TIME_STAMP
++ {
++ OUT gctUINT64 timeStamp;
++ }
++ QueryResetTimeStamp;
++
++ struct _gcsHAL_SYNC_POINT
++ {
++ /* Command. */
++ gceSYNC_POINT_COMMAND_CODES command;
++
++ /* Sync point. */
++ IN OUT gctUINT64 syncPoint;
++
++ /* From where. */
++ IN gceKERNEL_WHERE fromWhere;
++
++ /* Signaled state. */
++ OUT gctBOOL state;
++ }
++ SyncPoint;
++
++ struct _gcsHAL_CREATE_NATIVE_FENCE
++ {
++ /* Signal id to dup. */
++ IN gctUINT64 syncPoint;
++
++ /* Native fence file descriptor. */
++ OUT gctINT fenceFD;
++
++ }
++ CreateNativeFence;
++ }
++ u;
++}
++gcsHAL_INTERFACE;
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver_vg.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,270 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_driver_vg_h_
++#define __gc_hal_driver_vg_h_
++
++
++
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++/******************************************************************************\
++********************* Command buffer information structure. ********************
++\******************************************************************************/
++
++typedef struct _gcsCOMMAND_BUFFER_INFO * gcsCOMMAND_BUFFER_INFO_PTR;
++typedef struct _gcsCOMMAND_BUFFER_INFO
++{
++ /* FE command buffer interrupt ID. */
++ gctINT32 feBufferInt;
++
++ /* TS overflow interrupt ID. */
++ gctINT32 tsOverflowInt;
++
++ /* Alignment and mask for the buffer address. */
++ gctUINT addressMask;
++ gctSIZE_T addressAlignment;
++
++ /* Alignment for each command. */
++ gctSIZE_T commandAlignment;
++
++ /* Number of bytes required by the STATE command. */
++ gctSIZE_T stateCommandSize;
++
++ /* Number of bytes required by the RESTART command. */
++ gctSIZE_T restartCommandSize;
++
++ /* Number of bytes required by the FETCH command. */
++ gctSIZE_T fetchCommandSize;
++
++ /* Number of bytes required by the CALL command. */
++ gctSIZE_T callCommandSize;
++
++ /* Number of bytes required by the RETURN command. */
++ gctSIZE_T returnCommandSize;
++
++ /* Number of bytes required by the EVENT command. */
++ gctSIZE_T eventCommandSize;
++
++ /* Number of bytes required by the END command. */
++ gctSIZE_T endCommandSize;
++
++ /* Number of bytes reserved at the tail of a static command buffer. */
++ gctSIZE_T staticTailSize;
++
++ /* Number of bytes reserved at the tail of a dynamic command buffer. */
++ gctSIZE_T dynamicTailSize;
++}
++gcsCOMMAND_BUFFER_INFO;
++
++/******************************************************************************\
++******************************** Task Structures *******************************
++\******************************************************************************/
++
++typedef enum _gceTASK
++{
++ gcvTASK_LINK,
++ gcvTASK_CLUSTER,
++ gcvTASK_INCREMENT,
++ gcvTASK_DECREMENT,
++ gcvTASK_SIGNAL,
++ gcvTASK_LOCKDOWN,
++ gcvTASK_UNLOCK_VIDEO_MEMORY,
++ gcvTASK_FREE_VIDEO_MEMORY,
++ gcvTASK_FREE_CONTIGUOUS_MEMORY,
++ gcvTASK_UNMAP_USER_MEMORY
++}
++gceTASK;
++
++typedef struct _gcsTASK_HEADER * gcsTASK_HEADER_PTR;
++typedef struct _gcsTASK_HEADER
++{
++ /* Task ID. */
++ IN gceTASK id;
++}
++gcsTASK_HEADER;
++
++typedef struct _gcsTASK_LINK * gcsTASK_LINK_PTR;
++typedef struct _gcsTASK_LINK
++{
++ /* Task ID (gcvTASK_LINK). */
++ IN gceTASK id;
++
++ /* Pointer to the next task container. */
++ IN gctPOINTER cotainer;
++
++ /* Pointer to the next task from the next task container. */
++ IN gcsTASK_HEADER_PTR task;
++}
++gcsTASK_LINK;
++
++typedef struct _gcsTASK_CLUSTER * gcsTASK_CLUSTER_PTR;
++typedef struct _gcsTASK_CLUSTER
++{
++ /* Task ID (gcvTASK_CLUSTER). */
++ IN gceTASK id;
++
++ /* Number of tasks in the cluster. */
++ IN gctUINT taskCount;
++}
++gcsTASK_CLUSTER;
++
++typedef struct _gcsTASK_INCREMENT * gcsTASK_INCREMENT_PTR;
++typedef struct _gcsTASK_INCREMENT
++{
++ /* Task ID (gcvTASK_INCREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to increment. */
++ IN gctUINT32 address;
++}
++gcsTASK_INCREMENT;
++
++typedef struct _gcsTASK_DECREMENT * gcsTASK_DECREMENT_PTR;
++typedef struct _gcsTASK_DECREMENT
++{
++ /* Task ID (gcvTASK_DECREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to decrement. */
++ IN gctUINT32 address;
++}
++gcsTASK_DECREMENT;
++
++typedef struct _gcsTASK_SIGNAL * gcsTASK_SIGNAL_PTR;
++typedef struct _gcsTASK_SIGNAL
++{
++ /* Task ID (gcvTASK_SIGNAL). */
++ IN gceTASK id;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ IN gctINT32 coid;
++ IN gctINT32 rcvid;
++#endif
++}
++gcsTASK_SIGNAL;
++
++typedef struct _gcsTASK_LOCKDOWN * gcsTASK_LOCKDOWN_PTR;
++typedef struct _gcsTASK_LOCKDOWN
++{
++ /* Task ID (gcvTASK_LOCKDOWN). */
++ IN gceTASK id;
++
++ /* Address of the user space counter. */
++ IN gctUINT32 userCounter;
++
++ /* Address of the kernel space counter. */
++ IN gctUINT32 kernelCounter;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++}
++gcsTASK_LOCKDOWN;
++
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY * gcsTASK_UNLOCK_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_UNLOCK_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++}
++gcsTASK_UNLOCK_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY * gcsTASK_FREE_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++}
++gcsTASK_FREE_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY * gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_CONTIGUOUS_MEMORY). */
++ IN gceTASK id;
++
++ /* Number of bytes allocated. */
++ IN gctSIZE_T bytes;
++
++ /* Physical address of allocation. */
++ IN gctPHYS_ADDR physical;
++
++ /* Logical address of allocation. */
++ IN gctPOINTER logical;
++}
++gcsTASK_FREE_CONTIGUOUS_MEMORY;
++
++typedef struct _gcsTASK_UNMAP_USER_MEMORY * gcsTASK_UNMAP_USER_MEMORY_PTR;
++typedef struct _gcsTASK_UNMAP_USER_MEMORY
++{
++ /* Task ID (gcvTASK_UNMAP_USER_MEMORY). */
++ IN gceTASK id;
++
++ /* Base address of user memory to unmap. */
++ IN gctPOINTER memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctSIZE_T size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. */
++ IN gctPOINTER info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++}
++gcsTASK_UNMAP_USER_MEMORY;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_dump.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_dump.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_dump.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_dump.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,88 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_dump_h_
++#define __gc_hal_dump_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++** FILE LAYOUT:
++**
++** gcsDUMP_FILE structure
++**
++** gcsDUMP_DATA frame
++** gcsDUMP_DATA or gcDUMP_DATA_SIZE records rendingring the frame
++** gctUINT8 data[length]
++*/
++
++#define gcvDUMP_FILE_SIGNATURE gcmCC('g','c','D','B')
++
++typedef struct _gcsDUMP_FILE
++{
++ gctUINT32 signature; /* File signature */
++ gctSIZE_T length; /* Length of file */
++ gctUINT32 frames; /* Number of frames in file */
++}
++gcsDUMP_FILE;
++
++typedef enum _gceDUMP_TAG
++{
++ gcvTAG_SURFACE = gcmCC('s','u','r','f'),
++ gcvTAG_FRAME = gcmCC('f','r','m',' '),
++ gcvTAG_COMMAND = gcmCC('c','m','d',' '),
++ gcvTAG_INDEX = gcmCC('i','n','d','x'),
++ gcvTAG_STREAM = gcmCC('s','t','r','m'),
++ gcvTAG_TEXTURE = gcmCC('t','e','x','t'),
++ gcvTAG_RENDER_TARGET = gcmCC('r','n','d','r'),
++ gcvTAG_DEPTH = gcmCC('z','b','u','f'),
++ gcvTAG_RESOLVE = gcmCC('r','s','l','v'),
++ gcvTAG_DELETE = gcmCC('d','e','l',' '),
++}
++gceDUMP_TAG;
++
++typedef struct _gcsDUMP_SURFACE
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctUINT32 address; /* Address of the surface. */
++ gctINT16 width; /* Width of surface. */
++ gctINT16 height; /* Height of surface. */
++ gceSURF_FORMAT format; /* Surface pixel format. */
++ gctSIZE_T length; /* Number of bytes inside the surface. */
++}
++gcsDUMP_SURFACE;
++
++typedef struct _gcsDUMP_DATA
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctSIZE_T length; /* Number of bytes of data. */
++ gctUINT32 address; /* Address for the data. */
++}
++gcsDUMP_DATA;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_dump_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,627 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_eglplatform_h_
++#define __gc_hal_eglplatform_h_
++
++/* Include VDK types. */
++#include "gc_hal_types.h"
++#include "gc_hal_base.h"
++#include "gc_hal_eglplatform_type.h"
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__)
++/* Win32 and Windows CE platforms. */
++#include <windows.h>
++typedef HDC HALNativeDisplayType;
++typedef HWND HALNativeWindowType;
++typedef HBITMAP HALNativePixmapType;
++
++typedef struct __BITFIELDINFO{
++ BITMAPINFO bmi;
++ RGBQUAD bmiColors[2];
++} BITFIELDINFO;
++
++#elif defined(LINUX) && defined(EGL_API_DFB) && !defined(__APPLE__)
++#include <directfb.h>
++typedef struct _DFBDisplay * HALNativeDisplayType;
++typedef struct _DFBWindow * HALNativeWindowType;
++typedef struct _DFBPixmap * HALNativePixmapType;
++
++#elif defined(LINUX) && defined(EGL_API_FB) && !defined(__APPLE__)
++
++#if defined(EGL_API_WL)
++/* Wayland platform. */
++#include "wayland-server.h"
++#include <wayland-egl.h>
++
++#define WL_EGL_NUM_BACKBUFFERS 3
++
++typedef struct _gcsWL_VIV_BUFFER
++{
++ struct wl_resource *wl_buffer;
++ gcoSURF surface;
++ gctINT32 width, height;
++} gcsWL_VIV_BUFFER;
++
++typedef struct _gcsWL_EGL_DISPLAY
++{
++ struct wl_display* wl_display;
++ struct wl_viv* wl_viv;
++ struct wl_registry *registry;
++ struct wl_event_queue *wl_queue;
++} gcsWL_EGL_DISPLAY;
++
++typedef struct _gcsWL_EGL_BUFFER_INFO
++{
++ gctINT32 width;
++ gctINT32 height;
++ gctINT32 stride;
++ gceSURF_FORMAT format;
++ gcuVIDMEM_NODE_PTR node;
++ gcePOOL pool;
++ gctUINT bytes;
++ gcoSURF surface;
++ gcoSURF attached_surface;
++ gctINT32 invalidate;
++ gctBOOL locked;
++} gcsWL_EGL_BUFFER_INFO;
++
++typedef struct _gcsWL_EGL_BUFFER
++{
++ struct wl_buffer* wl_buffer;
++ gcsWL_EGL_BUFFER_INFO info;
++} gcsWL_EGL_BUFFER;
++
++typedef struct _gcsWL_EGL_WINDOW_INFO
++{
++ gctINT32 dx;
++ gctINT32 dy;
++ gctUINT width;
++ gctUINT height;
++ gctINT32 attached_width;
++ gctINT32 attached_height;
++ gceSURF_FORMAT format;
++ gctUINT bpp;
++} gcsWL_EGL_WINDOW_INFO;
++
++struct wl_egl_window
++{
++ gcsWL_EGL_DISPLAY* display;
++ gcsWL_EGL_BUFFER backbuffers[WL_EGL_NUM_BACKBUFFERS];
++ gcsWL_EGL_WINDOW_INFO info;
++ gctUINT current;
++ struct wl_surface* surface;
++ struct wl_callback* frame_callback;
++};
++
++typedef void* HALNativeDisplayType;
++typedef void* HALNativeWindowType;
++typedef void* HALNativePixmapType;
++#else
++/* Linux platform for FBDEV. */
++typedef struct _FBDisplay * HALNativeDisplayType;
++typedef struct _FBWindow * HALNativeWindowType;
++typedef struct _FBPixmap * HALNativePixmapType;
++#endif
++#elif defined(__ANDROID__) || defined(ANDROID)
++
++struct egl_native_pixmap_t;
++
++#if ANDROID_SDK_VERSION >= 9
++ #include <android/native_window.h>
++
++ typedef struct ANativeWindow* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t* HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#else
++ struct android_native_window_t;
++ typedef struct android_native_window_t* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t * HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#endif
++
++#elif defined(LINUX) || defined(__APPLE__)
++/* X11 platform. */
++#include <X11/Xlib.h>
++#include <X11/Xutil.h>
++
++typedef Display * HALNativeDisplayType;
++typedef Window HALNativeWindowType;
++
++#ifdef CUSTOM_PIXMAP
++typedef void * HALNativePixmapType;
++#else
++typedef Pixmap HALNativePixmapType;
++#endif /* CUSTOM_PIXMAP */
++
++/* Rename some badly named X defines. */
++#ifdef Status
++# define XStatus int
++# undef Status
++#endif
++#ifdef Always
++# define XAlways 2
++# undef Always
++#endif
++#ifdef CurrentTime
++# undef CurrentTime
++# define XCurrentTime 0
++#endif
++
++#elif defined(__QNXNTO__)
++#include <screen/screen.h>
++
++/* VOID */
++typedef int HALNativeDisplayType;
++typedef screen_window_t HALNativeWindowType;
++typedef screen_pixmap_t HALNativePixmapType;
++
++#else
++
++#error "Platform not recognized"
++
++/* VOID */
++typedef void * HALNativeDisplayType;
++typedef void * HALNativeWindowType;
++typedef void * HALNativePixmapType;
++
++#endif
++
++/* define DUMMY according to the system */
++#if defined(EGL_API_WL)
++# define WL_DUMMY (31415926)
++# define EGL_DUMMY WL_DUMMY
++#elif defined(__ANDROID__) || defined(ANDROID)
++# define ANDROID_DUMMY (31415926)
++# define EGL_DUMMY ANDROID_DUMMY
++#else
++# define EGL_DUMMY (31415926)
++#endif
++
++/*******************************************************************************
++** Display. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_GetDisplay(
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayByIndex(
++ IN gctINT DisplayIndex,
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfo(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctSIZE_T * Physical,
++ OUT gctINT * Stride,
++ OUT gctINT * BitsPerPixel
++ );
++
++
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetNextDisplayInfoExByIndex(
++ IN gctINT Index,
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbuffer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtualEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER Context,
++ IN gcoSURF Surface,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT Interval
++);
++
++gceSTATUS
++gcoOS_GetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT_PTR Min,
++ IN gctINT_PTR Max
++);
++
++gceSTATUS
++gcoOS_DisplayBufferRegions(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT NumRects,
++ IN gctINT_PTR Rects
++ );
++
++gceSTATUS
++gcoOS_DestroyDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_InitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_DeinitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx2(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbufferEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_IsValidDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_GetNativeVisualId(
++ IN HALNativeDisplayType Display,
++ OUT gctINT* nativeVisualId
++ );
++
++gctBOOL
++gcoOS_SynchronousFlip(
++ IN HALNativeDisplayType Display
++ );
++
++/*******************************************************************************
++** Windows. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreateWindow(
++ IN HALNativeDisplayType Display,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height,
++ OUT HALNativeWindowType * Window
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset
++ );
++
++gceSTATUS
++gcoOS_DestroyWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_DrawImage(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_GetImage(
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ OUT gctINT * BitsPerPixel,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_DrawImageEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits,
++ IN gceSURF_FORMAT Format
++ );
++
++/*******************************************************************************
++** Pixmaps. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreatePixmap(
++ IN HALNativeDisplayType Display,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ OUT HALNativePixmapType * Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DrawPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_CopyPixmapBits(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctUINT DstWidth,
++ IN gctUINT DstHeight,
++ IN gctINT DstStride,
++ IN gceSURF_FORMAT DstFormat,
++ OUT gctPOINTER DstBits
++ );
++
++/*******************************************************************************
++** OS relative. ****************************************************************
++*/
++gceSTATUS
++gcoOS_LoadEGLLibrary(
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeEGLLibrary(
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_ShowWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_HideWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_SetWindowTitle(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_CapturePointer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_GetEvent(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT halEvent * Event
++ );
++
++gceSTATUS
++gcoOS_CreateClientBuffer(
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT Format,
++ IN gctINT Type,
++ OUT gctPOINTER * ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_GetClientBufferInfo(
++ IN gctPOINTER ClientBuffer,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyClientBuffer(
++ IN gctPOINTER ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_DestroyContext(
++ IN gctPOINTER Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_CreateContext(
++ IN gctPOINTER LocalDisplay,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_MakeCurrent(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType DrawDrawable,
++ IN HALNativeWindowType ReadDrawable,
++ IN gctPOINTER Context,
++ IN gcoSURF ResolveTarget
++ );
++
++gceSTATUS
++gcoOS_CreateDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++
++gceSTATUS
++gcoOS_DestroyDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++gceSTATUS
++gcoOS_SwapBuffers(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gcoSURF RenderTarget,
++ IN gcoSURF ResolveTarget,
++ IN gctPOINTER ResolveBits,
++ OUT gctUINT *Width,
++ OUT gctUINT *Height
++ );
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform_type.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform_type.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform_type.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform_type.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,286 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_eglplatform_type_h_
++#define __gc_hal_eglplatform_type_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Events. *********************************************************************
++*/
++
++typedef enum _halEventType
++{
++ /* Keyboard event. */
++ HAL_KEYBOARD,
++
++ /* Mouse move event. */
++ HAL_POINTER,
++
++ /* Mouse button event. */
++ HAL_BUTTON,
++
++ /* Application close event. */
++ HAL_CLOSE,
++
++ /* Application window has been updated. */
++ HAL_WINDOW_UPDATE
++}
++halEventType;
++
++/* Scancodes for keyboard. */
++typedef enum _halKeys
++{
++ HAL_UNKNOWN = -1,
++
++ HAL_BACKSPACE = 0x08,
++ HAL_TAB,
++ HAL_ENTER = 0x0D,
++ HAL_ESCAPE = 0x1B,
++
++ HAL_SPACE = 0x20,
++ HAL_SINGLEQUOTE = 0x27,
++ HAL_PAD_ASTERISK = 0x2A,
++ HAL_COMMA = 0x2C,
++ HAL_HYPHEN,
++ HAL_PERIOD,
++ HAL_SLASH,
++ HAL_0,
++ HAL_1,
++ HAL_2,
++ HAL_3,
++ HAL_4,
++ HAL_5,
++ HAL_6,
++ HAL_7,
++ HAL_8,
++ HAL_9,
++ HAL_SEMICOLON = 0x3B,
++ HAL_EQUAL = 0x3D,
++ HAL_A = 0x41,
++ HAL_B,
++ HAL_C,
++ HAL_D,
++ HAL_E,
++ HAL_F,
++ HAL_G,
++ HAL_H,
++ HAL_I,
++ HAL_J,
++ HAL_K,
++ HAL_L,
++ HAL_M,
++ HAL_N,
++ HAL_O,
++ HAL_P,
++ HAL_Q,
++ HAL_R,
++ HAL_S,
++ HAL_T,
++ HAL_U,
++ HAL_V,
++ HAL_W,
++ HAL_X,
++ HAL_Y,
++ HAL_Z,
++ HAL_LBRACKET,
++ HAL_BACKSLASH,
++ HAL_RBRACKET,
++ HAL_BACKQUOTE = 0x60,
++
++ HAL_F1 = 0x80,
++ HAL_F2,
++ HAL_F3,
++ HAL_F4,
++ HAL_F5,
++ HAL_F6,
++ HAL_F7,
++ HAL_F8,
++ HAL_F9,
++ HAL_F10,
++ HAL_F11,
++ HAL_F12,
++
++ HAL_LCTRL,
++ HAL_RCTRL,
++ HAL_LSHIFT,
++ HAL_RSHIFT,
++ HAL_LALT,
++ HAL_RALT,
++ HAL_CAPSLOCK,
++ HAL_NUMLOCK,
++ HAL_SCROLLLOCK,
++ HAL_PAD_0,
++ HAL_PAD_1,
++ HAL_PAD_2,
++ HAL_PAD_3,
++ HAL_PAD_4,
++ HAL_PAD_5,
++ HAL_PAD_6,
++ HAL_PAD_7,
++ HAL_PAD_8,
++ HAL_PAD_9,
++ HAL_PAD_HYPHEN,
++ HAL_PAD_PLUS,
++ HAL_PAD_SLASH,
++ HAL_PAD_PERIOD,
++ HAL_PAD_ENTER,
++ HAL_SYSRQ,
++ HAL_PRNTSCRN,
++ HAL_BREAK,
++ HAL_UP,
++ HAL_LEFT,
++ HAL_RIGHT,
++ HAL_DOWN,
++ HAL_HOME,
++ HAL_END,
++ HAL_PGUP,
++ HAL_PGDN,
++ HAL_INSERT,
++ HAL_DELETE,
++ HAL_LWINDOW,
++ HAL_RWINDOW,
++ HAL_MENU,
++ HAL_POWER,
++ HAL_SLEEP,
++ HAL_WAKE
++}
++halKeys;
++
++/* Structure that defined keyboard mapping. */
++typedef struct _halKeyMap
++{
++ /* Normal key. */
++ halKeys normal;
++
++ /* Extended key. */
++ halKeys extended;
++}
++halKeyMap;
++
++/* Event structure. */
++typedef struct _halEvent
++{
++ /* Event type. */
++ halEventType type;
++
++ /* Event data union. */
++ union _halEventData
++ {
++ /* Event data for keyboard. */
++ struct _halKeyboard
++ {
++ /* Scancode. */
++ halKeys scancode;
++
++ /* ASCII characte of the key pressed. */
++ char key;
++
++ /* Flag whether the key was pressed (1) or released (0). */
++ char pressed;
++ }
++ keyboard;
++
++ /* Event data for pointer. */
++ struct _halPointer
++ {
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ pointer;
++
++ /* Event data for mouse buttons. */
++ struct _halButton
++ {
++ /* Left button state. */
++ int left;
++
++ /* Middle button state. */
++ int middle;
++
++ /* Right button state. */
++ int right;
++
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ button;
++ }
++ data;
++}
++halEvent;
++
++/* VFK_DISPLAY_INFO structure defining information returned by
++ vdkGetDisplayInfoEx. */
++typedef struct _halDISPLAY_INFO
++{
++ /* The size of the display in pixels. */
++ int width;
++ int height;
++
++ /* The stride of the dispay. -1 is returned if the stride is not known
++ ** for the specified display.*/
++ int stride;
++
++ /* The color depth of the display in bits per pixel. */
++ int bitsPerPixel;
++
++ /* The logical pointer to the display memory buffer. NULL is returned
++ ** if the pointer is not known for the specified display. */
++ void * logical;
++
++ /* The physical address of the display memory buffer. ~0 is returned
++ ** if the address is not known for the specified display. */
++ unsigned long physical;
++
++ int wrapFB; /* true if compositor, false otherwise. */
++
++#ifndef __QNXNTO__
++ /* 355_FB_MULTI_BUFFER */
++ int multiBuffer;
++ int backBufferY;
++#endif
++
++ /* The color info of the display. */
++ unsigned int alphaLength;
++ unsigned int alphaOffset;
++ unsigned int redLength;
++ unsigned int redOffset;
++ unsigned int greenLength;
++ unsigned int greenOffset;
++ unsigned int blueLength;
++ unsigned int blueOffset;
++
++ /* Display flip support. */
++ int flip;
++}
++halDISPLAY_INFO;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_type_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2053 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_h_
++#define __gc_hal_engine_h_
++
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_engine_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoSTREAM * gcoSTREAM;
++typedef struct _gcoVERTEX * gcoVERTEX;
++typedef struct _gcoTEXTURE * gcoTEXTURE;
++typedef struct _gcoINDEX * gcoINDEX;
++typedef struct _gcsVERTEX_ATTRIBUTES * gcsVERTEX_ATTRIBUTES_PTR;
++typedef struct _gcoVERTEXARRAY * gcoVERTEXARRAY;
++
++#define gcdATTRIBUTE_COUNT 16
++
++/******************************************************************************\
++********************************* Enumerations *********************************
++\******************************************************************************/
++
++/* Shading format. */
++typedef enum _gceSHADING
++{
++ gcvSHADING_SMOOTH,
++ gcvSHADING_FLAT_D3D,
++ gcvSHADING_FLAT_OPENGL,
++}
++gceSHADING;
++
++/* Culling modes. */
++typedef enum _gceCULL
++{
++ gcvCULL_NONE,
++ gcvCULL_CCW,
++ gcvCULL_CW,
++}
++gceCULL;
++
++/* Fill modes. */
++typedef enum _gceFILL
++{
++ gcvFILL_POINT,
++ gcvFILL_WIRE_FRAME,
++ gcvFILL_SOLID,
++}
++gceFILL;
++
++/* Compare modes. */
++typedef enum _gceCOMPARE
++{
++ gcvCOMPARE_NEVER,
++ gcvCOMPARE_NOT_EQUAL,
++ gcvCOMPARE_LESS,
++ gcvCOMPARE_LESS_OR_EQUAL,
++ gcvCOMPARE_EQUAL,
++ gcvCOMPARE_GREATER,
++ gcvCOMPARE_GREATER_OR_EQUAL,
++ gcvCOMPARE_ALWAYS,
++ gcvCOMPARE_INVALID = -1
++}
++gceCOMPARE;
++
++/* Stencil modes. */
++typedef enum _gceSTENCIL_MODE
++{
++ gcvSTENCIL_NONE,
++ gcvSTENCIL_SINGLE_SIDED,
++ gcvSTENCIL_DOUBLE_SIDED,
++}
++gceSTENCIL_MODE;
++
++/* Stencil operations. */
++typedef enum _gceSTENCIL_OPERATION
++{
++ gcvSTENCIL_KEEP,
++ gcvSTENCIL_REPLACE,
++ gcvSTENCIL_ZERO,
++ gcvSTENCIL_INVERT,
++ gcvSTENCIL_INCREMENT,
++ gcvSTENCIL_DECREMENT,
++ gcvSTENCIL_INCREMENT_SATURATE,
++ gcvSTENCIL_DECREMENT_SATURATE,
++ gcvSTENCIL_OPERATION_INVALID = -1
++}
++gceSTENCIL_OPERATION;
++
++/* Stencil selection. */
++typedef enum _gceSTENCIL_WHERE
++{
++ gcvSTENCIL_FRONT,
++ gcvSTENCIL_BACK,
++}
++gceSTENCIL_WHERE;
++
++/* Texture addressing selection. */
++typedef enum _gceTEXTURE_WHICH
++{
++ gcvTEXTURE_S,
++ gcvTEXTURE_T,
++ gcvTEXTURE_R,
++}
++gceTEXTURE_WHICH;
++
++/* Texture addressing modes. */
++typedef enum _gceTEXTURE_ADDRESSING
++{
++ gcvTEXTURE_WRAP,
++ gcvTEXTURE_CLAMP,
++ gcvTEXTURE_BORDER,
++ gcvTEXTURE_MIRROR,
++ gcvTEXTURE_MIRROR_ONCE,
++}
++gceTEXTURE_ADDRESSING;
++
++/* Texture filters. */
++typedef enum _gceTEXTURE_FILTER
++{
++ gcvTEXTURE_NONE,
++ gcvTEXTURE_POINT,
++ gcvTEXTURE_LINEAR,
++ gcvTEXTURE_ANISOTROPIC,
++}
++gceTEXTURE_FILTER;
++
++/* Primitive types. */
++typedef enum _gcePRIMITIVE
++{
++ gcvPRIMITIVE_POINT_LIST,
++ gcvPRIMITIVE_LINE_LIST,
++ gcvPRIMITIVE_LINE_STRIP,
++ gcvPRIMITIVE_LINE_LOOP,
++ gcvPRIMITIVE_TRIANGLE_LIST,
++ gcvPRIMITIVE_TRIANGLE_STRIP,
++ gcvPRIMITIVE_TRIANGLE_FAN,
++ gcvPRIMITIVE_RECTANGLE,
++}
++gcePRIMITIVE;
++
++/* Index types. */
++typedef enum _gceINDEX_TYPE
++{
++ gcvINDEX_8,
++ gcvINDEX_16,
++ gcvINDEX_32,
++}
++gceINDEX_TYPE;
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++/* Query the target capabilities. */
++gceSTATUS
++gcoHAL_QueryTargetCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MultiTargetCount,
++ OUT gctUINT * MaxSamples
++ );
++
++gceSTATUS
++gcoHAL_SetDepthOnly(
++ IN gcoHAL Hal,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoHAL_QueryShaderCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureMaxAniso(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxAnisoValue
++ );
++
++gceSTATUS
++gcoHAL_QueryStreamCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * MaxAttributes,
++ OUT gctUINT32 * MaxStreamSize,
++ OUT gctUINT32 * NumberOfStreams,
++ OUT gctUINT32 * Alignment
++ );
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*--------------------------------- gcoSURF 3D --------------------------------*/
++
++/* Copy surface. */
++gceSTATUS
++gcoSURF_Copy(
++ IN gcoSURF Surface,
++ IN gcoSURF Source
++ );
++
++/* Clear surface. */
++gceSTATUS
++gcoSURF_Clear(
++ IN gcoSURF Surface,
++ IN gctUINT Flags
++ );
++
++/* Set number of samples for a gcoSURF object. */
++gceSTATUS
++gcoSURF_SetSamples(
++ IN gcoSURF Surface,
++ IN gctUINT Samples
++ );
++
++/* Get the number of samples per pixel. */
++gceSTATUS
++gcoSURF_GetSamples(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR Samples
++ );
++
++/* Clear rectangular surface. */
++gceSTATUS
++gcoSURF_ClearRect(
++ IN gcoSURF Surface,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctUINT Flags
++ );
++
++/* TO BE REMOVED */
++ gceSTATUS
++ depr_gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight
++ );
++
++ gceSTATUS
++ depr_gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resample surface. */
++gceSTATUS
++gcoSURF_Resample(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++/* Resolve surface. */
++gceSTATUS
++gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++gceSTATUS
++gcoSURF_IsHWResolveable(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resolve rectangular area of a surface. */
++gceSTATUS
++gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Set surface resolvability. */
++gceSTATUS
++gcoSURF_SetResolvability(
++ IN gcoSURF Surface,
++ IN gctBOOL Resolvable
++ );
++
++gceSTATUS
++gcoSURF_IsRenderable(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSURF_IsFormatRenderableAsRT(
++ IN gcoSURF Surface
++ );
++
++#if gcdSYNC
++gceSTATUS
++gcoSURF_GetFence(
++ IN gcoSURF Surface
++ );
++gceSTATUS
++gcoSURF_WaitFence(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSTREAM_GetFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoSTREAM_WaitFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoINDEX_GetFence(
++ IN gcoINDEX index
++ );
++
++gceSTATUS
++gcoINDEX_WaitFence(
++ IN gcoINDEX index
++ );
++#endif
++
++/******************************************************************************\
++******************************** gcoINDEX Object *******************************
++\******************************************************************************/
++
++/* Construct a new gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoINDEX * Index
++ );
++
++/* Destroy a gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Destroy(
++ IN gcoINDEX Index
++ );
++
++/* Lock index in memory. */
++gceSTATUS
++gcoINDEX_Lock(
++ IN gcoINDEX Index,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Unlock index that was previously locked with gcoINDEX_Lock. */
++gceSTATUS
++gcoINDEX_Unlock(
++ IN gcoINDEX Index
++ );
++
++/* Upload index data into the memory. */
++gceSTATUS
++gcoINDEX_Load(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE IndexType,
++ IN gctUINT32 IndexCount,
++ IN gctPOINTER IndexBuffer
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_Bind(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_BindOffset(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset
++ );
++
++/* Free existing index buffer. */
++gceSTATUS
++gcoINDEX_Free(
++ IN gcoINDEX Index
++ );
++
++/* Upload data into an index buffer. */
++gceSTATUS
++gcoINDEX_Upload(
++ IN gcoINDEX Index,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload data into an index buffer starting at an offset. */
++gceSTATUS
++gcoINDEX_UploadOffset(
++ IN gcoINDEX Index,
++ IN gctUINT32 Offset,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/*Merge index2 to index1 from 0, index2 must subset of inex1*/
++gceSTATUS
++gcoINDEX_Merge(
++ IN gcoINDEX Index1,
++ IN gcoINDEX Index2
++ );
++
++/*check if index buffer is enough for this draw*/
++gctBOOL
++gcoINDEX_CheckRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctINT Count,
++ IN gctUINT32 Indices
++ );
++
++/* Query the index capabilities. */
++gceSTATUS
++gcoINDEX_QueryCaps(
++ OUT gctBOOL * Index8,
++ OUT gctBOOL * Index16,
++ OUT gctBOOL * Index32,
++ OUT gctUINT * MaxIndex
++ );
++
++/* Determine the index range in the current index buffer. */
++gceSTATUS
++gcoINDEX_GetIndexRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Count,
++ OUT gctUINT32 * MinimumIndex,
++ OUT gctUINT32 * MaximumIndex
++ );
++
++/* Dynamic buffer management. */
++gceSTATUS
++gcoINDEX_SetDynamic(
++ IN gcoINDEX Index,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++gceSTATUS
++gcoINDEX_UploadDynamic(
++ IN gcoINDEX Index,
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Bytes
++ );
++
++/******************************************************************************\
++********************************** gco3D Object *********************************
++\******************************************************************************/
++
++/* Clear flags. */
++typedef enum _gceCLEAR
++{
++ gcvCLEAR_COLOR = 0x1,
++ gcvCLEAR_DEPTH = 0x2,
++ gcvCLEAR_STENCIL = 0x4,
++ gcvCLEAR_HZ = 0x8,
++ gcvCLEAR_HAS_VAA = 0x10,
++}
++gceCLEAR;
++
++/* Blending targets. */
++typedef enum _gceBLEND_UNIT
++{
++ gcvBLEND_SOURCE,
++ gcvBLEND_TARGET,
++}
++gceBLEND_UNIT;
++
++/* Construct a new gco3D object. */
++gceSTATUS
++gco3D_Construct(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++/* Destroy an gco3D object. */
++gceSTATUS
++gco3D_Destroy(
++ IN gco3D Engine
++ );
++
++/* Set 3D API type. */
++gceSTATUS
++gco3D_SetAPI(
++ IN gco3D Engine,
++ IN gceAPI ApiType
++ );
++
++/* Set render target. */
++gceSTATUS
++gco3D_SetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Unset render target. */
++gceSTATUS
++gco3D_UnsetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Set depth buffer. */
++gceSTATUS
++gco3D_SetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Unset depth buffer. */
++gceSTATUS
++gco3D_UnsetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Set viewport. */
++gceSTATUS
++gco3D_SetViewport(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set scissors. */
++gceSTATUS
++gco3D_SetScissors(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set clear color. */
++gceSTATUS
++gco3D_SetClearColor(
++ IN gco3D Engine,
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++/* Set fixed point clear color. */
++gceSTATUS
++gco3D_SetClearColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point clear color. */
++gceSTATUS
++gco3D_SetClearColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set fixed point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Depth
++ );
++
++/* Set floating point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthF(
++ IN gco3D Engine,
++ IN gctFLOAT Depth
++ );
++
++/* Set clear stencil. */
++gceSTATUS
++gco3D_SetClearStencil(
++ IN gco3D Engine,
++ IN gctUINT32 Stencil
++ );
++
++/* Clear a Rect sub-surface. */
++gceSTATUS
++gco3D_ClearRect(
++ IN gco3D Engine,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom,
++ IN gctUINT32 Width,
++ IN gctUINT32 Height,
++ IN gctUINT32 Flags
++ );
++
++/* Clear surface. */
++gceSTATUS
++gco3D_Clear(
++ IN gco3D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT32 Width,
++ IN gctUINT32 Height,
++ IN gctUINT32 Flags
++ );
++
++
++/* Clear tile status. */
++gceSTATUS
++gco3D_ClearTileStatus(
++ IN gco3D Engine,
++ IN gcsSURF_INFO_PTR Surface,
++ IN gctUINT32 TileStatusAddress,
++ IN gctUINT32 Flags
++ );
++
++/* Set shading mode. */
++gceSTATUS
++gco3D_SetShading(
++ IN gco3D Engine,
++ IN gceSHADING Shading
++ );
++
++/* Set blending mode. */
++gceSTATUS
++gco3D_EnableBlending(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set blending function. */
++gceSTATUS
++gco3D_SetBlendFunction(
++ IN gco3D Engine,
++ IN gceBLEND_UNIT Unit,
++ IN gceBLEND_FUNCTION FunctionRGB,
++ IN gceBLEND_FUNCTION FunctionAlpha
++ );
++
++/* Set blending mode. */
++gceSTATUS
++gco3D_SetBlendMode(
++ IN gco3D Engine,
++ IN gceBLEND_MODE ModeRGB,
++ IN gceBLEND_MODE ModeAlpha
++ );
++
++/* Set blending color. */
++gceSTATUS
++gco3D_SetBlendColor(
++ IN gco3D Engine,
++ IN gctUINT Red,
++ IN gctUINT Green,
++ IN gctUINT Blue,
++ IN gctUINT Alpha
++ );
++
++/* Set fixed point blending color. */
++gceSTATUS
++gco3D_SetBlendColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point blending color. */
++gceSTATUS
++gco3D_SetBlendColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set culling mode. */
++gceSTATUS
++gco3D_SetCulling(
++ IN gco3D Engine,
++ IN gceCULL Mode
++ );
++
++/* Enable point size */
++gceSTATUS
++gco3D_SetPointSizeEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set point sprite */
++gceSTATUS
++gco3D_SetPointSprite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set fill mode. */
++gceSTATUS
++gco3D_SetFill(
++ IN gco3D Engine,
++ IN gceFILL Mode
++ );
++
++/* Set depth compare mode. */
++gceSTATUS
++gco3D_SetDepthCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Enable depth writing. */
++gceSTATUS
++gco3D_EnableDepthWrite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth mode. */
++gceSTATUS
++gco3D_SetDepthMode(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeX(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFIXED_POINT Near,
++ IN gctFIXED_POINT Far
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeF(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Set last pixel enable */
++gceSTATUS
++gco3D_SetLastPixelEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth Bias and Scale */
++gceSTATUS
++gco3D_SetDepthScaleBiasX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT DepthScale,
++ IN gctFIXED_POINT DepthBias
++ );
++
++gceSTATUS
++gco3D_SetDepthScaleBiasF(
++ IN gco3D Engine,
++ IN gctFLOAT DepthScale,
++ IN gctFLOAT DepthBias
++ );
++
++/* Set depth near and far clipping plane. */
++gceSTATUS
++gco3D_SetDepthPlaneF(
++ IN gco3D Engine,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Enable or disable dithering. */
++gceSTATUS
++gco3D_EnableDither(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set color write enable bits. */
++gceSTATUS
++gco3D_SetColorWrite(
++ IN gco3D Engine,
++ IN gctUINT8 Enable
++ );
++
++/* Enable or disable early depth. */
++gceSTATUS
++gco3D_SetEarlyDepth(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Enable or disable all early depth operations. */
++gceSTATUS
++gco3D_SetAllEarlyDepthModes(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Switch dynamic early mode */
++gceSTATUS
++gco3D_SwitchDynamicEarlyDepthMode(
++ IN gco3D Engine
++ );
++
++/* Set dynamic early mode */
++gceSTATUS
++gco3D_DisableDynamicEarlyDepthMode(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Enable or disable depth-only mode. */
++gceSTATUS
++gco3D_SetDepthOnly(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++typedef struct _gcsSTENCIL_INFO * gcsSTENCIL_INFO_PTR;
++typedef struct _gcsSTENCIL_INFO
++{
++ gceSTENCIL_MODE mode;
++
++ gctUINT8 maskFront;
++ gctUINT8 maskBack;
++ gctUINT8 writeMaskFront;
++ gctUINT8 writeMaskBack;
++
++ gctUINT8 referenceFront;
++
++ gceCOMPARE compareFront;
++ gceSTENCIL_OPERATION passFront;
++ gceSTENCIL_OPERATION failFront;
++ gceSTENCIL_OPERATION depthFailFront;
++
++ gctUINT8 referenceBack;
++ gceCOMPARE compareBack;
++ gceSTENCIL_OPERATION passBack;
++ gceSTENCIL_OPERATION failBack;
++ gceSTENCIL_OPERATION depthFailBack;
++}
++gcsSTENCIL_INFO;
++
++/* Set stencil mode. */
++gceSTATUS
++gco3D_SetStencilMode(
++ IN gco3D Engine,
++ IN gceSTENCIL_MODE Mode
++ );
++
++/* Set stencil mask. */
++gceSTATUS
++gco3D_SetStencilMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back mask. */
++gceSTATUS
++gco3D_SetStencilMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil reference. */
++gceSTATUS
++gco3D_SetStencilReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctBOOL Front
++ );
++
++/* Set stencil compare. */
++gceSTATUS
++gco3D_SetStencilCompare(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceCOMPARE Compare
++ );
++
++/* Set stencil operation on pass. */
++gceSTATUS
++gco3D_SetStencilPass(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on fail. */
++gceSTATUS
++gco3D_SetStencilFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on depth fail. */
++gceSTATUS
++gco3D_SetStencilDepthFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set all stencil states in one blow. */
++gceSTATUS
++gco3D_SetStencilAll(
++ IN gco3D Engine,
++ IN gcsSTENCIL_INFO_PTR Info
++ );
++
++typedef struct _gcsALPHA_INFO * gcsALPHA_INFO_PTR;
++typedef struct _gcsALPHA_INFO
++{
++ /* Alpha test states. */
++ gctBOOL test;
++ gceCOMPARE compare;
++ gctUINT8 reference;
++ gctFLOAT floatReference;
++
++ /* Alpha blending states. */
++ gctBOOL blend;
++
++ gceBLEND_FUNCTION srcFuncColor;
++ gceBLEND_FUNCTION srcFuncAlpha;
++ gceBLEND_FUNCTION trgFuncColor;
++ gceBLEND_FUNCTION trgFuncAlpha;
++
++ gceBLEND_MODE modeColor;
++ gceBLEND_MODE modeAlpha;
++
++ gctUINT32 color;
++}
++gcsALPHA_INFO;
++
++/* Enable or disable alpha test. */
++gceSTATUS
++gco3D_SetAlphaTest(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set alpha test compare. */
++gceSTATUS
++gco3D_SetAlphaCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Set alpha test reference in unsigned integer. */
++gceSTATUS
++gco3D_SetAlphaReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctFLOAT FloatReference
++ );
++
++/* Set alpha test reference in fixed point. */
++gceSTATUS
++gco3D_SetAlphaReferenceX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Reference
++ );
++
++/* Set alpha test reference in floating point. */
++gceSTATUS
++gco3D_SetAlphaReferenceF(
++ IN gco3D Engine,
++ IN gctFLOAT Reference
++ );
++
++/* Enable/Disable anti-alias line. */
++gceSTATUS
++gco3D_SetAntiAliasLine(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set texture slot for anti-alias line. */
++gceSTATUS
++gco3D_SetAALineTexSlot(
++ IN gco3D Engine,
++ IN gctUINT TexSlot
++ );
++
++/* Set anti-alias line width scale. */
++gceSTATUS
++gco3D_SetAALineWidth(
++ IN gco3D Engine,
++ IN gctFLOAT Width
++ );
++
++/* Draw a number of primitives. */
++gceSTATUS
++gco3D_DrawPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT StartVertex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++gceSTATUS
++gco3D_DrawPrimitivesCount(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT* StartVertex,
++ IN gctSIZE_T* VertexCount,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++
++/* Draw a number of primitives using offsets. */
++gceSTATUS
++gco3D_DrawPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives. */
++gceSTATUS
++gco3D_DrawIndexedPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT BaseVertex,
++ IN gctINT StartIndex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives using offsets. */
++gceSTATUS
++gco3D_DrawIndexedPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 BaseOffset,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Enable or disable anti-aliasing. */
++gceSTATUS
++gco3D_SetAntiAlias(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Write data into the command buffer. */
++gceSTATUS
++gco3D_WriteBuffer(
++ IN gco3D Engine,
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Aligned
++ );
++
++/* Send sempahore and stall until sempahore is signalled. */
++gceSTATUS
++gco3D_Semaphore(
++ IN gco3D Engine,
++ IN gceWHERE From,
++ IN gceWHERE To,
++ IN gceHOW How);
++
++/* Set the subpixels center. */
++gceSTATUS
++gco3D_SetCentroids(
++ IN gco3D Engine,
++ IN gctUINT32 Index,
++ IN gctPOINTER Centroids
++ );
++
++gceSTATUS
++gco3D_SetLogicOp(
++ IN gco3D Engine,
++ IN gctUINT8 Rop
++ );
++
++/* OCL thread walker information. */
++typedef struct _gcsTHREAD_WALKER_INFO * gcsTHREAD_WALKER_INFO_PTR;
++typedef struct _gcsTHREAD_WALKER_INFO
++{
++ gctUINT32 dimensions;
++ gctUINT32 traverseOrder;
++ gctUINT32 enableSwathX;
++ gctUINT32 enableSwathY;
++ gctUINT32 enableSwathZ;
++ gctUINT32 swathSizeX;
++ gctUINT32 swathSizeY;
++ gctUINT32 swathSizeZ;
++ gctUINT32 valueOrder;
++
++ gctUINT32 globalSizeX;
++ gctUINT32 globalOffsetX;
++ gctUINT32 globalSizeY;
++ gctUINT32 globalOffsetY;
++ gctUINT32 globalSizeZ;
++ gctUINT32 globalOffsetZ;
++
++ gctUINT32 workGroupSizeX;
++ gctUINT32 workGroupCountX;
++ gctUINT32 workGroupSizeY;
++ gctUINT32 workGroupCountY;
++ gctUINT32 workGroupSizeZ;
++ gctUINT32 workGroupCountZ;
++
++ gctUINT32 threadAllocation;
++}
++gcsTHREAD_WALKER_INFO;
++
++/* Start OCL thread walker. */
++gceSTATUS
++gco3D_InvokeThreadWalker(
++ IN gco3D Engine,
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
++
++/* Set w clip and w plane limit value. */
++gceSTATUS
++gco3D_SetWClipEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco3D_GetWClipEnable(
++ IN gco3D Engine,
++ OUT gctBOOL * Enable
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitF(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Value
++ );
++
++
++gceSTATUS
++gco3D_SetWPlaneLimit(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- gco3D Fragment Processor ------------------------*/
++
++/* Set the fragment processor configuration. */
++gceSTATUS
++gco3D_SetFragmentConfiguration(
++ IN gco3D Engine,
++ IN gctBOOL ColorFromStream,
++ IN gctBOOL EnableFog,
++ IN gctBOOL EnableSmoothPoint,
++ IN gctUINT32 ClipPlanes
++ );
++
++/* Enable/disable texture stage operation. */
++gceSTATUS
++gco3D_EnableTextureStage(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL Enable
++ );
++
++/* Program the channel enable masks for the color texture function. */
++gceSTATUS
++gco3D_SetTextureColorMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the channel enable masks for the alpha texture function. */
++gceSTATUS
++gco3D_SetTextureAlphaMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the constant fragment color. */
++gceSTATUS
++gco3D_SetFragmentColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFragmentColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant fog color. */
++gceSTATUS
++gco3D_SetFogColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFogColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant texture color. */
++gceSTATUS
++gco3D_SetTetxureColorX(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetTetxureColorF(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Configure color texture function. */
++gceSTATUS
++gco3D_SetColorTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/* Configure alpha texture function. */
++gceSTATUS
++gco3D_SetAlphaTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/* Invoke OCL thread walker. */
++gceSTATUS
++gcoHARDWARE_InvokeThreadWalker(
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
++
++/******************************************************************************\
++******************************* gcoTEXTURE Object *******************************
++\******************************************************************************/
++
++/* Cube faces. */
++typedef enum _gceTEXTURE_FACE
++{
++ gcvFACE_NONE,
++ gcvFACE_POSITIVE_X,
++ gcvFACE_NEGATIVE_X,
++ gcvFACE_POSITIVE_Y,
++ gcvFACE_NEGATIVE_Y,
++ gcvFACE_POSITIVE_Z,
++ gcvFACE_NEGATIVE_Z,
++}
++gceTEXTURE_FACE;
++
++#if gcdFORCE_MIPMAP
++typedef enum
++{
++ gcvForceMipDisabled = 0,
++ gcvForceMipEnable = 1,
++ gcvForceMipGenerated = 2,
++ gcvForceMipNever = 3,
++}gceFORCE_MIPMAP;
++#endif
++
++typedef struct _gcsTEXTURE
++{
++ /* Addressing modes. */
++ gceTEXTURE_ADDRESSING s;
++ gceTEXTURE_ADDRESSING t;
++ gceTEXTURE_ADDRESSING r;
++
++ /* Border color. */
++ gctUINT8 border[4];
++
++ /* Filters. */
++ gceTEXTURE_FILTER minFilter;
++ gceTEXTURE_FILTER magFilter;
++ gceTEXTURE_FILTER mipFilter;
++ gctUINT anisoFilter;
++ gctBOOL forceTopLevel;
++ gctBOOL autoMipmap;
++#if gcdFORCE_MIPMAP
++ gceFORCE_MIPMAP forceMipmap;
++#endif
++ /* Level of detail. */
++ gctFIXED_POINT lodBias;
++ gctFIXED_POINT lodMin;
++ gctFIXED_POINT lodMax;
++}
++gcsTEXTURE, * gcsTEXTURE_PTR;
++
++/* Construct a new gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Construct(
++ IN gcoHAL Hal,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Construct a new sized gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_ConstructSized(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gctUINT MipMapCount,
++ IN gcePOOL Pool,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Destroy an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Destroy(
++ IN gcoTEXTURE Texture
++ );
++#if gcdFORCE_MIPMAP
++gceSTATUS
++gcoTEXTURE_DestroyForceMipmap(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_GetMipLevels(
++ IN gcoTEXTURE Texture,
++ OUT gctINT * levels
++ );
++#endif
++/* Replace a mipmap in gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_ReplaceMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctINT imageFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool
++ );
++
++/* Upload data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Upload(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctINT Stride,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadSub(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT X,
++ IN gctUINT Y,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctINT Stride,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload YUV data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadYUV(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctPOINTER Memory[3],
++ IN gctINT Stride[3],
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload compressed data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressed(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload compressed sub data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressedSub(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT XOffset,
++ IN gctUINT YOffset,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Size
++ );
++
++/* GetImageFormat of texture. */
++gceSTATUS
++gcoTEXTURE_GetImageFormat(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ OUT gctINT * ImageFormat
++ );
++
++/* Get gcoSURF object for a mipmap level. */
++gceSTATUS
++gcoTEXTURE_GetMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ OUT gcoSURF * Surface
++ );
++
++/* Get gcoSURF object for a mipmap level and face offset. */
++gceSTATUS
++gcoTEXTURE_GetMipMapFace(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ OUT gcoSURF * Surface,
++ OUT gctUINT32_PTR Offset
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctINT imageFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromClient(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromSurface(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_SetMaxLevel(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Levels
++ );
++
++gceSTATUS
++gcoTEXTURE_SetEndianHint(
++ IN gcoTEXTURE Texture,
++ IN gceENDIAN_HINT EndianHint
++ );
++
++gceSTATUS
++gcoTEXTURE_Disable(
++ IN gcoHAL Hal,
++ IN gctINT Sampler
++ );
++
++gceSTATUS
++gcoTEXTURE_Flush(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_QueryCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoTEXTURE_GetTiling(
++ IN gcoTEXTURE Texture,
++ IN gctINT preferLevel,
++ OUT gceTILING * Tiling
++ );
++
++gceSTATUS
++gcoTEXTURE_GetClosestFormat(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT InFormat,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++gceSTATUS
++gcoTEXTURE_RenderIntoMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsRenderable(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsRenderableEx(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsComplete(
++ IN gcoTEXTURE Texture,
++ IN gctINT MaxLevel
++ );
++
++gceSTATUS
++gcoTEXTURE_BindTexture(
++ IN gcoTEXTURE Texture,
++ IN gctINT Target,
++ IN gctINT Sampler,
++ IN gcsTEXTURE_PTR Info
++ );
++
++/******************************************************************************\
++******************************* gcoSTREAM Object ******************************
++\******************************************************************************/
++
++typedef enum _gceVERTEX_FORMAT
++{
++ gcvVERTEX_BYTE,
++ gcvVERTEX_UNSIGNED_BYTE,
++ gcvVERTEX_SHORT,
++ gcvVERTEX_UNSIGNED_SHORT,
++ gcvVERTEX_INT,
++ gcvVERTEX_UNSIGNED_INT,
++ gcvVERTEX_FIXED,
++ gcvVERTEX_HALF,
++ gcvVERTEX_FLOAT,
++ gcvVERTEX_UNSIGNED_INT_10_10_10_2,
++ gcvVERTEX_INT_10_10_10_2,
++}
++gceVERTEX_FORMAT;
++
++gceSTATUS
++gcoSTREAM_Construct(
++ IN gcoHAL Hal,
++ OUT gcoSTREAM * Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Destroy(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Upload(
++ IN gcoSTREAM Stream,
++ IN gctCONST_POINTER Buffer,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Dynamic
++ );
++
++gceSTATUS
++gcoSTREAM_SetStride(
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoSTREAM_Lock(
++ IN gcoSTREAM Stream,
++ OUT gctPOINTER * Logical,
++ OUT gctUINT32 * Physical
++ );
++
++gceSTATUS
++gcoSTREAM_Unlock(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Reserve(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoSTREAM_Flush(
++ IN gcoSTREAM Stream
++ );
++
++/* Dynamic buffer API. */
++gceSTATUS
++gcoSTREAM_SetDynamic(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++typedef struct _gcsSTREAM_INFO
++{
++ gctUINT index;
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT components;
++ gctSIZE_T size;
++ gctCONST_POINTER data;
++ gctUINT stride;
++}
++gcsSTREAM_INFO, * gcsSTREAM_INFO_PTR;
++
++gceSTATUS
++gcoSTREAM_UploadDynamic(
++ IN gcoSTREAM Stream,
++ IN gctUINT VertexCount,
++ IN gctUINT InfoCount,
++ IN gcsSTREAM_INFO_PTR Info,
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoSTREAM_CPUCacheOperation(
++ IN gcoSTREAM Stream,
++ IN gceCACHEOPERATION Operation
++ );
++
++/******************************************************************************\
++******************************** gcoVERTEX Object ******************************
++\******************************************************************************/
++
++typedef struct _gcsVERTEX_ATTRIBUTES
++{
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT32 components;
++ gctSIZE_T size;
++ gctUINT32 stream;
++ gctUINT32 offset;
++ gctUINT32 stride;
++}
++gcsVERTEX_ATTRIBUTES;
++
++gceSTATUS
++gcoVERTEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEX * Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Destroy(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Reset(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_EnableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index,
++ IN gceVERTEX_FORMAT Format,
++ IN gctBOOL Normalized,
++ IN gctUINT32 Components,
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoVERTEX_DisableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index
++ );
++
++gceSTATUS
++gcoVERTEX_Bind(
++ IN gcoVERTEX Vertex
++ );
++
++/*******************************************************************************
++***** gcoVERTEXARRAY Object ***************************************************/
++
++typedef struct _gcsVERTEXARRAY
++{
++ /* Enabled. */
++ gctBOOL enable;
++
++ /* Number of components. */
++ gctINT size;
++
++ /* Attribute format. */
++ gceVERTEX_FORMAT format;
++
++ /* Flag whether the attribute is normalized or not. */
++ gctBOOL normalized;
++
++ /* Stride of the component. */
++ gctUINT stride;
++
++ /* Pointer to the attribute data. */
++ gctCONST_POINTER pointer;
++
++ /* Stream object owning the attribute data. */
++ gcoSTREAM stream;
++
++ /* Generic values for attribute. */
++ gctFLOAT genericValue[4];
++
++ /* Generic size for attribute. */
++ gctINT genericSize;
++
++ /* Vertex shader linkage. */
++ gctUINT linkage;
++
++#if gcdUSE_WCLIP_PATCH
++ gctBOOL isPosition;
++#endif
++}
++gcsVERTEXARRAY,
++* gcsVERTEXARRAY_PTR;
++
++gceSTATUS
++gcoVERTEXARRAY_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEXARRAY * Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Destroy(
++ IN gcoVERTEXARRAY Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsVERTEXARRAY_PTR VertexArray,
++ IN gctUINT First,
++ IN gctSIZE_T Count,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoINDEX IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctUINT * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty
++#else
++ IN OUT gctUINT * PrimitiveCount
++#endif
++ );
++
++gctUINT
++gcoVERTEXARRAY_GetMaxStream(
++ IN gcoVERTEXARRAY Vertex
++);
++
++gceSTATUS
++gcoVERTEXARRAY_SetMaxStream(
++ IN gcoVERTEXARRAY Vertex,
++ gctUINT maxStreams
++);
++/*******************************************************************************
++***** Composition *************************************************************/
++
++typedef enum _gceCOMPOSITION
++{
++ gcvCOMPOSE_CLEAR = 1,
++ gcvCOMPOSE_BLUR,
++ gcvCOMPOSE_DIM,
++ gcvCOMPOSE_LAYER
++}
++gceCOMPOSITION;
++
++typedef struct _gcsCOMPOSITION * gcsCOMPOSITION_PTR;
++typedef struct _gcsCOMPOSITION
++{
++ /* Structure size. */
++ gctUINT structSize;
++
++ /* Composition operation. */
++ gceCOMPOSITION operation;
++
++ /* Layer to be composed. */
++ gcoSURF layer;
++
++ /* Source and target coordinates. */
++ gcsRECT srcRect;
++ gcsRECT trgRect;
++
++ /* Target rectangle */
++ gcsPOINT v0;
++ gcsPOINT v1;
++ gcsPOINT v2;
++
++ /* Blending parameters. */
++ gctBOOL enableBlending;
++ gctBOOL premultiplied;
++ gctUINT8 alphaValue;
++
++ /* Clear color. */
++ gctFLOAT r;
++ gctFLOAT g;
++ gctFLOAT b;
++ gctFLOAT a;
++}
++gcsCOMPOSITION;
++
++gceSTATUS
++gco3D_ProbeComposition(
++ gctBOOL ResetIfEmpty
++ );
++
++gceSTATUS
++gco3D_CompositionBegin(
++ void
++ );
++
++gceSTATUS
++gco3D_ComposeLayer(
++ IN gcsCOMPOSITION_PTR Layer
++ );
++
++gceSTATUS
++gco3D_CompositionSignals(
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal1,
++ IN gctSIGNAL Signal2
++ );
++
++gceSTATUS
++gco3D_CompositionEnd(
++ IN gcoSURF Target,
++ IN gctBOOL Synchronous
++ );
++
++/* Frame Database */
++gceSTATUS
++gcoHAL_AddFrameDB(
++ void
++ );
++
++gceSTATUS
++gcoHAL_DumpFrameDB(
++ gctCONST_STRING Filename OPTIONAL
++ );
++
++gceSTATUS
++gcoHAL_GetSharedInfo(
++ IN gctUINT32 Pid,
++ IN gctUINT32 DataId,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER Data
++ );
++
++gceSTATUS
++gcoHAL_SetSharedInfo(
++ IN gctUINT32 DataId,
++ IN gctPOINTER Data,
++ IN gctSIZE_T Bytes
++ );
++
++#if VIVANTE_PROFILER_CONTEXT
++gceSTATUS
++gcoHARDWARE_GetContext(
++ IN gcoHARDWARE Hardware,
++ OUT gctUINT32 * Context
++ );
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_engine_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine_vg.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,904 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_vg_h_
++#define __gc_hal_engine_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include "gc_hal_types.h"
++
++/******************************************************************************\
++******************************** VG Enumerations *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Tiling mode for painting and imagig.
++**
++** This enumeration defines the tiling modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 tile modes.
++*/
++typedef enum _gceTILE_MODE
++{
++ gcvTILE_FILL,
++ gcvTILE_PAD,
++ gcvTILE_REPEAT,
++ gcvTILE_REFLECT
++}
++gceTILE_MODE;
++
++/******************************************************************************/
++/** @ingroup gcoVG
++**
++** @brief The different paint modes.
++**
++** This enumeration lists the available paint modes.
++*/
++typedef enum _gcePAINT_TYPE
++{
++ /** Solid color. */
++ gcvPAINT_MODE_SOLID,
++
++ /** Linear gradient. */
++ gcvPAINT_MODE_LINEAR,
++
++ /** Radial gradient. */
++ gcvPAINT_MODE_RADIAL,
++
++ /** Pattern. */
++ gcvPAINT_MODE_PATTERN,
++
++ /** Mode count. */
++ gcvPAINT_MODE_COUNT
++}
++gcePAINT_TYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Types of path data supported by HAL.
++**
++** This enumeration defines the types of path data supported by the HAL.
++** This is in fact a one-to-one mapping of the OpenVG 1.1 path types.
++*/
++typedef enum _gcePATHTYPE
++{
++ gcePATHTYPE_UNKNOWN = -1,
++ gcePATHTYPE_INT8,
++ gcePATHTYPE_INT16,
++ gcePATHTYPE_INT32,
++ gcePATHTYPE_FLOAT
++}
++gcePATHTYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Supported path segment commands.
++**
++** This enumeration defines the path segment commands supported by the HAL.
++*/
++typedef enum _gceVGCMD
++{
++ gcvVGCMD_END, /* 0: GCCMD_TS_OPCODE_END */
++ gcvVGCMD_CLOSE, /* 1: GCCMD_TS_OPCODE_CLOSE */
++ gcvVGCMD_MOVE, /* 2: GCCMD_TS_OPCODE_MOVE */
++ gcvVGCMD_MOVE_REL, /* 3: GCCMD_TS_OPCODE_MOVE_REL */
++ gcvVGCMD_LINE, /* 4: GCCMD_TS_OPCODE_LINE */
++ gcvVGCMD_LINE_REL, /* 5: GCCMD_TS_OPCODE_LINE_REL */
++ gcvVGCMD_QUAD, /* 6: GCCMD_TS_OPCODE_QUADRATIC */
++ gcvVGCMD_QUAD_REL, /* 7: GCCMD_TS_OPCODE_QUADRATIC_REL */
++ gcvVGCMD_CUBIC, /* 8: GCCMD_TS_OPCODE_CUBIC */
++ gcvVGCMD_CUBIC_REL, /* 9: GCCMD_TS_OPCODE_CUBIC_REL */
++ gcvVGCMD_BREAK, /* 10: GCCMD_TS_OPCODE_BREAK */
++ gcvVGCMD_HLINE, /* 11: ******* R E S E R V E D *******/
++ gcvVGCMD_HLINE_REL, /* 12: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE, /* 13: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE_REL, /* 14: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD, /* 15: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD_REL, /* 16: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC, /* 17: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC_REL, /* 18: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC, /* 19: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC_REL, /* 20: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC, /* 21: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC_REL, /* 22: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC, /* 23: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC_REL, /* 24: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC, /* 25: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC_REL, /* 26: ******* R E S E R V E D *******/
++
++ /* The width of the command recognized by the hardware on bits. */
++ gcvVGCMD_WIDTH = 5,
++
++ /* Hardware command mask. */
++ gcvVGCMD_MASK = (1 << gcvVGCMD_WIDTH) - 1,
++
++ /* Command modifiers. */
++ gcvVGCMD_H_MOD = 1 << gcvVGCMD_WIDTH, /* = 32 */
++ gcvVGCMD_V_MOD = 2 << gcvVGCMD_WIDTH, /* = 64 */
++ gcvVGCMD_S_MOD = 3 << gcvVGCMD_WIDTH, /* = 96 */
++ gcvVGCMD_ARC_MOD = 4 << gcvVGCMD_WIDTH, /* = 128 */
++
++ /* Emulated LINE commands. */
++ gcvVGCMD_HLINE_EMUL = gcvVGCMD_H_MOD | gcvVGCMD_LINE, /* = 36 */
++ gcvVGCMD_HLINE_EMUL_REL = gcvVGCMD_H_MOD | gcvVGCMD_LINE_REL, /* = 37 */
++ gcvVGCMD_VLINE_EMUL = gcvVGCMD_V_MOD | gcvVGCMD_LINE, /* = 68 */
++ gcvVGCMD_VLINE_EMUL_REL = gcvVGCMD_V_MOD | gcvVGCMD_LINE_REL, /* = 69 */
++
++ /* Emulated SMOOTH commands. */
++ gcvVGCMD_SQUAD_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD, /* = 102 */
++ gcvVGCMD_SQUAD_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD_REL, /* = 103 */
++ gcvVGCMD_SCUBIC_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC, /* = 104 */
++ gcvVGCMD_SCUBIC_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC_REL, /* = 105 */
++
++ /* Emulation ARC commands. */
++ gcvVGCMD_ARC_LINE = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE, /* = 132 */
++ gcvVGCMD_ARC_LINE_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE_REL, /* = 133 */
++ gcvVGCMD_ARC_QUAD = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD, /* = 134 */
++ gcvVGCMD_ARC_QUAD_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD_REL /* = 135 */
++}
++gceVGCMD;
++typedef enum _gceVGCMD * gceVGCMD_PTR;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Blending modes supported by the HAL.
++**
++** This enumeration defines the blending modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 blending modes.
++*/
++typedef enum _gceVG_BLEND
++{
++ gcvVG_BLEND_SRC,
++ gcvVG_BLEND_SRC_OVER,
++ gcvVG_BLEND_DST_OVER,
++ gcvVG_BLEND_SRC_IN,
++ gcvVG_BLEND_DST_IN,
++ gcvVG_BLEND_MULTIPLY,
++ gcvVG_BLEND_SCREEN,
++ gcvVG_BLEND_DARKEN,
++ gcvVG_BLEND_LIGHTEN,
++ gcvVG_BLEND_ADDITIVE,
++ gcvVG_BLEND_SUBTRACT,
++ gcvVG_BLEND_FILTER
++}
++gceVG_BLEND;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Image modes supported by the HAL.
++**
++** This enumeration defines the image modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 image modes with the addition
++** of NO IMAGE.
++*/
++typedef enum _gceVG_IMAGE
++{
++ gcvVG_IMAGE_NONE,
++ gcvVG_IMAGE_NORMAL,
++ gcvVG_IMAGE_MULTIPLY,
++ gcvVG_IMAGE_STENCIL,
++ gcvVG_IMAGE_FILTER
++}
++gceVG_IMAGE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Filter mode patterns and imaging.
++**
++** This enumeration defines the filter modes supported by the HAL.
++*/
++typedef enum _gceIMAGE_FILTER
++{
++ gcvFILTER_POINT,
++ gcvFILTER_LINEAR,
++ gcvFILTER_BI_LINEAR
++}
++gceIMAGE_FILTER;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Primitive modes supported by the HAL.
++**
++** This enumeration defines the primitive modes supported by the HAL.
++*/
++typedef enum _gceVG_PRIMITIVE
++{
++ gcvVG_SCANLINE,
++ gcvVG_RECTANGLE,
++ gcvVG_TESSELLATED,
++ gcvVG_TESSELLATED_TILED
++}
++gceVG_PRIMITIVE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rendering quality modes supported by the HAL.
++**
++** This enumeration defines the rendering quality modes supported by the HAL.
++*/
++typedef enum _gceRENDER_QUALITY
++{
++ gcvVG_NONANTIALIASED,
++ gcvVG_2X2_MSAA,
++ gcvVG_2X4_MSAA,
++ gcvVG_4X4_MSAA
++}
++gceRENDER_QUALITY;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Fill rules supported by the HAL.
++**
++** This enumeration defines the fill rules supported by the HAL.
++*/
++typedef enum _gceFILL_RULE
++{
++ gcvVG_EVEN_ODD,
++ gcvVG_NON_ZERO
++}
++gceFILL_RULE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Cap styles supported by the HAL.
++**
++** This enumeration defines the cap styles supported by the HAL.
++*/
++typedef enum _gceCAP_STYLE
++{
++ gcvCAP_BUTT,
++ gcvCAP_ROUND,
++ gcvCAP_SQUARE
++}
++gceCAP_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Join styles supported by the HAL.
++**
++** This enumeration defines the join styles supported by the HAL.
++*/
++typedef enum _gceJOIN_STYLE
++{
++ gcvJOIN_MITER,
++ gcvJOIN_ROUND,
++ gcvJOIN_BEVEL
++}
++gceJOIN_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Channel mask values.
++**
++** This enumeration defines the values for channel mask used in image
++** filtering.
++*/
++
++/* Base values for channel mask definitions. */
++#define gcvCHANNEL_X (0)
++#define gcvCHANNEL_R (1 << 0)
++#define gcvCHANNEL_G (1 << 1)
++#define gcvCHANNEL_B (1 << 2)
++#define gcvCHANNEL_A (1 << 3)
++
++typedef enum _gceCHANNEL
++{
++ gcvCHANNEL_XXXX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XXXA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XXBX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XXBA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_XGXX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XGXA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XGBX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XGBA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RXXX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RXXA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RXBX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RXBA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RGXX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RGXA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RGBX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RGBA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++}
++gceCHANNEL;
++
++/******************************************************************************\
++******************************** VG Structures *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsCOLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFLOAT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFLOAT red;
++
++ /** Green color channel value for the color stop. */
++ gctFLOAT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFLOAT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFLOAT alpha;
++}
++gcsCOLOR_RAMP, * gcsCOLOR_RAMP_PTR;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints in fixed form.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsFIXED_COLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFIXED_POINT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFIXED_POINT red;
++
++ /** Green color channel value for the color stop. */
++ gctFIXED_POINT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFIXED_POINT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFIXED_POINT alpha;
++}
++gcsFIXED_COLOR_RAMP, * gcsFIXED_COLOR_RAMP_PTR;
++
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rectangle structure used by the gcoVG object.
++**
++** This structure defines the layout of a rectangle. Make sure width and
++** height are larger than 0.
++*/
++typedef struct _gcsVG_RECT * gcsVG_RECT_PTR;
++typedef struct _gcsVG_RECT
++{
++ /** Left location of the rectangle. */
++ gctINT x;
++
++ /** Top location of the rectangle. */
++ gctINT y;
++
++ /** Width of the rectangle. */
++ gctINT width;
++
++ /** Height of the rectangle. */
++ gctINT height;
++}
++gcsVG_RECT;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Path command buffer attribute structure.
++**
++** The gcsPATH_BUFFER_INFO structure contains the specifics about
++** the layout of the path data command buffer.
++*/
++typedef struct _gcsPATH_BUFFER_INFO * gcsPATH_BUFFER_INFO_PTR;
++typedef struct _gcsPATH_BUFFER_INFO
++{
++ gctUINT reservedForHead;
++ gctUINT reservedForTail;
++}
++gcsPATH_BUFFER_INFO;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the path data container structure.
++**
++** The gcsPATH structure defines the layout of the path data container.
++*/
++typedef struct _gcsPATH_DATA * gcsPATH_DATA_PTR;
++typedef struct _gcsPATH_DATA
++{
++ /* Data container in command buffer format. */
++ gcsCMDBUFFER data;
++
++ /* Path data type. */
++ gcePATHTYPE dataType;
++}
++gcsPATH_DATA;
++
++
++/******************************************************************************\
++********************************* gcoHAL Object ********************************
++\******************************************************************************/
++
++/* Query path data storage attributes. */
++gceSTATUS
++gcoHAL_QueryPathStorage(
++ IN gcoHAL Hal,
++ OUT gcsPATH_BUFFER_INFO_PTR Information
++ );
++
++/* Associate a completion signal with the command buffer. */
++gceSTATUS
++gcoHAL_AssociateCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Release the current command buffer completion signal. */
++gceSTATUS
++gcoHAL_DeassociateCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Verify whether the command buffer is still in use. */
++gceSTATUS
++gcoHAL_CheckCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Wait until the command buffer is no longer in use. */
++gceSTATUS
++gcoHAL_WaitCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Flush the pixel cache. */
++gceSTATUS
++gcoHAL_Flush(
++ IN gcoHAL Hal
++ );
++
++/* Split a harwdare address into pool and offset. */
++gceSTATUS
++gcoHAL_SplitAddress(
++ IN gcoHAL Hal,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Combine pool and offset into a harwdare address. */
++gceSTATUS
++gcoHAL_CombineAddress(
++ IN gcoHAL Hal,
++ IN gcePOOL Pool,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Schedule to free linear video memory allocated. */
++gceSTATUS
++gcoHAL_ScheduleVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT64 Node
++ );
++
++/* Free linear video memory allocated with gcoHAL_AllocateLinearVideoMemory. */
++gceSTATUS
++gcoHAL_FreeVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT64 Node
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gcoHAL_QueryCommandBuffer(
++ IN gcoHAL Hal,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++/* Allocate and lock linear video memory. */
++gceSTATUS
++gcoHAL_AllocateLinearVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ IN gcePOOL Pool,
++ OUT gctUINT64 * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Align the specified size accordingly to the hardware requirements. */
++gceSTATUS
++gcoHAL_GetAlignedSurfaceSize(
++ IN gcoHAL Hal,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++gceSTATUS
++gcoHAL_ReserveTask(
++ IN gcoHAL Hal,
++ IN gceBLOCK Block,
++ IN gctUINT TaskCount,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++/******************************************************************************\
++********************************** gcoVG Object ********************************
++\******************************************************************************/
++
++/** @defgroup gcoVG gcoVG
++**
++** The gcoVG object abstracts the VG hardware pipe.
++*/
++
++gctBOOL
++gcoVG_IsMaskSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsTargetSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsImageSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctUINT8 gcoVG_PackColorComponent(
++ gctFLOAT Value
++ );
++
++gceSTATUS
++gcoVG_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVG * Vg
++ );
++
++gceSTATUS
++gcoVG_Destroy(
++ IN gcoVG Vg
++ );
++
++gceSTATUS
++gcoVG_SetTarget(
++ IN gcoVG Vg,
++ IN gcoSURF Target
++ );
++
++gceSTATUS
++gcoVG_UnsetTarget(
++ IN gcoVG Vg,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_SetUserToSurface(
++ IN gcoVG Vg,
++ IN gctFLOAT UserToSurface[9]
++ );
++
++gceSTATUS
++gcoVG_SetSurfaceToImage(
++ IN gcoVG Vg,
++ IN gctFLOAT SurfaceToImage[9]
++ );
++
++gceSTATUS
++gcoVG_EnableMask(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetMask(
++ IN gcoVG Vg,
++ IN gcoSURF Mask
++ );
++
++gceSTATUS
++gcoVG_UnsetMask(
++ IN gcoVG Vg,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_FlushMask(
++ IN gcoVG Vg
++ );
++
++gceSTATUS
++gcoVG_EnableScissor(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetScissor(
++ IN gcoVG Vg,
++ IN gctSIZE_T RectangleCount,
++ IN gcsVG_RECT_PTR Rectangles
++ );
++
++gceSTATUS
++gcoVG_EnableColorTransform(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetColorTransform(
++ IN gcoVG Vg,
++ IN gctFLOAT ColorTransform[8]
++ );
++
++gceSTATUS
++gcoVG_SetTileFillColor(
++ IN gcoVG Vg,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++gceSTATUS
++gcoVG_SetSolidPaint(
++ IN gcoVG Vg,
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++gceSTATUS
++gcoVG_SetLinearPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT Constant,
++ IN gctFLOAT StepX,
++ IN gctFLOAT StepY
++ );
++
++gceSTATUS
++gcoVG_SetRadialPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT LinConstant,
++ IN gctFLOAT LinStepX,
++ IN gctFLOAT LinStepY,
++ IN gctFLOAT RadConstant,
++ IN gctFLOAT RadStepX,
++ IN gctFLOAT RadStepY,
++ IN gctFLOAT RadStepXX,
++ IN gctFLOAT RadStepYY,
++ IN gctFLOAT RadStepXY
++ );
++
++gceSTATUS
++gcoVG_SetPatternPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT UConstant,
++ IN gctFLOAT UStepX,
++ IN gctFLOAT UStepY,
++ IN gctFLOAT VConstant,
++ IN gctFLOAT VStepX,
++ IN gctFLOAT VStepY,
++ IN gctBOOL Linear
++ );
++
++gceSTATUS
++gcoVG_SetColorRamp(
++ IN gcoVG Vg,
++ IN gcoSURF ColorRamp,
++ IN gceTILE_MODE ColorRampSpreadMode
++ );
++
++gceSTATUS
++gcoVG_SetPattern(
++ IN gcoVG Vg,
++ IN gcoSURF Pattern,
++ IN gceTILE_MODE TileMode,
++ IN gceIMAGE_FILTER Filter
++ );
++
++gceSTATUS
++gcoVG_SetImageMode(
++ IN gcoVG Vg,
++ IN gceVG_IMAGE Mode
++ );
++
++gceSTATUS
++gcoVG_SetBlendMode(
++ IN gcoVG Vg,
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_SetRenderingQuality(
++ IN gcoVG Vg,
++ IN gceRENDER_QUALITY Quality
++ );
++
++gceSTATUS
++gcoVG_SetFillRule(
++ IN gcoVG Vg,
++ IN gceFILL_RULE FillRule
++ );
++
++gceSTATUS
++gcoVG_FinalizePath(
++ IN gcoVG Vg,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++gceSTATUS
++gcoVG_Clear(
++ IN gcoVG Vg,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_DrawPath(
++ IN gcoVG Vg,
++ IN gcsPATH_DATA_PTR PathData,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++ IN gctBOOL SoftwareTesselation
++ );
++
++gceSTATUS
++gcoVG_DrawImage(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctBOOL Mask
++ );
++
++gceSTATUS
++gcoVG_TesselateImage(
++ IN gcoVG Vg,
++ IN gcoSURF Image,
++ IN gcsVG_RECT_PTR Rectangle,
++ IN gceIMAGE_FILTER Filter,
++ IN gctBOOL Mask,
++ IN gctBOOL SoftwareTesselation
++ );
++
++gceSTATUS
++gcoVG_Blit(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gcsVG_RECT_PTR SrcRect,
++ IN gcsVG_RECT_PTR TrgRect,
++ IN gceIMAGE_FILTER Filter,
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_ColorMatrix(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN const gctFLOAT * Matrix,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_SeparableConvolve(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT KernelWidth,
++ IN gctINT KernelHeight,
++ IN gctINT ShiftX,
++ IN gctINT ShiftY,
++ IN const gctINT16 * KernelX,
++ IN const gctINT16 * KernelY,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_GaussianBlur(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctFLOAT StdDeviationX,
++ IN gctFLOAT StdDeviationY,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_EnableDither(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_vg_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_enum.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_enum.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_enum.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_enum.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,965 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_enum_h_
++#define __gc_hal_enum_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Chip models. */
++typedef enum _gceCHIPMODEL
++{
++ gcv300 = 0x0300,
++ gcv320 = 0x0320,
++ gcv350 = 0x0350,
++ gcv355 = 0x0355,
++ gcv400 = 0x0400,
++ gcv410 = 0x0410,
++ gcv420 = 0x0420,
++ gcv450 = 0x0450,
++ gcv500 = 0x0500,
++ gcv530 = 0x0530,
++ gcv600 = 0x0600,
++ gcv700 = 0x0700,
++ gcv800 = 0x0800,
++ gcv860 = 0x0860,
++ gcv880 = 0x0880,
++ gcv1000 = 0x1000,
++ gcv2000 = 0x2000,
++ gcv2100 = 0x2100,
++ gcv4000 = 0x4000,
++}
++gceCHIPMODEL;
++
++/* Chip features. */
++typedef enum _gceFEATURE
++{
++ gcvFEATURE_PIPE_2D = 0,
++ gcvFEATURE_PIPE_3D,
++ gcvFEATURE_PIPE_VG,
++ gcvFEATURE_DC,
++ gcvFEATURE_HIGH_DYNAMIC_RANGE,
++ gcvFEATURE_MODULE_CG,
++ gcvFEATURE_MIN_AREA,
++ gcvFEATURE_BUFFER_INTERLEAVING,
++ gcvFEATURE_BYTE_WRITE_2D,
++ gcvFEATURE_ENDIANNESS_CONFIG,
++ gcvFEATURE_DUAL_RETURN_BUS,
++ gcvFEATURE_DEBUG_MODE,
++ gcvFEATURE_YUY2_RENDER_TARGET,
++ gcvFEATURE_FRAGMENT_PROCESSOR,
++ gcvFEATURE_2DPE20,
++ gcvFEATURE_FAST_CLEAR,
++ gcvFEATURE_YUV420_TILER,
++ gcvFEATURE_YUY2_AVERAGING,
++ gcvFEATURE_FLIP_Y,
++ gcvFEATURE_EARLY_Z,
++ gcvFEATURE_Z_COMPRESSION,
++ gcvFEATURE_MSAA,
++ gcvFEATURE_SPECIAL_ANTI_ALIASING,
++ gcvFEATURE_SPECIAL_MSAA_LOD,
++ gcvFEATURE_422_TEXTURE_COMPRESSION,
++ gcvFEATURE_DXT_TEXTURE_COMPRESSION,
++ gcvFEATURE_ETC1_TEXTURE_COMPRESSION,
++ gcvFEATURE_CORRECT_TEXTURE_CONVERTER,
++ gcvFEATURE_TEXTURE_8K,
++ gcvFEATURE_SCALER,
++ gcvFEATURE_YUV420_SCALER,
++ gcvFEATURE_SHADER_HAS_W,
++ gcvFEATURE_SHADER_HAS_SIGN,
++ gcvFEATURE_SHADER_HAS_FLOOR,
++ gcvFEATURE_SHADER_HAS_CEIL,
++ gcvFEATURE_SHADER_HAS_SQRT,
++ gcvFEATURE_SHADER_HAS_TRIG,
++ gcvFEATURE_VAA,
++ gcvFEATURE_HZ,
++ gcvFEATURE_CORRECT_STENCIL,
++ gcvFEATURE_VG20,
++ gcvFEATURE_VG_FILTER,
++ gcvFEATURE_VG21,
++ gcvFEATURE_VG_DOUBLE_BUFFER,
++ gcvFEATURE_MC20,
++ gcvFEATURE_SUPER_TILED,
++ gcvFEATURE_2D_FILTERBLIT_PLUS_ALPHABLEND,
++ gcvFEATURE_2D_DITHER,
++ gcvFEATURE_2D_A8_TARGET,
++ gcvFEATURE_2D_FILTERBLIT_FULLROTATION,
++ gcvFEATURE_2D_BITBLIT_FULLROTATION,
++ gcvFEATURE_WIDE_LINE,
++ gcvFEATURE_FC_FLUSH_STALL,
++ gcvFEATURE_FULL_DIRECTFB,
++ gcvFEATURE_HALF_FLOAT_PIPE,
++ gcvFEATURE_LINE_LOOP,
++ gcvFEATURE_2D_YUV_BLIT,
++ gcvFEATURE_2D_TILING,
++ gcvFEATURE_NON_POWER_OF_TWO,
++ gcvFEATURE_3D_TEXTURE,
++ gcvFEATURE_TEXTURE_ARRAY,
++ gcvFEATURE_TILE_FILLER,
++ gcvFEATURE_LOGIC_OP,
++ gcvFEATURE_COMPOSITION,
++ gcvFEATURE_MIXED_STREAMS,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT,
++ gcvFEATURE_END_EVENT,
++ gcvFEATURE_VERTEX_10_10_10_2,
++ gcvFEATURE_TEXTURE_10_10_10_2,
++ gcvFEATURE_TEXTURE_ANISOTROPIC_FILTERING,
++ gcvFEATURE_TEXTURE_FLOAT_HALF_FLOAT,
++ gcvFEATURE_2D_ROTATION_STALL_FIX,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX,
++ gcvFEATURE_BUG_FIXES10,
++ gcvFEATURE_2D_MINOR_TILING,
++ /* Supertiled compressed textures are supported. */
++ gcvFEATURE_TEX_COMPRRESSION_SUPERTILED,
++ gcvFEATURE_FAST_MSAA,
++ gcvFEATURE_BUG_FIXED_INDEXED_TRIANGLE_STRIP,
++ gcvFEATURE_TEXTURE_TILED_READ,
++ gcvFEATURE_DEPTH_BIAS_FIX,
++ gcvFEATURE_RECT_PRIMITIVE,
++ gcvFEATURE_BUG_FIXES11,
++ gcvFEATURE_SUPERTILED_TEXTURE,
++ gcvFEATURE_2D_NO_COLORBRUSH_INDEX8,
++ gcvFEATURE_RS_YUV_TARGET,
++ gcvFEATURE_2D_FC_SOURCE,
++ gcvFEATURE_PE_DITHER_FIX,
++ gcvFEATURE_2D_YUV_SEPARATE_STRIDE,
++ gcvFEATURE_FRUSTUM_CLIP_FIX,
++ gcvFEATURE_TEXTURE_LINEAR,
++ gcvFEATURE_TEXTURE_YUV_ASSEMBLER,
++ gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING,
++ gcvFEATURE_BUGFIX15,
++ gcvFEATURE_2D_GAMMA,
++ gcvFEATURE_2D_COLOR_SPACE_CONVERSION,
++ gcvFEATURE_2D_SUPER_TILE_VERSION,
++ gcvFEATURE_2D_MIRROR_EXTENSION,
++ gcvFEATURE_2D_SUPER_TILE_V1,
++ gcvFEATURE_2D_SUPER_TILE_V2,
++ gcvFEATURE_2D_SUPER_TILE_V3,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX2,
++ gcvFEATURE_ELEMENT_INDEX_UINT,
++ gcvFEATURE_2D_COMPRESSION,
++ gcvFEATURE_2D_OPF_YUV_OUTPUT,
++ gcvFEATURE_2D_MULTI_SRC_BLT_TO_UNIFIED_DST_RECT,
++ gcvFEATURE_2D_YUV_MODE,
++ gcvFEATURE_DECOMPRESS_Z16,
++ gcvFEATURE_LINEAR_RENDER_TARGET,
++ gcvFEATURE_BUG_FIXES8,
++ gcvFEATURE_HALTI2,
++ gcvFEATURE_MMU,
++}
++gceFEATURE;
++
++/* Chip Power Status. */
++typedef enum _gceCHIPPOWERSTATE
++{
++ gcvPOWER_ON = 0,
++ gcvPOWER_OFF,
++ gcvPOWER_IDLE,
++ gcvPOWER_SUSPEND,
++ gcvPOWER_SUSPEND_ATPOWERON,
++ gcvPOWER_OFF_ATPOWERON,
++ gcvPOWER_IDLE_BROADCAST,
++ gcvPOWER_SUSPEND_BROADCAST,
++ gcvPOWER_OFF_BROADCAST,
++ gcvPOWER_OFF_RECOVERY,
++ gcvPOWER_OFF_TIMEOUT,
++ gcvPOWER_ON_AUTO
++}
++gceCHIPPOWERSTATE;
++
++/* CPU cache operations */
++typedef enum _gceCACHEOPERATION
++{
++ gcvCACHE_CLEAN = 0x01,
++ gcvCACHE_INVALIDATE = 0x02,
++ gcvCACHE_FLUSH = gcvCACHE_CLEAN | gcvCACHE_INVALIDATE,
++ gcvCACHE_MEMORY_BARRIER = 0x04
++}
++gceCACHEOPERATION;
++
++/* Surface types. */
++typedef enum _gceSURF_TYPE
++{
++ gcvSURF_TYPE_UNKNOWN = 0,
++ gcvSURF_INDEX,
++ gcvSURF_VERTEX,
++ gcvSURF_TEXTURE,
++ gcvSURF_RENDER_TARGET,
++ gcvSURF_DEPTH,
++ gcvSURF_BITMAP,
++ gcvSURF_TILE_STATUS,
++ gcvSURF_IMAGE,
++ gcvSURF_MASK,
++ gcvSURF_SCISSOR,
++ gcvSURF_HIERARCHICAL_DEPTH,
++ gcvSURF_NUM_TYPES, /* Make sure this is the last one! */
++
++ /* Combinations. */
++ gcvSURF_NO_TILE_STATUS = 0x100,
++ gcvSURF_NO_VIDMEM = 0x200, /* Used to allocate surfaces with no underlying vidmem node.
++ In Android, vidmem node is allocated by another process. */
++ gcvSURF_CACHEABLE = 0x400, /* Used to allocate a cacheable surface */
++ gcvSURF_FLIP = 0x800, /* The Resolve Target the will been flip resolve from RT */
++ gcvSURF_TILE_STATUS_DIRTY = 0x1000, /* Init tile status to all dirty */
++
++ gcvSURF_LINEAR = 0x2000,
++ gcvSURF_VG = 0x4000,
++
++ gcvSURF_TEXTURE_LINEAR = gcvSURF_TEXTURE
++ | gcvSURF_LINEAR,
++
++ gcvSURF_RENDER_TARGET_NO_TILE_STATUS = gcvSURF_RENDER_TARGET
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_RENDER_TARGET_TS_DIRTY = gcvSURF_RENDER_TARGET
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ gcvSURF_DEPTH_NO_TILE_STATUS = gcvSURF_DEPTH
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_DEPTH_TS_DIRTY = gcvSURF_DEPTH
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ /* Supported surface types with no vidmem node. */
++ gcvSURF_BITMAP_NO_VIDMEM = gcvSURF_BITMAP
++ | gcvSURF_NO_VIDMEM,
++
++ gcvSURF_TEXTURE_NO_VIDMEM = gcvSURF_TEXTURE
++ | gcvSURF_NO_VIDMEM,
++
++ /* Cacheable surface types with no vidmem node. */
++ gcvSURF_CACHEABLE_BITMAP_NO_VIDMEM = gcvSURF_BITMAP_NO_VIDMEM
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_CACHEABLE_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_FLIP_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_FLIP,
++}
++gceSURF_TYPE;
++
++typedef enum _gceSURF_USAGE
++{
++ gcvSURF_USAGE_UNKNOWN,
++ gcvSURF_USAGE_RESOLVE_AFTER_CPU,
++ gcvSURF_USAGE_RESOLVE_AFTER_3D
++}
++gceSURF_USAGE;
++
++typedef enum _gceSURF_COLOR_TYPE
++{
++ gcvSURF_COLOR_UNKNOWN = 0,
++ gcvSURF_COLOR_LINEAR = 0x01,
++ gcvSURF_COLOR_ALPHA_PRE = 0x02,
++}
++gceSURF_COLOR_TYPE;
++
++/* Rotation. */
++typedef enum _gceSURF_ROTATION
++{
++ gcvSURF_0_DEGREE = 0,
++ gcvSURF_90_DEGREE,
++ gcvSURF_180_DEGREE,
++ gcvSURF_270_DEGREE,
++ gcvSURF_FLIP_X,
++ gcvSURF_FLIP_Y,
++
++ gcvSURF_POST_FLIP_X = 0x40000000,
++ gcvSURF_POST_FLIP_Y = 0x80000000,
++}
++gceSURF_ROTATION;
++
++typedef enum _gceMIPMAP_IMAGE_FORMAT
++{
++ gcvUNKNOWN_MIPMAP_IMAGE_FORMAT = -2
++}
++gceMIPMAP_IMAGE_FORMAT;
++
++
++/* Surface formats. */
++typedef enum _gceSURF_FORMAT
++{
++ /* Unknown format. */
++ gcvSURF_UNKNOWN = 0,
++
++ /* Palettized formats. */
++ gcvSURF_INDEX1 = 100,
++ gcvSURF_INDEX4,
++ gcvSURF_INDEX8,
++
++ /* RGB formats. */
++ gcvSURF_A2R2G2B2 = 200,
++ gcvSURF_R3G3B2,
++ gcvSURF_A8R3G3B2,
++ gcvSURF_X4R4G4B4,
++ gcvSURF_A4R4G4B4,
++ gcvSURF_R4G4B4A4,
++ gcvSURF_X1R5G5B5,
++ gcvSURF_A1R5G5B5,
++ gcvSURF_R5G5B5A1,
++ gcvSURF_R5G6B5,
++ gcvSURF_R8G8B8,
++ gcvSURF_X8R8G8B8,
++ gcvSURF_A8R8G8B8,
++ gcvSURF_R8G8B8A8,
++ gcvSURF_G8R8G8B8,
++ gcvSURF_R8G8B8G8,
++ gcvSURF_X2R10G10B10,
++ gcvSURF_A2R10G10B10,
++ gcvSURF_X12R12G12B12,
++ gcvSURF_A12R12G12B12,
++ gcvSURF_X16R16G16B16,
++ gcvSURF_A16R16G16B16,
++ gcvSURF_A32R32G32B32,
++ gcvSURF_R8G8B8X8,
++ gcvSURF_R5G5B5X1,
++ gcvSURF_R4G4B4X4,
++
++ /* BGR formats. */
++ gcvSURF_A4B4G4R4 = 300,
++ gcvSURF_A1B5G5R5,
++ gcvSURF_B5G6R5,
++ gcvSURF_B8G8R8,
++ gcvSURF_B16G16R16,
++ gcvSURF_X8B8G8R8,
++ gcvSURF_A8B8G8R8,
++ gcvSURF_A2B10G10R10,
++ gcvSURF_X16B16G16R16,
++ gcvSURF_A16B16G16R16,
++ gcvSURF_B32G32R32,
++ gcvSURF_X32B32G32R32,
++ gcvSURF_A32B32G32R32,
++ gcvSURF_B4G4R4A4,
++ gcvSURF_B5G5R5A1,
++ gcvSURF_B8G8R8X8,
++ gcvSURF_B8G8R8A8,
++ gcvSURF_X4B4G4R4,
++ gcvSURF_X1B5G5R5,
++ gcvSURF_B4G4R4X4,
++ gcvSURF_B5G5R5X1,
++ gcvSURF_X2B10G10R10,
++
++ /* Compressed formats. */
++ gcvSURF_DXT1 = 400,
++ gcvSURF_DXT2,
++ gcvSURF_DXT3,
++ gcvSURF_DXT4,
++ gcvSURF_DXT5,
++ gcvSURF_CXV8U8,
++ gcvSURF_ETC1,
++ gcvSURF_R11_EAC,
++ gcvSURF_SIGNED_R11_EAC,
++ gcvSURF_RG11_EAC,
++ gcvSURF_SIGNED_RG11_EAC,
++ gcvSURF_RGB8_ETC2,
++ gcvSURF_SRGB8_ETC2,
++ gcvSURF_RGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_RGBA8_ETC2_EAC,
++ gcvSURF_SRGB8_ALPHA8_ETC2_EAC,
++
++ /* YUV formats. */
++ gcvSURF_YUY2 = 500,
++ gcvSURF_UYVY,
++ gcvSURF_YV12,
++ gcvSURF_I420,
++ gcvSURF_NV12,
++ gcvSURF_NV21,
++ gcvSURF_NV16,
++ gcvSURF_NV61,
++ gcvSURF_YVYU,
++ gcvSURF_VYUY,
++
++ /* Depth formats. */
++ gcvSURF_D16 = 600,
++ gcvSURF_D24S8,
++ gcvSURF_D32,
++ gcvSURF_D24X8,
++
++ /* Alpha formats. */
++ gcvSURF_A4 = 700,
++ gcvSURF_A8,
++ gcvSURF_A12,
++ gcvSURF_A16,
++ gcvSURF_A32,
++ gcvSURF_A1,
++
++ /* Luminance formats. */
++ gcvSURF_L4 = 800,
++ gcvSURF_L8,
++ gcvSURF_L12,
++ gcvSURF_L16,
++ gcvSURF_L32,
++ gcvSURF_L1,
++
++ /* Alpha/Luminance formats. */
++ gcvSURF_A4L4 = 900,
++ gcvSURF_A2L6,
++ gcvSURF_A8L8,
++ gcvSURF_A4L12,
++ gcvSURF_A12L12,
++ gcvSURF_A16L16,
++
++ /* Bump formats. */
++ gcvSURF_L6V5U5 = 1000,
++ gcvSURF_V8U8,
++ gcvSURF_X8L8V8U8,
++ gcvSURF_Q8W8V8U8,
++ gcvSURF_A2W10V10U10,
++ gcvSURF_V16U16,
++ gcvSURF_Q16W16V16U16,
++
++ /* R/RG/RA formats. */
++ gcvSURF_R8 = 1100,
++ gcvSURF_X8R8,
++ gcvSURF_G8R8,
++ gcvSURF_X8G8R8,
++ gcvSURF_A8R8,
++ gcvSURF_R16,
++ gcvSURF_X16R16,
++ gcvSURF_G16R16,
++ gcvSURF_X16G16R16,
++ gcvSURF_A16R16,
++ gcvSURF_R32,
++ gcvSURF_X32R32,
++ gcvSURF_G32R32,
++ gcvSURF_X32G32R32,
++ gcvSURF_A32R32,
++ gcvSURF_RG16,
++
++ /* Floating point formats. */
++ gcvSURF_R16F = 1200,
++ gcvSURF_X16R16F,
++ gcvSURF_G16R16F,
++ gcvSURF_X16G16R16F,
++ gcvSURF_B16G16R16F,
++ gcvSURF_X16B16G16R16F,
++ gcvSURF_A16B16G16R16F,
++ gcvSURF_R32F,
++ gcvSURF_X32R32F,
++ gcvSURF_G32R32F,
++ gcvSURF_X32G32R32F,
++ gcvSURF_B32G32R32F,
++ gcvSURF_X32B32G32R32F,
++ gcvSURF_A32B32G32R32F,
++ gcvSURF_A16F,
++ gcvSURF_L16F,
++ gcvSURF_A16L16F,
++ gcvSURF_A16R16F,
++ gcvSURF_A32F,
++ gcvSURF_L32F,
++ gcvSURF_A32L32F,
++ gcvSURF_A32R32F,
++
++}
++gceSURF_FORMAT;
++
++/* Pixel swizzle modes. */
++typedef enum _gceSURF_SWIZZLE
++{
++ gcvSURF_NOSWIZZLE = 0,
++ gcvSURF_ARGB,
++ gcvSURF_ABGR,
++ gcvSURF_RGBA,
++ gcvSURF_BGRA
++}
++gceSURF_SWIZZLE;
++
++/* Transparency modes. */
++typedef enum _gceSURF_TRANSPARENCY
++{
++ /* Valid only for PE 1.0 */
++ gcvSURF_OPAQUE = 0,
++ gcvSURF_SOURCE_MATCH,
++ gcvSURF_SOURCE_MASK,
++ gcvSURF_PATTERN_MASK,
++}
++gceSURF_TRANSPARENCY;
++
++/* Surface Alignment. */
++typedef enum _gceSURF_ALIGNMENT
++{
++ gcvSURF_FOUR = 0,
++ gcvSURF_SIXTEEN,
++ gcvSURF_SUPER_TILED,
++ gcvSURF_SPLIT_TILED,
++ gcvSURF_SPLIT_SUPER_TILED,
++}
++gceSURF_ALIGNMENT;
++
++
++/* Surface Addressing. */
++typedef enum _gceSURF_ADDRESSING
++{
++ gcvSURF_NO_STRIDE_TILED = 0,
++ gcvSURF_NO_STRIDE_LINEAR,
++ gcvSURF_STRIDE_TILED,
++ gcvSURF_STRIDE_LINEAR
++}
++gceSURF_ADDRESSING;
++
++/* Transparency modes. */
++typedef enum _gce2D_TRANSPARENCY
++{
++ /* Valid only for PE 2.0 */
++ gcv2D_OPAQUE = 0,
++ gcv2D_KEYED,
++ gcv2D_MASKED
++}
++gce2D_TRANSPARENCY;
++
++/* Mono packing modes. */
++typedef enum _gceSURF_MONOPACK
++{
++ gcvSURF_PACKED8 = 0,
++ gcvSURF_PACKED16,
++ gcvSURF_PACKED32,
++ gcvSURF_UNPACKED,
++}
++gceSURF_MONOPACK;
++
++/* Blending modes. */
++typedef enum _gceSURF_BLEND_MODE
++{
++ /* Porter-Duff blending modes. */
++ /* Fsrc Fdst */
++ gcvBLEND_CLEAR = 0, /* 0 0 */
++ gcvBLEND_SRC, /* 1 0 */
++ gcvBLEND_DST, /* 0 1 */
++ gcvBLEND_SRC_OVER_DST, /* 1 1 - Asrc */
++ gcvBLEND_DST_OVER_SRC, /* 1 - Adst 1 */
++ gcvBLEND_SRC_IN_DST, /* Adst 0 */
++ gcvBLEND_DST_IN_SRC, /* 0 Asrc */
++ gcvBLEND_SRC_OUT_DST, /* 1 - Adst 0 */
++ gcvBLEND_DST_OUT_SRC, /* 0 1 - Asrc */
++ gcvBLEND_SRC_ATOP_DST, /* Adst 1 - Asrc */
++ gcvBLEND_DST_ATOP_SRC, /* 1 - Adst Asrc */
++ gcvBLEND_SRC_XOR_DST, /* 1 - Adst 1 - Asrc */
++
++ /* Special blending modes. */
++ gcvBLEND_SET, /* DST = 1 */
++ gcvBLEND_SUB /* DST = DST * (1 - SRC) */
++}
++gceSURF_BLEND_MODE;
++
++/* Per-pixel alpha modes. */
++typedef enum _gceSURF_PIXEL_ALPHA_MODE
++{
++ gcvSURF_PIXEL_ALPHA_STRAIGHT = 0,
++ gcvSURF_PIXEL_ALPHA_INVERSED
++}
++gceSURF_PIXEL_ALPHA_MODE;
++
++/* Global alpha modes. */
++typedef enum _gceSURF_GLOBAL_ALPHA_MODE
++{
++ gcvSURF_GLOBAL_ALPHA_OFF = 0,
++ gcvSURF_GLOBAL_ALPHA_ON,
++ gcvSURF_GLOBAL_ALPHA_SCALE
++}
++gceSURF_GLOBAL_ALPHA_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gceSURF_PIXEL_COLOR_MODE
++{
++ gcvSURF_COLOR_STRAIGHT = 0,
++ gcvSURF_COLOR_MULTIPLY
++}
++gceSURF_PIXEL_COLOR_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_PIXEL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_COLOR_MULTIPLY_ENABLE
++}
++gce2D_PIXEL_COLOR_MULTIPLY_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_GLOBAL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_GLOBAL_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_ALPHA,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_COLOR
++}
++gce2D_GLOBAL_COLOR_MULTIPLY_MODE;
++
++/* Alpha blending factor modes. */
++typedef enum _gceSURF_BLEND_FACTOR_MODE
++{
++ gcvSURF_BLEND_ZERO = 0,
++ gcvSURF_BLEND_ONE,
++ gcvSURF_BLEND_STRAIGHT,
++ gcvSURF_BLEND_INVERSED,
++ gcvSURF_BLEND_COLOR,
++ gcvSURF_BLEND_COLOR_INVERSED,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED,
++ gcvSURF_BLEND_STRAIGHT_NO_CROSS,
++ gcvSURF_BLEND_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_COLOR_NO_CROSS,
++ gcvSURF_BLEND_COLOR_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED_CROSS
++}
++gceSURF_BLEND_FACTOR_MODE;
++
++/* Alpha blending porter duff rules. */
++typedef enum _gce2D_PORTER_DUFF_RULE
++{
++ gcvPD_CLEAR = 0,
++ gcvPD_SRC,
++ gcvPD_SRC_OVER,
++ gcvPD_DST_OVER,
++ gcvPD_SRC_IN,
++ gcvPD_DST_IN,
++ gcvPD_SRC_OUT,
++ gcvPD_DST_OUT,
++ gcvPD_SRC_ATOP,
++ gcvPD_DST_ATOP,
++ gcvPD_ADD,
++ gcvPD_XOR,
++ gcvPD_DST
++}
++gce2D_PORTER_DUFF_RULE;
++
++/* Alpha blending factor modes. */
++typedef enum _gce2D_YUV_COLOR_MODE
++{
++ gcv2D_YUV_601= 0,
++ gcv2D_YUV_709,
++ gcv2D_YUV_USER_DEFINED,
++ gcv2D_YUV_USER_DEFINED_CLAMP,
++
++ /* Default setting is for src. gcv2D_YUV_DST
++ can be ORed to set dst.
++ */
++ gcv2D_YUV_DST = 0x80000000,
++}
++gce2D_YUV_COLOR_MODE;
++
++typedef enum _gce2D_COMMAND
++{
++ gcv2D_CLEAR = 0,
++ gcv2D_LINE,
++ gcv2D_BLT,
++ gcv2D_STRETCH,
++ gcv2D_HOR_FILTER,
++ gcv2D_VER_FILTER,
++ gcv2D_MULTI_SOURCE_BLT,
++}
++gce2D_COMMAND;
++
++typedef enum _gce2D_TILE_STATUS_CONFIG
++{
++ gcv2D_TSC_DISABLE = 0,
++ gcv2D_TSC_ENABLE = 0x00000001,
++ gcv2D_TSC_COMPRESSED = 0x00000002,
++ gcv2D_TSC_DOWN_SAMPLER = 0x00000004,
++ gcv2D_TSC_2D_COMPRESSED = 0x00000008,
++}
++gce2D_TILE_STATUS_CONFIG;
++
++typedef enum _gce2D_QUERY
++{
++ gcv2D_QUERY_RGB_ADDRESS_MIN_ALIGN = 0,
++ gcv2D_QUERY_RGB_STRIDE_MIN_ALIGN,
++ gcv2D_QUERY_YUV_ADDRESS_MIN_ALIGN,
++ gcv2D_QUERY_YUV_STRIDE_MIN_ALIGN,
++}
++gce2D_QUERY;
++
++typedef enum _gce2D_SUPER_TILE_VERSION
++{
++ gcv2D_SUPER_TILE_VERSION_V1 = 1,
++ gcv2D_SUPER_TILE_VERSION_V2 = 2,
++ gcv2D_SUPER_TILE_VERSION_V3 = 3,
++}
++gce2D_SUPER_TILE_VERSION;
++
++typedef enum _gce2D_STATE
++{
++ gcv2D_STATE_SPECIAL_FILTER_MIRROR_MODE = 1,
++ gcv2D_STATE_SUPER_TILE_VERSION,
++ gcv2D_STATE_EN_GAMMA,
++ gcv2D_STATE_DE_GAMMA,
++ gcv2D_STATE_MULTI_SRC_BLIT_UNIFIED_DST_RECT,
++ gcv2D_STATE_XRGB_ENABLE,
++
++ gcv2D_STATE_ARRAY_EN_GAMMA = 0x10001,
++ gcv2D_STATE_ARRAY_DE_GAMMA,
++ gcv2D_STATE_ARRAY_CSC_YUV_TO_RGB,
++ gcv2D_STATE_ARRAY_CSC_RGB_TO_YUV,
++}
++gce2D_STATE;
++
++#ifndef VIVANTE_NO_3D
++/* Texture functions. */
++typedef enum _gceTEXTURE_FUNCTION
++{
++ gcvTEXTURE_DUMMY = 0,
++ gcvTEXTURE_REPLACE = 0,
++ gcvTEXTURE_MODULATE,
++ gcvTEXTURE_ADD,
++ gcvTEXTURE_ADD_SIGNED,
++ gcvTEXTURE_INTERPOLATE,
++ gcvTEXTURE_SUBTRACT,
++ gcvTEXTURE_DOT3
++}
++gceTEXTURE_FUNCTION;
++
++/* Texture sources. */
++typedef enum _gceTEXTURE_SOURCE
++{
++ gcvCOLOR_FROM_TEXTURE = 0,
++ gcvCOLOR_FROM_CONSTANT_COLOR,
++ gcvCOLOR_FROM_PRIMARY_COLOR,
++ gcvCOLOR_FROM_PREVIOUS_COLOR
++}
++gceTEXTURE_SOURCE;
++
++/* Texture source channels. */
++typedef enum _gceTEXTURE_CHANNEL
++{
++ gcvFROM_COLOR = 0,
++ gcvFROM_ONE_MINUS_COLOR,
++ gcvFROM_ALPHA,
++ gcvFROM_ONE_MINUS_ALPHA
++}
++gceTEXTURE_CHANNEL;
++#endif /* VIVANTE_NO_3D */
++
++/* Filter types. */
++typedef enum _gceFILTER_TYPE
++{
++ gcvFILTER_SYNC = 0,
++ gcvFILTER_BLUR,
++ gcvFILTER_USER
++}
++gceFILTER_TYPE;
++
++/* Filter pass types. */
++typedef enum _gceFILTER_PASS_TYPE
++{
++ gcvFILTER_HOR_PASS = 0,
++ gcvFILTER_VER_PASS
++}
++gceFILTER_PASS_TYPE;
++
++/* Endian hints. */
++typedef enum _gceENDIAN_HINT
++{
++ gcvENDIAN_NO_SWAP = 0,
++ gcvENDIAN_SWAP_WORD,
++ gcvENDIAN_SWAP_DWORD
++}
++gceENDIAN_HINT;
++
++/* Tiling modes. */
++typedef enum _gceTILING
++{
++ gcvLINEAR = 0,
++ gcvTILED,
++ gcvSUPERTILED,
++ gcvMULTI_TILED,
++ gcvMULTI_SUPERTILED,
++ gcvMINORTILED,
++}
++gceTILING;
++
++/* 2D pattern type. */
++typedef enum _gce2D_PATTERN
++{
++ gcv2D_PATTERN_SOLID = 0,
++ gcv2D_PATTERN_MONO,
++ gcv2D_PATTERN_COLOR,
++ gcv2D_PATTERN_INVALID
++}
++gce2D_PATTERN;
++
++/* 2D source type. */
++typedef enum _gce2D_SOURCE
++{
++ gcv2D_SOURCE_MASKED = 0,
++ gcv2D_SOURCE_MONO,
++ gcv2D_SOURCE_COLOR,
++ gcv2D_SOURCE_INVALID
++}
++gce2D_SOURCE;
++
++/* Pipes. */
++typedef enum _gcePIPE_SELECT
++{
++ gcvPIPE_INVALID = ~0,
++ gcvPIPE_3D = 0,
++ gcvPIPE_2D
++}
++gcePIPE_SELECT;
++
++/* Hardware type. */
++typedef enum _gceHARDWARE_TYPE
++{
++ gcvHARDWARE_INVALID = 0x00,
++ gcvHARDWARE_3D = 0x01,
++ gcvHARDWARE_2D = 0x02,
++ gcvHARDWARE_VG = 0x04,
++
++ gcvHARDWARE_3D2D = gcvHARDWARE_3D | gcvHARDWARE_2D
++}
++gceHARDWARE_TYPE;
++
++#define gcdCHIP_COUNT 3
++
++typedef enum _gceMMU_MODE
++{
++ gcvMMU_MODE_1K,
++ gcvMMU_MODE_4K,
++} gceMMU_MODE;
++
++/* User signal command codes. */
++typedef enum _gceUSER_SIGNAL_COMMAND_CODES
++{
++ gcvUSER_SIGNAL_CREATE,
++ gcvUSER_SIGNAL_DESTROY,
++ gcvUSER_SIGNAL_SIGNAL,
++ gcvUSER_SIGNAL_WAIT,
++ gcvUSER_SIGNAL_MAP,
++ gcvUSER_SIGNAL_UNMAP,
++}
++gceUSER_SIGNAL_COMMAND_CODES;
++
++/* Sync point command codes. */
++typedef enum _gceSYNC_POINT_COMMAND_CODES
++{
++ gcvSYNC_POINT_CREATE,
++ gcvSYNC_POINT_DESTROY,
++ gcvSYNC_POINT_SIGNAL,
++}
++gceSYNC_POINT_COMMAND_CODES;
++
++/* Event locations. */
++typedef enum _gceKERNEL_WHERE
++{
++ gcvKERNEL_COMMAND,
++ gcvKERNEL_VERTEX,
++ gcvKERNEL_TRIANGLE,
++ gcvKERNEL_TEXTURE,
++ gcvKERNEL_PIXEL,
++}
++gceKERNEL_WHERE;
++
++#if gcdENABLE_VG
++/* Hardware blocks. */
++typedef enum _gceBLOCK
++{
++ gcvBLOCK_COMMAND,
++ gcvBLOCK_TESSELLATOR,
++ gcvBLOCK_TESSELLATOR2,
++ gcvBLOCK_TESSELLATOR3,
++ gcvBLOCK_RASTER,
++ gcvBLOCK_VG,
++ gcvBLOCK_VG2,
++ gcvBLOCK_VG3,
++ gcvBLOCK_PIXEL,
++
++ /* Number of defined blocks. */
++ gcvBLOCK_COUNT
++}
++gceBLOCK;
++#endif
++
++/* gcdDUMP message type. */
++typedef enum _gceDEBUG_MESSAGE_TYPE
++{
++ gcvMESSAGE_TEXT,
++ gcvMESSAGE_DUMP
++}
++gceDEBUG_MESSAGE_TYPE;
++
++typedef enum _gceSPECIAL_HINT
++{
++ gceSPECIAL_HINT0,
++ gceSPECIAL_HINT1,
++ gceSPECIAL_HINT2,
++ gceSPECIAL_HINT3,
++ /* For disable dynamic stream/index */
++ gceSPECIAL_HINT4
++}
++gceSPECIAL_HINT;
++
++typedef enum _gceMACHINECODE
++{
++ gcvMACHINECODE_HOVERJET0 = 0x0,
++ gcvMACHINECODE_HOVERJET1 ,
++
++ gcvMACHINECODE_TAIJI0 ,
++ gcvMACHINECODE_TAIJI1 ,
++ gcvMACHINECODE_TAIJI2 ,
++
++ gcvMACHINECODE_ANTUTU0 ,
++
++ gcvMACHINECODE_GLB27_RELEASE_0,
++ gcvMACHINECODE_GLB27_RELEASE_1,
++
++ gcvMACHINECODE_WAVESCAPE0 ,
++ gcvMACHINECODE_WAVESCAPE1 ,
++
++ gcvMACHINECODE_NENAMARKV2_4_0 ,
++ gcvMACHINECODE_NENAMARKV2_4_1 ,
++
++ gcvMACHINECODE_GLB25_RELEASE_0,
++ gcvMACHINECODE_GLB25_RELEASE_1,
++ gcvMACHINECODE_GLB25_RELEASE_2,
++}
++gceMACHINECODE;
++
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckCONTEXT * gckCONTEXT;
++typedef struct _gcoCMDBUF * gcoCMDBUF;
++typedef struct _gcsSTATE_DELTA * gcsSTATE_DELTA_PTR;
++typedef struct _gcsQUEUE * gcsQUEUE_PTR;
++typedef struct _gcoQUEUE * gcoQUEUE;
++typedef struct _gcsHAL_INTERFACE * gcsHAL_INTERFACE_PTR;
++typedef struct _gcs2D_PROFILE * gcs2D_PROFILE_PTR;
++
++#if gcdENABLE_VG
++typedef struct _gcoVGHARDWARE * gcoVGHARDWARE;
++typedef struct _gcoVGBUFFER * gcoVGBUFFER;
++typedef struct _gckVGHARDWARE * gckVGHARDWARE;
++typedef struct _gcsVGCONTEXT * gcsVGCONTEXT_PTR;
++typedef struct _gcsVGCONTEXT_MAP * gcsVGCONTEXT_MAP_PTR;
++typedef struct _gcsVGCMDQUEUE * gcsVGCMDQUEUE_PTR;
++typedef struct _gcsTASK_MASTER_TABLE * gcsTASK_MASTER_TABLE_PTR;
++typedef struct _gckVGKERNEL * gckVGKERNEL;
++typedef void * gctTHREAD;
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_enum_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2661 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_h_
++#define __gc_hal_h_
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++#include "gc_hal_profiler.h"
++#include "gc_hal_driver.h"
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_statistics.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* Alignment Macros *******************************
++\******************************************************************************/
++
++#define gcmALIGN(n, align) \
++( \
++ ((n) + ((align) - 1)) & ~((align) - 1) \
++)
++
++#define gcmALIGN_BASE(n, align) \
++( \
++ ((n) & ~((align) - 1)) \
++)
++
++/******************************************************************************\
++***************************** Element Count Macro *****************************
++\******************************************************************************/
++
++#define gcmSIZEOF(a) \
++( \
++ (gctSIZE_T) (sizeof(a)) \
++)
++
++#define gcmCOUNTOF(a) \
++( \
++ sizeof(a) / sizeof(a[0]) \
++)
++
++/******************************************************************************\
++********************************* Cast Macro **********************************
++\******************************************************************************/
++#define gcmNAME_TO_PTR(na) \
++ gckKERNEL_QueryPointerFromName(kernel, gcmALL_TO_UINT32(na))
++
++#define gcmPTR_TO_NAME(ptr) \
++ gckKERNEL_AllocateNameFromPointer(kernel, ptr)
++
++#define gcmRELEASE_NAME(na) \
++ gckKERNEL_DeleteName(kernel, gcmALL_TO_UINT32(na))
++
++#ifdef __LP64__
++
++#define gcmALL_TO_UINT32(t) \
++( \
++ (gctUINT32) (gctUINTPTR_T) (t)\
++)
++
++#define gcmPTR_TO_UINT64(p) \
++( \
++ (gctUINT64) (p)\
++)
++
++#define gcmUINT64_TO_PTR(u) \
++( \
++ (gctPOINTER) (u)\
++)
++
++#else /* 32 bit */
++
++#define gcmALL_TO_UINT32(t) \
++( \
++ (gctUINT32) (t)\
++)
++
++#define gcmPTR_TO_UINT64(p) \
++( \
++ (gctUINT64) (gctUINTPTR_T) (p)\
++)
++
++#define gcmUINT64_TO_PTR(u) \
++( \
++ (gctPOINTER) (gctUINTPTR_T) (u)\
++)
++
++#endif
++
++#define gcmUINT64_TO_TYPE(u, t) \
++( \
++ (t) (gctUINTPTR_T) (u)\
++)
++
++/******************************************************************************\
++******************************** Useful Macro *********************************
++\******************************************************************************/
++
++#define gcvINVALID_ADDRESS ~0U
++
++#define gcmGET_PRE_ROTATION(rotate) \
++ ((rotate) & (~(gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y)))
++
++#define gcmGET_POST_ROTATION(rotate) \
++ ((rotate) & (gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y))
++
++/******************************************************************************\
++******************************** gcsOBJECT Object *******************************
++\******************************************************************************/
++
++/* Type of objects. */
++typedef enum _gceOBJECT_TYPE
++{
++ gcvOBJ_UNKNOWN = 0,
++ gcvOBJ_2D = gcmCC('2','D',' ',' '),
++ gcvOBJ_3D = gcmCC('3','D',' ',' '),
++ gcvOBJ_ATTRIBUTE = gcmCC('A','T','T','R'),
++ gcvOBJ_BRUSHCACHE = gcmCC('B','R','U','$'),
++ gcvOBJ_BRUSHNODE = gcmCC('B','R','U','n'),
++ gcvOBJ_BRUSH = gcmCC('B','R','U','o'),
++ gcvOBJ_BUFFER = gcmCC('B','U','F','R'),
++ gcvOBJ_COMMAND = gcmCC('C','M','D',' '),
++ gcvOBJ_COMMANDBUFFER = gcmCC('C','M','D','B'),
++ gcvOBJ_CONTEXT = gcmCC('C','T','X','T'),
++ gcvOBJ_DEVICE = gcmCC('D','E','V',' '),
++ gcvOBJ_DUMP = gcmCC('D','U','M','P'),
++ gcvOBJ_EVENT = gcmCC('E','V','N','T'),
++ gcvOBJ_FUNCTION = gcmCC('F','U','N','C'),
++ gcvOBJ_HAL = gcmCC('H','A','L',' '),
++ gcvOBJ_HARDWARE = gcmCC('H','A','R','D'),
++ gcvOBJ_HEAP = gcmCC('H','E','A','P'),
++ gcvOBJ_INDEX = gcmCC('I','N','D','X'),
++ gcvOBJ_INTERRUPT = gcmCC('I','N','T','R'),
++ gcvOBJ_KERNEL = gcmCC('K','E','R','N'),
++ gcvOBJ_KERNEL_FUNCTION = gcmCC('K','F','C','N'),
++ gcvOBJ_MEMORYBUFFER = gcmCC('M','E','M','B'),
++ gcvOBJ_MMU = gcmCC('M','M','U',' '),
++ gcvOBJ_OS = gcmCC('O','S',' ',' '),
++ gcvOBJ_OUTPUT = gcmCC('O','U','T','P'),
++ gcvOBJ_PAINT = gcmCC('P','N','T',' '),
++ gcvOBJ_PATH = gcmCC('P','A','T','H'),
++ gcvOBJ_QUEUE = gcmCC('Q','U','E',' '),
++ gcvOBJ_SAMPLER = gcmCC('S','A','M','P'),
++ gcvOBJ_SHADER = gcmCC('S','H','D','R'),
++ gcvOBJ_STREAM = gcmCC('S','T','R','M'),
++ gcvOBJ_SURF = gcmCC('S','U','R','F'),
++ gcvOBJ_TEXTURE = gcmCC('T','X','T','R'),
++ gcvOBJ_UNIFORM = gcmCC('U','N','I','F'),
++ gcvOBJ_VARIABLE = gcmCC('V','A','R','I'),
++ gcvOBJ_VERTEX = gcmCC('V','R','T','X'),
++ gcvOBJ_VIDMEM = gcmCC('V','M','E','M'),
++ gcvOBJ_VG = gcmCC('V','G',' ',' '),
++}
++gceOBJECT_TYPE;
++
++/* gcsOBJECT object defintinon. */
++typedef struct _gcsOBJECT
++{
++ /* Type of an object. */
++ gceOBJECT_TYPE type;
++}
++gcsOBJECT;
++
++typedef struct _gckHARDWARE * gckHARDWARE;
++
++/* CORE flags. */
++typedef enum _gceCORE
++{
++ gcvCORE_MAJOR = 0x0,
++ gcvCORE_2D = 0x1,
++ gcvCORE_VG = 0x2
++}
++gceCORE;
++
++#define gcdMAX_GPU_COUNT 3
++
++/*******************************************************************************
++**
++** gcmVERIFY_OBJECT
++**
++** Assert if an object is invalid or is not of the specified type. If the
++** object is invalid or not of the specified type, gcvSTATUS_INVALID_OBJECT
++** will be returned from the current function. In retail mode this macro
++** does nothing.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define _gcmVERIFY_OBJECT(prefix, obj, t) \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ }
++
++# define gcmVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcm, obj, t)
++# define gcmkVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcmk, obj, t)
++#else
++# define gcmVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++# define gcmkVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++#endif
++
++/******************************************************************************/
++/*VERIFY_OBJECT if special return expected*/
++/******************************************************************************/
++#ifndef EGL_API_ANDROID
++# define _gcmVERIFY_OBJECT_RETURN(prefix, obj, t, retVal) \
++ do \
++ { \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcm, obj, t, retVal)
++# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcmk, obj, t, retVal)
++#else
++# define gcmVERIFY_OBJECT_RETURN(obj, t) do {} while (gcvFALSE)
++# define gcmVERIFY_OBJECT_RETURN(obj, t) do {} while (gcvFALSE)
++#endif
++
++/******************************************************************************\
++********************************** gckOS Object *********************************
++\******************************************************************************/
++
++/* Construct a new gckOS object. */
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ );
++
++/* Destroy an gckOS object. */
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckOS_QueryVideoMemory(
++ IN gckOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Wrapper for allocation memory.. */
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Wrapper for freeing memory. */
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Lock pages. */
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Logical,
++#endif
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Logical,
++#endif
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ );
++
++/* Unlock pages. */
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Free paged memory. */
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Allocate non-paged memory. */
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non-paged memory. */
++gceSTATUS
++gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Get the number fo bytes per page. */
++gceSTATUS
++gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ );
++
++/* Map physical memory. */
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap previously mapped physical memory. */
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Write data to a 32-bit memory location. */
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ );
++
++/* Map physical memory into the process space. */
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap physical memory from the specified process space. */
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ );
++
++/* Unmap physical memory from the process space. */
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Unmap user logical memory out of physical memory.
++ * This function is only supported in Linux currently.
++ */
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Create a new mutex. */
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Atomically exchange a pair of 32-bit values. */
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ );
++
++/* Atomically exchange a pair of pointers. */
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ );
++
++#if gcdSMP
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++#endif
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ );
++
++
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable the receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable the receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable the receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ );
++
++/* Get time in milliseconds. */
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ );
++
++/* Compare time value. */
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ );
++
++/* Memory barrier. */
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ );
++
++/* Map user pointer. */
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Unmap user pointer. */
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ );
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++** Query whether the memory can be accessed or mapped directly or it has to be
++** copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++** gcvFALSE if the memory can be accessed or mapped directly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
++#ifdef __QNXNTO__
++/* Map user physical address. */
++gceSTATUS
++gckOS_MapUserPhysical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Phys,
++ OUT gctPOINTER * KernelPointer
++ );
++#endif
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Perform a memory copy. */
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ );
++
++/* Zero memory. */
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Device I/O control to the kernel HAL layer. */
++gceSTATUS
++gckOS_DeviceControl(
++ IN gckOS Os,
++ IN gctBOOL FromUser,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ OUT gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++gceSTATUS
++gckOS_GetCurrentProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ );
++
++/******************************************************************************\
++********************************** Signal Object *********************************
++\******************************************************************************/
++
++/* Create a signal. */
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
++/* Map a user signal to the kernel space. */
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ );
++
++/* Unmap a user signal */
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Map user memory. */
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/******************************************************************************\
++************************** Android Native Fence Sync ***************************
++\******************************************************************************/
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ );
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ );
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ );
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++#if !USE_NEW_LINUX_SIGNAL
++/* Create signal to be used in the user space. */
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ );
++
++/* Destroy signal used in the user space. */
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ );
++
++/* Wait for signal used in the user space. */
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ );
++
++/* Signal a signal used in the user space. */
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ );
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++/* Set a signal owned by a process. */
++#if defined(__QNXNTO__)
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctINT Recvid,
++ IN gctINT Coid
++ );
++#else
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ );
++#endif
++
++/******************************************************************************\
++** Cache Support
++*/
++
++gceSTATUS
++gckOS_CacheClean(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheFlush(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheInvalidate(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++/******************************************************************************\
++** Debug Support
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gckOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++/*******************************************************************************
++** Broadcast interface.
++*/
++
++typedef enum _gceBROADCAST
++{
++ /* GPU might be idle. */
++ gcvBROADCAST_GPU_IDLE,
++
++ /* A commit is going to happen. */
++ gcvBROADCAST_GPU_COMMIT,
++
++ /* GPU seems to be stuck. */
++ gcvBROADCAST_GPU_STUCK,
++
++ /* First process gets attached. */
++ gcvBROADCAST_FIRST_PROCESS,
++
++ /* Last process gets detached. */
++ gcvBROADCAST_LAST_PROCESS,
++
++ /* AXI bus error. */
++ gcvBROADCAST_AXI_BUS_ERROR,
++}
++gceBROADCAST;
++
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ );
++
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ );
++
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ );
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ );
++
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ );
++
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ );
++
++/*******************************************************************************
++** Semaphores.
++*/
++
++/* Create a new semaphore. */
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++#endif
++
++/* Delete a semaphore. */
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Acquire a semaphore. */
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Try to acquire a semaphore. */
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Release a semaphore. */
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/*******************************************************************************
++** Timer API.
++*/
++
++typedef void (*gctTIMERFUNCTION)(gctPOINTER);
++
++/* Create a timer. */
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ );
++
++/* Destroy a timer. */
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
++
++/* Start a timer. */
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ );
++
++/* Stop a timer. */
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
++
++/******************************************************************************\
++********************************* gckHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gckHEAP * gckHEAP;
++
++/* Construct a new gckHEAP object. */
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ );
++
++/* Destroy an gckHEAP object. */
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++/* Profile the heap. */
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ );
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++
++
++/******************************************************************************\
++******************************** gckVIDMEM Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVIDMEM * gckVIDMEM;
++typedef struct _gckKERNEL * gckKERNEL;
++typedef struct _gckDB * gckDB;
++typedef struct _gckDVFS * gckDVFS;
++
++/* Construct a new gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T Banking,
++ OUT gckVIDMEM * Memory
++ );
++
++/* Destroy an gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ );
++
++/* Allocate rectangular memory. */
++gceSTATUS
++gckVIDMEM_Allocate(
++ IN gckVIDMEM Memory,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT BytesPerPixel,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Allocate linear memory. */
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckVIDMEM_Free(
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/* Lock memory. */
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address
++ );
++
++/* Unlock memory. */
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchroneous
++ );
++
++/* Construct a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Destroy a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/******************************************************************************\
++******************************** gckKERNEL Object ******************************
++\******************************************************************************/
++
++struct _gcsHAL_INTERFACE;
++
++/* Notifications. */
++typedef enum _gceNOTIFY
++{
++ gcvNOTIFY_INTERRUPT,
++ gcvNOTIFY_COMMAND_QUEUE,
++}
++gceNOTIFY;
++
++/* Flush flags. */
++typedef enum _gceKERNEL_FLUSH
++{
++ gcvFLUSH_COLOR = 0x01,
++ gcvFLUSH_DEPTH = 0x02,
++ gcvFLUSH_TEXTURE = 0x04,
++ gcvFLUSH_2D = 0x08,
++ gcvFLUSH_ALL = gcvFLUSH_COLOR
++ | gcvFLUSH_DEPTH
++ | gcvFLUSH_TEXTURE
++ | gcvFLUSH_2D,
++}
++gceKERNEL_FLUSH;
++
++/* Construct a new gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ );
++
++/* Destroy an gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Lookup the gckVIDMEM object for a pool. */
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++#if gcdUSE_VIDMEM_PER_PID
++gceSTATUS
++gckKERNEL_GetVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ IN gctUINT32 Pid,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++gceSTATUS
++gckKERNEL_CreateVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ IN gctUINT32 Pid,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++gceSTATUS
++gckKERNEL_RemoveVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM VideoMemory
++ );
++#endif
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++#ifdef __QNXNTO__
++/* Unmap video memory. */
++gceSTATUS
++gckKERNEL_UnmapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes
++ );
++#endif
++
++/* Map memory. */
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Notification of events. */
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++ IN gceNOTIFY Notifcation,
++ IN gctBOOL Data
++ );
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ );
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ );
++
++/* Set the value of timeout on HW operation. */
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ );
++
++/* Get access to the user data. */
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Release resources associated with the user data connection. */
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Frequency
++ );
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ );
++
++/******************************************************************************\
++******************************* gckHARDWARE Object *****************************
++\******************************************************************************/
++
++/* Construct a new gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ );
++
++/* Destroy an gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ );
++
++/* Get hardware type. */
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * Alignment,
++ OUT gctSIZE_T * ReservedHead,
++ OUT gctSIZE_T * ReservedTail
++ );
++
++/* Add a WAIT/LINK pair in the command queue. */
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctSIZE_T * WaitBytes
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Physical,
++ IN gctBOOL PhysicalAddresses,
++#endif
++ IN gctSIZE_T Bytes
++ );
++
++/* Add an END command in the command queue. */
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a NOP command in the command queue. */
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a WAIT command in the command queue. */
++gceSTATUS
++gckHARDWARE_Wait(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Count,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a PIPESELECT command in the command queue. */
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a LINK command in the command queue. */
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER FetchAddress,
++ IN gctSIZE_T FetchSize,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add an EVENT command in the command queue. */
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ );
++
++/* Query the shader support. */
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ );
++
++/* Split a hardware specific address into API stuff. */
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Update command queue tail pointer. */
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++#ifdef __QNXNTO__
++/* Convert physical address to hardware specific address. */
++gceSTATUS
++gckHARDWARE_ConvertPhysical(
++ IN gckHARDWARE Hardware,
++ IN gctPHYS_ADDR Physical,
++ OUT gctUINT32 * Address
++ );
++#endif
++
++/* Interrupt manager. */
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL InterruptValid
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ );
++
++/* Set the page table base address. */
++gceSTATUS
++gckHARDWARE_SetMMUv2(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Enable,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctBOOL FromPower
++ );
++
++/* Get idle register. */
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ );
++
++gceSTATUS
++gckHARDWARE_ReadInterrupt(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ );
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ );
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++);
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++);
++#endif
++
++/* Profile 2D Engine. */
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OUT gcs2D_PROFILE_PTR Profile
++ );
++
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ );
++
++typedef gceSTATUS (*gctISRMANAGERFUNC)(gctPOINTER Context, gceCORE Core);
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ );
++
++/* Start a composition. */
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ );
++
++/* Check for Hardware features. */
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ );
++
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ );
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Frequency
++ );
++
++#if !gcdENABLE_VG
++/******************************************************************************\
++***************************** gckINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckINTERRUPT * gckINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckKERNEL Kernel
++ );
++
++gceSTATUS
++gckINTERRUPT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_Destroy(
++ IN gckINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_SetHandler(
++ IN gckINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckINTERRUPT_Notify(
++ IN gckINTERRUPT Interrupt,
++ IN gctBOOL Valid
++ );
++#endif
++/******************************************************************************\
++******************************** gckEVENT Object *******************************
++\******************************************************************************/
++
++typedef struct _gckEVENT * gckEVENT;
++
++/* Construct a new gckEVENT object. */
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ );
++
++/* Destroy an gckEVENT object. */
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ );
++
++/* Add a new event to the list of events. */
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ );
++
++/* Schedule a FreeNonPagedMemory event. */
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeContiguousMemory event. */
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeVideoMemory event. */
++gceSTATUS
++gckEVENT_FreeVideoMemory(
++ IN gckEVENT Event,
++ IN gcuVIDMEM_NODE_PTR VideoMemory,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a signal event. */
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule an Unlock event. */
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type
++ );
++
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++/* Schedule a FreeVirtualCommandBuffer event. */
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++#endif
++
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ );
++
++/* Commit an event queue. */
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ );
++
++/* Schedule a composition event. */
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ );
++
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ );
++/******************************************************************************\
++******************************* gckCOMMAND Object ******************************
++\******************************************************************************/
++
++typedef struct _gckCOMMAND * gckCOMMAND;
++
++/* Construct a new gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ );
++
++/* Destroy an gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ );
++
++/* Acquire command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Release command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Start the command queue. */
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ );
++
++/* Stop the command queue. */
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ );
++
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ );
++
++/* Reserve space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctSIZE_T * BufferSize
++ );
++
++/* Execute reserved space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequstedBytes
++ );
++
++/* Stall the command queue. */
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Attach user process. */
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ );
++
++/* Detach user process. */
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ );
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ );
++#endif
++
++/******************************************************************************\
++********************************* gckMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckMMU * gckMMU;
++
++/* Construct a new gckMMU object. */
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ );
++
++/* Destroy an gckMMU object. */
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ );
++
++/* Enable the MMU. */
++gceSTATUS
++gckMMU_Enable(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_InsertNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node);
++
++gceSTATUS
++gckMMU_RemoveNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node);
++#endif
++
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckMMU Mmu,
++ IN gctUINT32 Pid
++ );
++#endif
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu
++ );
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ );
++
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Clear,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Clear,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ );
++#endif
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ );
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ gckHARDWARE Hardware
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#if gcdENABLE_VG
++#include "gc_hal_vg.h"
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_kernel_buffer.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_kernel_buffer.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_kernel_buffer.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_kernel_buffer.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,185 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_buffer_h_
++#define __gc_hal_kernel_buffer_h_
++
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++************************ Command Buffer and Event Objects **********************
++\******************************************************************************/
++
++/* The number of context buffers per user. */
++#define gcdCONTEXT_BUFFER_COUNT 2
++
++/* State delta record. */
++typedef struct _gcsSTATE_DELTA_RECORD * gcsSTATE_DELTA_RECORD_PTR;
++typedef struct _gcsSTATE_DELTA_RECORD
++{
++ /* State address. */
++ gctUINT address;
++
++ /* State mask. */
++ gctUINT32 mask;
++
++ /* State data. */
++ gctUINT32 data;
++}
++gcsSTATE_DELTA_RECORD;
++
++/* State delta. */
++typedef struct _gcsSTATE_DELTA
++{
++ /* For debugging: the number of delta in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Main state delta ID. Every time state delta structure gets reinitialized,
++ main ID is incremented. If main state ID overflows, all map entry IDs get
++ reinitialized to make sure there is no potential erroneous match after
++ the overflow.*/
++ gctUINT id;
++
++ /* The number of contexts pending modification by the delta. */
++ gctINT refCount;
++
++ /* Vertex element count for the delta buffer. */
++ gctUINT elementCount;
++
++ /* Number of states currently stored in the record array. */
++ gctUINT recordCount;
++
++ /* Record array; holds all modified states in gcsSTATE_DELTA_RECORD. */
++ gctUINT64 recordArray;
++
++ /* Map entry ID is used for map entry validation. If map entry ID does not
++ match the main state delta ID, the entry and the corresponding state are
++ considered not in use. */
++ gctUINT64 mapEntryID;
++ gctUINT mapEntryIDSize;
++
++ /* If the map entry ID matches the main state delta ID, index points to
++ the state record in the record array. */
++ gctUINT64 mapEntryIndex;
++
++ /* Previous and next state deltas in gcsSTATE_DELTA. */
++ gctUINT64 prev;
++ gctUINT64 next;
++}
++gcsSTATE_DELTA;
++
++/* Command buffer object. */
++struct _gcoCMDBUF
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Command buffer entry and exit pipes. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Feature usage flags. */
++ gctBOOL using2D;
++ gctBOOL using3D;
++ gctBOOL usingFilterBlit;
++ gctBOOL usingPalette;
++
++ /* Physical address of command buffer. Just a name. */
++ gctUINT32 physical;
++
++ /* Logical address of command buffer. */
++ gctUINT64 logical;
++
++ /* Number of bytes in command buffer. */
++ gctUINT bytes;
++
++ /* Start offset into the command buffer. */
++ gctUINT startOffset;
++
++ /* Current offset into the command buffer. */
++ gctUINT offset;
++
++ /* Number of free bytes in command buffer. */
++ gctUINT free;
++
++ /* Location of the last reserved area. */
++ gctUINT64 lastReserve;
++ gctUINT lastOffset;
++
++#if gcdSECURE_USER
++ /* Hint array for the current command buffer. */
++ gctUINT hintArraySize;
++ gctUINT64 hintArray;
++ gctUINT64 hintArrayTail;
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Last load state command location and hardware address. */
++ gctUINT64 lastLoadStatePtr;
++ gctUINT32 lastLoadStateAddress;
++ gctUINT32 lastLoadStateCount;
++#endif
++};
++
++typedef struct _gcsQUEUE
++{
++ /* Pointer to next gcsQUEUE structure in gcsQUEUE. */
++ gctUINT64 next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE iface;
++}
++gcsQUEUE;
++
++/* Event queue. */
++struct _gcoQUEUE
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to current event queue. */
++ gcsQUEUE_PTR head;
++ gcsQUEUE_PTR tail;
++
++#ifdef __QNXNTO__
++ /* Buffer for records. */
++ gcsQUEUE_PTR records;
++ gctUINT32 freeBytes;
++ gctUINT32 offset;
++#else
++ /* List of free records. */
++ gcsQUEUE_PTR freeList;
++#endif
++ #define gcdIN_QUEUE_RECORD_LIMIT 16
++ /* Number of records currently in queue */
++ gctUINT32 recordCount;
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_buffer_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_mem.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_mem.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_mem.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_mem.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,530 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/*
++** Include file for the local memory management.
++*/
++
++#ifndef __gc_hal_mem_h_
++#define __gc_hal_mem_h_
++#ifndef VIVANTE_NO_3D
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Usage:
++
++ The macros to declare MemPool type and functions are
++ gcmMEM_DeclareFSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareVSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix)
++
++ The data structures for MemPool are
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++
++ The MemPool constructor and destructor functions are
++ gcfMEM_InitFSMemPool(gcsMEM_FS_MEM_POOL *, gcoOS, gctUINT, gctUINT);
++ gcfMEM_FreeFSMemPool(gcsMEM_FS_MEM_POOL *);
++ gcfMEM_InitVSMemPool(gcsMEM_VS_MEM_POOL *, gcoOS, gctUINT, gctBOOL);
++ gcfMEM_FreeVSMemPool(gcsMEM_VS_MEM_POOL *);
++ gcfMEM_InitAFSMemPool(gcsMEM_AFS_MEM_POOL *, gcoOS, gctUINT);
++ gcfMEM_FreeAFSMemPool(gcsMEM_AFS_MEM_POOL *);
++
++ FS: for Fixed-Size data structures
++ VS: for Variable-size data structures
++ AFS: for Array of Fixed-Size data structures
++
++
++ // Example 1: For a fixed-size data structure, struct gcsNode.
++ // It is used locally in a file, so the functions are static without prefix.
++ // At top level, declare allocate and free functions.
++ // The first argument is the data type.
++ // The second argument is the short name used in the functions.
++ gcmMEM_DeclareFSMemPool(struct gcsNode, Node, );
++
++ // The previous macro creates two inline functions,
++ // _AllocateNode and _FreeNode.
++
++ // In function or struct
++ gcsMEM_FS_MEM_POOL nodeMemPool;
++
++ // In function,
++ struct gcsNode * node;
++ gceSTATUS status;
++
++ // Before using the memory pool, initialize it.
++ // The second argument is the gcoOS object.
++ // The third argument is the number of data structures to allocate for each chunk.
++ status = gcfMEM_InitFSMemPool(&nodeMemPool, os, 100, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node.
++ status = _AllocateNode(nodeMemPool, &node);
++ ...
++ // Free a node.
++ _FreeNode(nodeMemPool, node);
++
++ // After using the memory pool, free it.
++ gcfMEM_FreeFSMemPool(&nodeMemPool);
++
++
++ // Example 2: For array of fixed-size data structures, struct gcsNode.
++ // It is used in several files, so the functions are extern with prefix.
++ // At top level, declare allocate and free functions.
++ // The first argument is the data type, and the second one is the short name
++ // used in the functions.
++ gcmMEM_DeclareAFSMemPool(struct gcsNode, NodeArray, gcfOpt);
++
++ // The previous macro creates two inline functions,
++ // gcfOpt_AllocateNodeArray and gcfOpt_FreeNodeArray.
++
++ // In function or struct
++ gcsMEM_AFS_MEM_POOL nodeArrayMemPool;
++
++ // In function,
++ struct gcsNode * nodeArray;
++ gceSTATUS status;
++
++ // Before using the array memory pool, initialize it.
++ // The second argument is the gcoOS object, the third is the number of data
++ // structures to allocate for each chunk.
++ status = gcfMEM_InitAFSMemPool(&nodeArrayMemPool, os, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node array of size 100.
++ status = gcfOpt_AllocateNodeArray(nodeArrayMemPool, &nodeArray, 100);
++ ...
++ // Free a node array.
++ gcfOpt_FreeNodeArray(&nodeArrayMemPool, nodeArray);
++
++ // After using the array memory pool, free it.
++ gcfMEM_FreeAFSMemPool(&nodeArrayMemPool);
++
++*******************************************************************************/
++
++/*******************************************************************************
++** To switch back to use gcoOS_Allocate and gcoOS_Free, add
++** #define USE_LOCAL_MEMORY_POOL 0
++** before including this file.
++*******************************************************************************/
++#ifndef USE_LOCAL_MEMORY_POOL
++/*
++ USE_LOCAL_MEMORY_POOL
++
++ This define enables the local memory management to improve performance.
++*/
++#define USE_LOCAL_MEMORY_POOL 1
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Structures
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++#else
++ typedef gcoOS gcsMEM_FS_MEM_POOL;
++ typedef gcoOS gcsMEM_VS_MEM_POOL;
++ typedef gcoOS gcsMEM_AFS_MEM_POOL;
++#endif
++
++/*******************************************************************************
++** Memory Pool Macros
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ return(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_FSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName##List( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * FirstPointer, \
++ Type * LastPointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x FirstPointer=0x%x LastPointer=0x%x", MemPool, FirstPointer, LastPointer); \
++ status = gcfMEM_FSMemPoolFreeAList(MemPool, (gctPOINTER) FirstPointer, (gctPOINTER) LastPointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status;\
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++ Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_VSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_AFSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#else
++
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Functions
++*******************************************************************************/
++gceSTATUS
++gcfMEM_InitFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolGetANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeAList(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER FirstNode,
++ IN gctPOINTER LastNode
++ );
++
++gceSTATUS
++gcfMEM_InitVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT BlockSize,
++ IN gctBOOL RecycleFreeNode
++ );
++
++gceSTATUS
++gcfMEM_FreeVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolGetANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolFreeANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_InitAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolGetANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctUINT Count,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolFreeANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_mem_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_options.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_options.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_options.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_options.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,947 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_options_h_
++#define __gc_hal_options_h_
++
++/*
++ gcdPRINT_VERSION
++
++ Print HAL version.
++*/
++#ifndef gcdPRINT_VERSION
++# define gcdPRINT_VERSION 0
++#endif
++
++/*
++ USE_NEW_LINUX_SIGNAL
++
++ This define enables the Linux kernel signaling between kernel and user.
++*/
++#ifndef USE_NEW_LINUX_SIGNAL
++# define USE_NEW_LINUX_SIGNAL 0
++#endif
++
++/*
++ VIVANTE_PROFILER
++
++ This define enables the profiler.
++*/
++#ifndef VIVANTE_PROFILER
++# define VIVANTE_PROFILER 1
++#endif
++
++#ifndef VIVANTE_PROFILER_PERDRAW
++# define VIVANTE_PROFILER_PERDRAW 0
++#endif
++
++/*
++ VIVANTE_PROFILER_CONTEXT
++
++ This define enables the profiler according to each hw context.
++*/
++#ifndef VIVANTE_PROFILER_CONTEXT
++# define VIVANTE_PROFILER_CONTEXT 1
++#endif
++
++/*
++ gcdUSE_VG
++
++ Enable VG HAL layer (only for GC350).
++*/
++#ifndef gcdUSE_VG
++# define gcdUSE_VG 0
++#endif
++
++/*
++ USE_SW_FB
++
++ Set to 1 if the frame buffer memory cannot be accessed by the GPU.
++*/
++#ifndef USE_SW_FB
++# define USE_SW_FB 0
++#endif
++
++/*
++ USE_SUPER_SAMPLING
++
++ This define enables super-sampling support.
++*/
++#define USE_SUPER_SAMPLING 0
++
++/*
++ PROFILE_HAL_COUNTERS
++
++ This define enables HAL counter profiling support. HW and SHADER
++ counter profiling depends on this.
++*/
++#ifndef PROFILE_HAL_COUNTERS
++# define PROFILE_HAL_COUNTERS 1
++#endif
++
++/*
++ PROFILE_HW_COUNTERS
++
++ This define enables HW counter profiling support.
++*/
++#ifndef PROFILE_HW_COUNTERS
++# define PROFILE_HW_COUNTERS 1
++#endif
++
++/*
++ PROFILE_SHADER_COUNTERS
++
++ This define enables SHADER counter profiling support.
++*/
++#ifndef PROFILE_SHADER_COUNTERS
++# define PROFILE_SHADER_COUNTERS 1
++#endif
++
++/*
++ COMMAND_PROCESSOR_VERSION
++
++ The version of the command buffer and task manager.
++*/
++#define COMMAND_PROCESSOR_VERSION 1
++
++/*
++ gcdDUMP_KEY
++
++ Set this to a string that appears in 'cat /proc/<pid>/cmdline'. E.g. 'camera'.
++ HAL will create dumps for the processes matching this key.
++*/
++#ifndef gcdDUMP_KEY
++# define gcdDUMP_KEY "process"
++#endif
++
++/*
++ gcdDUMP_PATH
++
++ The dump file location. Some processes cannot write to the sdcard.
++ Try apps' data dir, e.g. /data/data/com.android.launcher
++*/
++#ifndef gcdDUMP_PATH
++#if defined(ANDROID)
++# define gcdDUMP_PATH "/mnt/sdcard/"
++#else
++# define gcdDUMP_PATH "./"
++#endif
++#endif
++
++/*
++ gcdDUMP
++
++ When set to 1, a dump of all states and memory uploads, as well as other
++ hardware related execution will be printed to the debug console. This
++ data can be used for playing back applications.
++*/
++#ifndef gcdDUMP
++# define gcdDUMP 0
++#endif
++
++/*
++ gcdDUMP_API
++
++ When set to 1, a high level dump of the EGL and GL/VG APs's are
++ captured.
++*/
++#ifndef gcdDUMP_API
++# define gcdDUMP_API 0
++#endif
++
++/*
++ gcdDUMP_FRAMERATE
++ When set to a value other than zero, average frame rate will be dumped.
++ The value set is the starting frame that the average will be calculated.
++ This is needed because sometimes first few frames are too slow to be included
++ in the average. Frame count starts from 1.
++*/
++#ifndef gcdDUMP_FRAMERATE
++# define gcdDUMP_FRAMERATE 0
++#endif
++
++/*
++ gcdVIRTUAL_COMMAND_BUFFER
++ When set to 1, user command buffer and context buffer will be allocated
++ from gcvPOOL_VIRTUAL.
++*/
++#ifndef gcdVIRTUAL_COMMAND_BUFFER
++# define gcdVIRTUAL_COMMAND_BUFFER 0
++#endif
++
++/*
++ gcdENABLE_FSCALE_VAL_ADJUST
++ When non-zero, FSCALE_VAL when gcvPOWER_ON can be adjusted externally.
++ */
++#ifndef gcdENABLE_FSCALE_VAL_ADJUST
++# define gcdENABLE_FSCALE_VAL_ADJUST 1
++#endif
++
++/*
++ gcdDUMP_IN_KERNEL
++
++ When set to 1, all dumps will happen in the kernel. This is handy if
++ you want the kernel to dump its command buffers as well and the data
++ needs to be in sync.
++*/
++#ifndef gcdDUMP_IN_KERNEL
++# define gcdDUMP_IN_KERNEL 0
++#endif
++
++/*
++ gcdDUMP_COMMAND
++
++ When set to non-zero, the command queue will dump all incoming command
++ and context buffers as well as all other modifications to the command
++ queue.
++*/
++#ifndef gcdDUMP_COMMAND
++# define gcdDUMP_COMMAND 0
++#endif
++
++/*
++ gcdDUMP_FRAME_TGA
++
++ When set to a value other than 0, a dump of the frame specified by the value,
++ will be done into frame.tga. Frame count starts from 1.
++ */
++#ifndef gcdDUMP_FRAME_TGA
++#define gcdDUMP_FRAME_TGA 0
++#endif
++/*
++ gcdNULL_DRIVER
++
++ Set to 1 for infinite speed hardware.
++ Set to 2 for bypassing the HAL.
++ Set to 3 for bypassing the drivers.
++*/
++#ifndef gcdNULL_DRIVER
++# define gcdNULL_DRIVER 0
++#endif
++
++/*
++ gcdENABLE_TIMEOUT_DETECTION
++
++ Enable timeout detection.
++*/
++#ifndef gcdENABLE_TIMEOUT_DETECTION
++# define gcdENABLE_TIMEOUT_DETECTION 0
++#endif
++
++/*
++ gcdCMD_BUFFER_SIZE
++
++ Number of bytes in a command buffer.
++*/
++#ifndef gcdCMD_BUFFER_SIZE
++# define gcdCMD_BUFFER_SIZE (128 << 10)
++#endif
++
++/*
++ gcdCMD_BUFFERS
++
++ Number of command buffers to use per client.
++*/
++#ifndef gcdCMD_BUFFERS
++# define gcdCMD_BUFFERS 2
++#endif
++
++/*
++ gcdMAX_CMD_BUFFERS
++
++ Maximum number of command buffers to use per client.
++*/
++#ifndef gcdMAX_CMD_BUFFERS
++# define gcdMAX_CMD_BUFFERS 8
++#endif
++
++/*
++ gcdCOMMAND_QUEUES
++
++ Number of command queues in the kernel.
++*/
++#ifndef gcdCOMMAND_QUEUES
++# define gcdCOMMAND_QUEUES 2
++#endif
++
++/*
++ gcdPOWER_CONTROL_DELAY
++
++ The delay in milliseconds required to wait until the GPU has woke up
++ from a suspend or power-down state. This is system dependent because
++ the bus clock also needs to stabalize.
++*/
++#ifndef gcdPOWER_CONTROL_DELAY
++# define gcdPOWER_CONTROL_DELAY 0
++#endif
++
++/*
++ gcdMIRROR_PAGETABLE
++
++ Enable it when GPUs with old MMU and new MMU exist at same SoC. It makes
++ each GPU use same virtual address to access same physical memory.
++*/
++#ifndef gcdMIRROR_PAGETABLE
++# define gcdMIRROR_PAGETABLE 0
++#endif
++
++/*
++ gcdMMU_SIZE
++
++ Size of the MMU page table in bytes. Each 4 bytes can hold 4kB worth of
++ virtual data.
++*/
++#ifndef gcdMMU_SIZE
++#if gcdMIRROR_PAGETABLE
++# define gcdMMU_SIZE 0x200000
++#else
++# define gcdMMU_SIZE (2048 << 10)
++#endif
++#endif
++
++/*
++ gcdSECURE_USER
++
++ Use logical addresses instead of physical addresses in user land. In
++ this case a hint table is created for both command buffers and context
++ buffers, and that hint table will be used to patch up those buffers in
++ the kernel when they are ready to submit.
++*/
++#ifndef gcdSECURE_USER
++# define gcdSECURE_USER 0
++#endif
++
++/*
++ gcdSECURE_CACHE_SLOTS
++
++ Number of slots in the logical to DMA address cache table. Each time a
++ logical address needs to be translated into a DMA address for the GPU,
++ this cache will be walked. The replacement scheme is LRU.
++*/
++#ifndef gcdSECURE_CACHE_SLOTS
++# define gcdSECURE_CACHE_SLOTS 1024
++#endif
++
++/*
++ gcdSECURE_CACHE_METHOD
++
++ Replacement scheme used for Secure Cache. The following options are
++ available:
++
++ gcdSECURE_CACHE_LRU
++ A standard LRU cache.
++
++ gcdSECURE_CACHE_LINEAR
++ A linear walker with the idea that an application will always
++ render the scene in a similar way, so the next entry in the
++ cache should be a hit most of the time.
++
++ gcdSECURE_CACHE_HASH
++ A 256-entry hash table.
++
++ gcdSECURE_CACHE_TABLE
++ A simple cache but with potential of a lot of cache replacement.
++*/
++#ifndef gcdSECURE_CACHE_METHOD
++# define gcdSECURE_CACHE_METHOD gcdSECURE_CACHE_HASH
++#endif
++
++/*
++ gcdREGISTER_ACCESS_FROM_USER
++
++ Set to 1 to allow IOCTL calls to get through from user land. This
++ should only be in debug or development drops.
++*/
++#ifndef gcdREGISTER_ACCESS_FROM_USER
++# define gcdREGISTER_ACCESS_FROM_USER 1
++#endif
++
++/*
++ gcdUSER_HEAP_ALLOCATOR
++
++ Set to 1 to enable user mode heap allocator for fast memory allocation
++ and destroying. Otherwise, memory allocation/destroying in user mode
++ will be directly managed by system. Only for linux for now.
++*/
++#ifndef gcdUSER_HEAP_ALLOCATOR
++# define gcdUSER_HEAP_ALLOCATOR 1
++#endif
++
++/*
++ gcdHEAP_SIZE
++
++ Set the allocation size for the internal heaps. Each time a heap is
++ full, a new heap will be allocated with this minmimum amount of bytes.
++ The bigger this size, the fewer heaps there are to allocate, the better
++ the performance. However, heaps won't be freed until they are
++ completely free, so there might be some more memory waste if the size is
++ too big.
++*/
++#ifndef gcdHEAP_SIZE
++# define gcdHEAP_SIZE (64 << 10)
++#endif
++
++/*
++ gcdPOWER_SUSNPEND_WHEN_IDLE
++
++ Set to 1 to make GPU enter gcvPOWER_SUSPEND when idle detected,
++ otherwise GPU will enter gcvPOWER_IDLE.
++*/
++#ifndef gcdPOWER_SUSNPEND_WHEN_IDLE
++# define gcdPOWER_SUSNPEND_WHEN_IDLE 1
++#endif
++
++/*
++ gcdFPGA_BUILD
++
++ This define enables work arounds for FPGA images.
++*/
++#ifndef gcdFPGA_BUILD
++# define gcdFPGA_BUILD 0
++#endif
++
++/*
++ gcdGPU_TIMEOUT
++
++ This define specified the number of milliseconds the system will wait
++ before it broadcasts the GPU is stuck. In other words, it will define
++ the timeout of any operation that needs to wait for the GPU.
++
++ If the value is 0, no timeout will be checked for.
++*/
++#ifndef gcdGPU_TIMEOUT
++#if gcdFPGA_BUILD
++# define gcdGPU_TIMEOUT 0
++# else
++# define gcdGPU_TIMEOUT 20000
++# endif
++#endif
++
++/*
++ gcdGPU_ADVANCETIMER
++
++ it is advance timer.
++*/
++#ifndef gcdGPU_ADVANCETIMER
++# define gcdGPU_ADVANCETIMER 250
++#endif
++
++/*
++ gcdSTATIC_LINK
++
++ This define disalbes static linking;
++*/
++#ifndef gcdSTATIC_LINK
++# define gcdSTATIC_LINK 0
++#endif
++
++/*
++ gcdUSE_NEW_HEAP
++
++ Setting this define to 1 enables new heap.
++*/
++#ifndef gcdUSE_NEW_HEAP
++# define gcdUSE_NEW_HEAP 0
++#endif
++
++/*
++ gcdCMD_NO_2D_CONTEXT
++
++ This define enables no-context 2D command buffer.
++*/
++#ifndef gcdCMD_NO_2D_CONTEXT
++# define gcdCMD_NO_2D_CONTEXT 1
++#endif
++
++/*
++ gcdENABLE_BANK_ALIGNMENT
++
++ When enabled, video memory is allocated bank aligned. The vendor can modify
++ _GetSurfaceBankAlignment() and gcoSURF_GetBankOffsetBytes() to define how
++ different types of allocations are bank and channel aligned.
++ When disabled (default), no bank alignment is done.
++*/
++#ifndef gcdENABLE_BANK_ALIGNMENT
++# define gcdENABLE_BANK_ALIGNMENT 0
++#endif
++
++/*
++ gcdBANK_BIT_START
++
++ Specifies the start bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_START
++# define gcdBANK_BIT_START 12
++#endif
++
++/*
++ gcdBANK_BIT_END
++
++ Specifies the end bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_END
++# define gcdBANK_BIT_END 14
++#endif
++
++/*
++ gcdBANK_CHANNEL_BIT
++
++ When set, video memory when allocated bank aligned is allocated such that
++ render and depth buffer addresses alternate on the channel bit specified.
++ This option has an effect only when gcdENABLE_BANK_ALIGNMENT is enabled.
++ When disabled (default), no alteration is done.
++*/
++#ifndef gcdBANK_CHANNEL_BIT
++# define gcdBANK_CHANNEL_BIT 7
++#endif
++
++/*
++ gcdDYNAMIC_SPEED
++
++ When non-zero, it informs the kernel driver to use the speed throttling
++ broadcasting functions to inform the system the GPU should be spet up or
++ slowed down. It will send a broadcast for slowdown each "interval"
++ specified by this define in milliseconds
++ (gckOS_BroadcastCalibrateSpeed).
++*/
++#ifndef gcdDYNAMIC_SPEED
++# define gcdDYNAMIC_SPEED 2000
++#endif
++
++/*
++ gcdDYNAMIC_EVENT_THRESHOLD
++
++ When non-zero, it specifies the maximum number of available events at
++ which the kernel driver will issue a broadcast to speed up the GPU
++ (gckOS_BroadcastHurry).
++*/
++#ifndef gcdDYNAMIC_EVENT_THRESHOLD
++# define gcdDYNAMIC_EVENT_THRESHOLD 5
++#endif
++
++/*
++ gcdENABLE_PROFILING
++
++ Enable profiling macros.
++*/
++#ifndef gcdENABLE_PROFILING
++# define gcdENABLE_PROFILING 0
++#endif
++
++/*
++ gcdENABLE_128B_MERGE
++
++ Enable 128B merge for the BUS control.
++*/
++#ifndef gcdENABLE_128B_MERGE
++# define gcdENABLE_128B_MERGE 0
++#endif
++
++/*
++ gcdFRAME_DB
++
++ When non-zero, it specified the number of frames inside the frame
++ database. The frame DB will collect per-frame timestamps and hardware
++ counters.
++*/
++#ifndef gcdFRAME_DB
++# define gcdFRAME_DB 0
++# define gcdFRAME_DB_RESET 0
++# define gcdFRAME_DB_NAME "/var/log/frameDB.log"
++#endif
++
++/*
++ gcdENABLE_VG
++ enable the 2D openVG
++*/
++
++#ifndef gcdENABLE_VG
++# define gcdENABLE_VG 0
++#endif
++
++/*
++ gcdDYNAMIC_MAP_RESERVED_MEMORY
++
++ When gcvPOOL_SYSTEM is constructed from RESERVED memory,
++ driver can map the whole reserved memory to kernel space
++ at the beginning, or just map a piece of memory when need
++ to access.
++
++ Notice:
++ - It's only for the 2D openVG. For other cores, there is
++ _NO_ need to map reserved memory to kernel.
++ - It's meaningless when memory is allocated by
++ gckOS_AllocateContiguous, in that case, memory is always
++ mapped by system when allocated.
++*/
++#ifndef gcdDYNAMIC_MAP_RESERVED_MEMORY
++# define gcdDYNAMIC_MAP_RESERVED_MEMORY 1
++#endif
++
++/*
++ gcdPAGED_MEMORY_CACHEABLE
++
++ When non-zero, paged memory will be cacheable.
++
++ Normally, driver will detemines whether a video memory
++ is cacheable or not. When cacheable is not neccessary,
++ it will be writecombine.
++
++ This option is only for those SOC which can't enable
++ writecombine without enabling cacheable.
++*/
++
++#ifndef gcdPAGED_MEMORY_CACHEABLE
++# define gcdPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_CACHEABLE
++
++ When non-zero, non paged memory will be cacheable.
++*/
++
++#ifndef gcdNONPAGED_MEMORY_CACHEABLE
++# define gcdNONPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_BUFFERABLE
++
++ When non-zero, non paged memory will be bufferable.
++ gcdNONPAGED_MEMORY_BUFFERABLE and gcdNONPAGED_MEMORY_CACHEABLE
++ can't be set 1 at same time
++*/
++
++#ifndef gcdNONPAGED_MEMORY_BUFFERABLE
++# define gcdNONPAGED_MEMORY_BUFFERABLE 1
++#endif
++
++/*
++ gcdENABLE_INFINITE_SPEED_HW
++ enable the Infinte HW , this is for 2D openVG
++*/
++
++#ifndef gcdENABLE_INFINITE_SPEED_HW
++# define gcdENABLE_INFINITE_SPEED_HW 0
++#endif
++
++/*
++ gcdENABLE_TS_DOUBLE_BUFFER
++ enable the TS double buffer, this is for 2D openVG
++*/
++
++#ifndef gcdENABLE_TS_DOUBLE_BUFFER
++# define gcdENABLE_TS_DOUBLE_BUFFER 1
++#endif
++
++/*
++ gcd6000_SUPPORT
++
++ Temporary define to enable/disable 6000 support.
++ */
++#ifndef gcd6000_SUPPORT
++# define gcd6000_SUPPORT 0
++#endif
++
++/*
++ gcdPOWEROFF_TIMEOUT
++
++ When non-zero, GPU will power off automatically from
++ idle state, and gcdPOWEROFF_TIMEOUT is also the default
++ timeout in milliseconds.
++ */
++
++#ifndef gcdPOWEROFF_TIMEOUT
++# define gcdPOWEROFF_TIMEOUT 300
++#endif
++
++/*
++ gcdUSE_VIDMEM_PER_PID
++*/
++#ifndef gcdUSE_VIDMEM_PER_PID
++# define gcdUSE_VIDMEM_PER_PID 0
++#endif
++
++/*
++ QNX_SINGLE_THREADED_DEBUGGING
++*/
++#ifndef QNX_SINGLE_THREADED_DEBUGGING
++# define QNX_SINGLE_THREADED_DEBUGGING 0
++#endif
++
++/*
++ gcdENABLE_RECOVERY
++
++ This define enables the recovery code.
++*/
++#ifndef gcdENABLE_RECOVERY
++# define gcdENABLE_RECOVERY 1
++#endif
++
++/*
++ gcdRENDER_THREADS
++
++ Number of render threads. Make it zero, and there will be no render
++ threads.
++*/
++#ifndef gcdRENDER_THREADS
++# define gcdRENDER_THREADS 0
++#endif
++
++/*
++ gcdSMP
++
++ This define enables SMP support.
++
++ Currently, it only works on Linux/Android,
++ Kbuild will config it according to whether
++ CONFIG_SMP is set.
++
++*/
++#ifndef gcdSMP
++# define gcdSMP 0
++#endif
++
++/*
++ gcdSUPPORT_SWAP_RECTANGLE
++
++ Support swap with a specific rectangle.
++
++ Set the rectangle with eglSetSwapRectangleANDROID api.
++*/
++#ifndef gcdSUPPORT_SWAP_RECTANGLE
++# define gcdSUPPORT_SWAP_RECTANGLE 0
++#endif
++
++/*
++ gcdGPU_LINEAR_BUFFER_ENABLED
++
++ Use linear buffer for GPU apps so HWC can do 2D composition.
++*/
++#ifndef gcdGPU_LINEAR_BUFFER_ENABLED
++# define gcdGPU_LINEAR_BUFFER_ENABLED 1
++#endif
++
++/*
++ gcdENABLE_RENDER_INTO_WINDOW
++
++ Enable Render-Into-Window (ie, No-Resolve) feature on android.
++ NOTE that even if enabled, it still depends on hardware feature and
++ android application behavior. When hardware feature or application
++ behavior can not support render into window mode, it will fail back
++ to normal mode.
++ When Render-Into-Window is finally used, window back buffer of android
++ applications will be allocated matching render target tiling format.
++ Otherwise buffer tiling is decided by the above option
++ 'gcdGPU_LINEAR_BUFFER_ENABLED'.
++*/
++#ifndef gcdENABLE_RENDER_INTO_WINDOW
++# define gcdENABLE_RENDER_INTO_WINDOW 1
++#endif
++
++/*
++ gcdSHARED_RESOLVE_BUFFER_ENABLED
++
++ Use shared resolve buffer for all app buffers.
++*/
++#ifndef gcdSHARED_RESOLVE_BUFFER_ENABLED
++# define gcdSHARED_RESOLVE_BUFFER_ENABLED 0
++#endif
++
++/*
++ gcdUSE_TRIANGLE_STRIP_PATCH
++ */
++#ifndef gcdUSE_TRIANGLE_STRIP_PATCH
++# define gcdUSE_TRIANGLE_STRIP_PATCH 1
++#endif
++
++/*
++ gcdENABLE_OUTER_CACHE_PATCH
++
++ Enable the outer cache patch.
++*/
++#ifndef gcdENABLE_OUTER_CACHE_PATCH
++# define gcdENABLE_OUTER_CACHE_PATCH 0
++#endif
++
++#ifndef gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++# ifdef ANDROID
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 1
++# else
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 0
++# endif
++#endif
++
++#ifndef gcdENABLE_PE_DITHER_FIX
++# define gcdENABLE_PE_DITHER_FIX 1
++#endif
++
++#ifndef gcdSHARED_PAGETABLE
++# define gcdSHARED_PAGETABLE 1
++#endif
++#ifndef gcdUSE_PVR
++# define gcdUSE_PVR 1
++#endif
++
++/*
++ gcdSMALL_BLOCK_SIZE
++
++ When non-zero, a part of VIDMEM will be reserved for requests
++ whose requesting size is less than gcdSMALL_BLOCK_SIZE.
++
++ For Linux, it's the size of a page. If this requeset fallbacks
++ to gcvPOOL_CONTIGUOUS or gcvPOOL_VIRTUAL, memory will be wasted
++ because they allocate a page at least.
++ */
++#ifndef gcdSMALL_BLOCK_SIZE
++# define gcdSMALL_BLOCK_SIZE 4096
++# define gcdRATIO_FOR_SMALL_MEMORY 32
++#endif
++
++/*
++ gcdCONTIGUOUS_SIZE_LIMIT
++ When non-zero, size of video node from gcvPOOL_CONTIGUOUS is
++ limited by gcdCONTIGUOUS_SIZE_LIMIT.
++ */
++#ifndef gcdCONTIGUOUS_SIZE_LIMIT
++# define gcdCONTIGUOUS_SIZE_LIMIT 0
++#endif
++
++#ifndef gcdDISALBE_EARLY_EARLY_Z
++# define gcdDISALBE_EARLY_EARLY_Z 1
++#endif
++
++#ifndef gcdSHADER_SRC_BY_MACHINECODE
++# define gcdSHADER_SRC_BY_MACHINECODE 1
++#endif
++
++/*
++ gcdLINK_QUEUE_SIZE
++
++ When non-zero, driver maintains a queue to record information of
++ latest lined context buffer and command buffer. Data in this queue
++ is be used to debug.
++*/
++#ifndef gcdLINK_QUEUE_SIZE
++# define gcdLINK_QUEUE_SIZE 0
++#endif
++
++/* gcdALPHA_KILL_IN_SHADER
++ *
++ * Enable alpha kill inside the shader. This will be set automatically by the
++ * HAL if certain states match a criteria.
++ */
++#ifndef gcdALPHA_KILL_IN_SHADER
++# define gcdALPHA_KILL_IN_SHADER 1
++#endif
++
++/* gcdHIGH_PRECISION_DELAY_ENABLE
++ *
++ * Enable high precision schedule delay with 1ms unit. otherwise schedule delay up to 10ms.
++ * Browser app performance will have obvious drop without this enablement
++ */
++#ifndef gcdHIGH_PRECISION_DELAY_ENABLE
++# define gcdHIGH_PRECISION_DELAY_ENABLE 1
++#endif
++
++#ifndef gcdUSE_WCLIP_PATCH
++# define gcdUSE_WCLIP_PATCH 1
++#endif
++
++#ifndef gcdHZ_L2_DISALBE
++# define gcdHZ_L2_DISALBE 1
++#endif
++
++#ifndef gcdBUGFIX15_DISABLE
++# define gcdBUGFIX15_DISABLE 1
++#endif
++
++#ifndef gcdDISABLE_HZ_FAST_CLEAR
++# define gcdDISABLE_HZ_FAST_CLEAR 1
++#endif
++
++#ifndef gcdUSE_NPOT_PATCH
++#define gcdUSE_NPOT_PATCH 1
++#endif
++
++#ifndef gcdSYNC
++# define gcdSYNC 1
++#endif
++
++#ifndef gcdENABLE_SPECIAL_HINT3
++# define gcdENABLE_SPECIAL_HINT3 1
++#endif
++
++#if defined(ANDROID)
++#ifndef gcdPRE_ROTATION
++# define gcdPRE_ROTATION 1
++#endif
++#endif
++
++/*
++ gcdDVFS
++
++ When non-zero, software will make use of dynamic voltage and
++ frequency feature.
++ */
++#ifndef gcdDVFS
++# define gcdDVFS 0
++# define gcdDVFS_ANAYLSE_WINDOW 4
++# define gcdDVFS_POLLING_TIME (gcdDVFS_ANAYLSE_WINDOW * 4)
++#endif
++
++/*
++ gcdANDROID_NATIVE_FENCE_SYNC
++
++ Enable android native fence sync. It is introduced since jellybean-4.2.
++ Depends on linux kernel option: CONFIG_SYNC.
++
++ 0: Disabled
++ 1: Build framework for native fence sync feature, and EGL extension
++ 2: Enable async swap buffers for client
++ * Native fence sync for client 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for layer in compositor side.
++ 3. Enable async hwcomposer composition.
++ * 'releaseFenceFd' for layer in compositor side, which is native
++ fence sync when client 'dequeueBuffer'
++ * Native fence sync for compositor 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for framebuffer target for DC
++ */
++#ifndef gcdANDROID_NATIVE_FENCE_SYNC
++# define gcdANDROID_NATIVE_FENCE_SYNC 0
++#endif
++
++#ifndef gcdFORCE_MIPMAP
++# define gcdFORCE_MIPMAP 0
++#endif
++
++/*
++ gcdFORCE_GAL_LOAD_TWICE
++
++ When non-zero, each thread except the main one will load libGAL.so twice to avoid potential segmetantion fault when app using dlopen/dlclose.
++ If threads exit arbitrarily, libGAL.so may not unload until the process quit.
++ */
++#ifndef gcdFORCE_GAL_LOAD_TWICE
++# define gcdFORCE_GAL_LOAD_TWICE 0
++#endif
++
++#endif /* __gc_hal_options_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_profiler.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_profiler.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_profiler.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_profiler.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,584 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_profiler_h_
++#define __gc_hal_profiler_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define GLVERTEX_OBJECT 10
++#define GLVERTEX_OBJECT_BYTES 11
++
++#define GLINDEX_OBJECT 20
++#define GLINDEX_OBJECT_BYTES 21
++
++#define GLTEXTURE_OBJECT 30
++#define GLTEXTURE_OBJECT_BYTES 31
++
++#if VIVANTE_PROFILER
++#define gcmPROFILE_GC(Enum, Value) gcoPROFILER_Count(gcvNULL, Enum, Value)
++#else
++#define gcmPROFILE_GC(Enum, Value) do { } while (gcvFALSE)
++#endif
++
++#ifndef gcdNEW_PROFILER_FILE
++#define gcdNEW_PROFILER_FILE 1
++#endif
++
++#define ES11_CALLS 151
++#define ES11_DRAWCALLS (ES11_CALLS + 1)
++#define ES11_STATECHANGECALLS (ES11_DRAWCALLS + 1)
++#define ES11_POINTCOUNT (ES11_STATECHANGECALLS + 1)
++#define ES11_LINECOUNT (ES11_POINTCOUNT + 1)
++#define ES11_TRIANGLECOUNT (ES11_LINECOUNT + 1)
++
++#define ES20_CALLS 159
++#define ES20_DRAWCALLS (ES20_CALLS + 1)
++#define ES20_STATECHANGECALLS (ES20_DRAWCALLS + 1)
++#define ES20_POINTCOUNT (ES20_STATECHANGECALLS + 1)
++#define ES20_LINECOUNT (ES20_POINTCOUNT + 1)
++#define ES20_TRIANGLECOUNT (ES20_LINECOUNT + 1)
++
++#define VG11_CALLS 88
++#define VG11_DRAWCALLS (VG11_CALLS + 1)
++#define VG11_STATECHANGECALLS (VG11_DRAWCALLS + 1)
++#define VG11_FILLCOUNT (VG11_STATECHANGECALLS + 1)
++#define VG11_STROKECOUNT (VG11_FILLCOUNT + 1)
++/* End of Driver API ID Definitions. */
++
++/* HAL & MISC IDs. */
++#define HAL_VERTBUFNEWBYTEALLOC 1
++#define HAL_VERTBUFTOTALBYTEALLOC (HAL_VERTBUFNEWBYTEALLOC + 1)
++#define HAL_VERTBUFNEWOBJALLOC (HAL_VERTBUFTOTALBYTEALLOC + 1)
++#define HAL_VERTBUFTOTALOBJALLOC (HAL_VERTBUFNEWOBJALLOC + 1)
++#define HAL_INDBUFNEWBYTEALLOC (HAL_VERTBUFTOTALOBJALLOC + 1)
++#define HAL_INDBUFTOTALBYTEALLOC (HAL_INDBUFNEWBYTEALLOC + 1)
++#define HAL_INDBUFNEWOBJALLOC (HAL_INDBUFTOTALBYTEALLOC + 1)
++#define HAL_INDBUFTOTALOBJALLOC (HAL_INDBUFNEWOBJALLOC + 1)
++#define HAL_TEXBUFNEWBYTEALLOC (HAL_INDBUFTOTALOBJALLOC + 1)
++#define HAL_TEXBUFTOTALBYTEALLOC (HAL_TEXBUFNEWBYTEALLOC + 1)
++#define HAL_TEXBUFNEWOBJALLOC (HAL_TEXBUFTOTALBYTEALLOC + 1)
++#define HAL_TEXBUFTOTALOBJALLOC (HAL_TEXBUFNEWOBJALLOC + 1)
++
++#define GPU_CYCLES 1
++#define GPU_READ64BYTE (GPU_CYCLES + 1)
++#define GPU_WRITE64BYTE (GPU_READ64BYTE + 1)
++#define GPU_TOTALCYCLES (GPU_WRITE64BYTE + 1)
++#define GPU_IDLECYCLES (GPU_TOTALCYCLES + 1)
++
++#define VS_INSTCOUNT 1
++#define VS_BRANCHINSTCOUNT (VS_INSTCOUNT + 1)
++#define VS_TEXLDINSTCOUNT (VS_BRANCHINSTCOUNT + 1)
++#define VS_RENDEREDVERTCOUNT (VS_TEXLDINSTCOUNT + 1)
++#define VS_SOURCE (VS_RENDEREDVERTCOUNT + 1)
++
++#define PS_INSTCOUNT 1
++#define PS_BRANCHINSTCOUNT (PS_INSTCOUNT + 1)
++#define PS_TEXLDINSTCOUNT (PS_BRANCHINSTCOUNT + 1)
++#define PS_RENDEREDPIXCOUNT (PS_TEXLDINSTCOUNT + 1)
++#define PS_SOURCE (PS_RENDEREDPIXCOUNT + 1)
++
++#define PA_INVERTCOUNT 1
++#define PA_INPRIMCOUNT (PA_INVERTCOUNT + 1)
++#define PA_OUTPRIMCOUNT (PA_INPRIMCOUNT + 1)
++#define PA_DEPTHCLIPCOUNT (PA_OUTPRIMCOUNT + 1)
++#define PA_TRIVIALREJCOUNT (PA_DEPTHCLIPCOUNT + 1)
++#define PA_CULLCOUNT (PA_TRIVIALREJCOUNT + 1)
++
++#define SE_TRIANGLECOUNT 1
++#define SE_LINECOUNT (SE_TRIANGLECOUNT + 1)
++
++#define RA_VALIDPIXCOUNT 1
++#define RA_TOTALQUADCOUNT (RA_VALIDPIXCOUNT + 1)
++#define RA_VALIDQUADCOUNTEZ (RA_TOTALQUADCOUNT + 1)
++#define RA_TOTALPRIMCOUNT (RA_VALIDQUADCOUNTEZ + 1)
++#define RA_PIPECACHEMISSCOUNT (RA_TOTALPRIMCOUNT + 1)
++#define RA_PREFCACHEMISSCOUNT (RA_PIPECACHEMISSCOUNT + 1)
++#define RA_EEZCULLCOUNT (RA_PREFCACHEMISSCOUNT + 1)
++
++#define TX_TOTBILINEARREQ 1
++#define TX_TOTTRILINEARREQ (TX_TOTBILINEARREQ + 1)
++#define TX_TOTDISCARDTEXREQ (TX_TOTTRILINEARREQ + 1)
++#define TX_TOTTEXREQ (TX_TOTDISCARDTEXREQ + 1)
++#define TX_MEMREADCOUNT (TX_TOTTEXREQ + 1)
++#define TX_MEMREADIN8BCOUNT (TX_MEMREADCOUNT + 1)
++#define TX_CACHEMISSCOUNT (TX_MEMREADIN8BCOUNT + 1)
++#define TX_CACHEHITTEXELCOUNT (TX_CACHEMISSCOUNT + 1)
++#define TX_CACHEMISSTEXELCOUNT (TX_CACHEHITTEXELCOUNT + 1)
++
++#define PE_KILLEDBYCOLOR 1
++#define PE_KILLEDBYDEPTH (PE_KILLEDBYCOLOR + 1)
++#define PE_DRAWNBYCOLOR (PE_KILLEDBYDEPTH + 1)
++#define PE_DRAWNBYDEPTH (PE_DRAWNBYCOLOR + 1)
++
++#define MC_READREQ8BPIPE 1
++#define MC_READREQ8BIP (MC_READREQ8BPIPE + 1)
++#define MC_WRITEREQ8BPIPE (MC_READREQ8BIP + 1)
++
++#define AXI_READREQSTALLED 1
++#define AXI_WRITEREQSTALLED (AXI_READREQSTALLED + 1)
++#define AXI_WRITEDATASTALLED (AXI_WRITEREQSTALLED + 1)
++
++#define PVS_INSTRCOUNT 1
++#define PVS_ALUINSTRCOUNT (PVS_INSTRCOUNT + 1)
++#define PVS_TEXINSTRCOUNT (PVS_ALUINSTRCOUNT + 1)
++#define PVS_ATTRIBCOUNT (PVS_TEXINSTRCOUNT + 1)
++#define PVS_UNIFORMCOUNT (PVS_ATTRIBCOUNT + 1)
++#define PVS_FUNCTIONCOUNT (PVS_UNIFORMCOUNT + 1)
++#define PVS_SOURCE (PVS_FUNCTIONCOUNT + 1)
++
++#define PPS_INSTRCOUNT 1
++#define PPS_ALUINSTRCOUNT (PPS_INSTRCOUNT + 1)
++#define PPS_TEXINSTRCOUNT (PPS_ALUINSTRCOUNT + 1)
++#define PPS_ATTRIBCOUNT (PPS_TEXINSTRCOUNT + 1)
++#define PPS_UNIFORMCOUNT (PPS_ATTRIBCOUNT + 1)
++#define PPS_FUNCTIONCOUNT (PPS_UNIFORMCOUNT + 1)
++#define PPS_SOURCE (PPS_FUNCTIONCOUNT + 1)
++/* End of MISC Counter IDs. */
++
++#ifdef gcdNEW_PROFILER_FILE
++
++/* Category Constants. */
++#define VPHEADER 0x010000
++#define VPG_INFO 0x020000
++#define VPG_TIME 0x030000
++#define VPG_MEM 0x040000
++#define VPG_ES11 0x050000
++#define VPG_ES20 0x060000
++#define VPG_VG11 0x070000
++#define VPG_HAL 0x080000
++#define VPG_HW 0x090000
++#define VPG_GPU 0x0a0000
++#define VPG_VS 0x0b0000
++#define VPG_PS 0x0c0000
++#define VPG_PA 0x0d0000
++#define VPG_SETUP 0x0e0000
++#define VPG_RA 0x0f0000
++#define VPG_TX 0x100000
++#define VPG_PE 0x110000
++#define VPG_MC 0x120000
++#define VPG_AXI 0x130000
++#define VPG_PROG 0x140000
++#define VPG_PVS 0x150000
++#define VPG_PPS 0x160000
++#define VPG_ES11_TIME 0x170000
++#define VPG_ES20_TIME 0x180000
++#define VPG_FRAME 0x190000
++#define VPG_ES11_DRAW 0x200000
++#define VPG_ES20_DRAW 0x210000
++#define VPG_END 0xff0000
++
++/* Info. */
++#define VPC_INFOCOMPANY (VPG_INFO + 1)
++#define VPC_INFOVERSION (VPC_INFOCOMPANY + 1)
++#define VPC_INFORENDERER (VPC_INFOVERSION + 1)
++#define VPC_INFOREVISION (VPC_INFORENDERER + 1)
++#define VPC_INFODRIVER (VPC_INFOREVISION + 1)
++#define VPC_INFODRIVERMODE (VPC_INFODRIVER + 1)
++#define VPC_INFOSCREENSIZE (VPC_INFODRIVERMODE + 1)
++
++/* Counter Constants. */
++#define VPC_ELAPSETIME (VPG_TIME + 1)
++#define VPC_CPUTIME (VPC_ELAPSETIME + 1)
++
++#define VPC_MEMMAXRES (VPG_MEM + 1)
++#define VPC_MEMSHARED (VPC_MEMMAXRES + 1)
++#define VPC_MEMUNSHAREDDATA (VPC_MEMSHARED + 1)
++#define VPC_MEMUNSHAREDSTACK (VPC_MEMUNSHAREDDATA + 1)
++
++/* OpenGL ES11 Statics Counter IDs. */
++#define VPC_ES11CALLS (VPG_ES11 + ES11_CALLS)
++#define VPC_ES11DRAWCALLS (VPG_ES11 + ES11_DRAWCALLS)
++#define VPC_ES11STATECHANGECALLS (VPG_ES11 + ES11_STATECHANGECALLS)
++#define VPC_ES11POINTCOUNT (VPG_ES11 + ES11_POINTCOUNT)
++#define VPC_ES11LINECOUNT (VPG_ES11 + ES11_LINECOUNT)
++#define VPC_ES11TRIANGLECOUNT (VPG_ES11 + ES11_TRIANGLECOUNT)
++
++/* OpenGL ES20 Statistics Counter IDs. */
++#define VPC_ES20CALLS (VPG_ES20 + ES20_CALLS)
++#define VPC_ES20DRAWCALLS (VPG_ES20 + ES20_DRAWCALLS)
++#define VPC_ES20STATECHANGECALLS (VPG_ES20 + ES20_STATECHANGECALLS)
++#define VPC_ES20POINTCOUNT (VPG_ES20 + ES20_POINTCOUNT)
++#define VPC_ES20LINECOUNT (VPG_ES20 + ES20_LINECOUNT)
++#define VPC_ES20TRIANGLECOUNT (VPG_ES20 + ES20_TRIANGLECOUNT)
++
++/* OpenVG Statistics Counter IDs. */
++#define VPC_VG11CALLS (VPG_VG11 + VG11_CALLS)
++#define VPC_VG11DRAWCALLS (VPG_VG11 + VG11_DRAWCALLS)
++#define VPC_VG11STATECHANGECALLS (VPG_VG11 + VG11_STATECHANGECALLS)
++#define VPC_VG11FILLCOUNT (VPG_VG11 + VG11_FILLCOUNT)
++#define VPC_VG11STROKECOUNT (VPG_VG11 + VG11_STROKECOUNT)
++
++/* HAL Counters. */
++#define VPC_HALVERTBUFNEWBYTEALLOC (VPG_HAL + HAL_VERTBUFNEWBYTEALLOC)
++#define VPC_HALVERTBUFTOTALBYTEALLOC (VPG_HAL + HAL_VERTBUFTOTALBYTEALLOC)
++#define VPC_HALVERTBUFNEWOBJALLOC (VPG_HAL + HAL_VERTBUFNEWOBJALLOC)
++#define VPC_HALVERTBUFTOTALOBJALLOC (VPG_HAL + HAL_VERTBUFTOTALOBJALLOC)
++#define VPC_HALINDBUFNEWBYTEALLOC (VPG_HAL + HAL_INDBUFNEWBYTEALLOC)
++#define VPC_HALINDBUFTOTALBYTEALLOC (VPG_HAL + HAL_INDBUFTOTALBYTEALLOC)
++#define VPC_HALINDBUFNEWOBJALLOC (VPG_HAL + HAL_INDBUFNEWOBJALLOC)
++#define VPC_HALINDBUFTOTALOBJALLOC (VPG_HAL + HAL_INDBUFTOTALOBJALLOC)
++#define VPC_HALTEXBUFNEWBYTEALLOC (VPG_HAL + HAL_TEXBUFNEWBYTEALLOC)
++#define VPC_HALTEXBUFTOTALBYTEALLOC (VPG_HAL + HAL_TEXBUFTOTALBYTEALLOC)
++#define VPC_HALTEXBUFNEWOBJALLOC (VPG_HAL + HAL_TEXBUFNEWOBJALLOC)
++#define VPC_HALTEXBUFTOTALOBJALLOC (VPG_HAL + HAL_TEXBUFTOTALOBJALLOC)
++
++/* HW: GPU Counters. */
++#define VPC_GPUCYCLES (VPG_GPU + GPU_CYCLES)
++#define VPC_GPUREAD64BYTE (VPG_GPU + GPU_READ64BYTE)
++#define VPC_GPUWRITE64BYTE (VPG_GPU + GPU_WRITE64BYTE)
++#define VPC_GPUTOTALCYCLES (VPG_GPU + GPU_TOTALCYCLES)
++#define VPC_GPUIDLECYCLES (VPG_GPU + GPU_IDLECYCLES)
++
++/* HW: Shader Counters. */
++#define VPC_VSINSTCOUNT (VPG_VS + VS_INSTCOUNT)
++#define VPC_VSBRANCHINSTCOUNT (VPG_VS + VS_BRANCHINSTCOUNT)
++#define VPC_VSTEXLDINSTCOUNT (VPG_VS + VS_TEXLDINSTCOUNT)
++#define VPC_VSRENDEREDVERTCOUNT (VPG_VS + VS_RENDEREDVERTCOUNT)
++/* HW: PS Count. */
++#define VPC_PSINSTCOUNT (VPG_PS + PS_INSTCOUNT)
++#define VPC_PSBRANCHINSTCOUNT (VPG_PS + PS_BRANCHINSTCOUNT)
++#define VPC_PSTEXLDINSTCOUNT (VPG_PS + PS_TEXLDINSTCOUNT)
++#define VPC_PSRENDEREDPIXCOUNT (VPG_PS + PS_RENDEREDPIXCOUNT)
++
++
++/* HW: PA Counters. */
++#define VPC_PAINVERTCOUNT (VPG_PA + PA_INVERTCOUNT)
++#define VPC_PAINPRIMCOUNT (VPG_PA + PA_INPRIMCOUNT)
++#define VPC_PAOUTPRIMCOUNT (VPG_PA + PA_OUTPRIMCOUNT)
++#define VPC_PADEPTHCLIPCOUNT (VPG_PA + PA_DEPTHCLIPCOUNT)
++#define VPC_PATRIVIALREJCOUNT (VPG_PA + PA_TRIVIALREJCOUNT)
++#define VPC_PACULLCOUNT (VPG_PA + PA_CULLCOUNT)
++
++/* HW: Setup Counters. */
++#define VPC_SETRIANGLECOUNT (VPG_SETUP + SE_TRIANGLECOUNT)
++#define VPC_SELINECOUNT (VPG_SETUP + SE_LINECOUNT)
++
++/* HW: RA Counters. */
++#define VPC_RAVALIDPIXCOUNT (VPG_RA + RA_VALIDPIXCOUNT)
++#define VPC_RATOTALQUADCOUNT (VPG_RA + RA_TOTALQUADCOUNT)
++#define VPC_RAVALIDQUADCOUNTEZ (VPG_RA + RA_VALIDQUADCOUNTEZ)
++#define VPC_RATOTALPRIMCOUNT (VPG_RA + RA_TOTALPRIMCOUNT)
++#define VPC_RAPIPECACHEMISSCOUNT (VPG_RA + RA_PIPECACHEMISSCOUNT)
++#define VPC_RAPREFCACHEMISSCOUNT (VPG_RA + RA_PREFCACHEMISSCOUNT)
++#define VPC_RAEEZCULLCOUNT (VPG_RA + RA_EEZCULLCOUNT)
++
++/* HW: TEX Counters. */
++#define VPC_TXTOTBILINEARREQ (VPG_TX + TX_TOTBILINEARREQ)
++#define VPC_TXTOTTRILINEARREQ (VPG_TX + TX_TOTTRILINEARREQ)
++#define VPC_TXTOTDISCARDTEXREQ (VPG_TX + TX_TOTDISCARDTEXREQ)
++#define VPC_TXTOTTEXREQ (VPG_TX + TX_TOTTEXREQ)
++#define VPC_TXMEMREADCOUNT (VPG_TX + TX_MEMREADCOUNT)
++#define VPC_TXMEMREADIN8BCOUNT (VPG_TX + TX_MEMREADIN8BCOUNT)
++#define VPC_TXCACHEMISSCOUNT (VPG_TX + TX_CACHEMISSCOUNT)
++#define VPC_TXCACHEHITTEXELCOUNT (VPG_TX + TX_CACHEHITTEXELCOUNT)
++#define VPC_TXCACHEMISSTEXELCOUNT (VPG_TX + TX_CACHEMISSTEXELCOUNT)
++
++/* HW: PE Counters. */
++#define VPC_PEKILLEDBYCOLOR (VPG_PE + PE_KILLEDBYCOLOR)
++#define VPC_PEKILLEDBYDEPTH (VPG_PE + PE_KILLEDBYDEPTH)
++#define VPC_PEDRAWNBYCOLOR (VPG_PE + PE_DRAWNBYCOLOR)
++#define VPC_PEDRAWNBYDEPTH (VPG_PE + PE_DRAWNBYDEPTH)
++
++/* HW: MC Counters. */
++#define VPC_MCREADREQ8BPIPE (VPG_MC + MC_READREQ8BPIPE)
++#define VPC_MCREADREQ8BIP (VPG_MC + MC_READREQ8BIP)
++#define VPC_MCWRITEREQ8BPIPE (VPG_MC + MC_WRITEREQ8BPIPE)
++
++/* HW: AXI Counters. */
++#define VPC_AXIREADREQSTALLED (VPG_AXI + AXI_READREQSTALLED)
++#define VPC_AXIWRITEREQSTALLED (VPG_AXI + AXI_WRITEREQSTALLED)
++#define VPC_AXIWRITEDATASTALLED (VPG_AXI + AXI_WRITEDATASTALLED)
++
++/* PROGRAM: Shader program counters. */
++#define VPC_PVSINSTRCOUNT (VPG_PVS + PVS_INSTRCOUNT)
++#define VPC_PVSALUINSTRCOUNT (VPG_PVS + PVS_ALUINSTRCOUNT)
++#define VPC_PVSTEXINSTRCOUNT (VPG_PVS + PVS_TEXINSTRCOUNT)
++#define VPC_PVSATTRIBCOUNT (VPG_PVS + PVS_ATTRIBCOUNT)
++#define VPC_PVSUNIFORMCOUNT (VPG_PVS + PVS_UNIFORMCOUNT)
++#define VPC_PVSFUNCTIONCOUNT (VPG_PVS + PVS_FUNCTIONCOUNT)
++#define VPC_PVSSOURCE (VPG_PVS + PVS_SOURCE)
++
++#define VPC_PPSINSTRCOUNT (VPG_PPS + PPS_INSTRCOUNT)
++#define VPC_PPSALUINSTRCOUNT (VPG_PPS + PPS_ALUINSTRCOUNT)
++#define VPC_PPSTEXINSTRCOUNT (VPG_PPS + PPS_TEXINSTRCOUNT)
++#define VPC_PPSATTRIBCOUNT (VPG_PPS + PPS_ATTRIBCOUNT)
++#define VPC_PPSUNIFORMCOUNT (VPG_PPS + PPS_UNIFORMCOUNT)
++#define VPC_PPSFUNCTIONCOUNT (VPG_PPS + PPS_FUNCTIONCOUNT)
++#define VPC_PPSSOURCE (VPG_PPS + PPS_SOURCE)
++
++#define VPC_PROGRAMHANDLE (VPG_PROG + 1)
++
++#define VPG_ES20_DRAW_NO (VPG_ES20_DRAW + 1)
++#define VPG_ES11_DRAW_NO (VPG_ES11_DRAW + 1)
++
++#define VPG_FRAME_USEVBO (VPG_FRAME + 1)
++
++#endif
++
++
++/* HW profile information. */
++typedef struct _gcsPROFILER_COUNTERS
++{
++ /* HW static counters. */
++ gctUINT32 gpuClock;
++ gctUINT32 axiClock;
++ gctUINT32 shaderClock;
++
++ /* HW vairable counters. */
++ gctUINT32 gpuClockStart;
++ gctUINT32 gpuClockEnd;
++
++ /* HW vairable counters. */
++ gctUINT32 gpuCyclesCounter;
++ gctUINT32 gpuTotalCyclesCounter;
++ gctUINT32 gpuIdleCyclesCounter;
++ gctUINT32 gpuTotalRead64BytesPerFrame;
++ gctUINT32 gpuTotalWrite64BytesPerFrame;
++
++ /* PE */
++ gctUINT32 pe_pixel_count_killed_by_color_pipe;
++ gctUINT32 pe_pixel_count_killed_by_depth_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_color_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_depth_pipe;
++
++ /* SH */
++ gctUINT32 ps_inst_counter;
++ gctUINT32 rendered_pixel_counter;
++ gctUINT32 vs_inst_counter;
++ gctUINT32 rendered_vertice_counter;
++ gctUINT32 vtx_branch_inst_counter;
++ gctUINT32 vtx_texld_inst_counter;
++ gctUINT32 pxl_branch_inst_counter;
++ gctUINT32 pxl_texld_inst_counter;
++
++ /* PA */
++ gctUINT32 pa_input_vtx_counter;
++ gctUINT32 pa_input_prim_counter;
++ gctUINT32 pa_output_prim_counter;
++ gctUINT32 pa_depth_clipped_counter;
++ gctUINT32 pa_trivial_rejected_counter;
++ gctUINT32 pa_culled_counter;
++
++ /* SE */
++ gctUINT32 se_culled_triangle_count;
++ gctUINT32 se_culled_lines_count;
++
++ /* RA */
++ gctUINT32 ra_valid_pixel_count;
++ gctUINT32 ra_total_quad_count;
++ gctUINT32 ra_valid_quad_count_after_early_z;
++ gctUINT32 ra_total_primitive_count;
++ gctUINT32 ra_pipe_cache_miss_counter;
++ gctUINT32 ra_prefetch_cache_miss_counter;
++ gctUINT32 ra_eez_culled_counter;
++
++ /* TX */
++ gctUINT32 tx_total_bilinear_requests;
++ gctUINT32 tx_total_trilinear_requests;
++ gctUINT32 tx_total_discarded_texture_requests;
++ gctUINT32 tx_total_texture_requests;
++ gctUINT32 tx_mem_read_count;
++ gctUINT32 tx_mem_read_in_8B_count;
++ gctUINT32 tx_cache_miss_count;
++ gctUINT32 tx_cache_hit_texel_count;
++ gctUINT32 tx_cache_miss_texel_count;
++
++ /* MC */
++ gctUINT32 mc_total_read_req_8B_from_pipeline;
++ gctUINT32 mc_total_read_req_8B_from_IP;
++ gctUINT32 mc_total_write_req_8B_from_pipeline;
++
++ /* HI */
++ gctUINT32 hi_axi_cycles_read_request_stalled;
++ gctUINT32 hi_axi_cycles_write_request_stalled;
++ gctUINT32 hi_axi_cycles_write_data_stalled;
++}
++gcsPROFILER_COUNTERS;
++
++/* HAL profile information. */
++typedef struct _gcsPROFILER
++{
++ gctUINT32 enable;
++ gctBOOL enableHal;
++ gctBOOL enableHW;
++ gctBOOL enableSH;
++ gctBOOL isSyncMode;
++
++ gctBOOL useSocket;
++ gctINT sockFd;
++
++ gctFILE file;
++
++ /* Aggregate Information */
++
++ /* Clock Info */
++ gctUINT64 frameStart;
++ gctUINT64 frameEnd;
++
++ /* Current frame information */
++ gctUINT32 frameNumber;
++ gctUINT64 frameStartTimeusec;
++ gctUINT64 frameEndTimeusec;
++ gctUINT64 frameStartCPUTimeusec;
++ gctUINT64 frameEndCPUTimeusec;
++
++#if PROFILE_HAL_COUNTERS
++ gctUINT32 vertexBufferTotalBytesAlloc;
++ gctUINT32 vertexBufferNewBytesAlloc;
++ int vertexBufferTotalObjectsAlloc;
++ int vertexBufferNewObjectsAlloc;
++
++ gctUINT32 indexBufferTotalBytesAlloc;
++ gctUINT32 indexBufferNewBytesAlloc;
++ int indexBufferTotalObjectsAlloc;
++ int indexBufferNewObjectsAlloc;
++
++ gctUINT32 textureBufferTotalBytesAlloc;
++ gctUINT32 textureBufferNewBytesAlloc;
++ int textureBufferTotalObjectsAlloc;
++ int textureBufferNewObjectsAlloc;
++
++ gctUINT32 numCommits;
++ gctUINT32 drawPointCount;
++ gctUINT32 drawLineCount;
++ gctUINT32 drawTriangleCount;
++ gctUINT32 drawVertexCount;
++ gctUINT32 redundantStateChangeCalls;
++#endif
++
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++
++ char* psSource;
++ char* vsSource;
++
++}
++gcsPROFILER;
++
++/* Memory profile information. */
++struct _gcsMemProfile
++{
++ /* Memory Usage */
++ gctUINT32 videoMemUsed;
++ gctUINT32 systemMemUsed;
++ gctUINT32 commitBufferSize;
++ gctUINT32 contextBufferCopyBytes;
++};
++
++/* Shader profile information. */
++struct _gcsSHADER_PROFILER
++{
++ gctUINT32 shaderLength;
++ gctUINT32 shaderALUCycles;
++ gctUINT32 shaderTexLoadCycles;
++ gctUINT32 shaderTempRegCount;
++ gctUINT32 shaderSamplerRegCount;
++ gctUINT32 shaderInputRegCount;
++ gctUINT32 shaderOutputRegCount;
++};
++
++/* Initialize the gcsProfiler. */
++gceSTATUS
++gcoPROFILER_Initialize(
++ IN gcoHAL Hal
++ );
++
++/* Destroy the gcProfiler. */
++gceSTATUS
++gcoPROFILER_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Write data to profiler. */
++gceSTATUS
++gcoPROFILER_Write(
++ IN gcoHAL Hal,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data out. */
++gceSTATUS
++gcoPROFILER_Flush(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of frame. */
++gceSTATUS
++gcoPROFILER_EndFrame(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of draw. */
++gceSTATUS
++gcoPROFILER_EndDraw(
++ IN gcoHAL Hal,
++ IN gctBOOL FirstDraw
++ );
++
++/* Increase profile counter Enum by Value. */
++gceSTATUS
++gcoPROFILER_Count(
++ IN gcoHAL Hal,
++ IN gctUINT32 Enum,
++ IN gctINT Value
++ );
++
++gceSTATUS
++gcoPROFILER_ShaderSourceFS(
++ IN gcoHAL Hal,
++ IN char* source
++ );
++
++gceSTATUS
++gcoPROFILER_ShaderSourceVS(
++ IN gcoHAL Hal,
++ IN char* source
++ );
++
++/* Profile input vertex shader. */
++gceSTATUS
++gcoPROFILER_ShaderVS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Vs
++ );
++
++/* Profile input fragment shader. */
++gceSTATUS
++gcoPROFILER_ShaderFS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Fs
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_profiler_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_raster.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_raster.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_raster.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_raster.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1010 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_raster_h_
++#define __gc_hal_raster_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoBRUSH * gcoBRUSH;
++typedef struct _gcoBRUSH_CACHE * gcoBRUSH_CACHE;
++
++/******************************************************************************\
++******************************** gcoBRUSH Object *******************************
++\******************************************************************************/
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructSingleColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructMonochrome(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Destroy an gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_Destroy(
++ IN gcoBRUSH Brush
++ );
++
++/******************************************************************************\
++******************************** gcoSURF Object *******************************
++\******************************************************************************/
++
++/* Set cipping rectangle. */
++gceSTATUS
++gcoSURF_SetClipping(
++ IN gcoSURF Surface
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gcoSURF_Clear2D(
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 LoColor,
++ IN gctUINT32 HiColor
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gcoSURF_Line(
++ IN gcoSURF Surface,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Generic rectangular blit. */
++gceSTATUS
++gcoSURF_Blit(
++ IN OPTIONAL gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN OPTIONAL gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN OPTIONAL gceSURF_TRANSPARENCY Transparency,
++ IN OPTIONAL gctUINT32 TransparencyColor,
++ IN OPTIONAL gctPOINTER Mask,
++ IN OPTIONAL gceSURF_MONOPACK MaskPack
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gcoSURF_MonoBlit(
++ IN gcoSURF DestSurface,
++ IN gctPOINTER Source,
++ IN gceSURF_MONOPACK SourcePack,
++ IN gcsPOINT_PTR SourceSize,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Filter blit. */
++gceSTATUS
++gcoSURF_FilterBlit(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gcoSURF_EnableAlphaBlend(
++ IN gcoSURF Surface,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gcoSURF_DisableAlphaBlend(
++ IN gcoSURF Surface
++ );
++
++/* Copy a rectangular area with format conversion. */
++gceSTATUS
++gcoSURF_CopyPixels(
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++/* Read surface pixel. */
++gceSTATUS
++gcoSURF_ReadPixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ OUT gctPOINTER PixelValue
++ );
++
++/* Write surface pixel. */
++gceSTATUS
++gcoSURF_WritePixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ IN gctPOINTER PixelValue
++ );
++
++gceSTATUS
++gcoSURF_SetDither(
++ IN gcoSURF Surface,
++ IN gctBOOL Dither
++ );
++/******************************************************************************\
++********************************** gco2D Object *********************************
++\******************************************************************************/
++
++/* Construct a new gco2D object. */
++gceSTATUS
++gco2D_Construct(
++ IN gcoHAL Hal,
++ OUT gco2D * Hardware
++ );
++
++/* Destroy an gco2D object. */
++gceSTATUS
++gco2D_Destroy(
++ IN gco2D Hardware
++ );
++
++/* Sets the maximum number of brushes in the brush cache. */
++gceSTATUS
++gco2D_SetBrushLimit(
++ IN gco2D Hardware,
++ IN gctUINT MaxCount
++ );
++
++/* Flush the brush. */
++gceSTATUS
++gco2D_FlushBrush(
++ IN gco2D Engine,
++ IN gcoBRUSH Brush,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Program the specified solid color brush. */
++gceSTATUS
++gco2D_LoadSolidBrush(
++ IN gco2D Engine,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask
++ );
++
++/* Configure monochrome source. */
++gceSTATUS
++gco2D_SetMonochromeSource(
++ IN gco2D Engine,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_MONOPACK DataPack,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source extension for full rotation. */
++gceSTATUS
++gco2D_SetColorSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSourceAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative
++ );
++
++gceSTATUS
++gco2D_SetColorSourceN(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctUINT32 SurfaceNumber
++ );
++
++/* Configure masked color source. */
++gceSTATUS
++gco2D_SetMaskedSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack
++ );
++
++/* Configure masked color source extension for full rotation. */
++gceSTATUS
++gco2D_SetMaskedSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Setup the source rectangle. */
++gceSTATUS
++gco2D_SetSource(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect
++ );
++
++/* Set clipping rectangle. */
++gceSTATUS
++gco2D_SetClipping(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++/* Configure destination. */
++gceSTATUS
++gco2D_SetTarget(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth
++ );
++
++/* Configure destination extension for full rotation. */
++gceSTATUS
++gco2D_SetTargetEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Calculate and program the stretch factors. */
++gceSTATUS
++gco2D_CalcStretchFactor(
++ IN gco2D Engine,
++ IN gctINT32 SrcSize,
++ IN gctINT32 DestSize,
++ OUT gctUINT32_PTR Factor
++ );
++
++gceSTATUS
++gco2D_SetStretchFactors(
++ IN gco2D Engine,
++ IN gctUINT32 HorFactor,
++ IN gctUINT32 VerFactor
++ );
++
++/* Calculate and program the stretch factors based on the rectangles. */
++gceSTATUS
++gco2D_SetStretchRectFactors(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect
++ );
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructSingleColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gco2D_Clear(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gco2D_Line(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines based on the 32-bit color. */
++gceSTATUS
++gco2D_ColorLine(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Generic blit. */
++gceSTATUS
++gco2D_Blit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_Blend(
++ IN gco2D Engine,
++ IN gctUINT32 SrcCount,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Batch blit. */
++gceSTATUS
++gco2D_BatchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Stretch blit. */
++gceSTATUS
++gco2D_StretchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gco2D_MonoBlit(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gcsPOINT_PTR StreamSize,
++ IN gcsRECT_PTR StreamRect,
++ IN gceSURF_MONOPACK SrcStreamPack,
++ IN gceSURF_MONOPACK DestStreamPack,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 FgRop,
++ IN gctUINT32 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_MonoBlitEx(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gctINT32 StreamStride,
++ IN gctINT32 StreamWidth,
++ IN gctINT32 StreamHeight,
++ IN gctINT32 StreamX,
++ IN gctINT32 StreamY,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DstRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Set kernel size. */
++gceSTATUS
++gco2D_SetKernelSize(
++ IN gco2D Engine,
++ IN gctUINT8 HorKernelSize,
++ IN gctUINT8 VerKernelSize
++ );
++
++/* Set filter type. */
++gceSTATUS
++gco2D_SetFilterType(
++ IN gco2D Engine,
++ IN gceFILTER_TYPE FilterType
++ );
++
++/* Set the filter kernel by user. */
++gceSTATUS
++gco2D_SetUserFilterKernel(
++ IN gco2D Engine,
++ IN gceFILTER_PASS_TYPE PassType,
++ IN gctUINT16_PTR KernelArray
++ );
++
++/* Select the pass(es) to be done for user defined filter. */
++gceSTATUS
++gco2D_EnableUserFilterPasses(
++ IN gco2D Engine,
++ IN gctBOOL HorPass,
++ IN gctBOOL VerPass
++ );
++
++/* Frees the temporary buffer allocated by filter blit operation. */
++gceSTATUS
++gco2D_FreeFilterBuffer(
++ IN gco2D Engine
++ );
++
++/* Filter blit. */
++gceSTATUS
++gco2D_FilterBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Filter blit extension for full rotation. */
++gceSTATUS
++gco2D_FilterBlitEx(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++gceSTATUS
++gco2D_FilterBlitEx2(
++ IN gco2D Engine,
++ IN gctUINT32_PTR SrcAddresses,
++ IN gctUINT32 SrcAddressNum,
++ IN gctUINT32_PTR SrcStrides,
++ IN gctUINT32 SrcStrideNum,
++ IN gceTILING SrcTiling,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32_PTR DestAddresses,
++ IN gctUINT32 DestAddressNum,
++ IN gctUINT32_PTR DestStrides,
++ IN gctUINT32 DestStrideNum,
++ IN gceTILING DestTiling,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gco2D_EnableAlphaBlend(
++ IN gco2D Engine,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Enable alpha blending engine in the hardware. */
++gceSTATUS
++gco2D_EnableAlphaBlendAdvanced(
++ IN gco2D Engine,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode
++ );
++
++/* Enable alpha blending engine with Porter Duff rule. */
++gceSTATUS
++gco2D_SetPorterDuffBlending(
++ IN gco2D Engine,
++ IN gce2D_PORTER_DUFF_RULE Rule
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gco2D_DisableAlphaBlend(
++ IN gco2D Engine
++ );
++
++/* Retrieve the maximum number of 32-bit data chunks for a single DE command. */
++gctUINT32
++gco2D_GetMaximumDataCount(
++ void
++ );
++
++/* Retrieve the maximum number of rectangles, that can be passed in a single DE command. */
++gctUINT32
++gco2D_GetMaximumRectCount(
++ void
++ );
++
++/* Returns the pixel alignment of the surface. */
++gceSTATUS
++gco2D_GetPixelAlignment(
++ gceSURF_FORMAT Format,
++ gcsPOINT_PTR Alignment
++ );
++
++/* Retrieve monochrome stream pack size. */
++gceSTATUS
++gco2D_GetPackSize(
++ IN gceSURF_MONOPACK StreamPack,
++ OUT gctUINT32 * PackWidth,
++ OUT gctUINT32 * PackHeight
++ );
++
++/* Flush the 2D pipeline. */
++gceSTATUS
++gco2D_Flush(
++ IN gco2D Engine
++ );
++
++/* Load 256-entry color table for INDEX8 source surfaces. */
++gceSTATUS
++gco2D_LoadPalette(
++ IN gco2D Engine,
++ IN gctUINT FirstIndex,
++ IN gctUINT IndexCount,
++ IN gctPOINTER ColorTable,
++ IN gctBOOL ColorConvert
++ );
++
++/* Enable/disable 2D BitBlt mirrorring. */
++gceSTATUS
++gco2D_SetBitBlitMirror(
++ IN gco2D Engine,
++ IN gctBOOL HorizontalMirror,
++ IN gctBOOL VerticalMirror
++ );
++
++/*
++ * Set the transparency for source, destination and pattern.
++ * It also enable or disable the DFB color key mode.
++ */
++gceSTATUS
++gco2D_SetTransparencyAdvancedEx(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency,
++ IN gctBOOL EnableDFBColorKeyMode
++ );
++
++/* Set the transparency for source, destination and pattern. */
++gceSTATUS
++gco2D_SetTransparencyAdvanced(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency
++ );
++
++/* Set the source color key. */
++gceSTATUS
++gco2D_SetSourceColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the source color key range. */
++gceSTATUS
++gco2D_SetSourceColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the target color key. */
++gceSTATUS
++gco2D_SetTargetColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the target color key range. */
++gceSTATUS
++gco2D_SetTargetColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the YUV color space mode. */
++gceSTATUS
++gco2D_SetYUVColorMode(
++ IN gco2D Engine,
++ IN gce2D_YUV_COLOR_MODE Mode
++ );
++
++/* Setup the source global color value in ARGB8 format. */
++gceSTATUS gco2D_SetSourceGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the target global color value in ARGB8 format. */
++gceSTATUS gco2D_SetTargetGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the source and target pixel multiply modes. */
++gceSTATUS
++gco2D_SetPixelMultiplyModeAdvanced(
++ IN gco2D Engine,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE SrcPremultiplySrcAlpha,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstPremultiplyDstAlpha,
++ IN gce2D_GLOBAL_COLOR_MULTIPLY_MODE SrcPremultiplyGlobalMode,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstDemultiplyDstAlpha
++ );
++
++/* Set the GPU clock cycles after which the idle engine will keep auto-flushing. */
++gceSTATUS
++gco2D_SetAutoFlushCycles(
++ IN gco2D Engine,
++ IN gctUINT32 Cycles
++ );
++
++#if VIVANTE_PROFILER
++/* Read the profile registers available in the 2D engine and sets them in the profile.
++ The function will also reset the pixelsRendered counter every time.
++*/
++gceSTATUS
++gco2D_ProfileEngine(
++ IN gco2D Engine,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ );
++#endif
++
++/* Enable or disable 2D dithering. */
++gceSTATUS
++gco2D_EnableDither(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetGenericSource(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetGenericTarget(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetCurrentSourceIndex(
++ IN gco2D Engine,
++ IN gctUINT32 SrcIndex
++ );
++
++gceSTATUS
++gco2D_MultiSourceBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SourceMask,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 RectCount
++ );
++
++gceSTATUS
++gco2D_SetROP(
++ IN gco2D Engine,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++gceSTATUS
++gco2D_SetGdiStretchMode(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetSourceTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TSControl,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_SetTargetTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TileStatusConfig,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_QueryU32(
++ IN gco2D Engine,
++ IN gce2D_QUERY Item,
++ OUT gctUINT32_PTR Value
++ );
++
++gceSTATUS
++gco2D_SetStateU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32 Value
++ );
++
++gceSTATUS
++gco2D_SetStateArrayI32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetStateArrayU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetTargetRect(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_raster_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_rename.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_rename.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_rename.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_rename.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,248 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_rename_h_
++#define __gc_hal_rename_h_
++
++
++#if defined(_HAL2D_APPENDIX)
++
++#define _HAL2D_RENAME_2(api, appendix) api ## appendix
++#define _HAL2D_RENAME_1(api, appendix) _HAL2D_RENAME_2(api, appendix)
++#define gcmHAL2D(api) _HAL2D_RENAME_1(api, _HAL2D_APPENDIX)
++
++
++#define gckOS_Construct gcmHAL2D(gckOS_Construct)
++#define gckOS_Destroy gcmHAL2D(gckOS_Destroy)
++#define gckOS_QueryVideoMemory gcmHAL2D(gckOS_QueryVideoMemory)
++#define gckOS_Allocate gcmHAL2D(gckOS_Allocate)
++#define gckOS_Free gcmHAL2D(gckOS_Free)
++#define gckOS_AllocateMemory gcmHAL2D(gckOS_AllocateMemory)
++#define gckOS_FreeMemory gcmHAL2D(gckOS_FreeMemory)
++#define gckOS_AllocatePagedMemory gcmHAL2D(gckOS_AllocatePagedMemory)
++#define gckOS_AllocatePagedMemoryEx gcmHAL2D(gckOS_AllocatePagedMemoryEx)
++#define gckOS_LockPages gcmHAL2D(gckOS_LockPages)
++#define gckOS_MapPages gcmHAL2D(gckOS_MapPages)
++#define gckOS_UnlockPages gcmHAL2D(gckOS_UnlockPages)
++#define gckOS_FreePagedMemory gcmHAL2D(gckOS_FreePagedMemory)
++#define gckOS_AllocateNonPagedMemory gcmHAL2D(gckOS_AllocateNonPagedMemory)
++#define gckOS_FreeNonPagedMemory gcmHAL2D(gckOS_FreeNonPagedMemory)
++#define gckOS_AllocateContiguous gcmHAL2D(gckOS_AllocateContiguous)
++#define gckOS_FreeContiguous gcmHAL2D(gckOS_FreeContiguous)
++#define gckOS_GetPageSize gcmHAL2D(gckOS_GetPageSize)
++#define gckOS_GetPhysicalAddress gcmHAL2D(gckOS_GetPhysicalAddress)
++#define gckOS_GetPhysicalAddressProcess gcmHAL2D(gckOS_GetPhysicalAddressProcess)
++#define gckOS_MapPhysical gcmHAL2D(gckOS_MapPhysical)
++#define gckOS_UnmapPhysical gcmHAL2D(gckOS_UnmapPhysical)
++#define gckOS_ReadRegister gcmHAL2D(gckOS_ReadRegister)
++#define gckOS_WriteRegister gcmHAL2D(gckOS_WriteRegister)
++#define gckOS_WriteMemory gcmHAL2D(gckOS_WriteMemory)
++#define gckOS_MapMemory gcmHAL2D(gckOS_MapMemory)
++#define gckOS_UnmapMemory gcmHAL2D(gckOS_UnmapMemory)
++#define gckOS_UnmapMemoryEx gcmHAL2D(gckOS_UnmapMemoryEx)
++#define gckOS_CreateMutex gcmHAL2D(gckOS_CreateMutex)
++#define gckOS_DeleteMutex gcmHAL2D(gckOS_DeleteMutex)
++#define gckOS_AcquireMutex gcmHAL2D(gckOS_AcquireMutex)
++#define gckOS_ReleaseMutex gcmHAL2D(gckOS_ReleaseMutex)
++#define gckOS_AtomicExchange gcmHAL2D(gckOS_AtomicExchange)
++#define gckOS_AtomicExchangePtr gcmHAL2D(gckOS_AtomicExchangePtr)
++#define gckOS_AtomConstruct gcmHAL2D(gckOS_AtomConstruct)
++#define gckOS_AtomDestroy gcmHAL2D(gckOS_AtomDestroy)
++#define gckOS_AtomGet gcmHAL2D(gckOS_AtomGet)
++#define gckOS_AtomIncrement gcmHAL2D(gckOS_AtomIncrement)
++#define gckOS_AtomDecrement gcmHAL2D(gckOS_AtomDecrement)
++#define gckOS_Delay gcmHAL2D(gckOS_Delay)
++#define gckOS_GetTime gcmHAL2D(gckOS_GetTime)
++#define gckOS_MemoryBarrier gcmHAL2D(gckOS_MemoryBarrier)
++#define gckOS_MapUserPointer gcmHAL2D(gckOS_MapUserPointer)
++#define gckOS_UnmapUserPointer gcmHAL2D(gckOS_UnmapUserPointer)
++#define gckOS_QueryNeedCopy gcmHAL2D(gckOS_QueryNeedCopy)
++#define gckOS_CopyFromUserData gcmHAL2D(gckOS_CopyFromUserData)
++#define gckOS_CopyToUserData gcmHAL2D(gckOS_CopyToUserData)
++#define gckOS_MapUserPhysical gcmHAL2D(gckOS_MapUserPhysical)
++#define gckOS_SuspendInterrupt gcmHAL2D(gckOS_SuspendInterrupt)
++#define gckOS_ResumeInterrupt gcmHAL2D(gckOS_ResumeInterrupt)
++#define gckOS_GetBaseAddress gcmHAL2D(gckOS_GetBaseAddress)
++#define gckOS_MemCopy gcmHAL2D(gckOS_MemCopy)
++#define gckOS_ZeroMemory gcmHAL2D(gckOS_ZeroMemory)
++#define gckOS_DeviceControl gcmHAL2D(gckOS_DeviceControl)
++#define gckOS_GetProcessID gcmHAL2D(gckOS_GetProcessID)
++#define gckOS_GetThreadID gcmHAL2D(gckOS_GetThreadID)
++#define gckOS_CreateSignal gcmHAL2D(gckOS_CreateSignal)
++#define gckOS_DestroySignal gcmHAL2D(gckOS_DestroySignal)
++#define gckOS_Signal gcmHAL2D(gckOS_Signal)
++#define gckOS_WaitSignal gcmHAL2D(gckOS_WaitSignal)
++#define gckOS_MapSignal gcmHAL2D(gckOS_MapSignal)
++#define gckOS_MapUserMemory gcmHAL2D(gckOS_MapUserMemory)
++#define gckOS_UnmapUserMemory gcmHAL2D(gckOS_UnmapUserMemory)
++#define gckOS_CreateUserSignal gcmHAL2D(gckOS_CreateUserSignal)
++#define gckOS_DestroyUserSignal gcmHAL2D(gckOS_DestroyUserSignal)
++#define gckOS_WaitUserSignal gcmHAL2D(gckOS_WaitUserSignal)
++#define gckOS_SignalUserSignal gcmHAL2D(gckOS_SignalUserSignal)
++#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal)
++#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal)
++#define gckOS_CacheClean gcmHAL2D(gckOS_CacheClean)
++#define gckOS_CacheFlush gcmHAL2D(gckOS_CacheFlush)
++#define gckOS_SetDebugLevel gcmHAL2D(gckOS_SetDebugLevel)
++#define gckOS_SetDebugZone gcmHAL2D(gckOS_SetDebugZone)
++#define gckOS_SetDebugLevelZone gcmHAL2D(gckOS_SetDebugLevelZone)
++#define gckOS_SetDebugZones gcmHAL2D(gckOS_SetDebugZones)
++#define gckOS_SetDebugFile gcmHAL2D(gckOS_SetDebugFile)
++#define gckOS_Broadcast gcmHAL2D(gckOS_Broadcast)
++#define gckOS_SetGPUPower gcmHAL2D(gckOS_SetGPUPower)
++#define gckOS_CreateSemaphore gcmHAL2D(gckOS_CreateSemaphore)
++#define gckOS_DestroySemaphore gcmHAL2D(gckOS_DestroySemaphore)
++#define gckOS_AcquireSemaphore gcmHAL2D(gckOS_AcquireSemaphore)
++#define gckOS_ReleaseSemaphore gcmHAL2D(gckOS_ReleaseSemaphore)
++#define gckHEAP_Construct gcmHAL2D(gckHEAP_Construct)
++#define gckHEAP_Destroy gcmHAL2D(gckHEAP_Destroy)
++#define gckHEAP_Allocate gcmHAL2D(gckHEAP_Allocate)
++#define gckHEAP_Free gcmHAL2D(gckHEAP_Free)
++#define gckHEAP_ProfileStart gcmHAL2D(gckHEAP_ProfileStart)
++#define gckHEAP_ProfileEnd gcmHAL2D(gckHEAP_ProfileEnd)
++#define gckHEAP_Test gcmHAL2D(gckHEAP_Test)
++#define gckVIDMEM_Construct gcmHAL2D(gckVIDMEM_Construct)
++#define gckVIDMEM_Destroy gcmHAL2D(gckVIDMEM_Destroy)
++#define gckVIDMEM_Allocate gcmHAL2D(gckVIDMEM_Allocate)
++#define gckVIDMEM_AllocateLinear gcmHAL2D(gckVIDMEM_AllocateLinear)
++#define gckVIDMEM_Free gcmHAL2D(gckVIDMEM_Free)
++#define gckVIDMEM_Lock gcmHAL2D(gckVIDMEM_Lock)
++#define gckVIDMEM_Unlock gcmHAL2D(gckVIDMEM_Unlock)
++#define gckVIDMEM_ConstructVirtual gcmHAL2D(gckVIDMEM_ConstructVirtual)
++#define gckVIDMEM_DestroyVirtual gcmHAL2D(gckVIDMEM_DestroyVirtual)
++#define gckKERNEL_Construct gcmHAL2D(gckKERNEL_Construct)
++#define gckKERNEL_Destroy gcmHAL2D(gckKERNEL_Destroy)
++#define gckKERNEL_Dispatch gcmHAL2D(gckKERNEL_Dispatch)
++#define gckKERNEL_QueryVideoMemory gcmHAL2D(gckKERNEL_QueryVideoMemory)
++#define gckKERNEL_GetVideoMemoryPool gcmHAL2D(gckKERNEL_GetVideoMemoryPool)
++#define gckKERNEL_MapVideoMemory gcmHAL2D(gckKERNEL_MapVideoMemory)
++#define gckKERNEL_UnmapVideoMemory gcmHAL2D(gckKERNEL_UnmapVideoMemory)
++#define gckKERNEL_MapMemory gcmHAL2D(gckKERNEL_MapMemory)
++#define gckKERNEL_UnmapMemory gcmHAL2D(gckKERNEL_UnmapMemory)
++#define gckKERNEL_Notify gcmHAL2D(gckKERNEL_Notify)
++#define gckKERNEL_QuerySettings gcmHAL2D(gckKERNEL_QuerySettings)
++#define gckKERNEL_Recovery gcmHAL2D(gckKERNEL_Recovery)
++#define gckKERNEL_OpenUserData gcmHAL2D(gckKERNEL_OpenUserData)
++#define gckKERNEL_CloseUserData gcmHAL2D(gckKERNEL_CloseUserData)
++#define gckHARDWARE_Construct gcmHAL2D(gckHARDWARE_Construct)
++#define gckHARDWARE_Destroy gcmHAL2D(gckHARDWARE_Destroy)
++#define gckHARDWARE_QuerySystemMemory gcmHAL2D(gckHARDWARE_QuerySystemMemory)
++#define gckHARDWARE_BuildVirtualAddress gcmHAL2D(gckHARDWARE_BuildVirtualAddress)
++#define gckHARDWARE_QueryCommandBuffer gcmHAL2D(gckHARDWARE_QueryCommandBuffer)
++#define gckHARDWARE_WaitLink gcmHAL2D(gckHARDWARE_WaitLink)
++#define gckHARDWARE_Execute gcmHAL2D(gckHARDWARE_Execute)
++#define gckHARDWARE_End gcmHAL2D(gckHARDWARE_End)
++#define gckHARDWARE_Nop gcmHAL2D(gckHARDWARE_Nop)
++#define gckHARDWARE_Wait gcmHAL2D(gckHARDWARE_Wait)
++#define gckHARDWARE_PipeSelect gcmHAL2D(gckHARDWARE_PipeSelect)
++#define gckHARDWARE_Link gcmHAL2D(gckHARDWARE_Link)
++#define gckHARDWARE_Event gcmHAL2D(gckHARDWARE_Event)
++#define gckHARDWARE_QueryMemory gcmHAL2D(gckHARDWARE_QueryMemory)
++#define gckHARDWARE_QueryChipIdentity gcmHAL2D(gckHARDWARE_QueryChipIdentity)
++#define gckHARDWARE_QueryChipSpecs gcmHAL2D(gckHARDWARE_QueryChipSpecs)
++#define gckHARDWARE_QueryShaderCaps gcmHAL2D(gckHARDWARE_QueryShaderCaps)
++#define gckHARDWARE_ConvertFormat gcmHAL2D(gckHARDWARE_ConvertFormat)
++#define gckHARDWARE_SplitMemory gcmHAL2D(gckHARDWARE_SplitMemory)
++#define gckHARDWARE_AlignToTile gcmHAL2D(gckHARDWARE_AlignToTile)
++#define gckHARDWARE_UpdateQueueTail gcmHAL2D(gckHARDWARE_UpdateQueueTail)
++#define gckHARDWARE_ConvertLogical gcmHAL2D(gckHARDWARE_ConvertLogical)
++#define gckHARDWARE_ConvertPhysical gcmHAL2D(gckHARDWARE_ConvertPhysical)
++#define gckHARDWARE_Interrupt gcmHAL2D(gckHARDWARE_Interrupt)
++#define gckHARDWARE_SetMMU gcmHAL2D(gckHARDWARE_SetMMU)
++#define gckHARDWARE_FlushMMU gcmHAL2D(gckHARDWARE_FlushMMU)
++#define gckHARDWARE_GetIdle gcmHAL2D(gckHARDWARE_GetIdle)
++#define gckHARDWARE_Flush gcmHAL2D(gckHARDWARE_Flush)
++#define gckHARDWARE_SetFastClear gcmHAL2D(gckHARDWARE_SetFastClear)
++#define gckHARDWARE_ReadInterrupt gcmHAL2D(gckHARDWARE_ReadInterrupt)
++#define gckHARDWARE_SetPowerManagementState gcmHAL2D(gckHARDWARE_SetPowerManagementState)
++#define gckHARDWARE_QueryPowerManagementState gcmHAL2D(gckHARDWARE_QueryPowerManagementState)
++#define gckHARDWARE_ProfileEngine2D gcmHAL2D(gckHARDWARE_ProfileEngine2D)
++#define gckHARDWARE_InitializeHardware gcmHAL2D(gckHARDWARE_InitializeHardware)
++#define gckHARDWARE_Reset gcmHAL2D(gckHARDWARE_Reset)
++#define gckINTERRUPT_Construct gcmHAL2D(gckINTERRUPT_Construct)
++#define gckINTERRUPT_Destroy gcmHAL2D(gckINTERRUPT_Destroy)
++#define gckINTERRUPT_SetHandler gcmHAL2D(gckINTERRUPT_SetHandler)
++#define gckINTERRUPT_Notify gcmHAL2D(gckINTERRUPT_Notify)
++#define gckEVENT_Construct gcmHAL2D(gckEVENT_Construct)
++#define gckEVENT_Destroy gcmHAL2D(gckEVENT_Destroy)
++#define gckEVENT_AddList gcmHAL2D(gckEVENT_AddList)
++#define gckEVENT_FreeNonPagedMemory gcmHAL2D(gckEVENT_FreeNonPagedMemory)
++#define gckEVENT_FreeContiguousMemory gcmHAL2D(gckEVENT_FreeContiguousMemory)
++#define gckEVENT_FreeVideoMemory gcmHAL2D(gckEVENT_FreeVideoMemory)
++#define gckEVENT_Signal gcmHAL2D(gckEVENT_Signal)
++#define gckEVENT_Unlock gcmHAL2D(gckEVENT_Unlock)
++#define gckEVENT_Submit gcmHAL2D(gckEVENT_Submit)
++#define gckEVENT_Commit gcmHAL2D(gckEVENT_Commit)
++#define gckEVENT_Notify gcmHAL2D(gckEVENT_Notify)
++#define gckEVENT_Interrupt gcmHAL2D(gckEVENT_Interrupt)
++#define gckCOMMAND_Construct gcmHAL2D(gckCOMMAND_Construct)
++#define gckCOMMAND_Destroy gcmHAL2D(gckCOMMAND_Destroy)
++#define gckCOMMAND_EnterCommit gcmHAL2D(gckCOMMAND_EnterCommit)
++#define gckCOMMAND_ExitCommit gcmHAL2D(gckCOMMAND_ExitCommit)
++#define gckCOMMAND_Start gcmHAL2D(gckCOMMAND_Start)
++#define gckCOMMAND_Stop gcmHAL2D(gckCOMMAND_Stop)
++#define gckCOMMAND_Commit gcmHAL2D(gckCOMMAND_Commit)
++#define gckCOMMAND_Reserve gcmHAL2D(gckCOMMAND_Reserve)
++#define gckCOMMAND_Execute gcmHAL2D(gckCOMMAND_Execute)
++#define gckCOMMAND_Stall gcmHAL2D(gckCOMMAND_Stall)
++#define gckCOMMAND_Attach gcmHAL2D(gckCOMMAND_Attach)
++#define gckCOMMAND_Detach gcmHAL2D(gckCOMMAND_Detach)
++#define gckMMU_Construct gcmHAL2D(gckMMU_Construct)
++#define gckMMU_Destroy gcmHAL2D(gckMMU_Destroy)
++#define gckMMU_AllocatePages gcmHAL2D(gckMMU_AllocatePages)
++#define gckMMU_FreePages gcmHAL2D(gckMMU_FreePages)
++#define gckMMU_InsertNode gcmHAL2D(gckMMU_InsertNode)
++#define gckMMU_RemoveNode gcmHAL2D(gckMMU_RemoveNode)
++#define gckMMU_FreeHandleMemory gcmHAL2D(gckMMU_FreeHandleMemory)
++#define gckMMU_Test gcmHAL2D(gckMMU_Test)
++#define gckHARDWARE_QueryProfileRegisters gcmHAL2D(gckHARDWARE_QueryProfileRegisters)
++
++
++#define FindMdlMap gcmHAL2D(FindMdlMap)
++#define OnProcessExit gcmHAL2D(OnProcessExit)
++
++#define gckGALDEVICE_Destroy gcmHAL2D(gckGALDEVICE_Destroy)
++#define gckOS_Print gcmHAL2D(gckOS_Print)
++#define gckGALDEVICE_FreeMemory gcmHAL2D(gckGALDEVICE_FreeMemory)
++#define gckGALDEVICE_AllocateMemory gcmHAL2D(gckGALDEVICE_AllocateMemory)
++#define gckOS_DebugBreak gcmHAL2D(gckOS_DebugBreak)
++#define gckGALDEVICE_Release_ISR gcmHAL2D(gckGALDEVICE_Release_ISR)
++#define gckOS_Verify gcmHAL2D(gckOS_Verify)
++#define gckCOMMAND_Release gcmHAL2D(gckCOMMAND_Release)
++#define gckGALDEVICE_Stop gcmHAL2D(gckGALDEVICE_Stop)
++#define gckGALDEVICE_Construct gcmHAL2D(gckGALDEVICE_Construct)
++#define gckOS_DebugFatal gcmHAL2D(gckOS_DebugFatal)
++#define gckOS_DebugTrace gcmHAL2D(gckOS_DebugTrace)
++#define gckHARDWARE_GetBaseAddress gcmHAL2D(gckHARDWARE_GetBaseAddress)
++#define gckGALDEVICE_Setup_ISR gcmHAL2D(gckGALDEVICE_Setup_ISR)
++#define gckKERNEL_AttachProcess gcmHAL2D(gckKERNEL_AttachProcess)
++#define gckKERNEL_AttachProcessEx gcmHAL2D(gckKERNEL_AttachProcessEx)
++#define gckGALDEVICE_Start_Thread gcmHAL2D(gckGALDEVICE_Start_Thread)
++#define gckHARDWARE_QueryIdle gcmHAL2D(gckHARDWARE_QueryIdle)
++#define gckGALDEVICE_Start gcmHAL2D(gckGALDEVICE_Start)
++#define gckOS_GetKernelLogical gcmHAL2D(gckOS_GetKernelLogical)
++#define gckOS_DebugTraceZone gcmHAL2D(gckOS_DebugTraceZone)
++#define gckGALDEVICE_Stop_Thread gcmHAL2D(gckGALDEVICE_Stop_Thread)
++#define gckHARDWARE_NeedBaseAddress gcmHAL2D(gckHARDWARE_NeedBaseAddress)
++
++#endif
++
++#endif /* __gc_hal_rename_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_statistics.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_statistics.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_statistics.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_statistics.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,115 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_statistics_h_
++#define __gc_hal_statistics_h_
++
++
++#define VIV_STAT_ENABLE_STATISTICS 0
++
++/* Toal number of frames for which the frame time is accounted. We have storage
++ to keep frame times for last this many frames.
++*/
++#define VIV_STAT_FRAME_BUFFER_SIZE 30
++
++/*
++ Total number of frames sampled for a mode. This means
++
++ # of frames for HZ Current : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ # of frames for HZ Switched : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ +
++ --------------------------------------------------------
++ : (2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES) frames needed
++
++ IMPORTANT: This total must be smaller than VIV_STAT_FRAME_BUFFER_SIZE
++*/
++#define VIV_STAT_EARLY_Z_SAMPLE_FRAMES 7
++#define VIV_STAT_EARLY_Z_LATENCY_FRAMES 2
++
++/* Multiplication factor for previous Hz off mode. Make it more than 1.0 to advertise HZ on.*/
++#define VIV_STAT_EARLY_Z_FACTOR (1.05f)
++
++/* Defines the statistical data keys monitored by the statistics module */
++typedef enum _gceSTATISTICS
++{
++ gcvFRAME_FPS = 1,
++}
++gceSTATISTICS;
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS_EARLYZ
++{
++ gctUINT switchBackCount;
++ gctUINT nextCheckPoint;
++ gctBOOL disabled;
++}
++gcsSTATISTICS_EARLYZ;
++
++
++/* Defines the statistical data keys monitored by the statistics module */
++typedef enum _gceSTATISTICS_Call
++{
++ gcvSTAT_ES11_GLDRAWELEMENTS = 1,
++}
++gceSTATISTICS_Call;
++
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS
++{
++ gctUINT64 frameTime[VIV_STAT_FRAME_BUFFER_SIZE];
++ gctUINT64 previousFrameTime;
++ gctUINT frame;
++ gcsSTATISTICS_EARLYZ earlyZ;
++ gctUINT ES11_drawElementsCount;
++ gctBOOL applyRTestVAFix;
++}
++gcsSTATISTICS;
++
++
++/* Add a frame based data into current statistics. */
++void
++gcfSTATISTICS_AddData(
++ IN gceSTATISTICS Key,
++ IN gctUINT Value
++ );
++
++/* Marks the frame end and triggers statistical calculations and decisions.*/
++void
++gcfSTATISTICS_MarkFrameEnd (
++ void
++ );
++
++/* Sets whether the dynmaic HZ is disabled or not .*/
++void
++gcfSTATISTICS_DisableDynamicEarlyZ (
++ IN gctBOOL Disabled
++ );
++
++/* Checks whether or not glDrawArray function call will be discarded */
++gctBOOL
++gcfSTATISTICS_DiscardCall(
++ gceSTATISTICS_Call Function
++ );
++
++
++#endif /*__gc_hal_statistics_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_types.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_types.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_types.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_types.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1080 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_types_h_
++#define __gc_hal_types_h_
++
++#include "gc_hal_version.h"
++#include "gc_hal_options.h"
++
++#ifdef _WIN32
++#pragma warning(disable:4127) /* Conditional expression is constant (do { }
++ ** while(0)). */
++#pragma warning(disable:4100) /* Unreferenced formal parameter. */
++#pragma warning(disable:4204) /* Non-constant aggregate initializer (C99). */
++#pragma warning(disable:4131) /* Uses old-style declarator (for Bison and
++ ** Flex generated files). */
++#pragma warning(disable:4206) /* Translation unit is empty. */
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++** Platform macros.
++*/
++
++#if defined(__GNUC__)
++# define gcdHAS_ELLIPSES 1 /* GCC always has it. */
++#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
++# define gcdHAS_ELLIPSES 1 /* C99 has it. */
++#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
++# define gcdHAS_ELLIPSES 1 /* MSVC 2007+ has it. */
++#elif defined(UNDER_CE)
++#if UNDER_CE >= 600
++# define gcdHAS_ELLIPSES 1
++# else
++# define gcdHAS_ELLIPSES 0
++# endif
++#else
++# error "gcdHAS_ELLIPSES: Platform could not be determined"
++#endif
++
++/******************************************************************************\
++************************************ Keyword ***********************************
++\******************************************************************************/
++
++#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))
++# define gcmINLINE inline /* C99 keyword. */
++#elif defined(__GNUC__)
++# define gcmINLINE __inline__ /* GNU keyword. */
++#elif defined(_MSC_VER) || defined(UNDER_CE)
++# define gcmINLINE __inline /* Internal keyword. */
++#else
++# error "gcmINLINE: Platform could not be determined"
++#endif
++
++/* Possible debug flags. */
++#define gcdDEBUG_NONE 0
++#define gcdDEBUG_ALL (1 << 0)
++#define gcdDEBUG_FATAL (1 << 1)
++#define gcdDEBUG_TRACE (1 << 2)
++#define gcdDEBUG_BREAK (1 << 3)
++#define gcdDEBUG_ASSERT (1 << 4)
++#define gcdDEBUG_CODE (1 << 5)
++#define gcdDEBUG_STACK (1 << 6)
++
++#define gcmIS_DEBUG(flag) ( gcdDEBUG & (flag | gcdDEBUG_ALL) )
++
++#ifndef gcdDEBUG
++#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG)
++# define gcdDEBUG gcdDEBUG_ALL
++# else
++# define gcdDEBUG gcdDEBUG_NONE
++# endif
++#endif
++
++#ifdef _USRDLL
++#ifdef _MSC_VER
++#ifdef HAL_EXPORTS
++# define HALAPI __declspec(dllexport)
++# else
++# define HALAPI __declspec(dllimport)
++# endif
++# define HALDECL __cdecl
++# else
++#ifdef HAL_EXPORTS
++# define HALAPI
++# else
++# define HALAPI extern
++# endif
++# endif
++#else
++# define HALAPI
++# define HALDECL
++#endif
++
++/******************************************************************************\
++********************************** Common Types ********************************
++\******************************************************************************/
++
++#define gcvFALSE 0
++#define gcvTRUE 1
++
++#define gcvINFINITE ((gctUINT32) ~0U)
++
++#define gcvINVALID_HANDLE ((gctHANDLE) ~0U)
++
++typedef int gctBOOL;
++typedef gctBOOL * gctBOOL_PTR;
++
++typedef int gctINT;
++typedef long gctLONG;
++typedef signed char gctINT8;
++typedef signed short gctINT16;
++typedef signed int gctINT32;
++typedef signed long long gctINT64;
++
++typedef gctINT * gctINT_PTR;
++typedef gctINT8 * gctINT8_PTR;
++typedef gctINT16 * gctINT16_PTR;
++typedef gctINT32 * gctINT32_PTR;
++typedef gctINT64 * gctINT64_PTR;
++
++typedef unsigned int gctUINT;
++typedef unsigned char gctUINT8;
++typedef unsigned short gctUINT16;
++typedef unsigned int gctUINT32;
++typedef unsigned long long gctUINT64;
++typedef unsigned long gctUINTPTR_T;
++
++typedef gctUINT * gctUINT_PTR;
++typedef gctUINT8 * gctUINT8_PTR;
++typedef gctUINT16 * gctUINT16_PTR;
++typedef gctUINT32 * gctUINT32_PTR;
++typedef gctUINT64 * gctUINT64_PTR;
++
++typedef unsigned long gctSIZE_T;
++typedef gctSIZE_T * gctSIZE_T_PTR;
++
++#ifdef __cplusplus
++# define gcvNULL 0
++#else
++# define gcvNULL ((void *) 0)
++#endif
++
++typedef float gctFLOAT;
++typedef signed int gctFIXED_POINT;
++typedef float * gctFLOAT_PTR;
++
++typedef void * gctPHYS_ADDR;
++typedef void * gctHANDLE;
++typedef void * gctFILE;
++typedef void * gctSIGNAL;
++typedef void * gctWINDOW;
++typedef void * gctIMAGE;
++typedef void * gctSYNC_POINT;
++
++typedef void * gctSEMAPHORE;
++
++typedef void * gctPOINTER;
++typedef const void * gctCONST_POINTER;
++
++typedef char gctCHAR;
++typedef char * gctSTRING;
++typedef const char * gctCONST_STRING;
++
++typedef struct _gcsCOUNT_STRING
++{
++ gctSIZE_T Length;
++ gctCONST_STRING String;
++}
++gcsCOUNT_STRING;
++
++typedef union _gcuFLOAT_UINT32
++{
++ gctFLOAT f;
++ gctUINT32 u;
++}
++gcuFLOAT_UINT32;
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
++
++/* Stringizing macro. */
++#define gcmSTRING(Value) #Value
++
++/******************************************************************************\
++******************************* Fixed Point Math *******************************
++\******************************************************************************/
++
++#define gcmXMultiply(x1, x2) gcoMATH_MultiplyFixed(x1, x2)
++#define gcmXDivide(x1, x2) gcoMATH_DivideFixed(x1, x2)
++#define gcmXMultiplyDivide(x1, x2, x3) gcoMATH_MultiplyDivideFixed(x1, x2, x3)
++
++/* 2D Engine profile. */
++typedef struct _gcs2D_PROFILE
++{
++ /* Cycle count.
++ 32bit counter incremented every 2D clock cycle.
++ Wraps back to 0 when the counter overflows.
++ */
++ gctUINT32 cycleCount;
++
++ /* Pixels rendered by the 2D engine.
++ Resets to 0 every time it is read. */
++ gctUINT32 pixelsRendered;
++}
++gcs2D_PROFILE;
++
++/* Macro to combine four characters into a Charcater Code. */
++#define gcmCC(c1, c2, c3, c4) \
++( \
++ (char) (c1) \
++ | \
++ ((char) (c2) << 8) \
++ | \
++ ((char) (c3) << 16) \
++ | \
++ ((char) (c4) << 24) \
++)
++
++#define gcmPRINTABLE(c) ((((c) >= ' ') && ((c) <= '}')) ? ((c) != '%' ? (c) : ' ') : ' ')
++
++#define gcmCC_PRINT(cc) \
++ gcmPRINTABLE((char) ( (cc) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 8) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 16) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 24) & 0xFF))
++
++/******************************************************************************\
++****************************** Function Parameters *****************************
++\******************************************************************************/
++
++#define IN
++#define OUT
++#define OPTIONAL
++
++/******************************************************************************\
++********************************* Status Codes *********************************
++\******************************************************************************/
++
++typedef enum _gceSTATUS
++{
++ gcvSTATUS_OK = 0,
++ gcvSTATUS_FALSE = 0,
++ gcvSTATUS_TRUE = 1,
++ gcvSTATUS_NO_MORE_DATA = 2,
++ gcvSTATUS_CACHED = 3,
++ gcvSTATUS_MIPMAP_TOO_LARGE = 4,
++ gcvSTATUS_NAME_NOT_FOUND = 5,
++ gcvSTATUS_NOT_OUR_INTERRUPT = 6,
++ gcvSTATUS_MISMATCH = 7,
++ gcvSTATUS_MIPMAP_TOO_SMALL = 8,
++ gcvSTATUS_LARGER = 9,
++ gcvSTATUS_SMALLER = 10,
++ gcvSTATUS_CHIP_NOT_READY = 11,
++ gcvSTATUS_NEED_CONVERSION = 12,
++ gcvSTATUS_SKIP = 13,
++ gcvSTATUS_DATA_TOO_LARGE = 14,
++ gcvSTATUS_INVALID_CONFIG = 15,
++ gcvSTATUS_CHANGED = 16,
++ gcvSTATUS_NOT_SUPPORT_DITHER = 17,
++ gcvSTATUS_EXECUTED = 18,
++ gcvSTATUS_TERMINATE = 19,
++
++ gcvSTATUS_CONVERT_TO_SINGLE_STREAM = 20,
++
++ gcvSTATUS_INVALID_ARGUMENT = -1,
++ gcvSTATUS_INVALID_OBJECT = -2,
++ gcvSTATUS_OUT_OF_MEMORY = -3,
++ gcvSTATUS_MEMORY_LOCKED = -4,
++ gcvSTATUS_MEMORY_UNLOCKED = -5,
++ gcvSTATUS_HEAP_CORRUPTED = -6,
++ gcvSTATUS_GENERIC_IO = -7,
++ gcvSTATUS_INVALID_ADDRESS = -8,
++ gcvSTATUS_CONTEXT_LOSSED = -9,
++ gcvSTATUS_TOO_COMPLEX = -10,
++ gcvSTATUS_BUFFER_TOO_SMALL = -11,
++ gcvSTATUS_INTERFACE_ERROR = -12,
++ gcvSTATUS_NOT_SUPPORTED = -13,
++ gcvSTATUS_MORE_DATA = -14,
++ gcvSTATUS_TIMEOUT = -15,
++ gcvSTATUS_OUT_OF_RESOURCES = -16,
++ gcvSTATUS_INVALID_DATA = -17,
++ gcvSTATUS_INVALID_MIPMAP = -18,
++ gcvSTATUS_NOT_FOUND = -19,
++ gcvSTATUS_NOT_ALIGNED = -20,
++ gcvSTATUS_INVALID_REQUEST = -21,
++ gcvSTATUS_GPU_NOT_RESPONDING = -22,
++ gcvSTATUS_TIMER_OVERFLOW = -23,
++ gcvSTATUS_VERSION_MISMATCH = -24,
++ gcvSTATUS_LOCKED = -25,
++ gcvSTATUS_INTERRUPTED = -26,
++ gcvSTATUS_DEVICE = -27,
++ gcvSTATUS_NOT_MULTI_PIPE_ALIGNED = -28,
++
++ /* Linker errors. */
++ gcvSTATUS_GLOBAL_TYPE_MISMATCH = -1000,
++ gcvSTATUS_TOO_MANY_ATTRIBUTES = -1001,
++ gcvSTATUS_TOO_MANY_UNIFORMS = -1002,
++ gcvSTATUS_TOO_MANY_VARYINGS = -1003,
++ gcvSTATUS_UNDECLARED_VARYING = -1004,
++ gcvSTATUS_VARYING_TYPE_MISMATCH = -1005,
++ gcvSTATUS_MISSING_MAIN = -1006,
++ gcvSTATUS_NAME_MISMATCH = -1007,
++ gcvSTATUS_INVALID_INDEX = -1008,
++ gcvSTATUS_UNIFORM_TYPE_MISMATCH = -1009,
++
++ /* Compiler errors. */
++ gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR = -2000,
++ gcvSTATUS_COMPILER_FE_PARSER_ERROR = -2001,
++}
++gceSTATUS;
++
++/******************************************************************************\
++********************************* Status Macros ********************************
++\******************************************************************************/
++
++#define gcmIS_ERROR(status) (status < 0)
++#define gcmNO_ERROR(status) (status >= 0)
++#define gcmIS_SUCCESS(status) (status == gcvSTATUS_OK)
++
++/******************************************************************************\
++********************************* Field Macros *********************************
++\******************************************************************************/
++
++#define __gcmSTART(reg_field) \
++ (0 ? reg_field)
++
++#define __gcmEND(reg_field) \
++ (1 ? reg_field)
++
++#define __gcmGETSIZE(reg_field) \
++ (__gcmEND(reg_field) - __gcmSTART(reg_field) + 1)
++
++#define __gcmALIGN(data, reg_field) \
++ (((gctUINT32) (data)) << __gcmSTART(reg_field))
++
++#define __gcmMASK(reg_field) \
++ ((gctUINT32) ((__gcmGETSIZE(reg_field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg_field)))))
++
++/*******************************************************************************
++**
++** gcmFIELDMASK
++**
++** Get aligned field mask.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMASK(reg, field) \
++( \
++ __gcmALIGN(__gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETFIELD
++**
++** Extract the value of a field from specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETFIELD(data, reg, field) \
++( \
++ ((((gctUINT32) (data)) >> __gcmSTART(reg##_##field)) \
++ & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELD
++**
++** Set the value of a field within specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETFIELD(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN((gctUINT32) (value) \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELDVALUE
++**
++** Set the value of a field within specified data with a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmSETFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN(reg##_##field##_##value \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETMASKEDFIELDMASK
++**
++** Determine field mask of a masked field.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETMASKEDFIELDMASK(reg, field) \
++( \
++ gcmSETFIELD(0, reg, field, ~0) | \
++ gcmSETFIELD(0, reg, MASK_ ## field, ~0) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELD
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELD(reg, field, value) \
++( \
++ gcmSETFIELD (~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELDVALUE
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELDVALUE(reg, field, value) \
++( \
++ gcmSETFIELDVALUE(~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDVALUE
++**
++** Verify if the value of a field within specified data equals a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmVERIFYFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) >> __gcmSTART(reg##_##field) & \
++ __gcmMASK(reg##_##field)) \
++ == \
++ (reg##_##field##_##value & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++** Bit field macros.
++*/
++
++#define __gcmSTARTBIT(Field) \
++ ( 1 ? Field )
++
++#define __gcmBITSIZE(Field) \
++ ( 0 ? Field )
++
++#define __gcmBITMASK(Field) \
++( \
++ (1 << __gcmBITSIZE(Field)) - 1 \
++)
++
++#define gcmGETBITS(Value, Type, Field) \
++( \
++ ( ((Type) (Value)) >> __gcmSTARTBIT(Field) ) \
++ & \
++ __gcmBITMASK(Field) \
++)
++
++#define gcmSETBITS(Value, Type, Field, NewValue) \
++( \
++ ( ((Type) (Value)) \
++ & ~(__gcmBITMASK(Field) << __gcmSTARTBIT(Field)) \
++ ) \
++ | \
++ ( ( ((Type) (NewValue)) \
++ & __gcmBITMASK(Field) \
++ ) << __gcmSTARTBIT(Field) \
++ ) \
++)
++
++/*******************************************************************************
++**
++** gcmISINREGRANGE
++**
++** Verify whether the specified address is in the register range.
++**
++** ARGUMENTS:
++**
++** Address Address to be verified.
++** Name Name of a register.
++*/
++
++#define gcmISINREGRANGE(Address, Name) \
++( \
++ ((Address & (~0U << Name ## _LSB)) == (Name ## _Address >> 2)) \
++)
++
++/*******************************************************************************
++**
++** A set of macros to aid state loading.
++**
++** ARGUMENTS:
++**
++** CommandBuffer Pointer to a gcoCMDBUF object.
++** StateDelta Pointer to a gcsSTATE_DELTA state delta structure.
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** PartOfContext Whether or not the state is a part of the context.
++** FixedPoint Whether or not the state is of the fixed point format.
++** Count Number of consecutive states to be loaded.
++** Address State address.
++** Data Data to be set to the state.
++*/
++
++/*----------------------------------------------------------------------------*/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) \
++ CommandBuffer->lastLoadStatePtr = gcmPTR_TO_UINT64(Memory); \
++ CommandBuffer->lastLoadStateAddress = Address; \
++ CommandBuffer->lastLoadStateCount = Count
++
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) \
++ gcmASSERT( \
++ (gctUINT) (Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastLoadStatePtr, gctUINT32_PTR) - 1) \
++ == \
++ (gctUINT) (Address - CommandBuffer->lastLoadStateAddress) \
++ ); \
++ \
++ gcmASSERT(CommandBuffer->lastLoadStateCount > 0); \
++ \
++ CommandBuffer->lastLoadStateCount -= 1
++
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer) \
++ gcmASSERT(CommandBuffer->lastLoadStateCount == 0)
++
++#else
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count)
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address)
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer)
++
++#endif
++
++#if gcdSECURE_USER
++
++# define gcmDEFINESECUREUSER() \
++ gctUINT __secure_user_offset__; \
++ gctUINT32_PTR __secure_user_hintArray__;
++
++# define gcmBEGINSECUREUSER() \
++ __secure_user_offset__ = reserve->lastOffset; \
++ \
++ __secure_user_hintArray__ = gcmUINT64_TO_PTR(reserve->hintArrayTail)
++
++# define gcmENDSECUREUSER() \
++ reserve->hintArrayTail = gcmPTR_TO_UINT64(__secure_user_hintArray__)
++
++# define gcmSKIPSECUREUSER() \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32)
++
++# define gcmUPDATESECUREUSER() \
++ *__secure_user_hintArray__ = __secure_user_offset__; \
++ \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32); \
++ __secure_user_hintArray__ += 1
++
++#else
++
++# define gcmDEFINESECUREUSER()
++# define gcmBEGINSECUREUSER()
++# define gcmENDSECUREUSER()
++# define gcmSKIPSECUREUSER()
++# define gcmUPDATESECUREUSER()
++
++#endif
++
++/*----------------------------------------------------------------------------*/
++
++#if gcdDUMP
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data) \
++ if (FixedPoint) \
++ { \
++ gcmDUMP(gcvNULL, "@[state.x 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ } \
++ else \
++ { \
++ gcmDUMP(gcvNULL, "@[state 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ }
++#else
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data)
++#endif
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmDEFINESTATEBUFFER(CommandBuffer, StateDelta, Memory, ReserveSize) \
++ gcmDEFINESECUREUSER() \
++ gctSIZE_T ReserveSize; \
++ gcoCMDBUF CommandBuffer; \
++ gctUINT32_PTR Memory; \
++ gcsSTATE_DELTA_PTR StateDelta
++
++#define gcmBEGINSTATEBUFFER(Hardware, CommandBuffer, StateDelta, Memory, ReserveSize) \
++{ \
++ gcmONERROR(gcoBUFFER_Reserve( \
++ Hardware->buffer, ReserveSize, gcvTRUE, &CommandBuffer \
++ )); \
++ \
++ Memory = gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \
++ \
++ StateDelta = Hardware->delta; \
++ \
++ gcmBEGINSECUREUSER(); \
++}
++
++#define gcmENDSTATEBUFFER(CommandBuffer, Memory, ReserveSize) \
++{ \
++ gcmENDSECUREUSER(); \
++ \
++ gcmASSERT( \
++ gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT8_PTR) + ReserveSize \
++ == \
++ (gctUINT8_PTR) Memory \
++ ); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, Count) \
++{ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++ gcmASSERT((gctUINT32)Count <= 1024); \
++ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count); \
++ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmENDSTATEBATCH(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, FixedPoint, Address, 0, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETFILLER(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ Memory += 1; \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSINGLESTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++
++/*******************************************************************************
++**
++** gcmSETSTARTDECOMMAND
++**
++** Form a START_DE command.
++**
++** ARGUMENTS:
++**
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** Count Number of the rectangles.
++*/
++
++#define gcmSETSTARTDECOMMAND(Memory, Count) \
++{ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \
++ \
++ *Memory++ = 0xDEADDEED; \
++}
++
++/******************************************************************************\
++******************************** Ceiling Macro ********************************
++\******************************************************************************/
++#define gcmCEIL(x) ((x - (gctUINT32)x) == 0 ? (gctUINT32)x : (gctUINT32)x + 1)
++
++/******************************************************************************\
++******************************** Min/Max Macros ********************************
++\******************************************************************************/
++
++#define gcmMIN(x, y) (((x) <= (y)) ? (x) : (y))
++#define gcmMAX(x, y) (((x) >= (y)) ? (x) : (y))
++#define gcmCLAMP(x, min, max) (((x) < (min)) ? (min) : \
++ ((x) > (max)) ? (max) : (x))
++#define gcmABS(x) (((x) < 0) ? -(x) : (x))
++#define gcmNEG(x) (((x) < 0) ? (x) : -(x))
++
++/*******************************************************************************
++**
++** gcmPTR2INT
++**
++** Convert a pointer to an integer value.
++**
++** ARGUMENTS:
++**
++** p Pointer value.
++*/
++#if defined(_WIN32) || (defined(__LP64__) && __LP64__)
++# define gcmPTR2INT(p) \
++ ( \
++ (gctUINT32) (gctUINT64) (p) \
++ )
++#else
++# define gcmPTR2INT(p) \
++ ( \
++ (gctUINT32) (p) \
++ )
++#endif
++
++/*******************************************************************************
++**
++** gcmINT2PTR
++**
++** Convert an integer value into a pointer.
++**
++** ARGUMENTS:
++**
++** v Integer value.
++*/
++#ifdef __LP64__
++# define gcmINT2PTR(i) \
++ ( \
++ (gctPOINTER) (gctINT64) (i) \
++ )
++#else
++# define gcmINT2PTR(i) \
++ ( \
++ (gctPOINTER) (i) \
++ )
++#endif
++
++/*******************************************************************************
++**
++** gcmOFFSETOF
++**
++** Compute the byte offset of a field inside a structure.
++**
++** ARGUMENTS:
++**
++** s Structure name.
++** field Field name.
++*/
++#define gcmOFFSETOF(s, field) \
++( \
++ gcmPTR2INT(& (((struct s *) 0)->field)) \
++)
++
++#define gcmSWAB32(x) ((gctUINT32)( \
++ (((gctUINT32)(x) & (gctUINT32)0x000000FFUL) << 24) | \
++ (((gctUINT32)(x) & (gctUINT32)0x0000FF00UL) << 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0x00FF0000UL) >> 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0xFF000000UL) >> 24)))
++
++/*******************************************************************************
++***** Database ****************************************************************/
++
++typedef struct _gcsDATABASE_COUNTERS
++{
++ /* Number of currently allocated bytes. */
++ gctUINT64 bytes;
++
++ /* Maximum number of bytes allocated (memory footprint). */
++ gctUINT64 maxBytes;
++
++ /* Total number of bytes allocated. */
++ gctUINT64 totalBytes;
++}
++gcsDATABASE_COUNTERS;
++
++typedef struct _gcuDATABASE_INFO
++{
++ /* Counters. */
++ gcsDATABASE_COUNTERS counters;
++
++ /* Time value. */
++ gctUINT64 time;
++}
++gcuDATABASE_INFO;
++
++/*******************************************************************************
++***** Frame database **********************************************************/
++
++/* gcsHAL_FRAME_INFO */
++typedef struct _gcsHAL_FRAME_INFO
++{
++ /* Current timer tick. */
++ OUT gctUINT64 ticks;
++
++ /* Bandwidth counters. */
++ OUT gctUINT readBytes8[8];
++ OUT gctUINT writeBytes8[8];
++
++ /* Counters. */
++ OUT gctUINT cycles[8];
++ OUT gctUINT idleCycles[8];
++ OUT gctUINT mcCycles[8];
++ OUT gctUINT readRequests[8];
++ OUT gctUINT writeRequests[8];
++
++ /* FE counters. */
++ OUT gctUINT drawCount;
++ OUT gctUINT vertexOutCount;
++ OUT gctUINT vertexMissCount;
++
++ /* 3D counters. */
++ OUT gctUINT vertexCount;
++ OUT gctUINT primitiveCount;
++ OUT gctUINT rejectedPrimitives;
++ OUT gctUINT culledPrimitives;
++ OUT gctUINT clippedPrimitives;
++ OUT gctUINT droppedPrimitives;
++ OUT gctUINT frustumClippedPrimitives;
++ OUT gctUINT outPrimitives;
++ OUT gctUINT inPrimitives;
++ OUT gctUINT culledQuadCount;
++ OUT gctUINT totalQuadCount;
++ OUT gctUINT quadCount;
++ OUT gctUINT totalPixelCount;
++
++ /* PE counters. */
++ OUT gctUINT colorKilled[8];
++ OUT gctUINT colorDrawn[8];
++ OUT gctUINT depthKilled[8];
++ OUT gctUINT depthDrawn[8];
++
++ /* Shader counters. */
++ OUT gctUINT shaderCycles;
++ OUT gctUINT vsInstructionCount;
++ OUT gctUINT vsTextureCount;
++ OUT gctUINT vsBranchCount;
++ OUT gctUINT vsVertices;
++ OUT gctUINT psInstructionCount;
++ OUT gctUINT psTextureCount;
++ OUT gctUINT psBranchCount;
++ OUT gctUINT psPixels;
++
++ /* Texture counters. */
++ OUT gctUINT bilinearRequests;
++ OUT gctUINT trilinearRequests;
++ OUT gctUINT txBytes8[2];
++ OUT gctUINT txHitCount;
++ OUT gctUINT txMissCount;
++}
++gcsHAL_FRAME_INFO;
++
++typedef enum _gcePATCH_ID
++{
++ gcePATCH_UNKNOWN = 0xFFFFFFFF,
++
++ /* Benchmark list*/
++ gcePATCH_GLB11 = 0x0,
++ gcePATCH_GLB21,
++ gcePATCH_GLB25,
++ gcePATCH_GLB27,
++
++ gcePATCH_BM21,
++ gcePATCH_MM,
++ gcePATCH_MM06,
++ gcePATCH_MM07,
++ gcePATCH_QUADRANT,
++ gcePATCH_ANTUTU,
++ gcePATCH_SMARTBENCH,
++ gcePATCH_JPCT,
++ gcePATCH_NENAMARK,
++ gcePATCH_NENAMARK2,
++ gcePATCH_NEOCORE,
++ gcePATCH_GLB,
++ gcePATCH_GB,
++ gcePATCH_RTESTVA,
++ gcePATCH_BMX,
++ gcePATCH_BMGUI,
++
++ /* Game list */
++ gcePATCH_NBA2013,
++ gcePATCH_BARDTALE,
++ gcePATCH_BUSPARKING3D,
++ gcePATCH_FISHBOODLE,
++ gcePATCH_SUBWAYSURFER,
++ gcePATCH_HIGHWAYDRIVER,
++ gcePATCH_PREMIUM,
++ gcePATCH_RACEILLEGAL,
++ gcePATCH_BLABLA,
++ gcePATCH_MEGARUN,
++ gcePATCH_GALAXYONFIRE2,
++ gcePATCH_GLOFTR3HM,
++ gcePATCH_GLOFTSXHM,
++ gcePATCH_GLOFTF3HM,
++ gcePATCH_GLOFTGANG,
++ gcePATCH_XRUNNER,
++ gcePATCH_WP,
++ gcePATCH_DEVIL,
++ gcePATCH_HOLYARCH,
++ gcePATCH_MUSE,
++ gcePATCH_SG,
++ gcePATCH_SIEGECRAFT,
++ gcePATCH_CARCHALLENGE,
++ gcePATCH_HEROESCALL,
++ gcePATCH_MONOPOLY,
++ gcePATCH_CTGL20,
++ gcePATCH_FIREFOX,
++ gcePATCH_CHORME,
++ gcePATCH_DUOKANTV,
++ gcePATCH_TESTAPP,
++ gcePATCH_GOOGLEEARTH,
++
++ /* Count enum*/
++ gcePATCH_COUNT,
++}
++gcePATCH_ID;
++
++#if gcdLINK_QUEUE_SIZE
++typedef struct _gckLINKDATA * gckLINKDATA;
++struct _gckLINKDATA
++{
++ gctUINT32 start;
++ gctUINT32 end;
++ gctINT pid;
++};
++
++typedef struct _gckLINKQUEUE * gckLINKQUEUE;
++struct _gckLINKQUEUE
++{
++ struct _gckLINKDATA data[gcdLINK_QUEUE_SIZE];
++ gctUINT32 rear;
++ gctUINT32 front;
++ gctUINT32 count;
++};
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_types_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_version.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_version.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_version.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_version.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,37 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_version_h_
++#define __gc_hal_version_h_
++
++#define gcvVERSION_MAJOR 4
++
++#define gcvVERSION_MINOR 6
++
++#define gcvVERSION_PATCH 9
++
++#define gcvVERSION_BUILD 9754
++
++#define gcvVERSION_DATE __DATE__
++
++#define gcvVERSION_TIME __TIME__
++
++#endif /* __gc_hal_version_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_vg.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,913 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_vg_h_
++#define __gc_hal_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++
++#if gcdENABLE_VG
++
++/* Thread routine type. */
++#if defined(LINUX)
++ typedef gctINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#elif defined(WIN32)
++ typedef gctUINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE __stdcall
++#elif defined(__QNXNTO__)
++ typedef void * gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#endif
++
++typedef gctTHREADFUNCRESULT (gctTHREADFUNCTYPE * gctTHREADFUNC) (
++ gctTHREADFUNCPARAMETER ThreadParameter
++ );
++
++
++#if defined(gcvDEBUG)
++# undef gcvDEBUG
++#endif
++
++#define gcdFORCE_DEBUG 0
++#define gcdFORCE_MESSAGES 0
++
++
++#if DBG || defined(DEBUG) || defined(_DEBUG) || gcdFORCE_DEBUG
++# define gcvDEBUG 1
++#else
++# define gcvDEBUG 0
++#endif
++
++#define _gcmERROR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++
++#define gcmERROR_RETURN(func) _gcmERROR_RETURN(gcm, func)
++
++#define gcmLOG_LOCATION()
++
++#define gcmkIS_ERROR(status) (status < 0)
++
++#define gcmALIGNDOWN(n, align) \
++( \
++ (n) & ~((align) - 1) \
++)
++
++#define gcmIS_VALID_INDEX(Index, Array) \
++ (((gctUINT) (Index)) < gcmCOUNTOF(Array))
++
++
++#define gcmIS_NAN(x) \
++( \
++ ((* (gctUINT32_PTR) &(x)) & 0x7FFFFFFF) == 0x7FFFFFFF \
++)
++
++#define gcmLERP(v1, v2, w) \
++ ((v1) * (w) + (v2) * (1.0f - (w)))
++
++#define gcmINTERSECT(Start1, Start2, Length) \
++ (gcmABS((Start1) - (Start2)) < (Length))
++
++/*******************************************************************************
++**
++** gcmERR_GOTO
++**
++** Prints a message and terminates the current loop on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** Function
++** Function to evaluate.
++*/
++
++#define gcmERR_GOTO(Function) \
++ status = Function; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmERR_GOTO: status=%d @ line=%d in function %s.\n", \
++ status, __LINE__, __FUNCTION__ \
++ ); \
++ goto ErrorHandler; \
++ }
++
++#if gcvDEBUG || gcdFORCE_MESSAGES
++# define gcmVERIFY_BOOLEAN(Expression) \
++ gcmASSERT( \
++ ( (Expression) == gcvFALSE ) || \
++ ( (Expression) == gcvTRUE ) \
++ )
++#else
++# define gcmVERIFY_BOOLEAN(Expression)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDFIT
++**
++** Verify whether the value fits in the field.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmVERIFYFIELDFIT(reg, field, value) \
++ gcmASSERT( \
++ (value) <= gcmFIELDMAX(reg, field) \
++ )
++/*******************************************************************************
++**
++** gcmFIELDMAX
++**
++** Get field maximum value.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMAX(reg, field) \
++( \
++ (gctUINT32) \
++ ( \
++ (__gcmGETSIZE(reg##_##field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg##_##field))) \
++ ) \
++)
++
++
++/* ANSI C does not have the 'f' functions, define replacements here. */
++#define gcmSINF(x) ((gctFLOAT) sin(x))
++#define gcmCOSF(x) ((gctFLOAT) cos(x))
++#define gcmASINF(x) ((gctFLOAT) asin(x))
++#define gcmACOSF(x) ((gctFLOAT) acos(x))
++#define gcmSQRTF(x) ((gctFLOAT) sqrt(x))
++#define gcmFABSF(x) ((gctFLOAT) fabs(x))
++#define gcmFMODF(x, y) ((gctFLOAT) fmod((x), (y)))
++#define gcmCEILF(x) ((gctFLOAT) ceil(x))
++#define gcmFLOORF(x) ((gctFLOAT) floor(x))
++
++
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
++
++/* Integer constants. */
++#define gcvMAX_POS_INT ((gctINT) 0x7FFFFFFF)
++#define gcvMAX_NEG_INT ((gctINT) 0x80000000)
++
++/* Float constants. */
++#define gcvMAX_POS_FLOAT ((gctFLOAT) 3.4028235e+038)
++#define gcvMAX_NEG_FLOAT ((gctFLOAT) -3.4028235e+038)
++
++/******************************************************************************\
++***************************** Miscellaneous Macro ******************************
++\******************************************************************************/
++
++#define gcmKB2BYTES(Kilobyte) \
++( \
++ (Kilobyte) << 10 \
++)
++
++#define gcmMB2BYTES(Megabyte) \
++( \
++ (Megabyte) << 20 \
++)
++
++#define gcmMAT(Matrix, Row, Column) \
++( \
++ (Matrix) [(Row) * 3 + (Column)] \
++)
++
++#define gcmMAKE2CHAR(Char1, Char2) \
++( \
++ ((gctUINT16) (gctUINT8) (Char1) << 0) | \
++ ((gctUINT16) (gctUINT8) (Char2) << 8) \
++)
++
++#define gcmMAKE4CHAR(Char1, Char2, Char3, Char4) \
++( \
++ ((gctUINT32)(gctUINT8) (Char1) << 0) | \
++ ((gctUINT32)(gctUINT8) (Char2) << 8) | \
++ ((gctUINT32)(gctUINT8) (Char3) << 16) | \
++ ((gctUINT32)(gctUINT8) (Char4) << 24) \
++)
++
++/* some platforms need to fix the physical address for HW to access*/
++#define gcmFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++#define gcmkFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++/******************************************************************************\
++****************************** Kernel Debug Macro ******************************
++\******************************************************************************/
++
++/* Set signal to signaled state for specified process. */
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----------------------------- Semaphore Object -----------------------------*/
++
++/* Increment the value of a semaphore. */
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++/* Decrement the value of a semaphore (waiting might occur). */
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- Thread Object ------------------------------*/
++
++/* Start a thread. */
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ );
++
++/* Stop a thread. */
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++/* Verify whether the thread is still running. */
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++
++/* Construct a new gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ );
++
++/* Destroy an gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ );
++
++/* Allocate linear video memory. */
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++gceSTATUS
++gckOS_MapReservedMemoryToKernel(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctINT Bytes,
++ IN OUT gctPOINTER *Virtual
++ );
++
++gceSTATUS
++gckOS_UnmapReservedMemoryFromKernel(
++ IN gctPOINTER Virtual
++ );
++#endif
++
++/******************************************************************************\
++******************************* gckVGHARDWARE Object ******************************
++\******************************************************************************/
++
++/* Construct a new gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ );
++
++/* Destroy an gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures1
++ );
++
++/* Convert an API format. */
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ );
++
++/* Split a harwdare specific address into API stuff. */
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Align size to tile boundary. */
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Get idle register. */
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckVGHARDWARE_Flush(
++ IN gckVGHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ );
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++/******************************************************************************\
++*************************** Command Buffer Structures **************************
++\******************************************************************************/
++
++/* Vacant command buffer marker. */
++#define gcvVACANT_BUFFER ((gcsCOMPLETION_SIGNAL_PTR) (1))
++
++/* Command buffer header. */
++typedef struct _gcsCMDBUFFER * gcsCMDBUFFER_PTR;
++typedef struct _gcsCMDBUFFER
++{
++ /* Pointer to the completion signal. */
++ gcsCOMPLETION_SIGNAL_PTR completion;
++
++ /* The user sets this to the node of the container buffer whitin which
++ this particular command buffer resides. The kernel sets this to the
++ node of the internally allocated buffer. */
++ gctUINT64 node;
++
++ /* Command buffer hardware address. */
++ gctUINT32 address;
++
++ /* The offset of the buffer from the beginning of the header. */
++ gctUINT32 bufferOffset;
++
++ /* Size of the area allocated for the data portion of this particular
++ command buffer (headers and tail reserves are excluded). */
++ gctSIZE_T size;
++
++ /* Offset into the buffer [0..size]; reflects exactly how much data has
++ been put into the command buffer. */
++ gctUINT offset;
++
++ /* The number of command units in the buffer for the hardware to
++ execute. */
++ gctSIZE_T dataCount;
++
++ /* MANAGED BY : user HAL (gcoBUFFER object).
++ USED BY : user HAL (gcoBUFFER object).
++ Points to the immediate next allocated command buffer. */
++ gcsCMDBUFFER_PTR nextAllocated;
++
++ /* MANAGED BY : user layers (HAL and drivers).
++ USED BY : kernel HAL (gcoBUFFER object).
++ Points to the next subbuffer if any. A family of subbuffers are chained
++ together and are meant to be executed inseparably as a unit. Meaning
++ that context switching cannot occur while a chain of subbuffers is being
++ executed. */
++ gcsCMDBUFFER_PTR nextSubBuffer;
++}
++gcsCMDBUFFER;
++
++/* Command queue element. */
++typedef struct _gcsVGCMDQUEUE
++{
++ /* Pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Dynamic vs. static command buffer state. */
++ gctBOOL dynamic;
++}
++gcsVGCMDQUEUE;
++
++/* Context map entry. */
++typedef struct _gcsVGCONTEXT_MAP
++{
++ /* State index. */
++ gctUINT32 index;
++
++ /* New state value. */
++ gctUINT32 data;
++
++ /* Points to the next entry in the mod list. */
++ gcsVGCONTEXT_MAP_PTR next;
++}
++gcsVGCONTEXT_MAP;
++
++/* gcsVGCONTEXT structure that holds the current context. */
++typedef struct _gcsVGCONTEXT
++{
++ /* Context ID. */
++ gctUINT64 id;
++
++    /* State caching enable flag. */
++ gctBOOL stateCachingEnabled;
++
++ /* Current pipe. */
++ gctUINT32 currentPipe;
++
++ /* State map/mod buffer. */
++ gctSIZE_T mapFirst;
++ gctSIZE_T mapLast;
++#ifdef __QNXNTO__
++ gctSIZE_T mapContainerSize;
++#endif
++ gcsVGCONTEXT_MAP_PTR mapContainer;
++ gcsVGCONTEXT_MAP_PTR mapPrev;
++ gcsVGCONTEXT_MAP_PTR mapCurr;
++ gcsVGCONTEXT_MAP_PTR firstPrevMap;
++ gcsVGCONTEXT_MAP_PTR firstCurrMap;
++
++ /* Main context buffer. */
++ gcsCMDBUFFER_PTR header;
++ gctUINT32_PTR buffer;
++
++ /* Completion signal. */
++ gctHANDLE process;
++ gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsVGCONTEXT;
++
++/* User space task header. */
++typedef struct _gcsTASK * gcsTASK_PTR;
++typedef struct _gcsTASK
++{
++ /* Pointer to the next task for the same interrupt in user space. */
++ gcsTASK_PTR next;
++
++ /* Size of the task data that immediately follows the structure. */
++ gctUINT size;
++
++ /* Task data starts here. */
++ /* ... */
++}
++gcsTASK;
++
++/* User space task master table entry. */
++typedef struct _gcsTASK_MASTER_ENTRY * gcsTASK_MASTER_ENTRY_PTR;
++typedef struct _gcsTASK_MASTER_ENTRY
++{
++ /* Pointers to the head and to the tail of the task chain. */
++ gcsTASK_PTR head;
++ gcsTASK_PTR tail;
++}
++gcsTASK_MASTER_ENTRY;
++
++/* User space task master table entry. */
++typedef struct _gcsTASK_MASTER_TABLE
++{
++ /* Table with one entry per block. */
++ gcsTASK_MASTER_ENTRY table[gcvBLOCK_COUNT];
++
++    /* The total number of tasks scheduled. */
++ gctUINT count;
++
++ /* The total size of event data in bytes. */
++ gctUINT size;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsTASK_MASTER_TABLE;
++
++/******************************************************************************\
++***************************** gckVGINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVGINTERRUPT * gckVGINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckVGKERNEL Kernel
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ );
++
++#ifndef __QNXNTO__
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++#else
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ );
++
++#endif
++
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++typedef struct _gckVGCOMMAND * gckVGCOMMAND;
++
++/* Construct a new gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ );
++
++/* Destroy an gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ IN gckVGCOMMAND Command
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++/* Allocate a command queue. */
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ );
++
++/* Release memory held by the command queue. */
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Schedule the command queue for execution. */
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ );
++
++/******************************************************************************\
++********************************* gckVGMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckVGMMU * gckVGMMU;
++
++/* Construct a new gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckVGMMU * Mmu
++ );
++
++/* Destroy an gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++/* Flush MMU */
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ );
++
++#endif /* gcdENABLE_VG */
++
++#ifdef __cplusplus
++} /* extern "C" */
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,795 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifdef MODULE
++#include <linux/module.h>
++#endif
++#include <linux/init.h>
++#include <linux/debugfs.h>
++#include <linux/slab.h>
++#ifdef MODVERSIONS
++#include <linux/modversions.h>
++#endif
++#include <linux/stddef.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/mutex.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <linux/completion.h>
++#include "gc_hal_kernel_linux.h"
++
++/*
++ Prerequisite:
++
++ 1) Debugfs feature must be enabled in the kernel.
++ 1.a) You can enable this, in the compilation of the uImage, all you have to do is, In the "make menuconfig" part,
++ you have to enable the debugfs in the kernel hacking part of the menu.
++
++ HOW TO USE:
++ 1) insert the driver with the following option logFileSize, Ex: insmod galcore.ko ...... logFileSize=10240
++ This gives a circular buffer of 10 MB
++
++ 2)Usually after inserting the driver, the debug file system is mounted under /sys/kernel/debug/
++
++ 2.a)If the debugfs is not mounted, you must do "mount -t debugfs none /sys/kernel/debug"
++
++ 3) To read what is being printed in the debugfs file system:
++ Ex : cat /sys/kernel/debug/gpu/galcore_trace
++
++ 4)To write into the debug file system from user side :
++ Ex: echo "hello" > /sys/kernel/debug/gpu/galcore_trace
++
++ 5)To write into debugfs from kernel side, Use the function called gckDebugFileSystemPrint
++
++
++ USECASE Kernel Dump:
++
++ 1) Go to /hal/inc/gc_hal_options.h, and enable the following flags:
++ - # define gcdDUMP 1
++ - # define gcdDUMP_IN_KERNEL 1
++ - # define gcdDUMP_COMMAND 1
++
++ 2) Go to /hal/kernel/gc_hal_kernel_command.c and disable the following flag
++ -#define gcdSIMPLE_COMMAND_DUMP 0
++
++ 3) Compile the driver
++ 4) insmod it with the logFileSize option
++ 5) Run an application
++ 6) You can get the dump by cat /sys/kernel/debug/gpu/galcore_trace
++
++ */
++
++/**/
++typedef va_list gctDBGARGS ;
++#define gcmkARGS_START(argument, pointer) va_start(argument, pointer)
++#define gcmkARGS_END(argument) va_end(argument)
++
++#define gcmkDBGFSPRINT(ArgumentSize, Message) \
++ { \
++ gctDBGARGS __arguments__; \
++ gcmkARGS_START(__arguments__, Message); \
++ _DebugFSPrint(ArgumentSize, Message, __arguments__);\
++ gcmkARGS_END(__arguments__); \
++ }
++
++/*Debug File System Node Struct*/
++struct _gcsDebugFileSystemNode
++{
++ /*wait queues for read and write operations*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ wait_queue_head_t read_q , write_q ;
++#else
++ struct wait_queue *read_q , *write_q ;
++#endif
++ struct dentry *parent ; /*parent directory*/
++ struct dentry *filen ; /*filename*/
++ struct semaphore sem ; /* mutual exclusion semaphore */
++ char *data ; /* The circular buffer data */
++ int size ; /* Size of the buffer pointed to by 'data' */
++ int refcount ; /* Files that have this buffer open */
++ int read_point ; /* Offset in circ. buffer of oldest data */
++ int write_point ; /* Offset in circ. buffer of newest data */
++ int offset ; /* Byte number of read_point in the stream */
++ struct _gcsDebugFileSystemNode *next ;
++} ;
++
++/* amount of data in the queue */
++#define gcmkNODE_QLEN(node) ( (node)->write_point >= (node)->read_point ? \
++ (node)->write_point - (node)->read_point : \
++ (node)->size - (node)->read_point + (node)->write_point)
++
++/* byte number of the last byte in the queue */
++#define gcmkNODE_FIRST_EMPTY_BYTE(node) ((node)->offset + gcmkNODE_QLEN(node))
++
++/*Synchronization primitives*/
++#define gcmkNODE_READQ(node) (&((node)->read_q))
++#define gcmkNODE_WRITEQ(node) (&((node)->write_q))
++#define gcmkNODE_SEM(node) (&((node)->sem))
++
++/*Utilities*/
++#define gcmkMIN(x, y) ((x) < (y) ? (x) : y)
++
++/*Debug File System Struct*/
++typedef struct _gcsDebugFileSystem
++{
++ gcsDebugFileSystemNode* linkedlist ;
++ gcsDebugFileSystemNode* currentNode ;
++ int isInited ;
++} gcsDebugFileSystem ;
++
++
++/*debug file system*/
++static gcsDebugFileSystem gc_dbgfs ;
++
++
++
++/*******************************************************************************
++ **
++ ** READ & WRITE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _ReadFromNode
++ **
++ ** 1) reading bytes out of a circular buffer with wraparound.
++ ** 2)returns caddr_t, pointer to data read, which the caller must free.
++ ** 3) length is (a pointer to) the number of bytes to be read, which will be set by this function to
++ ** be the number of bytes actually returned
++ **
++ *******************************************************************************/
++static caddr_t
++_ReadFromNode (
++ gcsDebugFileSystemNode* Node ,
++ size_t *Length ,
++ loff_t *Offset
++ )
++{
++ caddr_t retval ;
++ int bytes_copied = 0 , n , start_point , remaining ;
++
++ /* is the user trying to read data that has already scrolled off? */
++ if ( *Offset < Node->offset )
++ {
++ *Offset = Node->offset ;
++ }
++
++ /* is the user trying to read past EOF? */
++ if ( *Offset >= gcmkNODE_FIRST_EMPTY_BYTE ( Node ) )
++ {
++ return NULL ;
++ }
++
++ /* find the smaller of the total bytes we have available and what
++ * the user is asking for */
++
++ *Length = gcmkMIN ( *Length , gcmkNODE_FIRST_EMPTY_BYTE ( Node ) - *Offset ) ;
++
++ remaining = * Length ;
++
++ /* figure out where to start based on user's Offset */
++ start_point = Node->read_point + ( *Offset - Node->offset ) ;
++
++ start_point = start_point % Node->size ;
++
++ /* allocate memory to return */
++ if ( ( retval = kmalloc ( sizeof (char ) * remaining , GFP_KERNEL ) ) == NULL )
++ return NULL ;
++
++ /* copy the (possibly noncontiguous) data to our buffer */
++ while ( remaining )
++ {
++ n = gcmkMIN ( remaining , Node->size - start_point ) ;
++ memcpy ( retval + bytes_copied , Node->data + start_point , n ) ;
++ bytes_copied += n ;
++ remaining -= n ;
++ start_point = ( start_point + n ) % Node->size ;
++ }
++
++ /* advance user's file pointer */
++ *Offset += * Length ;
++
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ ** _WriteToNode
++ **
++ ** 1) writes to a circular buffer with wraparound.
++ ** 2)in case of an overflow, it overwrites the oldest unread data.
++ **
++ *********************************************************************************/
++static void
++_WriteToNode (
++ gcsDebugFileSystemNode* Node ,
++ caddr_t Buf ,
++ int Length
++ )
++{
++ int bytes_copied = 0 ;
++ int overflow = 0 ;
++ int n ;
++
++ if ( Length + gcmkNODE_QLEN ( Node ) >= ( Node->size - 1 ) )
++ {
++ overflow = 1 ;
++
++ /* in case of overflow, figure out where the new buffer will
++ * begin. we start by figuring out where the current buffer ENDS:
++ * node->parent->offset + gcmkNODE_QLEN. we then advance the end-offset
++ * by the Length of the current write, and work backwards to
++ * figure out what the oldest unoverwritten data will be (i.e.,
++ * size of the buffer). */
++ Node->offset = Node->offset + gcmkNODE_QLEN ( Node ) + Length
++ - Node->size + 1 ;
++ }
++
++ while ( Length )
++ {
++ /* how many contiguous bytes are available from the write point to
++ * the end of the circular buffer? */
++ n = gcmkMIN ( Length , Node->size - Node->write_point ) ;
++ memcpy ( Node->data + Node->write_point , Buf + bytes_copied , n ) ;
++ bytes_copied += n ;
++ Length -= n ;
++ Node->write_point = ( Node->write_point + n ) % Node->size ;
++ }
++
++ /* if there is an overflow, reset the read point to read whatever is
++ * the oldest data that we have, that has not yet been
++ * overwritten. */
++ if ( overflow )
++ {
++ Node->read_point = ( Node->write_point + 1 ) % Node->size ;
++ }
++}
++
++
++/*******************************************************************************
++ **
++ ** PRINTING UTILITY (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _GetArgumentSize
++ **
++ **
++ *******************************************************************************/
++static gctINT
++_GetArgumentSize (
++ IN gctCONST_STRING Message
++ )
++{
++ gctINT i , count ;
++
++ for ( i = 0 , count = 0 ; Message[i] ; i += 1 )
++ {
++ if ( Message[i] == '%' )
++ {
++ count += 1 ;
++ }
++ }
++ return count * sizeof (unsigned int ) ;
++}
++
++/*******************************************************************************
++ **
++ ** _AppendString
++ **
++ **
++ *******************************************************************************/
++static ssize_t
++_AppendString (
++ IN gcsDebugFileSystemNode* Node ,
++ IN gctCONST_STRING String ,
++ IN int Length
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( Length , Node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ return - ENOMEM ;
++
++ /* copy into our temp buffer */
++ memcpy ( message , String , n ) ;
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( Node , message , n ) ;
++ kfree ( message ) ;
++ return n ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSPrint
++ **
++ **
++ *******************************************************************************/
++static void
++_DebugFSPrint (
++ IN unsigned int ArgumentSize ,
++ IN const char* Message ,
++ IN gctDBGARGS Arguments
++
++ )
++{
++ char buffer[MAX_LINE_SIZE] ;
++ int len ;
++ down ( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) ;
++ len = vsnprintf ( buffer , sizeof (buffer ) , Message , *( va_list * ) & Arguments ) ;
++ buffer[len] = '\0' ;
++
++ /* Add end-of-line if missing. */
++ if ( buffer[len - 1] != '\n' )
++ {
++ buffer[len ++] = '\n' ;
++ buffer[len] = '\0' ;
++ }
++ _AppendString ( gc_dbgfs.currentNode , buffer , len ) ;
++ up ( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) ;
++ wake_up_interruptible ( gcmkNODE_READQ ( gc_dbgfs.currentNode ) ) ; /* blocked in read*/
++}
++
++/*******************************************************************************
++ **
++ ** LINUX SYSTEM FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** find the vivlog structure associated with an inode.
++ ** returns a pointer to the structure if found, NULL if not found
++ **
++ *******************************************************************************/
++static gcsDebugFileSystemNode*
++_GetNodeInfo (
++ IN struct inode *Inode
++ )
++{
++ gcsDebugFileSystemNode* node ;
++
++ if ( Inode == NULL )
++ return NULL ;
++
++ for ( node = gc_dbgfs.linkedlist ; node != NULL ; node = node->next )
++ if ( node->filen->d_inode->i_ino == Inode->i_ino )
++ return node ;
++
++ return NULL ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSRead
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSRead (
++ struct file *file ,
++ char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ int retval ;
++ caddr_t data_to_return ;
++ gcsDebugFileSystemNode* node ;
++ /* get the metadata about this emlog */
++ if ( ( node = _GetNodeInfo ( file->f_dentry->d_inode ) ) == NULL )
++ {
++ printk ( "debugfs_read: record not found\n" ) ;
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* wait until there's data available (unless we do nonblocking reads) */
++ while ( *offset >= gcmkNODE_FIRST_EMPTY_BYTE ( node ) )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ if ( file->f_flags & O_NONBLOCK )
++ {
++ return - EAGAIN ;
++ }
++ if ( wait_event_interruptible ( ( *( gcmkNODE_READQ ( node ) ) ) , ( *offset < gcmkNODE_FIRST_EMPTY_BYTE ( node ) ) ) )
++ {
++ return - ERESTARTSYS ; /* signal: tell the fs layer to handle it */
++ }
++ /* otherwise loop, but first reacquire the lock */
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++ }
++ data_to_return = _ReadFromNode ( node , &length , offset ) ;
++ if ( data_to_return == NULL )
++ {
++ retval = 0 ;
++ goto unlock ;
++ }
++ if ( copy_to_user ( buffer , data_to_return , length ) > 0 )
++ {
++ retval = - EFAULT ;
++ }
++ else
++ {
++ retval = length ;
++ }
++ kfree ( data_to_return ) ;
++unlock:
++ up ( gcmkNODE_SEM ( node ) ) ;
++ wake_up_interruptible ( gcmkNODE_WRITEQ ( node ) ) ;
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ **_DebugFSWrite
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSWrite (
++ struct file *file ,
++ const char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++ gcsDebugFileSystemNode*node ;
++
++ /* get the metadata about this log */
++ if ( ( node = _GetNodeInfo ( file->f_dentry->d_inode ) ) == NULL )
++ {
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( length , node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ return - ENOMEM ;
++ }
++
++ /* copy into our temp buffer */
++ if ( copy_from_user ( message , buffer , n ) > 0 )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ kfree ( message ) ;
++ return - EFAULT ;
++ }
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( node , message , n ) ;
++
++ kfree ( message ) ;
++ up ( gcmkNODE_SEM ( node ) ) ;
++
++ /* wake up any readers that might be waiting for the data. we call
++ * schedule in the vague hope that a reader will run before the
++ * writer's next write, to avoid losing data. */
++ wake_up_interruptible ( gcmkNODE_READQ ( node ) ) ;
++
++ return n ;
++}
++
++/*******************************************************************************
++ **
++ ** File Operations Table
++ **
++ *******************************************************************************/
++static const struct file_operations debugfs_operations = {
++ .owner = THIS_MODULE ,
++ .read = _DebugFSRead ,
++ .write = _DebugFSWrite ,
++} ;
++
++/*******************************************************************************
++ **
++ ** INTERFACE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemIsEnabled
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++
++gctINT
++gckDebugFileSystemIsEnabled ( void )
++{
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemInitialize
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDebugFileSystemInitialize ( void )
++{
++ if ( ! gc_dbgfs.isInited )
++ {
++ gc_dbgfs.linkedlist = gcvNULL ;
++ gc_dbgfs.currentNode = gcvNULL ;
++ gc_dbgfs.isInited = 1 ;
++ }
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemTerminate
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDebugFileSystemTerminate ( void )
++{
++ gcsDebugFileSystemNode * next = gcvNULL ;
++ gcsDebugFileSystemNode * temp = gcvNULL ;
++ if ( gc_dbgfs.isInited )
++ {
++ temp = gc_dbgfs.linkedlist ;
++ while ( temp != gcvNULL )
++ {
++ next = temp->next ;
++ gckDebugFileSystemFreeNode ( temp ) ;
++ kfree ( temp ) ;
++ temp = next ;
++ }
++ gc_dbgfs.isInited = 0 ;
++ }
++ return 0 ;
++}
++
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemCreateNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ ** gckDebugFileSystemFreeNode * Device
++ ** Pointer to a variable receiving the gcsDebugFileSystemNode object pointer on
++ ** success.
++ *********************************************************************************/
++
++gctINT
++gckDebugFileSystemCreateNode (
++ IN gctINT SizeInKB ,
++ IN gctCONST_STRING ParentName ,
++ IN gctCONST_STRING NodeName ,
++ OUT gcsDebugFileSystemNode **Node
++ )
++{
++ gcsDebugFileSystemNode*node ;
++ /* allocate space for our metadata and initialize it */
++ if ( ( node = kmalloc ( sizeof (gcsDebugFileSystemNode ) , GFP_KERNEL ) ) == NULL )
++ goto struct_malloc_failed ;
++
++ /*Zero it out*/
++ memset ( node , 0 , sizeof (gcsDebugFileSystemNode ) ) ;
++
++ /*Init the sync primitives*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ init_waitqueue_head ( gcmkNODE_READQ ( node ) ) ;
++#else
++ init_waitqueue ( gcmkNODE_READQ ( node ) ) ;
++#endif
++
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ init_waitqueue_head ( gcmkNODE_WRITEQ ( node ) ) ;
++#else
++ init_waitqueue ( gcmkNODE_WRITEQ ( node ) ) ;
++#endif
++ sema_init ( gcmkNODE_SEM ( node ) , 1 ) ;
++ /*End the sync primitives*/
++
++
++ /* figure out how much of a buffer this should be and allocate the buffer */
++ node->size = 1024 * SizeInKB ;
++ if ( ( node->data = ( char * ) vmalloc ( sizeof (char ) * node->size ) ) == NULL )
++ goto data_malloc_failed ;
++
++ /*creating the debug file system*/
++ node->parent = debugfs_create_dir ( ParentName , NULL ) ;
++
++ /*creating the file*/
++ node->filen = debugfs_create_file ( NodeName , S_IRUGO | S_IWUSR , node->parent , NULL ,
++ &debugfs_operations ) ;
++
++ /* add it to our linked list */
++ node->next = gc_dbgfs.linkedlist ;
++ gc_dbgfs.linkedlist = node ;
++
++ /* pass the struct back */
++ *Node = node ;
++ return 0 ;
++
++ vfree ( node->data ) ;
++data_malloc_failed:
++ kfree ( node ) ;
++struct_malloc_failed:
++ return - ENOMEM ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemFreeNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemFreeNode (
++ IN gcsDebugFileSystemNode * Node
++ )
++{
++
++ gcsDebugFileSystemNode **ptr ;
++
++ if ( Node == NULL )
++ {
++ printk ( "null passed to free_vinfo\n" ) ;
++ return ;
++ }
++
++ down ( gcmkNODE_SEM ( Node ) ) ;
++ /*free data*/
++ vfree ( Node->data ) ;
++
++ /*Close Debug fs*/
++ if ( Node->filen )
++ {
++ debugfs_remove ( Node->filen ) ;
++ }
++ if ( Node->parent )
++ {
++ debugfs_remove ( Node->parent ) ;
++ }
++
++ /* now delete the node from the linked list */
++ ptr = & ( gc_dbgfs.linkedlist ) ;
++ while ( *ptr != Node )
++ {
++ if ( ! *ptr )
++ {
++ printk ( "corrupt info list!\n" ) ;
++ break ;
++ }
++ else
++ ptr = & ( ( **ptr ).next ) ;
++ }
++ *ptr = Node->next ;
++ up ( gcmkNODE_SEM ( Node ) ) ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemSetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemSetCurrentNode (
++ IN gcsDebugFileSystemNode * Node
++ )
++{
++ gc_dbgfs.currentNode = Node ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemGetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemGetCurrentNode (
++ OUT gcsDebugFileSystemNode ** Node
++ )
++{
++ *Node = gc_dbgfs.currentNode ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemPrint
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemPrint (
++ IN gctCONST_STRING Message ,
++ ...
++ )
++{
++ gcmkDBGFSPRINT ( _GetArgumentSize ( Message ) , Message ) ;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,84 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <stdarg.h>
++
++#ifndef __gc_hal_kernel_debugfs_h_
++#define __gc_hal_kernel_debugfs_h_
++
++ #define MAX_LINE_SIZE 768 /* Max bytes for a line of debug info */
++
++
++ typedef struct _gcsDebugFileSystemNode gcsDebugFileSystemNode ;
++
++
++/*******************************************************************************
++ **
++ ** System Related
++ **
++ *******************************************************************************/
++
++gctINT gckDebugFileSystemIsEnabled(void);
++
++gctINT gckDebugFileSystemInitialize(void);
++
++gctINT gckDebugFileSystemTerminate(void);
++
++
++/*******************************************************************************
++ **
++ ** Node Related
++ **
++ *******************************************************************************/
++
++gctINT gckDebugFileSystemCreateNode(
++ IN gctINT SizeInKB,
++ IN gctCONST_STRING ParentName ,
++ IN gctCONST_STRING NodeName,
++ OUT gcsDebugFileSystemNode **Node
++ );
++
++
++void gckDebugFileSystemFreeNode(
++ IN gcsDebugFileSystemNode * Node
++ );
++
++
++
++void gckDebugFileSystemSetCurrentNode(
++ IN gcsDebugFileSystemNode * Node
++ );
++
++
++
++void gckDebugFileSystemGetCurrentNode(
++ OUT gcsDebugFileSystemNode ** Node
++ );
++
++
++void gckDebugFileSystemPrint(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#endif
++
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debug.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debug.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debug.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debug.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,102 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_debug_h_
++#define __gc_hal_kernel_debug_h_
++
++#include <gc_hal_kernel_linux.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <stdarg.h>
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** OS-dependent Macros *****************************
++\******************************************************************************/
++
++typedef va_list gctARGUMENTS;
++
++#define gcmkARGUMENTS_START(Arguments, Pointer) \
++ va_start(Arguments, Pointer)
++
++#define gcmkARGUMENTS_END(Arguments) \
++ va_end(Arguments)
++
++#define gcmkDECLARE_LOCK(__spinLock__) \
++ static DEFINE_SPINLOCK(__spinLock__);
++
++#define gcmkLOCKSECTION(__spinLock__) \
++ spin_lock(&__spinLock__)
++
++#define gcmkUNLOCKSECTION(__spinLock__) \
++ spin_unlock(&__spinLock__)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETPROCESSID() \
++ task_tgid_vnr(current)
++#else
++# define gcmkGETPROCESSID() \
++ current->tgid
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETTHREADID() \
++ task_pid_vnr(current)
++#else
++# define gcmkGETTHREADID() \
++ current->pid
++#endif
++
++#define gcmkOUTPUT_STRING(String) \
++ if(gckDebugFileSystemIsEnabled()) \
++ gckDebugFileSystemPrint(String);\
++ else\
++ printk(String); \
++ touch_softlockup_watchdog()
++
++
++#define gcmkSPRINTF(Destination, Size, Message, Value) \
++ snprintf(Destination, Size, Message, Value)
++
++#define gcmkSPRINTF2(Destination, Size, Message, Value1, Value2) \
++ snprintf(Destination, Size, Message, Value1, Value2)
++
++#define gcmkSPRINTF3(Destination, Size, Message, Value1, Value2, Value3) \
++ snprintf(Destination, Size, Message, Value1, Value2, Value3)
++
++#define gcmkVSPRINTF(Destination, Size, Message, Arguments) \
++ vsnprintf(Destination, Size, Message, *(va_list *) &Arguments)
++
++#define gcmkSTRCAT(Destination, Size, String) \
++ strncat(Destination, String, Size)
++
++/* If not zero, forces data alignment in the variable argument list
++ by its individual size. */
++#define gcdALIGNBYSIZE 1
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_debug_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1676 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/slab.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/pm_runtime.h>
++
++#define _GC_OBJ_ZONE gcvZONE_DEVICE
++
++#define DEBUG_FILE "galcore_trace"
++#define PARENT_FILE "gpu"
++
++
++#ifdef FLAREON
++ static struct dove_gpio_irq_handler gc500_handle;
++#endif
++
++#define gcmIS_CORE_PRESENT(Device, Core) (Device->irqLines[Core] > 0)
++
++/******************************************************************************\
++*************************** Memory Allocation Wrappers *************************
++\******************************************************************************/
++
++static gceSTATUS
++_AllocateMemory(
++ IN gckGALDEVICE Device,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER *Logical,
++ OUT gctPHYS_ADDR *Physical,
++ OUT gctUINT32 *PhysAddr
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Bytes=%lu", Device, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++ gcmkVERIFY_ARGUMENT(Physical != NULL);
++ gcmkVERIFY_ARGUMENT(PhysAddr != NULL);
++
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Device->os, gcvFALSE, &Bytes, Physical, Logical
++ ));
++
++ *PhysAddr = ((PLINUX_MDL)*Physical)->dmaHandle - Device->baseAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG(
++ "*Logical=0x%x *Physical=0x%x *PhysAddr=0x%08x",
++ *Logical, *Physical, *PhysAddr
++ );
++
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_FreeMemory(
++ IN gckGALDEVICE Device,
++ IN gctPOINTER Logical,
++ IN gctPHYS_ADDR Physical)
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Logical=0x%x Physical=0x%x",
++ Device, Logical, Physical);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ status = gckOS_FreeContiguous(
++ Device->os, Physical, Logical,
++ ((PLINUX_MDL) Physical)->numPages * PAGE_SIZE
++ );
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++
++/******************************************************************************\
++******************************* Interrupt Handler ******************************
++\******************************************************************************/
++static irqreturn_t isrRoutine(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->dataReadys[gcvCORE_MAJOR] = gcvTRUE;
++
++ up(&device->semas[gcvCORE_MAJOR]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_MAJOR]);
++ if (down); /*To make gcc 4.6 happye*/
++ device->dataReadys[gcvCORE_MAJOR] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutine2D(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_2D], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->dataReadys[gcvCORE_2D] = gcvTRUE;
++
++ up(&device->semas[gcvCORE_2D]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine2D(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_2D]);
++ if (down); /*To make gcc 4.6 happye*/
++ device->dataReadys[gcvCORE_2D] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_2D], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutineVG(int irq, void *ctxt)
++{
++#if gcdENABLE_VG
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Serve the interrupt. */
++ status = gckVGINTERRUPT_Enque(device->kernels[gcvCORE_VG]->vg->interrupt);
++
++ /* Determine the return value. */
++ return (status == gcvSTATUS_NOT_OUR_INTERRUPT)
++ ? IRQ_RETVAL(0)
++ : IRQ_RETVAL(1);
++#else
++ return IRQ_NONE;
++#endif
++}
++
++static int threadRoutineVG(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_VG]);
++ if (down); /*To make gcc 4.6 happye*/
++ device->dataReadys[gcvCORE_VG] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_VG], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
++
++/******************************************************************************\
++******************************* gckGALDEVICE Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Construct
++**
++** Constructor.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gckGALDEVICE * Device
++** Pointer to a variable receiving the gckGALDEVICE object pointer on
++** success.
++*/
++gceSTATUS
++gckGALDEVICE_Construct(
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN struct device *pdev,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ OUT gckGALDEVICE *Device
++ )
++{
++ gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
++ gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
++ gctUINT32 horizontalTileSize, verticalTileSize;
++ struct resource* mem_region;
++ gctUINT32 physAddr;
++ gctUINT32 physical;
++ gckGALDEVICE device;
++ gceSTATUS status;
++ gctINT32 i;
++ gceHARDWARE_TYPE type;
++ gckDB sharedDB = gcvNULL;
++ gckKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
++ "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
++ "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
++ "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
++ "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
++ IrqLine, RegisterMemBase, RegisterMemSize,
++ IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
++ IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
++ ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
++ PhysBaseAddr, PhysSize, Signal);
++
++ /* Allocate device structure. */
++ device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL);
++
++ if (!device)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ memset(device, 0, sizeof(struct _gckGALDEVICE));
++
++ device->dbgnode = gcvNULL;
++ if(LogFileSize != 0)
++ {
++ if(gckDebugFileSystemCreateNode(LogFileSize,PARENT_FILE,DEBUG_FILE,&(device->dbgnode)) != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the debug file system %s/%s \n",
++ __FUNCTION__, __LINE__,
++ PARENT_FILE, DEBUG_FILE
++ );
++ }
++ else
++ {
++ /*Everything is OK*/
++ gckDebugFileSystemSetCurrentNode(device->dbgnode);
++ }
++ }
++#ifdef CONFIG_PM
++ /*Init runtime pm for gpu*/
++ pm_runtime_enable(pdev);
++ device->pmdev = pdev;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ /*get gpu regulator*/
++ device->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ device->gpu_regulator = devm_regulator_get(pdev, "pu");
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if (IS_ERR(device->gpu_regulator)) {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to get gpu regulator %s/%s \n",
++ __FUNCTION__, __LINE__,
++ PARENT_FILE, DEBUG_FILE);
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++#endif
++ /*Initialize the clock structure*/
++ if (IrqLine != -1) {
++ device->clk_3d_core = clk_get(pdev, "gpu3d_clk");
++ if (!IS_ERR(device->clk_3d_core)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (cpu_is_mx6q()) {
++ device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(device->clk_3d_shader)) {
++ IrqLine = -1;
++ clk_put(device->clk_3d_core);
++ device->clk_3d_core = NULL;
++ device->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++ }
++#else
++ device->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
++ device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(device->clk_3d_shader)) {
++ IrqLine = -1;
++ clk_put(device->clk_3d_core);
++ device->clk_3d_core = NULL;
++ device->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++#endif
++ } else {
++ IrqLine = -1;
++ device->clk_3d_core = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
++ }
++ }
++ if ((IrqLine2D != -1) || (IrqLineVG != -1)) {
++ device->clk_2d_core = clk_get(pdev, "gpu2d_clk");
++ if (IS_ERR(device->clk_2d_core)) {
++ IrqLine2D = -1;
++ IrqLineVG = -1;
++ device->clk_2d_core = NULL;
++ gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
++ } else {
++ if (IrqLine2D != -1) {
++ device->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
++ if (IS_ERR(device->clk_2d_axi)) {
++ device->clk_2d_axi = NULL;
++ IrqLine2D = -1;
++ gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
++ }
++ }
++ if (IrqLineVG != -1) {
++ device->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
++ if (IS_ERR(device->clk_vg_axi)) {
++ IrqLineVG = -1;
++ device->clk_vg_axi = NULL;
++ gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
++ }
++ }
++ }
++ }
++
++ if (IrqLine != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase;
++ device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize;
++ }
++
++ if (IrqLine2D != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_2D] = RegisterMemBase2D;
++ device->requestedRegisterMemSizes[gcvCORE_2D] = RegisterMemSize2D;
++ }
++
++ if (IrqLineVG != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_VG] = RegisterMemBaseVG;
++ device->requestedRegisterMemSizes[gcvCORE_VG] = RegisterMemSizeVG;
++ }
++
++ device->requestedContiguousBase = 0;
++ device->requestedContiguousSize = 0;
++
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ physical = device->requestedRegisterMemBases[i];
++
++ /* Set up register memory region. */
++ if (physical != 0)
++ {
++ mem_region = request_mem_region(
++ physical, device->requestedRegisterMemSizes[i], "galcore register region"
++ );
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->registerBases[i] = (gctPOINTER) ioremap_nocache(
++ physical, device->requestedRegisterMemSizes[i]);
++
++ if (device->registerBases[i] == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ physical += device->requestedRegisterMemSizes[i];
++ }
++ else
++ {
++ device->registerBases[i] = gcvNULL;
++ }
++ }
++
++ /* Set the base address */
++ device->baseAddress = PhysBaseAddr;
++
++ /* Construct the gckOS object. */
++ gcmkONERROR(gckOS_Construct(device, &device->os));
++
++ if (IrqLine != -1)
++ {
++ /* Construct the gckKERNEL object. */
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_MAJOR, device,
++ gcvNULL, &device->kernels[gcvCORE_MAJOR]));
++
++ sharedDB = device->kernels[gcvCORE_MAJOR]->db;
++
++ /* Initialize core mapping */
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_MAJOR;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_MAJOR]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Enable_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Disable_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(
++ device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetGpuProfiler(
++ device->kernels[gcvCORE_MAJOR]->hardware, GpuProfiler
++ ));
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_MAJOR] = gcvNULL;
++ }
++
++ if (IrqLine2D != -1)
++ {
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_2D, device,
++ sharedDB, &device->kernels[gcvCORE_2D]));
++
++ if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;
++
++ /* Verify the hardware type */
++ gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));
++
++ if (type != gcvHARDWARE_2D)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unexpected hardware type: %d\n",
++ __FUNCTION__, __LINE__,
++ type
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_2D;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_2D]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Enable_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Disable_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_2D]->hardware, PowerManagement
++ ));
++
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_2D] = gcvNULL;
++ }
++
++ if (IrqLineVG != -1)
++ {
++#if gcdENABLE_VG
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_VG, device,
++ sharedDB, &device->kernels[gcvCORE_VG]));
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL
++ && device->kernels[gcvCORE_2D] == gcvNULL
++ )
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_VG;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
++ }
++
++
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_VG]->vg->hardware,
++ PowerManagement
++ ));
++
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_VG] = gcvNULL;
++ }
++
++ /* Initialize the ISR. */
++ device->irqLines[gcvCORE_MAJOR] = IrqLine;
++ device->irqLines[gcvCORE_2D] = IrqLine2D;
++ device->irqLines[gcvCORE_VG] = IrqLineVG;
++
++ /* Initialize the kernel thread semaphores. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
++ }
++
++ device->signal = Signal;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL) break;
++ }
++
++ if (i == gcdMAX_GPU_COUNT)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
++ device->kernels[i]->vg->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++ /* query the amount of video memory */
++ gcmkONERROR(gckVGHARDWARE_QueryMemory(
++ device->kernels[i]->vg->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++ else
++#endif
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckHARDWARE_QuerySystemMemory(
++ device->kernels[i]->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++
++ /* query the amount of video memory */
++ gcmkONERROR(gckHARDWARE_QueryMemory(
++ device->kernels[i]->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++
++
++ /* Grab the first availiable kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->irqLines[i] != -1)
++ {
++ kernel = device->kernels[i];
++ break;
++ }
++ }
++
++ /* Set up the internal memory region. */
++ if (device->internalSize > 0)
++ {
++ status = gckVIDMEM_Construct(
++ device->os,
++ internalBaseAddress, device->internalSize, internalAlignment,
++ 0, &device->internalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable internal heap. */
++ device->internalSize = 0;
++ }
++ else
++ {
++ /* Map internal memory. */
++ device->internalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->internalSize);
++
++ if (device->internalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical);
++ physical += device->internalSize;
++ }
++ }
++
++ if (device->externalSize > 0)
++ {
++ /* create the external memory heap */
++ status = gckVIDMEM_Construct(
++ device->os,
++ externalBaseAddress, device->externalSize, externalAlignment,
++ 0, &device->externalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable internal heap. */
++ device->externalSize = 0;
++ }
++ else
++ {
++ /* Map external memory. */
++ device->externalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->externalSize);
++
++ if (device->externalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical);
++ physical += device->externalSize;
++ }
++ }
++
++ /* set up the contiguous memory */
++ device->contiguousSize = ContiguousSize;
++
++ if (ContiguousSize > 0)
++ {
++ if (ContiguousBase == 0)
++ {
++ while (device->contiguousSize > 0)
++ {
++ /* Allocate contiguous memory. */
++ status = _AllocateMemory(
++ device,
++ device->contiguousSize,
++ &device->contiguousBase,
++ &device->contiguousPhysical,
++ &physAddr
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical);
++ status = gckVIDMEM_Construct(
++ device->os,
++ physAddr | device->systemMemoryBaseAddress,
++ device->contiguousSize,
++ 64,
++ BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++
++ gcmkONERROR(_FreeMemory(
++ device,
++ device->contiguousBase,
++ device->contiguousPhysical
++ ));
++
++ gcmRELEASE_NAME(device->contiguousPhysicalName);
++ device->contiguousBase = gcvNULL;
++ device->contiguousPhysical = gcvNULL;
++ }
++
++ if (device->contiguousSize <= (4 << 20))
++ {
++ device->contiguousSize = 0;
++ }
++ else
++ {
++ device->contiguousSize -= (4 << 20);
++ }
++ }
++ }
++ else
++ {
++ /* Create the contiguous memory heap. */
++ status = gckVIDMEM_Construct(
++ device->os,
++ ContiguousBase | device->systemMemoryBaseAddress,
++ ContiguousSize,
++ 64, BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable contiguous memory pool. */
++ device->contiguousVidMem = gcvNULL;
++ device->contiguousSize = 0;
++ }
++ else
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++ mem_region = request_mem_region(
++ ContiguousBase, ContiguousSize, "galcore managed memory"
++ );
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ ContiguousSize, ContiguousBase
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#endif
++
++ device->requestedContiguousBase = ContiguousBase;
++ device->requestedContiguousSize = ContiguousSize;
++
++#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (gcmIS_CORE_PRESENT(device, gcvCORE_VG))
++ {
++ device->contiguousBase
++#if gcdPAGED_MEMORY_CACHEABLE
++ = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize);
++#else
++ = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize);
++#endif
++ if (device->contiguousBase == gcvNULL)
++ {
++ device->contiguousVidMem = gcvNULL;
++ device->contiguousSize = 0;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++#endif
++
++ device->contiguousPhysical = gcvNULL;
++ device->contiguousPhysicalName = 0;
++ device->contiguousSize = ContiguousSize;
++ device->contiguousMapped = gcvTRUE;
++ }
++ }
++ }
++
++ /* Return pointer to the device. */
++ * Device = device;
++
++ gcmkFOOTER_ARG("*Device=0x%x", * Device);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Destroy
++**
++** Class destructor.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Destroy(
++ gckGALDEVICE Device)
++{
++ gctINT i;
++ gceSTATUS status = gcvSTATUS_OK;
++ gckKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ if (Device != gcvNULL)
++ {
++ /* Grab the first availiable kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->irqLines[i] != -1)
++ {
++ kernel = Device->kernels[i];
++ break;
++ }
++ }
++ if (Device->internalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->internalPhysicalName);
++ Device->internalPhysicalName = 0;
++ }
++ if (Device->externalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->externalPhysicalName);
++ Device->externalPhysicalName = 0;
++ }
++ if (Device->contiguousPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->contiguousPhysicalName);
++ Device->contiguousPhysicalName = 0;
++ }
++
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->kernels[i] != gcvNULL)
++ {
++ /* Destroy the gckKERNEL object. */
++ gcmkVERIFY_OK(gckKERNEL_Destroy(Device->kernels[i]));
++ Device->kernels[i] = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->internalLogical != gcvNULL)
++ {
++ /* Unmap the internal memory. */
++ iounmap(Device->internalLogical);
++ Device->internalLogical = gcvNULL;
++ }
++
++ if (Device->internalVidMem != gcvNULL)
++ {
++ /* Destroy the internal heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->internalVidMem));
++ Device->internalVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->externalLogical != gcvNULL)
++ {
++ /* Unmap the external memory. */
++ iounmap(Device->externalLogical);
++ Device->externalLogical = gcvNULL;
++ }
++
++ if (Device->externalVidMem != gcvNULL)
++ {
++ /* destroy the external heap */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->externalVidMem));
++ Device->externalVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->contiguousBase != gcvNULL)
++ {
++ if (Device->contiguousMapped)
++ {
++#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (Device->contiguousBase)
++ {
++ /* Unmap the contiguous memory. */
++ iounmap(Device->contiguousBase);
++ }
++#endif
++ }
++ else
++ {
++ gcmkONERROR(_FreeMemory(
++ Device,
++ Device->contiguousBase,
++ Device->contiguousPhysical
++ ));
++ }
++
++ Device->contiguousBase = gcvNULL;
++ Device->contiguousPhysical = gcvNULL;
++ }
++
++ if (Device->requestedContiguousBase != 0)
++ {
++ release_mem_region(Device->requestedContiguousBase, Device->requestedContiguousSize);
++ Device->requestedContiguousBase = 0;
++ Device->requestedContiguousSize = 0;
++ }
++
++ if (Device->contiguousVidMem != gcvNULL)
++ {
++ /* Destroy the contiguous heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->contiguousVidMem));
++ Device->contiguousVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if(gckDebugFileSystemIsEnabled())
++ {
++ gckDebugFileSystemFreeNode(Device->dbgnode);
++ kfree(Device->dbgnode);
++ Device->dbgnode = gcvNULL;
++ }
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->registerBases[i] != gcvNULL)
++ {
++ /* Unmap register memory. */
++ iounmap(Device->registerBases[i]);
++ if (Device->requestedRegisterMemBases[i] != 0)
++ {
++ release_mem_region(Device->requestedRegisterMemBases[i], Device->requestedRegisterMemSizes[i]);
++ }
++
++ Device->registerBases[i] = gcvNULL;
++ Device->requestedRegisterMemBases[i] = 0;
++ Device->requestedRegisterMemSizes[i] = 0;
++ }
++ }
++
++ /*Disable clock*/
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (Device->clk_3d_axi) {
++ clk_put(Device->clk_3d_axi);
++ Device->clk_3d_axi = NULL;
++ }
++#endif
++ if (Device->clk_3d_core) {
++ clk_put(Device->clk_3d_core);
++ Device->clk_3d_core = NULL;
++ }
++ if (Device->clk_3d_shader) {
++ clk_put(Device->clk_3d_shader);
++ Device->clk_3d_shader = NULL;
++ }
++ if (Device->clk_2d_core) {
++ clk_put(Device->clk_2d_core);
++ Device->clk_2d_core = NULL;
++ }
++ if (Device->clk_2d_axi) {
++ clk_put(Device->clk_2d_axi);
++ Device->clk_2d_axi = NULL;
++ }
++ if (Device->clk_vg_axi) {
++ clk_put(Device->clk_vg_axi);
++ Device->clk_vg_axi = NULL;
++ }
++
++#ifdef CONFIG_PM
++ if(Device->pmdev)
++ pm_runtime_disable(Device->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Device->gpu_regulator) {
++ regulator_put(Device->gpu_regulator);
++ Device->gpu_regulator = NULL;
++ }
++#endif
++
++ /* Destroy the gckOS object. */
++ if (Device->os != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Destroy(Device->os));
++ Device->os = gcvNULL;
++ }
++
++ /* Free the device. */
++ kfree(Device);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Setup_ISR
++**
++** Start the ISR routine.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Setup successfully.
++** gcvSTATUS_GENERIC_IO
++** Setup failed.
++*/
++gceSTATUS
++gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gceSTATUS status;
++ gctINT ret = -1;
++
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[Core] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ gc500_handle.handler = isrRoutine;
++ break;
++ case gcvCORE_2D:
++ gc500_handle.handler = isrRoutine2D;
++ break;
++ case gcvCORE_VG:
++ gc500_handle.handler = isrRoutineVG;
++ break;
++ default:
++ break;
++ }
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutine, IRQF_DISABLED,
++ "galcore interrupt service", Device
++ );
++ break;
++ case gcvCORE_2D:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutine2D, IRQF_DISABLED,
++ "galcore 2D interrupt service", Device
++ );
++ break;
++ case gcvCORE_VG:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutineVG, IRQF_DISABLED,
++ "galcore VG interrupt service", Device
++ );
++ break;
++ default:
++ break;
++ }
++#endif
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[Core], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->isrEnabled[Core] = 1;
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[Core] = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckGALDEVICE_Enable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[Core] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ spin_lock(&Device->kernels[Core]->irq_lock);
++ if (Device->isrEnabled[Core] == 0)
++ {
++ enable_irq(Device->irqLines[Core]);
++ /* Mark ISR as initialized. */
++ Device->isrEnabled[Core] = gcvTRUE;
++ }
++ Device->isrEnabled[Core]++;
++ spin_unlock(&Device->kernels[Core]->irq_lock);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Release_ISR
++**
++** Release the irq line.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* release the irq */
++ if (Device->isrInitializeds[Core])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[Core], Device);
++#endif
++
++ Device->isrInitializeds[Core] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckGALDEVICE_Disable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* disable the irq */
++ spin_lock(&Device->kernels[Core]->irq_lock);
++ if (Device->isrEnabled[Core] > 0)
++ {
++ Device->isrEnabled[Core]--;
++ if (Device->isrEnabled[Core] == 0)
++ disable_irq(Device->irqLines[Core]);
++ }
++ spin_unlock(&Device->kernels[Core]->irq_lock);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start_Threads
++**
++** Start the daemon threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++** gcvSTATUS_GENERIC_IO
++** Start failed.
++*/
++gceSTATUS
++gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ struct task_struct * task;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine, Device, "galcore daemon thread");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_MAJOR] = task;
++ Device->threadInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine2D, Device, "galcore daemon thread for 2D");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_2D] = task;
++ Device->threadInitializeds[gcvCORE_2D] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_2D] = gcvFALSE;
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutineVG, Device, "galcore daemon thread for VG");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_VG] = task;
++ Device->threadInitializeds[gcvCORE_VG] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_VG] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop_Threads
++**
++** Stop the gal device, including the following actions: stop the daemon
++** thread, release the irq.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ /* Stop the kernel threads. */
++ if (Device->threadInitializeds[i])
++ {
++ Device->killThread = gcvTRUE;
++ up(&Device->semas[i]);
++
++ kthread_stop(Device->threadCtxts[i]);
++ Device->threadCtxts[i] = gcvNULL;
++ Device->threadInitializeds[i] = gcvFALSE;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start
++**
++** Start the gal device, including the following actions: setup the isr routine
++** and start the daemoni thread.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++*/
++gceSTATUS
++gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ /* Start the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Start_Threads(Device));
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_MAJOR));
++
++ /* Switch to SUSPEND power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_2D));
++
++ /* Switch to SUSPEND power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_VG));
++
++ /* Switch to SUSPEND power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop
++**
++** Stop the gal device, including the following actions: stop the daemon
++** thread, release the irq.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF
++ ));
++
++ /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_MAJOR));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_2D));
++
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_VG));
++
++#if gcdENABLE_VG
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF
++ ));
++#endif
++ }
++
++ /* Stop the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Stop_Threads(Device));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,192 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_device_h_
++#define __gc_hal_kernel_device_h_
++
++/******************************************************************************\
++******************************* gckGALDEVICE Structure *******************************
++\******************************************************************************/
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++struct contiguous_mem_pool {
++ struct dma_attrs attrs;
++ dma_addr_t phys;
++ void *virt;
++ size_t size;
++};
++#endif
++
++typedef struct _gckGALDEVICE
++{
++ /* Objects. */
++ gckOS os;
++ gckKERNEL kernels[gcdMAX_GPU_COUNT];
++
++ /* Attributes. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctUINT32 internalPhysicalName;
++ gctPOINTER internalLogical;
++ gckVIDMEM internalVidMem;
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctUINT32 externalPhysicalName;
++ gctPOINTER externalLogical;
++ gckVIDMEM externalVidMem;
++ gckVIDMEM contiguousVidMem;
++ gctPOINTER contiguousBase;
++ gctPHYS_ADDR contiguousPhysical;
++ gctUINT32 contiguousPhysicalName;
++ gctSIZE_T contiguousSize;
++ gctBOOL contiguousMapped;
++ gctPOINTER contiguousMappedUser;
++ gctSIZE_T systemMemorySize;
++ gctUINT32 systemMemoryBaseAddress;
++ gctPOINTER registerBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T registerSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 baseAddress;
++ gctUINT32 requestedRegisterMemBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T requestedRegisterMemSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 requestedContiguousBase;
++ gctSIZE_T requestedContiguousSize;
++
++ /* IRQ management. */
++ gctINT irqLines[gcdMAX_GPU_COUNT];
++ gctBOOL isrInitializeds[gcdMAX_GPU_COUNT];
++ gctINT isrEnabled[gcdMAX_GPU_COUNT];
++ gctBOOL dataReadys[gcdMAX_GPU_COUNT];
++
++ /* Thread management. */
++ struct task_struct *threadCtxts[gcdMAX_GPU_COUNT];
++ struct semaphore semas[gcdMAX_GPU_COUNT];
++ gctBOOL threadInitializeds[gcdMAX_GPU_COUNT];
++ gctBOOL killThread;
++
++ /* Signal management. */
++ gctINT signal;
++
++ /* Core mapping */
++ gceCORE coreMapping[8];
++
++ /* States before suspend. */
++ gceCHIPPOWERSTATE statesStored[gcdMAX_GPU_COUNT];
++
++ /*Device Debug File System Entry in Kernel*/
++ struct _gcsDebugFileSystemNode * dbgnode;
++
++ /* Clock management.*/
++ struct clk *clk_3d_core;
++ struct clk *clk_3d_shader;
++ struct clk *clk_3d_axi;
++ struct clk *clk_2d_core;
++ struct clk *clk_2d_axi;
++ struct clk *clk_vg_axi;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ /*Power management.*/
++ struct regulator *gpu_regulator;
++#endif
++ /*Run time pm*/
++ struct device *pmdev;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc[gcdMAX_GPU_COUNT];
++#endif
++}
++* gckGALDEVICE;
++
++typedef struct _gcsHAL_PRIVATE_DATA
++{
++ gckGALDEVICE device;
++ gctPOINTER mappedMemory;
++ gctPOINTER contiguousLogical;
++ /* The process opening the device may not be the same as the one that closes it. */
++ gctUINT32 pidOpen;
++}
++gcsHAL_PRIVATE_DATA, * gcsHAL_PRIVATE_DATA_PTR;
++
++gceSTATUS gckGALDEVICE_Enable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Disable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Construct(
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN struct device *pdev,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ OUT gckGALDEVICE *Device
++ );
++
++gceSTATUS gckGALDEVICE_Destroy(
++ IN gckGALDEVICE Device
++ );
++
++#endif /* __gc_hal_kernel_device_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_driver.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_driver.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_driver.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_driver.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1476 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#include <linux/device.h>
++#include <linux/slab.h>
++#include <linux/notifier.h>
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_driver.h"
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#ifdef CONFIG_PXA_DVFM
++# include <mach/dvfm.h>
++# include <mach/pxa3xx_dvfm.h>
++#endif
++
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++# include <linux/resmem_account.h>
++# include <linux/kernel.h>
++# include <linux/mm.h>
++# include <linux/oom.h>
++# include <linux/sched.h>
++# include <linux/notifier.h>
++
++struct task_struct *lowmem_deathpending;
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data);
++
++static struct notifier_block task_nb = {
++ .notifier_call = task_notify_func,
++};
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data)
++{
++ struct task_struct *task = data;
++
++ if (task == lowmem_deathpending)
++ lowmem_deathpending = NULL;
++
++ return NOTIFY_OK;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#include <mach/viv_gpu.h>
++#else
++#include <linux/pm_runtime.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <mach/busfreq.h>
++#else
++#include <linux/busfreq-imx6.h>
++#include <linux/reset.h>
++#endif
++#endif
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_DRIVER
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/device_cooling.h>
++#define REG_THERMAL_NOTIFIER(a) register_devfreq_cooling_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_devfreq_cooling_notifier(a);
++#else
++extern int register_thermal_notifier(struct notifier_block *nb);
++extern int unregister_thermal_notifier(struct notifier_block *nb);
++#define REG_THERMAL_NOTIFIER(a) register_thermal_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_thermal_notifier(a);
++#endif
++#endif
++
++MODULE_DESCRIPTION("Vivante Graphics Driver");
++MODULE_LICENSE("GPL");
++
++static struct class* gpuClass;
++
++static gckGALDEVICE galDevice;
++
++static uint major = 199;
++module_param(major, uint, 0644);
++
++static int irqLine = -1;
++module_param(irqLine, int, 0644);
++
++static ulong registerMemBase = 0x80000000;
++module_param(registerMemBase, ulong, 0644);
++
++static ulong registerMemSize = 2 << 10;
++module_param(registerMemSize, ulong, 0644);
++
++static int irqLine2D = -1;
++module_param(irqLine2D, int, 0644);
++
++static ulong registerMemBase2D = 0x00000000;
++module_param(registerMemBase2D, ulong, 0644);
++
++static ulong registerMemSize2D = 2 << 10;
++module_param(registerMemSize2D, ulong, 0644);
++
++static int irqLineVG = -1;
++module_param(irqLineVG, int, 0644);
++
++static ulong registerMemBaseVG = 0x00000000;
++module_param(registerMemBaseVG, ulong, 0644);
++
++static ulong registerMemSizeVG = 2 << 10;
++module_param(registerMemSizeVG, ulong, 0644);
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++static ulong contiguousSize = 128 << 20;
++#else
++static ulong contiguousSize = 4 << 20;
++#endif
++module_param(contiguousSize, ulong, 0644);
++
++static ulong contiguousBase = 0;
++module_param(contiguousBase, ulong, 0644);
++
++static ulong bankSize = 0;
++module_param(bankSize, ulong, 0644);
++
++static int fastClear = -1;
++module_param(fastClear, int, 0644);
++
++static int compression = -1;
++module_param(compression, int, 0644);
++
++static int powerManagement = 1;
++module_param(powerManagement, int, 0644);
++
++static int gpuProfiler = 0;
++module_param(gpuProfiler, int, 0644);
++
++static int signal = 48;
++module_param(signal, int, 0644);
++
++static ulong baseAddress = 0;
++module_param(baseAddress, ulong, 0644);
++
++static ulong physSize = 0;
++module_param(physSize, ulong, 0644);
++
++static uint logFileSize=0;
++module_param(logFileSize,uint, 0644);
++
++static int showArgs = 0;
++module_param(showArgs, int, 0644);
++
++int gpu3DMinClock = 0;
++module_param(gpu3DMinClock, int, 0644);
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER
++ unsigned long coreClock = 156000000;
++ module_param(coreClock, ulong, 0644);
++#endif
++
++static int drv_open(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static int drv_release(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ );
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ );
++
++static struct file_operations driver_fops =
++{
++ .owner = THIS_MODULE,
++ .open = drv_open,
++ .release = drv_release,
++ .unlocked_ioctl = drv_ioctl,
++#ifdef HAVE_COMPAT_IOCTL
++ .compat_ioctl = drv_ioctl,
++#endif
++ .mmap = drv_mmap,
++};
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++static size_t viv_gpu_resmem_query(struct task_struct *p, struct reserved_memory_account *m);
++static struct reserved_memory_account viv_gpu_resmem_handler = {
++ .name = "viv_gpu",
++ .get_page_used_by_process = viv_gpu_resmem_query,
++};
++
++size_t viv_gpu_resmem_query(struct task_struct *p, struct reserved_memory_account *m)
++{
++ gcuDATABASE_INFO info;
++ unsigned int processid = p->pid;
++ gckKERNEL gpukernel = m->data;
++
++ /* ignore error happens in this api. */
++ if (gckKERNEL_QueryProcessDB(gpukernel, processid, false, gcvDB_VIDEO_MEMORY, &info) != gcvSTATUS_OK)
++ return 0;
++
++ /* we return pages. */
++ if (info.counters.bytes > 0)
++ return info.counters.bytes / PAGE_SIZE;
++ return 0;
++}
++#endif
++
++int drv_open(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gctBOOL attached = gcvFALSE;
++ gcsHAL_PRIVATE_DATA_PTR data = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL);
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ data->device = galDevice;
++ data->mappedMemory = gcvNULL;
++ data->contiguousLogical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&data->pidOpen));
++
++ /* Attached the process. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE));
++ }
++ }
++ attached = gcvTRUE;
++
++ if (!galDevice->contiguousMapped)
++ {
++ gcmkONERROR(gckOS_MapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ &data->contiguousLogical
++ ));
++ }
++
++ filp->private_data = data;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ if (data != gcvNULL)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical
++ ));
++ }
++
++ kfree(data);
++ }
++
++ if (attached)
++ {
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE));
++ }
++ }
++ }
++
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++int drv_release(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (!device->contiguousMapped)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_UnmapMemoryEx(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical,
++ data->pidOpen
++ ));
++
++ data->contiguousLogical = gcvNULL;
++ }
++ }
++
++ /* A process gets detached. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen));
++ }
++ }
++
++ kfree(data);
++ filp->private_data = NULL;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gctUINT32 copyLen;
++ DRIVER_ARGS drvArgs;
++ gckGALDEVICE device;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gctINT32 i, count;
++
++ gcmkHEADER_ARG(
++ "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
++ filp, ioctlCode, arg
++ );
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
++ && (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown command %d\n",
++ __FUNCTION__, __LINE__,
++ ioctlCode
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get the drvArgs. */
++ copyLen = copy_from_user(
++ &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of the input arguments.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Now bring in the gcsHAL_INTERFACE structure. */
++ if ((drvArgs.InputBufferSize != sizeof(gcsHAL_INTERFACE))
++ || (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): input or/and output structures are invalid.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ copyLen = copy_from_user(
++ &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of input HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (iface.command == gcvHAL_CHIP_INFO)
++ {
++ count = 0;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ iface.u.ChipInfo.types[count] = gcvHARDWARE_VG;
++ }
++ else
++#endif
++ {
++ gcmkVERIFY_OK(gckHARDWARE_GetType(device->kernels[i]->hardware,
++ &iface.u.ChipInfo.types[count]));
++ }
++ count++;
++ }
++ }
++
++ iface.u.ChipInfo.count = count;
++ iface.status = status = gcvSTATUS_OK;
++ }
++ else
++ {
++ if (iface.hardwareType < 0 || iface.hardwareType > 7)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown hardwareType %d\n",
++ __FUNCTION__, __LINE__,
++ iface.hardwareType
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (device->coreMapping[iface.hardwareType] == gcvCORE_VG)
++ {
++ status = gckVGKERNEL_Dispatch(device->kernels[gcvCORE_VG],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ else
++#endif
++ {
++ status = gckKERNEL_Dispatch(device->kernels[device->coreMapping[iface.hardwareType]],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ }
++
++ /* Redo system call after pending signal is handled. */
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkFOOTER();
++ return -ERESTARTSYS;
++ }
++
++ if (gcmIS_SUCCESS(status) && (iface.command == gcvHAL_LOCK_VIDEO_MEMORY))
++ {
++ gcuVIDMEM_NODE_PTR node = gcmUINT64_TO_PTR(iface.u.LockVideoMemory.node);
++ /* Special case for mapped memory. */
++ if ((data->mappedMemory != gcvNULL)
++ && (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ )
++ {
++ /* Compute offset into mapped memory. */
++ gctUINT32 offset
++ = (gctUINT8 *) gcmUINT64_TO_PTR(iface.u.LockVideoMemory.memory)
++ - (gctUINT8 *) device->contiguousBase;
++
++ /* Compute offset into user-mapped region. */
++ iface.u.LockVideoMemory.memory =
++ gcmPTR_TO_UINT64((gctUINT8 *) data->mappedMemory + offset);
++ }
++ }
++
++ /* Copy data back to the user. */
++ copyLen = copy_to_user(
++ gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of output HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if !gcdPAGED_MEMORY_CACHEABLE
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_flags |= gcdVM_FLAGS;
++#endif
++ vma->vm_pgoff = 0;
++
++ if (device->contiguousMapped)
++ {
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret = 0;
++
++ if (size > device->contiguousSize)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Invalid mapping size.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ ret = io_remap_pfn_range(
++ vma,
++ vma->vm_start,
++ device->requestedContiguousBase >> PAGE_SHIFT,
++ size,
++ vma->vm_page_prot
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): io_remap_pfn_range failed %d\n",
++ __FUNCTION__, __LINE__,
++ ret
++ );
++
++ data->mappedMemory = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ data->mappedMemory = (gctPOINTER) vma->vm_start;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++ }
++
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++
++#if !USE_PLATFORM_DRIVER
++static int __init drv_init(void)
++#else
++static int drv_init(struct device *pdev)
++#endif
++{
++ int ret;
++ int result = -EINVAL;
++ gceSTATUS status;
++ gckGALDEVICE device = gcvNULL;
++ struct class* device_class = gcvNULL;
++
++ gcmkHEADER();
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
++ {
++# if 0
++ struct clk * clk;
++
++ clk = clk_get(NULL, "GCCLK");
++
++ if (IS_ERR(clk))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): clk get error: %d\n",
++ __FUNCTION__, __LINE__,
++ PTR_ERR(clk)
++ );
++
++ result = -ENODEV;
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /*
++ * APMU_GC_156M, APMU_GC_312M, APMU_GC_PLL2, APMU_GC_PLL2_DIV2 currently.
++ * Use the 2X clock.
++ */
++ if (clk_set_rate(clk, coreClock * 2))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to set core clock.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ result = -EAGAIN;
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ clk_enable(clk);
++
++#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
++ gc_pwr(1);
++# endif
++# endif
++ }
++#endif
++
++ printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);
++    /* When enabling the gpu profiler, we need to turn off gpu powerManagement. */
++ if(gpuProfiler)
++ powerManagement = 0;
++ if (showArgs)
++ {
++ printk("galcore options:\n");
++ printk(" irqLine = %d\n", irqLine);
++ printk(" registerMemBase = 0x%08lX\n", registerMemBase);
++ printk(" registerMemSize = 0x%08lX\n", registerMemSize);
++
++ if (irqLine2D != -1)
++ {
++ printk(" irqLine2D = %d\n", irqLine2D);
++ printk(" registerMemBase2D = 0x%08lX\n", registerMemBase2D);
++ printk(" registerMemSize2D = 0x%08lX\n", registerMemSize2D);
++ }
++
++ if (irqLineVG != -1)
++ {
++ printk(" irqLineVG = %d\n", irqLineVG);
++ printk(" registerMemBaseVG = 0x%08lX\n", registerMemBaseVG);
++ printk(" registerMemSizeVG = 0x%08lX\n", registerMemSizeVG);
++ }
++
++ printk(" contiguousSize = %ld\n", contiguousSize);
++ printk(" contiguousBase = 0x%08lX\n", contiguousBase);
++ printk(" bankSize = 0x%08lX\n", bankSize);
++ printk(" fastClear = %d\n", fastClear);
++ printk(" compression = %d\n", compression);
++ printk(" signal = %d\n", signal);
++ printk(" baseAddress = 0x%08lX\n", baseAddress);
++ printk(" physSize = 0x%08lX\n", physSize);
++ printk(" logFileSize = %d KB \n", logFileSize);
++ printk(" powerManagement = %d\n", powerManagement);
++ printk(" gpuProfiler = %d\n", gpuProfiler);
++#if ENABLE_GPU_CLOCK_BY_DRIVER
++ printk(" coreClock = %lu\n", coreClock);
++#endif
++ }
++
++ if(logFileSize != 0)
++ {
++ gckDebugFileSystemInitialize();
++ }
++
++ /* Create the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Construct(
++ irqLine,
++ registerMemBase, registerMemSize,
++ irqLine2D,
++ registerMemBase2D, registerMemSize2D,
++ irqLineVG,
++ registerMemBaseVG, registerMemSizeVG,
++ contiguousBase, contiguousSize,
++ bankSize, fastClear, compression, baseAddress, physSize, signal,
++ logFileSize,
++ pdev,
++ powerManagement,
++ gpuProfiler,
++ &device
++ ));
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ device->pool = dev_get_drvdata(pdev);
++#endif
++
++ /* Start the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Start(device));
++
++ if ((physSize != 0)
++ && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
++ {
++ status = gckMMU_Enable(device->kernels[gcvCORE_MAJOR]->mmu, baseAddress, physSize);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Enable new MMU: status=%d\n", status);
++
++ if ((device->kernels[gcvCORE_2D] != gcvNULL)
++ && (device->kernels[gcvCORE_2D]->hardware->mmuVersion != 0))
++ {
++ status = gckMMU_Enable(device->kernels[gcvCORE_2D]->mmu, baseAddress, physSize);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Enable new MMU for 2D: status=%d\n", status);
++ }
++
++ /* Reset the base address */
++ device->baseAddress = 0;
++ }
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ task_free_register(&task_nb);
++ viv_gpu_resmem_handler.data = device->kernels[gcvCORE_MAJOR];
++ register_reserved_memory_account(&viv_gpu_resmem_handler);
++#endif
++
++
++ /* Register the character device. */
++ ret = register_chrdev(major, DRV_NAME, &driver_fops);
++
++ if (ret < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not allocate major number for mmap.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (major == 0)
++ {
++ major = ret;
++ }
++
++ /* Create the device class. */
++ device_class = class_create(THIS_MODULE, "graphics_class");
++
++ if (IS_ERR(device_class))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the class.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++ device_create(device_class, NULL, MKDEV(major, 0), NULL, "galcore");
++#else
++ device_create(device_class, NULL, MKDEV(major, 0), "galcore");
++#endif
++
++ galDevice = device;
++ gpuClass = device_class;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine, contiguousSize, registerMemBase
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ /* Roll back. */
++ if (device_class != gcvNULL)
++ {
++ device_destroy(device_class, MKDEV(major, 0));
++ class_destroy(device_class);
++ }
++
++ if (device != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++ }
++
++ gcmkFOOTER();
++ return result;
++}
++
++#if !USE_PLATFORM_DRIVER
++static void __exit drv_exit(void)
++#else
++static void drv_exit(void)
++#endif
++{
++ gcmkHEADER();
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ task_free_unregister(&task_nb);
++ unregister_reserved_memory_account(&viv_gpu_resmem_handler);
++#endif
++
++ gcmkASSERT(gpuClass != gcvNULL);
++ device_destroy(gpuClass, MKDEV(major, 0));
++ class_destroy(gpuClass);
++
++ unregister_chrdev(major, DRV_NAME);
++
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));
++
++ if(gckDebugFileSystemIsEnabled())
++ {
++ gckDebugFileSystemTerminate();
++ }
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++ {
++# if 0
++ struct clk * clk = NULL;
++
++#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
++ gc_pwr(0);
++#endif
++ clk = clk_get(NULL, "GCCLK");
++ clk_disable(clk);
++# endif
++ }
++#endif
++
++ gcmkFOOTER_NO();
++}
++
++#if !USE_PLATFORM_DRIVER
++ module_init(drv_init);
++ module_exit(drv_exit);
++#else
++
++#ifdef CONFIG_DOVE_GPU
++# define DEVICE_NAME "dove_gpu"
++#else
++# define DEVICE_NAME "galcore"
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++static int thermal_hot_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ static gctUINT orgFscale, minFscale, maxFscale;
++ static gctBOOL critical;
++ gckHARDWARE hardware = galDevice->kernels[gcvCORE_MAJOR]->hardware;
++
++ if (event > 4) {
++ critical = gcvTRUE;
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, minFscale);
++ gckOS_Print("System is too hot. GPU3D scalign to %d/64 clock.\n", minFscale);
++ } else if (event > 1) {
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, maxFscale - (8 * event));
++ } else if (orgFscale) {
++ gckHARDWARE_SetFscaleValue(hardware, orgFscale);
++ if (critical) {
++ gckOS_Print("Hot alarm is canceled. GPU3D clock will return to %d/64\n", orgFscale);
++ critical = gcvFALSE;
++ }
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block thermal_hot_pm_notifier = {
++ .notifier_call = thermal_hot_pm_notify,
++ };
++#endif
++
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_probe(struct platform_device *pdev)
++#else
++static int __devinit gpu_probe(struct platform_device *pdev)
++#endif
++{
++ int ret = -ENODEV;
++ struct resource* res;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct device_node *dn =pdev->dev.of_node;
++ const u32 *prop;
++#else
++ struct viv_gpu_platform_data *pdata;
++#endif
++ gcmkHEADER();
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
++ if (res)
++ baseAddress = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
++ if (res)
++ irqLine = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
++ if (res)
++ {
++ registerMemBase = res->start;
++ registerMemSize = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
++ if (res)
++ irqLine2D = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
++ if (res)
++ {
++ registerMemBase2D = res->start;
++ registerMemSize2D = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
++ if (res)
++ irqLineVG = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
++ if (res)
++ {
++ registerMemBaseVG = res->start;
++ registerMemSizeVG = res->end - res->start + 1;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ pool = devm_kzalloc(&pdev->dev, sizeof(*pool), GFP_KERNEL);
++ if (!pool)
++ return -ENOMEM;
++ pool->size = contiguousSize;
++ init_dma_attrs(&pool->attrs);
++ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &pool->attrs);
++ pool->virt = dma_alloc_attrs(&pdev->dev, pool->size, &pool->phys,
++ GFP_KERNEL, &pool->attrs);
++ if (!pool->virt) {
++ dev_err(&pdev->dev, "Failed to allocate contiguous memory\n");
++ return -ENOMEM;
++ }
++ contiguousBase = pool->phys;
++ dev_set_drvdata(&pdev->dev, pool);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ prop = of_get_property(dn, "contiguousbase", NULL);
++ if(prop)
++ contiguousBase = *prop;
++ of_property_read_u32(dn,"contiguoussize", (u32 *)&contiguousSize);
++#else
++ pdata = pdev->dev.platform_data;
++ if (pdata) {
++ contiguousBase = pdata->reserved_mem_base;
++ contiguousSize = pdata->reserved_mem_size;
++ }
++#endif
++ if (contiguousSize == 0)
++ gckOS_Print("Warning: No contiguous memory is reserverd for gpu.!\n ");
++ ret = drv_init(&pdev->dev);
++
++ if (!ret)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ rstc = devm_reset_control_get(&pdev->dev, "gpu3d");
++ galDevice->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;
++
++ rstc = devm_reset_control_get(&pdev->dev, "gpu2d");
++ galDevice->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
++
++ rstc = devm_reset_control_get(&pdev->dev, "gpuvg");
++ galDevice->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
++#endif
++ platform_set_drvdata(pdev, galDevice);
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (galDevice->kernels[gcvCORE_MAJOR])
++ REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++ gcmkFOOTER_NO();
++ return ret;
++ }
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
++ &pool->attrs);
++#endif
++ gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
++ return ret;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_remove(struct platform_device *pdev)
++#else
++static int __devexit gpu_remove(struct platform_device *pdev)
++#endif
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ gckGALDEVICE device = platform_get_drvdata(pdev);
++ struct contiguous_mem_pool *pool = device->pool;
++#endif
++ gcmkHEADER();
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if(galDevice->kernels[gcvCORE_MAJOR])
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++ drv_exit();
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
++ &pool->attrs);
++#endif
++ gcmkFOOTER_NO();
++ return 0;
++}
++
++static int gpu_suspend(struct platform_device *dev, pm_message_t state)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++
++ device = platform_get_drvdata(dev);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++ /* Store states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_QueryPowerManagementState(device->kernels[i]->vg->hardware, &device->statesStored[i]);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_QueryPowerManagementState(device->kernels[i]->hardware, &device->statesStored[i]);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_OFF);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_OFF);
++ }
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++ }
++ }
++
++ return 0;
++}
++
++static int gpu_resume(struct platform_device *dev)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++ gceCHIPPOWERSTATE statesStored;
++
++ device = platform_get_drvdata(dev);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_ON);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_ON);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++            /* Convert global state to corresponding internal state. */
++ switch(device->statesStored[i])
++ {
++ case gcvPOWER_OFF:
++ statesStored = gcvPOWER_OFF_BROADCAST;
++ break;
++ case gcvPOWER_IDLE:
++ statesStored = gcvPOWER_IDLE_BROADCAST;
++ break;
++ case gcvPOWER_SUSPEND:
++ statesStored = gcvPOWER_SUSPEND_BROADCAST;
++ break;
++ case gcvPOWER_ON:
++ statesStored = gcvPOWER_ON_AUTO;
++ break;
++ default:
++ statesStored = device->statesStored[i];
++ break;
++ }
++
++ /* Restore states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, statesStored);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, statesStored);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++ }
++ }
++
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static const struct of_device_id mxs_gpu_dt_ids[] = {
++ { .compatible = "fsl,imx6q-gpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, mxs_gpu_dt_ids);
++
++#ifdef CONFIG_PM
++static int gpu_runtime_suspend(struct device *dev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 7)
++ release_bus_freq(BUS_FREQ_HIGH);
++#endif
++ return 0;
++}
++
++static int gpu_runtime_resume(struct device *dev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 7)
++ request_bus_freq(BUS_FREQ_HIGH);
++#endif
++ return 0;
++}
++
++static int gpu_system_suspend(struct device *dev)
++{
++ pm_message_t state={0};
++ return gpu_suspend(to_platform_device(dev), state);
++}
++
++static int gpu_system_resume(struct device *dev)
++{
++ return gpu_resume(to_platform_device(dev));
++}
++
++static const struct dev_pm_ops gpu_pm_ops = {
++ SET_RUNTIME_PM_OPS(gpu_runtime_suspend, gpu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(gpu_system_suspend, gpu_system_resume)
++};
++#endif
++#endif
++
++static struct platform_driver gpu_driver = {
++ .probe = gpu_probe,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ .remove = gpu_remove,
++#else
++ .remove = __devexit_p(gpu_remove),
++#endif
++
++ .suspend = gpu_suspend,
++ .resume = gpu_resume,
++
++ .driver = {
++ .name = DEVICE_NAME,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ .of_match_table = mxs_gpu_dt_ids,
++#if CONFIG_PM
++ .pm = &gpu_pm_ops,
++#endif
++#endif
++ }
++};
++
++#if 0 /*CONFIG_DOVE_GPU*/
++static struct resource gpu_resources[] = {
++ {
++ .name = "gpu_irq",
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .name = "gpu_base",
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .name = "gpu_mem",
++ .flags = IORESOURCE_MEM,
++ },
++};
++
++static struct platform_device * gpu_device;
++#endif
++
++static int __init gpu_init(void)
++{
++ int ret = 0;
++
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ gpu_resources[0].start = gpu_resources[0].end = irqLine;
++
++ gpu_resources[1].start = registerMemBase;
++ gpu_resources[1].end = registerMemBase + registerMemSize - 1;
++
++ gpu_resources[2].start = contiguousBase;
++ gpu_resources[2].end = contiguousBase + contiguousSize - 1;
++
++ /* Allocate device */
++ gpu_device = platform_device_alloc(DEVICE_NAME, -1);
++ if (!gpu_device)
++ {
++ printk(KERN_ERR "galcore: platform_device_alloc failed.\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ /* Insert resource */
++ ret = platform_device_add_resources(gpu_device, gpu_resources, 3);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add_resources failed.\n");
++ goto put_dev;
++ }
++
++ /* Add device */
++ ret = platform_device_add(gpu_device);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add failed.\n");
++ goto put_dev;
++ }
++#endif
++
++ ret = platform_driver_register(&gpu_driver);
++ if (!ret)
++ {
++ goto out;
++ }
++
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ platform_device_del(gpu_device);
++put_dev:
++ platform_device_put(gpu_device);
++#endif
++
++out:
++ return ret;
++}
++
++static void __exit gpu_exit(void)
++{
++ platform_driver_unregister(&gpu_driver);
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ platform_device_unregister(gpu_device);
++#endif
++}
++
++module_init(gpu_init);
++module_exit(gpu_exit);
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,481 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryVideoMemory
++**
++** Query the amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to an gcsHAL_INTERFACE structure that will be filled in with
++** the memory information.
++*/
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Get internal memory size and physical address. */
++ Interface->u.QueryVideoMemory.internalSize = device->internalSize;
++ Interface->u.QueryVideoMemory.internalPhysical = device->internalPhysicalName;
++
++ /* Get external memory size and physical address. */
++ Interface->u.QueryVideoMemory.externalSize = device->externalSize;
++ Interface->u.QueryVideoMemory.externalPhysical = device->externalPhysicalName;
++
++ /* Get contiguous memory size and physical address. */
++ Interface->u.QueryVideoMemory.contiguousSize = device->contiguousSize;
++ Interface->u.QueryVideoMemory.contiguousPhysical = device->contiguousPhysicalName;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_GetVideoMemoryPool
++**
++** Get the gckVIDMEM object belonging to the specified pool.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcePOOL Pool
++** Pool to query gckVIDMEM object for.
++**
++** OUTPUT:
++**
++** gckVIDMEM * VideoMemory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object belonging to the requested pool.
++*/
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ )
++{
++ gckGALDEVICE device;
++ gckVIDMEM videoMemory;
++
++ gcmkHEADER_ARG("Kernel=%p Pool=%d", Kernel, Pool);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(VideoMemory != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Dispatch on pool. */
++ switch (Pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ videoMemory = device->internalVidMem;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ videoMemory = device->externalVidMem;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ videoMemory = device->contiguousVidMem;
++ break;
++
++ default:
++ /* Unknown pool. */
++ videoMemory = NULL;
++ }
++
++ /* Return pointer to the gckVIDMEM object. */
++ *VideoMemory = videoMemory;
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*VideoMemory=%p", *VideoMemory);
++ return (videoMemory == NULL) ? gcvSTATUS_OUT_OF_MEMORY : gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapMemory
++**
++** Map video memory into the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the base address of the mapped
++** memory region.
++*/
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_MapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_UnmapMemory
++**
++** Unmap video memory from the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** gctPOINTER Logical
++** Base address of the mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_UnmapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapVideoMemory
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckGALDEVICE device;
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++ gcePOOL pool;
++ gctUINT32 offset, base;
++ gceSTATUS status;
++ gctPOINTER logical;
++
++ gcmkHEADER_ARG("Kernel=%p InUserSpace=%d Address=%08x",
++ Kernel, InUserSpace, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware, Address, &pool, &offset));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckHARDWARE_SplitMemory(Kernel->hardware, Address, &pool, &offset));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ if (device->contiguousMapped)
++ {
++ logical = device->contiguousBase;
++ }
++ else
++ {
++ gctINT processID;
++ gckOS_GetProcessID(&processID);
++
++ mdl = (PLINUX_MDL) device->contiguousPhysical;
++
++ mdlMap = FindMdlMap(mdl, processID);
++ gcmkASSERT(mdlMap);
++
++ logical = (gctPOINTER) mdlMap->vmaAddr;
++ }
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
++ device->contiguousVidMem->baseAddress,
++ &pool,
++ &base));
++ }
++ else
++#endif
++ {
++ gctUINT32 baseAddress = 0;
++
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++ }
++
++ gcmkVERIFY_OK(
++ gckHARDWARE_SplitMemory(Kernel->hardware,
++ device->contiguousVidMem->baseAddress - baseAddress,
++ &pool,
++ &base));
++ }
++ offset -= base;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Build logical address of specified address. */
++ *Logical = (gctPOINTER) ((gctUINT8_PTR) logical + offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=%p", *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++    /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapVideoMemory
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ return gckKERNEL_MapVideoMemoryEx(Kernel, gcvCORE_MAJOR, InUserSpace, Address, Logical);
++}
++/*******************************************************************************
++**
++** gckKERNEL_Notify
++**
++**  This function is called by clients to notify the gckKERNEL object of an event.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gceNOTIFY Notification
++** Notification event.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++ IN gceNOTIFY Notification,
++ IN gctBOOL Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=%p Notification=%d Data=%d",
++ Kernel, Notification, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Dispatch on notification. */
++ switch (Notification)
++ {
++ case gcvNOTIFY_INTERRUPT:
++ /* Process the interrupt. */
++#if COMMAND_PROCESSOR_VERSION > 1
++ status = gckINTERRUPT_Notify(Kernel->interrupt, Data);
++#else
++ status = gckHARDWARE_Interrupt(Kernel->hardware, Data);
++#endif
++ break;
++
++ default:
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Settings != gcvNULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Fill in signal. */
++ Settings->signal = device->signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("Settings->signal=%d", Settings->signal);
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,94 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_linux_h_
++#define __gc_hal_kernel_linux_h_
++
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#ifdef FLAREON
++# include <asm/arch-realview/dove_gpio_irq.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <linux/dma-mapping.h>
++#include <linux/kthread.h>
++
++#ifdef MODVERSIONS
++# include <linux/modversions.h>
++#endif
++#include <asm/io.h>
++#include <asm/uaccess.h>
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++#include <linux/clk.h>
++#include <linux/regulator/consumer.h>
++#endif
++
++#define NTSTRSAFE_NO_CCH_FUNCTIONS
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_device.h"
++#include "gc_hal_kernel_os.h"
++#include "gc_hal_kernel_debugfs.h"
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
++#define FIND_TASK_BY_PID(x) pid_task(find_vpid(x), PIDTYPE_PID)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#define FIND_TASK_BY_PID(x) find_task_by_vpid(x)
++#else
++#define FIND_TASK_BY_PID(x) find_task_by_pid(x)
++#endif
++
++#define _WIDE(string) L##string
++#define WIDE(string) _WIDE(string)
++
++#define countof(a) (sizeof(a) / sizeof(a[0]))
++
++#define DRV_NAME "galcore"
++
++#define GetPageCount(size, offset) ((((size) + ((offset) & ~PAGE_CACHE_MASK)) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION (3,7,0)
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP)
++#else
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
++#endif
++
++static inline gctINT
++GetOrder(
++ IN gctINT numPages
++ )
++{
++ gctINT order = 0;
++
++ while ((1 << order) < numPages) order++;
++
++ return order;
++}
++
++#endif /* __gc_hal_kernel_linux_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_math.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_math.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_math.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_math.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,32 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ )
++{
++ if(Y ==0) {return 0;}
++ else {return X % Y;}
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,9019 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/sched.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/idr.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/workqueue.h>
++#include <linux/idr.h>
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++#include <linux/math64.h>
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/reset.h>
++static inline void imx_gpc_power_up_pu(bool flag) {}
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++#include <mach/common.h>
++#endif
++#include <linux/delay.h>
++#include <linux/pm_runtime.h>
++
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++#include <linux/file.h>
++#include "gc_hal_kernel_sync.h"
++#endif
++
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++/*******************************************************************************
++***** Version Signature *******************************************************/
++
++#ifdef ANDROID
++const char * _PLATFORM = "\n\0$PLATFORM$Android$\n";
++#else
++const char * _PLATFORM = "\n\0$PLATFORM$Linux$\n";
++#endif
++
++#define USER_SIGNAL_TABLE_LEN_INIT 64
++#define gcdSUPPRESS_OOM_MESSAGE 1
++
++#define MEMORY_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryLock, \
++ gcvINFINITE))
++
++#define MEMORY_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryLock))
++
++#define MEMORY_MAP_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryMapLock, \
++ gcvINFINITE))
++
++#define MEMORY_MAP_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryMapLock))
++
++/* Protection bit when mapping memory to user space */
++#define gcmkPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE
++#define gcmkIOREMAP ioremap_wc
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++#elif !gcdNONPAGED_MEMORY_CACHEABLE
++#define gcmkIOREMAP ioremap_nocache
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_noncached(x)
++#endif
++
++#if gcdSUPPRESS_OOM_MESSAGE
++#define gcdNOWARN __GFP_NOWARN
++#else
++#define gcdNOWARN 0
++#endif
++
++#define gcdINFINITE_TIMEOUT (60 * 1000)
++#define gcdDETECT_TIMEOUT 0
++#define gcdDETECT_DMA_ADDRESS 1
++#define gcdDETECT_DMA_STATE 1
++
++#define gcdUSE_NON_PAGED_MEMORY_CACHE 10
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++typedef struct _gcsNonPagedMemoryCache
++{
++#ifndef NO_DMA_COHERENT
++ gctINT size;
++ gctSTRING addr;
++ dma_addr_t dmaHandle;
++#else
++ long order;
++ struct page * page;
++#endif
++
++ struct _gcsNonPagedMemoryCache * prev;
++ struct _gcsNonPagedMemoryCache * next;
++}
++gcsNonPagedMemoryCache;
++#endif /* gcdUSE_NON_PAGED_MEMORY_CACHE */
++
++typedef struct _gcsUSER_MAPPING * gcsUSER_MAPPING_PTR;
++typedef struct _gcsUSER_MAPPING
++{
++ /* Pointer to next mapping structure. */
++ gcsUSER_MAPPING_PTR next;
++
++ /* Physical address of this mapping. */
++ gctUINT32 physical;
++
++ /* Logical address of this mapping. */
++ gctPOINTER logical;
++
++ /* Number of bytes of this mapping. */
++ gctSIZE_T bytes;
++
++ /* Starting address of this mapping. */
++ gctINT8_PTR start;
++
++ /* Ending address of this mapping. */
++ gctINT8_PTR end;
++}
++gcsUSER_MAPPING;
++
++typedef struct _gcsINTEGER_DB * gcsINTEGER_DB_PTR;
++typedef struct _gcsINTEGER_DB
++{
++ struct idr idr;
++ spinlock_t lock;
++ gctINT curr;
++}
++gcsINTEGER_DB;
++
++struct _gckOS
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Heap. */
++ gckHEAP heap;
++
++ /* Pointer to device */
++ gckGALDEVICE device;
++
++ /* Memory management */
++ gctPOINTER memoryLock;
++ gctPOINTER memoryMapLock;
++
++ struct _LINUX_MDL *mdlHead;
++ struct _LINUX_MDL *mdlTail;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* Signal management. */
++
++ /* Lock. */
++ gctPOINTER signalMutex;
++
++ /* signal id database. */
++ gcsINTEGER_DB signalDB;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /* Lock. */
++ gctPOINTER syncPointMutex;
++
++ /* sync point id database. */
++ gcsINTEGER_DB syncPointDB;
++#endif
++
++ gcsUSER_MAPPING_PTR userMap;
++ gctPOINTER debugLock;
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ gctUINT cacheSize;
++ gcsNonPagedMemoryCache * cacheHead;
++ gcsNonPagedMemoryCache * cacheTail;
++#endif
++
++ /* workqueue for os timer. */
++ struct workqueue_struct * workqueue;
++};
++
++typedef struct _gcsSIGNAL * gcsSIGNAL_PTR;
++typedef struct _gcsSIGNAL
++{
++ /* Kernel sync primitive. */
++ struct completion obj;
++
++ /* Manual reset flag. */
++ gctBOOL manualReset;
++
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* The owner of the signal. */
++ gctHANDLE process;
++
++ gckHARDWARE hardware;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSIGNAL;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++typedef struct _gcsSYNC_POINT * gcsSYNC_POINT_PTR;
++typedef struct _gcsSYNC_POINT
++{
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* State. */
++ atomic_t state;
++
++ /* timeline. */
++ struct sync_timeline * timeline;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSYNC_POINT;
++#endif
++
++typedef struct _gcsPageInfo * gcsPageInfo_PTR;
++typedef struct _gcsPageInfo
++{
++ struct page **pages;
++ gctUINT32_PTR pageTable;
++}
++gcsPageInfo;
++
++typedef struct _gcsOSTIMER * gcsOSTIMER_PTR;
++typedef struct _gcsOSTIMER
++{
++ struct delayed_work work;
++ gctTIMERFUNCTION function;
++ gctPOINTER data;
++} gcsOSTIMER;
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++static gctINT
++_GetProcessID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_tgid_vnr(current);
++#else
++ return current->tgid;
++#endif
++}
++
++static gctINT
++_GetThreadID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_pid_vnr(current);
++#else
++ return current->pid;
++#endif
++}
++
++static PLINUX_MDL
++_CreateMdl(
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER_ARG("ProcessID=%d", ProcessID);
++
++ mdl = (PLINUX_MDL)kzalloc(sizeof(struct _LINUX_MDL), GFP_KERNEL | gcdNOWARN);
++ if (mdl == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ mdl->pid = ProcessID;
++ mdl->maps = gcvNULL;
++ mdl->prev = gcvNULL;
++ mdl->next = gcvNULL;
++
++ gcmkFOOTER_ARG("0x%X", mdl);
++ return mdl;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ );
++
++static gceSTATUS
++_DestroyMdl(
++ IN PLINUX_MDL Mdl
++ )
++{
++ PLINUX_MDL_MAP mdlMap, next;
++
++ gcmkHEADER_ARG("Mdl=0x%X", Mdl);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Mdl != gcvNULL);
++
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ next = mdlMap->next;
++
++ gcmkVERIFY_OK(_DestroyMdlMap(Mdl, mdlMap));
++
++ mdlMap = next;
++ }
++
++ kfree(Mdl);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++static PLINUX_MDL_MAP
++_CreateMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++
++ mdlMap = (PLINUX_MDL_MAP)kmalloc(sizeof(struct _LINUX_MDL_MAP), GFP_KERNEL | gcdNOWARN);
++ if (mdlMap == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ mdlMap->pid = ProcessID;
++ mdlMap->vmaAddr = gcvNULL;
++ mdlMap->vma = gcvNULL;
++ mdlMap->count = 0;
++
++ mdlMap->next = Mdl->maps;
++ Mdl->maps = mdlMap;
++
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ )
++{
++ PLINUX_MDL_MAP prevMdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X MdlMap=0x%X", Mdl, MdlMap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(MdlMap != gcvNULL);
++ gcmkASSERT(Mdl->maps != gcvNULL);
++
++ if (Mdl->maps == MdlMap)
++ {
++ Mdl->maps = MdlMap->next;
++ }
++ else
++ {
++ prevMdlMap = Mdl->maps;
++
++ while (prevMdlMap->next != MdlMap)
++ {
++ prevMdlMap = prevMdlMap->next;
++
++ gcmkASSERT(prevMdlMap != gcvNULL);
++ }
++
++ prevMdlMap->next = MdlMap->next;
++ }
++
++ kfree(MdlMap);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++ if(Mdl == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if (mdlMap->pid == ProcessID)
++ {
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvNULL;
++}
++
++void
++OnProcessExit(
++ IN gckOS Os,
++ IN gckKERNEL Kernel
++ )
++{
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++static inline int
++is_vmalloc_addr(
++ void *Addr
++ )
++{
++ unsigned long addr = (unsigned long)Addr;
++
++ return addr >= VMALLOC_START && addr < VMALLOC_END;
++}
++#endif
++
++static void
++_NonContiguousFree(
++ IN struct page ** Pages,
++ IN gctUINT32 NumPages
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Pages=0x%X, NumPages=%d", Pages, NumPages);
++
++ gcmkASSERT(Pages != gcvNULL);
++
++ for (i = 0; i < NumPages; i++)
++ {
++ __free_page(Pages[i]);
++ }
++
++ if (is_vmalloc_addr(Pages))
++ {
++ vfree(Pages);
++ }
++ else
++ {
++ kfree(Pages);
++ }
++
++ gcmkFOOTER_NO();
++}
++
++static struct page **
++_NonContiguousAlloc(
++ IN gctUINT32 NumPages
++ )
++{
++ struct page ** pages;
++ struct page *p;
++ gctINT i, size;
++
++ gcmkHEADER_ARG("NumPages=%lu", NumPages);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
++ if (NumPages > totalram_pages)
++#else
++ if (NumPages > num_physpages)
++#endif
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ size = NumPages * sizeof(struct page *);
++
++ pages = kmalloc(size, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ pages = vmalloc(size);
++
++ if (!pages)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ }
++
++ for (i = 0; i < NumPages; i++)
++ {
++ p = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN);
++
++ if (!p)
++ {
++ _NonContiguousFree(pages, i);
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ pages[i] = p;
++ }
++
++ gcmkFOOTER_ARG("pages=0x%X", pages);
++ return pages;
++}
++
++static inline struct page *
++_NonContiguousToPage(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return Pages[Index];
++}
++
++static inline unsigned long
++_NonContiguousToPfn(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_pfn(_NonContiguousToPage(Pages, Index));
++}
++
++static inline unsigned long
++_NonContiguousToPhys(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_phys(_NonContiguousToPage(Pages, Index));
++}
++
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++
++static gctBOOL
++_AddNonPagedMemoryCache(
++ gckOS Os,
++#ifndef NO_DMA_COHERENT
++ gctINT Size,
++ gctSTRING Addr,
++ dma_addr_t DmaHandle
++#else
++ long Order,
++ struct page * Page
++#endif
++ )
++{
++ gcsNonPagedMemoryCache *cache;
++
++ if (Os->cacheSize >= gcdUSE_NON_PAGED_MEMORY_CACHE)
++ {
++ return gcvFALSE;
++ }
++
++ /* Allocate the cache record */
++ cache = (gcsNonPagedMemoryCache *)kmalloc(sizeof(gcsNonPagedMemoryCache), GFP_ATOMIC);
++
++ if (cache == gcvNULL) return gcvFALSE;
++
++#ifndef NO_DMA_COHERENT
++ cache->size = Size;
++ cache->addr = Addr;
++ cache->dmaHandle = DmaHandle;
++#else
++ cache->order = Order;
++ cache->page = Page;
++#endif
++
++ /* Add to list */
++ if (Os->cacheHead == gcvNULL)
++ {
++ cache->prev = gcvNULL;
++ cache->next = gcvNULL;
++ Os->cacheHead =
++ Os->cacheTail = cache;
++ }
++ else
++ {
++ /* Add to the tail. */
++ cache->prev = Os->cacheTail;
++ cache->next = gcvNULL;
++ Os->cacheTail->next = cache;
++ Os->cacheTail = cache;
++ }
++
++ Os->cacheSize++;
++
++ return gcvTRUE;
++}
++
++#ifndef NO_DMA_COHERENT
++static gctSTRING
++_GetNonPagedMemoryCache(
++ gckOS Os,
++ gctINT Size,
++ dma_addr_t * DmaHandle
++ )
++#else
++static struct page *
++_GetNonPagedMemoryCache(
++ gckOS Os,
++ long Order
++ )
++#endif
++{
++ gcsNonPagedMemoryCache *cache;
++#ifndef NO_DMA_COHERENT
++ gctSTRING addr;
++#else
++ struct page * page;
++#endif
++
++ if (Os->cacheHead == gcvNULL) return gcvNULL;
++
++ /* Find the right cache */
++ cache = Os->cacheHead;
++
++ while (cache != gcvNULL)
++ {
++#ifndef NO_DMA_COHERENT
++ if (cache->size == Size) break;
++#else
++ if (cache->order == Order) break;
++#endif
++
++ cache = cache->next;
++ }
++
++ if (cache == gcvNULL) return gcvNULL;
++
++ /* Remove the cache from list */
++ if (cache == Os->cacheHead)
++ {
++ Os->cacheHead = cache->next;
++
++ if (Os->cacheHead == gcvNULL)
++ {
++ Os->cacheTail = gcvNULL;
++ }
++ }
++ else
++ {
++ cache->prev->next = cache->next;
++
++ if (cache == Os->cacheTail)
++ {
++ Os->cacheTail = cache->prev;
++ }
++ else
++ {
++ cache->next->prev = cache->prev;
++ }
++ }
++
++ /* Destroy cache */
++#ifndef NO_DMA_COHERENT
++ addr = cache->addr;
++ *DmaHandle = cache->dmaHandle;
++#else
++ page = cache->page;
++#endif
++
++ kfree(cache);
++
++ Os->cacheSize--;
++
++#ifndef NO_DMA_COHERENT
++ return addr;
++#else
++ return page;
++#endif
++}
++
++static void
++_FreeAllNonPagedMemoryCache(
++ gckOS Os
++ )
++{
++ gcsNonPagedMemoryCache *cache, *nextCache;
++
++ MEMORY_LOCK(Os);
++
++ cache = Os->cacheHead;
++
++ while (cache != gcvNULL)
++ {
++ if (cache != Os->cacheTail)
++ {
++ nextCache = cache->next;
++ }
++ else
++ {
++ nextCache = gcvNULL;
++ }
++
++ /* Remove the cache from list */
++ if (cache == Os->cacheHead)
++ {
++ Os->cacheHead = cache->next;
++
++ if (Os->cacheHead == gcvNULL)
++ {
++ Os->cacheTail = gcvNULL;
++ }
++ }
++ else
++ {
++ cache->prev->next = cache->next;
++
++ if (cache == Os->cacheTail)
++ {
++ Os->cacheTail = cache->prev;
++ }
++ else
++ {
++ cache->next->prev = cache->prev;
++ }
++ }
++
++#ifndef NO_DMA_COHERENT
++ dma_free_coherent(gcvNULL,
++ cache->size,
++ cache->addr,
++ cache->dmaHandle);
++#else
++ free_pages((unsigned long)page_address(cache->page), cache->order);
++#endif
++
++ kfree(cache);
++
++ cache = nextCache;
++ }
++
++ MEMORY_UNLOCK(Os);
++}
++
++#endif /* gcdUSE_NON_PAGED_MEMORY_CACHE */
++
++/*******************************************************************************
++** Integer Id Management.
++*/
++gceSTATUS
++_AllocateIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctPOINTER KernelPointer,
++ OUT gctUINT32 *Id
++ )
++{
++ int result;
++ gctINT next;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ idr_preload(GFP_KERNEL | gcdNOWARN);
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++ result = idr_alloc(&Database->idr, KernelPointer, next, 0, GFP_ATOMIC);
++
++ if (!result)
++ {
++ Database->curr = *Id;
++ }
++
++ spin_unlock(&Database->lock);
++
++ idr_preload_end();
++
++ if (result < 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ *Id = result;
++#else
++again:
++ if (idr_pre_get(&Database->idr, GFP_KERNEL | gcdNOWARN) == 0)
++ {
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++
++ /* Try to get a id greater than current id. */
++ result = idr_get_new_above(&Database->idr, KernelPointer, next, Id);
++
++ if (!result)
++ {
++ Database->curr = *Id;
++ }
++
++ spin_unlock(&Database->lock);
++
++ if (result == -EAGAIN)
++ {
++ goto again;
++ }
++
++ if (result != 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_QueryIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER pointer;
++
++ spin_lock(&Database->lock);
++
++ pointer = idr_find(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++ if(pointer)
++ {
++ *KernelPointer = pointer;
++ return gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_OS,
++ "%s(%d) Id = %d is not found",
++ __FUNCTION__, __LINE__, Id);
++
++ return gcvSTATUS_NOT_FOUND;
++ }
++}
++
++gceSTATUS
++_DestroyIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id
++ )
++{
++ spin_lock(&Database->lock);
++
++ idr_remove(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++ return gcvSTATUS_OK;
++}
++
++static void
++_UnmapUserLogical(
++ IN gctINT Pid,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++)
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++gceSTATUS
++_QueryProcessPageTable(
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ spinlock_t *lock;
++ gctUINTPTR_T logical = (gctUINTPTR_T)Logical;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if (!current->mm)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pgd = pgd_offset(current->mm, logical);
++ if (pgd_none(*pgd) || pgd_bad(*pgd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pud = pud_offset(pgd, logical);
++ if (pud_none(*pud) || pud_bad(*pud))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pmd = pmd_offset(pud, logical);
++ if (pmd_none(*pmd) || pmd_bad(*pmd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &lock);
++ if (!pte)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ if (!pte_present(*pte))
++ {
++ pte_unmap_unlock(pte, lock);
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ *Address = (pte_pfn(*pte) << PAGE_SHIFT) | (logical & ~PAGE_MASK);
++ pte_unmap_unlock(pte, lock);
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Construct
++**
++** Construct a new gckOS object.
++**
++** INPUT:
++**
++** gctPOINTER Context
++** Pointer to the gckGALDEVICE class.
++**
++** OUTPUT:
++**
++** gckOS * Os
++** Pointer to a variable that will hold the pointer to the gckOS object.
++*/
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Os != gcvNULL);
++
++ /* Allocate the gckOS object. */
++ os = (gckOS) kmalloc(gcmSIZEOF(struct _gckOS), GFP_KERNEL | gcdNOWARN);
++
++ if (os == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ /* Zero the memory. */
++ gckOS_ZeroMemory(os, gcmSIZEOF(struct _gckOS));
++
++ /* Initialize the gckOS object. */
++ os->object.type = gcvOBJ_OS;
++
++ /* Set device device. */
++ os->device = Context;
++
++ /* IMPORTANT! No heap yet. */
++ os->heap = gcvNULL;
++
++ /* Initialize the memory lock. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryLock));
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryMapLock));
++
++ /* Create debug lock mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->debugLock));
++
++
++ os->mdlHead = os->mdlTail = gcvNULL;
++
++ /* Get the kernel process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&os->kernelProcessID));
++
++ /*
++ * Initialize the signal manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->signalMutex));
++
++ /* Initialize signal id database lock. */
++ spin_lock_init(&os->signalDB.lock);
++
++ /* Initialize signal id database. */
++ idr_init(&os->signalDB.idr);
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Initialize the sync point manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->syncPointMutex));
++
++ /* Initialize sync point id database lock. */
++ spin_lock_init(&os->syncPointDB.lock);
++
++ /* Initialize sync point id database. */
++ idr_init(&os->syncPointDB.idr);
++#endif
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ os->cacheSize = 0;
++ os->cacheHead = gcvNULL;
++ os->cacheTail = gcvNULL;
++#endif
++
++ /* Create a workqueue for os timer. */
++ os->workqueue = create_singlethread_workqueue("galcore workqueue");
++
++ if (os->workqueue == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Return pointer to the gckOS object. */
++ *Os = os;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Os=0x%X", *Os);
++ return gcvSTATUS_OK;
++
++OnError:
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (os->syncPointMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->syncPointMutex));
++ }
++#endif
++
++ if (os->signalMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->signalMutex));
++ }
++
++ if (os->heap != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckHEAP_Destroy(os->heap));
++ }
++
++ if (os->memoryMapLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryMapLock));
++ }
++
++ if (os->memoryLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryLock));
++ }
++
++ if (os->debugLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->debugLock));
++ }
++
++ if (os->workqueue != gcvNULL)
++ {
++ destroy_workqueue(os->workqueue);
++ }
++
++ kfree(os);
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Destroy
++**
++** Destroy an gckOS object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ )
++{
++ gckHEAP heap;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ _FreeAllNonPagedMemoryCache(Os);
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Destroy the sync point manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->syncPointMutex));
++#endif
++
++ /*
++ * Destroy the signal manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->signalMutex));
++
++ if (Os->heap != gcvNULL)
++ {
++ /* Mark gckHEAP as gone. */
++ heap = Os->heap;
++ Os->heap = gcvNULL;
++
++ /* Destroy the gckHEAP object. */
++ gcmkVERIFY_OK(gckHEAP_Destroy(heap));
++ }
++
++ /* Destroy the memory lock. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryMapLock));
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryLock));
++
++ /* Destroy debug lock mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->debugLock));
++
++ /* Wait for all works done. */
++ flush_workqueue(Os->workqueue);
++
++ /* Destroy work queue. */
++ destroy_workqueue(Os->workqueue);
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++
++ /* Mark the gckOS object as unknown. */
++ Os->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckOS object. */
++ kfree(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++static gctSTRING
++_CreateKernelVirtualMapping(
++ IN PLINUX_MDL Mdl
++ )
++{
++ gctSTRING addr = 0;
++ gctINT numPages = Mdl->numPages;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ if (Mdl->contiguous)
++ {
++ addr = page_address(Mdl->u.contiguousPages);
++ }
++ else
++ {
++ addr = vmap(Mdl->u.nonContiguousPages,
++ numPages,
++ 0,
++ PAGE_KERNEL);
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++ }
++#else
++ struct page ** pages;
++ gctBOOL free = gcvFALSE;
++ gctINT i;
++
++ if (Mdl->contiguous)
++ {
++ pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ return gcvNULL;
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ pages[i] = nth_page(Mdl->u.contiguousPages, i);
++ }
++
++ free = gcvTRUE;
++ }
++ else
++ {
++ pages = Mdl->u.nonContiguousPages;
++ }
++
++ /* ioremap() can't work on system memory since 2.6.38. */
++ addr = vmap(pages, numPages, 0, gcmkNONPAGED_MEMROY_PROT(PAGE_KERNEL));
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++
++ if (free)
++ {
++ kfree(pages);
++ }
++
++#endif
++
++ return addr;
++}
++
++static void
++_DestoryKernelVirtualMapping(
++ IN gctSTRING Addr
++ )
++{
++#if !gcdNONPAGED_MEMORY_CACHEABLE
++ vunmap(Addr);
++#endif
++}
++
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gctPHYS_ADDR Physical,
++ OUT gctSIZE_T * PageCount,
++ OUT gctPOINTER * Logical
++ )
++{
++ *PageCount = ((PLINUX_MDL)Physical)->numPages;
++ *Logical = _CreateKernelVirtualMapping((PLINUX_MDL)Physical);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gctPOINTER Logical
++ )
++{
++ _DestoryKernelVirtualMapping((gctSTRING)Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Allocate
++**
++** Allocate memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Do we have a heap? */
++ if (Os->heap != gcvNULL)
++ {
++ /* Allocate from the heap. */
++ gcmkONERROR(gckHEAP_Allocate(Os->heap, Bytes, Memory));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AllocateMemory(Os, Bytes, Memory));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Free
++**
++** Free allocated memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Memory=0x%X", Os, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Do we have a heap? */
++ if (Os->heap != gcvNULL)
++ {
++ /* Free from the heap. */
++ gcmkONERROR(gckHEAP_Free(Os->heap, Memory));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_FreeMemory(Os, Memory));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateMemory
++**
++** Allocate memory wrapper.
++**
++** INPUT:
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctPOINTER memory;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ if (Bytes > PAGE_SIZE)
++ {
++ memory = (gctPOINTER) vmalloc(Bytes);
++ }
++ else
++ {
++ memory = (gctPOINTER) kmalloc(Bytes, GFP_KERNEL | gcdNOWARN);
++ }
++
++ if (memory == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Return pointer to the memory allocation. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeMemory
++**
++** Free allocated memory wrapper.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Free the memory from the OS pool. */
++ if (is_vmalloc_addr(Memory))
++ {
++ vfree(Memory);
++ }
++ else
++ {
++ kfree(Memory);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapMemory
++**
++** Map physical memory into the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the logical address of the
++** mapped memory.
++*/
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (char *)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (char *)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): mdl->numPages: %d mdl->vmaAddr: 0x%X",
++ __FUNCTION__, __LINE__,
++ mdl->numPages,
++ mdlMap->vmaAddr
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (!mdlMap->vma)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): find_vma error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ up_write(&current->mm->mmap_sem);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_coherent(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): dma_mmap_coherent error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#else
++#if !gcdPAGED_MEMORY_CACHEABLE
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++# endif
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages*PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): remap_pfn_range error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ up_write(&current->mm->mmap_sem);
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ *Logical = mdlMap->vmaAddr;
++
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemory
++**
++** Unmap physical memory out of the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemoryEx(Os, Physical, Bytes, Logical, _GetProcessID());
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemoryEx
++**
++** Unmap physical memory in the specified process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** gctUINT32 PID
++** Pid of the process that opened the device and mapped this memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X PID=%d",
++ Os, Physical, Bytes, Logical, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PID != 0);
++
++ MEMORY_LOCK(Os);
++
++ if (Logical)
++ {
++ mdlMap = FindMdlMap(mdl, PID);
++
++ if (mdlMap == gcvNULL || mdlMap->vmaAddr == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ _UnmapUserLogical(PID, mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
++
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserLogical
++**
++** Unmap user logical memory out of physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemory(Os, Physical, Bytes, Logical);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateNonPagedMemory
++**
++** Allocate a number of pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that holds the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that hold the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that will hold the physical address of the
++** allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** allocation.
++*/
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctSIZE_T bytes;
++ gctINT numPages;
++ PLINUX_MDL mdl = gcvNULL;
++ PLINUX_MDL_MAP mdlMap = gcvNULL;
++ gctSTRING addr;
++#ifdef NO_DMA_COHERENT
++ struct page * page;
++ long size, order;
++ gctPOINTER vaddr;
++#endif
++ gctBOOL locked = gcvFALSE;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Align number of bytes to page size. */
++ bytes = gcmALIGN(*Bytes, PAGE_SIZE);
++
++ /* Get total number of pages.. */
++ numPages = GetPageCount(bytes, 0);
++
++ /* Allocate mdl+vector structure */
++ mdl = _CreateMdl(_GetProcessID());
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->pagedMem = 0;
++ mdl->numPages = numPages;
++
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++
++#ifndef NO_DMA_COHERENT
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ addr = _GetNonPagedMemoryCache(Os,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle);
++
++ if (addr == gcvNULL)
++#endif
++ {
++ addr = dma_alloc_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle,
++ GFP_KERNEL | gcdNOWARN);
++ }
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ if(addr == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++ locked = gcvFALSE;
++ /*Free all cache and try again*/
++ _FreeAllNonPagedMemoryCache(Os);
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++ addr = dma_alloc_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle,
++ GFP_KERNEL | gcdNOWARN);
++ }
++#endif
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ order = get_order(size);
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ page = _GetNonPagedMemoryCache(Os, order);
++
++ if (page == gcvNULL)
++#endif
++ {
++ page = alloc_pages(GFP_KERNEL | gcdNOWARN, order);
++ }
++
++ if (page == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ vaddr = (gctPOINTER)page_address(page);
++ mdl->contiguous = gcvTRUE;
++ mdl->u.contiguousPages = page;
++ addr = _CreateKernelVirtualMapping(mdl);
++ mdl->dmaHandle = virt_to_phys(vaddr);
++ mdl->kaddr = vaddr;
++ mdl->u.contiguousPages = page;
++
++#if !defined(CONFIG_PPC)
++ /* Cache invalidate. */
++ dma_sync_single_for_device(
++ gcvNULL,
++ page_to_phys(page),
++ bytes,
++ DMA_FROM_DEVICE);
++#endif
++
++ while (size > 0)
++ {
++ SetPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++#endif
++
++ if (addr == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->addr = addr;
++
++ /* Return allocated memory. */
++ *Bytes = bytes;
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ if (InUserSpace)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Only after mmap this will be valid. */
++
++ /* We need to map this to user space. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING) vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING) do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_coherent(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): dma_mmap_coherent error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#else
++ mdlMap->vma->vm_page_prot = gcmkNONPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages * PAGE_SIZE,
++ mdlMap->vma->vm_page_prot))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): remap_pfn_range error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#endif /* NO_DMA_COHERENT */
++
++ up_write(&current->mm->mmap_sem);
++
++ *Logical = mdlMap->vmaAddr;
++ }
++ else
++ {
++ *Logical = (gctPOINTER)mdl->addr;
++ }
++
++ /*
++ * Add this to a global list.
++ * Will be used by get physical address
++ * and mapuser pointer functions.
++ */
++
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to the tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdlMap != gcvNULL)
++ {
++ /* Free LINUX_MDL_MAP. */
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ if (mdl != gcvNULL)
++ {
++ /* Free LINUX_MDL. */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++ }
++
++ if (locked)
++ {
++ /* Unlock memory. */
++ MEMORY_UNLOCK(Os);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeNonPagedMemory
++**
++** Free previously allocated and mapped pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes allocated.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocated memory.
++**
++** gctPOINTER Logical
++** Logical address of the allocated memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++#ifdef NO_DMA_COHERENT
++ unsigned size;
++ gctPOINTER vaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu Physical=0x%X Logical=0x%X",
++ Os, Bytes, Physical, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert physical address into a pointer to a MDL. */
++ mdl = (PLINUX_MDL) Physical;
++
++ MEMORY_LOCK(Os);
++
++#ifndef NO_DMA_COHERENT
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ if (!_AddNonPagedMemoryCache(Os,
++ mdl->numPages * PAGE_SIZE,
++ mdl->addr,
++ mdl->dmaHandle))
++#endif
++ {
++ dma_free_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ mdl->addr,
++ mdl->dmaHandle);
++ }
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ vaddr = mdl->kaddr;
++
++ while (size > 0)
++ {
++ ClearPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ if (!_AddNonPagedMemoryCache(Os,
++ get_order(mdl->numPages * PAGE_SIZE),
++ virt_to_page(mdl->kaddr)))
++#endif
++ {
++ free_pages((unsigned long)mdl->kaddr, get_order(mdl->numPages * PAGE_SIZE));
++ }
++
++ _DestoryKernelVirtualMapping(mdl->addr);
++#endif /* NO_DMA_COHERENT */
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if (mdlMap->vmaAddr != gcvNULL)
++ {
++ /* No mapped memory exists when free nonpaged memory */
++ gcmkASSERT(0);
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ /* Remove the node from global list.. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReadRegister
++**
++** Read data from a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** OUTPUT:
++**
++** gctUINT32 * Data
++** Pointer to a variable that receives the data read from the register.
++*/
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ return gckOS_ReadRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X", Os, Core, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ *Data = readl((gctUINT8 *)Os->device->registerBases[Core] + Address);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_WriteRegister
++**
++** Write data to a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ return gckOS_WriteRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X Data=0x%08x", Os, Core, Address, Data);
++
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++
++ writel(Data, (gctUINT8 *)Os->device->registerBases[Core] + Address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPageSize
++**
++** Get the system's page size.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * PageSize
++** Pointer to a variable that will receive the system's page size.
++*/
++gceSTATUS gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(PageSize != gcvNULL);
++
++ /* Return the page size. */
++ *PageSize = (gctSIZE_T) PAGE_SIZE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageSize", *PageSize);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddress
++**
++** Get the physical system address of a corresponding virtual address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X", Os, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Query page table of current process first. */
++ status = _QueryProcessPageTable(Logical, Address);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Get current process ID. */
++ processID = _GetProcessID();
++
++ /* Route through other function. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddressProcess(Os, Logical, processID, Address));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++gckOS_AddMapping(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcsUSER_MAPPING),
++ (gctPOINTER *) &map));
++
++ map->next = Os->userMap;
++ map->physical = Physical - Os->device->baseAddress;
++ map->logical = Logical;
++ map->bytes = Bytes;
++ map->start = (gctINT8_PTR) Logical;
++ map->end = map->start + Bytes;
++
++ Os->userMap = map;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckOS_RemoveMapping(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map, prev;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ for (map = Os->userMap, prev = gcvNULL; map != gcvNULL; map = map->next)
++ {
++ if ((map->logical == Logical)
++ && (map->bytes == Bytes)
++ )
++ {
++ break;
++ }
++
++ prev = map;
++ }
++
++ if (map == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++
++ if (prev == gcvNULL)
++ {
++ Os->userMap = map->next;
++ }
++ else
++ {
++ prev->next = map->next;
++ }
++
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, map));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_ConvertLogical2Physical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ IN PLINUX_MDL Mdl,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ gctINT8_PTR base, vBase;
++ gctUINT32 offset;
++ PLINUX_MDL_MAP map;
++ gcsUSER_MAPPING_PTR userMap;
++
++ base = (Mdl == gcvNULL) ? gcvNULL : (gctINT8_PTR) Mdl->addr;
++
++ /* Check for the logical address match. */
++ if ((base != gcvNULL)
++ && ((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - base;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ /* paged memory is not mapped to kernel space. */
++ return gcvSTATUS_INVALID_ADDRESS;
++ }
++ else
++ {
++ *Physical = gcmPTR2INT(virt_to_phys(base)) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++
++ /* Walk user maps. */
++ for (userMap = Os->userMap; userMap != gcvNULL; userMap = userMap->next)
++ {
++ if (((gctINT8_PTR) Logical >= userMap->start)
++ && ((gctINT8_PTR) Logical < userMap->end)
++ )
++ {
++ *Physical = userMap->physical
++ + (gctUINT32) ((gctINT8_PTR) Logical - userMap->start);
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ if (ProcessID != Os->kernelProcessID)
++ {
++ map = FindMdlMap(Mdl, (gctINT) ProcessID);
++ vBase = (map == gcvNULL) ? gcvNULL : (gctINT8_PTR) map->vmaAddr;
++
++ /* Is the given address within that range. */
++ if ((vBase != gcvNULL)
++ && ((gctINT8_PTR) Logical >= vBase)
++ && ((gctINT8_PTR) Logical < vBase + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - vBase;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ *Physical = _NonContiguousToPhys(Mdl->u.nonContiguousPages, offset/PAGE_SIZE);
++ }
++ else
++ {
++ *Physical = page_to_phys(Mdl->u.contiguousPages) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ /* Address not yet found. */
++ return gcvSTATUS_INVALID_ADDRESS;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddressProcess
++**
++** Get the physical system address of a corresponding virtual address for a
++** given process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** gctUINT32 ProcessID
++** Process ID.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ )
++{
++ PLINUX_MDL mdl;
++ gctINT8_PTR base;
++ gceSTATUS status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X ProcessID=%d", Os, Logical, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* First try the contiguous memory pool. */
++ if (Os->device->contiguousMapped)
++ {
++ base = (gctINT8_PTR) Os->device->contiguousBase;
++
++ if (((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Os->device->contiguousSize)
++ )
++ {
++ /* Convert logical address into physical. */
++ *Address = Os->device->contiguousVidMem->baseAddress
++ + (gctINT8_PTR) Logical - base;
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Try the contiguous memory pool. */
++ mdl = (PLINUX_MDL) Os->device->contiguousPhysical;
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Walk all MDLs. */
++ for (mdl = Os->mdlHead; mdl != gcvNULL; mdl = mdl->next)
++ {
++ /* Try this MDL. */
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkONERROR(status);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPhysical
++**
++** Map a physical address into kernel space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Physical
++** Physical address of the memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the base address of the mapped
++** memory.
++*/
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctPOINTER logical;
++ PLINUX_MDL mdl;
++ gctUINT32 physical = Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* Go through our mapping to see if we know this physical address already. */
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->dmaHandle != 0)
++ {
++ if ((physical >= mdl->dmaHandle)
++ && (physical < mdl->dmaHandle + mdl->numPages * PAGE_SIZE)
++ )
++ {
++ *Logical = mdl->addr + (physical - mdl->dmaHandle);
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ if (mdl == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool = Os->device->pool;
++
++ if (Physical >= pool->phys && Physical < pool->phys + pool->size)
++ logical = (gctPOINTER)(Physical - pool->phys + pool->virt);
++ else
++ logical = gcvNULL;
++#else
++ /* Map memory as cached memory. */
++ request_mem_region(physical, Bytes, "MapRegion");
++ logical = (gctPOINTER) ioremap_nocache(physical, Bytes);
++#endif
++
++ if (logical == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Failed to map physical address 0x%08x",
++ __FUNCTION__, __LINE__, Physical
++ );
++
++ MEMORY_UNLOCK(Os);
++
++ /* Out of resources. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Return pointer to mapped memory. */
++ *Logical = logical;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapPhysical
++**
++** Unmap a previously mapped memory region from kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Pointer to the base address of the memory to unmap.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ MEMORY_LOCK(Os);
++
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->addr != gcvNULL)
++ {
++ if (Logical >= (gctPOINTER)mdl->addr
++ && Logical < (gctPOINTER)((gctSTRING)mdl->addr + mdl->numPages * PAGE_SIZE))
++ {
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ if (mdl == gcvNULL)
++ {
++ /* Unmap the memory. */
++ iounmap(Logical);
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateMutex
++**
++** Create a new mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Mutex
++** Pointer to a variable that will hold a pointer to the mutex.
++*/
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Allocate the mutex structure. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct mutex), Mutex));
++
++ /* Initialize the mutex. */
++ mutex_init(*Mutex);
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Mutex=0x%X", *Mutex);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DeleteMutex
++**
++** Delete a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mute to be deleted.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%X", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Destroy the mutex. */
++ mutex_destroy(Mutex);
++
++ /* Free the mutex structure. */
++ gcmkONERROR(gckOS_Free(Os, Mutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireMutex
++**
++** Acquire a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be acquired.
++**
++** gctUINT32 Timeout
++** Timeout value specified in milliseconds.
++** Specify the value of gcvINFINITE to keep the thread suspended
++** until the mutex has been acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ )
++{
++#if gcdDETECT_TIMEOUT
++ gctUINT32 timeout;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x Timeout=%u", Os, Mutex, Timeout);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++#if gcdDETECT_TIMEOUT
++ timeout = 0;
++
++ for (;;)
++ {
++ /* Try to acquire the mutex. */
++ if (mutex_trylock(Mutex))
++ {
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Advance the timeout. */
++ timeout += 1;
++
++ if (Timeout == gcvINFINITE)
++ {
++ if (timeout == gcdINFINITE_TIMEOUT)
++ {
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkVERIFY_OK(_VerifyDMA(
++ Os, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++#if gcdDETECT_DMA_ADDRESS
++ /* Dump only if DMA appears stuck. */
++ if (
++ (dmaAddress1 == dmaAddress2)
++#if gcdDETECT_DMA_STATE
++ && (dmaState1 == dmaState2)
++# endif
++ )
++# endif
++ {
++ gcmkVERIFY_OK(_DumpGPUState(Os, gcvCORE_MAJOR));
++
++ gcmkPRINT(
++ "%s(%d): mutex 0x%X; forced message flush.",
++ __FUNCTION__, __LINE__, Mutex
++ );
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(dmaAddress2);
++ }
++
++ timeout = 0;
++ }
++ }
++ else
++ {
++ /* Timedout? */
++ if (timeout >= Timeout)
++ {
++ break;
++ }
++ }
++
++ /* Wait for 1 millisecond. */
++ gcmkVERIFY_OK(gckOS_Delay(Os, 1));
++ }
++#else
++ if (Timeout == gcvINFINITE)
++ {
++ /* Lock the mutex. */
++ mutex_lock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ for (;;)
++ {
++ /* Try to acquire the mutex. */
++ if (mutex_trylock(Mutex))
++ {
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ if (Timeout-- == 0)
++ {
++ break;
++ }
++
++ /* Wait for 1 millisecond. */
++ gcmkVERIFY_OK(gckOS_Delay(Os, 1));
++ }
++#endif
++
++ /* Timeout. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_TIMEOUT);
++ return gcvSTATUS_TIMEOUT;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseMutex
++**
++** Release an acquired mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Release the mutex. */
++ mutex_unlock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchange
++**
++** Atomically exchange a pair of 32-bit values.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN OUT gctINT32_PTR Target
++** Pointer to the 32-bit value to exchange.
++**
++** IN gctINT32 NewValue
++** Specifies a new value for the 32-bit value pointed to by Target.
++**
++** OUT gctINT32_PTR OldValue
++** The old value of the 32-bit value pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=%u", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Exchange the pair of 32-bit values. */
++ *OldValue = (gctUINT32) atomic_xchg((atomic_t *) Target, (int) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=%u", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchangePtr
++**
++** Atomically exchange a pair of pointers.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN OUT gctPOINTER * Target
++** Pointer to the 32-bit value to exchange.
++**
++** IN gctPOINTER NewValue
++** Specifies a new value for the pointer pointed to by Target.
++**
++** OUT gctPOINTER * OldValue
++** The old value of the pointer pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=0x%X", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Exchange the pair of pointers. */
++ *OldValue = (gctPOINTER)(gctUINTPTR_T) atomic_xchg((atomic_t *) Target, (int)(gctUINTPTR_T) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=0x%X", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++#if gcdSMP
++/*******************************************************************************
++**
++** gckOS_AtomicSetMask
++**
++** Atomically set mask to Atom
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to set.
++**
++** IN gctUINT32 Mask
++** Mask to set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval | Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomClearMask
++**
++** Atomically clear mask from Atom
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to clear.
++**
++** IN gctUINT32 Mask
++** Mask to clear.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval & ~Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Allocate the atom. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(atomic_t), Atom));
++
++ /* Initialize the atom. */
++ atomic_set((atomic_t *) *Atom, 0);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Atom=0x%X", *Atom);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Free the atom. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, Atom));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable the receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Return the current value of atom. */
++ *Value = atomic_read((atomic_t *) Atom);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x Value=%d", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Set the current value of atom. */
++ atomic_set((atomic_t *) Atom, Value);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Increment the atom. */
++ *Value = atomic_inc_return((atomic_t *) Atom) - 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Decrement the atom. */
++ *Value = atomic_dec_return((atomic_t *) Atom) + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Delay
++**
++** Delay execution of the current thread for a number of milliseconds.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Delay
++** Delay to sleep, specified in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Delay=%u", Os, Delay);
++
++ if (Delay > 0)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ ktime_t delay = ktime_set(Delay/1000, (Delay%1000) * NSEC_PER_MSEC);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
++#else
++ msleep(Delay);
++#endif
++
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetTicks
++**
++** Get the number of milliseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT32_PTR Time
++** Pointer to a variable to get time.
++**
++*/
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ )
++{
++ gcmkHEADER();
++
++ *Time = jiffies_to_msecs(jiffies);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_TicksAfter
++**
++** Compare time values got from gckOS_GetTicks.
++**
++** INPUT:
++** gctUINT32 Time1
++** First time value to be compared.
++**
++** gctUINT32 Time2
++** Second time value to be compared.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR IsAfter
++** Pointer to a variable to result.
++**
++*/
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ )
++{
++ gcmkHEADER();
++
++ *IsAfter = time_after((unsigned long)Time1, (unsigned long)Time2);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetTime
++**
++** Get the number of microseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT64_PTR Time
++** Pointer to a variable to get time.
++**
++*/
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ )
++{
++ gcmkHEADER();
++
++ *Time = 0;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MemoryBarrier
++**
++** Make sure the CPU has executed everything up to this point and the data got
++** written to the specified pointer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Address
++** Address of memory that needs to be barriered.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X", Os, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE \
++ && defined (CONFIG_ARM) \
++ && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ /* drain write buffer */
++ dsb();
++
++ /* drain outer cache's write buffer? */
++#else
++ mb();
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemory
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ /* Allocate the memory. */
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(Os, gcvFALSE, Bytes, Physical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemoryEx
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL Contiguous
++** Need contiguous memory or not.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gctINT numPages;
++ gctINT i;
++ PLINUX_MDL mdl = gcvNULL;
++ gctSIZE_T bytes;
++ gctBOOL locked = gcvFALSE;
++ gceSTATUS status;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctPOINTER addr = gcvNULL;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Contiguous=%d Bytes=%lu", Os, Contiguous, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ bytes = gcmALIGN(Bytes, PAGE_SIZE);
++
++ numPages = GetPageCount(bytes, 0);
++
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++
++ mdl = _CreateMdl(_GetProcessID());
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (Contiguous)
++ {
++ gctUINT32 order = get_order(bytes);
++
++ if (order >= MAX_ORDER)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ addr =
++ alloc_pages_exact(numPages * PAGE_SIZE, GFP_KERNEL | gcdNOWARN | __GFP_NORETRY);
++
++ mdl->u.contiguousPages = addr
++ ? virt_to_page(addr)
++ : gcvNULL;
++
++ mdl->exact = gcvTRUE;
++#else
++ mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | gcdNOWARN | __GFP_NORETRY, order);
++#endif
++ if (mdl->u.contiguousPages == gcvNULL)
++ {
++ mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN, order);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ mdl->exact = gcvFALSE;
++#endif
++ }
++ }
++ else
++ {
++ mdl->u.nonContiguousPages = _NonContiguousAlloc(numPages);
++ }
++
++ if (mdl->u.contiguousPages == gcvNULL && mdl->u.nonContiguousPages == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->dmaHandle = 0;
++ mdl->addr = 0;
++ mdl->numPages = numPages;
++ mdl->pagedMem = 1;
++ mdl->contiguous = Contiguous;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ struct page *page;
++
++ if (mdl->contiguous)
++ {
++ page = nth_page(mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(mdl->u.nonContiguousPages, i);
++ }
++
++ SetPageReserved(page);
++
++ if (!PageHighMem(page) && page_to_phys(page))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
++ (gctPOINTER)(gctUINTPTR_T)page_to_phys(page),
++ page_address(page),
++ PAGE_SIZE));
++ }
++ }
++
++ /* Return physical address. */
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ /*
++ * Add this to a global list.
++ * Will be used by get physical address
++ * and mapuser pointer functions.
++ */
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdl != gcvNULL)
++ {
++ /* Free the memory. */
++ _DestroyMdl(mdl);
++ }
++
++ if (locked)
++ {
++ /* Unlock the memory. */
++ MEMORY_UNLOCK(Os);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreePagedMemory
++**
++** Free memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL) Physical;
++ gctINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /*addr = mdl->addr;*/
++
++ MEMORY_LOCK(Os);
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ if (mdl->contiguous)
++ {
++ ClearPageReserved(nth_page(mdl->u.contiguousPages, i));
++ }
++ else
++ {
++ ClearPageReserved(_NonContiguousToPage(mdl->u.nonContiguousPages, i));
++ }
++ }
++
++ if (mdl->contiguous)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ if (mdl->exact == gcvTRUE)
++ {
++ free_pages_exact(page_address(mdl->u.contiguousPages), mdl->numPages * PAGE_SIZE);
++ }
++ else
++#endif
++ {
++ __free_pages(mdl->u.contiguousPages, GetOrder(mdl->numPages));
++ }
++ }
++ else
++ {
++ _NonContiguousFree(mdl->u.nonContiguousPages, mdl->numPages);
++ }
++
++ /* Remove the node from global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Free the structure... */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_LockPages
++**
++** Lock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctBOOL Cacheable
++** Cache mode of mapping.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the address of the mapped
++** memory.
++**
++** gctSIZE_T * PageCount
++** Pointer to a variable that receives the number of pages required for
++** the page table according to the GPU page size.
++*/
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++ gctSTRING addr;
++ unsigned long start;
++ unsigned long pfn;
++ gctINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount != gcvNULL);
++
++ mdl = (PLINUX_MDL) Physical;
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): vmaAddr->0x%X for phys_addr->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++
++ if (Cacheable == gcvFALSE)
++ {
++ /* Make this mapping non-cached. */
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ }
++
++ addr = mdl->addr;
++
++ /* Now map all the vmalloc pages to this user address. */
++ if (mdl->contiguous)
++ {
++ /* map kernel memory to user space.. */
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ page_to_pfn(mdl->u.contiguousPages),
++ mdlMap->vma->vm_end - mdlMap->vma->vm_start,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): unable to mmap ret",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++ start = mdlMap->vma->vm_start;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ pfn = _NonContiguousToPfn(mdl->u.nonContiguousPages, i);
++
++ if (remap_pfn_range(mdlMap->vma,
++ start,
++ pfn,
++ PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): gctPHYS_ADDR->0x%X Logical->0x%X Unable to map addr->0x%X to start->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)Physical,
++ (gctUINT32)(gctUINTPTR_T)*Logical,
++ (gctUINT32)(gctUINTPTR_T)addr,
++ (gctUINT32)(gctUINTPTR_T)start
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ start += PAGE_SIZE;
++ addr += PAGE_SIZE;
++ }
++ }
++
++ up_write(&current->mm->mmap_sem);
++ }
++
++ mdlMap->count++;
++
++ /* Convert pointer to MDL. */
++ *Logical = mdlMap->vmaAddr;
++
++ /* Return the page number according to the GPU page size. */
++ gcmkASSERT((PAGE_SIZE % 4096) == 0);
++ gcmkASSERT((PAGE_SIZE / 4096) >= 1);
++
++ *PageCount = mdl->numPages * (PAGE_SIZE / 4096);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(gckOS_CacheFlush(
++ Os,
++ _GetProcessID(),
++ Physical,
++ gcvNULL,
++ (gctPOINTER)mdlMap->vmaAddr,
++ mdl->numPages * PAGE_SIZE
++ ));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X *PageCount=%lu", *Logical, *PageCount);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPages
++**
++** Map paged memory into a page table.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T PageCount
++** Number of pages required for the physical address.
++**
++** gctPOINTER PageTable
++** Pointer to the page table to fill in.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ )
++{
++ return gckOS_MapPagesEx(Os,
++ gcvCORE_MAJOR,
++ Physical,
++ PageCount,
++ PageTable);
++}
++
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ PLINUX_MDL mdl;
++ gctUINT32* table;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gckMMU mmu;
++ PLINUX_MDL mmuMdl;
++ gctUINT32 bytes;
++ gctPHYS_ADDR pageTablePhysical;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Physical=0x%X PageCount=%u PageTable=0x%X",
++ Os, Core, Physical, PageCount, PageTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ /* Convert pointer to MDL. */
++ mdl = (PLINUX_MDL)Physical;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Physical->0x%X PageCount->0x%X PagedMemory->?%d",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)Physical,
++ (gctUINT32)(gctUINTPTR_T)PageCount,
++ mdl->pagedMem
++ );
++
++ MEMORY_LOCK(Os);
++
++ table = (gctUINT32 *)PageTable;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ mmu = Os->device->kernels[Core]->mmu;
++ bytes = PageCount * sizeof(*table);
++ mmuMdl = (PLINUX_MDL)mmu->pageTablePhysical;
++#endif
++
++ /* Get all the physical addresses and store them in the page table. */
++
++ offset = 0;
++
++ if (mdl->pagedMem)
++ {
++ /* Try to get the user pages so DMA can happen. */
++ while (PageCount-- > 0)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ if (mdl->contiguous)
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ _NonContiguousToPhys(mdl->u.nonContiguousPages, offset),
++ table));
++ }
++ }
++ else
++#endif
++ {
++ if (mdl->contiguous)
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ _NonContiguousToPhys(mdl->u.nonContiguousPages, offset),
++ table));
++ }
++ }
++
++ table++;
++ offset += 1;
++ }
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): we should not get this call for Non Paged Memory!",
++ __FUNCTION__, __LINE__
++ );
++
++ while (PageCount-- > 0)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ table++;
++ offset += 1;
++ }
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Get physical address of pageTable */
++ pageTablePhysical = (gctPHYS_ADDR)(mmuMdl->dmaHandle +
++ ((gctUINT32 *)PageTable - mmu->pageTableLogical));
++
++ /* Flush the mmu page table cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Os,
++ _GetProcessID(),
++ gcvNULL,
++ pageTablePhysical,
++ PageTable,
++ bytes
++ ));
++#endif
++
++OnError:
++
++ MEMORY_UNLOCK(Os);
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnlockPages
++**
++** Unlock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctPOINTER Logical
++** Address of the mapped memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%u Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Make sure there is already a mapping...*/
++ gcmkVERIFY_ARGUMENT(mdl->u.nonContiguousPages != gcvNULL
++ || mdl->u.contiguousPages != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if ((mdlMap->vmaAddr != gcvNULL) && (_GetProcessID() == mdlMap->pid))
++ {
++ if (--mdlMap->count == 0)
++ {
++ _UnmapUserLogical(mdlMap->pid, mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
++ mdlMap->vmaAddr = gcvNULL;
++ }
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckOS_AllocateContiguous
++**
++** Allocate memory from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that receives the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the logical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Same as non-paged memory for now. */
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(Os,
++ InUserSpace,
++ Bytes,
++ Physical,
++ Logical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeContiguous
++**
++** Free memory allocated from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctPOINTER Logical
++** Logicval address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Same of non-paged memory for now. */
++ gcmkONERROR(gckOS_FreeNonPagedMemory(Os, Bytes, Physical, Logical));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++/******************************************************************************
++**
++** gckOS_GetKernelLogical
++**
++** Return the kernel logical pointer that corresponods to the specified
++** hardware address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Hardware physical address.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the pointer in kernel address space.
++*/
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ return gckOS_GetKernelLogicalEx(Os, gcvCORE_MAJOR, Address, KernelPointer);
++}
++
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%08x", Os, Core, Address);
++
++ do
++ {
++ gckGALDEVICE device;
++ gckKERNEL kernel;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctPOINTER logical;
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Os->device;
++
++ /* Kernel shortcut. */
++ kernel = device->kernels[Core];
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_SplitMemory(
++ kernel->vg->hardware, Address, &pool, &offset
++ ));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkERR_BREAK(gckHARDWARE_SplitMemory(
++ kernel->hardware, Address, &pool, &offset
++ ));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ logical = device->contiguousBase;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkFOOTER();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Build logical address of specified address. */
++ * KernelPointer = ((gctUINT8_PTR) logical) + offset;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_MapUserPointer
++**
++** Map a pointer from the user process into the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be mapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be mapped.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the mapped pointer in kernel address
++** space.
++*/
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER buf = gcvNULL;
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu", Os, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ buf = kmalloc(Size, GFP_KERNEL | gcdNOWARN);
++ if (buf == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to allocate memory.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ len = copy_from_user(buf, Pointer, Size);
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data from user.",
++ __FUNCTION__, __LINE__
++ );
++
++ if (buf != gcvNULL)
++ {
++ kfree(buf);
++ }
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ *KernelPointer = buf;
++
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserPointer
++**
++** Unmap a user process pointer from the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be unmapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be unmapped.
++**
++** gctPOINTER KernelPointer
++** Pointer in kernel address space that needs to be unmapped.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ )
++{
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu KernelPointer=0x%X",
++ Os, Pointer, Size, KernelPointer);
++
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ len = copy_to_user(Pointer, KernelPointer, Size);
++
++ kfree(KernelPointer);
++
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data to user.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++** Query whether the memory can be accessed or mapped directly or it has to be
++** copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++** gcvFALSE if the memory can be accessed or mapped dircetly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d", Os, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(NeedCopy != gcvNULL);
++
++ /* We need to copy data. */
++ *NeedCopy = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedCopy=%d", *NeedCopy);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data from user. */
++ if (copy_from_user(KernelPointer, Pointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data to user. */
++ if (copy_to_user(Pointer, KernelPointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WriteMemory
++**
++** Write data to a memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Address
++** Address of the memory to write to.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X Data=%u", Os, Address, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Write memory. */
++ if (access_ok(VERIFY_WRITE, Address, 4))
++ {
++ /* User address. */
++ if(put_user(Data, (gctUINT32*)Address))
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++ }
++ else
++ {
++ /* Kernel address. */
++ *(gctUINT32 *)Address = Data;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapUserMemory
++**
++** Lock down a user buffer and return an DMA'able address to be used by the
++** hardware to access it.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to lock down.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to lock down.
++**
++** OUTPUT:
++**
++** gctPOINTER * Info
++** Pointer to variable receiving the information record required by
++** gckOS_UnmapUserMemory.
++**
++** gctUINT32_PTR Address
++** Pointer to a variable that will receive the address DMA'able by the
++** hardware.
++*/
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x Core=%d Memory=0x%x Size=%lu", Os, Core, Memory, Size);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_AddMapping(Os, *Address, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctSIZE_T pageCount, i, j;
++ gctUINT32_PTR pageTable;
++ gctUINT32 address = 0, physical = ~0U;
++ gctUINTPTR_T start, end, memory;
++ gctUINT32 offset;
++ gctINT result = 0;
++
++ gcsPageInfo_PTR info = gcvNULL;
++ struct page **pages = gcvNULL;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL || Physical != ~0U);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ memory = (gctUINTPTR_T) Memory;
++
++ /* Get the number of required pages. */
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageCount: %d.",
++ __FUNCTION__, __LINE__,
++ pageCount
++ );
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ MEMORY_MAP_LOCK(Os);
++
++ /* Allocate the Info struct. */
++ info = (gcsPageInfo_PTR)kmalloc(sizeof(gcsPageInfo), GFP_KERNEL | gcdNOWARN);
++
++ if (info == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ /* Allocate the array of page addresses. */
++ pages = (struct page **)kmalloc(pageCount * sizeof(struct page *), GFP_KERNEL | gcdNOWARN);
++
++ if (pages == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ if (Physical != ~0U)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ pages[i] = pfn_to_page((Physical >> PAGE_SHIFT) + i);
++ get_page(pages[i]);
++ }
++ }
++ else
++ {
++ /* Get the user pages. */
++ down_read(&current->mm->mmap_sem);
++
++ result = get_user_pages(current,
++ current->mm,
++ memory & PAGE_MASK,
++ pageCount,
++ 1,
++ 0,
++ pages,
++ gcvNULL
++ );
++
++ up_read(&current->mm->mmap_sem);
++
++ if (result <=0 || result < pageCount)
++ {
++ struct vm_area_struct *vma;
++
++ /* Release the pages if any. */
++ if (result > 0)
++ {
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++
++ page_cache_release(pages[i]);
++ pages[i] = gcvNULL;
++ }
++
++ result = 0;
++ }
++
++ vma = find_vma(current->mm, memory);
++
++ if (vma && (vma->vm_flags & VM_PFNMAP))
++ {
++ pte_t * pte;
++ spinlock_t * ptl;
++ gctUINTPTR_T logical = memory;
++
++ for (i = 0; i < pageCount; i++)
++ {
++ pgd_t * pgd = pgd_offset(current->mm, logical);
++ pud_t * pud = pud_offset(pgd, logical);
++
++ if (pud)
++ {
++ pmd_t * pmd = pmd_offset(pud, logical);
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &ptl);
++ if (!pte)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ pages[i] = pte_page(*pte);
++ pte_unmap_unlock(pte, ptl);
++
++ /* Advance to next. */
++ logical += PAGE_SIZE;
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Check if this memory is contiguous for old mmu. */
++ if (Os->device->kernels[Core]->hardware->mmuVersion == 0)
++ {
++ for (i = 1; i < pageCount; i++)
++ {
++ if (pages[i] != nth_page(pages[0], i))
++ {
++ /* Non-contiguous. */
++ break;
++ }
++ }
++
++ if (i == pageCount)
++ {
++ /* Contiguous memory. */
++ physical = page_to_phys(pages[0]) | (memory & ~PAGE_MASK);
++
++ if (!((physical - Os->device->baseAddress) & 0x80000000))
++ {
++ kfree(pages);
++ pages = gcvNULL;
++
++ info->pages = gcvNULL;
++ info->pageTable = gcvNULL;
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ *Address = physical - Os->device->baseAddress;
++ *Info = info;
++
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x",
++ *Info, *Address);
++
++ return gcvSTATUS_OK;
++ }
++ }
++ }
++
++ /* Reference pages. */
++ for (i = 0; i < pageCount; i++)
++ {
++ get_page(pages[i]);
++ }
++ }
++ }
++
++ for (i = 0; i < pageCount; i++)
++ {
++#ifdef CONFIG_ARM
++ gctUINT32 data;
++ get_user(data, (gctUINT32*)((memory & PAGE_MASK) + i * PAGE_SIZE));
++#endif
++
++ /* Flush(clean) the data cache. */
++ gcmkONERROR(gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
++ (gctPOINTER)(gctUINTPTR_T)page_to_phys(pages[i]),
++ (gctPOINTER)(memory & PAGE_MASK) + i*PAGE_SIZE,
++ PAGE_SIZE));
++ }
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckVGMMU_AllocatePages(Os->device->kernels[Core]->vg->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++ }
++ else
++#endif
++ {
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckMMU_AllocatePages(Os->device->kernels[Core]->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++ }
++
++ /* Fill the page table. */
++ for (i = 0; i < pageCount; i++)
++ {
++ gctUINT32 phys;
++ gctUINT32_PTR tab = pageTable + i * (PAGE_SIZE/4096);
++
++ phys = page_to_phys(pages[i]);
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ phys,
++ tab));
++ }
++ else
++#endif
++ {
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ phys,
++ tab));
++ }
++
++ for (j = 1; j < (PAGE_SIZE/4096); j++)
++ {
++ pageTable[i * (PAGE_SIZE/4096) + j] = pageTable[i * (PAGE_SIZE/4096)] + 4096 * j;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageTable[%d]: 0x%X 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, phys, pageTable[i]);
++ }
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Os->device->kernels[Core]->vg->mmu));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckMMU_Flush(Os->device->kernels[Core]->mmu));
++ }
++
++ /* Save pointer to page table. */
++ info->pageTable = pageTable;
++ info->pages = pages;
++
++ *Info = (gctPOINTER) info;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info->pages: 0x%X, info->pageTable: 0x%X, info: 0x%X.",
++ __FUNCTION__, __LINE__,
++ info->pages,
++ info->pageTable,
++ info
++ );
++
++ offset = (Physical != ~0U)
++ ? (Physical & ~PAGE_MASK)
++ : (memory & ~PAGE_MASK);
++
++ /* Return address. */
++ *Address = address + offset;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Address: 0x%X.",
++ __FUNCTION__, __LINE__,
++ *Address
++ );
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++OnError:
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error occured: %d.",
++ __FUNCTION__, __LINE__,
++ status
++ );
++
++ /* Release page array. */
++ if (result > 0 && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: page table is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++ page_cache_release(pages[i]);
++ }
++ }
++
++ if (info!= gcvNULL && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: pages is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page table. */
++ kfree(pages);
++ info->pages = gcvNULL;
++ }
++
++ /* Release page info struct. */
++ if (info != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: info is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page info struct. */
++ kfree(info);
++ *Info = gcvNULL;
++ }
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x", *Info, *Address);
++ }
++ else
++ {
++ gcmkFOOTER();
++ }
++
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserMemory
++**
++** Unlock a user buffer and that was previously locked down by
++** gckOS_MapUserMemory.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to unlock.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to unlock.
++**
++** gctPOINTER Info
++** Information record returned by gckOS_MapUserMemory.
++**
++** gctUINT32_PTR Address
++** The address returned by gckOS_MapUserMemory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Memory=0x%X Size=%lu Info=0x%X Address0x%08x",
++ Os, Core, Memory, Size, Info, Address);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_RemoveMapping(Os, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctUINTPTR_T memory, start, end;
++ gcsPageInfo_PTR info;
++ gctSIZE_T pageCount, i;
++ struct page **pages;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ do
++ {
++ info = (gcsPageInfo_PTR) Info;
++
++ pages = info->pages;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info=0x%X, pages=0x%X.",
++ __FUNCTION__, __LINE__,
++ info, pages
++ );
++
++ /* Invalid page array. */
++ if (pages == gcvNULL && info->pageTable == gcvNULL)
++ {
++ kfree(info);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ memory = (gctUINTPTR_T)Memory;
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): memory: 0x%X, pageCount: %d, pageTable: 0x%X.",
++ __FUNCTION__, __LINE__,
++ memory, pageCount, info->pageTable
++ );
++
++ MEMORY_MAP_LOCK(Os);
++
++ gcmkASSERT(info->pageTable != gcvNULL);
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Free the pages from the MMU. */
++ gcmkERR_BREAK(gckVGMMU_FreePages(Os->device->kernels[Core]->vg->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++ gcmkERR_BREAK(gckMMU_FreePages(Os->device->kernels[Core]->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++ }
++
++ /* Release the page cache. */
++ if (pages)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pages[%d]: 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, pages[i]
++ );
++
++ if (!PageReserved(pages[i]))
++ {
++ SetPageDirty(pages[i]);
++ }
++
++ page_cache_release(pages[i]);
++ }
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ if (info != gcvNULL)
++ {
++ /* Free the page array. */
++ if (info->pages != gcvNULL)
++ {
++ kfree(info->pages);
++ }
++
++ kfree(info);
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_GetBaseAddress
++**
++** Get the base address for the physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR BaseAddress
++** Pointer to a variable that will receive the base address.
++*/
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Return base address. */
++ *BaseAddress = Os->device->baseAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_SuspendInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ disable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_ResumeInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ enable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Destination=0x%X Source=0x%X Bytes=%lu",
++ Destination, Source, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Destination != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Source != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memcpy(Destination, Source, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X Bytes=%lu", Memory, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memset(Memory, 0, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************* Cache Control ********************************
++*******************************************************************************/
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED && defined(CONFIG_OUTER_CACHE)
++static inline gceSTATUS
++outer_func(
++ gceCACHEOPERATION Type,
++ unsigned long Start,
++ unsigned long End
++ )
++{
++ switch (Type)
++ {
++ case gcvCACHE_CLEAN:
++ outer_clean_range(Start, End);
++ break;
++ case gcvCACHE_INVALIDATE:
++ outer_inv_range(Start, End);
++ break;
++ case gcvCACHE_FLUSH:
++ outer_flush_range(Start, End);
++ break;
++ default:
++ return gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_OUTER_CACHE_PATCH
++/*******************************************************************************
++** _HandleOuterCache
++**
++** Handle the outer cache for the specified addresses.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++**
++** gceOUTERCACHE_OPERATION Type
++** Operation need to be execute.
++*/
++static gceSTATUS
++_HandleOuterCache(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Type
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i, pageNum;
++ unsigned long paddr;
++ gctPOINTER vaddr;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ if (Physical != gcvNULL)
++ {
++ /* Non paged memory or gcvPOOL_USER surface */
++ paddr = (unsigned long) Physical;
++ gcmkONERROR(outer_func(Type, paddr, paddr + Bytes));
++ }
++ else if ((Handle == gcvNULL)
++ || (Handle != gcvNULL && ((PLINUX_MDL)Handle)->contiguous)
++ )
++ {
++ /* Video Memory or contiguous virtual memory */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Os, Logical, (gctUINT32*)&paddr));
++ gcmkONERROR(outer_func(Type, paddr, paddr + Bytes));
++ }
++ else
++ {
++ /* Non contiguous virtual memory */
++ vaddr = (gctPOINTER)gcmALIGN_BASE((gctUINTPTR_T)Logical, PAGE_SIZE);
++ pageNum = GetPageCount(Bytes, 0);
++
++ for (i = 0; i < pageNum; i += 1)
++ {
++ gcmkONERROR(_ConvertLogical2Physical(
++ Os,
++ vaddr + PAGE_SIZE * i,
++ ProcessID,
++ (PLINUX_MDL)Handle,
++ (gctUINT32*)&paddr
++ ));
++
++ gcmkONERROR(outer_func(Type, paddr, paddr + PAGE_SIZE));
++ }
++ }
++
++ mb();
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++#endif
++
++/*******************************************************************************
++** gckOS_CacheClean
++**
++** Clean the cache for the specified addresses. The GPU is going to need the
++** data. If the system is allocating memory as non-cachable, this function can
++** be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheClean(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_TO_DEVICE);
++# else
++ dmac_clean_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_CLEAN);
++#else
++ outer_clean_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++
++ dma_cache_wback((unsigned long) Logical, Bytes);
++
++#elif defined(CONFIG_PPC)
++
++ /* TODO */
++
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_TO_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheInvalidate
++**
++** Invalidate the cache for the specified addresses. The GPU is going to need
++** data. If the system is allocating memory as non-cachable, this function can
++** be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheInvalidate(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_FROM_DEVICE);
++# else
++ dmac_inv_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_INVALIDATE);
++#else
++ outer_inv_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_FROM_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheFlush
++**
++** Clean the cache for the specified addresses and invalidate the lines as
++** well. The GPU is going to need and modify the data. If the system is
++** allocating memory as non-cachable, this function can be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheFlush(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++ /* Inner cache. */
++ dmac_flush_range(Logical, Logical + Bytes);
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_FLUSH);
++#else
++ outer_flush_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_wback_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_BIDIRECTIONAL);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************* Broadcasting *********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Broadcast
++**
++** System hook for broadcast events from the kernel driver.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gceBROADCAST Reason
++** Reason for the broadcast. Can be one of the following values:
++**
++** gcvBROADCAST_GPU_IDLE
++** Broadcasted when the kernel driver thinks the GPU might be
++** idle. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_COMMIT
++** Broadcasted when any client process commits a command
++** buffer. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_STUCK
++** Broadcasted when the kernel driver hits the timeout waiting
++** for the GPU.
++**
++** gcvBROADCAST_FIRST_PROCESS
++** First process is trying to connect to the kernel.
++**
++** gcvBROADCAST_LAST_PROCESS
++** Last process has detached from the kernel.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Hardware=0x%X Reason=%d", Os, Hardware, Reason);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ switch (Reason)
++ {
++ case gcvBROADCAST_FIRST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "First process has attached");
++ break;
++
++ case gcvBROADCAST_LAST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "Last process has detached");
++
++ /* Put GPU OFF. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++ gcvPOWER_OFF_BROADCAST));
++ break;
++
++ case gcvBROADCAST_GPU_IDLE:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "GPU idle.");
++
++ /* Put GPU IDLE. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++#if gcdPOWER_SUSNPEND_WHEN_IDLE
++ gcvPOWER_SUSPEND_BROADCAST));
++#else
++ gcvPOWER_IDLE_BROADCAST));
++#endif
++
++ /* Add idle process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 1,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++ break;
++
++ case gcvBROADCAST_GPU_COMMIT:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "COMMIT has arrived.");
++
++ /* Add busy process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 0,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++
++ /* Put GPU ON. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware, gcvPOWER_ON_AUTO));
++ break;
++
++ case gcvBROADCAST_GPU_STUCK:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_GPU_STUCK\n");
++#if !gcdENABLE_RECOVERY
++ gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware));
++#endif
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++
++ case gcvBROADCAST_AXI_BUS_ERROR:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_AXI_BUS_ERROR\n");
++ gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware));
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_BroadcastHurry
++**
++** The GPU is running too slow.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Urgency
++** The higher the number, the higher the urgency to speed up the GPU.
++** The maximum value is defined by the gcdDYNAMIC_EVENT_THRESHOLD.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Urgency=%u", Os, Hardware, Urgency);
++
++ /* Do whatever you need to do to speed up the GPU now. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_BroadcastCalibrateSpeed
++**
++** Calibrate the speed of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Idle, Time
++** Idle/Time will give the percentage the GPU is idle, so you can use
++** this to calibrate the working point of the GPU.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Idle=%u Time=%u",
++ Os, Hardware, Idle, Time);
++
++ /* Do whatever you need to do to callibrate the GPU speed. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************** Semaphores **********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_CreateSemaphore
++**
++** Create a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Semaphore
++** Pointer to the variable that will receive the created semaphore.
++*/
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore *sem = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Allocate the semaphore structure. */
++ sem = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (sem == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(sem, 1);
++
++ /* Return to caller. */
++ *Semaphore = (gctPOINTER) sem;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireSemaphore
++**
++** Acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore thet needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%08X Semaphore=0x%08X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_interruptible((struct semaphore *) Semaphore))
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_TryAcquireSemaphore
++**
++** Try to acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore thet needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_trylock((struct semaphore *) Semaphore))
++ {
++ /* Timeout. */
++ status = gcvSTATUS_TIMEOUT;
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseSemaphore
++**
++** Release a previously acquired semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore thet needs to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Release the semaphore. */
++ up((struct semaphore *) Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySemaphore
++**
++** Destroy a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore thet needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Free the sempahore structure. */
++ kfree(Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ )
++{
++ /* Get process ID. */
++ if (ProcessID != gcvNULL)
++ {
++ *ProcessID = _GetProcessID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ )
++{
++ /* Get thread ID. */
++ if (ThreadID != gcvNULL)
++ {
++ *ThreadID = _GetThreadID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ )
++{
++ struct clk *clk_3dcore = Os->device->clk_3d_core;
++ struct clk *clk_3dshader = Os->device->clk_3d_shader;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct clk *clk_3d_axi = Os->device->clk_3d_axi;
++#endif
++ struct clk *clk_2dcore = Os->device->clk_2d_core;
++ struct clk *clk_2d_axi = Os->device->clk_2d_axi;
++ struct clk *clk_vg_axi = Os->device->clk_vg_axi;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ int ret;
++#endif
++
++ gctBOOL oldClockState = gcvFALSE;
++ gctBOOL oldPowerState = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Clock=%d Power=%d", Os, Core, Clock, Power);
++
++ if (Os->device->kernels[Core] != NULL)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ oldClockState = Os->device->kernels[Core]->vg->hardware->clockState;
++ oldPowerState = Os->device->kernels[Core]->vg->hardware->powerState;
++ }
++ else
++ {
++#endif
++ oldClockState = Os->device->kernels[Core]->hardware->clockState;
++ oldPowerState = Os->device->kernels[Core]->hardware->powerState;
++#if gcdENABLE_VG
++ }
++#endif
++ }
++ if((Power == gcvTRUE) && (oldPowerState == gcvFALSE))
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(Os->device->gpu_regulator)) {
++ ret = regulator_enable(Os->device->gpu_regulator);
++ if (ret != 0)
++ gckOS_Print("%s(%d): fail to enable pu regulator %d!\n",
++ __FUNCTION__, __LINE__, ret);
++ }
++#else
++ imx_gpc_power_up_pu(true);
++#endif
++
++#ifdef CONFIG_PM
++ pm_runtime_get_sync(Os->device->pmdev);
++#endif
++ }
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Clock == gcvTRUE) {
++ if (oldClockState == gcvFALSE) {
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_enable(clk_3dcore);
++ if (cpu_is_mx6q())
++ clk_enable(clk_3dshader);
++ break;
++ case gcvCORE_2D:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++ } else {
++ if (oldClockState == gcvTRUE) {
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ if (cpu_is_mx6q())
++ clk_disable(clk_3dshader);
++ clk_disable(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++ }
++#else
++ if (Clock == gcvTRUE) {
++ if (oldClockState == gcvFALSE) {
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_prepare_enable(clk_3dcore);
++ clk_prepare_enable(clk_3dshader);
++ clk_prepare_enable(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_prepare_enable(clk_2dcore);
++ clk_prepare_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_prepare_enable(clk_2dcore);
++ clk_prepare_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++ } else {
++ if (oldClockState == gcvTRUE) {
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_disable_unprepare(clk_3d_axi);
++ clk_disable_unprepare(clk_3dshader);
++ clk_disable_unprepare(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable_unprepare(clk_2d_axi);
++ clk_disable_unprepare(clk_2dcore);
++ break;
++ case gcvCORE_VG:
++ clk_disable_unprepare(clk_vg_axi);
++ clk_disable_unprepare(clk_2dcore);
++ break;
++ default:
++ break;
++ }
++ }
++ }
++#endif
++ if((Power == gcvFALSE) && (oldPowerState == gcvTRUE))
++ {
++#ifdef CONFIG_PM
++ pm_runtime_put_sync(Os->device->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(Os->device->gpu_regulator))
++ regulator_disable(Os->device->gpu_regulator);
++#else
++ imx_gpc_power_up_pu(false);
++#endif
++
++ }
++ /* TODO: Put your code here. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ResetGPU
++**
++** Reset the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose power is set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#define SRC_SCR_OFFSET 0
++#define BP_SRC_SCR_GPU3D_RST 1
++#define BP_SRC_SCR_GPU2D_RST 4
++ void __iomem *src_base = IO_ADDRESS(SRC_BASE_ADDR);
++ gctUINT32 bit_offset,val;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ if(Core == gcvCORE_MAJOR) {
++ bit_offset = BP_SRC_SCR_GPU3D_RST;
++ } else if((Core == gcvCORE_VG)
++ ||(Core == gcvCORE_2D)) {
++ bit_offset = BP_SRC_SCR_GPU2D_RST;
++ } else {
++ return gcvSTATUS_INVALID_CONFIG;
++ }
++ val = __raw_readl(src_base + SRC_SCR_OFFSET);
++ val &= ~(1 << (bit_offset));
++ val |= (1 << (bit_offset));
++ __raw_writel(val, src_base + SRC_SCR_OFFSET);
++
++ while ((__raw_readl(src_base + SRC_SCR_OFFSET) &
++ (1 << (bit_offset))) != 0) {
++ }
++
++ gcmkFOOTER_NO();
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct reset_control *rstc = Os->device->rstc[Core];
++ if (rstc)
++ reset_control_reset(rstc);
++#else
++ imx_src_reset_gpu((int)Core);
++#endif
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_PrepareGPUFrequency
++**
++** Prepare to set GPU frequency and voltage.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose frequency and voltage will be set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_FinishGPUFrequency
++**
++** Finish GPU frequency setting.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose frequency and voltage is set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryGPUFrequency
++**
++** Query the current frequency of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose power is set.
++**
++** gctUINT32 * Frequency
++** Pointer to a gctUINT32 to obtain current frequency, in MHz.
++**
++** gctUINT8 * Scale
++** Pointer to a gctUINT8 to obtain current scale(1 - 64).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUFrequency
++**
++** Set frequency and voltage of the GPU.
++**
++** 1. DVFS manager gives the target scale of full frequency, BSP must find
++** a real frequency according to this scale and board's configure.
++**
++** 2. BSP should find a suitable voltage for this frequency.
++**
++** 3. BSP must make sure setting take effect before this function returns.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose power is set.
++**
++** gctUINT8 Scale
++** Target scale of full frequency, range is [1, 64]. 1 means 1/64 of
++** full frequency and 64 means 64/64 of full frequency.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ )
++{
++ struct timespec time;
++
++ ktime_get_ts(&time);
++
++ *Tick = time.tv_nsec + time.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ )
++{
++ struct timespec res;
++
++ hrtimer_get_res(CLOCK_MONOTONIC, &res);
++
++ *TickRate = res.tv_nsec + res.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ )
++{
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++ return div_u64(Ticks, 1000000);
++#else
++ gctUINT64 rem = Ticks;
++ gctUINT64 b = 1000000;
++ gctUINT64 res, d = 1;
++ gctUINT32 high = rem >> 32;
++
++ /* Reduce the thing a bit first */
++ res = 0;
++ if (high >= 1000000)
++ {
++ high /= 1000000;
++ res = (gctUINT64) high << 32;
++ rem -= (gctUINT64) (high * 1000000) << 32;
++ }
++
++ while (((gctINT64) b > 0) && (b < rem))
++ {
++ b <<= 1;
++ d <<= 1;
++ }
++
++ do
++ {
++ if (rem >= b)
++ {
++ rem -= b;
++ res += d;
++ }
++
++ b >>= 1;
++ d >>= 1;
++ }
++ while (d);
++
++ return (gctUINT32) res;
++#endif
++}
++
++/******************************************************************************\
++******************************* Signal Management ******************************
++\******************************************************************************/
++
++#undef _GC_OBJ_ZONE
++#define _GC_OBJ_ZONE gcvZONE_SIGNAL
++
++/*******************************************************************************
++**
++** gckOS_CreateSignal
++**
++** Create a new signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctSIGNAL * Signal
++** Pointer to a variable receiving the created gctSIGNAL.
++*/
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X ManualReset=%d", Os, ManualReset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Create an event structure. */
++ signal = (gcsSIGNAL_PTR) kmalloc(sizeof(gcsSIGNAL), GFP_KERNEL | gcdNOWARN);
++
++ if (signal == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Save the process ID. */
++ signal->process = (gctHANDLE)(gctUINTPTR_T) _GetProcessID();
++ signal->manualReset = ManualReset;
++ signal->hardware = gcvNULL;
++ init_completion(&signal->obj);
++ atomic_set(&signal->ref, 1);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->signalDB, signal, &signal->id));
++
++ *Signal = (gctSIGNAL)(gctUINTPTR_T)signal->id;
++
++ gcmkFOOTER_ARG("*Signal=0x%X", *Signal);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ kfree(signal);
++ }
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ *Hardware = signal->hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ signal->hardware = Hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySignal
++**
++** Destroy a signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X", Os, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (atomic_dec_and_test(&signal->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->signalDB, signal->id));
++
++        /* Free the signal. */
++ kfree(signal);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Signal
++**
++** Set a state of the specified signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X State=%d", Os, Signal, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (State)
++ {
++ /* unbind the signal from hardware. */
++ signal->hardware = gcvNULL;
++
++ /* Set the event to a signaled state. */
++ complete(&signal->obj);
++ }
++ else
++ {
++ /* Set the event to an unsignaled state. */
++ reinit_completion(&signal->obj);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_SetSignalVG(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signals with numbers between 32 and 63 are real-time,
++ send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ printk("gckOS_SetSignalVG:0x%x\n", result);
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_UserSignal
++**
++** Set the specified signal which is owned by a process to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ )
++{
++ gceSTATUS status;
++ gctSIGNAL signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=%d",
++ Os, Signal, (gctINT32)(gctUINTPTR_T)Process);
++
++ /* Map the signal into kernel space. */
++ gcmkONERROR(gckOS_MapSignal(Os, Signal, Process, &signal));
++
++ /* Signal. */
++ status = gckOS_Signal(Os, signal, gcvTRUE);
++
++ /* Unmap the signal */
++ gcmkVERIFY_OK(gckOS_UnmapSignal(Os, Signal));
++
++ gcmkFOOTER();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitSignal
++**
++** Wait for a signal to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Wait=0x%08X", Os, Signal, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ might_sleep();
++
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ else if (Wait == 0)
++ {
++ status = gcvSTATUS_TIMEOUT;
++ }
++ else
++ {
++ /* Convert wait to milliseconds. */
++#if gcdDETECT_TIMEOUT
++ gctINT timeout = (Wait == gcvINFINITE)
++ ? gcdINFINITE_TIMEOUT * HZ / 1000
++ : Wait * HZ / 1000;
++
++ gctUINT complained = 0;
++#else
++ gctINT timeout = (Wait == gcvINFINITE)
++ ? MAX_SCHEDULE_TIMEOUT
++ : Wait * HZ / 1000;
++#endif
++
++ DECLARE_WAITQUEUE(wait, current);
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&signal->obj.wait, &wait);
++
++ while (gcvTRUE)
++ {
++ if (signal_pending(current))
++ {
++ /* Interrupt received. */
++ status = gcvSTATUS_INTERRUPTED;
++ break;
++ }
++
++ __set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(&signal->obj.wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++#if gcdDETECT_TIMEOUT
++ if ((Wait == gcvINFINITE) && (timeout == 0))
++ {
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkVERIFY_OK(_VerifyDMA(
++ Os, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++#if gcdDETECT_DMA_ADDRESS
++ /* Dump only if DMA appears stuck. */
++ if (
++ (dmaAddress1 == dmaAddress2)
++#if gcdDETECT_DMA_STATE
++ && (dmaState1 == dmaState2)
++#endif
++ )
++#endif
++ {
++ /* Increment complain count. */
++ complained += 1;
++
++ gcmkVERIFY_OK(_DumpGPUState(Os, gcvCORE_MAJOR));
++
++ gcmkPRINT(
++ "%s(%d): signal 0x%X; forced message flush (%d).",
++ __FUNCTION__, __LINE__, Signal, complained
++ );
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(dmaAddress2);
++ }
++
++ /* Reset timeout. */
++ timeout = gcdINFINITE_TIMEOUT * HZ / 1000;
++ }
++#endif
++
++ if (timeout == 0)
++ {
++
++ status = gcvSTATUS_TIMEOUT;
++ break;
++ }
++ }
++
++ __remove_wait_queue(&signal->obj.wait, &wait);
++
++#if gcdDETECT_TIMEOUT
++ if (complained)
++ {
++ gcmkPRINT(
++ "%s(%d): signal=0x%X; waiting done; status=%d",
++ __FUNCTION__, __LINE__, Signal, status
++ );
++ }
++#endif
++ }
++
++ spin_unlock_irq(&signal->obj.wait.lock);
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER_ARG("Signal=0x%X status=%d", Signal, status);
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapSignal
++**
++** Map a signal in to the current process space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++**          Pointer to the gctSIGNAL to map.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** gctSIGNAL * MappedSignal
++** Pointer to a variable receiving the mapped gctSIGNAL.
++*/
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=0x%X", Os, Signal, Process);
++
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(MappedSignal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ if(atomic_inc_return(&signal->ref) <= 1)
++ {
++ /* The previous value is 0, it has been deleted. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ *MappedSignal = (gctSIGNAL) Signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*MappedSignal=0x%X", *MappedSignal);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapSignal
++**
++**  Unmap a signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to that gctSIGNAL mapped.
++*/
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ return gckOS_DestroySignal(Os, Signal);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateUserSignal
++**
++** Create a new signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctINT * SignalID
++** Pointer to a variable receiving the created signal's ID.
++*/
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T signal;
++
++ /* Create a new signal. */
++ status = gckOS_CreateSignal(Os, ManualReset, (gctSIGNAL *) &signal);
++ *SignalID = (gctINT) signal;
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyUserSignal
++**
++** Destroy a signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctINT SignalID
++** The signal's ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ )
++{
++ return gckOS_DestroySignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID);
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitUserSignal
++**
++** Wait for a signal used in the user mode to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctINT SignalID
++** Signal ID.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ )
++{
++ return gckOS_WaitSignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, Wait);
++}
++
++/*******************************************************************************
++**
++** gckOS_SignalUserSignal
++**
++** Set a state of the specified signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctINT SignalID
++** SignalID.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ )
++{
++ return gckOS_Signal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, State);
++}
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctSEMAPHORE * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore * newSemaphore;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Allocate the semaphore structure. */
++ newSemaphore = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (newSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(newSemaphore, 0);
++
++ /* Set the handle. */
++ * Semaphore = (gctSEMAPHORE) newSemaphore;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Increment the semaphore's count. */
++ up((struct semaphore *) Semaphore);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Decrement the semaphore's count. If the count is zero, wait
++ until it gets incremented. */
++ result = down_interruptible((struct semaphore *) Semaphore);
++
++ /* Signal received? */
++ if (result != 0)
++ {
++ status = gcvSTATUS_TERMINATE;
++ break;
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetSignal
++**
++** Set the specified signal to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signals with numbers between 32 and 63 are real-time,
++ send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++******************************** Thread Object *********************************
++\******************************************************************************/
++
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ )
++{
++ gceSTATUS status;
++ struct task_struct * thread;
++
++ gcmkHEADER_ARG("Os=0x%X ", Os);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(ThreadFunction != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ do
++ {
++ /* Create the thread. */
++ thread = kthread_create(
++ ThreadFunction,
++ ThreadParameter,
++ "Vivante Kernel Thread"
++ );
++
++ /* Failed? */
++ if (IS_ERR(thread))
++ {
++ status = gcvSTATUS_GENERIC_IO;
++ break;
++ }
++
++ /* Start the thread. */
++ wake_up_process(thread);
++
++ /* Set the thread handle. */
++ * Thread = (gctTHREAD) thread;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ /* Thread should have already been enabled to terminate. */
++ kthread_stop((struct task_struct *) Thread);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++/******************************************************************************\
++******************************** Software Timer ********************************
++\******************************************************************************/
++
++void
++_TimerFunction(
++ struct work_struct * work
++ )
++{
++ gcsOSTIMER_PTR timer = (gcsOSTIMER_PTR)work;
++
++ gctTIMERFUNCTION function = timer->function;
++
++ function(timer->data);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateTimer
++**
++** Create a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctTIMERFUNCTION Function.
++** Pointer to a call back function which will be called when timer is
++** expired.
++**
++** gctPOINTER Data.
++** Private data which will be passed to call back function.
++**
++** OUTPUT:
++**
++** gctPOINTER * Timer
++** Pointer to a variable receiving the created timer.
++*/
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ )
++{
++ gceSTATUS status;
++ gcsOSTIMER_PTR pointer;
++ gcmkHEADER_ARG("Os=0x%X Function=0x%X Data=0x%X", Os, Function, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(Os, sizeof(gcsOSTIMER), (gctPOINTER)&pointer));
++
++ pointer->function = Function;
++ pointer->data = Data;
++
++ INIT_DELAYED_WORK(&pointer->work, _TimerFunction);
++
++ *Timer = pointer;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyTimer
++**
++**  Destroy a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++**          Pointer to the timer to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cancel_delayed_work_sync(&timer->work);
++#else
++ cancel_delayed_work(&timer->work);
++ flush_workqueue(Os->workqueue);
++#endif
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, Timer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StartTimer
++**
++** Schedule a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be scheduled.
++**
++** gctUINT32 Delay
++** Delay in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ )
++{
++ gcsOSTIMER_PTR timer;
++
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X Delay=%u", Os, Timer, Delay);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Delay != 0);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++ if (unlikely(delayed_work_pending(&timer->work)))
++ {
++ if (unlikely(!cancel_delayed_work(&timer->work)))
++ {
++ cancel_work_sync(&timer->work.work);
++
++ if (unlikely(delayed_work_pending(&timer->work)))
++ {
++ gckOS_Print("gckOS_StartTimer error, the pending worker cannot complete!!!! \n");
++
++ return gcvSTATUS_INVALID_REQUEST;
++ }
++ }
++ }
++
++ queue_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StopTimer
++**
++**  Cancel a scheduled timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++**          Pointer to the timer to be canceled.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++ cancel_delayed_work(&timer->work);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ dump_stack();
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ )
++{
++ struct task_struct *task;
++
++ /* Get the task_struct of the task with pid. */
++ rcu_read_lock();
++
++ task = FIND_TASK_BY_PID(Pid);
++
++ if (task == gcvNULL)
++ {
++ rcu_read_unlock();
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ /* Get name of process. */
++ strncpy(String, task->comm, Length);
++
++ rcu_read_unlock();
++
++ return gcvSTATUS_OK;
++}
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Create an sync point structure. */
++ syncPoint = (gcsSYNC_POINT_PTR) kmalloc(
++ sizeof(gcsSYNC_POINT), GFP_KERNEL | gcdNOWARN);
++
++ if (syncPoint == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the sync point. */
++ atomic_set(&syncPoint->ref, 1);
++ atomic_set(&syncPoint->state, 0);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->syncPointDB, syncPoint, &syncPoint->id));
++
++ *SyncPoint = (gctSYNC_POINT)(gctUINTPTR_T)syncPoint->id;
++
++ gcmkFOOTER_ARG("*SyncPonint=%d", syncPoint->id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (syncPoint != gcvNULL)
++ {
++ kfree(syncPoint);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Initialize the sync point. */
++ atomic_inc(&syncPoint->ref);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ if (atomic_dec_and_test(&syncPoint->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->syncPointDB, syncPoint->id));
++
++        /* Free the sync point. */
++ syncPoint->timeline = gcvNULL;
++ kfree(syncPoint);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Get state. */
++ atomic_set(&syncPoint->state, gcvTRUE);
++
++ /* Signal timeline. */
++ if (syncPoint->timeline)
++ {
++ sync_timeline_signal(syncPoint->timeline);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Get state. */
++ *State = atomic_read(&syncPoint->state);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++
++ /* Create viv sync timeline. */
++ timeline = viv_sync_timeline_create("viv timeline", Os);
++
++ if (timeline == gcvNULL)
++ {
++ /* Out of memory. */
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ *Timeline = (gctHANDLE) timeline;
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++ gcmkASSERT(Timeline != gcvNULL);
++
++ /* Destroy timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++ sync_timeline_destroy(&timeline->obj);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ )
++{
++ int fd = -1;
++ struct viv_sync_timeline *timeline;
++ struct sync_pt * pt = gcvNULL;
++ struct sync_fence * fence;
++ char name[32];
++ gcsSYNC_POINT_PTR syncPoint;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Timeline=0x%X SyncPoint=%d",
++ Os, Timeline, (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Cast timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++
++ fd = get_unused_fd();
++
++ if (fd < 0)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Create viv_sync_pt. */
++ pt = viv_sync_pt_create(timeline, SyncPoint);
++
++ if (pt == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Reference sync_timeline. */
++ syncPoint->timeline = &timeline->obj;
++
++ /* Build fence name. */
++ snprintf(name, 32, "viv sync_fence-%u", (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ /* Create sync_fence. */
++ fence = sync_fence_create(name, pt);
++
++ if (fence == NULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Install fence to fd. */
++ sync_fence_install(fence, fd);
++
++ *FenceFD = fd;
++ gcmkFOOTER_ARG("*FenceFD=%d", fd);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Error roll back. */
++ if (pt)
++ {
++ sync_pt_free(pt);
++ }
++
++ if (fd > 0)
++ {
++ put_unused_fd(fd);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,83 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_os_h_
++#define __gc_hal_kernel_os_h_
++
++typedef struct _LINUX_MDL_MAP
++{
++ gctINT pid;
++ gctPOINTER vmaAddr;
++ gctUINT32 count;
++ struct vm_area_struct * vma;
++ struct _LINUX_MDL_MAP * next;
++}
++LINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL_MAP * PLINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL
++{
++ gctINT pid;
++ char * addr;
++
++ union _pages
++ {
++ /* Pointer to a array of pages. */
++ struct page * contiguousPages;
++ /* Pointer to a array of pointers to page. */
++ struct page ** nonContiguousPages;
++ }
++ u;
++
++#ifdef NO_DMA_COHERENT
++ gctPOINTER kaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gctINT numPages;
++ gctINT pagedMem;
++ gctBOOL contiguous;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctBOOL exact;
++#endif
++ dma_addr_t dmaHandle;
++ PLINUX_MDL_MAP maps;
++ struct _LINUX_MDL * prev;
++ struct _LINUX_MDL * next;
++}
++LINUX_MDL, *PLINUX_MDL;
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT PID
++ );
++
++typedef struct _DRIVER_ARGS
++{
++ gctUINT64 InputBuffer;
++ gctUINT64 InputBufferSize;
++ gctUINT64 OutputBuffer;
++ gctUINT64 OutputBufferSize;
++}
++DRIVER_ARGS;
++
++#endif /* __gc_hal_kernel_os_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.c linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,174 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <linux/kernel.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/uaccess.h>
++
++#include "gc_hal_kernel_sync.h"
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++static struct sync_pt *
++viv_sync_pt_dup(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt *pt;
++ struct viv_sync_pt *src;
++ struct viv_sync_timeline *obj;
++
++ src = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ /* Create the new sync_pt. */
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = src->stamp;
++ pt->sync = src->sync;
++
++ /* Reference sync point. */
++ status = gckOS_ReferenceSyncPoint(obj->os, pt->sync);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *)pt;
++}
++
++static int
++viv_sync_pt_has_signaled(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ gctBOOL state;
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *)sync_pt;
++ obj = (struct viv_sync_timeline *)sync_pt->parent;
++
++ status = gckOS_QuerySyncPoint(obj->os, pt->sync, &state);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error. */
++ return -1;
++ }
++
++ return state;
++}
++
++static int
++viv_sync_pt_compare(
++ struct sync_pt * a,
++ struct sync_pt * b
++ )
++{
++ int ret;
++ struct viv_sync_pt * pt1 = (struct viv_sync_pt *) a;
++ struct viv_sync_pt * pt2 = (struct viv_sync_pt *) b;
++
++ ret = (pt1->stamp < pt2->stamp) ? -1
++ : (pt1->stamp == pt2->stamp) ? 0
++ : 1;
++
++ return ret;
++}
++
++static void
++viv_sync_pt_free(
++ struct sync_pt * sync_pt
++ )
++{
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ gckOS_DestroySyncPoint(obj->os, pt->sync);
++}
++
++static struct sync_timeline_ops viv_timeline_ops =
++{
++ .driver_name = "viv_sync",
++ .dup = viv_sync_pt_dup,
++ .has_signaled = viv_sync_pt_has_signaled,
++ .compare = viv_sync_pt_compare,
++ .free_pt = viv_sync_pt_free,
++};
++
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * name,
++ gckOS os
++ )
++{
++ struct viv_sync_timeline * obj;
++
++ obj = (struct viv_sync_timeline *)
++ sync_timeline_create(&viv_timeline_ops, sizeof(struct viv_sync_timeline), name);
++
++ obj->os = os;
++ obj->stamp = 0;
++
++ return obj;
++}
++
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * obj,
++ gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt * pt;
++
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = obj->stamp++;
++ pt->sync = SyncPoint;
++
++ /* Dup signal. */
++ status = gckOS_ReferenceSyncPoint(obj->os, SyncPoint);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *) pt;
++}
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.h linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,71 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_sync_h_
++#define __gc_hal_kernel_sync_h_
++
++#include <linux/types.h>
++
++#include <linux/sync.h>
++
++#include <gc_hal.h>
++#include <gc_hal_base.h>
++
++struct viv_sync_timeline
++{
++ /* Parent object. */
++ struct sync_timeline obj;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++
++ /* Pointer to os struct. */
++ gckOS os;
++};
++
++
++struct viv_sync_pt
++{
++ /* Parent object. */
++ struct sync_pt pt;
++
++ /* Reference sync point*/
++ gctSYNC_POINT sync;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++};
++
++/* Create viv_sync_timeline object. */
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * Name,
++ gckOS Os
++ );
++
++/* Create viv_sync_pt object. */
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * Obj,
++ gctSYNC_POINT SyncPoint
++ );
++
++#endif /* __gc_hal_kernel_sync_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v4/Kbuild linux-openelec/drivers/mxc/gpu-viv/v4/Kbuild
+--- linux-3.14.36/drivers/mxc/gpu-viv/v4/Kbuild 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v4/Kbuild 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,236 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++#
++# Linux build file for kernel HAL driver.
++#
++
++AQROOT := $(srctree)/drivers/mxc/gpu-viv/v4
++AQARCH := $(AQROOT)/arch/XAQ2
++AQVGARCH := $(AQROOT)/arch/GC350
++
++include $(AQROOT)/config
++
++KERNEL_DIR ?= $(TOOL_DIR)/kernel
++
++OS_KERNEL_DIR := hal/os/linux/kernel
++ARCH_KERNEL_DIR := arch/$(notdir $(AQARCH))/hal/kernel
++ARCH_VG_KERNEL_DIR := arch/$(notdir $(AQVGARCH))/hal/kernel
++HAL_KERNEL_DIR := hal/kernel
++
++# EXTRA_CFLAGS += -Werror
++
++OBJS := $(OS_KERNEL_DIR)/gc_hal_kernel_device.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_driver.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_linux.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_math.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_os.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_debugfs.o
++
++OBJS += $(HAL_KERNEL_DIR)/gc_hal_kernel.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_db.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_debug.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_event.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_heap.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_video_memory.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_power.o
++
++OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_context.o \
++ $(ARCH_KERNEL_DIR)/gc_hal_kernel_hardware.o
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++OBJS +=\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_interrupt_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_command_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_vg.o
++endif
++
++ifneq ($(CONFIG_SYNC),)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_sync.o
++endif
++
++ifeq ($(KERNELRELEASE), )
++
++.PHONY: all clean install
++
++# Define targets.
++all:
++ @make V=$(V) ARCH=$(ARCH_TYPE) -C $(KERNEL_DIR) SUBDIRS=`pwd` modules
++
++clean:
++ @rm -rf $(OBJS)
++ @rm -rf modules.order Module.symvers
++ @find $(AQROOT) -name ".gc_*.cmd" | xargs rm -f
++
++install: all
++ @mkdir -p $(SDK_DIR)/drivers
++
++else
++
++
++EXTRA_CFLAGS += -DLINUX -DDRIVER
++
++ifeq ($(ENUM_WORKAROUND), 1)
++EXTRA_CFLAGS += -DENUM_WORKAROUND=1
++else
++EXTRA_CFLAGS += -DENUM_WORKAROUND=0
++endif
++
++ifeq ($(FLAREON),1)
++EXTRA_CFLAGS += -DFLAREON
++endif
++
++ifeq ($(DEBUG), 1)
++EXTRA_CFLAGS += -DDBG=1 -DDEBUG -D_DEBUG
++else
++EXTRA_CFLAGS += -DDBG=0
++endif
++
++ifeq ($(NO_DMA_COHERENT), 1)
++EXTRA_CFLAGS += -DNO_DMA_COHERENT
++endif
++
++ifeq ($(CONFIG_DOVE_GPU), 1)
++EXTRA_CFLAGS += -DCONFIG_DOVE_GPU=1
++endif
++
++ifneq ($(USE_PLATFORM_DRIVER), 0)
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=1
++else
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=0
++endif
++
++
++EXTRA_CFLAGS += -DVIVANTE_PROFILER=1
++EXTRA_CFLAGS += -DVIVANTE_PROFILER_CONTEXT=1
++
++
++ifeq ($(ANDROID), 1)
++EXTRA_CFLAGS += -DANDROID=1
++endif
++
++ifeq ($(ENABLE_GPU_CLOCK_BY_DRIVER), 1)
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=1
++else
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=0
++endif
++
++ifeq ($(USE_NEW_LINUX_SIGNAL), 1)
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=1
++else
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=0
++endif
++
++ifeq ($(NO_USER_DIRECT_ACCESS_FROM_KERNEL), 1)
++EXTRA_CFLAGS += -DNO_USER_DIRECT_ACCESS_FROM_KERNEL=1
++else
++EXTRA_CFLAGS += -DNO_USER_DIRECT_ACCESS_FROM_KERNEL=0
++endif
++
++ifeq ($(FORCE_ALL_VIDEO_MEMORY_CACHED), 1)
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_CACHEABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_BUFFERABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=0
++endif
++
++ifeq ($(CACHE_FUNCTION_UNIMPLEMENTED), 1)
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=1
++else
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=0
++endif
++
++ifeq ($(SUPPORT_SWAP_RECTANGLE), 1)
++EXTRA_CFLAGS += -DgcdSUPPORT_SWAP_RECTANGLE=1
++else
++EXTRA_CFLAGS += -DgcdSUPPORT_SWAP_RECTANGLE=0
++endif
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++EXTRA_CFLAGS += -DgcdENABLE_VG=1
++else
++EXTRA_CFLAGS += -DgcdENABLE_VG=0
++endif
++
++ifeq ($(CONFIG_SMP), y)
++EXTRA_CFLAGS += -DgcdSMP=1
++else
++EXTRA_CFLAGS += -DgcdSMP=0
++endif
++
++ifeq ($(VIVANTE_NO_3D),1)
++EXTRA_CFLAGS += -DVIVANTE_NO_3D
++endif
++
++ifeq ($(ENABLE_OUTER_CACHE_PATCH), 1)
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=1
++else
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=0
++endif
++
++ifeq ($(USE_BANK_ALIGNMENT), 1)
++ EXTRA_CFLAGS += -DgcdENABLE_BANK_ALIGNMENT=1
++ ifneq ($(BANK_BIT_START), 0)
++ ifneq ($(BANK_BIT_END), 0)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_START=$(BANK_BIT_START)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_END=$(BANK_BIT_END)
++ endif
++ endif
++
++ ifneq ($(BANK_CHANNEL_BIT), 0)
++ EXTRA_CFLAGS += -DgcdBANK_CHANNEL_BIT=$(BANK_CHANNEL_BIT)
++ endif
++endif
++
++ifneq ($(CONFIG_SYNC),)
++EXTRA_CFLAGS += -DgcdANDROID_NATIVE_FENCE_SYNC=1
++endif
++
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel
++EXTRA_CFLAGS += -I$(AQARCH)/hal/kernel
++EXTRA_CFLAGS += -I$(AQROOT)/hal/os/linux/kernel
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++EXTRA_CFLAGS += -I$(AQVGARCH)/hal/kernel
++endif
++
++obj-$(CONFIG_MXC_GPU_VIV) += galcore.o
++
++galcore-objs := $(OBJS)
++
++endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/config linux-openelec/drivers/mxc/gpu-viv/v5/config
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/config 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/config 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,36 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2014 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++ARCH_TYPE ?= arm
++SDK_DIR ?= $(AQROOT)/build/sdk
++VIVANTE_ENABLE_3D ?= 1
++VIVANTE_ENABLE_2D ?= 1
++VIVANTE_ENABLE_VG ?= 1
++FORCE_ALL_VIDEO_MEMORY_CACHED ?= 0
++NONPAGED_MEMORY_CACHEABLE ?= 0
++NONPAGED_MEMORY_BUFFERABLE ?= 1
++CACHE_FUNCTION_UNIMPLEMENTED ?= 0
++ENABLE_OUTER_CACHE_PATCH ?= 1
++USE_BANK_ALIGNMENT ?= 1
++BANK_BIT_START ?= 13
++BANK_BIT_END ?= 15
++BANK_CHANNEL_BIT ?= 12
++PLATFORM ?= freescale/gc_hal_kernel_platform_imx6q14
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2317 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++#include "gc_hal_kernel_buffer.h"
++
++/******************************************************************************\
++******************************** Debugging Macro *******************************
++\******************************************************************************/
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++
++/******************************************************************************\
++************************** Context State Buffer Helpers ************************
++\******************************************************************************/
++
++#define _STATE(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT(reg, count) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT_OFFSET(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_MIRROR_COUNT(reg, mirror, count) \
++ _StateMirror(\
++ Context, \
++ reg ## _Address >> 2, \
++ count, \
++ mirror ## _Address >> 2 \
++ )
++
++#define _STATE_HINT(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_HINT_BLOCK(reg, block, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + (block << reg ## _BLK), \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_COUNT_OFFSET_HINT(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_X(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvTRUE, gcvFALSE \
++ )
++
++#define _STATE_INIT_VALUE(reg, value) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ value, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _CLOSE_RANGE() \
++ _TerminateStateBlock(Context, index)
++
++#define _ENABLE(reg, field) \
++ do \
++ { \
++ if (gcmVERIFYFIELDVALUE(data, reg, MASK_ ## field, ENABLED)) \
++ { \
++ enable |= gcmFIELDMASK(reg, field); \
++ } \
++ } \
++ while (gcvFALSE)
++
++#define _BLOCK_COUNT(reg) \
++ ((reg ## _Count) >> (reg ## _BLK))
++
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++#define gcdSTATE_MASK \
++ (((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 | 0xC0FFEE & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))))
++
++#if gcdENABLE_3D
++static gctUINT32
++_TerminateStateBlock(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 align;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ /* Flush the current state block; make sure no pairing with the states
++ to follow happens. */
++ if (align && (buffer != gcvNULL))
++ {
++ buffer[Index] = 0xDEADDEAD;
++ }
++
++ /* Reset last address. */
++ Context->lastAddress = ~0U;
++
++ /* Return alignment requirement. */
++ return align;
++}
++#endif
++
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gctUINT32
++_FlushPipe(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ gctBOOL fcFlushStall;
++ gctUINT32 flushSlots;
++ gctBOOL iCacheInvalidate;
++
++ fcFlushStall
++ = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_FC_FLUSH_STALL);
++
++ iCacheInvalidate
++ = ((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
++ flushSlots = 6;
++
++ if (fcFlushStall)
++ {
++ /* Flush tile status cache. */
++ flushSlots += 6;
++ }
++
++ if (iCacheInvalidate)
++ {
++ flushSlots += 12;
++ }
++
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Flush the current pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ if (fcFlushStall)
++ {
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ if (iCacheInvalidate)
++ {
++ /* Invalidate I$ after pipe is stalled */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0218) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0218) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ }
++
++ /* Number of slots taken by flushing pipe. */
++ return flushSlots;
++}
++#endif
++
++#if gcdENABLE_3D
++static gctUINT32
++_SemaphoreStall(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Semaphore/stall takes 4 slots. */
++ return 4;
++}
++#endif
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gctUINT32
++_SwitchPipe(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ gctUINT32 slots = 6;
++
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ Context->pipeSelectBytes = slots * gcmSIZEOF(gctUINT32);
++
++ return slots;
++}
++#endif
++
++#if gcdENABLE_3D
++static gctUINT32
++_State(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gctUINT32 Address,
++ IN gctUINT32 Value,
++ IN gctUINT32 Size,
++ IN gctBOOL FixedPoint,
++ IN gctBOOL Hinted
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 align;
++ gctUINT32 i;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ if ((buffer == gcvNULL) && (Address + Size > Context->stateCount))
++ {
++ /* Determine maximum state. */
++ Context->stateCount = Address + Size;
++ }
++
++ /* Do we need a new entry? */
++ if ((Address != Context->lastAddress) || (FixedPoint != Context->lastFixed))
++ {
++ if (buffer != gcvNULL)
++ {
++ if (align)
++ {
++ /* Add filler. */
++ buffer[Index++] = 0xDEADDEAD;
++ }
++
++ /* LoadState(Address, Count). */
++ gcmkASSERT((Index & 1) == 0);
++
++ if (FixedPoint)
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++ else
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ /* Walk all the states. */
++ for (i = 0; i < (gctUINT32)Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + 1 + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = (gctUINT)Index + 1 + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Save information for this LoadState. */
++ Context->lastIndex = (gctUINT)Index;
++ Context->lastAddress = Address + (gctUINT32)Size;
++ Context->lastSize = Size;
++ Context->lastFixed = FixedPoint;
++
++ /* Return size for load state. */
++ return align + 1 + Size;
++ }
++
++ /* Append this state to the previous one. */
++ if (buffer != gcvNULL)
++ {
++ /* Update last load state. */
++ buffer[Context->lastIndex] =
++ ((((gctUINT32) (buffer[Context->lastIndex])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Context->lastSize + Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Walk all the states. */
++ for (i = 0; i < (gctUINT32)Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = (gctUINT)Index + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Update last address and size. */
++ Context->lastAddress += (gctUINT32)Size;
++ Context->lastSize += Size;
++
++ /* Return number of slots required. */
++ return Size;
++}
++
++static gctUINT32
++_StateMirror(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Address,
++ IN gctUINT32 Size,
++ IN gctUINT32 AddressMirror
++ )
++{
++ gctUINT32 i;
++
++ /* Process when buffer is set. */
++ if (Context->buffer != gcvNULL)
++ {
++ /* Walk all states. */
++ for (i = 0; i < Size; i++)
++ {
++ /* Copy the mapping address. */
++ Context->map[Address + i].index =
++ Context->map[AddressMirror + i].index;
++ }
++ }
++
++ /* Return the number of required maps. */
++ return Size;
++}
++#endif
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gceSTATUS
++_InitializeContextBuffer(
++ IN gckCONTEXT Context
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 index;
++
++#if gcdENABLE_3D
++ gctBOOL halti0, halti1, halti2, halti3;
++ gctUINT i;
++ gctUINT vertexUniforms, fragmentUniforms, vsConstBase, psConstBase, constMax;
++ gctBOOL unifiedUniform;
++ gctUINT fe2vsCount;
++#endif
++
++ /* Reset the buffer index. */
++ index = 0;
++
++ /* Reset the last state address. */
++ Context->lastAddress = ~0U;
++
++ /* Get the buffer pointer. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++
++ /**************************************************************************/
++ /* Build 2D states. *******************************************************/
++
++
++#if gcdENABLE_3D
++ /**************************************************************************/
++ /* Build 3D states. *******************************************************/
++
++ halti0 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) );
++ halti1 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) );
++ halti2 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) );
++ halti3 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures5)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) );
++
++ /* Query how many uniforms can support for non-unified uniform mode. */
++ {if (Context->hardware->identity.numConstants > 256){ unifiedUniform = gcvTRUE; vsConstBase = 0xC000; psConstBase = 0xC000; constMax = Context->hardware->identity.numConstants; vertexUniforms = 256; fragmentUniforms = constMax - vertexUniforms;}else if (Context->hardware->identity.numConstants == 256){ if (Context->hardware->identity.chipModel == gcv2000 && Context->hardware->identity.chipRevision == 0x5118) { unifiedUniform = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vertexUniforms = 256; fragmentUniforms = 64; constMax = 320; } else { unifiedUniform = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vertexUniforms = 256; fragmentUniforms = 256; constMax = 512; }}else{ unifiedUniform = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vertexUniforms = 168; fragmentUniforms = 64; constMax = 232;}};
++
++#if !gcdENABLE_UNIFIED_CONSTANT
++ if (Context->hardware->identity.numConstants > 256)
++ {
++ unifiedUniform = gcvTRUE;
++ }
++ else
++ {
++ unifiedUniform = gcvFALSE;
++ }
++#endif
++
++ /* Store the 3D entry index. */
++ Context->entryOffset3D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Current context pointer. */
++#if gcdDEBUG
++ index += _State(Context, index, 0x03850 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ index += _FlushPipe(Context, index, gcvPIPE_3D);
++
++ /* Global states. */
++ index += _State(Context, index, 0x03814 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03818 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0381C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03820 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03828 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0382C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03834 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03854 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0384C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Front End states. */
++ fe2vsCount = 12;
++ if (halti0)
++ {
++ fe2vsCount = 16;
++ }
++ index += _State(Context, index, 0x00600 >> 2, 0x00000000, fe2vsCount, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x00644 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00648 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0064C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00650 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00680 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x006A0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0067C >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x006C0 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00700 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00740 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00780 >> 2, 0x3F800000, 16, gcvFALSE, gcvFALSE);
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x14600 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14640 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14680 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ }
++
++ /* This register is programed by all chips, which program all DECODE_SELECT as VS
++ ** except SAMPLER_DECODE_SELECT.
++ */
++ index += _State(Context, index, 0x00860 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ /* I-Cache states. */
++ index += _State(Context, index, 0x00868 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0086C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0304C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01028 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x00890 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0104C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++ }
++ }
++
++ /* Vertex Shader states. */
++ index += _State(Context, index, 0x00804 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00808 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0080C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00810 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00820 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00830 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++
++ /* Primitive Assembly states. */
++ index += _State(Context, index, 0x00A00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A0C >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A10 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A1C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A28 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A2C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A30 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A40 >> 2, 0x00000000, Context->hardware->identity.varyingsCount, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A38 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A3C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A80 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A84 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A8C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A88 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++#if gcdMULTI_GPU
++ index += _State(Context, index, 0x03A00 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03A04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++ /* Setup states. */
++ index += _State(Context, index, 0x00C00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C08 >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C0C >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C1C >> 2, 0x42000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C20 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C24 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++
++ /* Raster states. */
++ index += _State(Context, index, 0x00E00 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E10 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E40 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E08 >> 2, 0x00000031, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E24 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E20 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x00E0C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ /* Pixel Shader states. */
++ index += _State(Context, index, 0x01004 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0100C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01010 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01030 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++
++ /* Texture states. */
++ index += _State(Context, index, 0x02000 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02040 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02080 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x020C0 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02100 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02140 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02180 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x021C0 >> 2, 0x00321000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02200 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02240 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x02400 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02440 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02480 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x024C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02500 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02540 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02580 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x025C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02600 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02640 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02680 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x026C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02700 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02740 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 22:22)) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) ))
++ {
++ /*
++ * Linear stride LODn will overwrite LOD0 on GC880,GC2000.
++ * And only LOD0 is valid for this register.
++ */
++ gctUINT count = halti1 ? 14 : 1;
++
++ for (i = 0; i < 12; i += 1)
++ {
++ index += _State(Context, index, (0x02C00 >> 2) + i * 16, 0x00000000, count, gcvFALSE, gcvFALSE);
++ }
++ }
++
++ if (halti1)
++ {
++ gctUINT texBlockCount;
++ gctUINT gcregTXLogSizeResetValue;
++
++ /* Enable the integer filter pipe for all texture samplers
++ so that the floating point filter clock will shut off until
++ we start using the floating point filter.
++ */
++ gcregTXLogSizeResetValue = ((((gctUINT32) (0x00000000)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 29:29) - (0 ? 29:29) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 29:29) - (0 ? 29:29) + 1))))))) << (0 ? 29:29))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 29:29) - (0 ? 29:29) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 29:29) - (0 ? 29:29) + 1))))))) << (0 ? 29:29)));
++
++ /* New texture block. */
++ index += _State(Context, index, 0x10000 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10080 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10100 >> 2, gcregTXLogSizeResetValue, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10180 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10200 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10280 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10300 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10380 >> 2, 0x00321000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10400 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10480 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 15:15)) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x12000 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x12400 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ }
++
++ texBlockCount = ((512) >> (4));
++
++ for (i = 0; i < texBlockCount; i += 1)
++ {
++ index += _State(Context, index, (0x10800 >> 2) + (i << 4), 0x00000000, 14, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x10700 >> 2, 0x00000F00, 32, gcvFALSE, gcvFALSE);
++ }
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x10780 >> 2, 0x00030000, 32, gcvFALSE, gcvFALSE);
++ }
++
++ /* ASTC */
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 13:13)) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x10500 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10580 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10600 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10680 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ }
++
++ /* YUV. */
++ index += _State(Context, index, 0x01678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0167C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01680 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01684 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01688 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0168C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01690 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01694 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01698 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0169C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Thread walker states. */
++ index += _State(Context, index, 0x00900 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00904 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00908 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0090C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00910 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00914 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00918 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0091C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00924 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ index += _State(Context, index, 0x00940 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00944 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00948 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0094C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00950 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00954 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++
++ if (!halti3)
++ {
++ if (Context->hardware->identity.instructionCount > 1024)
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x20000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ else if (Context->hardware->identity.instructionCount > 256)
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* VX instruction memory. */
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x0C000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ _StateMirror(Context, 0x08000 >> 2, Context->hardware->identity.instructionCount << 2 , 0x0C000 >> 2);
++ }
++ else /* if (Context->hardware->identity.instructionCount <= 256) */
++ {
++ /* old shader instruction PC registers */
++ index += _State(Context, index, 0x00800 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x01000 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01018 >> 2, 0x01000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x04000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x06000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ /* I cache use the new instruction PC registers */
++ else
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ if (unifiedUniform)
++ {
++ gctINT numConstants = Context->hardware->identity.numConstants;
++
++ index += _State(Context, index, 0x01024 >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00864 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ numConstants > 0;
++ i += 256 << 2,
++ numConstants -= 256
++ )
++ {
++ if (numConstants >= 256)
++ {
++ index += _State(Context, index, (0x30000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x30000 >> 2) + i, 0x00000000, numConstants << 2, gcvFALSE, gcvFALSE);
++ }
++ index += _CLOSE_RANGE();
++ }
++ }
++#if gcdENABLE_UNIFIED_CONSTANT
++ else
++#endif
++ {
++ index += _State(Context, index, 0x05000 >> 2, 0x00000000, vertexUniforms * 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x07000 >> 2, 0x00000000, fragmentUniforms * 4, gcvFALSE, gcvFALSE);
++ }
++
++ /* Store the index of the "XD" entry. */
++ Context->entryOffsetXDFrom3D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++
++ /* Pixel Engine states. */
++ index += _State(Context, index, 0x01400 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01404 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01408 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0140C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01414 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01418 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0141C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01420 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01424 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01428 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0142C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01434 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01454 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01458 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0145C >> 2, 0x00000010, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A8 >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014AC >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A4 >> 2, 0x000E400C, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01580 >> 2, 0x00000000, 3, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Composition states. */
++ index += _State(Context, index, 0x03008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (Context->hardware->identity.pixelPipes == 1)
++ {
++ index += _State(Context, index, 0x01460 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01430 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01410 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x01460 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ if (Context->hardware->identity.pixelPipes > 1 || halti0)
++ {
++ index += _State(Context, index, (0x01480 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ for (i = 0; i < 3; i++)
++ {
++ index += _State(Context, index, (0x01500 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ if (halti2)
++ {
++ for (i = 0; i < 7; i++)
++ {
++ index += _State(Context, index, (0x14800 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++ index += _State(Context, index, 0x14900 >> 2, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ }
++
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x014BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ /* Resolve states. */
++ index += _State(Context, index, 0x01604 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01608 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0160C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01610 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01614 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01620 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01630 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01640 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0163C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if ((Context->hardware->identity.pixelPipes > 1) || halti1)
++ {
++ index += _State(Context, index, (0x016C0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, (0x016E0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01700 >> 2, 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvFALSE);
++ }
++
++#if gcd3DBLIT
++ index += _State(Context, index, (0x14000 >> 2) + (0 << 1), 0x00000000, 2, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1400C >> 2, 0x0001C800, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14010 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14014 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x14018 >> 2) + (0 << 1), 0x00000000, 2, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14020 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14024 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14028 >> 2, 0x0001C800, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1402C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14030 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14034 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14038 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1403C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14040 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14044 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14048 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1404C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14050 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14058 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1405C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14054 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14100 >> 2, 0x00000000, 64, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14200 >> 2, 0x00000000, 64, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14064 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14068 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _State(Context, index, 0x1406C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14070 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14074 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14078 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1407C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14080 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14084 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14088 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1408C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14090 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _State(Context, index, 0x14094 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14098 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ /* Tile status. */
++ index += _State(Context, index, 0x01654 >> 2, 0x00200000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x01658 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0165C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01660 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01664 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01668 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0166C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A4 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x016AC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01720 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01740 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01760 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x01780 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x017A0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x017C0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x017E0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x01A00 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x01A20 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x01A40 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++
++ if(((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 25:25) & ((gctUINT32) ((((1 ? 25:25) - (0 ? 25:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:25) - (0 ? 25:25) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 25:25) - (0 ? 25:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:25) - (0 ? 25:25) + 1))))))))
++ {
++ index += _State(Context, index, 0x03860 >> 2, 0x6, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x01A80 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++ }
++
++ /* Semaphore/stall. */
++ index += _SemaphoreStall(Context, index);
++#endif
++
++ /**************************************************************************/
++ /* Link to another address. ***********************************************/
++
++ Context->linkIndex3D = (gctUINT)index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++ /* Store the end of the context buffer. */
++ Context->bufferSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /**************************************************************************/
++ /* Pipe switch for the case where neither 2D nor 3D are used. *************/
++
++ /* Store the 3D entry index. */
++ Context->entryOffsetXDFrom2D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Store the location of the link. */
++ Context->linkIndexXD = (gctUINT)index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++
++ /**************************************************************************/
++ /* Save size for buffer. **************************************************/
++
++ Context->totalSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++static gceSTATUS
++_DestroyContext(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ if (Context != gcvNULL)
++ {
++ gcsCONTEXT_PTR bufferHead;
++
++ /* Free context buffers. */
++ for (bufferHead = Context->buffer; Context->buffer != gcvNULL;)
++ {
++ /* Get a shortcut to the current buffer. */
++ gcsCONTEXT_PTR buffer = Context->buffer;
++
++ /* Get the next buffer. */
++ gcsCONTEXT_PTR next = buffer->next;
++
++ /* Last item? */
++ if (next == bufferHead)
++ {
++ next = gcvNULL;
++ }
++
++ /* Destroy the signal. */
++ if (buffer->signal != gcvNULL)
++ {
++ gcmkONERROR(gckOS_DestroySignal(
++ Context->os, buffer->signal
++ ));
++
++ buffer->signal = gcvNULL;
++ }
++
++ /* Free state delta map. */
++ if (buffer->logical != gcvNULL)
++ {
++ if (Context->hardware->kernel->virtualCommandBuffer)
++ {
++ gcmkONERROR(gckEVENT_DestroyVirtualCommandBuffer(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckEVENT_FreeContiguousMemory(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++ }
++
++ buffer->logical = gcvNULL;
++ }
++
++ /* Free context buffer. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, buffer));
++
++ /* Remove from the list. */
++ Context->buffer = next;
++ }
++
++#if gcdSECURE_USER
++ /* Free the hint array. */
++ if (Context->hint != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->hint));
++ }
++#endif
++ /* Free record array copy. */
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (Context->recordArrayMap != gcvNULL)
++ {
++ gcsRECORD_ARRAY_MAP_PTR map = Context->recordArrayMap;
++
++ do
++ {
++ /* Free record array. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, map->kData));
++ map = map->next;
++ }
++ while (map != Context->recordArrayMap);
++
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArrayMap));
++ }
++#else
++ if (Context->recordArray != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArray));
++ }
++#endif
++
++ /* Free the state mapping. */
++ if (Context->map != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->map));
++ }
++
++ /* Mark the gckCONTEXT object as unknown. */
++ Context->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCONTEXT object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context));
++ }
++
++OnError:
++ return status;
++}
++
++
++/******************************************************************************\
++**************************** Context Management API ****************************
++\******************************************************************************/
++
++/******************************************************************************\
++**
++** gckCONTEXT_Construct
++**
++** Construct a new gckCONTEXT object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gckHARDWARE Hardware
++** Pointer to gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable thet will receive the gckCONTEXT object
++** pointer.
++*/
++#if (gcdENABLE_3D || gcdENABLE_2D)
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gctUINT32 allocationSize;
++ gctUINT i;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Os=0x%08X Hardware=0x%08X", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++
++
++ /**************************************************************************/
++ /* Allocate and initialize basic fields of gckCONTEXT. ********************/
++
++ /* The context object size. */
++ allocationSize = gcmSIZEOF(struct _gckCONTEXT);
++
++ /* Allocate the object. */
++ gcmkONERROR(gckOS_Allocate(
++ Os, allocationSize, &pointer
++ ));
++
++ context = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(context, allocationSize));
++
++ /* Initialize the gckCONTEXT object. */
++ context->object.type = gcvOBJ_CONTEXT;
++ context->os = Os;
++ context->hardware = Hardware;
++
++
++#if !gcdENABLE_3D
++ context->entryPipe = gcvPIPE_2D;
++ context->exitPipe = gcvPIPE_2D;
++#elif gcdCMD_NO_2D_CONTEXT
++ context->entryPipe = gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#else
++ context->entryPipe
++ = (((((gctUINT32) (context->hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) )
++ ? gcvPIPE_2D
++ : gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#endif
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Hardware,
++ &context->alignment,
++ &context->reservedHead,
++ &context->reservedTail
++ ));
++
++ /* Mark the context as dirty to force loading of the entire state table
++ the first time. */
++ context->dirty = gcvTRUE;
++
++
++ /**************************************************************************/
++ /* Get the size of the context buffer. ************************************/
++
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++
++ /**************************************************************************/
++ /* Compute the size of the record array. **********************************/
++
++ context->recordArraySize
++ = gcmSIZEOF(gcsSTATE_DELTA_RECORD) * (gctUINT)context->stateCount;
++
++
++ if (context->stateCount > 0)
++ {
++ /**************************************************************************/
++ /* Allocate and reset the state mapping table. ****************************/
++
++ /* Allocate the state mapping table. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsSTATE_MAP) * context->stateCount,
++ &pointer
++ ));
++
++ context->map = pointer;
++
++ /* Zero the state mapping table. */
++ gcmkONERROR(gckOS_ZeroMemory(
++ context->map, gcmSIZEOF(gcsSTATE_MAP) * context->stateCount
++ ));
++
++
++ /**************************************************************************/
++ /* Allocate the hint array. ***********************************************/
++
++#if gcdSECURE_USER
++ /* Allocate hints. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gctBOOL) * context->stateCount,
++ &pointer
++ ));
++
++ context->hint = pointer;
++#endif
++ }
++
++ /**************************************************************************/
++ /* Allocate the context and state delta buffers. **************************/
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i += 1)
++ {
++ /* Allocate a context buffer. */
++ gcsCONTEXT_PTR buffer;
++
++ gctSIZE_T totalSize = context->totalSize;
++
++ /* Allocate the context buffer structure. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsCONTEXT),
++ &pointer
++ ));
++
++ buffer = pointer;
++
++ /* Reset the context buffer structure. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ buffer, gcmSIZEOF(gcsCONTEXT)
++ ));
++
++ /* Append to the list. */
++ if (context->buffer == gcvNULL)
++ {
++ buffer->next = buffer;
++ context->buffer = buffer;
++ }
++ else
++ {
++ buffer->next = context->buffer->next;
++ context->buffer->next = buffer;
++ }
++
++ /* Set the number of delta in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ buffer->num = i;
++#endif
++
++ /* Create the busy signal. */
++ gcmkONERROR(gckOS_CreateSignal(
++ Os, gcvFALSE, &buffer->signal
++ ));
++
++ /* Set the signal, buffer is currently not busy. */
++ gcmkONERROR(gckOS_Signal(
++ Os, buffer->signal, gcvTRUE
++ ));
++
++ /* Create a new physical context buffer. */
++ if (context->hardware->kernel->virtualCommandBuffer)
++ {
++ gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
++ context->hardware->kernel,
++ gcvFALSE,
++ &totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++ gcmkONERROR(gckKERNEL_GetGPUAddress(
++ context->hardware->kernel,
++ pointer,
++ gcvFALSE,
++ &address
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Os,
++ gcvFALSE,
++ &totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ context->hardware,
++ pointer,
++ gcvFALSE,
++ &address
++ ));
++ }
++
++ buffer->logical = pointer;
++ buffer->address = address;
++
++ /* Set gckEVENT object pointer. */
++ buffer->eventObj = Hardware->kernel->eventObj;
++
++ /* Set the pointers to the LINK commands. */
++ if (context->linkIndex2D != 0)
++ {
++ buffer->link2D = &buffer->logical[context->linkIndex2D];
++ }
++
++ if (context->linkIndex3D != 0)
++ {
++ buffer->link3D = &buffer->logical[context->linkIndex3D];
++ }
++
++ if (context->linkIndexXD != 0)
++ {
++ gctPOINTER xdLink;
++ gctUINT32 xdEntryAddress;
++ gctUINT32 xdEntrySize;
++ gctUINT32 linkBytes;
++
++ /* Determine LINK parameters. */
++ xdLink
++ = &buffer->logical[context->linkIndexXD];
++
++ xdEntryAddress
++ = buffer->address
++ + context->entryOffsetXDFrom3D;
++
++ xdEntrySize
++ = context->bufferSize
++ - context->entryOffsetXDFrom3D;
++
++ /* Query LINK size. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware, gcvNULL, 0, 0, &linkBytes
++ ));
++
++ /* Generate a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware,
++ xdLink,
++ xdEntryAddress,
++ xdEntrySize,
++ &linkBytes
++ ));
++ }
++ }
++
++
++ /**************************************************************************/
++ /* Initialize the context buffers. ****************************************/
++
++ /* Initialize the current context buffer. */
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++ /* Make all created contexts equal. */
++ {
++ gcsCONTEXT_PTR currContext, tempContext;
++
++ /* Set the current context buffer. */
++ currContext = context->buffer;
++
++ /* Get the next context buffer. */
++ tempContext = currContext->next;
++
++ /* Loop through all buffers. */
++ while (tempContext != currContext)
++ {
++ if (tempContext == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ /* Copy the current context. */
++ gckOS_MemCopy(
++ tempContext->logical,
++ currContext->logical,
++ context->totalSize
++ );
++
++ /* Get the next context buffer. */
++ tempContext = tempContext->next;
++ }
++ }
++
++ /* Return pointer to the gckCONTEXT object. */
++ *Context = context;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%08X", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back on error. */
++ gcmkVERIFY_OK(_DestroyContext(context));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/******************************************************************************\
++**
++** gckCONTEXT_Destroy
++**
++** Destroy a gckCONTEXT object.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++** Pointer to an gckCONTEXT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%08X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Destroy the context and all related objects. */
++ status = _DestroyContext(Context);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Update
++**
++** Merge all pending state delta buffers into the current context buffer.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++** Pointer to an gckCONTEXT object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ )
++{
++#if gcdENABLE_3D
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSTATE_DELTA _stateDelta;
++ gckKERNEL kernel;
++ gcsCONTEXT_PTR buffer;
++ gcsSTATE_MAP_PTR map;
++ gctBOOL needCopy = gcvFALSE;
++ gcsSTATE_DELTA_PTR nDelta;
++ gcsSTATE_DELTA_PTR uDelta = gcvNULL;
++ gcsSTATE_DELTA_PTR kDelta = gcvNULL;
++ gcsSTATE_DELTA_RECORD_PTR record;
++ gcsSTATE_DELTA_RECORD_PTR recordArray = gcvNULL;
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ gcsRECORD_ARRAY_MAP_PTR recordArrayMap = gcvNULL;
++#endif
++ gctUINT elementCount;
++ gctUINT address;
++ gctUINT32 mask;
++ gctUINT32 data;
++ gctUINT index;
++ gctUINT i, j;
++
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++
++ gcmkHEADER_ARG(
++ "Context=0x%08X ProcessID=%d StateDelta=0x%08X",
++ Context, ProcessID, StateDelta
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Get a shortcut to the kernel object. */
++ kernel = Context->hardware->kernel;
++
++ /* Check wehther we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Context->os, ProcessID, &needCopy));
++
++ /* Allocate the copy buffer for the user record array. */
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy && (Context->recordArrayMap == gcvNULL))
++ {
++ /* Allocate enough maps. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ gcmSIZEOF(gcsRECORD_ARRAY_MAP_PTR) * gcdCONTEXT_BUFFER_COUNT,
++ (gctPOINTER *) &Context->recordArrayMap
++ ));
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i++)
++ {
++ /* Next mapping id. */
++ gctUINT n = (i + 1) % gcdCONTEXT_BUFFER_COUNT;
++
++ recordArrayMap = &Context->recordArrayMap[i];
++
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &recordArrayMap->kData
++ ));
++
++ /* Initialize fields. */
++ recordArrayMap->key = 0;
++ recordArrayMap->next = &Context->recordArrayMap[n];
++ }
++ }
++#else
++ if (needCopy && (Context->recordArray == gcvNULL))
++ {
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &Context->recordArray
++ ));
++ }
++#endif
++
++ /* Get the current context buffer. */
++ buffer = Context->buffer;
++
++ /* Wait until the context buffer becomes available; this will
++ also reset the signal and mark the buffer as busy. */
++ gcmkONERROR(gckOS_WaitSignal(
++ Context->os, buffer->signal, gcvINFINITE
++ ));
++
++#if gcdSECURE_USER
++ /* Get the cache form the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE) && 1 && gcdENABLE_3D
++ /* Update current context token. */
++ buffer->logical[Context->map[0x0E14].index]
++ = (gctUINT32)gcmPTR2INT32(Context);
++#endif
++
++ /* Are there any pending deltas? */
++ if (buffer->deltaCount != 0)
++ {
++ /* Get the state map. */
++ map = Context->map;
++
++ /* Get the first delta item. */
++ uDelta = buffer->delta;
++
++ /* Reset the vertex stream count. */
++ elementCount = 0;
++
++ /* Merge all pending deltas. */
++ for (i = 0; i < buffer->deltaCount; i += 1)
++ {
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy)
++ {
++ recordArray = gcvNULL;
++ recordArrayMap = Context->recordArrayMap;
++
++ do
++ {
++ /* Check if recordArray is alreay opened. */
++ if (recordArrayMap->key == kDelta->recordArray)
++ {
++ /* Found. */
++ recordArray = recordArrayMap->kData;
++ break;
++ }
++
++ recordArrayMap = recordArrayMap->next;
++ }
++ while (recordArrayMap != Context->recordArrayMap);
++
++ if (recordArray == gcvNULL)
++ {
++ while (recordArrayMap->key != 0)
++ {
++ /* Found an empty slot. */
++ recordArrayMap = recordArrayMap->next;
++ }
++
++ /* Get access to the state records. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ kernel->os,
++ recordArrayMap->kData,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize
++ ));
++
++ /* Save user pointer as key. */
++ recordArrayMap->key = kDelta->recordArray;
++ recordArray = recordArrayMap->kData;
++ }
++ }
++ else
++ {
++ /* Get access to the state records. */
++ gcmkONERROR(gckOS_MapUserPointer(
++ kernel->os,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++#else
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ Context->recordArray,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++#endif
++
++ /* Merge all pending states. */
++ for (j = 0; j < kDelta->recordCount; j += 1)
++ {
++ if (j >= Context->stateCount)
++ {
++ break;
++ }
++
++ /* Get the current state record. */
++ record = &recordArray[j];
++
++ /* Get the state address. */
++ address = record->address;
++
++ /* Make sure the state is a part of the mapping table. */
++ if (address >= Context->stateCount)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++
++ continue;
++ }
++
++ /* Get the state index. */
++ index = map[address].index;
++
++ /* Skip the state if not mapped. */
++ if (index == 0)
++ {
++ continue;
++ }
++
++ /* Get the data mask. */
++ mask = record->mask;
++
++ /* Masked states that are being completly reset or regular states. */
++ if ((mask == 0) || (mask == ~0U))
++ {
++ /* Get the new data value. */
++ data = record->data;
++
++ /* Process special states. */
++ if (address == 0x0595)
++ {
++ /* Force auto-disable to be disabled. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13)));
++ }
++
++#if gcdSECURE_USER
++ /* Do we need to convert the logical address? */
++ if (Context->hint[address])
++ {
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) &data
++ ));
++ }
++#endif
++
++ /* Set new data. */
++ buffer->logical[index] = data;
++ }
++
++ /* Masked states that are being set partially. */
++ else
++ {
++ buffer->logical[index]
++ = (~mask & buffer->logical[index])
++ | (mask & record->data);
++ }
++ }
++
++ /* Get the element count. */
++ if (kDelta->elementCount != 0)
++ {
++ elementCount = kDelta->elementCount;
++ }
++
++ /* Dereference delta. */
++ kDelta->refCount -= 1;
++ gcmkASSERT(kDelta->refCount >= 0);
++
++ /* Get the next state delta. */
++ nDelta = gcmUINT64_TO_PTR(kDelta->next);
++
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy)
++ {
++ if (kDelta->refCount == 0)
++ {
++ /* No other reference, reset the mapping. */
++ recordArrayMap->key = 0;
++ }
++ }
++ else
++ {
++ /* Close access to the state records. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ kernel->os,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize,
++ (gctPOINTER *) recordArray
++ ));
++
++ recordArray = gcvNULL;
++ }
++#else
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++#endif
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Update the user delta pointer. */
++ uDelta = nDelta;
++ }
++
++ /* Hardware disables all input streams when the stream 0 is programmed,
++ it then reenables those streams that were explicitely programmed by
++ the software. Because of this we cannot program the entire array of
++ values, otherwise we'll get all streams reenabled, but rather program
++ only those that are actully needed by the software. */
++ if (elementCount != 0)
++ {
++ gctUINT base;
++ gctUINT nopCount;
++ gctUINT32_PTR nop;
++ gctUINT fe2vsCount = 12;
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++
++ /* Determine the base index of the vertex stream array. */
++ base = map[0x0180].index;
++
++ /* Set the proper state count. */
++ buffer->logical[base - 1]
++ = ((((gctUINT32) (buffer->logical[base - 1])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (elementCount ) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine the number of NOP commands. */
++ nopCount
++ = (fe2vsCount / 2)
++ - (elementCount / 2);
++
++ /* Determine the location of the first NOP. */
++ nop = &buffer->logical[base + (elementCount | 1)];
++
++ /* Fill the unused space with NOPs. */
++ for (i = 0; i < nopCount; i += 1)
++ {
++ if (nop >= buffer->logical + Context->totalSize)
++ {
++ break;
++ }
++
++ /* Generate a NOP command. */
++ *nop = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ /* Advance. */
++ nop += 2;
++ }
++ }
++
++ /* Reset pending deltas. */
++ buffer->deltaCount = 0;
++ buffer->delta = gcvNULL;
++ }
++
++ /* Set state delta user pointer. */
++ uDelta = StateDelta;
++
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* State delta cannot be attached to anything yet. */
++ if (kDelta->refCount != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): kDelta->refCount = %d (has to be 0).\n",
++ __FUNCTION__, __LINE__,
++ kDelta->refCount
++ );
++ }
++
++ /* Attach to all contexts. */
++ buffer = Context->buffer;
++
++ do
++ {
++ /* Attach to the context if nothing is attached yet. If a delta
++ is allready attached, all we need to do is to increment
++ the number of deltas in the context. */
++ if (buffer->delta == gcvNULL)
++ {
++ buffer->delta = uDelta;
++ }
++
++ /* Update reference count. */
++ kDelta->refCount += 1;
++
++ /* Update counters. */
++ buffer->deltaCount += 1;
++
++ /* Get the next context buffer. */
++ buffer = buffer->next;
++
++ if (buffer == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++ }
++ while (Context->buffer != buffer);
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Schedule an event to mark the context buffer as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ buffer->eventObj, buffer->signal, gcvKERNEL_PIXEL
++ ));
++
++ /* Advance to the next context buffer. */
++ Context->buffer = buffer->next;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Get access to the state records. */
++ if (kDelta != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++
++ /* Close access to the current state delta. */
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++gceSTATUS
++gckCONTEXT_MapBuffer(
++ IN gckCONTEXT Context,
++ OUT gctUINT32 *Physicals,
++ OUT gctUINT64 *Logicals,
++ OUT gctUINT32 *Bytes
++ )
++{
++ gceSTATUS status;
++ int i = 0;
++ gctSIZE_T pageCount;
++ gckVIRTUAL_COMMAND_BUFFER_PTR commandBuffer;
++ gckKERNEL kernel = Context->hardware->kernel;
++ gctPOINTER logical;
++ gctPHYS_ADDR physical;
++
++ gcsCONTEXT_PTR buffer;
++
++ gcmkHEADER();
++
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ buffer = Context->buffer;
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i++)
++ {
++ if (kernel->virtualCommandBuffer)
++ {
++ commandBuffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)buffer->physical;
++ physical = commandBuffer->physical;
++
++ gcmkONERROR(gckOS_CreateUserVirtualMapping(
++ kernel->os,
++ physical,
++ Context->totalSize,
++ &logical,
++ &pageCount));
++ }
++ else
++ {
++ physical = buffer->physical;
++
++ gcmkONERROR(gckOS_MapMemory(
++ kernel->os,
++ physical,
++ Context->totalSize,
++ &logical));
++ }
++
++ Physicals[i] = gcmPTR_TO_NAME(physical);
++
++ Logicals[i] = gcmPTR_TO_UINT64(logical);
++
++ buffer = buffer->next;
++ }
++
++ *Bytes = (gctUINT)Context->totalSize;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,183 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_context_h_
++#define __gc_hal_kernel_context_h_
++
++#include "gc_hal_kernel_buffer.h"
++
++/* Exprimental optimization. */
++#define REMOVE_DUPLICATED_COPY_FROM_USER 1
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Maps state locations within the context buffer. */
++typedef struct _gcsSTATE_MAP * gcsSTATE_MAP_PTR;
++typedef struct _gcsSTATE_MAP
++{
++ /* Index of the state in the context buffer. */
++ gctUINT index;
++
++ /* State mask. */
++ gctUINT32 mask;
++}
++gcsSTATE_MAP;
++
++/* Context buffer. */
++typedef struct _gcsCONTEXT * gcsCONTEXT_PTR;
++typedef struct _gcsCONTEXT
++{
++ /* For debugging: the number of context buffer in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Context busy signal. */
++ gctSIGNAL signal;
++
++ /* Physical address of the context buffer. */
++ gctPHYS_ADDR physical;
++
++ /* Logical address of the context buffer. */
++ gctUINT32_PTR logical;
++
++ /* Hardware address of the context buffer. */
++ gctUINT32 address;
++
++ /* Pointer to the LINK commands. */
++ gctPOINTER link2D;
++ gctPOINTER link3D;
++
++ /* The number of pending state deltas. */
++ gctUINT deltaCount;
++
++ /* Pointer to the first delta to be applied. */
++ gcsSTATE_DELTA_PTR delta;
++
++ /* Next context buffer. */
++ gcsCONTEXT_PTR next;
++}
++gcsCONTEXT;
++
++typedef struct _gcsRECORD_ARRAY_MAP * gcsRECORD_ARRAY_MAP_PTR;
++struct _gcsRECORD_ARRAY_MAP
++{
++ /* User pointer key. */
++ gctUINT64 key;
++
++ /* Kernel memory buffer. */
++ gcsSTATE_DELTA_RECORD_PTR kData;
++
++ /* Next map. */
++ gcsRECORD_ARRAY_MAP_PTR next;
++
++};
++
++/* gckCONTEXT structure that hold the current context. */
++struct _gckCONTEXT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Command buffer alignment. */
++ gctUINT32 alignment;
++ gctUINT32 reservedHead;
++ gctUINT32 reservedTail;
++
++ /* Context buffer metrics. */
++ gctSIZE_T stateCount;
++ gctUINT32 totalSize;
++ gctUINT32 bufferSize;
++ gctUINT32 linkIndex2D;
++ gctUINT32 linkIndex3D;
++ gctUINT32 linkIndexXD;
++ gctUINT32 entryOffset3D;
++ gctUINT32 entryOffsetXDFrom2D;
++ gctUINT32 entryOffsetXDFrom3D;
++
++ /* Dirty flags. */
++ gctBOOL dirty;
++ gctBOOL dirty2D;
++ gctBOOL dirty3D;
++ gcsCONTEXT_PTR dirtyBuffer;
++
++ /* State mapping. */
++ gcsSTATE_MAP_PTR map;
++
++ /* List of context buffers. */
++ gcsCONTEXT_PTR buffer;
++
++ /* A copy of the user record array. */
++ gctUINT recordArraySize;
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ gcsRECORD_ARRAY_MAP_PTR recordArrayMap;
++#else
++ gcsSTATE_DELTA_RECORD_PTR recordArray;
++#endif
++
++ /* Requested pipe select for context. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Variables used for building state buffer. */
++ gctUINT32 lastAddress;
++ gctSIZE_T lastSize;
++ gctUINT32 lastIndex;
++ gctBOOL lastFixed;
++
++ gctUINT32 pipeSelectBytes;
++
++ /* Hint array. */
++#if gcdSECURE_USER
++ gctBOOL_PTR hint;
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gcsPROFILER_COUNTERS latestProfiler;
++ gcsPROFILER_COUNTERS histroyProfiler;
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++#endif
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_context_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,8036 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#if VIVANTE_PROFILER_CONTEXT
++#include "gc_hal_kernel_context.h"
++#endif
++
++#define gcdDISABLE_FE_L2 1
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++#define gcmSEMAPHORESTALL(buffer) \
++ do \
++ { \
++ /* Arm the PE-FE Semaphore. */ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, 1) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, 0x0E02); \
++ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END) \
++ | gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE);\
++ \
++ /* STALL FE until PE is done flushing. */ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \
++ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, STALL_STALL, SOURCE, FRONT_END) \
++ | gcmSETFIELDVALUE(0, STALL_STALL, DESTINATION, PIXEL_ENGINE); \
++ } while(0)
++
++typedef struct _gcsiDEBUG_REGISTERS * gcsiDEBUG_REGISTERS_PTR;
++typedef struct _gcsiDEBUG_REGISTERS
++{
++ gctSTRING module;
++ gctUINT index;
++ gctUINT shift;
++ gctUINT data;
++ gctUINT count;
++ gctUINT32 signature;
++}
++gcsiDEBUG_REGISTERS;
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gctBOOL
++_IsHardwareMatch(
++ IN gckHARDWARE Hardware,
++ IN gctINT32 ChipModel,
++ IN gctUINT32 ChipRevision
++ )
++{
++ return ((Hardware->identity.chipModel == ChipModel) &&
++ (Hardware->identity.chipRevision == ChipRevision));
++}
++
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gceSTATUS status;
++
++ gctUINT32 chipIdentity;
++
++ gctUINT32 streamCount = 0;
++ gctUINT32 registerMax = 0;
++ gctUINT32 threadCount = 0;
++ gctUINT32 shaderCoreCount = 0;
++ gctUINT32 vertexCacheSize = 0;
++ gctUINT32 vertexOutputBufferSize = 0;
++ gctUINT32 pixelPipes = 0;
++ gctUINT32 instructionCount = 0;
++ gctUINT32 numConstants = 0;
++ gctUINT32 bufferSize = 0;
++ gctUINT32 varyingsCount = 0;
++#if gcdMULTI_GPU
++ gctUINT32 gpuCoreCount = 0;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /***************************************************************************
++ ** Get chip ID and revision.
++ */
++
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00018,
++ &chipIdentity));
++
++ /* Special case for older graphic cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ Identity->chipModel = gcv500;
++ Identity->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00020,
++ (gctUINT32_PTR) &Identity->chipModel));
++
++ if (((Identity->chipModel & 0xFF00) == 0x0400)
++ && (Identity->chipModel != 0x0420)
++ && (Identity->chipModel != 0x0428))
++ {
++ Identity->chipModel = (gceCHIPMODEL) (Identity->chipModel & 0x0400);
++ }
++
++ /* Read CHIP_REV register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00024,
++ &Identity->chipRevision));
++
++ if ((Identity->chipModel == gcv300)
++ && (Identity->chipRevision == 0x2201)
++ )
++ {
++ gctUINT32 chipDate;
++ gctUINT32 chipTime;
++
++ /* Read date and time registers. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00028,
++ &chipDate));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0002C,
++ &chipTime));
++
++ if ((chipDate == 0x20080814) && (chipTime == 0x12051100))
++ {
++ /* This IP has an ECO; put the correct revision in it. */
++ Identity->chipRevision = 0x1051;
++ }
++ }
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x000A8,
++ &Identity->productID));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipModel=%X",
++ Identity->chipModel);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipRevision=%X",
++ Identity->chipRevision);
++
++
++ /***************************************************************************
++ ** Get chip features.
++ */
++
++ /* Read chip feature register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0001C,
++ &Identity->chipFeatures));
++
++#if gcdENABLE_3D
++ /* Disable fast clear on GC700. */
++ if (Identity->chipModel == gcv700)
++ {
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++#endif
++
++ if (((Identity->chipModel == gcv500) && (Identity->chipRevision < 2))
++ || ((Identity->chipModel == gcv300) && (Identity->chipRevision < 0x2000))
++ )
++ {
++ /* GC500 rev 1.x and GC300 rev < 2.0 doesn't have these registers. */
++ Identity->chipMinorFeatures = 0;
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ Identity->chipMinorFeatures5 = 0;
++ }
++ else
++ {
++ /* Read chip minor feature register #0. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00034,
++ &Identity->chipMinorFeatures));
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))))
++ )
++ {
++ /* Read chip minor featuress register #1. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00074,
++ &Identity->chipMinorFeatures1));
++
++ /* Read chip minor featuress register #2. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00084,
++ &Identity->chipMinorFeatures2));
++
++ /*Identity->chipMinorFeatures2 &= ~(0x1 << 3);*/
++
++ /* Read chip minor featuress register #1. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00088,
++ &Identity->chipMinorFeatures3));
++
++
++ /* Read chip minor featuress register #4. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00094,
++ &Identity->chipMinorFeatures4));
++
++ /* Read chip minor featuress register #5. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x000A0,
++ &Identity->chipMinorFeatures5));
++ }
++ else
++ {
++ /* Chip doesn't has minor features register #1 or 2 or 3 or 4. */
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ Identity->chipMinorFeatures5 = 0;
++ }
++ }
++
++ /* Get the Supertile layout in the hardware. */
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))))
++ {
++ Identity->superTileMode = 2;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))))
++ {
++ Identity->superTileMode = 1;
++ }
++ else
++ {
++ Identity->superTileMode = 0;
++ }
++
++ /* Exception for GC1000, revision 5035 & GC800, revision 4612 */
++ if (((Identity->chipModel == gcv1000) && ((Identity->chipRevision == 0x5035)
++ || (Identity->chipRevision == 0x5036)
++ || (Identity->chipRevision == 0x5037)
++ || (Identity->chipRevision == 0x5039)
++ || (Identity->chipRevision >= 0x5040)))
++ || ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4612))
++ || ((Identity->chipModel == gcv600) && (Identity->chipRevision >= 0x4650))
++ || ((Identity->chipModel == gcv860) && (Identity->chipRevision == 0x4647))
++ || ((Identity->chipModel == gcv400) && (Identity->chipRevision >= 0x4633)))
++ {
++ Identity->superTileMode = 1;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipFeatures=0x%08X",
++ Identity->chipFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures=0x%08X",
++ Identity->chipMinorFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures1=0x%08X",
++ Identity->chipMinorFeatures1);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures2=0x%08X",
++ Identity->chipMinorFeatures2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures3=0x%08X",
++ Identity->chipMinorFeatures3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures4=0x%08X",
++ Identity->chipMinorFeatures4);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures5=0x%08X",
++ Identity->chipMinorFeatures5);
++
++ /***************************************************************************
++ ** Get chip specs.
++ */
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ gctUINT32 specs, specs2, specs3, specs4;
++
++ /* Read gcChipSpecs register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00048,
++ &specs));
++
++ /* Extract the fields. */
++ registerMax = (((((gctUINT32) (specs)) >> (0 ? 7:4)) & ((gctUINT32) ((((1 ? 7:4) - (0 ? 7:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:4) - (0 ? 7:4) + 1)))))) );
++ threadCount = (((((gctUINT32) (specs)) >> (0 ? 11:8)) & ((gctUINT32) ((((1 ? 11:8) - (0 ? 11:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:8) - (0 ? 11:8) + 1)))))) );
++ shaderCoreCount = (((((gctUINT32) (specs)) >> (0 ? 24:20)) & ((gctUINT32) ((((1 ? 24:20) - (0 ? 24:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:20) - (0 ? 24:20) + 1)))))) );
++ vertexCacheSize = (((((gctUINT32) (specs)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ vertexOutputBufferSize = (((((gctUINT32) (specs)) >> (0 ? 31:28)) & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1)))))) );
++ pixelPipes = (((((gctUINT32) (specs)) >> (0 ? 27:25)) & ((gctUINT32) ((((1 ? 27:25) - (0 ? 27:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:25) - (0 ? 27:25) + 1)))))) );
++
++ /* Read gcChipSpecs2 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00080,
++ &specs2));
++
++ instructionCount = (((((gctUINT32) (specs2)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ numConstants = (((((gctUINT32) (specs2)) >> (0 ? 31:16)) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1)))))) );
++ bufferSize = (((((gctUINT32) (specs2)) >> (0 ? 7:0)) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1)))))) );
++
++ /* Read gcChipSpecs3 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0008C,
++ &specs3));
++
++ varyingsCount = (((((gctUINT32) (specs3)) >> (0 ? 8:4)) & ((gctUINT32) ((((1 ? 8:4) - (0 ? 8:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:4) - (0 ? 8:4) + 1)))))) );
++#if gcdMULTI_GPU
++ gpuCoreCount = (((((gctUINT32) (specs3)) >> (0 ? 2:0)) & ((gctUINT32) ((((1 ? 2:0) - (0 ? 2:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:0) - (0 ? 2:0) + 1)))))) );
++#endif
++
++ /* Read gcChipSpecs4 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0009C,
++ &specs4));
++
++
++ streamCount = (((((gctUINT32) (specs4)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ if (streamCount == 0)
++ {
++ /* Extract stream count from older register. */
++ streamCount = (((((gctUINT32) (specs)) >> (0 ? 3:0)) & ((gctUINT32) ((((1 ? 3:0) - (0 ? 3:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:0) - (0 ? 3:0) + 1)))))) );
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs1=0x%08X",
++ specs);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs2=0x%08X",
++ specs2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs3=0x%08X",
++ specs3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs4=0x%08X",
++ specs4);
++ }
++
++ /* Get the number of pixel pipes. */
++ Identity->pixelPipes = gcmMAX(pixelPipes, 1);
++
++ /* Get the stream count. */
++ Identity->streamCount = (streamCount != 0)
++ ? streamCount
++ : (Identity->chipModel >= gcv1000) ? 4 : 1;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: streamCount=%u%s",
++ Identity->streamCount,
++ (streamCount == 0) ? " (default)" : "");
++
++ /* Get the vertex output buffer size. */
++ Identity->vertexOutputBufferSize = (vertexOutputBufferSize != 0)
++ ? 1 << vertexOutputBufferSize
++ : (Identity->chipModel == gcv400)
++ ? (Identity->chipRevision < 0x4000) ? 512
++ : (Identity->chipRevision < 0x4200) ? 256
++ : 128
++ : (Identity->chipModel == gcv530)
++ ? (Identity->chipRevision < 0x4200) ? 512
++ : 128
++ : 512;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexOutputBufferSize=%u%s",
++ Identity->vertexOutputBufferSize,
++ (vertexOutputBufferSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of threads. */
++ Identity->threadCount = (threadCount != 0)
++ ? 1 << threadCount
++ : (Identity->chipModel == gcv400) ? 64
++ : (Identity->chipModel == gcv500) ? 128
++ : (Identity->chipModel == gcv530) ? 128
++ : 256;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: threadCount=%u%s",
++ Identity->threadCount,
++ (threadCount == 0) ? " (default)" : "");
++
++ /* Get the number of shader cores. */
++ Identity->shaderCoreCount = (shaderCoreCount != 0)
++ ? shaderCoreCount
++ : (Identity->chipModel >= gcv1000) ? 2
++ : 1;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: shaderCoreCount=%u%s",
++ Identity->shaderCoreCount,
++ (shaderCoreCount == 0) ? " (default)" : "");
++
++ /* Get the vertex cache size. */
++ Identity->vertexCacheSize = (vertexCacheSize != 0)
++ ? vertexCacheSize
++ : 8;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexCacheSize=%u%s",
++ Identity->vertexCacheSize,
++ (vertexCacheSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of temporary registers. */
++ Identity->registerMax = (registerMax != 0)
++ /* Maximum of registerMax/4 registers are accessible to 1 shader */
++ ? 1 << registerMax
++ : (Identity->chipModel == gcv400) ? 32
++ : 64;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: registerMax=%u%s",
++ Identity->registerMax,
++ (registerMax == 0) ? " (default)" : "");
++
++ /* Get the instruction count. */
++ Identity->instructionCount = (instructionCount == 0) ? 256
++ : (instructionCount == 1) ? 1024
++ : (instructionCount == 2) ? 2048
++ : (instructionCount == 0xFF) ? 512
++ : 256;
++
++ if (Identity->instructionCount == 256)
++ {
++ if ((Identity->chipModel == gcv2000 && Identity->chipRevision == 0x5108)
++ || Identity->chipModel == gcv880)
++ {
++ Identity->instructionCount = 512;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ Identity->instructionCount = 512;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: instructionCount=%u%s",
++ Identity->instructionCount,
++ (instructionCount == 0) ? " (default)" : "");
++
++ /* Get the number of constants. */
++ Identity->numConstants = (numConstants == 0) ? 168 : numConstants;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: numConstants=%u%s",
++ Identity->numConstants,
++ (numConstants == 0) ? " (default)" : "");
++
++ /* Get the buffer size. */
++ Identity->bufferSize = bufferSize;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: bufferSize=%u%s",
++ Identity->bufferSize,
++ (bufferSize == 0) ? " (default)" : "");
++
++
++ if (varyingsCount != 0)
++ {
++ Identity->varyingsCount = varyingsCount;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures1)) >> (0 ? 23:23) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))))
++ {
++ Identity->varyingsCount = 12;
++ }
++ else
++ {
++ Identity->varyingsCount = 8;
++ }
++
++ /* For some cores, it consumes two varying for position, so the max varying vectors should minus one. */
++ if ((Identity->chipModel == gcv5000 && Identity->chipRevision == 0x5434) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5222) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5208) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5245) ||
++ (Identity->chipModel == gcv3000 && Identity->chipRevision == 0x5435) ||
++ (Identity->chipModel == gcv2200 && Identity->chipRevision == 0x5244) ||
++ (Identity->chipModel == gcv1500 && Identity->chipRevision == 0x5246) ||
++ ((Identity->chipModel == gcv2100 || Identity->chipModel == gcv2000) && Identity->chipRevision == 0x5108) ||
++ (Identity->chipModel == gcv880 && (Identity->chipRevision == 0x5107 || Identity->chipRevision == 0x5106)))
++ {
++ Identity->varyingsCount -= 1;
++ }
++
++ Identity->chip2DControl = 0;
++ if (Identity->chipModel == gcv320)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x0002C,
++ &data));
++
++ if ((data != 33956864) &&
++ ((Identity->chipRevision == 0x5007) ||
++ (Identity->chipRevision == 0x5220)))
++ {
++ Identity->chip2DControl |= 0xFF &
++ (Identity->chipRevision == 0x5220 ? 8 :
++ (Identity->chipRevision == 0x5007 ? 12 : 0));
++ }
++
++ if (Identity->chipRevision == 0x5007)
++ {
++ /* Disable splitting rectangle. */
++ Identity->chip2DControl |= 0x100;
++
++ /* Enable 2D Flush. */
++ Identity->chip2DControl |= 0x200;
++ }
++ }
++
++#if gcdMULTI_GPU
++#if gcdMULTI_GPU > 1
++ Identity->gpuCoreCount = gpuCoreCount + 1;
++#else
++ Identity->gpuCoreCount = 1;
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#define gcdDEBUG_MODULE_CLOCK_GATING 0
++#define gcdDISABLE_MODULE_CLOCK_GATING 0
++#define gcdDISABLE_FE_CLOCK_GATING 0
++#define gcdDISABLE_PE_CLOCK_GATING 0
++#define gcdDISABLE_SH_CLOCK_GATING 0
++#define gcdDISABLE_PA_CLOCK_GATING 0
++#define gcdDISABLE_SE_CLOCK_GATING 0
++#define gcdDISABLE_RA_CLOCK_GATING 0
++#define gcdDISABLE_RA_EZ_CLOCK_GATING 0
++#define gcdDISABLE_RA_HZ_CLOCK_GATING 0
++#define gcdDISABLE_TX_CLOCK_GATING 0
++
++#if gcdDEBUG_MODULE_CLOCK_GATING
++gceSTATUS
++_ConfigureModuleLevelClockGating(
++ gckHARDWARE Hardware
++ )
++{
++ gctUINT32 data;
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++#if gcdDISABLE_FE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++#endif
++
++#if gcdDISABLE_PE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++#endif
++
++#if gcdDISABLE_SH_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++#endif
++
++#if gcdDISABLE_PA_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++#endif
++
++#if gcdDISABLE_SE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++#endif
++
++#if gcdDISABLE_RA_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++#endif
++
++#if gcdDISABLE_TX_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++#endif
++
++#if gcdDISABLE_RA_EZ_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++#endif
++
++#if gcdDISABLE_RA_HZ_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++#endif
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++
++#if gcdDISABLE_MODULE_CLOCK_GATING
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++#endif
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_PowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckHARDWARE hardware = (gckHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++static gceSTATUS
++_VerifyDMA(
++ IN gckOS Os,
++ IN gceCORE Core,
++ gctUINT32_PTR Address1,
++ gctUINT32_PTR Address2,
++ gctUINT32_PTR State1,
++ gctUINT32_PTR State2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State1));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address1));
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State2));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address2));
++
++ if (*Address1 != *Address2)
++ {
++ break;
++ }
++
++ if (*State1 != *State2)
++ {
++ break;
++ }
++ }
++
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_DumpDebugRegisters(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gcsiDEBUG_REGISTERS_PTR Descriptor
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 select;
++ gctUINT32 data = 0;
++ gctUINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Descriptor=0x%X", Os, Descriptor);
++
++ gcmkPRINT_N(4, " %s debug registers:\n", Descriptor->module);
++
++ for (i = 0; i < Descriptor->count; i += 1)
++ {
++ select = i << Descriptor->shift;
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ gcmkPRINT_N(12, " [0x%02X] 0x%08X\n", i, data);
++ }
++
++ select = 0xF << Descriptor->shift;
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ if (data == Descriptor->signature)
++ {
++ break;
++ }
++ }
++
++ if (i == 500)
++ {
++ gcmkPRINT_N(4, " failed to obtain the signature (read 0x%08X).\n", data);
++ }
++ else
++ {
++ gcmkPRINT_N(8, " signature = 0x%08X (%d read attempt(s))\n", data, i + 1);
++ }
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IsGPUPresent(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ control));
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Hardware->os,
++ Hardware->core,
++ &identity));
++
++ /* Check if these are the same values as saved before. */
++ if ((Hardware->identity.chipModel != identity.chipModel)
++ || (Hardware->identity.chipRevision != identity.chipRevision)
++ || (Hardware->identity.chipFeatures != identity.chipFeatures)
++ || (Hardware->identity.chipMinorFeatures != identity.chipMinorFeatures)
++ || (Hardware->identity.chipMinorFeatures1 != identity.chipMinorFeatures1)
++ || (Hardware->identity.chipMinorFeatures2 != identity.chipMinorFeatures2)
++ )
++ {
++ gcmkPRINT("[galcore]: GPU is not present.");
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++_FlushCache(
++ gckHARDWARE Hardware,
++ gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 bytes, requested;
++ gctPOINTER buffer;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(Command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(Command, requested));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++gctBOOL
++_IsGPUIdle(
++ IN gctUINT32 Idle
++ )
++{
++ return (((((gctUINT32) (Idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) )
++ ;
++}
++
++/******************************************************************************\
++****************************** gckHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHARDWARE_Construct
++**
++** Construct a new gckHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** OUTPUT:
++**
++** gckHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckHARDWARE
++** object.
++*/
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware = gcvNULL;
++ gctUINT16 data = 0xff00;
++ gctPOINTER pointer = gcvNULL;
++#if gcdMULTI_GPU_AFFINITY
++ gctUINT32 control;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ /* Enable the GPU. */
++ gcmkONERROR(gckOS_SetGPUPower(Os, Core, gcvTRUE, gcvTRUE));
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Allocate the gckHARDWARE object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckHARDWARE),
++ &pointer));
++
++ hardware = (gckHARDWARE) pointer;
++
++ /* Initialize the gckHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++ hardware->core = Core;
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Os, Core, &hardware->identity));
++
++ /* Determine the hardware type */
++ switch (hardware->identity.chipModel)
++ {
++ case gcv350:
++ case gcv355:
++ hardware->type = gcvHARDWARE_VG;
++ break;
++
++ case gcv200:
++ case gcv300:
++ case gcv320:
++ case gcv328:
++ case gcv420:
++ case gcv428:
++ hardware->type = gcvHARDWARE_2D;
++ break;
++
++ default:
++#if gcdMULTI_GPU_AFFINITY
++ hardware->type = (Core == gcvCORE_MAJOR) ? gcvHARDWARE_3D : gcvHARDWARE_OCL;
++#else
++ hardware->type = gcvHARDWARE_3D;
++#endif
++
++ if(hardware->identity.chipModel == gcv880 && hardware->identity.chipRevision == 0x5107)
++ {
++ /*set outstanding limit*/
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x00010;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ }
++
++
++ if ((((((gctUINT32) (hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ))
++ {
++ hardware->type = (gceHARDWARE_TYPE) (hardware->type | gcvHARDWARE_2D);
++ }
++ }
++
++ hardware->powerBaseAddress
++ = ((hardware->identity.chipModel == gcv300)
++ && (hardware->identity.chipRevision < 0x2000))
++ ? 0x0100
++ : 0x0000;
++
++ /* _ResetGPU need powerBaseAddress. */
++ status = _ResetGPU(hardware, Os, Core);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0055C,
++#if gcdDISABLE_FE_L2
++ 0x00FFFFFF));
++#else
++ 0x00FFFF05));
++#endif
++
++#elif gcdMULTI_GPU_AFFINITY
++ control = ((((gctUINT32) (0x00FF0A05)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0055C,
++ control));
++#endif
++
++ hardware->powerMutex = gcvNULL;
++
++ hardware->mmuVersion
++ = (((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 28:28)) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) );
++
++ /* Determine whether bug fixes #1 are present. */
++ hardware->extraEventStates = ((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
++ /* Check if big endian */
++ hardware->bigEndian = (*(gctUINT8 *)&data == 0xff);
++
++ /* Initialize the fast clear. */
++ gcmkONERROR(gckHARDWARE_SetFastClear(hardware, -1, -1));
++
++#if !gcdENABLE_128B_MERGE
++
++ if (((((gctUINT32) (hardware->identity.chipMinorFeatures2)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ /* 128B merge is turned on by default. Disable it. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00558, 0));
++ }
++
++#endif
++
++ /* Set power state to ON. */
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++ hardware->lastWaitLink = ~0U;
++ hardware->lastEnd = ~0U;
++ hardware->globalSemaphore = gcvNULL;
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ hardware->powerOnFscaleVal = 64;
++#endif
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &hardware->powerMutex));
++ gcmkONERROR(gckOS_CreateSemaphore(Os, &hardware->globalSemaphore));
++ hardware->startIsr = gcvNULL;
++ hardware->stopIsr = gcvNULL;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _PowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pendingEvent));
++
++#if gcdLINK_QUEUE_SIZE
++ hardware->linkQueue.front = 0;
++ hardware->linkQueue.rear = 0;
++ hardware->linkQueue.count = 0;
++#endif
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Disable profiler by default */
++ hardware->gpuProfiler = gcvFALSE;
++
++#if defined(LINUX) || defined(__QNXNTO__) || defined(UNDERCE)
++ if (hardware->mmuVersion)
++ {
++ hardware->endAfterFlushMmuCache = gcvTRUE;
++ }
++ else
++#endif
++ {
++ hardware->endAfterFlushMmuCache = gcvFALSE;
++ }
++
++ gcmkONERROR(gckOS_QueryOption(Os, "mmu", (gctUINT32_PTR)&hardware->enableMMU));
++
++ hardware->minFscaleValue = 1;
++
++ /* Return pointer to the gckHARDWARE object. */
++ *Hardware = hardware;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Hardware=0x%x", *Hardware);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, Core, gcvFALSE, gcvFALSE));
++
++ if (hardware->globalSemaphore != gcvNULL)
++ {
++ /* Destroy the global semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Os,
++ hardware->globalSemaphore));
++ }
++
++ if (hardware->powerMutex != gcvNULL)
++ {
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ if (hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware->pendingEvent != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pendingEvent));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, hardware));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Destroy
++**
++** Destroy an gckHARDWARE object.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Destroy the power semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Hardware->os, Hardware->powerMutex));
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pendingEvent));
++
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ Hardware->os,
++ Hardware->functionBytes,
++ Hardware->functionPhysical,
++ Hardware->functionLogical
++ ));
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Hardware->os, Hardware));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_GetType
++**
++** Get the hardware type.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gceHARDWARE_TYPE * Type
++** Pointer to a variable that receives the type of hardware object.
++*/
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkVERIFY_ARGUMENT(Type != gcvNULL);
++
++ *Type = Hardware->type;
++
++ gcmkFOOTER_ARG("*Type=%d", *Type);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_InitializeHardware
++**
++** Initialize the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++ gctUINT32 chipRev;
++ gctUINT32 control;
++ gctUINT32 data;
++ gctUINT32 regPMC = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the chip revision register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00024,
++ &chipRev));
++
++ if (chipRev != Hardware->identity.chipRevision)
++ {
++ /* Chip is not there! */
++ gcmkONERROR(gcvSTATUS_CONTEXT_LOSSED);
++ }
++
++ /* Disable isolate GPU bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)))));
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ /* Enable debug register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ /* Reset memory counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ ~0U));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++
++ /* Get the system's physical base address. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Program the base addesses. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0041C,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00418,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00428,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00420,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00424,
++ baseAddress));
++
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ /* Enable clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ if ((Hardware->identity.chipRevision == 0x4301)
++ || (Hardware->identity.chipRevision == 0x4302)
++ )
++ {
++ /* Disable stall module level clock gating for 4.3.0.1 and 4.3.0.2
++ ** revisions. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++
++#if gcdENABLE_3D
++ /* Disable PE clock gating on revs < 5.0 when HZ is present without a
++ ** bug fix. */
++ if ((Hardware->identity.chipRevision < 0x5000)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HZ)
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))))
++ )
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable PE clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ }
++
++#endif
++ }
++
++ if (Hardware->identity.chipModel == gcv4000 &&
++ ((Hardware->identity.chipRevision == 0x5208) || (Hardware->identity.chipRevision == 0x5222)))
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)))));
++ }
++
++ if (Hardware->identity.chipModel == gcv1000 &&
++ (Hardware->identity.chipRevision == 0x5039 ||
++ Hardware->identity.chipRevision == 0x5040))
++ {
++ gctUINT32 pulseEater;
++
++ pulseEater = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (pulseEater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)))));
++ }
++
++ if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) == gcvSTATUS_FALSE)
++ || (Hardware->identity.chipRevision < 0x5422)
++ )
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15)));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv2000, 0x5108))
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ &data));
++
++ /* Set FE bus to one, TX bus to zero */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ data));
++ }
++
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Hardware,
++ Hardware->kernel->mmu->pageTableLogical));
++
++ if (Hardware->identity.chipModel >= gcv400
++ && Hardware->identity.chipModel != gcv420)
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable PA clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++
++ /* Limit 2D outstanding request. */
++ if (_IsHardwareMatch(Hardware, gcv880, 0x5107))
++ {
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x00010;
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00414, axi_ot));
++ }
++
++ if (Hardware->identity.chip2DControl & 0xFF)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (Hardware->identity.chip2DControl & 0xFF) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv1000, 0x5035))
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ /* Disable HZ-L2. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv4000, 0x5222))
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable TX clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv880, 0x5106))
++ {
++ Hardware->kernel->timeOut = 140 * 1000;
++ }
++
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable RA HZ clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable RA EZ clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++
++ if (regPMC != 0)
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ regPMC));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv2000, 0x5108)
++ || _IsHardwareMatch(Hardware, gcv320, 0x5007)
++ || _IsHardwareMatch(Hardware, gcv880, 0x5106)
++ || _IsHardwareMatch(Hardware, gcv400, 0x4645)
++ )
++ {
++ /* Update GPU AXI cache atttribute. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00008,
++ 0x00002200));
++ }
++
++
++ if ((Hardware->identity.chipRevision > 0x5420)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D))
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
++ /* Disable internal DFS. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++ }
++
++#if gcdDEBUG_MODULE_CLOCK_GATING
++ _ConfigureModuleLevelClockGating(Hardware);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, no horizontal pixel per tile will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, no vertical pixel per tile will be returned.
++*/
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*InternalSize=%lu *InternalBaseAddress=0x%08x "
++ "*InternalAlignment=0x%08x *ExternalSize=%lu "
++ "*ExternalBaseAddress=0x%08x *ExtenalAlignment=0x%08x "
++ "*HorizontalTileSize=%u *VerticalTileSize=%u",
++ gcmOPT_VALUE(InternalSize),
++ gcmOPT_VALUE(InternalBaseAddress),
++ gcmOPT_VALUE(InternalAlignment),
++ gcmOPT_VALUE(ExternalSize),
++ gcmOPT_VALUE(ExternalBaseAddress),
++ gcmOPT_VALUE(ExternalAlignment),
++ gcmOPT_VALUE(HorizontalTileSize),
++ gcmOPT_VALUE(VerticalTileSize));
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++** Pointer to the identity structure.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gctUINT32 features;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Identity != gcvNULL);
++
++ /* Return chip model and revision. */
++ Identity->chipModel = Hardware->identity.chipModel;
++ Identity->chipRevision = Hardware->identity.chipRevision;
++
++ /* Return feature set. */
++ features = Hardware->identity.chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Override fast clear by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ if ((((((gctUINT32) (features)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ))
++ {
++ /* Override compression by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (Hardware->allowCompression) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 through GC500.2 and GC300,
++ ** since they did not have this bit. */
++ if (((Hardware->identity.chipModel == gcv500) && (Hardware->identity.chipRevision <= 2))
++ || (Hardware->identity.chipModel == gcv300)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ Identity->chipFeatures = features;
++
++ /* Return minor features. */
++ Identity->chipMinorFeatures = Hardware->identity.chipMinorFeatures;
++ Identity->chipMinorFeatures1 = Hardware->identity.chipMinorFeatures1;
++ Identity->chipMinorFeatures2 = Hardware->identity.chipMinorFeatures2;
++ Identity->chipMinorFeatures3 = Hardware->identity.chipMinorFeatures3;
++ Identity->chipMinorFeatures4 = Hardware->identity.chipMinorFeatures4;
++ Identity->chipMinorFeatures5 = Hardware->identity.chipMinorFeatures5;
++
++ /* Return chip specs. */
++ Identity->streamCount = Hardware->identity.streamCount;
++ Identity->registerMax = Hardware->identity.registerMax;
++ Identity->threadCount = Hardware->identity.threadCount;
++ Identity->shaderCoreCount = Hardware->identity.shaderCoreCount;
++ Identity->vertexCacheSize = Hardware->identity.vertexCacheSize;
++ Identity->vertexOutputBufferSize = Hardware->identity.vertexOutputBufferSize;
++ Identity->pixelPipes = Hardware->identity.pixelPipes;
++ Identity->instructionCount = Hardware->identity.instructionCount;
++ Identity->numConstants = Hardware->identity.numConstants;
++ Identity->bufferSize = Hardware->identity.bufferSize;
++ Identity->varyingsCount = Hardware->identity.varyingsCount;
++ Identity->superTileMode = Hardware->identity.superTileMode;
++#if gcdMULTI_GPU
++ Identity->gpuCoreCount = Hardware->identity.gpuCoreCount;
++#endif
++ Identity->chip2DControl = Hardware->identity.chip2DControl;
++
++ Identity->productID = Hardware->identity.productID;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Addres=0x%08x", Hardware, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 31:31)) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x1:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = (((((gctUINT32) (Address)) >> (0 ? 30:0)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1)))))) );
++ }
++ else
++ {
++ *Pool = gcvPOOL_SYSTEM;
++ *Offset = Address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Pool=%d *Offset=0x%08x", *Pool, *Offset);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Hardware address of command buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes for the prefetch unit (until after the first LINK).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Bytes=%lu",
++ Hardware, Address, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Enable all events. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00014, ~0U));
++
++ /* Write address register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00654, Address));
++
++ /* Build control register. */
++ control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ /* Set big endian */
++ if (Hardware->bigEndian)
++ {
++ control |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20)));
++ }
++
++ /* Write control register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Started command buffer @ 0x%08x",
++ Address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_WaitLink
++**
++** Append a WAIT/LINK command sequence at the specified location in the command
++** queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT/LINK command sequence at or gcvNULL just to query the size of the
++** WAIT/LINK command sequence.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT/LINK command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the WAIT/LINK command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++** gctUINT32 * WaitOffset
++** Pointer to a variable that will receive the offset of the WAIT command
++** from the specified logical pointer.
++** If 'WaitOffset' is gcvNULL nothing will be returned.
++**
++** gctSIZE_T * WaitSize
++** Pointer to a variable that will receive the number of bytes used by
++** the WAIT command. If 'WaitSize' is gcvNULL nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctUINT32 * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctUINT32 * WaitSize
++ )
++{
++ static const gctUINT waitCount = 200;
++
++ gceSTATUS status;
++ gctUINT32 address;
++ gctUINT32_PTR logical;
++ gctUINT32 bytes;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x *Bytes=%lu",
++ Hardware, Logical, Offset, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical != gcvNULL) || (Bytes != gcvNULL));
++
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ bytes = gcmALIGN(Offset + 40, 8) - Offset;
++#else
++ /* Compute number of bytes required. */
++ bytes = gcmALIGN(Offset + 16, 8) - Offset;
++#endif
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (logical != gcvNULL)
++ {
++ /* Not enough space? */
++ if (*Bytes < bytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, gcvFALSE, &address));
++
++ /* Store the WAIT/LINK address. */
++ Hardware->lastWaitLink = address;
++
++ /* Append WAIT(count). */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (waitCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | gcvCORE_3D_0_MASK;
++
++ logical[3] = 0;
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[5] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ logical[6] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | gcvCORE_3D_ALL_MASK;
++
++ logical[7] = 0;
++
++ /* Append LINK(2, address). */
++ logical[8] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[9] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", address + 8, logical[3]);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 16, address, bytes
++ );
++#else
++
++ /* Append LINK(2, address). */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 8, address, bytes
++ );
++#endif
++ if (WaitOffset != gcvNULL)
++ {
++ /* Return the offset pointer to WAIT command. */
++ *WaitOffset = 0;
++ }
++
++ if (WaitSize != gcvNULL)
++ {
++ /* Return number of bytes used by the WAIT command. */
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ *WaitSize = 32;
++#else
++ *WaitSize = 8;
++#endif
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT/LINK command
++ ** sequence. */
++ *Bytes = bytes;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *WaitOffset=0x%x *WaitSize=%lu",
++ gcmOPT_VALUE(Bytes), gcmOPT_VALUE(WaitOffset),
++ gcmOPT_VALUE(WaitSize));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_End
++**
++** Append an END command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** END command at or gcvNULL just to query the size of the END command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append END. */
++ logical[0] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: END", Logical);
++
++ /* Make sure the CPU writes out the data to memory. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, gcvFALSE, &address));
++
++ Hardware->lastEnd = address;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckHARDWARE_ChipEnable(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gceCORE_3D_MASK ChipEnable,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x ChipEnable=0x%x *Bytes=%lu",
++ Hardware, Logical, ChipEnable, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append CHIPENABLE. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ChipEnable;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: CHIPENABLE 0x%x", Logical, ChipEnable);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CHIPENABLE command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_Nop
++**
++** Append a NOP command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** NOP command at or gcvNULL just to query the size of the NOP command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the NOP command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the NOP command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append NOP. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: NOP", Logical);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the NOP command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Event
++**
++** Append an EVENT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the EVENT command at or gcvNULL just to query the size of the EVENT
++** command.
++**
++** gctUINT8 Event
++** Event ID to program.
++**
++** gceKERNEL_WHERE FromWhere
++** Location of the pipe to send the event.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the EVENT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT size;
++ gctUINT32 destination = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Event=%u FromWhere=%d *Bytes=%lu",
++ Hardware, Logical, Event, FromWhere, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++ gcmkVERIFY_ARGUMENT(Event < 32);
++
++#if gcdMULTI_GPU
++ if (FromWhere == gcvKERNEL_COMMAND) FromWhere = gcvKERNEL_PIXEL;
++#endif
++
++ /* Determine the size of the command. */
++
++ size = (Hardware->extraEventStates && (FromWhere == gcvKERNEL_PIXEL))
++ ? gcmALIGN(8 + (1 + 5) * 4, 8) /* EVENT + 5 STATES */
++ : 8;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < size)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ switch (FromWhere)
++ {
++ case gcvKERNEL_COMMAND:
++ /* From command processor. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ break;
++
++ case gcvKERNEL_PIXEL:
++ /* From pixel engine. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Append EVENT(Event, destination). */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = ((((gctUINT32) (destination)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (Event) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ /* Make sure the event ID gets written out before GPU can access it. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 phys;
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &phys);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: EVENT %d", phys, Event);
++ }
++#endif
++
++ /* Append the extra states. These are needed for the chips that do not
++ ** support back-to-back events due to the async interface. The extra
++ ** states add the necessary delay to ensure that event IDs do not
++ ** collide. */
++ if (size > 8)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0100) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++ logical[3] = 0;
++ logical[4] = 0;
++ logical[5] = 0;
++ logical[6] = 0;
++ logical[7] = 0;
++ }
++
++#if gcdINTERRUPT_STATISTIC
++ if (Event < gcmCOUNTOF(Hardware->kernel->eventObj->queues))
++ {
++ gckOS_AtomSetMask(Hardware->pendingEvent, 1 << Event);
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT command. */
++ *Bytes = size;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_PipeSelect
++**
++** Append a PIPESELECT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the PIPESELECT command at or gcvNULL just to query the size of the
++** PIPESELECT command.
++**
++** gcePIPE_SELECT Pipe
++** Pipe value to select.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the PIPESELECT command.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the PIPESELECT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Pipe=%d *Bytes=%lu",
++ Hardware, Logical, Pipe, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Append a PipeSelect. */
++ if (Logical != gcvNULL)
++ {
++ gctUINT32 flush, stall;
++
++ if (*Bytes < 32)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ flush = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++
++ stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1]
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++ /* LoadState(AQSemaphore, 1), stall. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: SEMAPHORE 0x%x", logical + 2, stall);
++
++ /* Stall, stall. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ logical[5] = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: STALL 0x%x", logical + 4, stall);
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ logical[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[7] = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: PIPE %d", logical + 6, Pipe);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the PIPESELECT command. */
++ *Bytes = 32;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Link
++**
++** Append a LINK command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the LINK command at or gcvNULL just to query the size of the LINK
++** command.
++**
++** gctUINT32 FetchAddress
++** Hardware address of destination of LINK.
++**
++** gctSIZE_T FetchSize
++** Number of bytes in destination of LINK.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the LINK command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the LINK command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT32 FetchSize,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctUINT32 link;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x FetchAddress=0x%x FetchSize=%lu "
++ "*Bytes=%lu",
++ Hardware, Logical, FetchAddress, FetchSize,
++ gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical + 1, FetchAddress));
++
++ /* Make sure the address got written before the LINK command. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++ /* Compute number of 64-byte aligned bytes to fetch. */
++ bytes = gcmALIGN(FetchAddress + FetchSize, 64) - FetchAddress;
++
++ /* Append LINK(bytes / 8), FetchAddress. */
++ link = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical, link));
++
++ /* Memory barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical));
++
++#if gcdLINK_QUEUE_SIZE && !gcdPROCESS_ADDRESS_SPACE
++ if ((Hardware->kernel->virtualCommandBuffer)
++ && (Hardware->kernel->stuckDump > 2)
++ )
++ {
++ gctBOOL in;
++
++ gcmkVERIFY_OK(gckCOMMAND_AddressInKernelCommandBuffer(
++ Hardware->kernel->command, FetchAddress, &in));
++
++ if (in == gcvFALSE)
++ {
++ /* Record user command buffer and context buffer link
++ ** information for stuck dump.
++ **/
++ gckLINKQUEUE_Enqueue(
++ &Hardware->linkQueue, FetchAddress, FetchAddress + (gctUINT)bytes);
++ }
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_UpdateQueueTail
++**
++** Update the tail of the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the start of the command queue.
++**
++** gctUINT32 Offset
++** Offset into the command queue of the tail (last command).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x",
++ Hardware, Logical, Offset);
++
++ /* Verify the hardware. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Force a barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ /* Notify gckKERNEL object of change. */
++#if gcdMULTI_GPU
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ 0,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++#else
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++#endif
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ gcmkONERROR(gcvSTATUS_DEVICE);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the memory in user space.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x InUserSpace=%d",
++ Hardware, Logical, InUserSpace);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Convert logical address into a physical address. */
++ if (InUserSpace)
++ {
++ gcmkONERROR(gckOS_UserLogicalToPhysical(Hardware->os, Logical, &address));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++ }
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Interrupt
++**
++** Process an interrupt.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL InterruptValid
++** If gcvTRUE, this function will read the interrupt acknowledge
++** register, stores the data, and return whether or not the interrupt
++** is ours or not. If gcvFALSE, this functions will read the interrupt
++** acknowledge register and combine it with any stored value to handle
++** the event notifications.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctBOOL InterruptValid
++ )
++{
++ gckEVENT eventObj;
++ gctUINT32 data = 0;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x InterruptValid=%d", Hardware, InterruptValid);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Extract gckEVENT object. */
++ eventObj = Hardware->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ if (InterruptValid)
++ {
++ /* Read AQIntrAcknowledge register. */
++#if gcdMULTI_GPU
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ CoreId,
++ 0x00010,
++ &data));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++ }
++#else
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++#endif
++
++ if (data == 0)
++ {
++ /* Not our interrupt. */
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ }
++ else
++ {
++
++#if gcdINTERRUPT_STATISTIC
++ gckOS_AtomClearMask(Hardware->pendingEvent, data);
++#endif
++
++ /* Inform gckEVENT of the interrupt. */
++ status = gckEVENT_Interrupt(eventObj,
++#if gcdMULTI_GPU
++ CoreId,
++#endif
++ data);
++ }
++ }
++ else
++ {
++ /* Handle events. */
++ status = gckEVENT_Notify(eventObj, 0);
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryCommandBuffer
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Alignment
++** Pointer to a variable receiving the alignment for each command.
++**
++** gctSIZE_T * ReservedHead
++** Pointer to a variable receiving the number of reserved bytes at the
++** head of each command buffer.
++**
++** gctSIZE_T * ReservedTail
++** Pointer to a variable receiving the number of bytes reserved at the
++** tail of each command buffer.
++*/
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Alignment,
++ OUT gctUINT32 * ReservedHead,
++ OUT gctUINT32 * ReservedTail
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Alignment != gcvNULL)
++ {
++ /* Align every 8 bytes. */
++ *Alignment = 8;
++ }
++
++ if (ReservedHead != gcvNULL)
++ {
++ /* Reserve space for SelectPipe(). */
++ *ReservedHead = 32;
++ }
++
++ if (ReservedTail != gcvNULL)
++ {
++ /* Reserve space for Link(). */
++ *ReservedTail = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Alignment=%lu *ReservedHead=%lu *ReservedTail=%lu",
++ gcmOPT_VALUE(Alignment), gcmOPT_VALUE(ReservedHead),
++ gcmOPT_VALUE(ReservedTail));
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QuerySystemMemory
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++** Poinetr to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = 1U << 31;
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*SystemSize=%lu *SystemBaseAddress=%lu",
++ gcmOPT_VALUE(SystemSize), gcmOPT_VALUE(SystemBaseAddress));
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_3D
++/*******************************************************************************
++**
++** gckHARDWARE_QueryShaderCaps
++**
++** Query the shader capabilities.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT * VertexUniforms
++** Pointer to a variable receiving the number of uniforms in the vertex
++** shader.
++**
++** gctUINT * FragmentUniforms
++** Pointer to a variable receiving the number of uniforms in the
++** fragment shader.
++**
++** gctBOOL * UnifiedUnforms
++** Pointer to a variable receiving whether the uniformas are unified.
++*/
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctBOOL * UnifiedUnforms
++ )
++{
++ gctBOOL unifiedConst;
++ gctUINT32 vsConstMax;
++ gctUINT32 psConstMax;
++ gctUINT32 vsConstBase;
++ gctUINT32 psConstBase;
++ gctUINT32 ConstMax;
++
++ gcmkHEADER_ARG("Hardware=0x%x VertexUniforms=0x%x "
++ "FragmentUniforms=0x%x UnifiedUnforms=0x%x",
++ Hardware, VertexUniforms,
++ FragmentUniforms, UnifiedUnforms);
++
++ {if (Hardware->identity.numConstants > 256){ unifiedConst = gcvTRUE; vsConstBase = 0xC000; psConstBase = 0xC000; ConstMax = Hardware->identity.numConstants; vsConstMax = 256; psConstMax = ConstMax - vsConstMax;}else if (Hardware->identity.numConstants == 256){ if (Hardware->identity.chipModel == gcv2000 && Hardware->identity.chipRevision == 0x5118) { unifiedConst = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vsConstMax = 256; psConstMax = 64; ConstMax = 320; } else { unifiedConst = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vsConstMax = 256; psConstMax = 256; ConstMax = 512; }}else{ unifiedConst = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vsConstMax = 168; psConstMax = 64; ConstMax = 232;}};
++
++ if (VertexUniforms != gcvNULL)
++ {
++ /* Return the vs shader const count. */
++ *VertexUniforms = vsConstMax;
++ }
++
++ if (FragmentUniforms != gcvNULL)
++ {
++ /* Return the ps shader const count. */
++ *FragmentUniforms = psConstMax;
++ }
++
++ if (UnifiedUnforms != gcvNULL)
++ {
++ /* Return whether the uniformas are unified. */
++ *UnifiedUnforms = unifiedConst;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++ gctUINT32 idle;
++ gctUINT32 timer = 0, delay = 1;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert the logical address into physical address. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setting page table to 0x%08X",
++ address);
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00400,
++ address));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00410,
++ address));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00404,
++ address));
++
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00408,
++ address));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0040C,
++ address));
++ }
++ else if (Hardware->enableMMU == gcvTRUE)
++ {
++ /* Execute prepared command sequence. */
++ gcmkONERROR(gckHARDWARE_Execute(
++ Hardware,
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].address,
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].bytes
++ ));
++
++ /* Wait until MMU configure finishes. */
++ do
++ {
++ gckOS_Delay(Hardware->os, delay);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(
++ Hardware->os,
++ Hardware->core,
++ 0x00004,
++ &idle));
++
++ timer += delay;
++ delay *= 2;
++
++#if gcdGPU_TIMEOUT
++ if (timer >= Hardware->kernel->timeOut)
++ {
++ /* Even if hardware is not reset correctly, let software
++ ** continue to avoid software stuck. Software will timeout again
++ ** and try to recover GPU in next timeout.
++ */
++ gcmkONERROR(gcvSTATUS_DEVICE);
++ }
++#endif
++ }
++ while (!(((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ));
++
++ /* Enable MMU. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os,
++ Hardware->core,
++ 0x0018C,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (gcvTRUE) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctUINT32 bufferSize;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 flushSize;
++ gctUINT32 count;
++ gctUINT32 physical;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Flush the memory controller. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, 8, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(gckCOMMAND_Execute(command, 8));
++ }
++ else
++ {
++ flushSize = 16 * 4;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, flushSize, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ count = ((gctUINT)bufferSize - flushSize + 7) >> 3;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(command->os, buffer, &physical));
++
++ /* Flush cache. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[7]
++ = physical + 8 * gcmSIZEOF(gctUINT32);
++
++ /* Flush MMU cache. */
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9]
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[11]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[12]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[13]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[14]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[15]
++ = physical + flushSize;
++
++ gcmkONERROR(gckCOMMAND_Execute(command, flushSize));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetMMUStates(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 config, address;
++ gctUINT32_PTR buffer;
++ gctBOOL ace;
++ gctUINT32 reserveBytes = 16 + 4 * 4;
++
++ gctBOOL config2D;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Hardware->mmuVersion != 0);
++
++ ace = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_ACE);
++
++ if (ace)
++ {
++ reserveBytes += 8;
++ }
++
++ config2D = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_2D);
++
++ if (config2D)
++ {
++ reserveBytes +=
++ /* Pipe Select. */
++ 4 * 4
++ /* Configure MMU States. */
++ + 4 * 4
++ /* Semaphore stall */
++ + 4 * 8;
++ }
++
++ /* Convert logical address into physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, MtlbAddress, &config));
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, SafeAddress, &address));
++
++ if (address & 0x3F)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ switch (Mode)
++ {
++ case gcvMMU_MODE_1K:
++ if (config & 0x3FF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ case gcvMMU_MODE_4K:
++ if (config & 0xFFF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (Logical != gcvNULL)
++ {
++ buffer = Logical;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = config;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = address;
++
++ if (ace)
++ {
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0068) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0;
++ }
++
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 
4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++
++ if (config2D)
++ {
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0x1;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = config;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = address;
++
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 
4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0x0;
++
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 
4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++ }
++
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ *Bytes = reserveBytes;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++/*******************************************************************************
++**
++** gckHARDWARE_ConfigMMU
++**
++** Append a MMU Configuration command sequence at the specified location in the command
++** queue. That command sequence consists of mmu configuration, LINK and WAIT/LINK.
++** LINK is fetched and paresed with new mmu configuration.
++**
++** If MMU Configuration is not changed between commit, change last WAIT/LINK to
++** link to ENTRY.
++**
++** -+-----------+-----------+-----------------------------------------
++** | WAIT/LINK | WAIT/LINK |
++** -+-----------+-----------+-----------------------------------------
++** | /|\
++** \|/ |
++** +--------------------+
++** | ENTRY | ... | LINK |
++** +--------------------+
++**
++** If MMU Configuration is changed between commit, change last WAIT/LINK to
++** link to MMU CONFIGURATION command sequence, and there are an EVNET and
++** an END at the end of this command sequence, when interrupt handler
++** receives this event, it will start FE at ENTRY to continue the command
++** buffer execution.
++**
++** -+-----------+-------------------+---------+---------+-----------+--
++** | WAIT/LINK | MMU CONFIGURATION | EVENT | END | WAIT/LINK |
++** -+-----------+-------------------+---------+---------+-----------+--
++** | /|\ /|\
++** +-------------+ |
++** +--------------------+
++** | ENTRY | ... | LINK |
++** +--------------------+
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** command sequence at or gcvNULL just to query the size of the
++** command sequence.
++**
++** gctPOINTER MtlbLogical
++** Pointer to the current Master TLB.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++** gctUINT32 * WaitLinkOffset
++** Pointer to a variable that will receive the offset of the WAIT/LINK command
++** from the specified logcial pointer.
++** If 'WaitLinkOffset' is gcvNULL nothing will be returned.
++**
++** gctSIZE_T * WaitLinkBytes
++** Pointer to a variable that will receive the number of bytes used by
++** the WAIT command.
++** If 'WaitLinkBytes' is gcvNULL nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_ConfigMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER MtlbLogical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctSIZE_T * WaitLinkOffset,
++ OUT gctSIZE_T * WaitLinkBytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes, bytesAligned;
++ gctUINT32 config;
++ gctUINT32_PTR buffer = (gctUINT32_PTR) Logical;
++ gctUINT32 physical;
++ gctUINT32 event;
++
++ gcmkHEADER_ARG("Hardware=0x%08X Logical=0x%08x MtlbLogical=0x%08X",
++ Hardware, Logical, MtlbLogical);
++
++ bytes
++ /* Flush cache states. */
++ = 18 * 4
++ /* MMU configuration states. */
++ + 6 * 4
++ /* EVENT. */
++ + 2 * 4
++ /* END. */
++ + 2 * 4
++ /* WAIT/LINK. */
++ + 4 * 4;
++
++ /* Compute number of bytes required. */
++ bytesAligned = gcmALIGN(Offset + bytes, 8) - Offset;
++
++ if (buffer != gcvNULL)
++ {
++ if (MtlbLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get physical address of this command buffer segment. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, buffer, &physical));
++
++ /* Get physical address of Master TLB. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, MtlbLogical, &config));
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ /* Flush cache. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Flush tile status cache. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = physical + 10 * gcmSIZEOF(gctUINT32);
++
++ /* Configure MMU. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = physical + 18 * 4;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = config;
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Event 29. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ event = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ event = ((((gctUINT32) (event)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (29) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ *buffer++
++ = event;
++
++ /* Append END. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ *Bytes = bytesAligned;
++ }
++
++ if (WaitLinkOffset != gcvNULL)
++ {
++ *WaitLinkOffset = bytes - 4 * 4;
++ }
++
++ if (WaitLinkBytes != gcvNULL)
++ {
++#if gcdMULTI_GPU
++ *WaitLinkBytes = 40;
++#else
++ *WaitLinkBytes = 4 * 4;
++#endif
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Index=%u Offset=%u", Hardware, Index, Offset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ *Address = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (Offset | (Index << 12)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle = 0;
++ gctINT retry, poll, pollCount;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Wait=%d", Hardware, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++
++ /* If we have to wait, try 100 polls per millisecond. */
++ pollCount = Wait ? 100 : 1;
++
++ /* At most, try for 1 second. */
++ for (retry = 0; retry < 1000; ++retry)
++ {
++ /* If we have to wait, try 100 polls per millisecond. */
++ for (poll = pollCount; poll > 0; --poll)
++ {
++ /* Read register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++
++ /* See if we have to wait for FE idle. */
++ if (_IsGPUIdle(idle)
++ && (address == Hardware->lastEnd + 8)
++ )
++ {
++ /* FE is idle. */
++ break;
++ }
++ }
++
++ /* Check if we need to wait for FE and FE is busy. */
++ if (Wait && !_IsGPUIdle(idle))
++ {
++ /* Wait a little. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "%s: Waiting for idle: 0x%08X",
++ __FUNCTION__, idle);
++
++ gcmkVERIFY_OK(gckOS_Delay(Hardware->os, 1));
++ }
++ else
++ {
++ break;
++ }
++ }
++
++ /* Return idle to caller. */
++ *Data = idle;
++
++#if defined(EMULATOR)
++ /* Wait a little while until CModel FE gets END.
++ * END is supposed to be appended by caller.
++ */
++ gckOS_Delay(gcvNULL, 100);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32 pipe;
++ gctUINT32 flush = 0;
++ gctBOOL flushTileStatus;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++ gctUINT32 reserveBytes
++ /* Semaphore/Stall */
++ = 4 * gcmSIZEOF(gctUINT32);
++
++ gcmkHEADER_ARG("Hardware=0x%x Flush=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Flush, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get current pipe. */
++ pipe = Hardware->kernel->command->pipeSelect;
++
++ /* Flush tile status cache. */
++ flushTileStatus = Flush & gcvFLUSH_TILE_STATUS;
++
++ /* Flush 3D color cache. */
++ if ((Flush & gcvFLUSH_COLOR) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ /* Flush 3D depth cache. */
++ if ((Flush & gcvFLUSH_DEPTH) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Flush 3D texture cache. */
++ if ((Flush & gcvFLUSH_TEXTURE) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ }
++
++ /* Flush 2D cache. */
++ if ((Flush & gcvFLUSH_2D) && (pipe == 0x1))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ }
++
++#if gcdMULTI_GPU
++ /* Flush L2 cache. */
++ if ((Flush & gcvFLUSH_L2) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++#endif
++
++ /* Determine reserve bytes. */
++ if (flush)
++ {
++ reserveBytes += 2 * gcmSIZEOF(gctUINT32);
++ }
++
++ if (flushTileStatus)
++ {
++ reserveBytes += 2 * gcmSIZEOF(gctUINT32);
++ }
++
++ /* See if there is a valid flush. */
++ if ((flush == 0) && (flushTileStatus == gcvFALSE))
++ {
++ if (Bytes != gcvNULL)
++ {
++ /* No bytes required. */
++ *Bytes = 0;
++ }
++ }
++
++ else
++ {
++ /* Copy to command queue. */
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < reserveBytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ if (flush)
++ {
++ /* Append LOAD_STATE to AQFlush. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *logical++
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical - 1, flush);
++ }
++
++ if (flushTileStatus)
++ {
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH TILE STATUS 0x%x", logical - 1, logical[-1]);
++ }
++
++ /* Semaphore. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x05 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* bytes required. */
++ *Bytes = reserveBytes;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ )
++{
++#if gcdENABLE_3D
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d Compression=%d",
++ Hardware, Enable, Compression);
++
++ /* Only process if fast clear is available. */
++ if ((((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ if (Enable == -1)
++ {
++ /* Determine automatic value for fast clear. */
++ Enable = ((Hardware->identity.chipModel != gcv500)
++ || (Hardware->identity.chipRevision >= 3)
++ ) ? 1 : 0;
++ }
++
++ if (Compression == -1)
++ {
++ /* Determine automatic value for compression. */
++ Compression = Enable
++ & (((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) );
++ }
++
++ /* Read AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &debug));
++
++ /* Set fast clear bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++ if (
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) ||
++ (Hardware->identity.chipModel >= gcv4000))
++ {
++ /* Set compression bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21))) | (((gctUINT32) ((gctUINT32) (Compression == 0) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21)));
++ }
++
++ /* Write back AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ debug));
++
++ /* Store fast clear and compression flags. */
++ Hardware->allowFastClear = Enable;
++ Hardware->allowCompression = Compression;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "FastClear=%d Compression=%d", Enable, Compression);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++}
++gcePOWER_FLAGS;
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++static gctCONST_STRING
++_PowerEnum(gceCHIPPOWERSTATE State)
++{
++ const gctCONST_STRING states[] =
++ {
++ gcmSTRING(gcvPOWER_ON),
++ gcmSTRING(gcvPOWER_OFF),
++ gcmSTRING(gcvPOWER_IDLE),
++ gcmSTRING(gcvPOWER_SUSPEND),
++ gcmSTRING(gcvPOWER_SUSPEND_ATPOWERON),
++ gcmSTRING(gcvPOWER_OFF_ATPOWERON),
++ gcmSTRING(gcvPOWER_IDLE_BROADCAST),
++ gcmSTRING(gcvPOWER_SUSPEND_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_RECOVERY),
++ gcmSTRING(gcvPOWER_OFF_TIMEOUT),
++ gcmSTRING(gcvPOWER_ON_AUTO)
++ };
++
++ if ((State >= gcvPOWER_ON) && (State <= gcvPOWER_ON_AUTO))
++ {
++ return states[State - gcvPOWER_ON];
++ }
++
++ return "unknown";
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag, clock;
++ gctPOINTER buffer;
++ gctUINT32 bytes, requested;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL broadcast = gcvFALSE;
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++ gctUINT32 process, thread;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL commandStarted = gcvFALSE;
++ gctBOOL isrStarted = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++ gctBOOL global = gcvFALSE;
++ gctBOOL globalAcquired = gcvFALSE;
++ gctBOOL configMmu = gcvFALSE;
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_RELEASE,
++ /* OFF */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
++
++ /* Clocks. */
++ static const gctUINT clocks[4] =
++ {
++ /* gcvPOWER_ON */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (64) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_OFF */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_IDLE */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_SUSPEND */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++ };
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d(%s)",
++ State, _PowerEnum(State));
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON and note we are inside recovery. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ if (Hardware->powerManagement == gcvFALSE
++ && State != gcvPOWER_ON
++ )
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State != gcvPOWER_ON)
++ {
++ /* Called from IST,
++ ** so waiting here will cause deadlock,
++ ** if lock holder call gckCOMMAND_Stall() */
++ status = gcvSTATUS_INVALID_REQUEST;
++ goto OnError;
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++    /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ clock = clocks[State];
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (State == gcvPOWER_ON)
++ {
++ clock = ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (Hardware->powerOnFscaleVal) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)));
++ }
++#endif
++
++ if (State == gcvPOWER_SUSPEND && Hardware->chipPowerState == gcvPOWER_OFF && broadcast)
++ {
++#if gcdPOWER_SUSPEND_WHEN_IDLE
++ /* Do nothing */
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++#else
++ /* Clock should be on when switch power from off to suspend */
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) ;
++#endif
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime is pushed forward, give up.*/
++ if (isAfter
++ /* Expect a transition start from IDLE or SUSPEND. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Power Off GPU[%d] at %u [supposed to be at %u]",
++ Hardware->core, currentTime, Hardware->powerOffTime);
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* If this is an internal power management, we have to check if we can grab
++ ** the global power semaphore. If we cannot, we have to wait until the
++ ** external world changes power management. */
++ if (!global)
++ {
++ /* Try to acquire the global semaphore. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Called from thread routine which should NEVER sleep.*/
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Release the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Releasing the power mutex.");
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Wait for the semaphore. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Waiting for global semaphore.");
++ gcmkONERROR(gckOS_AcquireSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvTRUE;
++
++ /* Acquire the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Reacquiring the power mutex.");
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ mutexAcquired = gcvTRUE;
++
++ /* chipPowerState may be changed by external world during the time
++ ** we give up powerMutex, so updating flag now is necessary. */
++ flag = flags[Hardware->chipPowerState][State];
++
++ if (flag == 0)
++ {
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Error. */
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore again. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ else
++ {
++ if (State == gcvPOWER_OFF || State == gcvPOWER_SUSPEND || State == gcvPOWER_IDLE)
++ {
++ /* Acquire the global semaphore if it has not been acquired. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_OK)
++ {
++ globalAcquired = gcvTRUE;
++ }
++ else if (status != gcvSTATUS_TIMEOUT)
++ {
++ /* Other errors. */
++ gcmkONERROR(status);
++ }
++ /* Ignore gcvSTATUS_TIMEOUT and leave globalAcquired as gcvFALSE.
++ ** gcvSTATUS_TIMEOUT means global semaphore has already
++ ** been acquired before this operation, so even if we fail,
++ ** we should not release it in our error handling. It should be
++ ** released by the next successful global gcvPOWER_ON. */
++ }
++
++ /* Global power management can't be aborted, so sync with
++ ** proceeding last commit. */
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ for (;;)
++ {
++ /* Check if GPU is present and awake. */
++ status = _IsGPUPresent(Hardware);
++
++ /* Check if the GPU is not responding. */
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Turn off the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvFALSE, gcvFALSE));
++
++ Hardware->clockState = gcvFALSE;
++ Hardware->powerState = gcvFALSE;
++
++ /* Wait a little. */
++ gckOS_Delay(os, 1);
++
++ /* Turn on the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ /* We need to initialize the hardware and start the command
++ * processor. */
++ flag |= gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_START;
++ }
++ else
++ {
++ /* Test for error. */
++ gcmkONERROR(status);
++
++ /* Break out of loop. */
++ break;
++ }
++ }
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ gctBOOL idle;
++ gctINT32 atomValue;
++
++ /* For global operation, all pending commits have already been
++ ** blocked by globalSemaphore or powerSemaphore.*/
++ if (!global)
++ {
++ /* Check commit atom. */
++ gcmkONERROR(gckOS_AtomGet(os, command->atomCommit, &atomValue));
++
++ if (atomValue > 0)
++ {
++ /* Commits are pending - abort power management. */
++ status = broadcast ? gcvSTATUS_CHIP_NOT_READY
++ : gcvSTATUS_MORE_DATA;
++ goto OnError;
++ }
++ }
++
++ if (broadcast)
++ {
++ /* Check for idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Hardware, &idle));
++
++ if (!idle)
++ {
++ status = gcvSTATUS_CHIP_NOT_READY;
++ goto OnError;
++ }
++ }
++
++ else
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvTRUE));
++ commitEntered = gcvTRUE;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ commitEntered = gcvFALSE;
++
++ /* Wait to finish all commands. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));
++#endif
++ }
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ /* Stop the Isr. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext));
++ }
++ }
++
++ /* Flush Cache before Power Off. */
++ if (flag & gcvPOWER_FLAG_POWER_OFF)
++ {
++ if (Hardware->clockState == gcvFALSE)
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ gcvTRUE,
++ gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++
++ if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE)
++ {
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clocks[0]));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clocks[0])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++ }
++
++ gcmkONERROR(gckCOMMAND_Start(command));
++
++ gcmkONERROR(_FlushCache(Hardware, command));
++
++ gckOS_Delay(gcvNULL, 1);
++
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ flag |= gcvPOWER_FLAG_CLOCK_OFF;
++ }
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState
++ /* Don't touch clock control if dynamic frequency scaling is available. */
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE
++ )
++ {
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ if (Hardware->identity.chipModel == gcv4000
++ && ((Hardware->identity.chipRevision == 0x5208) || (Hardware->identity.chipRevision == 0x5222)))
++ {
++ clock &= ~2U;
++ }
++ }
++
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(Hardware,
++ Hardware->allowFastClear,
++ Hardware->allowCompression));
++
++ /* Force the command queue to reload the next context. */
++ command->currContext = gcvNULL;
++
++ /* Need to config mmu after command start. */
++ configMmu = gcvTRUE;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ /* Start the command processor. */
++ gcmkONERROR(gckCOMMAND_Start(command));
++ commandStarted = gcvTRUE;
++
++ if (Hardware->startIsr)
++ {
++ /* Start the Isr. */
++ gcmkONERROR(Hardware->startIsr(Hardware->isrContext));
++ isrStarted = gcvTRUE;
++ }
++ }
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++
++ if (global)
++ {
++ /* Verify global semaphore has been acquired already before
++ ** we release it.
++ ** If it was acquired, gckOS_TryAcquireSemaphore will return
++ ** gcvSTATUS_TIMEOUT and we release it. Otherwise, global
++        ** semaphore will be acquired now, but it still is released
++ ** immediately. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status != gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++#if gcdDVFS
++ if (State == gcvPOWER_ON && Hardware->kernel->dvfs)
++ {
++ gckDVFS_Start(Hardware->kernel->dvfs);
++ }
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++ /* Reset power off time */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOfftimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commandStarted)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Stop(command, gcvFALSE));
++ }
++
++ if (isrStarted)
++ {
++ gcmkVERIFY_OK(Hardware->stopIsr(Hardware->isrContext));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ }
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (globalAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++    /* Return the status. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL PowerManagement
++**          Power Management State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ if(!Hardware->powerManagementLock)
++ {
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++
++ Hardware->powerManagement = PowerManagement;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementLock
++**
++** Disable dynamic GPU power management switch.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL Lock
++**          Power Management Lock State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementLock(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Lock
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagementLock = Lock;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++/*******************************************************************************
++**
++** gckHARDWARE_SetGpuProfiler
++**
++** Configure GPU profiler function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL GpuProfiler
++**          GPU Profiler State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (GpuProfiler == gcvTRUE)
++ {
++ gctUINT32 data = 0;
++
++ /* Need to disable clock gating when doing profiling. */
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++ }
++
++ Hardware->gpuProfiler = GpuProfiler;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ )
++{
++ gceSTATUS status;
++ gctUINT32 clock;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x FscaleValue=%d", Hardware, FscaleValue);
++
++ gcmkVERIFY_ARGUMENT(FscaleValue > 0 && FscaleValue <= 64);
++
++ gcmkONERROR(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ Hardware->powerOnFscaleVal = FscaleValue;
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (FscaleValue) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ /* Restore all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ )
++{
++ *FscaleValue = Hardware->powerOnFscaleVal;
++ *MinFscaleValue = Hardware->minFscaleValue;
++ *MaxFscaleValue = 64;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_SetMinFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT MinFscaleValue
++ )
++{
++ if (MinFscaleValue >= 1 && MinFscaleValue <= 64)
++ {
++ Hardware->minFscaleValue = MinFscaleValue;
++ }
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle, address;
++ gctBOOL isIdle;
++#if gcdMULTI_GPU > 1
++ gctUINT32 idle3D1 = 0;
++ gctUINT32 address3D1;
++ gctBOOL isIdle3D1 = gcvFALSE;
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 pendingInterrupt;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ isIdle = gcvTRUE;
++#if gcdMULTI_GPU > 1
++ isIdle3D1 = gcvTRUE;
++#endif
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ gcvCORE_3D_1_ID,
++ 0x00004,
++ &idle3D1));
++ }
++#endif
++
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ isIdle = gcvFALSE;
++ }
++
++ else
++ {
++#if gcdSECURITY
++ isIdle = gcvTRUE;
++ address = 0;
++#else
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++ /* Test if address is inside the last WAIT/LINK sequence. */
++ if ((address >= Hardware->lastWaitLink)
++#if gcdMULTI_GPU
++ && (address <= Hardware->lastWaitLink + 40)
++#else
++ && (address <= Hardware->lastWaitLink + 16)
++#endif
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ isIdle = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ isIdle = gcvFALSE;
++ }
++#endif
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle3D1)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ isIdle3D1 = gcvFALSE;
++ }
++
++ else
++ {
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ gcvCORE_3D_1_ID,
++ 0x00664,
++ &address3D1));
++
++ /* Test if address is inside the last WAIT/LINK sequence. */
++ if ((address3D1 >= Hardware->lastWaitLink)
++ && (address3D1 <= Hardware->lastWaitLink + 40)
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ isIdle3D1 = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ isIdle3D1 = gcvFALSE;
++ }
++ }
++ }
++#endif
++
++ }
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkONERROR(gckOS_AtomGet(
++ Hardware->os,
++ Hardware->kernel->eventObj->interruptCount,
++ &pendingInterrupt
++ ));
++
++ if (pendingInterrupt)
++ {
++ isIdle = gcvFALSE;
++ }
++#endif
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ *IsIdle = (isIdle & isIdle3D1);
++ }
++ else
++#endif
++ {
++ *IsIdle = isIdle;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** Handy macros that will help in reading those debug registers.
++*/
++
++#define gcmkREAD_DEBUG_REGISTER(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &profiler->data))
++
++#define gcmkREAD_DEBUG_REGISTER_N(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &data))
++
++#define gcmkRESET_DEBUG_REGISTER(control, block) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 15))); \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 0)))
++
++/*******************************************************************************
++**
++** gckHARDWARE_ProfileEngine2D
++**
++** Read the profile registers available in the 2D engine and sets them in the
++** profile. The function will also reset the pixelsRendered counter every time.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OPTIONAL gcs2D_PROFILE_PTR Profile
++** Pointer to a gcs2D_Profile structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ )
++{
++ gceSTATUS status;
++ gcs2D_PROFILE_PTR profiler = Profile;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Profile != gcvNULL)
++ {
++ /* Read the cycle count. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &Profile->cycleCount));
++
++ /* Read pixels rendered by 2D engine. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &profiler->pixelsRendered));
++
++ /* Reset counter. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++ /* Walk through all avaiable pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
++#if VIVANTE_PROFILER_CONTEXT
++#define gcmkUPDATE_PROFILE_DATA(data) \
++ profilerHistroy->data += profiler->data
++
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = Hardware->kernel->command;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Acquire the context sequnence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os, command->mutexContextSeq, gcvINFINITE
++ ));
++
++ /* Read the counters. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ profiler, &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ /* Reset counters. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os, command->mutexContextSeq
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gctUINT32
++CalcDelta(
++ IN gctUINT32 new,
++ IN gctUINT32 old
++ )
++{
++ if (new >= old)
++ {
++ return new - old;
++ }
++ else
++ {
++ return (gctUINT32)((gctUINT64)new + 0x100000000ll - old);
++ }
++}
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = &Context->latestProfiler;
++ gcsPROFILER_COUNTERS * profilerHistroy = &Context->histroyProfiler;
++ gctUINT i, clock;
++ gctUINT32 colorKilled = 0, colorDrawn = 0, depthKilled = 0, depthDrawn = 0;
++ gctUINT32 totalRead, totalWrite;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 temp;
++ gctBOOL needResetShader = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x Context=0x%x", Hardware, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ chipModel = Hardware->identity.chipModel;
++ chipRevision = Hardware->identity.chipRevision;
++ if (chipModel == gcv2000 || (chipModel == gcv2100 && chipRevision == 0x5118))
++ {
++ needResetShader = gcvTRUE;
++ }
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuTotalCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuIdleCyclesCounter);
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++ /* Walk through all avaiable pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++ gcmkUPDATE_PROFILE_DATA(gpuTotalRead64BytesPerFrame);
++ gcmkUPDATE_PROFILE_DATA(gpuTotalWrite64BytesPerFrame);
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_depth_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_depth_pipe);
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->ps_inst_counter;
++ profiler->ps_inst_counter = CalcDelta(temp, Context->prevPSInstCount);
++ Context->prevPSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(ps_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_pixel_counter;
++ profiler->rendered_pixel_counter = CalcDelta(temp, Context->prevPSPixelCount);
++ Context->prevPSPixelCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_pixel_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vs_inst_counter;
++ profiler->vs_inst_counter = CalcDelta(temp, Context->prevVSInstCount);
++ Context->prevVSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vs_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_vertice_counter;
++ profiler->rendered_vertice_counter = CalcDelta(temp, Context->prevVSVertexCount);
++ Context->prevVSVertexCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_vertice_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_branch_inst_counter;
++ profiler->vtx_branch_inst_counter = CalcDelta(temp, Context->prevVSBranchInstCount);
++ Context->prevVSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_texld_inst_counter;
++ profiler->vtx_texld_inst_counter = CalcDelta(temp, Context->prevVSTexInstCount);
++ Context->prevVSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_branch_inst_counter;
++ profiler->pxl_branch_inst_counter = CalcDelta(temp, Context->prevPSBranchInstCount);
++ Context->prevPSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_texld_inst_counter;
++ profiler->pxl_texld_inst_counter = CalcDelta(temp, Context->prevPSTexInstCount);
++ Context->prevPSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_vtx_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_output_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_depth_clipped_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_trivial_rejected_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_culled_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_triangle_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_lines_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_pixel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_quad_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_quad_count_after_early_z);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_primitive_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_pipe_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_prefetch_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_bilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_trilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_discarded_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_in_8B_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_hit_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_IP);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_write_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_read_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_data_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
++#if VIVANTE_PROFILER_NEW
++gceSTATUS
++gckHARDWARE_InitProfiler(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++ /* Enable debug register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ Hardware->powerBaseAddress +
++ 0x00104,
++ 0x00000000));
++
++ control = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable pulse-eater. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Wait for clock being stable. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Core == gcvCORE_MAJOR)
++ {
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Os,
++ Core,
++ gcvCORE_3D_1_ID,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++ }
++#endif
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Core == gcvCORE_MAJOR)
++ {
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Os,
++ Core,
++ gcvCORE_3D_1_ID,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++ }
++#endif
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++
++ /* Hardware reset. */
++ status = gckOS_ResetGPU(Hardware->os, Hardware->core);
++
++ if (gcmIS_ERROR(status))
++ {
++ if (Hardware->identity.chipRevision < 0x4600)
++ {
++ /* Not supported - we need the isolation bit. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ /* Soft reset. */
++ gcmkONERROR(_ResetGPU(Hardware, Hardware->os, Hardware->core));
++ }
++
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++ /* Jump to address into which GPU should run if it doesn't stuck. */
++ gcmkONERROR(gckHARDWARE_Execute(Hardware, Hardware->kernel->restoreAddress, 16));
++
++ gcmkPRINT("[galcore]: recovery done");
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkPRINT("[galcore]: Hardware not reset successfully, give up");
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Test if we have a new Memory Controller. */
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))))
++ {
++ /* No base address required. */
++ *BaseAddress = 0;
++ }
++ else
++ {
++ /* Get the base address from the OS. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, BaseAddress));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ )
++{
++ gctBOOL need = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x State=0x%08x", Hardware, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(NeedBase != gcvNULL);
++
++ /* Make sure this is a load state. */
++ if (((((gctUINT32) (State)) >> (0 ? 31:27) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))))
++ {
++#if gcdENABLE_3D
++ /* Get the state address. */
++ switch ((((((gctUINT32) (State)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) ))
++ {
++ case 0x0596:
++ case 0x0597:
++ case 0x0599:
++ case 0x059A:
++ case 0x05A9:
++ /* These states need a TRUE physical address. */
++ need = gcvTRUE;
++ break;
++ }
++#else
++ /* 2D addresses don't need a base address. */
++#endif
++ }
++
++ /* Return the flag. */
++ *NeedBase = need;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedBase=%d", *NeedBase);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Hardware=0x%x, StartIsr=0x%x, StopIsr=0x%x, Context=0x%x",
++ Hardware, StartIsr, StopIsr, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (StartIsr == gcvNULL ||
++ StopIsr == gcvNULL ||
++ Context == gcvNULL)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ Hardware->startIsr = StartIsr;
++ Hardware->stopIsr = StopIsr;
++ Hardware->isrContext = Context;
++
++ /* Success. */
++ gcmkFOOTER();
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Compose
++**
++** Start a composition.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ )
++{
++#if gcdENABLE_3D
++ gceSTATUS status;
++ gctUINT32_PTR triggerState;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x Logical=0x%x"
++ " Offset=%d Size=%d EventID=%d",
++ Hardware, Physical, Logical, Offset, Size, EventID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(((Size + 8) & 63) == 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Program the trigger state. */
++ triggerState = (gctUINT32_PTR) ((gctUINT8_PTR) Logical + Offset + Size);
++ triggerState[0] = 0x0C03;
++ triggerState[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16))) | (((gctUINT32) ((gctUINT32) (EventID) & ((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16)))
++ ;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Hardware->os, ProcessID, gcvNULL,
++ (gctUINT32)Physical, Logical, Offset + Size
++ ));
++#endif
++
++ /* Start composition. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os, Hardware->core, 0x00554,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ /* Return the status. */
++ return gcvSTATUS_NOT_SUPPORTED;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_IsFeatureAvailable
++**
++** Verifies whether the specified feature is available in hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceFEATURE Feature
++** Feature to be verified.
++*/
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ )
++{
++ gctBOOL available;
++
++ gcmkHEADER_ARG("Hardware=0x%x Feature=%d", Hardware, Feature);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Only features needed by common kernel logic added here. */
++ switch (Feature)
++ {
++ case gcvFEATURE_END_EVENT:
++ /*available = gcmVERIFYFIELDVALUE(Hardware->identity.chipMinorFeatures2,
++ GC_MINOR_FEATURES2, END_EVENT, AVAILABLE
++ );*/
++ available = gcvFALSE;
++ break;
++
++ case gcvFEATURE_MC20:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))));
++ break;
++
++ case gcvFEATURE_EARLY_Z:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ case gcvFEATURE_HZ:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))));
++ break;
++
++ case gcvFEATURE_NEW_HZ:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))));
++ break;
++
++ case gcvFEATURE_FAST_MSAA:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))));
++ break;
++
++ case gcvFEATURE_SMALL_MSAA:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++ break;
++
++ case gcvFEATURE_DYNAMIC_FREQUENCY_SCALING:
++ /* This feature doesn't apply for 2D cores. */
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 14:14) & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++
++ if (Hardware->identity.chipModel == gcv1000 &&
++ (Hardware->identity.chipRevision == 0x5039 ||
++ Hardware->identity.chipRevision == 0x5040))
++ {
++ available = gcvFALSE;
++ }
++ break;
++
++ case gcvFEATURE_ACE:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++ break;
++
++ case gcvFEATURE_HALTI2:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_2D:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_3D:
++#if gcdENABLE_3D
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++#else
++ available = gcvFALSE;
++#endif
++ break;
++
++ case gcvFEATURE_FC_FLUSH_STALL:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 31:31) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))));
++ break;
++
++ default:
++ gcmkFATAL("Invalid feature has been requested.");
++ available = gcvFALSE;
++ }
++
++ /* Return result. */
++ gcmkFOOTER_ARG("%d", available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE);
++ return available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpMMUException
++**
++** Dump the MMU debug info on an MMU exception.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ )
++{
++ gctUINT32 mmu = 0;
++ gctUINT32 mmuStatus = 0;
++ gctUINT32 address = 0;
++ gctUINT32 i = 0;
++ gctUINT32 mtlb = 0;
++ gctUINT32 stlb = 0;
++ gctUINT32 offset = 0;
++#if gcdPROCESS_ADDRESS_SPACE
++ gcsDATABASE_PTR database;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkPRINT("GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ Hardware->core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** MMU ERROR DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00188,
++ &mmuStatus));
++
++ gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus);
++
++ for (i = 0; i < 4; i += 1)
++ {
++ mmu = mmuStatus & 0xF;
++ mmuStatus >>= 4;
++
++ if (mmu == 0)
++ {
++ continue;
++ }
++
++ switch (mmu)
++ {
++ case 1:
++ gcmkPRINT(" MMU%d: slave not present\n", i);
++ break;
++
++ case 2:
++ gcmkPRINT(" MMU%d: page not present\n", i);
++ break;
++
++ case 3:
++ gcmkPRINT(" MMU%d: write violation\n", i);
++ break;
++
++ default:
++ gcmkPRINT(" MMU%d: unknown state\n", i);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00190 + i * 4,
++ &address));
++
++ mtlb = (address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++ offset = address & gcdMMU_OFFSET_4K_MASK;
++
++ gcmkPRINT(" MMU%d: exception address = 0x%08X\n", i, address);
++
++ gcmkPRINT(" MTLB entry = %d\n", mtlb);
++
++ gcmkPRINT(" STLB entry = %d\n", stlb);
++
++ gcmkPRINT(" Offset = 0x%08X (%d)\n", offset, offset);
++
++ gckMMU_DumpPageTableEntry(Hardware->kernel->mmu, address);
++
++#if gcdPROCESS_ADDRESS_SPACE
++ for (i = 0; i < gcmCOUNTOF(Hardware->kernel->db->db); ++i)
++ {
++ for (database = Hardware->kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ gcmkPRINT(" database [%d] :", database->processID);
++ gckMMU_DumpPageTableEntry(database->mmu, address);
++ }
++ }
++#endif
++ }
++
++ gckHARDWARE_DumpGPUState(Hardware);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpGPUState
++**
++** Dump the GPU debug registers.
++**
++** INPUT:
++**
++** gckHARDWARE Harwdare
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ )
++{
++ static gctCONST_STRING _cmdState[] =
++ {
++ "PAR_IDLE_ST", "PAR_DEC_ST", "PAR_ADR0_ST", "PAR_LOAD0_ST",
++ "PAR_ADR1_ST", "PAR_LOAD1_ST", "PAR_3DADR_ST", "PAR_3DCMD_ST",
++ "PAR_3DCNTL_ST", "PAR_3DIDXCNTL_ST", "PAR_INITREQDMA_ST",
++ "PAR_DRAWIDX_ST", "PAR_DRAW_ST", "PAR_2DRECT0_ST", "PAR_2DRECT1_ST",
++ "PAR_2DDATA0_ST", "PAR_2DDATA1_ST", "PAR_WAITFIFO_ST", "PAR_WAIT_ST",
++ "PAR_LINK_ST", "PAR_END_ST", "PAR_STALL_ST"
++ };
++
++ static gctCONST_STRING _cmdDmaState[] =
++ {
++ "CMD_IDLE_ST", "CMD_START_ST", "CMD_REQ_ST", "CMD_END_ST"
++ };
++
++ static gctCONST_STRING _cmdFetState[] =
++ {
++ "FET_IDLE_ST", "FET_RAMVALID_ST", "FET_VALID_ST"
++ };
++
++ static gctCONST_STRING _reqDmaState[] =
++ {
++ "REQ_IDLE_ST", "REQ_WAITIDX_ST", "REQ_CAL_ST"
++ };
++
++ static gctCONST_STRING _calState[] =
++ {
++ "CAL_IDLE_ST", "CAL_LDADR_ST", "CAL_IDXCALC_ST"
++ };
++
++ static gctCONST_STRING _veReqState[] =
++ {
++ "VER_IDLE_ST", "VER_CKCACHE_ST", "VER_MISS_ST"
++ };
++
++ static gcsiDEBUG_REGISTERS _dbgRegs[] =
++ {
++ { "RA", 0x474, 16, 0x448, 16, 0x12344321 },
++ { "TX", 0x474, 24, 0x44C, 16, 0x12211221 },
++ { "FE", 0x470, 0, 0x450, 16, 0xBABEF00D },
++ { "PE", 0x470, 16, 0x454, 16, 0xBABEF00D },
++ { "DE", 0x470, 8, 0x458, 16, 0xBABEF00D },
++ { "SH", 0x470, 24, 0x45C, 16, 0xDEADBEEF },
++ { "PA", 0x474, 0, 0x460, 16, 0x0000AAAA },
++ { "SE", 0x474, 8, 0x464, 16, 0x5E5E5E5E },
++ { "MC", 0x478, 0, 0x468, 16, 0x12345678 },
++ { "HI", 0x478, 8, 0x46C, 16, 0xAAAAAAAA }
++ };
++
++ static gctUINT32 _otherRegs[] =
++ {
++ 0x040, 0x044, 0x04C, 0x050, 0x054, 0x058, 0x05C, 0x060,
++ 0x43c, 0x440, 0x444, 0x414,
++ };
++
++ gceSTATUS status;
++ gckKERNEL kernel = gcvNULL;
++ gctUINT32 idle = 0, axi = 0;
++ gctUINT32 dmaAddress1 = 0, dmaAddress2 = 0;
++ gctUINT32 dmaState1 = 0, dmaState2 = 0;
++ gctUINT32 dmaLow = 0, dmaHigh = 0;
++ gctUINT32 cmdState = 0, cmdDmaState = 0, cmdFetState = 0;
++ gctUINT32 dmaReqState = 0, calState = 0, veReqState = 0;
++ gctUINT i;
++ gctUINT pipe = 0, pixelPipes = 0;
++ gctUINT32 control = 0, oldControl = 0;
++ gckOS os = Hardware->os;
++ gceCORE core = Hardware->core;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ kernel = Hardware->kernel;
++
++ gcmkPRINT_N(12, "GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ pixelPipes = Hardware->identity.pixelPipes
++ ? Hardware->identity.pixelPipes
++ : 1;
++
++ /* Reset register values. */
++ idle = axi =
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 =
++ dmaLow = dmaHigh = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkONERROR(_VerifyDMA(
++ os, core, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++ cmdState = dmaState2 & 0x1F;
++ cmdDmaState = (dmaState2 >> 8) & 0x03;
++ cmdFetState = (dmaState2 >> 10) & 0x03;
++ dmaReqState = (dmaState2 >> 12) & 0x03;
++ calState = (dmaState2 >> 14) & 0x03;
++ veReqState = (dmaState2 >> 16) & 0x03;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x004, &idle));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00C, &axi));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x668, &dmaLow));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x66C, &dmaHigh));
++
++ gcmkPRINT_N(0, "**************************\n");
++ gcmkPRINT_N(0, "*** GPU STATE DUMP ***\n");
++ gcmkPRINT_N(0, "**************************\n");
++
++ gcmkPRINT_N(4, " axi = 0x%08X\n", axi);
++
++ gcmkPRINT_N(4, " idle = 0x%08X\n", idle);
++ if ((idle & 0x00000001) == 0) gcmkPRINT_N(0, " FE not idle\n");
++ if ((idle & 0x00000002) == 0) gcmkPRINT_N(0, " DE not idle\n");
++ if ((idle & 0x00000004) == 0) gcmkPRINT_N(0, " PE not idle\n");
++ if ((idle & 0x00000008) == 0) gcmkPRINT_N(0, " SH not idle\n");
++ if ((idle & 0x00000010) == 0) gcmkPRINT_N(0, " PA not idle\n");
++ if ((idle & 0x00000020) == 0) gcmkPRINT_N(0, " SE not idle\n");
++ if ((idle & 0x00000040) == 0) gcmkPRINT_N(0, " RA not idle\n");
++ if ((idle & 0x00000080) == 0) gcmkPRINT_N(0, " TX not idle\n");
++ if ((idle & 0x00000100) == 0) gcmkPRINT_N(0, " VG not idle\n");
++ if ((idle & 0x00000200) == 0) gcmkPRINT_N(0, " IM not idle\n");
++ if ((idle & 0x00000400) == 0) gcmkPRINT_N(0, " FP not idle\n");
++ if ((idle & 0x00000800) == 0) gcmkPRINT_N(0, " TS not idle\n");
++ if ((idle & 0x80000000) != 0) gcmkPRINT_N(0, " AXI low power mode\n");
++
++ if (
++ (dmaAddress1 == dmaAddress2)
++ && (dmaState1 == dmaState2)
++ )
++ {
++ gcmkPRINT_N(0, " DMA appears to be stuck at this address:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ }
++ else
++ {
++ if (dmaAddress1 == dmaAddress2)
++ {
++ gcmkPRINT_N(0, " DMA address is constant, but state is changing:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState2);
++ }
++ else
++ {
++ gcmkPRINT_N(0, " DMA is running; known addresses are:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress2);
++ }
++ }
++
++ gcmkPRINT_N(4, " dmaLow = 0x%08X\n", dmaLow);
++ gcmkPRINT_N(4, " dmaHigh = 0x%08X\n", dmaHigh);
++ gcmkPRINT_N(4, " dmaState = 0x%08X\n", dmaState2);
++ gcmkPRINT_N(8, " command state = %d (%s)\n", cmdState, _cmdState [cmdState]);
++ gcmkPRINT_N(8, " command DMA state = %d (%s)\n", cmdDmaState, _cmdDmaState[cmdDmaState]);
++ gcmkPRINT_N(8, " command fetch state = %d (%s)\n", cmdFetState, _cmdFetState[cmdFetState]);
++ gcmkPRINT_N(8, " DMA request state = %d (%s)\n", dmaReqState, _reqDmaState[dmaReqState]);
++ gcmkPRINT_N(8, " cal state = %d (%s)\n", calState, _calState [calState]);
++ gcmkPRINT_N(8, " VE request state = %d (%s)\n", veReqState, _veReqState [veReqState]);
++
++ /* Record control. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &oldControl);
++
++ for (pipe = 0; pipe < pixelPipes; pipe++)
++ {
++ gcmkPRINT_N(4, " Debug registers of pipe[%d]:\n", pipe);
++
++ /* Switch pipe. */
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x0, &control));
++ control &= ~(0xF << 20);
++ control |= (pipe << 20);
++ gcmkONERROR(gckOS_WriteRegisterEx(os, core, 0x0, control));
++
++ for (i = 0; i < gcmCOUNTOF(_dbgRegs); i += 1)
++ {
++ gcmkONERROR(_DumpDebugRegisters(os, core, &_dbgRegs[i]));
++ }
++
++ gcmkPRINT_N(0, " Other Registers:\n");
++ for (i = 0; i < gcmCOUNTOF(_otherRegs); i += 1)
++ {
++ gctUINT32 read;
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, _otherRegs[i], &read));
++ gcmkPRINT_N(12, " [0x%04X] 0x%08X\n", _otherRegs[i], read);
++ }
++ }
++
++ if (kernel->hardware->identity.chipFeatures & (1 << 4))
++ {
++ gctUINT32 read0, read1, write;
++
++ read0 = read1 = write = 0;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x43C, &read0));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x440, &read1));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x444, &write));
++
++ gcmkPRINT_N(4, " read0 = 0x%08X\n", read0);
++ gcmkPRINT_N(4, " read1 = 0x%08X\n", read1);
++ gcmkPRINT_N(4, " write = 0x%08X\n", write);
++ }
++
++ /* Restore control. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os, core, 0x0, oldControl));
++
++ /* dump stack. */
++ gckOS_DumpCallStack(os);
++
++OnError:
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckHARDWARE_ReadPerformanceRegister(
++ IN gckHARDWARE Hardware,
++ IN gctUINT PerformanceAddress,
++ IN gctUINT IndexAddress,
++ IN gctUINT IndexShift,
++ IN gctUINT Index,
++ OUT gctUINT32_PTR Value
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x PerformanceAddress=0x%x IndexAddress=0x%x "
++ "IndexShift=%u Index=%u",
++ Hardware, PerformanceAddress, IndexAddress, IndexShift,
++ Index);
++
++ /* Write the index. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ IndexAddress,
++ Index << IndexShift));
++
++ /* Read the register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ PerformanceAddress,
++ Value));
++
++ /* Test for reset. */
++ if (Index == 15)
++ {
++ /* Index another register to get out of reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, IndexAddress, 0));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=0x%x", *Value);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ )
++{
++ gceSTATUS status;
++ gctUINT i, clock;
++ gcsHAL_FRAME_INFO info;
++#if gcdFRAME_DB_RESET
++ gctUINT reset;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Get profile tick. */
++ gcmkONERROR(gckOS_GetProfileTick(&info.ticks));
++
++ /* Read SH counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 4,
++ &info.shaderCycles));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 9,
++ &info.vsInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 12,
++ &info.vsTextureCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 7,
++ &info.psInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 14,
++ &info.psTextureCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read PA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 3,
++ &info.vertexCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 4,
++ &info.primitiveCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 7,
++ &info.rejectedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 8,
++ &info.culledPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 6,
++ &info.clippedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 5,
++ &info.outPrimitives));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 15,
++ &reset));
++#endif
++
++ /* Read RA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 3,
++ &info.inPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 11,
++ &info.culledQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 1,
++ &info.totalQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 2,
++ &info.quadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 0,
++ &info.totalPixelCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Read TX counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 0,
++ &info.bilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 1,
++ &info.trilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 8,
++ &info.txHitCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 9,
++ &info.txMissCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 6,
++ &info.txBytes8));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ /* Walk through all avaiable pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* Read cycle registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &info.cycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &info.idleCycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &info.mcCycles[i]));
++
++ /* Read bandwidth registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0005C,
++ &info.readRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &info.readBytes8[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00050,
++ &info.writeRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &info.writeBytes8[i]));
++
++ /* Read PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 0,
++ &info.colorKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 2,
++ &info.colorDrawn[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 1,
++ &info.depthKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 3,
++ &info.depthDrawn[i]));
++ }
++
++ /* Zero out remaning reserved counters. */
++ for (; i < 8; ++i)
++ {
++ info.readBytes8[i] = 0;
++ info.writeBytes8[i] = 0;
++ info.cycles[i] = 0;
++ info.idleCycles[i] = 0;
++ info.mcCycles[i] = 0;
++ info.readRequests[i] = 0;
++ info.writeRequests[i] = 0;
++ info.colorKilled[i] = 0;
++ info.colorDrawn[i] = 0;
++ info.depthKilled[i] = 0;
++ info.depthDrawn[i] = 0;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset cycle and bandwidth counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 1));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ 0));
++
++#if gcdFRAME_DB_RESET
++ /* Reset PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Copy to user. */
++ gcmkONERROR(gckOS_CopyToUserData(Hardware->os,
++ &info,
++ FrameInfo,
++ gcmSIZEOF(info)));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdDVFS
++#define READ_FROM_EATER1 0
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ )
++{
++ gctUINT32 debug1;
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Load != gcvNULL);
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00110,
++ Load));
++#if READ_FROM_EATER1
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00134,
++ Load));
++#endif
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00114,
++ &debug1));
++
++ /* Patch result of 0x110 with result of 0x114. */
++ if ((debug1 & 0xFF) == 1)
++ {
++ *Load &= ~0xFF;
++ *Load |= 1;
++ }
++
++ if (((debug1 & 0xFF00) >> 8) == 1)
++ {
++ *Load &= ~(0xFF << 8);
++ *Load |= 1 << 8;
++ }
++
++ if (((debug1 & 0xFF0000) >> 16) == 1)
++ {
++ *Load &= ~(0xFF << 16);
++ *Load |= 1 << 16;
++ }
++
++ if (((debug1 & 0xFF000000) >> 24) == 1)
++ {
++ *Load &= ~(0xFF << 24);
++ *Load |= 1 << 24;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 Frequency
++ )
++{
++ gceSTATUS status;
++ gctUINT32 period;
++ gctUINT32 eater;
++
++#if READ_FROM_EATER1
++ gctUINT32 period1;
++ gctUINT32 eater1;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%X Frequency=%d", Hardware, Frequency);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ period = 0;
++
++ while((64 << period) < (gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000) )
++ {
++ period++;
++ }
++
++#if READ_FROM_EATER1
++ /*
++ * Peroid = F * 1000 * 1000 / (60 * 16 * 1024);
++ */
++ period1 = Frequency * 6250 / 6114;
++#endif
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ /* Get current configure. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &eater));
++
++ /* Change peroid. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (eater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (period) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))));
++
++#if READ_FROM_EATER1
++ /* Config eater1. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ &eater1));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ ((((gctUINT32) (eater1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))) | (((gctUINT32) ((gctUINT32) (period1) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16)))));
++#endif
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 data;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "DVFS Configure=0x%X",
++ data);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_PrepareFunctions
++**
++** Generate command buffer snippets which will be used by gckHARDWARE, by which
++** gckHARDWARE can manipulate GPU by FE command without using gckCOMMAND to avoid
++** race condition and deadlock.
++**
++** Notice:
++** 1. Each snippet can only be executed when GPU is idle.
++** 2. Execution is triggered by AHB (0x658)
++** 3. Each snippet followed by END so software can sync with GPU by checking GPU
++** idle
++** 4. It is transparent to gckCOMMAND command buffer.
++**
++** Existing Snippets:
++** 1. MMU Configure
++** For new MMU, after GPU is reset, FE execute this command sequence to enble MMU.
++*/
++gceSTATUS
++gckHARDWARE_PrepareFunctions(
++ gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckOS os;
++ gctUINT32 offset = 0;
++ gctUINT32 mmuBytes;
++ gctUINT32 endBytes;
++ gctUINT8_PTR logical;
++
++ gcmkHEADER_ARG("%x", Hardware);
++
++ os = Hardware->os;
++
++ gcmkVERIFY_OK(gckOS_GetPageSize(os, &Hardware->functionBytes));
++
++ /* Allocate a command buffer. */
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ os,
++ gcvFALSE,
++ &Hardware->functionBytes,
++ &Hardware->functionPhysical,
++ &Hardware->functionLogical
++ ));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ os,
++ Hardware->functionLogical,
++ &Hardware->functionAddress
++ ));
++
++ if (Hardware->mmuVersion > 0)
++ {
++ /* MMU configure command sequence. */
++ logical = (gctUINT8_PTR)Hardware->functionLogical + offset;
++
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].address
++ = Hardware->functionAddress + offset;
++
++ gcmkONERROR(gckHARDWARE_SetMMUStates(
++ Hardware,
++ Hardware->kernel->mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Hardware->kernel->mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ logical,
++ &mmuBytes
++ ));
++
++ offset += mmuBytes;
++
++ logical = (gctUINT8_PTR)Hardware->functionLogical + offset;
++
++ gcmkONERROR(gckHARDWARE_End(
++ Hardware,
++ gcvNULL,
++ &endBytes
++ ));
++
++ gcmkONERROR(gckHARDWARE_End(
++ Hardware,
++ logical,
++ &endBytes
++ ));
++
++ offset += endBytes;
++
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].bytes = mmuBytes + endBytes;
++ }
++
++ gcmkASSERT(offset < Hardware->functionBytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,160 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_h_
++#define __gc_hal_kernel_hardware_h_
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_hardware_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef enum {
++ gcvHARDWARE_FUNCTION_MMU,
++ gcvHARDWARE_FUNCTION_FLUSH,
++
++ gcvHARDWARE_FUNCTION_NUM,
++}
++gceHARDWARE_FUNCTION;
++
++
++typedef struct _gcsHARWARE_FUNCTION
++{
++ /* Entry of the function. */
++ gctUINT32 address;
++
++ /* Bytes of the function. */
++ gctUINT32 bytes;
++}
++gcsHARDWARE_FUNCTION;
++
++/* gckHARDWARE object. */
++struct _gckHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gctKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Pointer to gctOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Chip characteristics. */
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctBOOL allowFastClear;
++ gctBOOL allowCompression;
++ gctUINT32 powerBaseAddress;
++ gctBOOL extraEventStates;
++
++ /* Big endian */
++ gctBOOL bigEndian;
++
++ /* Chip status */
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gctUINT32 lastWaitLink;
++ gctUINT32 lastEnd;
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER globalSemaphore;
++
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++
++ gctUINT32 mmuVersion;
++
++ /* Whether use new MMU. It is meaningless
++ ** for old MMU since old MMU is always enabled.
++ */
++ gctBOOL enableMMU;
++
++ /* Type */
++ gceHARDWARE_TYPE type;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gctUINT32 powerOnFscaleVal;
++#endif
++ gctPOINTER pageTableDirty;
++
++#if gcdLINK_QUEUE_SIZE
++ struct _gckLINKQUEUE linkQueue;
++#endif
++
++ gctBOOL powerManagement;
++ gctBOOL powerManagementLock;
++ gctBOOL gpuProfiler;
++
++ gctBOOL endAfterFlushMmuCache;
++
++ gctUINT32 minFscaleValue;
++
++ gctPOINTER pendingEvent;
++
++ /* Function used by gckHARDWARE. */
++ gctPHYS_ADDR functionPhysical;
++ gctPOINTER functionLogical;
++ gctUINT32 functionAddress;
++ gctSIZE_T functionBytes;
++
++ gcsHARDWARE_FUNCTION functions[gcvHARDWARE_FUNCTION_NUM];
++};
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ );
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_hardware_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_recorder.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_recorder.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_recorder.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_recorder.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,679 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++
++/*
++ * -----------------------
++ * HARDWARE STATE RECORDER
++ * -----------------------
++ *
++ * State mirror buffer is used to 'mirror' hardware states since hardware
++ * states can't be dumpped. It is a context buffer which stores 'global'
++ * context.
++ *
++ * For each commit, state recorder
++ * 1) Records context buffer (if there is) and command buffers in this commit.
++ * 2) Parse those buffers to estimate the state changed.
++ * 3) Stores result to a mirror buffer.
++ *
++ * == Commit 0 ====================================================================
++ *
++ * Context Buffer 0
++ *
++ * Command Buffer 0
++ *
++ * Mirror Buffer 0 <- Context Buffer 0 + Command Buffer 0
++ *
++ * == Commit 1 ====================================================================
++ *
++ * Command Buffer 1
++ *
++ * Mirror Buffer 1 <- Command buffer 1 + Mirror Buffer 0
++ *
++ * == Commit 2 ====================================================================
++ *
++ * Context Buffer 2 (optional)
++ *
++ * Command Buffer 2
++ *
++ * Mirror Buffer 2 <- Command buffer 2 + Context Buffer 2 + Mirror Buffer 1
++ *
++ * == Commit N ====================================================================
++ *
++ * For Commit N, these buffers are needed to reproduce hardware's behavior in
++ * this commit.
++ *
++ * Mirror Buffer [N - 1] : State Mirror accumlated by past commits,
++ * which is used to restore hardware state.
++ * Context Buffer [N] :
++ * Command Buffer [N] : Command buffer executed by hardware in this commit.
++ *
++ * If sequence of states programming matters, hardware's behavior can't be reproduced,
++ * but the state values stored in mirror buffer are assuring.
++ */
++
++/* Queue size. */
++#define gcdNUM_RECORDS 6
++
++typedef struct _gcsPARSER_HANDLER * gckPARSER_HANDLER;
++
++typedef void
++(*HandlerFunction)(
++ IN gckPARSER_HANDLER Handler,
++ IN gctUINT32 Addr,
++ IN gctUINT32 Data
++ );
++
++typedef struct _gcsPARSER_HANDLER
++{
++ gctUINT32 type;
++ gctUINT32 cmd;
++ gctPOINTER private;
++ HandlerFunction function;
++}
++gcsPARSER_HANDLER;
++
++typedef struct _gcsPARSER * gckPARSER;
++typedef struct _gcsPARSER
++{
++ gctUINT8_PTR currentCmdBufferAddr;
++
++ /* Current command. */
++ gctUINT32 lo;
++ gctUINT32 hi;
++
++ gctUINT8 cmdOpcode;
++ gctUINT16 cmdAddr;
++ gctUINT32 cmdSize;
++ gctUINT32 cmdRectCount;
++ gctUINT8 skip;
++ gctUINT32 skipCount;
++
++ gctBOOL allow;
++
++ /* Callback used by parser to handle a command. */
++ gckPARSER_HANDLER commandHandler;
++}
++gcsPARSER;
++
++typedef struct _gcsMIRROR
++{
++ gctUINT32_PTR logical[gcdNUM_RECORDS];
++ gctUINT32 bytes;
++ gcsSTATE_MAP_PTR map;
++ gctUINT32 stateCount;
++}
++gcsMIRROR;
++
++typedef struct _gcsDELTA
++{
++ gctUINT64 commitStamp;
++ gctUINT32_PTR command;
++ gctUINT32 commandBytes;
++ gctUINT32_PTR context;
++ gctUINT32 contextBytes;
++}
++gcsDELTA;
++
++typedef struct _gcsRECORDER
++{
++ gckOS os;
++ gcsMIRROR mirror;
++ gcsDELTA deltas[gcdNUM_RECORDS];
++
++ /* Index of current record. */
++ gctUINT index;
++
++ /* Number of records. */
++ gctUINT num;
++
++ /* Plugin used by gckPARSER. */
++ gcsPARSER_HANDLER recorderHandler;
++ gckPARSER parser;
++}
++gcsRECORDER;
++
++
++/******************************************************************************\
++***************************** Command Buffer Parser ****************************
++\******************************************************************************/
++
++/*
++** Command buffer parser checks command buffer in FE's view to make sure there
++** is no format error.
++**
++** Parser provide a callback mechnisam, so plug-in can be added to implement
++** other functions.
++*/
++
++static void
++_HandleLoadState(
++ IN OUT gckPARSER Parser
++ )
++{
++ gctUINT i;
++ gctUINT32_PTR data = (gctUINT32_PTR)Parser->currentCmdBufferAddr;
++ gctUINT32 cmdAddr = Parser->cmdAddr;
++
++ if (Parser->commandHandler == gcvNULL
++ || Parser->commandHandler->cmd != 0x01
++ )
++ {
++ /* No handler for this command. */
++ return;
++ }
++
++ for (i = 0; i < Parser->cmdSize; i++)
++ {
++ Parser->commandHandler->function(Parser->commandHandler, cmdAddr, *data);
++
++ /* Advance to next state. */
++ cmdAddr++;
++ data++;
++ }
++}
++
++static void
++_GetCommand(
++ IN OUT gckPARSER Parser
++ )
++{
++ gctUINT32 * buffer = (gctUINT32 *)Parser->currentCmdBufferAddr;
++
++ gctUINT16 cmdRectCount;
++ gctUINT16 cmdDataCount;
++
++ Parser->hi = buffer[0];
++ Parser->lo = buffer[1];
++
++ Parser->cmdOpcode = (((((gctUINT32) (Parser->hi)) >> (0 ? 31:27)) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) );
++ Parser->cmdRectCount = 1;
++
++ switch (Parser->cmdOpcode)
++ {
++ case 0x01:
++ /* Extract count. */
++ Parser->cmdSize = (((((gctUINT32) (Parser->hi)) >> (0 ? 25:16)) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1)))))) );
++ if (Parser->cmdSize == 0)
++ {
++ /* 0 means 1024. */
++ Parser->cmdSize = 1024;
++ }
++ Parser->skip = (Parser->cmdSize & 0x1) ? 0 : 1;
++
++ /* Extract address. */
++ Parser->cmdAddr = (((((gctUINT32) (Parser->hi)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) );
++
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 4;
++ Parser->skipCount = Parser->cmdSize + Parser->skip;
++ break;
++
++ case 0x05:
++ Parser->cmdSize = 4;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x06:
++ Parser->cmdSize = 5;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x0C:
++ Parser->cmdSize = 3;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x09:
++ Parser->cmdSize = 2;
++ Parser->cmdAddr = 0x0F16;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x04:
++ Parser->cmdSize = 1;
++ Parser->cmdAddr = 0x0F06;
++
++ cmdRectCount = (((((gctUINT32) (Parser->hi)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ cmdDataCount = (((((gctUINT32) (Parser->hi)) >> (0 ? 26:16)) & ((gctUINT32) ((((1 ? 26:16) - (0 ? 26:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:16) - (0 ? 26:16) + 1)))))) );
++
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2)
++ + cmdRectCount * 2
++ + gcmALIGN(cmdDataCount, 2);
++
++ Parser->cmdRectCount = cmdRectCount;
++ break;
++
++ case 0x03:
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
++ Parser->skipCount = 0;
++ break;
++
++ case 0x02:
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
++ Parser->skipCount = 0;
++ break;
++
++ default:
++ /* Unknown command is a risk. */
++ Parser->allow = gcvFALSE;
++ break;
++ }
++}
++
++static void
++_ParseCommand(
++ IN OUT gckPARSER Parser
++ )
++{
++ switch(Parser->cmdOpcode)
++ {
++ case 0x01:
++ _HandleLoadState(Parser);
++ break;
++ case 0x05:
++ case 0x06:
++ case 0x0C:
++ break;
++ case 0x04:
++ break;
++ default:
++ break;
++ }
++
++ /* Advance to next command. */
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr
++ + (Parser->skipCount << 2);
++}
++
++gceSTATUS
++gckPARSER_Parse(
++ IN gckPARSER Parser,
++ IN gctUINT8_PTR Buffer,
++ IN gctUINT32 Bytes
++ )
++{
++ gckPARSER parser = Parser;
++ gctUINT8_PTR end = (gctUINT8_PTR)Buffer + Bytes;
++
++ /* Initialize parser. */
++ parser->currentCmdBufferAddr = (gctUINT8_PTR)Buffer;
++ parser->skip = 0;
++ parser->allow = gcvTRUE;
++
++ /* Go through command buffer until reaching the end
++ ** or meeting an error. */
++ do
++ {
++ _GetCommand(parser);
++
++ _ParseCommand(parser);
++ }
++ while ((parser->currentCmdBufferAddr < end) && (parser->allow == gcvTRUE));
++
++ if (parser->allow == gcvFALSE)
++ {
++ /* Error detected. */
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckPARSER_RegisterCommandHandler
++**
++** Register a command handler which will be called when parser get a command.
++**
++*/
++gceSTATUS
++gckPARSER_RegisterCommandHandler(
++ IN gckPARSER Parser,
++ IN gckPARSER_HANDLER Handler
++ )
++{
++ Parser->commandHandler = Handler;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckPARSER_Construct(
++ IN gckOS Os,
++ IN gckPARSER_HANDLER Handler,
++ OUT gckPARSER * Parser
++ )
++{
++ gceSTATUS status;
++ gckPARSER pointer;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsPARSER), (gctPOINTER *)&pointer));
++
++ /* Put it here temp, should have a more general plug-in mechnisam. */
++ pointer->commandHandler = Handler;
++
++ *Parser = pointer;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++void
++gckPARSER_Destroy(
++ IN gckOS Os,
++ IN gckPARSER Parser
++ )
++{
++ gcmkOS_SAFE_FREE(Os, Parser);
++}
++
++/******************************************************************************\
++**************************** Hardware States Recorder **************************
++\******************************************************************************/
++
++static void
++_RecodeState(
++ IN gckPARSER_HANDLER Handler,
++ IN gctUINT32 Addr,
++ IN gctUINT32 Data
++ )
++{
++ gcmkVERIFY_OK(gckRECORDER_UpdateMirror(Handler->private, Addr, Data));
++}
++
++static gctUINT
++_Previous(
++ IN gctUINT Index
++ )
++{
++ if (Index == 0)
++ {
++ return gcdNUM_RECORDS - 1;
++ }
++
++ return Index - 1;
++}
++
++static gctUINT
++_Next(
++ IN gctUINT Index
++ )
++{
++ return (Index + 1) % gcdNUM_RECORDS;
++}
++
++gceSTATUS
++gckRECORDER_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ OUT gckRECORDER * Recorder
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gckRECORDER recorder = gcvNULL;
++ gctUINT32 mapSize;
++ gctUINT i;
++ gctBOOL virtualCommandBuffer = Hardware->kernel->virtualCommandBuffer;
++
++ /* TODO: We only need context buffer and state map, it should be able to get without construct a
++ ** new context.
++ ** Now it is leaked, since we can't free it when command buffer is gone.
++ */
++
++ /* MMU is not ready now. */
++ Hardware->kernel->virtualCommandBuffer = gcvFALSE;
++
++ gcmkONERROR(gckCONTEXT_Construct(Os, Hardware, 0, &context));
++
++ /* Restore. */
++ Hardware->kernel->virtualCommandBuffer = virtualCommandBuffer;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsRECORDER), (gctPOINTER *)&recorder));
++
++ gckOS_ZeroMemory(recorder, gcmSIZEOF(gcsRECORDER));
++
++ /* Copy state map. */
++ recorder->mirror.stateCount = context->stateCount;
++
++ mapSize = context->stateCount * gcmSIZEOF(gcsSTATE_MAP);
++
++ gcmkONERROR(gckOS_Allocate(Os, mapSize, (gctPOINTER *)&recorder->mirror.map));
++
++ gckOS_MemCopy(recorder->mirror.map, context->map, mapSize);
++
++ /* Copy context buffer. */
++ recorder->mirror.bytes = context->totalSize;
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ gcmkONERROR(gckOS_Allocate(Os, context->totalSize, (gctPOINTER *)&recorder->mirror.logical[i]));
++ gckOS_MemCopy(recorder->mirror.logical[i], context->buffer->logical, context->totalSize);
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ /* TODO : Optimize size. */
++ gcmkONERROR(gckOS_Allocate(Os, gcdCMD_BUFFER_SIZE, (gctPOINTER *)&recorder->deltas[i].command));
++ gcmkONERROR(gckOS_Allocate(Os, context->totalSize, (gctPOINTER *)&recorder->deltas[i].context));
++ }
++
++ recorder->index = 0;
++ recorder->num = 0;
++
++ /* Initialize Parser plugin. */
++ recorder->recorderHandler.cmd = 0x01;
++ recorder->recorderHandler.private = recorder;
++ recorder->recorderHandler.function = _RecodeState;
++
++ gcmkONERROR(gckPARSER_Construct(Os, &recorder->recorderHandler, &recorder->parser));
++
++ recorder->os = Os;
++
++ *Recorder = recorder;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (recorder)
++ {
++ gckRECORDER_Destory(Os, recorder);
++ }
++
++ return status;
++}
++
++gceSTATUS
++gckRECORDER_Destory(
++ IN gckOS Os,
++ IN gckRECORDER Recorder
++ )
++{
++ gctUINT i;
++
++ if (Recorder->mirror.map)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->mirror.map);
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ if (Recorder->mirror.logical[i])
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->mirror.logical[i]);
++ }
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ if (Recorder->deltas[i].command)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->deltas[i].command);
++ }
++
++ if (Recorder->deltas[i].context)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->deltas[i].context);
++ }
++ }
++
++ if (Recorder->parser)
++ {
++ gckPARSER_Destroy(Os, Recorder->parser);
++ }
++
++ gcmkOS_SAFE_FREE(Os, Recorder);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckRECORDER_UpdateMirror(
++ IN gckRECORDER Recorder,
++ IN gctUINT32 State,
++ IN gctUINT32 Data
++ )
++{
++ gctUINT32 index;
++ gcsSTATE_MAP_PTR map = Recorder->mirror.map;
++ gctUINT32_PTR buffer = Recorder->mirror.logical[Recorder->index];
++
++ if (State >= Recorder->mirror.stateCount)
++ {
++ /* Ignore them just like HW does. */
++ return gcvSTATUS_OK;
++ }
++
++ index = map[State].index;
++
++ if (index)
++ {
++ buffer[index] = Data;
++ }
++
++ return gcvSTATUS_OK;
++}
++
++void
++gckRECORDER_AdvanceIndex(
++ IN gckRECORDER Recorder,
++ IN gctUINT64 CommitStamp
++ )
++{
++ /* Get next record. */
++ gctUINT next = (Recorder->index + 1) % gcdNUM_RECORDS;
++
++ /* Record stamp of this commit. */
++ Recorder->deltas[Recorder->index].commitStamp = CommitStamp;
++
++ /* Mirror of next record is mirror of this record and delta in next record. */
++ gckOS_MemCopy(Recorder->mirror.logical[next],
++ Recorder->mirror.logical[Recorder->index], Recorder->mirror.bytes);
++
++ /* Advance to next record. */
++ Recorder->index = next;
++
++ Recorder->num = gcmMIN(Recorder->num + 1, gcdNUM_RECORDS - 1);
++
++
++ /* Reset delta. */
++ Recorder->deltas[Recorder->index].commandBytes = 0;
++ Recorder->deltas[Recorder->index].contextBytes = 0;
++}
++
++void
++gckRECORDER_Record(
++ IN gckRECORDER Recorder,
++ IN gctUINT8_PTR CommandBuffer,
++ IN gctUINT32 CommandBytes,
++ IN gctUINT8_PTR ContextBuffer,
++ IN gctUINT32 ContextBytes
++ )
++{
++ gcsDELTA * delta = &Recorder->deltas[Recorder->index];
++
++ if (CommandBytes != 0xFFFFFFFF)
++ {
++ gckPARSER_Parse(Recorder->parser, CommandBuffer, CommandBytes);
++ gckOS_MemCopy(delta->command, CommandBuffer, CommandBytes);
++ delta->commandBytes = CommandBytes;
++ }
++
++ if (ContextBytes != 0xFFFFFFFF)
++ {
++ gckPARSER_Parse(Recorder->parser, ContextBuffer, ContextBytes);
++ gckOS_MemCopy(delta->context, ContextBuffer, ContextBytes);
++ delta->contextBytes = ContextBytes;
++ }
++}
++
++void
++gckRECORDER_Dump(
++ IN gckRECORDER Recorder
++ )
++{
++ gctUINT last = Recorder->index;
++ gctUINT previous;
++ gctUINT i;
++ gcsMIRROR *mirror = &Recorder->mirror;
++ gcsDELTA *delta;
++ gckOS os = Recorder->os;
++
++ for (i = 0; i < Recorder->num; i++)
++ {
++ last = _Previous(last);
++ }
++
++ for (i = 0; i < Recorder->num; i++)
++ {
++ delta = &Recorder->deltas[last];
++
++ /* Dump record */
++ gcmkPRINT("#[commit %llu]", delta->commitStamp);
++
++ if (delta->commitStamp)
++ {
++ previous = _Previous(last);
++
++ gcmkPRINT("#[mirror]");
++ gckOS_DumpBuffer(os, mirror->logical[previous], mirror->bytes, gceDUMP_BUFFER_CONTEXT, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++ }
++
++ if (delta->contextBytes)
++ {
++ gckOS_DumpBuffer(os, delta->context, delta->contextBytes, gceDUMP_BUFFER_CONTEXT, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++ }
++
++ gckOS_DumpBuffer(os, delta->command, delta->commandBytes, gceDUMP_BUFFER_USER, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++
++ last = _Next(last);
++ }
++}
++
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,932 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_InitializeInfo
++**
++** Initialize architecture dependent command buffer information.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ /* Reset interrupts. */
++ Command->info.feBufferInt = -1;
++ Command->info.tsOverflowInt = -1;
++
++ /* Set command buffer attributes. */
++ Command->info.addressAlignment = 64;
++ Command->info.commandAlignment = 8;
++
++ /* Determine command alignment address mask. */
++ Command->info.addressMask = ((((gctUINT32) (Command->info.addressAlignment - 1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0 ) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Query the number of bytes needed by the STATE command. */
++ gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++ Command, 0x0, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.stateCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RESTART command. */
++ gcmkERR_BREAK(gckVGCOMMAND_RestartCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.restartCommandSize
++ ));
++
++ /* Query the number of bytes needed by the FETCH command. */
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.fetchCommandSize
++ ));
++
++ /* Query the number of bytes needed by the CALL command. */
++ gcmkERR_BREAK(gckVGCOMMAND_CallCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.callCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RETURN command. */
++ gcmkERR_BREAK(gckVGCOMMAND_ReturnCommand(
++ Command, gcvNULL,
++ &Command->info.returnCommandSize
++ ));
++
++ /* Query the number of bytes needed by the EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, gcvNULL, gcvBLOCK_PIXEL, -1,
++ &Command->info.eventCommandSize
++ ));
++
++ /* Query the number of bytes needed by the END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command, gcvNULL, -1,
++ &Command->info.endCommandSize
++ ));
++
++ /* Determine the tail reserve size. */
++ Command->info.staticTailSize = gcmMAX(
++ Command->info.fetchCommandSize,
++ gcmMAX(
++ Command->info.returnCommandSize,
++ Command->info.endCommandSize
++ )
++ );
++
++ /* Determine the maximum tail size. */
++ Command->info.dynamicTailSize
++ = Command->info.staticTailSize
++ + Command->info.eventCommandSize * gcvBLOCK_COUNT;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_StateCommand
++**
++** Append a STATE command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctUINT32 Pipe
++** Harwdare destination pipe.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** STATE command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 Address
++** Starting register address of the state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT32 Count
++** Number of states in state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the STATE command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the STATE command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Pipe=0x%x Logical=0x%x Address=0x%x Count=0x%x Bytes = 0x%x",
++ Command, Pipe, Logical, Address, Count, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) ((gctUINT32) (Pipe) & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LOAD_STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_RestartCommand
++**
++** Form a RESTART command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RESTART command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this RESTART
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this RESTART command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RESTART command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RESTART command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++ gctUINT32 beginEndMark;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Determine Begin/End flag. */
++ beginEndMark = (FetchCount > 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)));
++
++ /* Append RESTART. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x9 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)))
++ | beginEndMark;
++
++ buffer[1]
++ = FetchAddress;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RESTART command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_FetchCommand
++**
++** Form a FETCH command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** FETCH command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this FETCH
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this FETCH command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the FETCH command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the FETCH command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append FETCH. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x5 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the FETCH command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LINK. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_CallCommand
++**
++** Append a CALL command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** CALL command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this CALL
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this CALL command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the CALL command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the CALL command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append CALL. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x6 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CALL command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_ReturnCommand
++**
++** Append a RETURN command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RETURN command at or gcvNULL to query the size of the command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RETURN command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RETURN command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Bytes = 0x%x",
++ Command, Logical, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append RETURN. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x7 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RETURN command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EventCommand
++**
++** Form an EVENT command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** EVENT command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gceBLOCK Block
++** Block that will generate the interrupt.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Block=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, Block, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ typedef struct _gcsEVENTSTATES
++ {
++ /* Chips before VG21 use these values. */
++ gctUINT eventFromFE;
++ gctUINT eventFromPE;
++
++ /* VG21 chips and later use SOURCE field. */
++ gctUINT eventSource;
++ }
++ gcsEVENTSTATES;
++
++ static gcsEVENTSTATES states[] =
++ {
++ /* gcvBLOCK_COMMAND */
++ {
++ (gctUINT)~0,
++ (gctUINT)~0,
++ (gctUINT)~0
++ },
++
++ /* gcvBLOCK_TESSELLATOR */
++ {
++ 0x0,
++ 0x1,
++ 0x10
++ },
++
++ /* gcvBLOCK_TESSELLATOR2 */
++ {
++ 0x0,
++ 0x1,
++ 0x12
++ },
++
++ /* gcvBLOCK_TESSELLATOR3 */
++ {
++ 0x0,
++ 0x1,
++ 0x14
++ },
++
++ /* gcvBLOCK_RASTER */
++ {
++ 0x0,
++ 0x1,
++ 0x07,
++ },
++
++ /* gcvBLOCK_VG */
++ {
++ 0x0,
++ 0x1,
++ 0x0F
++ },
++
++ /* gcvBLOCK_VG2 */
++ {
++ 0x0,
++ 0x1,
++ 0x11
++ },
++
++ /* gcvBLOCK_VG3 */
++ {
++ 0x0,
++ 0x1,
++ 0x13
++ },
++
++ /* gcvBLOCK_PIXEL */
++ {
++ 0x0,
++ 0x1,
++ 0x07
++ },
++ };
++
++ /* Verify block ID. */
++ gcmkVERIFY_ARGUMENT(gcmIS_VALID_INDEX(Block, states));
++
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++
++ /* Determine chip version. */
++ if (Command->vg21)
++ {
++ /* Get the event source for the block. */
++ gctUINT eventSource = states[Block].eventSource;
++
++ /* Supported? */
++ if (eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) ((gctUINT32) (eventSource) & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ else
++ {
++ /* Get the event source for the block. */
++ gctUINT eventFromFE = states[Block].eventFromFE;
++ gctUINT eventFromPE = states[Block].eventFromPE;
++
++ /* Supported? */
++ if (eventFromFE == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (eventFromFE) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (eventFromPE) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Make sure the events are directly supported for the block. */
++ if (states[Block].eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine event source. */
++ if (Block == gcvBLOCK_COMMAND)
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++ else
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT and END commands. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EndCommand
++**
++** Form an END command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** END command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append END. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR memory;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ memory = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ memory[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ memory[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append END. */
++ memory[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT and END commands. */
++ *Bytes = 16;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,319 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_command_vg_h_
++#define __gc_hal_kernel_hardware_command_vg_h_
++
++/******************************************************************************\
++******************* Task and Interrupt Management Structures. ******************
++\******************************************************************************/
++
++/* Task storage header. */
++typedef struct _gcsTASK_STORAGE * gcsTASK_STORAGE_PTR;
++typedef struct _gcsTASK_STORAGE
++{
++ /* Next allocated storage buffer. */
++ gcsTASK_STORAGE_PTR next;
++}
++gcsTASK_STORAGE;
++
++/* Task container header. */
++typedef struct _gcsTASK_CONTAINER * gcsTASK_CONTAINER_PTR;
++typedef struct _gcsTASK_CONTAINER
++{
++ /* The number of tasks left to be processed in the container. */
++ gctINT referenceCount;
++
++ /* Size of the buffer. */
++ gctUINT size;
++
++ /* Link to the previous and the next allocated containers. */
++ gcsTASK_CONTAINER_PTR allocPrev;
++ gcsTASK_CONTAINER_PTR allocNext;
++
++ /* Link to the previous and the next containers in the free list. */
++ gcsTASK_CONTAINER_PTR freePrev;
++ gcsTASK_CONTAINER_PTR freeNext;
++}
++gcsTASK_CONTAINER;
++
++/* Kernel space task master table entry. */
++typedef struct _gcsBLOCK_TASK_ENTRY * gcsBLOCK_TASK_ENTRY_PTR;
++typedef struct _gcsBLOCK_TASK_ENTRY
++{
++ /* Pointer to the current task container for the block. */
++ gcsTASK_CONTAINER_PTR container;
++
++ /* Pointer to the current task data within the container. */
++ gcsTASK_HEADER_PTR task;
++
++ /* Pointer to the last link task within the container. */
++ gcsTASK_LINK_PTR link;
++
++ /* Number of interrupts allocated for this block. */
++ gctUINT interruptCount;
++
++ /* The index of the current interrupt. */
++ gctUINT interruptIndex;
++
++ /* Interrupt semaphore. */
++ gctSEMAPHORE interruptSemaphore;
++
++ /* Interrupt value array. */
++ gctINT32 interruptArray[32];
++}
++gcsBLOCK_TASK_ENTRY;
++
++
++/******************************************************************************\
++********************* Command Queue Management Structures. *********************
++\******************************************************************************/
++
++/* Command queue kernel element pointer. */
++typedef struct _gcsKERNEL_CMDQUEUE * gcsKERNEL_CMDQUEUE_PTR;
++
++/* Command queue object handler function type. */
++typedef gceSTATUS (* gctOBJECT_HANDLER) (
++ gckVGKERNEL Kernel,
++ gcsKERNEL_CMDQUEUE_PTR Entry
++ );
++
++/* Command queue kernel element. */
++typedef struct _gcsKERNEL_CMDQUEUE
++{
++ /* The number of buffers in the queue. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Pointer to the object handler function. */
++ gctOBJECT_HANDLER handler;
++}
++gcsKERNEL_CMDQUEUE;
++
++/* Command queue header. */
++typedef struct _gcsKERNEL_QUEUE_HEADER * gcsKERNEL_QUEUE_HEADER_PTR;
++typedef struct _gcsKERNEL_QUEUE_HEADER
++{
++ /* The size of the buffer in bytes. */
++ gctUINT size;
++
++ /* The number of pending entries to be processed. */
++ volatile gctUINT pending;
++
++ /* The current command queue entry. */
++ gcsKERNEL_CMDQUEUE_PTR currentEntry;
++
++ /* Next buffer. */
++ gcsKERNEL_QUEUE_HEADER_PTR next;
++}
++gcsKERNEL_QUEUE_HEADER;
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++/* gckVGCOMMAND object. */
++struct _gckVGCOMMAND
++{
++ /***************************************************************************
++ ** Object data and pointers.
++ */
++
++ gcsOBJECT object;
++ gckVGKERNEL kernel;
++ gckOS os;
++ gckVGHARDWARE hardware;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++
++ /***************************************************************************
++ ** Enable command queue dumping.
++ */
++
++ gctBOOL enableDumping;
++
++
++ /***************************************************************************
++ ** Bus Error interrupt.
++ */
++
++ gctINT32 busErrorInt;
++
++
++ /***************************************************************************
++ ** Command buffer information.
++ */
++
++ gcsCOMMAND_BUFFER_INFO info;
++
++
++ /***************************************************************************
++ ** Synchronization objects.
++ */
++
++ gctPOINTER queueMutex;
++ gctPOINTER taskMutex;
++ gctPOINTER commitMutex;
++
++
++ /***************************************************************************
++ ** Task management.
++ */
++
++ /* The head of the storage buffer linked list. */
++ gcsTASK_STORAGE_PTR taskStorage;
++
++ /* Allocation size. */
++ gctUINT taskStorageGranularity;
++ gctUINT taskStorageUsable;
++
++ /* The free container list. */
++ gcsTASK_CONTAINER_PTR taskFreeHead;
++ gcsTASK_CONTAINER_PTR taskFreeTail;
++
++ /* Task table */
++ gcsBLOCK_TASK_ENTRY taskTable[gcvBLOCK_COUNT];
++
++
++ /***************************************************************************
++ ** Command queue.
++ */
++
++ /* Pointer to the allocated queue memory. */
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++
++ /* Pointer to the current available queue from which new queue entries
++ will be allocated. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* If different from queueHead, points to the command queue which is
++ currently being executed by the hardware. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++
++ /* Points to the queue to merge the tail with when the tail is processed. */
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++
++ /* Queue overflow counter. */
++ gctUINT queueOverflow;
++
++
++ /***************************************************************************
++ ** Context.
++ */
++
++ /* Context counter used for unique ID. */
++ gctUINT64 contextCounter;
++
++ /* Current context ID. */
++ gctUINT64 currentContext;
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++ gctINT32 powerStallInt;
++ gcsCMDBUFFER_PTR powerStallBuffer;
++ gctSIGNAL powerStallSignal;
++
++};
++
++/******************************************************************************\
++************************ gckVGCOMMAND Object Internal API. ***********************
++\******************************************************************************/
++
++/* Initialize architecture dependent command buffer information. */
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ );
++
++/* Form a STATE command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a RESTART command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a FETCH command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a CALL command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a RETURN command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form an EVENT command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form an END command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ );
++
++#endif /* __gc_hal_kernel_hardware_command_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2119 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++ gcvPOWER_FLAG_NOP = 1 << 11,
++}
++gcePOWER_FLAGS;
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckOS Os
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ /* Read register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00104,
++ 0x00000000));
++
++ /* Wait for clock being stable. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32 * ChipFeatures,
++ OUT gctUINT32 * ChipMinorFeatures,
++ OUT gctUINT32 * ChipMinorFeatures2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 chipIdentity;
++
++ do
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG, 0x00018, &chipIdentity));
++
++ /* Special case for older graphic cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ *ChipModel = gcv500;
++ *ChipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00020,
++ (gctUINT32 *) ChipModel));
++
++ /* Read CHIP_REV register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00024,
++ ChipRevision));
++ }
++
++ /* Read chip feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x0001C, ChipFeatures
++ ));
++
++ /* Read chip minor feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00034, ChipMinorFeatures
++ ));
++
++ /* Read chip minor feature register #2. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00074, ChipMinorFeatures2
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_VERBOSE,
++ "ChipModel=0x%08X\n"
++ "ChipRevision=0x%08X\n"
++ "ChipFeatures=0x%08X\n"
++ "ChipMinorFeatures=0x%08X\n"
++ "ChipMinorFeatures2=0x%08X\n",
++ *ChipModel,
++ *ChipRevision,
++ *ChipFeatures,
++ *ChipMinorFeatures,
++ *ChipMinorFeatures2
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_VGPowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckVGHARDWARE hardware = (gckVGHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++/******************************************************************************\
++****************************** gckVGHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Construct
++**
++** Construct a new gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** OUTPUT:
++**
++** gckVGHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckVGHARDWARE
++** object.
++*/
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ )
++{
++ gckVGHARDWARE hardware = gcvNULL;
++ gceSTATUS status;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x ", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ status = _ResetGPU(Os);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ /* Identify the hardware. */
++ gcmkERR_BREAK(_IdentifyHardware(Os,
++ &chipModel, &chipRevision,
++ &chipFeatures, &chipMinorFeatures, &chipMinorFeatures2
++ ));
++
++ /* Allocate the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckVGHARDWARE), (gctPOINTER *) &hardware
++ ));
++
++ /* Initialize the gckVGHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++
++ /* Set chip identity. */
++ hardware->chipModel = chipModel;
++ hardware->chipRevision = chipRevision;
++ hardware->chipFeatures = chipFeatures;
++ hardware->chipMinorFeatures = chipMinorFeatures;
++ hardware->chipMinorFeatures2 = chipMinorFeatures2;
++
++ hardware->powerMutex = gcvNULL;
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->chipPowerStateGlobal = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTime = 0;
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _VGPowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ /* Determine whether FE 2.0 is present. */
++ hardware->fe20 = ((((gctUINT32) (hardware->chipFeatures)) >> (0 ? 28:28) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))));
++
++ /* Determine whether VG 2.0 is present. */
++ hardware->vg20 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 13:13) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))));
++
++ /* Determine whether VG 2.1 is present. */
++ hardware->vg21 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++
++ /* Set default event mask. */
++ hardware->eventMask = 0xFFFFFFFF;
++
++ gcmkERR_BREAK(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++ /* Set fast clear to auto. */
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(hardware, -1));
++
++ gcmkERR_BREAK(gckOS_CreateMutex(Os, &hardware->powerMutex));
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Return pointer to the gckVGHARDWARE object. */
++ *Hardware = hardware;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++#if gcdPOWEROFF_TIMEOUT
++ if (hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvFALSE, gcvFALSE));
++
++ if (hardware != gcvNULL && hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Free(Os, hardware));
++ }
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Destroy
++**
++** Destroy an gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ if (Hardware->powerMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(
++ Hardware->os, Hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ if (Hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++ }
++
++ /* Free the object. */
++ status = gckOS_Free(Hardware->os, Hardware);
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, no horizontal pixel per tile will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, no vertical pixel per tile will be returned.
++*/
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x InternalSize=0x%x InternalBaseAddress=0x%x InternalAlignment=0x%x"
++ "ExternalSize=0x%x ExternalBaseAddress=0x%x ExternalAlignment=0x%x HorizontalTileSize=0x%x VerticalTileSize=0x%x",
++ Hardware, InternalSize, InternalBaseAddress, InternalAlignment,
++ ExternalSize, ExternalBaseAddress, ExternalAlignment, HorizontalTileSize, VerticalTileSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gceCHIPMODEL * ChipModel
++** If 'ChipModel' is not gcvNULL, the variable it points to will
++** receive the model of the chip.
++**
++** gctUINT32 * ChipRevision
++** If 'ChipRevision' is not gcvNULL, the variable it points to will
++** receive the revision of the chip.
++**
++** gctUINT32 * ChipFeatures
++** If 'ChipFeatures' is not gcvNULL, the variable it points to will
++** receive the feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures
++** If 'ChipMinorFeatures' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures2
++** If 'ChipMinorFeatures2' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures2
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x ChipModel=0x%x ChipRevision=0x%x ChipFeatures = 0x%x ChipMinorFeatures = 0x%x ChipMinorFeatures2 = 0x%x",
++ Hardware, ChipModel, ChipRevision, ChipFeatures, ChipMinorFeatures, ChipMinorFeatures2);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Return chip model. */
++ if (ChipModel != gcvNULL)
++ {
++ *ChipModel = Hardware->chipModel;
++ }
++
++ /* Return revision number. */
++ if (ChipRevision != gcvNULL)
++ {
++ *ChipRevision = Hardware->chipRevision;
++ }
++
++ /* Return feature set. */
++ if (ChipFeatures != gcvNULL)
++ {
++ gctUINT32 features = Hardware->chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 since it did not have this *\
++ \* bit. */
++ if ((Hardware->chipModel == gcv500)
++ && (Hardware->chipRevision == 0)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ /* Mark 2D pipe as available for GC300 since it did not have this *\
++ \* bit. */
++ if (Hardware->chipModel == gcv300)
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ *ChipFeatures = features;
++ }
++
++ /* Return minor feature set. */
++ if (ChipMinorFeatures != gcvNULL)
++ {
++ *ChipMinorFeatures = Hardware->chipMinorFeatures;
++ }
++
++ /* Return minor feature set #2. */
++ if (ChipMinorFeatures2 != gcvNULL)
++ {
++ *ChipMinorFeatures2 = Hardware->chipMinorFeatures2;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertFormat
++**
++** Convert an API format to hardware parameters.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gceSURF_FORMAT Format
++** API format to convert.
++**
++** OUTPUT:
++**
++** gctUINT32 * BitsPerPixel
++** Pointer to a variable that will hold the number of bits per pixel.
++**
++** gctUINT32 * BytesPerTile
++** Pointer to a variable that will hold the number of bytes per tile.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ )
++{
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bytesPerTile;
++
++ gcmkHEADER_ARG("Hardware=0x%x Format=0x%x BitsPerPixel=0x%x BytesPerTile = 0x%x",
++ Hardware, Format, BitsPerPixel, BytesPerTile);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Dispatch on format. */
++ switch (Format)
++ {
++ case gcvSURF_A1:
++ case gcvSURF_L1:
++ /* 1-bpp format. */
++ bitsPerPixel = 1;
++ bytesPerTile = (1 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_A4:
++ /* 4-bpp format. */
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_INDEX8:
++ case gcvSURF_A8:
++ case gcvSURF_L8:
++ /* 8-bpp format. */
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_YV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_NV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ /* 4444 variations. */
++ case gcvSURF_X4R4G4B4:
++ case gcvSURF_A4R4G4B4:
++ case gcvSURF_R4G4B4X4:
++ case gcvSURF_R4G4B4A4:
++ case gcvSURF_B4G4R4X4:
++ case gcvSURF_B4G4R4A4:
++ case gcvSURF_X4B4G4R4:
++ case gcvSURF_A4B4G4R4:
++
++ /* 1555 variations. */
++ case gcvSURF_X1R5G5B5:
++ case gcvSURF_A1R5G5B5:
++ case gcvSURF_R5G5B5X1:
++ case gcvSURF_R5G5B5A1:
++ case gcvSURF_X1B5G5R5:
++ case gcvSURF_A1B5G5R5:
++ case gcvSURF_B5G5R5X1:
++ case gcvSURF_B5G5R5A1:
++
++ /* 565 variations. */
++ case gcvSURF_R5G6B5:
++ case gcvSURF_B5G6R5:
++
++ case gcvSURF_A8L8:
++ case gcvSURF_YUY2:
++ case gcvSURF_UYVY:
++ case gcvSURF_D16:
++ /* 16-bpp format. */
++ bitsPerPixel = 16;
++ bytesPerTile = (16 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_X8R8G8B8:
++ case gcvSURF_A8R8G8B8:
++ case gcvSURF_X8B8G8R8:
++ case gcvSURF_A8B8G8R8:
++ case gcvSURF_R8G8B8X8:
++ case gcvSURF_R8G8B8A8:
++ case gcvSURF_B8G8R8X8:
++ case gcvSURF_B8G8R8A8:
++ case gcvSURF_D32:
++ /* 32-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_D24S8:
++ /* 24-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT1:
++ case gcvSURF_ETC1:
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT2:
++ case gcvSURF_DXT3:
++ case gcvSURF_DXT4:
++ case gcvSURF_DXT5:
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ default:
++ /* Invalid format. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Set the result. */
++ if (BitsPerPixel != gcvNULL)
++ {
++ * BitsPerPixel = bitsPerPixel;
++ }
++
++ if (BytesPerTile != gcvNULL)
++ {
++ * BytesPerTile = bytesPerTile;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Pool=0x%x Offset = 0x%x",
++ Hardware, Address, Pool, Offset);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 1:0)) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x2:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = ((((gctUINT32) (Address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address of the command buffer.
++**
++** gctSIZE_T Count
++** Number of command-sized data units to be executed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Count=0x%x",
++ Hardware, Address, Count);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ /* Enable all events. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00014,
++ Hardware->eventMask
++ ));
++
++ if (Hardware->fe20)
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00500,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00504,
++ Count
++ ));
++ }
++ else
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00654,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00658,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_AlignToTile
++**
++** Align the specified width and height to tile boundaries.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** gceSURF_TYPE Type
++** Type of alignment.
++**
++** gctUINT32 * Width
++** Pointer to the width to be aligned. If 'Width' is gcvNULL, no width
++** will be aligned.
++**
++** gctUINT32 * Height
++** Pointer to the height to be aligned. If 'Height' is gcvNULL, no height
++** will be aligned.
++**
++** OUTPUT:
++**
++** gctUINT32 * Width
++** Pointer to a variable that will receive the aligned width.
++**
++** gctUINT32 * Height
++** Pointer to a variable that will receive the aligned height.
++*/
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32 * Width,
++ IN OUT gctUINT32 * Height
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Type=0x%x Width=0x%x Height=0x%x",
++ Hardware, Type, Width, Height);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Width != gcvNULL)
++ {
++ /* Align the width. */
++ *Width = gcmALIGN(*Width, (Type == gcvSURF_TEXTURE) ? 4 : 16);
++ }
++
++ if (Height != gcvNULL)
++ {
++ /* Special case for VG images. */
++ if ((*Height == 0) && (Type == gcvSURF_IMAGE))
++ {
++ *Height = 4;
++ }
++ else
++ {
++ /* Align the height. */
++ *Height = gcmALIGN(*Height, 4);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the memory in user space.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x InUserSpace=%d Address=0x%x",
++ Hardware, Logical, InUserSpace, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ /* Convert logical address into a physical address. */
++ if (InUserSpace)
++ {
++ gcmkERR_BREAK(gckOS_UserLogicalToPhysical(
++ Hardware->os, Logical, &address
++ ));
++ }
++ else
++ {
++ gcmkERR_BREAK(gckOS_GetPhysicalAddress(
++ Hardware->os, Logical, &address
++ ));
++ }
++
++ /* Return hardware specific address. */
++ *Address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QuerySystemMemory
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++** gckVGHARDWARE Harwdare
++** Pointer to an gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++** Poinetr to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x SystemSize=0x%x SystemBaseAddress=0x%x",
++ Hardware, SystemSize, SystemBaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = (gctSIZE_T)(1 << 31);
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Harwdare
++** Pointer to an gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x",
++ Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ do
++ {
++ /* Convert the logical address into an hardware address. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertLogical(Hardware, Logical,
++ gcvFALSE, &address));
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00400,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00404,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00408,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x0040C,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00410,
++ gcmkFIXADDRESS(address)));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++** gckVGHARDWARE Harwdare
++** Pointer to an gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT32_PTR buffer;
++
++ /* Create a shortcut to the command buffer object. */
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &commandBuffer, (gctPOINTER *) &buffer
++ ));
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Index=0x%x Offset=0x%x Address=0x%x",
++ Hardware, Index, Offset, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ address = (Index << 12) | Offset;
++
++ /* Set virtual type. */
++ address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Set the result. */
++ *Address = address;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x Data=0x%x", Hardware, Data);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ /* Read register and return. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, Data);
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ )
++{
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ if (!(((((gctUINT32) (Hardware->chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ return gcvSTATUS_OK;
++ }
++
++ do
++ {
++ if (Enable == -1)
++ {
++ Enable = (Hardware->chipModel > gcv500) ||
++ ((Hardware->chipModel == gcv500) && (Hardware->chipRevision >= 3));
++ }
++
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ &debug));
++
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++#ifdef AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION)));
++#endif
++
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ debug));
++
++ Hardware->allowFastClear = Enable;
++
++ status = gcvFALSE;
++ }
++ while (gcvFALSE);
++
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x IDs=0x%x", Hardware, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IDs != gcvNULL);
++
++ /* Read AQIntrAcknowledge register. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00010,
++ IDs);
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS _CommandStall(
++ gckVGHARDWARE Hardware)
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gctUINT32_PTR buffer;
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &command->powerStallBuffer,
++ (gctPOINTER *) &buffer
++ ));
++
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ command, buffer, gcvBLOCK_PIXEL,
++ command->powerStallInt, gcvNULL));
++
++ gcmkERR_BREAK(gckVGCOMMAND_Execute(
++ command,
++ command->powerStallBuffer
++ ));
++
++ /* Wait the signal. */
++ gcmkERR_BREAK(gckOS_WaitSignal(
++ command->os,
++ command->powerStallSignal,
++ command->kernel->kernel->timeOut));
++
++
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag/*, clock*/;
++
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL commitMutex = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++
++ gctBOOL broadcast = gcvFALSE;
++ gctUINT32 process, thread;
++ gctBOOL global = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_NOP,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_NOP,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d",
++ State);
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ /* Acquire the power mutex. */
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE)
++ {
++ /* gcvPOWER_IDLE_BROADCAST is from IST,
++ ** so waiting here will cause deadlock,
++ ** if lock holder call gckCOMMAND_Stall() */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++ /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ /*clock = clocks[State];*/
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime is pushed forward, give up.*/
++ if (isAfter
++ /* Expect a transition start from IDLE. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* internal power control */
++ if (!global)
++ {
++ if (Hardware->chipPowerStateGlobal == gcvPOWER_OFF)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os,
++ command->commitMutex,
++ gcvINFINITE
++ ));
++
++ commitMutex = gcvTRUE;
++
++ gcmkONERROR(_CommandStall(Hardware));
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++
++ /* Initialize GPU here, replaced by InitializeHardware later */
++ gcmkONERROR(gckVGHARDWARE_SetMMU(Hardware, Hardware->kernel->mmu->pageTableLogical));
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(Hardware, -1));
++
++ /* Force the command queue to reload the next context. */
++ command->currentContext = 0;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ gcvCORE_VG,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++ if (global)
++ {
++ /* Save the new power state. */
++ Hardware->chipPowerStateGlobal = State;
++ }
++
++ if (commitMutex)
++ {
++ /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ /* Reset power off time */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOfftimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ if (commitMutex)
++ {
++ /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++ /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctBOOL PowerManagement
++** Power Management State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, &idle));
++
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 8:8)) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 10:10)) & ((gctUINT32) ((((1 ? 10:10) - (0 ? 10:10) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 10:10) - (0 ? 10:10) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ *IsIdle = gcvTRUE;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,74 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_vg_h_
++#define __gc_hal_kernel_hardware_vg_h_
++
++/* gckHARDWARE object. */
++struct _gckVGHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckKERNEL object. */
++ gckVGKERNEL kernel;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Chip characteristics. */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++ gctBOOL allowFastClear;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++ /* Event mask. */
++ gctUINT32 eventMask;
++
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gceCHIPPOWERSTATE chipPowerStateGlobal;
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++ gctPOINTER pageTableDirty;
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctBOOL powerManagement;
++};
++
++#endif /* __gc_hal_kernel_hardware_vg_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,5040 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/*******************************************************************************
++***** Version Signature *******************************************************/
++
++#define _gcmTXT2STR(t) #t
++#define gcmTXT2STR(t) _gcmTXT2STR(t)
++const char * _VERSION = "\n\0$VERSION$"
++ gcmTXT2STR(gcvVERSION_MAJOR) "."
++ gcmTXT2STR(gcvVERSION_MINOR) "."
++ gcmTXT2STR(gcvVERSION_PATCH) ":"
++ gcmTXT2STR(gcvVERSION_BUILD) "$\n";
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define gcmDEFINE2TEXT(d) #d
++gctCONST_STRING _DispatchText[] =
++{
++ gcmDEFINE2TEXT(gcvHAL_QUERY_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_IDENTITY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_RELEASE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_LOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNLOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_EVENT_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_USER_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_DATA),
++ gcmDEFINE2TEXT(gcvHAL_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_STALL),
++ gcmDEFINE2TEXT(gcvHAL_READ_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_GET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_SET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS),
++ gcmDEFINE2TEXT(gcvHAL_PROFILE_REGISTERS_2D),
++#if VIVANTE_PROFILER_PERDRAW
++ gcvHAL_READ_PROFILER_REGISTER_SETTING,
++#endif
++ gcmDEFINE2TEXT(gcvHAL_SET_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_GET_BASE_ADDRESS),
++ gcmDEFINE2TEXT(gcvHAL_SET_IDLE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_KERNEL_SETTINGS),
++ gcmDEFINE2TEXT(gcvHAL_RESET),
++ gcmDEFINE2TEXT(gcvHAL_MAP_PHYSICAL),
++ gcmDEFINE2TEXT(gcvHAL_DEBUG),
++ gcmDEFINE2TEXT(gcvHAL_CACHE),
++ gcmDEFINE2TEXT(gcvHAL_TIMESTAMP),
++ gcmDEFINE2TEXT(gcvHAL_DATABASE),
++ gcmDEFINE2TEXT(gcvHAL_VERSION),
++ gcmDEFINE2TEXT(gcvHAL_CHIP_INFO),
++ gcmDEFINE2TEXT(gcvHAL_ATTACH),
++ gcmDEFINE2TEXT(gcvHAL_DETACH),
++ gcmDEFINE2TEXT(gcvHAL_COMPOSE),
++ gcmDEFINE2TEXT(gcvHAL_SET_TIMEOUT),
++ gcmDEFINE2TEXT(gcvHAL_GET_FRAME_INFO),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_COMMAND_BUFFER),
++ gcmDEFINE2TEXT(gcvHAL_COMMIT_DONE),
++ gcmDEFINE2TEXT(gcvHAL_DUMP_GPU_STATE),
++ gcmDEFINE2TEXT(gcvHAL_DUMP_EVENT),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER),
++ gcmDEFINE2TEXT(gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER),
++ gcmDEFINE2TEXT(gcvHAL_SET_FSCALE_VALUE),
++ gcmDEFINE2TEXT(gcvHAL_GET_FSCALE_VALUE),
++ gcmDEFINE2TEXT(gcvHAL_NAME_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_IMPORT_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_RESET_TIME_STAMP),
++ gcmDEFINE2TEXT(gcvHAL_READ_REGISTER_EX),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER_EX),
++ gcmDEFINE2TEXT(gcvHAL_SYNC_POINT),
++ gcmDEFINE2TEXT(gcvHAL_CREATE_NATIVE_FENCE),
++ gcmDEFINE2TEXT(gcvHAL_DESTROY_MMU),
++ gcmDEFINE2TEXT(gcvHAL_SHBUF),
++};
++#endif
++
++#if gcdGPU_TIMEOUT && gcdINTERRUPT_STATISTIC
++void
++_MonitorTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckKERNEL kernel = (gckKERNEL)Data;
++ gctUINT32 pendingInterrupt;
++ gctBOOL reset = gcvFALSE;
++ gctUINT32 mask;
++ gctUINT32 advance = kernel->timeOut/2;
++
++#if gcdENABLE_VG
++ if (kernel->core == gcvCORE_VG)
++ {
++ return;
++ }
++#endif
++
++ if (kernel->monitorTimerStop)
++ {
++ /* Stop. */
++ return;
++ }
++
++ gckOS_AtomGet(kernel->os, kernel->eventObj->interruptCount, &pendingInterrupt);
++
++ if (kernel->monitoring == gcvFALSE)
++ {
++ if (pendingInterrupt)
++ {
++ /* Begin to monitor GPU state. */
++ kernel->monitoring = gcvTRUE;
++
++ /* Record current state. */
++ kernel->lastCommitStamp = kernel->eventObj->lastCommitStamp;
++ kernel->restoreAddress = kernel->hardware->lastWaitLink;
++ gcmkVERIFY_OK(gckOS_AtomGet(
++ kernel->os,
++ kernel->hardware->pendingEvent,
++ &kernel->restoreMask
++ ));
++
++ /* Clear timeout. */
++ kernel->timer = 0;
++ }
++ }
++ else
++ {
++ if (pendingInterrupt)
++ {
++ gcmkVERIFY_OK(gckOS_AtomGet(
++ kernel->os,
++ kernel->hardware->pendingEvent,
++ &mask
++ ));
++
++ if (kernel->eventObj->lastCommitStamp == kernel->lastCommitStamp
++ && kernel->hardware->lastWaitLink == kernel->restoreAddress
++ && mask == kernel->restoreMask
++ )
++ {
++ /* GPU state is not changed, accumulate timeout. */
++ kernel->timer += advance;
++
++ if (kernel->timer >= kernel->timeOut)
++ {
++ /* GPU stuck, trigger reset. */
++ reset = gcvTRUE;
++ }
++ }
++ else
++ {
++ /* GPU state changed, cancel current timeout. */
++ kernel->monitoring = gcvFALSE;
++ }
++ }
++ else
++ {
++ /* GPU finished all jobs; cancel current timeout. */
++ kernel->monitoring = gcvFALSE;
++ }
++ }
++
++ if (reset)
++ {
++ gckKERNEL_Recovery(kernel);
++
++ /* Work in this timeout is done. */
++ kernel->monitoring = gcvFALSE;
++ }
++
++ gcmkVERIFY_OK(gckOS_StartTimer(kernel->os, kernel->monitorTimer, advance));
++}
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++_MapCommandBuffer(
++ IN gckKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i;
++ gctUINT32 physical;
++ gckMMU mmu;
++
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu));
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Kernel->os,
++ Kernel->command->queues[i].logical,
++ &physical
++ ));
++
++ gcmkONERROR(gckMMU_FlatMapping(mmu, physical));
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++#endif
++
++void
++_DumpDriverConfigure(
++ IN gckKERNEL Kernel
++ )
++{
++ gcmkPRINT_N(0, "**************************\n");
++ gcmkPRINT_N(0, "*** GPU DRV CONFIG ***\n");
++ gcmkPRINT_N(0, "**************************\n");
++
++ gcmkPRINT("Galcore version %d.%d.%d.%d\n",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);
++
++ gckOS_DumpParam();
++}
++
++void
++_DumpState(
++ IN gckKERNEL Kernel
++ )
++{
++ /* Dump GPU Debug registers. */
++ gcmkVERIFY_OK(gckHARDWARE_DumpGPUState(Kernel->hardware));
++
++ if (Kernel->virtualCommandBuffer)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_DumpExecutingBuffer(Kernel->command));
++ }
++
++ /* Dump Pending event. */
++ gcmkVERIFY_OK(gckEVENT_Dump(Kernel->eventObj));
++
++ /* Dump Process DB. */
++ gcmkVERIFY_OK(gckKERNEL_DumpProcessDB(Kernel));
++
++#if gcdRECORD_COMMAND
++ /* Dump record. */
++ gckRECORDER_Dump(Kernel->command->recorder);
++#endif
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** IN gckDB SharedDB,
++** Pointer to a shared DB.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ )
++{
++ gckKERNEL kernel = gcvNULL;
++ gceSTATUS status;
++ gctSIZE_T i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ /* Allocate the gckKERNEL object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckKERNEL),
++ &pointer));
++
++ kernel = pointer;
++
++ /* Zero the object pointers. */
++ kernel->hardware = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->eventObj = gcvNULL;
++ kernel->mmu = gcvNULL;
++#if gcdDVFS
++ kernel->dvfs = gcvNULL;
++#endif
++ kernel->monitorTimer = gcvNULL;
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->core = Core;
++
++ if (SharedDB == gcvNULL)
++ {
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckDB),
++ &pointer));
++
++ kernel->db = pointer;
++ kernel->dbCreated = gcvTRUE;
++ kernel->db->freeDatabase = gcvNULL;
++ kernel->db->freeRecord = gcvNULL;
++ kernel->db->dbMutex = gcvNULL;
++ kernel->db->lastDatabase = gcvNULL;
++ kernel->db->idleTime = 0;
++ kernel->db->lastIdle = 0;
++ kernel->db->lastSlowdown = 0;
++
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ kernel->db->db[i] = gcvNULL;
++ }
++
++ /* Construct a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->dbMutex));
++
++ /* Construct a video memory name database. */
++ gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->nameDatabase));
++
++ /* Construct a video memory name database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->nameDatabaseMutex));
++
++ /* Construct a pointer name database. */
++ gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->pointerDatabase));
++
++ /* Construct a pointer name database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->pointerDatabaseMutex));
++ }
++ else
++ {
++ kernel->db = SharedDB;
++ kernel->dbCreated = gcvFALSE;
++ }
++
++ for (i = 0; i < gcmCOUNTOF(kernel->timers); ++i)
++ {
++ kernel->timers[i].startTime = 0;
++ kernel->timers[i].stopTime = 0;
++ }
++
++ /* Save context. */
++ kernel->context = Context;
++
++ /* Construct atom holding number of clients. */
++ kernel->atomClients = gcvNULL;
++ gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients));
++
++#if gcdENABLE_VG
++ kernel->vg = gcvNULL;
++
++ if (Core == gcvCORE_VG)
++ {
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckVGKERNEL_Construct(Os, Context, kernel, &kernel->vg));
++
++ kernel->timeOut = gcdGPU_TIMEOUT;
++ }
++ else
++#endif
++ {
++ /* Construct the gckHARDWARE object. */
++ gcmkONERROR(
++ gckHARDWARE_Construct(Os, kernel->core, &kernel->hardware));
++
++ /* Set pointer to gckKERNEL object in gckHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ kernel->timeOut = kernel->hardware->type == gcvHARDWARE_2D
++ ? gcdGPU_2D_TIMEOUT
++ : gcdGPU_TIMEOUT
++ ;
++
++ /* Initialize virtual command buffer. */
++ /* TODO: Remove platform limitation after porting. */
++#if (defined(LINUX) || defined(__QNXNTO__))
++ kernel->virtualCommandBuffer = gcvTRUE;
++#else
++ kernel->virtualCommandBuffer = gcvFALSE;
++#endif
++
++#if gcdSECURITY
++ kernel->virtualCommandBuffer = gcvFALSE;
++#endif
++
++ /* Construct the gckCOMMAND object. */
++ gcmkONERROR(
++ gckCOMMAND_Construct(kernel, &kernel->command));
++
++ /* Construct the gckEVENT object. */
++ gcmkONERROR(
++ gckEVENT_Construct(kernel, &kernel->eventObj));
++
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));
++
++ gcmkVERIFY_OK(gckOS_GetTime(&kernel->resetTimeStamp));
++
++ gcmkONERROR(gckHARDWARE_PrepareFunctions(kernel->hardware));
++
++ /* Initialize the hardware. */
++ gcmkONERROR(
++ gckHARDWARE_InitializeHardware(kernel->hardware));
++
++#if gcdDVFS
++ if (gckHARDWARE_IsFeatureAvailable(kernel->hardware,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING))
++ {
++ gcmkONERROR(gckDVFS_Construct(kernel->hardware, &kernel->dvfs));
++ gcmkONERROR(gckDVFS_Start(kernel->dvfs));
++ }
++#endif
++ }
++
++#if VIVANTE_PROFILER
++ /* Initialize profile setting */
++ kernel->profileEnable = gcvFALSE;
++ kernel->profileCleanRegister = gcvTRUE;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkONERROR(gckOS_CreateSyncTimeline(Os, &kernel->timeline));
++#endif
++
++ kernel->recovery = gcvTRUE;
++ kernel->stuckDump = 1;
++
++ kernel->virtualBufferHead =
++ kernel->virtualBufferTail = gcvNULL;
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Os, (gctPOINTER)&kernel->virtualBufferLock));
++
++#if gcdSECURITY
++ /* Connect to security service for this GPU. */
++ gcmkONERROR(gckKERNEL_SecurityOpen(kernel, kernel->core, &kernel->securityChannel));
++#endif
++
++#if gcdGPU_TIMEOUT && gcdINTERRUPT_STATISTIC
++ if (kernel->timeOut)
++ {
++ gcmkVERIFY_OK(gckOS_CreateTimer(
++ Os,
++ (gctTIMERFUNCTION)_MonitorTimerFunction,
++ (gctPOINTER)kernel,
++ &kernel->monitorTimer
++ ));
++
++ kernel->monitoring = gcvFALSE;
++
++ kernel->monitorTimerStop = gcvFALSE;
++
++ gcmkVERIFY_OK(gckOS_StartTimer(
++ Os,
++ kernel->monitorTimer,
++ 100
++ ));
++ }
++#endif
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (kernel != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Core != gcvCORE_VG)
++#endif
++ {
++ if (kernel->eventObj != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_Destroy(kernel->eventObj));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(kernel->hardware->os,
++ kernel->hardware->core,
++ gcvFALSE,
++ gcvFALSE));
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(kernel->hardware));
++ }
++ }
++
++ if (kernel->atomClients != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->atomClients));
++ }
++
++ if (kernel->dbCreated && kernel->db != gcvNULL)
++ {
++ if (kernel->db->dbMutex != gcvNULL)
++ {
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->db->dbMutex));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel->db));
++ }
++
++ if (kernel->virtualBufferLock != gcvNULL)
++ {
++ /* Destroy the virtual command buffer mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->virtualBufferLock));
++ }
++
++#if gcdDVFS
++ if (kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (kernel->timeline)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Os, kernel->timeline));
++ }
++#endif
++
++ if (kernel->monitorTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, kernel->monitorTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, kernel->monitorTimer));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel));
++ }
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy an gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ )
++{
++ gctSIZE_T i;
++ gcsDATABASE_PTR database, databaseNext;
++ gcsDATABASE_RECORD_PTR record, recordNext;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->debugMutex));
++#endif
++
++ /* Destroy the database. */
++ if (Kernel->dbCreated)
++ {
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ if (Kernel->db->db[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyProcessDB(Kernel, Kernel->db->db[i]->processID));
++ }
++ }
++
++ /* Free all databases. */
++ for (database = Kernel->db->freeDatabase;
++ database != gcvNULL;
++ database = databaseNext)
++ {
++ databaseNext = database->next;
++
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->counterMutex));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, database));
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->lastDatabase->counterMutex));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db->lastDatabase));
++ }
++
++ /* Free all database records. */
++ for (record = Kernel->db->freeRecord; record != gcvNULL; record = recordNext)
++ {
++ recordNext = record->next;
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Destroy video memory name database. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->nameDatabase));
++
++ /* Destroy video memory name database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->nameDatabaseMutex));
++
++
++ /* Destroy id-pointer database. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->pointerDatabase));
++
++ /* Destroy id-pointer database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++
++ /* Destroy the database. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db));
++
++ /* Notify stuck timer to quit. */
++ Kernel->monitorTimerStop = gcvTRUE;
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg)
++ {
++ gcmkVERIFY_OK(gckVGKERNEL_Destroy(Kernel->vg));
++ }
++ else
++#endif
++ {
++ /* Destroy the gckMMU object. */
++ gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu));
++
++ /* Destroy the gckCOMMAND object. */
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command));
++
++ /* Destroy the gckEVENT object. */
++ gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->eventObj));
++
++ /* Destroy the gckHARDWARE object. */
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware));
++ }
++
++ /* Destroy the client atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients));
++
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->virtualBufferLock));
++
++#if gcdDVFS
++ if (Kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(Kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(Kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Kernel->os, Kernel->timeline));
++#endif
++
++#if gcdSECURITY
++ gcmkVERIFY_OK(gckKERNEL_SecurityClose(Kernel->securityChannel));
++#endif
++
++ if (Kernel->monitorTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Kernel->os, Kernel->monitorTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Kernel->os, Kernel->monitorTimer));
++ }
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** _AllocateMemory
++**
++** Private function to walk all required memory pools to allocate the requested
++** amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 Flag,
++ OUT gctUINT32 * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++ gctINT loopCount;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctBOOL tileStatusInVirtual;
++ gctBOOL contiguous = gcvFALSE;
++ gctBOOL cacheable = gcvFALSE;
++ gctSIZE_T bytes = Bytes;
++ gctUINT32 handle = 0;
++ gceDATABASE_TYPE type;
++
++ gcmkHEADER_ARG("Kernel=0x%x *Pool=%d Bytes=%lu Alignment=%lu Type=%d",
++ Kernel, *Pool, Bytes, Alignment, Type);
++
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes != 0);
++
++ /* Get basic type. */
++ Type &= 0xFF;
++
++ /* Check flags. */
++ contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS;
++ cacheable = Flag & gcvALLOC_FLAG_CACHEABLE;
++
++AllocateMemory:
++
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_CONTIGUOUS:
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ default:
++ loopCount = 1;
++ break;
++ }
++
++ while (loopCount-- > 0)
++ {
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkONERROR(
++ gckVIDMEM_ConstructVirtual(Kernel, Flag | gcvALLOC_FLAG_NON_CONTIGUOUS, Bytes, &node));
++
++ bytes = node->Virtual.bytes;
++ node->Virtual.type = Type;
++
++ /* Success. */
++ break;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++#if gcdCONTIGUOUS_SIZE_LIMIT
++ if (Bytes > gcdCONTIGUOUS_SIZE_LIMIT && contiguous == gcvFALSE)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ }
++ else
++#endif
++ {
++ /* Create a gcuVIDMEM_NODE from contiguous memory. */
++ status = gckVIDMEM_ConstructVirtual(
++ Kernel,
++ Flag | gcvALLOC_FLAG_CONTIGUOUS,
++ Bytes,
++ &node);
++ }
++
++ if (gcmIS_SUCCESS(status))
++ {
++ bytes = node->Virtual.bytes;
++ node->Virtual.type = Type;
++
++ /* Memory allocated. */
++ break;
++ }
++ }
++
++ else
++ /* gcvPOOL_SYSTEM can't be cacheable. */
++ if (cacheable == gcvFALSE)
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Allocate memory. */
++#if defined(gcdLINEAR_SIZE_LIMIT)
++ /* 512 KB */
++ if (Bytes > gcdLINEAR_SIZE_LIMIT)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ }
++ else
++#endif
++ {
++ status = gckVIDMEM_AllocateLinear(Kernel,
++ videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ (*Pool == gcvPOOL_SYSTEM),
++ &node);
++ }
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Memory allocated. */
++ node->VidMem.pool = pool;
++ bytes = node->VidMem.bytes;
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++
++ else
++ if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++
++ else
++ if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to contiguous memory. */
++ pool = gcvPOOL_CONTIGUOUS;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg)
++ {
++ tileStatusInVirtual = gcvFALSE;
++ }
++ else
++#endif
++ {
++ tileStatusInVirtual =
++ gckHARDWARE_IsFeatureAvailable(Kernel->hardware,
++ gcvFEATURE_MC20);
++ }
++
++ if (Type == gcvSURF_TILE_STATUS && tileStatusInVirtual != gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (contiguous)
++ {
++ break;
++ }
++
++ /* Advance to virtual memory. */
++ pool = gcvPOOL_VIRTUAL;
++ }
++
++ else
++ {
++ /* Out of pools. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ if (contiguous)
++ {
++ /* Broadcast OOM message. */
++ status = gckOS_Broadcast(Kernel->os, Kernel->hardware, gcvBROADCAST_OUT_OF_MEMORY);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Get some memory. */
++ gckOS_Delay(gcvNULL, 1);
++ goto AllocateMemory;
++ }
++ }
++
++ /* Nothing allocated. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Allocate handle for this video memory. */
++ gcmkONERROR(
++ gckVIDMEM_NODE_Allocate(Kernel, node, Type, pool, &handle));
++
++ /* Return node and pool used for allocation. */
++ *Node = handle;
++ *Pool = pool;
++
++ /* Encode surface type and pool to database type. */
++ type = gcvDB_VIDEO_MEMORY
++ | (Type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
++ | (pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);
++
++ /* Record in process db. */
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ ProcessID,
++ type,
++ gcmINT2PTR(handle),
++ gcvNULL,
++ bytes));
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Pool=%d *Node=0x%x", *Pool, *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (handle)
++ {
++ /* Destroy handle allocated. */
++ gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, handle));
++ }
++
++ if (node)
++ {
++ /* Free video memory allocated. */
++ gcmkVERIFY_OK(gckVIDMEM_Free(Kernel, node));
++ }
++
++ /* Some cases (e.g. Chrome WebGL tests) request so much memory that the oom_killer is invoked
++ * and kills the case. The user would rather have the case itself handle the condition than crash,
++ * so report the out_of_memory condition back to the case. */
++ if ( status == gcvSTATUS_OUT_OF_MEMORY && (Flag & gcvALLOC_FLAG_MEMLIMIT) )
++ gcmkPRINT("The running case is out_of_memory");
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_ReleaseVideoMemory
++**
++** Release handle of a video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID of current process.
++**
++** gctUINT32 Handle
++** Handle of video memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_ReleaseVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE nodeObject;
++ gceDATABASE_TYPE type;
++
++ gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d Handle=%d",
++ Kernel, ProcessID, Handle);
++
++ gcmkONERROR(
++ gckVIDMEM_HANDLE_Lookup(Kernel, ProcessID, Handle, &nodeObject));
++
++ type = gcvDB_VIDEO_MEMORY
++ | (nodeObject->type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
++ | (nodeObject->pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);
++
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ ProcessID,
++ type,
++ gcmINT2PTR(Handle)));
++
++ gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, Handle);
++
++ gckVIDMEM_NODE_Dereference(Kernel, nodeObject);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_LockVideoMemory
++**
++** Lock a video memory node. It will generate a cpu virtual address used
++** by software and a GPU address used by GPU.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gceCORE Core
++** GPU to which video memory is locked.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS
++gckKERNEL_LockVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE nodeObject = gcvNULL;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctBOOL locked = gcvFALSE;
++ gctBOOL asynchronous = gcvFALSE;
++#ifndef __QNXNTO__
++ gctPOINTER pointer = gcvNULL;
++#endif
++
++ gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d",
++ Kernel, ProcessID);
++
++ gcmkONERROR(
++ gckVIDMEM_HANDLE_LookupAndReference(Kernel,
++ Interface->u.LockVideoMemory.node,
++ &nodeObject));
++
++ node = nodeObject->node;
++
++ Interface->u.LockVideoMemory.gid = 0;
++
++ /* Lock video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Lock(Kernel,
++ nodeObject,
++ Interface->u.LockVideoMemory.cacheable,
++ &Interface->u.LockVideoMemory.address,
++ &Interface->u.LockVideoMemory.gid,
++ &Interface->u.LockVideoMemory.physicalAddress));
++
++ locked = gcvTRUE;
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Map video memory address into user space. */
++#ifdef __QNXNTO__
++ if (node->VidMem.logical == gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ ProcessID,
++ node->VidMem.bytes,
++ &node->VidMem.logical));
++ }
++ gcmkASSERT(node->VidMem.logical != gcvNULL);
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical);
++#else
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemoryEx(Kernel,
++ Core,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ &pointer));
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(pointer);
++#endif
++ }
++ else
++ {
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckVIDMEM_Node_Lock(
++ Kernel,
++ nodeObject,
++ &Interface->u.LockVideoMemory.address
++ ));
++#endif
++
++
++#if gcdSECURE_USER
++ /* Return logical address as physical address. */
++ Interface->u.LockVideoMemory.address =
++ (gctUINT32)(Interface->u.LockVideoMemory.memory);
++#endif
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ ProcessID, gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmINT2PTR(Interface->u.LockVideoMemory.node),
++ gcvNULL,
++ 0));
++
++ gckVIDMEM_HANDLE_Reference(
++ Kernel, ProcessID, (gctUINT32)Interface->u.LockVideoMemory.node);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (locked)
++ {
++ /* Roll back the lock. */
++ gcmkVERIFY_OK(gckVIDMEM_Unlock(Kernel,
++ nodeObject,
++ gcvSURF_TYPE_UNKNOWN,
++ &asynchronous));
++
++ if (gcvTRUE == asynchronous)
++ {
++ /* Bottom Half */
++ gcmkVERIFY_OK(gckVIDMEM_Unlock(Kernel,
++ nodeObject,
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++ }
++ }
++
++ if (nodeObject != gcvNULL)
++ {
++ gckVIDMEM_NODE_Dereference(Kernel, nodeObject);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_UnlockVideoMemory
++**
++** Unlock a video memory node.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID of current process.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS
++gckKERNEL_UnlockVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE nodeObject;
++ gcuVIDMEM_NODE_PTR node;
++
++ gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d",
++ Kernel, ProcessID);
++
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
++ Kernel,
++ ProcessID,
++ (gctUINT32)Interface->u.UnlockVideoMemory.node,
++ &nodeObject));
++
++ node = nodeObject->node;
++
++ /* Unlock video memory. */
++#if gcdSECURE_USER
++ /* Save node information before it disappears. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock video memory. */
++ gcmkONERROR(gckVIDMEM_Unlock(
++ Kernel,
++ nodeObject,
++ Interface->u.UnlockVideoMemory.type,
++ &Interface->u.UnlockVideoMemory.asynchroneous));
++
++#if gcdSECURE_USER
++ /* Flush the translation cache for virtual surfaces. */
++ if (logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gctINT i;
++ gcuDATABASE_INFO tmp;
++
++ gceDATABASE_TYPE type[3] = {
++ gcvDB_VIDEO_MEMORY | (gcvPOOL_SYSTEM << gcdDB_VIDEO_MEMORY_POOL_SHIFT),
++ gcvDB_VIDEO_MEMORY | (gcvPOOL_CONTIGUOUS << gcdDB_VIDEO_MEMORY_POOL_SHIFT),
++ gcvDB_VIDEO_MEMORY | (gcvPOOL_VIRTUAL << gcdDB_VIDEO_MEMORY_POOL_SHIFT),
++ };
++
++ gcmkHEADER();
++
++ /* Query video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_VIDEO_MEMORY,
++ &Interface->u.Database.vidMem));
++
++ /* Query non-paged memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_NON_PAGED,
++ &Interface->u.Database.nonPaged));
++
++ /* Query contiguous memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_CONTIGUOUS,
++ &Interface->u.Database.contiguous));
++
++ /* Query GPU idle time. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_IDLE,
++ &Interface->u.Database.gpuIdle));
++ for (i = 0; i < 3; i++)
++ {
++ /* Query each video memory pool. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ type[i],
++ &Interface->u.Database.vidMemPool[i]));
++ }
++
++ /* Query virtual command buffer pool. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_COMMAND_BUFFER,
++ &tmp));
++
++ Interface->u.Database.vidMemPool[2].counters.bytes += tmp.counters.bytes;
++ Interface->u.Database.vidMemPool[2].counters.maxBytes += tmp.counters.maxBytes;
++ Interface->u.Database.vidMemPool[2].counters.totalBytes += tmp.counters.totalBytes;
++
++ Interface->u.Database.vidMem.counters.bytes += tmp.counters.bytes;
++ Interface->u.Database.vidMem.counters.maxBytes += tmp.counters.maxBytes;
++ Interface->u.Database.vidMem.counters.totalBytes += tmp.counters.totalBytes;
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gckKERNEL_DumpVidMemUsage(Kernel, Interface->u.Database.processID);
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_ConfigPowerManagement(
++ IN gckKERNEL Kernel,
++ IN OUT gcsHAL_INTERFACE * Interface
++)
++{
++ gceSTATUS status;
++ gctBOOL enable = Interface->u.ConfigPowerManagement.enable;
++
++ gcmkHEADER();
++
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(Kernel->hardware, enable));
++
++ if (enable == gcvTRUE)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Kernel->hardware, gcvPOWER_ON));
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL FromUser
++** whether the call is from the user space.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctSIZE_T bytes;
++ gctPOINTER logical = gcvNULL;
++ gctPOINTER info = gcvNULL;
++#if (gcdENABLE_3D || gcdENABLE_2D)
++ gckCONTEXT context = gcvNULL;
++#endif
++ gckKERNEL kernel = Kernel;
++ gctUINT32 address;
++ gctUINT32 processID;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++ gctPOINTER logical;
++#endif
++ gctUINT32 paddr = gcvINVALID_ADDRESS;
++#if !USE_NEW_LINUX_SIGNAL
++ gctSIGNAL signal;
++#endif
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++
++ gckVIDMEM_NODE nodeObject;
++ gctBOOL powerMutexAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x FromUser=%d Interface=0x%x",
++ Kernel, FromUser, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Dispatching command %d (%s)",
++ Interface->command, _DispatchText[Interface->command]);
++#endif
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_AcquireMutex(Kernel->os, Kernel->debugMutex, gcvINFINITE);
++#endif
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++#endif
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkONERROR(
++ gckOS_GetBaseAddress(Kernel->os,
++ &Interface->u.GetBaseAddress.baseAddress));
++ break;
++
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkONERROR(
++ gckHARDWARE_QueryChipIdentity(
++ Kernel->hardware,
++ &Interface->u.QueryChipIdentity));
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.MapMemory.physical);
++
++ /* Map memory. */
++ gcmkONERROR(
++ gckKERNEL_MapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes,
++ &logical));
++
++ Interface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ logical,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes));
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.UnmapMemory.physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++
++ /* Unmap memory. */
++ gcmkONERROR(
++ gckKERNEL_UnmapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.UnmapMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++ break;
++
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateNonPagedMemory.bytes;
++
++ /* Allocate non-paged memory. */
++ gcmkONERROR(
++ gckOS_AllocateNonPagedMemory(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateNonPagedMemory.bytes = bytes;
++ Interface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateNonPagedMemory.physical),
++ bytes));
++ break;
++
++ case gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER:
++ bytes = (gctSIZE_T) Interface->u.AllocateVirtualCommandBuffer.bytes;
++
++ gcmkONERROR(
++ gckKERNEL_AllocateVirtualCommandBuffer(
++ Kernel,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateVirtualCommandBuffer.bytes = bytes;
++ Interface->u.AllocateVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateVirtualCommandBuffer.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_COMMAND_BUFFER,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateVirtualCommandBuffer.physical),
++ bytes));
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Free non-paged memory. */
++ gcmkONERROR(
++ gckOS_FreeNonPagedMemory(Kernel->os,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical),
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateContiguousMemory.bytes;
++
++ /* Allocate contiguous memory. */
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateContiguousMemory.bytes = bytes;
++ Interface->u.AllocateContiguousMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateContiguousMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ logical,
++ gcvTRUE,
++ &Interface->u.AllocateContiguousMemory.address));
++
++ gcmkVERIFY_OK(gckKERNEL_AddProcessDB(
++ Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateContiguousMemory.physical),
++ bytes));
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++
++ /* Free contiguous memory. */
++ gcmkONERROR(
++ gckOS_FreeContiguous(Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeContiguousMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ /* Allocate memory. */
++ gcmkONERROR(
++ gckKERNEL_AllocateLinearMemory(Kernel, processID,
++ &Interface->u.AllocateLinearVideoMemory.pool,
++ Interface->u.AllocateLinearVideoMemory.bytes,
++ Interface->u.AllocateLinearVideoMemory.alignment,
++ Interface->u.AllocateLinearVideoMemory.type,
++ Interface->u.AllocateLinearVideoMemory.flag,
++ &Interface->u.AllocateLinearVideoMemory.node));
++ break;
++
++ case gcvHAL_RELEASE_VIDEO_MEMORY:
++ /* Release video memory. */
++ gcmkONERROR(gckKERNEL_ReleaseVideoMemory(
++ Kernel, processID,
++ (gctUINT32)Interface->u.ReleaseVideoMemory.node
++ ));
++ break;
++
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ /* Lock video memory. */
++ gcmkONERROR(gckKERNEL_LockVideoMemory(Kernel, Kernel->core, processID, FromUser, Interface));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ /* Unlock video memory. */
++ gcmkONERROR(gckKERNEL_UnlockVideoMemory(Kernel, processID, Interface));
++ break;
++
++ case gcvHAL_EVENT_COMMIT:
++ /* Commit an event queue. */
++#if gcdMULTI_GPU
++ if (Interface->u.Event.gpuMode == gcvMULTI_GPU_MODE_INDEPENDENT)
++ {
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue),
++ Interface->u.Event.chipEnable));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue),
++ gcvCORE_3D_ALL_MASK));
++ }
++#else
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue)));
++#endif
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++#if gcdMULTI_GPU
++ if (Interface->u.Commit.gpuMode == gcvMULTI_GPU_MODE_INDEPENDENT)
++ {
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID,
++ Interface->u.Commit.chipEnable));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID,
++ gcvCORE_3D_ALL_MASK));
++ }
++#else
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID));
++#endif
++
++ break;
++
++ case gcvHAL_STALL:
++ /* Stall the command queue. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckCOMMAND_Stall(Kernel->command, gcvFALSE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckCOMMAND_Stall(Kernel->command, gcvFALSE));
++#endif
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkONERROR(
++ gckOS_MapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ Interface->u.MapUserMemory.physical,
++ (gctSIZE_T) Interface->u.MapUserMemory.size,
++ &info,
++ &Interface->u.MapUserMemory.address));
++
++ Interface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.MapUserMemory.info),
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ (gctSIZE_T) Interface->u.MapUserMemory.size));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ address = Interface->u.UnmapUserMemory.address;
++ info = gcmNAME_TO_PTR(Interface->u.UnmapUserMemory.info);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.UnmapUserMemory.info)));
++ /* Unmap user memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) Interface->u.UnmapUserMemory.size,
++ info,
++ address));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) Interface->u.UnmapUserMemory.size));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.UnmapUserMemory.info);
++ break;
++
++#if !USE_NEW_LINUX_SIGNAL
++ case gcvHAL_USER_SIGNAL:
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkONERROR(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkONERROR(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++
++ break;
++
++ case gcvUSER_SIGNAL_MAP:
++ gcmkONERROR(
++ gckOS_MapSignal(Kernel->os,
++ (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id,
++ (gctHANDLE)(gctUINTPTR_T)processID,
++ &signal));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_UNMAP:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ break;
++#endif
++
++ case gcvHAL_SET_POWER_MANAGEMENT_STATE:
++ /* Set the power management state. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(
++ Kernel->hardware,
++ Interface->u.SetPowerManagement.state));
++ break;
++
++ case gcvHAL_QUERY_POWER_MANAGEMENT_STATE:
++ /* Chip is not idle. */
++ Interface->u.QueryPowerManagement.isIdle = gcvFALSE;
++
++ /* Query the power management state. */
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
++ Kernel->hardware,
++ &Interface->u.QueryPowerManagement.state));
++
++ /* Query the idle state. */
++ gcmkONERROR(
++ gckHARDWARE_QueryIdle(Kernel->hardware,
++ &Interface->u.QueryPowerManagement.isIdle));
++ break;
++
++ case gcvHAL_READ_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE));
++ powerMutexAcquired = gcvTRUE;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Read a register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(
++ Kernel->os,
++ Kernel->core,
++ Interface->u.ReadRegisterData.address,
++ &Interface->u.ReadRegisterData.data));
++ }
++ else
++ {
++ /* Chip is in power-state. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ powerMutexAcquired = gcvFALSE;
++ }
++#else
++ /* No access from user land to read registers. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++#if gcdMULTI_GPU
++ case gcvHAL_READ_REGISTER_EX:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++ gctUINT32 coreId = 0;
++ gctUINT32 coreSelect = Interface->u.ReadRegisterDataEx.coreSelect;
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE));
++ powerMutexAcquired = gcvTRUE;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ for (; coreSelect != 0; coreSelect >>= 1, coreId++)
++ {
++ if (coreSelect & 1UL)
++ {
++ /* Read a register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterByCoreId(
++ Kernel->os,
++ Kernel->core,
++ coreId,
++ Interface->u.ReadRegisterDataEx.address,
++ &Interface->u.ReadRegisterDataEx.data[coreId]));
++ }
++ }
++ }
++ else
++ {
++ for (coreId = 0; coreId < gcdMULTI_GPU; coreId++)
++ {
++ /* Chip is in power-state. */
++ Interface->u.ReadRegisterDataEx.data[coreId] = 0;
++ }
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ powerMutexAcquired = gcvFALSE;
++ }
++#else
++ gctUINT32 coreId;
++
++ /* No access from user land to read registers. */
++ for (coreId = 0; coreId < gcdMULTI_GPU; coreId++)
++ {
++ Interface->u.ReadRegisterDataEx.data[coreId] = 0;
++ }
++
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_WRITE_REGISTER_EX:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++ gctUINT32 coreId = 0;
++ gctUINT32 coreSelect = Interface->u.WriteRegisterDataEx.coreSelect;
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE));
++ powerMutexAcquired = gcvTRUE;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ for (; coreSelect != 0; coreSelect >>= 1, coreId++)
++ {
++ if (coreSelect & 1UL)
++ {
++ /* Write a register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterByCoreId(
++ Kernel->os,
++ Kernel->core,
++ coreId,
++ Interface->u.WriteRegisterDataEx.address,
++ Interface->u.WriteRegisterDataEx.data[coreId]));
++ }
++ }
++ }
++ else
++ {
++ /* Chip is in power-state. */
++ for (coreId = 0; coreId < gcdMULTI_GPU; coreId++)
++ {
++ Interface->u.WriteRegisterDataEx.data[coreId] = 0;
++ }
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ powerMutexAcquired = gcvFALSE;
++ }
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++#endif
++
++ case gcvHAL_WRITE_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE));
++ powerMutexAcquired = gcvTRUE;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Write a register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Kernel->os,
++ Kernel->core,
++ Interface->u.WriteRegisterData.address,
++ Interface->u.WriteRegisterData.data));
++ }
++ else
++ {
++ /* Chip is in power-state. */
++ Interface->u.WriteRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ powerMutexAcquired = gcvFALSE;
++ }
++#else
++ /* No access from user land to write registers. */
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_READ_ALL_PROFILE_REGISTERS:
++#if VIVANTE_PROFILER && VIVANTE_PROFILER_CONTEXT
++ /* Read profile data according to the context. */
++ gcmkONERROR(
++ gckHARDWARE_QueryContextProfile(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ gcmNAME_TO_PTR(Interface->u.RegisterProfileData.context),
++ &Interface->u.RegisterProfileData.counters));
++#elif VIVANTE_PROFILER
++ /* Read all 3D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_QueryProfileRegisters(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ &Interface->u.RegisterProfileData.counters));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_PROFILE_REGISTERS_2D:
++#if VIVANTE_PROFILER
++ /* Read all 2D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_ProfileEngine2D(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.RegisterProfileData2D.hwProfile2D)));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_GET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Get profile setting */
++ Interface->u.GetProfileSetting.enable = Kernel->profileEnable;
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_SET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Set profile setting */
++ if(Kernel->hardware->gpuProfiler)
++ {
++ Kernel->profileEnable = Interface->u.SetProfileSetting.enable;
++#if VIVANTE_PROFILER_NEW
++ if (Kernel->profileEnable)
++ gckHARDWARE_InitProfiler(Kernel->hardware);
++#endif
++ }
++ else
++ {
++ status = gcvSTATUS_NOT_SUPPORTED;
++ break;
++ }
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++
++#if VIVANTE_PROFILER_PERDRAW
++ case gcvHAL_READ_PROFILER_REGISTER_SETTING:
++ #if VIVANTE_PROFILER
++ Kernel->profileCleanRegister = Interface->u.SetProfilerRegisterClear.bclear;
++ #endif
++ status = gcvSTATUS_OK;
++ break;
++#endif
++
++ case gcvHAL_QUERY_KERNEL_SETTINGS:
++ /* Get kernel settings. */
++ gcmkONERROR(
++ gckKERNEL_QuerySettings(Kernel,
++ &Interface->u.QueryKernelSettings.settings));
++ break;
++
++ case gcvHAL_RESET:
++ /* Reset the hardware. */
++ gcmkONERROR(
++ gckHARDWARE_Reset(Kernel->hardware));
++ break;
++
++ case gcvHAL_DEBUG:
++ /* Set debug level and zones. */
++ if (Interface->u.Debug.set)
++ {
++ gckOS_SetDebugLevel(Interface->u.Debug.level);
++ gckOS_SetDebugZones(Interface->u.Debug.zones,
++ Interface->u.Debug.enable);
++ }
++
++ if (Interface->u.Debug.message[0] != '\0')
++ {
++ /* Print a message to the debugger. */
++ if (Interface->u.Debug.type == gcvMESSAGE_TEXT)
++ {
++ gckOS_CopyPrint(Interface->u.Debug.message);
++ }
++ else
++ {
++ gckOS_DumpBuffer(Kernel->os,
++ Interface->u.Debug.message,
++ Interface->u.Debug.messageSize,
++ gceDUMP_BUFFER_FROM_USER,
++ gcvTRUE);
++ }
++ }
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_DUMP_GPU_STATE:
++ {
++ gceCHIPPOWERSTATE power;
++
++ _DumpDriverConfigure(Kernel);
++
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
++ Kernel->hardware,
++ &power
++ ));
++
++ if (power == gcvPOWER_ON)
++ {
++ Interface->u.ReadRegisterData.data = 1;
++
++ _DumpState(Kernel);
++ }
++ else
++ {
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++
++ gcmkPRINT("[galcore]: Can't dump state if GPU isn't POWER ON.");
++ }
++ }
++ break;
++
++ case gcvHAL_DUMP_EVENT:
++ break;
++
++ case gcvHAL_CACHE:
++
++ logical = gcmUINT64_TO_PTR(Interface->u.Cache.logical);
++
++ if (Interface->u.Cache.node)
++ {
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
++ Kernel,
++ processID,
++ Interface->u.Cache.node,
++ &nodeObject));
++
++ if (nodeObject->node->VidMem.memory->object.type == gcvOBJ_VIDMEM
++ || nodeObject->node->Virtual.contiguous
++ )
++ {
++ /* If memory is contiguous, get physical address. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Kernel->os, logical, (gctUINT32*)&paddr));
++ }
++ }
++
++ bytes = (gctSIZE_T) Interface->u.Cache.bytes;
++ switch(Interface->u.Cache.operation)
++ {
++ case gcvCACHE_FLUSH:
++ /* Clean and invalidate the cache. */
++ status = gckOS_CacheFlush(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_CLEAN:
++ /* Clean the cache. */
++ status = gckOS_CacheClean(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_INVALIDATE:
++ /* Invalidate the cache. */
++ status = gckOS_CacheInvalidate(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++
++ case gcvCACHE_MEMORY_BARRIER:
++ status = gckOS_MemoryBarrier(Kernel->os,
++ logical);
++ break;
++ default:
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ /* Check for invalid timer. */
++ if ((Interface->u.TimeStamp.timer >= gcmCOUNTOF(Kernel->timers))
++ || (Interface->u.TimeStamp.request != 2))
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Return timer results and reset timer. */
++ {
++ gcsTIMER_PTR timer = &(Kernel->timers[Interface->u.TimeStamp.timer]);
++ gctUINT64 timeDelta = 0;
++
++ if (timer->stopTime < timer->startTime )
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ timeDelta = timer->stopTime - timer->startTime;
++
++ /* Check truncation overflow. */
++ Interface->u.TimeStamp.timeDelta = (gctINT32) timeDelta;
++ /*bit0~bit30 is available*/
++ if (timeDelta>>31)
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ break;
++
++ case gcvHAL_DATABASE:
++ gcmkONERROR(gckKERNEL_QueryDatabase(Kernel, processID, Interface));
++ break;
++
++ case gcvHAL_VERSION:
++ Interface->u.Version.major = gcvVERSION_MAJOR;
++ Interface->u.Version.minor = gcvVERSION_MINOR;
++ Interface->u.Version.patch = gcvVERSION_PATCH;
++ Interface->u.Version.build = gcvVERSION_BUILD;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "KERNEL version %d.%d.%d build %u %s %s",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH,
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME);
++#endif
++ break;
++
++ case gcvHAL_CHIP_INFO:
++ /* Only if not support multi-core */
++ Interface->u.ChipInfo.count = 1;
++ Interface->u.ChipInfo.types[0] = Kernel->hardware->type;
++ break;
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++ case gcvHAL_ATTACH:
++ /* Attach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Attach(Kernel->command,
++ &context,
++ &bytes,
++ processID));
++
++ Interface->u.Attach.stateCount = bytes;
++ Interface->u.Attach.context = gcmPTR_TO_NAME(context);
++
++ if (Interface->u.Attach.map == gcvTRUE)
++ {
++ gcmkVERIFY_OK(
++ gckCONTEXT_MapBuffer(context,
++ Interface->u.Attach.physicals,
++ Interface->u.Attach.logicals,
++ &Interface->u.Attach.bytes));
++ }
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Attach.context),
++ gcvNULL,
++ 0));
++ break;
++#endif
++
++ case gcvHAL_DETACH:
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Detach.context)));
++
++ /* Detach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Detach(Kernel->command,
++ gcmNAME_TO_PTR(Interface->u.Detach.context)));
++
++ gcmRELEASE_NAME(Interface->u.Detach.context);
++ break;
++
++ case gcvHAL_COMPOSE:
++ Interface->u.Compose.physical = gcmPTR_TO_UINT64(gcmNAME_TO_PTR(Interface->u.Compose.physical));
++ /* Start composition. */
++ gcmkONERROR(
++ gckEVENT_Compose(Kernel->eventObj,
++ &Interface->u.Compose));
++ break;
++
++ case gcvHAL_SET_TIMEOUT:
++ /* set timeOut value from user */
++ gckKERNEL_SetTimeOut(Kernel, Interface->u.SetTimeOut.timeOut);
++ break;
++
++ case gcvHAL_GET_FRAME_INFO:
++ gcmkONERROR(gckHARDWARE_GetFrameInfo(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.GetFrameInfo.frameInfo)));
++ break;
++
++ case gcvHAL_SET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_SetFscaleValue(Kernel->hardware,
++ Interface->u.SetFscaleValue.value);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++ case gcvHAL_GET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_GetFscaleValue(Kernel->hardware,
++ &Interface->u.GetFscaleValue.value,
++ &Interface->u.GetFscaleValue.minValue,
++ &Interface->u.GetFscaleValue.maxValue);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_NAME_VIDEO_MEMORY:
++ gcmkONERROR(gckVIDMEM_NODE_Name(Kernel,
++ Interface->u.NameVideoMemory.handle,
++ &Interface->u.NameVideoMemory.name));
++ break;
++
++ case gcvHAL_IMPORT_VIDEO_MEMORY:
++ gcmkONERROR(gckVIDMEM_NODE_Import(Kernel,
++ Interface->u.ImportVideoMemory.name,
++ &Interface->u.ImportVideoMemory.handle));
++
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ gcmINT2PTR(Interface->u.ImportVideoMemory.handle),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_GET_VIDEO_MEMORY_FD:
++ gcmkONERROR(gckVIDMEM_NODE_GetFd(
++ Kernel,
++ Interface->u.GetVideoMemoryFd.handle,
++ &Interface->u.GetVideoMemoryFd.fd
++ ));
++
++ /* No need to add it to processDB because OS will release all fds when
++ ** process quits.
++ */
++ break;
++
++ case gcvHAL_QUERY_RESET_TIME_STAMP:
++ Interface->u.QueryResetTimeStamp.timeStamp = Kernel->resetTimeStamp;
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)gcmNAME_TO_PTR(Interface->u.FreeVirtualCommandBuffer.physical);
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID,
++ gcvDB_COMMAND_BUFFER,
++ gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
++
++ gcmkONERROR(gckOS_DestroyUserVirtualMapping(
++ Kernel->os,
++ buffer->physical,
++ (gctSIZE_T)Interface->u.FreeVirtualCommandBuffer.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
++
++ gcmkONERROR(gckKERNEL_DestroyVirtualCommandBuffer(
++ Kernel,
++ (gctSIZE_T)Interface->u.FreeVirtualCommandBuffer.bytes,
++ (gctPHYS_ADDR)buffer,
++ gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
++
++ gcmRELEASE_NAME(Interface->u.FreeVirtualCommandBuffer.physical);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ switch (Interface->u.SyncPoint.command)
++ {
++ case gcvSYNC_POINT_CREATE:
++ gcmkONERROR(gckOS_CreateSyncPoint(Kernel->os, &syncPoint));
++
++ Interface->u.SyncPoint.syncPoint = gcmPTR_TO_UINT64(syncPoint);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSYNC_POINT_DESTROY:
++ syncPoint = gcmUINT64_TO_PTR(Interface->u.SyncPoint.syncPoint);
++
++ gcmkONERROR(gckOS_DestroySyncPoint(Kernel->os, syncPoint));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++
++ case gcvHAL_CREATE_NATIVE_FENCE:
++ {
++ gctINT fenceFD;
++ gctSYNC_POINT syncPoint =
++ gcmUINT64_TO_PTR(Interface->u.CreateNativeFence.syncPoint);
++
++ gcmkONERROR(
++ gckOS_CreateNativeFence(Kernel->os,
++ Kernel->timeline,
++ syncPoint,
++ &fenceFD));
++
++ Interface->u.CreateNativeFence.fenceFD = fenceFD;
++ }
++ break;
++#endif
++
++ case gcvHAL_SHBUF:
++ {
++ gctSHBUF shBuf;
++ gctPOINTER uData;
++ gctUINT32 bytes;
++
++ switch (Interface->u.ShBuf.command)
++ {
++ case gcvSHBUF_CREATE:
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Create. */
++ gcmkONERROR(gckKERNEL_CreateShBuffer(Kernel, bytes, &shBuf));
++
++ Interface->u.ShBuf.id = gcmPTR_TO_UINT64(shBuf);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSHBUF_DESTROY:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++
++ /* Check db first to avoid illegal destroy in the process. */
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf));
++
++ gcmkONERROR(gckKERNEL_DestroyShBuffer(Kernel, shBuf));
++ break;
++
++ case gcvSHBUF_MAP:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++
++ /* Map for current process access. */
++ gcmkONERROR(gckKERNEL_MapShBuffer(Kernel, shBuf));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSHBUF_WRITE:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++ uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data);
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Write. */
++ gcmkONERROR(
++ gckKERNEL_WriteShBuffer(Kernel, shBuf, uData, bytes));
++ break;
++
++ case gcvSHBUF_READ:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++ uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data);
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Read. */
++ gcmkONERROR(
++ gckKERNEL_ReadShBuffer(Kernel,
++ shBuf,
++ uData,
++ bytes,
++ &bytes));
++
++ /* Return copied size. */
++ Interface->u.ShBuf.bytes = bytes;
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++
++ case gcvHAL_CONFIG_POWER_MANAGEMENT:
++ gcmkONERROR(gckKERNEL_ConfigPowerManagement(Kernel, Interface));
++ break;
++
++ default:
++ /* Invalid command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++OnError:
++ /* Save status. */
++ Interface->status = status;
++
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_ReleaseMutex(Kernel->os, Kernel->debugMutex);
++#endif
++
++ if (powerMutexAcquired == gcvTRUE)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AttachProcess
++**
++** Attach or detach a process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL Attach
++** gcvTRUE if a new process gets attached or gcvFALSE when a process
++** gets detached.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d", Kernel, Attach);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_AttachProcessEx(Kernel, Attach, processID));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AttachProcessEx
++**
++** Attach or detach a process with the given PID. Can be paired with gckKERNEL_AttachProcess
++** provided the programmer is aware of the consequences.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL Attach
++** gcvTRUE if a new process gets attached or gcFALSE when a process
++** gets detatched.
++**
++** gctUINT32 PID
++** PID of the process to attach or detach.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ )
++{
++ gceSTATUS status;
++ gctINT32 old;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d PID=%d", Kernel, Attach, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (Attach)
++ {
++ /* Increment the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomIncrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 0)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_FIRST_PROCESS));
++ }
++ }
++
++ if (Kernel->dbCreated)
++ {
++ /* Create the process database. */
++ gcmkONERROR(gckKERNEL_CreateProcessDB(Kernel, PID));
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ /* Map kernel command buffer in the process's own MMU. */
++ gcmkONERROR(_MapCommandBuffer(Kernel));
++#endif
++ }
++ else
++ {
++ if (Kernel->dbCreated)
++ {
++ /* Clean up the process database. */
++ gcmkONERROR(gckKERNEL_DestroyProcessDB(Kernel, PID));
++
++ /* Save the last know process ID. */
++ Kernel->db->lastProcessID = PID;
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++#if gcdMULTI_GPU
++ status = gckEVENT_Submit(Kernel->eventObj, gcvTRUE, gcvFALSE, gcvCORE_3D_ALL_MASK);
++#else
++ status = gckEVENT_Submit(Kernel->eventObj, gcvTRUE, gcvFALSE);
++#endif
++
++ if (status == gcvSTATUS_INTERRUPTED && Kernel->eventObj->submitTimer)
++ {
++ gcmkONERROR(gckOS_StartTimer(Kernel->os,
++ Kernel->eventObj->submitTimer,
++ 1));
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++ }
++
++ /* Decrement the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomDecrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 1)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ /* Last client detached, switch to SUSPEND power state. */
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_LAST_PROCESS));
++ }
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++ static gctBOOL baseAddressValid = gcvFALSE;
++ static gctUINT32 baseAddress;
++ gctBOOL needBase;
++ gcskLOGICAL_CACHE_PTR slot;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x *Data=0x%x",
++ Kernel, Cache, gcmOPT_POINTER(Data));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (!baseAddressValid)
++ {
++ /* Get base address. */
++ gcmkONERROR(gckHARDWARE_GetBaseAddress(Kernel->hardware, &baseAddress));
++
++ baseAddressValid = gcvTRUE;
++ }
++
++ /* Does this state load need a base address? */
++ gcmkONERROR(gckHARDWARE_NeedBaseAddress(Kernel->hardware,
++ ((gctUINT32_PTR) Data)[-1],
++ &needBase));
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ {
++ gcskLOGICAL_CACHE_PTR next;
++ gctINT i;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next, next = gcvNULL;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next
++ )
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ /* Use the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ {
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR next = gcvNULL;
++ gcskLOGICAL_CACHE_PTR oldestSlot = gcvNULL;
++ slot = gcvNULL;
++
++ if (Cache->cacheIndex != gcvNULL)
++ {
++ /* Walk the cache forwards. */
++ for (i = 1, slot = Cache->cacheIndex;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++
++ if (next == gcvNULL)
++ {
++ /* Walk the cache backwards. */
++ for (slot = Cache->cacheIndex->prev;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->prev)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ if (Cache->cacheFree != 0)
++ {
++ slot = &Cache->cache[Cache->cacheFree];
++ gcmkASSERT(slot->logical == gcvNULL);
++
++ ++ Cache->cacheFree;
++ if (Cache->cacheFree >= gcmCOUNTOF(Cache->cache))
++ {
++ Cache->cacheFree = 0;
++ }
++ }
++ else
++ {
++ /* Use the oldest cache slot. */
++ gcmkASSERT(oldestSlot != gcvNULL);
++ slot = oldestSlot;
++
++ /* Unlink from the chain. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append to the end. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Save time stamp. */
++ slot->stamp = ++ Cache->cacheStamp;
++
++ /* Save current slot for next lookup. */
++ Cache->cacheIndex = slot;
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ {
++ gctINT i;
++ gctUINT32 data = gcmPTR2INT32(*Data);
++ gctUINT32 key, index;
++ gcskLOGICAL_CACHE_PTR hash;
++
++ /* Generate a hash key. */
++ key = (data >> 24) + (data >> 16) + (data >> 8) + data;
++ index = key % gcmCOUNTOF(Cache->hash);
++
++ /* Get the hash entry. */
++ hash = &Cache->hash[index];
++
++ for (slot = hash->nextHash, i = 0;
++ (slot != gcvNULL) && (i < gcdSECURE_CACHE_SLOTS);
++ slot = slot->nextHash, ++i
++ )
++ {
++ if (slot->logical == (*Data))
++ {
++ break;
++ }
++ }
++
++ if (slot == gcvNULL)
++ {
++ /* Grab from the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Unlink slot from any hash table it is part of. */
++ if (slot->prevHash != gcvNULL)
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++
++ if (hash->nextHash != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Hash Collision: logical=0x%x key=0x%08x",
++ *Data, key);
++ }
++
++ /* Insert the slot at the head of the hash list. */
++ slot->nextHash = hash->nextHash;
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot;
++ }
++ slot->prevHash = hash;
++ hash->nextHash = slot;
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ {
++ gctUINT32 index = (gcmPTR2INT32(*Data) % gcdSECURE_CACHE_SLOTS) + 1;
++
++ /* Get cache slot. */
++ slot = &Cache->cache[index];
++
++ /* Check for cache miss. */
++ if (slot->logical != *Data)
++ {
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++ }
++#endif
++
++ /* Return DMA address. */
++ *Data = gcmINT2PTR(slot->dma + (needBase ? baseAddress : 0));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR slot;
++ gctUINT8_PTR ptr;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x Logical=0x%x Bytes=%lu",
++ Kernel, Cache, Logical, Bytes);
++
++ /* Do we need to flush the entire cache? */
++ if (Logical == gcvNULL)
++ {
++ /* Clear all cache slots. */
++ for (i = 1; i <= gcdSECURE_CACHE_SLOTS; ++i)
++ {
++ Cache->cache[i].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ Cache->cache[i].nextHash = gcvNULL;
++ Cache->cache[i].prevHash = gcvNULL;
++#endif
++}
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero the hash table. */
++ for (i = 0; i < gcmCOUNTOF(Cache->hash); ++i)
++ {
++ Cache->hash[i].nextHash = gcvNULL;
++ }
++#endif
++
++ /* Reset the cache functionality. */
++ Cache->cacheIndex = gcvNULL;
++ Cache->cacheFree = 1;
++ Cache->cacheStamp = 0;
++ }
++
++ else
++ {
++ gctUINT8_PTR low = (gctUINT8_PTR) Logical;
++ gctUINT8_PTR high = low + Bytes;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ gcskLOGICAL_CACHE_PTR next;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next
++ )
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ gcskLOGICAL_CACHE_PTR next;
++
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Test if this slot is the current slot. */
++ if (slot == Cache->cacheIndex)
++ {
++ /* Move to next or previous slot. */
++ Cache->cacheIndex = (slot->next->logical != gcvNULL)
++ ? slot->next
++ : (slot->prev->logical != gcvNULL)
++ ? slot->prev
++ : gcvNULL;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Insert slot to head of cache. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->stamp = 0;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ gctINT j;
++ gcskLOGICAL_CACHE_PTR hash, next;
++
++ /* Walk all hash tables. */
++ for (i = 0, hash = Cache->hash;
++ i < gcmCOUNTOF(Cache->hash);
++ ++i, ++hash)
++ {
++ /* Walk all slots in the hash. */
++ for (j = 0, slot = hash->nextHash;
++ (j < gcdSECURE_CACHE_SLOTS) && (slot != gcvNULL);
++ ++j, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot from hash table. */
++ if (slot->prevHash == hash)
++ {
++ hash->nextHash = slot->nextHash;
++ }
++ else
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->prevHash = gcvNULL;
++ slot->nextHash = gcvNULL;
++ }
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ gctUINT32 index;
++
++ /* Loop while inside the range. */
++ for (i = 1; (low < high) && (i <= gcdSECURE_CACHE_SLOTS); ++i)
++ {
++ /* Get index into cache for this range. */
++ index = (gcmPTR2INT32(low) % gcdSECURE_CACHE_SLOTS) + 1;
++ slot = &Cache->cache[index];
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Remove entry from cache. */
++ slot->logical = gcvNULL;
++ }
++
++ /* Next block. */
++ low += gcdSECURE_CACHE_SLOTS;
++ }
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++ gckEVENT eventObj;
++ gckHARDWARE hardware;
++#if gcdSECURE_USER
++ gctUINT32 processID;
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gctUINT32 mask = 0;
++ gckCOMMAND command;
++ gckENTRYDATA data;
++ gctUINT32 i = 0, count = 0;
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 oldValue;
++#endif
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Validate the arguemnts. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Grab gckEVENT object. */
++ eventObj = Kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Grab gckCOMMAND object. */
++ command = Kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++#if gcdSECURE_USER
++ /* Flush the secure mapping cache. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++ gcmkONERROR(gckKERNEL_FlushTranslationCache(Kernel, cache, gcvNULL, 0));
++#endif
++
++ if (Kernel->stuckDump == gcdSTUCK_DUMP_MINIMAL)
++ {
++ gcmkPRINT("[galcore]: GPU[%d] hang, automatic recovery.", Kernel->core);
++ }
++ else
++ {
++ _DumpDriverConfigure(Kernel);
++ _DumpState(Kernel);
++ }
++
++ if (Kernel->recovery == gcvFALSE)
++ {
++ gcmkPRINT("[galcore]: Stop driver to keep scene.");
++
++ for (;;)
++ {
++ gckOS_Delay(Kernel->os, 10000);
++ }
++ }
++
++ /* Clear queue. */
++ do
++ {
++ status = gckENTRYQUEUE_Dequeue(&command->queue, &data);
++ }
++ while (status == gcvSTATUS_OK);
++
++ /* Issuing a soft reset for the GPU. */
++ gcmkONERROR(gckHARDWARE_Reset(hardware));
++
++ mask = Kernel->restoreMask;
++
++ for (i = 0; i < 32; i++)
++ {
++ if (mask & (1 << i))
++ {
++ count++;
++ }
++ }
++
++ /* Handle all outstanding events now. */
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (Kernel->core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending3D[i], mask));
++ }
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, mask));
++ }
++#else
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, mask));
++#endif
++#else
++#if gcdMULTI_GPU
++ if (Kernel->core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ eventObj->pending3D[i] = mask;
++ }
++ }
++ else
++ {
++ eventObj->pending = mask;
++ }
++#else
++ eventObj->pending = mask;
++#endif
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ while (count--)
++ {
++ gcmkONERROR(gckOS_AtomDecrement(
++ Kernel->os,
++ eventObj->interruptCount,
++ &oldValue
++ ));
++ }
++
++ gckOS_AtomClearMask(Kernel->hardware->pendingEvent, mask);
++#endif
++
++ gcmkONERROR(gckEVENT_Notify(eventObj, 1));
++
++ gcmkVERIFY_OK(gckOS_GetTime(&Kernel->resetTimeStamp));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_OpenUserData
++**
++** Get access to the user data.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctPOINTER StaticStorage
++** Pointer to the kernel storage where the data is to be copied if
++** NeedCopy is gcvTRUE.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to the kernel pointer that will be pointing to the data.
++*/
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d StaticStorage=0x%08X "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, StaticStorage, UserPointer, Size, KernelPointer
++ );
++
++ /* Validate the arguemnts. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(!NeedCopy || (StaticStorage != gcvNULL));
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ if (NeedCopy)
++ {
++ /* Copy the user data to the static storage. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Kernel->os, StaticStorage, UserPointer, Size
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = StaticStorage;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map the user pointer. */
++ gcmkONERROR(gckOS_MapUserPointer(
++ Kernel->os, UserPointer, Size, &pointer
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = pointer;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_CloseUserData
++**
++** Release resources associated with the user data connection opened by
++** gckKERNEL_OpenUserData.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctBOOL FlushData
++** If gcvTRUE, the data is written back to the user.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Kernel pointer to the data.
++*/
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d FlushData=%d "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, FlushData, UserPointer, Size, KernelPointer
++ );
++
++ /* Validate the arguemnts. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Get a shortcut to the kernel pointer. */
++ pointer = * KernelPointer;
++
++ if (pointer != gcvNULL)
++ {
++ if (NeedCopy)
++ {
++ if (FlushData)
++ {
++ gcmkONERROR(gckOS_CopyToUserData(
++ Kernel->os, * KernelPointer, UserPointer, Size
++ ));
++ }
++ }
++ else
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Kernel->os,
++ UserPointer,
++ Size,
++ * KernelPointer
++ ));
++ }
++
++ /* Reset the kernel pointer. */
++ * KernelPointer = gcvNULL;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ )
++{
++ gcmkHEADER_ARG("Kernel=0x%x timeOut=%d", Kernel, timeOut);
++#if gcdGPU_TIMEOUT
++ Kernel->timeOut = timeOut;
++#endif
++ gcmkFOOTER_NO();
++}
++
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckOS os = Kernel->os;
++ gceSTATUS status;
++ gctPOINTER logical = gcvNULL;
++ gctSIZE_T pageCount;
++ gctSIZE_T bytes = *Bytes;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer = gcvNULL;
++ gckMMU mmu;
++ gctUINT32 flag = gcvALLOC_FLAG_NON_CONTIGUOUS;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(os,
++ sizeof(gckVIRTUAL_COMMAND_BUFFER),
++ (gctPOINTER)&buffer));
++
++ gcmkONERROR(gckOS_ZeroMemory(buffer, sizeof(gckVIRTUAL_COMMAND_BUFFER)));
++
++ buffer->bytes = bytes;
++
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(os,
++ flag,
++ bytes,
++ gcvNULL,
++ &buffer->physical));
++
++ if (InUserSpace)
++ {
++ gcmkONERROR(gckOS_CreateUserVirtualMapping(os,
++ buffer->physical,
++ bytes,
++ &logical,
++ &pageCount));
++
++ *Logical =
++ buffer->userLogical = logical;
++ }
++ else
++ {
++ gcmkONERROR(gckOS_CreateKernelVirtualMapping(os,
++ buffer->physical,
++ bytes,
++ &logical,
++ &pageCount));
++
++ *Logical =
++ buffer->kernelLogical = logical;
++ }
++
++ buffer->pageCount = pageCount;
++ buffer->kernel = Kernel;
++
++ gcmkONERROR(gckOS_GetProcessID(&buffer->pid));
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu));
++ buffer->mmu = mmu;
++#else
++ mmu = Kernel->mmu;
++#endif
++
++ gcmkONERROR(gckMMU_AllocatePages(mmu,
++ pageCount,
++ &buffer->pageTable,
++ &buffer->gpuAddress));
++
++
++ gcmkONERROR(gckOS_MapPagesEx(os,
++ Kernel->core,
++ buffer->physical,
++ pageCount,
++ buffer->gpuAddress,
++ buffer->pageTable));
++
++ gcmkONERROR(gckMMU_Flush(mmu, gcvSURF_INDEX));
++
++ *Physical = buffer;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "gpuAddress = %x pageCount = %d kernelLogical = %x userLogical=%x",
++ buffer->gpuAddress, buffer->pageCount,
++ buffer->kernelLogical, buffer->userLogical);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ if (Kernel->virtualBufferHead == gcvNULL)
++ {
++ Kernel->virtualBufferHead =
++ Kernel->virtualBufferTail = buffer;
++ }
++ else
++ {
++ buffer->prev = Kernel->virtualBufferTail;
++ Kernel->virtualBufferTail->next = buffer;
++ Kernel->virtualBufferTail = buffer;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (buffer->gpuAddress)
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(mmu, buffer->pageTable, buffer->pageCount));
++#else
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu, buffer->pageTable, buffer->pageCount));
++#endif
++ }
++
++ if (buffer->userLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyUserVirtualMapping(os,
++ buffer->physical,
++ bytes,
++ buffer->userLogical));
++ }
++
++ if (buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(os,
++ buffer->physical,
++ bytes,
++ buffer->kernelLogical));
++ }
++
++ if (buffer->physical)
++ {
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, bytes));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ gckOS os;
++ gckKERNEL kernel;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)Physical;
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(buffer != gcvNULL);
++
++ kernel = buffer->kernel;
++ os = kernel->os;
++
++ if (!buffer->userLogical)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(os,
++ buffer->physical,
++ Bytes,
++ Logical));
++ }
++
++#if !gcdPROCESS_ADDRESS_SPACE
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(kernel->mmu, buffer->pageTable, buffer->pageCount));
++#endif
++
++ gcmkVERIFY_OK(gckOS_UnmapPages(os, buffer->pageCount, buffer->gpuAddress));
++
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, Bytes));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, kernel->virtualBufferLock, gcvINFINITE));
++
++ if (buffer == kernel->virtualBufferHead)
++ {
++ if ((kernel->virtualBufferHead = buffer->next) == gcvNULL)
++ {
++ kernel->virtualBufferTail = gcvNULL;
++ }
++ }
++ else
++ {
++ buffer->prev->next = buffer->next;
++
++ if (buffer == kernel->virtualBufferTail)
++ {
++ kernel->virtualBufferTail = buffer->prev;
++ }
++ else
++ {
++ buffer->next->prev = buffer->prev;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, kernel->virtualBufferLock));
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctPOINTER start;
++ gctUINT32 pid;
++
++ gcmkHEADER_ARG("Logical = %x InUserSpace=%d.", Logical, InUserSpace);
++
++ gcmkVERIFY_OK(gckOS_GetProcessID(&pid));
++
++ status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ /* Walk all command buffer. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ if (InUserSpace)
++ {
++ start = buffer->userLogical;
++ }
++ else
++ {
++ start = buffer->kernelLogical;
++ }
++
++ if (start == gcvNULL)
++ {
++ continue;
++ }
++
++ if (Logical >= start
++ && (Logical < (gctPOINTER)((gctUINT8_PTR)start + buffer->pageCount * 4096))
++ && pid == buffer->pid
++ )
++ {
++ * Address = buffer->gpuAddress + (gctUINT32)((gctUINT8_PTR)Logical - (gctUINT8_PTR)start);
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ )
++{
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 start;
++ gceSTATUS status = gcvSTATUS_NOT_SUPPORTED;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ /* Walk all command buffers. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ start = (gctUINT32)buffer->gpuAddress;
++
++ if (GpuAddress >= start && GpuAddress < (start + buffer->pageCount * 4096))
++ {
++ /* Find a range matched. */
++ *Buffer = buffer;
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ return status;
++}
++
++#if gcdLINK_QUEUE_SIZE
++static void
++gckLINKQUEUE_Dequeue(
++ IN gckLINKQUEUE LinkQueue
++ )
++{
++ gcmkASSERT(LinkQueue->count == gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count--;
++ LinkQueue->front = (LinkQueue->front + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ )
++{
++ if (LinkQueue->count == gcdLINK_QUEUE_SIZE)
++ {
++ gckLINKQUEUE_Dequeue(LinkQueue);
++ }
++
++ gcmkASSERT(LinkQueue->count < gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count++;
++
++ LinkQueue->data[LinkQueue->rear].start = start;
++ LinkQueue->data[LinkQueue->rear].end = end;
++
++ gcmkVERIFY_OK(
++ gckOS_GetProcessID(&LinkQueue->data[LinkQueue->rear].pid));
++
++ LinkQueue->rear = (LinkQueue->rear + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ )
++{
++ gcmkASSERT(Index >= 0 && Index < gcdLINK_QUEUE_SIZE);
++
++ *Data = &LinkQueue->data[(Index + LinkQueue->front) % gcdLINK_QUEUE_SIZE];
++}
++#endif
++
++/*
++* gckENTRYQUEUE_Enqueue is called with Command->mutexQueue acquired.
++*/
++gceSTATUS
++gckENTRYQUEUE_Enqueue(
++ IN gckKERNEL Kernel,
++ IN gckENTRYQUEUE Queue,
++ IN gctUINT32 physical,
++ IN gctUINT32 bytes
++ )
++{
++ gctUINT32 next = (Queue->rear + 1) % gcdENTRY_QUEUE_SIZE;
++
++ if (next == Queue->front)
++ {
++ /* Queue is full. */
++ return gcvSTATUS_INVALID_REQUEST;
++ }
++
++ /* Copy data. */
++ Queue->data[Queue->rear].physical = physical;
++ Queue->data[Queue->rear].bytes = bytes;
++
++ gcmkVERIFY_OK(gckOS_MemoryBarrier(Kernel->os, &Queue->rear));
++
++ /* Update rear. */
++ Queue->rear = next;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckENTRYQUEUE_Dequeue(
++ IN gckENTRYQUEUE Queue,
++ OUT gckENTRYDATA * Data
++ )
++{
++ if (Queue->front == Queue->rear)
++ {
++ /* Queue is empty. */
++ return gcvSTATUS_INVALID_REQUEST;
++ }
++
++ /* Copy data. */
++ *Data = &Queue->data[Queue->front];
++
++ /* Update front. */
++ Queue->front = (Queue->front + 1) % gcdENTRY_QUEUE_SIZE;
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++*************************** Pointer - ID translation ***************************
++\******************************************************************************/
++#define gcdID_TABLE_LENGTH 1024
++typedef struct _gcsINTEGERDB * gckINTEGERDB;
++typedef struct _gcsINTEGERDB
++{
++ gckOS os;
++ gctPOINTER* table;
++ gctPOINTER mutex;
++ gctUINT32 tableLen;
++ gctUINT32 currentID;
++ gctUINT32 unused;
++}
++gcsINTEGERDB;
++
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Allocate a database. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gcsINTEGERDB), (gctPOINTER *)&database));
++
++ gcmkONERROR(gckOS_ZeroMemory(database, gcmSIZEOF(gcsINTEGERDB)));
++
++ /* Allocate a pointer table. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH, (gctPOINTER *)&database->table));
++
++ gcmkONERROR(gckOS_ZeroMemory(database->table, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH));
++
++ /* Allocate a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->mutex));
++
++ /* Initialize. */
++ database->currentID = 0;
++ database->unused = gcdID_TABLE_LENGTH;
++ database->os = Kernel->os;
++ database->tableLen = gcdID_TABLE_LENGTH;
++
++ *Database = database;
++
++ gcmkFOOTER_ARG("*Database=0x%08X", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Rollback. */
++ if (database)
++ {
++ if (database->table)
++ {
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++ }
++
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ )
++{
++ gckINTEGERDB database = Database;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Destroy pointer table. */
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++
++ /* Destroy database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->mutex));
++
++ /* Destroy database. */
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctUINT32 i, unused, currentID, tableLen;
++ gctPOINTER * table;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Pointer=0x%08X", Database, Pointer);
++
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (database->unused < 1)
++ {
++ /* Extend table. */
++ gcmkONERROR(
++ gckOS_Allocate(os,
++ gcmSIZEOF(gctPOINTER) * (database->tableLen + gcdID_TABLE_LENGTH),
++ (gctPOINTER *)&table));
++
++ gcmkONERROR(gckOS_ZeroMemory(table + database->tableLen,
++ gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH));
++
++ /* Copy data from old table. */
++ gckOS_MemCopy(table,
++ database->table,
++ database->tableLen * gcmSIZEOF(gctPOINTER));
++
++ gcmkOS_SAFE_FREE(os, database->table);
++
++ /* Update databse with new allocated table. */
++ database->table = table;
++ database->currentID = database->tableLen;
++ database->tableLen += gcdID_TABLE_LENGTH;
++ database->unused += gcdID_TABLE_LENGTH;
++ }
++
++ table = database->table;
++ currentID = database->currentID;
++ tableLen = database->tableLen;
++ unused = database->unused;
++
++ /* Connect id with pointer. */
++ table[currentID] = Pointer;
++
++ *Id = currentID + 1;
++
++ /* Update the currentID. */
++ if (--unused > 0)
++ {
++ for (i = 0; i < tableLen; i++)
++ {
++ if (++currentID >= tableLen)
++ {
++ /* Wrap to the begin. */
++ currentID = 0;
++ }
++
++ if (table[currentID] == gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ database->table = table;
++ database->currentID = currentID;
++ database->tableLen = tableLen;
++ database->unused = unused;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_ARG("*Id=%d", *Id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ database->table[Id] = gcvNULL;
++
++ if (database->unused++ == 0)
++ {
++ database->currentID = Id;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctPOINTER pointer;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ pointer = database->table[Id];
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ if (pointer)
++ {
++ *Pointer = pointer;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ gcmkFOOTER_ARG("*Pointer=0x%08X", *Pointer);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gctUINT32 name;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Pointer=0x%X", Kernel, Pointer);
++
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(database, Pointer, &name));
++
++ gcmkFOOTER_ARG("name=%d", name);
++ return name;
++
++OnError:
++ gcmkFOOTER();
++ return 0;
++}
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer = gcvNULL;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name);
++
++ /* Lookup in database to get pointer. */
++ gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, &pointer));
++
++ gcmkFOOTER_ARG("pointer=0x%X", pointer);
++ return pointer;
++
++OnError:
++ gcmkFOOTER();
++ return gcvNULL;
++}
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=0x%X", Kernel, Name);
++
++ /* Free name if exists. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Name));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_SetRecovery(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Recovery,
++ IN gctUINT32 StuckDump
++ )
++{
++ Kernel->recovery = Recovery;
++
++ if (Recovery == gcvFALSE)
++ {
++ /* Dump stuck information if Recovery is disabled. */
++ Kernel->stuckDump = gcmMAX(StuckDump, gcdSTUCK_DUMP_MIDDLE);
++ }
++
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++***** Shared Buffer ************************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_CreateShBuffer
++**
++** Create shared buffer.
++** The shared buffer can be used across processes. Other process needs call
++** gckKERNEL_MapShBuffer before use it.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 Size
++** Specify the shared buffer size.
++**
++** OUTPUT:
++**
++** gctSHBUF * ShBuf
++** Pointer to hold return shared buffer handle.
++*/
++gceSTATUS
++gckKERNEL_CreateShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Size,
++ OUT gctSHBUF * ShBuf
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%X, Size=%u", Kernel, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (Size == 0)
++ {
++ /* Invalid size. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ else if (Size > 1024)
++ {
++ /* Limite shared buffer size. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Create a shared buffer structure. */
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof (gcsSHBUF),
++ (gctPOINTER *)&shBuf));
++
++ /* Initialize shared buffer. */
++ shBuf->id = 0;
++ shBuf->reference = gcvNULL;
++ shBuf->size = Size;
++ shBuf->data = gcvNULL;
++
++ /* Allocate integer id for this shared buffer. */
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(Kernel->db->pointerDatabase,
++ shBuf,
++ &shBuf->id));
++
++ /* Allocate atom. */
++ gcmkONERROR(gckOS_AtomConstruct(Kernel->os, &shBuf->reference));
++
++ /* Set default reference count to 1. */
++ gcmkVERIFY_OK(gckOS_AtomSet(Kernel->os, shBuf->reference, 1));
++
++ /* Return integer id. */
++ *ShBuf = (gctSHBUF)(gctUINTPTR_T)shBuf->id;
++
++ gcmkFOOTER_ARG("*ShBuf=%u", shBuf->id);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Error roll back. */
++ if (shBuf != gcvNULL)
++ {
++ if (shBuf->id != 0)
++ {
++ gcmkVERIFY_OK(
++ gckKERNEL_FreeIntegerId(Kernel->db->pointerDatabase,
++ shBuf->id));
++ }
++
++ gcmkOS_SAFE_FREE(Kernel->os, shBuf);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_DestroyShBuffer
++**
++** Destroy shared buffer.
++** This will decrease reference of specified shared buffer and do actual
++** destroy when no reference on it.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSHBUF ShBuf
++** Specify the shared buffer to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_DestroyShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf;
++ gctINT32 oldValue = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u",
++ Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL);
++
++ /* Acquire mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os,
++ Kernel->db->pointerDatabaseMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find shared buffer structure. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase,
++ (gctUINT32)(gctUINTPTR_T)ShBuf,
++ (gctPOINTER)&shBuf));
++
++ gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf);
++
++ /* Decrease the reference count. */
++ gckOS_AtomDecrement(Kernel->os, shBuf->reference, &oldValue);
++
++ if (oldValue == 1)
++ {
++ /* Free integer id. */
++ gcmkVERIFY_OK(
++ gckKERNEL_FreeIntegerId(Kernel->db->pointerDatabase,
++ shBuf->id));
++
++ /* Free atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, shBuf->reference));
++
++ if (shBuf->data)
++ {
++ gcmkOS_SAFE_FREE(Kernel->os, shBuf->data);
++ shBuf->data = gcvNULL;
++ }
++
++ /* Free the shared buffer. */
++ gcmkOS_SAFE_FREE(Kernel->os, shBuf);
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapShBuffer
++**
++** Map shared buffer into this process so that it can be used in this process.
++** This will increase reference count on the specified shared buffer.
++** Call gckKERNEL_DestroyShBuffer to dereference.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSHBUF ShBuf
++** Specify the shared buffer to be mapped.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_MapShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf;
++ gctINT32 oldValue = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u",
++ Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL);
++
++ /* Acquire mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os,
++ Kernel->db->pointerDatabaseMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find shared buffer structure. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase,
++ (gctUINT32)(gctUINTPTR_T)ShBuf,
++ (gctPOINTER)&shBuf));
++
++ gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf);
++
++ /* Increase the reference count. */
++ gckOS_AtomIncrement(Kernel->os, shBuf->reference, &oldValue);
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_WriteShBuffer
++**
++** Write user data into shared buffer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSHBUF ShBuf
++** Specify the shared buffer to be written to.
++**
++** gctPOINTER UserData
++** User mode pointer to hold the source data.
++**
++** gctUINT32 ByteCount
++** Specify number of bytes to write. If this is larger than
++** shared buffer size, gcvSTATUS_INVALID_ARGUMENT is returned.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_WriteShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER UserData,
++ IN gctUINT32 ByteCount
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u UserData=0x%X ByteCount=%u",
++ Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf, UserData, ByteCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL);
++
++ /* Acquire mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os,
++ Kernel->db->pointerDatabaseMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find shared buffer structure. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase,
++ (gctUINT32)(gctUINTPTR_T)ShBuf,
++ (gctPOINTER)&shBuf));
++
++ gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf);
++
++ if ((ByteCount > shBuf->size) ||
++ (ByteCount == 0) ||
++ (UserData == gcvNULL))
++ {
++ /* Exceeds buffer max size or invalid. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (shBuf->data == gcvNULL)
++ {
++ /* Allocate buffer data when first time write. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os, ByteCount, &shBuf->data));
++ }
++
++ /* Copy data from user. */
++ gcmkONERROR(
++ gckOS_CopyFromUserData(Kernel->os,
++ shBuf->data,
++ UserData,
++ ByteCount));
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_ReadShBuffer
++**
++** Read data from shared buffer and copy to user pointer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSHBUF ShBuf
++** Specify the shared buffer to be read from.
++**
++** gctPOINTER UserData
++** User mode pointer to save output data.
++**
++** gctUINT32 ByteCount
++** Specify number of bytes to read.
++** If this is larger than shared buffer size, only avaiable bytes are
++** copied. If smaller, copy requested size.
++**
++** OUTPUT:
++**
++** gctUINT32 * BytesRead
++** Pointer to hold how many bytes actually read from shared buffer.
++*/
++gceSTATUS
++gckKERNEL_ReadShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER UserData,
++ IN gctUINT32 ByteCount,
++ OUT gctUINT32 * BytesRead
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf;
++ gctUINT32 bytes;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u UserData=0x%X ByteCount=%u",
++ Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf, UserData, ByteCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL);
++
++ /* Acquire mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os,
++ Kernel->db->pointerDatabaseMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find shared buffer structure. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase,
++ (gctUINT32)(gctUINTPTR_T)ShBuf,
++ (gctPOINTER)&shBuf));
++
++ gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf);
++
++ if (shBuf->data == gcvNULL)
++ {
++ *BytesRead = 0;
++
++ /* No data in shared buffer, skip copy. */
++ status = gcvSTATUS_SKIP;
++ goto OnError;
++ }
++ else if (ByteCount == 0)
++ {
++ /* Invalid size to read. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Determine bytes to copy. */
++ bytes = (ByteCount < shBuf->size) ? ByteCount : shBuf->size;
++
++ /* Copy data to user. */
++ gcmkONERROR(
++ gckOS_CopyToUserData(Kernel->os,
++ shBuf->data,
++ UserData,
++ bytes));
++
++ /* Return copied size. */
++ *BytesRead = bytes;
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_ARG("*BytesRead=%u", bytes);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3423 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_context.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _NewQueue
++**
++** Allocate a new command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** OUTPUT:
++**
++** gckCOMMAND Command
++** gckCOMMAND object has been updated with a new command queue.
++*/
++static gceSTATUS
++_NewQueue(
++ IN OUT gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctINT currentIndex, newIndex;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Switch to the next command buffer. */
++ currentIndex = Command->index;
++ newIndex = (currentIndex + 1) % gcdCOMMAND_QUEUES;
++
++ /* Wait for availability. */
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.waitsignal]");
++#endif
++
++ gcmkONERROR(gckOS_WaitSignal(
++ Command->os,
++ Command->queues[newIndex].signal,
++ gcvINFINITE
++ ));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ if (newIndex < currentIndex)
++ {
++ Command->wrapCount += 1;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 2 * 4,
++ "%s(%d): queue array wrapped around.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): total queue wrap arounds %d.\n",
++ __FUNCTION__, __LINE__, Command->wrapCount
++ );
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): switched to queue %d.\n",
++ __FUNCTION__, __LINE__, newIndex
++ );
++#endif
++
++ /* Update gckCOMMAND object with new command queue. */
++ Command->index = newIndex;
++ Command->newQueue = gcvTRUE;
++ Command->logical = Command->queues[newIndex].logical;
++ Command->address = Command->queues[newIndex].address;
++ Command->offset = 0;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Command->os,
++ Command->logical,
++ (gctUINT32 *) &Command->physical
++ ));
++
++ if (currentIndex != -1)
++ {
++ /* Mark the command queue as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ Command->kernel->eventObj,
++ Command->queues[currentIndex].signal,
++ gcvKERNEL_COMMAND
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("Command->index=%d", Command->index);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IncrementCommitAtom(
++ IN gckCOMMAND Command,
++ IN gctBOOL Increment
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctINT32 atomValue;
++ gctBOOL powerAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Grab the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, hardware->powerMutex, gcvINFINITE
++ ));
++ powerAcquired = gcvTRUE;
++
++ /* Increment the commit atom. */
++ if (Increment)
++ {
++ gcmkONERROR(gckOS_AtomIncrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AtomDecrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ powerAcquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerAcquired)
++ {
++ /* Release the power mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++_ProcessHints(
++ IN gckCOMMAND Command,
++ IN gctUINT32 ProcessID,
++ IN gcoCMDBUF CommandBuffer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gckKERNEL kernel;
++ gctBOOL needCopy = gcvFALSE;
++ gcskSECURE_CACHE_PTR cache;
++ gctUINT8_PTR commandBufferLogical;
++ gctUINT8_PTR hintedData;
++ gctUINT32_PTR hintArray;
++ gctUINT i, hintCount;
++
++ gcmkHEADER_ARG(
++ "Command=0x%08X ProcessID=%d CommandBuffer=0x%08X",
++ Command, ProcessID, CommandBuffer
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Reset state array pointer. */
++ hintArray = gcvNULL;
++
++ /* Get the kernel object. */
++ kernel = Command->kernel;
++
++ /* Get the cache form the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++
++ /* Determine the start of the command buffer. */
++ commandBufferLogical
++ = (gctUINT8_PTR) CommandBuffer->logical
++ + CommandBuffer->startOffset;
++
++ /* Determine the number of records in the state array. */
++ hintCount = CommandBuffer->hintArrayTail - CommandBuffer->hintArray;
++
++ /* Check wehther we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++ /* Get access to the state array. */
++ if (needCopy)
++ {
++ gctUINT copySize;
++
++ if (Command->hintArrayAllocated &&
++ (Command->hintArraySize < CommandBuffer->hintArraySize))
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArraySize = gcvFALSE;
++ }
++
++ if (!Command->hintArrayAllocated)
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_Allocate(
++ Command->os,
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ Command->hintArray = gcmPTR_TO_UINT64(pointer);
++ Command->hintArrayAllocated = gcvTRUE;
++ Command->hintArraySize = CommandBuffer->hintArraySize;
++ }
++
++ hintArray = gcmUINT64_TO_PTR(Command->hintArray);
++ copySize = hintCount * gcmSIZEOF(gctUINT32);
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ hintArray,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ copySize
++ ));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ hintArray = pointer;
++ }
++
++ /* Scan through the buffer. */
++ for (i = 0; i < hintCount; i += 1)
++ {
++ /* Determine the location of the hinted data. */
++ hintedData = commandBufferLogical + hintArray[i];
++
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) hintedData
++ ));
++ }
++
++OnError:
++ /* Get access to the state array. */
++ if (!needCopy && (hintArray != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ hintArray
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_FlushMMU(
++ IN gckCOMMAND Command
++ )
++{
++#if gcdSECURITY
++ return gcvSTATUS_OK;
++#else
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckHARDWARE hardware = Command->kernel->hardware;
++ gctBOOL pause = gcvFALSE;
++
++ gctUINT8_PTR pointer;
++ gctUINT32 eventBytes;
++ gctUINT32 endBytes;
++ gctUINT32 bufferSize;
++ gctUINT32 executeBytes;
++ gctUINT32 waitLinkBytes;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++ /* Page Table is upated, flush mmu before commit. */
++ gcmkONERROR(gckHARDWARE_FlushMMU(hardware));
++
++ if ((oldValue & gcvPAGE_TABLE_DIRTY_BIT_FE)
++ && (hardware->endAfterFlushMmuCache)
++ )
++ {
++ pause = gcvTRUE;
++ }
++ }
++
++ if (pause)
++ {
++ /* Query size. */
++ gcmkONERROR(gckHARDWARE_Event(hardware, gcvNULL, 0, gcvKERNEL_PIXEL, &eventBytes));
++ gcmkONERROR(gckHARDWARE_End(hardware, gcvNULL, &endBytes));
++
++ executeBytes = eventBytes + endBytes;
++
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ gcvNULL,
++ Command->offset + executeBytes,
++ &waitLinkBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Reserve space. */
++ gcmkONERROR(gckCOMMAND_Reserve(
++ Command,
++ executeBytes,
++ (gctPOINTER *)&pointer,
++ &bufferSize
++ ));
++
++ /* Append EVENT(29). */
++ gcmkONERROR(gckHARDWARE_Event(
++ hardware,
++ pointer,
++ 29,
++ gcvKERNEL_PIXEL,
++ &eventBytes
++ ));
++
++ /* Append END. */
++ pointer += eventBytes;
++ gcmkONERROR(gckHARDWARE_End(hardware, pointer, &endBytes));
++
++ /* Store address to queue. */
++ gcmkONERROR(gckENTRYQUEUE_Enqueue(
++ Command->kernel,
++ &Command->queue,
++ Command->address + Command->offset + executeBytes,
++ waitLinkBytes
++ ));
++
++ gcmkONERROR(gckCOMMAND_Execute(Command, executeBytes));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++#endif
++}
++
++static void
++_DumpBuffer(
++ IN gctPOINTER Buffer,
++ IN gctUINT32 GpuAddress,
++ IN gctSIZE_T Size
++ )
++{
++ gctSIZE_T i, line, left;
++ gctUINT32_PTR data = Buffer;
++
++ line = Size / 32;
++ left = Size % 32;
++
++ for (i = 0; i < line; i++)
++ {
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]);
++ data += 8;
++ GpuAddress += 8 * 4;
++ }
++
++ switch(left)
++ {
++ case 28:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
++ break;
++ case 24:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5]);
++ break;
++ case 20:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4]);
++ break;
++ case 16:
++ gcmkPRINT("%X : %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3]);
++ break;
++ case 12:
++ gcmkPRINT("%X : %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2]);
++ break;
++ case 8:
++ gcmkPRINT("%X : %08X %08X ",
++ GpuAddress, data[0], data[1]);
++ break;
++ case 4:
++ gcmkPRINT("%X : %08X ",
++ GpuAddress, data[0]);
++ break;
++ default:
++ break;
++ }
++}
++
++static void
++_DumpKernelCommandBuffer(
++ IN gckCOMMAND Command
++ )
++{
++ gctINT i;
++ gctUINT32 physical = 0;
++ gctPOINTER entry = gcvNULL;
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ entry = Command->queues[i].logical;
++
++ gckOS_GetPhysicalAddress(Command->os, entry, &physical);
++
++ gcmkPRINT("Kernel command buffer %d\n", i);
++
++ _DumpBuffer(entry, physical, Command->pageSize);
++ }
++}
++
++/******************************************************************************\
++****************************** gckCOMMAND API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckCOMMAND_Construct
++**
++** Construct a new gckCOMMAND object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckCOMMAND * Command
++** Pointer to a variable that will hold the pointer to the gckCOMMAND
++** object.
++*/
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ )
++{
++ gckOS os;
++ gckCOMMAND command = gcvNULL;
++ gceSTATUS status;
++ gctINT i;
++ gctPOINTER pointer = gcvNULL;
++ gctSIZE_T pageSize;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ /* Extract the gckOS object. */
++ os = Kernel->os;
++
++ /* Allocate the gckCOMMAND structure. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckCOMMAND), &pointer));
++ command = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(command, gcmSIZEOF(struct _gckCOMMAND)));
++
++ /* Initialize the gckCOMMAND object.*/
++ command->object.type = gcvOBJ_COMMAND;
++ command->kernel = Kernel;
++ command->os = os;
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Kernel->hardware,
++ &command->alignment,
++ &command->reservedHead,
++ &command->reservedTail
++ ));
++
++ /* Create the command queue mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexQueue));
++
++ /* Create the context switching mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Create the context switching mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContextSeq));
++#endif
++
++ /* Create the power management semaphore. */
++ gcmkONERROR(gckOS_CreateSemaphore(os, &command->powerSemaphore));
++
++ /* Create the commit atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &command->atomCommit));
++
++ /* Get the page size from teh OS. */
++ gcmkONERROR(gckOS_GetPageSize(os, &pageSize));
++
++ gcmkSAFECASTSIZET(command->pageSize, pageSize);
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&command->kernelProcessID));
++
++ /* Set hardware to pipe 0. */
++ command->pipeSelect = gcvPIPE_INVALID;
++
++ /* Pre-allocate the command queues. */
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ os,
++ gcvFALSE,
++ &pageSize,
++ &command->queues[i].physical,
++ &command->queues[i].logical
++ ));
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ command->queues[i].logical,
++ gcvFALSE,
++ &command->queues[i].address
++ ));
++
++ gcmkONERROR(gckOS_CreateSignal(
++ os, gcvFALSE, &command->queues[i].signal
++ ));
++
++ gcmkONERROR(gckOS_Signal(
++ os, command->queues[i].signal, gcvTRUE
++ ));
++ }
++
++#if gcdRECORD_COMMAND
++ gcmkONERROR(gckRECORDER_Construct(os, Kernel->hardware, &command->recorder));
++#endif
++
++ /* No command queue in use yet. */
++ command->index = -1;
++ command->logical = gcvNULL;
++ command->newQueue = gcvFALSE;
++
++ /* Command is not yet running. */
++ command->running = gcvFALSE;
++
++ /* Command queue is idle. */
++ command->idle = gcvTRUE;
++
++ /* Commit stamp is zero. */
++ command->commitStamp = 0;
++
++ /* END event signal not created. */
++ command->endEventSignal = gcvNULL;
++
++ command->queue.front = 0;
++ command->queue.rear = 0;
++ command->queue.count = 0;
++
++ /* Return pointer to the gckCOMMAND object. */
++ *Command = command;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Command=0x%x", *Command);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ if (command->atomCommit != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, command->atomCommit));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(os, command->powerSemaphore));
++ }
++
++ if (command->mutexContext != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContext));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (command->mutexContextSeq != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContextSeq));
++ }
++#endif
++
++ if (command->mutexQueue != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexQueue));
++ }
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ if (command->queues[i].signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ os, command->queues[i].signal
++ ));
++ }
++
++ if (command->queues[i].logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ os,
++ command->pageSize,
++ command->queues[i].physical,
++ command->queues[i].logical
++ ));
++ }
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, command));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Destroy
++**
++** Destroy an gckCOMMAND object.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Stop the command queue. */
++ gcmkVERIFY_OK(gckCOMMAND_Stop(Command, gcvFALSE));
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkASSERT(Command->queues[i].signal != gcvNULL);
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->queues[i].signal
++ ));
++
++ gcmkASSERT(Command->queues[i].logical != gcvNULL);
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ Command->os,
++ Command->pageSize,
++ Command->queues[i].physical,
++ Command->queues[i].logical
++ ));
++ }
++
++ /* END event signal. */
++ if (Command->endEventSignal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->endEventSignal
++ ));
++ }
++
++ /* Delete the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (Command->mutexContextSeq != gcvNULL)
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContextSeq));
++#endif
++
++ /* Delete the command queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexQueue));
++
++ /* Destroy the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Command->os, Command->powerSemaphore));
++
++ /* Destroy the commit atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Command->os, Command->atomCommit));
++
++#if gcdSECURE_USER
++ /* Free state array. */
++ if (Command->hintArrayAllocated)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArrayAllocated = gcvFALSE;
++ }
++#endif
++
++#if gcdRECORD_COMMAND
++ gckRECORDER_Destory(Command->os, Command->recorder);
++#endif
++
++ /* Mark object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCOMMAND object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, Command));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_EnterCommit
++**
++** Acquire command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to destroy.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctBOOL atomIncremented = gcvFALSE;
++ gctBOOL semaAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (!FromPower)
++ {
++ /* Increment COMMIT atom to let power management know that a commit is
++ ** in progress. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvTRUE));
++ atomIncremented = gcvTRUE;
++
++ /* Notify the system the GPU has a commit. */
++ gcmkONERROR(gckOS_Broadcast(Command->os,
++ hardware,
++ gcvBROADCAST_GPU_COMMIT));
++
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(Command->os,
++ Command->powerSemaphore));
++ semaAcquired = gcvTRUE;
++ }
++
++ /* Grab the conmmand queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Command->os,
++ Command->mutexQueue,
++ gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (semaAcquired)
++ {
++ /* Release the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++ }
++
++ if (atomIncremented)
++ {
++ /* Decrement the commit atom. */
++ gcmkVERIFY_OK(_IncrementCommitAtom(
++ Command, gcvFALSE
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_ExitCommit
++**
++** Release command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to destroy.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue));
++
++ if (!FromPower)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(Command->os,
++ Command->powerSemaphore));
++
++ /* Decrement the commit atom. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvFALSE));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Start
++**
++** Start up the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to start.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctUINT32 waitOffset = 0;
++ gctUINT32 waitLinkBytes;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->running)
++ {
++ /* Command queue already running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (Command->logical == gcvNULL)
++ {
++ /* Start at beginning of a new queue. */
++ gcmkONERROR(_NewQueue(Command));
++ }
++
++ /* Start at beginning of page. */
++ Command->offset = 0;
++
++ /* Set abvailable number of bytes for WAIT/LINK command sequence. */
++ waitLinkBytes = Command->pageSize;
++
++ /* Append WAIT/LINK. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ Command->logical,
++ 0,
++ &waitLinkBytes,
++ &waitOffset,
++ &Command->waitSize
++ ));
++
++ Command->waitLogical = (gctUINT8_PTR) Command->logical + waitOffset;
++ Command->waitPhysical = (gctUINT8_PTR) Command->physical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)Command->physical,
++ Command->logical,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Adjust offset. */
++ Command->offset = waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++#if gcdSECURITY
++ /* Start FE by calling security service. */
++ gckKERNEL_SecurityStartCommand(
++ Command->kernel
++ );
++#else
++ /* Enable command processor. */
++ gcmkONERROR(gckHARDWARE_Execute(
++ hardware,
++ Command->address,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Command queue is running. */
++ Command->running = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stop
++**
++** Stop the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to stop.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ )
++{
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (!Command->running)
++ {
++ /* Command queue is not running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (gckHARDWARE_IsFeatureAvailable(hardware,
++ gcvFEATURE_END_EVENT) == gcvSTATUS_TRUE)
++ {
++ /* Allocate the signal. */
++ if (Command->endEventSignal == gcvNULL)
++ {
++ gcmkONERROR(gckOS_CreateSignal(Command->os,
++ gcvTRUE,
++ &Command->endEventSignal));
++ }
++
++ /* Append the END EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Stop(Command->kernel->eventObj,
++ Command->kernelProcessID,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->endEventSignal,
++ &Command->waitSize));
++ }
++ else
++ {
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ hardware, Command->waitLogical, &Command->waitSize
++ ));
++
++#if gcdSECURITY
++ gcmkONERROR(gckKERNEL_SecurityExecute(
++ Command->kernel, Command->waitLogical, 8
++ ));
++#endif
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(Command->kernel->hardware,
++ Command->logical,
++ Command->offset));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ /* Wait for idle. */
++ gcmkONERROR(gckHARDWARE_GetIdle(hardware, !FromRecovery, &idle));
++ }
++
++ /* Command queue is no longer running. */
++ Command->running = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Commit
++**
++** Commit a command buffer to the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object.
++**
++** gcoCMDBUF CommandBuffer
++** Pointer to a gcoCMDBUF object.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++#if gcdMULTI_GPU
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ )
++#endif
++{
++ gceSTATUS status;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL contextAcquired = gcvFALSE;
++ gckHARDWARE hardware;
++ gctBOOL needCopy = gcvFALSE;
++ gcsQUEUE_PTR eventRecord = gcvNULL;
++ gcsQUEUE _eventRecord;
++ gcsQUEUE_PTR nextEventRecord;
++ gctBOOL commandBufferMapped = gcvFALSE;
++ gcoCMDBUF commandBufferObject = gcvNULL;
++
++#if !gcdNULL_DRIVER
++ gcsCONTEXT_PTR contextBuffer;
++ struct _gcoCMDBUF _commandBufferObject;
++ gctPHYS_ADDR commandBufferPhysical;
++ gctUINT8_PTR commandBufferLogical = gcvNULL;
++ gctUINT32 commandBufferAddress = 0;
++ gctUINT8_PTR commandBufferLink = gcvNULL;
++ gctUINT commandBufferSize;
++ gctSIZE_T nopBytes;
++ gctUINT32 pipeBytes;
++ gctUINT32 linkBytes;
++ gctSIZE_T bytes;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR entryPhysical;
++#endif
++ gctPOINTER entryLogical;
++ gctUINT32 entryAddress;
++ gctUINT32 entryBytes;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR exitPhysical;
++#endif
++ gctPOINTER exitLogical;
++ gctUINT32 exitAddress;
++ gctUINT32 exitBytes;
++ gctPHYS_ADDR waitLinkPhysical;
++ gctPOINTER waitLinkLogical;
++ gctUINT32 waitLinkAddress;
++ gctUINT32 waitLinkBytes;
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctUINT32 waitSize;
++
++#ifdef __QNXNTO__
++ gctPOINTER userCommandBufferLogical = gcvNULL;
++ gctBOOL userCommandBufferLogicalMapped = gcvFALSE;
++ gctPOINTER userCommandBufferLink = gcvNULL;
++ gctBOOL userCommandBufferLinkMapped = gcvFALSE;
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gctSIZE_T mmuConfigureBytes;
++ gctPOINTER mmuConfigureLogical = gcvNULL;
++ gctUINT32 mmuConfigureAddress;
++ gctPOINTER mmuConfigurePhysical = 0;
++ gctSIZE_T mmuConfigureWaitLinkOffset;
++ gckMMU mmu;
++ gctSIZE_T reservedBytes;
++ gctUINT32 oldValue;
++#endif
++
++#if gcdDUMP_COMMAND
++ gctPOINTER contextDumpLogical = gcvNULL;
++ gctSIZE_T contextDumpBytes = 0;
++ gctPOINTER bufferDumpLogical = gcvNULL;
++ gctSIZE_T bufferDumpBytes = 0;
++# endif
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gctBOOL sequenceAcquired = gcvFALSE;
++#endif
++
++ gctPOINTER pointer = gcvNULL;
++
++#if gcdMULTI_GPU
++ gctSIZE_T chipEnableBytes;
++#endif
++
++ gcmkHEADER_ARG(
++ "Command=0x%x CommandBuffer=0x%x ProcessID=%d",
++ Command, CommandBuffer, ProcessID
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->kernel->hardware->type== gcvHARDWARE_2D)
++ {
++ /* There is no context for 2D. */
++ Context = gcvNULL;
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Command->kernel, &mmu));
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ mmu->pageTableDirty[Command->kernel->core],
++ 0,
++ &oldValue));
++#else
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ if((Command->kernel->hardware->gpuProfiler) && (Command->kernel->profileEnable))
++ {
++ /* Acquire the context sequnence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContextSeq, gcvINFINITE
++ ));
++ sequenceAcquired = gcvTRUE;
++ }
++#endif
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(Command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ contextAcquired = gcvTRUE;
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++
++ /* Check wehther we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++#if gcdNULL_DRIVER
++ /* Context switch required? */
++ if ((Context != gcvNULL) && (Command->currContext != Context))
++ {
++ /* Yes, merge in the deltas. */
++ gckCONTEXT_Update(Context, ProcessID, StateDelta);
++
++ /* Update the current context. */
++ Command->currContext = Context;
++ }
++#else
++ if (needCopy)
++ {
++ commandBufferObject = &_commandBufferObject;
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ commandBufferObject,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF)
++ ));
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ }
++ else
++ {
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ &pointer
++ ));
++
++ commandBufferObject = pointer;
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ commandBufferMapped = gcvTRUE;
++ }
++
++ /* Query the size of NOP command. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware, gcvNULL, &nopBytes
++ ));
++
++ /* Query the size of pipe select command sequence. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ hardware, gcvNULL, gcvPIPE_3D, &pipeBytes
++ ));
++
++ /* Query the size of LINK command. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware, gcvNULL, 0, 0, &linkBytes
++ ));
++
++#if gcdMULTI_GPU
++ /* Query the size of chip enable command sequence. */
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware, gcvNULL, 0, &chipEnableBytes
++ ));
++#endif
++
++ /* Compute the command buffer entry and the size. */
++ commandBufferLogical
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->startOffset;
++
++ /* Get the hardware address. */
++ if (Command->kernel->virtualCommandBuffer)
++ {
++ gcmkONERROR(gckKERNEL_GetGPUAddress(
++ Command->kernel,
++ commandBufferLogical,
++ gcvTRUE,
++ &commandBufferAddress
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ hardware,
++ commandBufferLogical,
++ gcvTRUE,
++ &commandBufferAddress
++ ));
++ }
++
++ /* Get the physical address. */
++ gcmkONERROR(gckOS_UserLogicalToPhysical(
++ Command->os,
++ commandBufferLogical,
++ (gctUINT32_PTR)&commandBufferPhysical
++ ));
++
++#ifdef __QNXNTO__
++ userCommandBufferLogical = (gctPOINTER) commandBufferLogical;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ userCommandBufferLogical,
++ 0,
++ &pointer));
++
++ commandBufferLogical = pointer;
++
++ userCommandBufferLogicalMapped = gcvTRUE;
++#endif
++
++ commandBufferSize
++ = commandBufferObject->offset
++ + Command->reservedTail
++ - commandBufferObject->startOffset;
++
++ gcmkONERROR(_FlushMMU(Command));
++
++ /* Get the current offset. */
++ offset = Command->offset;
++
++ /* Compute number of bytes left in current kernel command queue. */
++ bytes = Command->pageSize - offset;
++
++#if gcdMULTI_GPU
++ if (Command->kernel->core == gcvCORE_MAJOR)
++ {
++ commandBufferSize += chipEnableBytes;
++
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ commandBufferLogical + pipeBytes,
++ ChipEnable,
++ &chipEnableBytes
++ ));
++
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ commandBufferLogical + commandBufferSize - linkBytes - chipEnableBytes,
++ gcvCORE_3D_ALL_MASK,
++ &chipEnableBytes
++ ));
++ }
++ else
++ {
++ commandBufferSize += nopBytes;
++
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ commandBufferLogical + pipeBytes,
++ &nopBytes
++ ));
++
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ commandBufferLogical + commandBufferSize - linkBytes - nopBytes,
++ &nopBytes
++ ));
++ }
++#endif
++
++ /* Query the size of WAIT/LINK command sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ gcvNULL,
++ offset,
++ &waitLinkBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < waitLinkBytes)
++ {
++ /* No, create a new one. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Get the new current offset. */
++ offset = Command->offset;
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - offset;
++ gcmkASSERT(bytes >= waitLinkBytes);
++ }
++
++ /* Compute the location if WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + offset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + offset;
++ waitLinkAddress = Command->address + offset;
++
++ /* Context switch required? */
++ if (Context == gcvNULL)
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryAddress = commandBufferAddress + offset;
++ entryBytes = commandBufferSize - offset;
++
++ Command->currContext = gcvNULL;
++ }
++ else if (Command->currContext != Context)
++ {
++ /* Temporary disable context length oprimization. */
++ Context->dirty = gcvTRUE;
++
++ /* Get the current context buffer. */
++ contextBuffer = Context->buffer;
++
++ /* Yes, merge in the deltas. */
++ gcmkONERROR(gckCONTEXT_Update(Context, ProcessID, StateDelta));
++
++ /* Determine context entry and exit points. */
++ if (0)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D and 3D are used.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryAddress = contextBuffer->address + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryAddress = contextBuffer->address;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++
++ /* Mark context as not dirty. */
++ Context->dirty = gcvFALSE;
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D only command buffer.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryAddress = contextBuffer->address + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryAddress = contextBuffer->address;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++ /* Not using 2D. */
++ else
++ {
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 3D only command buffer.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + Context->pipeSelectBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryAddress = contextBuffer->address + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: "XD" command buffer - neither 2D nor 3D.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_3D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom3D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom3D;
++
++ entryAddress
++ = contextBuffer->address
++ + Context->entryOffsetXDFrom3D;
++
++ entryBytes
++ = Context->bufferSize
++ - Context->entryOffsetXDFrom3D;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom2D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom2D;
++
++ entryAddress
++ = contextBuffer->address
++ + Context->entryOffsetXDFrom2D;
++
++ entryBytes
++ = Context->totalSize
++ - Context->entryOffsetXDFrom2D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the context buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)entryPhysical,
++ entryLogical,
++ entryBytes
++ ));
++#endif
++
++ /* Update the current context. */
++ Command->currContext = Context;
++
++#if gcdDUMP_COMMAND
++ contextDumpLogical = entryLogical;
++ contextDumpBytes = entryBytes;
++#endif
++
++#if gcdSECURITY
++ /* Commit context buffer to trust zone. */
++ gckKERNEL_SecurityExecute(
++ Command->kernel,
++ entryLogical,
++ entryBytes - 8
++ );
++#endif
++
++#if gcdRECORD_COMMAND
++ gckRECORDER_Record(
++ Command->recorder,
++ gcvNULL,
++ 0xFFFFFFFF,
++ entryLogical,
++ entryBytes - 8
++ );
++#endif
++ }
++
++ /* Same context. */
++ else
++ {
++ /* Determine context entry and exit points. */
++ if (commandBufferObject->using2D && Context->dirty2D)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryAddress = contextBuffer->address + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryAddress = contextBuffer->address;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryAddress = contextBuffer->address + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryAddress = contextBuffer->address;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++ else
++ {
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + pipeBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryAddress = contextBuffer->address + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryAddress = commandBufferAddress + offset;
++ entryBytes = commandBufferSize - offset;
++ }
++ }
++ }
++
++#if gcdDUMP_COMMAND
++ bufferDumpLogical = commandBufferLogical + offset;
++ bufferDumpBytes = commandBufferSize - offset;
++#endif
++
++#if gcdSECURE_USER
++ /* Process user hints. */
++ gcmkONERROR(_ProcessHints(Command, ProcessID, commandBufferObject));
++#endif
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = Command->physical;
++#endif
++
++ exitLogical = Command->logical;
++ exitAddress = Command->address;
++ exitBytes = Command->offset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump to the new
++ WAIT/LINK command sequence. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = waitLinkPhysical;
++#endif
++ exitLogical = waitLinkLogical;
++ exitAddress = waitLinkAddress;
++ exitBytes = waitLinkBytes;
++ }
++
++ /* Add a new WAIT/LINK command sequence. When the command buffer which is
++ currently being scheduled is fully executed by the GPU, the FE will
++ jump to this WAIT/LINK sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ waitLinkLogical,
++ offset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitSize
++ ));
++
++ /* Compute the location if WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = (gctUINT8_PTR) waitLinkLogical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command queue cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)exitPhysical,
++ exitLogical,
++ exitBytes
++ ));
++#endif
++
++ /* Determine the location of the LINK command in the command buffer. */
++ commandBufferLink
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->offset;
++
++#ifdef __QNXNTO__
++ userCommandBufferLink = (gctPOINTER) commandBufferLink;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ userCommandBufferLink,
++ 0,
++ &pointer));
++
++ commandBufferLink = pointer;
++
++ userCommandBufferLinkMapped = gcvTRUE;
++#endif
++
++#if gcdMULTI_GPU
++ if (Command->kernel->core == gcvCORE_MAJOR)
++ {
++ commandBufferLink += chipEnableBytes;
++ }
++ else
++ {
++ commandBufferLink += nopBytes;
++ }
++#endif
++
++ /* Generate a LINK from the end of the command buffer being scheduled
++ back to the kernel command queue. */
++#if !gcdSECURITY
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ commandBufferLink,
++ exitAddress,
++ exitBytes,
++ &linkBytes
++ ));
++#endif
++
++#ifdef __QNXNTO__
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ userCommandBufferLink,
++ 0,
++ commandBufferLink));
++
++ userCommandBufferLinkMapped = gcvFALSE;
++#endif
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ ProcessID,
++ gcvNULL,
++ (gctUINT32)commandBufferPhysical,
++ commandBufferLogical,
++ commandBufferSize
++ ));
++#endif
++
++#if gcdRECORD_COMMAND
++ gckRECORDER_Record(
++ Command->recorder,
++ commandBufferLogical + offset,
++ commandBufferSize - offset - 8,
++ gcvNULL,
++ 0xFFFFFFFF
++ );
++
++ gckRECORDER_AdvanceIndex(Command->recorder, Command->commitStamp);
++
++ Command->commitStamp++;
++#endif
++
++#if gcdSECURITY
++ /* Submit command buffer to trust zone. */
++ gckKERNEL_SecurityExecute(
++ Command->kernel,
++ commandBufferLogical + offset,
++ commandBufferSize - offset - 8
++ );
++#else
++ /* Generate a LINK from the previous WAIT/LINK command sequence to the
++ entry determined above (either the context or the command buffer).
++ This LINK replaces the WAIT instruction from the previous WAIT/LINK
++ pair, therefore we use WAIT metrics for generation of this LINK.
++ This action will execute the entire sequence. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ Command->waitLogical,
++ entryAddress,
++ entryBytes,
++ &Command->waitSize
++ ));
++#endif
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ contextDumpLogical,
++ contextDumpBytes,
++ gceDUMP_BUFFER_CONTEXT,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ bufferDumpLogical,
++ bufferDumpBytes,
++ gceDUMP_BUFFER_USER,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ waitLinkLogical,
++ waitLinkBytes,
++ gceDUMP_BUFFER_WAITLINK,
++ gcvFALSE
++ );
++
++ /* Update the current pipe. */
++ Command->pipeSelect = commandBufferObject->exitPipe;
++
++ /* Update command queue offset. */
++ Command->offset += waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update address of last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitSize;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.commit]");
++#endif
++#endif /* gcdNULL_DRIVER */
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ contextAcquired = gcvFALSE;
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ commitEntered = gcvFALSE;
++
++#if VIVANTE_PROFILER_CONTEXT
++ if(sequenceAcquired)
++ {
++#if gcdMULTI_GPU
++ gcmkONERROR(gckCOMMAND_Stall(Command, gcvTRUE, ChipEnable));
++#else
++ gcmkONERROR(gckCOMMAND_Stall(Command, gcvTRUE));
++#endif
++ if (Command->currContext)
++ {
++ gcmkONERROR(gckHARDWARE_UpdateContextProfile(
++ hardware,
++ Command->currContext));
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ sequenceAcquired = gcvFALSE;
++ }
++#endif
++
++ /* Loop while there are records in the queue. */
++ while (EventQueue != gcvNULL)
++ {
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ eventRecord = &_eventRecord;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os, eventRecord, EventQueue, gcmSIZEOF(gcsQUEUE)
++ ));
++ }
++ else
++ {
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ eventRecord = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(gckEVENT_AddList(
++ Command->kernel->eventObj, &eventRecord->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE
++ ));
++
++ /* Next record in the queue. */
++ nextEventRecord = gcmUINT64_TO_PTR(eventRecord->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os, EventQueue, gcmSIZEOF(gcsQUEUE), (gctPOINTER *) eventRecord
++ ));
++
++ eventRecord = gcvNULL;
++ }
++
++ EventQueue = nextEventRecord;
++ }
++
++ if (Command->kernel->eventObj->queueHead == gcvNULL
++ && Command->kernel->hardware->powerManagement == gcvTRUE
++ )
++ {
++ /* Commit done event by which work thread knows all jobs done. */
++ gcmkVERIFY_OK(
++ gckEVENT_CommitDone(Command->kernel->eventObj, gcvKERNEL_PIXEL));
++ }
++
++ /* Submit events. */
++#if gcdMULTI_GPU
++ status = gckEVENT_Submit(Command->kernel->eventObj, gcvTRUE, gcvFALSE, ChipEnable);
++#else
++ status = gckEVENT_Submit(Command->kernel->eventObj, gcvTRUE, gcvFALSE);
++#endif
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkTRACE(
++ gcvLEVEL_INFO,
++ "%s(%d): Intterupted in gckEVENT_Submit",
++ __FUNCTION__, __LINE__
++ );
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++
++#ifdef __QNXNTO__
++ if (userCommandBufferLogicalMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ userCommandBufferLogical,
++ 0,
++ commandBufferLogical));
++
++ userCommandBufferLogicalMapped = gcvFALSE;
++ }
++#endif
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++
++ commandBufferMapped = gcvFALSE;
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((eventRecord != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) eventRecord
++ ));
++ }
++
++ if (contextAcquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (sequenceAcquired)
++ {
++ /* Release the context sequence mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ }
++#endif
++
++#ifdef __QNXNTO__
++ if (userCommandBufferLinkMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ userCommandBufferLink,
++ 0,
++ commandBufferLink));
++ }
++
++ if (userCommandBufferLogicalMapped)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ userCommandBufferLogical,
++ 0,
++ commandBufferLogical));
++ }
++#endif
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Reserve
++**
++** Reserve space in the command queue. Also acquire the command queue mutex.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes previously reserved.
++**
++** OUTPUT:
++**
++** gctPOINTER * Buffer
++** Pointer to a variable that will receive the address of the reserved
++** space.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable that will receive the number of bytes
++** available in the command queue.
++*/
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctUINT32 RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctUINT32 * BufferSize
++ )
++{
++ gceSTATUS status;
++ gctUINT32 bytes;
++ gctUINT32 requiredBytes;
++ gctUINT32 requestedAligned;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute aligned number of reuested bytes. */
++ requestedAligned = gcmALIGN(RequestedBytes, Command->alignment);
++
++ /* Another WAIT/LINK command sequence will have to be appended after
++ the requested area being reserved. Compute the number of bytes
++ required for WAIT/LINK at the location after the reserved area. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ gcvNULL,
++ Command->offset + requestedAligned,
++ &requiredBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Compute total number of bytes required. */
++ requiredBytes += requestedAligned;
++
++ /* Compute number of bytes available in command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < requiredBytes)
++ {
++ /* Create a new command queue. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Still not enough space? */
++ if (bytes < requiredBytes)
++ {
++ /* Rare case, not enough room in command queue. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++ }
++
++ /* Return pointer to empty slot command queue. */
++ *Buffer = (gctUINT8 *) Command->logical + Command->offset;
++
++ /* Return number of bytes left in command queue. */
++ *BufferSize = bytes;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Buffer=0x%x *BufferSize=%lu", *Buffer, *BufferSize);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Execute
++**
++** Execute a previously reserved command queue by appending a WAIT/LINK command
++** sequence after it and modifying the last WAIT into a LINK command. The
++** command FIFO mutex will be released whether this function succeeds or not.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes previously reserved.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++ IN gctUINT32 RequestedBytes
++ )
++{
++ gceSTATUS status;
++
++ gctPHYS_ADDR waitLinkPhysical;
++ gctUINT8_PTR waitLinkLogical;
++ gctUINT32 waitLinkOffset;
++ gctUINT32 waitLinkBytes;
++
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctUINT32 waitBytes;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR execPhysical;
++#endif
++ gctPOINTER execLogical;
++ gctUINT32 execAddress;
++ gctUINT32 execBytes;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute offset for WAIT/LINK. */
++ waitLinkOffset = Command->offset + RequestedBytes;
++
++ /* Compute number of bytes left in command queue. */
++ waitLinkBytes = Command->pageSize - waitLinkOffset;
++
++ /* Compute the location if WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + waitLinkOffset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + waitLinkOffset;
++
++ /* Append WAIT/LINK in command queue. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ waitLinkLogical,
++ waitLinkOffset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitBytes
++ ));
++
++ /* Compute the location if WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = waitLinkLogical + waitOffset;
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = Command->physical;
++#endif
++ execLogical = Command->logical;
++ execAddress = Command->address;
++ execBytes = waitLinkOffset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump directly to the
++ reserved area. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = (gctUINT8 *) Command->physical + Command->offset;
++#endif
++ execLogical = (gctUINT8 *) Command->logical + Command->offset;
++ execAddress = Command->address + Command->offset;
++ execBytes = RequestedBytes + waitLinkBytes;
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)execPhysical,
++ execLogical,
++ execBytes
++ ));
++#endif
++
++ /* Convert the last WAIT into a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Command->kernel->hardware,
++ Command->waitLogical,
++ execAddress,
++ execBytes,
++ &Command->waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ execLogical,
++ execBytes,
++ gceDUMP_BUFFER_KERNEL,
++ gcvFALSE
++ );
++
++ /* Update the pointer to the last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitBytes;
++
++ /* Update the command queue. */
++ Command->offset += RequestedBytes + waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ Command->kernel->hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.execute]");
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stall
++**
++** The calling thread will be suspended until the command queue has been
++** completed.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++#if gcdMULTI_GPU
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++#endif
++{
++#if gcdNULL_DRIVER
++ /* Do nothing with infinite hardware. */
++ return gcvSTATUS_OK;
++#else
++ gckOS os;
++ gckHARDWARE hardware;
++ gckEVENT eventObject;
++ gceSTATUS status;
++ gctSIGNAL signal = gcvNULL;
++ gctUINT timer = 0;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Extract the gckOS object pointer. */
++ os = Command->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Extract the gckEVENT object pointer. */
++ eventObject = Command->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObject, gcvOBJ_EVENT);
++
++ /* Allocate the signal. */
++ gcmkONERROR(gckOS_CreateSignal(os, gcvTRUE, &signal));
++
++ /* Append the EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Signal(eventObject, signal, gcvKERNEL_PIXEL));
++
++ /* Submit the event queue. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_Submit(eventObject, gcvTRUE, FromPower, ChipEnable));
++#else
++ gcmkONERROR(gckEVENT_Submit(eventObject, gcvTRUE, FromPower));
++#endif
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.stall]");
++#endif
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ /* Error. */
++ goto OnError;
++ }
++
++ do
++ {
++ /* Wait for the signal. */
++ status = gckOS_WaitSignal(os, signal, gcdGPU_ADVANCETIMER);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT32 idle;
++
++ /* Read idle register. */
++ gcmkVERIFY_OK(gckHARDWARE_GetIdle(
++ hardware, gcvFALSE, &idle
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): idle=%08x",
++ __FUNCTION__, __LINE__, idle
++ );
++
++ gcmkVERIFY_OK(gckOS_MemoryBarrier(os, gcvNULL));
++#endif
++
++ /* Advance timer. */
++ timer += gcdGPU_ADVANCETIMER;
++ }
++ else if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ }
++ while (gcmIS_ERROR(status));
++
++ /* Bail out on timeout. */
++ if (gcmIS_ERROR(status))
++ {
++ /* Broadcast the stuck GPU. */
++ gcmkONERROR(gckOS_Broadcast(
++ os, hardware, gcvBROADCAST_GPU_STUCK
++ ));
++ }
++
++ /* Delete the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ /* Free the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Attach
++**
++** Attach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable that will receive a pointer to a new
++** gckCONTEXT object.
++**
++** gctSIZE_T * StateCount
++** Pointer to a variable that will receive the number of states
++** in the context buffer.
++*/
++#if (gcdENABLE_3D || gcdENABLE_2D)
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Construct a gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Construct(
++ Command->os,
++ Command->kernel->hardware,
++ ProcessID,
++ Context
++ ));
++
++ /* Return the number of states in the context. */
++ * StateCount = (* Context)->stateCount;
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%x", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckCOMMAND_Detach
++**
++** Detach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x", Command, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Construct a gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Destroy(Context));
++
++ if (Command->currContext == Context)
++ {
++ /* Detach from gckCOMMAND object if the destoryed context is current context. */
++ Command->currContext = gcvNULL;
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_DumpExecutingBuffer
++**
++** Dump the command buffer which GPU is executing.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 gpuAddress;
++ gctSIZE_T pageCount;
++ gctPOINTER entry;
++ gckOS os = Command->os;
++ gckKERNEL kernel = Command->kernel;
++ gctINT pid;
++ gctUINT32 i, rear;
++ gctUINT32 start, end;
++ gctUINT32 dumpFront, dumpRear;
++ gckLINKQUEUE queue = &kernel->hardware->linkQueue;
++ gckLINKQUEUE queueMirror;
++ gctUINT32 bytes;
++ gckLINKDATA linkData;
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("**** COMMAND BUF DUMP ****\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, kernel->core, 0x664, &gpuAddress));
++
++ gcmkPRINT("DMA Address 0x%08X", gpuAddress);
++
++ if (Command->kernel->stuckDump > gcdSTUCK_DUMP_MIDDLE)
++ {
++ gcmkPRINT("Dump Level is %d", Command->kernel->stuckDump);
++
++ /* Duplicate queue because it will be changed.*/
++ gcmkONERROR(gckOS_AllocateMemory(os,
++ sizeof(struct _gckLINKQUEUE),
++ (gctPOINTER *)&queueMirror));
++
++ gckOS_MemCopy(queueMirror,
++ queue,
++ sizeof(struct _gckLINKQUEUE));
++
++ /* If kernel command buffer link to a context buffer, then link to a user command
++ ** buffer, the second link will be in queue first, so we must fix this.
++ ** In Queue: C1 U1 U2 C2 U3 U4 U5 C3
++ ** Real: C1 X1 U1 C2 U2 U3 U4 C3 U5
++ ** Command buffer X1 which is after C1 is out of queue, so C1 is meaningless.
++ */
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Can't find it in virtual command buffer list, ignore it. */
++ continue;
++ }
++
++ if (buffer->kernelLogical)
++ {
++ /* It is a context buffer. */
++ if (i == 0)
++ {
++ /* The real command buffer is out, so clear this slot. */
++ linkData->start = 0;
++ linkData->end = 0;
++ linkData->pid = 0;
++ }
++ else
++ {
++ /* switch context buffer and command buffer. */
++ struct _gckLINKDATA tmp = *linkData;
++ gckLINKDATA linkDataPrevious;
++
++ gckLINKQUEUE_GetData(queueMirror, i - 1, &linkDataPrevious);
++ *linkData = *linkDataPrevious;
++ *linkDataPrevious = tmp;
++ }
++ }
++ }
++
++ /* Clear search result. */
++ dumpFront = dumpRear = gcvINFINITE;
++
++ gcmkPRINT("Link Stack:");
++
++ /* Search stuck address in link queue from rear. */
++ rear = gcdLINK_QUEUE_SIZE - 1;
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ start = linkData->start;
++ end = linkData->end;
++ pid = linkData->pid;
++
++ if (gpuAddress >= start && gpuAddress < end)
++ {
++ /* Find latest matched command buffer. */
++ gcmkPRINT(" %d, [%08X - %08X]", pid, start, end);
++
++ /* Initiliaze dump information. */
++ dumpFront = dumpRear = rear;
++ }
++
++ /* Advance to previous one. */
++ rear--;
++
++ if (dumpFront != gcvINFINITE)
++ {
++ break;
++ }
++ }
++
++ if (dumpFront == gcvINFINITE)
++ {
++ /* Can't find matched record in link queue, dump kernel command buffer. */
++ _DumpKernelCommandBuffer(Command);
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++ }
++
++ /* Search the last context buffer linked. */
++ while (rear > 0)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ gcmkPRINT(" %d, [%08X - %08X]",
++ linkData->pid,
++ linkData->start,
++ linkData->end);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_SUCCESS(status) && buffer->kernelLogical)
++ {
++ /* Find a context buffer. */
++ dumpFront = rear;
++ break;
++ }
++
++ rear--;
++ }
++
++ if (dumpFront == dumpRear)
++ {
++ /* No context buffer is found, dump all we got.*/
++ dumpFront = 0;
++ }
++
++ /* Dump from last context buffer to last command buffer where hang happens. */
++ for (i = dumpFront; i <= dumpRear; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ /* Get gpu address of this command buffer. */
++ gpuAddress = linkData->start;
++ bytes = linkData->end - gpuAddress;
++
++ /* Get the whole buffer. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkPRINT("Buffer [%08X - %08X] is lost or not belong to current process",
++ linkData->start,
++ linkData->end);
++ continue;
++ }
++
++ /* Get kernel logical for dump. */
++ if (buffer->kernelLogical)
++ {
++ /* Get kernel logical directly if it is a context buffer. */
++ entry = buffer->kernelLogical;
++ gcmkPRINT("Context Buffer:");
++ }
++ else
++ {
++ /* Make it accessiable by kernel if it is a user command buffer. */
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(os,
++ buffer->physical,
++ buffer->bytes,
++ &entry,
++ &pageCount));
++ gcmkPRINT("User Command Buffer:");
++ }
++
++ /* Dump from the entry. */
++ _DumpBuffer((gctUINT8_PTR)entry + (gpuAddress - buffer->gpuAddress), gpuAddress, bytes);
++
++ /* Release kernel logical address if neccessary. */
++ if (!buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(os,
++ buffer->physical,
++ buffer->bytes,
++ entry));
++ }
++ }
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++ OnError:
++ return status;
++ }
++ else
++ {
++ gcmkPRINT("Dump Level is %d, dump memory near the stuck address",
++ Command->kernel->stuckDump);
++
++ /* Without link queue information, we don't know the entry of last command
++ ** buffer, just dump the page where GPU stuck. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(os,
++ buffer->physical,
++ buffer->bytes,
++ &entry,
++ &pageCount));
++
++ if (entry)
++ {
++ gctUINT32 offset = gpuAddress - buffer->gpuAddress;
++ gctPOINTER entryDump = entry;
++
++ /* Dump one pages. */
++ gctUINT32 bytes = 4096;
++
++ /* Align to page. */
++ offset &= 0xfffff000;
++
++ /* Kernel address of page where stall point stay. */
++ entryDump = (gctUINT8_PTR)entryDump + offset;
++
++ /* Align to page. */
++ gpuAddress &= 0xfffff000;
++
++ gcmkPRINT("User Command Buffer:\n");
++ _DumpBuffer(entryDump, gpuAddress, bytes);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(os,
++ buffer->physical,
++ buffer->bytes,
++ entry));
++ }
++ else
++ {
++ _DumpKernelCommandBuffer(Command);
++ }
++
++ return gcvSTATUS_OK;
++ }
++}
++
++gceSTATUS
++gckCOMMAND_AddressInKernelCommandBuffer(
++ IN gckCOMMAND Command,
++ IN gctUINT32 Address,
++ OUT gctBOOL *In
++ )
++{
++ gctBOOL in = gcvFALSE;
++ gctINT i;
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ if ((Address >= Command->queues[i].address)
++ && (Address < (Command->queues[i].address + Command->pageSize))
++ )
++ {
++ in = gcvTRUE;
++ break;
++ }
++ }
++
++ *In = in;
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command_vg.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3787 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++*********************************** Debugging **********************************
++\******************************************************************************/
++
++#define gcvDISABLE_TIMEOUT 1
++#define gcvDUMP_COMMAND_BUFFER 0
++#define gcvDUMP_COMMAND_LINES 0
++
++
++#if gcvDEBUG || defined(EMULATOR) || gcvDISABLE_TIMEOUT
++# define gcvQUEUE_TIMEOUT ~0
++#else
++# define gcvQUEUE_TIMEOUT 10
++#endif
++
++
++/******************************************************************************\
++********************************** Definitions *********************************
++\******************************************************************************/
++
++/* Minimum buffer size. */
++#define gcvMINUMUM_BUFFER \
++ gcmSIZEOF(gcsKERNEL_QUEUE_HEADER) + \
++ gcmSIZEOF(gcsKERNEL_CMDQUEUE) * 2
++
++#define gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ static gceSTATUS \
++ _EventHandler_##Block##_##Number( \
++ IN gckVGKERNEL Kernel \
++ )
++
++#define gcmDEFINE_INTERRUPT_HANDLER(Block, Number) \
++ gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ { \
++ return _EventHandler_Block( \
++ Kernel, \
++ &Kernel->command->taskTable[gcvBLOCK_##Block], \
++ gcvFALSE \
++ ); \
++ }
++
++#define gcmDEFINE_INTERRUPT_HANDLER_ENTRY(Block, Number) \
++ { gcvBLOCK_##Block, _EventHandler_##Block##_##Number }
++
++/* Block interrupt handling table entry. */
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER * gcsBLOCK_INTERRUPT_HANDLER_PTR;
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER
++{
++ gceBLOCK block;
++ gctINTERRUPT_HANDLER handler;
++}
++gcsBLOCK_INTERRUPT_HANDLER;
++
++/* Queue control functions. */
++typedef struct _gcsQUEUE_UPDATE_CONTROL * gcsQUEUE_UPDATE_CONTROL_PTR;
++typedef struct _gcsQUEUE_UPDATE_CONTROL
++{
++ gctOBJECT_HANDLER execute;
++ gctOBJECT_HANDLER update;
++ gctOBJECT_HANDLER lastExecute;
++ gctOBJECT_HANDLER lastUpdate;
++}
++gcsQUEUE_UPDATE_CONTROL;
++
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_FlushMMU(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckVGHARDWARE hardware = Command->hardware;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++ /* Page Table is upated, flush mmu before commit. */
++ gcmkONERROR(gckVGHARDWARE_FlushMMU(hardware));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_WaitForIdle(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 idle;
++ gctUINT timeout = 0;
++
++ /* Loop while not idle. */
++ while (Queue->pending)
++ {
++ /* Did we reach the timeout limit? */
++ if (timeout == gcvQUEUE_TIMEOUT)
++ {
++ /* Hardware is probably dead... */
++ return gcvSTATUS_TIMEOUT;
++ }
++
++ /* Sleep for 100ms. */
++ gcmkERR_BREAK(gckOS_Delay(Command->os, 100));
++
++ /* Not the first loop? */
++ if (timeout > 0)
++ {
++ /* Read IDLE register. */
++ gcmkVERIFY_OK(gckVGHARDWARE_GetIdle(Command->hardware, &idle));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_COMMAND,
++ "%s: timeout, IDLE=%08X\n",
++ __FUNCTION__, idle
++ );
++ }
++
++ /* Increment the timeout counter. */
++ timeout += 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gctINT32
++_GetNextInterrupt(
++ IN gckVGCOMMAND Command,
++ IN gceBLOCK Block
++ )
++{
++ gctUINT index;
++ gcsBLOCK_TASK_ENTRY_PTR entry;
++ gctINT32 interrupt;
++
++ /* Get the block entry. */
++ entry = &Command->taskTable[Block];
++
++ /* Make sure we have initialized interrupts. */
++ gcmkASSERT(entry->interruptCount > 0);
++
++ /* Decrement the interrupt usage semaphore. */
++ gcmkVERIFY_OK(gckOS_DecrementSemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++
++ /* Get the value index. */
++ index = entry->interruptIndex;
++
++ /* Get the interrupt value. */
++ interrupt = entry->interruptArray[index];
++
++ /* Must be a valid value. */
++ gcmkASSERT((interrupt >= 0) && (interrupt <= 31));
++
++ /* Advance the index to the next value. */
++ index += 1;
++
++ /* Set the new index. */
++ entry->interruptIndex = (index == entry->interruptCount)
++ ? 0
++ : index;
++
++ /* Return interrupt value. */
++ return interrupt;
++}
++
++
++/******************************************************************************\
++***************************** Task Storage Management **************************
++\******************************************************************************/
++
++/* Minimum task buffer size. */
++#define gcvMIN_TASK_BUFFER \
++( \
++ gcmSIZEOF(gcsTASK_CONTAINER) + 128 \
++)
++
++/* Free list terminator. */
++#define gcvFREE_TASK_TERMINATOR \
++( \
++ (gcsTASK_CONTAINER_PTR) gcmINT2PTR(~0) \
++)
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------- Allocated Task Buffer List Management ------------------*/
++
++static void
++_InsertTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR AddAfter,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR addBefore;
++
++ /* Cannot add before the first buffer. */
++ gcmkASSERT(AddAfter != gcvNULL);
++
++ /* Create a shortcut to the next buffer. */
++ addBefore = AddAfter->allocNext;
++
++ /* Initialize the links. */
++ Buffer->allocPrev = AddAfter;
++ Buffer->allocNext = addBefore;
++
++ /* Link to the previous buffer. */
++ AddAfter->allocNext = Buffer;
++
++ /* Link to the next buffer. */
++ if (addBefore != gcvNULL)
++ {
++ addBefore->allocPrev = Buffer;
++ }
++}
++
++static void
++_RemoveTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++
++ /* Cannot remove the first buffer. */
++ gcmkASSERT(Buffer->allocPrev != gcvNULL);
++
++ /* Create shortcuts to the previous and next buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Tail buffer? */
++ if (next == gcvNULL)
++ {
++ /* Remove from the list. */
++ prev->allocNext = gcvNULL;
++ }
++
++ /* Buffer from the middle. */
++ else
++ {
++ prev->allocNext = next;
++ next->allocPrev = prev;
++ }
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*--------------------- Free Task Buffer List Management ---------------------*/
++
++static void
++_AppendToFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Cannot be a part of the free list already. */
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* First buffer to add? */
++ if (Command->taskFreeHead == gcvNULL)
++ {
++ /* Terminate the links. */
++ Buffer->freePrev = gcvFREE_TASK_TERMINATOR;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Initialize the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = Buffer;
++ }
++
++ /* Not the first, add after the tail. */
++ else
++ {
++ /* Initialize the new tail buffer. */
++ Buffer->freePrev = Command->taskFreeTail;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Add after the tail. */
++ Command->taskFreeTail->freeNext = Buffer;
++ Command->taskFreeTail = Buffer;
++ }
++}
++
++static void
++_RemoveFromFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Has to be a part of the free list. */
++ gcmkASSERT(Buffer->freePrev != gcvNULL);
++ gcmkASSERT(Buffer->freeNext != gcvNULL);
++
++ /* Head buffer? */
++ if (Buffer->freePrev == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Tail buffer as well? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Reset the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = gcvNULL;
++ }
++
++ /* No, just the head. */
++ else
++ {
++ /* Update the head. */
++ Command->taskFreeHead = Buffer->freeNext;
++
++ /* Terminate the next buffer. */
++ Command->taskFreeHead->freePrev = gcvFREE_TASK_TERMINATOR;
++ }
++ }
++
++ /* Not the head. */
++ else
++ {
++ /* Tail buffer? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Update the tail. */
++ Command->taskFreeTail = Buffer->freePrev;
++
++ /* Terminate the previous buffer. */
++ Command->taskFreeTail->freeNext = gcvFREE_TASK_TERMINATOR;
++ }
++
++ /* A buffer in the middle. */
++ else
++ {
++ /* Remove the buffer from the list. */
++ Buffer->freePrev->freeNext = Buffer->freeNext;
++ Buffer->freeNext->freePrev = Buffer->freePrev;
++ }
++ }
++
++ /* Reset free list pointers. */
++ Buffer->freePrev = gcvNULL;
++ Buffer->freeNext = gcvNULL;
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- Task Buffer Allocation --------------------------*/
++
++static void
++_SplitTaskBuffer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer,
++ IN gctUINT Size
++ )
++{
++ /* Determine the size of the new buffer. */
++ gctINT splitBufferSize = Buffer->size - Size;
++ gcmkASSERT(splitBufferSize >= 0);
++
++ /* Is the split buffer big enough to become a separate buffer? */
++ if (splitBufferSize >= gcvMIN_TASK_BUFFER)
++ {
++ /* Place the new path data. */
++ gcsTASK_CONTAINER_PTR splitBuffer = (gcsTASK_CONTAINER_PTR)
++ (
++ (gctUINT8_PTR) Buffer + Size
++ );
++
++ /* Set the trimmed buffer size. */
++ Buffer->size = Size;
++
++ /* Initialize the split buffer. */
++ splitBuffer->referenceCount = 0;
++ splitBuffer->size = splitBufferSize;
++ splitBuffer->freePrev = gcvNULL;
++ splitBuffer->freeNext = gcvNULL;
++
++ /* Link in. */
++ _InsertTaskBuffer(Buffer, splitBuffer);
++ _AppendToFreeList(Command, splitBuffer);
++ }
++}
++
++static gceSTATUS
++_AllocateTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ OUT gcsTASK_CONTAINER_PTR * Buffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x, Buffer ==0x%x", Command, Size, Buffer);
++
++ /* Verify arguments. */
++ gcmkVERIFY_ARGUMENT(Buffer != gcvNULL);
++
++ do
++ {
++ gcsTASK_STORAGE_PTR storage;
++ gcsTASK_CONTAINER_PTR buffer;
++
++ /* Adjust the size. */
++ Size += gcmSIZEOF(gcsTASK_CONTAINER);
++
++ /* Adjust the allocation size if not big enough. */
++ if (Size > Command->taskStorageUsable)
++ {
++ Command->taskStorageGranularity
++ = gcmALIGN(Size + gcmSIZEOF(gcsTASK_STORAGE), 1024);
++
++ Command->taskStorageUsable
++ = Command->taskStorageGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++ }
++
++ /* Is there a free buffer available? */
++ else if (Command->taskFreeHead != gcvNULL)
++ {
++ /* Set the initial free buffer. */
++ gcsTASK_CONTAINER_PTR buffer = Command->taskFreeHead;
++
++ do
++ {
++ /* Is the buffer big enough? */
++ if (buffer->size >= Size)
++ {
++ /* Remove the buffer from the free list. */
++ _RemoveFromFreeList(Command, buffer);
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++
++ /* Get the next free buffer. */
++ buffer = buffer->freeNext;
++ }
++ while (buffer != gcvFREE_TASK_TERMINATOR);
++ }
++
++ /* Allocate a container. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Command->os,
++ Command->taskStorageGranularity,
++ (gctPOINTER *) &storage
++ ));
++
++ /* Link in the storage buffer. */
++ storage->next = Command->taskStorage;
++ Command->taskStorage = storage;
++
++ /* Place the task buffer. */
++ buffer = (gcsTASK_CONTAINER_PTR) (storage + 1);
++
++ /* Determine the size of the buffer. */
++ buffer->size
++ = Command->taskStorageGranularity
++ - gcmSIZEOF(gcsTASK_STORAGE);
++
++ /* Initialize the task buffer. */
++ buffer->referenceCount = 0;
++ buffer->allocPrev = gcvNULL;
++ buffer->allocNext = gcvNULL;
++ buffer->freePrev = gcvNULL;
++ buffer->freeNext = gcvNULL;
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++static void
++_FreeTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++ gcsTASK_CONTAINER_PTR merged;
++
++ gctUINT32 mergedSize;
++
++ /* Verify arguments. */
++ gcmkASSERT(Buffer != gcvNULL);
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* Get shortcuts to the previous and next path data buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Is the previous path data buffer already free? */
++ if (prev && prev->freeNext)
++ {
++ /* The previous path data buffer is the one that remains. */
++ merged = prev;
++
++ /* Is the next path data buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge all three path data buffers into the previous. */
++ mergedSize = prev->size + Buffer->size + next->size;
++
++ /* Remove the next path data buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++ }
++ else
++ {
++ /* Merge the current path data buffer into the previous. */
++ mergedSize = prev->size + Buffer->size;
++ }
++
++ /* Delete the current path data buffer. */
++ _RemoveTaskBuffer(Buffer);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++ else
++ {
++ /* The current path data buffer is the one that remains. */
++ merged = Buffer;
++
++ /* Is the next buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge the next into the current. */
++ mergedSize = Buffer->size + next->size;
++
++ /* Remove the next buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++
++ /* Add the current buffer into the free list. */
++ _AppendToFreeList(Command, merged);
++ }
++}
++
++gceSTATUS
++_RemoveRecordFromProcesDB(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_HEADER_PTR Task
++ )
++{
++ gceSTATUS status;
++ gcsTASK_PTR task = (gcsTASK_PTR)((gctUINT8_PTR)Task - sizeof(gcsTASK));
++ gcsTASK_FREE_VIDEO_MEMORY_PTR freeVideoMemory;
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR unlockVideoMemory;
++ gctINT pid;
++ gctUINT32 size;
++ gctUINT32 handle;
++ gckKERNEL kernel = Command->kernel->kernel;
++ gckVIDMEM_NODE unlockNode = gcvNULL;
++ gckVIDMEM_NODE nodeObject = gcvNULL;
++ gceDATABASE_TYPE type;
++
++ /* Get the total size of all tasks. */
++ size = task->size;
++
++ gcmkVERIFY_OK(gckOS_GetProcessID((gctUINT32_PTR)&pid));
++
++ do
++ {
++ switch (Task->id)
++ {
++ case gcvTASK_FREE_VIDEO_MEMORY:
++ freeVideoMemory = (gcsTASK_FREE_VIDEO_MEMORY_PTR)Task;
++
++ handle = (gctUINT32)freeVideoMemory->node;
++
++ status = gckVIDMEM_HANDLE_Lookup(
++ Command->kernel->kernel,
++ pid,
++ handle,
++ &nodeObject);
++
++ if (gcmIS_ERROR(status))
++ {
++ return status;
++ }
++
++ gckVIDMEM_HANDLE_Dereference(kernel, pid, handle);
++ freeVideoMemory->node = gcmALL_TO_UINT32(nodeObject);
++
++ type = gcvDB_VIDEO_MEMORY
++ | (nodeObject->type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
++ | (nodeObject->pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ type,
++ gcmINT2PTR(handle)));
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_FREE_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(freeVideoMemory + 1);
++
++ break;
++ case gcvTASK_UNLOCK_VIDEO_MEMORY:
++ unlockVideoMemory = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR)Task;
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(unlockVideoMemory->node)));
++
++ handle = (gctUINT32)unlockVideoMemory->node;
++
++ status = gckVIDMEM_HANDLE_Lookup(
++ Command->kernel->kernel,
++ pid,
++ handle,
++ &unlockNode);
++
++ if (gcmIS_ERROR(status))
++ {
++ return status;
++ }
++
++ gckVIDMEM_HANDLE_Dereference(kernel, pid, handle);
++ unlockVideoMemory->node = gcmPTR_TO_UINT64(unlockNode);
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_UNLOCK_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(unlockVideoMemory + 1);
++
++ break;
++ default:
++ /* Skip the whole task. */
++ size = 0;
++ break;
++ }
++ }
++ while(size);
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++********************************* Task Scheduling ******************************
++\******************************************************************************/
++
++static gceSTATUS
++_ScheduleTasks(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable,
++ IN gctUINT8_PTR PreviousEnd
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gctINT block;
++ gcsTASK_CONTAINER_PTR container;
++ gcsTASK_MASTER_ENTRY_PTR userTaskEntry;
++ gcsBLOCK_TASK_ENTRY_PTR kernelTaskEntry;
++ gcsTASK_PTR userTask;
++ gctUINT8_PTR kernelTask;
++ gctINT32 interrupt;
++ gctUINT8_PTR eventCommand;
++
++#ifdef __QNXNTO__
++ gcsTASK_PTR oldUserTask = gcvNULL;
++ gctPOINTER pointer;
++#endif
++
++ /* Nothing to schedule? */
++ if (TaskTable->size == 0)
++ {
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->taskMutex,
++ gcvINFINITE
++ ));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ do
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " number of tasks scheduled = %d\n"
++ " size of event data in bytes = %d\n",
++ TaskTable->count,
++ TaskTable->size
++ );
++
++ /* Allocate task buffer. */
++ gcmkERR_BREAK(_AllocateTaskContainer(
++ Command,
++ TaskTable->size,
++ &container
++ ));
++
++ /* Determine the task data pointer. */
++ kernelTask = (gctUINT8_PTR) (container + 1);
++
++ /* Initialize the reference count. */
++ container->referenceCount = TaskTable->count;
++
++ /* Process tasks. */
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " processing tasks for block %d\n",
++ block
++ );
++
++ /* Get the current kernel table entry. */
++ kernelTaskEntry = &Command->taskTable[block];
++
++ /* Are there tasks for the current block scheduled? */
++ if (kernelTaskEntry->container == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " first task container for the block added\n",
++ block
++ );
++
++ /* Nothing yet, set the container buffer pointer. */
++ kernelTaskEntry->container = container;
++ kernelTaskEntry->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Yes, append to the end. */
++ else
++ {
++ kernelTaskEntry->link->cotainer = container;
++ kernelTaskEntry->link->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Set initial task. */
++ userTask = userTaskEntry->head;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " copying user tasks over to the kernel\n"
++ );
++
++ /* Copy tasks. */
++ do
++ {
++ gcsTASK_HEADER_PTR taskHeader;
++
++#ifdef __QNXNTO__
++ oldUserTask = userTask;
++
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ oldUserTask,
++ 0,
++ &pointer));
++
++ userTask = pointer;
++#endif
++
++ taskHeader = (gcsTASK_HEADER_PTR) (userTask + 1);
++
++ gcmkVERIFY_OK(_RemoveRecordFromProcesDB(Command, taskHeader));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " task ID = %d, size = %d\n",
++ ((gcsTASK_HEADER_PTR) (userTask + 1))->id,
++ userTask->size
++ );
++
++#ifdef __QNXNTO__
++ if (taskHeader->id == gcvTASK_SIGNAL)
++ {
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->coid = TaskTable->coid;
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->rcvid = TaskTable->rcvid;
++ }
++#endif
++
++ /* Copy the task data. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ kernelTask, taskHeader, userTask->size
++ ));
++
++ /* Advance to the next task. */
++ kernelTask += userTask->size;
++ userTask = userTask->next;
++
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UnmapUserPointer(
++ Command->os,
++ oldUserTask,
++ 0,
++ pointer));
++#endif
++ }
++ while (userTask != gcvNULL);
++
++ /* Update link pointer in the header. */
++ kernelTaskEntry->link = (gcsTASK_LINK_PTR) kernelTask;
++
++ /* Initialize link task. */
++ kernelTaskEntry->link->id = gcvTASK_LINK;
++ kernelTaskEntry->link->cotainer = gcvNULL;
++ kernelTaskEntry->link->task = gcvNULL;
++
++ /* Advance the task data pointer. */
++ kernelTask += gcmSIZEOF(gcsTASK_LINK);
++ }
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->taskMutex
++ ));
++
++ /* Assign interrupts to the blocks. */
++ eventCommand = PreviousEnd;
++
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ /* Get the interrupt number. */
++ interrupt = _GetNextInterrupt(Command, block);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): block = %d interrupt = %d\n",
++ __FUNCTION__, __LINE__,
++ block, interrupt
++ );
++
++ /* Determine the command position. */
++ eventCommand -= Command->info.eventCommandSize;
++
++ /* Append an EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, eventCommand, block, interrupt, gcvNULL
++ ));
++ }
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++******************************** Memory Management *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_HardwareToKernel(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM memory;
++ gctUINT32 offset;
++ gctUINT32 nodePhysical;
++ gctPOINTER *logical;
++ gctSIZE_T bytes;
++ status = gcvSTATUS_OK;
++
++ memory = Node->VidMem.memory;
++
++ if (memory->object.type == gcvOBJ_VIDMEM)
++ {
++ nodePhysical = memory->baseAddress
++ + (gctUINT32)Node->VidMem.offset
++ + Node->VidMem.alignment;
++ bytes = Node->VidMem.bytes;
++ logical = &Node->VidMem.kernelVirtual;
++ }
++ else
++ {
++ nodePhysical = Node->Virtual.physicalAddress;
++ bytes = Node->Virtual.bytes;
++ logical = &Node->Virtual.kernelVirtual;
++ }
++
++ if (*logical == gcvNULL)
++ {
++ status = gckOS_MapPhysical(Os, nodePhysical, bytes, logical);
++
++ if (gcmkIS_ERROR(status))
++ {
++ return status;
++ }
++ }
++
++ offset = Address - nodePhysical;
++ *KernelPointer = (gctPOINTER)((gctUINT8_PTR)(*logical) + offset);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_ConvertUserCommandBufferPointer(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR UserCommandBuffer,
++ OUT gcsCMDBUFFER_PTR * KernelCommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcsCMDBUFFER_PTR mappedUserCommandBuffer = gcvNULL;
++ gckKERNEL kernel = Command->kernel->kernel;
++ gctUINT32 pid;
++ gckVIDMEM_NODE node;
++
++ gckOS_GetProcessID(&pid);
++
++ do
++ {
++ gctUINT32 headerAddress;
++
++ /* Map the command buffer structure into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ (gctPOINTER *) &mappedUserCommandBuffer
++ ));
++
++ /* Determine the address of the header. */
++ headerAddress
++ = mappedUserCommandBuffer->address
++ - mappedUserCommandBuffer->bufferOffset;
++
++ gcmkERR_BREAK(gckVIDMEM_HANDLE_Lookup(
++ kernel,
++ pid,
++ gcmPTR2INT32(mappedUserCommandBuffer->node),
++ &node));
++
++ /* Translate the logical address to the kernel space. */
++ gcmkERR_BREAK(_HardwareToKernel(
++ Command->os,
++ node->node,
++ headerAddress,
++ (gctPOINTER *) KernelCommandBuffer
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Unmap the user command buffer. */
++ if (mappedUserCommandBuffer != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_UnmapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ mappedUserCommandBuffer
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_AllocateLinear(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gcuVIDMEM_NODE_PTR * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status, last;
++ gctPOINTER logical;
++ gctPHYS_ADDR physical;
++ gctUINT32 address;
++ gctSIZE_T size = Size;
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Command->os,
++ gcvFALSE,
++ &size,
++ &physical,
++ &logical
++ ));
++
++ gcmkERR_BREAK(gckOS_GetPhysicalAddress(Command->os, logical, &address));
++
++ /* Set return values. */
++ * Node = physical;
++ * Address = address;
++ * Logical = logical;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (physical != gcvNULL)
++ {
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(gckOS_FreeContiguous(Command->os, physical, logical, size));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_FreeLinear(
++ IN gckVGKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_FreeContiguous(Kernel->os, Node, Logical, 1));
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++_AllocateCommandBuffer(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gcsCMDBUFFER_PTR commandBuffer = gcvNULL;
++
++ do
++ {
++ gctUINT alignedHeaderSize;
++ gctUINT requestedSize;
++ gctUINT allocationSize;
++ gctUINT32 address = 0;
++ gctUINT8_PTR endCommand;
++
++ /* Determine the aligned header size. */
++ alignedHeaderSize
++ = (gctUINT32)gcmALIGN(gcmSIZEOF(gcsCMDBUFFER), Command->info.addressAlignment);
++
++ /* Align the requested size. */
++ requestedSize
++ = (gctUINT32)gcmALIGN(Size, Command->info.commandAlignment);
++
++ /* Determine the size of the buffer to allocate. */
++ allocationSize
++ = alignedHeaderSize
++ + requestedSize
++ + (gctUINT32)Command->info.staticTailSize;
++
++ /* Allocate the command buffer. */
++ gcmkERR_BREAK(_AllocateLinear(
++ Command,
++ allocationSize,
++ Command->info.addressAlignment,
++ &node,
++ &address,
++ (gctPOINTER *) &commandBuffer
++ ));
++
++ /* Initialize the structure. */
++ commandBuffer->completion = gcvVACANT_BUFFER;
++ commandBuffer->node = node;
++ commandBuffer->address = address + alignedHeaderSize;
++ commandBuffer->bufferOffset = alignedHeaderSize;
++ commandBuffer->size = requestedSize;
++ commandBuffer->offset = requestedSize;
++ commandBuffer->nextAllocated = gcvNULL;
++ commandBuffer->nextSubBuffer = gcvNULL;
++
++ /* Determine the data count. */
++ commandBuffer->dataCount
++ = (requestedSize + Command->info.staticTailSize)
++ / Command->info.commandAlignment;
++
++ /* Determine the location of the END command. */
++ endCommand
++ = (gctUINT8_PTR) commandBuffer
++ + alignedHeaderSize
++ + requestedSize;
++
++ /* Append an END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ endCommand,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++
++ /* Set the return pointer. */
++ * CommandBuffer = commandBuffer;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(_FreeLinear(Command->kernel, node, commandBuffer));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_FreeCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ /* Free the buffer. */
++ status = _FreeLinear(Kernel, CommandBuffer->node, CommandBuffer);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++****************************** TS Overflow Handler *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_TSOverflow(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** TS OVERFLOW ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++
++/******************************************************************************\
++****************************** Bus Error Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_BusError(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** BUS ERROR ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++****************************** Power Stall Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_PowerStall(
++ IN gckVGKERNEL Kernel
++ )
++{
++ /* Signal. */
++ return gckOS_Signal(
++ Kernel->os,
++ Kernel->command->powerStallSignal,
++ gcvTRUE);
++}
++
++/******************************************************************************\
++******************************** Task Routines *********************************
++\******************************************************************************/
++
++typedef gceSTATUS (* gctTASKROUTINE) (
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gctTASKROUTINE _taskRoutine[] =
++{
++ _TaskLink, /* gcvTASK_LINK */
++ _TaskCluster, /* gcvTASK_CLUSTER */
++ _TaskIncrement, /* gcvTASK_INCREMENT */
++ _TaskDecrement, /* gcvTASK_DECREMENT */
++ _TaskSignal, /* gcvTASK_SIGNAL */
++ _TaskLockdown, /* gcvTASK_LOCKDOWN */
++ _TaskUnlockVideoMemory, /* gcvTASK_UNLOCK_VIDEO_MEMORY */
++ _TaskFreeVideoMemory, /* gcvTASK_FREE_VIDEO_MEMORY */
++ _TaskFreeContiguousMemory, /* gcvTASK_FREE_CONTIGUOUS_MEMORY */
++ _TaskUnmapUserMemory, /* gcvTASK_UNMAP_USER_MEMORY */
++};
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ /* Cast the task pointer. */
++ gcsTASK_LINK_PTR task = (gcsTASK_LINK_PTR) TaskHeader->task;
++
++ /* Save the pointer to the container. */
++ gcsTASK_CONTAINER_PTR container = TaskHeader->container;
++
++ /* No more tasks in the list? */
++ if (task->task == gcvNULL)
++ {
++ /* Reset the entry. */
++ TaskHeader->container = gcvNULL;
++ TaskHeader->task = gcvNULL;
++ TaskHeader->link = gcvNULL;
++ }
++ else
++ {
++ /* Update the entry. */
++ TaskHeader->container = task->cotainer;
++ TaskHeader->task = task->task;
++ }
++
++ /* Decrement the task buffer reference. */
++ gcmkASSERT(container->referenceCount >= 0);
++ if (container->referenceCount == 0)
++ {
++ /* Free the container. */
++ _FreeTaskContainer(Command, container);
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ /* Cast the task pointer. */
++ gcsTASK_CLUSTER_PTR cluster = (gcsTASK_CLUSTER_PTR) TaskHeader->task;
++
++ /* Get the number of tasks. */
++ gctUINT taskCount = cluster->taskCount;
++
++ /* Advance to the next task. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (cluster + 1);
++
++ /* Perform all tasks in the cluster. */
++ while (taskCount)
++ {
++ /* Perform the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ Command,
++ TaskHeader
++ ));
++
++ /* Update the task count. */
++ taskCount -= 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_INCREMENT_PTR task = (gcsTASK_INCREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Increment data. */
++ (* logical) += 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_DECREMENT_PTR task = (gcsTASK_DECREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Decrement data. */
++ (* logical) -= 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_SIGNAL_PTR task = (gcsTASK_SIGNAL_PTR) TaskHeader->task;
++
++
++ /* Map the signal into kernel space. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->rcvid, task->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->process
++ ));
++#endif /* __QNXNTO__ */
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR userCounter = gcvNULL;
++ gctUINT32_PTR kernelCounter = gcvNULL;
++ gctSIGNAL signal = gcvNULL;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_LOCKDOWN_PTR task = (gcsTASK_LOCKDOWN_PTR) TaskHeader->task;
++
++ /* Convert physical addresses into logical. */
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->userCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &userCounter
++ ));
++
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->kernelCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &kernelCounter
++ ));
++
++ /* Update the kernel counter. */
++ (* kernelCounter) += 1;
++
++ /* Are the counters equal? */
++ if ((* userCounter) == (* kernelCounter))
++ {
++ /* Map the signal into kernel space. */
++ gcmkERR_BREAK(gckOS_MapSignal(
++ Command->os, task->signal, task->process, &signal
++ ));
++
++ if (signal == gcvNULL)
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, task->signal, gcvTRUE
++ ));
++ }
++ else
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, signal, gcvTRUE
++ ));
++ }
++ }
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Destroy the mapped signal. */
++ if (signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, signal
++ ));
++ }
++
++ /* Unmap the physical memory. */
++ if (kernelCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ kernelCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ if (userCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ userCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR task
++ = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Unlock video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Unlock(
++ Command->kernel->kernel,
++ (gckVIDMEM_NODE)gcmUINT64_TO_PTR(task->node),
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++
++ gcmkERR_BREAK(gckVIDMEM_NODE_Dereference(
++ Command->kernel->kernel,
++ gcmUINT64_TO_PTR(task->node)));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_VIDEO_MEMORY_PTR task
++ = (gcsTASK_FREE_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Free video memory. */
++ gcmkERR_BREAK(gckVIDMEM_NODE_Dereference(
++ Command->kernel->kernel,
++ gcmINT2PTR(task->node)));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR task
++ = (gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR) TaskHeader->task;
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Command->os, task->physical, task->logical, task->bytes
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++ gctPOINTER info;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNMAP_USER_MEMORY_PTR task
++ = (gcsTASK_UNMAP_USER_MEMORY_PTR) TaskHeader->task;
++
++ info = gckKERNEL_QueryPointerFromName(
++ Command->kernel->kernel, gcmALL_TO_UINT32(task->info));
++
++ /* Unmap the user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Command->os, gcvCORE_VG, task->memory, task->size, info, task->address
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++************ Hardware Block Interrupt Handlers For Scheduled Events ************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_Block(
++ IN gckVGKERNEL Kernel,
++ IN gcsBLOCK_TASK_ENTRY_PTR TaskHeader,
++ IN gctBOOL ProcessAll
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskHeader=0x%x ProcessAll=0x%x", Kernel, TaskHeader, ProcessAll);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (TaskHeader->task == gcvNULL)
++ {
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++
++ do
++ {
++ gckVGCOMMAND command;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Increment the interrupt usage semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, TaskHeader->interruptSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->taskMutex,
++ gcvINFINITE
++ ));
++
++ /* Verify inputs. */
++ gcmkASSERT(TaskHeader != gcvNULL);
++ gcmkASSERT(TaskHeader->container != gcvNULL);
++ gcmkASSERT(TaskHeader->task != gcvNULL);
++ gcmkASSERT(TaskHeader->link != gcvNULL);
++
++ /* Process tasks. */
++ do
++ {
++ /* Process the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++            /* Is the next task a LINK? */
++ if (TaskHeader->task->id == gcvTASK_LINK)
++ {
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++ /* Done. */
++ break;
++ }
++ }
++ while (ProcessAll);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->taskMutex
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gcmDECLARE_INTERRUPT_HANDLER(COMMAND, 0)
++{
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x ", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++
++ do
++ {
++ gckVGCOMMAND command;
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT entryCount;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Get the current queue. */
++ queueTail = command->queueTail;
++
++ /* Get the current queue entry. */
++ entry = queueTail->currentEntry;
++
++ /* Get the number of entries in the queue. */
++ entryCount = queueTail->pending;
++
++ /* Process all entries. */
++ while (gcvTRUE)
++ {
++ /* Call post-execution function. */
++ status = entry->handler(Kernel, entry);
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: post action failed.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Executed the next buffer? */
++ if (status == gcvSTATUS_EXECUTED)
++ {
++ /* Update the queue. */
++ queueTail->pending = entryCount;
++ queueTail->currentEntry = entry;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++
++ /* Break out of the loop. */
++ break;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ entryCount -= 1;
++
++ /* Last entry? */
++ if (entryCount == 0)
++ {
++ /* Reset the queue to idle. */
++ queueTail->pending = 0;
++
++ /* Get a shortcut to the queue to merge with. */
++ mergeQueue = command->mergeQueue;
++
++ /* Merge the queues if necessary. */
++ if (mergeQueue != queueTail)
++ {
++ gcmkASSERT(mergeQueue < queueTail);
++ gcmkASSERT(mergeQueue->next == queueTail);
++
++ mergeQueue->size
++ += gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + queueTail->size;
++
++ mergeQueue->next = queueTail->next;
++ }
++
++ /* Advance to the next queue. */
++ queueTail = queueTail->next;
++
++ /* Did it wrap around? */
++ if (command->queue == queueTail)
++ {
++ /* Reset merge queue. */
++ command->mergeQueue = queueTail;
++ }
++
++ /* Set new queue. */
++ command->queueTail = queueTail;
++
++ /* Is the next queue scheduled? */
++ if (queueTail->pending > 0)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ status = gckVGHARDWARE_Execute(
++ command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: failed to start the next queue.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ }
++ else
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(
++ Kernel->command->hardware, gcvPOWER_IDLE_BROADCAST
++ );
++ }
++
++ /* Break out of the loop. */
++ break;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->queueMutex
++ ));
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/* Define standard block interrupt handlers. */
++gcmDEFINE_INTERRUPT_HANDLER(TESSELLATOR, 0)
++gcmDEFINE_INTERRUPT_HANDLER(VG, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 1)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 2)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 3)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 4)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 5)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 6)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 7)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 8)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 9)
++
++/* The entries in the array are arranged by event priority. */
++static gcsBLOCK_INTERRUPT_HANDLER _blockHandlers[] =
++{
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(TESSELLATOR, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(VG, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 1),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 2),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 3),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 4),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 5),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 6),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 7),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 8),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 9),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(COMMAND, 0),
++};
++
++
++/******************************************************************************\
++************************* Static Command Buffer Handlers ***********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Cast the command buffer header. */
++ commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++************************* Dynamic Command Buffer Handlers **********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++********************************* Other Handlers *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_FreeKernelCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ /* Free the command buffer. */
++ status = _FreeCommandBuffer(Kernel, Entry->commandBuffer);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++******************************* Queue Management *******************************
++\******************************************************************************/
++
++#if gcvDUMP_COMMAND_BUFFER
++static void
++_DumpCommandQueue(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR QueueHeader,
++ IN gctUINT EntryCount
++ )
++{
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT queueIndex;
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ static gctUINT arrayCount = 0;
++#endif
++
++    /* Is dumping enabled? */
++    if (!Command->enableDumping)
++ {
++ return;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "COMMAND QUEUE DUMP: %d entries\n", EntryCount
++ );
++#endif
++
++ /* Get the pointer to the first entry. */
++ entry = QueueHeader->currentEntry;
++
++ /* Iterate through the queue. */
++ for (queueIndex = 0; queueIndex < EntryCount; queueIndex += 1)
++ {
++ gcsCMDBUFFER_PTR buffer;
++ gctUINT bufferCount;
++ gctUINT bufferIndex;
++ gctUINT i, count;
++ gctUINT size;
++ gctUINT32_PTR data;
++
++#if gcvDUMP_COMMAND_LINES
++ gctUINT lineNumber;
++#endif
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "ENTRY %d\n", queueIndex
++ );
++#endif
++
++ /* Reset the count. */
++ bufferCount = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Update the count. */
++ bufferCount += 1;
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ if (bufferCount > 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER SET: %d buffers.\n",
++ bufferCount
++ );
++ }
++#endif
++
++ /* Reset the buffer index. */
++ bufferIndex = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Determine the size of the buffer. */
++ size = buffer->dataCount * Command->info.commandAlignment;
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ /* A single buffer? */
++ if (bufferCount == 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER: count=%d (0x%X), size=%d bytes @ %08X.\n",
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER %d: count=%d (0x%X), size=%d bytes @ %08X\n",
++ bufferIndex,
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++#endif
++
++ /* Determine the number of double words to print. */
++ count = size / 4;
++
++ /* Determine the buffer location. */
++ data = (gctUINT32_PTR)
++ (
++ (gctUINT8_PTR) buffer + buffer->bufferOffset
++ );
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "unsigned int _" gcvCOMMAND_BUFFER_NAME "_%d[] =\n",
++ arrayCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "{\n"
++ );
++
++ arrayCount += 1;
++#endif
++
++#if gcvDUMP_COMMAND_LINES
++ /* Reset the line number. */
++ lineNumber = 0;
++#endif
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ count -= 2;
++#endif
++
++ for (i = 0; i < count; i += 1)
++ {
++ if ((i % 8) == 0)
++ {
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\t");
++#else
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " ");
++#endif
++ }
++
++#if gcvDUMP_COMMAND_LINES
++ if (lineNumber == gcvDUMP_COMMAND_LINES)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " . . . . . . . . .\n");
++ break;
++ }
++#endif
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "0x%08X", data[i]);
++
++ if (i + 1 == count)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ if (((i + 1) % 8) == 0)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ",\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ", ");
++ }
++ }
++ }
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "};\n\n"
++ );
++#endif
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ bufferIndex += 1;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ }
++}
++#endif
++
++static gceSTATUS
++_LockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ OUT gcsKERNEL_CMDQUEUE_PTR * Entries,
++ OUT gctUINT_PTR EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* Get a shortcut to the head of the queue. */
++ queueHead = Command->queueHead;
++
++ /* Is the head buffer still being worked on? */
++ if (queueHead->pending)
++ {
++ /* Increment overflow count. */
++ Command->queueOverflow += 1;
++
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, queueHead));
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Determine the first queue entry. */
++ queueHead->currentEntry = (gcsKERNEL_CMDQUEUE_PTR)
++ (
++ (gctUINT8_PTR) queueHead + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ );
++
++ /* Set the pointer to the first entry. */
++ * Entries = queueHead->currentEntry;
++
++ /* Determine the number of available entries. */
++ * EntryCount = queueHead->size / gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UnlockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ IN gctUINT EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++#if !gcdENABLE_INFINITE_SPEED_HW
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++ gcsKERNEL_QUEUE_HEADER_PTR queueNext;
++ gctUINT queueSize;
++ gctUINT newSize;
++ gctUINT unusedSize;
++
++ /* Get shortcut to the head and to the tail of the queue. */
++ queueTail = Command->queueTail;
++ queueHead = Command->queueHead;
++
++ /* Dump the command buffer. */
++#if gcvDUMP_COMMAND_BUFFER
++ _DumpCommandQueue(Command, queueHead, EntryCount);
++#endif
++
++ /* Get a shortcut to the current queue size. */
++ queueSize = queueHead->size;
++
++ /* Determine the new queue size. */
++ newSize = EntryCount * gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++ gcmkASSERT(newSize <= queueSize);
++
++ /* Determine the size of the unused area. */
++ unusedSize = queueSize - newSize;
++
++ /* Is the unused area big enough to become a buffer? */
++ if (unusedSize >= gcvMINUMUM_BUFFER)
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR nextHead;
++
++ /* Place the new header. */
++ nextHead = (gcsKERNEL_QUEUE_HEADER_PTR)
++ (
++ (gctUINT8_PTR) queueHead
++ + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + newSize
++ );
++
++ /* Initialize the buffer. */
++ nextHead->size = unusedSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ nextHead->pending = 0;
++
++ /* Link the buffer in. */
++ nextHead->next = queueHead->next;
++ queueHead->next = nextHead;
++ queueNext = nextHead;
++
++ /* Update the size of the current buffer. */
++ queueHead->size = newSize;
++ }
++
++ /* Not big enough. */
++ else
++ {
++ /* Determine the next queue. */
++ queueNext = queueHead->next;
++ }
++
++ /* Mark the buffer as busy. */
++ queueHead->pending = EntryCount;
++
++ /* Advance to the next buffer. */
++ Command->queueHead = queueNext;
++
++ /* Start the command processor if the queue was empty. */
++ if (queueTail == queueHead)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++ }
++
++ /* The queue was not empty. */
++ else
++ {
++ /* Advance the merge buffer if needed. */
++ if (queueHead == Command->mergeQueue)
++ {
++ Command->mergeQueue = queueNext;
++ }
++ }
++#endif
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API Code *****************************
++\******************************************************************************/
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ )
++{
++ gceSTATUS status, last;
++ gckVGCOMMAND command = gcvNULL;
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++ gctUINT i, j;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskGranularity=0x%x QueueSize=0x%x Command=0x%x",
++ Kernel, TaskGranularity, QueueSize, Command);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(QueueSize >= gcvMINUMUM_BUFFER);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ do
++ {
++ /***********************************************************************
++ ** Generic object initialization.
++ */
++
++ /* Allocate the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGCOMMAND),
++ (gctPOINTER *) &command
++ ));
++
++ /* Initialize the object. */
++ command->object.type = gcvOBJ_COMMAND;
++
++ /* Set the object pointers. */
++ command->kernel = Kernel;
++ command->os = Kernel->os;
++ command->hardware = Kernel->hardware;
++
++ /* Reset pointers. */
++ command->queue = gcvNULL;
++ command->queueMutex = gcvNULL;
++ command->taskMutex = gcvNULL;
++ command->commitMutex = gcvNULL;
++
++ command->powerStallBuffer = gcvNULL;
++ command->powerStallSignal = gcvNULL;
++ command->powerSemaphore = gcvNULL;
++
++ /* Reset context states. */
++ command->contextCounter = 0;
++ command->currentContext = 0;
++
++ /* Enable command buffer dumping. */
++ command->enableDumping = gcvTRUE;
++
++ /* Set features. */
++ command->fe20 = Kernel->hardware->fe20;
++ command->vg20 = Kernel->hardware->vg20;
++ command->vg21 = Kernel->hardware->vg21;
++
++ /* Reset task table .*/
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ command->taskTable, gcmSIZEOF(command->taskTable)
++ ));
++
++ /* Query command buffer attributes. */
++ gcmkERR_BREAK(gckVGCOMMAND_InitializeInfo(command));
++
++ /* Create the control mutexes. */
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->queueMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->taskMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->commitMutex));
++
++ /* Create the power management semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphore(Kernel->os,
++ &command->powerSemaphore));
++
++ gcmkERR_BREAK(gckOS_CreateSignal(Kernel->os,
++ gcvFALSE, &command->powerStallSignal));
++
++ /***********************************************************************
++ ** Command queue initialization.
++ */
++
++ /* Allocate the command queue. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ QueueSize,
++ (gctPOINTER *) &command->queue
++ ));
++
++ /* Initialize the command queue. */
++ queue = command->queue;
++
++ queue->size = QueueSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ queue->pending = 0;
++ queue->next = queue;
++
++ command->queueHead =
++ command->queueTail =
++ command->mergeQueue = command->queue;
++
++ command->queueOverflow = 0;
++
++
++ /***********************************************************************
++ ** Enable TS overflow interrupt.
++ */
++
++ command->info.tsOverflowInt = 0;
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->info.tsOverflowInt,
++ _EventHandler_TSOverflow
++ ));
++
++ /* Mask out the interrupt. */
++ Kernel->hardware->eventMask &= ~(1 << command->info.tsOverflowInt);
++
++
++ /***********************************************************************
++ ** Enable Bus Error interrupt.
++ */
++
++ /* Hardwired to bit 31. */
++ command->busErrorInt = 31;
++
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->busErrorInt,
++ _EventHandler_BusError
++ ));
++
++
++ command->powerStallInt = 30;
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->powerStallInt,
++ _EventHandler_PowerStall
++ ));
++
++ /***********************************************************************
++ ** Task management initialization.
++ */
++
++ command->taskStorage = gcvNULL;
++ command->taskStorageGranularity = TaskGranularity;
++ command->taskStorageUsable = TaskGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++
++ command->taskFreeHead = gcvNULL;
++ command->taskFreeTail = gcvNULL;
++
++ /* Enable block handlers. */
++ for (i = 0; i < gcmCOUNTOF(_blockHandlers); i += 1)
++ {
++ /* Get the target hardware block. */
++ gceBLOCK block = _blockHandlers[i].block;
++
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[block];
++
++ /* Determine the interrupt value index. */
++ gctUINT index = entry->interruptCount;
++
++ /* Create the block semaphore. */
++ if (entry->interruptSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ command->os, &entry->interruptSemaphore
++ ));
++ }
++
++ /* Enable auto-detection. */
++ entry->interruptArray[index] = -1;
++
++ /* Enable interrupt for the block. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &entry->interruptArray[index],
++ _blockHandlers[i].handler
++ ));
++
++ /* Update the number of registered interrupts. */
++ entry->interruptCount += 1;
++
++ /* Inrement the semaphore to allow the usage of the registered
++ interrupt. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Get the FE interrupt. */
++ command->info.feBufferInt
++ = command->taskTable[gcvBLOCK_COMMAND].interruptArray[0];
++
++ /* Return gckVGCOMMAND object pointer. */
++ *Command = command;
++
++ gcmkFOOTER_ARG("*Command=0x%x",*Command);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the task table entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[i];
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ for (j = 0; j < entry->interruptCount; j += 1)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[j] >= 0);
++ gcmkASSERT(entry->interruptArray[j] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ entry->interruptArray[j]
++ ));
++ }
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (command->info.tsOverflowInt != -1)
++ {
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->info.tsOverflowInt
++ ));
++ }
++
++ /* Delete the commit mutex. */
++ if (command->commitMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->commitMutex
++ ));
++ }
++
++ /* Delete the command queue mutex. */
++ if (command->taskMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->taskMutex
++ ));
++ }
++
++ /* Delete the command queue mutex. */
++ if (command->queueMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->queueMutex
++ ));
++ }
++
++ /* Delete the command queue. */
++ if (command->queue != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command->queue
++ ));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(
++ Kernel->os, command->powerSemaphore));
++ }
++
++ if (command->powerStallSignal != gcvNULL)
++ {
++ /* Create the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Kernel->os,
++ command->powerStallSignal));
++ }
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command
++ ));
++ }
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ OUT gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ gctUINT i;
++ gcsTASK_STORAGE_PTR nextStorage;
++
++ if (Command->queueHead != gcvNULL)
++ {
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, Command->queueHead));
++ }
++
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &Command->taskTable[i];
++
++ /* Determine the index of the last interrupt in the array. */
++ gctINT index = entry->interruptCount - 1;
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ while (index >= 0)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[index] >= 0);
++ gcmkASSERT(entry->interruptArray[index] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ entry->interruptArray[index]
++ ));
++
++ /* Update to the next interrupt. */
++ index -= 1;
++ entry->interruptCount -= 1;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (Command->info.tsOverflowInt != -1)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->info.tsOverflowInt
++ ));
++
++ Command->info.tsOverflowInt = -1;
++ }
++
++ /* Delete the commit mutex. */
++ if (Command->commitMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->commitMutex
++ ));
++
++ Command->commitMutex = gcvNULL;
++ }
++
++ /* Delete the command queue mutex. */
++ if (Command->taskMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->taskMutex
++ ));
++
++ Command->taskMutex = gcvNULL;
++ }
++
++ /* Delete the command queue mutex. */
++ if (Command->queueMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->queueMutex
++ ));
++
++ Command->queueMutex = gcvNULL;
++ }
++
++ if (Command->powerSemaphore != gcvNULL)
++ {
++ /* Destroy the power management semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++
++ if (Command->powerStallSignal != gcvNULL)
++ {
++ /* Create the power management semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySignal(
++ Command->os,
++ Command->powerStallSignal));
++ }
++
++ if (Command->queue != gcvNULL)
++ {
++ /* Delete the command queue. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->queue
++ ));
++ }
++
++ /* Destroy all allocated buffers. */
++ while (Command->taskStorage)
++ {
++ /* Copy the buffer pointer. */
++ nextStorage = Command->taskStorage->next;
++
++ /* Free the current container. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->taskStorage
++ ));
++
++ /* Advance to the next one. */
++ Command->taskStorage = nextStorage;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Mark the object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Free(Command->os, Command));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Restore the object type if failed. */
++ Command->object.type = gcvOBJ_COMMAND;
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Information=0x%x", Command, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Information != gcvNULL);
++
++ /* Copy the information. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ Information, &Command->info, sizeof(gcsCOMMAND_BUFFER_INFO)
++ ));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x CommandBuffer=0x%x Data=0x%x",
++ Command, Size, CommandBuffer, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ do
++ {
++ /* Allocate the buffer. */
++ gcmkERR_BREAK(_AllocateCommandBuffer(Command, Size, CommandBuffer));
++
++ /* Determine the data pointer. */
++ * Data = (gctUINT8_PTR) (*CommandBuffer) + (* CommandBuffer)->bufferOffset;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ /* Free command buffer. */
++ status = _FreeCommandBuffer(Command->kernel, CommandBuffer);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ do
++ {
++ gctUINT queueLength;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Set the buffer. */
++ kernelEntry->commandBuffer = CommandBuffer;
++ kernelEntry->handler = _FreeKernelCommandBuffer;
++
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, 1
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ )
++{
++ /*
++ The first buffer is executed through a direct gckVGHARDWARE_Execute call,
++ therefore only an update is needed after the execution is over. All
++ consequent buffers need to be executed upon the first update call from
++ the FE interrupt handler.
++ */
++
++ static gcsQUEUE_UPDATE_CONTROL _dynamicBuffer[] =
++ {
++ {
++ _UpdateDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ },
++ {
++ _ExecuteDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _ExecuteLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ }
++ };
++
++ static gcsQUEUE_UPDATE_CONTROL _staticBuffer[] =
++ {
++ {
++ _UpdateStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ },
++ {
++ _ExecuteStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _ExecuteLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ }
++ };
++
++ gceSTATUS status, last;
++
++#ifdef __QNXNTO__
++ gcsVGCONTEXT_PTR userContext = gcvNULL;
++ gctBOOL userContextMapped = gcvFALSE;
++ gcsTASK_MASTER_TABLE_PTR userTaskTable = gcvNULL;
++ gctBOOL userTaskTableMapped = gcvFALSE;
++ gctPOINTER pointer = gcvNULL;
++#endif
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x Queue=0x%x EntryCount=0x%x TaskTable=0x%x",
++ Command, Context, Queue, EntryCount, TaskTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++ gcmkVERIFY_ARGUMENT(EntryCount > 1);
++
++ do
++ {
++ gctBOOL haveFETasks;
++ gctUINT queueSize;
++ gcsVGCMDQUEUE_PTR mappedQueue;
++ gcsVGCMDQUEUE_PTR userEntry;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++ gcsQUEUE_UPDATE_CONTROL_PTR queueControl;
++ gctUINT currentLength;
++ gctUINT queueLength;
++ gctUINT entriesQueued;
++ gctUINT8_PTR previousEnd;
++ gctBOOL previousDynamic;
++ gctBOOL previousExecuted;
++ gctUINT controlIndex;
++
++#ifdef __QNXNTO__
++ /* Map the context into the kernel space. */
++ userContext = Context;
++
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ userContext,
++ gcmSIZEOF(*userContext),
++ &pointer));
++
++ Context = pointer;
++
++ userContextMapped = gcvTRUE;
++
++ /* Map the taskTable into the kernel space. */
++ userTaskTable = TaskTable;
++
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ userTaskTable,
++ gcmSIZEOF(*userTaskTable),
++ &pointer));
++
++ TaskTable = pointer;
++
++ userTaskTableMapped = gcvTRUE;
++
++ /* Update the signal info. */
++ TaskTable->coid = Context->coid;
++ TaskTable->rcvid = Context->rcvid;
++#endif
++
++ gcmkERR_BREAK(gckVGHARDWARE_SetPowerManagementState(
++ Command->hardware, gcvPOWER_ON_AUTO
++ ));
++
++ /* Acquire the power semaphore. */
++ gcmkERR_BREAK(gckOS_AcquireSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ status = gckOS_AcquireMutex(
++ Command->os,
++ Command->commitMutex,
++ gcvINFINITE
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ break;
++ }
++
++ do
++ {
++ gcmkERR_BREAK(_FlushMMU(Command));
++
++ /* Assign a context ID if not yet assigned. */
++ if (Context->id == 0)
++ {
++ /* Assign the next context number. */
++ Context->id = ++ Command->contextCounter;
++
++ /* See if we overflowed. */
++ if (Command->contextCounter == 0)
++ {
++ /* We actually did overflow, wow... */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++ }
++
++ /* The first entry in the queue is always the context buffer.
++ Verify whether the user context is the same as the current
++ context and if that's the case, skip the first entry. */
++ if (Context->id == Command->currentContext)
++ {
++ /* Same context as before, skip the first entry. */
++ EntryCount -= 1;
++ Queue += 1;
++
++ /* Set the signal to avoid user waiting. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->rcvid, Context->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->process
++ ));
++
++#endif /* __QNXNTO__ */
++
++ }
++ else
++ {
++ /* Different user context - keep the first entry.
++ Set the user context as the current one. */
++ Command->currentContext = Context->id;
++ }
++
++ /* Reset pointers. */
++ queueControl = gcvNULL;
++ previousEnd = gcvNULL;
++
++ /* Determine whether there are FE tasks to be performed. */
++ haveFETasks = (TaskTable->table[gcvBLOCK_COMMAND].head != gcvNULL);
++
++ /* Determine the size of the queue. */
++ queueSize = EntryCount * gcmSIZEOF(gcsVGCMDQUEUE);
++
++ /* Map the command queue into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ (gctPOINTER *) &mappedQueue
++ ));
++
++ /* Set the first entry. */
++ userEntry = mappedQueue;
++
++ /* Process the command queue. */
++ while (EntryCount)
++ {
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Determine the number of entries to process. */
++ currentLength = (queueLength < EntryCount)
++ ? queueLength
++ : EntryCount;
++
++ /* Update the number of the entries left to process. */
++ EntryCount -= currentLength;
++
++ /* Reset previous flags. */
++ previousDynamic = gcvFALSE;
++ previousExecuted = gcvFALSE;
++
++ /* Set the initial control index. */
++ controlIndex = 0;
++
++ /* Process entries. */
++ for (entriesQueued = 0; entriesQueued < currentLength; entriesQueued += 1)
++ {
++ /* Get the kernel pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = gcvNULL;
++ gcmkERR_BREAK(_ConvertUserCommandBufferPointer(
++ Command,
++ userEntry->commandBuffer,
++ &commandBuffer
++ ));
++
++ /* Is it a dynamic command buffer? */
++ if (userEntry->dynamic)
++ {
++ /* Select dynamic buffer control functions. */
++ queueControl = &_dynamicBuffer[controlIndex];
++ }
++
++ /* No, a static command buffer. */
++ else
++ {
++ /* Select static buffer control functions. */
++ queueControl = &_staticBuffer[controlIndex];
++ }
++
++ /* Set the command buffer pointer to the entry. */
++ kernelEntry->commandBuffer = commandBuffer;
++
++ /* If the previous entry was a dynamic command buffer,
++ link it to the current. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command,
++ previousEnd,
++ commandBuffer->address,
++ commandBuffer->dataCount,
++ gcvNULL
++ ));
++
++ /* The buffer will be auto-executed, only need to
++ update it after it has been executed. */
++ kernelEntry->handler = queueControl->update;
++
++ /* The buffer is only being updated. */
++ previousExecuted = gcvFALSE;
++ }
++ else
++ {
++ /* Set the buffer up for execution. */
++ kernelEntry->handler = queueControl->execute;
++
++ /* The buffer is being updated. */
++ previousExecuted = gcvTRUE;
++ }
++
++ /* The current buffer's END command becomes the last END. */
++ previousEnd
++ = ((gctUINT8_PTR) commandBuffer)
++ + commandBuffer->bufferOffset
++ + commandBuffer->dataCount * Command->info.commandAlignment
++ - Command->info.staticTailSize;
++
++ /* Update the last entry info. */
++ previousDynamic = userEntry->dynamic;
++
++ /* Advance entries. */
++ userEntry += 1;
++ kernelEntry += 1;
++
++ /* Update the control index. */
++ controlIndex = 1;
++ }
++
++ /* If the previous entry was a dynamic command buffer,
++ terminate it with an END. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ previousEnd,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++ }
++
++ /* Last buffer? */
++ if (EntryCount == 0)
++ {
++ /* Modify the last command buffer's routines to handle
++ tasks if any.*/
++ if (haveFETasks)
++ {
++ if (previousExecuted)
++ {
++ kernelEntry[-1].handler = queueControl->lastExecute;
++ }
++ else
++ {
++ kernelEntry[-1].handler = queueControl->lastUpdate;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++ /* Schedule tasks. */
++ gcmkERR_BREAK(_ScheduleTasks(Command, TaskTable, previousEnd));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++ }
++
++ /* Unkock and schedule the current queue for execution. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, currentLength
++ ));
++ }
++
++
++ /* Unmap the user command buffer. */
++ gcmkERR_BREAK(gckOS_UnmapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ mappedQueue
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ Command->os,
++ Command->commitMutex
++ ));
++
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++ while (gcvFALSE);
++
++#ifdef __QNXNTO__
++ if (userContextMapped)
++ {
++ /* Unmap the user context. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ userContext,
++ gcmSIZEOF(*userContext),
++ Context));
++ }
++
++ if (userTaskTableMapped)
++ {
++ /* Unmap the user taskTable. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ userTaskTable,
++ gcmSIZEOF(*userTaskTable),
++ TaskTable));
++ }
++#endif
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_db.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_db.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_db.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_db.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1861 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_DATABASE
++
++/*******************************************************************************
++***** Private fuctions ********************************************************/
++
++#define _GetSlot(database, x) \
++ (gctUINT32)(gcmPTR_TO_UINT64(x) % gcmCOUNTOF(database->list))
++
++/*******************************************************************************
++** gckKERNEL_NewDatabase
++**
++** Create a new database structure and insert it to the head of the hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++static gceSTATUS
++gckKERNEL_NewDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctBOOL acquired = gcvFALSE;
++ gctSIZE_T slot;
++ gcsDATABASE_PTR existingDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Walk the hash list. */
++ for (existingDatabase = Kernel->db->db[slot];
++ existingDatabase != gcvNULL;
++ existingDatabase = existingDatabase->next)
++ {
++ if (existingDatabase->processID == ProcessID)
++ {
++ /* One process can't be added twice. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++ }
++
++ if (Kernel->db->freeDatabase != gcvNULL)
++ {
++ /* Allocate a database from the free list. */
++ database = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = database->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate a new database from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE),
++ &pointer));
++
++ gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsDATABASE));
++
++ database = pointer;
++
++ gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->counterMutex));
++ }
++
++ /* Insert the database into the hash. */
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++
++ /* Save the hash slot. */
++ database->slot = slot;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindDatabase
++**
++** Find a database identified by a process ID and move it to the head of the
++** hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++gceSTATUS
++gckKERNEL_FindDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database, previous;
++ gctSIZE_T slot;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d LastProcessID=%d",
++ Kernel, ProcessID, LastProcessID);
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Check whether we are getting the last known database. */
++ if (LastProcessID)
++ {
++ /* Use last database. */
++ database = Kernel->db->lastDatabase;
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ else
++ {
++ /* Walk the hash list. */
++ for (previous = gcvNULL, database = Kernel->db->db[slot];
++ database != gcvNULL;
++ database = database->next)
++ {
++ if (database->processID == ProcessID)
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = database;
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (previous != gcvNULL)
++ {
++ /* Move database to the head of the hash list. */
++ previous->next = database->next;
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteDatabase
++**
++** Remove a database from the hash list and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to the database structure to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++static gceSTATUS
++gckKERNEL_DeleteDatabase(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Check slot value. */
++ gcmkVERIFY_ARGUMENT(Database->slot < gcmCOUNTOF(Kernel->db->db));
++
++ if (Database->slot < gcmCOUNTOF(Kernel->db->db))
++ {
++ /* Check if database if the head of the hash list. */
++ if (Kernel->db->db[Database->slot] == Database)
++ {
++ /* Remove the database from the hash list. */
++ Kernel->db->db[Database->slot] = Database->next;
++ }
++ else
++ {
++ /* Walk the has list to find the database. */
++ for (database = Kernel->db->db[Database->slot];
++ database != gcvNULL;
++ database = database->next
++ )
++ {
++ /* Check if the next list entry is this database. */
++ if (database->next == Database)
++ {
++ /* Remove the database from the hash list. */
++ database->next = Database->next;
++ break;
++ }
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Ouch! Something got corrupted. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ /* Insert database to the free list. */
++ Kernel->db->lastDatabase->next = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = Kernel->db->lastDatabase;
++ }
++
++ /* Keep database as the last database. */
++ Kernel->db->lastDatabase = Database;
++
++ /* Destory handle db. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Database->handleDatabase));
++ Database->handleDatabase = gcvNULL;
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Database->handleDatabaseMutex));
++ Database->handleDatabaseMutex = gcvNULL;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ /* Destory process MMU. */
++ gcmkVERIFY_OK(gckEVENT_DestroyMmu(Kernel->eventObj, Database->mmu, gcvKERNEL_PIXEL));
++ Database->mmu = gcvNULL;
++#endif
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_NewRecord
++**
++** Create a new database record structure and insert it to the head of the
++** database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR * Record
++** Pointer to a variable receiving the database record structure
++** pointer on success.
++*/
++static gceSTATUS
++gckKERNEL_NewRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gctUINT32 Slot,
++ OUT gcsDATABASE_RECORD_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (Kernel->db->freeRecord != gcvNULL)
++ {
++ /* Allocate the record from the free list. */
++ record = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate the record from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE_RECORD),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Insert the record in the database. */
++ record->next = Database->list[Slot];
++ Database->list[Slot] = record;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the record. */
++ *Record = record;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", *Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteRecord
++**
++** Remove a database record from the database and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Data
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gctSIZE_T_PTR Bytes
++** Pointer to a variable that receives the size of the record deleted.
++** Can be gcvNULL if the size is not required.
++*/
++static gceSTATUS
++gckKERNEL_DeleteRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T_PTR Bytes OPTIONAL
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record, previous;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot], previous = gcvNULL;
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = record;
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return size of record. */
++ *Bytes = record->bytes;
++ }
++
++ /* Remove record from database. */
++ if (previous == gcvNULL)
++ {
++ Database->list[slot] = record->next;
++ }
++ else
++ {
++ previous->next = record->next;
++ }
++
++ /* Insert record in free list. */
++ record->next = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindRecord
++**
++** Find a database record from the database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Data
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gctSIZE_T_PTR Bytes
++** Pointer to a variable that receives the size of the record deleted.
++** Can be gcvNULL if the size is not required.
++*/
++static gceSTATUS
++gckKERNEL_FindRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot];
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Record != gcvNULL)
++ {
++ /* Return information of record. */
++ gcmkONERROR(
++ gckOS_MemCopy(Record, record, sizeof(gcsDATABASE_RECORD)));
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("Record=0x%x", Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++***** Public API **************************************************************/
++
++/*******************************************************************************
++** gckKERNEL_CreateProcessDB
++**
++** Create a new process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database = gcvNULL;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Create a new database. */
++ gcmkONERROR(gckKERNEL_NewDatabase(Kernel, ProcessID, &database));
++
++ /* Initialize the database. */
++ database->processID = ProcessID;
++ database->vidMem.bytes = 0;
++ database->vidMem.maxBytes = 0;
++ database->vidMem.totalBytes = 0;
++ database->nonPaged.bytes = 0;
++ database->nonPaged.maxBytes = 0;
++ database->nonPaged.totalBytes = 0;
++ database->contiguous.bytes = 0;
++ database->contiguous.maxBytes = 0;
++ database->contiguous.totalBytes = 0;
++ database->mapMemory.bytes = 0;
++ database->mapMemory.maxBytes = 0;
++ database->mapMemory.totalBytes = 0;
++ database->mapUserMemory.bytes = 0;
++ database->mapUserMemory.maxBytes = 0;
++ database->mapUserMemory.totalBytes = 0;
++ database->virtualCommandBuffer.bytes = 0;
++ database->virtualCommandBuffer.maxBytes = 0;
++ database->virtualCommandBuffer.totalBytes = 0;
++
++ for (i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++ database->list[i] = gcvNULL;
++ }
++
++ for (i = 0; i < gcvSURF_NUM_TYPES; i++)
++ {
++ database->vidMemType[i].bytes = 0;
++ database->vidMemType[i].maxBytes = 0;
++ database->vidMemType[i].totalBytes = 0;
++ }
++
++ for (i = 0; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ database->vidMemPool[i].bytes = 0;
++ database->vidMemPool[i].maxBytes = 0;
++ database->vidMemPool[i].totalBytes = 0;
++ }
++
++ gcmkASSERT(database->handleDatabase == gcvNULL);
++ gcmkONERROR(
++ gckKERNEL_CreateIntegerDatabase(Kernel, &database->handleDatabase));
++
++ gcmkASSERT(database->handleDatabaseMutex == gcvNULL);
++ gcmkONERROR(
++ gckOS_CreateMutex(Kernel->os, &database->handleDatabaseMutex));
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkASSERT(database->mmu == gcvNULL);
++ gcmkONERROR(
++ gckMMU_Construct(Kernel, gcdMMU_SIZE, &database->mmu));
++#endif
++
++#if gcdSECURE_USER
++ {
++ gctINT slot;
++ gcskSECURE_CACHE * cache = &database->cache;
++
++ /* Setup the linked list of cache nodes. */
++ for (slot = 1; slot <= gcdSECURE_CACHE_SLOTS; ++slot)
++ {
++ cache->cache[slot].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ cache->cache[slot].prev = &cache->cache[slot - 1];
++ cache->cache[slot].next = &cache->cache[slot + 1];
++# endif
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ cache->cache[slot].nextHash = gcvNULL;
++ cache->cache[slot].prevHash = gcvNULL;
++# endif
++ }
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Setup the head and tail of the cache. */
++ cache->cache[0].next = &cache->cache[1];
++ cache->cache[0].prev = &cache->cache[gcdSECURE_CACHE_SLOTS];
++ cache->cache[0].logical = gcvNULL;
++
++ /* Fix up the head and tail pointers. */
++ cache->cache[0].next->prev = &cache->cache[0];
++ cache->cache[0].prev->next = &cache->cache[0];
++# endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero out the hash table. */
++ for (slot = 0; slot < gcmCOUNTOF(cache->hash); ++slot)
++ {
++ cache->hash[slot].logical = gcvNULL;
++ cache->hash[slot].nextHash = gcvNULL;
++ }
++# endif
++
++ /* Initialize cache index. */
++ cache->cacheIndex = gcvNULL;
++ cache->cacheFree = 1;
++ cache->cacheStamp = 0;
++ }
++#endif
++
++ /* Reset idle timer. */
++ Kernel->db->lastIdle = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AddProcessDB
++**
++** Add a record to a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to add.
++**
++** gctPOINTER Pointer
++** Data of the record to add.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the record to add.
++**
++** gctSIZE_T Size
++** Size of the record to add.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++ gcsDATABASE_COUNTERS * count;
++ gctUINT32 vidMemType;
++ gcePOOL vidMemPool;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x "
++ "Physical=0x%x Size=%lu",
++ Kernel, ProcessID, Type, Pointer, Physical, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Decode type. */
++ vidMemType = (Type & gcdDB_VIDEO_MEMORY_TYPE_MASK) >> gcdDB_VIDEO_MEMORY_TYPE_SHIFT;
++ vidMemPool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT;
++
++ Type &= gcdDATABASE_TYPE_MASK;
++
++ /* Special case the idle record. */
++ if (Type == gcvDB_IDLE)
++ {
++ gctUINT64 time;
++
++ /* Get the current profile time. */
++ gcmkONERROR(gckOS_GetProfileTick(&time));
++
++ if ((ProcessID == 0) && (Kernel->db->lastIdle != 0))
++ {
++ /* Out of idle, adjust time it was idle. */
++ Kernel->db->idleTime += time - Kernel->db->lastIdle;
++ Kernel->db->lastIdle = 0;
++ }
++ else if (ProcessID == 1)
++ {
++ /* Save current idle time. */
++ Kernel->db->lastIdle = time;
++ }
++
++#if gcdDYNAMIC_SPEED
++ {
++ /* Test for first call. */
++ if (Kernel->db->lastSlowdown == 0)
++ {
++ /* Save milliseconds. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ else
++ {
++ /* Compute ellapsed time in milliseconds. */
++ gctUINT delta = gckOS_ProfileToMS(time - Kernel->db->lastSlowdown);
++
++ /* Test for end of period. */
++ if (delta >= gcdDYNAMIC_SPEED)
++ {
++ /* Compute number of idle milliseconds. */
++ gctUINT idle = gckOS_ProfileToMS(
++ Kernel->db->idleTime - Kernel->db->lastSlowdownIdle);
++
++ /* Broadcast to slow down the GPU. */
++ gcmkONERROR(gckOS_BroadcastCalibrateSpeed(Kernel->os,
++ Kernel->hardware,
++ idle,
++ delta));
++
++ /* Save current time. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ }
++ }
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Create a new record in the database. */
++ gcmkONERROR(gckKERNEL_NewRecord(Kernel, database, _GetSlot(database, Pointer), &record));
++
++ /* Initialize the record. */
++ record->kernel = Kernel;
++ record->type = Type;
++ record->data = Pointer;
++ record->physical = Physical;
++ record->bytes = Size;
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ count = &database->vidMem;
++ break;
++
++ case gcvDB_NON_PAGED:
++ count = &database->nonPaged;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ count = &database->contiguous;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ count = &database->mapMemory;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ count = &database->mapUserMemory;
++ break;
++
++ case gcvDB_COMMAND_BUFFER:
++ count = &database->virtualCommandBuffer;
++ break;
++
++ default:
++ count = gcvNULL;
++ break;
++ }
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE));
++
++ if (count != gcvNULL)
++ {
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++ }
++
++ if (Type == gcvDB_VIDEO_MEMORY)
++ {
++ count = &database->vidMemType[vidMemType];
++
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++
++ count = &database->vidMemPool[vidMemPool];
++
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_RemoveProcessDB
++**
++** Remove a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to remove.
++**
++** gctPOINTER Pointer
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctSIZE_T bytes = 0;
++ gctUINT32 vidMemType;
++ gcePOOL vidMempool;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Decode type. */
++ vidMemType = (Type & gcdDB_VIDEO_MEMORY_TYPE_MASK) >> gcdDB_VIDEO_MEMORY_TYPE_SHIFT;
++ vidMempool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT;
++
++ Type &= gcdDATABASE_TYPE_MASK;
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Delete the record. */
++ gcmkONERROR(
++ gckKERNEL_DeleteRecord(Kernel, database, Type, Pointer, &bytes));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE));
++
++ /* Update counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ database->vidMem.bytes -= bytes;
++ database->vidMemType[vidMemType].bytes -= bytes;
++ database->vidMemPool[vidMempool].bytes -= bytes;
++ break;
++
++ case gcvDB_NON_PAGED:
++ database->nonPaged.bytes -= bytes;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ database->contiguous.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ database->mapMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ database->mapUserMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_COMMAND_BUFFER:
++ database->virtualCommandBuffer.bytes -= bytes;
++ break;
++
++ default:
++ break;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindProcessDB
++**
++** Find a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to remove.
++**
++** gctPOINTER Pointer
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR Record
++** Copy of record.
++*/
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, ThreadID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Find the record. */
++ gcmkONERROR(
++ gckKERNEL_FindRecord(Kernel, database, Type, Pointer, Record));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DestroyProcessDB
++**
++** Destroy a process database. If the database contains any records, the data
++** inside those records will be deleted as well. This aids in the cleanup if
++** a process has died unexpectedly or has memory leaks.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record, next;
++ gctBOOL asynchronous = gcvTRUE;
++ gckVIDMEM_NODE nodeObject;
++ gctPHYS_ADDR physical;
++ gckKERNEL kernel = Kernel;
++ gctUINT32 handle;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): VidMem: total=%lu max=%lu",
++ ProcessID, database->vidMem.totalBytes,
++ database->vidMem.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): NonPaged: total=%lu max=%lu",
++ ProcessID, database->nonPaged.totalBytes,
++ database->nonPaged.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Contiguous: total=%lu max=%lu",
++ ProcessID, database->contiguous.totalBytes,
++ database->contiguous.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Idle time=%llu",
++ ProcessID, Kernel->db->idleTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapMemory.totalBytes,
++ database->mapMemory.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapUserMemory.totalBytes,
++ database->mapUserMemory.maxBytes);
++
++ if (database->list != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "Process %d has entries in its database:",
++ ProcessID);
++ }
++
++ for(i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++
++ /* Walk all records. */
++ for (record = database->list[i]; record != gcvNULL; record = next)
++ {
++ /* Next next record. */
++ next = record->next;
++
++ /* Dispatch on record type. */
++ switch (record->type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ gcmkERR_BREAK(gckVIDMEM_HANDLE_Lookup(record->kernel,
++ ProcessID,
++ gcmPTR2INT32(record->data),
++ &nodeObject));
++
++ /* Free the video memory. */
++ gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(record->kernel,
++ ProcessID,
++ gcmPTR2INT32(record->data)));
++
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(record->kernel,
++ nodeObject));
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_NON_PAGED:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the non paged memory. */
++ status = gckEVENT_FreeNonPagedMemory(Kernel->eventObj,
++ record->bytes,
++ physical,
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: NON_PAGED 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_COMMAND_BUFFER:
++ /* Free the command buffer. */
++ status = gckEVENT_DestroyVirtualCommandBuffer(record->kernel->eventObj,
++ record->bytes,
++ gcmNAME_TO_PTR(record->physical),
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: COMMAND_BUFFER 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the contiguous memory. */
++ status = gckEVENT_FreeContiguousMemory(Kernel->eventObj,
++ record->bytes,
++ physical,
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTIGUOUS 0x%x bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_SIGNAL:
++#if USE_NEW_LINUX_SIGNAL
++ status = gcvSTATUS_NOT_SUPPORTED;
++#else
++ /* Free the user signal. */
++ status = gckOS_DestroyUserSignal(Kernel->os,
++ gcmPTR2INT32(record->data));
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SIGNAL %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++
++ case gcvDB_VIDEO_MEMORY_LOCKED:
++ handle = gcmPTR2INT32(record->data);
++
++ gcmkERR_BREAK(gckVIDMEM_HANDLE_Lookup(record->kernel,
++ ProcessID,
++ handle,
++ &nodeObject));
++
++ /* Unlock what we still locked */
++ status = gckVIDMEM_Unlock(record->kernel,
++ nodeObject,
++ nodeObject->type,
++ &asynchronous);
++
++#if gcdENABLE_VG
++ if (record->kernel->core == gcvCORE_VG)
++ {
++ if (gcmIS_SUCCESS(status) && (gcvTRUE == asynchronous))
++ {
++ /* TODO: we maybe need to schedule a event here */
++ status = gckVIDMEM_Unlock(record->kernel,
++ nodeObject,
++ nodeObject->type,
++ gcvNULL);
++ }
++
++ gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(record->kernel,
++ ProcessID,
++ handle));
++
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(record->kernel,
++ nodeObject));
++ }
++ else
++#endif
++ {
++ gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(record->kernel,
++ ProcessID,
++ handle));
++
++ if (gcmIS_SUCCESS(status) && (gcvTRUE == asynchronous))
++ {
++ status = gckEVENT_Unlock(record->kernel->eventObj,
++ gcvKERNEL_PIXEL,
++ nodeObject,
++ nodeObject->type);
++ }
++ else
++ {
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(record->kernel,
++ nodeObject));
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY_LOCKED 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_CONTEXT:
++ /* TODO: Free the context */
++ status = gckCOMMAND_Detach(Kernel->command, gcmNAME_TO_PTR(record->data));
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTEXT 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ /* Unmap memory. */
++ status = gckKERNEL_UnmapMemory(Kernel,
++ record->physical,
++ record->bytes,
++ record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP MEMORY %d (status=%d)",
++ gcmPTR2INT32(record->data), status);
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ /* TODO: Unmap user memory. */
++ status = gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ record->physical,
++ record->bytes,
++ gcmNAME_TO_PTR(record->data),
++ 0);
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP USER MEMORY %d (status=%d)",
++ gcmPTR2INT32(record->data), status);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvDB_SYNC_POINT:
++ /* Free the user signal. */
++ status = gckOS_DestroySyncPoint(Kernel->os,
++ (gctSYNC_POINT) record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SYNC POINT %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++#endif
++
++ case gcvDB_SHBUF:
++ /* Free shared buffer. */
++ status = gckKERNEL_DestroyShBuffer(Kernel,
++ (gctSHBUF) record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SHBUF %u (status=%d)",
++ (gctUINT32)(gctUINTPTR_T) record->data, status);
++ break;
++
++ default:
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DATABASE,
++ "DB: Correcupted record=0x%08x type=%d",
++ record, record->type);
++ break;
++ }
++
++ /* Delete the record. */
++ gcmkONERROR(gckKERNEL_DeleteRecord(Kernel,
++ database,
++ record->type,
++ record->data,
++ gcvNULL));
++ }
++
++ }
++
++ /* Delete the database. */
++ gcmkONERROR(gckKERNEL_DeleteDatabase(Kernel, database));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_QueryProcessDB
++**
++** Query a process database for the current usage of a particular record type.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to query.
++**
++** OUTPUT:
++**
++** gcuDATABASE_INFO * Info
++** Pointer to a variable that receives the requested information.
++*/
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcePOOL vidMemPool;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Info=0x%x",
++ Kernel, ProcessID, Type, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++    /* Decode pool. */
++ vidMemPool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT;
++
++ Type &= gcdDATABASE_TYPE_MASK;
++
++ /* Find the database. */
++ if(Type != gcvDB_IDLE)
++ {
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(Kernel, ProcessID, LastProcessID, &database));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE));
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ if (vidMemPool != gcvPOOL_UNKNOWN)
++ {
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemPool[vidMemPool],
++ gcmSIZEOF(database->vidMemPool[vidMemPool]));
++ }
++ else
++ {
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMem,
++ gcmSIZEOF(database->vidMem));
++ }
++ break;
++
++ case gcvDB_NON_PAGED:
++ gckOS_MemCopy(&Info->counters,
++ &database->nonPaged,
++ gcmSIZEOF(database->vidMem));
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ gckOS_MemCopy(&Info->counters,
++ &database->contiguous,
++ gcmSIZEOF(database->vidMem));
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapMemory,
++ gcmSIZEOF(database->mapMemory));
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapUserMemory,
++ gcmSIZEOF(database->mapUserMemory));
++ break;
++
++ case gcvDB_COMMAND_BUFFER:
++ gckOS_MemCopy(&Info->counters,
++ &database->virtualCommandBuffer,
++ gcmSIZEOF(database->virtualCommandBuffer));
++ break;
++
++ default:
++ break;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex));
++ }
++ else
++ {
++ Info->time = Kernel->db->idleTime;
++ Kernel->db->idleTime = 0;
++ }
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FindHandleDatbase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gctPOINTER * HandleDatabase,
++ OUT gctPOINTER * HandleDatabaseMutex
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d",
++ Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ *HandleDatabase = database->handleDatabase;
++ *HandleDatabaseMutex = database->handleDatabaseMutex;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckKERNEL_GetProcessMMU(
++ IN gckKERNEL Kernel,
++ OUT gckMMU * Mmu
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctUINT32 processID;
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, processID, gcvFALSE, &database));
++
++ *Mmu = database->mmu;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++#endif
++
++#if gcdSECURE_USER
++/*******************************************************************************
++** gckKERNEL_GetProcessDBCache
++**
++**  Get the secure cache from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** gcskSECURE_CACHE_PTR * Cache
++** Pointer to a variable that receives the secure cache pointer.
++*/
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Cache != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Return the pointer to the cache. */
++ *Cache = &database->cache;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Cache=0x%x", *Cache);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ )
++{
++ gcsDATABASE_PTR database;
++ gctINT i, pid;
++ gctUINT8 name[24];
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** PROCESS DB DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkPRINT_N(8, "%-8s%s\n", "PID", "NAME");
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ for (database = Kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ pid = database->processID;
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ gcmkPRINT_N(8, "%-8d%s\n", pid, name);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_DumpCounter(
++ IN gcsDATABASE_COUNTERS * Counter,
++ IN gctCONST_STRING Name
++ )
++{
++ gcmkPRINT("%s:", Name);
++ gcmkPRINT(" Currently allocated : %10lld", Counter->bytes);
++ gcmkPRINT(" Maximum allocated : %10lld", Counter->maxBytes);
++ gcmkPRINT(" Total allocated : %10lld", Counter->totalBytes);
++}
++
++gceSTATUS
++gckKERNEL_DumpVidMemUsage(
++ IN gckKERNEL Kernel,
++ IN gctINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_COUNTERS * counter;
++ gctUINT32 i = 0;
++
++ static gctCONST_STRING surfaceTypes[] = {
++ "UNKNOWN",
++ "INDEX",
++ "VERTEX",
++ "TEXTURE",
++ "RENDER_TARGET",
++ "DEPTH",
++ "BITMAP",
++ "TILE_STATUS",
++ "IMAGE",
++ "MASK",
++ "SCISSOR",
++ "HIERARCHICAL_DEPTH",
++ };
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d",
++ Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Find the database. */
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ gcmkPRINT("VidMem Usage (Process %d):", ProcessID);
++
++ /* Get pointer to counters. */
++ counter = &database->vidMem;
++
++ _DumpCounter(counter, "Total Video Memory");
++
++ for (i = 0; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ _DumpCounter(counter, surfaceTypes[i]);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_debug.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_debug.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_debug.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_debug.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2785 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include <gc_hal_kernel_debug.h>
++
++/******************************************************************************\
++******************************** Debug Variables *******************************
++\******************************************************************************/
++
++static gceSTATUS _lastError = gcvSTATUS_OK;
++static gctUINT32 _debugLevel = gcvLEVEL_ERROR;
++/*
++_debugZones config value
++Please Reference define in gc_hal_base.h
++*/
++static gctUINT32 _debugZones = gcvZONE_NONE;
++
++/******************************************************************************\
++********************************* Debug Switches *******************************
++\******************************************************************************/
++
++/*
++ gcdBUFFERED_OUTPUT
++
++ When set to non-zero, all output is collected into a buffer with the
++ specified size. Once the buffer gets full, the debug buffer will be
++ printed to the console. gcdBUFFERED_SIZE determines the size of the buffer.
++*/
++#define gcdBUFFERED_OUTPUT 0
++
++/*
++ gcdBUFFERED_SIZE
++
++ When set to non-zero, all output is collected into a buffer with the
++ specified size. Once the buffer gets full, the debug buffer will be
++ printed to the console.
++*/
++#define gcdBUFFERED_SIZE (1024 * 1024 * 2)
++
++/*
++ gcdDMA_BUFFER_COUNT
++
++ If greater then zero, the debugger will attempt to find the command buffer
++ where DMA is currently executing and then print this buffer and
++ (gcdDMA_BUFFER_COUNT - 1) buffers before the current one. If set to zero
++ or the current buffer is not found, all buffers are printed.
++*/
++#define gcdDMA_BUFFER_COUNT 0
++
++/*
++ gcdTHREAD_BUFFERS
++
++ When greater then one, will accumulate messages from the specified number
++ of threads in separate output buffers.
++*/
++#define gcdTHREAD_BUFFERS 1
++
++/*
++ gcdENABLE_OVERFLOW
++
++ When set to non-zero, and the output buffer gets full, instead of being
++ printed, it will be allowed to overflow removing the oldest messages.
++*/
++#define gcdENABLE_OVERFLOW 1
++
++/*
++ gcdSHOW_LINE_NUMBER
++
++        When enabled, each print statement will be preceded with the current
++ line number.
++*/
++#define gcdSHOW_LINE_NUMBER 0
++
++/*
++ gcdSHOW_PROCESS_ID
++
++        When enabled, each print statement will be preceded with the current
++ process ID.
++*/
++#define gcdSHOW_PROCESS_ID 0
++
++/*
++ gcdSHOW_THREAD_ID
++
++        When enabled, each print statement will be preceded with the current
++ thread ID.
++*/
++#define gcdSHOW_THREAD_ID 0
++
++/*
++ gcdSHOW_TIME
++
++        When enabled each print statement will be preceded with the current
++ high-resolution time.
++*/
++#define gcdSHOW_TIME 0
++
++
++/******************************************************************************\
++****************************** Miscellaneous Macros ****************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmDBGASSERT(Expression, Format, Value) \
++ if (!(Expression)) \
++ { \
++ _DirectPrint( \
++ "*** gcmDBGASSERT ***************************\n" \
++ " function : %s\n" \
++ " line : %d\n" \
++ " expression : " #Expression "\n" \
++ " actual value : " Format "\n", \
++ __FUNCTION__, __LINE__, Value \
++ ); \
++ }
++#else
++# define gcmDBGASSERT(Expression, Format, Value)
++#endif
++
++#define gcmPTRALIGNMENT(Pointer, Alignemnt) \
++( \
++ gcmALIGN(gcmPTR2INT32(Pointer), Alignemnt) - gcmPTR2INT32(Pointer) \
++)
++
++#if gcdALIGNBYSIZE
++# define gcmISALIGNED(Offset, Alignment) \
++ (((Offset) & ((Alignment) - 1)) == 0)
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment) \
++ Pointer = (Type) gcmINT2PTR(gcmALIGN(gcmPTR2INT32(Pointer), Alignment))
++#else
++# define gcmISALIGNED(Offset, Alignment) \
++ gcvTRUE
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment)
++#endif
++
++#define gcmALIGNSIZE(Offset, Size) \
++ ((Size - Offset) + Size)
++
++#define gcdHAVEPREFIX \
++( \
++ gcdSHOW_TIME \
++ || gcdSHOW_LINE_NUMBER \
++ || gcdSHOW_PROCESS_ID \
++ || gcdSHOW_THREAD_ID \
++)
++
++#if gcdHAVEPREFIX
++
++# define gcdOFFSET 0
++
++#if gcdSHOW_TIME
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdTIMESIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdTIMESIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdTIMEFORMAT "0x%016llX"
++# else
++# define gcdTIMEFORMAT ", 0x%016llX"
++# endif
++# else
++# define gcdTIMESIZE 0
++# define gcdTIMEFORMAT
++# endif
++
++#if gcdSHOW_LINE_NUMBER
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdNUMSIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdNUMSIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdNUMFORMAT "%8llu"
++# else
++# define gcdNUMFORMAT ", %8llu"
++# endif
++# else
++# define gcdNUMSIZE 0
++# define gcdNUMFORMAT
++# endif
++
++#if gcdSHOW_PROCESS_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdPIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPIDFORMAT "pid=%5d"
++# else
++# define gcdPIDFORMAT ", pid=%5d"
++# endif
++# else
++# define gcdPIDSIZE 0
++# define gcdPIDFORMAT
++# endif
++
++#if gcdSHOW_THREAD_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdTIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdTIDFORMAT "tid=%5d"
++# else
++# define gcdTIDFORMAT ", tid=%5d"
++# endif
++# else
++# define gcdTIDSIZE 0
++# define gcdTIDFORMAT
++# endif
++
++# define gcdPREFIX_SIZE \
++ ( \
++ gcdTIMESIZE \
++ + gcdNUMSIZE \
++ + gcdPIDSIZE \
++ + gcdTIDSIZE \
++ )
++
++ static const char * _prefixFormat =
++ "["
++ gcdTIMEFORMAT
++ gcdNUMFORMAT
++ gcdPIDFORMAT
++ gcdTIDFORMAT
++ "] ";
++
++#else
++
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPREFIX_SIZE 0
++
++#endif
++
++/* Assumed largest variable argument leader size. */
++#define gcdVARARG_LEADER gcmSIZEOF(gctUINT64)
++
++/* Alignments. */
++#if gcdALIGNBYSIZE
++# define gcdPREFIX_ALIGNMENT gcdPREFIX_LEADER
++# define gcdVARARG_ALIGNMENT gcdVARARG_LEADER
++#else
++# define gcdPREFIX_ALIGNMENT 0
++# define gcdVARARG_ALIGNMENT 0
++#endif
++
++#if gcdBUFFERED_OUTPUT
++# define gcdOUTPUTPREFIX _AppendPrefix
++# define gcdOUTPUTSTRING _AppendString
++# define gcdOUTPUTCOPY _AppendCopy
++# define gcdOUTPUTBUFFER _AppendBuffer
++#else
++# define gcdOUTPUTPREFIX _PrintPrefix
++# define gcdOUTPUTSTRING _PrintString
++# define gcdOUTPUTCOPY _PrintString
++# define gcdOUTPUTBUFFER _PrintBuffer
++#endif
++
++/******************************************************************************\
++****************************** Private Structures ******************************
++\******************************************************************************/
++
++typedef enum _gceBUFITEM
++{
++ gceBUFITEM_NONE,
++ gcvBUFITEM_PREFIX,
++ gcvBUFITEM_STRING,
++ gcvBUFITEM_COPY,
++ gcvBUFITEM_BUFFER
++}
++gceBUFITEM;
++
++/* Common item head/buffer terminator. */
++typedef struct _gcsBUFITEM_HEAD * gcsBUFITEM_HEAD_PTR;
++typedef struct _gcsBUFITEM_HEAD
++{
++ gceBUFITEM type;
++}
++gcsBUFITEM_HEAD;
++
++/* String prefix (for ex. [ 1,tid=0x019A]) */
++typedef struct _gcsBUFITEM_PREFIX * gcsBUFITEM_PREFIX_PTR;
++typedef struct _gcsBUFITEM_PREFIX
++{
++ gceBUFITEM type;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_PREFIX;
++
++/* Buffered string. */
++typedef struct _gcsBUFITEM_STRING * gcsBUFITEM_STRING_PTR;
++typedef struct _gcsBUFITEM_STRING
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctCONST_STRING message;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_STRING;
++
++/* Buffered string (copy of the string is included with the record). */
++typedef struct _gcsBUFITEM_COPY * gcsBUFITEM_COPY_PTR;
++typedef struct _gcsBUFITEM_COPY
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_COPY;
++
++/* Memory buffer. */
++typedef struct _gcsBUFITEM_BUFFER * gcsBUFITEM_BUFFER_PTR;
++typedef struct _gcsBUFITEM_BUFFER
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gceDUMP_BUFFER bufferType;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ gctUINT32 dmaAddress;
++#endif
++
++ gctUINT dataSize;
++ gctUINT32 address;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_BUFFER;
++
++typedef struct _gcsBUFFERED_OUTPUT * gcsBUFFERED_OUTPUT_PTR;
++typedef struct _gcsBUFFERED_OUTPUT
++{
++#if gcdTHREAD_BUFFERS > 1
++ gctUINT32 threadID;
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ gctUINT64 lineNumber;
++#endif
++
++ gctINT indent;
++
++#if gcdBUFFERED_OUTPUT
++ gctINT start;
++ gctINT index;
++ gctINT count;
++ gctUINT8 buffer[gcdBUFFERED_SIZE];
++#endif
++
++ gcsBUFFERED_OUTPUT_PTR prev;
++ gcsBUFFERED_OUTPUT_PTR next;
++}
++gcsBUFFERED_OUTPUT;
++
++typedef gctUINT (* gcfPRINTSTRING) (
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++typedef gctINT (* gcfGETITEMSIZE) (
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++/******************************************************************************\
++******************************* Private Variables ******************************
++\******************************************************************************/
++
++static gcsBUFFERED_OUTPUT _outputBuffer[gcdTHREAD_BUFFERS];
++static gcsBUFFERED_OUTPUT_PTR _outputBufferHead = gcvNULL;
++static gcsBUFFERED_OUTPUT_PTR _outputBufferTail = gcvNULL;
++
++/******************************************************************************\
++****************************** Item Size Functions *****************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++static gctINT
++_GetTerminatorItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctINT
++_GetPrefixItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item = (gcsBUFITEM_PREFIX_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctINT
++_GetStringItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item = (gcsBUFITEM_STRING_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetCopyItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item = (gcsBUFITEM_COPY_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetBufferItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfGETITEMSIZE _itemSize[] =
++{
++ _GetTerminatorItemSize,
++ _GetPrefixItemSize,
++ _GetStringItemSize,
++ _GetCopyItemSize,
++ _GetBufferItemSize
++};
++#endif
++
++/******************************************************************************\
++******************************* Printing Functions *****************************
++\******************************************************************************/
++
++#if gcdDEBUG || gcdBUFFERED_OUTPUT
++static void
++_DirectPrint(
++ gctCONST_STRING Message,
++ ...
++ )
++{
++ gctINT len;
++ char buffer[768];
++ gctARGUMENTS arguments;
++
++ gcmkARGUMENTS_START(arguments, Message);
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), Message, &arguments);
++ gcmkARGUMENTS_END(arguments);
++
++ buffer[len] = '\0';
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static int
++_AppendIndent(
++ IN gctINT Indent,
++ IN char * Buffer,
++ IN int BufferSize
++ )
++{
++ gctINT i;
++
++ gctINT len = 0;
++ gctINT indent = Indent % 40;
++
++ for (i = 0; i < indent; i += 1)
++ {
++ Buffer[len++] = ' ';
++ }
++
++ if (indent != Indent)
++ {
++ len += gcmkSPRINTF(
++ Buffer + len, BufferSize - len, " <%d> ", Indent
++ );
++
++ Buffer[len] = '\0';
++ }
++
++ return len;
++}
++
++#if gcdHAVEPREFIX
++static void
++_PrintPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Format the string. */
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, Data);
++ buffer[len] = '\0';
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static void
++_PrintString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Append the indent string. */
++ len = _AppendIndent(Indent, buffer, gcmSIZEOF(buffer));
++
++ /* Format the string. */
++ len += gcmkVSPRINTF(buffer + len, gcmSIZEOF(buffer) - len, Message, Data);
++ buffer[len] = '\0';
++
++ /* Add end-of-line if missing. */
++ if (buffer[len - 1] != '\n')
++ {
++ buffer[len++] = '\n';
++ buffer[len] = '\0';
++ }
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++
++static void
++_PrintBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++ static gctCONST_STRING _titleString[] =
++ {
++ "CONTEXT BUFFER",
++ "USER COMMAND BUFFER",
++ "KERNEL COMMAND BUFFER",
++ "LINK BUFFER",
++ "WAIT LINK BUFFER",
++ ""
++ };
++
++ static const gctINT COLUMN_COUNT = 8;
++
++ gctUINT i, count, column, address;
++ gctUINT32_PTR data;
++ gctCHAR buffer[768];
++ gctUINT indent, len;
++ gctBOOL command;
++
++ /* Append space for the prefix. */
++#if gcdHAVEPREFIX
++ indent = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, PrefixData);
++ buffer[indent] = '\0';
++#else
++ indent = 0;
++#endif
++
++ /* Append the indent string. */
++ indent += _AppendIndent(
++ Indent, buffer + indent, gcmSIZEOF(buffer) - indent
++ );
++
++ switch (Type)
++ {
++ case gceDUMP_BUFFER_CONTEXT:
++ case gceDUMP_BUFFER_USER:
++ case gceDUMP_BUFFER_KERNEL:
++ case gceDUMP_BUFFER_LINK:
++ case gceDUMP_BUFFER_WAITLINK:
++ /* Form and print the title string. */
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "%s%s\n", _titleString[Type],
++ ((DmaAddress >= Address) && (DmaAddress < Address + DataSize))
++ ? " (CURRENT)" : ""
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++
++ /* This is a command buffer. */
++ command = gcvTRUE;
++ break;
++
++ case gceDUMP_BUFFER_FROM_USER:
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++
++ /* No title. */
++ break;
++
++ default:
++ gcmDBGASSERT(gcvFALSE, "%s", "invalid buffer type");
++
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++ }
++
++ /* Overwrite the prefix with spaces. */
++ for (i = 0; i < indent; i += 1)
++ {
++ buffer[i] = ' ';
++ }
++
++ /* Form and print the opening string. */
++ if (command)
++ {
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "@[kernel.command %08X %08X\n", Address, DataSize
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++ }
++
++ /* Get initial address. */
++ address = Address;
++
++ /* Cast the data pointer. */
++ data = (gctUINT32_PTR) Data;
++
++ /* Compute the number of double words. */
++ count = DataSize / gcmSIZEOF(gctUINT32);
++
++ /* Print the buffer. */
++ for (i = 0, len = indent, column = 0; i < count; i += 1)
++ {
++ /* Append the address. */
++ if (column == 0)
++ {
++ len += gcmkSPRINTF(
++ buffer + len, gcmSIZEOF(buffer) - len, "0x%08X:", address
++ );
++ }
++
++ /* Append the data value. */
++ len += gcmkSPRINTF2(
++ buffer + len, gcmSIZEOF(buffer) - len, "%c%08X",
++ (address == DmaAddress)? '>' : ' ', data[i]
++ );
++
++ buffer[len] = '\0';
++
++ /* Update the address. */
++ address += gcmSIZEOF(gctUINT32);
++
++ /* Advance column count. */
++ column += 1;
++
++ /* End of line? */
++ if ((column % COLUMN_COUNT) == 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Reset. */
++ len = indent;
++ column = 0;
++ }
++ }
++
++ /* Print the last partial string. */
++ if (column != 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++ }
++
++    /* Form and print the closing string. */
++ if (command)
++ {
++ buffer[indent] = '\0';
++ gcmkSTRCAT(buffer, gcmSIZEOF(buffer), "] -- command\n");
++ gcmkOUTPUT_STRING(buffer);
++ }
++}
++
++#if gcdBUFFERED_OUTPUT
++static gctUINT
++_PrintNone(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctUINT
++_PrintPrefixWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_PREFIX_PTR) Item;
++
++ /* Print the message. */
++ _PrintPrefix(OutputBuffer, item->prefixData);
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctUINT
++_PrintStringWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_STRING_PTR) Item;
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, item->message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintCopyWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item;
++ gctCONST_STRING message;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_COPY_PTR) Item;
++
++ /* Determine the string pointer. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintBufferWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctPOINTER data;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Compute the data address. */
++ data = ((gctUINT8_PTR) item->prefixData) + gcdPREFIX_SIZE;
++
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, item->prefixData,
++ data, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, gcvNULL,
++ item + 1, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfPRINTSTRING _printArray[] =
++{
++ _PrintNone,
++ _PrintPrefixWrapper,
++ _PrintStringWrapper,
++ _PrintCopyWrapper,
++ _PrintBufferWrapper
++};
++#endif
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++static gcsBUFITEM_BUFFER_PTR
++_FindCurrentDMABuffer(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR dmaCurrent;
++
++ /* Reset the current buffer. */
++ dmaCurrent = gcvNULL;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ if ((DmaAddress >= buffer->address) &&
++ (DmaAddress < buffer->address + buffer->dataSize))
++ {
++ dmaCurrent = buffer;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Return result. */
++ return dmaCurrent;
++}
++
++static void
++_EnableAllDMABuffers(
++ void
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Enable the buffer. */
++ buffer->dmaAddress = ~0U;
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++}
++
++static void
++_EnableDMABuffers(
++ gctUINT32 DmaAddress,
++ gcsBUFITEM_BUFFER_PTR CurrentDMABuffer
++ )
++{
++ gctINT i, skip, index;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR buffers[gcdDMA_BUFFER_COUNT];
++
++ /* Reset buffer pointers. */
++ gckOS_ZeroMemory(buffers, gcmSIZEOF(buffers));
++
++ /* Set the current buffer index. */
++ index = -1;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items until the current DMA buffer is found. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ /* Advance the index. */
++ index = (index + 1) % gcdDMA_BUFFER_COUNT;
++
++ /* Add to the buffer array. */
++ buffers[index] = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Stop if this is the current DMA buffer. */
++ if ((gcsBUFITEM_BUFFER_PTR) item == CurrentDMABuffer)
++ {
++ break;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Enable the found buffers. */
++ gcmDBGASSERT(index != -1, "%d", index);
++
++ for (i = 0; i < gcdDMA_BUFFER_COUNT; i += 1)
++ {
++ if (buffers[index] == gcvNULL)
++ {
++ break;
++ }
++
++ buffers[index]->dmaAddress = DmaAddress;
++
++ index -= 1;
++
++ if (index == -1)
++ {
++ index = gcdDMA_BUFFER_COUNT - 1;
++ }
++ }
++}
++#endif
++
++static void
++_Flush(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ gcsBUFFERED_OUTPUT_PTR outputBuffer = _outputBufferHead;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ if ((outputBuffer != gcvNULL) && (outputBuffer->count != 0))
++ {
++ /* Find the current DMA buffer. */
++ gcsBUFITEM_BUFFER_PTR dmaCurrent = _FindCurrentDMABuffer(DmaAddress);
++
++ /* Was the current buffer found? */
++ if (dmaCurrent == gcvNULL)
++ {
++ /* No, print all buffers. */
++ _EnableAllDMABuffers();
++ }
++ else
++ {
++ /* Yes, enable only specified number of buffers. */
++ _EnableDMABuffers(DmaAddress, dmaCurrent);
++ }
++ }
++#endif
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->count != 0)
++ {
++ _DirectPrint("********************************************************************************\n");
++ _DirectPrint("FLUSHING DEBUG OUTPUT BUFFER (%d elements).\n", outputBuffer->count);
++ _DirectPrint("********************************************************************************\n");
++
++ item = (gcsBUFITEM_HEAD_PTR) &outputBuffer->buffer[outputBuffer->start];
++
++ for (i = 0; i < outputBuffer->count; i += 1)
++ {
++ skip = (* _printArray[item->type]) (outputBuffer, item);
++
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) outputBuffer->buffer;
++ }
++ }
++
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++}
++
++static gcsBUFITEM_HEAD_PTR
++_AllocateItem(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Size
++ )
++{
++ gctINT skip;
++ gcsBUFITEM_HEAD_PTR item, next;
++
++#if gcdENABLE_OVERFLOW
++ if (
++ (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ ||
++ (
++ (OutputBuffer->index < OutputBuffer->start) &&
++ (OutputBuffer->index + Size >= OutputBuffer->start)
++ )
++ )
++ {
++ if (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ if (OutputBuffer->index < OutputBuffer->start)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (item->type != gceBUFITEM_NONE)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++ }
++
++ OutputBuffer->start = 0;
++ }
++
++ OutputBuffer->index = 0;
++ }
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (OutputBuffer->start - OutputBuffer->index <= Size)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ OutputBuffer->start = 0;
++ break;
++ }
++ }
++ }
++#else
++ if (OutputBuffer->index + Size > gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ _DirectPrint("\nMessage buffer full; forcing message flush.\n\n");
++ _Flush(~0U);
++ }
++#endif
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->index];
++
++ OutputBuffer->index += Size;
++ OutputBuffer->count += 1;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + Size);
++ next->type = gceBUFITEM_NONE;
++
++ return item;
++}
++
++#if gcdALIGNBYSIZE
++static void
++_FreeExtraSpace(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Item,
++ IN gctINT ItemSize,
++ IN gctINT FreeSize
++ )
++{
++ gcsBUFITEM_HEAD_PTR next;
++
++ OutputBuffer->index -= FreeSize;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) Item + ItemSize);
++ next->type = gceBUFITEM_NONE;
++}
++#endif
++
++#if gcdHAVEPREFIX
++static void
++_AppendPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_PREFIX_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_PREFIX)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_PREFIX_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_PREFIX;
++ item->prefixData = prefixData;
++
++ /* Copy argument value. */
++ memcpy(prefixData, Data, gcdPREFIX_SIZE);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_PREFIX) + gcdPREFIX_SIZE + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++#endif
++
++static void
++_AppendString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_STRING_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_STRING)
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_STRING_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_STRING;
++ item->indent = Indent;
++ item->message = Message;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_STRING) + ArgumentSize + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendCopy(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_COPY_PTR item;
++ gctINT allocSize;
++ gctINT messageLength;
++ gctCONST_STRING message;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Get the length of the string. */
++ messageLength = strlen(Message) + 1;
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_COPY_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Determine the message placement. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) message + messageLength;
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ if (ArgumentSize == 0)
++ {
++ alignment = 0;
++ }
++ else
++ {
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++ }
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_COPY;
++ item->indent = Indent;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy the message. */
++ memcpy((gctPOINTER) message, Message, messageLength);
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT allocSize;
++ gctPOINTER data;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT
++ + DataSize;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++#if gcdALIGNBYSIZE
++ /* Align the data pointer as necessary. */
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->bufferType = Type;
++ item->dataSize = DataSize;
++ item->address = Address;
++ item->prefixData = prefixData;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ item->dmaAddress = DmaAddress;
++#endif
++
++ /* Copy prefix data. */
++ memcpy(prefixData, PrefixData, gcdPREFIX_SIZE);
++
++ /* Compute the data pointer. */
++ data = prefixData + gcdPREFIX_SIZE;
++
++ /* Copy argument value. */
++ memcpy(data, Data, DataSize);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + alignment
++ + DataSize;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++#else
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT size;
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ size = gcmSIZEOF(gcsBUFITEM_BUFFER) + DataSize;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, size);
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->dataSize = DataSize;
++ item->address = Address;
++
++ /* Copy argument value. */
++ memcpy(item + 1, Data, DataSize);
++#endif
++}
++#endif
++
++static gcmINLINE void
++_InitBuffers(
++ void
++ )
++{
++ int i;
++
++ if (_outputBufferHead == gcvNULL)
++ {
++ for (i = 0; i < gcdTHREAD_BUFFERS; i += 1)
++ {
++ if (_outputBufferTail == gcvNULL)
++ {
++ _outputBufferHead = &_outputBuffer[i];
++ }
++ else
++ {
++ _outputBufferTail->next = &_outputBuffer[i];
++ }
++
++#if gcdTHREAD_BUFFERS > 1
++ _outputBuffer[i].threadID = ~0U;
++#endif
++
++ _outputBuffer[i].prev = _outputBufferTail;
++ _outputBuffer[i].next = gcvNULL;
++
++ _outputBufferTail = &_outputBuffer[i];
++ }
++ }
++}
++
++static gcmINLINE gcsBUFFERED_OUTPUT_PTR
++_GetOutputBuffer(
++ void
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++
++#if gcdTHREAD_BUFFERS > 1
++ /* Get the current thread ID. */
++ gctUINT32 ThreadID = gcmkGETTHREADID();
++
++ /* Locate the output buffer for the thread. */
++ outputBuffer = _outputBufferHead;
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->threadID == ThreadID)
++ {
++ break;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++
++ /* No matching buffer found? */
++ if (outputBuffer == gcvNULL)
++ {
++ /* Get the tail for the buffer. */
++ outputBuffer = _outputBufferTail;
++
++ /* Move it to the head. */
++ _outputBufferTail = _outputBufferTail->prev;
++ _outputBufferTail->next = gcvNULL;
++
++ outputBuffer->prev = gcvNULL;
++ outputBuffer->next = _outputBufferHead;
++
++ _outputBufferHead->prev = outputBuffer;
++ _outputBufferHead = outputBuffer;
++
++ /* Reset the buffer. */
++ outputBuffer->threadID = ThreadID;
++#if gcdBUFFERED_OUTPUT
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++#endif
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber = 0;
++#endif
++ }
++#else
++ outputBuffer = _outputBufferHead;
++#endif
++
++ return outputBuffer;
++}
++
++static gcmINLINE int _GetArgumentSize(
++ IN gctCONST_STRING Message
++ )
++{
++ int i, count;
++
++ gcmDBGASSERT(Message != gcvNULL, "%p", Message);
++
++ for (i = 0, count = 0; Message[i]; i += 1)
++ {
++ if (Message[i] == '%')
++ {
++ count += 1;
++ }
++ }
++
++ return count * gcmSIZEOF(gctUINT32);
++}
++
++#if gcdHAVEPREFIX
++static void
++_InitPrefixData(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR data = (gctUINT8_PTR) Data;
++
++#if gcdSHOW_TIME
++ {
++ gctUINT64 time;
++ gckOS_GetProfileTick(&time);
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = time;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = OutputBuffer->lineNumber;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_PROCESS_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETPROCESSID();
++ data += gcmSIZEOF(gctUINT32);
++ }
++#endif
++
++#if gcdSHOW_THREAD_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETTHREADID();
++ }
++#endif
++}
++#endif
++
++static void
++_Print(
++ IN gctUINT ArgumentSize,
++ IN gctBOOL CopyMessage,
++ IN gctCONST_STRING Message,
++ IN gctARGUMENTS * Arguments
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++ gcmkDECLARE_LOCK(lockHandle);
++
++ gcmkLOCKSECTION(lockHandle);
++
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Print prefix. */
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print the prefix. */
++ gcdOUTPUTPREFIX(outputBuffer, alignedPrefixData);
++ }
++#endif
++
++ /* Form the indent string. */
++ if (strncmp(Message, "--", 2) == 0)
++ {
++ outputBuffer->indent -= 2;
++ }
++
++ /* Print the message. */
++ if (CopyMessage)
++ {
++ gcdOUTPUTCOPY(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, (gctPOINTER) Arguments
++ );
++ }
++ else
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, ((gctPOINTER) Arguments)
++ );
++ }
++
++ /* Check increasing indent. */
++ if (strncmp(Message, "++", 2) == 0)
++ {
++ outputBuffer->indent += 2;
++ }
++
++ gcmkUNLOCKSECTION(lockHandle);
++}
++
++
++/******************************************************************************\
++********************************* Debug Macros *********************************
++\******************************************************************************/
++
++#ifdef __QNXNTO__
++
++extern volatile unsigned g_nQnxInIsrs;
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ if (atomic_add_value(&g_nQnxInIsrs, 1) == 0) \
++ { \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, &__arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++ } \
++ atomic_sub(&g_nQnxInIsrs, 1); \
++}
++
++#else
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, &__arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++}
++
++#endif
++
++/******************************************************************************\
++********************************** Debug Code **********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Print
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_PrintN
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyPrint
++**
++** Send a message to the debugger. If in buffered output mode, the entire
++** message will be copied into the buffer instead of using the pointer to
++** the string.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvTRUE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DumpBuffer
++**
++** Print the contents of the specified buffer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Buffer
++** Pointer to the buffer to print.
++**
++** gctUINT Size
++** Size of the buffer.
++**
++** gceDUMP_BUFFER Type
++** Buffer type.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ )
++{
++ gctUINT32 address = 0;
++ gcsBUFFERED_OUTPUT_PTR outputBuffer = gcvNULL;
++ static gctBOOL userLocked;
++ gctCHAR *buffer = (gctCHAR*)Buffer;
++
++ gcmkDECLARE_LOCK(lockHandle);
++
++ /* Request lock when not coming from user,
++ or coming from user and not yet locked
++ and message is starting with @[. */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ if ((Size > 2)
++ && (buffer[0] == '@')
++ && (buffer[1] == '['))
++ {
++ /* Beginning of a user dump. */
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvTRUE;
++ }
++ /* Else, let it pass through. */
++ }
++ else
++ {
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++
++ if (Buffer != gcvNULL)
++ {
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Get the physical address of the buffer. */
++ if (Type != gceDUMP_BUFFER_FROM_USER)
++ {
++ gcmkVERIFY_OK(gckOS_GetPhysicalAddress(Os, Buffer, &address));
++ }
++ else
++ {
++ address = 0;
++ }
++
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print/schedule the buffer. */
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ alignedPrefixData, Buffer, address, Size, Type, 0
++ );
++ }
++#else
++ /* Print/schedule the buffer. */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Buffer, 0, gcvNULL
++ );
++ }
++ else
++ {
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ gcvNULL, Buffer, address, Size, Type, 0
++ );
++ }
++#endif
++ }
++
++ /* Unlock when not coming from user,
++ or coming from user and not yet locked. */
++ if (userLocked)
++ {
++ if ((Size > 4)
++ && (buffer[0] == ']')
++ && (buffer[1] == ' ')
++ && (buffer[2] == '-')
++ && (buffer[3] == '-'))
++ {
++ /* End of a user dump. */
++ gcmkUNLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++ /* Else, let it pass through, don't unlock. */
++ }
++ else
++ {
++ gcmkUNLOCKSECTION(lockHandle);
++ }
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTrace
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceN
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZone
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZoneN
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugBreak
++**
++** Break into the debugger.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugBreak(
++ void
++ )
++{
++ gckOS_DebugTrace(gcvLEVEL_ERROR, "%s(%d)", __FUNCTION__, __LINE__);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFatal
++**
++** Send a message to the debugger and break into the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmkPRINT_VERSION();
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++
++ /* Break into the debugger. */
++ gckOS_DebugBreak();
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevel
++**
++** Set the debug level.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ )
++{
++ _debugLevel = Level;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZone
++**
++** Set the debug zone.
++**
++** INPUT:
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ )
++{
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevelZone
++**
++** Set the debug level and zone.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ )
++{
++ _debugLevel = Level;
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZones
++**
++** Enable or disable debug zones.
++**
++** INPUT:
++**
++** gctUINT32 Zones
++** Debug zones to enable or disable.
++**
++** gctBOOL Enable
++** Set to gcvTRUE to enable the zones (or the Zones with the current
++** zones) or gcvFALSE to disable the specified Zones.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ )
++{
++ if (Enable)
++ {
++ /* Enable the zones. */
++ _debugZones |= Zones;
++ }
++ else
++ {
++ /* Disable the zones. */
++ _debugZones &= ~Zones;
++ }
++}
++
++/*******************************************************************************
++**
++** gckOS_Verify
++**
++** Called to verify the result of a function call.
++**
++** INPUT:
++**
++** gceSTATUS Status
++** Function call result.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ )
++{
++ _lastError = status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFlush
++**
++** Force messages to be flushed out.
++**
++** INPUT:
++**
++** gctCONST_STRING CallerName
++** Name of the caller function.
++**
++** gctUINT LineNumber
++** Line number of the caller.
++**
++** gctUINT32 DmaAddress
++** The current DMA address or ~0U to ignore.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ )
++{
++#if gcdBUFFERED_OUTPUT
++ _DirectPrint("\nFlush requested by %s(%d).\n\n", CallerName, LineNumber);
++ _Flush(DmaAddress);
++#endif
++}
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ )
++{
++ switch (status)
++ {
++ case gcvSTATUS_OK:
++ return "gcvSTATUS_OK";
++ case gcvSTATUS_TRUE:
++ return "gcvSTATUS_TRUE";
++ case gcvSTATUS_NO_MORE_DATA:
++ return "gcvSTATUS_NO_MORE_DATA";
++ case gcvSTATUS_CACHED:
++ return "gcvSTATUS_CACHED";
++ case gcvSTATUS_MIPMAP_TOO_LARGE:
++ return "gcvSTATUS_MIPMAP_TOO_LARGE";
++ case gcvSTATUS_NAME_NOT_FOUND:
++ return "gcvSTATUS_NAME_NOT_FOUND";
++ case gcvSTATUS_NOT_OUR_INTERRUPT:
++ return "gcvSTATUS_NOT_OUR_INTERRUPT";
++ case gcvSTATUS_MISMATCH:
++ return "gcvSTATUS_MISMATCH";
++ case gcvSTATUS_MIPMAP_TOO_SMALL:
++ return "gcvSTATUS_MIPMAP_TOO_SMALL";
++ case gcvSTATUS_LARGER:
++ return "gcvSTATUS_LARGER";
++ case gcvSTATUS_SMALLER:
++ return "gcvSTATUS_SMALLER";
++ case gcvSTATUS_CHIP_NOT_READY:
++ return "gcvSTATUS_CHIP_NOT_READY";
++ case gcvSTATUS_NEED_CONVERSION:
++ return "gcvSTATUS_NEED_CONVERSION";
++ case gcvSTATUS_SKIP:
++ return "gcvSTATUS_SKIP";
++ case gcvSTATUS_DATA_TOO_LARGE:
++ return "gcvSTATUS_DATA_TOO_LARGE";
++ case gcvSTATUS_INVALID_CONFIG:
++ return "gcvSTATUS_INVALID_CONFIG";
++ case gcvSTATUS_CHANGED:
++ return "gcvSTATUS_CHANGED";
++ case gcvSTATUS_NOT_SUPPORT_DITHER:
++ return "gcvSTATUS_NOT_SUPPORT_DITHER";
++
++ case gcvSTATUS_INVALID_ARGUMENT:
++ return "gcvSTATUS_INVALID_ARGUMENT";
++ case gcvSTATUS_INVALID_OBJECT:
++ return "gcvSTATUS_INVALID_OBJECT";
++ case gcvSTATUS_OUT_OF_MEMORY:
++ return "gcvSTATUS_OUT_OF_MEMORY";
++ case gcvSTATUS_MEMORY_LOCKED:
++ return "gcvSTATUS_MEMORY_LOCKED";
++ case gcvSTATUS_MEMORY_UNLOCKED:
++ return "gcvSTATUS_MEMORY_UNLOCKED";
++ case gcvSTATUS_HEAP_CORRUPTED:
++ return "gcvSTATUS_HEAP_CORRUPTED";
++ case gcvSTATUS_GENERIC_IO:
++ return "gcvSTATUS_GENERIC_IO";
++ case gcvSTATUS_INVALID_ADDRESS:
++ return "gcvSTATUS_INVALID_ADDRESS";
++ case gcvSTATUS_CONTEXT_LOSSED:
++ return "gcvSTATUS_CONTEXT_LOSSED";
++ case gcvSTATUS_TOO_COMPLEX:
++ return "gcvSTATUS_TOO_COMPLEX";
++ case gcvSTATUS_BUFFER_TOO_SMALL:
++ return "gcvSTATUS_BUFFER_TOO_SMALL";
++ case gcvSTATUS_INTERFACE_ERROR:
++ return "gcvSTATUS_INTERFACE_ERROR";
++ case gcvSTATUS_NOT_SUPPORTED:
++ return "gcvSTATUS_NOT_SUPPORTED";
++ case gcvSTATUS_MORE_DATA:
++ return "gcvSTATUS_MORE_DATA";
++ case gcvSTATUS_TIMEOUT:
++ return "gcvSTATUS_TIMEOUT";
++ case gcvSTATUS_OUT_OF_RESOURCES:
++ return "gcvSTATUS_OUT_OF_RESOURCES";
++ case gcvSTATUS_INVALID_DATA:
++ return "gcvSTATUS_INVALID_DATA";
++ case gcvSTATUS_INVALID_MIPMAP:
++ return "gcvSTATUS_INVALID_MIPMAP";
++ case gcvSTATUS_NOT_FOUND:
++ return "gcvSTATUS_NOT_FOUND";
++ case gcvSTATUS_NOT_ALIGNED:
++ return "gcvSTATUS_NOT_ALIGNED";
++ case gcvSTATUS_INVALID_REQUEST:
++ return "gcvSTATUS_INVALID_REQUEST";
++ case gcvSTATUS_GPU_NOT_RESPONDING:
++ return "gcvSTATUS_GPU_NOT_RESPONDING";
++ case gcvSTATUS_TIMER_OVERFLOW:
++ return "gcvSTATUS_TIMER_OVERFLOW";
++ case gcvSTATUS_VERSION_MISMATCH:
++ return "gcvSTATUS_VERSION_MISMATCH";
++ case gcvSTATUS_LOCKED:
++ return "gcvSTATUS_LOCKED";
++ case gcvSTATUS_INTERRUPTED:
++ return "gcvSTATUS_INTERRUPTED";
++ case gcvSTATUS_DEVICE:
++ return "gcvSTATUS_DEVICE";
++ case gcvSTATUS_NOT_MULTI_PIPE_ALIGNED:
++ return "gcvSTATUS_NOT_MULTI_PIPE_ALIGNED";
++
++ /* Linker errors. */
++ case gcvSTATUS_GLOBAL_TYPE_MISMATCH:
++ return "gcvSTATUS_GLOBAL_TYPE_MISMATCH";
++ case gcvSTATUS_TOO_MANY_ATTRIBUTES:
++ return "gcvSTATUS_TOO_MANY_ATTRIBUTES";
++ case gcvSTATUS_TOO_MANY_UNIFORMS:
++ return "gcvSTATUS_TOO_MANY_UNIFORMS";
++ case gcvSTATUS_TOO_MANY_SAMPLER:
++ return "gcvSTATUS_TOO_MANY_SAMPLER";
++ case gcvSTATUS_TOO_MANY_VARYINGS:
++ return "gcvSTATUS_TOO_MANY_VARYINGS";
++ case gcvSTATUS_UNDECLARED_VARYING:
++ return "gcvSTATUS_UNDECLARED_VARYING";
++ case gcvSTATUS_VARYING_TYPE_MISMATCH:
++ return "gcvSTATUS_VARYING_TYPE_MISMATCH";
++ case gcvSTATUS_MISSING_MAIN:
++ return "gcvSTATUS_MISSING_MAIN";
++ case gcvSTATUS_NAME_MISMATCH:
++ return "gcvSTATUS_NAME_MISMATCH";
++ case gcvSTATUS_INVALID_INDEX:
++ return "gcvSTATUS_INVALID_INDEX";
++ case gcvSTATUS_UNIFORM_MISMATCH:
++ return "gcvSTATUS_UNIFORM_MISMATCH";
++ case gcvSTATUS_UNSAT_LIB_SYMBOL:
++ return "gcvSTATUS_UNSAT_LIB_SYMBOL";
++ case gcvSTATUS_TOO_MANY_SHADERS:
++ return "gcvSTATUS_TOO_MANY_SHADERS";
++ case gcvSTATUS_LINK_INVALID_SHADERS:
++ return "gcvSTATUS_LINK_INVALID_SHADERS";
++ case gcvSTATUS_CS_NO_WORKGROUP_SIZE:
++ return "gcvSTATUS_CS_NO_WORKGROUP_SIZE";
++ case gcvSTATUS_LINK_LIB_ERROR:
++ return "gcvSTATUS_LINK_LIB_ERROR";
++ case gcvSTATUS_SHADER_VERSION_MISMATCH:
++ return "gcvSTATUS_SHADER_VERSION_MISMATCH";
++ case gcvSTATUS_TOO_MANY_INSTRUCTION:
++ return "gcvSTATUS_TOO_MANY_INSTRUCTION";
++ case gcvSTATUS_SSBO_MISMATCH:
++ return "gcvSTATUS_SSBO_MISMATCH";
++ case gcvSTATUS_TOO_MANY_OUTPUT:
++ return "gcvSTATUS_TOO_MANY_OUTPUT";
++ case gcvSTATUS_TOO_MANY_INPUT:
++ return "gcvSTATUS_TOO_MANY_INPUT";
++ case gcvSTATUS_NOT_SUPPORT_CL:
++ return "gcvSTATUS_NOT_SUPPORT_CL";
++ case gcvSTATUS_NOT_SUPPORT_INTEGER:
++ return "gcvSTATUS_NOT_SUPPORT_INTEGER";
++
++ /* Compiler errors. */
++ case gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR:
++ return "gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR";
++ case gcvSTATUS_COMPILER_FE_PARSER_ERROR:
++ return "gcvSTATUS_COMPILER_FE_PARSER_ERROR";
++
++ default:
++ return "nil";
++ }
++}
++
++/*******************************************************************************
++***** Binary Trace *************************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++** _VerifyMessage
++**
++** Verify a binary trace message, decode it to human readable string and print
++** it.
++**
++** ARGUMENTS:
++**
++** gctCONST_STRING Buffer
++** Pointer to buffer to store.
++**
++** gctSIZE_T Bytes
++** Buffer length.
++*/
++void
++_VerifyMessage(
++ IN gctCONST_STRING Buffer,
++ IN gctSIZE_T Bytes
++ )
++{
++ char arguments[150] = {0};
++ char format[100] = {0};
++
++ gctSTRING function;
++ gctPOINTER args;
++ gctUINT32 numArguments;
++ int i = 0;
++ gctUINT32 functionBytes;
++
++ gcsBINARY_TRACE_MESSAGE_PTR message = (gcsBINARY_TRACE_MESSAGE_PTR)Buffer;
++
++ /* Check signature. */
++ if (message->signature != 0x7FFFFFFF)
++ {
++ gcmkPRINT("Signature error");
++ return;
++ }
++
++ /* Get function name. */
++ function = (gctSTRING)&message->payload;
++ functionBytes = (gctUINT32)strlen(function) + 1;
++
++ /* Get arguments number. */
++ numArguments = message->numArguments;
++
++ /* Get arguments . */
++ args = function + functionBytes;
++
++ /* Prepare format string. */
++ while (numArguments--)
++ {
++ format[i++] = '%';
++ format[i++] = 'x';
++ format[i++] = ' ';
++ }
++
++ format[i] = '\0';
++
++ if (numArguments)
++ {
++ gcmkVSPRINTF(arguments, 150, format, (gctARGUMENTS *) &args);
++ }
++
++ gcmkPRINT("[%d](%d): %s(%d) %s",
++ message->pid,
++ message->tid,
++ function,
++ message->line,
++ arguments);
++}
++
++
++/*******************************************************************************
++** gckOS_WriteToRingBuffer
++**
++** Store a buffer to ring buffer.
++**
++** ARGUMENTS:
++**
++** gctCONST_STRING Buffer
++** Pointer to buffer to store.
++**
++** gctSIZE_T Bytes
++** Buffer length.
++*/
++void
++gckOS_WriteToRingBuffer(
++ IN gctCONST_STRING Buffer,
++ IN gctSIZE_T Bytes
++ )
++{
++
++}
++
++/*******************************************************************************
++** gckOS_BinaryTrace
++**
++** Output a binary trace message.
++**
++** ARGUMENTS:
++**
++** gctCONST_STRING Function
++** Pointer to function name.
++**
++** gctINT Line
++** Line number.
++**
++** gctCONST_STRING Text OPTIONAL
++** Optional pointer to a descriptive text.
++**
++** ...
++** Optional arguments to the descriptive text.
++*/
++void
++gckOS_BinaryTrace(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text OPTIONAL,
++ ...
++ )
++{
++ static gctUINT32 messageSignature = 0x7FFFFFFF;
++ char buffer[gcdBINARY_TRACE_MESSAGE_SIZE];
++ gctUINT32 numArguments = 0;
++ gctUINT32 functionBytes;
++ gctUINT32 i = 0;
++ gctSTRING payload;
++ gcsBINARY_TRACE_MESSAGE_PTR message = (gcsBINARY_TRACE_MESSAGE_PTR)buffer;
++
++ /* Calculate arguments number. */
++ if (Text)
++ {
++ while (Text[i] != '\0')
++ {
++ if (Text[i] == '%')
++ {
++ numArguments++;
++ }
++ i++;
++ }
++ }
++
++ message->signature = messageSignature;
++ message->pid = gcmkGETPROCESSID();
++ message->tid = gcmkGETTHREADID();
++ message->line = Line;
++ message->numArguments = numArguments;
++
++ payload = (gctSTRING)&message->payload;
++
++ /* Function name. */
++ functionBytes = (gctUINT32)gcmkSTRLEN(Function) + 1;
++ gcmkMEMCPY(payload, Function, functionBytes);
++
++ /* Advance to next payload. */
++ payload += functionBytes;
++
++ /* Arguments value. */
++ if (numArguments)
++ {
++ gctARGUMENTS p;
++ gcmkARGUMENTS_START(p, Text);
++
++ for (i = 0; i < numArguments; ++i)
++ {
++ gctPOINTER value = gcmkARGUMENTS_ARG(p, gctPOINTER);
++ gcmkMEMCPY(payload, &value, gcmSIZEOF(gctPOINTER));
++ payload += gcmSIZEOF(gctPOINTER);
++ }
++
++ gcmkARGUMENTS_END(p);
++ }
++
++ gcmkASSERT(payload - buffer <= gcdBINARY_TRACE_MESSAGE_SIZE);
++
++
++ /* Send buffer to ring buffer. */
++ gckOS_WriteToRingBuffer(buffer, (gctUINT32)(payload - buffer));
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_event.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_event.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_event.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_event.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3459 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_buffer.h"
++
++#ifdef __QNXNTO__
++#include <atomic.h>
++#include "gc_hal_kernel_qnx.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_EVENT
++
++#define gcdEVENT_ALLOCATION_COUNT (4096 / gcmSIZEOF(gcsHAL_INTERFACE))
++#define gcdEVENT_MIN_THRESHOLD 4
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++static gceSTATUS
++gckEVENT_AllocateQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR * Queue
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++ /* Do we have free queues? */
++ if (Event->freeList == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Move one free queue from the free list. */
++ * Queue = Event->freeList;
++ Event->freeList = Event->freeList->next;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Queue=0x%x", gcmOPT_POINTER(Queue));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++ /* Move one free queue from the free list. */
++ Queue->next = Event->freeList;
++ Event->freeList = Queue;
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeRecord(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->freeEventMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Push the record on the free list. */
++ Record->next = Event->freeEventList;
++ Event->freeEventList = Record;
++ Event->freeEventCount += 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++gckEVENT_IsEmpty(
++ IN gckEVENT Event,
++ OUT gctBOOL_PTR IsEmpty
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T i;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(IsEmpty != gcvNULL);
++
++ /* Assume the event queue is empty. */
++ *IsEmpty = gcvTRUE;
++
++ /* Walk the event queue. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ /* Check whether this event is in use. */
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* The event is in use, hence the queue is not empty. */
++ *IsEmpty = gcvFALSE;
++ break;
++ }
++ }
++
++ /* Try acquiring the mutex. */
++ status = gckOS_AcquireMutex(Event->os, Event->eventQueueMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Timeout - queue is no longer empty. */
++ *IsEmpty = gcvFALSE;
++ }
++ else
++ {
++ /* Bail out on error. */
++ gcmkONERROR(status);
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*IsEmpty=%d", gcmOPT_VALUE(IsEmpty));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_TryToIdleGPU(
++ IN gckEVENT Event
++)
++{
++ gceSTATUS status;
++ gctBOOL empty = gcvFALSE, idle = gcvFALSE;
++ gctBOOL powerLocked = gcvFALSE;
++ gckHARDWARE hardware;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Event->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Check whether the event queue is empty. */
++ gcmkONERROR(gckEVENT_IsEmpty(Event, &empty));
++
++ if (empty)
++ {
++ status = gckOS_AcquireMutex(hardware->os, hardware->powerMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ powerLocked = gcvTRUE;
++
++ /* Query whether the hardware is idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Event->kernel->hardware, &idle));
++
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ powerLocked = gcvFALSE;
++
++ if (idle)
++ {
++ /* Inform the system of idle GPU. */
++ gcmkONERROR(gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_IDLE));
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerLocked)
++ {
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++__RemoveRecordFromProcessDB(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ while (Record != gcvNULL)
++ {
++ if (Record->info.command == gcvHAL_SIGNAL)
++ {
++ /* TODO: Find a better place to bind signal to hardware.*/
++ gcmkVERIFY_OK(gckOS_SignalSetHardware(Event->os,
++ gcmUINT64_TO_PTR(Record->info.u.Signal.signal),
++ Event->kernel->hardware));
++ }
++
++ if (Record->fromKernel)
++ {
++ /* No need to check db if event is from kernel. */
++ Record = Record->next;
++ continue;
++ }
++
++ switch (Record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Record->info.u.FreeNonPagedMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Record->info.u.FreeContiguousMemory.logical)));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(Record->info.u.UnlockVideoMemory.node)));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Record->info.u.UnmapUserMemory.info)));
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_COMMAND_BUFFER,
++ gcmUINT64_TO_PTR(Record->info.u.FreeVirtualCommandBuffer.logical)));
++ break;
++
++ default:
++ break;
++ }
++
++ Record = Record->next;
++ }
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_ReleaseVideoMemoryHandle(
++ IN gckKERNEL Kernel,
++ IN OUT gcsEVENT_PTR Record,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE nodeObject;
++ gctUINT32 handle;
++
++ switch(Interface->command)
++ {
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ handle = (gctUINT32)Interface->u.UnlockVideoMemory.node;
++
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
++ Kernel, Record->processID, handle, &nodeObject));
++
++ Record->info.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(nodeObject);
++
++ gckVIDMEM_HANDLE_Dereference(Kernel, Record->processID, handle);
++ break;
++
++ default:
++ break;
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++/*******************************************************************************
++**
++** _QueryFlush
++**
++** Check the type of surfaces which will be released by current event and
++** determine the cache needed to flush.
++**
++*/
++static gceSTATUS
++_QueryFlush(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record,
++ OUT gceKERNEL_FLUSH *Flush
++ )
++{
++ gceKERNEL_FLUSH flush = 0;
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ while (Record != gcvNULL)
++ {
++ switch (Record->info.command)
++ {
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ switch(Record->info.u.UnlockVideoMemory.type)
++ {
++ case gcvSURF_TILE_STATUS:
++ flush |= gcvFLUSH_TILE_STATUS;
++ break;
++ case gcvSURF_RENDER_TARGET:
++ flush |= gcvFLUSH_COLOR;
++ break;
++ case gcvSURF_DEPTH:
++ flush |= gcvFLUSH_DEPTH;
++ break;
++ case gcvSURF_TEXTURE:
++ flush |= gcvFLUSH_TEXTURE;
++ break;
++ case gcvSURF_TYPE_UNKNOWN:
++ gcmkASSERT(0);
++ break;
++ default:
++ break;
++ }
++ break;
++ case gcvHAL_UNMAP_USER_MEMORY:
++ *Flush = gcvFLUSH_ALL;
++ return gcvSTATUS_OK;
++
++ default:
++ break;
++ }
++
++ Record = Record->next;
++ }
++
++ *Flush = flush;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_SubmitTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckEVENT event = (gckEVENT)Data;
++#if gcdMULTI_GPU
++ gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE));
++#endif
++}
++
++/******************************************************************************\
++******************************* gckEVENT API Code *******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckEVENT_Construct
++**
++** Construct a new gckEVENT object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckEVENT * Event
++** Pointer to a variable that receives the gckEVENT object pointer.
++*/
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gckEVENT eventObj = gcvNULL;
++ int i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Event != gcvNULL);
++
++ /* Extract the pointer to the gckOS object. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate the gckEVENT object. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckEVENT), &pointer));
++
++ eventObj = pointer;
++
++ /* Reset the object. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(eventObj, gcmSIZEOF(struct _gckEVENT)));
++
++ /* Initialize the gckEVENT object. */
++ eventObj->object.type = gcvOBJ_EVENT;
++ eventObj->kernel = Kernel;
++ eventObj->os = os;
++
++ /* Create the mutexes. */
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventQueueMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->freeEventMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventListMutex));
++
++ /* Create a bunch of event reccords. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsEVENT), &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = eventObj->freeEventList;
++ eventObj->freeEventList = record;
++ eventObj->freeEventCount += 1;
++ }
++
++ /* Initialize the free list of event queues. */
++ for (i = 0; i < gcdREPO_LIST_COUNT; i += 1)
++ {
++ eventObj->repoList[i].next = eventObj->freeList;
++ eventObj->freeList = &eventObj->repoList[i];
++ }
++
++ /* Construct the atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->freeAtom));
++ gcmkONERROR(gckOS_AtomSet(os,
++ eventObj->freeAtom,
++ gcmCOUNTOF(eventObj->queues)));
++
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending));
++
++#if gcdMULTI_GPU
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending3D[i]));
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending3DMask[i]));
++ }
++
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pendingMask));
++#endif
++
++#endif
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(os,
++ _SubmitTimerFunction,
++ (gctPOINTER)eventObj,
++ &eventObj->submitTimer));
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->interruptCount));
++ gcmkONERROR(gckOS_AtomSet(os,eventObj->interruptCount, 0));
++#endif
++
++ /* Return pointer to the gckEVENT object. */
++ *Event = eventObj;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Event=0x%x", *Event);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (eventObj != gcvNULL)
++ {
++ if (eventObj->eventQueueMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventQueueMutex));
++ }
++
++ if (eventObj->freeEventMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->freeEventMutex));
++ }
++
++ if (eventObj->eventListMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventListMutex));
++ }
++
++ while (eventObj->freeEventList != gcvNULL)
++ {
++ record = eventObj->freeEventList;
++ eventObj->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, record));
++ }
++
++ if (eventObj->freeAtom != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->freeAtom));
++ }
++
++#if gcdSMP
++ if (eventObj->pending != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending));
++ }
++
++#if gcdMULTI_GPU
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ if (eventObj->pending3D[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending3D[i]));
++ }
++
++ if (eventObj->pending3DMask[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending3DMask[i]));
++ }
++ }
++#endif
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ if (eventObj->interruptCount)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->interruptCount));
++ }
++#endif
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, eventObj));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Destroy
++**
++** Destroy an gckEVENT object.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_PTR record;
++ gcsEVENT_QUEUE_PTR queue;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ if (Event->submitTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Event->os, Event->submitTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Event->os, Event->submitTimer));
++ }
++
++ /* Delete the queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventQueueMutex));
++
++ /* Free all free events. */
++ while (Event->freeEventList != gcvNULL)
++ {
++ record = Event->freeEventList;
++ Event->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Delete the free mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->freeEventMutex));
++
++ /* Free all pending queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Free all pending events. */
++ while (queue->head != gcvNULL)
++ {
++ record = queue->head;
++ queue->head = record->next;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(record) + gcmSIZEOF(queue->source),
++ "Event record 0x%x is still pending for %d.",
++ record, queue->source
++ );
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead =
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkVERIFY_OK(gckEVENT_FreeQueue(Event, queue));
++ }
++
++ /* Delete the list mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventListMutex));
++
++ /* Delete the atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->freeAtom));
++
++#if gcdSMP
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending));
++
++#if gcdMULTI_GPU
++ {
++ gctINT i;
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending3D[i]));
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending3DMask[i]));
++ }
++ }
++#endif
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->interruptCount));
++#endif
++
++ /* Mark the gckEVENT object as unknown. */
++ Event->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckEVENT object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_GetEvent
++**
++** Reserve the next available hardware event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL Wait
++** Set to gcvTRUE to force the function to wait if no events are
++** immediately available.
++**
++** gceKERNEL_WHERE Source
++** Source of the event.
++**
++** OUTPUT:
++**
++** gctUINT8 * EventID
++** Reserved event ID.
++*/
++#define gcdINVALID_EVENT_PTR ((gcsEVENT_PTR)gcvMAXUINTPTR_T)
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gceKERNEL_WHERE Source,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gceKERNEL_WHERE Source
++ )
++#endif
++{
++ gctINT i, id;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT32 free;
++#if gcdMULTI_GPU
++ gctINT j;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x Source=%d", Event, Source);
++
++ while (gcvTRUE)
++ {
++ /* Grab the queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Walk through all events. */
++ id = Event->lastID;
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ gctINT nextID = gckMATH_ModuloInt((id + 1),
++ gcmCOUNTOF(Event->queues));
++
++ if (Event->queues[id].head == gcvNULL)
++ {
++ *EventID = (gctUINT8) id;
++
++ Event->lastID = (gctUINT8) nextID;
++
++ /* Save time stamp of event. */
++ Event->queues[id].head = gcdINVALID_EVENT_PTR;
++ Event->queues[id].stamp = ++(Event->stamp);
++ Event->queues[id].source = Source;
++
++#if gcdMULTI_GPU
++ Event->queues[id].chipEnable = ChipEnable;
++
++ if (ChipEnable == gcvCORE_3D_ALL_MASK)
++ {
++ gckOS_AtomSetMask(Event->pendingMask, (1 << id));
++
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ gckOS_AtomSetMask(Event->pending3DMask[j], (1 << id));
++ }
++ }
++ else
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (ChipEnable & (1 << j))
++ {
++ gckOS_AtomSetMask(Event->pending3DMask[j], (1 << id));
++ }
++ }
++ }
++#endif
++
++ gcmkONERROR(gckOS_AtomDecrement(Event->os,
++ Event->freeAtom,
++ &free));
++#if gcdDYNAMIC_SPEED
++ if (free <= gcdDYNAMIC_EVENT_THRESHOLD)
++ {
++ gcmkONERROR(gckOS_BroadcastHurry(
++ Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD - free));
++ }
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os,
++ Event->eventQueueMutex));
++
++ /* Success. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(id),
++ "Using id=%d",
++ id
++ );
++
++ gcmkFOOTER_ARG("*EventID=%u", *EventID);
++ return gcvSTATUS_OK;
++ }
++
++ id = nextID;
++ }
++
++#if gcdDYNAMIC_SPEED
++ /* No free events, speed up the GPU right now! */
++ gcmkONERROR(gckOS_BroadcastHurry(Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD));
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Fail if wait is not requested. */
++ if (!Wait)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Delay a while. */
++ gcmkONERROR(gckOS_Delay(Event->os, 1));
++ }
++
++OnError:
++ if (acquired)
++ {
++ /* Release the queue mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AllocateRecord
++**
++** Allocate a record for the new event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL AllocateAllowed
++** State for allocation if out of free events.
++**
++** OUTPUT:
++**
++** gcsEVENT_PTR * Record
++** Allocated event record.
++*/
++gceSTATUS
++gckEVENT_AllocateRecord(
++ IN gckEVENT Event,
++ IN gctBOOL AllocateAllowed,
++ OUT gcsEVENT_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Event=0x%x AllocateAllowed=%d", Event, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->freeEventMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Test if we are below the allocation threshold. */
++ if ( (AllocateAllowed && (Event->freeEventCount < gcdEVENT_MIN_THRESHOLD)) ||
++ (Event->freeEventCount == 0) )
++ {
++ /* Allocate a bunch of records. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(Event->os,
++ gcmSIZEOF(gcsEVENT),
++ &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = Event->freeEventList;
++ Event->freeEventList = record;
++ Event->freeEventCount += 1;
++ }
++ }
++
++ *Record = Event->freeEventList;
++ Event->freeEventList = Event->freeEventList->next;
++ Event->freeEventCount -= 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", gcmOPT_POINTER(Record));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AddList
++**
++** Add a new event to the list of events.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_INTERFACE_PTR Interface
++** Pointer to the interface for the event to be added.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gctBOOL AllocateAllowed
++** State for allocation if out of free events.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record = gcvNULL;
++ gcsEVENT_QUEUE_PTR queue;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Interface=0x%x",
++ Event, Interface);
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, _GC_OBJ_ZONE,
++ "FromWhere=%d AllocateAllowed=%d",
++ FromWhere, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ /* Verify the event command. */
++ gcmkASSERT
++ ( (Interface->command == gcvHAL_FREE_NON_PAGED_MEMORY)
++ || (Interface->command == gcvHAL_FREE_CONTIGUOUS_MEMORY)
++ || (Interface->command == gcvHAL_WRITE_DATA)
++ || (Interface->command == gcvHAL_UNLOCK_VIDEO_MEMORY)
++ || (Interface->command == gcvHAL_SIGNAL)
++ || (Interface->command == gcvHAL_UNMAP_USER_MEMORY)
++ || (Interface->command == gcvHAL_TIMESTAMP)
++ || (Interface->command == gcvHAL_COMMIT_DONE)
++ || (Interface->command == gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER)
++ || (Interface->command == gcvHAL_SYNC_POINT)
++ || (Interface->command == gcvHAL_DESTROY_MMU)
++ );
++
++ /* Validate the source. */
++ if ((FromWhere != gcvKERNEL_COMMAND) && (FromWhere != gcvKERNEL_PIXEL))
++ {
++ /* Invalid argument. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Allocate a free record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, AllocateAllowed, &record));
++
++ /* Termninate the record. */
++ record->next = gcvNULL;
++
++ /* Record the committer. */
++ record->fromKernel = FromKernel;
++
++ /* Copy the event interface into the record. */
++ gckOS_MemCopy(&record->info, Interface, gcmSIZEOF(record->info));
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&record->processID));
++
++ gcmkONERROR(__RemoveRecordFromProcessDB(Event, record));
++
++ /* Handle is belonged to current process, it must be released now. */
++ if (FromKernel == gcvFALSE)
++ {
++ status = _ReleaseVideoMemoryHandle(Event->kernel, record, Interface);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Ingore error because there are other events in the queue. */
++ status = gcvSTATUS_OK;
++ goto OnError;
++ }
++ }
++
++#ifdef __QNXNTO__
++ record->kernel = Event->kernel;
++#endif
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->eventListMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Do we need to allocate a new queue? */
++ if ((Event->queueTail == gcvNULL) || (Event->queueTail->source < FromWhere))
++ {
++ /* Allocate a new queue. */
++ gcmkONERROR(gckEVENT_AllocateQueue(Event, &queue));
++
++ /* Initialize the queue. */
++ queue->source = FromWhere;
++ queue->head = gcvNULL;
++ queue->next = gcvNULL;
++
++ /* Attach it to the list of allocated queues. */
++ if (Event->queueTail == gcvNULL)
++ {
++ Event->queueHead =
++ Event->queueTail = queue;
++ }
++ else
++ {
++ Event->queueTail->next = queue;
++ Event->queueTail = queue;
++ }
++ }
++ else
++ {
++ queue = Event->queueTail;
++ }
++
++ /* Attach the record to the queue. */
++ if (queue->head == gcvNULL)
++ {
++ queue->head = record;
++ queue->tail = record;
++ }
++ else
++ {
++ queue->tail->next = record;
++ queue->tail = record;
++ }
++
++ /* Unmap user space logical address.
++ * Linux kernel does not support unmap the memory of other process any more since 3.5.
++ * Let's unmap memory of self process before submit the event to gpu.
++ * */
++ switch(Interface->command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical),
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++ break;
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)gcmNAME_TO_PTR(Interface->u.FreeVirtualCommandBuffer.physical);
++ if (buffer->userLogical)
++ {
++ gcmkONERROR(gckOS_DestroyUserVirtualMapping(
++ Event->os,
++ buffer->physical,
++ (gctSIZE_T) Interface->u.FreeVirtualCommandBuffer.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
++ }
++ break;
++
++ default:
++ break;
++ }
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Unlock
++**
++** Schedule an event to unlock virtual memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union that specifies the virtual memory
++** to unlock.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctPOINTER Node,
++ IN gceSURF_TYPE Type
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d Node=0x%x Type=%d",
++ Event, FromWhere, Node, Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Mark the event as an unlock. */
++ iface.command = gcvHAL_UNLOCK_VIDEO_MEMORY;
++ iface.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(Node);
++ iface.u.UnlockVideoMemory.type = Type;
++ iface.u.UnlockVideoMemory.asynchroneous = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeNonPagedMemory
++**
++** Schedule an event to free non-paged memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of non-paged memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of non-paged memory to free.
++**
++** gctPOINTER Logical
++** Logical address of non-paged memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_NON_PAGED_MEMORY;
++ iface.u.FreeNonPagedMemory.bytes = Bytes;
++ iface.u.FreeNonPagedMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeNonPagedMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER;
++ iface.u.FreeVirtualCommandBuffer.bytes = Bytes;
++ iface.u.FreeVirtualCommandBuffer.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeContigiuousMemory
++**
++** Schedule an event to free contiguous memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of contiguous memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of contiguous memory to free.
++**
++** gctPOINTER Logical
++** Logical address of contiguous memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_CONTIGUOUS_MEMORY;
++ iface.u.FreeContiguousMemory.bytes = Bytes;
++ iface.u.FreeContiguousMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeContiguousMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Signal
++**
++** Schedule an event to trigger a signal.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x Signal=0x%x FromWhere=%d",
++ Event, Signal, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Mark the event as a signal. */
++ iface.command = gcvHAL_SIGNAL;
++ iface.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ iface.u.Signal.coid = 0;
++ iface.u.Signal.rcvid = 0;
++#endif
++ iface.u.Signal.auxSignal = 0;
++ iface.u.Signal.process = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_CommitDone
++**
++** Schedule an event to wake up work thread when commit is done by GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ iface.command = gcvHAL_COMMIT_DONE;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckEVENT_DestroyMmu(
++ IN gckEVENT Event,
++ IN gckMMU Mmu,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ iface.command = gcvHAL_DESTROY_MMU;
++ iface.u.DestroyMmu.mmu = gcmPTR_TO_UINT64(Mmu);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckEVENT_Submit
++**
++** Submit the current event queue to the GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL Wait
++** Submit requires one vacant event; if Wait is set to not zero,
++** and there are no vacant events at this time, the function will
++** wait until an event becomes vacant so that submission of the
++** queue is successful.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT8 id = 0xFF;
++ gcsEVENT_QUEUE_PTR queue;
++ gctBOOL acquired = gcvFALSE;
++ gckCOMMAND command = gcvNULL;
++ gctBOOL commitEntered = gcvFALSE;
++#if !gcdNULL_DRIVER
++ gctUINT32 bytes;
++ gctPOINTER buffer;
++#endif
++
++#if gcdMULTI_GPU
++ gctSIZE_T chipEnableBytes;
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 oldValue;
++#endif
++
++#if gcdSECURITY
++ gctPOINTER reservedBuffer;
++#endif
++
++ gctUINT32 flushBytes;
++ gctUINT32 executeBytes;
++ gckHARDWARE hardware;
++
++ gceKERNEL_FLUSH flush = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Wait=%d", Event, Wait);
++
++ /* Get gckCOMMAND object. */
++ command = Event->kernel->command;
++ hardware = Event->kernel->hardware;
++
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ gckOS_GetTicks(&Event->lastCommitStamp);
++
++ /* Are there event queues? */
++ if (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
++ commitEntered = gcvTRUE;
++
++ /* Process all queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the list mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventListMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Allocate an event ID. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->source, ChipEnable));
++#else
++ gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->source));
++#endif
++
++ /* Copy event list to event ID queue. */
++ Event->queues[id].head = queue->head;
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead = gcvNULL;
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkONERROR(gckEVENT_FreeQueue(Event, queue));
++
++ /* Release the list mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ acquired = gcvFALSE;
++
++ /* Determine cache needed to flush. */
++ gcmkVERIFY_OK(_QueryFlush(Event, Event->queues[id].head, &flush));
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkVERIFY_OK(gckOS_AtomIncrement(
++ Event->os,
++ Event->interruptCount,
++ &oldValue
++ ));
++#endif
++
++#if gcdNULL_DRIVER
++ /* Notify immediately on infinite hardware. */
++ gcmkONERROR(gckEVENT_Interrupt(Event, 1 << id));
++
++ gcmkONERROR(gckEVENT_Notify(Event, 0));
++#else
++ /* Get the size of the hardware event. */
++ gcmkONERROR(gckHARDWARE_Event(
++ hardware,
++ gcvNULL,
++ id,
++ Event->queues[id].source,
++ &bytes
++ ));
++
++ /* Get the size of flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ hardware,
++ flush,
++ gcvNULL,
++ &flushBytes
++ ));
++
++ bytes += flushBytes;
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ gcvNULL,
++ 0,
++ &chipEnableBytes
++ ));
++
++ bytes += chipEnableBytes * 2;
++#endif
++
++ /* Total bytes need to execute. */
++ executeBytes = bytes;
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command, bytes, &buffer, &bytes));
++#if gcdSECURITY
++ reservedBuffer = buffer;
++#endif
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ buffer,
++ ChipEnable,
++ &chipEnableBytes
++ ));
++
++ buffer = (gctUINT8_PTR)buffer + chipEnableBytes;
++#endif
++
++ /* Set the flush in the command queue. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ hardware,
++ flush,
++ buffer,
++ &flushBytes
++ ));
++
++ /* Advance to next command. */
++ buffer = (gctUINT8_PTR)buffer + flushBytes;
++
++ /* Set the hardware event in the command queue. */
++ gcmkONERROR(gckHARDWARE_Event(
++ hardware,
++ buffer,
++ id,
++ Event->queues[id].source,
++ &bytes
++ ));
++
++ /* Advance to next command. */
++ buffer = (gctUINT8_PTR)buffer + bytes;
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ buffer,
++ gcvCORE_3D_ALL_MASK,
++ &chipEnableBytes
++ ));
++#endif
++
++#if gcdSECURITY
++ gckKERNEL_SecurityExecute(
++ Event->kernel,
++ reservedBuffer,
++ executeBytes
++ );
++#else
++ /* Execute the hardware event. */
++ gcmkONERROR(gckCOMMAND_Execute(command, executeBytes));
++#endif
++#endif
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
++
++#if !gcdNULL_DRIVER
++ gcmkVERIFY_OK(_TryToIdleGPU(Event));
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Need to unroll the mutex acquire. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, FromPower));
++ }
++
++ if (id != 0xFF)
++ {
++ /* Need to unroll the event allocation. */
++ Event->queues[id].head = gcvNULL;
++ }
++
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Broadcast GPU stuck. */
++ status = gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_STUCK);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Commit
++**
++** Commit an event queue from the user.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsQUEUE_PTR Queue
++** User event queue.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ )
++#endif
++{
++ gceSTATUS status;
++ gcsQUEUE_PTR record = gcvNULL, next;
++ gctUINT32 processID;
++ gctBOOL needCopy = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Queue=0x%x", Event, Queue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Query if we need to copy the client data. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Event->os, processID, &needCopy));
++
++ /* Loop while there are records in the queue. */
++ while (Queue != gcvNULL)
++ {
++ gcsQUEUE queue;
++
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ record = &queue;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(Event->os,
++ record,
++ Queue,
++ gcmSIZEOF(gcsQUEUE)));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(
++ gckEVENT_AddList(Event, &record->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE));
++
++ /* Next record in the queue. */
++ next = gcmUINT64_TO_PTR(record->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ record = gcvNULL;
++ }
++
++ Queue = next;
++ }
++
++ /* Submit the event list. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE, ChipEnable));
++#else
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++#endif
++
++ /* Success */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((record != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Compose
++**
++** Schedule a composition event and start a composition.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_COMPOSE_PTR Info
++** Pointer to the composition structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ )
++{
++ gceSTATUS status;
++ gcsEVENT_PTR headRecord;
++ gcsEVENT_PTR tailRecord;
++ gcsEVENT_PTR tempRecord;
++ gctUINT8 id = 0xFF;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Event=0x%x Info=0x%x", Event, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ /* Allocate an event ID. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL));
++#endif
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ headRecord = tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->process;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->signal;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++
++ /* Allocate another record for user signal #1. */
++ if (gcmUINT64_TO_PTR(Info->userSignal1) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++ tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal1;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Allocate another record for user signal #2. */
++ if (gcmUINT64_TO_PTR(Info->userSignal2) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal2;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Set the event list. */
++ Event->queues[id].head = headRecord;
++
++ /* Start composition. */
++ gcmkONERROR(gckHARDWARE_Compose(
++ Event->kernel->hardware, processID,
++ gcmUINT64_TO_PTR(Info->physical), gcmUINT64_TO_PTR(Info->logical), Info->offset, Info->size, id
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Interrupt
++**
++** Called by the interrupt service routine to store the triggered interrupt
++** mask to be later processed by gckEVENT_Notify.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 Data
++** Mask for the 32 interrupts.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctUINT32 Data
++ )
++{
++#if gcdMULTI_GPU
++#if defined(WIN32)
++ gctUINT32 i;
++#endif
++#endif
++ gcmkHEADER_ARG("Event=0x%x Data=0x%x", Event, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ if (Data & 0x20000000)
++ {
++ gckENTRYDATA data;
++ gctUINT32 idle;
++ Data &= ~0x20000000;
++
++#if gcdMULTI_GPU
++ if (Event->kernel->core == gcvCORE_MAJOR)
++#endif
++ {
++ /* Get first entry information. */
++ gcmkVERIFY_OK(
++ gckENTRYQUEUE_Dequeue(&Event->kernel->command->queue, &data));
++
++ /* Make sure FE is idle. */
++ do
++ {
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(
++ Event->os,
++ Event->kernel->core,
++ 0x4,
++ &idle));
++ }
++ while (idle != 0x7FFFFFFF);
++
++ /* Start Command Parser. */
++ gcmkVERIFY_OK(gckHARDWARE_Execute(
++ Event->kernel->hardware,
++ data->physical,
++ data->bytes
++ ));
++ }
++ }
++
++ /* Combine current interrupt status with pending flags. */
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (Event->kernel->core == gcvCORE_MAJOR)
++ {
++ gckOS_AtomSetMask(Event->pending3D[CoreId], Data);
++ }
++ else
++#endif
++ {
++ gckOS_AtomSetMask(Event->pending, Data);
++ }
++#elif defined(__QNXNTO__)
++#if gcdMULTI_GPU
++ if (Event->kernel->core == gcvCORE_MAJOR)
++ {
++ atomic_set(&Event->pending3D[CoreId], Data);
++ }
++ else
++#endif
++ {
++ atomic_set(&Event->pending, Data);
++ }
++#else
++#if gcdMULTI_GPU
++#if defined(WIN32)
++ if (Event->kernel->core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ Event->pending3D[i] |= Data;
++ }
++ }
++ else
++#else
++ if (Event->kernel->core == gcvCORE_MAJOR)
++ {
++ Event->pending3D[CoreId] |= Data;
++ }
++ else
++#endif
++#endif
++ {
++ Event->pending |= Data;
++ }
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ {
++ gctINT j = 0;
++ gctINT32 oldValue;
++
++ for (j = 0; j < gcmCOUNTOF(Event->queues); j++)
++ {
++ if ((Data & (1 << j)))
++ {
++ gcmkVERIFY_OK(gckOS_AtomDecrement(Event->os,
++ Event->interruptCount,
++ &oldValue));
++ }
++ }
++ }
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Notify
++**
++** Process all triggered interrupts.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctINT i;
++ gcsEVENT_QUEUE * queue;
++ gctUINT mask = 0;
++ gctBOOL acquired = gcvFALSE;
++ gctPOINTER info;
++ gctSIGNAL signal;
++ gctUINT pending = 0;
++ gckKERNEL kernel = Event->kernel;
++#if gcdMULTI_GPU
++ gceCORE core = Event->kernel->core;
++ gctUINT32 busy;
++ gctUINT32 oldValue;
++ gctUINT pendingMask;
++#endif
++#if !gcdSMP
++ gctBOOL suspended = gcvFALSE;
++#endif
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctINT eventNumber = 0;
++#endif
++ gctINT32 free;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gckVIDMEM_NODE nodeObject;
++ gcuVIDMEM_NODE_PTR node;
++
++ gcmkHEADER_ARG("Event=0x%x IDs=0x%x", Event, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ gcmDEBUG_ONLY(
++ if (IDs != 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++#if gcdMULTI_GPU
++ /* Set busy flag. */
++ gckOS_AtomicExchange(Event->os, &Event->busy, 1, &busy);
++ if (busy)
++ {
++ /* Another thread is already busy - abort. */
++ goto OnSuccess;
++ }
++#endif
++
++ for (;;)
++ {
++ gcsEVENT_PTR record;
++#if gcdMULTI_GPU
++ gctUINT32 pend[gcdMULTI_GPU];
++ gctUINT32 pendMask[gcdMULTI_GPU];
++#endif
++
++ /* Grab the mutex queue. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ /* Get current interrupts. */
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gckOS_AtomGet(Event->os, Event->pending3D[i], (gctINT32_PTR)&pend[i]);
++ gckOS_AtomGet(Event->os, Event->pending3DMask[i], (gctINT32_PTR)&pendMask[i]);
++ }
++
++ gckOS_AtomGet(Event->os, Event->pendingMask, (gctINT32_PTR)&pendingMask);
++ }
++ else
++#endif
++ {
++ gckOS_AtomGet(Event->os, Event->pending, (gctINT32_PTR)&pending);
++ }
++#else
++ /* Suspend interrupts. */
++ gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvTRUE;
++
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ /* Get current interrupts. */
++ pend[i] = Event->pending3D[i];
++ pendMask[i] = Event->pending3DMask[i];
++ }
++
++ pendingMask = Event->pendingMask;
++ }
++ else
++#endif
++ {
++ pending = Event->pending;
++ }
++
++ /* Resume interrupts. */
++ gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvFALSE;
++#endif
++
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gctUINT32 bad_pend = (pend[i] & ~pendMask[i]);
++
++ if (bad_pend != 0)
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(bad_pend) + gcmSIZEOF(i),
++ "Interrupts 0x%x are not unexpected for Core%d.",
++ bad_pend, i
++ );
++
++ gckOS_AtomClearMask(Event->pending3D[i], bad_pend);
++
++ pend[i] &= pendMask[i];
++ }
++ }
++
++ pending = (pend[0] & pend[1] & pendingMask) /* Check combined events on both GPUs */
++ | (pend[0] & ~pendingMask) /* Check individual events on GPU 0 */
++ | (pend[1] & ~pendingMask); /* Check individual events on GPU 1 */
++ }
++#endif
++
++ if (pending == 0)
++ {
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* No more pending interrupts - done. */
++ break;
++ }
++
++ if (pending & 0x80000000)
++ {
++ gctUINT32 AQAxiStatus = 0;
++ gckOS_ReadRegisterEx(Event->os, Event->kernel->hardware->core, 0xC, &AQAxiStatus);
++
++ gcmkPRINT("GPU[%d]: AXI BUS ERROR, AQAxiStatus=0x%x\n", Event->kernel->hardware->core, AQAxiStatus);
++ pending &= 0x7FFFFFFF;
++ }
++
++ if (pending & 0x40000000)
++ {
++ gckHARDWARE_DumpMMUException(Event->kernel->hardware);
++
++ pending &= 0xBFFFFFFF;
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Pending interrupts 0x%x",
++ pending
++ );
++
++ queue = gcvNULL;
++
++ gcmDEBUG_ONLY(
++ if (IDs == 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++ /* Find the oldest pending interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (pending & (1 << i))
++ )
++ {
++ if ((queue == gcvNULL)
++ || (Event->queues[i].stamp < queue->stamp)
++ )
++ {
++ queue = &Event->queues[i];
++ mask = 1 << i;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ eventNumber = i;
++#endif
++ }
++ }
++ }
++
++ if (queue == gcvNULL)
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Interrupts 0x%x are not pending.",
++ pending
++ );
++
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ /* Mark pending interrupts as handled. */
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gckOS_AtomClearMask(Event->pending3D[i], pending);
++ gckOS_AtomClearMask(Event->pending3DMask[i], pending);
++ }
++
++ gckOS_AtomClearMask(Event->pendingMask, pending);
++ }
++ else
++#endif
++ {
++ gckOS_AtomClearMask(Event->pending, pending);
++ }
++
++#elif defined(__QNXNTO__)
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ atomic_clr((gctUINT32_PTR)&Event->pending3D[i], pending);
++ atomic_clr((gctUINT32_PTR)&Event->pending3DMask[i], pending);
++ }
++
++ atomic_clr((gctUINT32_PTR)&Event->pendingMask, pending);
++ }
++ else
++#endif
++ {
++ atomic_clr((gctUINT32_PTR)&Event->pending, pending);
++ }
++#else
++ /* Suspend interrupts. */
++ gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvTRUE;
++
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ /* Mark pending interrupts as handled. */
++ Event->pending3D[i] &= ~pending;
++ Event->pending3DMask[i] &= ~pending;
++ }
++ }
++ else
++#endif
++ {
++ Event->pending &= ~pending;
++ }
++
++ /* Resume interrupts. */
++ gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvFALSE;
++#endif
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++ break;
++ }
++
++ /* Check whether there is a missed interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (Event->queues[i].stamp < queue->stamp)
++ && (Event->queues[i].source <= queue->source)
++#if gcdMULTI_GPU
++ && (Event->queues[i].chipEnable == queue->chipEnable)
++#endif
++ )
++ {
++ gcmkTRACE_N(
++ gcvLEVEL_ERROR,
++ gcmSIZEOF(i) + gcmSIZEOF(Event->queues[i].stamp),
++ "Event %d lost (stamp %llu)",
++ i, Event->queues[i].stamp
++ );
++
++ /* Use this event instead. */
++ queue = &Event->queues[i];
++ mask = 0;
++ }
++ }
++
++ if (mask != 0)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(eventNumber),
++ "Processing interrupt %d",
++ eventNumber
++ );
++#endif
++ }
++
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ /* Mark pending interrupt as handled. */
++ gckOS_AtomClearMask(Event->pending3D[i], mask);
++ gckOS_AtomClearMask(Event->pending3DMask[i], mask);
++ }
++
++ gckOS_AtomClearMask(Event->pendingMask, mask);
++ }
++ else
++#endif
++ {
++ gckOS_AtomClearMask(Event->pending, mask);
++ }
++
++#elif defined(__QNXNTO__)
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ atomic_clr(&Event->pending3D[i], mask);
++ atomic_clr(&Event->pending3DMask[i], mask);
++ }
++
++ atomic_clr(&Event->pendingMask, mask);
++ }
++ else
++#endif
++ {
++ atomic_clr(&Event->pending, mask);
++ }
++#else
++ /* Suspend interrupts. */
++ gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvTRUE;
++
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ /* Mark pending interrupt as handled. */
++ Event->pending3D[i] &= ~mask;
++ Event->pending3DMask[i] &= ~mask;
++ }
++
++ Event->pendingMask &= ~mask;
++ }
++ else
++#endif
++ {
++ Event->pending &= ~mask;
++ }
++
++ /* Resume interrupts. */
++ gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvFALSE;
++#endif
++
++ /* Grab the event head. */
++ record = queue->head;
++
++ /* Now quickly clear its event list. */
++ queue->head = gcvNULL;
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Increase the number of free events. */
++ gcmkONERROR(gckOS_AtomIncrement(Event->os, Event->freeAtom, &free));
++
++ /* Walk all events for this interrupt. */
++ while (record != gcvNULL)
++ {
++ gcsEVENT_PTR recordNext;
++#ifndef __QNXNTO__
++ gctPOINTER logical;
++#endif
++#if gcdSECURE_USER
++ gctSIZE_T bytes;
++#endif
++
++ /* Grab next record. */
++ recordNext = record->next;
++
++#ifdef __QNXNTO__
++ /* Assign record->processID as the pid for this galcore thread.
++ * Used in OS calls like gckOS_UnlockMemory() which do not take a pid.
++ */
++ drv_thread_specific_key_assign(record->processID, 0, Event->kernel->core);
++#endif
++
++#if gcdSECURE_USER
++ /* Get the cache that belongs to this process. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Event->kernel,
++ record->processID,
++ &cache));
++#endif
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Processing event type: %d",
++ record->info.command
++ );
++
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_NON_PAGED_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical));
++
++ /* Free non-paged memory. */
++ status = gckOS_FreeNonPagedMemory(
++ Event->os,
++ (gctSIZE_T) record->info.u.FreeNonPagedMemory.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeNonPagedMemory.logical));
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->record.u.FreeNonPagedMemory.logical),
++ (gctSIZE_T) record->record.u.FreeNonPagedMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_CONTIGUOUS_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical));
++
++ /* Unmap the user memory. */
++ status = gckOS_FreeContiguous(
++ Event->os,
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) record->info.u.FreeContiguousMemory.bytes);
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(event->event.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) event->event.u.FreeContiguousMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeContiguousMemory.physical);
++ break;
++
++ case gcvHAL_WRITE_DATA:
++#ifndef __QNXNTO__
++ /* Convert physical into logical address. */
++ gcmkERR_BREAK(
++ gckOS_MapPhysical(Event->os,
++ record->info.u.WriteData.address,
++ gcmSIZEOF(gctUINT32),
++ &logical));
++
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ logical,
++ record->info.u.WriteData.data));
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(
++ gckOS_UnmapPhysical(Event->os,
++ logical,
++ gcmSIZEOF(gctUINT32)));
++#else
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ (gctPOINTER)
++ record->info.u.WriteData.address,
++ record->info.u.WriteData.data));
++#endif
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNLOCK_VIDEO_MEMORY: 0x%x",
++ record->info.u.UnlockVideoMemory.node);
++
++ nodeObject = gcmUINT64_TO_PTR(record->info.u.UnlockVideoMemory.node);
++
++ node = nodeObject->node;
++
++ /* Save node information before it disappears. */
++#if gcdSECURE_USER
++ node = event->event.u.UnlockVideoMemory.node;
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock. */
++ status = gckVIDMEM_Unlock(
++ Event->kernel,
++ nodeObject,
++ record->info.u.UnlockVideoMemory.type,
++ gcvNULL);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status) && (logical != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Unlock(
++ Event->kernel,
++ nodeObject,
++ record->processID
++ ));
++#endif
++
++ status = gckVIDMEM_NODE_Dereference(Event->kernel, nodeObject);
++ break;
++
++ case gcvHAL_SIGNAL:
++ signal = gcmUINT64_TO_PTR(record->info.u.Signal.signal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_SIGNAL: 0x%x",
++ signal);
++
++#ifdef __QNXNTO__
++ if ((record->info.u.Signal.coid == 0)
++ && (record->info.u.Signal.rcvid == 0)
++ )
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ record->info.u.Signal.rcvid,
++ record->info.u.Signal.coid));
++ }
++#else
++ /* Set signal. */
++ if (gcmUINT64_TO_PTR(record->info.u.Signal.process) == gcvNULL)
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ gcmUINT64_TO_PTR(record->info.u.Signal.process)));
++ }
++
++ gcmkASSERT(record->info.u.Signal.auxSignal == 0);
++#endif
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ info = gcmNAME_TO_PTR(record->info.u.UnmapUserMemory.info);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNMAP_USER_MEMORY: 0x%x",
++ info);
++
++ /* Unmap the user memory. */
++ status = gckOS_UnmapUserMemory(
++ Event->os,
++ Event->kernel->core,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size,
++ info,
++ record->info.u.UnmapUserMemory.address);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size));
++ }
++#endif
++ gcmRELEASE_NAME(record->info.u.UnmapUserMemory.info);
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_TIMESTAMP: %d %d",
++ record->info.u.TimeStamp.timer,
++ record->info.u.TimeStamp.request);
++
++ /* Process the timestamp. */
++ switch (record->info.u.TimeStamp.request)
++ {
++ case 0:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ stopTime);
++ break;
++
++ case 1:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ startTime);
++ break;
++
++ default:
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.u.TimeStamp.request),
++ "Invalid timestamp request: %d",
++ record->info.u.TimeStamp.request
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyVirtualCommandBuffer(Event->kernel,
++ (gctSIZE_T) record->info.u.FreeVirtualCommandBuffer.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeVirtualCommandBuffer.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeVirtualCommandBuffer.logical)
++ ));
++ gcmRELEASE_NAME(record->info.u.FreeVirtualCommandBuffer.physical);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ syncPoint = gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint);
++ status = gckOS_SignalSyncPoint(Event->os, syncPoint);
++ }
++ break;
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ case gcvHAL_DESTROY_MMU:
++ status = gckMMU_Destroy(gcmUINT64_TO_PTR(record->info.u.DestroyMmu.mmu));
++ break;
++#endif
++
++ case gcvHAL_COMMIT_DONE:
++ break;
++
++ default:
++ /* Invalid argument. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Unknown event type: %d",
++ record->info.command
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Make sure there are no errors generated. */
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(status),
++ "Event produced status: %d(%s)",
++ status, gckOS_DebugStatus2Name(status));
++ }
++
++ /* Free the event. */
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++
++ /* Advance to next record. */
++ record = recordNext;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Handled interrupt 0x%x", mask);
++ }
++
++#if gcdMULTI_GPU
++ /* Clear busy flag. */
++ gckOS_AtomicExchange(Event->os, &Event->busy, 0, &oldValue);
++#endif
++
++ if (IDs == 0)
++ {
++ gcmkONERROR(_TryToIdleGPU(Event));
++ }
++
++#if gcdMULTI_GPU
++OnSuccess:
++#endif
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++#if !gcdSMP
++ if (suspended)
++ {
++ /* Resume interrupts. */
++ gcmkVERIFY_OK(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ }
++#endif
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_FreeProcess
++**
++** Free all events owned by a particular process ID.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 ProcessID
++** Process ID of the process to be freed up.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ )
++{
++ gctSIZE_T i;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record, next;
++ gceSTATUS status;
++ gcsEVENT_PTR deleteHead, deleteTail;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%d", Event, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Walk through all queues. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* Grab the event queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Grab the mutex head. */
++ record = Event->queues[i].head;
++ Event->queues[i].head = gcvNULL;
++ Event->queues[i].tail = gcvNULL;
++ deleteHead = gcvNULL;
++ deleteTail = gcvNULL;
++
++ while (record != gcvNULL)
++ {
++ next = record->next;
++ if (record->processID == ProcessID)
++ {
++ if (deleteHead == gcvNULL)
++ {
++ deleteHead = record;
++ }
++ else
++ {
++ deleteTail->next = record;
++ }
++
++ deleteTail = record;
++ }
++ else
++ {
++ if (Event->queues[i].head == gcvNULL)
++ {
++ Event->queues[i].head = record;
++ }
++ else
++ {
++ Event->queues[i].tail->next = record;
++ }
++
++ Event->queues[i].tail = record;
++ }
++
++ record->next = gcvNULL;
++ record = next;
++ }
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Loop through the entire list of events. */
++ for (record = deleteHead; record != gcvNULL; record = next)
++ {
++ /* Get the next event record. */
++ next = record->next;
++
++ /* Free the event record. */
++ gcmkONERROR(gckEVENT_FreeRecord(Event, record));
++ }
++ }
++ }
++
++ gcmkONERROR(_TryToIdleGPU(Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release the event queue mutex. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_Stop
++**
++** Stop the hardware using the End event mechanism.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctUINT32 * waitSize
++ )
++{
++ gceSTATUS status;
++ /* gctSIZE_T waitSize;*/
++ gcsEVENT_PTR record;
++ gctUINT8 id = 0xFF;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%u Handle=0x%x Logical=0x%x "
++ "Signal=0x%x",
++ Event, ProcessID, Handle, Logical, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Submit the current event queue. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++#endif
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL));
++#endif
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &record));
++
++ /* Initialize the record. */
++ record->next = gcvNULL;
++ record->processID = ProcessID;
++ record->info.command = gcvHAL_SIGNAL;
++ record->info.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ record->info.u.Signal.coid = 0;
++ record->info.u.Signal.rcvid = 0;
++#endif
++ record->info.u.Signal.auxSignal = 0;
++ record->info.u.Signal.process = 0;
++
++ /* Append the record. */
++ Event->queues[id].head = record;
++
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ Event->kernel->hardware, Logical, waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Event->os,
++ ProcessID,
++ gcvNULL,
++ (gctUINT32)Handle,
++ Logical,
++ *waitSize
++ ));
++#endif
++
++ /* Wait for the signal. */
++ gcmkONERROR(gckOS_WaitSignal(Event->os, Signal, gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_PrintRecord(
++ gcsEVENT_PTR record
++ )
++{
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_NON_PAGED_MEMORY");
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_CONTIGUOUS_MEMORY");
++ break;
++
++ case gcvHAL_WRITE_DATA:
++ gcmkPRINT(" gcvHAL_WRITE_DATA");
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkPRINT(" gcvHAL_UNLOCK_VIDEO_MEMORY");
++ break;
++
++ case gcvHAL_SIGNAL:
++ gcmkPRINT(" gcvHAL_SIGNAL process=%d signal=0x%x",
++ record->info.u.Signal.process,
++ record->info.u.Signal.signal);
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkPRINT(" gcvHAL_UNMAP_USER_MEMORY");
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkPRINT(" gcvHAL_TIMESTAMP");
++ break;
++
++ case gcvHAL_COMMIT_DONE:
++ gcmkPRINT(" gcvHAL_COMMIT_DONE");
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkPRINT(" gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER logical=0x%08x",
++ record->info.u.FreeVirtualCommandBuffer.logical);
++ break;
++
++ case gcvHAL_SYNC_POINT:
++ gcmkPRINT(" gcvHAL_SYNC_POINT syncPoint=0x%08x",
++ gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint));
++
++ break;
++
++ case gcvHAL_DESTROY_MMU:
++ gcmkPRINT(" gcvHAL_DESTORY_MMU mmu=0x%08x",
++ gcmUINT64_TO_PTR(record->info.u.DestroyMmu.mmu));
++
++ break;
++ default:
++ gcmkPRINT(" Illegal Event %d", record->info.command);
++ break;
++ }
++}
++
++/*******************************************************************************
++** gckEVENT_Dump
++**
++** Dump record in event queue when stuck happens.
++** No protection for the event queue.
++**/
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_QUEUE_PTR queueHead = Event->queueHead;
++ gcsEVENT_QUEUE_PTR queue;
++ gcsEVENT_PTR record = gcvNULL;
++ gctINT i;
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 pendingInterrupt;
++ gctUINT32 intrAcknowledge;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** EVENT STATE DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkPRINT(" Unsumbitted Event:");
++ while(queueHead)
++ {
++ queue = queueHead;
++ record = queueHead->head;
++
++ gcmkPRINT(" [%x]:", queue);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++
++ if (queueHead == Event->queueTail)
++ {
++ queueHead = gcvNULL;
++ }
++ else
++ {
++ queueHead = queueHead->next;
++ }
++ }
++
++ gcmkPRINT(" Untriggered Event:");
++ for (i = 0; i < gcmCOUNTOF(Event->queues); i++)
++ {
++ queue = &Event->queues[i];
++ record = queue->head;
++
++ gcmkPRINT(" [%d]:", i);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++ }
++
++#if gcdINTERRUPT_STATISTIC
++ gckOS_AtomGet(Event->os, Event->interruptCount, &pendingInterrupt);
++ gcmkPRINT(" Number of Pending Interrupt: %d", pendingInterrupt);
++
++ if (Event->kernel->recovery == 0)
++ {
++ gckOS_ReadRegisterEx(
++ Event->os,
++ Event->kernel->core,
++ 0x10,
++ &intrAcknowledge
++ );
++
++ gcmkPRINT(" INTR_ACKNOWLEDGE=0x%x", intrAcknowledge);
++ }
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1489 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_h_
++#define __gc_hal_kernel_h_
++
++#include "gc_hal.h"
++#include "gc_hal_kernel_hardware.h"
++#include "gc_hal_driver.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_vg.h"
++#endif
++
++#if gcdSECURITY
++#include "gc_hal_security_interface.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++/*******************************************************************************
++***** New MMU Defination *******************************************************/
++#define gcdMMU_MTLB_SHIFT 22
++#define gcdMMU_STLB_4K_SHIFT 12
++#define gcdMMU_STLB_64K_SHIFT 16
++
++#define gcdMMU_MTLB_BITS (32 - gcdMMU_MTLB_SHIFT)
++#define gcdMMU_PAGE_4K_BITS gcdMMU_STLB_4K_SHIFT
++#define gcdMMU_STLB_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_4K_BITS)
++#define gcdMMU_PAGE_64K_BITS gcdMMU_STLB_64K_SHIFT
++#define gcdMMU_STLB_64K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_64K_BITS)
++
++#define gcdMMU_MTLB_ENTRY_NUM (1 << gcdMMU_MTLB_BITS)
++#define gcdMMU_MTLB_SIZE (gcdMMU_MTLB_ENTRY_NUM << 2)
++#define gcdMMU_STLB_4K_ENTRY_NUM (1 << gcdMMU_STLB_4K_BITS)
++#define gcdMMU_STLB_4K_SIZE (gcdMMU_STLB_4K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_4K_SIZE (1 << gcdMMU_STLB_4K_SHIFT)
++#define gcdMMU_STLB_64K_ENTRY_NUM (1 << gcdMMU_STLB_64K_BITS)
++#define gcdMMU_STLB_64K_SIZE (gcdMMU_STLB_64K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_64K_SIZE (1 << gcdMMU_STLB_64K_SHIFT)
++
++#define gcdMMU_MTLB_MASK (~((1U << gcdMMU_MTLB_SHIFT)-1))
++#define gcdMMU_STLB_4K_MASK ((~0U << gcdMMU_STLB_4K_SHIFT) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_4K_MASK (gcdMMU_PAGE_4K_SIZE - 1)
++#define gcdMMU_STLB_64K_MASK ((~((1U << gcdMMU_STLB_64K_SHIFT)-1)) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_64K_MASK (gcdMMU_PAGE_64K_SIZE - 1)
++
++/* Page offset definitions. */
++#define gcdMMU_OFFSET_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_4K_BITS)
++#define gcdMMU_OFFSET_4K_MASK ((1U << gcdMMU_OFFSET_4K_BITS) - 1)
++#define gcdMMU_OFFSET_16K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_16K_BITS)
++#define gcdMMU_OFFSET_16K_MASK ((1U << gcdMMU_OFFSET_16K_BITS) - 1)
++
++#define gcdMMU_MTLB_PRESENT 0x00000001
++#define gcdMMU_MTLB_EXCEPTION 0x00000002
++#define gcdMMU_MTLB_4K_PAGE 0x00000000
++
++#define gcdMMU_STLB_PRESENT 0x00000001
++#define gcdMMU_STLB_EXCEPTION 0x00000002
++#define gcdMMU_STLB_4K_PAGE 0x00000000
++
++/*******************************************************************************
++***** Stuck Dump Level ********************************************************/
++
++#define gcdSTUCK_DUMP_MINIMAL 1
++#define gcdSTUCK_DUMP_MIDDLE 2
++#define gcdSTUCK_DUMP_MAXIMAL 3
++
++/*******************************************************************************
++***** Process Secure Cache ****************************************************/
++
++#define gcdSECURE_CACHE_LRU 1
++#define gcdSECURE_CACHE_LINEAR 2
++#define gcdSECURE_CACHE_HASH 3
++#define gcdSECURE_CACHE_TABLE 4
++
++#define gcvPAGE_TABLE_DIRTY_BIT_OTHER (1 << 0)
++#define gcvPAGE_TABLE_DIRTY_BIT_FE (1 << 1)
++
++typedef struct _gcskLOGICAL_CACHE * gcskLOGICAL_CACHE_PTR;
++typedef struct _gcskLOGICAL_CACHE gcskLOGICAL_CACHE;
++struct _gcskLOGICAL_CACHE
++{
++ /* Logical address. */
++ gctPOINTER logical;
++
++ /* DMAable address. */
++ gctUINT32 dma;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Pointer to the previous and next hash tables. */
++ gcskLOGICAL_CACHE_PTR nextHash;
++ gcskLOGICAL_CACHE_PTR prevHash;
++#endif
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Pointer to the previous and next slot. */
++ gcskLOGICAL_CACHE_PTR next;
++ gcskLOGICAL_CACHE_PTR prev;
++#endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ /* Time stamp. */
++ gctUINT64 stamp;
++#endif
++};
++
++typedef struct _gcskSECURE_CACHE * gcskSECURE_CACHE_PTR;
++typedef struct _gcskSECURE_CACHE
++{
++ /* Cache memory. */
++ gcskLOGICAL_CACHE cache[1 + gcdSECURE_CACHE_SLOTS];
++
++ /* Last known index for LINEAR mode. */
++ gcskLOGICAL_CACHE_PTR cacheIndex;
++
++ /* Current free slot for LINEAR mode. */
++ gctUINT32 cacheFree;
++
++ /* Time stamp for LINEAR mode. */
++ gctUINT64 cacheStamp;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Hash table for HASH mode. */
++ gcskLOGICAL_CACHE hash[256];
++#endif
++}
++gcskSECURE_CACHE;
++
++/*******************************************************************************
++***** Process Database Management *********************************************/
++
++typedef enum _gceDATABASE_TYPE
++{
++ gcvDB_VIDEO_MEMORY = 1, /* Video memory created. */
++ gcvDB_COMMAND_BUFFER, /* Command Buffer. */
++ gcvDB_NON_PAGED, /* Non paged memory. */
++ gcvDB_CONTIGUOUS, /* Contiguous memory. */
++ gcvDB_SIGNAL, /* Signal. */
++ gcvDB_VIDEO_MEMORY_LOCKED, /* Video memory locked. */
++ gcvDB_CONTEXT, /* Context */
++ gcvDB_IDLE, /* GPU idle. */
++ gcvDB_MAP_MEMORY, /* Map memory */
++ gcvDB_MAP_USER_MEMORY, /* Map user memory */
++ gcvDB_SYNC_POINT, /* Sync point. */
++ gcvDB_SHBUF, /* Shared buffer. */
++}
++gceDATABASE_TYPE;
++
++#define gcdDATABASE_TYPE_MASK 0x000000FF
++#define gcdDB_VIDEO_MEMORY_TYPE_MASK 0x0000FF00
++#define gcdDB_VIDEO_MEMORY_TYPE_SHIFT 8
++
++#define gcdDB_VIDEO_MEMORY_POOL_MASK 0x00FF0000
++#define gcdDB_VIDEO_MEMORY_POOL_SHIFT 16
++
++typedef struct _gcsDATABASE_RECORD * gcsDATABASE_RECORD_PTR;
++typedef struct _gcsDATABASE_RECORD
++{
++ /* Pointer to kernel. */
++ gckKERNEL kernel;
++
++ /* Pointer to next database record. */
++ gcsDATABASE_RECORD_PTR next;
++
++ /* Type of record. */
++ gceDATABASE_TYPE type;
++
++ /* Data for record. */
++ gctPOINTER data;
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++}
++gcsDATABASE_RECORD;
++
++typedef struct _gcsDATABASE * gcsDATABASE_PTR;
++typedef struct _gcsDATABASE
++{
++ /* Pointer to next entry is hash list. */
++ gcsDATABASE_PTR next;
++ gctSIZE_T slot;
++
++ /* Process ID. */
++ gctUINT32 processID;
++
++ /* Sizes to query. */
++ gcsDATABASE_COUNTERS vidMem;
++ gcsDATABASE_COUNTERS nonPaged;
++ gcsDATABASE_COUNTERS contiguous;
++ gcsDATABASE_COUNTERS mapUserMemory;
++ gcsDATABASE_COUNTERS mapMemory;
++ gcsDATABASE_COUNTERS virtualCommandBuffer;
++
++ gcsDATABASE_COUNTERS vidMemType[gcvSURF_NUM_TYPES];
++ /* Counter for each video memory pool. */
++ gcsDATABASE_COUNTERS vidMemPool[gcvPOOL_NUMBER_OF_POOLS];
++ gctPOINTER counterMutex;
++
++ /* Idle time management. */
++ gctUINT64 lastIdle;
++ gctUINT64 idle;
++
++ /* Pointer to database. */
++ gcsDATABASE_RECORD_PTR list[48];
++
++#if gcdSECURE_USER
++ /* Secure cache. */
++ gcskSECURE_CACHE cache;
++#endif
++
++ gctPOINTER handleDatabase;
++ gctPOINTER handleDatabaseMutex;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU mmu;
++#endif
++}
++gcsDATABASE;
++
++typedef struct _gcsRECORDER * gckRECORDER;
++
++typedef struct _gcsFDPRIVATE * gcsFDPRIVATE_PTR;
++typedef struct _gcsFDPRIVATE
++{
++ gctINT (* release) (gcsFDPRIVATE_PTR Private);
++}
++gcsFDPRIVATE;
++
++/* Create a process database that will contain all its allocations. */
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Add a record to the process database. */
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ );
++
++/* Remove a record to the process database. */
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ );
++
++/* Destroy the process database. */
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Find a record to the process database. */
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ );
++
++/* Query the process database. */
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ );
++
++/* Dump the process database. */
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ );
++
++/* Dump the video memory usage for process specified. */
++gceSTATUS
++gckKERNEL_DumpVidMemUsage(
++ IN gckKERNEL Kernel,
++ IN gctINT32 ProcessID
++ );
++
++gceSTATUS
++gckKERNEL_FindDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ OUT gcsDATABASE_PTR * Database
++ );
++
++gceSTATUS
++gckKERNEL_FindHandleDatbase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gctPOINTER * HandleDatabase,
++ OUT gctPOINTER * HandleDatabaseMutex
++ );
++
++gceSTATUS
++gckKERNEL_GetProcessMMU(
++ IN gckKERNEL Kernel,
++ OUT gckMMU * Mmu
++ );
++
++gceSTATUS
++gckKERNEL_SetRecovery(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Recovery,
++ IN gctUINT32 StuckDump
++ );
++
++gceSTATUS
++gckMMU_FlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 Physical
++ );
++
++gceSTATUS
++gckMMU_GetPageEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address,
++ IN gctUINT32_PTR *PageTable
++ );
++
++gceSTATUS
++gckMMU_FreePagesEx(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address,
++ IN gctSIZE_T PageCount
++ );
++
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ );
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ );
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ );
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ );
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ );
++
++/* Pointer rename */
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ );
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++#if gcdSECURE_USER
++/* Get secure cache from the process database. */
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ );
++#endif
++
++/*******************************************************************************
++********* Timer Management ****************************************************/
++typedef struct _gcsTIMER * gcsTIMER_PTR;
++typedef struct _gcsTIMER
++{
++ /* Start and Stop time holders. */
++ gctUINT64 startTime;
++ gctUINT64 stopTime;
++}
++gcsTIMER;
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckDB object. */
++struct _gckDB
++{
++ /* Database management. */
++ gcsDATABASE_PTR db[16];
++ gctPOINTER dbMutex;
++ gcsDATABASE_PTR freeDatabase;
++ gcsDATABASE_RECORD_PTR freeRecord;
++ gcsDATABASE_PTR lastDatabase;
++ gctUINT32 lastProcessID;
++ gctUINT64 lastIdle;
++ gctUINT64 idleTime;
++ gctUINT64 lastSlowdown;
++ gctUINT64 lastSlowdownIdle;
++ gctPOINTER nameDatabase;
++ gctPOINTER nameDatabaseMutex;
++
++ gctPOINTER pointerDatabase;
++ gctPOINTER pointerDatabaseMutex;
++};
++
++typedef struct _gckVIRTUAL_COMMAND_BUFFER * gckVIRTUAL_COMMAND_BUFFER_PTR;
++typedef struct _gckVIRTUAL_COMMAND_BUFFER
++{
++ gctPHYS_ADDR physical;
++ gctPOINTER userLogical;
++ gctPOINTER kernelLogical;
++ gctSIZE_T bytes;
++ gctSIZE_T pageCount;
++ gctPOINTER pageTable;
++ gctUINT32 gpuAddress;
++ gctUINT pid;
++ gckVIRTUAL_COMMAND_BUFFER_PTR next;
++ gckVIRTUAL_COMMAND_BUFFER_PTR prev;
++ gckKERNEL kernel;
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU mmu;
++#endif
++}
++gckVIRTUAL_COMMAND_BUFFER;
++
++/* gckKERNEL object. */
++struct _gckKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Pointer to gckCOMMAND object. */
++ gckCOMMAND command;
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckMMU mmu;
++
++ /* Atom holding number of clients. */
++ gctPOINTER atomClients;
++
++#if VIVANTE_PROFILER
++ /* Enable profiling */
++ gctBOOL profileEnable;
++ /* Clear profile register or not*/
++ gctBOOL profileCleanRegister;
++#endif
++
++#ifdef QNX_SINGLE_THREADED_DEBUGGING
++ gctPOINTER debugMutex;
++#endif
++
++ /* Database management. */
++ gckDB db;
++ gctBOOL dbCreated;
++
++ gctUINT64 resetTimeStamp;
++
++ /* Pointer to gckEVENT object. */
++ gcsTIMER timers[8];
++ gctUINT32 timeOut;
++
++#if gcdENABLE_VG
++ gckVGKERNEL vg;
++#endif
++
++ /* Virtual command buffer list. */
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferHead;
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferTail;
++ gctPOINTER virtualBufferLock;
++
++ /* Enable virtual command buffer. */
++ gctBOOL virtualCommandBuffer;
++
++#if gcdDVFS
++ gckDVFS dvfs;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gctHANDLE timeline;
++#endif
++
++ /* Enable recovery. */
++ gctBOOL recovery;
++
++ /* Level of dump information after stuck. */
++ gctUINT stuckDump;
++
++#if gcdSECURITY
++ gctUINT32 securityChannel;
++#endif
++
++ /* Timer to monitor GPU stuck. */
++ gctPOINTER monitorTimer;
++
++ /* Flag to quit monitor timer. */
++ gctBOOL monitorTimerStop;
++
++ /* Monitor states. */
++ gctBOOL monitoring;
++ gctUINT32 lastCommitStamp;
++ gctUINT32 timer;
++ gctUINT32 restoreAddress;
++ gctUINT32 restoreMask;
++};
++
++struct _FrequencyHistory
++{
++ gctUINT32 frequency;
++ gctUINT32 count;
++};
++
++/* gckDVFS object. */
++struct _gckDVFS
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gctPOINTER timer;
++ gctUINT32 pollingTime;
++ gctBOOL stop;
++ gctUINT32 totalConfig;
++ gctUINT32 loads[8];
++ gctUINT8 currentScale;
++ struct _FrequencyHistory frequencyHistory[16];
++};
++
++/* gckCOMMAND object. */
++struct _gckCOMMAND
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to required object. */
++ gckKERNEL kernel;
++ gckOS os;
++
++ /* Number of bytes per page. */
++ gctUINT32 pageSize;
++
++ /* Current pipe select. */
++ gcePIPE_SELECT pipeSelect;
++
++ /* Command queue running flag. */
++ gctBOOL running;
++
++ /* Idle flag and commit stamp. */
++ gctBOOL idle;
++ gctUINT64 commitStamp;
++
++ /* Command queue mutex. */
++ gctPOINTER mutexQueue;
++
++ /* Context switching mutex. */
++ gctPOINTER mutexContext;
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context sequence mutex. */
++ gctPOINTER mutexContextSeq;
++#endif
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++
++ /* Current command queue. */
++ struct _gcskCOMMAND_QUEUE
++ {
++ gctSIGNAL signal;
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ gctUINT32 address;
++ }
++ queues[gcdCOMMAND_QUEUES];
++
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ gctUINT32 address;
++ gctUINT32 offset;
++ gctINT index;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctUINT wrapCount;
++#endif
++
++ /* The command queue is new. */
++ gctBOOL newQueue;
++
++ /* Context management. */
++ gckCONTEXT currContext;
++
++ /* Pointer to last WAIT command. */
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitSize;
++
++ /* Command buffer alignment. */
++ gctUINT32 alignment;
++ gctUINT32 reservedHead;
++ gctUINT32 reservedTail;
++
++ /* Commit counter. */
++ gctPOINTER atomCommit;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* End Event signal. */
++ gctSIGNAL endEventSignal;
++
++#if gcdSECURE_USER
++ /* Hint array copy buffer. */
++ gctBOOL hintArrayAllocated;
++ gctUINT hintArraySize;
++ gctUINT32_PTR hintArray;
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU currentMmu;
++#endif
++ struct _gckENTRYQUEUE queue;
++};
++
++typedef struct _gcsEVENT * gcsEVENT_PTR;
++
++/* Structure holding one event to be processed. */
++typedef struct _gcsEVENT
++{
++ /* Pointer to next event in queue. */
++ gcsEVENT_PTR next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE info;
++
++ /* Process ID owning the event. */
++ gctUINT32 processID;
++
++#ifdef __QNXNTO__
++ /* Kernel. */
++ gckKERNEL kernel;
++#endif
++
++ gctBOOL fromKernel;
++}
++gcsEVENT;
++
++/* Structure holding a list of events to be processed by an interrupt. */
++typedef struct _gcsEVENT_QUEUE * gcsEVENT_QUEUE_PTR;
++typedef struct _gcsEVENT_QUEUE
++{
++ /* Time stamp. */
++ gctUINT64 stamp;
++
++ /* Source of the event. */
++ gceKERNEL_WHERE source;
++
++#if gcdMULTI_GPU
++ /* Which chip(s) of the event */
++ gceCORE_3D_MASK chipEnable;
++#endif
++
++ /* Pointer to head of event queue. */
++ gcsEVENT_PTR head;
++
++ /* Pointer to tail of event queue. */
++ gcsEVENT_PTR tail;
++
++ /* Next list of events. */
++ gcsEVENT_QUEUE_PTR next;
++}
++gcsEVENT_QUEUE;
++
++/*
++ gcdREPO_LIST_COUNT defines the maximum number of event queues with different
++ hardware module sources that may coexist at the same time. Only two sources
++ are supported - gcvKERNEL_COMMAND and gcvKERNEL_PIXEL. gcvKERNEL_COMMAND
++ source is used only for managing the kernel command queue and is only issued
++ when the current command queue gets full. Since we commit event queues every
++ time we commit command buffers, in the worst case we can have up to three
++ pending event queues:
++ - gcvKERNEL_PIXEL
++ - gcvKERNEL_COMMAND (queue overflow)
++ - gcvKERNEL_PIXEL
++*/
++#define gcdREPO_LIST_COUNT 3
++
++/* gckEVENT object. */
++struct _gckEVENT
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to required objects. */
++ gckOS os;
++ gckKERNEL kernel;
++
++ /* Time stamp. */
++ gctUINT64 stamp;
++ gctUINT32 lastCommitStamp;
++
++ /* Queue mutex. */
++ gctPOINTER eventQueueMutex;
++
++ /* Array of event queues. */
++ gcsEVENT_QUEUE queues[29];
++ gctUINT8 lastID;
++ gctPOINTER freeAtom;
++
++ /* Pending events. */
++#if gcdSMP
++#if gcdMULTI_GPU
++ gctPOINTER pending3D[gcdMULTI_GPU];
++ gctPOINTER pending3DMask[gcdMULTI_GPU];
++ gctPOINTER pendingMask;
++#endif
++ gctPOINTER pending;
++#else
++#if gcdMULTI_GPU
++ volatile gctUINT pending3D[gcdMULTI_GPU];
++ volatile gctUINT pending3DMask[gcdMULTI_GPU];
++ volatile gctUINT pendingMask;
++#endif
++ volatile gctUINT pending;
++#endif
++#if gcdMULTI_GPU
++ gctUINT32 busy;
++#endif
++
++ /* List of free event structures and its mutex. */
++ gcsEVENT_PTR freeEventList;
++ gctSIZE_T freeEventCount;
++ gctPOINTER freeEventMutex;
++
++ /* Event queues. */
++ gcsEVENT_QUEUE_PTR queueHead;
++ gcsEVENT_QUEUE_PTR queueTail;
++ gcsEVENT_QUEUE_PTR freeList;
++ gcsEVENT_QUEUE repoList[gcdREPO_LIST_COUNT];
++ gctPOINTER eventListMutex;
++
++ gctPOINTER submitTimer;
++
++#if gcdINTERRUPT_STATISTIC
++ gctPOINTER interruptCount;
++#endif
++
++#if gcdRECORD_COMMAND
++ gckRECORDER recorder;
++#endif
++};
++
++/* Free all events belonging to a process. */
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ );
++
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctUINT32 * waitSize
++ );
++
++typedef struct _gcsLOCK_INFO * gcsLOCK_INFO_PTR;
++typedef struct _gcsLOCK_INFO
++{
++ gctUINT32 GPUAddresses[gcdMAX_GPU_COUNT];
++ gctPOINTER pageTables[gcdMAX_GPU_COUNT];
++ gctUINT32 lockeds[gcdMAX_GPU_COUNT];
++ gckKERNEL lockKernels[gcdMAX_GPU_COUNT];
++ gckMMU lockMmus[gcdMAX_GPU_COUNT];
++}
++gcsLOCK_INFO;
++
++typedef struct _gcsGPU_MAP * gcsGPU_MAP_PTR;
++typedef struct _gcsGPU_MAP
++{
++ gctINT pid;
++ gcsLOCK_INFO lockInfo;
++ gcsGPU_MAP_PTR prev;
++ gcsGPU_MAP_PTR next;
++}
++gcsGPU_MAP;
++
++/* gcuVIDMEM_NODE structure. */
++typedef union _gcuVIDMEM_NODE
++{
++ /* Allocated from gckVIDMEM. */
++ struct _gcsVIDMEM_NODE_VIDMEM
++ {
++ /* Owner of this node. */
++ gckVIDMEM memory;
++
++ /* Dual-linked list of nodes. */
++ gcuVIDMEM_NODE_PTR next;
++ gcuVIDMEM_NODE_PTR prev;
++
++ /* Dual linked list of free nodes. */
++ gcuVIDMEM_NODE_PTR nextFree;
++ gcuVIDMEM_NODE_PTR prevFree;
++
++ /* Information for this node. */
++ gctSIZE_T offset;
++ gctSIZE_T bytes;
++ gctUINT32 alignment;
++
++#ifdef __QNXNTO__
++ /* Client virtual address. */
++ gctPOINTER logical;
++#endif
++
++ /* Locked counter. */
++ gctINT32 locked;
++
++ /* Memory pool. */
++ gcePOOL pool;
++ gctUINT32 physical;
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++#if gcdENABLE_VG
++ gctPOINTER kernelVirtual;
++#endif
++ }
++ VidMem;
++
++ /* Allocated from gckOS. */
++ struct _gcsVIDMEM_NODE_VIRTUAL
++ {
++ /* Pointer to gckKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Information for this node. */
++ /* Contiguously allocated? */
++ gctBOOL contiguous;
++ /* mdl record pointer... a kmalloc address. Process agnostic. */
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++ /* do_mmap_pgoff address... mapped per-process. */
++ gctPOINTER logical;
++
++#if gcdENABLE_VG
++ /* Physical address of this node, only meaningful when it is contiguous. */
++ gctUINT32 physicalAddress;
++
++ /* Kernel logical of this node. */
++ gctPOINTER kernelVirtual;
++#endif
++
++ /* Customer private handle */
++ gctUINT32 gid;
++
++ /* Page table information. */
++ /* Used only when node is not contiguous */
++ gctSIZE_T pageCount;
++
++ /* Used only when node is not contiguous */
++ gctPOINTER pageTables[gcdMAX_GPU_COUNT];
++ /* Pointer to gckKERNEL object who lock this. */
++ gckKERNEL lockKernels[gcdMAX_GPU_COUNT];
++ /* Actual physical address */
++ gctUINT32 addresses[gcdMAX_GPU_COUNT];
++
++ /* Locked counter. */
++ gctINT32 lockeds[gcdMAX_GPU_COUNT];
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++ /* Surface type. */
++ gceSURF_TYPE type;
++ }
++ Virtual;
++}
++gcuVIDMEM_NODE;
++
++/* gckVIDMEM object. */
++struct _gckVIDMEM
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Information for this video memory heap. */
++ gctUINT32 baseAddress;
++ gctSIZE_T bytes;
++ gctSIZE_T freeBytes;
++
++ /* Mapping for each type of surface. */
++ gctINT mapping[gcvSURF_NUM_TYPES];
++
++ /* Sentinel nodes for up to 8 banks. */
++ gcuVIDMEM_NODE sentinel[8];
++
++ /* Allocation threshold. */
++ gctSIZE_T threshold;
++
++ /* The heap mutex. */
++ gctPOINTER mutex;
++};
++
++typedef struct _gcsVIDMEM_NODE
++{
++ /* Pointer to gcuVIDMEM_NODE. */
++ gcuVIDMEM_NODE_PTR node;
++
++ /* Mutex to protect node. */
++ gctPOINTER mutex;
++
++ /* Reference count. */
++ gctPOINTER reference;
++
++ /* Name for client to import. */
++ gctUINT32 name;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ /* Head of mapping list. */
++ gcsGPU_MAP_PTR mapHead;
++
++ /* Tail of mapping list. */
++ gcsGPU_MAP_PTR mapTail;
++
++ gctPOINTER mapMutex;
++#endif
++
++ /* Surface Type. */
++ gceSURF_TYPE type;
++
++ /* Pool from which node is allocated. */
++ gcePOOL pool;
++}
++gcsVIDMEM_NODE;
++
++typedef struct _gcsVIDMEM_HANDLE * gckVIDMEM_HANDLE;
++typedef struct _gcsVIDMEM_HANDLE
++{
++ /* Pointer to gckVIDMEM_NODE. */
++ gckVIDMEM_NODE node;
++
++ /* Handle for current process. */
++ gctUINT32 handle;
++
++ /* Reference count for this handle. */
++ gctPOINTER reference;
++}
++gcsVIDMEM_HANDLE;
++
++typedef struct _gcsSHBUF * gcsSHBUF_PTR;
++typedef struct _gcsSHBUF
++{
++ /* ID. */
++ gctUINT32 id;
++
++ /* Reference count. */
++ gctPOINTER reference;
++
++ /* Data size. */
++ gctUINT32 size;
++
++ /* Data. */
++ gctPOINTER data;
++}
++gcsSHBUF;
++
++gceSTATUS
++gckVIDMEM_HANDLE_Reference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ );
++
++gceSTATUS
++gckVIDMEM_HANDLE_Dereference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Allocate(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR VideoNode,
++ IN gceSURF_TYPE Type,
++ IN gcePOOL Pool,
++ IN gctUINT32 * Handle
++ );
++
++gceSTATUS
++gckVIDMEM_Node_Lock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ OUT gctUINT32 *Address
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Unlock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gctUINT32 ProcessID
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Dereference(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Name(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ IN gctUINT32 * Name
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Import(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name,
++ IN gctUINT32 * Handle
++ );
++
++gceSTATUS
++gckVIDMEM_HANDLE_LookupAndReference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ OUT gckVIDMEM_NODE * Node
++ );
++
++gceSTATUS
++gckVIDMEM_HANDLE_Lookup(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle,
++ OUT gckVIDMEM_NODE * Node
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_GetFd(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ OUT gctINT * Fd
++ );
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckEVENT_DestroyMmu(
++ IN gckEVENT Event,
++ IN gckMMU Mmu,
++ IN gceKERNEL_WHERE FromWhere
++ );
++#endif
++
++/* gckMMU object. */
++struct _gckMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER pageTableMutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctUINT32_PTR pageTableLogical;
++ gctUINT32 pageTableEntries;
++
++ /* Master TLB information. */
++ gctSIZE_T mtlbSize;
++ gctPHYS_ADDR mtlbPhysical;
++ gctUINT32_PTR mtlbLogical;
++ gctUINT32 mtlbEntries;
++
++ /* Free entries. */
++ gctUINT32 heapList;
++ gctBOOL freeNodes;
++
++ gctPOINTER staticSTLB;
++ gctBOOL enabled;
++
++ gctUINT32 dynamicMappingStart;
++
++ gctUINT32_PTR mapLogical;
++#if gcdPROCESS_ADDRESS_SPACE
++ gctPOINTER pageTableDirty[gcdMAX_GPU_COUNT];
++ gctPOINTER stlbs;
++#endif
++};
++
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckOS_CreateUserVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++gceSTATUS
++gckOS_DestroyUserVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckOS_GetFd(
++ IN gctSTRING Name,
++ IN gcsFDPRIVATE_PTR Private,
++ OUT gctINT *Fd
++ );
++
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ );
++
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ );
++
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ );
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ );
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++
++#if gcdSECURITY
++gceSTATUS
++gckKERNEL_SecurityOpen(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPU,
++ OUT gctUINT32 *Channel
++ );
++
++/*
++** Close a security service channel
++*/
++gceSTATUS
++gckKERNEL_SecurityClose(
++ IN gctUINT32 Channel
++ );
++
++/*
++** Security service interface.
++*/
++gceSTATUS
++gckKERNEL_SecurityCallService(
++ IN gctUINT32 Channel,
++ IN OUT gcsTA_INTERFACE * Interface
++ );
++
++gceSTATUS
++gckKERNEL_SecurityStartCommand(
++ IN gckKERNEL Kernel
++ );
++
++gceSTATUS
++gckKERNEL_SecurityAllocateSecurityMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Bytes,
++ OUT gctUINT32 * Handle
++ );
++
++gceSTATUS
++gckKERNEL_SecurityExecute(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Buffer,
++ IN gctUINT32 Bytes
++ );
++
++gceSTATUS
++gckKERNEL_SecurityMapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 *PhysicalArray,
++ IN gctUINT32 PageCount,
++ OUT gctUINT32 * GPUAddress
++ );
++
++gceSTATUS
++gckKERNEL_SecurityUnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPUAddress,
++ IN gctUINT32 PageCount
++ );
++
++#endif
++
++gceSTATUS
++gckKERNEL_CreateShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Size,
++ OUT gctSHBUF * ShBuf
++ );
++
++gceSTATUS
++gckKERNEL_DestroyShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf
++ );
++
++gceSTATUS
++gckKERNEL_MapShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf
++ );
++
++gceSTATUS
++gckKERNEL_WriteShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER UserData,
++ IN gctUINT32 ByteCount
++ );
++
++gceSTATUS
++gckKERNEL_ReadShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER UserData,
++ IN gctUINT32 ByteCount,
++ OUT gctUINT32 * BytesRead
++ );
++
++
++/******************************************************************************\
++******************************* gckCONTEXT Object *******************************
++\******************************************************************************/
++
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ );
++
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ );
++
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ );
++
++gceSTATUS
++gckCONTEXT_MapBuffer(
++ IN gckCONTEXT Context,
++ OUT gctUINT32 *Physicals,
++ OUT gctUINT64 *Logicals,
++ OUT gctUINT32 *Bytes
++ );
++
++#if gcdLINK_QUEUE_SIZE
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ );
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ );
++#endif
++
++gceSTATUS
++gckENTRYQUEUE_Enqueue(
++ IN gckKERNEL Kernel,
++ IN gckENTRYQUEUE Queue,
++ IN gctUINT32 physical,
++ IN gctUINT32 bytes
++ );
++
++gceSTATUS
++gckENTRYQUEUE_Dequeue(
++ IN gckENTRYQUEUE Queue,
++ OUT gckENTRYDATA * Data
++ );
++
++/******************************************************************************\
++****************************** gckRECORDER Object ******************************
++\******************************************************************************/
++gceSTATUS
++gckRECORDER_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ OUT gckRECORDER * Recorder
++ );
++
++gceSTATUS
++gckRECORDER_Destory(
++ IN gckOS Os,
++ IN gckRECORDER Recorder
++ );
++
++void
++gckRECORDER_AdvanceIndex(
++ gckRECORDER Recorder,
++ gctUINT64 CommitStamp
++ );
++
++void
++gckRECORDER_Record(
++ gckRECORDER Recorder,
++ gctUINT8_PTR CommandBuffer,
++ gctUINT32 CommandBytes,
++ gctUINT8_PTR ContextBuffer,
++ gctUINT32 ContextBytes
++ );
++
++void
++gckRECORDER_Dump(
++ gckRECORDER Recorder
++ );
++
++gceSTATUS
++gckRECORDER_UpdateMirror(
++ gckRECORDER Recorder,
++ gctUINT32 State,
++ gctUINT32 Data
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_heap.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_heap.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_heap.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_heap.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,858 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/**
++** @file
++** gckHEAP object for kernel HAL layer. The heap implemented here is an arena-
++** based memory allocation. An arena-based memory heap allocates data quickly
++** from specified arenas and reduces memory fragmentation.
++**
++*/
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_HEAP
++
++/*******************************************************************************
++***** Structures ***************************************************************
++*******************************************************************************/
++#define gcdIN_USE ((gcskNODE_PTR)gcvMAXUINTPTR_T)
++
++typedef struct _gcskNODE * gcskNODE_PTR;
++typedef struct _gcskNODE
++{
++ /* Number of bytes in node. */
++ gctSIZE_T bytes;
++
++ /* Pointer to next free node, or gcvNULL to mark the node as freed, or
++ ** gcdIN_USE to mark the node as used. */
++ gcskNODE_PTR next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Time stamp of allocation. */
++ gctUINT64 timeStamp;
++#endif
++}
++gcskNODE;
++
++typedef struct _gcskHEAP * gcskHEAP_PTR;
++typedef struct _gcskHEAP
++{
++ /* Linked list. */
++ gcskHEAP_PTR next;
++ gcskHEAP_PTR prev;
++
++ /* Heap size. */
++ gctSIZE_T size;
++
++ /* Free list. */
++ gcskNODE_PTR freeList;
++}
++gcskHEAP;
++
++struct _gckHEAP
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to a gckOS object. */
++ gckOS os;
++
++ /* Locking mutex. */
++ gctPOINTER mutex;
++
++ /* Allocation parameters. */
++ gctSIZE_T allocationSize;
++
++ /* Heap list. */
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT64 timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Profile information. */
++ gctUINT32 allocCount;
++ gctUINT64 allocBytes;
++ gctUINT64 allocBytesMax;
++ gctUINT64 allocBytesTotal;
++ gctUINT32 heapCount;
++ gctUINT32 heapCountMax;
++ gctUINT64 heapMemory;
++ gctUINT64 heapMemoryMax;
++#endif
++};
++
++/*******************************************************************************
++***** Static Support Functions *************************************************
++*******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++static gctSIZE_T
++_DumpHeap(
++ IN gcskHEAP_PTR Heap
++ )
++{
++ gctPOINTER p;
++ gctSIZE_T leaked = 0;
++
++ /* Start at first node. */
++ for (p = Heap + 1;;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ /* Check if this is a used node. */
++ if (node->next == gcdIN_USE)
++ {
++ /* Print the leaking node. */
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_HEAP,
++ "Detected leaking: node=0x%x bytes=%lu timeStamp=%llu "
++ "(%08X %c%c%c%c)",
++ node, node->bytes, node->timeStamp,
++ ((gctUINT32_PTR) (node + 1))[0],
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[0]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[1]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[2]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[3]));
++
++ /* Add leaking byte count. */
++ leaked += node->bytes;
++ }
++
++ /* Test for end of heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ else
++ {
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++ }
++
++ /* Return the number of leaked bytes. */
++ return leaked;
++}
++#endif
++
++static gceSTATUS
++_CompactKernelHeap(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap, next;
++ gctPOINTER p;
++ gcskHEAP_PTR freeList = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = next)
++ {
++ gcskNODE_PTR lastFree = gcvNULL;
++
++ /* Zero out the free list. */
++ heap->freeList = gcvNULL;
++
++ /* Start at the first node. */
++ for (p = (gctUINT8_PTR) (heap + 1);;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size));
++
++ /* Test if this node not used. */
++ if (node->next != gcdIN_USE)
++ {
++ /* Test if this is the end of the heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ /* Test of this is the first free node. */
++ else if (lastFree == gcvNULL)
++ {
++ /* Initialzie the free list. */
++ heap->freeList = node;
++ lastFree = node;
++ }
++
++ else
++ {
++ /* Test if this free node is contiguous with the previous
++ ** free node. */
++ if ((gctUINT8_PTR) lastFree + lastFree->bytes == p)
++ {
++ /* Just increase the size of the previous free node. */
++ lastFree->bytes += node->bytes;
++ }
++ else
++ {
++ /* Add to linked list. */
++ lastFree->next = node;
++ lastFree = node;
++ }
++ }
++ }
++
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++
++ /* Mark the end of the chain. */
++ if (lastFree != gcvNULL)
++ {
++ lastFree->next = gcvNULL;
++ }
++
++ /* Get next heap. */
++ next = heap->next;
++
++ /* Check if the entire heap is free. */
++ if ((heap->freeList != gcvNULL)
++ && (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE))
++ )
++ {
++ /* Remove the heap from the linked list. */
++ if (heap->prev == gcvNULL)
++ {
++ Heap->heap = next;
++ }
++ else
++ {
++ heap->prev->next = next;
++ }
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount -= 1;
++ Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP);
++#endif
++
++ /* Add this heap to the list of heaps that need to be freed. */
++ heap->next = freeList;
++ freeList = heap;
++ }
++ }
++
++ if (freeList != gcvNULL)
++ {
++ /* Release the mutex, remove any chance for a dead lock. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Free all heaps in the free list. */
++ for (heap = freeList; heap != gcvNULL; heap = next)
++ {
++ /* Get pointer to the next heap. */
++ next = heap->next;
++
++ /* Free the heap. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Freeing heap 0x%x (%lu bytes)",
++ heap, heap->size + gcmSIZEOF(gcskHEAP));
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Acquire the mutex again. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++***** gckHEAP API Code *********************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHEAP_Construct
++**
++** Construct a new gckHEAP object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIZE_T AllocationSize
++** Minimum size per arena.
++**
++** OUTPUT:
++**
++** gckHEAP * Heap
++** Pointer to a variable that will hold the pointer to the gckHEAP
++** object.
++*/
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ )
++{
++ gceSTATUS status;
++ gckHEAP heap = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x AllocationSize=%lu", Os, AllocationSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Heap != gcvNULL);
++
++ /* Allocate the gckHEAP object. */
++ gcmkONERROR(gckOS_AllocateMemory(Os,
++ gcmSIZEOF(struct _gckHEAP),
++ &pointer));
++
++ heap = pointer;
++
++ /* Initialize the gckHEAP object. */
++ heap->object.type = gcvOBJ_HEAP;
++ heap->os = Os;
++ heap->allocationSize = AllocationSize;
++ heap->heap = gcvNULL;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ heap->timeStamp = 0;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Zero the counters. */
++ heap->allocCount = 0;
++ heap->allocBytes = 0;
++ heap->allocBytesMax = 0;
++ heap->allocBytesTotal = 0;
++ heap->heapCount = 0;
++ heap->heapCountMax = 0;
++ heap->heapMemory = 0;
++ heap->heapMemoryMax = 0;
++#endif
++
++ /* Create the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &heap->mutex));
++
++ /* Return the pointer to the gckHEAP object. */
++ *Heap = heap;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Heap=0x%x", *Heap);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (heap != gcvNULL)
++ {
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Os, heap));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Destroy
++**
++** Destroy a gckHEAP object.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctSIZE_T leaked = 0;
++#endif
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ for (heap = Heap->heap; heap != gcvNULL; heap = Heap->heap)
++ {
++ /* Unlink heap from linked list. */
++ Heap->heap = heap->next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Check for leaked memory. */
++ leaked += _DumpHeap(heap);
++#endif
++
++ /* Free the heap. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Heap->os, Heap->mutex));
++
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, Heap));
++
++ /* Success. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkFOOTER_ARG("leaked=%lu", leaked);
++#else
++ gcmkFOOTER_NO();
++#endif
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Allocate
++**
++** Allocate data from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctSIZE_T Bytes
++** Number of byte to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the address of the allocated
++** memory.
++*/
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctBOOL acquired = gcvFALSE;
++ gcskHEAP_PTR heap;
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gcskNODE_PTR node, used, prevFree = gcvNULL;
++ gctPOINTER memory = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Determine number of bytes required for a node. */
++ bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Check if this allocation is bigger than the default allocation size. */
++ if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
++ {
++ /* Adjust allocation size. */
++ Heap->allocationSize = bytes * 2;
++ }
++
++ else if (Heap->heap != gcvNULL)
++ {
++ gctINT i;
++
++ /* 2 retries, since we might need to compact. */
++ for (i = 0; i < 2; ++i)
++ {
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
++ {
++ /* Check if this heap has enough bytes to hold the request. */
++ if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
++ {
++ prevFree = gcvNULL;
++
++ /* Walk the chain of free nodes. */
++ for (node = heap->freeList;
++ node != gcvNULL;
++ node = node->next
++ )
++ {
++ gcmkASSERT(node->next != gcdIN_USE);
++
++ /* Check if this free node has enough bytes. */
++ if (node->bytes >= bytes)
++ {
++ /* Use the node. */
++ goto UseNode;
++ }
++
++ /* Save current free node for linked list management. */
++ prevFree = node;
++ }
++ }
++ }
++
++ if (i == 0)
++ {
++ /* Compact the heap. */
++ gcmkVERIFY_OK(_CompactKernelHeap(Heap));
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "===== KERNEL HEAP =====");
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of allocations : %12u",
++ Heap->allocCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of bytes allocated : %12llu",
++ Heap->allocBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum allocation size : %12llu",
++ Heap->allocBytesMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Total number of bytes allocated : %12llu",
++ Heap->allocBytesTotal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of heaps : %12u",
++ Heap->heapCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Heap memory in bytes : %12llu",
++ Heap->heapMemory);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum number of heaps : %12u",
++ Heap->heapCountMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum heap memory in bytes : %12llu",
++ Heap->heapMemoryMax);
++#endif
++ }
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkONERROR(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ acquired = gcvFALSE;
++
++ /* Allocate a new heap. */
++ gcmkONERROR(
++ gckOS_AllocateMemory(Heap->os,
++ Heap->allocationSize,
++ &memory));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Allocated heap 0x%x (%lu bytes)",
++ memory, Heap->allocationSize);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Use the allocated memory as the heap. */
++ heap = (gcskHEAP_PTR) memory;
++
++ /* Insert this heap to the head of the chain. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap;
++ }
++ Heap->heap = heap;
++
++ /* Mark the end of the heap. */
++ node = (gcskNODE_PTR) ( (gctUINT8_PTR) heap
++ + Heap->allocationSize
++ - gcmSIZEOF(gcskNODE)
++ );
++ node->bytes = 0;
++ node->next = gcvNULL;
++
++ /* Create a free list. */
++ node = (gcskNODE_PTR) (heap + 1);
++ heap->freeList = node;
++
++ /* Initialize the free list. */
++ node->bytes = heap->size - gcmSIZEOF(gcskNODE);
++ node->next = gcvNULL;
++
++ /* No previous free. */
++ prevFree = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount += 1;
++ Heap->heapMemory += Heap->allocationSize;
++
++ if (Heap->heapCount > Heap->heapCountMax)
++ {
++ Heap->heapCountMax = Heap->heapCount;
++ }
++ if (Heap->heapMemory > Heap->heapMemoryMax)
++ {
++ Heap->heapMemoryMax = Heap->heapMemory;
++ }
++#endif
++
++UseNode:
++ /* Verify some stuff. */
++ gcmkASSERT(heap != gcvNULL);
++ gcmkASSERT(node != gcvNULL);
++ gcmkASSERT(node->bytes >= bytes);
++
++ if (heap->prev != gcvNULL)
++ {
++ /* Unlink the heap from the linked list. */
++ heap->prev->next = heap->next;
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++ /* Move the heap to the front of the list. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ Heap->heap = heap;
++ heap->next->prev = heap;
++ }
++
++ /* Check if there is enough free space left after usage for another free
++ ** node. */
++ if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
++ {
++ /* Allocated used space from the back of the free list. */
++ used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);
++
++ /* Adjust the number of free bytes. */
++ node->bytes -= bytes;
++ gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
++ }
++ else
++ {
++ /* Remove this free list from the chain. */
++ if (prevFree == gcvNULL)
++ {
++ heap->freeList = node->next;
++ }
++ else
++ {
++ prevFree->next = node->next;
++ }
++
++ /* Consume the entire free node. */
++ used = (gcskNODE_PTR) node;
++ bytes = node->bytes;
++ }
++
++ /* Mark node as used. */
++ used->bytes = bytes;
++ used->next = gcdIN_USE;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ used->timeStamp = ++Heap->timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocCount += 1;
++ Heap->allocBytes += bytes;
++ Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
++ Heap->allocBytesTotal += bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Return pointer to memory. */
++ *Memory = used + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++ }
++
++ if (memory != gcvNULL)
++ {
++ /* Free the heap memory. */
++ gckOS_FreeMemory(Heap->os, memory);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Free
++**
++** Free allocated memory from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctPOINTER Memory
++** Pointer to memory to free.
++**
++** OUTPUT:
++**
++** NOTHING.
++*/
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Memory
++ )
++{
++ gcskNODE_PTR node;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Heap=0x%x Memory=0x%x", Heap, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ /* Pointer to structure. */
++ node = (gcskNODE_PTR) Memory - 1;
++
++ /* Mark the node as freed. */
++ node->next = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocBytes -= node->bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++
++ /* Zero the counters. */
++ Heap->allocCount = 0;
++ Heap->allocBytes = 0;
++ Heap->allocBytesMax = 0;
++ Heap->allocBytesTotal = 0;
++ Heap->heapCount = 0;
++ Heap->heapCountMax = 0;
++ Heap->heapMemory = 0;
++ Heap->heapMemoryMax = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x Title=0x%x", Heap, Title);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Title != gcvNULL);
++
++ gcmkPRINT("");
++ gcmkPRINT("=====[ HEAP - %s ]=====", Title);
++ gcmkPRINT("Number of allocations : %12u", Heap->allocCount);
++ gcmkPRINT("Number of bytes allocated : %12llu", Heap->allocBytes);
++ gcmkPRINT("Maximum allocation size : %12llu", Heap->allocBytesMax);
++ gcmkPRINT("Total number of bytes allocated : %12llu", Heap->allocBytesTotal);
++ gcmkPRINT("Number of heaps : %12u", Heap->heapCount);
++ gcmkPRINT("Heap memory in bytes : %12llu", Heap->heapMemory);
++ gcmkPRINT("Maximum number of heaps : %12u", Heap->heapCountMax);
++ gcmkPRINT("Maximum heap memory in bytes : %12llu", Heap->heapMemoryMax);
++ gcmkPRINT("==============================================");
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif /* VIVANTE_PROFILER */
++
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_interrupt_vg.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_interrupt_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_interrupt_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_interrupt_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,877 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++/* Interruot statistics will be accumulated if not zero. */
++#define gcmENABLE_INTERRUPT_STATISTICS 0
++
++#define _GC_OBJ_ZONE gcvZONE_INTERRUPT
++
++/* Object structure. */
++struct _gckVGINTERRUPT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* gckVGKERNEL pointer. */
++ gckVGKERNEL kernel;
++
++ /* gckOS pointer. */
++ gckOS os;
++
++ /* Interrupt handlers. */
++ gctINTERRUPT_HANDLER handlers[32];
++
++ /* Main interrupt handler thread. */
++ gctTHREAD handler;
++ gctBOOL terminate;
++
++ /* Interrupt FIFO. */
++ gctSEMAPHORE fifoValid;
++ gctUINT32 fifo[256];
++ gctUINT fifoItems;
++ gctUINT8 head;
++ gctUINT8 tail;
++
++ /* Interrupt statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT maxFifoItems;
++ gctUINT fifoOverflow;
++ gctUINT maxSimultaneous;
++ gctUINT multipleCount;
++#endif
++};
++
++
++/*******************************************************************************
++**
++** _ProcessInterrupt
++**
++** The interrupt processor.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt,
++ gctUINT_PTR TriggeredCount
++ )
++#else
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++ gctUINT i;
++
++ /* Advance to the next entry. */
++ Interrupt->tail += 1;
++ Interrupt->fifoItems -= 1;
++
++ /* Get the interrupt value. */
++ triggered = Interrupt->fifo[Interrupt->tail];
++ gcmkASSERT(triggered != 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: triggered=0x%08X\n",
++ __FUNCTION__,
++ triggered
++ );
++
++ /* Walk through all possible interrupts. */
++ for (i = 0; i < gcmSIZEOF(Interrupt->handlers); i += 1)
++ {
++ /* Test if interrupt happened. */
++ if ((triggered & 1) == 1)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (TriggeredCount != gcvNULL)
++ {
++ (* TriggeredCount) += 1;
++ }
++#endif
++
++ /* Make sure we have valid handler. */
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Interrupt %d isn't registered.\n",
++ __FUNCTION__, i
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: interrupt=%d\n",
++ __FUNCTION__,
++ i
++ );
++
++ /* Call the handler. */
++ status = Interrupt->handlers[i] (Interrupt->kernel);
++
++ if (gcmkIS_ERROR(status))
++ {
++ /* Failed to signal the semaphore. */
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Error %d incrementing the semaphore #%d.\n",
++ __FUNCTION__, status, i
++ );
++ }
++ }
++ }
++
++ /* Next interrupt. */
++ triggered >>= 1;
++
++ /* No more interrupts to handle? */
++ if (triggered == 0)
++ {
++ break;
++ }
++ }
++}
++
++
++/*******************************************************************************
++**
++** _MainInterruptHandler
++**
++** The main interrupt thread serves the interrupt FIFO and calls registered
++** handlers for the interrupts that occured. The handlers are called in the
++** sequence interrupts occured with the exception when multiple interrupts
++** occured at the same time. In that case the handler calls are "sorted" by
++** the interrupt number therefore giving the interrupts with lower numbers
++** higher priority.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gctTHREADFUNCRESULT gctTHREADFUNCTYPE
++_MainInterruptHandler(
++ gctTHREADFUNCPARAMETER ThreadParameter
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT count;
++#endif
++
++ /* Cast the object. */
++ interrupt = (gckVGINTERRUPT) ThreadParameter;
++
++ /* Enter the loop. */
++ while (gcvTRUE)
++ {
++ /* Wait for an interrupt. */
++ status = gckOS_DecrementSemaphore(interrupt->os, interrupt->fifoValid);
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* System termination request? */
++ if (status == gcvSTATUS_TERMINATE)
++ {
++ break;
++ }
++
++ /* Driver is shutting down? */
++ if (interrupt->terminate)
++ {
++ break;
++ }
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Reset triggered count. */
++ count = 0;
++
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt, &count);
++
++ /* Update conters. */
++ if (count > interrupt->maxSimultaneous)
++ {
++ interrupt->maxSimultaneous = count;
++ }
++
++ if (count > 1)
++ {
++ interrupt->multipleCount += 1;
++ }
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt);
++#endif
++ }
++
++ return 0;
++}
++
++
++/*******************************************************************************
++**
++** _StartInterruptHandler / _StopInterruptHandler
++**
++** Main interrupt handler routine control.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gceSTATUS
++_StartInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status, last;
++
++ do
++ {
++ /* Objects must not be already created. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++ gcmkASSERT(Interrupt->handler == gcvNULL);
++
++ /* Reset the termination request. */
++ Interrupt->terminate = gcvFALSE;
++
++#if !gcdENABLE_INFINITE_SPEED_HW
++ /* Construct the fifo semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ Interrupt->os, &Interrupt->fifoValid
++ ));
++
++ /* Start the interrupt handler thread. */
++ gcmkERR_BREAK(gckOS_StartThread(
++ Interrupt->os,
++ _MainInterruptHandler,
++ Interrupt,
++ &Interrupt->handler
++ ));
++#endif
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (Interrupt->fifoValid != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ Interrupt->fifoValid = gcvNULL;
++ }
++
++ /* Return the status. */
++ return status;
++}
++
++static gceSTATUS
++_StopInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Does the thread exist? */
++ if (Interrupt->handler == gcvNULL)
++ {
++ /* The semaphore must be NULL as well. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* The semaphore must exist as well. */
++ gcmkASSERT(Interrupt->fifoValid != gcvNULL);
++
++ /* Set the termination request. */
++ Interrupt->terminate = gcvTRUE;
++
++ /* Unlock the thread. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Wait until the thread quits. */
++ gcmkERR_BREAK(gckOS_StopThread(
++ Interrupt->os,
++ Interrupt->handler
++ ));
++
++ /* Destroy the semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Reset handles. */
++ Interrupt->handler = gcvNULL;
++ Interrupt->fifoValid = gcvNULL;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++
++/******************************************************************************\
++***************************** Interrupt Object API *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Construct
++**
++** Construct an interrupt object.
++**
++** INPUT:
++**
++** Kernel
++** Pointer to the gckVGKERNEL object.
++**
++** OUTPUT:
++**
++** Interrupt
++** Pointer to the new gckVGINTERRUPT object.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interrupt=0x%x", Kernel, Interrupt);
++
++ /* Verify argeuments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interrupt != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGINTERRUPT),
++ (gctPOINTER *) &interrupt
++ ));
++
++ /* Reset the object data. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ interrupt, gcmSIZEOF(struct _gckVGINTERRUPT)
++ ));
++
++ /* Initialize the object. */
++ interrupt->object.type = gcvOBJ_INTERRUPT;
++
++ /* Initialize the object pointers. */
++ interrupt->kernel = Kernel;
++ interrupt->os = Kernel->os;
++
++ /* Initialize the current FIFO position. */
++ interrupt->head = (gctUINT8)~0;
++ interrupt->tail = (gctUINT8)~0;
++
++ /* Start the thread. */
++ gcmkERR_BREAK(_StartInterruptHandler(interrupt));
++
++ /* Return interrupt object. */
++ *Interrupt = interrupt;
++
++ gcmkFOOTER_ARG("*Interrup=0x%x", *Interrupt);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (interrupt != gcvNULL)
++ {
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkVERIFY_OK(gckOS_Free(interrupt->os, interrupt));
++ }
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Destroy
++**
++** Destroy an interrupt object.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to the gckVGINTERRUPT object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ do
++ {
++ /* Stop the interrupt thread. */
++ gcmkERR_BREAK(_StopInterruptHandler(Interrupt));
++
++ /* Mark the object as unknown. */
++ Interrupt->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Free(Interrupt->os, Interrupt));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_DumpState
++**
++** Print the current state of the interrupt manager.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcvDEBUG
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ /* Print the header. */
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: INTERRUPT OBJECT STATUS\n",
++ __FUNCTION__
++ );
++
++ /* Print statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of FIFO items accumulated at a single time: %d\n",
++ Interrupt->maxFifoItems
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Interrupt FIFO overflow happened times: %d\n",
++ Interrupt->fifoOverflow
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of interrupts simultaneously generated: %d\n",
++ Interrupt->maxSimultaneous
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Number of times when there were multiple interrupts generated: %d\n",
++ Interrupt->multipleCount
++ );
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " The current number of entries in the FIFO: %d\n",
++ Interrupt->fifoItems
++ );
++
++ /* Print the FIFO contents. */
++ if (Interrupt->fifoItems != 0)
++ {
++ gctUINT8 index;
++ gctUINT8 last;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " FIFO current contents:\n"
++ );
++
++ /* Get the current pointers. */
++ index = Interrupt->tail;
++ last = Interrupt->head;
++
++ while (index != last)
++ {
++ /* Advance to the next entry. */
++ index += 1;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " %d: 0x%08X\n",
++ index, Interrupt->fifo[index]
++ );
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enable
++**
++** Enable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Pointer to the variable that holds the interrupt number to be
++** registered in range 0..31.
++** If the value is less then 0, gckVGINTERRUPT_Enable will attempt
++** to find an unused interrupt. If such interrupt is found, the number
++** will be assigned to the variable if the functuion call succeedes.
++**
++** Handler
++** Pointer to the handler to register for the interrupt.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ )
++{
++ gceSTATUS status;
++ gctINT32 i;
++
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x Handler=0x%x", Interrupt, Id, Handler);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Handler != gcvNULL);
++
++ do
++ {
++ /* See if we need to allocate an ID. */
++ if (*Id < 0)
++ {
++ /* Find the first unused interrupt handler. */
++ for (i = 0; i < gcmCOUNTOF(Interrupt->handlers); ++i)
++ {
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ break;
++ }
++ }
++
++ /* No unused innterrupts? */
++ if (i == gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++
++ /* Update the interrupt ID. */
++ *Id = i;
++ }
++
++ /* Make sure the ID is in range. */
++ else if (*Id >= gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Set interrupt handler. */
++ Interrupt->handlers[*Id] = Handler;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Disable
++**
++** Disable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Interrupt number to be disabled in range 0..31.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x", Interrupt, Id);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT((Id >= 0) && (Id < gcmCOUNTOF(Interrupt->handlers)));
++
++ /* Reset interrupt handler. */
++ Interrupt->handlers[Id] = gcvNULL;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enque
++**
++** Read the interrupt status register and put the value in the interrupt FIFO.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#ifndef __QNXNTO__
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ )
++#else
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++#ifdef __QNXNTO__
++ *Os = gcvNULL;
++ *Semaphore = gcvNULL;
++#endif
++
++ do
++ {
++ /* Read interrupt status register. */
++ gcmkERR_BREAK(gckVGHARDWARE_ReadInterrupt(
++ Interrupt->kernel->hardware, &triggered
++ ));
++
++ /* Mask out TS overflow interrupt */
++ triggered &= 0xfffffffe;
++
++ /* No interrupts to process? */
++ if (triggered == 0)
++ {
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ break;
++ }
++
++ /* FIFO overflow? */
++ if (Interrupt->fifoItems == gcmCOUNTOF(Interrupt->fifo))
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ Interrupt->fifoOverflow += 1;
++#endif
++
++ /* OR the interrupt with the last value in the FIFO. */
++ Interrupt->fifo[Interrupt->head] |= triggered;
++
++ /* Success (kind of). */
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ /* Advance to the next entry. */
++ Interrupt->head += 1;
++ Interrupt->fifoItems += 1;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (Interrupt->fifoItems > Interrupt->maxFifoItems)
++ {
++ Interrupt->maxFifoItems = Interrupt->fifoItems;
++ }
++#endif
++
++ /* Set the new value. */
++ Interrupt->fifo[Interrupt->head] = triggered;
++
++#ifndef __QNXNTO__
++ /* Increment the FIFO semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++#else
++ *Os = Interrupt->os;
++ *Semaphore = Interrupt->fifoValid;
++#endif
++
++ /* Windows kills our threads prematurely when the application
++ exists. Verify here that the thread is still alive. */
++ status = gckOS_VerifyThread(Interrupt->os, Interrupt->handler);
++
++ /* Has the thread been prematurely terminated? */
++ if (status != gcvSTATUS_OK)
++ {
++ /* Process all accumulated interrupts. */
++ while (Interrupt->head != Interrupt->tail)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt, gcvNULL);
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt);
++#endif
++ }
++
++ /* Set success. */
++ status = gcvSTATUS_OK;
++ }
++ }
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2260 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++typedef enum _gceMMU_TYPE
++{
++ gcvMMU_USED = (0 << 4),
++ gcvMMU_SINGLE = (1 << 4),
++ gcvMMU_FREE = (2 << 4),
++}
++gceMMU_TYPE;
++
++#define gcmENTRY_TYPE(x) (x & 0xF0)
++
++#define gcdMMU_TABLE_DUMP 0
++
++#define gcdUSE_MMU_EXCEPTION 1
++
++/*
++ gcdMMU_CLEAR_VALUE
++
++ The clear value for the entry of the old MMU.
++*/
++#ifndef gcdMMU_CLEAR_VALUE
++# define gcdMMU_CLEAR_VALUE 0x00000ABC
++#endif
++
++#define gcdVERTEX_START (128 << 10)
++
++typedef struct _gcsMMU_STLB *gcsMMU_STLB_PTR;
++
++typedef struct _gcsMMU_STLB
++{
++ gctPHYS_ADDR physical;
++ gctUINT32_PTR logical;
++ gctSIZE_T size;
++ gctUINT32 physBase;
++ gctSIZE_T pageCount;
++ gctUINT32 mtlbIndex;
++ gctUINT32 mtlbEntryNum;
++ gcsMMU_STLB_PTR next;
++} gcsMMU_STLB;
++
++#if gcdSHARED_PAGETABLE
++typedef struct _gcsSharedPageTable * gcsSharedPageTable_PTR;
++typedef struct _gcsSharedPageTable
++{
++ /* Shared gckMMU object. */
++ gckMMU mmu;
++
++ /* Hardwares which use this shared pagetable. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores use this shared pagetable. */
++ gctUINT32 reference;
++}
++gcsSharedPageTable;
++
++static gcsSharedPageTable_PTR sharedPageTable = gcvNULL;
++#endif
++
++#if gcdMIRROR_PAGETABLE
++typedef struct _gcsMirrorPageTable * gcsMirrorPageTable_PTR;
++typedef struct _gcsMirrorPageTable
++{
++ /* gckMMU objects. */
++ gckMMU mmus[gcdMAX_GPU_COUNT];
++
++ /* Hardwares which use this shared pagetable. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores use this shared pagetable. */
++ gctUINT32 reference;
++}
++gcsMirrorPageTable;
++
++static gcsMirrorPageTable_PTR mirrorPageTable = gcvNULL;
++static gctPOINTER mirrorPageTableMutex = gcvNULL;
++#endif
++
++typedef struct _gcsDynamicSpaceNode * gcsDynamicSpaceNode_PTR;
++typedef struct _gcsDynamicSpaceNode
++{
++ gctUINT32 start;
++ gctINT32 entries;
++}
++gcsDynamicSpaceNode;
++
++static void
++_WritePageEntry(
++ IN gctUINT32_PTR PageEntry,
++ IN gctUINT32 EntryValue
++ )
++{
++ static gctUINT16 data = 0xff00;
++
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ *PageEntry = gcmSWAB32(EntryValue);
++ }
++ else
++ {
++ *PageEntry = EntryValue;
++ }
++}
++
++static gctUINT32
++_ReadPageEntry(
++ IN gctUINT32_PTR PageEntry
++ )
++{
++ static gctUINT16 data = 0xff00;
++ gctUINT32 entryValue;
++
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ entryValue = *PageEntry;
++ return gcmSWAB32(entryValue);
++ }
++ else
++ {
++ return *PageEntry;
++ }
++}
++
++static gceSTATUS
++_FillPageTable(
++ IN gctUINT32_PTR PageTable,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 EntryValue
++)
++{
++ gctUINT i;
++
++ for (i = 0; i < PageCount; i++)
++ {
++ _WritePageEntry(PageTable + i, EntryValue);
++ }
++
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_Link(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Next
++ )
++{
++ if (Index >= Mmu->pageTableEntries)
++ {
++ /* Just move heap pointer. */
++ Mmu->heapList = Next;
++ }
++ else
++ {
++ /* Address page table. */
++ gctUINT32_PTR map = Mmu->mapLogical;
++
++ /* Dispatch on node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[Index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Set single index. */
++ _WritePageEntry(&map[Index], (Next << 8) | gcvMMU_SINGLE);
++ break;
++
++ case gcvMMU_FREE:
++ /* Set index. */
++ _WritePageEntry(&map[Index + 1], Next);
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", Index);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_AddFree(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Node,
++ IN gctUINT32 Count
++ )
++{
++ gctUINT32_PTR map = Mmu->mapLogical;
++
++ if (Count == 1)
++ {
++ /* Initialize a single page node. */
++ _WritePageEntry(map + Node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++ }
++ else
++ {
++ /* Initialize the node. */
++ _WritePageEntry(map + Node + 0, (Count << 8) | gcvMMU_FREE);
++ _WritePageEntry(map + Node + 1, ~0U);
++ }
++
++ /* Append the node. */
++ return _Link(Mmu, Index, Node);
++}
++
++static gceSTATUS
++_Collect(
++ IN gckMMU Mmu
++ )
++{
++ gctUINT32_PTR map = Mmu->mapLogical;
++ gceSTATUS status;
++ gctUINT32 i, previous, start = 0, count = 0;
++
++ previous = Mmu->heapList = ~0U;
++ Mmu->freeNodes = gcvFALSE;
++
++ /* Walk the entire page table. */
++ for (i = 0; i < Mmu->pageTableEntries; ++i)
++ {
++ /* Dispatch based on type of page. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
++ {
++ case gcvMMU_USED:
++ /* Used page, so close any open node. */
++ if (count > 0)
++ {
++ /* Add the node. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++
++ /* Reset the node. */
++ previous = start;
++ count = 0;
++ }
++ break;
++
++ case gcvMMU_SINGLE:
++ /* Single free node. */
++ if (count++ == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* A free node. */
++ if (count == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++
++ /* Advance the count. */
++ count += _ReadPageEntry(&map[i]) >> 8;
++
++ /* Advance the index into the page table. */
++ i += (_ReadPageEntry(&map[i]) >> 8) - 1;
++ break;
++
++ default:
++ gcmkFATAL("MMU page table correcupted at index %u!", i);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* See if we have an open node left. */
++ if (count > 0)
++ {
++ /* Add the node to the list. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_MMU,
++ "Performed a garbage collection of the MMU heap.");
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the staus. */
++ return status;
++}
++
++static gctUINT32
++_SetPage(gctUINT32 PageAddress)
++{
++ return PageAddress
++ /* writable */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gctUINT32
++_AddressToIndex(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ )
++{
++ gctUINT32 mtlbOffset = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 stlbOffset = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++
++ return (mtlbOffset - Mmu->dynamicMappingStart) * gcdMMU_STLB_4K_ENTRY_NUM + stlbOffset;
++}
++
++gctUINT32
++_MtlbOffset(
++ gctUINT32 Address
++ )
++{
++ return (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++}
++
++gctUINT32
++_StlbOffset(
++ gctUINT32 Address
++ )
++{
++ return (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++}
++
++static gceSTATUS
++_AllocateStlb(
++ IN gckOS Os,
++ OUT gcsMMU_STLB_PTR *Stlb
++ )
++{
++ gceSTATUS status;
++ gcsMMU_STLB_PTR stlb;
++ gctPOINTER pointer;
++
++ /* Allocate slave TLB record. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsMMU_STLB), &pointer));
++ stlb = pointer;
++
++ stlb->size = gcdMMU_STLB_4K_SIZE;
++
++ /* Allocate slave TLB entries. */
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Os,
++ gcvFALSE,
++ &stlb->size,
++ &stlb->physical,
++ (gctPOINTER)&stlb->logical
++ ));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(Os, stlb->logical, &stlb->physBase));
++
++#if gcdUSE_MMU_EXCEPTION
++ _FillPageTable(stlb->logical, stlb->size / 4, gcdMMU_STLB_EXCEPTION);
++#else
++ gckOS_ZeroMemory(stlb->logical, stlb->size);
++#endif
++
++ *Stlb = stlb;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++gceSTATUS
++_SetupProcessAddressSpace(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gctINT numEntries = 0;
++ gctUINT32_PTR map;
++
++ numEntries = gcdPROCESS_ADDRESS_SPACE_SIZE
++ /* Address space mapped by one MTLB entry. */
++ / (1 << gcdMMU_MTLB_SHIFT);
++
++ Mmu->dynamicMappingStart = 0;
++
++ Mmu->pageTableSize = numEntries * 4096;
++
++ Mmu->pageTableEntries = Mmu->pageTableSize / gcmSIZEOF(gctUINT32);
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ Mmu->pageTableSize,
++ (void **)&Mmu->mapLogical));
++
++ /* Initilization. */
++ map = Mmu->mapLogical;
++ _WritePageEntry(map, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(map + 1, ~0U);
++ Mmu->heapList = 0;
++ Mmu->freeNodes = gcvFALSE;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++#else
++static gceSTATUS
++_FillFlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBase,
++ OUT gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcsMMU_STLB_PTR head = gcvNULL, pre = gcvNULL;
++ gctUINT32 start = PhysBase & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 end = (PhysBase + Size - 1) & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 mStart = start >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 mEnd = end >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 sStart = (start & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++ gctUINT32 sEnd = (end & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ while (mStart <= mEnd)
++ {
++ gcmkASSERT(mStart < gcdMMU_MTLB_ENTRY_NUM);
++ if (*(Mmu->mtlbLogical + mStart) == 0)
++ {
++ gcsMMU_STLB_PTR stlb;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 last = (mStart == mEnd) ? sEnd : (gcdMMU_STLB_64K_ENTRY_NUM - 1);
++ gctUINT32 mtlbEntry;
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os, sizeof(struct _gcsMMU_STLB), &pointer));
++ stlb = pointer;
++
++ stlb->mtlbEntryNum = 0;
++ stlb->next = gcvNULL;
++ stlb->physical = gcvNULL;
++ stlb->logical = gcvNULL;
++ stlb->size = gcdMMU_STLB_64K_SIZE;
++ stlb->pageCount = 0;
++
++ if (pre == gcvNULL)
++ {
++ pre = head = stlb;
++ }
++ else
++ {
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = stlb;
++ pre = stlb;
++ }
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &stlb->size,
++ &stlb->physical,
++ (gctPOINTER)&stlb->logical));
++
++ gcmkONERROR(gckOS_ZeroMemory(stlb->logical, stlb->size));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Mmu->os,
++ stlb->logical,
++ &stlb->physBase));
++
++ if (stlb->physBase & (gcdMMU_STLB_64K_SIZE - 1))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ mtlbEntry = stlb->physBase
++ /* 64KB page size */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++
++ if (ace)
++ {
++ mtlbEntry = mtlbEntry
++ /* Secure */
++ | (1 << 4);
++ }
++
++ _WritePageEntry(Mmu->mtlbLogical + mStart, mtlbEntry);
++
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ mStart,
++ _ReadPageEntry(Mmu->mtlbLogical + mStart));
++#endif
++
++ stlb->mtlbIndex = mStart;
++ stlb->mtlbEntryNum = 1;
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): STLB: logical:%08x -> physical:%08x\n",
++ __FUNCTION__, __LINE__,
++ stlb->logical,
++ stlb->physBase);
++#endif
++
++ while (sStart <= last)
++ {
++ gcmkASSERT(!(start & gcdMMU_PAGE_64K_MASK));
++ _WritePageEntry(stlb->logical + sStart, _SetPage(start));
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert STLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ sStart,
++ _ReadPageEntry(stlb->logical + sStart));
++#endif
++ /* next page. */
++ start += gcdMMU_PAGE_64K_SIZE;
++ sStart++;
++ stlb->pageCount++;
++ }
++
++ sStart = 0;
++ ++mStart;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ }
++
++ /* Insert the stlb into staticSTLB. */
++ if (Mmu->staticSTLB == gcvNULL)
++ {
++ Mmu->staticSTLB = head;
++ }
++ else
++ {
++ gcmkASSERT(pre == gcvNULL);
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = Mmu->staticSTLB;
++ Mmu->staticSTLB = head;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Roll back. */
++ while (head != gcvNULL)
++ {
++ pre = head;
++ head = head->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_FindDynamicSpace(
++ IN gckMMU Mmu,
++ OUT gcsDynamicSpaceNode_PTR *Array,
++ OUT gctINT * Size
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer = gcvNULL;
++ gcsDynamicSpaceNode_PTR array = gcvNULL;
++ gctINT size = 0;
++ gctINT i = 0, nodeStart = -1, nodeEntries = 0;
++
++ /* Allocate memory for the array. */
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ gcmSIZEOF(*array) * (gcdMMU_MTLB_ENTRY_NUM / 2),
++ &pointer));
++
++ array = (gcsDynamicSpaceNode_PTR)pointer;
++
++ /* Loop all the entries. */
++ while (i < gcdMMU_MTLB_ENTRY_NUM)
++ {
++ if (!Mmu->mtlbLogical[i])
++ {
++ if (nodeStart < 0)
++ {
++ /* This is the first entry of the dynamic space. */
++ nodeStart = i;
++ nodeEntries = 1;
++ }
++ else
++ {
++ /* Other entries of the dynamic space. */
++ nodeEntries++;
++ }
++ }
++ else if (nodeStart >= 0)
++ {
++ /* Save the previous node. */
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++
++ /* Reset the start. */
++ nodeStart = -1;
++ nodeEntries = 0;
++ }
++
++ i++;
++ }
++
++ /* Save the previous node. */
++ if (nodeStart >= 0)
++ {
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++ }
++
++#if gcdMMU_TABLE_DUMP
++ for (i = 0; i < size; i++)
++ {
++ gckOS_Print("%s(%d): [%d]: start=%d, entries=%d.\n",
++ __FUNCTION__, __LINE__,
++ i,
++ array[i].start,
++ array[i].entries);
++ }
++#endif
++
++ *Array = array;
++ *Size = size;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (pointer != gcvNULL)
++ {
++ gckOS_Free(Mmu->os, pointer);
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_SetupDynamicSpace(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gcsDynamicSpaceNode_PTR nodeArray = gcvNULL;
++ gctINT i, nodeArraySize = 0;
++ gctUINT32 physical;
++ gctINT numEntries = 0;
++ gctUINT32_PTR map;
++ gctBOOL acquired = gcvFALSE;
++ gctUINT32 mtlbEntry;
++ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
++
++ /* Find all the dynamic address space. */
++ gcmkONERROR(_FindDynamicSpace(Mmu, &nodeArray, &nodeArraySize));
++
++ /* TODO: We only use the largest one for now. */
++ for (i = 0; i < nodeArraySize; i++)
++ {
++ if (nodeArray[i].entries > numEntries)
++ {
++ Mmu->dynamicMappingStart = nodeArray[i].start;
++ numEntries = nodeArray[i].entries;
++ }
++ }
++
++ gckOS_Free(Mmu->os, (gctPOINTER)nodeArray);
++
++ Mmu->pageTableSize = numEntries * 4096;
++
++ gcmkSAFECASTSIZET(Mmu->pageTableEntries, Mmu->pageTableSize / gcmSIZEOF(gctUINT32));
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ Mmu->pageTableSize,
++ (void **)&Mmu->mapLogical));
++
++ /* Construct Slave TLB. */
++ gcmkONERROR(gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &Mmu->pageTableSize,
++ &Mmu->pageTablePhysical,
++ (gctPOINTER)&Mmu->pageTableLogical));
++
++#if gcdUSE_MMU_EXCEPTION
++ gcmkONERROR(_FillPageTable(Mmu->pageTableLogical,
++ Mmu->pageTableEntries,
++ /* Enable exception */
++ 1 << 1));
++#else
++ /* Invalidate all entries. */
++ gcmkONERROR(gckOS_ZeroMemory(Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++#endif
++
++ /* Initilization. */
++ map = Mmu->mapLogical;
++ _WritePageEntry(map, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(map + 1, ~0U);
++ Mmu->heapList = 0;
++ Mmu->freeNodes = gcvFALSE;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(Mmu->os,
++ Mmu->pageTableLogical,
++ &physical));
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Map to Master TLB. */
++ for (i = (gctINT)Mmu->dynamicMappingStart;
++ i < (gctINT)Mmu->dynamicMappingStart + numEntries;
++ i++)
++ {
++ mtlbEntry = physical
++ /* 4KB page size */
++ | (0 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++
++ if (ace)
++ {
++ mtlbEntry = mtlbEntry
++ /* Secure */
++ | (1 << 4);
++ }
++
++ _WritePageEntry(Mmu->mtlbLogical + i, mtlbEntry);
++
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ i,
++ _ReadPageEntry(Mmu->mtlbLogical + i));
++#endif
++ physical += gcdMMU_STLB_4K_SIZE;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (Mmu->mapLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_Free(Mmu->os, (gctPOINTER) Mmu->mapLogical));
++
++
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** _Construct
++**
++** Construct a new gckMMU object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckMMU * Mmu
++** Pointer to a variable that receives the gckMMU object pointer.
++*/
++gceSTATUS
++_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gckMMU mmu = gcvNULL;
++ gctUINT32_PTR map;
++ gctPOINTER pointer = gcvNULL;
++#if gcdPROCESS_ADDRESS_SPACE
++ gctUINT32 i;
++ gctUINT32 physical;
++#endif
++ gctUINT32 physBase;
++ gctUINT32 physSize;
++ gctUINT32 gpuAddress;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=%lu", Kernel, MmuSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckMMU object. */
++ gcmkONERROR(gckOS_Allocate(os, sizeof(struct _gckMMU), &pointer));
++
++ mmu = pointer;
++
++ /* Initialize the gckMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++ mmu->pageTableMutex = gcvNULL;
++ mmu->pageTableLogical = gcvNULL;
++ mmu->mtlbLogical = gcvNULL;
++ mmu->staticSTLB = gcvNULL;
++ mmu->enabled = gcvFALSE;
++ mmu->mapLogical = gcvNULL;
++
++ /* Create the page table mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &mmu->pageTableMutex));
++
++ if (hardware->mmuVersion == 0)
++ {
++ mmu->pageTableSize = MmuSize;
++
++ /* Construct address space management table. */
++ gcmkONERROR(gckOS_Allocate(mmu->os,
++ mmu->pageTableSize,
++ &pointer));
++
++ mmu->mapLogical = pointer;
++
++ /* Construct page table read by GPU. */
++ gcmkONERROR(gckOS_AllocateContiguous(mmu->os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ (gctPOINTER)&mmu->pageTableLogical));
++
++
++ /* Compute number of entries in page table. */
++ gcmkSAFECASTSIZET(mmu->pageTableEntries, mmu->pageTableSize / sizeof(gctUINT32));
++
++ /* Mark all pages as free. */
++ map = mmu->mapLogical;
++
++#if gcdMMU_CLEAR_VALUE
++ _FillPageTable(mmu->pageTableLogical, mmu->pageTableEntries, gcdMMU_CLEAR_VALUE);
++#endif
++
++ _WritePageEntry(map, (mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(map + 1, ~0U);
++ mmu->heapList = 0;
++ mmu->freeNodes = gcvFALSE;
++ }
++ else
++ {
++ /* Allocate the 4K mode MTLB table. */
++ mmu->mtlbSize = gcdMMU_MTLB_SIZE + 64;
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->mtlbSize,
++ &mmu->mtlbPhysical,
++ &pointer));
++
++ mmu->mtlbLogical = pointer;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ _FillPageTable(pointer, mmu->mtlbSize / 4, gcdMMU_MTLB_EXCEPTION);
++
++ /* Allocate a array to store stlbs. */
++ gcmkONERROR(gckOS_Allocate(os, mmu->mtlbSize, &mmu->stlbs));
++
++ gckOS_ZeroMemory(mmu->stlbs, mmu->mtlbSize);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ gcmkONERROR(gckOS_AtomConstruct(os, &mmu->pageTableDirty[i]));
++ }
++
++ _SetupProcessAddressSpace(mmu);
++
++ /* Map kernel command buffer in MMU. */
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ mmu->os,
++ Kernel->command->queues[i].logical,
++ &physical
++ ));
++
++ gcmkONERROR(gckMMU_FlatMapping(mmu, physical));
++ }
++#else
++ /* Invalid all the entries. */
++ gcmkONERROR(
++ gckOS_ZeroMemory(pointer, mmu->mtlbSize));
++
++ gcmkONERROR(
++ gckOS_QueryOption(mmu->os, "physBase", &physBase));
++
++ gcmkONERROR(
++ gckOS_QueryOption(mmu->os, "physSize", &physSize));
++
++ gcmkONERROR(
++ gckOS_CPUPhysicalToGPUPhysical(mmu->os, physBase, &gpuAddress));
++
++ /* Setup [physBase - physSize) flat mapping. */
++ gcmkONERROR(_FillFlatMapping(
++ mmu,
++ gpuAddress,
++ physSize
++ ));
++
++ gcmkONERROR(_SetupDynamicSpace(mmu));
++#endif
++ }
++
++ /* Return the gckMMU object pointer. */
++ *Mmu = mmu;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Mmu=0x%x", *Mmu);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (mmu != gcvNULL)
++ {
++ if (mmu->mapLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_Free(os, (gctPOINTER) mmu->mapLogical));
++
++
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->pageTablePhysical,
++ (gctPOINTER) mmu->pageTableLogical,
++ mmu->pageTableSize));
++ }
++
++ if (mmu->mtlbLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->mtlbPhysical,
++ (gctPOINTER) mmu->mtlbLogical,
++ mmu->mtlbSize));
++ }
++
++ if (mmu->pageTableMutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, mmu->pageTableMutex));
++ }
++
++ /* Mark the gckMMU object as unknown. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the allocates memory. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, mmu));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** _Destroy
++**
++** Destroy a gckMMU object.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#if gcdPROCESS_ADDRESS_SPACE
++ gctUINT32 i;
++#endif
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ while (Mmu->staticSTLB != gcvNULL)
++ {
++ gcsMMU_STLB_PTR pre = Mmu->staticSTLB;
++ Mmu->staticSTLB = pre->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): clean MTLB[%d]\n",
++ __FUNCTION__, __LINE__,
++ pre->mtlbIndex);
++#endif
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (Mmu->hardware->mmuVersion != 0)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->mtlbPhysical,
++ (gctPOINTER) Mmu->mtlbLogical,
++ Mmu->mtlbSize));
++ }
++
++ /* Free address space management table. */
++ if (Mmu->mapLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_Free(Mmu->os, (gctPOINTER) Mmu->mapLogical));
++ }
++
++ if (Mmu->pageTableLogical != gcvNULL)
++ {
++ /* Free page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++ }
++
++ /* Delete the page table mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->pageTableMutex));
++
++#if gcdPROCESS_ADDRESS_SPACE
++ for (i = 0; i < Mmu->mtlbSize / 4; i++)
++ {
++ struct _gcsMMU_STLB *stlb = ((struct _gcsMMU_STLB **)Mmu->stlbs)[i];
++
++ if (stlb)
++ {
++ gcmkVERIFY_OK(gckOS_FreeContiguous(
++ Mmu->os,
++ stlb->physical,
++ stlb->logical,
++ stlb->size));
++
++ gcmkOS_SAFE_FREE(Mmu->os, stlb);
++ }
++ }
++
++ gcmkOS_SAFE_FREE(Mmu->os, Mmu->stlbs);
++#endif
++
++ /* Mark the gckMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckMMU object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, Mmu));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** _AdjustIndex
++**
++** Adjust the index from which we search for a usable node to make sure
++** index allocated is greater than Start.
++*/
++gceSTATUS
++_AdjustIndex(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 Start,
++ OUT gctUINT32 * IndexAdjusted
++ )
++{
++ gceSTATUS status;
++ gctUINT32 index = Index;
++ gctUINT32_PTR map = Mmu->mapLogical;
++
++ gcmkHEADER();
++
++ for (; index < Mmu->pageTableEntries;)
++ {
++ gctUINT32 result = 0;
++ gctUINT32 nodeSize = 0;
++
++ if (index >= Start)
++ {
++ break;
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ nodeSize = 1;
++ break;
++
++ case gcvMMU_FREE:
++ nodeSize = map[index] >> 8;
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ if (nodeSize > PageCount)
++ {
++ result = index + (nodeSize - PageCount);
++
++ if (result >= Start)
++ {
++ break;
++ }
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ index = map[index] >> 8;
++ break;
++
++ case gcvMMU_FREE:
++ index = map[index + 1];
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ *IndexAdjusted = index;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (sharedPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsSharedPageTable),
++ &pointer));
++ sharedPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(sharedPageTable,
++ sizeof(struct _gcsSharedPageTable)));
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, &sharedPageTable->mmu));
++ }
++
++ *Mmu = sharedPageTable->mmu;
++
++ sharedPageTable->hardwares[sharedPageTable->reference] = Kernel->hardware;
++
++ sharedPageTable->reference++;
++
++ gcmkFOOTER_ARG("sharedPageTable->reference=%lu", sharedPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (sharedPageTable)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(gckMMU_Destroy(sharedPageTable->mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, sharedPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#elif gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (mirrorPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsMirrorPageTable),
++ &pointer));
++ mirrorPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(mirrorPageTable,
++ sizeof(struct _gcsMirrorPageTable)));
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Kernel->os, &mirrorPageTableMutex));
++ }
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, Mmu));
++
++ mirrorPageTable->mmus[mirrorPageTable->reference] = *Mmu;
++
++ mirrorPageTable->hardwares[mirrorPageTable->reference] = Kernel->hardware;
++
++ mirrorPageTable->reference++;
++
++ gcmkFOOTER_ARG("mirrorPageTable->reference=%lu", mirrorPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mirrorPageTable && mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, mirrorPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#else
++ return _Construct(Kernel, MmuSize, Mmu);
++#endif
++}
++
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ gckOS os = Mmu->os;
++
++ sharedPageTable->reference--;
++
++ if (sharedPageTable->reference == 0)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(_Destroy(Mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, sharedPageTable));
++ }
++
++ return gcvSTATUS_OK;
++#elif gcdMIRROR_PAGETABLE
++ mirrorPageTable->reference--;
++
++ if (mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTable));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTableMutex));
++ }
++
++ return _Destroy(Mmu);
++#else
++ return _Destroy(Mmu);
++#endif
++}
++
++/*******************************************************************************
++**
++** gckMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS
++_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gctUINT32 index = 0, previous = ~0U, left;
++ gctUINT32_PTR map;
++ gctBOOL gotIt;
++ gctUINT32 address;
++ gctUINT32 pageCount;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=%lu", Mmu, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ if (PageCount > Mmu->pageTableEntries)
++ {
++        /* Not enough pages available. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ gcmkSAFECASTSIZET(pageCount, PageCount);
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ /* Cast pointer to page table. */
++ for (map = Mmu->mapLogical, gotIt = gcvFALSE; !gotIt;)
++ {
++ index = Mmu->heapList;
++
++ if ((Mmu->hardware->mmuVersion == 0) && (Type == gcvSURF_VERTEX))
++ {
++ gcmkONERROR(_AdjustIndex(
++ Mmu,
++ index,
++ pageCount,
++ gcdVERTEX_START / gcmSIZEOF(gctUINT32),
++ &index
++ ));
++ }
++
++ /* Walk the heap list. */
++ for (; !gotIt && (index < Mmu->pageTableEntries);)
++ {
++ /* Check the node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
++ {
++ case gcvMMU_SINGLE:
++            /* Single nodes are valid if we only need 1 page. */
++ if (pageCount == 1)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&map[index]) >> 8;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* Test if the node has enough space. */
++ if (pageCount <= (_ReadPageEntry(&map[index]) >> 8))
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&map[index + 1]);
++ }
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ /* Test if we are out of memory. */
++ if (index >= Mmu->pageTableEntries)
++ {
++ if (Mmu->freeNodes)
++ {
++ /* Time to move out the trash! */
++ gcmkONERROR(_Collect(Mmu));
++ }
++ else
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ }
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Unlink single node from free list. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&map[index]) >> 8));
++ break;
++
++ case gcvMMU_FREE:
++ /* Check how many pages will be left. */
++ left = (_ReadPageEntry(&map[index]) >> 8) - pageCount;
++ switch (left)
++ {
++ case 0:
++ /* The entire node is consumed, just unlink it. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&map[index + 1])));
++ break;
++
++ case 1:
++ /* One page will remain. Convert the node to a single node and
++ ** advance the index. */
++ _WritePageEntry(&map[index], (_ReadPageEntry(&map[index + 1]) << 8) | gcvMMU_SINGLE);
++ index ++;
++ break;
++
++ default:
++ /* Enough pages remain for a new node. However, we will just adjust
++ ** the size of the current node and advance the index. */
++ _WritePageEntry(&map[index], (left << 8) | gcvMMU_FREE);
++ index += left;
++ break;
++ }
++ break;
++ }
++
++ /* Mark node as used. */
++ gcmkONERROR(_FillPageTable(&map[index], pageCount, gcvMMU_USED));
++
++ /* Return pointer to page table. */
++ *PageTable = &Mmu->pageTableLogical[index];
++
++ /* Build virtual address. */
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(
++ gckHARDWARE_BuildVirtualAddress(Mmu->hardware, index, 0, &address));
++ }
++ else
++ {
++ gctUINT32 masterOffset = index / gcdMMU_STLB_4K_ENTRY_NUM
++ + Mmu->dynamicMappingStart;
++ gctUINT32 slaveOffset = index % gcdMMU_STLB_4K_ENTRY_NUM;
++
++ address = (masterOffset << gcdMMU_MTLB_SHIFT)
++ | (slaveOffset << gcdMMU_STLB_4K_SHIFT);
++ }
++
++ if (Address != gcvNULL)
++ {
++ *Address = address;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageTable=0x%x *Address=%08x",
++ *PageTable, gcmOPT_VALUE(Address));
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32_PTR node;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctUINT32 pageCount;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=%lu",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ gcmkSAFECASTSIZET(pageCount, PageCount);
++
++ /* Get the node by index. */
++ node = Mmu->mapLogical + ((gctUINT32_PTR)PageTable - Mmu->pageTableLogical);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdMMU_CLEAR_VALUE
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ _FillPageTable(PageTable, pageCount, gcdMMU_CLEAR_VALUE);
++ }
++#endif
++
++ if (PageCount == 1)
++ {
++ /* Single page node. */
++ _WritePageEntry(node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ _WritePageEntry(PageTable, (1 << 1));
++#endif
++ }
++ else
++ {
++ /* Mark the node as free. */
++ _WritePageEntry(node, (pageCount << 8) | gcvMMU_FREE);
++ _WritePageEntry(node + 1, ~0U);
++
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ gcmkVERIFY_OK(_FillPageTable(PageTable, pageCount, 1 << 1));
++#endif
++ }
++
++ /* We have free nodes. */
++ Mmu->freeNodes = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ return gckMMU_AllocatePagesEx(
++ Mmu, PageCount, gcvSURF_TYPE_UNKNOWN, PageTable, Address);
++}
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pageTable;
++ gctUINT32 address;
++ gctINT i;
++ gckMMU mmu;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL allocated = gcvFALSE;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++ acquired = gcvTRUE;
++
++ /* Allocate page table for current MMU. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ if (Mmu == mirrorPageTable->mmus[i])
++ {
++ gcmkONERROR(_AllocatePages(Mmu, PageCount, Type, PageTable, Address));
++ allocated = gcvTRUE;
++ }
++ }
++
++ /* Allocate page table for other MMUs. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (Mmu != mmu)
++ {
++ gcmkONERROR(_AllocatePages(mmu, PageCount, Type, &pageTable, &address));
++ gcmkASSERT(address == *Address);
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ acquired = gcvFALSE;
++
++ return gcvSTATUS_OK;
++OnError:
++
++ if (allocated)
++ {
++ /* Page tables for multiple GPU always keep the same. So it is impossible
++         * the first one allocates successfully but others fail.
++ */
++ gcmkASSERT(0);
++ }
++
++ if (acquired)
++ {
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ }
++
++ return status;
++#else
++ return _AllocatePages(Mmu, PageCount, Type, PageTable, Address);
++#endif
++}
++
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctINT i;
++ gctUINT32 offset;
++ gckMMU mmu;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++
++ gcmkVERIFY_OK(_FreePages(Mmu, PageTable, PageCount));
++
++ offset = (gctUINT32)PageTable - (gctUINT32)Mmu->pageTableLogical;
++
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ gcmkVERIFY_OK(_FreePages(mmu, mmu->pageTableLogical + offset/4, PageCount));
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++
++ return gcvSTATUS_OK;
++#else
++ return _FreePages(Mmu, PageTable, PageCount);
++#endif
++}
++
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctUINT32_PTR pageEntry;
++ gctINT i;
++ gckMMU mmu;
++ gctUINT32 offset = (gctUINT32)PageEntry - (gctUINT32)Mmu->pageTableLogical;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ _WritePageEntry(PageEntry, PageAddress);
++ }
++ else
++ {
++ _WritePageEntry(PageEntry, _SetPage(PageAddress));
++ }
++
++#if gcdMIRROR_PAGETABLE
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ pageEntry = mmu->pageTableLogical + offset / 4;
++
++ if (mmu->hardware->mmuVersion == 0)
++ {
++ _WritePageEntry(pageEntry, PageAddress);
++ }
++ else
++ {
++ _WritePageEntry(pageEntry, _SetPage(PageAddress));
++ }
++ }
++
++ }
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckMMU_GetPageEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address,
++ IN gctUINT32_PTR *PageTable
++ )
++{
++ gceSTATUS status;
++ struct _gcsMMU_STLB *stlb;
++ struct _gcsMMU_STLB **stlbs = Mmu->stlbs;
++ gctUINT32 offset = _MtlbOffset(Address);
++ gctUINT32 mtlbEntry;
++ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT((Address & 0xFFF) == 0);
++
++ stlb = stlbs[offset];
++
++ if (stlb == gcvNULL)
++ {
++ gcmkONERROR(_AllocateStlb(Mmu->os, &stlb));
++
++ mtlbEntry = stlb->physBase
++ | gcdMMU_MTLB_4K_PAGE
++ | gcdMMU_MTLB_PRESENT
++ ;
++
++ if (ace)
++ {
++ mtlbEntry = mtlbEntry
++ /* Secure */
++ | (1 << 4);
++ }
++
++ /* Insert Slave TLB address to Master TLB entry.*/
++ _WritePageEntry(Mmu->mtlbLogical + offset, mtlbEntry);
++
++ /* Record stlb. */
++ stlbs[offset] = stlb;
++ }
++
++ *PageTable = &stlb->logical[_StlbOffset(Address)];
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++_CheckMap(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR map = Mmu->mapLogical;
++ gctUINT32 index;
++
++ for (index = Mmu->heapList; index < Mmu->pageTableEntries;)
++ {
++ /* Check the node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Move to next node. */
++ index = _ReadPageEntry(&map[index]) >> 8;
++ break;
++
++ case gcvMMU_FREE:
++ /* Move to next node. */
++ index = _ReadPageEntry(&map[index + 1]);
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index [%u] = %x!", index, map[index]);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++gceSTATUS
++gckMMU_FlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 Physical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 index = _AddressToIndex(Mmu, Physical);
++ gctUINT32 i;
++ gctBOOL gotIt = gcvFALSE;
++ gctUINT32_PTR map = Mmu->mapLogical;
++ gctUINT32 previous = ~0U;
++ gctUINT32_PTR pageTable;
++
++ gckMMU_GetPageEntry(Mmu, Physical, &pageTable);
++
++ _WritePageEntry(pageTable, _SetPage(Physical));
++
++ if (map)
++ {
++ /* Find node which contains index. */
++ for (i = 0; !gotIt && (i < Mmu->pageTableEntries);)
++ {
++ gctUINT32 numPages;
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
++ {
++ case gcvMMU_SINGLE:
++ if (i == index)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ previous = i;
++ i = _ReadPageEntry(&map[i]) >> 8;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ numPages = _ReadPageEntry(&map[i]) >> 8;
++ if (index >= i && index < i + numPages)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ previous = i;
++ i = _ReadPageEntry(&map[i + 1]);
++ }
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
++ {
++ case gcvMMU_SINGLE:
++ /* Unlink single node from free list. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&map[i]) >> 8));
++ break;
++
++ case gcvMMU_FREE:
++ /* Split the node. */
++ {
++ gctUINT32 start;
++ gctUINT32 next = _ReadPageEntry(&map[i+1]);
++ gctUINT32 total = _ReadPageEntry(&map[i]) >> 8;
++ gctUINT32 countLeft = index - i;
++ gctUINT32 countRight = total - countLeft - 1;
++
++ if (countLeft)
++ {
++ start = i;
++ _AddFree(Mmu, previous, start, countLeft);
++ previous = start;
++ }
++
++ if (countRight)
++ {
++ start = index + 1;
++ _AddFree(Mmu, previous, start, countRight);
++ previous = start;
++ }
++
++ _Link(Mmu, previous, next);
++ }
++ break;
++ }
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Roll back. */
++ return status;
++}
++
++
++
++gceSTATUS
++gckMMU_FreePagesEx(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32_PTR node;
++ gceSTATUS status;
++
++#if gcdUSE_MMU_EXCEPTION
++ gctUINT32 i;
++ struct _gcsMMU_STLB *stlb;
++ struct _gcsMMU_STLB **stlbs = Mmu->stlbs;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x Address=0x%x PageCount=%lu",
++ Mmu, Address, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ /* Get the node by index. */
++ node = Mmu->mapLogical + _AddressToIndex(Mmu, Address);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++
++ if (PageCount == 1)
++ {
++ /* Single page node. */
++ _WritePageEntry(node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++ }
++ else
++ {
++ /* Mark the node as free. */
++ _WritePageEntry(node, (PageCount << 8) | gcvMMU_FREE);
++ _WritePageEntry(node + 1, ~0U);
++ }
++
++ /* We have free nodes. */
++ Mmu->freeNodes = gcvTRUE;
++
++#if gcdUSE_MMU_EXCEPTION
++ for (i = 0; i < PageCount; i++)
++ {
++ /* Get */
++ stlb = stlbs[_MtlbOffset(Address)];
++
++ /* Enable exception */
++ stlb->logical[_StlbOffset(Address)] = gcdMMU_STLB_EXCEPTION;
++ }
++#endif
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu,
++ IN gceSURF_TYPE Type
++ )
++{
++ gckHARDWARE hardware;
++ gctUINT32 mask;
++ gctINT i;
++
++ if (Type == gcvSURF_VERTEX || Type == gcvSURF_INDEX)
++ {
++ mask = gcvPAGE_TABLE_DIRTY_BIT_FE;
++ }
++ else
++ {
++ mask = gcvPAGE_TABLE_DIRTY_BIT_OTHER;
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ gcmkVERIFY_OK(
++ gckOS_AtomSetMask(Mmu->pageTableDirty[i], mask));
++ }
++#else
++#if gcdSHARED_PAGETABLE
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ hardware = sharedPageTable->hardwares[i];
++ if (hardware)
++ {
++ gcmkVERIFY_OK(gckOS_AtomSetMask(hardware->pageTableDirty, mask));
++ }
++ }
++#elif gcdMIRROR_PAGETABLE
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ hardware = mirrorPageTable->hardwares[i];
++
++ /* Notify cores who use this page table. */
++ gcmkVERIFY_OK(
++ gckOS_AtomSetMask(hardware->pageTableDirty, mask));
++ }
++#else
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSetMask(hardware->pageTableDirty, mask));
++#endif
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ )
++{
++#if gcdPROCESS_ADDRESS_SPACE
++ gcsMMU_STLB_PTR *stlbs = Mmu->stlbs;
++ gcsMMU_STLB_PTR stlbDesc = stlbs[_MtlbOffset(Address)];
++#else
++ gctUINT32_PTR pageTable;
++ gctUINT32 index;
++ gctUINT32 mtlb, stlb;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%08X Address=0x%08X", Mmu, Address);
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkASSERT(Mmu->hardware->mmuVersion > 0);
++
++#if gcdPROCESS_ADDRESS_SPACE
++ if (stlbDesc)
++ {
++ gcmkPRINT(" STLB entry = 0x%08X",
++ _ReadPageEntry(&stlbDesc->logical[_StlbOffset(Address)]));
++ }
++ else
++ {
++ gcmkPRINT(" MTLB entry is empty.");
++ }
++#else
++ mtlb = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++
++ if (mtlb >= Mmu->dynamicMappingStart)
++ {
++ stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++
++ pageTable = Mmu->pageTableLogical;
++
++ index = (mtlb - Mmu->dynamicMappingStart)
++ * gcdMMU_STLB_4K_ENTRY_NUM
++ + stlb;
++
++ gcmkPRINT(" Page table entry = 0x%08X", _ReadPageEntry(pageTable + index));
++ }
++ else
++ {
++ gcsMMU_STLB_PTR stlbObj = Mmu->staticSTLB;
++ gctUINT32 entry = Mmu->mtlbLogical[mtlb];
++
++ stlb = (Address & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++
++ entry &= 0xFFFFFFF0;
++
++ while (stlbObj)
++ {
++
++ if (entry == stlbObj->physBase)
++ {
++ gcmkPRINT(" Page table entry = 0x%08X", stlbObj->logical[stlb]);
++ break;
++ }
++
++ stlbObj = stlbObj->next;
++ }
++ }
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************
++****************************** T E S T C O D E ******************************
++******************************************************************************/
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu_vg.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,522 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++/*******************************************************************************
++**
++** gckVGMMU_Construct
++**
++** Construct a new gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGKERNEL Kernel
++** Pointer to an gckVGKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckVGMMU * Mmu
++** Pointer to a variable that receives the gckVGMMU object pointer.
++*/
++gceSTATUS gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT32 MmuSize,
++ OUT gckVGMMU * Mmu
++ )
++{
++ gckOS os;
++ gckVGHARDWARE hardware;
++ gceSTATUS status;
++ gckVGMMU mmu;
++ gctUINT32 * pageTable;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=0x%x Mmu=0x%x", Kernel, MmuSize, Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckVGHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckVGMMU object. */
++ status = gckOS_Allocate(os, sizeof(struct _gckVGMMU), (gctPOINTER *) &mmu);
++
++ if (status < 0)
++ {
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate gckVGMMU object.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Initialize the gckVGMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++
++ /* Create the mutex. */
++ status = gckOS_CreateMutex(os, &mmu->mutex);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Allocate the page table. */
++ mmu->pageTableSize = (gctUINT32)MmuSize;
++ status = gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ &mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Compute number of entries in page table. */
++ mmu->entryCount = (gctUINT32)mmu->pageTableSize / sizeof(gctUINT32);
++ mmu->entry = 0;
++
++ /* Mark the entire page table as available. */
++ pageTable = (gctUINT32 *) mmu->pageTableLogical;
++ for (i = 0; i < mmu->entryCount; i++)
++ {
++ pageTable[i] = (gctUINT32)~0;
++ }
++
++ /* Set page table address. */
++ status = gckVGHARDWARE_SetMMU(hardware, mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(mmu->os,
++ mmu->pageTablePhysical,
++ mmu->pageTableLogical,
++ mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not program page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Return the gckVGMMU object pointer. */
++ *Mmu = mmu;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u entries at %p.(0x%08X)\n",
++ __FUNCTION__, __LINE__,
++ mmu->entryCount,
++ mmu->pageTableLogical,
++ mmu->pageTablePhysical
++ );
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_Destroy
++**
++**  Destroy a gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->mutex));
++
++ /* Mark the gckVGMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGMMU object. */
++ gcmkVERIFY_OK(gckOS_Free(Mmu->os, Mmu));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 tail, index, i;
++ gctUINT32 * table;
++ gctBOOL allocated = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=0x%x PageTable=0x%x Address=0x%x",
++ Mmu, PageCount, PageTable, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ if (PageCount > Mmu->entryCount)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): page table too small for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ gcmkFOOTER_NO();
++        /* Not enough pages available. */
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Grab the mutex. */
++ status = gckOS_AcquireMutex(Mmu->os, Mmu->mutex, gcvINFINITE);
++
++ if (status < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): could not acquire mutex.\n"
++ ,__FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Compute the tail for this allocation. */
++ tail = Mmu->entryCount - (gctUINT32)PageCount;
++
++ /* Walk all entries until we find enough slots. */
++ for (index = Mmu->entry; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++
++ if (!allocated)
++ {
++ if (status >= 0)
++ {
++ /* Walk all entries until we find enough slots. */
++ for (index = 0; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++ }
++ }
++
++ if (!allocated && (status >= 0))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): not enough free pages for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ /* Not enough empty slots available. */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ if (status >= 0)
++ {
++ /* Build virtual address. */
++ status = gckVGHARDWARE_BuildVirtualAddress(Mmu->hardware,
++ index,
++ 0,
++ Address);
++
++ if (status >= 0)
++ {
++ /* Update current entry into page table. */
++ Mmu->entry = index + (gctUINT32)PageCount;
++
++ /* Return pointer to page table. */
++ *PageTable = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): allocated %u pages at index %u (0x%08X) @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ index,
++ *Address,
++ *PageTable
++ );
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->mutex));
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32 * table;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=0x%x",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): freeing %u pages at index %u @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ ((gctUINT32 *) PageTable - (gctUINT32 *) Mmu->pageTableLogical),
++ PageTable
++ );
++
++ /* Convert pointer. */
++ table = (gctUINT32 *) PageTable;
++
++ /* Mark the page table entries as available. */
++ while (PageCount-- > 0)
++ {
++ *table++ = (gctUINT32)~0;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ *PageEntry = PageAddress;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ )
++{
++ gckVGHARDWARE hardware;
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_power.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_power.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_power.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_power.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,347 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_POWER
++
++/******************************************************************************\
++************************ Dynamic Voltage Frequency Setting *********************
++\******************************************************************************/
++#if gcdDVFS
++static gctUINT32
++_GetLoadHistory(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Select,
++ IN gctUINT32 Index
++)
++{
++ return Dvfs->loads[Index];
++}
++
++static void
++_IncreaseScale(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ if (Dvfs->currentScale < 32)
++ {
++ *Scale = Dvfs->currentScale + 8;
++ }
++ else
++ {
++ *Scale = Dvfs->currentScale + 8;
++ *Scale = gcmMIN(64, *Scale);
++ }
++}
++
++static void
++_RecordFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory *history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ if (history->frequency == 0)
++ {
++ history->frequency = Frequency;
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ history->count++;
++ }
++}
++
++static gctUINT32
++_GetFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory * history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ return history->count;
++ }
++
++ return 0;
++}
++
++static void
++_Policy(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ gctUINT8 load[4], nextLoad;
++ gctUINT8 scale;
++
++ /* Last 4 history. */
++ load[0] = (Load & 0xFF);
++ load[1] = (Load & 0xFF00) >> 8;
++ load[2] = (Load & 0xFF0000) >> 16;
++ load[3] = (Load & 0xFF000000) >> 24;
++
++ /* Determine target scale. */
++ if (load[0] > 54)
++ {
++ _IncreaseScale(Dvfs, Load, &scale);
++ }
++ else
++ {
++ nextLoad = (load[0] + load[1] + load[2] + load[3])/4;
++
++ scale = Dvfs->currentScale * (nextLoad) / 54;
++
++ scale = gcmMAX(1, scale);
++ scale = gcmMIN(64, scale);
++ }
++
++ Dvfs->totalConfig++;
++
++ Dvfs->loads[(load[0]-1)/8]++;
++
++ *Scale = scale;
++
++
++ if (Dvfs->totalConfig % 100 == 0)
++ {
++ gcmkPRINT("=======================================================");
++ gcmkPRINT("GPU Load: %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ 8, 16, 24, 32, 40, 48, 56, 64);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ _GetLoadHistory(Dvfs,2, 0),
++ _GetLoadHistory(Dvfs,2, 1),
++ _GetLoadHistory(Dvfs,2, 2),
++ _GetLoadHistory(Dvfs,2, 3),
++ _GetLoadHistory(Dvfs,2, 4),
++ _GetLoadHistory(Dvfs,2, 5),
++ _GetLoadHistory(Dvfs,2, 6),
++ _GetLoadHistory(Dvfs,2, 7)
++ );
++
++ gcmkPRINT("Frequency(MHz) %-8d %-8d %-8d %-8d %-8d",
++ 58, 120, 240, 360, 480);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d",
++ _GetFrequencyHistory(Dvfs, 58),
++ _GetFrequencyHistory(Dvfs,120),
++ _GetFrequencyHistory(Dvfs,240),
++ _GetFrequencyHistory(Dvfs,360),
++ _GetFrequencyHistory(Dvfs,480)
++ );
++ }
++}
++
++static void
++_TimerFunction(
++ gctPOINTER Data
++ )
++{
++ gceSTATUS status;
++ gckDVFS dvfs = (gckDVFS) Data;
++ gckHARDWARE hardware = dvfs->hardware;
++ gctUINT32 value;
++ gctUINT32 frequency;
++ gctUINT8 scale;
++ gctUINT32 t1, t2, consumed;
++
++ gckOS_GetTicks(&t1);
++
++ gcmkONERROR(gckHARDWARE_QueryLoad(hardware, &value));
++
++ /* Determine target scale. */
++ _Policy(dvfs, value, &scale);
++
++ /* Set frequency and voltage. */
++ gcmkONERROR(gckOS_SetGPUFrequency(hardware->os, hardware->core, scale));
++
++ /* Query real frequency. */
++ gcmkONERROR(
++ gckOS_QueryGPUFrequency(hardware->os,
++ hardware->core,
++ &frequency,
++ &dvfs->currentScale));
++
++ _RecordFrequencyHistory(dvfs, frequency);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER,
++ "Current frequency = %d",
++ frequency);
++
++ /* Set period. */
++ gcmkONERROR(gckHARDWARE_SetDVFSPeroid(hardware, frequency));
++
++OnError:
++ /* Determine next querying time. */
++ gckOS_GetTicks(&t2);
++
++ consumed = gcmMIN(((long)t2 - (long)t1), 5);
++
++ if (dvfs->stop == gcvFALSE)
++ {
++ gcmkVERIFY_OK(gckOS_StartTimer(hardware->os,
++ dvfs->timer,
++ dvfs->pollingTime - consumed));
++ }
++
++ return;
++}
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Dvfs
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer;
++ gckDVFS dvfs = gcvNULL;
++ gckOS os = Hardware->os;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ /* Allocate a gckDVFS manager. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckDVFS), &pointer));
++
++ gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckDVFS));
++
++ dvfs = pointer;
++
++ /* Initialization. */
++ dvfs->hardware = Hardware;
++ dvfs->pollingTime = gcdDVFS_POLLING_TIME;
++ dvfs->os = Hardware->os;
++ dvfs->currentScale = 64;
++
++ /* Create a polling timer. */
++ gcmkONERROR(gckOS_CreateTimer(os, _TimerFunction, pointer, &dvfs->timer));
++
++ /* Initialize frequency and voltage adjustment helper. */
++ gcmkONERROR(gckOS_PrepareGPUFrequency(os, Hardware->core));
++
++ /* Return result. */
++ *Dvfs = dvfs;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (dvfs)
++ {
++ if (dvfs->timer)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyTimer(os, dvfs->timer));
++ }
++
++ gcmkOS_SAFE_FREE(os, dvfs);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ /* Deinitialize helper function. */
++ gcmkVERIFY_OK(gckOS_FinishGPUFrequency(Dvfs->os, Dvfs->hardware->core));
++
++ /* DestroyTimer. */
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Dvfs->os, Dvfs->timer));
++
++ gcmkOS_SAFE_FREE(Dvfs->os, Dvfs);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ gckHARDWARE_InitDVFS(Dvfs->hardware);
++
++ Dvfs->stop = gcvFALSE;
++
++ gckOS_StartTimer(Dvfs->os, Dvfs->timer, Dvfs->pollingTime);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ Dvfs->stop = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_precomp.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_precomp.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_precomp.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_precomp.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,29 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_precomp_h_
++#define __gc_hal_kernel_precomp_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++
++#endif /* __gc_hal_kernel_precomp_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_security.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_security.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_security.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_security.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,239 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++
++
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++#if gcdSECURITY
++
++/*
++** Open a security service channel.
++*/
++gceSTATUS
++gckKERNEL_SecurityOpen(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPU,
++ OUT gctUINT32 *Channel
++ )
++{
++ gceSTATUS status;
++
++ gcmkONERROR(gckOS_OpenSecurityChannel(Kernel->os, Kernel->core, Channel));
++ gcmkONERROR(gckOS_InitSecurityChannel(*Channel));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++/*
++** Close a security service channel
++*/
++gceSTATUS
++gckKERNEL_SecurityClose(
++ IN gctUINT32 Channel
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*
++** Security service interface.
++*/
++gceSTATUS
++gckKERNEL_SecurityCallService(
++ IN gctUINT32 Channel,
++ IN OUT gcsTA_INTERFACE * Interface
++)
++{
++ gceSTATUS status;
++ gcmkHEADER();
++
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ gckOS_CallSecurityService(Channel, Interface);
++
++ status = Interface->result;
++
++ gcmkONERROR(status);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityStartCommand(
++ IN gckKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_START_COMMAND;
++ iface.u.StartCommand.gpu = Kernel->core;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityAllocateSecurityMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Bytes,
++ OUT gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_ALLOCATE_SECRUE_MEMORY;
++ iface.u.AllocateSecurityMemory.bytes = Bytes;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ *Handle = iface.u.AllocateSecurityMemory.memory_handle;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityExecute(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Buffer,
++ IN gctUINT32 Bytes
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_EXECUTE;
++ iface.u.Execute.command_buffer = (gctUINT32 *)Buffer;
++ iface.u.Execute.gpu = Kernel->core;
++ iface.u.Execute.command_buffer_length = Bytes;
++
++#if defined(LINUX)
++ gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os, Buffer,
++ (gctUINT32 *)&iface.u.Execute.command_buffer));
++#endif
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ Kernel->hardware, 0, 0
++ ));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityMapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 *PhysicalArray,
++ IN gctUINT32 PageCount,
++ OUT gctUINT32 * GPUAddress
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_MAP_MEMORY;
++
++#if defined(LINUX)
++ gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os, PhysicalArray,
++ (gctUINT32 *)&iface.u.MapMemory.physicals));
++#endif
++
++ iface.u.MapMemory.pageCount = PageCount;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ *GPUAddress = iface.u.MapMemory.gpuAddress;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityUnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPUAddress,
++ IN gctUINT32 PageCount
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_UNMAP_MEMORY;
++
++ iface.u.UnmapMemory.gpuAddress = GPUAddress;
++ iface.u.UnmapMemory.pageCount = PageCount;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,833 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_VG
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++gceSTATUS gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ )
++{
++ gceSTATUS status;
++ gckVGKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Os,
++ sizeof(struct _gckVGKERNEL),
++ (gctPOINTER *) &kernel
++ ));
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->context = Context;
++ kernel->hardware = gcvNULL;
++ kernel->interrupt = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->mmu = gcvNULL;
++ kernel->kernel = inKernel;
++
++ /* Construct the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckVGHARDWARE_Construct(
++ Os, &kernel->hardware
++ ));
++
++ /* Set pointer to gckKERNEL object in gckVGHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ /* Construct the gckVGINTERRUPT object. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Construct(
++ kernel, &kernel->interrupt
++ ));
++
++ /* Construct the gckVGCOMMAND object. */
++ gcmkERR_BREAK(gckVGCOMMAND_Construct(
++ kernel, gcmKB2BYTES(8), gcmKB2BYTES(2), &kernel->command
++ ));
++
++ /* Construct the gckVGMMU object. */
++ gcmkERR_BREAK(gckVGMMU_Construct(
++ kernel, gcmKB2BYTES(32), &kernel->mmu
++ ));
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (kernel != gcvNULL)
++ {
++ if (kernel->mmu != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGMMU_Destroy(kernel->mmu));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->interrupt != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGINTERRUPT_Destroy(kernel->interrupt));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGHARDWARE_Destroy(kernel->hardware));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(Os, kernel));
++ }
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy an gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ /* Destroy the gckVGMMU object. */
++ if (Kernel->mmu != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGMMU_Destroy(Kernel->mmu));
++ Kernel->mmu = gcvNULL;
++ }
++
++ /* Destroy the gckVGCOMMAND object. */
++ if (Kernel->command != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_Destroy(Kernel->command));
++ Kernel->command = gcvNULL;
++ }
++
++ /* Destroy the gckVGINTERRUPT object. */
++ if (Kernel->interrupt != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Destroy(Kernel->interrupt));
++ Kernel->interrupt = gcvNULL;
++ }
++
++ /* Destroy the gckVGHARDWARE object. */
++ if (Kernel->hardware != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_Destroy(Kernel->hardware));
++ Kernel->hardware = gcvNULL;
++ }
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Free(Kernel->os, Kernel));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_AllocateLinearMemory
++**
++** Function walks all required memory pools and allocates the requested
++** amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcePOOL * Pool
++** Pointer the desired memory pool.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctSIZE_T Alignment
++** Required buffer alignment.
++**
++** gceSURF_TYPE Type
++** Surface type.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to the actual pool where the memory was allocated.
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Allocated node.
++*/
++gceSTATUS
++gckVGKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ break;
++
++ default:
++ break;
++ }
++
++ do
++ {
++ /* Verify the number of bytes to allocate. */
++ if (Bytes == 0)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkERR_BREAK(gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, Node));
++
++ /* Success. */
++ break;
++ }
++
++ else
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++
++ if (status == gcvSTATUS_OK)
++ {
++ /* Allocate memory. */
++ status = gckVIDMEM_AllocateLinear(Kernel,
++ videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ (*Pool == gcvPOOL_SYSTEM),
++ Node);
++
++ if (status == gcvSTATUS_OK)
++ {
++ /* Memory allocated. */
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++ else if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++ else if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to virtual memory. */
++ pool = gcvPOOL_VIRTUAL;
++ }
++ else
++ {
++ /* Out of pools. */
++ break;
++ }
++ }
++ /* Loop only for multiple selection pools. */
++ while ((*Pool == gcvPOOL_DEFAULT)
++ || (*Pool == gcvPOOL_LOCAL)
++ || (*Pool == gcvPOOL_UNIFIED)
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Return pool used for allocation. */
++ *Pool = pool;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE * kernelInterface = Interface;
++ gctUINT32 processID;
++ gckKERNEL kernel = Kernel;
++ gctPOINTER info = gcvNULL;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctPOINTER logical = gcvNULL;
++ gctSIZE_T bytes = 0;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interface=0x%x ", Kernel, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkERR_BREAK(gckKERNEL_QueryVideoMemory(
++ Kernel, kernelInterface
++ ));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkERR_BREAK(gckVGHARDWARE_QueryChipIdentity(
++ Kernel->vg->hardware,
++ &kernelInterface->u.QueryChipIdentity.chipModel,
++ &kernelInterface->u.QueryChipIdentity.chipRevision,
++ &kernelInterface->u.QueryChipIdentity.chipFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures2
++ ));
++ break;
++
++ case gcvHAL_QUERY_COMMAND_BUFFER:
++ /* Query command buffer information. */
++ gcmkERR_BREAK(gckKERNEL_QueryCommandBuffer(
++ Kernel,
++ &kernelInterface->u.QueryCommandBuffer.information
++ ));
++ break;
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate non-paged memory. */
++ gcmkERR_BREAK(gckOS_AllocateNonPagedMemory(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free non-paged memory. */
++ gcmkERR_BREAK(gckOS_FreeNonPagedMemory(
++ Kernel->os,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate contiguous memory. */
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical),
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++ gcmkERR_BREAK(gcvSTATUS_NOT_SUPPORTED);
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Kernel, processID,
++ &kernelInterface->u.AllocateLinearVideoMemory.pool,
++ kernelInterface->u.AllocateLinearVideoMemory.bytes,
++ kernelInterface->u.AllocateLinearVideoMemory.alignment,
++ kernelInterface->u.AllocateLinearVideoMemory.type,
++ kernelInterface->u.AllocateLinearVideoMemory.flag,
++ &kernelInterface->u.AllocateLinearVideoMemory.node
++ ));
++
++ break;
++
++ case gcvHAL_RELEASE_VIDEO_MEMORY:
++ /* Free video memory. */
++ gcmkERR_BREAK(gckKERNEL_ReleaseVideoMemory(
++ Kernel, processID,
++ (gctUINT32)kernelInterface->u.ReleaseVideoMemory.node
++ ));
++
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ /* Map memory. */
++ gcmkERR_BREAK(gckKERNEL_MapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ &logical
++ ));
++ kernelInterface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ /* Unmap memory. */
++ gcmkERR_BREAK(gckKERNEL_UnmapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapMemory.logical)
++ ));
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkERR_BREAK(gckOS_MapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapUserMemory.memory),
++ kernelInterface->u.MapUserMemory.physical,
++ (gctSIZE_T) kernelInterface->u.MapUserMemory.size,
++ &info,
++ &kernelInterface->u.MapUserMemory.address
++ ));
++
++ kernelInterface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++
++ /* Clear temp storage. */
++ info = gcvNULL;
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ /* Unmap user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) kernelInterface->u.UnmapUserMemory.size,
++ gcmNAME_TO_PTR(kernelInterface->u.UnmapUserMemory.info),
++ kernelInterface->u.UnmapUserMemory.address
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.UnmapUserMemory.info);
++ break;
++
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ gcmkONERROR(gckKERNEL_LockVideoMemory(Kernel, gcvCORE_VG, processID, FromUser, Interface));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkONERROR(gckKERNEL_UnlockVideoMemory(Kernel, processID, Interface));
++ break;
++
++ case gcvHAL_USER_SIGNAL:
++#if !USE_NEW_LINUX_SIGNAL
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkERR_BREAK(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++
++ /* Destroy the signal. */
++ gcmkERR_BREAK(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkERR_BREAK(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkERR_BREAK(gcvSTATUS_INVALID_ARGUMENT);
++ }
++#endif
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++ gcmkERR_BREAK(gckVGCOMMAND_Commit(
++ Kernel->vg->command,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.context),
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.queue),
++ kernelInterface->u.VGCommit.entryCount,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.taskTable)
++ ));
++ break;
++ case gcvHAL_VERSION:
++ kernelInterface->u.Version.major = gcvVERSION_MAJOR;
++ kernelInterface->u.Version.minor = gcvVERSION_MINOR;
++ kernelInterface->u.Version.patch = gcvVERSION_PATCH;
++ kernelInterface->u.Version.build = gcvVERSION_BUILD;
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkERR_BREAK(
++ gckOS_GetBaseAddress(Kernel->os,
++ &kernelInterface->u.GetBaseAddress.baseAddress));
++ break;
++ case gcvHAL_IMPORT_VIDEO_MEMORY:
++ gcmkONERROR(gckVIDMEM_NODE_Import(Kernel,
++ Interface->u.ImportVideoMemory.name,
++ &Interface->u.ImportVideoMemory.handle));
++ gcmkONERROR(gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ gcmINT2PTR(Interface->u.ImportVideoMemory.handle),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_NAME_VIDEO_MEMORY:
++ gcmkONERROR(gckVIDMEM_NODE_Name(Kernel,
++ Interface->u.NameVideoMemory.handle,
++ &Interface->u.NameVideoMemory.name));
++ break;
++
++ case gcvHAL_DATABASE:
++ gcmkONERROR(gckKERNEL_QueryDatabase(Kernel, processID, Interface));
++ break;
++ case gcvHAL_SHBUF:
++ {
++ gctSHBUF shBuf;
++ gctPOINTER uData;
++ gctUINT32 bytes;
++
++ switch (Interface->u.ShBuf.command)
++ {
++ case gcvSHBUF_CREATE:
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Create. */
++ gcmkONERROR(gckKERNEL_CreateShBuffer(Kernel, bytes, &shBuf));
++
++ Interface->u.ShBuf.id = gcmPTR_TO_UINT64(shBuf);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSHBUF_DESTROY:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++
++ /* Check db first to avoid illegal destroy in the process. */
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf));
++
++ gcmkONERROR(gckKERNEL_DestroyShBuffer(Kernel, shBuf));
++ break;
++
++ case gcvSHBUF_MAP:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++
++ /* Map for current process access. */
++ gcmkONERROR(gckKERNEL_MapShBuffer(Kernel, shBuf));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSHBUF_WRITE:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++ uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data);
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Write. */
++ gcmkONERROR(
++ gckKERNEL_WriteShBuffer(Kernel, shBuf, uData, bytes));
++ break;
++
++ case gcvSHBUF_READ:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++ uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data);
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Read. */
++ gcmkONERROR(
++ gckKERNEL_ReadShBuffer(Kernel,
++ shBuf,
++ uData,
++ bytes,
++ &bytes));
++
++ /* Return copied size. */
++ Interface->u.ShBuf.bytes = bytes;
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++ default:
++ /* Invalid command. */
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++OnError:
++ /* Save status. */
++ kernelInterface->status = status;
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryCommandBuffer
++**
++** Query command buffer attributes.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gcsCOMMAND_BUFFER_INFO_PTR Information
++** Pointer to the information structure to receive buffer attributes.
++*/
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=0x%x *Pool=0x%x",
++ Kernel, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get the information. */
++ status = gckVGCOMMAND_QueryCommandBuffer(Kernel->vg->command, Information);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,85 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_vg_h_
++#define __gc_hal_kernel_vg_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel_hardware.h"
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckKERNEL object. */
++struct _gckVGKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* Pointer to gckINTERRUPT object. */
++ gckVGINTERRUPT interrupt;
++
++ /* Pointer to gckCOMMAND object. */
++ gckVGCOMMAND command;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckVGMMU mmu;
++
++ gckKERNEL kernel;
++};
++
++/* gckMMU object. */
++struct _gckVGMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER mutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctPOINTER pageTableLogical;
++
++ /* Allocation index. */
++ gctUINT32 entryCount;
++ gctUINT32 entry;
++};
++
++#endif /* __gc_hal_kernel_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_video_memory.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_video_memory.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_video_memory.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_video_memory.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2807 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_VIDMEM
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _Split
++**
++** Split a node on the required byte boundary.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the node to split.
++**
++** gctSIZE_T Bytes
++** Number of bytes to keep in the node.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gctBOOL
++** gcvTRUE if the node was split successfully, or gcvFALSE if there is an
++** error.
++**
++*/
++static gctBOOL
++_Split(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctPOINTER pointer = gcvNULL;
++
++ /* Make sure the byte boundary makes sense. */
++ if ((Bytes <= 0) || (Bytes > Node->VidMem.bytes))
++ {
++ return gcvFALSE;
++ }
++
++ /* Allocate a new gcuVIDMEM_NODE object. */
++ if (gcmIS_ERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcuVIDMEM_NODE),
++ &pointer)))
++ {
++ /* Error. */
++ return gcvFALSE;
++ }
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE structure. */
++ node->VidMem.offset = Node->VidMem.offset + Bytes;
++ node->VidMem.bytes = Node->VidMem.bytes - Bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.locked = 0;
++ node->VidMem.memory = Node->VidMem.memory;
++ node->VidMem.pool = Node->VidMem.pool;
++ node->VidMem.physical = Node->VidMem.physical;
++#ifdef __QNXNTO__
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++
++ /* Insert node behind specified node. */
++ node->VidMem.next = Node->VidMem.next;
++ node->VidMem.prev = Node;
++ Node->VidMem.next = node->VidMem.next->VidMem.prev = node;
++
++ /* Insert free node behind specified node. */
++ node->VidMem.nextFree = Node->VidMem.nextFree;
++ node->VidMem.prevFree = Node;
++ Node->VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
++
++ /* Adjust size of specified node. */
++ Node->VidMem.bytes = Bytes;
++
++ /* Success. */
++ return gcvTRUE;
++}
++
++/*******************************************************************************
++**
++** _Merge
++**
++** Merge two adjacent nodes together.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the first of the two nodes to merge.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++*/
++static gceSTATUS
++_Merge(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gceSTATUS status;
++
++ /* Save pointer to next node. */
++ node = Node->VidMem.next;
++
++ /* This is a good time to make sure the heap is not corrupted. */
++ if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset)
++ {
++ /* Corrupted heap. */
++ gcmkASSERT(
++ Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++
++ /* Adjust byte count. */
++ Node->VidMem.bytes += node->VidMem.bytes;
++
++ /* Unlink next node from linked list. */
++ Node->VidMem.next = node->VidMem.next;
++ Node->VidMem.nextFree = node->VidMem.nextFree;
++
++ Node->VidMem.next->VidMem.prev =
++ Node->VidMem.nextFree->VidMem.prevFree = Node;
++
++ /* Free next node. */
++ status = gcmkOS_SAFE_FREE(Os, node);
++ return status;
++}
++
++/******************************************************************************\
++******************************* gckVIDMEM API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVIDMEM_ConstructVirtual
++**
++** Construct a new gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T Bytes
++** Number of byte to allocate.
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
++*/
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Flag,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("Kernel=0x%x Flag=%x Bytes=%lu", Kernel, Flag, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate an gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union for virtual memory. */
++ node->Virtual.kernel = Kernel;
++ node->Virtual.contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS;
++ node->Virtual.logical = gcvNULL;
++#if gcdENABLE_VG
++ node->Virtual.kernelVirtual = gcvNULL;
++#endif
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ node->Virtual.lockeds[i] = 0;
++ node->Virtual.pageTables[i] = gcvNULL;
++ node->Virtual.lockKernels[i] = gcvNULL;
++ }
++
++ gcmkONERROR(gckOS_GetProcessID(&node->Virtual.processID));
++
++ /* Allocate the virtual memory. */
++ gcmkONERROR(
++ gckOS_AllocatePagedMemoryEx(os,
++ Flag,
++ node->Virtual.bytes = Bytes,
++ &node->Virtual.gid,
++ &node->Virtual.physical));
++
++ /* Return pointer to the gcuVIDMEM_NODE union. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Created virtual node 0x%x for %u bytes @ 0x%x",
++ node, Bytes, node->Virtual.physical);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Free the structure. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_DestroyVirtual
++**
++** Destroy an gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gckOS os;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
++
++ /* Extact the gckOS object pointer. */
++ os = Node->Virtual.kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Delete the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, Node));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Construct
++**
++** Construct a new gckVIDMEM object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 BaseAddress
++** Base address for the video memory heap.
++**
++** gctSIZE_T Bytes
++** Number of bytes in the video memory heap.
++**
++** gctSIZE_T Threshold
++** Minimum number of bytes beyond am allocation before the node is
++** split. Can be used as a minimum alignment requirement.
++**
++** gctSIZE_T BankSize
++** Number of bytes per physical memory bank. Used by bank
++** optimization.
++**
++** OUTPUT:
++**
++** gckVIDMEM * Memory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object.
++*/
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T BankSize,
++ OUT gckVIDMEM * Memory
++ )
++{
++ gckVIDMEM memory = gcvNULL;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctINT i, banks = 0;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 heapBytes;
++ gctUINT32 bankSize;
++
++ gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
++ "BankSize=%lu",
++ Os, BaseAddress, Bytes, Threshold, BankSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ gcmkSAFECASTSIZET(heapBytes, Bytes);
++ gcmkSAFECASTSIZET(bankSize, BankSize);
++
++ /* Allocate the gckVIDMEM object. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), &pointer));
++
++ memory = pointer;
++
++ /* Initialize the gckVIDMEM object. */
++ memory->object.type = gcvOBJ_VIDMEM;
++ memory->os = Os;
++
++ /* Set video memory heap information. */
++ memory->baseAddress = BaseAddress;
++ memory->bytes = heapBytes;
++ memory->freeBytes = heapBytes;
++ memory->threshold = Threshold;
++ memory->mutex = gcvNULL;
++
++ BaseAddress = 0;
++
++ /* Walk all possible banks. */
++ for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
++ {
++ gctUINT32 bytes;
++
++ if (BankSize == 0)
++ {
++ /* Use all bytes for the first bank. */
++ bytes = heapBytes;
++ }
++ else
++ {
++ /* Compute number of bytes for this bank. */
++ bytes = gcmALIGN(BaseAddress + 1, bankSize) - BaseAddress;
++
++ if (bytes > heapBytes)
++ {
++ /* Make sure we don't exceed the total number of bytes. */
++ bytes = heapBytes;
++ }
++ }
++
++ if (bytes == 0)
++ {
++ /* Mark heap is not used. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = gcvNULL;
++ continue;
++ }
++
++ /* Allocate one gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union. */
++ node->VidMem.memory = memory;
++
++ node->VidMem.next =
++ node->VidMem.prev =
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = &memory->sentinel[i];
++
++ node->VidMem.offset = BaseAddress;
++ node->VidMem.bytes = bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.physical = 0;
++ node->VidMem.pool = gcvPOOL_UNKNOWN;
++
++ node->VidMem.locked = 0;
++
++#ifdef __QNXNTO__
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++
++#if gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ /* Initialize the linked list of nodes. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = node;
++
++ /* Mark sentinel. */
++ memory->sentinel[i].VidMem.bytes = 0;
++
++ /* Adjust address for next bank. */
++ BaseAddress += bytes;
++ heapBytes -= bytes;
++ banks ++;
++ }
++
++ /* Assign all the bank mappings. */
++ memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1;
++ memory->mapping[gcvSURF_BITMAP] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_DEPTH] = banks - 1;
++ memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TEXTURE] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_VERTEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_INDEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TILE_STATUS] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0;
++
++#if gcdENABLE_VG
++ memory->mapping[gcvSURF_IMAGE] = 0;
++ memory->mapping[gcvSURF_MASK] = 0;
++ memory->mapping[gcvSURF_SCISSOR] = 0;
++#endif
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] INDEX: bank %d",
++ memory->mapping[gcvSURF_INDEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] VERTEX: bank %d",
++ memory->mapping[gcvSURF_VERTEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TEXTURE: bank %d",
++ memory->mapping[gcvSURF_TEXTURE]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] RENDER_TARGET: bank %d",
++ memory->mapping[gcvSURF_RENDER_TARGET]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] DEPTH: bank %d",
++ memory->mapping[gcvSURF_DEPTH]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TILE_STATUS: bank %d",
++ memory->mapping[gcvSURF_TILE_STATUS]);
++
++ /* Allocate the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex));
++
++ /* Return pointer to the gckVIDMEM object. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (memory != gcvNULL)
++ {
++ if (memory->mutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex));
++ }
++
++ for (i = 0; i < banks; ++i)
++ {
++ /* Free the heap. */
++ gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory->sentinel[i].VidMem.next));
++ }
++
++ /* Free the object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Destroy
++**
++** Destroy an gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ )
++{
++ gcuVIDMEM_NODE_PTR node, next;
++ gctINT i;
++
++ gcmkHEADER_ARG("Memory=0x%x", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++
++ /* Walk all sentinels. */
++ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ /* Bail out of the heap is not used. */
++ if (Memory->sentinel[i].VidMem.next == gcvNULL)
++ {
++ break;
++ }
++
++ /* Walk all the nodes until we reach the sentinel. */
++ for (node = Memory->sentinel[i].VidMem.next;
++ node->VidMem.bytes != 0;
++ node = next)
++ {
++ /* Save pointer to the next node. */
++ next = node->VidMem.next;
++
++ /* Free the node. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, node));
++ }
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex));
++
++ /* Mark the object as unknown. */
++ Memory->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVIDMEM object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, Memory));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_BANK_ALIGNMENT
++
++#if !gcdBANK_BIT_START
++#error gcdBANK_BIT_START not defined.
++#endif
++
++#if !gcdBANK_BIT_END
++#error gcdBANK_BIT_END not defined.
++#endif
++/*******************************************************************************
++** _GetSurfaceBankAlignment
++**
++** Return the required offset alignment required to the make BaseAddress
++** aligned properly.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gcoOS object.
++**
++** gceSURF_TYPE Type
++** Type of allocation.
++**
++** gctUINT32 BaseAddress
++** Base address of current video memory node.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR AlignmentOffset
++** Pointer to a variable that will hold the number of bytes to skip in
++** the current video memory node in order to make the alignment bank
++** aligned.
++*/
++static gceSTATUS
++_GetSurfaceBankAlignment(
++ IN gckKERNEL Kernel,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 BaseAddress,
++ OUT gctUINT32_PTR AlignmentOffset
++ )
++{
++ gctUINT32 bank;
++ /* To retrieve the bank. */
++ static const gctUINT32 bankMask = (0xFFFFFFFF << gcdBANK_BIT_START)
++ ^ (0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ /* To retrieve the bank and all the lower bytes. */
++ static const gctUINT32 byteMask = ~(0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ gcmkHEADER_ARG("Type=%d BaseAddress=0x%x ", Type, BaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(AlignmentOffset != gcvNULL);
++
++ switch (Type)
++ {
++ case gcvSURF_RENDER_TARGET:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the first bank. */
++ *AlignmentOffset = (bank == 0) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + 0) - (BaseAddress & byteMask);
++ break;
++
++ case gcvSURF_DEPTH:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the third bank. */
++ *AlignmentOffset = (bank == 2) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + (2 << gcdBANK_BIT_START)) - (BaseAddress & byteMask);
++
++ /* Minimum 256 byte alignment needed for fast_msaa. */
++ if ((gcdBANK_CHANNEL_BIT > 7) ||
++ ((gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_FAST_MSAA) != gcvSTATUS_TRUE) &&
++ (gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_SMALL_MSAA) != gcvSTATUS_TRUE)))
++ {
++ /* Add a channel offset at the channel bit. */
++ *AlignmentOffset += (1 << gcdBANK_CHANNEL_BIT);
++ }
++ break;
++
++ default:
++ /* no alignment needed. */
++ *AlignmentOffset = 0;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("*AlignmentOffset=%u", *AlignmentOffset);
++ return gcvSTATUS_OK;
++}
++#endif
++
++static gcuVIDMEM_NODE_PTR
++_FindNode(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctINT Bank,
++ IN gctSIZE_T Bytes,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Alignment
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++
++#if gcdENABLE_BANK_ALIGNMENT
++ gctUINT32 bankAlignment;
++ gceSTATUS status;
++#endif
++
++ if (Memory->sentinel[Bank].VidMem.nextFree == gcvNULL)
++ {
++ /* No free nodes left. */
++ return gcvNULL;
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++ /* Walk all free nodes until we have one that is big enough or we have
++ ** reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++ if (node->VidMem.bytes < Bytes)
++ {
++ continue;
++ }
++
++ gcmkONERROR(_GetSurfaceBankAlignment(
++ Kernel,
++ Type,
++ node->VidMem.memory->baseAddress + node->VidMem.offset,
++ &bankAlignment));
++
++ bankAlignment = gcmALIGN(bankAlignment, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0)
++ ? 0
++ : (*Alignment - (node->VidMem.offset % *Alignment));
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment + bankAlignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment + bankAlignment;
++ return node;
++ }
++ }
++#endif
++
++ /* Walk all free nodes until we have one that is big enough or we have
++ reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++ gctUINT offset;
++
++ gctINT modulo;
++
++ gcmkSAFECASTSIZET(offset, node->VidMem.offset);
++
++ modulo = gckMATH_ModuloInt(offset, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0) ? 0 : (*Alignment - modulo);
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment;
++ return node;
++ }
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++OnError:
++#endif
++ /* Not enough memory. */
++ return gcvNULL;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_AllocateLinear
++**
++** Allocate linear memory from the gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctUINT32 Alignment
++** Byte alignment for allocation.
++**
++** gceSURF_TYPE Type
++** Type of surface to allocate (use by bank optimization).
++**
++** gctBOOL Specified
++** If user must use this pool, it should set Specified to gcvTRUE,
++** otherwise allocator may reserve some memory for other usage, such
++** as small block size allocation request.
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that will hold the allocated memory node.
++*/
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ IN gctBOOL Specified,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++ gctINT bank, i;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
++ Memory, Bytes, Alignment, Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Type < gcvSURF_NUM_TYPES);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ if (Bytes > Memory->freeBytes)
++ {
++ /* Not enough memory. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++
++#if gcdSMALL_BLOCK_SIZE
++ if ((Memory->freeBytes < (Memory->bytes/gcdRATIO_FOR_SMALL_MEMORY))
++ && (Bytes >= gcdSMALL_BLOCK_SIZE)
++ && (Specified == gcvFALSE)
++ )
++ {
++ /* The left memory is for small memory.*/
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++#endif
++
++ /* Find the default bank for this surface type. */
++ gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
++ bank = Memory->mapping[Type];
++ alignment = Alignment;
++
++ /* Find a free node in the default bank. */
++ node = _FindNode(Kernel, Memory, bank, Bytes, Type, &alignment);
++
++ /* Out of memory? */
++ if (node == gcvNULL)
++ {
++ /* Walk all lower banks. */
++ for (i = bank - 1; i >= 0; --i)
++ {
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Kernel, Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ /* Walk all upper banks. */
++ for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
++ {
++ /* Abort when we reach unused banks. */
++ break;
++ }
++
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Kernel, Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ /* Out of memory. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++
++ /* Do we have an alignment? */
++ if (alignment > 0)
++ {
++ /* Split the node so it is aligned. */
++ if (_Split(Memory->os, node, alignment))
++ {
++ /* Successful split, move to aligned node. */
++ node = node->VidMem.next;
++
++ /* Remove alignment. */
++ alignment = 0;
++ }
++ }
++
++ /* Do we have enough memory after the allocation to split it? */
++ if (node->VidMem.bytes - Bytes > Memory->threshold)
++ {
++ /* Adjust the node size. */
++ _Split(Memory->os, node, Bytes);
++ }
++
++ /* Remove the node from the free list. */
++ node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
++ node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = gcvNULL;
++
++ /* Fill in the information. */
++ node->VidMem.alignment = alignment;
++ node->VidMem.memory = Memory;
++#ifdef __QNXNTO__
++ node->VidMem.logical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
++#endif
++
++ /* Adjust the number of free bytes. */
++ Memory->freeBytes -= node->VidMem.bytes;
++
++#if gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++
++ /* Return the pointer to the node. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Allocated %u bytes @ 0x%x [0x%08X]",
++ node->VidMem.bytes, node, node->VidMem.offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Free
++**
++** Free an allocated video memory node.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Free(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gceSTATUS status;
++ gckKERNEL kernel = gcvNULL;
++ gckVIDMEM memory = gcvNULL;
++ gcuVIDMEM_NODE_PTR node;
++ gctBOOL mutexAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Extract pointer to gckVIDMEM object owning the node. */
++ memory = Node->VidMem.memory;
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));
++
++ mutexAcquired = gcvTRUE;
++
++#ifdef __QNXNTO__
++ /* Unmap the video memory. */
++ if (Node->VidMem.logical != gcvNULL)
++ {
++ gckKERNEL_UnmapVideoMemory(
++ Kernel,
++ Node->VidMem.logical,
++ Node->VidMem.processID,
++ Node->VidMem.bytes);
++ Node->VidMem.logical = gcvNULL;
++ }
++
++ /* Reset. */
++ Node->VidMem.processID = 0;
++
++ /* Don't try to re-free an already freed node. */
++ if ((Node->VidMem.nextFree == gcvNULL)
++ && (Node->VidMem.prevFree == gcvNULL)
++ )
++#endif
++ {
++#if gcdENABLE_VG
++ if (Node->VidMem.kernelVirtual)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "%s(%d) Unmap %x from kernel space.",
++ __FUNCTION__, __LINE__,
++ Node->VidMem.kernelVirtual);
++
++ gcmkVERIFY_OK(
++ gckOS_UnmapPhysical(memory->os,
++ Node->VidMem.kernelVirtual,
++ Node->VidMem.bytes));
++
++ Node->VidMem.kernelVirtual = gcvNULL;
++ }
++#endif
++
++ /* Check if Node is already freed. */
++ if (Node->VidMem.nextFree)
++ {
++ /* Node is alread freed. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ /* Update the number of free bytes. */
++ memory->freeBytes += Node->VidMem.bytes;
++
++ /* Find the next free node. */
++ for (node = Node->VidMem.next;
++ node != gcvNULL && node->VidMem.nextFree == gcvNULL;
++ node = node->VidMem.next) ;
++
++ /* Insert this node in the free list. */
++ Node->VidMem.nextFree = node;
++ Node->VidMem.prevFree = node->VidMem.prevFree;
++
++ Node->VidMem.prevFree->VidMem.nextFree =
++ node->VidMem.prevFree = Node;
++
++ /* Is the next node a free node and not the sentinel? */
++ if ((Node->VidMem.next == Node->VidMem.nextFree)
++ && (Node->VidMem.next->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the next node. */
++ gcmkONERROR(_Merge(memory->os, node = Node));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++
++ /* Is the previous node a free node and not the sentinel? */
++ if ((Node->VidMem.prev == Node->VidMem.prevFree)
++ && (Node->VidMem.prev->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the previous node. */
++ gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Node 0x%x is freed.",
++ Node);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ /* Get gckKERNEL object. */
++ kernel = Node->Virtual.kernel;
++
++ /* Verify the gckKERNEL object pointer. */
++ gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);
++
++#if gcdENABLE_VG
++ if (Node->Virtual.kernelVirtual)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "%s(%d) Unmap %x from kernel space.",
++ __FUNCTION__, __LINE__,
++ Node->Virtual.kernelVirtual);
++
++ gcmkVERIFY_OK(
++ gckOS_UnmapPhysical(kernel->os,
++ Node->Virtual.kernelVirtual,
++ Node->Virtual.bytes));
++
++ Node->Virtual.kernelVirtual = gcvNULL;
++ }
++#endif
++
++ /* Free the virtual memory. */
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(kernel->os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes));
++
++ /* Destroy the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutexAcquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ memory->os, memory->mutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if !gcdPROCESS_ADDRESS_SPACE
++/*******************************************************************************
++**
++** _NeedVirtualMapping
++**
++** Whether setup GPU page table for video node.
++**
++** INPUT:
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** gceCORE Core
++** Id of current GPU.
++**
++** OUTPUT:
++** gctBOOL * NeedMapping
++** A pointer hold the result whether Node should be mapping.
++*/
++static gceSTATUS
++_NeedVirtualMapping(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gcuVIDMEM_NODE_PTR Node,
++ OUT gctBOOL * NeedMapping
++)
++{
++ gceSTATUS status;
++ gctUINT32 phys;
++ gctUINT32 end;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctUINT32 baseAddress;
++ gctUINT32 bytes;
++
++ gcmkHEADER_ARG("Node=0x%X", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(NeedMapping != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Core < gcdMAX_GPU_COUNT);
++
++ if (Node->Virtual.contiguous)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ *NeedMapping = gcvFALSE;
++ }
++ else
++#endif
++ {
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(gckOS_UserLogicalToPhysical(
++ Kernel->os, Node->Virtual.logical, &phys
++ ));
++
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++
++ gcmkASSERT(phys >= baseAddress);
++
++ /* Subtract baseAddress to get a GPU address used for programming. */
++ phys -= baseAddress;
++
++ /* If part of region is belong to gcvPOOL_VIRTUAL,
++ ** whole region has to be mapped. */
++ gcmkSAFECASTSIZET(bytes, Node->Virtual.bytes);
++ end = phys + bytes - 1;
++
++ gcmkONERROR(gckHARDWARE_SplitMemory(
++ Kernel->hardware, end, &pool, &offset
++ ));
++
++ *NeedMapping = (pool == gcvPOOL_VIRTUAL);
++ }
++ }
++ else
++ {
++ *NeedMapping = gcvTRUE;
++ }
++
++ gcmkFOOTER_ARG("*NeedMapping=%d", *NeedMapping);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++gcsGPU_MAP_PTR
++_FindGPUMap(
++ IN gcsGPU_MAP_PTR Head,
++ IN gctINT ProcessID
++ )
++{
++ gcsGPU_MAP_PTR map = Head;
++
++ while (map)
++ {
++ if (map->pid == ProcessID)
++ {
++ return map;
++ }
++
++ map = map->next;
++ }
++
++ return gcvNULL;
++}
++
++gcsGPU_MAP_PTR
++_CreateGPUMap(
++ IN gckOS Os,
++ IN gcsGPU_MAP_PTR *Head,
++ IN gcsGPU_MAP_PTR *Tail,
++ IN gctINT ProcessID
++ )
++{
++ gcsGPU_MAP_PTR gpuMap;
++ gctPOINTER pointer = gcvNULL;
++
++ gckOS_Allocate(Os, sizeof(gcsGPU_MAP), &pointer);
++
++ if (pointer == gcvNULL)
++ {
++ return gcvNULL;
++ }
++
++ gpuMap = pointer;
++
++ gckOS_ZeroMemory(pointer, sizeof(gcsGPU_MAP));
++
++ gpuMap->pid = ProcessID;
++
++ if (!*Head)
++ {
++ *Head = *Tail = gpuMap;
++ }
++ else
++ {
++ gpuMap->prev = *Tail;
++ (*Tail)->next = gpuMap;
++ *Tail = gpuMap;
++ }
++
++ return gpuMap;
++}
++
++void
++_DestroyGPUMap(
++ IN gckOS Os,
++ IN gcsGPU_MAP_PTR *Head,
++ IN gcsGPU_MAP_PTR *Tail,
++ IN gcsGPU_MAP_PTR gpuMap
++ )
++{
++
++ if (gpuMap == *Head)
++ {
++ if ((*Head = gpuMap->next) == gcvNULL)
++ {
++ *Tail = gcvNULL;
++ }
++ }
++ else
++ {
++ gpuMap->prev->next = gpuMap->next;
++ if (gpuMap == *Tail)
++ {
++ *Tail = gpuMap->prev;
++ }
++ else
++ {
++ gpuMap->next->prev = gpuMap->prev;
++ }
++ }
++
++ gcmkOS_SAFE_FREE(Os, gpuMap);
++}
++#endif
++
++/*******************************************************************************
++**
++** gckVIDMEM_Lock
++**
++** Lock a video memory node and return its hardware specific address.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that will hold the hardware specific address.
++**
++** gctUINT32 * PhysicalAddress
++** Pointer to a variable that will hold the bus address of a contiguous
++** video node.
++*/
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address,
++ OUT gctUINT32 * Gid,
++ OUT gctUINT64 * PhysicalAddress
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL locked = gcvFALSE;
++ gckOS os = gcvNULL;
++#if !gcdPROCESS_ADDRESS_SPACE
++ gctBOOL needMapping = gcvFALSE;
++#endif
++ gctUINT32 baseAddress;
++ gctUINT32 physicalAddress;
++ gcuVIDMEM_NODE_PTR node = Node->node;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ if ((node == gcvNULL)
++ || (node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Node->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /**************************** Video Memory ********************************/
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gctUINT32 offset;
++
++ if (Cacheable == gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Increment the lock count. */
++ node->VidMem.locked ++;
++
++ /* Return the physical address of the node. */
++ gcmkSAFECASTSIZET(offset, node->VidMem.offset);
++
++ *Address = node->VidMem.memory->baseAddress
++ + offset
++ + node->VidMem.alignment;
++
++ physicalAddress = *Address;
++
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ /* Convert physical to GPU address for old mmu. */
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++ gcmkASSERT(*Address > baseAddress);
++ *Address -= baseAddress;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(
++ Kernel->os,
++ *Address,
++ Address
++ ));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Locked node 0x%x (%d) @ 0x%08X",
++ node,
++ node->VidMem.locked,
++ *Address);
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++
++ *Gid = node->Virtual.gid;
++
++#if gcdPAGED_MEMORY_CACHEABLE
++ /* Force video memory cacheable. */
++ Cacheable = gcvTRUE;
++#endif
++
++ gcmkONERROR(
++ gckOS_LockPages(os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ Cacheable,
++ &node->Virtual.logical,
++ &node->Virtual.pageCount));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ os,
++ node->Virtual.logical,
++ &physicalAddress
++ ));
++
++#if gcdENABLE_VG
++ node->Virtual.physicalAddress = physicalAddress;
++#endif
++
++#if !gcdPROCESS_ADDRESS_SPACE
++ /* Increment the lock count. */
++ if (node->Virtual.lockeds[Kernel->core] ++ == 0)
++ {
++ locked = gcvTRUE;
++
++ gcmkONERROR(_NeedVirtualMapping(Kernel, Kernel->core, node, &needMapping));
++
++ if (needMapping == gcvFALSE)
++ {
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(gckVGHARDWARE_ConvertLogical(
++ Kernel->vg->hardware,
++ node->Virtual.logical,
++ gcvTRUE,
++ &node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ node->Virtual.logical,
++ gcvTRUE,
++ &node->Virtual.addresses[Kernel->core]));
++ }
++ }
++ else
++ {
++#if gcdSECURITY
++ gctPHYS_ADDR physicalArrayPhysical;
++ gctPOINTER physicalArrayLogical;
++
++ gcmkONERROR(gckOS_AllocatePageArray(
++ os,
++ node->Virtual.physical,
++ node->Virtual.pageCount,
++ &physicalArrayLogical,
++ &physicalArrayPhysical
++ ));
++
++ gcmkONERROR(gckKERNEL_SecurityMapMemory(
++ Kernel,
++ physicalArrayLogical,
++ node->Virtual.pageCount,
++ &node->Virtual.addresses[Kernel->core]
++ ));
++
++ gcmkONERROR(gckOS_FreeNonPagedMemory(
++ os,
++ 1,
++ physicalArrayPhysical,
++ physicalArrayLogical
++ ));
++#else
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckVGMMU_AllocatePages(Kernel->vg->mmu,
++ node->Virtual.pageCount,
++ &node->Virtual.pageTables[Kernel->core],
++ &node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckMMU_AllocatePagesEx(Kernel->mmu,
++ node->Virtual.pageCount,
++ node->Virtual.type,
++ &node->Virtual.pageTables[Kernel->core],
++ &node->Virtual.addresses[Kernel->core]));
++ }
++
++ node->Virtual.lockKernels[Kernel->core] = Kernel;
++
++ /* Map the pages. */
++ gcmkONERROR(
++ gckOS_MapPagesEx(os,
++ Kernel->core,
++ node->Virtual.physical,
++ node->Virtual.pageCount,
++ node->Virtual.addresses[Kernel->core],
++ node->Virtual.pageTables[Kernel->core]));
++
++#if gcdENABLE_VG
++ if (Kernel->core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Kernel->vg->mmu));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckMMU_Flush(Kernel->mmu, node->Virtual.type));
++ }
++#endif
++ }
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Mapped virtual node 0x%x to 0x%08X",
++ node,
++ node->Virtual.addresses[Kernel->core]);
++ }
++
++ /* Return hardware address. */
++ *Address = node->Virtual.addresses[Kernel->core];
++#endif
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex));
++
++ *PhysicalAddress = (gctUINT64)physicalAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (locked)
++ {
++ if (node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ node->Virtual.pageTables[Kernel->core],
++ node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu,
++ node->Virtual.pageTables[Kernel->core],
++ node->Virtual.pageCount));
++ }
++ node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++
++ /* Unlock the pages. */
++ gcmkVERIFY_OK(
++ gckOS_UnlockPages(os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ node->Virtual.logical
++ ));
++
++ node->Virtual.lockeds[Kernel->core]--;
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Unlock
++**
++** Unlock a video memory node.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a locked gcuVIDMEM_NODE union.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** gctBOOL * Asynchroneous
++** Pointer to a variable specifying whether the surface should be
++** unlocked asynchroneously or not.
++**
++** OUTPUT:
++**
++** gctBOOL * Asynchroneous
++** Pointer to a variable receiving the number of bytes used in the
++** command buffer specified by 'Commands'. If gcvNULL, there is no
++** command buffer.
++*/
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchroneous
++ )
++{
++ gceSTATUS status;
++ gckOS os = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gcuVIDMEM_NODE_PTR node = Node->node;
++
++ gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
++ Node, Type, gcmOPT_VALUE(Asynchroneous));
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Verify the arguments. */
++ if ((node == gcvNULL)
++ || (node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Node->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /**************************** Video Memory ********************************/
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (node->VidMem.locked <= 0)
++ {
++ /* The surface was not locked. */
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ if (Asynchroneous != gcvNULL)
++ {
++ /* Schedule an event to sync with GPU. */
++ *Asynchroneous = gcvTRUE;
++ }
++ else
++ {
++ /* Decrement the lock count. */
++ node->VidMem.locked --;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unlocked node 0x%x (%d)",
++ node,
++ node->VidMem.locked);
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++
++
++ if (Asynchroneous == gcvNULL)
++ {
++#if !gcdPROCESS_ADDRESS_SPACE
++ if (node->Virtual.lockeds[Kernel->core] == 0)
++ {
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ /* Decrement lock count. */
++ -- node->Virtual.lockeds[Kernel->core];
++
++ /* See if we can unlock the resources. */
++ if (node->Virtual.lockeds[Kernel->core] == 0)
++ {
++#if gcdSECURITY
++ if (node->Virtual.addresses[Kernel->core] > 0x80000000)
++ {
++ gcmkONERROR(gckKERNEL_SecurityUnmapMemory(
++ Kernel,
++ node->Virtual.addresses[Kernel->core],
++ node->Virtual.pageCount
++ ));
++ }
++#else
++ /* Free the page table. */
++ if (node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ node->Virtual.pageTables[Kernel->core],
++ node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(
++ gckMMU_FreePages(Kernel->mmu,
++ node->Virtual.pageTables[Kernel->core],
++ node->Virtual.pageCount));
++ }
++
++ gcmkONERROR(gckOS_UnmapPages(
++ Kernel->os,
++ node->Virtual.pageCount,
++ node->Virtual.addresses[Kernel->core]
++ ));
++
++ /* Mark page table as freed. */
++ node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++#endif
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unmapped virtual node 0x%x from 0x%08X",
++ node, node->Virtual.addresses[Kernel->core]);
++#endif
++
++ }
++
++ else
++ {
++ gcmkONERROR(
++ gckOS_UnlockPages(os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ node->Virtual.logical));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Scheduled unlock for virtual node 0x%x",
++ node);
++
++ /* Schedule the surface to be unlocked. */
++ *Asynchroneous = gcvTRUE;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckVIDMEM_Node_Lock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ OUT gctUINT32 *Address
++ )
++{
++ gceSTATUS status;
++ gckOS os;
++ gcuVIDMEM_NODE_PTR node = Node->node;
++ gcsGPU_MAP_PTR gpuMap;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctUINT32 phys = gcvINVALID_ADDRESS;
++ gctUINT32 processID;
++ gcsLOCK_INFO_PTR lockInfo;
++ gctUINT32 pageCount;
++ gckMMU mmu;
++ gctUINT32 i;
++ gctUINT32_PTR pageTableEntry;
++ gctUINT32 offset = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Node = %x", Node);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu));
++
++ gcmkONERROR(gckOS_AcquireMutex(os, Node->mapMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get map information for current process. */
++ gpuMap = _FindGPUMap(Node->mapHead, processID);
++
++ if (gpuMap == gcvNULL)
++ {
++ gpuMap = _CreateGPUMap(os, &Node->mapHead, &Node->mapTail, processID);
++
++ if (gpuMap == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ lockInfo = &gpuMap->lockInfo;
++
++ if (lockInfo->lockeds[Kernel->core] ++ == 0)
++ {
++ /* Get necessary information. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ phys = node->VidMem.memory->baseAddress
++ + node->VidMem.offset
++ + node->VidMem.alignment;
++
++ /* GPU page table use 4K page. */
++ pageCount = ((phys + node->VidMem.bytes + 4096 - 1) >> 12)
++ - (phys >> 12);
++
++ offset = phys & 0xFFF;
++ }
++ else
++ {
++ pageCount = node->Virtual.pageCount;
++ physical = node->Virtual.physical;
++ }
++
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(gckMMU_AllocatePages(
++ mmu,
++ pageCount,
++ &lockInfo->pageTables[Kernel->core],
++ &lockInfo->GPUAddresses[Kernel->core]));
++
++ /* Record MMU from which pages are allocated. */
++ lockInfo->lockMmus[Kernel->core] = mmu;
++
++ pageTableEntry = lockInfo->pageTables[Kernel->core];
++
++ /* Fill page table entries. */
++ if (phys != gcvINVALID_ADDRESS)
++ {
++ gctUINT32 address = lockInfo->GPUAddresses[Kernel->core];
++ for (i = 0; i < pageCount; i++)
++ {
++ gckMMU_GetPageEntry(mmu, address, &pageTableEntry);
++ gckMMU_SetPage(mmu, phys & 0xFFFFF000, pageTableEntry);
++ phys += 4096;
++ address += 4096;
++ pageTableEntry += 1;
++ }
++ }
++ else
++ {
++ gctUINT32 address = lockInfo->GPUAddresses[Kernel->core];
++ gcmkASSERT(physical != gcvNULL);
++ gcmkONERROR(gckOS_MapPagesEx(os,
++ Kernel->core,
++ physical,
++ pageCount,
++ address,
++ pageTableEntry));
++ }
++
++ gcmkONERROR(gckMMU_Flush(mmu));
++ }
++
++ *Address = lockInfo->GPUAddresses[Kernel->core] + offset;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mapMutex));
++ acquired = gcvFALSE;
++
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mapMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_NODE_Unlock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsGPU_MAP_PTR gpuMap;
++ gcsLOCK_INFO_PTR lockInfo;
++ gckMMU mmu;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 pageCount;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%08X, Node = %x, ProcessID=%d",
++ Kernel, Node, ProcessID);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Node->mapMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get map information for current process. */
++ gpuMap = _FindGPUMap(Node->mapHead, ProcessID);
++
++ if (gpuMap == gcvNULL)
++ {
++ /* No mapping for this process. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ lockInfo = &gpuMap->lockInfo;
++
++ if (--lockInfo->lockeds[Kernel->core] == 0)
++ {
++ node = Node->node;
++
++ /* Get necessary information. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gctUINT32 phys = node->VidMem.memory->baseAddress
++ + node->VidMem.offset
++ + node->VidMem.alignment;
++
++ /* GPU page table use 4K page. */
++ pageCount = ((phys + node->VidMem.bytes + 4096 - 1) >> 12)
++ - (phys >> 12);
++ }
++ else
++ {
++ pageCount = node->Virtual.pageCount;
++ }
++
++ /* Get MMU which allocates pages. */
++ mmu = lockInfo->lockMmus[Kernel->core];
++
++ /* Free virtual spaces in page table. */
++ gcmkVERIFY_OK(gckMMU_FreePagesEx(
++ mmu,
++ lockInfo->GPUAddresses[Kernel->core],
++ pageCount
++ ));
++
++ _DestroyGPUMap(Kernel->os, &Node->mapHead, &Node->mapTail, gpuMap);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Node->mapMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Node->mapMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckVIDMEM_HANDLE_Allocate
++**
++** Allocate a handle for a gckVIDMEM_NODE object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gckVIDMEM_NODE Node
++** Pointer to a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Handle
++** Pointer to a variable receiving a handle represent this
++** gckVIDMEM_NODE in userspace.
++*/
++static gceSTATUS
++gckVIDMEM_HANDLE_Allocate(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ OUT gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID = 0;
++ gctPOINTER pointer = gcvNULL;
++ gctPOINTER handleDatabase = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctUINT32 handle = 0;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gckOS os = Kernel->os;
++
++ gcmkHEADER_ARG("Kernel=0x%X, Node=0x%X", Kernel, Node);
++
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate a gckVIDMEM_HANDLE object. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsVIDMEM_HANDLE), &pointer));
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsVIDMEM_HANDLE)));
++
++ handleObject = pointer;
++
++ gcmkONERROR(gckOS_AtomConstruct(os, &handleObject->reference));
++
++ /* Set default reference count to 1. */
++ gckOS_AtomSet(os, handleObject->reference, 1);
++
++ gcmkVERIFY_OK(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel,
++ processID,
++ &handleDatabase,
++ &mutex));
++
++ /* Allocate a handle for this object. */
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(handleDatabase, handleObject, &handle));
++
++ handleObject->node = Node;
++ handleObject->handle = handle;
++
++ *Handle = handle;
++
++ gcmkFOOTER_ARG("*Handle=%d", *Handle);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (handleObject != gcvNULL)
++ {
++ if (handleObject->reference != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, handleObject->reference));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, handleObject));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckVIDMEM_NODE_Reference(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node
++ )
++{
++ gctINT32 oldValue;
++ gcmkHEADER_ARG("Kernel=0x%X Node=0x%X", Kernel, Node);
++
++ gckOS_AtomIncrement(Kernel->os, Node->reference, &oldValue);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVIDMEM_HANDLE_Reference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gctPOINTER database = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctINT32 oldValue = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Handle=%d PrcoessID=%d", Handle, ProcessID);
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel, ProcessID, &database, &mutex));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Translate handle to gckVIDMEM_HANDLE object. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject));
++
++ /* Increase the reference count. */
++ gckOS_AtomIncrement(Kernel->os, handleObject->reference, &oldValue);
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_HANDLE_Dereference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ )
++{
++ gceSTATUS status;
++ gctPOINTER handleDatabase = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctINT32 oldValue = 0;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Handle=%d PrcoessID=%d", Handle, ProcessID);
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel,
++ ProcessID,
++ &handleDatabase,
++ &mutex));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Translate handle to gckVIDMEM_HANDLE. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(handleDatabase, Handle, (gctPOINTER *)&handleObject));
++
++ gckOS_AtomDecrement(Kernel->os, handleObject->reference, &oldValue);
++
++ if (oldValue == 1)
++ {
++ /* Remove handle from database if this is the last reference. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(handleDatabase, Handle));
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ if (oldValue == 1)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, handleObject->reference));
++ gcmkOS_SAFE_FREE(Kernel->os, handleObject);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_HANDLE_LookupAndReference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ OUT gckVIDMEM_NODE * Node
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctPOINTER database = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctUINT32 processID = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle);
++
++ gckOS_GetProcessID(&processID);
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel, processID, &database, &mutex));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Translate handle to gckVIDMEM_HANDLE object. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject));
++
++ /* Get gckVIDMEM_NODE object. */
++ node = handleObject->node;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ /* Reference this gckVIDMEM_NODE object. */
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Reference(Kernel, node));
++
++ /* Return result. */
++ *Node = node;
++
++ gcmkFOOTER_ARG("*Node=%d", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_HANDLE_Lookup(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle,
++ OUT gckVIDMEM_NODE * Node
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctPOINTER database = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ProcessID=%d Handle=%d",
++ Kernel, ProcessID, Handle);
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel, ProcessID, &database, &mutex));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject));
++
++ node = handleObject->node;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ *Node = node;
++
++ gcmkFOOTER_ARG("*Node=%d", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_NODE_Allocate
++**
++** Allocate a gckVIDMEM_NODE object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** gctUINT32 * Handle
++** Pointer to a variable receiving a handle represent this
++** gckVIDMEM_NODE in userspace.
++*/
++gceSTATUS
++gckVIDMEM_NODE_Allocate(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR VideoNode,
++ IN gceSURF_TYPE Type,
++ IN gcePOOL Pool,
++ IN gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 handle = 0;
++ gckOS os = Kernel->os;
++
++ gcmkHEADER_ARG("Kernel=0x%X VideoNode=0x%X", Kernel, VideoNode);
++
++ /* Construct a node. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsVIDMEM_NODE), &pointer));
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsVIDMEM_NODE)));
++
++ node = pointer;
++
++ node->node = VideoNode;
++ node->type = Type;
++ node->pool = Pool;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckOS_CreateMutex(os, &node->mapMutex));
++#endif
++
++ gcmkONERROR(gckOS_AtomConstruct(os, &node->reference));
++
++ gcmkONERROR(gckOS_CreateMutex(os, &node->mutex));
++
++ /* Reference is 1 by default . */
++ gckVIDMEM_NODE_Reference(Kernel, node);
++
++ /* Create a handle to represent this node. */
++ gcmkONERROR(gckVIDMEM_HANDLE_Allocate(Kernel, node, &handle));
++
++ *Handle = handle;
++
++ gcmkFOOTER_ARG("*Handle=%d", *Handle);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (node != gcvNULL)
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ if (node->mapMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->mapMutex));
++ }
++#endif
++
++ if (node->mutex)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->mutex));
++ }
++
++ if (node->reference != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, node->reference));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_NODE_Dereference(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node
++ )
++{
++ gctINT32 oldValue = 0;
++ gctPOINTER database = Kernel->db->nameDatabase;
++ gctPOINTER mutex = Kernel->db->nameDatabaseMutex;
++
++ gcmkHEADER_ARG("Kernel=0x%X Node=0x%X", Kernel, Node);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++
++ gcmkVERIFY_OK(gckOS_AtomDecrement(Kernel->os, Node->reference, &oldValue));
++
++ if (oldValue == 1 && Node->name)
++ {
++ /* Free name if exists. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Node->name));
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++
++ if (oldValue == 1)
++ {
++ /* Free gcuVIDMEM_NODE. */
++ gcmkVERIFY_OK(gckVIDMEM_Free(Kernel, Node->node));
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Node->reference));
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Node->mapMutex));
++#endif
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Node->mutex));
++ gcmkOS_SAFE_FREE(Kernel->os, Node);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_NODE_Name
++**
++** Naming a gckVIDMEM_NODE object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 Handle
++** Handle to a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Name
++** Pointer to a variable receiving a name which can be pass to another
++** process.
++*/
++gceSTATUS
++gckVIDMEM_NODE_Name(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ IN gctUINT32 * Name
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctUINT32 name = 0;
++ gctUINT32 processID = 0;
++ gctPOINTER database = Kernel->db->nameDatabase;
++ gctPOINTER mutex = Kernel->db->nameDatabaseMutex;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL referenced = gcvFALSE;
++ gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node));
++ referenced = gcvTRUE;
++
++ if (node->name == 0)
++ {
++ /* Name this node. */
++ gcmkONERROR(gckKERNEL_AllocateIntegerId(database, node, &name));
++ node->name = name;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node));
++
++ if(node)
++ {
++ *Name = node->name;
++ }
++
++ gcmkFOOTER_ARG("*Name=%d", *Name);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (referenced)
++ {
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_NODE_Import
++**
++** Import a gckVIDMEM_NODE object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 Name
++** Name of a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Handle
++** Pointer to a variable receiving a handle represent this
++** gckVIDMEM_NODE in userspace.
++*/
++gceSTATUS
++gckVIDMEM_NODE_Import(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name,
++ IN gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctPOINTER database = Kernel->db->nameDatabase;
++ gctPOINTER mutex = Kernel->db->nameDatabaseMutex;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL referenced = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name);
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Lookup in database to get the node. */
++ gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, (gctPOINTER *)&node));
++
++ /* Reference the node. */
++ gcmkONERROR(gckVIDMEM_NODE_Reference(Kernel, node));
++ referenced = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ /* Allocate a handle for current process. */
++ gcmkONERROR(gckVIDMEM_HANDLE_Allocate(Kernel, node, Handle));
++
++ gcmkFOOTER_ARG("*Handle=%d", *Handle);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (referenced)
++ {
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++typedef struct _gcsVIDMEM_NODE_FDPRIVATE
++{
++ gcsFDPRIVATE base;
++ gckKERNEL kernel;
++ gckVIDMEM_NODE node;
++}
++gcsVIDMEM_NODE_FDPRIVATE;
++
++
++static gctINT
++_ReleaseFdPrivate(
++ gcsFDPRIVATE_PTR FdPrivate
++ )
++{
++ /* Cast private info. */
++ gcsVIDMEM_NODE_FDPRIVATE * private = (gcsVIDMEM_NODE_FDPRIVATE *) FdPrivate;
++
++ gckVIDMEM_NODE_Dereference(private->kernel, private->node);
++ gckOS_Free(private->kernel->os, private);
++
++ return 0;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_NODE_GetFd
++**
++** Attach a gckVIDMEM_NODE object to a native fd.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 Handle
++** Handle to a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Fd
++** Pointer to a variable receiving a native fd from os.
++*/
++gceSTATUS
++gckVIDMEM_NODE_GetFd(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ OUT gctINT * Fd
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctBOOL referenced = gcvFALSE;
++ gcsVIDMEM_NODE_FDPRIVATE * fdPrivate = gcvNULL;
++ gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle);
++
++ /* Query and reference handle. */
++ gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node));
++ referenced = gcvTRUE;
++
++ /* Allocate memory for private info. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(gcsVIDMEM_NODE_FDPRIVATE),
++ (gctPOINTER *)&fdPrivate
++ ));
++
++ fdPrivate->base.release = _ReleaseFdPrivate;
++ fdPrivate->kernel = Kernel;
++ fdPrivate->node = node;
++
++ /* Allocated fd owns a reference. */
++ gcmkONERROR(gckOS_GetFd("vidmem", &fdPrivate->base, Fd));
++
++ gcmkFOOTER_ARG("*Fd=%d", *Fd);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (referenced)
++ {
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node));
++ }
++
++ if (fdPrivate)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, fdPrivate));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_base.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_base.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_base.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_base.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,5520 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_base_h_
++#define __gc_hal_base_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++#include "gc_hal_dump.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckOS * gckOS;
++typedef struct _gcoHAL * gcoHAL;
++typedef struct _gcoOS * gcoOS;
++typedef struct _gco2D * gco2D;
++typedef struct gcsATOM * gcsATOM_PTR;
++
++#if gcdENABLE_3D
++typedef struct _gco3D * gco3D;
++typedef struct _gcoCL * gcoCL;
++typedef struct _gcsFAST_FLUSH * gcsFAST_FLUSH_PTR;
++#endif
++
++typedef struct _gcoSURF * gcoSURF;
++typedef struct _gcsSURF_INFO * gcsSURF_INFO_PTR;
++typedef struct _gcsSURF_NODE * gcsSURF_NODE_PTR;
++typedef struct _gcsSURF_FORMAT_INFO * gcsSURF_FORMAT_INFO_PTR;
++typedef struct _gcsPOINT * gcsPOINT_PTR;
++typedef struct _gcsSIZE * gcsSIZE_PTR;
++typedef struct _gcsRECT * gcsRECT_PTR;
++typedef struct _gcsBOUNDARY * gcsBOUNDARY_PTR;
++typedef struct _gcoDUMP * gcoDUMP;
++typedef struct _gcoHARDWARE * gcoHARDWARE;
++typedef union _gcuVIDMEM_NODE * gcuVIDMEM_NODE_PTR;
++typedef struct _gcsVIDMEM_NODE * gckVIDMEM_NODE;
++
++#if gcdENABLE_VG
++typedef struct _gcoVG * gcoVG;
++typedef struct _gcsCOMPLETION_SIGNAL * gcsCOMPLETION_SIGNAL_PTR;
++typedef struct _gcsCONTEXT_MAP * gcsCONTEXT_MAP_PTR;
++#else
++typedef void * gcoVG;
++#endif
++
++#if gcdSYNC
++typedef struct _gcoFENCE * gcoFENCE;
++typedef struct _gcsSYNC_CONTEXT * gcsSYNC_CONTEXT_PTR;
++#endif
++
++#if defined(ANDROID)
++typedef struct _gcoOS_SymbolsList gcoOS_SymbolsList;
++#endif
++
++/******************************************************************************\
++******************************* Process local storage *************************
++\******************************************************************************/
++
++typedef struct _gcsPLS * gcsPLS_PTR;
++
++#if gcdENABLE_3D
++/******************************************************************************
++**
++** Patch defines which should be moved to dedicate file later
++**
++** !!! ALWAYS ADD new ID in the TAIL, otherwise will break existing TRACE FILE
++*******************************************************************************/
++typedef enum _gcePATCH_ID
++{
++ gcvPATCH_NOTINIT = -1,
++ gcvPATCH_INVALID = 0,
++
++#if gcdDEBUG_OPTION
++ gcvPATCH_DEBUG,
++#endif
++
++ gcvPATCH_GTFES30,
++ gcvPATCH_CTGL11,
++ gcvPATCH_CTGL20,
++ gcvPATCH_GLBM11,
++ gcvPATCH_GLBM21,
++ gcvPATCH_GLBM25,
++ gcvPATCH_GLBM27,
++ gcvPATCH_GLBMGUI,
++ gcvPATCH_GFXBENCH,
++ gcvPATCH_ANTUTU, /* Antutu 3.x */
++ gcvPATCH_ANTUTU4X, /* Antutu 4.x */
++ gcvPATCH_QUADRANT,
++ gcvPATCH_GPUBENCH,
++ gcvPATCH_DUOKAN,
++ gcvPATCH_GLOFTSXHM,
++ gcvPATCH_XRUNNER,
++ gcvPATCH_BUSPARKING3D,
++ gcvPATCH_SIEGECRAFT,
++ gcvPATCH_PREMIUM,
++ gcvPATCH_RACEILLEGAL,
++ gcvPATCH_MEGARUN,
++ gcvPATCH_BMGUI,
++ gcvPATCH_NENAMARK,
++ gcvPATCH_NENAMARK2,
++ gcvPATCH_FISHNOODLE,
++ gcvPATCH_MM06,
++ gcvPATCH_MM07,
++ gcvPATCH_BM21,
++ gcvPATCH_SMARTBENCH,
++ gcvPATCH_JPCT,
++ gcvPATCH_NEOCORE,
++ gcvPATCH_RTESTVA,
++ gcvPATCH_NBA2013,
++ gcvPATCH_BARDTALE,
++ gcvPATCH_F18,
++ gcvPATCH_CARPARK,
++ gcvPATCH_CARCHALLENGE,
++ gcvPATCH_HEROESCALL,
++ gcvPATCH_GLOFTF3HM,
++ gcvPATCH_CRAZYRACING,
++ gcvPATCH_FIREFOX,
++ gcvPATCH_CHROME,
++ gcvPATCH_MONOPOLY,
++ gcvPATCH_SNOWCOLD,
++ gcvPATCH_BM3,
++ gcvPATCH_BASEMARKX,
++ gcvPATCH_DEQP,
++ gcvPATCH_SF4,
++ gcePATCH_MGOHEAVEN2,
++ gcePATCH_SILIBILI,
++ gcePATCH_ELEMENTSDEF,
++ gcePATCH_GLOFTKRHM,
++ gcvPATCH_OCLCTS,
++ gcvPATCH_A8HP,
++ gcvPATCH_A8CN,
++ gcvPATCH_WISTONESG,
++ gcvPATCH_SPEEDRACE,
++ gcvPATCH_FSBHAWAIIF,
++ gcvPATCH_AIRNAVY,
++ gcvPATCH_F18NEW,
++ gcvPATCH_CKZOMBIES2,
++ gcvPATCH_EADGKEEPER,
++ gcvPATCH_BASEMARK2V2,
++ gcvPATCH_RIPTIDEGP2,
++ gcvPATCH_OESCTS,
++ gcvPATCH_GANGSTAR,
++ gcvPATCH_WHRKYZIXOVAN,
++ gcvPATCH_NAMESGAS,
++ gcvPATCH_AFTERBURNER,
++ gcvPATCH_UIMARK,
++ gcvPATCH_FM_OES_PLAYER,
++ gcvPATCH_SUMSUNG_BENCH,
++ gcvPATCH_ROCKSTAR_MAXPAYNE,
++ gcvPATCH_TITANPACKING,
++ gcvPATCH_BASEMARKOSIICN,
++ gcvPATCH_FRUITNINJA,
++#if defined(ANDROID)
++ gcePATCH_ANDROID_CTS_MEDIA_PRESENTATIONTIME,
++#endif
++ gcvPATCH_ANDROID_COMPOSITOR,
++ gcvPATCH_CTS_TEXTUREVIEW,
++ gcvPATCH_WATER2_CHUKONG,
++
++ gcvPATCH_COUNT
++} gcePATCH_ID;
++#endif /* gcdENABLE_3D */
++
++typedef void (* gctPLS_DESTRUCTOR) (
++ gcsPLS_PTR
++ );
++
++typedef struct _gcsPLS
++{
++ /* Global objects. */
++ gcoOS os;
++ gcoHAL hal;
++
++ /* Internal memory pool. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctPOINTER internalLogical;
++
++ /* External memory pool. */
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctPOINTER externalLogical;
++
++ /* Contiguous memory pool. */
++ gctSIZE_T contiguousSize;
++ gctPHYS_ADDR contiguousPhysical;
++ gctPOINTER contiguousLogical;
++
++ /* EGL-specific process-wide objects. */
++ gctPOINTER eglDisplayInfo;
++ gctPOINTER eglSurfaceInfo;
++ gceSURF_FORMAT eglConfigFormat;
++
++ /* PLS reference count */
++ gcsATOM_PTR reference;
++
++    /* ProcessID of the constructor process */
++ gctUINT32 processID;
++
++    /* ThreadID of the constructor process. */
++ gctSIZE_T threadID;
++ /* Flag for calling module destructor. */
++ gctBOOL exiting;
++
++ gctBOOL bNeedSupportNP2Texture;
++
++ gctPLS_DESTRUCTOR destructor;
++ /* Mutex to guard PLS access. currently it's for EGL.
++ ** We can use this mutex for every PLS access.
++ */
++ gctPOINTER accessLock;
++#if gcdENABLE_3D
++ /* Global patchID to overwrite the detection */
++ gcePATCH_ID patchID;
++#endif
++}
++gcsPLS;
++
++extern gcsPLS gcPLS;
++
++#if gcdENABLE_3D
++#define gcPLS_INITIALIZER \
++{ \
++ gcvNULL, /* gcoOS object. */ \
++ gcvNULL, /* gcoHAL object. */ \
++ 0, /* internalSize */ \
++ gcvNULL, /* internalPhysical */ \
++ gcvNULL, /* internalLogical */ \
++ 0, /* externalSize */ \
++ gcvNULL, /* externalPhysical */ \
++ gcvNULL, /* externalLogical */ \
++ 0, /* contiguousSize */ \
++ gcvNULL, /* contiguousPhysical */ \
++ gcvNULL, /* contiguousLogical */ \
++ gcvNULL, /* eglDisplayInfo */ \
++ gcvNULL, /* eglSurfaceInfo */ \
++ gcvSURF_A8R8G8B8,/* eglConfigFormat */ \
++ gcvNULL, /* reference */ \
++ 0, /* processID */ \
++ 0, /* threadID */ \
++ gcvFALSE, /* exiting */ \
++ gcvFALSE, /* Special flag for NP2 texture. */ \
++ gcvNULL, /* destructor */ \
++ gcvNULL, /* accessLock */ \
++ gcvPATCH_NOTINIT,/* global patchID */ \
++}
++#else
++#define gcPLS_INITIALIZER \
++{ \
++ gcvNULL, /* gcoOS object. */ \
++ gcvNULL, /* gcoHAL object. */ \
++ 0, /* internalSize */ \
++ gcvNULL, /* internalPhysical */ \
++ gcvNULL, /* internalLogical */ \
++ 0, /* externalSize */ \
++ gcvNULL, /* externalPhysical */ \
++ gcvNULL, /* externalLogical */ \
++ 0, /* contiguousSize */ \
++ gcvNULL, /* contiguousPhysical */ \
++ gcvNULL, /* contiguousLogical */ \
++ gcvNULL, /* eglDisplayInfo */ \
++ gcvNULL, /* eglSurfaceInfo */ \
++ gcvSURF_A8R8G8B8,/* eglConfigFormat */ \
++ gcvNULL, /* reference */ \
++ 0, /* processID */ \
++ 0, /* threadID */ \
++ gcvFALSE, /* exiting */ \
++ gcvFALSE, /* Special flag for NP2 texture. */ \
++ gcvNULL, /* destructor */ \
++ gcvNULL, /* accessLock */ \
++}
++#endif
++
++/******************************************************************************\
++******************************* Thread local storage *************************
++\******************************************************************************/
++
++typedef struct _gcsTLS * gcsTLS_PTR;
++
++typedef void (* gctTLS_DESTRUCTOR) (
++ gcsTLS_PTR
++ );
++
++typedef struct _gcsTLS
++{
++ gceHARDWARE_TYPE currentType;
++
++    /* Current 3D hardware of this thread */
++ gcoHARDWARE currentHardware;
++
++ /* Default 3D hardware of this thread */
++ gcoHARDWARE defaultHardware;
++
++ /* Only for separated 3D and 2D */
++ gcoHARDWARE hardware2D;
++#if gcdENABLE_VG
++ gcoVGHARDWARE vg;
++ gcoVG engineVG;
++#endif /* gcdENABLE_VG */
++#if gcdENABLE_3D
++ gco3D engine3D;
++#endif
++#if gcdENABLE_2D
++ gco2D engine2D;
++#endif
++
++ /*thread data */
++ gctPOINTER context;
++ /* ES(including es1 and es2) client driver context which is current state */
++ gctPOINTER esClientCtx;
++ gctTLS_DESTRUCTOR destructor;
++
++ gctBOOL copied;
++
++ /* libGAL.so handle */
++ gctHANDLE handle;
++
++    /* If true, do not release 2d engine and hardware in hal layer */
++ gctBOOL release2DUpper;
++}
++gcsTLS;
++
++/******************************************************************************\
++********************************* Enumerations *********************************
++\******************************************************************************/
++
++typedef enum _gcePLS_VALUE
++{
++ gcePLS_VALUE_EGL_DISPLAY_INFO,
++ gcePLS_VALUE_EGL_SURFACE_INFO,
++ gcePLS_VALUE_EGL_CONFIG_FORMAT_INFO,
++ gcePLS_VALUE_EGL_DESTRUCTOR_INFO,
++}
++gcePLS_VALUE;
++
++/* Video memory pool type. */
++typedef enum _gcePOOL
++{
++ gcvPOOL_UNKNOWN = 0,
++ gcvPOOL_DEFAULT,
++ gcvPOOL_LOCAL,
++ gcvPOOL_LOCAL_INTERNAL,
++ gcvPOOL_LOCAL_EXTERNAL,
++ gcvPOOL_UNIFIED,
++ gcvPOOL_SYSTEM,
++ gcvPOOL_VIRTUAL,
++ gcvPOOL_USER,
++ gcvPOOL_CONTIGUOUS,
++
++ gcvPOOL_NUMBER_OF_POOLS
++}
++gcePOOL;
++
++#if gcdENABLE_3D
++/* Blending functions. */
++typedef enum _gceBLEND_FUNCTION
++{
++ gcvBLEND_ZERO,
++ gcvBLEND_ONE,
++ gcvBLEND_SOURCE_COLOR,
++ gcvBLEND_INV_SOURCE_COLOR,
++ gcvBLEND_SOURCE_ALPHA,
++ gcvBLEND_INV_SOURCE_ALPHA,
++ gcvBLEND_TARGET_COLOR,
++ gcvBLEND_INV_TARGET_COLOR,
++ gcvBLEND_TARGET_ALPHA,
++ gcvBLEND_INV_TARGET_ALPHA,
++ gcvBLEND_SOURCE_ALPHA_SATURATE,
++ gcvBLEND_CONST_COLOR,
++ gcvBLEND_INV_CONST_COLOR,
++ gcvBLEND_CONST_ALPHA,
++ gcvBLEND_INV_CONST_ALPHA,
++}
++gceBLEND_FUNCTION;
++
++/* Blending modes. */
++typedef enum _gceBLEND_MODE
++{
++ gcvBLEND_ADD,
++ gcvBLEND_SUBTRACT,
++ gcvBLEND_REVERSE_SUBTRACT,
++ gcvBLEND_MIN,
++ gcvBLEND_MAX,
++}
++gceBLEND_MODE;
++
++/* Depth modes. */
++typedef enum _gceDEPTH_MODE
++{
++ gcvDEPTH_NONE,
++ gcvDEPTH_Z,
++ gcvDEPTH_W,
++}
++gceDEPTH_MODE;
++#endif /* gcdENABLE_3D */
++
++#if (gcdENABLE_3D || gcdENABLE_VG)
++/* API flags. */
++typedef enum _gceAPI
++{
++ gcvAPI_D3D = 1,
++ gcvAPI_OPENGL_ES11,
++ gcvAPI_OPENGL_ES20,
++ gcvAPI_OPENGL_ES30,
++ gcvAPI_OPENGL,
++ gcvAPI_OPENVG,
++ gcvAPI_OPENCL,
++}
++gceAPI;
++#endif
++
++
++typedef enum _gceWHERE
++{
++ gcvWHERE_COMMAND,
++ gcvWHERE_RASTER,
++ gcvWHERE_PIXEL,
++}
++gceWHERE;
++
++typedef enum _gceHOW
++{
++ gcvHOW_SEMAPHORE = 0x1,
++ gcvHOW_STALL = 0x2,
++ gcvHOW_SEMAPHORE_STALL = 0x3,
++}
++gceHOW;
++
++typedef enum _gceSignalHandlerType
++{
++ gcvHANDLE_SIGFPE_WHEN_SIGNAL_CODE_IS_0 = 0x1,
++}
++gceSignalHandlerType;
++
++/* gcsHAL_Limits*/
++typedef struct _gcsHAL_LIMITS
++{
++ /* chip info */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 featureCount;
++ gctUINT32 *chipFeatures;
++
++ /* target caps */
++ gctUINT32 maxWidth;
++ gctUINT32 maxHeight;
++ gctUINT32 multiTargetCount;
++ gctUINT32 maxSamples;
++
++}gcsHAL_LIMITS;
++
++/******************************************************************************\
++*********** Generic Memory Allocation Optimization Using Containers ************
++\******************************************************************************/
++
++/* Generic container definition. */
++typedef struct _gcsCONTAINER_LINK * gcsCONTAINER_LINK_PTR;
++typedef struct _gcsCONTAINER_LINK
++{
++ /* Points to the next container. */
++ gcsCONTAINER_LINK_PTR next;
++}
++gcsCONTAINER_LINK;
++
++typedef struct _gcsCONTAINER_RECORD * gcsCONTAINER_RECORD_PTR;
++typedef struct _gcsCONTAINER_RECORD
++{
++ gcsCONTAINER_RECORD_PTR prev;
++ gcsCONTAINER_RECORD_PTR next;
++}
++gcsCONTAINER_RECORD;
++
++typedef struct _gcsCONTAINER * gcsCONTAINER_PTR;
++typedef struct _gcsCONTAINER
++{
++ gctUINT containerSize;
++ gctUINT recordSize;
++ gctUINT recordCount;
++ gcsCONTAINER_LINK_PTR containers;
++ gcsCONTAINER_RECORD freeList;
++ gcsCONTAINER_RECORD allocList;
++}
++gcsCONTAINER;
++
++gceSTATUS
++gcsCONTAINER_Construct(
++ IN gcsCONTAINER_PTR Container,
++ gctUINT RecordsPerContainer,
++ gctUINT RecordSize
++ );
++
++gceSTATUS
++gcsCONTAINER_Destroy(
++ IN gcsCONTAINER_PTR Container
++ );
++
++gceSTATUS
++gcsCONTAINER_AllocateRecord(
++ IN gcsCONTAINER_PTR Container,
++ OUT gctPOINTER * Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeRecord(
++ IN gcsCONTAINER_PTR Container,
++ IN gctPOINTER Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeAll(
++ IN gcsCONTAINER_PTR Container
++ );
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++/* Construct a new gcoHAL object. */
++gceSTATUS
++gcoHAL_ConstructEx(
++ IN gctPOINTER Context,
++ IN gcoOS Os,
++ OUT gcoHAL * Hal
++ );
++
++/* Destroy an gcoHAL object. */
++gceSTATUS
++gcoHAL_DestroyEx(
++ IN gcoHAL Hal
++ );
++
++/* Empty function for compatibility. */
++gceSTATUS
++gcoHAL_Construct(
++ IN gctPOINTER Context,
++ IN gcoOS Os,
++ OUT gcoHAL * Hal
++ );
++
++/* Empty function for compatibility. */
++gceSTATUS
++gcoHAL_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Get HAL options */
++gceSTATUS
++gcoHAL_GetOption(
++ IN gcoHAL Hal,
++ IN gceOPTION Option
++ );
++
++gceSTATUS
++gcoHAL_FrameInfoOps(
++ IN gcoHAL Hal,
++ IN gceFRAMEINFO FrameInfo,
++ IN gceFRAMEINFO_OP Op,
++ IN OUT gctUINT * Val
++ );
++
++
++gceSTATUS
++gcoHAL_GetHardware(
++ IN gcoHAL Hal,
++ OUT gcoHARDWARE* Hw
++ );
++
++#if gcdENABLE_2D
++/* Get pointer to gco2D object. */
++gceSTATUS
++gcoHAL_Get2DEngine(
++ IN gcoHAL Hal,
++ OUT gco2D * Engine
++ );
++#endif
++
++#if gcdENABLE_3D
++gceSTATUS
++gcoHAL_GetSpecialHintData(
++ IN gcoHAL Hal,
++ OUT gctINT * Hint
++ );
++/*
++** Deprecated(Don't use it), keep it here for external library(libgcu.so)
++*/
++gceSTATUS
++gcoHAL_Get3DEngine(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++#endif /* gcdENABLE_3D */
++
++
++gceSTATUS
++gcoHAL_GetProductName(
++ IN gcoHAL Hal,
++ OUT gctSTRING *ProductName
++ );
++
++gceSTATUS
++gcoHAL_SetFscaleValue(
++ IN gctUINT FscaleValue
++ );
++
++gceSTATUS
++gcoHAL_GetFscaleValue(
++ OUT gctUINT * FscaleValue,
++ OUT gctUINT * MinFscaleValue,
++ OUT gctUINT * MaxFscaleValue
++ );
++
++gceSTATUS
++gcoHAL_SetBltNP2Texture(
++ gctBOOL enable
++ );
++
++gceSTATUS
++gcoHAL_NameVideoMemory(
++ IN gctUINT32 Handle,
++ OUT gctUINT32 * Name
++ );
++
++gceSTATUS
++gcoHAL_ImportVideoMemory(
++ IN gctUINT32 Name,
++ OUT gctUINT32 * Handle
++ );
++
++gceSTATUS
++gcoHAL_GetVideoMemoryFd(
++ IN gctUINT32 Handle,
++ OUT gctINT * Fd
++ );
++
++/* Verify whether the specified feature is available in hardware. */
++gceSTATUS
++gcoHAL_IsFeatureAvailable(
++ IN gcoHAL Hal,
++ IN gceFEATURE Feature
++ );
++
++gceSTATUS
++gcoHAL_IsSwwaNeeded(
++ IN gcoHAL Hal,
++ IN gceSWWA Swwa
++ );
++
++gceSTATUS
++gcoHAL_IsFeatureAvailable1(
++ IN gcoHAL Hal,
++ IN gceFEATURE Feature
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gcoHAL_QueryChipIdentity(
++ IN gcoHAL Hal,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++/* Query the minor features of the hardware. */
++gceSTATUS gcoHAL_QueryChipMinorFeatures(
++ IN gcoHAL Hal,
++ OUT gctUINT32* NumFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++gctINT32
++gcoOS_EndRecordAllocation(void);
++void
++gcoOS_RecordAllocation(void);
++void
++gcoOS_AddRecordAllocation(gctSIZE_T Size);
++
++/* Query the amount of video memory. */
++gceSTATUS
++gcoHAL_QueryVideoMemory(
++ IN gcoHAL Hal,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Map video memory. */
++gceSTATUS
++gcoHAL_MapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap video memory. */
++gceSTATUS
++gcoHAL_UnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Schedule an unmap of a buffer mapped through its physical address. */
++gceSTATUS
++gcoHAL_ScheduleUnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Allocate video memory. */
++gceSTATUS
++gcoOS_AllocateVideoMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN gctBOOL InCacheable,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * Physical,
++ OUT gctPOINTER * Logical,
++ OUT gctPOINTER * Handle
++ );
++
++/* Free video memory. */
++gceSTATUS
++gcoOS_FreeVideoMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Handle
++ );
++
++/* Lock video memory. */
++gceSTATUS
++gcoOS_LockVideoMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Handle,
++ IN gctBOOL InUserSpace,
++ IN gctBOOL InCacheable,
++ OUT gctUINT32 * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoHAL_MapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR GPUAddress
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoHAL_UnmapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 GPUAddress
++ );
++
++/* Schedule an unmap of a user buffer using event mechanism. */
++gceSTATUS
++gcoHAL_ScheduleUnmapUserMemory(
++ IN gcoHAL Hal,
++ IN gctPOINTER Info,
++ IN gctSIZE_T Size,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory
++ );
++
++/* Commit the current command buffer. */
++gceSTATUS
++gcoHAL_Commit(
++ IN gcoHAL Hal,
++ IN gctBOOL Stall
++ );
++
++#if gcdENABLE_3D
++/* Send fence command. */
++gceSTATUS
++gcoHAL_SendFence(
++ IN gcoHAL Hal
++ );
++#endif /* gcdENABLE_3D */
++
++/* Query the tile capabilities. */
++gceSTATUS
++gcoHAL_QueryTiled(
++ IN gcoHAL Hal,
++ OUT gctINT32 * TileWidth2D,
++ OUT gctINT32 * TileHeight2D,
++ OUT gctINT32 * TileWidth3D,
++ OUT gctINT32 * TileHeight3D
++ );
++
++gceSTATUS
++gcoHAL_Compact(
++ IN gcoHAL Hal
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoHAL_ProfileStart(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_ProfileEnd(
++ IN gcoHAL Hal,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++/* Power Management */
++gceSTATUS
++gcoHAL_SetPowerManagementState(
++ IN gcoHAL Hal,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gcoHAL_QueryPowerManagementState(
++ IN gcoHAL Hal,
++ OUT gceCHIPPOWERSTATE *State
++ );
++
++/* Set the filter type for filter blit. */
++gceSTATUS
++gcoHAL_SetFilterType(
++ IN gcoHAL Hal,
++ IN gceFILTER_TYPE FilterType
++ );
++
++gceSTATUS
++gcoHAL_GetDump(
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++#if gcdENABLE_3D
++gceSTATUS
++gcoHAL_SetPatchID(
++ IN gcoHAL Hal,
++ IN gcePATCH_ID PatchID
++ );
++
++/* Get Patch ID based on process name */
++gceSTATUS
++gcoHAL_GetPatchID(
++ IN gcoHAL Hal,
++ OUT gcePATCH_ID * PatchID
++ );
++
++gceSTATUS
++gcoHAL_SetGlobalPatchID(
++ IN gcoHAL Hal,
++ IN gcePATCH_ID PatchID
++ );
++#endif /* gcdENABLE_3D */
++/* Call the kernel HAL layer. */
++gceSTATUS
++gcoHAL_Call(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++/* Schedule an event. */
++gceSTATUS
++gcoHAL_ScheduleEvent(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++/* Destroy a surface. */
++gceSTATUS
++gcoHAL_DestroySurface(
++ IN gcoHAL Hal,
++ IN gcoSURF Surface
++ );
++
++/* Request a start/stop timestamp. */
++gceSTATUS
++gcoHAL_SetTimer(
++ IN gcoHAL Hal,
++ IN gctUINT32 Index,
++ IN gctBOOL Start
++ );
++
++/* Get Time delta from a Timer in microseconds. */
++gceSTATUS
++gcoHAL_GetTimerTime(
++ IN gcoHAL Hal,
++ IN gctUINT32 Timer,
++ OUT gctINT32_PTR TimeDelta
++ );
++
++/* set timeout value. */
++gceSTATUS
++gcoHAL_SetTimeOut(
++ IN gcoHAL Hal,
++ IN gctUINT32 timeOut
++ );
++
++gceSTATUS
++gcoHAL_SetHardwareType(
++ IN gcoHAL Hal,
++ IN gceHARDWARE_TYPE HardwardType
++ );
++
++gceSTATUS
++gcoHAL_GetHardwareType(
++ IN gcoHAL Hal,
++ OUT gceHARDWARE_TYPE * HardwardType
++ );
++
++gceSTATUS
++gcoHAL_QueryChipCount(
++ IN gcoHAL Hal,
++ OUT gctINT32 * Count
++ );
++
++gceSTATUS
++gcoHAL_Query3DCoreCount(
++ IN gcoHAL Hal,
++ OUT gctUINT32 *Count
++ );
++
++gceSTATUS
++gcoHAL_QuerySeparated2D(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_Is3DAvailable(
++ IN gcoHAL Hal
++ );
++
++/* Get pointer to gcoVG object. */
++gceSTATUS
++gcoHAL_GetVGEngine(
++ IN gcoHAL Hal,
++ OUT gcoVG * Engine
++ );
++
++gceSTATUS
++gcoHAL_QueryChipLimits(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ IN gctINT32 Mask,
++ OUT gcsHAL_LIMITS *Limits);
++
++gceSTATUS
++gcoHAL_QueryChipFeature(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ IN gctINT32 Mask,
++ IN gceFEATURE Feature);
++
++/*----------------------------------------------------------------------------*/
++/*----- Shared Buffer --------------------------------------------------------*/
++
++/* Create shared buffer. */
++gceSTATUS
++gcoHAL_CreateShBuffer(
++ IN gctUINT32 Size,
++ OUT gctSHBUF * ShBuf
++ );
++
++/* Destroy shared buffer. */
++gceSTATUS
++gcoHAL_DestroyShBuffer(
++ IN gctSHBUF ShBuf
++ );
++
++/* Map shared buffer to current process. */
++gceSTATUS
++gcoHAL_MapShBuffer(
++ IN gctSHBUF ShBuf
++ );
++
++/* Write user data to shared buffer. */
++gceSTATUS
++gcoHAL_WriteShBuffer(
++ IN gctSHBUF ShBuf,
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 ByteCount
++ );
++
++/* Read user data from shared buffer. */
++gceSTATUS
++gcoHAL_ReadShBuffer(
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER Data,
++ IN gctUINT32 BytesCount,
++ OUT gctUINT32 * BytesRead
++ );
++
++/* Config power management to be enabled or disabled. */
++gceSTATUS
++gcoHAL_ConfigPowerManagement(
++ IN gctBOOL Enable
++ );
++
++#if gcdENABLE_3D || gcdENABLE_VG
++/* Query the target capabilities. */
++gceSTATUS
++gcoHAL_QueryTargetCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MultiTargetCount,
++ OUT gctUINT * MaxSamples
++ );
++#endif
++
++/******************************************************************************\
++********************************** gcoOS Object *********************************
++\******************************************************************************/
++/* Lock PLS access */
++gceSTATUS
++gcoOS_LockPLS(
++ void
++ );
++
++/* Unlock PLS access */
++gceSTATUS
++gcoOS_UnLockPLS(
++ void
++ );
++
++/* Get PLS value for given key */
++gctPOINTER
++gcoOS_GetPLSValue(
++ IN gcePLS_VALUE key
++ );
++
++/* Set PLS value of a given key */
++void
++gcoOS_SetPLSValue(
++ IN gcePLS_VALUE key,
++ OUT gctPOINTER value
++ );
++
++/* Get access to the thread local storage. */
++gceSTATUS
++gcoOS_GetTLS(
++ OUT gcsTLS_PTR * TLS
++ );
++
++ /* Copy the TLS from a source thread. */
++ gceSTATUS gcoOS_CopyTLS(IN gcsTLS_PTR Source);
++
++/* Destroy the objects associated with the current thread. */
++void
++gcoOS_FreeThreadData(
++ void
++ );
++
++/* Empty function for compatibility. */
++gceSTATUS
++gcoOS_Construct(
++ IN gctPOINTER Context,
++ OUT gcoOS * Os
++ );
++
++/* Empty function for compatibility. */
++gceSTATUS
++gcoOS_Destroy(
++ IN gcoOS Os
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gcoOS_GetBaseAddress(
++ IN gcoOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gcoOS_Allocate(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Get allocated memory size. */
++gceSTATUS
++gcoOS_GetMemorySize(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gcoOS_Free(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoOS_AllocateSharedMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoOS_FreeSharedMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoOS_AllocateMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoOS_FreeMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gcoOS_AllocateContiguous(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gcoOS_FreeContiguous(
++ IN gcoOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemoryEx(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoOS_UnmapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/* Device I/O Control call to the kernel HAL layer. */
++gceSTATUS
++gcoOS_DeviceControl(
++ IN gcoOS Os,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ IN gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/* Allocate non paged memory. */
++gceSTATUS
++gcoOS_AllocateNonPagedMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non paged memory. */
++gceSTATUS
++gcoOS_FreeNonPagedMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++#define gcmOS_SAFE_FREE(os, mem) \
++ gcoOS_Free(os, mem); \
++ mem = gcvNULL
++
++#define gcmOS_SAFE_FREE_SHARED_MEMORY(os, mem) \
++ gcoOS_FreeSharedMemory(os, mem); \
++ mem = gcvNULL
++
++#define gcmkOS_SAFE_FREE(os, mem) \
++ gckOS_Free(os, mem); \
++ mem = gcvNULL
++
++typedef enum _gceFILE_MODE
++{
++ gcvFILE_CREATE = 0,
++ gcvFILE_APPEND,
++ gcvFILE_READ,
++ gcvFILE_CREATETEXT,
++ gcvFILE_APPENDTEXT,
++ gcvFILE_READTEXT,
++}
++gceFILE_MODE;
++
++/* Open a file. */
++gceSTATUS
++gcoOS_Open(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ IN gceFILE_MODE Mode,
++ OUT gctFILE * File
++ );
++
++/* Close a file. */
++gceSTATUS
++gcoOS_Close(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Read data from a file. */
++gceSTATUS
++gcoOS_Read(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T * ByteRead
++ );
++
++/* Write data to a file. */
++gceSTATUS
++gcoOS_Write(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data to a file. */
++gceSTATUS
++gcoOS_Flush(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Close a file descriptor. */
++gceSTATUS
++gcoOS_CloseFD(
++ IN gcoOS Os,
++ IN gctINT FD
++ );
++
++/* Dup file descriptor to another. */
++gceSTATUS
++gcoOS_DupFD(
++ IN gcoOS Os,
++ IN gctINT FD,
++ OUT gctINT * FD2
++ );
++
++/* Create an endpoint for communication. */
++gceSTATUS
++gcoOS_Socket(
++ IN gcoOS Os,
++ IN gctINT Domain,
++ IN gctINT Type,
++ IN gctINT Protocol,
++ OUT gctINT *SockFd
++ );
++
++/* Close a socket. */
++gceSTATUS
++gcoOS_CloseSocket(
++ IN gcoOS Os,
++ IN gctINT SockFd
++ );
++
++/* Initiate a connection on a socket. */
++gceSTATUS
++gcoOS_Connect(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctCONST_POINTER HostName,
++ IN gctUINT Port);
++
++/* Shut down part of connection on a socket. */
++gceSTATUS
++gcoOS_Shutdown(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT How
++ );
++
++/* Send a message on a socket. */
++gceSTATUS
++gcoOS_Send(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data,
++ IN gctINT Flags
++ );
++
++/* Initiate a connection on a socket. */
++gceSTATUS
++gcoOS_WaitForSend(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT Seconds,
++ IN gctINT MicroSeconds);
++
++/* Get environment variable value. */
++gceSTATUS
++gcoOS_GetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ OUT gctSTRING * Value
++ );
++
++/* Set environment variable value. */
++gceSTATUS
++gcoOS_SetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ IN gctSTRING Value
++ );
++
++/* Get current working directory. */
++gceSTATUS
++gcoOS_GetCwd(
++ IN gcoOS Os,
++ IN gctINT SizeInBytes,
++ OUT gctSTRING Buffer
++ );
++
++/* Get file status info. */
++gceSTATUS
++gcoOS_Stat(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ OUT gctPOINTER Buffer
++ );
++
++typedef enum _gceFILE_WHENCE
++{
++ gcvFILE_SEEK_SET,
++ gcvFILE_SEEK_CUR,
++ gcvFILE_SEEK_END
++}
++gceFILE_WHENCE;
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_Seek(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Offset,
++ IN gceFILE_WHENCE Whence
++ );
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_SetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Position
++ );
++
++/* Get the current position of a file. */
++gceSTATUS
++gcoOS_GetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ OUT gctUINT32 * Position
++ );
++
++/* Same as strstr. */
++gceSTATUS
++gcoOS_StrStr(
++ IN gctCONST_STRING String,
++ IN gctCONST_STRING SubString,
++ OUT gctSTRING * Output
++ );
++
++/* Find the last occurrence of a character inside a string. */
++gceSTATUS
++gcoOS_StrFindReverse(
++ IN gctCONST_STRING String,
++ IN gctINT8 Character,
++ OUT gctSTRING * Output
++ );
++
++gceSTATUS
++gcoOS_StrDup(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ OUT gctSTRING * Target
++ );
++
++/* Copy a string. */
++gceSTATUS
++gcoOS_StrCopySafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Append a string. */
++gceSTATUS
++gcoOS_StrCatSafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Compare two strings. */
++gceSTATUS
++gcoOS_StrCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2
++ );
++
++/* Compare characters of two strings. */
++gceSTATUS
++gcoOS_StrNCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2,
++ IN gctSIZE_T Count
++ );
++
++/* Convert string to float. */
++gceSTATUS
++gcoOS_StrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert hex string to integer. */
++gceSTATUS gcoOS_HexStrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++/* Convert hex string to float. */
++gceSTATUS
++gcoOS_HexStrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert string to integer. */
++gceSTATUS
++gcoOS_StrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++gceSTATUS
++gcoOS_MemCmp(
++ IN gctCONST_POINTER Memory1,
++ IN gctCONST_POINTER Memory2,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_PrintStrSafe(
++ OUT gctSTRING String,
++ IN gctSIZE_T StringSize,
++ IN OUT gctUINT * Offset,
++ IN gctCONST_STRING Format,
++ ...
++ );
++
++gceSTATUS
++gcoOS_LoadLibrary(
++ IN gcoOS Os,
++ IN gctCONST_STRING Library,
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeLibrary(
++ IN gcoOS Os,
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_GetProcAddress(
++ IN gcoOS Os,
++ IN gctHANDLE Handle,
++ IN gctCONST_STRING Name,
++ OUT gctPOINTER * Function
++ );
++
++gceSTATUS
++gcoOS_Compact(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_AddSignalHandler (
++ IN gceSignalHandlerType SignalHandlerType
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoOS_ProfileStart(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_ProfileEnd(
++ IN gcoOS Os,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_SetProfileSetting(
++ IN gcoOS Os,
++ IN gctBOOL Enable,
++ IN gctCONST_STRING FileName
++ );
++#endif
++
++/* Query the video memory. */
++gceSTATUS
++gcoOS_QueryVideoMemory(
++ IN gcoOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Detect if the process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByNamePid(
++ IN gctCONST_STRING Name,
++ IN gctHANDLE Pid
++ );
++
++/* Detect if the current process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByName(
++ IN gctCONST_STRING Name
++ );
++
++gceSTATUS
++gcoOS_DetectProcessByEncryptedName(
++ IN gctCONST_STRING Name
++ );
++
++#if defined(ANDROID)
++gceSTATUS
++gcoOS_DetectProgrameByEncryptedSymbols(
++ IN gcoOS_SymbolsList Symbols
++ );
++#endif
++
++/*----------------------------------------------------------------------------*/
++/*----- Atoms ----------------------------------------------------------------*/
++
++/* Construct an atom. */
++gceSTATUS
++gcoOS_AtomConstruct(
++ IN gcoOS Os,
++ OUT gcsATOM_PTR * Atom
++ );
++
++/* Destroy an atom. */
++gceSTATUS
++gcoOS_AtomDestroy(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom
++ );
++
++/* Get the 32-bit value protected by an atom. */
++gceSTATUS
++gcoOS_AtomGet(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/* Set the 32-bit value protected by an atom. */
++gceSTATUS
++gcoOS_AtomSet(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ IN gctINT32 Value
++ );
++
++/* Increment an atom. */
++gceSTATUS
++gcoOS_AtomIncrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++/* Decrement an atom. */
++gceSTATUS
++gcoOS_AtomDecrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++gctHANDLE
++gcoOS_GetCurrentProcessID(
++ void
++ );
++
++gctHANDLE
++gcoOS_GetCurrentThreadID(
++ void
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Time -----------------------------------------------------------------*/
++
++/* Get the number of milliseconds since the system started. */
++gctUINT32
++gcoOS_GetTicks(
++ void
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gcoOS_GetTime(
++ gctUINT64_PTR Time
++ );
++
++/* Get CPU usage in microseconds. */
++gceSTATUS
++gcoOS_GetCPUTime(
++ gctUINT64_PTR CPUTime
++ );
++
++/* Get memory usage. */
++gceSTATUS
++gcoOS_GetMemoryUsage(
++ gctUINT32_PTR MaxRSS,
++ gctUINT32_PTR IxRSS,
++ gctUINT32_PTR IdRSS,
++ gctUINT32_PTR IsRSS
++ );
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gcoOS_Delay(
++ IN gcoOS Os,
++ IN gctUINT32 Delay
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Threads --------------------------------------------------------------*/
++
++#ifdef _WIN32
++/* Cannot include windows.h here because "near" and "far"
++ * which are used in gcsDEPTH_INFO, are defined to nothing in WinDef.h.
++ * So, use the real value of DWORD and WINAPI, instead.
++ * DWORD is unsigned long, and WINAPI is __stdcall.
++ * If these two are changed in WinDef.h, the following two typedefs
++ * need to be changed, too.
++ */
++typedef unsigned long gctTHREAD_RETURN;
++typedef unsigned long (__stdcall * gcTHREAD_ROUTINE)(void * Argument);
++#else
++typedef void * gctTHREAD_RETURN;
++typedef void * (* gcTHREAD_ROUTINE)(void *);
++#endif
++
++/* Create a new thread. */
++gceSTATUS
++gcoOS_CreateThread(
++ IN gcoOS Os,
++ IN gcTHREAD_ROUTINE Worker,
++ IN gctPOINTER Argument,
++ OUT gctPOINTER * Thread
++ );
++
++/* Close a thread. */
++gceSTATUS
++gcoOS_CloseThread(
++ IN gcoOS Os,
++ IN gctPOINTER Thread
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Mutexes --------------------------------------------------------------*/
++
++/* Create a new mutex. */
++gceSTATUS
++gcoOS_CreateMutex(
++ IN gcoOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gcoOS_DeleteMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gcoOS_AcquireMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gcoOS_ReleaseMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Signals --------------------------------------------------------------*/
++
++/* Create a signal. */
++gceSTATUS
++gcoOS_CreateSignal(
++ IN gcoOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gcoOS_DestroySignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gcoOS_Signal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gcoOS_WaitSignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
++/* Map a signal from another process */
++gceSTATUS
++gcoOS_MapSignal(
++ IN gctSIGNAL RemoteSignal,
++ OUT gctSIGNAL * LocalSignal
++ );
++
++/* Unmap a signal mapped from another process */
++gceSTATUS
++gcoOS_UnmapSignal(
++ IN gctSIGNAL Signal
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Android Native Fence -------------------------------------------------*/
++
++/* Create sync point. */
++gceSTATUS
++gcoOS_CreateSyncPoint(
++ IN gcoOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++/* Destroy sync point. */
++gceSTATUS
++gcoOS_DestroySyncPoint(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++/* Create native fence. */
++gceSTATUS
++gcoOS_CreateNativeFence(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++/* Wait on native fence. */
++gceSTATUS
++gcoOS_WaitNativeFence(
++ IN gcoOS Os,
++ IN gctINT FenceFD,
++ IN gctUINT32 Timeout
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Memory Access and Cache ----------------------------------------------*/
++
++/* Write a register. */
++gceSTATUS
++gcoOS_WriteRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Read a register. */
++gceSTATUS
++gcoOS_ReadRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++gceSTATUS
++gcoOS_CacheClean(
++ IN gcoOS Os,
++ IN gctUINT32 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheFlush(
++ IN gcoOS Os,
++ IN gctUINT32 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheInvalidate(
++ IN gcoOS Os,
++ IN gctUINT32 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_MemoryBarrier(
++ IN gcoOS Os,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gcoOS_CPUPhysicalToGPUPhysical(
++ IN gctUINT32 CPUPhysical,
++ OUT gctUINT32_PTR GPUPhysical
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ );
++
++gceSTATUS
++gcoOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gcoOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++#define _gcmPROFILE_INIT(prefix, freq, start) \
++ do { \
++ prefix ## OS_QueryProfileTickRate(&(freq)); \
++ prefix ## OS_GetProfileTick(&(start)); \
++ } while (gcvFALSE)
++
++#define _gcmPROFILE_QUERY(prefix, start, ticks) \
++ do { \
++ prefix ## OS_GetProfileTick(&(ticks)); \
++ (ticks) = ((ticks) > (start)) ? ((ticks) - (start)) \
++ : (~0ull - (start) + (ticks) + 1); \
++ } while (gcvFALSE)
++
++#if gcdENABLE_PROFILING
++# define gcmkPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gck, freq, start)
++# define gcmkPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gck, start, ticks)
++# define gcmPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gco, freq, start)
++# define gcmPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gco, start, ticks)
++# define gcmPROFILE_ONLY(x) x
++# define gcmPROFILE_ELSE(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ONLY(x) x
++# define gcmPROFILE_DECLARE_ELSE(x) typedef x
++#else
++# define gcmkPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmkPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_ELSE(x) x
++# define gcmPROFILE_DECLARE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ELSE(x) x
++#endif
++
++/*******************************************************************************
++** gcoMATH object
++*/
++
++#define gcdPI 3.14159265358979323846f
++
++/* Kernel. */
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++/* User. */
++gctUINT32
++gcoMATH_Log2in5dot5(
++ IN gctINT X
++ );
++
++
++gctFLOAT
++gcoMATH_UIntAsFloat(
++ IN gctUINT32 X
++ );
++
++gctUINT32
++gcoMATH_FloatAsUInt(
++ IN gctFLOAT X
++ );
++
++gctBOOL
++gcoMATH_CompareEqualF(
++ IN gctFLOAT X,
++ IN gctFLOAT Y
++ );
++
++gctUINT16
++gcoMATH_UInt8AsFloat16(
++ IN gctUINT8 X
++ );
++
++gctUINT32
++gcoMATH_Float16ToFloat(
++ IN gctUINT16 In
++ );
++
++gctUINT16
++gcoMATH_FloatToFloat16(
++ IN gctUINT32 In
++ );
++
++gctUINT32
++gcoMATH_Float11ToFloat(
++ IN gctUINT32 In
++ );
++
++gctUINT16
++gcoMATH_FloatToFloat11(
++ IN gctUINT32 In
++ );
++
++gctUINT32
++gcoMATH_Float10ToFloat(
++ IN gctUINT32 In
++ );
++
++gctUINT16
++gcoMATH_FloatToFloat10(
++ IN gctUINT32 In
++ );
++
++gctUINT32
++gcoMATH_Float14ToFloat(
++ IN gctUINT16 In
++ );
++
++/******************************************************************************\
++**************************** Coordinate Structures *****************************
++\******************************************************************************/
++
++typedef struct _gcsPOINT
++{
++ gctINT32 x;
++ gctINT32 y;
++}
++gcsPOINT;
++
++typedef struct _gcsSIZE
++{
++ gctINT32 width;
++ gctINT32 height;
++}
++gcsSIZE;
++
++typedef struct _gcsRECT
++{
++ gctINT32 left;
++ gctINT32 top;
++ gctINT32 right;
++ gctINT32 bottom;
++}
++gcsRECT;
++
++typedef union _gcsPIXEL
++{
++ struct
++ {
++ gctFLOAT r, g, b, a;
++ gctFLOAT d, s;
++ } pf;
++
++ struct
++ {
++ gctINT32 r, g, b, a;
++ gctINT32 d, s;
++ } pi;
++
++ struct
++ {
++ gctUINT32 r, g, b, a;
++ gctUINT32 d, s;
++ } pui;
++
++} gcsPIXEL;
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- gcoSURF Common ------------------------------*/
++
++/* Color format classes. */
++typedef enum _gceFORMAT_CLASS
++{
++ gcvFORMAT_CLASS_RGBA = 4500,
++ gcvFORMAT_CLASS_YUV,
++ gcvFORMAT_CLASS_INDEX,
++ gcvFORMAT_CLASS_LUMINANCE,
++ gcvFORMAT_CLASS_BUMP,
++ gcvFORMAT_CLASS_DEPTH,
++ gcvFORMAT_CLASS_ASTC,
++ gcvFORMAT_CLASS_OTHER
++}
++gceFORMAT_CLASS;
++
++/* Color format data type */
++typedef enum _gceFORMAT_DATATYPE
++{
++ gcvFORMAT_DATATYPE_UNSIGNED_NORMALIZED,
++ gcvFORMAT_DATATYPE_SIGNED_NORMALIZED,
++ gcvFORMAT_DATATYPE_UNSIGNED_INTEGER,
++ gcvFORMAT_DATATYPE_SIGNED_INTEGER,
++ gcvFORMAT_DATATYPE_FLOAT16,
++ gcvFORMAT_DATATYPE_FLOAT32,
++ gcvFORMAT_DATATYPE_FLOAT_E5B9G9R9,
++ gcvFORMAT_DATATYPE_FLOAT_B10G11R11F,
++ gcvFORMAT_DATATYPE_INDEX,
++ gcvFORMAT_DATATYPE_SRGB,
++ gcvFORMAT_DATATYPE_FLOAT32_UINT,
++}
++gceFORMAT_DATATYPE;
++
++/* Special enums for width field in gcsFORMAT_COMPONENT. */
++typedef enum _gceCOMPONENT_CONTROL
++{
++ gcvCOMPONENT_NOTPRESENT = 0x00,
++ gcvCOMPONENT_DONTCARE = 0x80,
++ gcvCOMPONENT_WIDTHMASK = 0x7F,
++ gcvCOMPONENT_ODD = 0x80
++}
++gceCOMPONENT_CONTROL;
++
++/* Color format component parameters. */
++typedef struct _gcsFORMAT_COMPONENT
++{
++ gctUINT8 start;
++ gctUINT8 width;
++}
++gcsFORMAT_COMPONENT;
++
++/* RGBA color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_RGBA
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT red;
++ gcsFORMAT_COMPONENT green;
++ gcsFORMAT_COMPONENT blue;
++}
++gcsFORMAT_CLASS_TYPE_RGBA;
++
++/* YUV color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_YUV
++{
++ gcsFORMAT_COMPONENT y;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT v;
++}
++gcsFORMAT_CLASS_TYPE_YUV;
++
++/* Index color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_INDEX
++{
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_INDEX;
++
++/* Luminance color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_LUMINANCE
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_LUMINANCE;
++
++/* Bump map color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_BUMP
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT l;
++ gcsFORMAT_COMPONENT v;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT q;
++ gcsFORMAT_COMPONENT w;
++}
++gcsFORMAT_CLASS_TYPE_BUMP;
++
++/* Depth and stencil format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_DEPTH
++{
++ gcsFORMAT_COMPONENT depth;
++ gcsFORMAT_COMPONENT stencil;
++}
++gcsFORMAT_CLASS_TYPE_DEPTH;
++
++typedef union _gcuPIXEL_FORMAT_CLASS
++{
++ gcsFORMAT_CLASS_TYPE_BUMP bump;
++ gcsFORMAT_CLASS_TYPE_RGBA rgba;
++ gcsFORMAT_CLASS_TYPE_YUV yuv;
++ gcsFORMAT_CLASS_TYPE_LUMINANCE lum;
++ gcsFORMAT_CLASS_TYPE_INDEX index;
++ gcsFORMAT_CLASS_TYPE_DEPTH depth;
++}
++gcuPIXEL_FORMAT_CLASS;
++
++/* Format parameters. */
++typedef struct _gcsSURF_FORMAT_INFO
++{
++ /* Name of the format */
++ gctCONST_STRING formatName;
++
++ /* Format code and class. */
++ gceSURF_FORMAT format;
++ gceFORMAT_CLASS fmtClass;
++
++ /* Format data type */
++ gceFORMAT_DATATYPE fmtDataType;
++
++ /* The size of one pixel in bits. */
++ gctUINT8 bitsPerPixel;
++
++ /* Pixel block dimensions. */
++ gctUINT blockWidth;
++ gctUINT blockHeight;
++
++ /* Pixel block size in bits. */
++ gctUINT blockSize;
++
++ /* Some formats are larger than what the GPU can support. */
++ /* These formats are read in the number of layers specified. */
++ gctUINT8 layers;
++
++ /* The format is faked and software will interpret it differently
++ ** with HW. Most of them can't be blendable(PE) or filterable(TX).
++ */
++ gctBOOL fakedFormat;
++
++ /* Some formats have two neighbour pixels interleaved together. */
++ /* To describe such format, set the flag to 1 and add another */
++ /* like this one describing the odd pixel format. */
++ gctBOOL interleaved;
++
++ /* sRGB format. */
++ gctBOOL sRGB;
++
++ /* Format components. */
++ gcuPIXEL_FORMAT_CLASS u;
++
++ /* Format components. */
++ gcuPIXEL_FORMAT_CLASS uOdd;
++
++ /* Render format. */
++ gceSURF_FORMAT closestRenderFormat;
++ /*gctCLOSEST_FORMAT dynamicClosestRenderFormat;*/
++ gctUINT renderFormat;
++ const gceTEXTURE_SWIZZLE * pixelSwizzle;
++
++ /* Texture format. */
++ gceSURF_FORMAT closestTXFormat;
++ gctUINT txFormat;
++ const gceTEXTURE_SWIZZLE * txSwizzle;
++ gctBOOL txIntFilter;
++}
++gcsSURF_FORMAT_INFO;
++
++/* Frame buffer information. */
++typedef struct _gcsSURF_FRAMEBUFFER
++{
++ gctPOINTER logical;
++ gctUINT width, height;
++ gctINT stride;
++ gceSURF_FORMAT format;
++}
++gcsSURF_FRAMEBUFFER;
++
++/* Generic pixel component descriptors. */
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XXX8;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XX8X;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_X8XX;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_8XXX;
++
++typedef enum _gceORIENTATION
++{
++ gcvORIENTATION_TOP_BOTTOM,
++ gcvORIENTATION_BOTTOM_TOP,
++}
++gceORIENTATION;
++
++
++/* Construct a new gcoSURF object. */
++gceSTATUS
++gcoSURF_Construct(
++ IN gcoHAL Hal,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++/* Destroy a gcoSURF object. */
++gceSTATUS
++gcoSURF_Destroy(
++ IN gcoSURF Surface
++ );
++
++/* Map user-allocated surface. */
++gceSTATUS
++gcoSURF_MapUserSurface(
++ IN gcoSURF Surface,
++ IN gctUINT Alignment,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Wrap surface with known logical/GPU address */
++gceSTATUS
++gcoSURF_WrapSurface(
++ IN gcoSURF Surface,
++ IN gctUINT Alignment,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++
++/* Query vid mem node info. */
++gceSTATUS
++gcoSURF_QueryVidMemNode(
++ IN gcoSURF Surface,
++ OUT gctUINT32 * Node,
++ OUT gcePOOL * Pool,
++ OUT gctSIZE_T_PTR Bytes
++ );
++
++/* Set the color type of the surface. */
++gceSTATUS
++gcoSURF_SetColorType(
++ IN gcoSURF Surface,
++ IN gceSURF_COLOR_TYPE ColorType
++ );
++
++/* Get the color type of the surface. */
++gceSTATUS
++gcoSURF_GetColorType(
++ IN gcoSURF Surface,
++ OUT gceSURF_COLOR_TYPE *ColorType
++ );
++
++/* Set the color space of the surface. */
++gceSTATUS
++gcoSURF_SetColorSpace(
++ IN gcoSURF Surface,
++ IN gceSURF_COLOR_SPACE ColorSpace
++ );
++
++/* Get the color space of the surface. */
++gceSTATUS
++gcoSURF_GetColorSpace(
++ IN gcoSURF Surface,
++ OUT gceSURF_COLOR_SPACE *ColorSpace
++ );
++
++
++/* Set the surface rotation angle. */
++gceSTATUS
++gcoSURF_SetRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_IsValid(
++ IN gcoSURF Surface
++ );
++
++#if gcdENABLE_3D
++/* Verify and return the state of the tile status mechanism. */
++gceSTATUS
++gcoSURF_IsTileStatusSupported(
++ IN gcoSURF Surface
++ );
++
++/* Verify if surface has tile status enabled. */
++gceSTATUS
++gcoSURF_IsTileStatusEnabled(
++ IN gcoSURF Surface
++ );
++
++/* Verify if surface is compressed. */
++gceSTATUS
++gcoSURF_IsCompressed(
++ IN gcoSURF Surface
++ );
++
++/* Enable tile status for the specified surface on zero slot. */
++gceSTATUS
++gcoSURF_EnableTileStatus(
++ IN gcoSURF Surface
++ );
++
++/* Enable tile status for the specified surface on specified slot. */
++gceSTATUS
++gcoSURF_EnableTileStatusEx(
++ IN gcoSURF Surface,
++ IN gctUINT RtIndex
++ );
++
++/* Disable tile status for the specified surface. */
++gceSTATUS
++gcoSURF_DisableTileStatus(
++ IN gcoSURF Surface,
++ IN gctBOOL Decompress
++ );
++
++/* Flush tile status cache for the specified surface. */
++gceSTATUS
++gcoSURF_FlushTileStatus(
++ IN gcoSURF Surface,
++ IN gctBOOL Decompress
++ );
++#endif /* gcdENABLE_3D */
++
++/* Get surface size. */
++gceSTATUS
++gcoSURF_GetSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctUINT * Depth
++ );
++
++/* Get surface aligned sizes. */
++gceSTATUS
++gcoSURF_GetAlignedSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctINT * Stride
++ );
++
++/* Get alignments. */
++gceSTATUS
++gcoSURF_GetAlignment(
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT * AddressAlignment,
++ OUT gctUINT * XAlignment,
++ OUT gctUINT * YAlignment
++ );
++
++gceSTATUS
++gcoSURF_AlignResolveRect(
++ IN gcoSURF Surf,
++ IN gcsPOINT_PTR RectOrigin,
++ IN gcsPOINT_PTR RectSize,
++ OUT gcsPOINT_PTR AlignedOrigin,
++ OUT gcsPOINT_PTR AlignedSize
++ );
++
++/* Get surface type and format. */
++gceSTATUS
++gcoSURF_GetFormat(
++ IN gcoSURF Surface,
++ OUT OPTIONAL gceSURF_TYPE * Type,
++ OUT OPTIONAL gceSURF_FORMAT * Format
++ );
++
++/* Get surface information */
++gceSTATUS
++gcoSURF_GetFormatInfo(
++ IN gcoSURF Surface,
++ OUT gcsSURF_FORMAT_INFO_PTR * formatInfo
++ );
++
++/* Get Surface pack format */
++gceSTATUS
++gcoSURF_GetPackedFormat(
++ IN gcoSURF Surface,
++ OUT gceSURF_FORMAT * Format
++ );
++
++/* Get surface tiling. */
++gceSTATUS
++gcoSURF_GetTiling(
++ IN gcoSURF Surface,
++ OUT gceTILING * Tiling
++ );
++
++/* Get flip bitmap offset bytes. */
++gceSTATUS
++gcoSURF_GetFlipBitmapOffset(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR FlipBitmapOffset
++ );
++
++/* Get bottom buffer offset bytes. */
++gceSTATUS
++gcoSURF_GetBottomBufferOffset(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR BottomBufferOffset
++ );
++
++/* Lock the surface. */
++gceSTATUS
++gcoSURF_Lock(
++ IN gcoSURF Surface,
++ IN OUT gctUINT32 * Address,
++ IN OUT gctPOINTER * Memory
++ );
++
++/* Unlock the surface. */
++gceSTATUS
++gcoSURF_Unlock(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory
++ );
++
++/*. Query surface flags.*/
++gceSTATUS
++gcoSURF_QueryFlags(
++ IN gcoSURF Surface,
++ IN gceSURF_FLAG Flag
++ );
++
++/* Return pixel format parameters; Info is required to be a pointer to an
++ * array of at least two items because some formats have up to two records
++ * of description. */
++gceSTATUS
++gcoSURF_QueryFormat(
++ IN gceSURF_FORMAT Format,
++ OUT gcsSURF_FORMAT_INFO_PTR * Info
++ );
++
++/* Compute the color pixel mask. */
++gceSTATUS
++gcoSURF_ComputeColorMask(
++ IN gcsSURF_FORMAT_INFO_PTR Format,
++ OUT gctUINT32_PTR ColorMask
++ );
++
++/* Flush the surface. */
++gceSTATUS
++gcoSURF_Flush(
++ IN gcoSURF Surface
++ );
++
++/* Fill surface from it's tile status buffer. */
++gceSTATUS
++gcoSURF_FillFromTile(
++ IN gcoSURF Surface
++ );
++
++/* Fill surface with a value. */
++gceSTATUS
++gcoSURF_Fill(
++ IN gcoSURF Surface,
++ IN gcsPOINT_PTR Origin,
++ IN gcsSIZE_PTR Size,
++ IN gctUINT32 Value,
++ IN gctUINT32 Mask
++ );
++
++/* Alpha blend two surfaces together. */
++gceSTATUS
++gcoSURF_Blend(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrig,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsSIZE_PTR Size,
++ IN gceSURF_BLEND_MODE Mode
++ );
++
++/* Create a new gcoSURF wrapper object. */
++gceSTATUS
++gcoSURF_ConstructWrapper(
++ IN gcoHAL Hal,
++ OUT gcoSURF * Surface
++ );
++
++/* Set surface flags.*/
++gceSTATUS
++gcoSURF_SetFlags(
++ IN gcoSURF Surface,
++ IN gceSURF_FLAG Flag,
++ IN gctBOOL Value
++ );
++
++/* Set the underlying buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Stride,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Set the underlying video buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetVideoBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Stride,
++ IN gctPOINTER *LogicalPlane1,
++ IN gctUINT32 *PhysicalPlane1
++ );
++
++/* Set the size of the surface in pixels and map the underlying buffer. */
++gceSTATUS
++gcoSURF_SetWindow(
++ IN gcoSURF Surface,
++ IN gctUINT X,
++ IN gctUINT Y,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Set width/height alignment of the surface directly and calculate stride/size. This is only for dri backend now. Please be careful before use. */
++gceSTATUS
++gcoSURF_SetAlignment(
++ IN gcoSURF Surface,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Increase reference count of the surface. */
++gceSTATUS
++gcoSURF_ReferenceSurface(
++ IN gcoSURF Surface
++ );
++
++/* Get surface reference count. */
++gceSTATUS
++gcoSURF_QueryReferenceCount(
++ IN gcoSURF Surface,
++ OUT gctINT32 * ReferenceCount
++ );
++
++/* Set surface orientation. */
++gceSTATUS
++gcoSURF_SetOrientation(
++ IN gcoSURF Surface,
++ IN gceORIENTATION Orientation
++ );
++
++/* Query surface orientation. */
++gceSTATUS
++gcoSURF_QueryOrientation(
++ IN gcoSURF Surface,
++ OUT gceORIENTATION * Orientation
++ );
++
++gceSTATUS
++gcoSURF_SetOffset(
++ IN gcoSURF Surface,
++ IN gctSIZE_T Offset
++ );
++
++gceSTATUS
++gcoSURF_NODE_Cache(
++ IN gcsSURF_NODE_PTR Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Lock and unlock surface node */
++gceSTATUS
++gcoSURF_LockNode(
++ IN gcsSURF_NODE_PTR Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++gceSTATUS
++gcoSURF_UnLockNode(
++ IN gcsSURF_NODE_PTR Node,
++ IN gceSURF_TYPE Type
++ );
++
++/* Perform CPU cache operation on surface node */
++gceSTATUS
++gcoSURF_NODE_CPUCacheOperation(
++ IN gcsSURF_NODE_PTR Node,
++ IN gceSURF_TYPE Type,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Length,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Perform CPU cache operation on surface */
++gceSTATUS
++gcoSURF_CPUCacheOperation(
++ IN gcoSURF Surface,
++ IN gceCACHEOPERATION Operation
++ );
++
++
++gceSTATUS
++gcoSURF_Swap(
++ IN gcoSURF Surface1,
++ IN gcoSURF Surface2
++ );
++
++gceSTATUS
++gcoSURF_ResetSurWH(
++ IN gcoSURF Surface,
++ IN gctUINT oriw,
++ IN gctUINT orih,
++ IN gctUINT alignw,
++ IN gctUINT alignh,
++ IN gceSURF_FORMAT fmt
++);
++
++/* Update surface timestamp. */
++gceSTATUS
++gcoSURF_UpdateTimeStamp(
++ IN gcoSURF Surface
++ );
++
++/* Query surface current timestamp. */
++gceSTATUS
++gcoSURF_QueryTimeStamp(
++ IN gcoSURF Surface,
++ OUT gctUINT64 * TimeStamp
++ );
++
++/*
++ * Allocate shared buffer for this surface, so that
++ * surface states can be shared across processes.
++ */
++gceSTATUS
++gcoSURF_AllocShBuffer(
++ IN gcoSURF Surface,
++ OUT gctSHBUF * ShBuf
++ );
++
++/* Bind shared buffer to this surface */
++gceSTATUS
++gcoSURF_BindShBuffer(
++ IN gcoSURF Surface,
++ IN gctSHBUF ShBuf
++ );
++
++/* Push surface shared states to shared buffer. */
++gceSTATUS
++gcoSURF_PushSharedInfo(
++ IN gcoSURF Surface
++ );
++
++/* Pop shared states from shared buffer. */
++gceSTATUS
++gcoSURF_PopSharedInfo(
++ IN gcoSURF Surface
++ );
++
++#if (gcdENABLE_3D || gcdENABLE_VG)
++/* Copy surface. */
++gceSTATUS
++gcoSURF_Copy(
++ IN gcoSURF Surface,
++ IN gcoSURF Source
++ );
++
++/* Set number of samples for a gcoSURF object. */
++gceSTATUS
++gcoSURF_SetSamples(
++ IN gcoSURF Surface,
++ IN gctUINT Samples
++ );
++
++/* Get the number of samples per pixel. */
++gceSTATUS
++gcoSURF_GetSamples(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR Samples
++ );
++#endif
++
++/******************************************************************************\
++********************************* gcoDUMP Object ********************************
++\******************************************************************************/
++
++/* Construct a new gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Construct(
++ IN gcoOS Os,
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++/* Destroy a gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Destroy(
++ IN gcoDUMP Dump
++ );
++
++/* Enable/disable dumping. */
++gceSTATUS
++gcoDUMP_Control(
++ IN gcoDUMP Dump,
++ IN gctSTRING FileName
++ );
++
++gceSTATUS
++gcoDUMP_IsEnabled(
++ IN gcoDUMP Dump,
++ OUT gctBOOL * Enabled
++ );
++
++/* Add surface. */
++gceSTATUS
++gcoDUMP_AddSurface(
++ IN gcoDUMP Dump,
++ IN gctINT32 Width,
++ IN gctINT32 Height,
++ IN gceSURF_FORMAT PixelFormat,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount
++ );
++
++/* Mark the beginning of a frame. */
++gceSTATUS
++gcoDUMP_FrameBegin(
++ IN gcoDUMP Dump
++ );
++
++/* Mark the end of a frame. */
++gceSTATUS
++gcoDUMP_FrameEnd(
++ IN gcoDUMP Dump
++ );
++
++/* Dump data. */
++gceSTATUS
++gcoDUMP_DumpData(
++ IN gcoDUMP Dump,
++ IN gceDUMP_TAG Type,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Delete an address. */
++gceSTATUS
++gcoDUMP_Delete(
++ IN gcoDUMP Dump,
++ IN gctUINT32 Address
++ );
++
++/* Enable dump or not. */
++gceSTATUS
++gcoDUMP_SetDumpFlag(
++ IN gctBOOL DumpState
++ );
++
++/******************************************************************************\
++******************************* gcsRECT Structure ******************************
++\******************************************************************************/
++
++/* Initialize rectangle structure. */
++gceSTATUS
++gcsRECT_Set(
++ OUT gcsRECT_PTR Rect,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Return the width of the rectangle. */
++gceSTATUS
++gcsRECT_Width(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Width
++ );
++
++/* Return the height of the rectangle. */
++gceSTATUS
++gcsRECT_Height(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Height
++ );
++
++/* Ensure that top left corner is to the left and above the right bottom. */
++gceSTATUS
++gcsRECT_Normalize(
++ IN OUT gcsRECT_PTR Rect
++ );
++
++/* Compare two rectangles. */
++gceSTATUS
++gcsRECT_IsEqual(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * Equal
++ );
++
++/* Compare the sizes of two rectangles. */
++gceSTATUS
++gcsRECT_IsOfEqualSize(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * EqualSize
++ );
++
++gceSTATUS
++gcsRECT_RelativeRotation(
++ IN gceSURF_ROTATION Orientation,
++ IN OUT gceSURF_ROTATION *Relation);
++
++gceSTATUS
++
++gcsRECT_Rotate(
++
++ IN OUT gcsRECT_PTR Rect,
++
++ IN gceSURF_ROTATION Rotation,
++
++ IN gceSURF_ROTATION toRotation,
++
++ IN gctINT32 SurfaceWidth,
++
++ IN gctINT32 SurfaceHeight
++
++ );
++
++/******************************************************************************\
++**************************** gcsBOUNDARY Structure *****************************
++\******************************************************************************/
++
++typedef struct _gcsBOUNDARY
++{
++ gctINT x;
++ gctINT y;
++ gctINT width;
++ gctINT height;
++}
++gcsBOUNDARY;
++
++/******************************************************************************\
++********************************* gcoHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gcoHEAP * gcoHEAP;
++
++/* Construct a new gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Construct(
++ IN gcoOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gcoHEAP * Heap
++ );
++
++/* Destroy an gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Destroy(
++ IN gcoHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoHEAP_Allocate(
++ IN gcoHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcoHEAP_GetMemorySize(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoHEAP_Free(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++#if (VIVANTE_PROFILER || gcdDEBUG)
++/* Profile the heap. */
++gceSTATUS
++gcoHEAP_ProfileStart(
++ IN gcoHEAP Heap
++ );
++
++gceSTATUS
++gcoHEAP_ProfileEnd(
++ IN gcoHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++
++/******************************************************************************\
++******************************* Debugging Macros *******************************
++\******************************************************************************/
++
++void
++gcoOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gcoOS_GetDebugLevel(
++ OUT gctUINT32_PTR DebugLevel
++ );
++
++void
++gcoOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_GetDebugZone(
++ IN gctUINT32 Zone,
++ OUT gctUINT32_PTR DebugZone
++ );
++
++void
++gcoOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gcoOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++gctFILE
++gcoOS_ReplaceDebugFile(
++ IN gctFILE fp
++ );
++
++void
++gcoOS_SysTraceBegin(
++ IN gctCONST_STRING FuncName
++ );
++
++void
++gcoOS_SysTraceEnd(
++ IN void);
++
++/*******************************************************************************
++**
++** gcmFATAL
++**
++** Print a message to the debugger and execute a break point.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_FATAL)
++# define gcmFATAL gcoOS_DebugFatal
++# define gcmkFATAL gckOS_DebugFatal
++#elif gcdHAS_ELLIPSIS
++# define gcmFATAL(...)
++# define gcmkFATAL(...)
++#else
++ gcmINLINE static void
++ __dummy_fatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmFATAL __dummy_fatal
++# define gcmkFATAL __dummy_fatal
++#endif
++
++#define gcmENUM2TEXT(e) case e: return #e
++
++/*******************************************************************************
++**
++** gcmTRACE
++**
++** Print a message to the debugfer if the correct level has been set. In
++** retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** level Level of message.
++** message Message.
++** ... Optional arguments.
++*/
++#define gcvLEVEL_NONE -1
++#define gcvLEVEL_ERROR 0
++#define gcvLEVEL_WARNING 1
++#define gcvLEVEL_INFO 2
++#define gcvLEVEL_VERBOSE 3
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE gcoOS_DebugTrace
++# define gcmkTRACE gckOS_DebugTrace
++# define gcmkTRACE_N gckOS_DebugTraceN
++#elif gcdHAS_ELLIPSIS
++# define gcmTRACE(...)
++# define gcmkTRACE(...)
++# define gcmkTRACE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_n(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE __dummy_trace
++# define gcmkTRACE __dummy_trace
++# define gcmkTRACE_N __dummy_trace_n
++#endif
++
++/* Zones common for kernel and user. */
++#define gcvZONE_OS (1 << 0)
++#define gcvZONE_HARDWARE (1 << 1)
++#define gcvZONE_HEAP (1 << 2)
++#define gcvZONE_SIGNAL (1 << 27)
++
++/* Kernel zones. */
++#define gcvZONE_KERNEL (1 << 3)
++#define gcvZONE_VIDMEM (1 << 4)
++#define gcvZONE_COMMAND (1 << 5)
++#define gcvZONE_DRIVER (1 << 6)
++#define gcvZONE_CMODEL (1 << 7)
++#define gcvZONE_MMU (1 << 8)
++#define gcvZONE_EVENT (1 << 9)
++#define gcvZONE_DEVICE (1 << 10)
++#define gcvZONE_DATABASE (1 << 11)
++#define gcvZONE_INTERRUPT (1 << 12)
++#define gcvZONE_POWER (1 << 13)
++
++/* User zones. */
++#define gcvZONE_HAL (1 << 3)
++#define gcvZONE_BUFFER (1 << 4)
++#define gcvZONE_CONTEXT (1 << 5)
++#define gcvZONE_SURFACE (1 << 6)
++#define gcvZONE_INDEX (1 << 7)
++#define gcvZONE_STREAM (1 << 8)
++#define gcvZONE_TEXTURE (1 << 9)
++#define gcvZONE_2D (1 << 10)
++#define gcvZONE_3D (1 << 11)
++#define gcvZONE_COMPILER (1 << 12)
++#define gcvZONE_MEMORY (1 << 13)
++#define gcvZONE_STATE (1 << 14)
++#define gcvZONE_AUX (1 << 15)
++#define gcvZONE_VERTEX (1 << 16)
++#define gcvZONE_CL (1 << 17)
++#define gcvZONE_COMPOSITION (1 << 17)
++#define gcvZONE_VG (1 << 18)
++#define gcvZONE_IMAGE (1 << 19)
++#define gcvZONE_UTILITY (1 << 20)
++#define gcvZONE_PARAMETERS (1 << 21)
++#define gcvZONE_BUFOBJ (1 << 22)
++#define gcvZONE_SHADER (1 << 23)
++#define gcvZONE_STREAM_OUT (1 << 24)
++
++/* API definitions. */
++#define gcvZONE_API_HAL (1 << 28)
++#define gcvZONE_API_EGL (2 << 28)
++#define gcvZONE_API_ES11 (3 << 28)
++#define gcvZONE_API_ES20 (4 << 28)
++#define gcvZONE_API_VG11 (5 << 28)
++#define gcvZONE_API_GL (6 << 28)
++#define gcvZONE_API_DFB (7 << 28)
++#define gcvZONE_API_GDI ((gctUINT32)8 << 28)
++#define gcvZONE_API_D3D ((gctUINT32)9 << 28)
++#define gcvZONE_API_ES30 ((gctUINT32)10 << 28)
++
++
++#define gcmZONE_GET_API(zone) ((zone) >> 28)
++/*Set gcdZONE_MASE like 0x0 | gcvZONE_API_EGL
++will enable print EGL module debug info*/
++#define gcdZONE_MASK 0x0FFFFFFF
++
++/* Handy zones. */
++#define gcvZONE_NONE 0
++#define gcvZONE_ALL 0x0FFFFFFF
++
++/*Dump API depth set 1 for API, 2 for API and API behavior*/
++#define gcvDUMP_API_DEPTH 1
++
++/*******************************************************************************
++**
++** gcmTRACE_ZONE
++**
++** Print a message to the debugger if the correct level and zone has been
++** set. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** Level Level of message.
++** Zone Zone of message.
++** Message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE_ZONE gcoOS_DebugTraceZone
++# define gcmkTRACE_ZONE gckOS_DebugTraceZone
++# define gcmkTRACE_ZONE_N gckOS_DebugTraceZoneN
++#elif gcdHAS_ELLIPSIS
++# define gcmTRACE_ZONE(...)
++# define gcmkTRACE_ZONE(...)
++# define gcmkTRACE_ZONE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace_zone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_zone_n(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE_N __dummy_trace_zone_n
++#endif
++
++/*******************************************************************************
++**
++** gcmDEBUG_ONLY
++**
++** Execute a statement or function only in DEBUG mode.
++**
++** ARGUMENTS:
++**
++** f Statement or function to execute.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++# define gcmDEBUG_ONLY(f) f
++#else
++# define gcmDEBUG_ONLY(f)
++#endif
++
++/*******************************************************************************
++**
++** gcmSTACK_PUSH
++** gcmSTACK_POP
++** gcmSTACK_DUMP
++**
++** Push or pop a function with entry arguments on the trace stack.
++**
++** ARGUMENTS:
++**
++** Function Name of function.
++** Line Line number.
++** Text Optional text.
++** ... Optional arguments for text.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_STACK)
++ void gcoOS_StackPush(IN gctINT8_PTR Identity, IN gctCONST_STRING Function, IN gctINT Line, IN gctCONST_STRING Text, ...);
++ void gcoOS_StackPop(IN gctINT8_PTR Identity, IN gctCONST_STRING Function);
++ void gcoOS_StackDump(void);
++ void gcoOS_StackRemove(IN gctHANDLE Thread);
++
++# define gcmSTACK_PUSH gcoOS_StackPush
++# define gcmSTACK_POP gcoOS_StackPop
++# define gcmSTACK_DUMP gcoOS_StackDump
++# define gcmSTACK_REMOVE gcoOS_StackRemove
++#elif gcdHAS_ELLIPSIS
++# define gcmSTACK_PUSH(...) do { } while (0)
++# define gcmSTACK_POP(...) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++# define gcmSTACK_REMOVE(...) do { } while (0)
++#else
++ gcmINLINE static void
++ __dummy_stack_push(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text, ...
++ )
++ {
++ }
++# define gcmSTACK_PUSH __dummy_stack_push
++# define gcmSTACK_POP(a,b) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++# define gcmSTACK_REMOVE(a) do { } while (0)
++#endif
++
++/******************************************************************************\
++******************************** Binary Trace **********************************
++\******************************************************************************/
++typedef struct _gcsBINARY_TRACE_MESSAGE * gcsBINARY_TRACE_MESSAGE_PTR;
++typedef struct _gcsBINARY_TRACE_MESSAGE
++{
++ gctUINT32 signature;
++ gctUINT32 pid;
++ gctUINT32 tid;
++ gctUINT32 line;
++ gctUINT32 numArguments;
++ gctUINT8 payload;
++}
++gcsBINARY_TRACE_MESSAGE;
++
++#define gcdBINARY_TRACE_MESSAGE_SIZE 240
++
++#if gcdBINARY_TRACE
++ void
++ gcoOS_BinaryTrace(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text OPTIONAL,
++ ...
++ );
++
++ void
++ gckOS_BinaryTrace(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text OPTIONAL,
++ ...
++ );
++
++# define gcmBINARY_TRACE gcoOS_BinaryTrace
++# define gcmkBINARY_TRACE gckOS_BinaryTrace
++#elif gcdHAS_ELLIPSIS
++# define gcmBINARY_TRACE(Function, Line, Text, ...)
++# define gcmkBINARY_TRACE(Function, Line, Text, ...)
++#else
++ gcmINLINE static void
++ __dummy_binary_trace(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text,
++ )
++ {
++ }
++
++# define gcmBINARY_TRACE __dummy_binary_trace
++# define gcmkBINARY_TRACE __dummy_binary_trace
++#endif
++
++/******************************************************************************\
++******************************** Logging Macros ********************************
++\******************************************************************************/
++
++#define gcdHEADER_LEVEL gcvLEVEL_VERBOSE
++
++#ifndef gcdEMPTY_HEADER_FOOTER
++#define gcdEMPTY_HEADER_FOOTER 0
++#endif
++
++#if gcdENABLE_PROFILING
++void
++gcoOS_ProfileDB(
++ IN gctCONST_STRING Function,
++ IN OUT gctBOOL_PTR Initialized
++ );
++
++#define gcmHEADER() \
++ gctINT8 __user__ = 1; \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(&__user__, __FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmHEADER_ARG(...) \
++ gctINT8 __user__ = 1; \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(&__user__, __FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmFOOTER() \
++ gcmSTACK_POP(&__user__, __FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(&__user__, __FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_ARG(...) \
++ gcmSTACK_POP(&__user__, __FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(&__user__, __FUNCTION__); \
++ gcoOS_ProfileDB(gcvNULL, gcvNULL)
++
++#else /* gcdENABLE_PROFILING */
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmHEADER()
++#elif gcdEMPTY_HEADER_FOOTER
++# define gcmHEADER()
++#elif gcdHAS_ELLIPSIS
++#define gcmHEADER() \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__user_ptr__, __FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_header(void)
++ {
++ }
++# define gcmHEADER __dummy_header
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmHEADER_ARG(Text, ...)
++#elif gcdHAS_ELLIPSIS
++#if gcdEMPTY_HEADER_FOOTER
++# define gcmHEADER_ARG(Text, ...)
++#else
++# define gcmHEADER_ARG(Text, ...) \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__user_ptr__, __FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#endif
++#else
++ gcmINLINE static void
++ __dummy_header_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmHEADER_ARG __dummy_header_arg
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++# define gcmFOOTER()
++#elif gcdEMPTY_HEADER_FOOTER
++# define gcmFOOTER()
++#elif gcdHAS_ELLIPSIS
++# define gcmFOOTER() \
++ gcmSTACK_POP(__user_ptr__, __FUNCTION__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, \
++ status, gcoOS_DebugStatus2Name(status)); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer(void)
++ {
++ }
++# define gcmFOOTER __dummy_footer
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmFOOTER_NO()
++#elif gcdEMPTY_HEADER_FOOTER
++# define gcmFOOTER_NO()
++#elif gcdHAS_ELLIPSIS
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(__user_ptr__, __FUNCTION__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_no(void)
++ {
++ }
++# define gcmFOOTER_NO __dummy_footer_no
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmFOOTER_KILL()
++#elif gcdEMPTY_HEADER_FOOTER
++# define gcmFOOTER_KILL()
++#elif gcdHAS_ELLIPSIS
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(__user_ptr__, __FUNCTION__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_kill(void)
++ {
++ }
++# define gcmFOOTER_KILL __dummy_footer_kill
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++# define gcmFOOTER_ARG(Text, ...)
++#elif gcdHAS_ELLIPSIS
++#if gcdEMPTY_HEADER_FOOTER
++# define gcmFOOTER_ARG(Text, ...)
++#else
++# define gcmFOOTER_ARG(Text, ...) \
++ gcmSTACK_POP(__user_ptr__, __FUNCTION__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__user_ptr__ -= 1
++#endif
++#else
++ gcmINLINE static void
++ __dummy_footer_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmFOOTER_ARG __dummy_footer_arg
++#endif
++
++#endif /* gcdENABLE_PROFILING */
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmkHEADER()
++#elif gcdHAS_ELLIPSIS
++#define gcmkHEADER() \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_kheader(void)
++ {
++ }
++# define gcmkHEADER __dummy_kheader
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++# define gcmkHEADER_ARG(Text, ...)
++#elif gcdHAS_ELLIPSIS
++# define gcmkHEADER_ARG(Text, ...) \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#else
++ gcmINLINE static void
++ __dummy_kheader_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkHEADER_ARG __dummy_kheader_arg
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmkFOOTER()
++#elif gcdHAS_ELLIPSIS
++#define gcmkFOOTER() \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, status); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, status, gckOS_DebugStatus2Name(status)); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter(void)
++ {
++ }
++# define gcmkFOOTER __dummy_kfooter
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmkFOOTER_NO()
++#elif gcdHAS_ELLIPSIS
++#define gcmkFOOTER_NO() \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_no(void)
++ {
++ }
++# define gcmkFOOTER_NO __dummy_kfooter_no
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++# define gcmkFOOTER_ARG(Text, ...)
++#elif gcdHAS_ELLIPSIS
++# define gcmkFOOTER_ARG(Text, ...) \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, \
++ __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkFOOTER_ARG __dummy_kfooter_arg
++#endif
++
++#define gcmOPT_VALUE(ptr) (((ptr) == gcvNULL) ? 0 : *(ptr))
++#define gcmOPT_VALUE_INDEX(ptr, index) (((ptr) == gcvNULL) ? 0 : ptr[index])
++#define gcmOPT_POINTER(ptr) (((ptr) == gcvNULL) ? gcvNULL : *(ptr))
++#define gcmOPT_STRING(ptr) (((ptr) == gcvNULL) ? "(nil)" : (ptr))
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#define gcmPRINT gcoOS_Print
++#define gcmkPRINT gckOS_Print
++#define gcmkPRINT_N gckOS_PrintN
++
++#if gcdPRINT_VERSION
++# define gcmPRINT_VERSION() do { \
++ _gcmPRINT_VERSION(gcm); \
++ gcmSTACK_DUMP(); \
++ } while (0)
++# define gcmkPRINT_VERSION() _gcmPRINT_VERSION(gcmk)
++# define _gcmPRINT_VERSION(prefix) \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "Vivante HAL version %d.%d.%d build %d %s %s", \
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, \
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME )
++#else
++# define gcmPRINT_VERSION() do { gcmSTACK_DUMP(); } while (gcvFALSE)
++# define gcmkPRINT_VERSION() do { } while (gcvFALSE)
++#endif
++
++typedef enum _gceDUMP_BUFFER
++{
++ gceDUMP_BUFFER_CONTEXT,
++ gceDUMP_BUFFER_USER,
++ gceDUMP_BUFFER_KERNEL,
++ gceDUMP_BUFFER_LINK,
++ gceDUMP_BUFFER_WAITLINK,
++ gceDUMP_BUFFER_FROM_USER,
++}
++gceDUMP_BUFFER;
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ );
++
++#define gcmkDUMPBUFFER gckOS_DumpBuffer
++
++#if gcdDUMP_COMMAND
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage) \
++ gcmkDUMPBUFFER(Os, Buffer, Size, Type, CopyMessage)
++#else
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage)
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ );
++
++# define gcmkDEBUGFLUSH(DmaAddress) \
++ gckOS_DebugFlush(__FUNCTION__, __LINE__, DmaAddress)
++#else
++# define gcmkDEBUGFLUSH(DmaAddress)
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_FRAMERATE
++**
++** Print average frame rate
++**
++*/
++#if gcdDUMP_FRAMERATE
++ gceSTATUS
++ gcfDumpFrameRate(
++ void
++ );
++# define gcmDUMP_FRAMERATE gcfDumpFrameRate
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_FRAMERATE(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_frame_rate(
++ void
++ )
++ {
++ }
++# define gcmDUMP_FRAMERATE __dummy_dump_frame_rate
++#endif
++
++
++/*******************************************************************************
++**
++** gcmDUMP
++**
++** Print a dump message.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++#if gcdDUMP
++ gceSTATUS
++ gcfDump(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ ...
++ );
++# define gcmDUMP gcfDump
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP(...)
++#else
++ gcmINLINE static void
++ __dummy_dump(
++ IN gcoOS Os,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP __dummy_dump
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_DATA
++**
++** Add data to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++ gceSTATUS
++ gcfDumpData(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_DATA gcfDumpData
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_data(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_DATA __dummy_dump_data
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_BUFFER
++**
++** Print a buffer to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctUINT32 Physical
++** Physical address of buffer.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctUINT32 Offset
++** Offset into buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++gceSTATUS
++gcfDumpBuffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_BUFFER gcfDumpBuffer
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_BUFFER(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_buffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_BUFFER __dummy_dump_buffer
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API
++**
++** Print a dump message for a high level API prefixed by the function name.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++gceSTATUS gcfDumpApi(IN gctCONST_STRING String, ...);
++#if gcdDUMP_API
++# define gcmDUMP_API gcfDumpApi
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_API(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP_API __dummy_dump_api
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY
++**
++** Print an array of data.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Size.
++*/
++gceSTATUS gcfDumpArray(IN gctCONST_POINTER Data, IN gctUINT32 Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY gcfDumpArray
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_API_ARRAY(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY __dummy_dump_api_array
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY_TOKEN
++**
++** Print an array of data terminated by a token.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Termination.
++*/
++gceSTATUS gcfDumpArrayToken(IN gctCONST_POINTER Data, IN gctUINT32 Termination);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY_TOKEN gcfDumpArrayToken
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_API_ARRAY_TOKEN(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array_token(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Termination
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY_TOKEN __dummy_dump_api_array_token
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_DATA
++**
++** Print an array of bytes.
++**
++** ARGUMENTS:
++**
++** gctCONST_POINTER Pointer to array.
++** gctSIZE_T Size.
++*/
++gceSTATUS gcfDumpApiData(IN gctCONST_POINTER Data, IN gctSIZE_T Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_DATA gcfDumpApiData
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_API_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_data(
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Size
++ )
++ {
++ }
++# define gcmDUMP_API_DATA __dummy_dump_api_data
++#endif
++
++/*******************************************************************************
++** gcmDUMP_2D_COMMAND
++**
++** Print the 2D command buffer.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to the command buffer.
++** gctUINT32 Command buffer size.
++*/
++gceSTATUS gcfDump2DCommand(IN gctUINT32_PTR Command, IN gctUINT32 Size);
++#if gcdDUMP_2D
++# define gcmDUMP_2D_COMMAND gcfDump2DCommand
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_2D_COMMAND(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_2d_command(
++ IN gctUINT32_PTR Command,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_2D_COMMAND __dummy_dump_2d_command
++#endif
++
++/*******************************************************************************
++** gcmDUMP_2D_SURFACE
++**
++** Print the 2D surface memory.
++**
++** ARGUMENTS:
++**
++** gctBOOL Src.
++** gctUINT32 Address.
++*/
++gceSTATUS gcfDump2DSurface(IN gctBOOL Src, IN gctUINT32 Address);
++#if gcdDUMP_2D
++# define gcmDUMP_2D_SURFACE gcfDump2DSurface
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_2D_SURFACE(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_2d_surface(
++ IN gctBOOL Src,
++ IN gctUINT32 Address
++ )
++ {
++ }
++# define gcmDUMP_2D_SURFACE __dummy_dump_2d_surface
++#endif
++
++/*******************************************************************************
++** gcmDUMP_ADD_MEMORY_INFO
++**
++** Record the memory info.
++**
++** ARGUMENTS:
++**
++** gctUINT32 Address.
++** gctSIZE_T Size.
++*/
++gceSTATUS gcfAddMemoryInfo(IN gctUINT32 GPUAddress, IN gctPOINTER Logical, IN gctUINT32 Physical, IN gctUINT32 Size);
++#if gcdDUMP_2D
++# define gcmDUMP_ADD_MEMORY_INFO gcfAddMemoryInfo
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_ADD_MEMORY_INFO(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_add_memory_info(
++ IN gctUINT32 GPUAddress,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_ADD_MEMORY_INFO __dummy_dump_add_memory_info
++#endif
++
++/*******************************************************************************
++** gcmDUMP_DEL_MEMORY_INFO
++**
++** Record the memory info.
++**
++** ARGUMENTS:
++**
++** gctUINT32 Address.
++*/
++gceSTATUS gcfDelMemoryInfo(IN gctUINT32 Address);
++#if gcdDUMP_2D
++# define gcmDUMP_DEL_MEMORY_INFO gcfDelMemoryInfo
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_DEL_MEMORY_INFO(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_del_memory_info(
++ IN gctUINT32 Address
++ )
++ {
++ }
++# define gcmDUMP_DEL_MEMORY_INFO __dummy_dump_del_memory_info
++#endif
++
++#if gcdDUMP_2D
++extern gctPOINTER dumpMemInfoListMutex;
++extern gctBOOL dump2DFlag;
++#endif
++
++/*******************************************************************************
++**
++** gcmTRACE_RELEASE
++**
++** Print a message to the shader debugger.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++#define gcmTRACE_RELEASE gcoOS_DebugShaderTrace
++
++void
++gcoOS_DebugShaderTrace(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_SetDebugShaderFiles(
++ IN gctCONST_STRING VSFileName,
++ IN gctCONST_STRING FSFileName
++ );
++
++void
++gcoOS_SetDebugShaderFileType(
++ IN gctUINT32 ShaderType
++ );
++
++void
++gcoOS_EnableDebugBuffer(
++ IN gctBOOL Enable
++ );
++
++/*******************************************************************************
++**
++** gcmBREAK
++**
++** Break into the debugger. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** None.
++*/
++
++void
++gcoOS_DebugBreak(
++ void
++ );
++
++void
++gckOS_DebugBreak(
++ void
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_BREAK)
++# define gcmBREAK gcoOS_DebugBreak
++# define gcmkBREAK gckOS_DebugBreak
++#else
++# define gcmBREAK()
++# define gcmkBREAK()
++#endif
++
++/*******************************************************************************
++**
++** gcmASSERT
++**
++** Evaluate an expression and break into the debugger if the expression
++** evaluates to false. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define _gcmASSERT(prefix, exp) \
++ do \
++ { \
++ if (!(exp)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ASSERT at %s(%d)", \
++ __FUNCTION__, __LINE__); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "(%s)", #exp); \
++ prefix##BREAK(); \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmASSERT(exp) _gcmASSERT(gcm, exp)
++# define gcmkASSERT(exp) _gcmASSERT(gcmk, exp)
++#else
++# define gcmASSERT(exp)
++# define gcmkASSERT(exp)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY
++**
++** Verify if an expression returns true. If the expression does not
++** evaluates to true, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY(exp) gcmASSERT(exp)
++# define gcmkVERIFY(exp) gcmkASSERT(exp)
++#else
++# define gcmVERIFY(exp) exp
++# define gcmkVERIFY(exp) exp
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_OK
++**
++** Verify a fucntion returns gcvSTATUS_OK. If the function does not return
++** gcvSTATUS_OK, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++
++void
++gcoOS_Verify(
++ IN gceSTATUS status
++ );
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ gcoOS_Verify(verifyStatus); \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gcmASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++# define gcmkVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmkTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmkVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gckOS_Verify(verifyStatus); \
++ gcmkASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++#else
++# define gcmVERIFY_OK(func) func
++# define gcmkVERIFY_OK(func) func
++#endif
++
++gctCONST_STRING
++gcoOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++/*******************************************************************************
++**
++** gcmERR_BREAK
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_BREAK(func) _gcmERR_BREAK(gcm, func)
++#define gcmkERR_BREAK(func) _gcmkERR_BREAK(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmERR_RETURN
++**
++** Executes a return on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_RETURN(func) _gcmERR_RETURN(gcm, func)
++#define gcmkERR_RETURN(func) _gcmkERR_RETURN(gcmk, func)
++
++
++/*******************************************************************************
++**
++** gcmONERROR
++**
++** Jump to the error handler in case there is an error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmONERROR(func) _gcmONERROR(gcm, func)
++#define gcmkONERROR(func) _gcmkONERROR(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmkSAFECASTSIZET
++**
++** Check wether value of a gctSIZE_T varible beyond the capability
++** of 32bits GPU hardware.
++**
++** ASSUMPTIONS:
++**
++**
++**
++** ARGUMENTS:
++**
++** x A gctUINT32 variable
++** y A gctSIZE_T variable
++*/
++#define gcmkSAFECASTSIZET(x, y) \
++ do \
++ { \
++ gctUINT32 tmp = (gctUINT32)(y); \
++ if (gcmSIZEOF(gctSIZE_T) > gcmSIZEOF(gctUINT32)) \
++ { \
++ gcmkASSERT(tmp <= gcvMAXUINT32); \
++ } \
++ (x) = tmp; \
++ } \
++ while (gcvFALSE)
++
++#define gcmSAFECASTSIZET(x, y) \
++ do \
++ { \
++ gctUINT32 tmp = (gctUINT32)(y); \
++ if (gcmSIZEOF(gctSIZE_T) > gcmSIZEOF(gctUINT32)) \
++ { \
++ gcmASSERT(tmp <= gcvMAXUINT32); \
++ } \
++ (x) = tmp; \
++ } \
++ while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmVERIFY_LOCK
++**
++** Verifies whether the surface is locked.
++**
++** ARGUMENTS:
++**
++** surfaceInfo Pointer to the surface iniformational structure.
++*/
++#define gcmVERIFY_LOCK(surfaceInfo) \
++ if (!surfaceInfo->node.valid) \
++ { \
++ gcmONERROR(gcvSTATUS_MEMORY_UNLOCKED); \
++ } \
++
++/*******************************************************************************
++**
++** gcmVERIFY_NODE_LOCK
++**
++** Verifies whether the surface node is locked.
++**
++** ARGUMENTS:
++**
++** surfaceInfo Pointer to the surface iniformational structure.
++*/
++#define gcmVERIFY_NODE_LOCK(surfaceNode) \
++ if (!(surfaceNode)->valid) \
++ { \
++ status = gcvSTATUS_MEMORY_UNLOCKED; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmBADOBJECT_BREAK
++**
++** Executes a break statement on bad object.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#define gcmBADOBJECT_BREAK(obj, t) \
++ if ((obj == gcvNULL) \
++ || (((gcsOBJECT *)(obj))->type != t) \
++ ) \
++ { \
++ status = gcvSTATUS_INVALID_OBJECT; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmCHECK_STATUS
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gcoOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gckOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmCHECK_STATUS(func) _gcmCHECK_STATUS(gcm, func)
++#define gcmkCHECK_STATUS(func) _gcmkCHECK_STATUS(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT
++**
++** Assert if an argument does not apply to the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT(prefix, arg) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, #prefix "VERIFY_ARGUMENT failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); \
++ return gcvSTATUS_INVALID_ARGUMENT; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg)
++
++/*******************************************************************************
++**
++** gcmDEBUG_VERIFY_ARGUMENT
++**
++** Works just like gcmVERIFY_ARGUMENT, but is only valid in debug mode.
++** Use this to verify arguments inside non-public API functions.
++*/
++#if gcdDEBUG
++# define gcmDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg) _gcmkVERIFY_ARGUMENT(gcm, arg)
++#else
++# define gcmDEBUG_VERIFY_ARGUMENT(arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT_RETURN
++**
++** Assert if an argument does not apply to the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT_RETURN(prefix, arg, value) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "gcmVERIFY_ARGUMENT_RETURN failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("value=%d", value); \
++ return value; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcm, arg, value)
++# define gcmkVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcmk, arg, value)
++
++#define MAX_LOOP_COUNT 0x7FFFFFFF
++
++/******************************************************************************\
++****************************** User Debug Option ******************************
++\******************************************************************************/
++
++/* User option. */
++typedef enum _gceDEBUG_MSG
++{
++ gcvDEBUG_MSG_NONE,
++ gcvDEBUG_MSG_ERROR,
++ gcvDEBUG_MSG_WARNING
++}
++gceDEBUG_MSG;
++
++typedef struct _gcsUSER_DEBUG_OPTION
++{
++ gceDEBUG_MSG debugMsg;
++}
++gcsUSER_DEBUG_OPTION;
++
++gcsUSER_DEBUG_OPTION *
++gcGetUserDebugOption(
++ void
++ );
++
++#if defined(ANDROID)
++struct _gcoOS_SymbolsList
++{
++#if gcdENABLE_3D
++ gcePATCH_ID patchId;
++#endif
++ const char * symList[10];
++};
++#endif
++
++#if gcdHAS_ELLIPSIS
++#define gcmUSER_DEBUG_MSG(level, ...) \
++ do \
++ { \
++ if (level <= gcGetUserDebugOption()->debugMsg) \
++ { \
++ gcoOS_Print(__VA_ARGS__); \
++ } \
++ } while (gcvFALSE)
++
++#define gcmUSER_DEBUG_ERROR_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_ERROR, "Error: " __VA_ARGS__)
++#define gcmUSER_DEBUG_WARNING_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_WARNING, "Warring: " __VA_ARGS__)
++#else
++#define gcmUSER_DEBUG_MSG
++#define gcmUSER_DEBUG_ERROR_MSG
++#define gcmUSER_DEBUG_WARNING_MSG
++#endif
++
++/*******************************************************************************
++**
++** A set of macros to aid state loading.
++**
++** ARGUMENTS:
++**
++** CommandBuffer Pointer to a gcoCMDBUF object.
++** StateDelta Pointer to a gcsSTATE_DELTA state delta structure.
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** PartOfContext Whether or not the state is a part of the context.
++** FixedPoint Whether or not the state is of the fixed point format.
++** Count Number of consecutive states to be loaded.
++** Address State address.
++** Data Data to be set to the state.
++*/
++
++/*----------------------------------------------------------------------------*/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) \
++ CommandBuffer->lastLoadStatePtr = gcmPTR_TO_UINT64(Memory); \
++ CommandBuffer->lastLoadStateAddress = Address; \
++ CommandBuffer->lastLoadStateCount = Count
++
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) \
++ gcmASSERT( \
++ (gctUINT) (Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastLoadStatePtr, gctUINT32_PTR) - 1) \
++ == \
++ (gctUINT) (Address - CommandBuffer->lastLoadStateAddress) \
++ ); \
++ \
++ gcmASSERT(CommandBuffer->lastLoadStateCount > 0); \
++ \
++ CommandBuffer->lastLoadStateCount -= 1
++
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer) \
++ gcmASSERT(CommandBuffer->lastLoadStateCount == 0);
++
++# define gcmDEFINELOADSTATEBASE() \
++ gctUINT32_PTR LoadStateBase;
++
++# define gcmSETLOADSTATEBASE(CommandBuffer, OutSide) \
++ if (OutSide) \
++ {\
++ LoadStateBase = (gctUINT32_PTR)*OutSide; \
++ }\
++ else\
++ {\
++ LoadStateBase = (gctUINT_PTR)CommandBuffer->buffer;\
++ }
++
++
++# define gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory) \
++ gcmASSERT(((Memory - LoadStateBase) & 1) == 0);
++
++# define gcmUNSETLOADSTATEBASE() \
++ LoadStateBase = LoadStateBase;
++
++#else
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count)
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address)
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer)
++
++# define gcmDEFINELOADSTATEBASE()
++# define gcmSETLOADSTATEBASE(CommandBuffer, OutSide)
++# define gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory)
++# define gcmUNSETLOADSTATEBASE()
++
++#endif
++
++#if gcdSECURE_USER
++
++# define gcmDEFINESECUREUSER() \
++ gctUINT __secure_user_offset__; \
++ gctUINT32_PTR __secure_user_hintArray__;
++
++# define gcmBEGINSECUREUSER() \
++ __secure_user_offset__ = reserve->lastOffset; \
++ \
++ __secure_user_hintArray__ = gcmUINT64_TO_PTR(reserve->hintArrayTail)
++
++# define gcmENDSECUREUSER() \
++ reserve->hintArrayTail = gcmPTR_TO_UINT64(__secure_user_hintArray__)
++
++# define gcmSKIPSECUREUSER() \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32)
++
++# define gcmUPDATESECUREUSER() \
++ *__secure_user_hintArray__ = __secure_user_offset__; \
++ \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32); \
++ __secure_user_hintArray__ += 1
++
++#else
++
++# define gcmDEFINESECUREUSER()
++# define gcmBEGINSECUREUSER()
++# define gcmENDSECUREUSER()
++# define gcmSKIPSECUREUSER()
++# define gcmUPDATESECUREUSER()
++
++#endif
++
++/*----------------------------------------------------------------------------*/
++
++#if gcdDUMP
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data) \
++ if (FixedPoint) \
++ { \
++ gcmDUMP(gcvNULL, "#[state.x 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ } \
++ else \
++ { \
++ gcmDUMP(gcvNULL, "#[state 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ }
++#else
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data)
++#endif
++
++#define gcmDEFINESTATEBUFFER(CommandBuffer, StateDelta, Memory, ReserveSize) \
++ gcmDEFINESECUREUSER() \
++ gctSIZE_T ReserveSize; \
++ gcoCMDBUF CommandBuffer; \
++ gctUINT32_PTR Memory; \
++ gcsSTATE_DELTA_PTR StateDelta
++
++#define gcmBEGINSTATEBUFFER(Hardware, CommandBuffer, StateDelta, Memory, ReserveSize) \
++{ \
++ gcmONERROR(gcoBUFFER_Reserve( \
++ Hardware->buffer, ReserveSize, gcvTRUE, gcvCOMMAND_3D, &CommandBuffer \
++ )); \
++ \
++ Memory = (gctUINT32_PTR) gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \
++ \
++ StateDelta = Hardware->delta; \
++ \
++ gcmBEGINSECUREUSER(); \
++}
++
++#define gcmENDSTATEBUFFER(Hardware, CommandBuffer, Memory, ReserveSize) \
++{ \
++ gcmENDSECUREUSER(); \
++ \
++ gcmASSERT( \
++ gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT8_PTR) + ReserveSize \
++ == \
++ (gctUINT8_PTR) Memory \
++ ); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, Count) \
++{ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++ gcmASSERT((gctUINT32)Count <= 1024); \
++ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count); \
++ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmENDSTATEBATCH(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ gcmSAFECASTSIZET(__temp_data32__, Data); \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, Address, 0, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSTATEDATAWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, Address, Mask, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++
++#define gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETFILLER(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ Memory += 1; \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSINGLESTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLESTATEWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATAWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++
++#define gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++
++
++#define gcmSETSEMASTALLPIPE(StateDelta, CommandBuffer, Memory, Data) \
++{ \
++ gcmSETSINGLESTATE(StateDelta, CommandBuffer, Memory, gcvFALSE, AQSemaphoreRegAddrs, Data); \
++ \
++ *Memory++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \
++ \
++ *Memory++ = Data; \
++ \
++ gcmDUMP(gcvNULL, "#[stall 0x%08X 0x%08X]", \
++ gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END), \
++ gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE)); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++/*******************************************************************************
++**
++** gcmSETSTARTDECOMMAND
++**
++** Form a START_DE command.
++**
++** ARGUMENTS:
++**
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** Count Number of the rectangles.
++*/
++
++#define gcmSETSTARTDECOMMAND(Memory, Count) \
++{ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \
++ \
++ *Memory++ = 0xDEADDEED; \
++}
++
++/*****************************************
++** Temp command buffer macro
++*/
++#define gcmDEFINESTATEBUFFER_NEW(CommandBuffer, StateDelta, Memory) \
++ gcmDEFINESECUREUSER() \
++ gcmDEFINELOADSTATEBASE() \
++ gcsTEMPCMDBUF CommandBuffer = gcvNULL; \
++ gctUINT32_PTR Memory; \
++ gcsSTATE_DELTA_PTR StateDelta
++
++
++#define gcmBEGINSTATEBUFFER_NEW(Hardware, CommandBuffer, StateDelta, Memory, OutSide) \
++{ \
++ if (OutSide) \
++ {\
++ Memory = (gctUINT32_PTR)*OutSide; \
++ }\
++ else \
++ {\
++ gcmONERROR(gcoBUFFER_StartTEMPCMDBUF( \
++ Hardware->buffer, &CommandBuffer \
++ ));\
++ \
++ Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \
++ \
++ }\
++ StateDelta = Hardware->delta; \
++ \
++ gcmBEGINSECUREUSER(); \
++ gcmSETLOADSTATEBASE(CommandBuffer,OutSide);\
++}
++
++#define gcmENDSTATEBUFFER_NEW(Hardware, CommandBuffer, Memory, OutSide) \
++{ \
++ gcmENDSECUREUSER(); \
++ \
++ if (OutSide) \
++ {\
++ *OutSide = Memory; \
++ }\
++ else \
++ {\
++ CommandBuffer->currentByteSize = (gctUINT32)((gctUINT8_PTR)Memory - \
++ (gctUINT8_PTR)CommandBuffer->buffer); \
++ \
++ gcmONERROR(gcoBUFFER_EndTEMPCMDBUF(Hardware->buffer));\
++ }\
++ gcmUNSETLOADSTATEBASE()\
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, Count) \
++{ \
++ gcmVERIFYLOADSTATEALIGNED(CommandBuffer,Memory);\
++ gcmASSERT((gctUINT32)Count <= 1024); \
++ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmENDSTATEBATCH_NEW(CommandBuffer, Memory) \
++ gcmVERIFYLOADSTATEALIGNED(CommandBuffer,Memory);
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSTATEDATA_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, Address, 0, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSTATEDATAWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, Address, Mask, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++
++#define gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETFILLER_NEW(CommandBuffer, Memory) \
++{ \
++ Memory += 1; \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSINGLESTATE_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLESTATEWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATAWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++
++#define gcmSETSINGLECTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, Address, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++
++
++#define gcmSETSEMASTALLPIPE_NEW(StateDelta, CommandBuffer, Memory, Data) \
++{ \
++ gcmSETSINGLESTATE_NEW(StateDelta, CommandBuffer, Memory, gcvFALSE, AQSemaphoreRegAddrs, Data); \
++ \
++ *Memory++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \
++ \
++ *Memory++ = Data; \
++ \
++ gcmDUMP(gcvNULL, "#[stall 0x%08X 0x%08X]", \
++ gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END), \
++ gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE)); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETSTARTDECOMMAND_NEW(CommandBuffer, Memory, Count) \
++{ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \
++ \
++ *Memory++ = 0xDEADDEED; \
++ \
++}
++
++#define gcmSETSTATEDATA_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSTATEDATAWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSINGLESTATE_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLESTATEWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATAWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++#define gcmSETSTATEDATA_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ gcmSAFECASTSIZET(__temp_data32__, Data); \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSTATEDATAWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSINGLESTATE_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLESTATEWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATAWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmDEFINESTATEBUFFER_NEW_FAST(CommandBuffer, Memory) \
++ gcmDEFINESECUREUSER() \
++ gcmDEFINELOADSTATEBASE() \
++ gcsTEMPCMDBUF CommandBuffer = gcvNULL; \
++ gctUINT32_PTR Memory;
++
++#define gcmDEFINESTATEBUFFER_FAST(CommandBuffer, Memory, ReserveSize) \
++ gcmDEFINESECUREUSER() \
++ gctSIZE_T ReserveSize; \
++ gcoCMDBUF CommandBuffer; \
++ gctUINT32_PTR Memory;
++
++#define gcmBEGINSTATEBUFFER_FAST(Hardware, CommandBuffer, Memory, ReserveSize) \
++{ \
++ gcmONERROR(gcoBUFFER_Reserve( \
++ Hardware->buffer, ReserveSize, gcvTRUE, &CommandBuffer \
++ )); \
++ \
++ Memory = (gctUINT32_PTR) gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \
++ \
++ gcmBEGINSECUREUSER(); \
++}
++
++#define gcmBEGINSTATEBUFFER_NEW_FAST(Hardware, CommandBuffer, Memory, OutSide) \
++{ \
++ if (OutSide) \
++ {\
++ Memory = (gctUINT32_PTR)*OutSide; \
++ }\
++ else \
++ {\
++ gcmONERROR(gcoBUFFER_StartTEMPCMDBUF( \
++ Hardware->buffer, &CommandBuffer \
++ ));\
++ \
++ Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \
++ \
++ }\
++ \
++ gcmBEGINSECUREUSER(); \
++ gcmSETLOADSTATEBASE(CommandBuffer,OutSide);\
++}
++/*******************************************************************************
++**
++** gcmCONFIGUREUNIFORMS
++**
++** Configure uniforms according to chip and numConstants.
++*/
++#if !gcdENABLE_UNIFIED_CONSTANT
++#define gcmCONFIGUREUNIFORMS(ChipModel, ChipRevision, NumConstants, \
++ UnifiedConst, VsConstBase, PsConstBase, VsConstMax, PsConstMax, ConstMax) \
++{ \
++ if (ChipModel == gcv2000 && ChipRevision == 0x5118) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 64; \
++ ConstMax = 320; \
++ } \
++ else if (NumConstants == 320) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 64; \
++ ConstMax = 320; \
++ } \
++ /* All GC1000 series chips can only support 64 uniforms for ps on non-unified const mode. */ \
++ else if (NumConstants > 256 && ChipModel == gcv1000) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 64; \
++ ConstMax = 320; \
++ } \
++ else if (NumConstants > 256) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 256; \
++ ConstMax = 512; \
++ } \
++ else if (NumConstants == 256) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 256; \
++ ConstMax = 512; \
++ } \
++ else \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 168; \
++ PsConstMax = 64; \
++ ConstMax = 232; \
++ } \
++}
++#else
++#define gcmCONFIGUREUNIFORMS(ChipModel, ChipRevision, NumConstants, \
++ UnifiedConst, VsConstBase, PsConstBase, VsConstMax, PsConstMax, ConstMax) \
++{ \
++ if (NumConstants > 256) \
++ { \
++ UnifiedConst = gcvTRUE; \
++ VsConstBase = gcregSHUniformsRegAddrs; \
++ PsConstBase = gcregSHUniformsRegAddrs; \
++ ConstMax = NumConstants; \
++ VsConstMax = 256; \
++ PsConstMax = ConstMax - VsConstMax; \
++ } \
++ else if (NumConstants == 256) \
++ { \
++ if (ChipModel == gcv2000 && ChipRevision == 0x5118) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 64; \
++ ConstMax = 320; \
++ } \
++ else \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 256; \
++ ConstMax = 512; \
++ } \
++ } \
++ else \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 168; \
++ PsConstMax = 64; \
++ ConstMax = 232; \
++ } \
++}
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_base_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1136 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_driver_h_
++#define __gc_hal_driver_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_driver_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++#define IOCTL_GCHAL_KERNEL_INTERFACE 30001
++#define IOCTL_GCHAL_TERMINATE 30002
++
++#undef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++typedef enum _gceHAL_COMMAND_CODES
++{
++ /* Generic query. */
++ gcvHAL_QUERY_VIDEO_MEMORY,
++ gcvHAL_QUERY_CHIP_IDENTITY,
++
++ /* Contiguous memory. */
++ gcvHAL_ALLOCATE_NON_PAGED_MEMORY,
++ gcvHAL_FREE_NON_PAGED_MEMORY,
++ gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY,
++ gcvHAL_FREE_CONTIGUOUS_MEMORY,
++
++ /* Video memory allocation. */
++ gcvHAL_ALLOCATE_VIDEO_MEMORY, /* Enforced alignment. */
++ gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY, /* No alignment. */
++ gcvHAL_RELEASE_VIDEO_MEMORY,
++
++ /* Physical-to-logical mapping. */
++ gcvHAL_MAP_MEMORY,
++ gcvHAL_UNMAP_MEMORY,
++
++ /* Logical-to-physical mapping. */
++ gcvHAL_MAP_USER_MEMORY,
++ gcvHAL_UNMAP_USER_MEMORY,
++
++ /* Surface lock/unlock. */
++ gcvHAL_LOCK_VIDEO_MEMORY,
++ gcvHAL_UNLOCK_VIDEO_MEMORY,
++
++ /* Event queue. */
++ gcvHAL_EVENT_COMMIT,
++
++ gcvHAL_USER_SIGNAL,
++ gcvHAL_SIGNAL,
++ gcvHAL_WRITE_DATA,
++
++ gcvHAL_COMMIT,
++ gcvHAL_STALL,
++
++ gcvHAL_READ_REGISTER,
++ gcvHAL_WRITE_REGISTER,
++
++ gcvHAL_GET_PROFILE_SETTING,
++ gcvHAL_SET_PROFILE_SETTING,
++
++ gcvHAL_READ_ALL_PROFILE_REGISTERS,
++ gcvHAL_PROFILE_REGISTERS_2D,
++#if VIVANTE_PROFILER_PERDRAW
++ gcvHAL_READ_PROFILER_REGISTER_SETTING,
++#endif
++
++ /* Power management. */
++ gcvHAL_SET_POWER_MANAGEMENT_STATE,
++ gcvHAL_QUERY_POWER_MANAGEMENT_STATE,
++
++ gcvHAL_GET_BASE_ADDRESS,
++
++ gcvHAL_SET_IDLE, /* reserved */
++
++ /* Queries. */
++ gcvHAL_QUERY_KERNEL_SETTINGS,
++
++ /* Reset. */
++ gcvHAL_RESET,
++
++ /* Map physical address into handle. */
++ gcvHAL_MAP_PHYSICAL,
++
++ /* Debugger stuff. */
++ gcvHAL_DEBUG,
++
++ /* Cache stuff. */
++ gcvHAL_CACHE,
++
++ /* TimeStamp */
++ gcvHAL_TIMESTAMP,
++
++ /* Database. */
++ gcvHAL_DATABASE,
++
++ /* Version. */
++ gcvHAL_VERSION,
++
++ /* Chip info */
++ gcvHAL_CHIP_INFO,
++
++ /* Process attaching/detaching. */
++ gcvHAL_ATTACH,
++ gcvHAL_DETACH,
++
++ /* Composition. */
++ gcvHAL_COMPOSE,
++
++ /* Set timeOut value */
++ gcvHAL_SET_TIMEOUT,
++
++ /* Frame database. */
++ gcvHAL_GET_FRAME_INFO,
++
++ gcvHAL_QUERY_COMMAND_BUFFER,
++
++ gcvHAL_COMMIT_DONE,
++
++ /* GPU and event dump */
++ gcvHAL_DUMP_GPU_STATE,
++ gcvHAL_DUMP_EVENT,
++
++ /* Virtual command buffer. */
++ gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER,
++ gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER,
++
++ /* FSCALE_VAL. */
++ gcvHAL_SET_FSCALE_VALUE,
++ gcvHAL_GET_FSCALE_VALUE,
++
++ gcvHAL_NAME_VIDEO_MEMORY,
++ gcvHAL_IMPORT_VIDEO_MEMORY,
++
++ /* Reset time stamp. */
++ gcvHAL_QUERY_RESET_TIME_STAMP,
++
++ /* Multi-GPU read/write. */
++ gcvHAL_READ_REGISTER_EX,
++ gcvHAL_WRITE_REGISTER_EX,
++
++ /* Sync point operations. */
++ gcvHAL_SYNC_POINT,
++
++ /* Create native fence and return its fd. */
++ gcvHAL_CREATE_NATIVE_FENCE,
++
++ /* Destory MMU. */
++ gcvHAL_DESTROY_MMU,
++
++ /* Shared buffer. */
++ gcvHAL_SHBUF,
++
++ /* Config power management. */
++ gcvHAL_CONFIG_POWER_MANAGEMENT,
++
++ /* Connect a video node to an OS native fd. */
++ gcvHAL_GET_VIDEO_MEMORY_FD,
++}
++gceHAL_COMMAND_CODES;
++
++/******************************************************************************\
++****************************** Interface Structure *****************************
++\******************************************************************************/
++
++#define gcdMAX_PROFILE_FILE_NAME 128
++
++/* Kernel settings. */
++typedef struct _gcsKERNEL_SETTINGS
++{
++ /* Used RealTime signal between kernel and user. */
++ gctINT signal;
++}
++gcsKERNEL_SETTINGS;
++
++
++/* gcvHAL_QUERY_CHIP_IDENTITY */
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY * gcsHAL_QUERY_CHIP_IDENTITY_PTR;
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY
++{
++
++ /* Chip model. */
++ gceCHIPMODEL chipModel;
++
++ /* Revision value.*/
++ gctUINT32 chipRevision;
++
++ /* Supported feature fields. */
++ gctUINT32 chipFeatures;
++
++ /* Supported minor feature fields. */
++ gctUINT32 chipMinorFeatures;
++
++ /* Supported minor feature 1 fields. */
++ gctUINT32 chipMinorFeatures1;
++
++ /* Supported minor feature 2 fields. */
++ gctUINT32 chipMinorFeatures2;
++
++ /* Supported minor feature 3 fields. */
++ gctUINT32 chipMinorFeatures3;
++
++ /* Supported minor feature 4 fields. */
++ gctUINT32 chipMinorFeatures4;
++
++ /* Supported minor feature 5 fields. */
++ gctUINT32 chipMinorFeatures5;
++
++ /* Number of streams supported. */
++ gctUINT32 streamCount;
++
++ /* Total number of temporary registers per thread. */
++ gctUINT32 registerMax;
++
++ /* Maximum number of threads. */
++ gctUINT32 threadCount;
++
++ /* Number of shader cores. */
++ gctUINT32 shaderCoreCount;
++
++ /* Size of the vertex cache. */
++ gctUINT32 vertexCacheSize;
++
++ /* Number of entries in the vertex output buffer. */
++ gctUINT32 vertexOutputBufferSize;
++
++ /* Number of pixel pipes. */
++ gctUINT32 pixelPipes;
++
++ /* Number of instructions. */
++ gctUINT32 instructionCount;
++
++ /* Number of constants. */
++ gctUINT32 numConstants;
++
++ /* Buffer size */
++ gctUINT32 bufferSize;
++
++ /* Number of varyings */
++ gctUINT32 varyingsCount;
++
++ /* Supertile layout style in hardware */
++ gctUINT32 superTileMode;
++
++#if gcdMULTI_GPU
++ /* Number of 3D GPUs */
++ gctUINT32 gpuCoreCount;
++#endif
++
++ /* Special control bits for 2D chip. */
++ gctUINT32 chip2DControl;
++
++ /* Product ID */
++ gctUINT32 productID;
++}
++gcsHAL_QUERY_CHIP_IDENTITY;
++
++/* gcvHAL_COMPOSE. */
++typedef struct _gcsHAL_COMPOSE * gcsHAL_COMPOSE_PTR;
++typedef struct _gcsHAL_COMPOSE
++{
++ /* Composition state buffer. */
++ gctUINT64 physical;
++ gctUINT64 logical;
++ gctUINT offset;
++ gctUINT size;
++
++ /* Composition end signal. */
++ gctUINT64 process;
++ gctUINT64 signal;
++
++ /* User signals. */
++ gctUINT64 userProcess;
++ gctUINT64 userSignal1;
++ gctUINT64 userSignal2;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. */
++ gctINT32 coid;
++
++ /* Set by server. */
++ gctINT32 rcvid;
++#endif
++}
++gcsHAL_COMPOSE;
++
++
++typedef struct _gcsHAL_INTERFACE
++{
++ /* Command code. */
++ gceHAL_COMMAND_CODES command;
++
++ /* Hardware type. */
++ gceHARDWARE_TYPE hardwareType;
++
++ /* Status value. */
++ gceSTATUS status;
++
++ /* Handle to this interface channel. */
++ gctUINT64 handle;
++
++ /* Pid of the client. */
++ gctUINT32 pid;
++
++ /* Union of command structures. */
++ union _u
++ {
++ /* gcvHAL_GET_BASE_ADDRESS */
++ struct _gcsHAL_GET_BASE_ADDRESS
++ {
++ /* Physical memory address of internal memory. */
++ OUT gctUINT32 baseAddress;
++ }
++ GetBaseAddress;
++
++ /* gcvHAL_QUERY_VIDEO_MEMORY */
++ struct _gcsHAL_QUERY_VIDEO_MEMORY
++ {
++ /* Physical memory address of internal memory. Just a name. */
++ OUT gctUINT32 internalPhysical;
++
++ /* Size in bytes of internal memory. */
++ OUT gctUINT64 internalSize;
++
++ /* Physical memory address of external memory. Just a name. */
++ OUT gctUINT32 externalPhysical;
++
++ /* Size in bytes of external memory.*/
++ OUT gctUINT64 externalSize;
++
++ /* Physical memory address of contiguous memory. Just a name. */
++ OUT gctUINT32 contiguousPhysical;
++
++ /* Size in bytes of contiguous memory.*/
++ OUT gctUINT64 contiguousSize;
++ }
++ QueryVideoMemory;
++
++ /* gcvHAL_QUERY_CHIP_IDENTITY */
++ gcsHAL_QUERY_CHIP_IDENTITY QueryChipIdentity;
++
++ /* gcvHAL_MAP_MEMORY */
++ struct _gcsHAL_MAP_MEMORY
++ {
++ /* Physical memory address to map. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to map. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory. */
++ OUT gctUINT64 logical;
++ }
++ MapMemory;
++
++ /* gcvHAL_UNMAP_MEMORY */
++ struct _gcsHAL_UNMAP_MEMORY
++ {
++ /* Physical memory address to unmap. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to unmap. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory to unmap. */
++ IN gctUINT64 logical;
++ }
++ UnmapMemory;
++
++ /* gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT bytes;
++
++ /* Buffer alignment. */
++ IN gctUINT alignment;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Flag of allocation. */
++ IN gctUINT32 flag;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory. */
++ OUT gctUINT32 node;
++ }
++ AllocateLinearVideoMemory;
++
++ /* gcvHAL_ALLOCATE_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_VIDEO_MEMORY
++ {
++ /* Width of rectangle to allocate. */
++ IN OUT gctUINT width;
++
++ /* Height of rectangle to allocate. */
++ IN OUT gctUINT height;
++
++ /* Depth of rectangle to allocate. */
++ IN gctUINT depth;
++
++ /* Format rectangle to allocate in gceSURF_FORMAT. */
++ IN gceSURF_FORMAT format;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory. */
++ OUT gctUINT32 node;
++ }
++ AllocateVideoMemory;
++
++ /* gcvHAL_RELEASE_VIDEO_MEMORY */
++ struct _gcsHAL_RELEASE_VIDEO_MEMORY
++ {
++ /* Allocated video memory. */
++ IN gctUINT32 node;
++
++#ifdef __QNXNTO__
++/* TODO: This is part of the unlock - why is it here? */
++ /* Mapped logical address to unmap in user space. */
++ OUT gctUINT64 memory;
++
++ /* Number of bytes to allocated. */
++ OUT gctUINT64 bytes;
++#endif
++ }
++ ReleaseVideoMemory;
++
++ /* gcvHAL_LOCK_VIDEO_MEMORY */
++ struct _gcsHAL_LOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory. */
++ IN gctUINT32 node;
++
++ /* Cache configuration. */
++ /* Only gcvPOOL_CONTIGUOUS and gcvPOOL_VIRUTAL
++ ** can be configured */
++ IN gctBOOL cacheable;
++
++ /* Hardware specific address. */
++ OUT gctUINT32 address;
++
++ /* Mapped logical address. */
++ OUT gctUINT64 memory;
++
++ /* Customer priviate handle*/
++ OUT gctUINT32 gid;
++
++ /* Bus address of a contiguous video node. */
++ OUT gctUINT64 physicalAddress;
++ }
++ LockVideoMemory;
++
++ /* gcvHAL_UNLOCK_VIDEO_MEMORY */
++ struct _gcsHAL_UNLOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++
++ /* Type of surface. */
++ IN gceSURF_TYPE type;
++
++ /* Flag to unlock surface asynchroneously. */
++ IN OUT gctBOOL asynchroneous;
++ }
++ UnlockVideoMemory;
++
++ /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */
++ struct _gcsHAL_ALLOCATE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateNonPagedMemory;
++
++ /* gcvHAL_FREE_NON_PAGED_MEMORY */
++ struct _gcsHAL_FREE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeNonPagedMemory;
++
++ /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */
++ struct _gcsHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateVirtualCommandBuffer;
++
++ /* gcvHAL_FREE_NON_PAGED_MEMORY */
++ struct _gcsHAL_FREE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeVirtualCommandBuffer;
++
++ /* gcvHAL_EVENT_COMMIT. */
++ struct _gcsHAL_EVENT_COMMIT
++ {
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++
++#if gcdMULTI_GPU
++ IN gceCORE_3D_MASK chipEnable;
++
++ IN gceMULTI_GPU_MODE gpuMode;
++#endif
++ }
++ Event;
++
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_COMMIT
++ {
++ /* Context buffer object gckCONTEXT. */
++ IN gctUINT64 context;
++
++ /* Command buffer gcoCMDBUF. */
++ IN gctUINT64 commandBuffer;
++
++ /* State delta buffer in gcsSTATE_DELTA. */
++ gctUINT64 delta;
++
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++
++#if gcdMULTI_GPU
++ IN gceCORE_3D_MASK chipEnable;
++
++ IN gceMULTI_GPU_MODE gpuMode;
++#endif
++ }
++ Commit;
++
++ /* gcvHAL_MAP_USER_MEMORY */
++ struct _gcsHAL_MAP_USER_MEMORY
++ {
++ /* Base address of user memory to map. */
++ IN gctUINT64 memory;
++
++ /* Physical address of user memory to map. */
++ IN gctUINT32 physical;
++
++ /* Size of user memory in bytes to map. */
++ IN gctUINT64 size;
++
++ /* Info record required by gcvHAL_UNMAP_USER_MEMORY. Just a name. */
++ OUT gctUINT32 info;
++
++ /* Physical address of mapped memory. */
++ OUT gctUINT32 address;
++ }
++ MapUserMemory;
++
++ /* gcvHAL_UNMAP_USER_MEMORY */
++ struct _gcsHAL_UNMAP_USER_MEMORY
++ {
++ /* Base address of user memory to unmap. */
++ IN gctUINT64 memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctUINT64 size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. Just a name. */
++ IN gctUINT32 info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++ }
++ UnmapUserMemory;
++#if !USE_NEW_LINUX_SIGNAL
++ /* gcsHAL_USER_SIGNAL */
++ struct _gcsHAL_USER_SIGNAL
++ {
++ /* Command. */
++ gceUSER_SIGNAL_COMMAND_CODES command;
++
++ /* Signal ID. */
++ IN OUT gctINT id;
++
++ /* Reset mode. */
++ IN gctBOOL manualReset;
++
++ /* Wait timedout. */
++ IN gctUINT32 wait;
++
++ /* State. */
++ IN gctBOOL state;
++ }
++ UserSignal;
++#endif
++
++ /* gcvHAL_SIGNAL. */
++ struct _gcsHAL_SIGNAL
++ {
++ /* Signal handle to signal gctSIGNAL. */
++ IN gctUINT64 signal;
++
++ /* Reserved gctSIGNAL. */
++ IN gctUINT64 auxSignal;
++
++ /* Process owning the signal gctHANDLE. */
++ IN gctUINT64 process;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. Set by client in gcoOS_CreateSignal. */
++ IN gctINT32 coid;
++
++ /* Set by server. */
++ IN gctINT32 rcvid;
++#endif
++ /* Event generated from where of pipeline */
++ IN gceKERNEL_WHERE fromWhere;
++ }
++ Signal;
++
++ /* gcvHAL_WRITE_DATA. */
++ struct _gcsHAL_WRITE_DATA
++ {
++ /* Address to write data to. */
++ IN gctUINT32 address;
++
++ /* Data to write. */
++ IN gctUINT32 data;
++ }
++ WriteData;
++
++ /* gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_ALLOCATE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Hardware address of allocation. */
++ OUT gctUINT32 address;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateContiguousMemory;
++
++ /* gcvHAL_FREE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_FREE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeContiguousMemory;
++
++ /* gcvHAL_READ_REGISTER */
++ struct _gcsHAL_READ_REGISTER
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++ /* Data read. */
++ OUT gctUINT32 data;
++ }
++ ReadRegisterData;
++
++ /* gcvHAL_WRITE_REGISTER */
++ struct _gcsHAL_WRITE_REGISTER
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++ /* Data read. */
++ IN gctUINT32 data;
++ }
++ WriteRegisterData;
++
++#if gcdMULTI_GPU
++ /* gcvHAL_READ_REGISTER_EX */
++ struct _gcsHAL_READ_REGISTER_EX
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++ IN gctUINT32 coreSelect;
++
++ /* Data read. */
++ OUT gctUINT32 data[gcdMULTI_GPU];
++ }
++ ReadRegisterDataEx;
++
++ /* gcvHAL_WRITE_REGISTER_EX */
++ struct _gcsHAL_WRITE_REGISTER_EX
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++ IN gctUINT32 coreSelect;
++
++ /* Data read. */
++ IN gctUINT32 data[gcdMULTI_GPU];
++ }
++ WriteRegisterDataEx;
++#endif
++
++#if VIVANTE_PROFILER
++ /* gcvHAL_GET_PROFILE_SETTING */
++ struct _gcsHAL_GET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ OUT gctBOOL enable;
++ }
++ GetProfileSetting;
++
++ /* gcvHAL_SET_PROFILE_SETTING */
++ struct _gcsHAL_SET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ IN gctBOOL enable;
++ }
++ SetProfileSetting;
++
++#if VIVANTE_PROFILER_PERDRAW
++ /* gcvHAL_READ_PROFILER_REGISTER_SETTING */
++ struct _gcsHAL_READ_PROFILER_REGISTER_SETTING
++ {
++ /*Should Clear Register*/
++ IN gctBOOL bclear;
++ }
++ SetProfilerRegisterClear;
++#endif
++
++ /* gcvHAL_READ_ALL_PROFILE_REGISTERS */
++ struct _gcsHAL_READ_ALL_PROFILE_REGISTERS
++ {
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++#endif
++
++ /* Data read. */
++ OUT gcsPROFILER_COUNTERS counters;
++ }
++ RegisterProfileData;
++
++ /* gcvHAL_PROFILE_REGISTERS_2D */
++ struct _gcsHAL_PROFILE_REGISTERS_2D
++ {
++ /* Data read in gcs2D_PROFILE. */
++ OUT gctUINT64 hwProfile2D;
++ }
++ RegisterProfileData2D;
++#endif
++
++ /* Power management. */
++ /* gcvHAL_SET_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_SET_POWER_MANAGEMENT
++ {
++ /* Data read. */
++ IN gceCHIPPOWERSTATE state;
++ }
++ SetPowerManagement;
++
++ /* gcvHAL_QUERY_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_QUERY_POWER_MANAGEMENT
++ {
++ /* Data read. */
++ OUT gceCHIPPOWERSTATE state;
++
++ /* Idle query. */
++ OUT gctBOOL isIdle;
++ }
++ QueryPowerManagement;
++
++ /* gcvHAL_QUERY_KERNEL_SETTINGS */
++ struct _gcsHAL_QUERY_KERNEL_SETTINGS
++ {
++ /* Settings.*/
++ OUT gcsKERNEL_SETTINGS settings;
++ }
++ QueryKernelSettings;
++
++ /* gcvHAL_MAP_PHYSICAL */
++ struct _gcsHAL_MAP_PHYSICAL
++ {
++ /* gcvTRUE to map, gcvFALSE to unmap. */
++ IN gctBOOL map;
++
++ /* Physical address. */
++ IN OUT gctUINT64 physical;
++ }
++ MapPhysical;
++
++ /* gcvHAL_DEBUG */
++ struct _gcsHAL_DEBUG
++ {
++ /* If gcvTRUE, set the debug information. */
++ IN gctBOOL set;
++ IN gctUINT32 level;
++ IN gctUINT32 zones;
++ IN gctBOOL enable;
++
++ IN gceDEBUG_MESSAGE_TYPE type;
++ IN gctUINT32 messageSize;
++
++ /* Message to print if not empty. */
++ IN gctCHAR message[80];
++ }
++ Debug;
++
++ /* gcvHAL_CACHE */
++ struct _gcsHAL_CACHE
++ {
++ IN gceCACHEOPERATION operation;
++ IN gctUINT64 process;
++ IN gctUINT64 logical;
++ IN gctUINT64 bytes;
++ IN gctUINT32 node;
++ }
++ Cache;
++
++ /* gcvHAL_TIMESTAMP */
++ struct _gcsHAL_TIMESTAMP
++ {
++ /* Timer select. */
++ IN gctUINT32 timer;
++
++ /* Timer request type (0-stop, 1-start, 2-send delta). */
++ IN gctUINT32 request;
++
++ /* Result of delta time in microseconds. */
++ OUT gctINT32 timeDelta;
++ }
++ TimeStamp;
++
++ /* gcvHAL_DATABASE */
++ struct _gcsHAL_DATABASE
++ {
++ /* Set to gcvTRUE if you want to query a particular process ID.
++ ** Set to gcvFALSE to query the last detached process. */
++ IN gctBOOL validProcessID;
++
++ /* Process ID to query. */
++ IN gctUINT32 processID;
++
++ /* Information. */
++ OUT gcuDATABASE_INFO vidMem;
++ OUT gcuDATABASE_INFO nonPaged;
++ OUT gcuDATABASE_INFO contiguous;
++ OUT gcuDATABASE_INFO gpuIdle;
++
++ /* Detail information about video memory. */
++ OUT gcuDATABASE_INFO vidMemPool[3];
++ }
++ Database;
++
++ /* gcvHAL_VERSION */
++ struct _gcsHAL_VERSION
++ {
++ /* Major version: N.n.n. */
++ OUT gctINT32 major;
++
++ /* Minor version: n.N.n. */
++ OUT gctINT32 minor;
++
++ /* Patch version: n.n.N. */
++ OUT gctINT32 patch;
++
++ /* Build version. */
++ OUT gctUINT32 build;
++ }
++ Version;
++
++ /* gcvHAL_CHIP_INFO */
++ struct _gcsHAL_CHIP_INFO
++ {
++ /* Chip count. */
++ OUT gctINT32 count;
++
++ /* Chip types. */
++ OUT gceHARDWARE_TYPE types[gcdCHIP_COUNT];
++ }
++ ChipInfo;
++
++ /* gcvHAL_ATTACH */
++ struct _gcsHAL_ATTACH
++ {
++ /* Handle of context buffer object. */
++ OUT gctUINT32 context;
++
++ /* Number of states in the buffer. */
++ OUT gctUINT64 stateCount;
++
++ /* Map context buffer to user or not. */
++ IN gctBOOL map;
++
++ /* Physical of context buffer. */
++ OUT gctUINT32 physicals[2];
++
++ /* Physical of context buffer. */
++ OUT gctUINT64 logicals[2];
++
++ /* Bytes of context buffer. */
++ OUT gctUINT32 bytes;
++ }
++ Attach;
++
++ /* gcvHAL_DETACH */
++ struct _gcsHAL_DETACH
++ {
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++ }
++ Detach;
++
++ /* gcvHAL_COMPOSE. */
++ gcsHAL_COMPOSE Compose;
++
++ /* gcvHAL_GET_FRAME_INFO. */
++ struct _gcsHAL_GET_FRAME_INFO
++ {
++ /* gcsHAL_FRAME_INFO* */
++ OUT gctUINT64 frameInfo;
++ }
++ GetFrameInfo;
++
++ /* gcvHAL_SET_TIME_OUT. */
++ struct _gcsHAL_SET_TIMEOUT
++ {
++ gctUINT32 timeOut;
++ }
++ SetTimeOut;
++
++#if gcdENABLE_VG
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_VGCOMMIT
++ {
++ /* Context buffer. gcsVGCONTEXT_PTR */
++ IN gctUINT64 context;
++
++ /* Command queue. gcsVGCMDQUEUE_PTR */
++ IN gctUINT64 queue;
++
++ /* Number of entries in the queue. */
++ IN gctUINT entryCount;
++
++ /* Task table. gcsTASK_MASTER_TABLE_PTR */
++ IN gctUINT64 taskTable;
++ }
++ VGCommit;
++
++ /* gcvHAL_QUERY_COMMAND_BUFFER */
++ struct _gcsHAL_QUERY_COMMAND_BUFFER
++ {
++ /* Command buffer attributes. */
++ OUT gcsCOMMAND_BUFFER_INFO information;
++ }
++ QueryCommandBuffer;
++
++#endif
++
++ struct _gcsHAL_SET_FSCALE_VALUE
++ {
++ IN gctUINT value;
++ }
++ SetFscaleValue;
++
++ struct _gcsHAL_GET_FSCALE_VALUE
++ {
++ OUT gctUINT value;
++ OUT gctUINT minValue;
++ OUT gctUINT maxValue;
++ }
++ GetFscaleValue;
++
++ struct _gcsHAL_NAME_VIDEO_MEMORY
++ {
++ IN gctUINT32 handle;
++ OUT gctUINT32 name;
++ }
++ NameVideoMemory;
++
++ struct _gcsHAL_IMPORT_VIDEO_MEMORY
++ {
++ IN gctUINT32 name;
++ OUT gctUINT32 handle;
++ }
++ ImportVideoMemory;
++
++ struct _gcsHAL_QUERY_RESET_TIME_STAMP
++ {
++ OUT gctUINT64 timeStamp;
++ }
++ QueryResetTimeStamp;
++
++ struct _gcsHAL_SYNC_POINT
++ {
++ /* Command. */
++ gceSYNC_POINT_COMMAND_CODES command;
++
++ /* Sync point. */
++ IN OUT gctUINT64 syncPoint;
++
++ /* From where. */
++ IN gceKERNEL_WHERE fromWhere;
++
++ /* Signaled state. */
++ OUT gctBOOL state;
++ }
++ SyncPoint;
++
++ struct _gcsHAL_CREATE_NATIVE_FENCE
++ {
++ /* Signal id to dup. */
++ IN gctUINT64 syncPoint;
++
++ /* Native fence file descriptor. */
++ OUT gctINT fenceFD;
++
++ }
++ CreateNativeFence;
++
++ struct _gcsHAL_DESTROY_MMU
++ {
++ /* Mmu object. */
++ IN gctUINT64 mmu;
++ }
++ DestroyMmu;
++
++ struct _gcsHAL_SHBUF
++ {
++ gceSHBUF_COMMAND_CODES command;
++
++ /* Shared buffer. */
++ IN OUT gctUINT64 id;
++
++ /* User data to be shared. */
++ IN gctUINT64 data;
++
++ /* Data size. */
++ IN OUT gctUINT32 bytes;
++ }
++ ShBuf;
++
++ struct _gcsHAL_CONFIG_POWER_MANAGEMENT
++ {
++ IN gctBOOL enable;
++ }
++ ConfigPowerManagement;
++
++ struct _gcsHAL_GET_VIDEO_MEMORY_FD
++ {
++ IN gctUINT32 handle;
++ OUT gctINT fd;
++ }
++ GetVideoMemoryFd;
++ }
++ u;
++}
++gcsHAL_INTERFACE;
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver_vg.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,270 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_driver_vg_h_
++#define __gc_hal_driver_vg_h_
++
++
++
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++/******************************************************************************\
++********************* Command buffer information structure. ********************
++\******************************************************************************/
++
++typedef struct _gcsCOMMAND_BUFFER_INFO * gcsCOMMAND_BUFFER_INFO_PTR;
++typedef struct _gcsCOMMAND_BUFFER_INFO
++{
++ /* FE command buffer interrupt ID. */
++ gctINT32 feBufferInt;
++
++ /* TS overflow interrupt ID. */
++ gctINT32 tsOverflowInt;
++
++ /* Alignment and mask for the buffer address. */
++ gctUINT addressMask;
++ gctUINT32 addressAlignment;
++
++ /* Alignment for each command. */
++ gctUINT32 commandAlignment;
++
++ /* Number of bytes required by the STATE command. */
++ gctUINT32 stateCommandSize;
++
++ /* Number of bytes required by the RESTART command. */
++ gctUINT32 restartCommandSize;
++
++ /* Number of bytes required by the FETCH command. */
++ gctUINT32 fetchCommandSize;
++
++ /* Number of bytes required by the CALL command. */
++ gctUINT32 callCommandSize;
++
++ /* Number of bytes required by the RETURN command. */
++ gctUINT32 returnCommandSize;
++
++ /* Number of bytes required by the EVENT command. */
++ gctUINT32 eventCommandSize;
++
++ /* Number of bytes required by the END command. */
++ gctUINT32 endCommandSize;
++
++ /* Number of bytes reserved at the tail of a static command buffer. */
++ gctUINT32 staticTailSize;
++
++ /* Number of bytes reserved at the tail of a dynamic command buffer. */
++ gctUINT32 dynamicTailSize;
++}
++gcsCOMMAND_BUFFER_INFO;
++
++/******************************************************************************\
++******************************** Task Structures *******************************
++\******************************************************************************/
++
++typedef enum _gceTASK
++{
++ gcvTASK_LINK,
++ gcvTASK_CLUSTER,
++ gcvTASK_INCREMENT,
++ gcvTASK_DECREMENT,
++ gcvTASK_SIGNAL,
++ gcvTASK_LOCKDOWN,
++ gcvTASK_UNLOCK_VIDEO_MEMORY,
++ gcvTASK_FREE_VIDEO_MEMORY,
++ gcvTASK_FREE_CONTIGUOUS_MEMORY,
++ gcvTASK_UNMAP_USER_MEMORY
++}
++gceTASK;
++
++typedef struct _gcsTASK_HEADER * gcsTASK_HEADER_PTR;
++typedef struct _gcsTASK_HEADER
++{
++ /* Task ID. */
++ IN gceTASK id;
++}
++gcsTASK_HEADER;
++
++typedef struct _gcsTASK_LINK * gcsTASK_LINK_PTR;
++typedef struct _gcsTASK_LINK
++{
++ /* Task ID (gcvTASK_LINK). */
++ IN gceTASK id;
++
++ /* Pointer to the next task container. */
++ IN gctPOINTER cotainer;
++
++ /* Pointer to the next task from the next task container. */
++ IN gcsTASK_HEADER_PTR task;
++}
++gcsTASK_LINK;
++
++typedef struct _gcsTASK_CLUSTER * gcsTASK_CLUSTER_PTR;
++typedef struct _gcsTASK_CLUSTER
++{
++ /* Task ID (gcvTASK_CLUSTER). */
++ IN gceTASK id;
++
++ /* Number of tasks in the cluster. */
++ IN gctUINT taskCount;
++}
++gcsTASK_CLUSTER;
++
++typedef struct _gcsTASK_INCREMENT * gcsTASK_INCREMENT_PTR;
++typedef struct _gcsTASK_INCREMENT
++{
++ /* Task ID (gcvTASK_INCREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to increment. */
++ IN gctUINT32 address;
++}
++gcsTASK_INCREMENT;
++
++typedef struct _gcsTASK_DECREMENT * gcsTASK_DECREMENT_PTR;
++typedef struct _gcsTASK_DECREMENT
++{
++ /* Task ID (gcvTASK_DECREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to decrement. */
++ IN gctUINT32 address;
++}
++gcsTASK_DECREMENT;
++
++typedef struct _gcsTASK_SIGNAL * gcsTASK_SIGNAL_PTR;
++typedef struct _gcsTASK_SIGNAL
++{
++ /* Task ID (gcvTASK_SIGNAL). */
++ IN gceTASK id;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ IN gctINT32 coid;
++ IN gctINT32 rcvid;
++#endif
++}
++gcsTASK_SIGNAL;
++
++typedef struct _gcsTASK_LOCKDOWN * gcsTASK_LOCKDOWN_PTR;
++typedef struct _gcsTASK_LOCKDOWN
++{
++ /* Task ID (gcvTASK_LOCKDOWN). */
++ IN gceTASK id;
++
++ /* Address of the user space counter. */
++ IN gctUINT32 userCounter;
++
++ /* Address of the kernel space counter. */
++ IN gctUINT32 kernelCounter;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++}
++gcsTASK_LOCKDOWN;
++
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY * gcsTASK_UNLOCK_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_UNLOCK_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++}
++gcsTASK_UNLOCK_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY * gcsTASK_FREE_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT32 node;
++}
++gcsTASK_FREE_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY * gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_CONTIGUOUS_MEMORY). */
++ IN gceTASK id;
++
++ /* Number of bytes allocated. */
++ IN gctSIZE_T bytes;
++
++ /* Physical address of allocation. */
++ IN gctPHYS_ADDR physical;
++
++ /* Logical address of allocation. */
++ IN gctPOINTER logical;
++}
++gcsTASK_FREE_CONTIGUOUS_MEMORY;
++
++typedef struct _gcsTASK_UNMAP_USER_MEMORY * gcsTASK_UNMAP_USER_MEMORY_PTR;
++typedef struct _gcsTASK_UNMAP_USER_MEMORY
++{
++ /* Task ID (gcvTASK_UNMAP_USER_MEMORY). */
++ IN gceTASK id;
++
++ /* Base address of user memory to unmap. */
++ IN gctPOINTER memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctSIZE_T size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. */
++ IN gctPOINTER info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++}
++gcsTASK_UNMAP_USER_MEMORY;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_dump.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_dump.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_dump.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_dump.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,89 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_dump_h_
++#define __gc_hal_dump_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++** FILE LAYOUT:
++**
++** gcsDUMP_FILE structure
++**
++** gcsDUMP_DATA frame
++** gcsDUMP_DATA or gcDUMP_DATA_SIZE records rendingring the frame
++** gctUINT8 data[length]
++*/
++
++#define gcvDUMP_FILE_SIGNATURE gcmCC('g','c','D','B')
++
++typedef struct _gcsDUMP_FILE
++{
++ gctUINT32 signature; /* File signature */
++ gctSIZE_T length; /* Length of file */
++ gctUINT32 frames; /* Number of frames in file */
++}
++gcsDUMP_FILE;
++
++typedef enum _gceDUMP_TAG
++{
++ gcvTAG_SURFACE = gcmCC('s','u','r','f'),
++ gcvTAG_FRAME = gcmCC('f','r','m',' '),
++ gcvTAG_COMMAND = gcmCC('c','m','d',' '),
++ gcvTAG_INDEX = gcmCC('i','n','d','x'),
++ gcvTAG_STREAM = gcmCC('s','t','r','m'),
++ gcvTAG_TEXTURE = gcmCC('t','e','x','t'),
++ gcvTAG_RENDER_TARGET = gcmCC('r','n','d','r'),
++ gcvTAG_DEPTH = gcmCC('z','b','u','f'),
++ gcvTAG_RESOLVE = gcmCC('r','s','l','v'),
++ gcvTAG_DELETE = gcmCC('d','e','l',' '),
++ gcvTAG_BUFOBJ = gcmCC('b','u','f','o'),
++}
++gceDUMP_TAG;
++
++typedef struct _gcsDUMP_SURFACE
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctUINT32 address; /* Address of the surface. */
++ gctINT16 width; /* Width of surface. */
++ gctINT16 height; /* Height of surface. */
++ gceSURF_FORMAT format; /* Surface pixel format. */
++ gctSIZE_T length; /* Number of bytes inside the surface. */
++}
++gcsDUMP_SURFACE;
++
++typedef struct _gcsDUMP_DATA
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctSIZE_T length; /* Number of bytes of data. */
++ gctUINT32 address; /* Address for the data. */
++}
++gcsDUMP_DATA;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_dump_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,672 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_eglplatform_h_
++#define __gc_hal_eglplatform_h_
++
++/* Include VDK types. */
++#include "gc_hal_types.h"
++#include "gc_hal_base.h"
++#include "gc_hal_eglplatform_type.h"
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__)
++/* Win32 and Windows CE platforms. */
++#include <windows.h>
++typedef HDC HALNativeDisplayType;
++typedef HWND HALNativeWindowType;
++typedef HBITMAP HALNativePixmapType;
++
++typedef struct __BITFIELDINFO{
++ BITMAPINFO bmi;
++ RGBQUAD bmiColors[2];
++} BITFIELDINFO;
++
++#elif defined(LINUX) && defined(EGL_API_DFB) && !defined(__APPLE__)
++#include <directfb.h>
++typedef struct _DFBDisplay * HALNativeDisplayType;
++typedef struct _DFBWindow * HALNativeWindowType;
++typedef struct _DFBPixmap * HALNativePixmapType;
++
++#elif defined(LINUX) && defined(EGL_API_FB) && !defined(__APPLE__)
++
++#if defined(EGL_API_WL)
++
++#if defined(__GNUC__)
++# define inline __inline__ /* GNU keyword. */
++#endif
++
++/* Wayland platform. */
++#include <wayland-egl.h>
++
++#define WL_EGL_NUM_BACKBUFFERS 3
++
++typedef struct _gcsWL_VIV_BUFFER
++{
++ struct wl_resource *wl_buffer;
++ gcoSURF surface;
++ gctINT32 width, height;
++} gcsWL_VIV_BUFFER;
++
++typedef struct _gcsWL_EGL_DISPLAY
++{
++ struct wl_display* wl_display;
++ struct wl_viv* wl_viv;
++ struct wl_registry *registry;
++ struct wl_event_queue *wl_queue;
++ gctINT swapInterval;
++} gcsWL_EGL_DISPLAY;
++
++typedef struct _gcsWL_EGL_BUFFER_INFO
++{
++ gctINT32 width;
++ gctINT32 height;
++ gctINT32 stride;
++ gceSURF_FORMAT format;
++ gcuVIDMEM_NODE_PTR node;
++ gcePOOL pool;
++ gctUINT bytes;
++ gcoSURF surface;
++ gcoSURF attached_surface;
++ gctINT32 invalidate;
++ gctBOOL locked;
++} gcsWL_EGL_BUFFER_INFO;
++
++typedef struct _gcsWL_EGL_BUFFER
++{
++ struct wl_buffer* wl_buffer;
++ gcsWL_EGL_BUFFER_INFO info;
++} gcsWL_EGL_BUFFER;
++
++typedef struct _gcsWL_EGL_WINDOW_INFO
++{
++ gctINT32 dx;
++ gctINT32 dy;
++ gctUINT width;
++ gctUINT height;
++ gctINT32 attached_width;
++ gctINT32 attached_height;
++ gceSURF_FORMAT format;
++ gctUINT bpp;
++} gcsWL_EGL_WINDOW_INFO;
++
++struct wl_egl_window
++{
++ gcsWL_EGL_DISPLAY* display;
++ gcsWL_EGL_BUFFER backbuffers[WL_EGL_NUM_BACKBUFFERS];
++ gcsWL_EGL_WINDOW_INFO info;
++ gctUINT current;
++ struct wl_surface* surface;
++ struct wl_callback* frame_callback;
++};
++
++typedef void* HALNativeDisplayType;
++typedef void* HALNativeWindowType;
++typedef void* HALNativePixmapType;
++#else
++/* Linux platform for FBDEV. */
++typedef struct _FBDisplay * HALNativeDisplayType;
++typedef struct _FBWindow * HALNativeWindowType;
++typedef struct _FBPixmap * HALNativePixmapType;
++#endif
++#elif defined(__ANDROID__) || defined(ANDROID)
++
++struct egl_native_pixmap_t;
++
++#if ANDROID_SDK_VERSION >= 9
++ #include <android/native_window.h>
++
++ typedef struct ANativeWindow* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t* HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#else
++ struct android_native_window_t;
++ typedef struct android_native_window_t* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t * HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#endif
++
++#elif defined(LINUX) || defined(__APPLE__)
++/* X11 platform. */
++#include <X11/Xlib.h>
++#include <X11/Xutil.h>
++
++typedef Display * HALNativeDisplayType;
++typedef Window HALNativeWindowType;
++
++#ifdef CUSTOM_PIXMAP
++typedef void * HALNativePixmapType;
++#else
++typedef Pixmap HALNativePixmapType;
++#endif /* CUSTOM_PIXMAP */
++
++/* Rename some badly named X defines. */
++#ifdef Status
++# define XStatus int
++# undef Status
++#endif
++#ifdef Always
++# define XAlways 2
++# undef Always
++#endif
++#ifdef CurrentTime
++# undef CurrentTime
++# define XCurrentTime 0
++#endif
++
++#elif defined(__QNXNTO__)
++#include <screen/screen.h>
++
++/* VOID */
++typedef int HALNativeDisplayType;
++typedef screen_window_t HALNativeWindowType;
++typedef screen_pixmap_t HALNativePixmapType;
++
++#else
++
++#error "Platform not recognized"
++
++/* VOID */
++typedef void * HALNativeDisplayType;
++typedef void * HALNativeWindowType;
++typedef void * HALNativePixmapType;
++
++#endif
++
++/* define DUMMY according to the system */
++#if defined(EGL_API_WL)
++# define WL_DUMMY (31415926)
++# define EGL_DUMMY WL_DUMMY
++#elif defined(__ANDROID__) || defined(ANDROID)
++# define ANDROID_DUMMY (31415926)
++# define EGL_DUMMY ANDROID_DUMMY
++#else
++# define EGL_DUMMY (31415926)
++#endif
++
++/*******************************************************************************
++** Display. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_GetDisplay(
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayByIndex(
++ IN gctINT DisplayIndex,
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfo(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctSIZE_T * Physical,
++ OUT gctINT * Stride,
++ OUT gctINT * BitsPerPixel
++ );
++
++
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetNextDisplayInfoExByIndex(
++ IN gctINT Index,
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbuffer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtualEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER Context,
++ IN gcoSURF Surface,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT Interval
++);
++
++gceSTATUS
++gcoOS_SetSwapIntervalEx(
++ IN HALNativeDisplayType Display,
++ IN gctINT Interval,
++ IN gctPOINTER localDisplay);
++
++gceSTATUS
++gcoOS_GetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT_PTR Min,
++ IN gctINT_PTR Max
++);
++
++gceSTATUS
++gcoOS_DisplayBufferRegions(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT NumRects,
++ IN gctINT_PTR Rects
++ );
++
++gceSTATUS
++gcoOS_DestroyDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_InitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_DeinitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx2(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbufferEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_IsValidDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_GetNativeVisualId(
++ IN HALNativeDisplayType Display,
++ OUT gctINT* nativeVisualId
++ );
++
++gctBOOL
++gcoOS_SynchronousFlip(
++ IN HALNativeDisplayType Display
++ );
++
++/*******************************************************************************
++** Windows. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreateWindow(
++ IN HALNativeDisplayType Display,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height,
++ OUT HALNativeWindowType * Window
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset
++ );
++
++gceSTATUS
++gcoOS_DestroyWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_DrawImage(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_GetImage(
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ OUT gctINT * BitsPerPixel,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_DrawImageEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits,
++ IN gceSURF_FORMAT Format
++ );
++
++/*******************************************************************************
++** Pixmaps. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreatePixmap(
++ IN HALNativeDisplayType Display,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ OUT HALNativePixmapType * Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DrawPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_CopyPixmapBits(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctUINT DstWidth,
++ IN gctUINT DstHeight,
++ IN gctINT DstStride,
++ IN gceSURF_FORMAT DstFormat,
++ OUT gctPOINTER DstBits
++ );
++
++/*******************************************************************************
++** OS relative. ****************************************************************
++*/
++gceSTATUS
++gcoOS_LoadEGLLibrary(
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeEGLLibrary(
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_ShowWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_HideWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_SetWindowTitle(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_CapturePointer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_GetEvent(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT halEvent * Event
++ );
++
++gceSTATUS
++gcoOS_CreateClientBuffer(
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT Format,
++ IN gctINT Type,
++ OUT gctPOINTER * ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_GetClientBufferInfo(
++ IN gctPOINTER ClientBuffer,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyClientBuffer(
++ IN gctPOINTER ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_DestroyContext(
++ IN gctPOINTER Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_CreateContext(
++ IN gctPOINTER LocalDisplay,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_MakeCurrent(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType DrawDrawable,
++ IN HALNativeWindowType ReadDrawable,
++ IN gctPOINTER Context,
++ IN gcoSURF ResolveTarget
++ );
++
++gceSTATUS
++gcoOS_CreateDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++
++gceSTATUS
++gcoOS_DestroyDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++gceSTATUS
++gcoOS_SwapBuffers(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gcoSURF RenderTarget,
++ IN gcoSURF ResolveTarget,
++ IN gctPOINTER ResolveBits,
++ OUT gctUINT *Width,
++ OUT gctUINT *Height
++ );
++
++#ifdef EGL_API_DRI
++gceSTATUS
++gcoOS_ResizeWindow(
++ IN gctPOINTER localDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gctUINT Width,
++ IN gctUINT Height)
++ ;
++
++#ifdef USE_FREESCALE_EGL_ACCEL
++gceSTATUS
++gcoOS_SwapBuffersGeneric_Async(
++ IN gctPOINTER localDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gcoSURF RenderTarget,
++ IN gcoSURF ResolveTarget,
++ IN gctPOINTER ResolveBits,
++ OUT gctUINT *Width,
++ OUT gctUINT *Height,
++ IN void * resolveRect
++ );
++
++gceSTATUS
++gcoOS_DrawSurface(
++ IN gctPOINTER localDisplay,
++ IN HALNativeWindowType Drawable
++ );
++#endif
++
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform_type.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform_type.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform_type.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform_type.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,286 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_eglplatform_type_h_
++#define __gc_hal_eglplatform_type_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Events. *********************************************************************
++*/
++
++typedef enum _halEventType
++{
++ /* Keyboard event. */
++ HAL_KEYBOARD,
++
++ /* Mouse move event. */
++ HAL_POINTER,
++
++ /* Mouse button event. */
++ HAL_BUTTON,
++
++ /* Application close event. */
++ HAL_CLOSE,
++
++ /* Application window has been updated. */
++ HAL_WINDOW_UPDATE
++}
++halEventType;
++
++/* Scancodes for keyboard. */
++typedef enum _halKeys
++{
++ HAL_UNKNOWN = -1,
++
++ HAL_BACKSPACE = 0x08,
++ HAL_TAB,
++ HAL_ENTER = 0x0D,
++ HAL_ESCAPE = 0x1B,
++
++ HAL_SPACE = 0x20,
++ HAL_SINGLEQUOTE = 0x27,
++ HAL_PAD_ASTERISK = 0x2A,
++ HAL_COMMA = 0x2C,
++ HAL_HYPHEN,
++ HAL_PERIOD,
++ HAL_SLASH,
++ HAL_0,
++ HAL_1,
++ HAL_2,
++ HAL_3,
++ HAL_4,
++ HAL_5,
++ HAL_6,
++ HAL_7,
++ HAL_8,
++ HAL_9,
++ HAL_SEMICOLON = 0x3B,
++ HAL_EQUAL = 0x3D,
++ HAL_A = 0x41,
++ HAL_B,
++ HAL_C,
++ HAL_D,
++ HAL_E,
++ HAL_F,
++ HAL_G,
++ HAL_H,
++ HAL_I,
++ HAL_J,
++ HAL_K,
++ HAL_L,
++ HAL_M,
++ HAL_N,
++ HAL_O,
++ HAL_P,
++ HAL_Q,
++ HAL_R,
++ HAL_S,
++ HAL_T,
++ HAL_U,
++ HAL_V,
++ HAL_W,
++ HAL_X,
++ HAL_Y,
++ HAL_Z,
++ HAL_LBRACKET,
++ HAL_BACKSLASH,
++ HAL_RBRACKET,
++ HAL_BACKQUOTE = 0x60,
++
++ HAL_F1 = 0x80,
++ HAL_F2,
++ HAL_F3,
++ HAL_F4,
++ HAL_F5,
++ HAL_F6,
++ HAL_F7,
++ HAL_F8,
++ HAL_F9,
++ HAL_F10,
++ HAL_F11,
++ HAL_F12,
++
++ HAL_LCTRL,
++ HAL_RCTRL,
++ HAL_LSHIFT,
++ HAL_RSHIFT,
++ HAL_LALT,
++ HAL_RALT,
++ HAL_CAPSLOCK,
++ HAL_NUMLOCK,
++ HAL_SCROLLLOCK,
++ HAL_PAD_0,
++ HAL_PAD_1,
++ HAL_PAD_2,
++ HAL_PAD_3,
++ HAL_PAD_4,
++ HAL_PAD_5,
++ HAL_PAD_6,
++ HAL_PAD_7,
++ HAL_PAD_8,
++ HAL_PAD_9,
++ HAL_PAD_HYPHEN,
++ HAL_PAD_PLUS,
++ HAL_PAD_SLASH,
++ HAL_PAD_PERIOD,
++ HAL_PAD_ENTER,
++ HAL_SYSRQ,
++ HAL_PRNTSCRN,
++ HAL_BREAK,
++ HAL_UP,
++ HAL_LEFT,
++ HAL_RIGHT,
++ HAL_DOWN,
++ HAL_HOME,
++ HAL_END,
++ HAL_PGUP,
++ HAL_PGDN,
++ HAL_INSERT,
++ HAL_DELETE,
++ HAL_LWINDOW,
++ HAL_RWINDOW,
++ HAL_MENU,
++ HAL_POWER,
++ HAL_SLEEP,
++ HAL_WAKE
++}
++halKeys;
++
++/* Structure that defined keyboard mapping. */
++typedef struct _halKeyMap
++{
++ /* Normal key. */
++ halKeys normal;
++
++ /* Extended key. */
++ halKeys extended;
++}
++halKeyMap;
++
++/* Event structure. */
++typedef struct _halEvent
++{
++ /* Event type. */
++ halEventType type;
++
++ /* Event data union. */
++ union _halEventData
++ {
++ /* Event data for keyboard. */
++ struct _halKeyboard
++ {
++ /* Scancode. */
++ halKeys scancode;
++
++ /* ASCII characte of the key pressed. */
++ char key;
++
++ /* Flag whether the key was pressed (1) or released (0). */
++ char pressed;
++ }
++ keyboard;
++
++ /* Event data for pointer. */
++ struct _halPointer
++ {
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ pointer;
++
++ /* Event data for mouse buttons. */
++ struct _halButton
++ {
++ /* Left button state. */
++ int left;
++
++ /* Middle button state. */
++ int middle;
++
++ /* Right button state. */
++ int right;
++
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ button;
++ }
++ data;
++}
++halEvent;
++
++/* VFK_DISPLAY_INFO structure defining information returned by
++ vdkGetDisplayInfoEx. */
++typedef struct _halDISPLAY_INFO
++{
++ /* The size of the display in pixels. */
++ int width;
++ int height;
++
++ /* The stride of the dispay. -1 is returned if the stride is not known
++ ** for the specified display.*/
++ int stride;
++
++ /* The color depth of the display in bits per pixel. */
++ int bitsPerPixel;
++
++ /* The logical pointer to the display memory buffer. NULL is returned
++ ** if the pointer is not known for the specified display. */
++ void * logical;
++
++ /* The physical address of the display memory buffer. ~0 is returned
++ ** if the address is not known for the specified display. */
++ unsigned long physical;
++
++ int wrapFB; /* true if compositor, false otherwise. */
++
++#ifndef __QNXNTO__
++ /* 355_FB_MULTI_BUFFER */
++ int multiBuffer;
++ int backBufferY;
++#endif
++
++ /* The color info of the display. */
++ unsigned int alphaLength;
++ unsigned int alphaOffset;
++ unsigned int redLength;
++ unsigned int redOffset;
++ unsigned int greenLength;
++ unsigned int greenOffset;
++ unsigned int blueLength;
++ unsigned int blueOffset;
++
++ /* Display flip support. */
++ int flip;
++}
++halDISPLAY_INFO;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_type_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2587 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_h_
++#define __gc_hal_engine_h_
++
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++
++#if gcdENABLE_3D
++#if gcdENABLE_VG
++#include "gc_hal_engine_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoSTREAM * gcoSTREAM;
++typedef struct _gcoVERTEX * gcoVERTEX;
++typedef struct _gcoTEXTURE * gcoTEXTURE;
++typedef struct _gcoINDEX * gcoINDEX;
++typedef struct _gcsVERTEX_ATTRIBUTES * gcsVERTEX_ATTRIBUTES_PTR;
++typedef struct _gcoVERTEXARRAY * gcoVERTEXARRAY;
++typedef struct _gcoBUFOBJ * gcoBUFOBJ;
++
++#define gcdATTRIBUTE_COUNT 16
++
++typedef enum _gcePROGRAM_STAGE
++{
++ gcvPROGRAM_STAGE_VERTEX = 0x0,
++ gcvPROGRAM_STAGE_TES = 0x1,
++ gcvPROGRAM_STAGE_TCS = 0x2,
++ gcvPROGRAM_STAGE_GEOMETRY = 0x3,
++ gcvPROGRAM_STAGE_FRAGMENT = 0x4,
++ gcvPROGRAM_STAGE_COMPUTE = 0x5,
++ gcvPROGRAM_STAGE_OPENCL = 0x6,
++ gcvPROGRAM_STAGE_LAST
++}
++gcePROGRAM_STAGE;
++
++typedef enum _gcePROGRAM_STAGE_BIT
++{
++ gcvPROGRAM_STAGE_VERTEX_BIT = 1 << gcvPROGRAM_STAGE_VERTEX,
++ gcvPROGRAM_STAGE_TES_BIT = 1 << gcvPROGRAM_STAGE_TES,
++ gcvPROGRAM_STAGE_TCS_BIT = 1 << gcvPROGRAM_STAGE_TCS,
++ gcvPROGRAM_STAGE_GEOMETRY_BIT = 1 << gcvPROGRAM_STAGE_GEOMETRY,
++ gcvPROGRAM_STAGE_FRAGMENT_BIT = 1 << gcvPROGRAM_STAGE_FRAGMENT,
++ gcvPROGRAM_STAGE_COMPUTE_BIT = 1 << gcvPROGRAM_STAGE_COMPUTE,
++ gcvPROGRAM_STAGE_OPENCL_BIT = 1 << gcvPROGRAM_STAGE_OPENCL,
++}
++gcePROGRAM_STAGE_BIT;
++
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++gceSTATUS
++gcoHAL_QueryShaderCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ );
++
++gceSTATUS
++gcoHAL_QueryShaderCapsEx(
++ IN gcoHAL Hal,
++ OUT gctUINT * ShaderCoreCount,
++ OUT gctUINT * ThreadCount,
++ OUT gctUINT * VertexInstructionCount,
++ OUT gctUINT * FragmentInstructionCount
++ );
++
++gceSTATUS
++gcoHAL_QuerySamplerBase(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * VertexCount,
++ OUT gctINT_PTR VertexBase,
++ OUT gctUINT32 * FragmentCount,
++ OUT gctINT_PTR FragmentBase
++ );
++
++gceSTATUS
++gcoHAL_QueryUniformBase(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * VertexBase,
++ OUT gctUINT32 * FragmentBase
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureMaxAniso(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxAnisoValue
++ );
++
++gceSTATUS
++gcoHAL_QueryStreamCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * MaxAttributes,
++ OUT gctUINT32 * MaxStreamSize,
++ OUT gctUINT32 * NumberOfStreams,
++ OUT gctUINT32 * Alignment
++ );
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*--------------------------------- gcoSURF 3D --------------------------------*/
++typedef enum _gceBLIT_FLAG
++{
++ gcvBLIT_FLAG_SKIP_DEPTH_WRITE = 0x1,
++ gcvBLIT_FLAG_SKIP_STENCIL_WRITE = 0x2,
++} gceBLIT_FLAG;
++
++typedef struct _gcsSURF_BLIT_ARGS
++{
++ gcoSURF srcSurface;
++ gctINT srcX, srcY, srcZ;
++ gctINT srcWidth, srcHeight, srcDepth;
++ gcoSURF dstSurface;
++ gctINT dstX, dstY, dstZ;
++ gctINT dstWidth, dstHeight, dstDepth;
++ gctBOOL xReverse;
++ gctBOOL yReverse;
++ gctBOOL scissorTest;
++ gcsRECT scissor;
++ gctUINT flags;
++}
++gcsSURF_BLIT_ARGS;
++
++
++
++
++/* Clear flags. */
++typedef enum _gceCLEAR
++{
++ gcvCLEAR_COLOR = 0x1,
++ gcvCLEAR_DEPTH = 0x2,
++ gcvCLEAR_STENCIL = 0x4,
++ gcvCLEAR_HZ = 0x8,
++ gcvCLEAR_HAS_VAA = 0x10,
++ gcvCLEAR_WITH_GPU_ONLY = 0x100,
++ gcvCLEAR_WITH_CPU_ONLY = 0x200,
++}
++gceCLEAR;
++
++typedef struct _gcsSURF_CLEAR_ARGS
++{
++ /*
++ ** Color to fill the color portion of the framebuffer when clear
++ ** is called.
++ */
++ struct {
++ gcuVALUE r;
++ gcuVALUE g;
++ gcuVALUE b;
++ gcuVALUE a;
++ /*
++ ** Color has multiple value type so we must specify it.
++ */
++ gceVALUE_TYPE valueType;
++ } color;
++
++ gcuVALUE depth;
++
++ gctUINT stencil;
++
++
++
++ /*
++ ** stencil bit-wise mask
++ */
++ gctUINT8 stencilMask;
++ /*
++ ** Depth Write Mask
++ */
++ gctBOOL depthMask;
++ /*
++ ** 4-bit channel Mask: ABGR:MSB->LSB
++ */
++ gctUINT8 colorMask;
++ /*
++ ** If ClearRect is NULL, it means full clear
++ */
++ gcsRECT_PTR clearRect;
++ /*
++ ** clear flags
++ */
++ gceCLEAR flags;
++
++ /*
++ ** Offset in surface to cube/array/3D
++ */
++ gctUINT32 offset;
++
++} gcsSURF_CLEAR_ARGS;
++
++
++typedef gcsSURF_CLEAR_ARGS* gcsSURF_CLEAR_ARGS_PTR;
++
++typedef struct _gscSURF_BLITDRAW_BLIT
++{
++ gcoSURF srcSurface;
++ gcoSURF dstSurface;
++ gcsRECT srcRect;
++ gcsRECT dstRect;
++ gceTEXTURE_FILTER filterMode;
++ gctBOOL xReverse;
++ gctBOOL yReverse;
++ gctBOOL scissorEnabled;
++ gcsRECT scissor;
++}gscSURF_BLITDRAW_BLIT;
++
++
++typedef enum _gceBLITDRAW_TYPE
++{
++ gcvBLITDRAW_CLEAR = 0,
++ gcvBLITDRAW_BLIT = 1,
++
++ /* last number, not a real type */
++ gcvBLITDRAW_NUM_TYPE
++ }
++gceBLITDRAW_TYPE;
++
++
++typedef struct _gscSURF_BLITDRAW_ARGS
++{
++ /* always the fist member */
++ gceHAL_ARG_VERSION version;
++
++ union _gcsSURF_BLITDRAW_ARGS_UNION
++ {
++ struct _gscSURF_BLITDRAW_ARG_v1
++ {
++ /* Whether it's clear or blit operation, can be extended. */
++ gceBLITDRAW_TYPE type;
++
++ union _gscSURF_BLITDRAW_UNION
++ {
++ gscSURF_BLITDRAW_BLIT blit;
++
++ struct _gscSURF_BLITDRAW_CLEAR
++ {
++ gcsSURF_CLEAR_ARGS clearArgs;
++ gcoSURF rtSurface;
++ gcoSURF dsSurface;
++ } clear;
++ } u;
++ } v1;
++ } uArgs;
++}
++gcsSURF_BLITDRAW_ARGS;
++
++
++typedef struct _gcsSURF_RESOLVE_ARGS
++{
++ gceHAL_ARG_VERSION version;
++ union _gcsSURF_RESOLVE_ARGS_UNION
++ {
++ struct _gcsSURF_RESOLVE_ARG_v1
++ {
++ gctBOOL yInverted;
++ }v1;
++ } uArgs;
++}
++gcsSURF_RESOLVE_ARGS;
++
++
++/* CPU Blit with format (including linear <-> tile) conversion*/
++gceSTATUS
++gcoSURF_BlitCPU(
++ gcsSURF_BLIT_ARGS* args
++ );
++
++
++gceSTATUS
++gcoSURF_BlitDraw(
++ IN gcsSURF_BLITDRAW_ARGS *args
++ );
++#endif /* gcdENABLE_3D */
++
++
++
++#if gcdENABLE_3D
++/* Clear surface function. */
++gceSTATUS
++gcoSURF_Clear(
++ IN gcoSURF Surface,
++ IN gcsSURF_CLEAR_ARGS_PTR clearArg
++ );
++
++/* Preserve pixels from source. */
++gceSTATUS
++gcoSURF_Preserve(
++ IN gcoSURF Source,
++ IN gcoSURF Dest,
++ IN gcsRECT_PTR MaskRect
++ );
++
++
++/* TO BE REMOVED */
++ gceSTATUS
++ depr_gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight
++ );
++
++ gceSTATUS
++ depr_gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resample surface. */
++gceSTATUS
++gcoSURF_Resample(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++/* Resolve surface. */
++gceSTATUS
++gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++gceSTATUS
++gcoSURF_ResolveEx(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsSURF_RESOLVE_ARGS *args
++ );
++
++
++/* Resolve rectangular area of a surface. */
++gceSTATUS
++gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resolve rectangular area of a surface. */
++gceSTATUS
++gcoSURF_ResolveRectEx(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize,
++ IN gcsSURF_RESOLVE_ARGS *args
++ );
++
++
++gceSTATUS
++gcoSURF_GetResolveAlignment(
++ IN gcoSURF Surface,
++ OUT gctUINT *originX,
++ OUT gctUINT *originY,
++ OUT gctUINT *sizeX,
++ OUT gctUINT *sizeY
++ );
++
++gceSTATUS
++gcoSURF_IsHWResolveable(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Set surface resolvability. */
++gceSTATUS
++gcoSURF_SetResolvability(
++ IN gcoSURF Surface,
++ IN gctBOOL Resolvable
++ );
++
++gceSTATUS
++gcoSURF_IsRenderable(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSURF_IsFormatRenderableAsRT(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSURF_GetFence(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoBUFOBJ_GetFence(
++ IN gcoBUFOBJ bufObj
++ );
++
++gceSTATUS
++gcoBUFOBJ_WaitFence(
++ IN gcoBUFOBJ bufObj
++ );
++
++gceSTATUS
++gcoBUFOBJ_IsFenceEnabled(
++ IN gcoBUFOBJ bufObj
++ );
++
++gceSTATUS
++gcoSURF_WaitFence(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSTREAM_GetFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoSTREAM_WaitFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoINDEX_GetFence(
++ IN gcoINDEX index
++ );
++
++gceSTATUS
++gcoINDEX_WaitFence(
++ IN gcoINDEX index
++ );
++
++gceSTATUS
++gcoSURF_3DBlitClearRect(
++ IN gcoSURF Surface,
++ IN gcsSURF_CLEAR_ARGS_PTR ClearArgs
++ );
++
++
++gceSTATUS
++gcoSURF_3DBlitBltRect(
++ IN gcoSURF SrcSurf,
++ IN gcoSURF DestSurf,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++gceSTATUS
++gcoSURF_3DBlitCopy(
++ IN gctUINT32 SrcAddress,
++ IN gctUINT32 DestAddress,
++ IN gctUINT32 Bytes
++ );
++
++
++/******************************************************************************\
++******************************** gcoINDEX Object *******************************
++\******************************************************************************/
++
++/* Construct a new gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoINDEX * Index
++ );
++
++/* Destroy a gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Destroy(
++ IN gcoINDEX Index
++ );
++
++/* Lock index in memory. */
++gceSTATUS
++gcoINDEX_Lock(
++ IN gcoINDEX Index,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Unlock index that was previously locked with gcoINDEX_Lock. */
++gceSTATUS
++gcoINDEX_Unlock(
++ IN gcoINDEX Index
++ );
++
++/* Upload index data into the memory. */
++gceSTATUS
++gcoINDEX_Load(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE IndexType,
++ IN gctUINT32 IndexCount,
++ IN gctPOINTER IndexBuffer
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_Bind(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_BindOffset(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset
++ );
++
++/* Free existing index buffer. */
++gceSTATUS
++gcoINDEX_Free(
++ IN gcoINDEX Index
++ );
++
++/* Upload data into an index buffer. */
++gceSTATUS
++gcoINDEX_Upload(
++ IN gcoINDEX Index,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload data into an index buffer starting at an offset. */
++gceSTATUS
++gcoINDEX_UploadOffset(
++ IN gcoINDEX Index,
++ IN gctSIZE_T Offset,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/*Merge index2 to index1 from 0, index2 must subset of inex1*/
++gceSTATUS
++gcoINDEX_Merge(
++ IN gcoINDEX Index1,
++ IN gcoINDEX Index2
++ );
++
++/*check if index buffer is enough for this draw*/
++gctBOOL
++gcoINDEX_CheckRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctINT Count,
++ IN gctUINT32 Indices
++ );
++
++/* Query the index capabilities. */
++gceSTATUS
++gcoINDEX_QueryCaps(
++ OUT gctBOOL * Index8,
++ OUT gctBOOL * Index16,
++ OUT gctBOOL * Index32,
++ OUT gctUINT * MaxIndex
++ );
++
++/* Determine the index range in the current index buffer. */
++gceSTATUS
++gcoINDEX_GetIndexRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Count,
++ OUT gctUINT32 * MinimumIndex,
++ OUT gctUINT32 * MaximumIndex
++ );
++
++/* Dynamic buffer management. */
++gceSTATUS
++gcoINDEX_SetDynamic(
++ IN gcoINDEX Index,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++/******************************************************************************\
++********************************** gco3D Object *********************************
++\******************************************************************************/
++
++/* Blending targets. */
++typedef enum _gceBLEND_UNIT
++{
++ gcvBLEND_SOURCE,
++ gcvBLEND_TARGET,
++}
++gceBLEND_UNIT;
++
++/* Construct a new gco3D object. */
++gceSTATUS
++gco3D_Construct(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++/* Destroy an gco3D object. */
++gceSTATUS
++gco3D_Destroy(
++ IN gco3D Engine
++ );
++
++/* Set 3D API type. */
++gceSTATUS
++gco3D_SetAPI(
++ IN gco3D Engine,
++ IN gceAPI ApiType
++ );
++
++/* Get 3D API type. */
++gceSTATUS
++gco3D_GetAPI(
++ IN gco3D Engine,
++ OUT gceAPI * ApiType
++ );
++
++/* Set render target. */
++gceSTATUS
++gco3D_SetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Unset render target. */
++gceSTATUS
++gco3D_UnsetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gco3D_SetTargetEx(
++ IN gco3D Engine,
++ IN gctUINT32 TargetIndex,
++ IN gcoSURF Surface,
++ IN gctUINT32 LayerIndex
++ );
++
++gceSTATUS
++gco3D_UnsetTargetEx(
++ IN gco3D Engine,
++ IN gctUINT32 TargetIndex,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gco3D_SetTargetOffsetEx(
++ IN gco3D Engine,
++ IN gctUINT32 TargetIndex,
++ IN gctSIZE_T Offset
++ );
++
++
++gceSTATUS
++gco3D_SetPSOutputMapping(
++ IN gco3D Engine,
++ IN gctINT32 * psOutputMapping
++ );
++
++
++/* Set depth buffer. */
++gceSTATUS
++gco3D_SetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gco3D_SetDepthBufferOffset(
++ IN gco3D Engine,
++ IN gctSIZE_T Offset
++ );
++
++/* Unset depth buffer. */
++gceSTATUS
++gco3D_UnsetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Set viewport. */
++gceSTATUS
++gco3D_SetViewport(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set scissors. */
++gceSTATUS
++gco3D_SetScissors(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set clear color. */
++gceSTATUS
++gco3D_SetClearColor(
++ IN gco3D Engine,
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++/* Set fixed point clear color. */
++gceSTATUS
++gco3D_SetClearColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point clear color. */
++gceSTATUS
++gco3D_SetClearColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set fixed point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Depth
++ );
++
++/* Set floating point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthF(
++ IN gco3D Engine,
++ IN gctFLOAT Depth
++ );
++
++/* Set clear stencil. */
++gceSTATUS
++gco3D_SetClearStencil(
++ IN gco3D Engine,
++ IN gctUINT32 Stencil
++ );
++
++/* Set shading mode. */
++gceSTATUS
++gco3D_SetShading(
++ IN gco3D Engine,
++ IN gceSHADING Shading
++ );
++
++/* Set blending mode. */
++gceSTATUS
++gco3D_EnableBlending(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set blending function. */
++gceSTATUS
++gco3D_SetBlendFunction(
++ IN gco3D Engine,
++ IN gceBLEND_UNIT Unit,
++ IN gceBLEND_FUNCTION FunctionRGB,
++ IN gceBLEND_FUNCTION FunctionAlpha
++ );
++
++/* Set blending mode. */
++gceSTATUS
++gco3D_SetBlendMode(
++ IN gco3D Engine,
++ IN gceBLEND_MODE ModeRGB,
++ IN gceBLEND_MODE ModeAlpha
++ );
++
++/* Set blending color. */
++gceSTATUS
++gco3D_SetBlendColor(
++ IN gco3D Engine,
++ IN gctUINT Red,
++ IN gctUINT Green,
++ IN gctUINT Blue,
++ IN gctUINT Alpha
++ );
++
++/* Set fixed point blending color. */
++gceSTATUS
++gco3D_SetBlendColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point blending color. */
++gceSTATUS
++gco3D_SetBlendColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set culling mode. */
++gceSTATUS
++gco3D_SetCulling(
++ IN gco3D Engine,
++ IN gceCULL Mode
++ );
++
++/* Enable point size */
++gceSTATUS
++gco3D_SetPointSizeEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set point sprite */
++gceSTATUS
++gco3D_SetPointSprite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set fill mode. */
++gceSTATUS
++gco3D_SetFill(
++ IN gco3D Engine,
++ IN gceFILL Mode
++ );
++
++/* Set depth compare mode. */
++gceSTATUS
++gco3D_SetDepthCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Enable depth writing. */
++gceSTATUS
++gco3D_EnableDepthWrite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth mode. */
++gceSTATUS
++gco3D_SetDepthMode(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeX(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFIXED_POINT Near,
++ IN gctFIXED_POINT Far
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeF(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Set last pixel enable */
++gceSTATUS
++gco3D_SetLastPixelEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth Bias and Scale */
++gceSTATUS
++gco3D_SetDepthScaleBiasX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT DepthScale,
++ IN gctFIXED_POINT DepthBias
++ );
++
++gceSTATUS
++gco3D_SetDepthScaleBiasF(
++ IN gco3D Engine,
++ IN gctFLOAT DepthScale,
++ IN gctFLOAT DepthBias
++ );
++
++/* Set depth near and far clipping plane. */
++gceSTATUS
++gco3D_SetDepthPlaneF(
++ IN gco3D Engine,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Enable or disable dithering. */
++gceSTATUS
++gco3D_EnableDither(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set color write enable bits. */
++gceSTATUS
++gco3D_SetColorWrite(
++ IN gco3D Engine,
++ IN gctUINT8 Enable
++ );
++
++/* Enable or disable early depth. */
++gceSTATUS
++gco3D_SetEarlyDepth(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Deprecated: Enable or disable all early depth operations. */
++gceSTATUS
++gco3D_SetAllEarlyDepthModes(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Enable or disable all early depth operations. */
++gceSTATUS
++gco3D_SetAllEarlyDepthModesEx(
++ IN gco3D Engine,
++ IN gctBOOL Disable,
++ IN gctBOOL DisableModify,
++ IN gctBOOL DisablePassZ
++ );
++
++/* Switch dynamic early mode */
++gceSTATUS
++gco3D_SwitchDynamicEarlyDepthMode(
++ IN gco3D Engine
++ );
++
++/* Set dynamic early mode */
++gceSTATUS
++gco3D_DisableDynamicEarlyDepthMode(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Enable or disable depth-only mode. */
++gceSTATUS
++gco3D_SetDepthOnly(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++typedef struct _gcsSTENCIL_INFO * gcsSTENCIL_INFO_PTR;
++typedef struct _gcsSTENCIL_INFO
++{
++ gceSTENCIL_MODE mode;
++
++ gctUINT8 maskFront;
++ gctUINT8 maskBack;
++ gctUINT8 writeMaskFront;
++ gctUINT8 writeMaskBack;
++
++ gctUINT8 referenceFront;
++
++ gceCOMPARE compareFront;
++ gceSTENCIL_OPERATION passFront;
++ gceSTENCIL_OPERATION failFront;
++ gceSTENCIL_OPERATION depthFailFront;
++
++ gctUINT8 referenceBack;
++ gceCOMPARE compareBack;
++ gceSTENCIL_OPERATION passBack;
++ gceSTENCIL_OPERATION failBack;
++ gceSTENCIL_OPERATION depthFailBack;
++}
++gcsSTENCIL_INFO;
++
++/* Set stencil mode. */
++gceSTATUS
++gco3D_SetStencilMode(
++ IN gco3D Engine,
++ IN gceSTENCIL_MODE Mode
++ );
++
++/* Set stencil mask. */
++gceSTATUS
++gco3D_SetStencilMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back mask. */
++gceSTATUS
++gco3D_SetStencilMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil reference. */
++gceSTATUS
++gco3D_SetStencilReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctBOOL Front
++ );
++
++/* Set stencil compare. */
++gceSTATUS
++gco3D_SetStencilCompare(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceCOMPARE Compare
++ );
++
++/* Set stencil operation on pass. */
++gceSTATUS
++gco3D_SetStencilPass(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on fail. */
++gceSTATUS
++gco3D_SetStencilFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on depth fail. */
++gceSTATUS
++gco3D_SetStencilDepthFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set all stencil states in one blow. */
++gceSTATUS
++gco3D_SetStencilAll(
++ IN gco3D Engine,
++ IN gcsSTENCIL_INFO_PTR Info
++ );
++
++typedef struct _gcsALPHA_INFO * gcsALPHA_INFO_PTR;
++typedef struct _gcsALPHA_INFO
++{
++ /* Alpha test states. */
++ gctBOOL test;
++ gceCOMPARE compare;
++ gctUINT8 reference;
++ gctFLOAT floatReference;
++
++ /* Alpha blending states. */
++ gctBOOL blend;
++
++ gceBLEND_FUNCTION srcFuncColor;
++ gceBLEND_FUNCTION srcFuncAlpha;
++ gceBLEND_FUNCTION trgFuncColor;
++ gceBLEND_FUNCTION trgFuncAlpha;
++
++ gceBLEND_MODE modeColor;
++ gceBLEND_MODE modeAlpha;
++
++ gctUINT32 color;
++}
++gcsALPHA_INFO;
++
++/* Enable or disable alpha test. */
++gceSTATUS
++gco3D_SetAlphaTest(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set alpha test compare. */
++gceSTATUS
++gco3D_SetAlphaCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Set alpha test reference in unsigned integer. */
++gceSTATUS
++gco3D_SetAlphaReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctFLOAT FloatReference
++ );
++
++/* Set alpha test reference in fixed point. */
++gceSTATUS
++gco3D_SetAlphaReferenceX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Reference
++ );
++
++/* Set alpha test reference in floating point. */
++gceSTATUS
++gco3D_SetAlphaReferenceF(
++ IN gco3D Engine,
++ IN gctFLOAT Reference
++ );
++
++/* Enable/Disable anti-alias line. */
++gceSTATUS
++gco3D_SetAntiAliasLine(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set texture slot for anti-alias line. */
++gceSTATUS
++gco3D_SetAALineTexSlot(
++ IN gco3D Engine,
++ IN gctUINT TexSlot
++ );
++
++/* Set anti-alias line width scale. */
++gceSTATUS
++gco3D_SetAALineWidth(
++ IN gco3D Engine,
++ IN gctFLOAT Width
++ );
++
++/* Draw a number of primitives. */
++gceSTATUS
++gco3D_DrawPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctSIZE_T StartVertex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++gceSTATUS
++gco3D_DrawInstancedPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctBOOL DrawIndex,
++ IN gctSIZE_T StartVertex,
++ IN gctSIZE_T StartIndex,
++ IN gctSIZE_T PrimitiveCount,
++ IN gctSIZE_T VertexCount,
++ IN gctSIZE_T InstanceCount
++ );
++
++gceSTATUS
++gco3D_DrawPrimitivesCount(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT* StartVertex,
++ IN gctSIZE_T* VertexCount,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++
++/* Draw a number of primitives using offsets. */
++gceSTATUS
++gco3D_DrawPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives. */
++gceSTATUS
++gco3D_DrawIndexedPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctSIZE_T BaseVertex,
++ IN gctSIZE_T StartIndex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives using offsets. */
++gceSTATUS
++gco3D_DrawIndexedPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 BaseOffset,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a element from pattern */
++gceSTATUS
++gco3D_DrawPattern(
++ IN gco3D Engine,
++ IN gcsFAST_FLUSH_PTR FastFlushInfo
++ );
++
++/* Enable or disable anti-aliasing. */
++gceSTATUS
++gco3D_SetAntiAlias(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Write data into the command buffer. */
++gceSTATUS
++gco3D_WriteBuffer(
++ IN gco3D Engine,
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Aligned
++ );
++
++/* Send sempahore and stall until sempahore is signalled. */
++gceSTATUS
++gco3D_Semaphore(
++ IN gco3D Engine,
++ IN gceWHERE From,
++ IN gceWHERE To,
++ IN gceHOW How);
++
++/* Explicitly flush shader L1 cache */
++gceSTATUS
++gco3D_FlushSHL1Cache(
++ IN gco3D Engine
++ );
++
++/* Set the subpixels center. */
++gceSTATUS
++gco3D_SetCentroids(
++ IN gco3D Engine,
++ IN gctUINT32 Index,
++ IN gctPOINTER Centroids
++ );
++
++gceSTATUS
++gco3D_SetLogicOp(
++ IN gco3D Engine,
++ IN gctUINT8 Rop
++ );
++
++gceSTATUS
++gco3D_SetOQ(
++ IN gco3D Engine,
++ INOUT gctPOINTER * Result,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco3D_GetOQ(
++ IN gco3D Engine,
++ IN gctPOINTER Result,
++ OUT gctINT64 * Logical
++ );
++
++gceSTATUS
++gco3D_DeleteOQ(
++ IN gco3D Engine,
++ INOUT gctPOINTER Result
++ );
++
++gceSTATUS
++gco3D_SetColorOutCount(
++ IN gco3D Engine,
++ IN gctUINT32 ColorOutCount
++ );
++
++gceSTATUS
++gco3D_Set3DEngine(
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gco3D_UnSet3DEngine(
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gco3D_Get3DEngine(
++ OUT gco3D * Engine
++ );
++
++
++/* OCL thread walker information. */
++typedef struct _gcsTHREAD_WALKER_INFO * gcsTHREAD_WALKER_INFO_PTR;
++typedef struct _gcsTHREAD_WALKER_INFO
++{
++ gctUINT32 dimensions;
++ gctUINT32 traverseOrder;
++ gctUINT32 enableSwathX;
++ gctUINT32 enableSwathY;
++ gctUINT32 enableSwathZ;
++ gctUINT32 swathSizeX;
++ gctUINT32 swathSizeY;
++ gctUINT32 swathSizeZ;
++ gctUINT32 valueOrder;
++
++ gctUINT32 globalSizeX;
++ gctUINT32 globalOffsetX;
++ gctUINT32 globalSizeY;
++ gctUINT32 globalOffsetY;
++ gctUINT32 globalSizeZ;
++ gctUINT32 globalOffsetZ;
++
++ gctUINT32 workGroupSizeX;
++ gctUINT32 workGroupCountX;
++ gctUINT32 workGroupSizeY;
++ gctUINT32 workGroupCountY;
++ gctUINT32 workGroupSizeZ;
++ gctUINT32 workGroupCountZ;
++
++ gctUINT32 threadAllocation;
++}
++gcsTHREAD_WALKER_INFO;
++
++/* Start OCL thread walker. */
++gceSTATUS
++gco3D_InvokeThreadWalker(
++ IN gco3D Engine,
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
++
++gceSTATUS
++gco3D_GetClosestRenderFormat(
++ IN gco3D Engine,
++ IN gceSURF_FORMAT InFormat,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++/* Set w clip and w plane limit value. */
++gceSTATUS
++gco3D_SetWClipEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco3D_GetWClipEnable(
++ IN gco3D Engine,
++ OUT gctBOOL * Enable
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitF(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Value
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimit(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++gceSTATUS
++gco3D_PrimitiveRestart(
++ IN gco3D Engine,
++ IN gctBOOL PrimitiveRestart);
++
++#if gcdSTREAM_OUT_BUFFER
++
++gceSTATUS
++gco3D_QueryStreamOut(
++ IN gco3D Engine,
++ IN gctUINT32 OriginalIndexAddress,
++ IN gctUINT32 OriginalIndexOffset,
++ IN gctUINT32 OriginalIndexCount,
++ OUT gctBOOL_PTR Found
++ );
++
++gceSTATUS
++gco3D_StartStreamOut(
++ IN gco3D Engine,
++ IN gctINT StreamOutStatus,
++ IN gctUINT32 IndexAddress,
++ IN gctUINT32 IndexOffset,
++ IN gctUINT32 IndexCount
++ );
++
++gceSTATUS
++gco3D_StopStreamOut(
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gco3D_ReplayStreamOut(
++ IN gco3D Engine,
++ IN gctUINT32 IndexAddress,
++ IN gctUINT32 IndexOffset,
++ IN gctUINT32 IndexCount
++ );
++
++gceSTATUS
++gco3D_EndStreamOut(
++ IN gco3D Engine
++ );
++
++#endif
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- gco3D Fragment Processor ------------------------*/
++
++/* Set the fragment processor configuration. */
++gceSTATUS
++gco3D_SetFragmentConfiguration(
++ IN gco3D Engine,
++ IN gctBOOL ColorFromStream,
++ IN gctBOOL EnableFog,
++ IN gctBOOL EnableSmoothPoint,
++ IN gctUINT32 ClipPlanes
++ );
++
++/* Enable/disable texture stage operation. */
++gceSTATUS
++gco3D_EnableTextureStage(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL Enable
++ );
++
++/* Program the channel enable masks for the color texture function. */
++gceSTATUS
++gco3D_SetTextureColorMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the channel enable masks for the alpha texture function. */
++gceSTATUS
++gco3D_SetTextureAlphaMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the constant fragment color. */
++gceSTATUS
++gco3D_SetFragmentColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFragmentColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant fog color. */
++gceSTATUS
++gco3D_SetFogColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFogColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant texture color. */
++gceSTATUS
++gco3D_SetTetxureColorX(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetTetxureColorF(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Configure color texture function. */
++gceSTATUS
++gco3D_SetColorTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/* Configure alpha texture function. */
++gceSTATUS
++gco3D_SetAlphaTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/******************************************************************************\
++******************************* gcoTEXTURE Object *******************************
++\******************************************************************************/
++
++/* Cube faces. */
++typedef enum _gceTEXTURE_FACE
++{
++ gcvFACE_NONE,
++ gcvFACE_POSITIVE_X,
++ gcvFACE_NEGATIVE_X,
++ gcvFACE_POSITIVE_Y,
++ gcvFACE_NEGATIVE_Y,
++ gcvFACE_POSITIVE_Z,
++ gcvFACE_NEGATIVE_Z,
++}
++gceTEXTURE_FACE;
++
++typedef struct _gcsTEXTURE
++{
++ /* Addressing modes. */
++ gceTEXTURE_ADDRESSING s;
++ gceTEXTURE_ADDRESSING t;
++ gceTEXTURE_ADDRESSING r;
++
++ gceTEXTURE_SWIZZLE swizzle[gcvTEXTURE_COMPONENT_NUM];
++
++ /* Border color. */
++ gctUINT8 border[gcvTEXTURE_COMPONENT_NUM];
++
++ /* Filters. */
++ gceTEXTURE_FILTER minFilter;
++ gceTEXTURE_FILTER magFilter;
++ gceTEXTURE_FILTER mipFilter;
++ gctUINT anisoFilter;
++
++ /* Level of detail. */
++ gctFLOAT lodBias;
++ gctFLOAT lodMin;
++ gctFLOAT lodMax;
++
++ /* base/max level */
++ gctINT32 baseLevel;
++ gctINT32 maxLevel;
++
++ /* depth texture comparison */
++ gceTEXTURE_COMPARE_MODE compareMode;
++ gceCOMPARE compareFunc;
++
++}
++gcsTEXTURE, * gcsTEXTURE_PTR;
++
++/* Construct a new gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Construct(
++ IN gcoHAL Hal,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Construct a new gcoTEXTURE object with type information. */
++gceSTATUS
++gcoTEXTURE_ConstructEx(
++ IN gcoHAL Hal,
++ IN gceTEXTURE_TYPE Type,
++ OUT gcoTEXTURE * Texture
++ );
++
++
++/* Construct a new sized gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_ConstructSized(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gctUINT MipMapCount,
++ IN gcePOOL Pool,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Destroy an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Destroy(
++ IN gcoTEXTURE Texture
++ );
++
++/* Upload data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Upload(
++ IN gcoTEXTURE Texture,
++ IN gctINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_COLOR_SPACE SrcColorSpace
++ );
++
++/* Upload data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadSub(
++ IN gcoTEXTURE Texture,
++ IN gctINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctSIZE_T X,
++ IN gctSIZE_T Y,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_COLOR_SPACE SrcColorSpace,
++ IN gctUINT32 PhysicalAddress
++ );
++
++
++/* Upload YUV data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadYUV(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctPOINTER Memory[3],
++ IN gctINT Stride[3],
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload compressed data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressed(
++ IN gcoTEXTURE Texture,
++ IN gctINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload compressed sub data to an gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressedSub(
++ IN gcoTEXTURE Texture,
++ IN gctINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctSIZE_T XOffset,
++ IN gctSIZE_T YOffset,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Size
++ );
++
++/* Get gcoSURF object for a mipmap level. */
++gceSTATUS
++gcoTEXTURE_GetMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ OUT gcoSURF * Surface
++ );
++
++/* Get gcoSURF object for a mipmap level and face offset. */
++gceSTATUS
++gcoTEXTURE_GetMipMapFace(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ OUT gcoSURF * Surface,
++ OUT gctSIZE_T_PTR Offset
++ );
++
++gceSTATUS
++gcoTEXTURE_GetMipMapSlice(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gctUINT Slice,
++ OUT gcoSURF * Surface,
++ OUT gctSIZE_T_PTR Offset
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctINT InternalFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctSIZE_T Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapWithFlag(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctINT InternalFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctSIZE_T Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool,
++ IN gctBOOL Protected,
++ OUT gcoSURF * Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromClient(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromSurface(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_SetEndianHint(
++ IN gcoTEXTURE Texture,
++ IN gceENDIAN_HINT EndianHint
++ );
++
++gceSTATUS
++gcoTEXTURE_Disable(
++ IN gcoHAL Hal,
++ IN gctINT Sampler
++ );
++
++gceSTATUS
++gcoTEXTURE_Flush(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_FlushVS(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_QueryCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoTEXTURE_GetClosestFormat(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT InFormat,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++gceSTATUS
++gcoTEXTURE_GetClosestFormatEx(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT InFormat,
++ IN gceTEXTURE_TYPE TextureType,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++gceSTATUS
++gcoTEXTURE_GetFormatInfo(
++ IN gcoTEXTURE Texture,
++ IN gctINT preferLevel,
++ OUT gcsSURF_FORMAT_INFO_PTR * TxFormatInfo
++ );
++
++gceSTATUS
++gcoTEXTURE_GetTextureFormatName(
++ IN gcsSURF_FORMAT_INFO_PTR TxFormatInfo,
++ OUT gctCONST_STRING * TxName
++ );
++
++gceSTATUS
++gcoTEXTURE_RenderIntoMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_RenderIntoMipMap2(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctBOOL Sync
++ );
++
++gceSTATUS
++gcoTEXTURE_IsRenderable(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsComplete(
++ IN gcoTEXTURE Texture,
++ IN gcsTEXTURE_PTR Info,
++ IN gctINT BaseLevel,
++ IN gctINT MaxLevel
++ );
++
++gceSTATUS
++gcoTEXTURE_BindTexture(
++ IN gcoTEXTURE Texture,
++ IN gctINT Target,
++ IN gctINT Sampler,
++ IN gcsTEXTURE_PTR Info
++ );
++
++gceSTATUS
++gcoTEXTURE_BindTextureEx(
++ IN gcoTEXTURE Texture,
++ IN gctINT Target,
++ IN gctINT Sampler,
++ IN gcsTEXTURE_PTR Info,
++ IN gctINT textureLayer
++ );
++
++gceSTATUS
++gcoTEXTURE_InitParams(
++ IN gcoHAL Hal,
++ IN gcsTEXTURE_PTR TexParams
++ );
++
++gceSTATUS
++gcoTEXTURE_SetDepthTextureFlag(
++ IN gcoTEXTURE Texture,
++ IN gctBOOL unsized
++ );
++
++
++/******************************************************************************\
++******************************* gcoSTREAM Object ******************************
++\******************************************************************************/
++
++typedef enum _gceVERTEX_FORMAT
++{
++ gcvVERTEX_BYTE,
++ gcvVERTEX_UNSIGNED_BYTE,
++ gcvVERTEX_SHORT,
++ gcvVERTEX_UNSIGNED_SHORT,
++ gcvVERTEX_INT,
++ gcvVERTEX_UNSIGNED_INT,
++ gcvVERTEX_FIXED,
++ gcvVERTEX_HALF,
++ gcvVERTEX_FLOAT,
++ gcvVERTEX_UNSIGNED_INT_10_10_10_2,
++ gcvVERTEX_INT_10_10_10_2,
++ gcvVERTEX_UNSIGNED_INT_2_10_10_10_REV,
++ gcvVERTEX_INT_2_10_10_10_REV,
++ /* integer format */
++ gcvVERTEX_INT8,
++ gcvVERTEX_INT16,
++ gcvVERTEX_INT32,
++}
++gceVERTEX_FORMAT;
++
++/* What the SW converting scheme to create temp attrib */
++typedef enum _gceATTRIB_SCHEME
++{
++ gcvATTRIB_SCHEME_KEEP = 0,
++ gcvATTRIB_SCHEME_2_10_10_10_REV_TO_FLOAT,
++ gcvATTRIB_SCHEME_BYTE_TO_INT,
++ gcvATTRIB_SCHEME_SHORT_TO_INT,
++ gcvATTRIB_SCHEME_UBYTE_TO_UINT,
++ gcvATTRIB_SCHEME_USHORT_TO_UINT,
++} gceATTRIB_SCHEME;
++
++gceSTATUS
++gcoSTREAM_Construct(
++ IN gcoHAL Hal,
++ OUT gcoSTREAM * Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Destroy(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Upload(
++ IN gcoSTREAM Stream,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Dynamic
++ );
++
++gceSTATUS
++gcoSTREAM_SetStride(
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoSTREAM_Size(
++ IN gcoSTREAM Stream,
++ OUT gctSIZE_T *Size
++ );
++
++gceSTATUS
++gcoSTREAM_Node(
++ IN gcoSTREAM Stream,
++ OUT gcsSURF_NODE_PTR * Node
++ );
++
++gceSTATUS
++gcoSTREAM_Lock(
++ IN gcoSTREAM Stream,
++ OUT gctPOINTER * Logical,
++ OUT gctUINT32 * Physical
++ );
++
++gceSTATUS
++gcoSTREAM_Unlock(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Reserve(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoSTREAM_Flush(
++ IN gcoSTREAM Stream
++ );
++
++/* Dynamic buffer API. */
++gceSTATUS
++gcoSTREAM_SetDynamic(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++typedef struct _gcsSTREAM_INFO
++{
++ gctUINT index;
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT components;
++ gctSIZE_T size;
++ gctCONST_POINTER data;
++ gctUINT stride;
++}
++gcsSTREAM_INFO, * gcsSTREAM_INFO_PTR;
++
++gceSTATUS
++gcoSTREAM_UploadDynamic(
++ IN gcoSTREAM Stream,
++ IN gctUINT VertexCount,
++ IN gctUINT InfoCount,
++ IN gcsSTREAM_INFO_PTR Info,
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoSTREAM_CPUCacheOperation(
++ IN gcoSTREAM Stream,
++ IN gceCACHEOPERATION Operation
++ );
++
++gceSTATUS
++gcoSTREAM_CPUCacheOperation_Range(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Length,
++ IN gceCACHEOPERATION Operation
++ );
++
++/******************************************************************************\
++******************************** gcoVERTEX Object ******************************
++\******************************************************************************/
++
++typedef struct _gcsVERTEX_ATTRIBUTES
++{
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT32 components;
++ gctSIZE_T size;
++ gctUINT32 stream;
++ gctUINT32 offset;
++ gctUINT32 stride;
++}
++gcsVERTEX_ATTRIBUTES;
++
++gceSTATUS
++gcoVERTEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEX * Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Destroy(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Reset(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_EnableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index,
++ IN gceVERTEX_FORMAT Format,
++ IN gctBOOL Normalized,
++ IN gctUINT32 Components,
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoVERTEX_DisableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index
++ );
++
++gceSTATUS
++gcoVERTEX_Bind(
++ IN gcoVERTEX Vertex
++ );
++
++/*******************************************************************************
++***** gcoVERTEXARRAY Object ***************************************************/
++
++typedef struct _gcsATTRIBUTE
++{
++ /* Enabled. */
++ gctBOOL enable;
++
++ /* Number of components. */
++ gctINT size;
++
++ /* Attribute format. */
++ gceVERTEX_FORMAT format;
++
++ /* Flag whether the attribute is normalized or not. */
++ gctBOOL normalized;
++
++ /* Stride of the component. */
++ gctSIZE_T stride;
++
++ /* Divisor of the attribute */
++ gctUINT divisor;
++
++ /* Pointer to the attribute data. */
++ gctCONST_POINTER pointer;
++
++ /* Stream object owning the attribute data. */
++ gcoBUFOBJ stream;
++
++ /* Generic values for attribute. */
++ gctFLOAT genericValue[4];
++
++ /* Generic size for attribute. */
++ gctINT genericSize;
++
++ /* Vertex shader linkage. */
++ gctUINT linkage;
++
++#if gcdUSE_WCLIP_PATCH
++ /* Does it hold positions? */
++ gctBOOL isPosition;
++#endif
++
++ /* Index to vertex array */
++ gctINT arrayIdx;
++
++ gceATTRIB_SCHEME convertScheme;
++
++ /* Pointer to the temporary buffer to be freed */
++ gcoBUFOBJ tempStream;
++
++ /* Pointer to the temporary memory to be freed */
++ gctCONST_POINTER tempMemory;
++}
++gcsATTRIBUTE,
++* gcsATTRIBUTE_PTR;
++
++
++typedef struct _gcsVERTEXARRAY
++{
++ /* Enabled. */
++ gctBOOL enable;
++
++ /* Number of components. */
++ gctINT size;
++
++ /* Attribute format. */
++ gceVERTEX_FORMAT format;
++
++ /* Flag whether the attribute is normalized or not. */
++ gctBOOL normalized;
++
++ /* Stride of the component. */
++ gctUINT stride;
++
++ /* Divisor of the attribute */
++ gctUINT divisor;
++
++ /* Pointer to the attribute data. */
++ gctCONST_POINTER pointer;
++
++ /* Stream object owning the attribute data. */
++ gcoSTREAM stream;
++
++ /* Generic values for attribute. */
++ gctFLOAT genericValue[4];
++
++ /* Generic size for attribute. */
++ gctINT genericSize;
++
++ /* Vertex shader linkage. */
++ gctUINT linkage;
++
++ gctBOOL isPosition;
++}
++gcsVERTEXARRAY,
++* gcsVERTEXARRAY_PTR;
++
++gceSTATUS
++gcoVERTEXARRAY_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEXARRAY * Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Destroy(
++ IN gcoVERTEXARRAY Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind_Ex(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsVERTEXARRAY_PTR VertexArray,
++ IN gctUINT First,
++ IN gctSIZE_T Count,
++ IN gctBOOL DrawArraysInstanced,
++ IN gctSIZE_T InstanceCount,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoINDEX IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctUINT * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty
++#else
++ IN OUT gctUINT * PrimitiveCount
++#endif
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind_Ex2(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsATTRIBUTE_PTR VertexArray,
++ IN gctSIZE_T First,
++ IN gctSIZE_T Count,
++ IN gctBOOL DrawArraysInstanced,
++ IN gctSIZE_T InstanceCount,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoBUFOBJ IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctSIZE_T * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty,
++#else
++ IN OUT gctUINT * PrimitiveCount,
++#endif
++ IN gctINT VertexInstanceIdLinkage
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsVERTEXARRAY_PTR VertexArray,
++ IN gctUINT First,
++ IN gctSIZE_T Count,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoINDEX IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctUINT * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty
++#else
++ IN OUT gctUINT * PrimitiveCount
++#endif
++ );
++
++/*******************************************************************************
++***** Composition *************************************************************/
++
++typedef enum _gceCOMPOSITION
++{
++ gcvCOMPOSE_CLEAR = 1,
++ gcvCOMPOSE_BLUR,
++ gcvCOMPOSE_DIM,
++ gcvCOMPOSE_LAYER
++}
++gceCOMPOSITION;
++
++typedef struct _gcsCOMPOSITION * gcsCOMPOSITION_PTR;
++typedef struct _gcsCOMPOSITION
++{
++ /* Structure size. */
++ gctUINT structSize;
++
++ /* Composition operation. */
++ gceCOMPOSITION operation;
++
++ /* Layer to be composed. */
++ gcoSURF layer;
++
++ /* Source and target coordinates. */
++ gcsRECT srcRect;
++ gcsRECT trgRect;
++
++ /* Target rectangle */
++ gcsPOINT v0;
++ gcsPOINT v1;
++ gcsPOINT v2;
++
++ /* Blending parameters. */
++ gctBOOL enableBlending;
++ gctBOOL premultiplied;
++ gctUINT8 alphaValue;
++
++ /* Clear color. */
++ gctFLOAT r;
++ gctFLOAT g;
++ gctFLOAT b;
++ gctFLOAT a;
++}
++gcsCOMPOSITION;
++
++gceSTATUS
++gco3D_ProbeComposition(
++ IN gcoHARDWARE Hardware,
++ IN gctBOOL ResetIfEmpty
++ );
++
++gceSTATUS
++gco3D_CompositionBegin(
++ IN gcoHARDWARE Hardware
++ );
++
++gceSTATUS
++gco3D_ComposeLayer(
++ IN gcoHARDWARE Hardware,
++ IN gcsCOMPOSITION_PTR Layer
++ );
++
++gceSTATUS
++gco3D_CompositionSignals(
++ IN gcoHARDWARE Hardware,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal1,
++ IN gctSIGNAL Signal2
++ );
++
++gceSTATUS
++gco3D_CompositionEnd(
++ IN gcoHARDWARE Hardware,
++ IN gcoSURF Target,
++ IN gctBOOL Synchronous
++ );
++
++/* Frame Database */
++gceSTATUS
++gcoHAL_AddFrameDB(
++ void
++ );
++
++gceSTATUS
++gcoHAL_DumpFrameDB(
++ gctCONST_STRING Filename OPTIONAL
++ );
++
++/******************************************************************************
++**********************gcoBUFOBJ object*****************************************
++*******************************************************************************/
++typedef enum _gceBUFOBJ_TYPE
++{
++ gcvBUFOBJ_TYPE_ARRAY_BUFFER = 1,
++ gcvBUFOBJ_TYPE_ELEMENT_ARRAY_BUFFER = 2,
++ gcvBUFOBJ_TYPE_GENERIC_BUFFER = 100
++
++} gceBUFOBJ_TYPE;
++
++typedef enum _gceBUFOBJ_USAGE
++{
++ gcvBUFOBJ_USAGE_STREAM_DRAW = 1,
++ gcvBUFOBJ_USAGE_STREAM_READ,
++ gcvBUFOBJ_USAGE_STREAM_COPY,
++ gcvBUFOBJ_USAGE_STATIC_DRAW,
++ gcvBUFOBJ_USAGE_STATIC_READ,
++ gcvBUFOBJ_USAGE_STATIC_COPY,
++ gcvBUFOBJ_USAGE_DYNAMIC_DRAW,
++ gcvBUFOBJ_USAGE_DYNAMIC_READ,
++ gcvBUFOBJ_USAGE_DYNAMIC_COPY,
++
++} gceBUFOBJ_USAGE;
++
++/* Construct a new gcoBUFOBJ object. */
++gceSTATUS
++gcoBUFOBJ_Construct(
++ IN gcoHAL Hal,
++ IN gceBUFOBJ_TYPE Type,
++ OUT gcoBUFOBJ * BufObj
++ );
++
++/* Destroy a gcoBUFOBJ object. */
++gceSTATUS
++gcoBUFOBJ_Destroy(
++ IN gcoBUFOBJ BufObj
++ );
++
++/* Lock pbo in memory. */
++gceSTATUS
++gcoBUFOBJ_Lock(
++ IN gcoBUFOBJ BufObj,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Lock pbo in memory. */
++gceSTATUS
++gcoBUFOBJ_FastLock(
++ IN gcoBUFOBJ BufObj,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Unlock pbo that was previously locked with gcoBUFOBJ_Lock. */
++gceSTATUS
++gcoBUFOBJ_Unlock(
++ IN gcoBUFOBJ BufObj
++ );
++
++/* Free existing pbo buffer. */
++gceSTATUS
++gcoBUFOBJ_Free(
++ IN gcoBUFOBJ BufObj
++ );
++
++/* Upload data into an pbo buffer. */
++gceSTATUS
++gcoBUFOBJ_Upload(
++ IN gcoBUFOBJ BufObj,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Bytes,
++ IN gceBUFOBJ_USAGE Usage
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoBUFOBJ_IndexBind (
++ IN gcoBUFOBJ Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Count
++ );
++
++/* Find min and max index for the index buffer */
++gceSTATUS
++gcoBUFOBJ_IndexGetRange(
++ IN gcoBUFOBJ Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Count,
++ OUT gctUINT32 * MinimumIndex,
++ OUT gctUINT32 * MaximumIndex
++ );
++
++/* Sets a buffer object as dirty */
++gceSTATUS
++gcoBUFOBJ_SetDirty(
++ IN gcoBUFOBJ BufObj
++ );
++
++/* Creates a new buffer if needed */
++gceSTATUS
++gcoBUFOBJ_AlignIndexBufferWhenNeeded(
++ IN gcoBUFOBJ BufObj,
++ IN gctSIZE_T Offset,
++ OUT gcoBUFOBJ * AlignedBufObj
++ );
++
++/* Cache operations on whole range */
++gceSTATUS
++gcoBUFOBJ_CPUCacheOperation(
++ IN gcoBUFOBJ BufObj,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Cache operations on a specified range */
++gceSTATUS
++gcoBUFOBJ_CPUCacheOperation_Range(
++ IN gcoBUFOBJ BufObj,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Length,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Return size of the bufobj */
++gceSTATUS
++gcoBUFOBJ_GetSize(
++ IN gcoBUFOBJ BufObj,
++ OUT gctSIZE_T_PTR Size
++ );
++
++/* Return memory node of the bufobj */
++gceSTATUS
++gcoBUFOBJ_GetNode(
++ IN gcoBUFOBJ BufObj,
++ OUT gcsSURF_NODE_PTR * Node
++ );
++
++/* Handle GPU cache operations */
++gceSTATUS
++gcoBUFOBJ_GPUCacheOperation(
++ gcoBUFOBJ BufObj
++ );
++
++/* Dump buffer. */
++void
++gcoBUFOBJ_Dump(
++ IN gcoBUFOBJ BufObj
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* gcdENABLE_3D */
++#endif /* __gc_hal_engine_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine_vg.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1215 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_vg_h_
++#define __gc_hal_engine_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include "gc_hal_types.h"
++
++/******************************************************************************\
++******************************** VG Enumerations *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Tiling mode for painting and imagig.
++**
++** This enumeration defines the tiling modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 tile modes.
++*/
++typedef enum _gceTILE_MODE
++{
++ gcvTILE_FILL,
++ gcvTILE_PAD,
++ gcvTILE_REPEAT,
++ gcvTILE_REFLECT
++}
++gceTILE_MODE;
++
++/******************************************************************************/
++/** @ingroup gcoVG
++**
++** @brief The different paint modes.
++**
++** This enumeration lists the available paint modes.
++*/
++typedef enum _gcePAINT_TYPE
++{
++ /** Solid color. */
++ gcvPAINT_MODE_SOLID,
++
++ /** Linear gradient. */
++ gcvPAINT_MODE_LINEAR,
++
++ /** Radial gradient. */
++ gcvPAINT_MODE_RADIAL,
++
++ /** Pattern. */
++ gcvPAINT_MODE_PATTERN,
++
++ /** Mode count. */
++ gcvPAINT_MODE_COUNT
++}
++gcePAINT_TYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Types of path data supported by HAL.
++**
++** This enumeration defines the types of path data supported by the HAL.
++** This is in fact a one-to-one mapping of the OpenVG 1.1 path types.
++*/
++typedef enum _gcePATHTYPE
++{
++ gcePATHTYPE_UNKNOWN = -1,
++ gcePATHTYPE_INT8,
++ gcePATHTYPE_INT16,
++ gcePATHTYPE_INT32,
++ gcePATHTYPE_FLOAT
++}
++gcePATHTYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Supported path segment commands.
++**
++** This enumeration defines the path segment commands supported by the HAL.
++*/
++typedef enum _gceVGCMD
++{
++ gcvVGCMD_END, /* 0: GCCMD_TS_OPCODE_END */
++ gcvVGCMD_CLOSE, /* 1: GCCMD_TS_OPCODE_CLOSE */
++ gcvVGCMD_MOVE, /* 2: GCCMD_TS_OPCODE_MOVE */
++ gcvVGCMD_MOVE_REL, /* 3: GCCMD_TS_OPCODE_MOVE_REL */
++ gcvVGCMD_LINE, /* 4: GCCMD_TS_OPCODE_LINE */
++ gcvVGCMD_LINE_REL, /* 5: GCCMD_TS_OPCODE_LINE_REL */
++ gcvVGCMD_QUAD, /* 6: GCCMD_TS_OPCODE_QUADRATIC */
++ gcvVGCMD_QUAD_REL, /* 7: GCCMD_TS_OPCODE_QUADRATIC_REL */
++ gcvVGCMD_CUBIC, /* 8: GCCMD_TS_OPCODE_CUBIC */
++ gcvVGCMD_CUBIC_REL, /* 9: GCCMD_TS_OPCODE_CUBIC_REL */
++ gcvVGCMD_BREAK, /* 10: GCCMD_TS_OPCODE_BREAK */
++ gcvVGCMD_HLINE, /* 11: ******* R E S E R V E D *******/
++ gcvVGCMD_HLINE_REL, /* 12: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE, /* 13: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE_REL, /* 14: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD, /* 15: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD_REL, /* 16: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC, /* 17: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC_REL, /* 18: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC, /* 19: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC_REL, /* 20: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC, /* 21: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC_REL, /* 22: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC, /* 23: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC_REL, /* 24: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC, /* 25: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC_REL, /* 26: ******* R E S E R V E D *******/
++
++ /* The width of the command recognized by the hardware on bits. */
++ gcvVGCMD_WIDTH = 5,
++
++ /* Hardware command mask. */
++ gcvVGCMD_MASK = (1 << gcvVGCMD_WIDTH) - 1,
++
++ /* Command modifiers. */
++ gcvVGCMD_H_MOD = 1 << gcvVGCMD_WIDTH, /* = 32 */
++ gcvVGCMD_V_MOD = 2 << gcvVGCMD_WIDTH, /* = 64 */
++ gcvVGCMD_S_MOD = 3 << gcvVGCMD_WIDTH, /* = 96 */
++ gcvVGCMD_ARC_MOD = 4 << gcvVGCMD_WIDTH, /* = 128 */
++
++ /* Emulated LINE commands. */
++ gcvVGCMD_HLINE_EMUL = gcvVGCMD_H_MOD | gcvVGCMD_LINE, /* = 36 */
++ gcvVGCMD_HLINE_EMUL_REL = gcvVGCMD_H_MOD | gcvVGCMD_LINE_REL, /* = 37 */
++ gcvVGCMD_VLINE_EMUL = gcvVGCMD_V_MOD | gcvVGCMD_LINE, /* = 68 */
++ gcvVGCMD_VLINE_EMUL_REL = gcvVGCMD_V_MOD | gcvVGCMD_LINE_REL, /* = 69 */
++
++ /* Emulated SMOOTH commands. */
++ gcvVGCMD_SQUAD_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD, /* = 102 */
++ gcvVGCMD_SQUAD_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD_REL, /* = 103 */
++ gcvVGCMD_SCUBIC_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC, /* = 104 */
++ gcvVGCMD_SCUBIC_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC_REL, /* = 105 */
++
++ /* Emulation ARC commands. */
++ gcvVGCMD_ARC_LINE = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE, /* = 132 */
++ gcvVGCMD_ARC_LINE_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE_REL, /* = 133 */
++ gcvVGCMD_ARC_QUAD = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD, /* = 134 */
++ gcvVGCMD_ARC_QUAD_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD_REL /* = 135 */
++}
++gceVGCMD;
++typedef enum _gceVGCMD * gceVGCMD_PTR;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Blending modes supported by the HAL.
++**
++** This enumeration defines the blending modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 blending modes.
++*/
++typedef enum _gceVG_BLEND
++{
++ gcvVG_BLEND_SRC,
++ gcvVG_BLEND_SRC_OVER,
++ gcvVG_BLEND_DST_OVER,
++ gcvVG_BLEND_SRC_IN,
++ gcvVG_BLEND_DST_IN,
++ gcvVG_BLEND_MULTIPLY,
++ gcvVG_BLEND_SCREEN,
++ gcvVG_BLEND_DARKEN,
++ gcvVG_BLEND_LIGHTEN,
++ gcvVG_BLEND_ADDITIVE,
++ gcvVG_BLEND_SUBTRACT,
++ gcvVG_BLEND_FILTER
++}
++gceVG_BLEND;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Image modes supported by the HAL.
++**
++** This enumeration defines the image modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 image modes with the addition
++** of NO IMAGE.
++*/
++typedef enum _gceVG_IMAGE
++{
++ gcvVG_IMAGE_NONE,
++ gcvVG_IMAGE_NORMAL,
++ gcvVG_IMAGE_MULTIPLY,
++ gcvVG_IMAGE_STENCIL,
++ gcvVG_IMAGE_FILTER
++}
++gceVG_IMAGE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Filter mode patterns and imaging.
++**
++** This enumeration defines the filter modes supported by the HAL.
++*/
++typedef enum _gceIMAGE_FILTER
++{
++ gcvFILTER_POINT,
++ gcvFILTER_LINEAR,
++ gcvFILTER_BI_LINEAR
++}
++gceIMAGE_FILTER;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Primitive modes supported by the HAL.
++**
++** This enumeration defines the primitive modes supported by the HAL.
++*/
++typedef enum _gceVG_PRIMITIVE
++{
++ gcvVG_SCANLINE,
++ gcvVG_RECTANGLE,
++ gcvVG_TESSELLATED,
++ gcvVG_TESSELLATED_TILED
++}
++gceVG_PRIMITIVE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rendering quality modes supported by the HAL.
++**
++** This enumeration defines the rendering quality modes supported by the HAL.
++*/
++typedef enum _gceRENDER_QUALITY
++{
++ gcvVG_NONANTIALIASED,
++ gcvVG_2X2_MSAA,
++ gcvVG_2X4_MSAA,
++ gcvVG_4X4_MSAA
++}
++gceRENDER_QUALITY;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Fill rules supported by the HAL.
++**
++** This enumeration defines the fill rules supported by the HAL.
++*/
++typedef enum _gceFILL_RULE
++{
++ gcvVG_EVEN_ODD,
++ gcvVG_NON_ZERO
++}
++gceFILL_RULE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Cap styles supported by the HAL.
++**
++** This enumeration defines the cap styles supported by the HAL.
++*/
++typedef enum _gceCAP_STYLE
++{
++ gcvCAP_BUTT,
++ gcvCAP_ROUND,
++ gcvCAP_SQUARE
++}
++gceCAP_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Join styles supported by the HAL.
++**
++** This enumeration defines the join styles supported by the HAL.
++*/
++typedef enum _gceJOIN_STYLE
++{
++ gcvJOIN_MITER,
++ gcvJOIN_ROUND,
++ gcvJOIN_BEVEL
++}
++gceJOIN_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Channel mask values.
++**
++** This enumeration defines the values for channel mask used in image
++** filtering.
++*/
++
++/* Base values for channel mask definitions. */
++#define gcvCHANNEL_X (0)
++#define gcvCHANNEL_R (1 << 0)
++#define gcvCHANNEL_G (1 << 1)
++#define gcvCHANNEL_B (1 << 2)
++#define gcvCHANNEL_A (1 << 3)
++
++typedef enum _gceCHANNEL
++{
++ gcvCHANNEL_XXXX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XXXA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XXBX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XXBA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_XGXX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XGXA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XGBX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XGBA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RXXX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RXXA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RXBX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RXBA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RGXX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RGXA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RGBX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RGBA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++}
++gceCHANNEL;
++
++/******************************************************************************\
++******************************** VG Structures *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsCOLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFLOAT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFLOAT red;
++
++ /** Green color channel value for the color stop. */
++ gctFLOAT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFLOAT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFLOAT alpha;
++}
++gcsCOLOR_RAMP, * gcsCOLOR_RAMP_PTR;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints in fixed form.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsFIXED_COLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFIXED_POINT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFIXED_POINT red;
++
++ /** Green color channel value for the color stop. */
++ gctFIXED_POINT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFIXED_POINT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFIXED_POINT alpha;
++}
++gcsFIXED_COLOR_RAMP, * gcsFIXED_COLOR_RAMP_PTR;
++
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rectangle structure used by the gcoVG object.
++**
++** This structure defines the layout of a rectangle. Make sure width and
++** height are larger than 0.
++*/
++typedef struct _gcsVG_RECT * gcsVG_RECT_PTR;
++typedef struct _gcsVG_RECT
++{
++ /** Left location of the rectangle. */
++ gctINT x;
++
++ /** Top location of the rectangle. */
++ gctINT y;
++
++ /** Width of the rectangle. */
++ gctINT width;
++
++ /** Height of the rectangle. */
++ gctINT height;
++}
++gcsVG_RECT;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Path command buffer attribute structure.
++**
++** The gcsPATH_BUFFER_INFO structure contains the specifics about
++** the layout of the path data command buffer.
++*/
++typedef struct _gcsPATH_BUFFER_INFO * gcsPATH_BUFFER_INFO_PTR;
++typedef struct _gcsPATH_BUFFER_INFO
++{
++ gctUINT reservedForHead;
++ gctUINT reservedForTail;
++}
++gcsPATH_BUFFER_INFO;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the path data container structure.
++**
++** The gcsPATH structure defines the layout of the path data container.
++*/
++typedef struct _gcsPATH_DATA * gcsPATH_DATA_PTR;
++typedef struct _gcsPATH_DATA
++{
++ /* Data container in command buffer format. */
++ gcsCMDBUFFER data;
++
++ /* Path data type. */
++ gcePATHTYPE dataType;
++}
++gcsPATH_DATA;
++
++
++/******************************************************************************\
++********************************* gcoHAL Object ********************************
++\******************************************************************************/
++
++/* Query path data storage attributes. */
++gceSTATUS
++gcoHAL_QueryPathStorage(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ OUT gcsPATH_BUFFER_INFO_PTR Information
++ );
++
++/* Associate a completion signal with the command buffer. */
++gceSTATUS
++gcoHAL_AssociateCompletion(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Release the current command buffer completion signal. */
++gceSTATUS
++gcoHAL_DeassociateCompletion(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Verify whether the command buffer is still in use. */
++gceSTATUS
++gcoHAL_CheckCompletion(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Wait until the command buffer is no longer in use. */
++gceSTATUS
++gcoHAL_WaitCompletion(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Flush the pixel cache. */
++gceSTATUS
++gcoHAL_Flush(
++ IN gcoHAL Hal
++#if GC355_PROFILER
++ ,
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth
++#endif
++ );
++
++/* Split a harwdare address into pool and offset. */
++gceSTATUS
++gcoHAL_SplitAddress(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Combine pool and offset into a harwdare address. */
++gceSTATUS
++gcoHAL_CombineAddress(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcePOOL Pool,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Schedule to free linear video memory allocated. */
++gceSTATUS
++gcoHAL_ScheduleVideoMemory(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT32 Node
++ );
++
++/* Free linear video memory allocated with gcoHAL_AllocateLinearVideoMemory. */
++gceSTATUS
++gcoHAL_FreeVideoMemory(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT32 Node
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gcoHAL_QueryCommandBuffer(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++/* Allocate and lock linear video memory. */
++gceSTATUS
++gcoHAL_AllocateLinearVideoMemory(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ IN gcePOOL Pool,
++ OUT gctUINT32 * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Align the specified size accordingly to the hardware requirements. */
++gceSTATUS
++gcoHAL_GetAlignedSurfaceSize(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++gceSTATUS
++gcoHAL_ReserveTask(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceBLOCK Block,
++ IN gctUINT TaskCount,
++ IN gctUINT32 Bytes,
++ OUT gctPOINTER * Memory
++ );
++/******************************************************************************\
++********************************** gcoVG Object ********************************
++\******************************************************************************/
++
++/** @defgroup gcoVG gcoVG
++**
++** The gcoVG object abstracts the VG hardware pipe.
++*/
++#if GC355_PROFILER
++void
++gcoVG_ProfilerEnableDisable(
++ IN gcoVG Vg,
++ IN gctUINT enableGetAPITimes,
++ IN gctFILE apiTimeFile
++ );
++
++void
++gcoVG_ProfilerTreeDepth(
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth
++ );
++
++void
++gcoVG_ProfilerSetStates(
++ IN gcoVG Vg,
++ IN gctUINT treeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth
++ );
++#endif
++
++gctBOOL
++gcoVG_IsMaskSupported(
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsTargetSupported(
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsImageSupported(
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceSURF_FORMAT Format
++ );
++
++gctUINT8 gcoVG_PackColorComponent(
++#if GC355_PROFILER
++ gcoVG Vg,
++ gctUINT TreeDepth,
++ gctUINT saveLayerTreeDepth,
++ gctUINT varTreeDepth,
++#endif
++ gctFLOAT Value
++ );
++
++gceSTATUS
++gcoVG_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVG * Vg
++ );
++
++gceSTATUS
++gcoVG_Destroy(
++ IN gcoVG Vg
++#if GC355_PROFILER
++ ,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth
++#endif
++ );
++
++gceSTATUS
++gcoVG_SetTarget(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Target
++ );
++
++gceSTATUS
++gcoVG_UnsetTarget(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_SetUserToSurface(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT UserToSurface[9]
++ );
++
++gceSTATUS
++gcoVG_SetSurfaceToImage(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT SurfaceToImage[9]
++ );
++
++gceSTATUS
++gcoVG_EnableMask(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetMask(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Mask
++ );
++
++gceSTATUS
++gcoVG_UnsetMask(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_FlushMask(
++ IN gcoVG Vg
++#if GC355_PROFILER
++ ,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth
++#endif
++ );
++
++gceSTATUS
++gcoVG_EnableScissor(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetScissor(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctSIZE_T RectangleCount,
++ IN gcsVG_RECT_PTR Rectangles
++ );
++
++gceSTATUS
++gcoVG_EnableColorTransform(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetColorTransform(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT ColorTransform[8]
++ );
++
++gceSTATUS
++gcoVG_SetTileFillColor(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++gceSTATUS
++gcoVG_SetSolidPaint(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++gceSTATUS
++gcoVG_SetLinearPaint(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT Constant,
++ IN gctFLOAT StepX,
++ IN gctFLOAT StepY
++ );
++
++gceSTATUS
++gcoVG_SetRadialPaint(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT LinConstant,
++ IN gctFLOAT LinStepX,
++ IN gctFLOAT LinStepY,
++ IN gctFLOAT RadConstant,
++ IN gctFLOAT RadStepX,
++ IN gctFLOAT RadStepY,
++ IN gctFLOAT RadStepXX,
++ IN gctFLOAT RadStepYY,
++ IN gctFLOAT RadStepXY
++ );
++
++gceSTATUS
++gcoVG_SetPatternPaint(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT UConstant,
++ IN gctFLOAT UStepX,
++ IN gctFLOAT UStepY,
++ IN gctFLOAT VConstant,
++ IN gctFLOAT VStepX,
++ IN gctFLOAT VStepY,
++ IN gctBOOL Linear
++ );
++
++gceSTATUS
++gcoVG_SetColorRamp(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF ColorRamp,
++ IN gceTILE_MODE ColorRampSpreadMode
++ );
++
++gceSTATUS
++gcoVG_SetPattern(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctINT32 width,
++ IN gctINT32 height,
++ IN gcoSURF Pattern,
++ IN gceTILE_MODE TileMode,
++ IN gceIMAGE_FILTER Filter
++ );
++
++gceSTATUS
++gcoVG_SetImageMode(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceVG_IMAGE Mode
++ );
++
++gceSTATUS
++gcoVG_SetBlendMode(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_SetRenderingQuality(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceRENDER_QUALITY Quality
++ );
++
++gceSTATUS
++gcoVG_SetFillRule(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceFILL_RULE FillRule
++ );
++
++gceSTATUS
++gcoVG_FinalizePath(
++ IN gcoVG Vg,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++gceSTATUS
++gcoVG_Clear(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_DrawPath(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++#if gcdMOVG
++ IN gctUINT32 Width,
++ IN gctUINT32 Height,
++ IN gctFLOAT *Bounds,
++#endif
++ IN gctBOOL SoftwareTesselation
++ );
++
++gceSTATUS
++gcoVG_DrawImage(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctBOOL Mask,
++ IN gctBOOL isDrawImage
++ );
++
++gceSTATUS
++gcoVG_TesselateImage(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Image,
++ IN gcsVG_RECT_PTR Rectangle,
++ IN gceIMAGE_FILTER Filter,
++ IN gctBOOL Mask,
++#if gcdMOVG
++ IN gctBOOL SoftwareTesselation,
++ IN gceVG_BLEND BlendMode
++#else
++ IN gctBOOL SoftwareTesselation
++#endif
++ );
++
++gceSTATUS
++gcoVG_Blit(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gcsVG_RECT_PTR SrcRect,
++ IN gcsVG_RECT_PTR TrgRect,
++ IN gceIMAGE_FILTER Filter,
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_ColorMatrix(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN const gctFLOAT * Matrix,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_SeparableConvolve(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT KernelWidth,
++ IN gctINT KernelHeight,
++ IN gctINT ShiftX,
++ IN gctINT ShiftY,
++ IN const gctINT16 * KernelX,
++ IN const gctINT16 * KernelY,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_GaussianBlur(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctFLOAT StdDeviationX,
++ IN gctFLOAT StdDeviationY,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_EnableDither(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctBOOL Enable
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_vg_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_enum.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_enum.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_enum.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_enum.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1608 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_enum_h_
++#define __gc_hal_enum_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Chip models. */
++typedef enum _gceCHIPMODEL
++{
++ gcv200 = 0x0200,
++ gcv300 = 0x0300,
++ gcv320 = 0x0320,
++ gcv328 = 0x0328,
++ gcv350 = 0x0350,
++ gcv355 = 0x0355,
++ gcv400 = 0x0400,
++ gcv410 = 0x0410,
++ gcv420 = 0x0420,
++ gcv428 = 0x0428,
++ gcv450 = 0x0450,
++ gcv500 = 0x0500,
++ gcv520 = 0x0520,
++ gcv530 = 0x0530,
++ gcv600 = 0x0600,
++ gcv700 = 0x0700,
++ gcv800 = 0x0800,
++ gcv860 = 0x0860,
++ gcv880 = 0x0880,
++ gcv1000 = 0x1000,
++ gcv1500 = 0x1500,
++ gcv2000 = 0x2000,
++ gcv2100 = 0x2100,
++ gcv2200 = 0x2200,
++ gcv2500 = 0x2500,
++ gcv3000 = 0x3000,
++ gcv4000 = 0x4000,
++ gcv5000 = 0x5000,
++ gcv5200 = 0x5200,
++ gcv6400 = 0x6400,
++}
++gceCHIPMODEL;
++
++/* Chip features. */
++typedef enum _gceFEATURE
++{
++ gcvFEATURE_PIPE_2D = 0,
++ gcvFEATURE_PIPE_3D,
++ gcvFEATURE_PIPE_VG,
++ gcvFEATURE_DC,
++ gcvFEATURE_HIGH_DYNAMIC_RANGE,
++ gcvFEATURE_MODULE_CG,
++ gcvFEATURE_MIN_AREA,
++ gcvFEATURE_BUFFER_INTERLEAVING,
++ gcvFEATURE_BYTE_WRITE_2D,
++ gcvFEATURE_ENDIANNESS_CONFIG,
++ gcvFEATURE_DUAL_RETURN_BUS,
++ gcvFEATURE_DEBUG_MODE,
++ gcvFEATURE_YUY2_RENDER_TARGET,
++ gcvFEATURE_FRAGMENT_PROCESSOR,
++ gcvFEATURE_2DPE20,
++ gcvFEATURE_FAST_CLEAR,
++ gcvFEATURE_YUV420_TILER,
++ gcvFEATURE_YUY2_AVERAGING,
++ gcvFEATURE_FLIP_Y,
++ gcvFEATURE_EARLY_Z,
++ gcvFEATURE_COMPRESSION,
++ gcvFEATURE_MSAA,
++ gcvFEATURE_SPECIAL_ANTI_ALIASING,
++ gcvFEATURE_SPECIAL_MSAA_LOD,
++ gcvFEATURE_422_TEXTURE_COMPRESSION,
++ gcvFEATURE_DXT_TEXTURE_COMPRESSION,
++ gcvFEATURE_ETC1_TEXTURE_COMPRESSION,
++ gcvFEATURE_CORRECT_TEXTURE_CONVERTER,
++ gcvFEATURE_TEXTURE_8K,
++ gcvFEATURE_SCALER,
++ gcvFEATURE_YUV420_SCALER,
++ gcvFEATURE_SHADER_HAS_W,
++ gcvFEATURE_SHADER_HAS_SIGN,
++ gcvFEATURE_SHADER_HAS_FLOOR,
++ gcvFEATURE_SHADER_HAS_CEIL,
++ gcvFEATURE_SHADER_HAS_SQRT,
++ gcvFEATURE_SHADER_HAS_TRIG,
++ gcvFEATURE_VAA,
++ gcvFEATURE_HZ,
++ gcvFEATURE_CORRECT_STENCIL,
++ gcvFEATURE_VG20,
++ gcvFEATURE_VG_FILTER,
++ gcvFEATURE_VG21,
++ gcvFEATURE_VG_DOUBLE_BUFFER,
++ gcvFEATURE_MC20,
++ gcvFEATURE_SUPER_TILED,
++ gcvFEATURE_FAST_CLEAR_FLUSH,
++ gcvFEATURE_2D_FILTERBLIT_PLUS_ALPHABLEND,
++ gcvFEATURE_2D_DITHER,
++ gcvFEATURE_2D_A8_TARGET,
++ gcvFEATURE_2D_A8_NO_ALPHA,
++ gcvFEATURE_2D_FILTERBLIT_FULLROTATION,
++ gcvFEATURE_2D_BITBLIT_FULLROTATION,
++ gcvFEATURE_WIDE_LINE,
++ gcvFEATURE_FC_FLUSH_STALL,
++ gcvFEATURE_FULL_DIRECTFB,
++ gcvFEATURE_HALF_FLOAT_PIPE,
++ gcvFEATURE_LINE_LOOP,
++ gcvFEATURE_2D_YUV_BLIT,
++ gcvFEATURE_2D_TILING,
++ gcvFEATURE_NON_POWER_OF_TWO,
++ gcvFEATURE_3D_TEXTURE,
++ gcvFEATURE_TEXTURE_ARRAY,
++ gcvFEATURE_TILE_FILLER,
++ gcvFEATURE_LOGIC_OP,
++ gcvFEATURE_COMPOSITION,
++ gcvFEATURE_MIXED_STREAMS,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT,
++ gcvFEATURE_END_EVENT,
++ gcvFEATURE_VERTEX_10_10_10_2,
++ gcvFEATURE_TEXTURE_10_10_10_2,
++ gcvFEATURE_TEXTURE_ANISOTROPIC_FILTERING,
++ gcvFEATURE_TEXTURE_FLOAT_HALF_FLOAT,
++ gcvFEATURE_2D_ROTATION_STALL_FIX,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX,
++ gcvFEATURE_BUG_FIXES10,
++ gcvFEATURE_2D_MINOR_TILING,
++ /* Supertiled compressed textures are supported. */
++ gcvFEATURE_TEX_COMPRRESSION_SUPERTILED,
++ gcvFEATURE_FAST_MSAA,
++ gcvFEATURE_BUG_FIXED_INDEXED_TRIANGLE_STRIP,
++ gcvFEATURE_TEXTURE_TILE_STATUS_READ,
++ gcvFEATURE_DEPTH_BIAS_FIX,
++ gcvFEATURE_RECT_PRIMITIVE,
++ gcvFEATURE_BUG_FIXES11,
++ gcvFEATURE_SUPERTILED_TEXTURE,
++ gcvFEATURE_2D_NO_COLORBRUSH_INDEX8,
++ gcvFEATURE_RS_YUV_TARGET,
++ gcvFEATURE_2D_FC_SOURCE,
++ gcvFEATURE_2D_CC_NOAA_SOURCE,
++ gcvFEATURE_PE_DITHER_FIX,
++ gcvFEATURE_2D_YUV_SEPARATE_STRIDE,
++ gcvFEATURE_FRUSTUM_CLIP_FIX,
++ gcvFEATURE_TEXTURE_SWIZZLE,
++ gcvFEATURE_PRIMITIVE_RESTART,
++ gcvFEATURE_TEXTURE_LINEAR,
++ gcvFEATURE_TEXTURE_YUV_ASSEMBLER,
++ gcvFEATURE_LINEAR_RENDER_TARGET,
++ gcvFEATURE_SHADER_HAS_ATOMIC,
++ gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE,
++ gcvFEATURE_SHADER_ENHANCEMENTS2,
++ gcvFEATURE_BUG_FIXES7,
++ gcvFEATURE_SHADER_HAS_RTNE,
++ gcvFEATURE_SHADER_HAS_EXTRA_INSTRUCTIONS2,
++ gcvFEATURE_SHADER_ENHANCEMENTS3,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING,
++ gcvFEATURE_SINGLE_BUFFER,
++ gcvFEATURE_OCCLUSION_QUERY,
++ gcvFEATURE_2D_GAMMA,
++ gcvFEATURE_2D_COLOR_SPACE_CONVERSION,
++ gcvFEATURE_2D_SUPER_TILE_VERSION,
++ gcvFEATURE_HALTI0,
++ gcvFEATURE_HALTI1,
++ gcvFEATURE_HALTI2,
++ gcvFEATURE_2D_MIRROR_EXTENSION,
++ gcvFEATURE_TEXTURE_ASTC,
++ gcvFEATURE_2D_SUPER_TILE_V1,
++ gcvFEATURE_2D_SUPER_TILE_V2,
++ gcvFEATURE_2D_SUPER_TILE_V3,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX2,
++ gcvFEATURE_NEW_RA,
++ gcvFEATURE_BUG_FIXED_IMPLICIT_PRIMITIVE_RESTART,
++ gcvFEATURE_PE_MULTI_RT_BLEND_ENABLE_CONTROL,
++ gcvFEATURE_SMALL_MSAA, /* An upgraded version of Fast MSAA */
++ gcvFEATURE_VERTEX_INST_ID_AS_ATTRIBUTE,
++ gcvFEATURE_DUAL_16,
++ gcvFEATURE_BRANCH_ON_IMMEDIATE_REG,
++ gcvFEATURE_2D_COMPRESSION,
++ gcvFEATURE_TPC_COMPRESSION,
++ gcvFEATURE_2D_OPF_YUV_OUTPUT,
++ gcvFEATURE_2D_FILTERBLIT_A8_ALPHA,
++ gcvFEATURE_2D_MULTI_SRC_BLT_TO_UNIFIED_DST_RECT,
++ gcvFEATURE_V2_COMPRESSION_Z16_FIX,
++
++ gcvFEATURE_VERTEX_INST_ID_AS_INTEGER,
++ gcvFEATURE_2D_YUV_MODE,
++ gcvFEATURE_ACE,
++ gcvFEATURE_COLOR_COMPRESSION,
++
++ gcvFEATURE_32BPP_COMPONENT_TEXTURE_CHANNEL_SWIZZLE,
++ gcvFEATURE_64BPP_HW_CLEAR_SUPPORT,
++ gcvFEATURE_TX_LERP_PRECISION_FIX,
++ gcvFEATURE_COMPRESSION_V2,
++ gcvFEATURE_MMU,
++ gcvFEATURE_COMPRESSION_V3,
++ gcvFEATURE_TX_DECOMPRESSOR,
++ gcvFEATURE_MRT_TILE_STATUS_BUFFER,
++ gcvFEATURE_COMPRESSION_V1,
++ gcvFEATURE_V1_COMPRESSION_Z16_DECOMPRESS_FIX,
++ gcvFEATURE_RTT,
++ gcvFEATURE_GENERICS,
++ gcvFEATURE_2D_ONE_PASS_FILTER,
++ gcvFEATURE_2D_ONE_PASS_FILTER_TAP,
++ gcvFEATURE_2D_POST_FLIP,
++ gcvFEATURE_2D_PIXEL_ALIGNMENT,
++ gcvFEATURE_CORRECT_AUTO_DISABLE_COUNT,
++ gcvFEATURE_CORRECT_AUTO_DISABLE_COUNT_WIDTH,
++
++ gcvFEATURE_HALTI3,
++ gcvFEATURE_EEZ,
++ gcvFEATURE_INTEGER_PIPE_FIX,
++ gcvFEATURE_PSOUTPUT_MAPPING,
++ gcvFEATURE_8K_RT_FIX,
++ gcvFEATURE_TX_TILE_STATUS_MAPPING,
++ gcvFEATURE_SRGB_RT_SUPPORT,
++ gcvFEATURE_UNIFORM_APERTURE,
++ gcvFEATURE_TEXTURE_16K,
++ gcvFEATURE_PA_FARZCLIPPING_FIX,
++ gcvFEATURE_PE_DITHER_COLORMASK_FIX,
++ gcvFEATURE_ZSCALE_FIX,
++
++ gcvFEATURE_MULTI_PIXELPIPES,
++ gcvFEATURE_PIPE_CL,
++
++ gcvFEATURE_BUG_FIXES18,
++
++ gcvFEATURE_UNIFIED_SAMPLERS,
++ gcvFEATURE_CL_PS_WALKER,
++ gcvFEATURE_NEW_HZ,
++
++ gcvFEATURE_TX_FRAC_PRECISION_6BIT,
++ gcvFEATURE_SH_INSTRUCTION_PREFETCH,
++ gcvFEATURE_PROBE,
++
++ gcvFEATURE_BUG_FIXES8,
++ gcvFEATURE_2D_ALL_QUAD,
++
++ gcvFEATURE_SINGLE_PIPE_HALTI1,
++
++ gcvFEATURE_BLOCK_SIZE_16x16,
++
++ gcvFEATURE_NO_USER_CSC,
++ gcvFEATURE_ANDROID_ONLY,
++ gcvFEATURE_HAS_PRODUCTID,
++
++ gcvFEATURE_V2_MSAA_COMP_FIX,
++
++ gcvFEATURE_S8_ONLY_RENDERING,
++
++ gcvFEATURE_SEPARATE_SRC_DST,
++
++ gcvFEATURE_FE_START_VERTEX_SUPPORT,
++ gcvFEATURE_RS_DEPTHSTENCIL_NATIVE_SUPPORT,
++
++ /* Insert features above this comment only. */
++ gcvFEATURE_COUNT /* Not a feature. */
++}
++gceFEATURE;
++
++/* Chip SWWA. */
++typedef enum _gceSWWA
++{
++ gcvSWWA_601 = 0,
++ gcvSWWA_706,
++ gcvSWWA_1163,
++ gcvSWWA_1165,
++ /* Insert SWWA above this comment only. */
++ gcvSWWA_COUNT /* Not a SWWA. */
++}
++gceSWWA;
++
++
++/* Option Set*/
++typedef enum _gceOPITON
++{
++ /* HW setting we take PREFER */
++ gcvOPTION_PREFER_MULTIPIPE_RS = 0,
++ gcvOPTION_PREFER_ZCONVERT_BYPASS =1,
++
++
++ gcvOPTION_HW_NULL = 50,
++ gcvOPTION_PRINT_OPTION = 51,
++
++ gcvOPTION_FBO_PREFER_MEM = 80,
++
++ /* Insert option above this comment only */
++ gcvOPTION_COUNT /* Not a OPTION*/
++}
++gceOPTION;
++
++typedef enum _gceFRAMEINFO
++{
++ gcvFRAMEINFO_FRAME_NUM = 0,
++ gcvFRAMEINFO_DRAW_NUM = 1,
++ gcvFRAMEINFO_DRAW_DUAL16_NUM = 2,
++ gcvFRAMEINFO_DRAW_FL32_NUM = 3,
++
++
++ gcvFRAMEINFO_COUNT,
++}
++gceFRAMEINFO;
++
++typedef enum _gceFRAMEINFO_OP
++{
++ gcvFRAMEINFO_OP_INC = 0,
++ gcvFRAMEINFO_OP_DEC = 1,
++ gcvFRAMEINFO_OP_ZERO = 2,
++ gcvFRAMEINFO_OP_GET = 3,
++
++
++ gcvFRAMEINFO_OP_COUNT,
++}
++gceFRAMEINFO_OP;
++
++
++/* Chip Power Status. */
++typedef enum _gceCHIPPOWERSTATE
++{
++ gcvPOWER_ON = 0,
++ gcvPOWER_OFF,
++ gcvPOWER_IDLE,
++ gcvPOWER_SUSPEND,
++ gcvPOWER_SUSPEND_ATPOWERON,
++ gcvPOWER_OFF_ATPOWERON,
++ gcvPOWER_IDLE_BROADCAST,
++ gcvPOWER_SUSPEND_BROADCAST,
++ gcvPOWER_OFF_BROADCAST,
++ gcvPOWER_OFF_RECOVERY,
++ gcvPOWER_OFF_TIMEOUT,
++ gcvPOWER_ON_AUTO
++}
++gceCHIPPOWERSTATE;
++
++/* CPU cache operations */
++typedef enum _gceCACHEOPERATION
++{
++ gcvCACHE_CLEAN = 0x01,
++ gcvCACHE_INVALIDATE = 0x02,
++ gcvCACHE_FLUSH = gcvCACHE_CLEAN | gcvCACHE_INVALIDATE,
++ gcvCACHE_MEMORY_BARRIER = 0x04
++}
++gceCACHEOPERATION;
++
++/* Surface types. */
++typedef enum _gceSURF_TYPE
++{
++ gcvSURF_TYPE_UNKNOWN = 0,
++ gcvSURF_INDEX,
++ gcvSURF_VERTEX,
++ gcvSURF_TEXTURE,
++ gcvSURF_RENDER_TARGET,
++ gcvSURF_DEPTH,
++ gcvSURF_BITMAP,
++ gcvSURF_TILE_STATUS,
++ gcvSURF_IMAGE,
++ gcvSURF_MASK,
++ gcvSURF_SCISSOR,
++ gcvSURF_HIERARCHICAL_DEPTH,
++ gcvSURF_NUM_TYPES, /* Make sure this is the last one! */
++
++ /* Combinations. */
++ gcvSURF_NO_TILE_STATUS = 0x100,
++ gcvSURF_NO_VIDMEM = 0x200, /* Used to allocate surfaces with no underlying vidmem node.
++ In Android, vidmem node is allocated by another process. */
++ gcvSURF_CACHEABLE = 0x400, /* Used to allocate a cacheable surface */
++
++ gcvSURF_FLIP = 0x800, /* The Resolve Target the will been flip resolve from RT */
++
++ gcvSURF_TILE_STATUS_DIRTY = 0x1000, /* Init tile status to all dirty */
++
++ gcvSURF_LINEAR = 0x2000,
++
++ gcvSURF_CREATE_AS_TEXTURE = 0x4000, /* create it as a texture */
++
++ gcvSURF_PROTECTED_CONTENT = 0x8000, /* create it as content protected */
++
++ /* Create it as no compression, valid on when it has tile status. */
++ gcvSURF_NO_COMPRESSION = 0x40000,
++
++ gcvSURF_CONTIGUOUS = 0x20000, /*create it as contiguous */
++
++ gcvSURF_TEXTURE_LINEAR = gcvSURF_TEXTURE
++ | gcvSURF_LINEAR,
++
++ gcvSURF_RENDER_TARGET_LINEAR = gcvSURF_RENDER_TARGET
++ | gcvSURF_LINEAR,
++
++ gcvSURF_RENDER_TARGET_NO_TILE_STATUS = gcvSURF_RENDER_TARGET
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_RENDER_TARGET_TS_DIRTY = gcvSURF_RENDER_TARGET
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ gcvSURF_DEPTH_NO_TILE_STATUS = gcvSURF_DEPTH
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_DEPTH_TS_DIRTY = gcvSURF_DEPTH
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ /* Supported surface types with no vidmem node. */
++ gcvSURF_BITMAP_NO_VIDMEM = gcvSURF_BITMAP
++ | gcvSURF_NO_VIDMEM,
++
++ gcvSURF_TEXTURE_NO_VIDMEM = gcvSURF_TEXTURE
++ | gcvSURF_NO_VIDMEM,
++
++ /* Cacheable surface types with no vidmem node. */
++ gcvSURF_CACHEABLE_BITMAP_NO_VIDMEM = gcvSURF_BITMAP_NO_VIDMEM
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_CACHEABLE_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_FLIP_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_FLIP,
++}
++gceSURF_TYPE;
++
++typedef enum _gceSURF_USAGE
++{
++ gcvSURF_USAGE_UNKNOWN,
++ gcvSURF_USAGE_RESOLVE_AFTER_CPU,
++ gcvSURF_USAGE_RESOLVE_AFTER_3D
++}
++gceSURF_USAGE;
++
++typedef enum _gceSURF_COLOR_SPACE
++{
++ gcvSURF_COLOR_SPACE_UNKNOWN,
++ gcvSURF_COLOR_SPACE_LINEAR,
++ gcvSURF_COLOR_SPACE_NONLINEAR,
++}
++gceSURF_COLOR_SPACE;
++
++typedef enum _gceSURF_COLOR_TYPE
++{
++ gcvSURF_COLOR_UNKNOWN = 0,
++ gcvSURF_COLOR_LINEAR = 0x01,
++ gcvSURF_COLOR_ALPHA_PRE = 0x02,
++}
++gceSURF_COLOR_TYPE;
++
++/* Rotation. */
++typedef enum _gceSURF_ROTATION
++{
++ gcvSURF_0_DEGREE = 0,
++ gcvSURF_90_DEGREE,
++ gcvSURF_180_DEGREE,
++ gcvSURF_270_DEGREE,
++ gcvSURF_FLIP_X,
++ gcvSURF_FLIP_Y,
++
++ gcvSURF_POST_FLIP_X = 0x40000000,
++ gcvSURF_POST_FLIP_Y = 0x80000000,
++}
++gceSURF_ROTATION;
++
++/* Surface flag */
++typedef enum _gceSURF_FLAG
++{
++ /* None flag */
++ gcvSURF_FLAG_NONE = 0x0,
++ /* content is preserved after swap */
++ gcvSURF_FLAG_CONTENT_PRESERVED = 0x1,
++ /* content is updated after swap*/
++ gcvSURF_FLAG_CONTENT_UPDATED = 0x2,
++ /* content is y inverted */
++ gcvSURF_FLAG_CONTENT_YINVERTED = 0x4,
++ /* content is protected */
++ gcvSURF_FLAG_CONTENT_PROTECTED = 0x8,
++ /* surface is contiguous. */
++ gcvSURF_FLAG_CONTIGUOUS = (1 << 4),
++}
++gceSURF_FLAG;
++
++typedef enum _gceMIPMAP_IMAGE_FORMAT
++{
++ gcvUNKNOWN_MIPMAP_IMAGE_FORMAT = -2
++}
++gceMIPMAP_IMAGE_FORMAT;
++
++/* Surface formats. */
++typedef enum _gceSURF_FORMAT
++{
++ /* Unknown format. */
++ gcvSURF_UNKNOWN = 0,
++
++ /* Palettized formats. */
++ gcvSURF_INDEX1 = 100,
++ gcvSURF_INDEX4,
++ gcvSURF_INDEX8,
++
++ /* RGB formats. */
++ gcvSURF_A2R2G2B2 = 200,
++ gcvSURF_R3G3B2,
++ gcvSURF_A8R3G3B2,
++ gcvSURF_X4R4G4B4,
++ gcvSURF_A4R4G4B4,
++ gcvSURF_R4G4B4A4,
++ gcvSURF_X1R5G5B5,
++ gcvSURF_A1R5G5B5,
++ gcvSURF_R5G5B5A1,
++ gcvSURF_R5G6B5,
++ gcvSURF_R8G8B8,
++ gcvSURF_X8R8G8B8,
++ gcvSURF_A8R8G8B8,
++ gcvSURF_R8G8B8A8,
++ gcvSURF_G8R8G8B8,
++ gcvSURF_R8G8B8G8,
++ gcvSURF_X2R10G10B10,
++ gcvSURF_A2R10G10B10,
++ gcvSURF_X12R12G12B12,
++ gcvSURF_A12R12G12B12,
++ gcvSURF_X16R16G16B16,
++ gcvSURF_A16R16G16B16,
++ gcvSURF_A32R32G32B32,
++ gcvSURF_R8G8B8X8,
++ gcvSURF_R5G5B5X1,
++ gcvSURF_R4G4B4X4,
++ gcvSURF_X16R16G16B16_2_A8R8G8B8,
++ gcvSURF_A16R16G16B16_2_A8R8G8B8,
++ gcvSURF_A32R32G32B32_2_G32R32F,
++ gcvSURF_A32R32G32B32_4_A8R8G8B8,
++
++ /* BGR formats. */
++ gcvSURF_A4B4G4R4 = 300,
++ gcvSURF_A1B5G5R5,
++ gcvSURF_B5G6R5,
++ gcvSURF_B8G8R8,
++ gcvSURF_B16G16R16,
++ gcvSURF_X8B8G8R8,
++ gcvSURF_A8B8G8R8,
++ gcvSURF_A2B10G10R10,
++ gcvSURF_X16B16G16R16,
++ gcvSURF_A16B16G16R16,
++ gcvSURF_B32G32R32,
++ gcvSURF_X32B32G32R32,
++ gcvSURF_A32B32G32R32,
++ gcvSURF_B4G4R4A4,
++ gcvSURF_B5G5R5A1,
++ gcvSURF_B8G8R8X8,
++ gcvSURF_B8G8R8A8,
++ gcvSURF_X4B4G4R4,
++ gcvSURF_X1B5G5R5,
++ gcvSURF_B4G4R4X4,
++ gcvSURF_B5G5R5X1,
++ gcvSURF_X2B10G10R10,
++ gcvSURF_B8G8R8_SNORM,
++ gcvSURF_X8B8G8R8_SNORM,
++ gcvSURF_A8B8G8R8_SNORM,
++ gcvSURF_A8B12G12R12_2_A8R8G8B8,
++
++ /* Compressed formats. */
++ gcvSURF_DXT1 = 400,
++ gcvSURF_DXT2,
++ gcvSURF_DXT3,
++ gcvSURF_DXT4,
++ gcvSURF_DXT5,
++ gcvSURF_CXV8U8,
++ gcvSURF_ETC1,
++ gcvSURF_R11_EAC,
++ gcvSURF_SIGNED_R11_EAC,
++ gcvSURF_RG11_EAC,
++ gcvSURF_SIGNED_RG11_EAC,
++ gcvSURF_RGB8_ETC2,
++ gcvSURF_SRGB8_ETC2,
++ gcvSURF_RGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_RGBA8_ETC2_EAC,
++ gcvSURF_SRGB8_ALPHA8_ETC2_EAC,
++
++ /* YUV formats. */
++ gcvSURF_YUY2 = 500,
++ gcvSURF_UYVY,
++ gcvSURF_YV12,
++ gcvSURF_I420,
++ gcvSURF_NV12,
++ gcvSURF_NV21,
++ gcvSURF_NV16,
++ gcvSURF_NV61,
++ gcvSURF_YVYU,
++ gcvSURF_VYUY,
++
++ /* Depth formats. */
++ gcvSURF_D16 = 600,
++ gcvSURF_D24S8,
++ gcvSURF_D32,
++ gcvSURF_D24X8,
++ gcvSURF_D32F,
++ gcvSURF_S8D32F,
++ gcvSURF_S8D32F_1_G32R32F,
++ gcvSURF_S8D32F_2_A8R8G8B8,
++ gcvSURF_D24S8_1_A8R8G8B8,
++ gcvSURF_S8,
++
++ /* Alpha formats. */
++ gcvSURF_A4 = 700,
++ gcvSURF_A8,
++ gcvSURF_A12,
++ gcvSURF_A16,
++ gcvSURF_A32,
++ gcvSURF_A1,
++
++ /* Luminance formats. */
++ gcvSURF_L4 = 800,
++ gcvSURF_L8,
++ gcvSURF_L12,
++ gcvSURF_L16,
++ gcvSURF_L32,
++ gcvSURF_L1,
++
++ /* Alpha/Luminance formats. */
++ gcvSURF_A4L4 = 900,
++ gcvSURF_A2L6,
++ gcvSURF_A8L8,
++ gcvSURF_A4L12,
++ gcvSURF_A12L12,
++ gcvSURF_A16L16,
++
++ /* Bump formats. */
++ gcvSURF_L6V5U5 = 1000,
++ gcvSURF_V8U8,
++ gcvSURF_X8L8V8U8,
++ gcvSURF_Q8W8V8U8,
++ gcvSURF_A2W10V10U10,
++ gcvSURF_V16U16,
++ gcvSURF_Q16W16V16U16,
++
++ /* R/RG/RA formats. */
++ gcvSURF_R8 = 1100,
++ gcvSURF_X8R8,
++ gcvSURF_G8R8,
++ gcvSURF_X8G8R8,
++ gcvSURF_A8R8,
++ gcvSURF_R16,
++ gcvSURF_X16R16,
++ gcvSURF_G16R16,
++ gcvSURF_X16G16R16,
++ gcvSURF_A16R16,
++ gcvSURF_R32,
++ gcvSURF_X32R32,
++ gcvSURF_G32R32,
++ gcvSURF_X32G32R32,
++ gcvSURF_A32R32,
++ gcvSURF_RG16,
++ gcvSURF_R8_SNORM,
++ gcvSURF_G8R8_SNORM,
++
++ gcvSURF_R8_1_X8R8G8B8,
++ gcvSURF_G8R8_1_X8R8G8B8,
++
++ /* Floating point formats. */
++ gcvSURF_R16F = 1200,
++ gcvSURF_X16R16F,
++ gcvSURF_G16R16F,
++ gcvSURF_X16G16R16F,
++ gcvSURF_B16G16R16F,
++ gcvSURF_X16B16G16R16F,
++ gcvSURF_A16B16G16R16F,
++ gcvSURF_R32F,
++ gcvSURF_X32R32F,
++ gcvSURF_G32R32F,
++ gcvSURF_X32G32R32F,
++ gcvSURF_B32G32R32F,
++ gcvSURF_X32B32G32R32F,
++ gcvSURF_A32B32G32R32F,
++ gcvSURF_A16F,
++ gcvSURF_L16F,
++ gcvSURF_A16L16F,
++ gcvSURF_A16R16F,
++ gcvSURF_A32F,
++ gcvSURF_L32F,
++ gcvSURF_A32L32F,
++ gcvSURF_A32R32F,
++ gcvSURF_E5B9G9R9,
++ gcvSURF_B10G11R11F,
++
++ gcvSURF_X16B16G16R16F_2_A8R8G8B8,
++ gcvSURF_A16B16G16R16F_2_A8R8G8B8,
++ gcvSURF_G32R32F_2_A8R8G8B8,
++ gcvSURF_X32B32G32R32F_2_G32R32F,
++ gcvSURF_A32B32G32R32F_2_G32R32F,
++ gcvSURF_X32B32G32R32F_4_A8R8G8B8,
++ gcvSURF_A32B32G32R32F_4_A8R8G8B8,
++
++ gcvSURF_R16F_1_A4R4G4B4,
++ gcvSURF_G16R16F_1_A8R8G8B8,
++ gcvSURF_B16G16R16F_2_A8R8G8B8,
++
++ gcvSURF_R32F_1_A8R8G8B8,
++ gcvSURF_B32G32R32F_3_A8R8G8B8,
++
++ gcvSURF_B10G11R11F_1_A8R8G8B8,
++
++
++ /* sRGB format. */
++ gcvSURF_SBGR8 = 1400,
++ gcvSURF_A8_SBGR8,
++ gcvSURF_X8_SBGR8,
++
++ /* Integer formats. */
++ gcvSURF_R8I = 1500,
++ gcvSURF_R8UI,
++ gcvSURF_R16I,
++ gcvSURF_R16UI,
++ gcvSURF_R32I,
++ gcvSURF_R32UI,
++ gcvSURF_X8R8I,
++ gcvSURF_G8R8I,
++ gcvSURF_X8R8UI,
++ gcvSURF_G8R8UI,
++ gcvSURF_X16R16I,
++ gcvSURF_G16R16I,
++ gcvSURF_X16R16UI,
++ gcvSURF_G16R16UI,
++ gcvSURF_X32R32I,
++ gcvSURF_G32R32I,
++ gcvSURF_X32R32UI,
++ gcvSURF_G32R32UI,
++ gcvSURF_X8G8R8I,
++ gcvSURF_B8G8R8I,
++ gcvSURF_X8G8R8UI,
++ gcvSURF_B8G8R8UI,
++ gcvSURF_X16G16R16I,
++ gcvSURF_B16G16R16I,
++ gcvSURF_X16G16R16UI,
++ gcvSURF_B16G16R16UI,
++ gcvSURF_X32G32R32I,
++ gcvSURF_B32G32R32I,
++ gcvSURF_X32G32R32UI,
++ gcvSURF_B32G32R32UI,
++ gcvSURF_X8B8G8R8I,
++ gcvSURF_A8B8G8R8I,
++ gcvSURF_X8B8G8R8UI,
++ gcvSURF_A8B8G8R8UI,
++ gcvSURF_X16B16G16R16I,
++ gcvSURF_A16B16G16R16I,
++ gcvSURF_X16B16G16R16UI,
++ gcvSURF_A16B16G16R16UI,
++ gcvSURF_X32B32G32R32I,
++ gcvSURF_A32B32G32R32I,
++ gcvSURF_X32B32G32R32UI,
++ gcvSURF_A32B32G32R32UI,
++ gcvSURF_A2B10G10R10UI,
++ gcvSURF_G32R32I_2_A8R8G8B8,
++ gcvSURF_G32R32UI_2_A8R8G8B8,
++ gcvSURF_X16B16G16R16I_2_A8R8G8B8,
++ gcvSURF_A16B16G16R16I_2_A8R8G8B8,
++ gcvSURF_X16B16G16R16UI_2_A8R8G8B8,
++ gcvSURF_A16B16G16R16UI_2_A8R8G8B8,
++ gcvSURF_X32B32G32R32I_2_G32R32I,
++ gcvSURF_A32B32G32R32I_2_G32R32I,
++ gcvSURF_X32B32G32R32I_3_A8R8G8B8,
++ gcvSURF_A32B32G32R32I_4_A8R8G8B8,
++ gcvSURF_X32B32G32R32UI_2_G32R32UI,
++ gcvSURF_A32B32G32R32UI_2_G32R32UI,
++ gcvSURF_X32B32G32R32UI_3_A8R8G8B8,
++ gcvSURF_A32B32G32R32UI_4_A8R8G8B8,
++ gcvSURF_A2B10G10R10UI_1_A8R8G8B8,
++ gcvSURF_A8B8G8R8I_1_A8R8G8B8,
++ gcvSURF_A8B8G8R8UI_1_A8R8G8B8,
++ gcvSURF_R8I_1_A4R4G4B4,
++ gcvSURF_R8UI_1_A4R4G4B4,
++ gcvSURF_R16I_1_A4R4G4B4,
++ gcvSURF_R16UI_1_A4R4G4B4,
++ gcvSURF_R32I_1_A8R8G8B8,
++ gcvSURF_R32UI_1_A8R8G8B8,
++ gcvSURF_X8R8I_1_A4R4G4B4,
++ gcvSURF_X8R8UI_1_A4R4G4B4,
++ gcvSURF_G8R8I_1_A4R4G4B4,
++ gcvSURF_G8R8UI_1_A4R4G4B4,
++ gcvSURF_X16R16I_1_A4R4G4B4,
++ gcvSURF_X16R16UI_1_A4R4G4B4,
++ gcvSURF_G16R16I_1_A8R8G8B8,
++ gcvSURF_G16R16UI_1_A8R8G8B8,
++ gcvSURF_X32R32I_1_A8R8G8B8,
++ gcvSURF_X32R32UI_1_A8R8G8B8,
++ gcvSURF_X8G8R8I_1_A4R4G4B4,
++ gcvSURF_X8G8R8UI_1_A4R4G4B4,
++ gcvSURF_B8G8R8I_1_A8R8G8B8,
++ gcvSURF_B8G8R8UI_1_A8R8G8B8,
++ gcvSURF_B16G16R16I_2_A8R8G8B8,
++ gcvSURF_B16G16R16UI_2_A8R8G8B8,
++ gcvSURF_B32G32R32I_3_A8R8G8B8,
++ gcvSURF_B32G32R32UI_3_A8R8G8B8,
++
++ /* ASTC formats. */
++ gcvSURF_ASTC4x4 = 1600,
++ gcvSURF_ASTC5x4,
++ gcvSURF_ASTC5x5,
++ gcvSURF_ASTC6x5,
++ gcvSURF_ASTC6x6,
++ gcvSURF_ASTC8x5,
++ gcvSURF_ASTC8x6,
++ gcvSURF_ASTC8x8,
++ gcvSURF_ASTC10x5,
++ gcvSURF_ASTC10x6,
++ gcvSURF_ASTC10x8,
++ gcvSURF_ASTC10x10,
++ gcvSURF_ASTC12x10,
++ gcvSURF_ASTC12x12,
++ gcvSURF_ASTC4x4_SRGB,
++ gcvSURF_ASTC5x4_SRGB,
++ gcvSURF_ASTC5x5_SRGB,
++ gcvSURF_ASTC6x5_SRGB,
++ gcvSURF_ASTC6x6_SRGB,
++ gcvSURF_ASTC8x5_SRGB,
++ gcvSURF_ASTC8x6_SRGB,
++ gcvSURF_ASTC8x8_SRGB,
++ gcvSURF_ASTC10x5_SRGB,
++ gcvSURF_ASTC10x6_SRGB,
++ gcvSURF_ASTC10x8_SRGB,
++ gcvSURF_ASTC10x10_SRGB,
++ gcvSURF_ASTC12x10_SRGB,
++ gcvSURF_ASTC12x12_SRGB,
++
++ gcvSURF_FORMAT_COUNT
++}
++gceSURF_FORMAT;
++
++/* Format modifiers. */
++typedef enum _gceSURF_FORMAT_MODE
++{
++ gcvSURF_FORMAT_OCL = 0x80000000
++}
++gceSURF_FORMAT_MODE;
++
++/* Pixel swizzle modes. */
++typedef enum _gceSURF_SWIZZLE
++{
++ gcvSURF_NOSWIZZLE = 0,
++ gcvSURF_ARGB,
++ gcvSURF_ABGR,
++ gcvSURF_RGBA,
++ gcvSURF_BGRA
++}
++gceSURF_SWIZZLE;
++
++/* Transparency modes. */
++typedef enum _gceSURF_TRANSPARENCY
++{
++ /* Valid only for PE 1.0 */
++ gcvSURF_OPAQUE = 0,
++ gcvSURF_SOURCE_MATCH,
++ gcvSURF_SOURCE_MASK,
++ gcvSURF_PATTERN_MASK,
++}
++gceSURF_TRANSPARENCY;
++
++/* Surface Alignment. */
++typedef enum _gceSURF_ALIGNMENT
++{
++ gcvSURF_FOUR = 0,
++ gcvSURF_SIXTEEN,
++ gcvSURF_SUPER_TILED,
++ gcvSURF_SPLIT_TILED,
++ gcvSURF_SPLIT_SUPER_TILED
++}
++gceSURF_ALIGNMENT;
++
++/* Surface Addressing. */
++typedef enum _gceSURF_ADDRESSING
++{
++ gcvSURF_NO_STRIDE_TILED = 0,
++ gcvSURF_NO_STRIDE_LINEAR,
++ gcvSURF_STRIDE_TILED,
++ gcvSURF_STRIDE_LINEAR
++}
++gceSURF_ADDRESSING;
++
++/* Transparency modes. */
++typedef enum _gce2D_TRANSPARENCY
++{
++ /* Valid only for PE 2.0 */
++ gcv2D_OPAQUE = 0,
++ gcv2D_KEYED,
++ gcv2D_MASKED
++}
++gce2D_TRANSPARENCY;
++
++/* Mono packing modes. */
++typedef enum _gceSURF_MONOPACK
++{
++ gcvSURF_PACKED8 = 0,
++ gcvSURF_PACKED16,
++ gcvSURF_PACKED32,
++ gcvSURF_UNPACKED,
++}
++gceSURF_MONOPACK;
++
++/* Blending modes. */
++typedef enum _gceSURF_BLEND_MODE
++{
++ /* Porter-Duff blending modes. */
++ /* Fsrc Fdst */
++ gcvBLEND_CLEAR = 0, /* 0 0 */
++ gcvBLEND_SRC, /* 1 0 */
++ gcvBLEND_DST, /* 0 1 */
++ gcvBLEND_SRC_OVER_DST, /* 1 1 - Asrc */
++ gcvBLEND_DST_OVER_SRC, /* 1 - Adst 1 */
++ gcvBLEND_SRC_IN_DST, /* Adst 0 */
++ gcvBLEND_DST_IN_SRC, /* 0 Asrc */
++ gcvBLEND_SRC_OUT_DST, /* 1 - Adst 0 */
++ gcvBLEND_DST_OUT_SRC, /* 0 1 - Asrc */
++ gcvBLEND_SRC_ATOP_DST, /* Adst 1 - Asrc */
++ gcvBLEND_DST_ATOP_SRC, /* 1 - Adst Asrc */
++ gcvBLEND_SRC_XOR_DST, /* 1 - Adst 1 - Asrc */
++
++ /* Special blending modes. */
++ gcvBLEND_SET, /* DST = 1 */
++ gcvBLEND_SUB /* DST = DST * (1 - SRC) */
++}
++gceSURF_BLEND_MODE;
++
++/* Per-pixel alpha modes. */
++typedef enum _gceSURF_PIXEL_ALPHA_MODE
++{
++ gcvSURF_PIXEL_ALPHA_STRAIGHT = 0,
++ gcvSURF_PIXEL_ALPHA_INVERSED
++}
++gceSURF_PIXEL_ALPHA_MODE;
++
++/* Global alpha modes. */
++typedef enum _gceSURF_GLOBAL_ALPHA_MODE
++{
++ gcvSURF_GLOBAL_ALPHA_OFF = 0,
++ gcvSURF_GLOBAL_ALPHA_ON,
++ gcvSURF_GLOBAL_ALPHA_SCALE
++}
++gceSURF_GLOBAL_ALPHA_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gceSURF_PIXEL_COLOR_MODE
++{
++ gcvSURF_COLOR_STRAIGHT = 0,
++ gcvSURF_COLOR_MULTIPLY
++}
++gceSURF_PIXEL_COLOR_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_PIXEL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_COLOR_MULTIPLY_ENABLE
++}
++gce2D_PIXEL_COLOR_MULTIPLY_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_GLOBAL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_GLOBAL_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_ALPHA,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_COLOR
++}
++gce2D_GLOBAL_COLOR_MULTIPLY_MODE;
++
++/* Alpha blending factor modes. */
++typedef enum _gceSURF_BLEND_FACTOR_MODE
++{
++ gcvSURF_BLEND_ZERO = 0,
++ gcvSURF_BLEND_ONE,
++ gcvSURF_BLEND_STRAIGHT,
++ gcvSURF_BLEND_INVERSED,
++ gcvSURF_BLEND_COLOR,
++ gcvSURF_BLEND_COLOR_INVERSED,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED,
++ gcvSURF_BLEND_STRAIGHT_NO_CROSS,
++ gcvSURF_BLEND_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_COLOR_NO_CROSS,
++ gcvSURF_BLEND_COLOR_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED_CROSS
++}
++gceSURF_BLEND_FACTOR_MODE;
++
++/* Alpha blending porter duff rules. */
++typedef enum _gce2D_PORTER_DUFF_RULE
++{
++ gcvPD_CLEAR = 0,
++ gcvPD_SRC,
++ gcvPD_SRC_OVER,
++ gcvPD_DST_OVER,
++ gcvPD_SRC_IN,
++ gcvPD_DST_IN,
++ gcvPD_SRC_OUT,
++ gcvPD_DST_OUT,
++ gcvPD_SRC_ATOP,
++ gcvPD_DST_ATOP,
++ gcvPD_ADD,
++ gcvPD_XOR,
++ gcvPD_DST
++}
++gce2D_PORTER_DUFF_RULE;
++
++/* Alpha blending factor modes. */
++typedef enum _gce2D_YUV_COLOR_MODE
++{
++ gcv2D_YUV_601= 0,
++ gcv2D_YUV_709,
++ gcv2D_YUV_USER_DEFINED,
++ gcv2D_YUV_USER_DEFINED_CLAMP,
++
++ /* Default setting is for src. gcv2D_YUV_DST
++ can be ORed to set dst.
++ */
++ gcv2D_YUV_DST = 0x80000000,
++}
++gce2D_YUV_COLOR_MODE;
++
++typedef enum _gce2D_COMMAND
++{
++ gcv2D_CLEAR = 0,
++ gcv2D_LINE,
++ gcv2D_BLT,
++ gcv2D_STRETCH,
++ gcv2D_HOR_FILTER,
++ gcv2D_VER_FILTER,
++ gcv2D_MULTI_SOURCE_BLT,
++ gcv2D_FILTER_BLT,
++}
++gce2D_COMMAND;
++
++typedef enum _gce2D_TILE_STATUS_CONFIG
++{
++ gcv2D_TSC_DISABLE = 0,
++ gcv2D_TSC_ENABLE = 0x00000001,
++ gcv2D_TSC_COMPRESSED = 0x00000002,
++ gcv2D_TSC_DOWN_SAMPLER = 0x00000004,
++ gcv2D_TSC_2D_COMPRESSED = 0x00000008,
++ gcv2D_TSC_TPC_COMPRESSED = 0x00000010,
++}
++gce2D_TILE_STATUS_CONFIG;
++
++typedef enum _gce2D_QUERY
++{
++ gcv2D_QUERY_RGB_ADDRESS_MIN_ALIGN = 0,
++ gcv2D_QUERY_RGB_STRIDE_MIN_ALIGN,
++ gcv2D_QUERY_YUV_ADDRESS_MIN_ALIGN,
++ gcv2D_QUERY_YUV_STRIDE_MIN_ALIGN,
++}
++gce2D_QUERY;
++
++typedef enum _gce2D_SUPER_TILE_VERSION
++{
++ gcv2D_SUPER_TILE_VERSION_V1 = 1,
++ gcv2D_SUPER_TILE_VERSION_V2 = 2,
++ gcv2D_SUPER_TILE_VERSION_V3 = 3,
++}
++gce2D_SUPER_TILE_VERSION;
++
++typedef enum _gce2D_STATE
++{
++ gcv2D_STATE_SPECIAL_FILTER_MIRROR_MODE = 1,
++ gcv2D_STATE_SUPER_TILE_VERSION,
++ gcv2D_STATE_EN_GAMMA,
++ gcv2D_STATE_DE_GAMMA,
++ gcv2D_STATE_MULTI_SRC_BLIT_UNIFIED_DST_RECT,
++ gcv2D_STATE_PROFILE_ENABLE,
++ gcv2D_STATE_XRGB_ENABLE,
++
++ gcv2D_STATE_ARRAY_EN_GAMMA = 0x10001,
++ gcv2D_STATE_ARRAY_DE_GAMMA,
++ gcv2D_STATE_ARRAY_CSC_YUV_TO_RGB,
++ gcv2D_STATE_ARRAY_CSC_RGB_TO_YUV,
++}
++gce2D_STATE;
++
++typedef enum _gce2D_STATE_PROFILE
++{
++ gcv2D_STATE_PROFILE_NONE = 0x0,
++ gcv2D_STATE_PROFILE_COMMAND = 0x1,
++ gcv2D_STATE_PROFILE_SURFACE = 0x2,
++ gcv2D_STATE_PROFILE_ALL = 0xFFFF,
++}
++gce2D_STATE_PROFILE;
++
++/* Texture object types */
++typedef enum _gceTEXTURE_TYPE
++{
++ gcvTEXTURE_UNKNOWN = 0,
++ gcvTEXTURE_1D,
++ gcvTEXTURE_2D,
++ gcvTEXTURE_3D,
++ gcvTEXTURE_CUBEMAP,
++ gcvTEXTURE_1D_ARRAY,
++ gcvTEXTURE_2D_ARRAY,
++ gcvTEXTURE_EXTERNAL
++}
++gceTEXTURE_TYPE;
++
++#if gcdENABLE_3D
++/* Texture functions. */
++typedef enum _gceTEXTURE_FUNCTION
++{
++ gcvTEXTURE_DUMMY = 0,
++ gcvTEXTURE_REPLACE = 0,
++ gcvTEXTURE_MODULATE,
++ gcvTEXTURE_ADD,
++ gcvTEXTURE_ADD_SIGNED,
++ gcvTEXTURE_INTERPOLATE,
++ gcvTEXTURE_SUBTRACT,
++ gcvTEXTURE_DOT3
++}
++gceTEXTURE_FUNCTION;
++
++/* Texture sources. */
++typedef enum _gceTEXTURE_SOURCE
++{
++ gcvCOLOR_FROM_TEXTURE = 0,
++ gcvCOLOR_FROM_CONSTANT_COLOR,
++ gcvCOLOR_FROM_PRIMARY_COLOR,
++ gcvCOLOR_FROM_PREVIOUS_COLOR
++}
++gceTEXTURE_SOURCE;
++
++/* Texture source channels. */
++typedef enum _gceTEXTURE_CHANNEL
++{
++ gcvFROM_COLOR = 0,
++ gcvFROM_ONE_MINUS_COLOR,
++ gcvFROM_ALPHA,
++ gcvFROM_ONE_MINUS_ALPHA
++}
++gceTEXTURE_CHANNEL;
++#endif /* gcdENABLE_3D */
++
++/* Filter types. */
++typedef enum _gceFILTER_TYPE
++{
++ gcvFILTER_SYNC = 0,
++ gcvFILTER_BLUR,
++ gcvFILTER_USER
++}
++gceFILTER_TYPE;
++
++/* Filter pass types. */
++typedef enum _gceFILTER_PASS_TYPE
++{
++ gcvFILTER_HOR_PASS = 0,
++ gcvFILTER_VER_PASS
++}
++gceFILTER_PASS_TYPE;
++
++/* Endian hints. */
++typedef enum _gceENDIAN_HINT
++{
++ gcvENDIAN_NO_SWAP = 0,
++ gcvENDIAN_SWAP_WORD,
++ gcvENDIAN_SWAP_DWORD
++}
++gceENDIAN_HINT;
++
++/* Tiling modes. */
++typedef enum _gceTILING
++{
++ gcvINVALIDTILED = 0x0, /* Invalid tiling */
++ /* Tiling basic modes enum'ed in power of 2. */
++ gcvLINEAR = 0x1, /* No tiling. */
++ gcvTILED = 0x2, /* 4x4 tiling. */
++ gcvSUPERTILED = 0x4, /* 64x64 tiling. */
++ gcvMINORTILED = 0x8, /* 2x2 tiling. */
++
++ /* Tiling special layouts. */
++ gcvTILING_SPLIT_BUFFER = 0x100,
++
++ /* Tiling combination layouts. */
++ gcvMULTI_TILED = gcvTILED
++ | gcvTILING_SPLIT_BUFFER,
++
++ gcvMULTI_SUPERTILED = gcvSUPERTILED
++ | gcvTILING_SPLIT_BUFFER,
++}
++gceTILING;
++
++/* 2D pattern type. */
++typedef enum _gce2D_PATTERN
++{
++ gcv2D_PATTERN_SOLID = 0,
++ gcv2D_PATTERN_MONO,
++ gcv2D_PATTERN_COLOR,
++ gcv2D_PATTERN_INVALID
++}
++gce2D_PATTERN;
++
++/* 2D source type. */
++typedef enum _gce2D_SOURCE
++{
++ gcv2D_SOURCE_MASKED = 0,
++ gcv2D_SOURCE_MONO,
++ gcv2D_SOURCE_COLOR,
++ gcv2D_SOURCE_INVALID
++}
++gce2D_SOURCE;
++
++/* Pipes. */
++typedef enum _gcePIPE_SELECT
++{
++ gcvPIPE_INVALID = ~0,
++ gcvPIPE_3D = 0,
++ gcvPIPE_2D
++}
++gcePIPE_SELECT;
++
++/* Hardware type. */
++typedef enum _gceHARDWARE_TYPE
++{
++ gcvHARDWARE_INVALID = 0x00,
++ gcvHARDWARE_3D = 0x01,
++ gcvHARDWARE_2D = 0x02,
++ gcvHARDWARE_VG = 0x04,
++#if gcdMULTI_GPU_AFFINITY
++ gcvHARDWARE_OCL = 0x05,
++#endif
++ gcvHARDWARE_3D2D = gcvHARDWARE_3D | gcvHARDWARE_2D
++}
++gceHARDWARE_TYPE;
++
++#define gcdCHIP_COUNT 3
++
++typedef enum _gceMMU_MODE
++{
++ gcvMMU_MODE_1K,
++ gcvMMU_MODE_4K,
++} gceMMU_MODE;
++
++/* User signal command codes. */
++typedef enum _gceUSER_SIGNAL_COMMAND_CODES
++{
++ gcvUSER_SIGNAL_CREATE,
++ gcvUSER_SIGNAL_DESTROY,
++ gcvUSER_SIGNAL_SIGNAL,
++ gcvUSER_SIGNAL_WAIT,
++ gcvUSER_SIGNAL_MAP,
++ gcvUSER_SIGNAL_UNMAP,
++}
++gceUSER_SIGNAL_COMMAND_CODES;
++
++/* Sync point command codes. */
++typedef enum _gceSYNC_POINT_COMMAND_CODES
++{
++ gcvSYNC_POINT_CREATE,
++ gcvSYNC_POINT_DESTROY,
++ gcvSYNC_POINT_SIGNAL,
++}
++gceSYNC_POINT_COMMAND_CODES;
++
++/* Shared buffer command codes. */
++typedef enum _gceSHBUF_COMMAND_CODES
++{
++ gcvSHBUF_CREATE,
++ gcvSHBUF_DESTROY,
++ gcvSHBUF_MAP,
++ gcvSHBUF_WRITE,
++ gcvSHBUF_READ,
++}
++gceSHBUF_COMMAND_CODES;
++
++/* Event locations. */
++typedef enum _gceKERNEL_WHERE
++{
++ gcvKERNEL_COMMAND,
++ gcvKERNEL_VERTEX,
++ gcvKERNEL_TRIANGLE,
++ gcvKERNEL_TEXTURE,
++ gcvKERNEL_PIXEL,
++}
++gceKERNEL_WHERE;
++
++#if gcdENABLE_VG
++/* Hardware blocks. */
++typedef enum _gceBLOCK
++{
++ gcvBLOCK_COMMAND,
++ gcvBLOCK_TESSELLATOR,
++ gcvBLOCK_TESSELLATOR2,
++ gcvBLOCK_TESSELLATOR3,
++ gcvBLOCK_RASTER,
++ gcvBLOCK_VG,
++ gcvBLOCK_VG2,
++ gcvBLOCK_VG3,
++ gcvBLOCK_PIXEL,
++
++ /* Number of defined blocks. */
++ gcvBLOCK_COUNT
++}
++gceBLOCK;
++#endif
++
++/* gcdDUMP message type. */
++typedef enum _gceDEBUG_MESSAGE_TYPE
++{
++ gcvMESSAGE_TEXT,
++ gcvMESSAGE_DUMP
++}
++gceDEBUG_MESSAGE_TYPE;
++
++/* Shading format. */
++typedef enum _gceSHADING
++{
++ gcvSHADING_SMOOTH,
++ gcvSHADING_FLAT_D3D,
++ gcvSHADING_FLAT_OPENGL,
++}
++gceSHADING;
++
++/* Culling modes. */
++typedef enum _gceCULL
++{
++ gcvCULL_NONE,
++ gcvCULL_CCW,
++ gcvCULL_CW,
++}
++gceCULL;
++
++/* Fill modes. */
++typedef enum _gceFILL
++{
++ gcvFILL_POINT,
++ gcvFILL_WIRE_FRAME,
++ gcvFILL_SOLID,
++}
++gceFILL;
++
++/* Compare modes. */
++typedef enum _gceCOMPARE
++{
++ gcvCOMPARE_INVALID = 0,
++ gcvCOMPARE_NEVER,
++ gcvCOMPARE_NOT_EQUAL,
++ gcvCOMPARE_LESS,
++ gcvCOMPARE_LESS_OR_EQUAL,
++ gcvCOMPARE_EQUAL,
++ gcvCOMPARE_GREATER,
++ gcvCOMPARE_GREATER_OR_EQUAL,
++ gcvCOMPARE_ALWAYS,
++}
++gceCOMPARE;
++
++/* Stencil modes. */
++typedef enum _gceSTENCIL_MODE
++{
++ gcvSTENCIL_NONE,
++ gcvSTENCIL_SINGLE_SIDED,
++ gcvSTENCIL_DOUBLE_SIDED,
++}
++gceSTENCIL_MODE;
++
++/* Stencil operations. */
++typedef enum _gceSTENCIL_OPERATION
++{
++ gcvSTENCIL_KEEP,
++ gcvSTENCIL_REPLACE,
++ gcvSTENCIL_ZERO,
++ gcvSTENCIL_INVERT,
++ gcvSTENCIL_INCREMENT,
++ gcvSTENCIL_DECREMENT,
++ gcvSTENCIL_INCREMENT_SATURATE,
++ gcvSTENCIL_DECREMENT_SATURATE,
++ gcvSTENCIL_OPERATION_INVALID = -1
++}
++gceSTENCIL_OPERATION;
++
++/* Stencil selection. */
++typedef enum _gceSTENCIL_WHERE
++{
++ gcvSTENCIL_FRONT,
++ gcvSTENCIL_BACK,
++}
++gceSTENCIL_WHERE;
++
++/* Texture addressing selection. */
++typedef enum _gceTEXTURE_WHICH
++{
++ gcvTEXTURE_S,
++ gcvTEXTURE_T,
++ gcvTEXTURE_R,
++}
++gceTEXTURE_WHICH;
++
++/* Texture addressing modes. */
++typedef enum _gceTEXTURE_ADDRESSING
++{
++ gcvTEXTURE_INVALID = 0,
++ gcvTEXTURE_CLAMP,
++ gcvTEXTURE_WRAP,
++ gcvTEXTURE_MIRROR,
++ gcvTEXTURE_BORDER,
++ gcvTEXTURE_MIRROR_ONCE,
++}
++gceTEXTURE_ADDRESSING;
++
++/* Texture filters. */
++typedef enum _gceTEXTURE_FILTER
++{
++ gcvTEXTURE_NONE,
++ gcvTEXTURE_POINT,
++ gcvTEXTURE_LINEAR,
++ gcvTEXTURE_ANISOTROPIC,
++}
++gceTEXTURE_FILTER;
++
++typedef enum _gceTEXTURE_COMPONENT
++{
++ gcvTEXTURE_COMPONENT_R,
++ gcvTEXTURE_COMPONENT_G,
++ gcvTEXTURE_COMPONENT_B,
++ gcvTEXTURE_COMPONENT_A,
++
++ gcvTEXTURE_COMPONENT_NUM,
++} gceTEXTURE_COMPONENT;
++
++/* Texture swizzle modes. */
++typedef enum _gceTEXTURE_SWIZZLE
++{
++ gcvTEXTURE_SWIZZLE_R = 0,
++ gcvTEXTURE_SWIZZLE_G,
++ gcvTEXTURE_SWIZZLE_B,
++ gcvTEXTURE_SWIZZLE_A,
++ gcvTEXTURE_SWIZZLE_0,
++ gcvTEXTURE_SWIZZLE_1,
++
++ gcvTEXTURE_SWIZZLE_INVALID,
++} gceTEXTURE_SWIZZLE;
++
++typedef enum _gceTEXTURE_COMPARE_MODE
++{
++ gcvTEXTURE_COMPARE_MODE_INVALID = 0,
++ gcvTEXTURE_COMPARE_MODE_NONE,
++ gcvTEXTURE_COMPARE_MODE_REF,
++} gceTEXTURE_COMPARE_MODE;
++
++/* Pixel output swizzle modes. */
++typedef enum _gcePIXEL_SWIZZLE
++{
++ gcvPIXEL_SWIZZLE_R = gcvTEXTURE_SWIZZLE_R,
++ gcvPIXEL_SWIZZLE_G = gcvTEXTURE_SWIZZLE_G,
++ gcvPIXEL_SWIZZLE_B = gcvTEXTURE_SWIZZLE_B,
++ gcvPIXEL_SWIZZLE_A = gcvTEXTURE_SWIZZLE_A,
++
++ gcvPIXEL_SWIZZLE_INVALID,
++} gcePIXEL_SWIZZLE;
++
++/* Primitive types. */
++typedef enum _gcePRIMITIVE
++{
++ gcvPRIMITIVE_POINT_LIST,
++ gcvPRIMITIVE_LINE_LIST,
++ gcvPRIMITIVE_LINE_STRIP,
++ gcvPRIMITIVE_LINE_LOOP,
++ gcvPRIMITIVE_TRIANGLE_LIST,
++ gcvPRIMITIVE_TRIANGLE_STRIP,
++ gcvPRIMITIVE_TRIANGLE_FAN,
++ gcvPRIMITIVE_RECTANGLE,
++}
++gcePRIMITIVE;
++
++/* Index types. */
++typedef enum _gceINDEX_TYPE
++{
++ gcvINDEX_8,
++ gcvINDEX_16,
++ gcvINDEX_32,
++}
++gceINDEX_TYPE;
++
++/* Multi GPU rendering modes. */
++typedef enum _gceMULTI_GPU_RENDERING_MODE
++{
++ gcvMULTI_GPU_RENDERING_MODE_OFF,
++ gcvMULTI_GPU_RENDERING_MODE_SPLIT_WIDTH,
++ gcvMULTI_GPU_RENDERING_MODE_SPLIT_HEIGHT,
++ gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_64x64,
++ gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_128x64,
++ gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_128x128
++}
++gceMULTI_GPU_RENDERING_MODE;
++
++typedef enum _gceCORE_3D_MASK
++{
++ gcvCORE_3D_0_MASK = (1 << 0),
++ gcvCORE_3D_1_MASK = (1 << 1),
++
++ gcvCORE_3D_ALL_MASK = (0xFFFF)
++}
++gceCORE_3D_MASK;
++
++typedef enum _gceCORE_3D_ID
++{
++ gcvCORE_3D_0_ID = 0,
++ gcvCORE_3D_1_ID = 1,
++
++ gcvCORE_3D_ID_INVALID = ~0UL
++}
++gceCORE_3D_ID;
++
++typedef enum _gceMULTI_GPU_MODE
++{
++ gcvMULTI_GPU_MODE_COMBINED = 0,
++ gcvMULTI_GPU_MODE_INDEPENDENT = 1
++}
++gceMULTI_GPU_MODE;
++
++typedef enum _gceMACHINECODE
++{
++ gcvMACHINECODE_ANTUTU0 = 0x0,
++
++ gcvMACHINECODE_GLB27_RELEASE_0,
++
++ gcvMACHINECODE_GLB25_RELEASE_0,
++ gcvMACHINECODE_GLB25_RELEASE_1,
++ gcvMACHINECODE_GLB25_RELEASE_2,
++
++ /* keep it as the last enum */
++ gcvMACHINECODE_COUNT
++}
++gceMACHINECODE;
++
++typedef enum _gceUNIFORMCVT
++{
++ gcvUNIFORMCVT_NONE = 0,
++ gcvUNIFORMCVT_TO_BOOL,
++ gcvUNIFORMCVT_TO_FLOAT,
++} gceUNIFORMCVT;
++
++typedef enum _gceHAL_ARG_VERSION
++{
++ gcvHAL_ARG_VERSION_V1 = 0x0,
++}
++gceHAL_ARG_VERSION;
++
++
++/*
++* Bit of a requirment is 1 means requirement is a must, 0 means requirement can
++* be ignored.
++*/
++#define gcvALLOC_FLAG_CONTIGUOUS_BIT 0
++#define gcvALLOC_FLAG_CACHEABLE_BIT 1
++#define gcvALLOC_FLAG_SECURITY_BIT 2
++#define gcvALLOC_FLAG_NON_CONTIGUOUS_BIT 3
++#define gcvALLOC_FLAG_MEMLIMIT_BIT 4
++
++/* No special needs. */
++#define gcvALLOC_FLAG_NONE (0)
++/* Physical contiguous. */
++#define gcvALLOC_FLAG_CONTIGUOUS (1 << gcvALLOC_FLAG_CONTIGUOUS_BIT)
++/* Can be remapped as cacheable. */
++#define gcvALLOC_FLAG_CACHEABLE (1 << gcvALLOC_FLAG_CACHEABLE_BIT)
++/* Secure buffer. */
++#define gcvALLOC_FLAG_SECURITY (1 << gcvALLOC_FLAG_SECURITY_BIT)
++/* Physical non contiguous. */
++#define gcvALLOC_FLAG_NON_CONTIGUOUS (1 << gcvALLOC_FLAG_NON_CONTIGUOUS_BIT)
++#define gcvALLOC_FLAG_MEMLIMIT (1 << gcvALLOC_FLAG_MEMLIMIT_BIT)
++
++/* GL_VIV internal usage */
++#ifndef GL_MAP_BUFFER_OBJ_VIV
++#define GL_MAP_BUFFER_OBJ_VIV 0x10000
++#endif
++
++/* Command buffer usage. */
++#define gcvCOMMAND_2D (1 << 0)
++#define gcvCOMMAND_3D (1 << 1)
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckCONTEXT * gckCONTEXT;
++typedef struct _gcoCMDBUF * gcoCMDBUF;
++
++typedef struct _gcsSTATE_DELTA * gcsSTATE_DELTA_PTR;
++typedef struct _gcsQUEUE * gcsQUEUE_PTR;
++typedef struct _gcoQUEUE * gcoQUEUE;
++typedef struct _gcsHAL_INTERFACE * gcsHAL_INTERFACE_PTR;
++typedef struct _gcs2D_PROFILE * gcs2D_PROFILE_PTR;
++
++#if gcdENABLE_VG
++typedef struct _gcoVGHARDWARE * gcoVGHARDWARE;
++typedef struct _gcoVGBUFFER * gcoVGBUFFER;
++typedef struct _gckVGHARDWARE * gckVGHARDWARE;
++typedef struct _gcsVGCONTEXT * gcsVGCONTEXT_PTR;
++typedef struct _gcsVGCONTEXT_MAP * gcsVGCONTEXT_MAP_PTR;
++typedef struct _gcsVGCMDQUEUE * gcsVGCMDQUEUE_PTR;
++typedef struct _gcsTASK_MASTER_TABLE * gcsTASK_MASTER_TABLE_PTR;
++typedef struct _gckVGKERNEL * gckVGKERNEL;
++typedef void * gctTHREAD;
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_enum_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2859 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_h_
++#define __gc_hal_h_
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++#include "gc_hal_profiler.h"
++#include "gc_hal_driver.h"
++#if gcdENABLE_3D
++#include "gc_hal_statistics.h"
++#endif
++
++#if gcdSECURITY
++#include "gc_hal_security_interface.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* Alignment Macros *******************************
++\******************************************************************************/
++
++/* Alignment with a non-power of two value. */
++#define gcmALIGN_NP2(n, align) \
++( \
++ ((n) + (align) - 1) - (((n) + (align) - 1) % (align)) \
++)
++
++/* Alignment with a power of two value. */
++#define gcmALIGN(n, align) \
++( \
++ ((n) + ((align) - 1)) & ~((align) - 1) \
++)
++
++#define gcmALIGN_BASE(n, align) \
++( \
++ ((n) & ~((align) - 1)) \
++)
++
++/******************************************************************************\
++***************************** Element Count Macro *****************************
++\******************************************************************************/
++
++#define gcmSIZEOF(a) \
++( \
++ (gctSIZE_T) (sizeof(a)) \
++)
++
++#define gcmCOUNTOF(a) \
++( \
++ sizeof(a) / sizeof(a[0]) \
++)
++
++/******************************************************************************\
++********************************* Cast Macro **********************************
++\******************************************************************************/
++#define gcmNAME_TO_PTR(na) \
++ gckKERNEL_QueryPointerFromName(kernel, gcmALL_TO_UINT32(na))
++
++#define gcmPTR_TO_NAME(ptr) \
++ gckKERNEL_AllocateNameFromPointer(kernel, ptr)
++
++#define gcmRELEASE_NAME(na) \
++ gckKERNEL_DeleteName(kernel, gcmALL_TO_UINT32(na))
++
++#define gcmALL_TO_UINT32(t) \
++( \
++ (gctUINT32) (gctUINTPTR_T) (t)\
++)
++
++#define gcmPTR_TO_UINT64(p) \
++( \
++ (gctUINT64) (gctUINTPTR_T) (p)\
++)
++
++#define gcmUINT64_TO_PTR(u) \
++( \
++ (gctPOINTER) (gctUINTPTR_T) (u)\
++)
++
++#define gcmUINT64_TO_TYPE(u, t) \
++( \
++ (t) (gctUINTPTR_T) (u)\
++)
++
++/******************************************************************************\
++******************************** Useful Macro *********************************
++\******************************************************************************/
++
++#define gcvINVALID_ADDRESS ~0U
++
++#define gcmGET_PRE_ROTATION(rotate) \
++ ((rotate) & (~(gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y)))
++
++#define gcmGET_POST_ROTATION(rotate) \
++ ((rotate) & (gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y))
++
++/******************************************************************************\
++******************************** gcsOBJECT Object *******************************
++\******************************************************************************/
++
++/* Type of objects. */
++typedef enum _gceOBJECT_TYPE
++{
++ gcvOBJ_UNKNOWN = 0,
++ gcvOBJ_2D = gcmCC('2','D',' ',' '),
++ gcvOBJ_3D = gcmCC('3','D',' ',' '),
++ gcvOBJ_ATTRIBUTE = gcmCC('A','T','T','R'),
++ gcvOBJ_BRUSHCACHE = gcmCC('B','R','U','$'),
++ gcvOBJ_BRUSHNODE = gcmCC('B','R','U','n'),
++ gcvOBJ_BRUSH = gcmCC('B','R','U','o'),
++ gcvOBJ_BUFFER = gcmCC('B','U','F','R'),
++ gcvOBJ_COMMAND = gcmCC('C','M','D',' '),
++ gcvOBJ_COMMANDBUFFER = gcmCC('C','M','D','B'),
++ gcvOBJ_CONTEXT = gcmCC('C','T','X','T'),
++ gcvOBJ_DEVICE = gcmCC('D','E','V',' '),
++ gcvOBJ_DUMP = gcmCC('D','U','M','P'),
++ gcvOBJ_EVENT = gcmCC('E','V','N','T'),
++ gcvOBJ_FUNCTION = gcmCC('F','U','N','C'),
++ gcvOBJ_HAL = gcmCC('H','A','L',' '),
++ gcvOBJ_HARDWARE = gcmCC('H','A','R','D'),
++ gcvOBJ_HEAP = gcmCC('H','E','A','P'),
++ gcvOBJ_INDEX = gcmCC('I','N','D','X'),
++ gcvOBJ_INTERRUPT = gcmCC('I','N','T','R'),
++ gcvOBJ_KERNEL = gcmCC('K','E','R','N'),
++ gcvOBJ_KERNEL_FUNCTION = gcmCC('K','F','C','N'),
++ gcvOBJ_MEMORYBUFFER = gcmCC('M','E','M','B'),
++ gcvOBJ_MMU = gcmCC('M','M','U',' '),
++ gcvOBJ_OS = gcmCC('O','S',' ',' '),
++ gcvOBJ_OUTPUT = gcmCC('O','U','T','P'),
++ gcvOBJ_PAINT = gcmCC('P','N','T',' '),
++ gcvOBJ_PATH = gcmCC('P','A','T','H'),
++ gcvOBJ_QUEUE = gcmCC('Q','U','E',' '),
++ gcvOBJ_SAMPLER = gcmCC('S','A','M','P'),
++ gcvOBJ_SHADER = gcmCC('S','H','D','R'),
++ gcvOBJ_STREAM = gcmCC('S','T','R','M'),
++ gcvOBJ_SURF = gcmCC('S','U','R','F'),
++ gcvOBJ_TEXTURE = gcmCC('T','X','T','R'),
++ gcvOBJ_UNIFORM = gcmCC('U','N','I','F'),
++ gcvOBJ_VARIABLE = gcmCC('V','A','R','I'),
++ gcvOBJ_VERTEX = gcmCC('V','R','T','X'),
++ gcvOBJ_VIDMEM = gcmCC('V','M','E','M'),
++ gcvOBJ_VG = gcmCC('V','G',' ',' '),
++ gcvOBJ_BUFOBJ = gcmCC('B','U','F','O'),
++ gcvOBJ_UNIFORM_BLOCK = gcmCC('U','B','L','K'),
++ gcvOBJ_CL = gcmCC('C','L',' ',' '),
++}
++gceOBJECT_TYPE;
++
++/* gcsOBJECT object defintinon. */
++typedef struct _gcsOBJECT
++{
++ /* Type of an object. */
++ gceOBJECT_TYPE type;
++}
++gcsOBJECT;
++
++typedef struct _gckHARDWARE * gckHARDWARE;
++
++/* CORE flags. */
++typedef enum _gceCORE
++{
++ gcvCORE_MAJOR = 0x0,
++ gcvCORE_2D = 0x1,
++ gcvCORE_VG = 0x2,
++#if gcdMULTI_GPU_AFFINITY
++ gcvCORE_OCL = 0x3,
++#endif
++}
++gceCORE;
++
++#if gcdMULTI_GPU_AFFINITY
++#define gcdMAX_GPU_COUNT 4
++#else
++#define gcdMAX_GPU_COUNT 3
++#endif
++
++#define gcdMAX_SURF_LAYER 4
++
++#define gcdMAX_DRAW_BUFFERS 4
++
++/*******************************************************************************
++**
++** gcmVERIFY_OBJECT
++**
++** Assert if an object is invalid or is not of the specified type. If the
++** object is invalid or not of the specified type, gcvSTATUS_INVALID_OBJECT
++** will be returned from the current function. In retail mode this macro
++** does nothing.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define _gcmVERIFY_OBJECT(prefix, obj, t) \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ }
++
++# define gcmVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcm, obj, t)
++# define gcmkVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcmk, obj, t)
++#else
++# define gcmVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++# define gcmkVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++#endif
++
++/******************************************************************************/
++/*VERIFY_OBJECT if special return expected*/
++/******************************************************************************/
++#ifndef EGL_API_ANDROID
++# define _gcmVERIFY_OBJECT_RETURN(prefix, obj, t, retVal) \
++ do \
++ { \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcm, obj, t, retVal)
++# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcmk, obj, t, retVal)
++#else
++# define gcmVERIFY_OBJECT_RETURN(obj, t) do {} while (gcvFALSE)
++# define gcmVERIFY_OBJECT_RETURN(obj, t) do {} while (gcvFALSE)
++#endif
++
++/******************************************************************************\
++********************************** gckOS Object *********************************
++\******************************************************************************/
++
++/* Construct a new gckOS object. */
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ );
++
++/* Destroy an gckOS object. */
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckOS_QueryVideoMemory(
++ IN gckOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Wrapper for allocation memory.. */
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Wrapper for freeing memory. */
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctUINT32 Flag,
++ IN gctSIZE_T Bytes,
++ OUT gctUINT32 * Gid,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Lock pages. */
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctUINT32 Address,
++ IN gctPOINTER PageTable
++ );
++
++gceSTATUS
++gckOS_UnmapPages(
++ IN gckOS Os,
++ IN gctSIZE_T PageCount,
++ IN gctUINT32 Address
++ );
++
++/* Unlock pages. */
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Free paged memory. */
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Allocate non-paged memory. */
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non-paged memory. */
++gceSTATUS
++gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Get the number fo bytes per page. */
++gceSTATUS
++gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Get the physical address of a corresponding user logical address. */
++gceSTATUS
++gckOS_UserLogicalToPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ );
++
++/* Map physical memory. */
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap previously mapped physical memory. */
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Get real physical address from descriptor. */
++gceSTATUS
++gckOS_PhysicalToPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Physical,
++ OUT gctUINT32 * PhysicalAddress
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_ReadRegisterByCoreId(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 CoreId,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++gceSTATUS
++gckOS_WriteRegisterByCoreId(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 CoreId,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++#endif
++
++/* Write data to a 32-bit memory location. */
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ );
++
++/* Map physical memory into the process space. */
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap physical memory from the specified process space. */
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ );
++
++/* Unmap physical memory from the process space. */
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Unmap user logical memory out of physical memory.
++ * This function is only supported in Linux currently.
++ */
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Create a new mutex. */
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Atomically exchange a pair of 32-bit values. */
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ );
++
++/* Atomically exchange a pair of pointers. */
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ );
++
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable the receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable the receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable the receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ );
++
++/* Get time in milliseconds. */
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ );
++
++/* Compare time value. */
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ );
++
++/* Memory barrier. */
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ );
++
++/* Map user pointer. */
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Unmap user pointer. */
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ );
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++** Query whether the memory can be accessed or mapped directly or it has to be
++** copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++** gcvFALSE if the memory can be accessed or mapped dircetly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Perform a memory copy. */
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ );
++
++/* Zero memory. */
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Device I/O control to the kernel HAL layer. */
++gceSTATUS
++gckOS_DeviceControl(
++ IN gckOS Os,
++ IN gctBOOL FromUser,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ OUT gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++gceSTATUS
++gckOS_GetCurrentProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ );
++
++#if gcdSECURITY
++gceSTATUS
++gckOS_OpenSecurityChannel(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 *Channel
++ );
++
++gceSTATUS
++gckOS_CloseSecurityChannel(
++ IN gctUINT32 Channel
++ );
++
++gceSTATUS
++gckOS_CallSecurityService(
++ IN gctUINT32 Channel,
++ IN gcsTA_INTERFACE * Interface
++ );
++
++gceSTATUS
++gckOS_InitSecurityChannel(
++ OUT gctUINT32 Channel
++ );
++
++gceSTATUS
++gckOS_AllocatePageArray(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageArrayLogical,
++ OUT gctPHYS_ADDR * PageArrayPhysical
++ );
++#endif
++
++/******************************************************************************\
++********************************** Signal Object *********************************
++\******************************************************************************/
++
++/* Create a signal. */
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
++/* Map a user signal to the kernel space. */
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ );
++
++/* Unmap a user signal */
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Map user memory. */
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/******************************************************************************\
++************************** Android Native Fence Sync ***************************
++\******************************************************************************/
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ );
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ );
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ );
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++#if !USE_NEW_LINUX_SIGNAL
++/* Create signal to be used in the user space. */
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ );
++
++/* Destroy signal used in the user space. */
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ );
++
++/* Wait for signal used in the user space. */
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ );
++
++/* Signal a signal used in the user space. */
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ );
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++/* Set a signal owned by a process. */
++#if defined(__QNXNTO__)
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctINT Recvid,
++ IN gctINT Coid
++ );
++#else
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ );
++#endif
++
++/******************************************************************************\
++** Cache Support
++*/
++
++gceSTATUS
++gckOS_CacheClean(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctUINT32 Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheFlush(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctUINT32 Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheInvalidate(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctUINT32 Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CPUPhysicalToGPUPhysical(
++ IN gckOS Os,
++ IN gctUINT32 CPUPhysical,
++ IN gctUINT32_PTR GPUPhysical
++ );
++
++gceSTATUS
++gckOS_GPUPhysicalToCPUPhysical(
++ IN gckOS Os,
++ IN gctUINT32 GPUPhysical,
++ IN gctUINT32_PTR CPUPhysical
++ );
++
++gceSTATUS
++gckOS_QueryOption(
++ IN gckOS Os,
++ IN gctCONST_STRING Option,
++ OUT gctUINT32 * Value
++ );
++
++/******************************************************************************\
++** Debug Support
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gckOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++/*******************************************************************************
++** Broadcast interface.
++*/
++
++typedef enum _gceBROADCAST
++{
++ /* GPU might be idle. */
++ gcvBROADCAST_GPU_IDLE,
++
++ /* A commit is going to happen. */
++ gcvBROADCAST_GPU_COMMIT,
++
++ /* GPU seems to be stuck. */
++ gcvBROADCAST_GPU_STUCK,
++
++ /* First process gets attached. */
++ gcvBROADCAST_FIRST_PROCESS,
++
++ /* Last process gets detached. */
++ gcvBROADCAST_LAST_PROCESS,
++
++ /* AXI bus error. */
++ gcvBROADCAST_AXI_BUS_ERROR,
++
++ /* Out of memory. */
++ gcvBROADCAST_OUT_OF_MEMORY,
++}
++gceBROADCAST;
++
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ );
++
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ );
++
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ );
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ );
++
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ );
++
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ );
++
++/*******************************************************************************
++** Semaphores.
++*/
++
++/* Create a new semaphore. */
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++#endif
++
++/* Delete a semahore. */
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Acquire a semahore. */
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Try to acquire a semahore. */
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Release a semahore. */
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/*******************************************************************************
++** Timer API.
++*/
++
++typedef void (*gctTIMERFUNCTION)(gctPOINTER);
++
++/* Create a timer. */
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ );
++
++/* Destory a timer. */
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
++
++/* Start a timer. */
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ );
++
++/* Stop a timer. */
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
++
++/******************************************************************************\
++********************************* gckHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gckHEAP * gckHEAP;
++
++/* Construct a new gckHEAP object. */
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ );
++
++/* Destroy an gckHEAP object. */
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++/* Profile the heap. */
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ );
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++
++
++/******************************************************************************\
++******************************** gckVIDMEM Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVIDMEM * gckVIDMEM;
++typedef struct _gckKERNEL * gckKERNEL;
++typedef struct _gckDB * gckDB;
++typedef struct _gckDVFS * gckDVFS;
++
++/* Construct a new gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T Banking,
++ OUT gckVIDMEM * Memory
++ );
++
++/* Destroy an gckVDIMEM object. */
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ );
++
++/* Allocate linear memory. */
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ IN gctBOOL Specified,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckVIDMEM_Free(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/* Lock memory. */
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address,
++ OUT gctUINT32 * Gid,
++ OUT gctUINT64 * PhysicalAddress
++ );
++
++/* Unlock memory. */
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchroneous
++ );
++
++/* Construct a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Flag,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Destroy a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/******************************************************************************\
++******************************** gckKERNEL Object ******************************
++\******************************************************************************/
++
++struct _gcsHAL_INTERFACE;
++
++/* Notifications. */
++typedef enum _gceNOTIFY
++{
++ gcvNOTIFY_INTERRUPT,
++ gcvNOTIFY_COMMAND_QUEUE,
++}
++gceNOTIFY;
++
++/* Flush flags. */
++typedef enum _gceKERNEL_FLUSH
++{
++ gcvFLUSH_COLOR = 0x01,
++ gcvFLUSH_DEPTH = 0x02,
++ gcvFLUSH_TEXTURE = 0x04,
++ gcvFLUSH_2D = 0x08,
++#if gcdMULTI_GPU
++ gcvFLUSH_L2 = 0x10,
++#endif
++ gcvFLUSH_TILE_STATUS = 0x20,
++ gcvFLUSH_ALL = gcvFLUSH_COLOR
++ | gcvFLUSH_DEPTH
++ | gcvFLUSH_TEXTURE
++ | gcvFLUSH_2D
++#if gcdMULTI_GPU
++ | gcvFLUSH_L2
++#endif
++ | gcvFLUSH_TILE_STATUS
++}
++gceKERNEL_FLUSH;
++
++/* Construct a new gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ );
++
++/* Destroy an gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query Database requirements. */
++gceSTATUS
++ gckKERNEL_QueryDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcsHAL_INTERFACE * Interface
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Lookup the gckVIDMEM object for a pool. */
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 Flag,
++ OUT gctUINT32 * Node
++ );
++
++gceSTATUS
++gckKERNEL_ReleaseVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ );
++
++gceSTATUS
++gckKERNEL_LockVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ );
++
++gceSTATUS
++gckKERNEL_UnlockVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcsHAL_INTERFACE * Interface
++ );
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++#ifdef __QNXNTO__
++/* Unmap video memory. */
++gceSTATUS
++gckKERNEL_UnmapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes
++ );
++#endif
++
++/* Map memory. */
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Notification of events. */
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gceNOTIFY Notifcation,
++ IN gctBOOL Data
++ );
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ );
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ );
++
++/* Set the value of timeout on HW operation. */
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ );
++
++/* Get access to the user data. */
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Release resources associated with the user data connection. */
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Frequency
++ );
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ );
++
++/******************************************************************************\
++******************************* gckHARDWARE Object *****************************
++\******************************************************************************/
++
++/* Construct a new gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ );
++
++/* Destroy an gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ );
++
++/* Get hardware type. */
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Alignment,
++ OUT gctUINT32 * ReservedHead,
++ OUT gctUINT32 * ReservedTail
++ );
++
++/* Add a WAIT/LINK pair in the command queue. */
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctUINT32 * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctUINT32 * WaitBytes
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Bytes
++ );
++
++/* Add an END command in the command queue. */
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckHARDWARE_ChipEnable(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gceCORE_3D_MASK ChipEnable,
++ IN OUT gctSIZE_T * Bytes
++ );
++#endif
++
++/* Add a NOP command in the command queue. */
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a PIPESELECT command in the command queue. */
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Add a LINK command in the command queue. */
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT32 FetchSize,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Add an EVENT command in the command queue. */
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ );
++
++/* Query the shader uniforms support. */
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctBOOL * UnifiedUnforms
++ );
++
++/* Split a harwdare specific address into API stuff. */
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Update command queue tail pointer. */
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ );
++
++/* Interrupt manager. */
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctBOOL InterruptValid
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ );
++
++/* Set the page table base address. */
++gceSTATUS
++gckHARDWARE_SetMMUv2(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Enable,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctBOOL FromPower
++ );
++
++#if gcdPROCESS_ADDRESS_SPACE
++/* Configure mmu configuration. */
++gceSTATUS
++gckHARDWARE_ConfigMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER MtlbLogical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctSIZE_T * WaitLinkOffset,
++ OUT gctSIZE_T * WaitLinkBytes
++ );
++#endif
++
++/* Get idle register. */
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ );
++
++gceSTATUS
++gckHARDWARE_ReadInterrupt(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckHARDWARE_SetPowerManagementLock(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Lock
++ );
++
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ );
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_SetMinFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT MinFscaleValue
++ );
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++);
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++);
++#endif
++
++/* Profile 2D Engine. */
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OUT gcs2D_PROFILE_PTR Profile
++ );
++
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ );
++
++typedef gceSTATUS (*gctISRMANAGERFUNC)(gctPOINTER Context);
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ );
++
++/* Start a composition. */
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ );
++
++/* Check for Hardware features. */
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ );
++
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ );
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Frequency
++ );
++
++gceSTATUS
++gckHARDWARE_PrepareFunctions(
++ gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_SetMMUStates(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++#if !gcdENABLE_VG
++/******************************************************************************\
++***************************** gckINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckINTERRUPT * gckINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckKERNEL Kernel
++ );
++
++gceSTATUS
++gckINTERRUPT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_Destroy(
++ IN gckINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_SetHandler(
++ IN gckINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckINTERRUPT_Notify(
++ IN gckINTERRUPT Interrupt,
++ IN gctBOOL Valid
++ );
++#endif
++/******************************************************************************\
++******************************** gckEVENT Object *******************************
++\******************************************************************************/
++
++typedef struct _gckEVENT * gckEVENT;
++
++/* Construct a new gckEVENT object. */
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ );
++
++/* Destroy an gckEVENT object. */
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ );
++
++/* Reserve the next available hardware event. */
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gceKERNEL_WHERE Source,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gceKERNEL_WHERE Source
++ );
++#endif
++
++/* Add a new event to the list of events. */
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ );
++
++/* Schedule a FreeNonPagedMemory event. */
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeContiguousMemory event. */
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeVideoMemory event. */
++gceSTATUS
++gckEVENT_FreeVideoMemory(
++ IN gckEVENT Event,
++ IN gcuVIDMEM_NODE_PTR VideoMemory,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a signal event. */
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule an Unlock event. */
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctPOINTER Node,
++ IN gceSURF_TYPE Type
++ );
++
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeVirtualCommandBuffer event. */
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ );
++#endif
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ );
++#endif
++
++/* Schedule a composition event. */
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctUINT32 IDs
++ );
++
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ );
++/******************************************************************************\
++******************************* gckCOMMAND Object ******************************
++\******************************************************************************/
++
++typedef struct _gckCOMMAND * gckCOMMAND;
++
++/* Construct a new gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ );
++
++/* Destroy an gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ );
++
++/* Acquire command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Release command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Start the command queue. */
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ );
++
++/* Stop the command queue. */
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ );
++
++#if gcdMULTI_GPU
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ );
++#endif
++
++/* Reserve space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctUINT32 RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctUINT32 * BufferSize
++ );
++
++/* Execute reserved space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++ IN gctUINT32 RequstedBytes
++ );
++
++/* Stall the command queue. */
++#if gcdMULTI_GPU
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++#endif
++
++/* Attach user process. */
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ );
++
++/* Detach user process. */
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ );
++
++/* Dump command buffer being executed by GPU. */
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ );
++
++/* Whether a kernel command buffer address. */
++gceSTATUS
++gckCOMMAND_AddressInKernelCommandBuffer(
++ IN gckCOMMAND Command,
++ IN gctUINT32 Address,
++ OUT gctBOOL *In
++ );
++
++/******************************************************************************\
++********************************* gckMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckMMU * gckMMU;
++
++/* Construct a new gckMMU object. */
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ );
++
++/* Destroy an gckMMU object. */
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu,
++ IN gceSURF_TYPE Type
++ );
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ );
++
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ );
++#endif
++
++#if VIVANTE_PROFILER_NEW
++gceSTATUS
++gckHARDWARE_InitProfiler(
++ IN gckHARDWARE Hardware
++ );
++#endif
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ );
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckOS_DetectProcessByName(
++ IN gctCONST_POINTER Name
++ );
++
++void
++gckOS_DumpParam(
++ void
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#if gcdENABLE_VG
++#include "gc_hal_vg.h"
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_kernel_buffer.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_kernel_buffer.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_kernel_buffer.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_kernel_buffer.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,225 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_buffer_h_
++#define __gc_hal_kernel_buffer_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++************************ Command Buffer and Event Objects **********************
++\******************************************************************************/
++
++/* The number of context buffers per user. */
++#define gcdCONTEXT_BUFFER_COUNT 2
++
++/* State delta record. */
++typedef struct _gcsSTATE_DELTA_RECORD * gcsSTATE_DELTA_RECORD_PTR;
++typedef struct _gcsSTATE_DELTA_RECORD
++{
++ /* State address. */
++ gctUINT address;
++
++ /* State mask. */
++ gctUINT32 mask;
++
++ /* State data. */
++ gctUINT32 data;
++}
++gcsSTATE_DELTA_RECORD;
++
++/* State delta. */
++typedef struct _gcsSTATE_DELTA
++{
++ /* For debugging: the number of delta in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Main state delta ID. Every time state delta structure gets reinitialized,
++ main ID is incremented. If main state ID overflows, all map entry IDs get
++ reinitialized to make sure there is no potential erroneous match after
++ the overflow.*/
++ gctUINT id;
++
++ /* The number of contexts pending modification by the delta. */
++ gctINT refCount;
++
++ /* Vertex element count for the delta buffer. */
++ gctUINT elementCount;
++
++ /* Number of states currently stored in the record array. */
++ gctUINT recordCount;
++
++ /* Record array; holds all modified states in gcsSTATE_DELTA_RECORD. */
++ gctUINT64 recordArray;
++
++ /* Map entry ID is used for map entry validation. If map entry ID does not
++ match the main state delta ID, the entry and the corresponding state are
++ considered not in use. */
++ gctUINT64 mapEntryID;
++ gctUINT mapEntryIDSize;
++
++ /* If the map entry ID matches the main state delta ID, index points to
++ the state record in the record array. */
++ gctUINT64 mapEntryIndex;
++
++ /* Previous and next state deltas in gcsSTATE_DELTA. */
++ gctUINT64 prev;
++ gctUINT64 next;
++}
++gcsSTATE_DELTA;
++
++/* Command buffer patch record. */
++struct _gcsPATCH
++{
++ /* Pointer within the buffer. */
++ gctUINT32_PTR pointer;
++
++ /* 32-bit data to write at the specified offset. */
++ gctUINT32 data;
++};
++
++/* List of patches for the command buffer. */
++struct _gcsPATCH_LIST
++{
++ /* Array of patch records. */
++ struct _gcsPATCH patch[1024];
++
++ /* Number of patches in the array. */
++ gctUINT count;
++
++ /* Next item in the list. */
++ struct _gcsPATCH_LIST *next;
++};
++
++/* Command buffer object. */
++struct _gcoCMDBUF
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Commit count. */
++ gctUINT count;
++
++ /* Command buffer entry and exit pipes. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Feature usage flags. */
++ gctBOOL using2D;
++ gctBOOL using3D;
++ gctBOOL usingFilterBlit;
++ gctBOOL usingPalette;
++
++ /* Physical address of command buffer. Just a name. */
++ gctUINT32 physical;
++
++ /* Logical address of command buffer. */
++ gctUINT64 logical;
++
++ /* Number of bytes in command buffer. */
++ gctUINT32 bytes;
++
++ /* Start offset into the command buffer. */
++ gctUINT32 startOffset;
++
++ /* Current offset into the command buffer. */
++ gctUINT32 offset;
++
++ /* Number of free bytes in command buffer. */
++ gctUINT32 free;
++
++ /* Location of the last reserved area. */
++ gctUINT64 lastReserve;
++ gctUINT32 lastOffset;
++
++#if gcdSECURE_USER
++ /* Hint array for the current command buffer. */
++ gctUINT hintArraySize;
++ gctUINT64 hintArray;
++ gctUINT64 hintArrayTail;
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Last load state command location and hardware address. */
++ gctUINT64 lastLoadStatePtr;
++ gctUINT32 lastLoadStateAddress;
++ gctUINT32 lastLoadStateCount;
++#endif
++
++ /* Completion signal. */
++ gctSIGNAL signal;
++
++ /* List of patches. */
++ struct _gcsPATCH_LIST *patchHead;
++ struct _gcsPATCH_LIST *patchTail;
++
++ /* Link to the siblings. */
++ gcoCMDBUF prev;
++ gcoCMDBUF next;
++};
++
++typedef struct _gcsQUEUE
++{
++ /* Pointer to next gcsQUEUE structure in gcsQUEUE. */
++ gctUINT64 next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE iface;
++}
++gcsQUEUE;
++
++/* Event queue. */
++struct _gcoQUEUE
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to current event queue. */
++ gcsQUEUE_PTR head;
++ gcsQUEUE_PTR tail;
++
++ /* chunks of the records. */
++ gctPOINTER chunks;
++
++ /* List of free records. */
++ gcsQUEUE_PTR freeList;
++
++ #define gcdIN_QUEUE_RECORD_LIMIT 16
++ /* Number of records currently in queue */
++ gctUINT32 recordCount;
++};
++
++struct _gcsTEMPCMDBUF
++{
++ gctUINT32 currentByteSize;
++ gctPOINTER buffer;
++ gctBOOL inUse;
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_buffer_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_mem.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_mem.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_mem.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_mem.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,530 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/*
++** Include file for the local memory management.
++*/
++
++#ifndef __gc_hal_mem_h_
++#define __gc_hal_mem_h_
++#if (gcdENABLE_3D || gcdENABLE_VG)
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Usage:
++
++ The macros to declare MemPool type and functions are
++ gcmMEM_DeclareFSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareVSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix)
++
++ The data structures for MemPool are
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++
++ The MemPool constructor and destructor functions are
++ gcfMEM_InitFSMemPool(gcsMEM_FS_MEM_POOL *, gcoOS, gctUINT, gctUINT);
++ gcfMEM_FreeFSMemPool(gcsMEM_FS_MEM_POOL *);
++ gcfMEM_InitVSMemPool(gcsMEM_VS_MEM_POOL *, gcoOS, gctUINT, gctBOOL);
++ gcfMEM_FreeVSMemPool(gcsMEM_VS_MEM_POOL *);
++ gcfMEM_InitAFSMemPool(gcsMEM_AFS_MEM_POOL *, gcoOS, gctUINT);
++ gcfMEM_FreeAFSMemPool(gcsMEM_AFS_MEM_POOL *);
++
++ FS: for Fixed-Size data structures
++ VS: for Variable-size data structures
++ AFS: for Array of Fixed-Size data structures
++
++
++ // Example 1: For a fixed-size data structure, struct gcsNode.
++ // It is used locally in a file, so the functions are static without prefix.
++ // At top level, declare allocate and free functions.
++ // The first argument is the data type.
++ // The second argument is the short name used in the functions.
++ gcmMEM_DeclareFSMemPool(struct gcsNode, Node, );
++
++ // The previous macro creates two inline functions,
++ // _AllocateNode and _FreeNode.
++
++ // In function or struct
++ gcsMEM_FS_MEM_POOL nodeMemPool;
++
++ // In function,
++ struct gcsNode * node;
++ gceSTATUS status;
++
++ // Before using the memory pool, initialize it.
++ // The second argument is the gcoOS object.
++ // The third argument is the number of data structures to allocate for each chunk.
++ status = gcfMEM_InitFSMemPool(&nodeMemPool, os, 100, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node.
++ status = _AllocateNode(nodeMemPool, &node);
++ ...
++ // Free a node.
++ _FreeNode(nodeMemPool, node);
++
++ // After using the memory pool, free it.
++ gcfMEM_FreeFSMemPool(&nodeMemPool);
++
++
++ // Example 2: For array of fixed-size data structures, struct gcsNode.
++ // It is used in several files, so the functions are extern with prefix.
++ // At top level, declare allocate and free functions.
++ // The first argument is the data type, and the second one is the short name
++ // used in the functions.
++ gcmMEM_DeclareAFSMemPool(struct gcsNode, NodeArray, gcfOpt);
++
++ // The previous macro creates two inline functions,
++ // gcfOpt_AllocateNodeArray and gcfOpt_FreeNodeArray.
++
++ // In function or struct
++ gcsMEM_AFS_MEM_POOL nodeArrayMemPool;
++
++ // In function,
++ struct gcsNode * nodeArray;
++ gceSTATUS status;
++
++ // Before using the array memory pool, initialize it.
++ // The second argument is the gcoOS object, the third is the number of data
++ // structures to allocate for each chunk.
++ status = gcfMEM_InitAFSMemPool(&nodeArrayMemPool, os, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node array of size 100.
++ status = gcfOpt_AllocateNodeArray(nodeArrayMemPool, &nodeArray, 100);
++ ...
++ // Free a node array.
++ gcfOpt_FreeNodeArray(&nodeArrayMemPool, nodeArray);
++
++ // After using the array memory pool, free it.
++ gcfMEM_FreeAFSMemPool(&nodeArrayMemPool);
++
++*******************************************************************************/
++
++/*******************************************************************************
++** To switch back to use gcoOS_Allocate and gcoOS_Free, add
++** #define USE_LOCAL_MEMORY_POOL 0
++** before including this file.
++*******************************************************************************/
++#ifndef USE_LOCAL_MEMORY_POOL
++/*
++ USE_LOCAL_MEMORY_POOL
++
++ This define enables the local memory management to improve performance.
++*/
++#define USE_LOCAL_MEMORY_POOL 1
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Structures
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++#else
++ typedef gcoOS gcsMEM_FS_MEM_POOL;
++ typedef gcoOS gcsMEM_VS_MEM_POOL;
++ typedef gcoOS gcsMEM_AFS_MEM_POOL;
++#endif
++
++/*******************************************************************************
++** Memory Pool Macros
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ return(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_FSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName##List( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * FirstPointer, \
++ Type * LastPointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x FirstPointer=0x%x LastPointer=0x%x", MemPool, FirstPointer, LastPointer); \
++ status = gcfMEM_FSMemPoolFreeAList(MemPool, (gctPOINTER) FirstPointer, (gctPOINTER) LastPointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status;\
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++ Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_VSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_AFSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#else
++
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Functions
++*******************************************************************************/
++gceSTATUS
++gcfMEM_InitFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolGetANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeAList(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER FirstNode,
++ IN gctPOINTER LastNode
++ );
++
++gceSTATUS
++gcfMEM_InitVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT BlockSize,
++ IN gctBOOL RecycleFreeNode
++ );
++
++gceSTATUS
++gcfMEM_FreeVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolGetANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolFreeANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_InitAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolGetANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctUINT Count,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolFreeANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* (gcdENABLE_3D || gcdENABLE_VG) */
++#endif /* __gc_hal_mem_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_options.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_options.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_options.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_options.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1271 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_options_h_
++#define __gc_hal_options_h_
++
++/*
++ gcdSECURITY
++
++*/
++#ifndef gcdSECURITY
++# define gcdSECURITY 0
++#endif
++
++/*
++ gcdPRINT_VERSION
++
++ Print HAL version.
++*/
++#ifndef gcdPRINT_VERSION
++# define gcdPRINT_VERSION 0
++#endif
++
++/*
++ USE_NEW_LINUX_SIGNAL
++
++ This define enables the Linux kernel signaling between kernel and user.
++*/
++#ifndef USE_NEW_LINUX_SIGNAL
++# define USE_NEW_LINUX_SIGNAL 0
++#endif
++
++/*
++ VIVANTE_PROFILER
++
++ This define enables the profiler.
++*/
++#ifndef VIVANTE_PROFILER
++# define VIVANTE_PROFILER 1
++#endif
++
++/*
++ VIVANTE_PROFILER_CONTEXT
++
++ This define enables the profiler according each context.
++*/
++#ifndef VIVANTE_PROFILER_CONTEXT
++# define VIVANTE_PROFILER_CONTEXT 1
++#endif
++
++#ifndef VIVANTE_PROFILER_PERDRAW
++# define VIVANTE_PROFILER_PERDRAW 0
++#endif
++
++#ifndef VIVANTE_PROFILER_NEW
++# define VIVANTE_PROFILER_NEW 0
++#endif
++
++#ifndef VIVANTE_PROFILER_PM
++# define VIVANTE_PROFILER_PM 1
++#endif
++/*
++ gcdUSE_VG
++
++ Enable VG HAL layer (only for GC350).
++*/
++#ifndef gcdUSE_VG
++# define gcdUSE_VG 0
++#endif
++
++/*
++ USE_SW_FB
++
++ Set to 1 if the frame buffer memory cannot be accessed by the GPU.
++*/
++#ifndef USE_SW_FB
++# define USE_SW_FB 0
++#endif
++
++/*
++ PROFILE_HAL_COUNTERS
++
++ This define enables HAL counter profiling support. HW and SHADER
++ counter profiling depends on this.
++*/
++#ifndef PROFILE_HAL_COUNTERS
++# define PROFILE_HAL_COUNTERS 1
++#endif
++
++/*
++ PROFILE_HW_COUNTERS
++
++ This define enables HW counter profiling support.
++*/
++#ifndef PROFILE_HW_COUNTERS
++# define PROFILE_HW_COUNTERS 1
++#endif
++
++/*
++ PROFILE_SHADER_COUNTERS
++
++ This define enables SHADER counter profiling support.
++*/
++#ifndef PROFILE_SHADER_COUNTERS
++# define PROFILE_SHADER_COUNTERS 1
++#endif
++
++/*
++ COMMAND_PROCESSOR_VERSION
++
++ The version of the command buffer and task manager.
++*/
++#define COMMAND_PROCESSOR_VERSION 1
++
++/*
++ gcdDUMP_KEY
++
++ Set this to a string that appears in 'cat /proc/<pid>/cmdline'. E.g. 'camera'.
++ HAL will create dumps for the processes matching this key.
++*/
++#ifndef gcdDUMP_KEY
++# define gcdDUMP_KEY "process"
++#endif
++
++/*
++ gcdDUMP_PATH
++
++ The dump file location. Some processes cannot write to the sdcard.
++ Try apps' data dir, e.g. /data/data/com.android.launcher
++*/
++#ifndef gcdDUMP_PATH
++#if defined(ANDROID)
++# define gcdDUMP_PATH "/mnt/sdcard/"
++#else
++# define gcdDUMP_PATH "./"
++#endif
++#endif
++
++/*
++ gcdDUMP
++
++ When set to 1, a dump of all states and memory uploads, as well as other
++ hardware related execution will be printed to the debug console. This
++ data can be used for playing back applications.
++*/
++#ifndef gcdDUMP
++# define gcdDUMP 0
++#endif
++
++/*
++ gcdDUMP_API
++
++ When set to 1, a high level dump of the EGL and GL/VG APs's are
++ captured.
++*/
++#ifndef gcdDUMP_API
++# define gcdDUMP_API 0
++#endif
++
++
++
++/*
++ gcdDEBUG_OPTION
++ When set to 1, the debug options are enabled. We must set other MACRO to enable
++ sub case.
++*/
++#ifndef gcdDEBUG_OPTION
++# define gcdDEBUG_OPTION 0
++
++#if gcdDEBUG_OPTION
++/*
++ gcdDEBUG_OPTION_KEY
++ The process name of debug application.
++*/
++#ifndef gcdDEBUG_OPTION_KEY
++# define gcdDEBUG_OPTION_KEY "process"
++# endif
++/*
++ gcdDEBUG_OPTION_NO_GL_DRAWS
++ When set to 1, all glDrawArrays and glDrawElements will be skip.
++*/
++#ifndef gcdDEBUG_OPTION_NO_GL_DRAWS
++# define gcdDEBUG_OPTION_NO_GL_DRAWS 0
++# endif
++/*
++ gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES
++ When set to 1, all DrawPrimitives will be skip.
++*/
++#ifndef gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES
++# define gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES 0
++# endif
++/*
++ gcdDEBUG_OPTION_SKIP_SWAP
++ When set to 1, just one out of gcdDEBUG_OPTION_SKIP_FRAMES(such as 1/10) eglSwapBuffers will be resolve,
++ others skip.
++*/
++#ifndef gcdDEBUG_OPTION_SKIP_SWAP
++# define gcdDEBUG_OPTION_SKIP_SWAP 0
++# define gcdDEBUG_OPTION_SKIP_FRAMES 10
++# endif
++/*
++ gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET
++ When set to 1, the format of render target will force to RGB565.
++*/
++#ifndef gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET
++# define gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET 0
++# endif
++/*
++ gcdDEBUG_OPTION_NONE_TEXTURE
++ When set to 1, the type of texture will be set to AQ_TEXTURE_SAMPLE_MODE_TYPE_NONE.
++*/
++#ifndef gcdDEBUG_OPTION_NONE_TEXTURE
++# define gcdDEBUG_OPTION_NONE_TEXTURE 0
++# endif
++/*
++ gcdDEBUG_OPTION_NONE_DEPTH
++ When set to 1, the depth format of surface will be set to gcvSURF_UNKNOWN.
++*/
++#ifndef gcdDEBUG_OPTION_NONE_DEPTH
++# define gcdDEBUG_OPTION_NONE_DEPTH 0
++# endif
++
++# endif
++#endif
++
++/*
++ gcdDUMP_SWAP_PER_DRAW
++
++ When set to 1, dump swap command for every single draw to make simulation comparison happy.
++ Only valid for ES3 driver for now.
++*/
++#ifndef gcdDUMP_SWAP_PER_DRAW
++# define gcdDUMP_SWAP_PER_DRAW 0
++#endif
++
++/*
++ gcdDUMP_FRAMERATE
++ When set to a value other than zero, average frame rate will be dumped.
++ The value set is the starting frame that the average will be calculated.
++ This is needed because sometimes first few frames are too slow to be included
++ in the average. Frame count starts from 1.
++*/
++#ifndef gcdDUMP_FRAMERATE
++# define gcdDUMP_FRAMERATE 0
++#endif
++
++/*
++ gcdENABLE_FSCALE_VAL_ADJUST
++ When non-zero, FSCALE_VAL when gcvPOWER_ON can be adjusted externally.
++ */
++#ifndef gcdENABLE_FSCALE_VAL_ADJUST
++# define gcdENABLE_FSCALE_VAL_ADJUST 1
++#endif
++
++/*
++ gcdDUMP_IN_KERNEL
++
++ When set to 1, all dumps will happen in the kernel. This is handy if
++ you want the kernel to dump its command buffers as well and the data
++ needs to be in sync.
++*/
++#ifndef gcdDUMP_IN_KERNEL
++# define gcdDUMP_IN_KERNEL 0
++#endif
++
++/*
++ gcdDUMP_COMMAND
++
++ When set to non-zero, the command queue will dump all incoming command
++ and context buffers as well as all other modifications to the command
++ queue.
++*/
++#ifndef gcdDUMP_COMMAND
++# define gcdDUMP_COMMAND 0
++#endif
++
++/*
++ gcdDUMP_2D
++
++ When set to non-zero, it will dump the 2D command and surface.
++*/
++#ifndef gcdDUMP_2D
++# define gcdDUMP_2D 0
++#endif
++
++/*
++ gcdDUMP_FRAME_TGA
++
++ When set to a value other than 0, a dump of the frame specified by the value,
++ will be done into frame.tga. Frame count starts from 1.
++ */
++#ifndef gcdDUMP_FRAME_TGA
++# define gcdDUMP_FRAME_TGA 0
++#endif
++/*
++ gcdNULL_DRIVER
++
++ Set to 1 for infinite speed hardware.
++ Set to 2 for bypassing the HAL.
++ Set to 3 for bypassing the drivers.
++*/
++#ifndef gcdNULL_DRIVER
++# define gcdNULL_DRIVER 0
++#endif
++
++/*
++ gcdENABLE_TIMEOUT_DETECTION
++
++ Enable timeout detection.
++*/
++#ifndef gcdENABLE_TIMEOUT_DETECTION
++# define gcdENABLE_TIMEOUT_DETECTION 0
++#endif
++
++/*
++ gcdCMD_BUFFER_SIZE
++
++ Number of bytes in a command buffer.
++*/
++#ifndef gcdCMD_BUFFER_SIZE
++# define gcdCMD_BUFFER_SIZE (128 << 10)
++#endif
++
++/*
++ gcdCMD_BUFFERS
++
++ Number of command buffers to use per client.
++*/
++#ifndef gcdCMD_BUFFERS
++# define gcdCMD_BUFFERS 2
++#endif
++
++/*
++ gcdMAX_CMD_BUFFERS
++
++ Maximum number of command buffers to use per client.
++*/
++#ifndef gcdMAX_CMD_BUFFERS
++# define gcdMAX_CMD_BUFFERS 8
++#endif
++
++/*
++ gcdCOMMAND_QUEUES
++
++ Number of command queues in the kernel.
++*/
++#ifndef gcdCOMMAND_QUEUES
++# define gcdCOMMAND_QUEUES 2
++#endif
++
++/*
++ gcdPOWER_CONTROL_DELAY
++
++ The delay in milliseconds required to wait until the GPU has woke up
++ from a suspend or power-down state. This is system dependent because
++ the bus clock also needs to stabilize.
++*/
++#ifndef gcdPOWER_CONTROL_DELAY
++# define gcdPOWER_CONTROL_DELAY 0
++#endif
++
++/*
++ gcdMIRROR_PAGETABLE
++
++ Enable it when GPUs with old MMU and new MMU exist at same SoC. It makes
++ each GPU use same virtual address to access same physical memory.
++*/
++#ifndef gcdMIRROR_PAGETABLE
++# define gcdMIRROR_PAGETABLE 0
++#endif
++
++/*
++ gcdMMU_SIZE
++
++ Size of the MMU page table in bytes. Each 4 bytes can hold 4kB worth of
++ virtual data.
++*/
++#ifndef gcdMMU_SIZE
++#if gcdMIRROR_PAGETABLE
++# define gcdMMU_SIZE 0x200000
++#else
++# define gcdMMU_SIZE (2048 << 10)
++#endif
++#endif
++
++/*
++ gcdSECURE_USER
++
++ Use logical addresses instead of physical addresses in user land. In
++ this case a hint table is created for both command buffers and context
++ buffers, and that hint table will be used to patch up those buffers in
++ the kernel when they are ready to submit.
++*/
++#ifndef gcdSECURE_USER
++# define gcdSECURE_USER 0
++#endif
++
++/*
++ gcdSECURE_CACHE_SLOTS
++
++ Number of slots in the logical to DMA address cache table. Each time a
++ logical address needs to be translated into a DMA address for the GPU,
++ this cache will be walked. The replacement scheme is LRU.
++*/
++#ifndef gcdSECURE_CACHE_SLOTS
++# define gcdSECURE_CACHE_SLOTS 1024
++#endif
++
++/*
++ gcdSECURE_CACHE_METHOD
++
++ Replacement scheme used for Secure Cache. The following options are
++ available:
++
++ gcdSECURE_CACHE_LRU
++ A standard LRU cache.
++
++ gcdSECURE_CACHE_LINEAR
++ A linear walker with the idea that an application will always
++ render the scene in a similar way, so the next entry in the
++ cache should be a hit most of the time.
++
++ gcdSECURE_CACHE_HASH
++ A 256-entry hash table.
++
++ gcdSECURE_CACHE_TABLE
++ A simple cache but with potential of a lot of cache replacement.
++*/
++#ifndef gcdSECURE_CACHE_METHOD
++# define gcdSECURE_CACHE_METHOD gcdSECURE_CACHE_HASH
++#endif
++
++/*
++ gcdREGISTER_ACCESS_FROM_USER
++
++ Set to 1 to allow IOCTL calls to get through from user land. This
++ should only be in debug or development drops.
++*/
++#ifndef gcdREGISTER_ACCESS_FROM_USER
++# define gcdREGISTER_ACCESS_FROM_USER 1
++#endif
++
++/*
++ gcdHEAP_SIZE
++
++ Set the allocation size for the internal heaps. Each time a heap is
++ full, a new heap will be allocated with this minimum amount of bytes.
++ The bigger this size, the fewer heaps there are to allocate, the better
++ the performance. However, heaps won't be freed until they are
++ completely free, so there might be some more memory waste if the size is
++ too big.
++*/
++#ifndef gcdHEAP_SIZE
++# define gcdHEAP_SIZE (64 << 10)
++#endif
++
++/*
++ gcdPOWER_SUSPEND_WHEN_IDLE
++
++ Set to 1 to make GPU enter gcvPOWER_SUSPEND when idle detected,
++ otherwise GPU will enter gcvPOWER_IDLE.
++*/
++#ifndef gcdPOWER_SUSPEND_WHEN_IDLE
++# define gcdPOWER_SUSPEND_WHEN_IDLE 1
++#endif
++
++#ifndef gcdFPGA_BUILD
++# define gcdFPGA_BUILD 0
++#endif
++
++/*
++ gcdGPU_TIMEOUT
++
++ This define specified the number of milliseconds the system will wait
++ before it broadcasts the GPU is stuck. In other words, it will define
++ the timeout of any operation that needs to wait for the GPU.
++
++ If the value is 0, no timeout will be checked for.
++*/
++#ifndef gcdGPU_TIMEOUT
++#if gcdFPGA_BUILD
++# define gcdGPU_TIMEOUT 0
++# define gcdGPU_2D_TIMEOUT 0
++# else
++# define gcdGPU_TIMEOUT 20000
++# define gcdGPU_2D_TIMEOUT 4000
++# endif
++#endif
++
++/*
++ gcdGPU_ADVANCETIMER
++
++ it is advance timer.
++*/
++#ifndef gcdGPU_ADVANCETIMER
++# define gcdGPU_ADVANCETIMER 250
++#endif
++
++/*
++ gcdSTATIC_LINK
++
++ This define disables static linking;
++*/
++#ifndef gcdSTATIC_LINK
++# define gcdSTATIC_LINK 0
++#endif
++
++/*
++ gcdUSE_NEW_HEAP
++
++ Setting this define to 1 enables new heap.
++*/
++#ifndef gcdUSE_NEW_HEAP
++# define gcdUSE_NEW_HEAP 0
++#endif
++
++/*
++ gcdCMD_NO_2D_CONTEXT
++
++ This define enables no-context 2D command buffer.
++*/
++#ifndef gcdCMD_NO_2D_CONTEXT
++# define gcdCMD_NO_2D_CONTEXT 1
++#endif
++
++/*
++ gcdENABLE_BUFFER_ALIGNMENT
++
++ When enabled, video memory is allocated with at least 16KB alignment
++ between multiple sub-buffers.
++*/
++#ifndef gcdENABLE_BUFFER_ALIGNMENT
++# define gcdENABLE_BUFFER_ALIGNMENT 1
++#endif
++
++/*
++ gcdENABLE_BANK_ALIGNMENT
++
++ When enabled, video memory is allocated bank aligned. The vendor can modify
++ _GetSurfaceBankAlignment() and _GetBankOffsetBytes() to define how
++ different types of allocations are bank and channel aligned.
++ When disabled (default), no bank alignment is done.
++*/
++#ifndef gcdENABLE_BANK_ALIGNMENT
++# define gcdENABLE_BANK_ALIGNMENT 0
++#endif
++
++/*
++ gcdBANK_BIT_START
++
++ Specifies the start bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_START
++# define gcdBANK_BIT_START 12
++#endif
++
++/*
++ gcdBANK_BIT_END
++
++ Specifies the end bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_END
++# define gcdBANK_BIT_END 14
++#endif
++
++/*
++ gcdBANK_CHANNEL_BIT
++
++ When set, video memory when allocated bank aligned is allocated such that
++ render and depth buffer addresses alternate on the channel bit specified.
++ This option has an effect only when gcdENABLE_BANK_ALIGNMENT is enabled.
++ When disabled (default), no alteration is done.
++*/
++#ifndef gcdBANK_CHANNEL_BIT
++# define gcdBANK_CHANNEL_BIT 7
++#endif
++
++/*
++ gcdDYNAMIC_SPEED
++
++ When non-zero, it informs the kernel driver to use the speed throttling
++ broadcasting functions to inform the system the GPU should be sped up or
++ slowed down. It will send a broadcast for slowdown each "interval"
++ specified by this define in milliseconds
++ (gckOS_BroadcastCalibrateSpeed).
++*/
++#ifndef gcdDYNAMIC_SPEED
++# define gcdDYNAMIC_SPEED 2000
++#endif
++
++/*
++ gcdDYNAMIC_EVENT_THRESHOLD
++
++ When non-zero, it specifies the maximum number of available events at
++ which the kernel driver will issue a broadcast to speed up the GPU
++ (gckOS_BroadcastHurry).
++*/
++#ifndef gcdDYNAMIC_EVENT_THRESHOLD
++# define gcdDYNAMIC_EVENT_THRESHOLD 5
++#endif
++
++/*
++ gcdENABLE_PROFILING
++
++ Enable profiling macros.
++*/
++#ifndef gcdENABLE_PROFILING
++# define gcdENABLE_PROFILING 0
++#endif
++
++/*
++ gcdENABLE_128B_MERGE
++
++ Enable 128B merge for the BUS control.
++*/
++#ifndef gcdENABLE_128B_MERGE
++# define gcdENABLE_128B_MERGE 0
++#endif
++
++/*
++ gcdFRAME_DB
++
++ When non-zero, it specified the number of frames inside the frame
++ database. The frame DB will collect per-frame timestamps and hardware
++ counters.
++*/
++#ifndef gcdFRAME_DB
++# define gcdFRAME_DB 0
++# define gcdFRAME_DB_RESET 0
++# define gcdFRAME_DB_NAME "/var/log/frameDB.log"
++#endif
++
++/*
++ gcdDISABLE_CORES_2D3D
++ disable the 2D3D cores for 2D openVG
++*/
++#ifndef gcdDISABLE_CORES_2D3D
++# define gcdDISABLE_CORES_2D3D 0
++#endif
++
++/*
++ gcdPAGED_MEMORY_CACHEABLE
++
++ When non-zero, paged memory will be cacheable.
++
++ Normally, the driver determines whether a video memory
++ is cacheable or not. When cacheable is not necessary,
++ it will be writecombine.
++
++ This option is only for those SOC which can't enable
++ writecombine without enabling cacheable.
++*/
++#ifndef gcdPAGED_MEMORY_CACHEABLE
++# define gcdPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_CACHEABLE
++
++ When non-zero, non paged memory will be cacheable.
++*/
++#ifndef gcdNONPAGED_MEMORY_CACHEABLE
++# define gcdNONPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_BUFFERABLE
++
++ When non-zero, non paged memory will be bufferable.
++ gcdNONPAGED_MEMORY_BUFFERABLE and gcdNONPAGED_MEMORY_CACHEABLE
++ can't be set 1 at same time
++*/
++#ifndef gcdNONPAGED_MEMORY_BUFFERABLE
++# define gcdNONPAGED_MEMORY_BUFFERABLE 1
++#endif
++
++/*
++ gcdENABLE_INFINITE_SPEED_HW
++ enable the Infinite HW, this is for 2D openVG
++*/
++#ifndef gcdENABLE_INFINITE_SPEED_HW
++# define gcdENABLE_INFINITE_SPEED_HW 0
++#endif
++
++/*
++ gcdMULTI_GPU
++
++ Enable/disable multi-GPU support.
++ 0 : Disable multi-GPU support
++ 1 : Enable one of the 3D cores
++ [2..X] : Number of 3D GPU Cores
++*/
++#ifndef gcdMULTI_GPU
++# define gcdMULTI_GPU 0
++#endif
++
++/*
++ gcdMULTI_GPU_AFFINITY
++
++ Enable/disable the binding of a context to one GPU
++*/
++#ifndef gcdMULTI_GPU_AFFINITY
++# define gcdMULTI_GPU_AFFINITY 0
++#endif
++
++/*
++ gcdPOWEROFF_TIMEOUT
++
++ When non-zero, GPU will power off automatically from
++ idle state, and gcdPOWEROFF_TIMEOUT is also the default
++ timeout in milliseconds.
++ */
++#ifndef gcdPOWEROFF_TIMEOUT
++# define gcdPOWEROFF_TIMEOUT 300
++#endif
++
++/*
++ QNX_SINGLE_THREADED_DEBUGGING
++*/
++#ifndef QNX_SINGLE_THREADED_DEBUGGING
++# define QNX_SINGLE_THREADED_DEBUGGING 0
++#endif
++
++/*
++ gcdRENDER_THREADS
++
++ Number of render threads. Make it zero, and there will be no render
++ threads.
++*/
++#ifndef gcdRENDER_THREADS
++# define gcdRENDER_THREADS 0
++#endif
++
++/*
++ gcdSMP
++
++ This define enables SMP support.
++
++ Currently, it only works on Linux/Android,
++ Kbuild will config it according to whether
++ CONFIG_SMP is set.
++
++*/
++#ifndef gcdSMP
++#ifdef __APPLE__
++# define gcdSMP 1
++#else
++# define gcdSMP 0
++#endif
++#endif
++
++/*
++ gcdSHARED_RESOLVE_BUFFER_ENABLED
++
++ Use shared resolve buffer for all app buffers.
++*/
++#ifndef gcdSHARED_RESOLVE_BUFFER_ENABLED
++# define gcdSHARED_RESOLVE_BUFFER_ENABLED 0
++#endif
++
++/*
++ gcdUSE_TRIANGLE_STRIP_PATCH
++ */
++#ifndef gcdUSE_TRIANGLE_STRIP_PATCH
++# define gcdUSE_TRIANGLE_STRIP_PATCH 1
++#endif
++
++/*
++ gcdENABLE_OUTER_CACHE_PATCH
++
++ Enable the outer cache patch.
++*/
++#ifndef gcdENABLE_OUTER_CACHE_PATCH
++# define gcdENABLE_OUTER_CACHE_PATCH 0
++#endif
++
++/*
++ gcdPROCESS_ADDRESS_SPACE
++
++ When non-zero, every process which attaches to galcore has its own GPU
++ address space, size of which is gcdPROCESS_ADDRESS_SPACE_SIZE.
++*/
++#ifndef gcdPROCESS_ADDRESS_SPACE
++# define gcdPROCESS_ADDRESS_SPACE 0
++# define gcdPROCESS_ADDRESS_SPACE_SIZE 0x80000000
++#endif
++
++/*
++ gcdSHARED_PAGETABLE
++
++ When non-zero, multiple GPUs in one chip with same MMU use
++ one shared pagetable. So that when accessing same surface,
++ they can use same GPU virtual address.
++*/
++#ifndef gcdSHARED_PAGETABLE
++# define gcdSHARED_PAGETABLE !gcdPROCESS_ADDRESS_SPACE
++#endif
++
++#ifndef gcdUSE_PVR
++# define gcdUSE_PVR 1
++#endif
++
++/*
++ gcdSMALL_BLOCK_SIZE
++
++ When non-zero, a part of VIDMEM will be reserved for requests
++ whose requesting size is less than gcdSMALL_BLOCK_SIZE.
++
++ For Linux, it's the size of a page. If this request falls back
++ to gcvPOOL_CONTIGUOUS or gcvPOOL_VIRTUAL, memory will be wasted
++ because they allocate a page at least.
++*/
++#ifndef gcdSMALL_BLOCK_SIZE
++# define gcdSMALL_BLOCK_SIZE 4096
++# define gcdRATIO_FOR_SMALL_MEMORY 32
++#endif
++
++/*
++ gcdCONTIGUOUS_SIZE_LIMIT
++ When non-zero, size of video node from gcvPOOL_CONTIGUOUS is
++ limited by gcdCONTIGUOUS_SIZE_LIMIT.
++*/
++#ifndef gcdCONTIGUOUS_SIZE_LIMIT
++# define gcdCONTIGUOUS_SIZE_LIMIT 0
++#endif
++
++/*
++ gcdLINK_QUEUE_SIZE
++
++ When non-zero, driver maintains a queue to record information of
++ latest linked context buffer and command buffer. Data in this queue
++ is used to debug.
++*/
++#ifndef gcdLINK_QUEUE_SIZE
++# define gcdLINK_QUEUE_SIZE 5
++#endif
++
++/* gcdALPHA_KILL_IN_SHADER
++
++ Enable alpha kill inside the shader. This will be set automatically by the
++ HAL if certain states match a criteria.
++*/
++#ifndef gcdALPHA_KILL_IN_SHADER
++# define gcdALPHA_KILL_IN_SHADER 1
++#endif
++
++
++
++/*
++ gcdDVFS
++
++ When non-zero, software will make use of dynamic voltage and
++ frequency feature.
++ */
++#ifndef gcdDVFS
++# define gcdDVFS 0
++# define gcdDVFS_ANAYLSE_WINDOW 4
++# define gcdDVFS_POLLING_TIME (gcdDVFS_ANAYLSE_WINDOW * 4)
++#endif
++
++#ifndef gcdSYNC
++# define gcdSYNC 1
++#endif
++
++#ifndef gcdSHADER_SRC_BY_MACHINECODE
++# define gcdSHADER_SRC_BY_MACHINECODE 1
++#endif
++
++#ifndef gcdGLB27_SHADER_REPLACE_OPTIMIZATION
++# define gcdGLB27_SHADER_REPLACE_OPTIMIZATION 1
++#endif
++
++/*
++ gcdSTREAM_OUT_BUFFER
++
++ Enable support for the secondary stream out buffer.
++*/
++#ifndef gcdSTREAM_OUT_BUFFER
++# define gcdSTREAM_OUT_BUFFER 0
++# define gcdSTREAM_OUT_NAIVE_SYNC 0
++#endif
++
++/*
++ gcdUSE_HARDWARE_CONFIGURATION_TABLES
++
++ Enable the use of hardware configuration tables,
++ instead of query hardware and determine the features.
++*/
++#ifndef gcdUSE_HARDWARE_CONFIGURATION_TABLES
++# define gcdUSE_HARDWARE_CONFIGURATION_TABLES 0
++#endif
++
++/*
++ gcdSUPPORT_SWAP_RECTANGLE
++
++ Support swap with a specific rectangle.
++
++ Set the rectangle with eglSetSwapRectangleVIV api.
++ Android only.
++*/
++#ifndef gcdSUPPORT_SWAP_RECTANGLE
++# define gcdSUPPORT_SWAP_RECTANGLE 1
++#endif
++
++/*
++ gcdGPU_LINEAR_BUFFER_ENABLED
++
++ Use linear buffer for GPU apps so HWC can do 2D composition.
++ Android only.
++*/
++#ifndef gcdGPU_LINEAR_BUFFER_ENABLED
++# define gcdGPU_LINEAR_BUFFER_ENABLED 1
++#endif
++
++/*
++ gcdENABLE_RENDER_INTO_WINDOW
++
++ Enable Render-Into-Window (ie, No-Resolve) feature on android.
++ NOTE that even if enabled, it still depends on hardware feature and
++ android application behavior. When hardware feature or application
++ behavior can not support render into window mode, it will fail back
++ to normal mode.
++ When Render-Into-Window is finally used, window back buffer of android
++ applications will be allocated matching render target tiling format.
++ Otherwise buffer tiling is decided by the above option
++ 'gcdGPU_LINEAR_BUFFER_ENABLED'.
++ Android only for now.
++*/
++#ifndef gcdENABLE_RENDER_INTO_WINDOW
++# define gcdENABLE_RENDER_INTO_WINDOW 1
++#endif
++
++/*
++ gcdENABLE_RENDER_INTO_WINDOW_WITH_FC
++
++ Enable Direct-rendering (ie, No-Resolve) with tile status.
++ This is experimental and in development stage.
++ This will dynamically check if color compression is available.
++*/
++#ifndef gcdENABLE_RENDER_INTO_WINDOW_WITH_FC
++# define gcdENABLE_RENDER_INTO_WINDOW_WITH_FC 1
++#endif
++
++/*
++ gcdENABLE_BLIT_BUFFER_PRESERVE
++
++ Render-Into-Window (ie, No-Resolve) does not include preserved swap
++ behavior. This feature can enable buffer preserve in No-Resolve mode.
++ When enabled, previous buffer (may be part of ) will be resolve-blitted
++ to current buffer.
++*/
++#ifndef gcdENABLE_BLIT_BUFFER_PRESERVE
++# define gcdENABLE_BLIT_BUFFER_PRESERVE 1
++#endif
++
++/*
++ gcdANDROID_NATIVE_FENCE_SYNC
++
++ Enable android native fence sync. It is introduced since jellybean-4.2.
++ Depends on linux kernel option: CONFIG_SYNC.
++
++ 0: Disabled
++ 1: Build framework for native fence sync feature, and EGL extension
++ 2: Enable async swap buffers for client
++ * Native fence sync for client 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for layer in compositor side.
++ 3. Enable async hwcomposer composition.
++ * 'releaseFenceFd' for layer in compositor side, which is native
++ fence sync when client 'dequeueBuffer'
++ * Native fence sync for compositor 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for framebuffer target for DC
++ */
++#ifndef gcdANDROID_NATIVE_FENCE_SYNC
++# define gcdANDROID_NATIVE_FENCE_SYNC 0
++#endif
++
++/*
++ gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC
++
++ Enable implicit android native buffer sync.
++
++ For non-HW_RENDER buffer, CPU (or other hardware) and GPU can access
++ the buffer at the same time. This is to add implicit synchronization
++ between CPU (or the hardware) and GPU.
++
++ Eventually, please do not use implicit native buffer sync, but use
++ "fence sync" or "android native fence sync" instead in libgui, which
++ can be enabled in frameworks/native/libs/gui/Android.mk. This kind
++ of synchronization should be done by app but not driver itself.
++
++ Please disable this option when either "fence sync" or
++ "android native fence sync" is enabled.
++ */
++#ifndef gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC
++# define gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC 1
++#endif
++
++/*
++ * Implicit native buffer sync is not needed when ANDROID_native_fence_sync
++ * is available.
++ */
++#if gcdANDROID_NATIVE_FENCE_SYNC
++# undef gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC
++# define gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC 0
++#endif
++
++/*
++ gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++
++ Enable source surface address adjust when composition on android.
++ Android only.
++*/
++#ifndef gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 1
++#endif
++
++/*
++ gcdUSE_WCLIP_PATCH
++
++ Enable wclipping patch.
++*/
++#ifndef gcdUSE_WCLIP_PATCH
++# define gcdUSE_WCLIP_PATCH 1
++#endif
++
++#ifndef gcdUSE_NPOT_PATCH
++# define gcdUSE_NPOT_PATCH 1
++#endif
++
++/*
++ gcd3DBLIT
++
++ TODO: Should be replaced by feature bit if available.
++*/
++#ifndef gcd3DBLIT
++# define gcd3DBLIT 0
++#endif
++
++/*
++ gcdINTERNAL_COMMENT
++
++ Wrap internal comment, content wrapped by it and the macro itself
++ will be removed in release driver.
++*/
++#ifndef gcdINTERNAL_COMMENT
++# define gcdINTERNAL_COMMENT 1
++#endif
++
++/*
++ gcdRTT_DISABLE_FC
++
++ Disable RTT FC support. For test only.
++*/
++#ifndef gcdRTT_DISABLE_FC
++# define gcdRTT_DISABLE_FC 0
++#endif
++
++/*
++ gcdFORCE_MIPMAP
++
++ Force generate mipmap for texture.
++*/
++#ifndef gcdFORCE_MIPMAP
++# define gcdFORCE_MIPMAP 0
++#endif
++
++/*
++ gcdFORCE_BILINEAR
++
++ Force bilinear for mipfilter.
++*/
++#ifndef gcdFORCE_BILINEAR
++# define gcdFORCE_BILINEAR 1
++#endif
++
++/*
++ gcdBINARY_TRACE
++
++ When non-zero, binary trace will be generated.
++
++ When gcdBINARY_TRACE_FILE_SIZE is non-zero, binary trace buffer will
++ be written to a file which size is limited to
++ gcdBINARY_TRACE_FILE_SIZE.
++*/
++#ifndef gcdBINARY_TRACE
++# define gcdBINARY_TRACE 0
++# define gcdBINARY_TRACE_FILE_SIZE 0
++#endif
++
++#ifndef gcdMOVG
++# define gcdMOVG 0
++#if gcdMOVG
++# define GC355_PROFILER 1
++# endif
++# define gcdENABLE_TS_DOUBLE_BUFFER 1
++#else
++#if gcdMOVG
++# define GC355_PROFILER 1
++# define gcdENABLE_TS_DOUBLE_BUFFER 0
++#else
++# define gcdENABLE_TS_DOUBLE_BUFFER 1
++#endif
++#endif
++
++/* gcdINTERRUPT_STATISTIC
++ *
++ * Monitor the event send to GPU and interrupt issued by GPU.
++ */
++
++#ifndef gcdINTERRUPT_STATISTIC
++#if defined(LINUX)
++# define gcdINTERRUPT_STATISTIC 1
++#else
++# define gcdINTERRUPT_STATISTIC 0
++#endif
++#endif
++
++/*
++ gcdYINVERTED_RENDERING
++ When it's not zero, we will render the display buffer
++ with top-bottom direction. All other offscreen rendering
++ will be bottom-top, which follow OpenGL ES spec.
++*/
++#ifndef gcdYINVERTED_RENDERING
++# define gcdYINVERTED_RENDERING 1
++#endif
++
++#if gcdYINVERTED_RENDERING
++/* disable unaligned linear composition adjust in Y-inverted rendering mode. */
++# undef gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 0
++#endif
++
++/*
++ gcdFENCE_WAIT_LOOP_COUNT
++ Wait fence, loop count.
++*/
++#ifndef gcdFENCE_WAIT_LOOP_COUNT
++# define gcdFENCE_WAIT_LOOP_COUNT 100
++#endif
++
++/*
++ gcdHAL_3D_DRAWBLIT
++ When it's not zero, we will enable HAL 3D drawblit
++ to replace client 3dblit.
++*/
++#ifndef gcdHAL_3D_DRAWBLIT
++# define gcdHAL_3D_DRAWBLIT 1
++#endif
++
++/*
++ gcdPARTIAL_FAST_CLEAR
++ When it's not zero, partial fast clear is enabled.
++ Depends on gcdHAL_3D_DRAWBLIT, if gcdHAL_3D_DRAWBLIT is not enabled,
++ only available when scissor box is completely aligned.
++ Experimental, under test.
++*/
++#ifndef gcdPARTIAL_FAST_CLEAR
++# define gcdPARTIAL_FAST_CLEAR 1
++#endif
++
++/*
++ gcdREMOVE_SURF_ORIENTATION
++ When it's not zero, we will remove surface orientation function.
++ It will become a parameter of the resolve function.
++*/
++#ifndef gcdREMOVE_SURF_ORIENTATION
++# define gcdREMOVE_SURF_ORIENTATION 0
++#endif
++
++/*
++ gcdPATTERN_FAST_PATH
++ For pattern match
++*/
++#ifndef gcdPATTERN_FAST_PATH
++# define gcdPATTERN_FAST_PATH 1
++#endif
++
++/*
++ gcdUSE_INPUT_DEVICE
++ disable input devices usage under fb mode to support fb+vdk multi-process
++*/
++#ifndef gcdUSE_INPUT_DEVICE
++# define gcdUSE_INPUT_DEVICE 1
++#endif
++
++
++/*
++ gcdFRAMEINFO_STATISTIC
++ When enable, collect frame information.
++*/
++#ifndef gcdFRAMEINFO_STATISTIC
++
++#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG) || gcdDUMP
++# define gcdFRAMEINFO_STATISTIC 1
++#else
++# define gcdFRAMEINFO_STATISTIC 0
++#endif
++
++#endif
++
++/*
++ gcdPACKED_OUTPUT_ADDRESS
++ When it's not zero, ps output is already packed after linked
++*/
++#ifndef gcdPACKED_OUTPUT_ADDRESS
++# define gcdPACKED_OUTPUT_ADDRESS 1
++#endif
++
++/*
++ gcdENABLE_THIRD_PARTY_OPERATION
++ Enable third party operation like tpc or not.
++*/
++#ifndef gcdENABLE_THIRD_PARTY_OPERATION
++# define gcdENABLE_THIRD_PARTY_OPERATION 1
++#endif
++
++
++/*
++ Core configurations. By default enable all cores.
++*/
++#ifndef gcdENABLE_3D
++# define gcdENABLE_3D 1
++#endif
++
++#ifndef gcdENABLE_2D
++# define gcdENABLE_2D 1
++#endif
++
++#ifndef gcdENABLE_VG
++# define gcdENABLE_VG 0
++#endif
++
++#ifndef gcdGC355_MEM_PRINT
++# define gcdGC355_MEM_PRINT 0
++#else
++#if (!((gcdENABLE_3D == 0) && (gcdENABLE_2D == 0) && (gcdENABLE_VG == 1)))
++# undef gcdGC355_MEM_PRINT
++# define gcdGC355_MEM_PRINT 0
++# endif
++#endif
++
++#ifndef gcdENABLE_UNIFIED_CONSTANT
++# define gcdENABLE_UNIFIED_CONSTANT 1
++#endif
++
++/*
++ gcdRECORD_COMMAND
++*/
++#ifndef gcdRECORD_COMMAND
++# define gcdRECORD_COMMAND 0
++#endif
++
++#endif /* __gc_hal_options_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_profiler.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_profiler.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_profiler.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_profiler.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,585 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_profiler_h_
++#define __gc_hal_profiler_h_
++
++#if VIVANTE_PROFILER_NEW
++#include "gc_hal_engine.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define GLVERTEX_OBJECT 10
++#define GLVERTEX_OBJECT_BYTES 11
++
++#define GLINDEX_OBJECT 20
++#define GLINDEX_OBJECT_BYTES 21
++
++#define GLTEXTURE_OBJECT 30
++#define GLTEXTURE_OBJECT_BYTES 31
++
++#define GLBUFOBJ_OBJECT 40
++#define GLBUFOBJ_OBJECT_BYTES 41
++
++#if VIVANTE_PROFILER
++#define gcmPROFILE_GC(Enum, Value) gcoPROFILER_Count(gcvNULL, Enum, Value)
++#else
++#define gcmPROFILE_GC(Enum, Value) do { } while (gcvFALSE)
++#endif
++
++#ifndef gcdNEW_PROFILER_FILE
++#define gcdNEW_PROFILER_FILE 1
++#endif
++
++#define ES11_CALLS 151
++#define ES11_DRAWCALLS (ES11_CALLS + 1)
++#define ES11_STATECHANGECALLS (ES11_DRAWCALLS + 1)
++#define ES11_POINTCOUNT (ES11_STATECHANGECALLS + 1)
++#define ES11_LINECOUNT (ES11_POINTCOUNT + 1)
++#define ES11_TRIANGLECOUNT (ES11_LINECOUNT + 1)
++
++#define ES30_CALLS 159
++#define ES30_DRAWCALLS (ES30_CALLS + 1)
++#define ES30_STATECHANGECALLS (ES30_DRAWCALLS + 1)
++#define ES30_POINTCOUNT (ES30_STATECHANGECALLS + 1)
++#define ES30_LINECOUNT (ES30_POINTCOUNT + 1)
++#define ES30_TRIANGLECOUNT (ES30_LINECOUNT + 1)
++
++#define VG11_CALLS 88
++#define VG11_DRAWCALLS (VG11_CALLS + 1)
++#define VG11_STATECHANGECALLS (VG11_DRAWCALLS + 1)
++#define VG11_FILLCOUNT (VG11_STATECHANGECALLS + 1)
++#define VG11_STROKECOUNT (VG11_FILLCOUNT + 1)
++/* End of Driver API ID Definitions. */
++
++/* HAL & MISC IDs. */
++#define HAL_VERTBUFNEWBYTEALLOC 1
++#define HAL_VERTBUFTOTALBYTEALLOC (HAL_VERTBUFNEWBYTEALLOC + 1)
++#define HAL_VERTBUFNEWOBJALLOC (HAL_VERTBUFTOTALBYTEALLOC + 1)
++#define HAL_VERTBUFTOTALOBJALLOC (HAL_VERTBUFNEWOBJALLOC + 1)
++#define HAL_INDBUFNEWBYTEALLOC (HAL_VERTBUFTOTALOBJALLOC + 1)
++#define HAL_INDBUFTOTALBYTEALLOC (HAL_INDBUFNEWBYTEALLOC + 1)
++#define HAL_INDBUFNEWOBJALLOC (HAL_INDBUFTOTALBYTEALLOC + 1)
++#define HAL_INDBUFTOTALOBJALLOC (HAL_INDBUFNEWOBJALLOC + 1)
++#define HAL_TEXBUFNEWBYTEALLOC (HAL_INDBUFTOTALOBJALLOC + 1)
++#define HAL_TEXBUFTOTALBYTEALLOC (HAL_TEXBUFNEWBYTEALLOC + 1)
++#define HAL_TEXBUFNEWOBJALLOC (HAL_TEXBUFTOTALBYTEALLOC + 1)
++#define HAL_TEXBUFTOTALOBJALLOC (HAL_TEXBUFNEWOBJALLOC + 1)
++
++#define GPU_CYCLES 1
++#define GPU_READ64BYTE (GPU_CYCLES + 1)
++#define GPU_WRITE64BYTE (GPU_READ64BYTE + 1)
++#define GPU_TOTALCYCLES (GPU_WRITE64BYTE + 1)
++#define GPU_IDLECYCLES (GPU_TOTALCYCLES + 1)
++
++#define VS_INSTCOUNT 1
++#define VS_BRANCHINSTCOUNT (VS_INSTCOUNT + 1)
++#define VS_TEXLDINSTCOUNT (VS_BRANCHINSTCOUNT + 1)
++#define VS_RENDEREDVERTCOUNT (VS_TEXLDINSTCOUNT + 1)
++#define VS_SOURCE (VS_RENDEREDVERTCOUNT + 1)
++
++#define PS_INSTCOUNT 1
++#define PS_BRANCHINSTCOUNT (PS_INSTCOUNT + 1)
++#define PS_TEXLDINSTCOUNT (PS_BRANCHINSTCOUNT + 1)
++#define PS_RENDEREDPIXCOUNT (PS_TEXLDINSTCOUNT + 1)
++#define PS_SOURCE (PS_RENDEREDPIXCOUNT + 1)
++
++#define PA_INVERTCOUNT 1
++#define PA_INPRIMCOUNT (PA_INVERTCOUNT + 1)
++#define PA_OUTPRIMCOUNT (PA_INPRIMCOUNT + 1)
++#define PA_DEPTHCLIPCOUNT (PA_OUTPRIMCOUNT + 1)
++#define PA_TRIVIALREJCOUNT (PA_DEPTHCLIPCOUNT + 1)
++#define PA_CULLCOUNT (PA_TRIVIALREJCOUNT + 1)
++
++#define SE_TRIANGLECOUNT 1
++#define SE_LINECOUNT (SE_TRIANGLECOUNT + 1)
++
++#define RA_VALIDPIXCOUNT 1
++#define RA_TOTALQUADCOUNT (RA_VALIDPIXCOUNT + 1)
++#define RA_VALIDQUADCOUNTEZ (RA_TOTALQUADCOUNT + 1)
++#define RA_TOTALPRIMCOUNT (RA_VALIDQUADCOUNTEZ + 1)
++#define RA_PIPECACHEMISSCOUNT (RA_TOTALPRIMCOUNT + 1)
++#define RA_PREFCACHEMISSCOUNT (RA_PIPECACHEMISSCOUNT + 1)
++#define RA_EEZCULLCOUNT (RA_PREFCACHEMISSCOUNT + 1)
++
++#define TX_TOTBILINEARREQ 1
++#define TX_TOTTRILINEARREQ (TX_TOTBILINEARREQ + 1)
++#define TX_TOTDISCARDTEXREQ (TX_TOTTRILINEARREQ + 1)
++#define TX_TOTTEXREQ (TX_TOTDISCARDTEXREQ + 1)
++#define TX_MEMREADCOUNT (TX_TOTTEXREQ + 1)
++#define TX_MEMREADIN8BCOUNT (TX_MEMREADCOUNT + 1)
++#define TX_CACHEMISSCOUNT (TX_MEMREADIN8BCOUNT + 1)
++#define TX_CACHEHITTEXELCOUNT (TX_CACHEMISSCOUNT + 1)
++#define TX_CACHEMISSTEXELCOUNT (TX_CACHEHITTEXELCOUNT + 1)
++
++#define PE_KILLEDBYCOLOR 1
++#define PE_KILLEDBYDEPTH (PE_KILLEDBYCOLOR + 1)
++#define PE_DRAWNBYCOLOR (PE_KILLEDBYDEPTH + 1)
++#define PE_DRAWNBYDEPTH (PE_DRAWNBYCOLOR + 1)
++
++#define MC_READREQ8BPIPE 1
++#define MC_READREQ8BIP (MC_READREQ8BPIPE + 1)
++#define MC_WRITEREQ8BPIPE (MC_READREQ8BIP + 1)
++
++#define AXI_READREQSTALLED 1
++#define AXI_WRITEREQSTALLED (AXI_READREQSTALLED + 1)
++#define AXI_WRITEDATASTALLED (AXI_WRITEREQSTALLED + 1)
++
++#define PVS_INSTRCOUNT 1
++#define PVS_ALUINSTRCOUNT (PVS_INSTRCOUNT + 1)
++#define PVS_TEXINSTRCOUNT (PVS_ALUINSTRCOUNT + 1)
++#define PVS_ATTRIBCOUNT (PVS_TEXINSTRCOUNT + 1)
++#define PVS_UNIFORMCOUNT (PVS_ATTRIBCOUNT + 1)
++#define PVS_FUNCTIONCOUNT (PVS_UNIFORMCOUNT + 1)
++#define PVS_SOURCE (PVS_FUNCTIONCOUNT + 1)
++
++#define PPS_INSTRCOUNT 1
++#define PPS_ALUINSTRCOUNT (PPS_INSTRCOUNT + 1)
++#define PPS_TEXINSTRCOUNT (PPS_ALUINSTRCOUNT + 1)
++#define PPS_ATTRIBCOUNT (PPS_TEXINSTRCOUNT + 1)
++#define PPS_UNIFORMCOUNT (PPS_ATTRIBCOUNT + 1)
++#define PPS_FUNCTIONCOUNT (PPS_UNIFORMCOUNT + 1)
++#define PPS_SOURCE (PPS_FUNCTIONCOUNT + 1)
++/* End of MISC Counter IDs. */
++
++#ifdef gcdNEW_PROFILER_FILE
++
++/* Category Constants. */
++#define VPHEADER 0x010000
++#define VPG_INFO 0x020000
++#define VPG_TIME 0x030000
++#define VPG_MEM 0x040000
++#define VPG_ES11 0x050000
++#define VPG_ES30 0x060000
++#define VPG_VG11 0x070000
++#define VPG_HAL 0x080000
++#define VPG_HW 0x090000
++#define VPG_GPU 0x0a0000
++#define VPG_VS 0x0b0000
++#define VPG_PS 0x0c0000
++#define VPG_PA 0x0d0000
++#define VPG_SETUP 0x0e0000
++#define VPG_RA 0x0f0000
++#define VPG_TX 0x100000
++#define VPG_PE 0x110000
++#define VPG_MC 0x120000
++#define VPG_AXI 0x130000
++#define VPG_PROG 0x140000
++#define VPG_PVS 0x150000
++#define VPG_PPS 0x160000
++#define VPG_ES11_TIME 0x170000
++#define VPG_ES30_TIME 0x180000
++#define VPG_FRAME 0x190000
++#define VPG_ES11_DRAW 0x200000
++#define VPG_ES30_DRAW 0x210000
++#define VPG_VG11_TIME 0x220000
++#define VPG_END 0xff0000
++
++/* Info. */
++#define VPC_INFOCOMPANY (VPG_INFO + 1)
++#define VPC_INFOVERSION (VPC_INFOCOMPANY + 1)
++#define VPC_INFORENDERER (VPC_INFOVERSION + 1)
++#define VPC_INFOREVISION (VPC_INFORENDERER + 1)
++#define VPC_INFODRIVER (VPC_INFOREVISION + 1)
++#define VPC_INFODRIVERMODE (VPC_INFODRIVER + 1)
++#define VPC_INFOSCREENSIZE (VPC_INFODRIVERMODE + 1)
++
++/* Counter Constants. */
++#define VPC_ELAPSETIME (VPG_TIME + 1)
++#define VPC_CPUTIME (VPC_ELAPSETIME + 1)
++
++#define VPC_MEMMAXRES (VPG_MEM + 1)
++#define VPC_MEMSHARED (VPC_MEMMAXRES + 1)
++#define VPC_MEMUNSHAREDDATA (VPC_MEMSHARED + 1)
++#define VPC_MEMUNSHAREDSTACK (VPC_MEMUNSHAREDDATA + 1)
++
++/* OpenGL ES11 Statics Counter IDs. */
++#define VPC_ES11CALLS (VPG_ES11 + ES11_CALLS)
++#define VPC_ES11DRAWCALLS (VPG_ES11 + ES11_DRAWCALLS)
++#define VPC_ES11STATECHANGECALLS (VPG_ES11 + ES11_STATECHANGECALLS)
++#define VPC_ES11POINTCOUNT (VPG_ES11 + ES11_POINTCOUNT)
++#define VPC_ES11LINECOUNT (VPG_ES11 + ES11_LINECOUNT)
++#define VPC_ES11TRIANGLECOUNT (VPG_ES11 + ES11_TRIANGLECOUNT)
++
++/* OpenGL ES30 Statistics Counter IDs. */
++#define VPC_ES30CALLS (VPG_ES30 + ES30_CALLS)
++#define VPC_ES30DRAWCALLS (VPG_ES30 + ES30_DRAWCALLS)
++#define VPC_ES30STATECHANGECALLS (VPG_ES30 + ES30_STATECHANGECALLS)
++#define VPC_ES30POINTCOUNT (VPG_ES30 + ES30_POINTCOUNT)
++#define VPC_ES30LINECOUNT (VPG_ES30 + ES30_LINECOUNT)
++#define VPC_ES30TRIANGLECOUNT (VPG_ES30 + ES30_TRIANGLECOUNT)
++
++/* OpenVG Statistics Counter IDs. */
++#define VPC_VG11CALLS (VPG_VG11 + VG11_CALLS)
++#define VPC_VG11DRAWCALLS (VPG_VG11 + VG11_DRAWCALLS)
++#define VPC_VG11STATECHANGECALLS (VPG_VG11 + VG11_STATECHANGECALLS)
++#define VPC_VG11FILLCOUNT (VPG_VG11 + VG11_FILLCOUNT)
++#define VPC_VG11STROKECOUNT (VPG_VG11 + VG11_STROKECOUNT)
++
++/* HAL Counters. */
++#define VPC_HALVERTBUFNEWBYTEALLOC (VPG_HAL + HAL_VERTBUFNEWBYTEALLOC)
++#define VPC_HALVERTBUFTOTALBYTEALLOC (VPG_HAL + HAL_VERTBUFTOTALBYTEALLOC)
++#define VPC_HALVERTBUFNEWOBJALLOC (VPG_HAL + HAL_VERTBUFNEWOBJALLOC)
++#define VPC_HALVERTBUFTOTALOBJALLOC (VPG_HAL + HAL_VERTBUFTOTALOBJALLOC)
++#define VPC_HALINDBUFNEWBYTEALLOC (VPG_HAL + HAL_INDBUFNEWBYTEALLOC)
++#define VPC_HALINDBUFTOTALBYTEALLOC (VPG_HAL + HAL_INDBUFTOTALBYTEALLOC)
++#define VPC_HALINDBUFNEWOBJALLOC (VPG_HAL + HAL_INDBUFNEWOBJALLOC)
++#define VPC_HALINDBUFTOTALOBJALLOC (VPG_HAL + HAL_INDBUFTOTALOBJALLOC)
++#define VPC_HALTEXBUFNEWBYTEALLOC (VPG_HAL + HAL_TEXBUFNEWBYTEALLOC)
++#define VPC_HALTEXBUFTOTALBYTEALLOC (VPG_HAL + HAL_TEXBUFTOTALBYTEALLOC)
++#define VPC_HALTEXBUFNEWOBJALLOC (VPG_HAL + HAL_TEXBUFNEWOBJALLOC)
++#define VPC_HALTEXBUFTOTALOBJALLOC (VPG_HAL + HAL_TEXBUFTOTALOBJALLOC)
++
++/* HW: GPU Counters. */
++#define VPC_GPUCYCLES (VPG_GPU + GPU_CYCLES)
++#define VPC_GPUREAD64BYTE (VPG_GPU + GPU_READ64BYTE)
++#define VPC_GPUWRITE64BYTE (VPG_GPU + GPU_WRITE64BYTE)
++#define VPC_GPUTOTALCYCLES (VPG_GPU + GPU_TOTALCYCLES)
++#define VPC_GPUIDLECYCLES (VPG_GPU + GPU_IDLECYCLES)
++
++/* HW: Shader Counters. */
++#define VPC_VSINSTCOUNT (VPG_VS + VS_INSTCOUNT)
++#define VPC_VSBRANCHINSTCOUNT (VPG_VS + VS_BRANCHINSTCOUNT)
++#define VPC_VSTEXLDINSTCOUNT (VPG_VS + VS_TEXLDINSTCOUNT)
++#define VPC_VSRENDEREDVERTCOUNT (VPG_VS + VS_RENDEREDVERTCOUNT)
++/* HW: PS Count. */
++#define VPC_PSINSTCOUNT (VPG_PS + PS_INSTCOUNT)
++#define VPC_PSBRANCHINSTCOUNT (VPG_PS + PS_BRANCHINSTCOUNT)
++#define VPC_PSTEXLDINSTCOUNT (VPG_PS + PS_TEXLDINSTCOUNT)
++#define VPC_PSRENDEREDPIXCOUNT (VPG_PS + PS_RENDEREDPIXCOUNT)
++
++
++/* HW: PA Counters. */
++#define VPC_PAINVERTCOUNT (VPG_PA + PA_INVERTCOUNT)
++#define VPC_PAINPRIMCOUNT (VPG_PA + PA_INPRIMCOUNT)
++#define VPC_PAOUTPRIMCOUNT (VPG_PA + PA_OUTPRIMCOUNT)
++#define VPC_PADEPTHCLIPCOUNT (VPG_PA + PA_DEPTHCLIPCOUNT)
++#define VPC_PATRIVIALREJCOUNT (VPG_PA + PA_TRIVIALREJCOUNT)
++#define VPC_PACULLCOUNT (VPG_PA + PA_CULLCOUNT)
++
++/* HW: Setup Counters. */
++#define VPC_SETRIANGLECOUNT (VPG_SETUP + SE_TRIANGLECOUNT)
++#define VPC_SELINECOUNT (VPG_SETUP + SE_LINECOUNT)
++
++/* HW: RA Counters. */
++#define VPC_RAVALIDPIXCOUNT (VPG_RA + RA_VALIDPIXCOUNT)
++#define VPC_RATOTALQUADCOUNT (VPG_RA + RA_TOTALQUADCOUNT)
++#define VPC_RAVALIDQUADCOUNTEZ (VPG_RA + RA_VALIDQUADCOUNTEZ)
++#define VPC_RATOTALPRIMCOUNT (VPG_RA + RA_TOTALPRIMCOUNT)
++#define VPC_RAPIPECACHEMISSCOUNT (VPG_RA + RA_PIPECACHEMISSCOUNT)
++#define VPC_RAPREFCACHEMISSCOUNT (VPG_RA + RA_PREFCACHEMISSCOUNT)
++#define VPC_RAEEZCULLCOUNT (VPG_RA + RA_EEZCULLCOUNT)
++
++/* HW: TEX Counters. */
++#define VPC_TXTOTBILINEARREQ (VPG_TX + TX_TOTBILINEARREQ)
++#define VPC_TXTOTTRILINEARREQ (VPG_TX + TX_TOTTRILINEARREQ)
++#define VPC_TXTOTDISCARDTEXREQ (VPG_TX + TX_TOTDISCARDTEXREQ)
++#define VPC_TXTOTTEXREQ (VPG_TX + TX_TOTTEXREQ)
++#define VPC_TXMEMREADCOUNT (VPG_TX + TX_MEMREADCOUNT)
++#define VPC_TXMEMREADIN8BCOUNT (VPG_TX + TX_MEMREADIN8BCOUNT)
++#define VPC_TXCACHEMISSCOUNT (VPG_TX + TX_CACHEMISSCOUNT)
++#define VPC_TXCACHEHITTEXELCOUNT (VPG_TX + TX_CACHEHITTEXELCOUNT)
++#define VPC_TXCACHEMISSTEXELCOUNT (VPG_TX + TX_CACHEMISSTEXELCOUNT)
++
++/* HW: PE Counters. */
++#define VPC_PEKILLEDBYCOLOR (VPG_PE + PE_KILLEDBYCOLOR)
++#define VPC_PEKILLEDBYDEPTH (VPG_PE + PE_KILLEDBYDEPTH)
++#define VPC_PEDRAWNBYCOLOR (VPG_PE + PE_DRAWNBYCOLOR)
++#define VPC_PEDRAWNBYDEPTH (VPG_PE + PE_DRAWNBYDEPTH)
++
++/* HW: MC Counters. */
++#define VPC_MCREADREQ8BPIPE (VPG_MC + MC_READREQ8BPIPE)
++#define VPC_MCREADREQ8BIP (VPG_MC + MC_READREQ8BIP)
++#define VPC_MCWRITEREQ8BPIPE (VPG_MC + MC_WRITEREQ8BPIPE)
++
++/* HW: AXI Counters. */
++#define VPC_AXIREADREQSTALLED (VPG_AXI + AXI_READREQSTALLED)
++#define VPC_AXIWRITEREQSTALLED (VPG_AXI + AXI_WRITEREQSTALLED)
++#define VPC_AXIWRITEDATASTALLED (VPG_AXI + AXI_WRITEDATASTALLED)
++
++/* PROGRAM: Shader program counters. */
++#define VPC_PVSINSTRCOUNT (VPG_PVS + PVS_INSTRCOUNT)
++#define VPC_PVSALUINSTRCOUNT (VPG_PVS + PVS_ALUINSTRCOUNT)
++#define VPC_PVSTEXINSTRCOUNT (VPG_PVS + PVS_TEXINSTRCOUNT)
++#define VPC_PVSATTRIBCOUNT (VPG_PVS + PVS_ATTRIBCOUNT)
++#define VPC_PVSUNIFORMCOUNT (VPG_PVS + PVS_UNIFORMCOUNT)
++#define VPC_PVSFUNCTIONCOUNT (VPG_PVS + PVS_FUNCTIONCOUNT)
++#define VPC_PVSSOURCE (VPG_PVS + PVS_SOURCE)
++
++#define VPC_PPSINSTRCOUNT (VPG_PPS + PPS_INSTRCOUNT)
++#define VPC_PPSALUINSTRCOUNT (VPG_PPS + PPS_ALUINSTRCOUNT)
++#define VPC_PPSTEXINSTRCOUNT (VPG_PPS + PPS_TEXINSTRCOUNT)
++#define VPC_PPSATTRIBCOUNT (VPG_PPS + PPS_ATTRIBCOUNT)
++#define VPC_PPSUNIFORMCOUNT (VPG_PPS + PPS_UNIFORMCOUNT)
++#define VPC_PPSFUNCTIONCOUNT (VPG_PPS + PPS_FUNCTIONCOUNT)
++#define VPC_PPSSOURCE (VPG_PPS + PPS_SOURCE)
++
++#define VPC_PROGRAMHANDLE (VPG_PROG + 1)
++
++#define VPC_ES30_DRAW_NO (VPG_ES30_DRAW + 1)
++#define VPC_ES11_DRAW_NO (VPG_ES11_DRAW + 1)
++#endif
++
++
++/* HW profile information. */
++typedef struct _gcsPROFILER_COUNTERS
++{
++ /* HW static counters. */
++ gctUINT32 gpuClock;
++ gctUINT32 axiClock;
++ gctUINT32 shaderClock;
++
++ /* HW variable counters. */
++ gctUINT32 gpuClockStart;
++ gctUINT32 gpuClockEnd;
++
++ /* HW variable counters. */
++ gctUINT32 gpuCyclesCounter;
++ gctUINT32 gpuTotalCyclesCounter;
++ gctUINT32 gpuIdleCyclesCounter;
++ gctUINT32 gpuTotalRead64BytesPerFrame;
++ gctUINT32 gpuTotalWrite64BytesPerFrame;
++
++ /* PE */
++ gctUINT32 pe_pixel_count_killed_by_color_pipe;
++ gctUINT32 pe_pixel_count_killed_by_depth_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_color_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_depth_pipe;
++
++ /* SH */
++ gctUINT32 ps_inst_counter;
++ gctUINT32 rendered_pixel_counter;
++ gctUINT32 vs_inst_counter;
++ gctUINT32 rendered_vertice_counter;
++ gctUINT32 vtx_branch_inst_counter;
++ gctUINT32 vtx_texld_inst_counter;
++ gctUINT32 pxl_branch_inst_counter;
++ gctUINT32 pxl_texld_inst_counter;
++
++ /* PA */
++ gctUINT32 pa_input_vtx_counter;
++ gctUINT32 pa_input_prim_counter;
++ gctUINT32 pa_output_prim_counter;
++ gctUINT32 pa_depth_clipped_counter;
++ gctUINT32 pa_trivial_rejected_counter;
++ gctUINT32 pa_culled_counter;
++
++ /* SE */
++ gctUINT32 se_culled_triangle_count;
++ gctUINT32 se_culled_lines_count;
++
++ /* RA */
++ gctUINT32 ra_valid_pixel_count;
++ gctUINT32 ra_total_quad_count;
++ gctUINT32 ra_valid_quad_count_after_early_z;
++ gctUINT32 ra_total_primitive_count;
++ gctUINT32 ra_pipe_cache_miss_counter;
++ gctUINT32 ra_prefetch_cache_miss_counter;
++ gctUINT32 ra_eez_culled_counter;
++
++ /* TX */
++ gctUINT32 tx_total_bilinear_requests;
++ gctUINT32 tx_total_trilinear_requests;
++ gctUINT32 tx_total_discarded_texture_requests;
++ gctUINT32 tx_total_texture_requests;
++ gctUINT32 tx_mem_read_count;
++ gctUINT32 tx_mem_read_in_8B_count;
++ gctUINT32 tx_cache_miss_count;
++ gctUINT32 tx_cache_hit_texel_count;
++ gctUINT32 tx_cache_miss_texel_count;
++
++ /* MC */
++ gctUINT32 mc_total_read_req_8B_from_pipeline;
++ gctUINT32 mc_total_read_req_8B_from_IP;
++ gctUINT32 mc_total_write_req_8B_from_pipeline;
++
++ /* HI */
++ gctUINT32 hi_axi_cycles_read_request_stalled;
++ gctUINT32 hi_axi_cycles_write_request_stalled;
++ gctUINT32 hi_axi_cycles_write_data_stalled;
++}
++gcsPROFILER_COUNTERS;
++
++#if VIVANTE_PROFILER_NEW
++#define NumOfDrawBuf 64
++#endif
++
++/* HAL profile information. */
++typedef struct _gcsPROFILER
++{
++ gctUINT32 enable;
++ gctBOOL enableHal;
++ gctBOOL enableHW;
++ gctBOOL enableSH;
++ gctBOOL isSyncMode;
++ gctBOOL disableOutputCounter;
++
++ gctBOOL useSocket;
++ gctINT sockFd;
++
++ gctFILE file;
++
++ /* Aggregate Information */
++
++ /* Clock Info */
++ gctUINT64 frameStart;
++ gctUINT64 frameEnd;
++
++ /* Current frame information */
++ gctUINT32 frameNumber;
++ gctUINT64 frameStartTimeusec;
++ gctUINT64 frameEndTimeusec;
++ gctUINT64 frameStartCPUTimeusec;
++ gctUINT64 frameEndCPUTimeusec;
++
++#if PROFILE_HAL_COUNTERS
++ gctUINT32 vertexBufferTotalBytesAlloc;
++ gctUINT32 vertexBufferNewBytesAlloc;
++ int vertexBufferTotalObjectsAlloc;
++ int vertexBufferNewObjectsAlloc;
++
++ gctUINT32 indexBufferTotalBytesAlloc;
++ gctUINT32 indexBufferNewBytesAlloc;
++ int indexBufferTotalObjectsAlloc;
++ int indexBufferNewObjectsAlloc;
++
++ gctUINT32 textureBufferTotalBytesAlloc;
++ gctUINT32 textureBufferNewBytesAlloc;
++ int textureBufferTotalObjectsAlloc;
++ int textureBufferNewObjectsAlloc;
++
++ gctUINT32 numCommits;
++ gctUINT32 drawPointCount;
++ gctUINT32 drawLineCount;
++ gctUINT32 drawTriangleCount;
++ gctUINT32 drawVertexCount;
++ gctUINT32 redundantStateChangeCalls;
++#endif
++
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++
++#if VIVANTE_PROFILER_NEW
++ gcoBUFOBJ newCounterBuf[NumOfDrawBuf];
++ gctUINT32 curBufId;
++#endif
++
++}
++gcsPROFILER;
++
++/* Memory profile information. */
++struct _gcsMemProfile
++{
++ /* Memory Usage */
++ gctUINT32 videoMemUsed;
++ gctUINT32 systemMemUsed;
++ gctUINT32 commitBufferSize;
++ gctUINT32 contextBufferCopyBytes;
++};
++
++/* Shader profile information. */
++struct _gcsSHADER_PROFILER
++{
++ gctUINT32 shaderLength;
++ gctUINT32 shaderALUCycles;
++ gctUINT32 shaderTexLoadCycles;
++ gctUINT32 shaderTempRegCount;
++ gctUINT32 shaderSamplerRegCount;
++ gctUINT32 shaderInputRegCount;
++ gctUINT32 shaderOutputRegCount;
++};
++
++/* Initialize the gcsProfiler. */
++gceSTATUS
++gcoPROFILER_Initialize(
++ IN gcoHAL Hal,
++ IN gctBOOL Enable
++ );
++
++/* Destroy the gcProfiler. */
++gceSTATUS
++gcoPROFILER_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Write data to profiler. */
++gceSTATUS
++gcoPROFILER_Write(
++ IN gcoHAL Hal,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data out. */
++gceSTATUS
++gcoPROFILER_Flush(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of frame. */
++gceSTATUS
++gcoPROFILER_EndFrame(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of draw. */
++gceSTATUS
++gcoPROFILER_EndDraw(
++ IN gcoHAL Hal,
++ IN gctBOOL FirstDraw
++ );
++
++/* Increase profile counter Enum by Value. */
++gceSTATUS
++gcoPROFILER_Count(
++ IN gcoHAL Hal,
++ IN gctUINT32 Enum,
++ IN gctINT Value
++ );
++
++/* Profile input vertex shader. */
++gceSTATUS
++gcoPROFILER_ShaderVS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Vs
++ );
++
++/* Profile input fragment shader. */
++gceSTATUS
++gcoPROFILER_ShaderFS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Fs
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_profiler_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_raster.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_raster.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_raster.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_raster.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1038 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_raster_h_
++#define __gc_hal_raster_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoBRUSH * gcoBRUSH;
++typedef struct _gcoBRUSH_CACHE * gcoBRUSH_CACHE;
++
++/******************************************************************************\
++******************************** gcoBRUSH Object *******************************
++\******************************************************************************/
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructSingleColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructMonochrome(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Destroy an gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_Destroy(
++ IN gcoBRUSH Brush
++ );
++
++/******************************************************************************\
++******************************** gcoSURF Object *******************************
++\******************************************************************************/
++
++/* Set cipping rectangle. */
++gceSTATUS
++gcoSURF_SetClipping(
++ IN gcoSURF Surface
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gcoSURF_Clear2D(
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 LoColor,
++ IN gctUINT32 HiColor
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gcoSURF_Line(
++ IN gcoSURF Surface,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Generic rectangular blit. */
++gceSTATUS
++gcoSURF_Blit(
++ IN OPTIONAL gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN OPTIONAL gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN OPTIONAL gceSURF_TRANSPARENCY Transparency,
++ IN OPTIONAL gctUINT32 TransparencyColor,
++ IN OPTIONAL gctPOINTER Mask,
++ IN OPTIONAL gceSURF_MONOPACK MaskPack
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gcoSURF_MonoBlit(
++ IN gcoSURF DestSurface,
++ IN gctPOINTER Source,
++ IN gceSURF_MONOPACK SourcePack,
++ IN gcsPOINT_PTR SourceSize,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Filter blit. */
++gceSTATUS
++gcoSURF_FilterBlit(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gcoSURF_EnableAlphaBlend(
++ IN gcoSURF Surface,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gcoSURF_DisableAlphaBlend(
++ IN gcoSURF Surface
++ );
++
++/* Copy a rectangular area with format conversion. */
++gceSTATUS
++gcoSURF_CopyPixels(
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++/* Read surface pixel. */
++gceSTATUS
++gcoSURF_ReadPixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ OUT gctPOINTER PixelValue
++ );
++
++/* Write surface pixel. */
++gceSTATUS
++gcoSURF_WritePixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ IN gctPOINTER PixelValue
++ );
++
++gceSTATUS
++gcoSURF_SetDither(
++ IN gcoSURF Surface,
++ IN gctBOOL Dither
++ );
++
++gceSTATUS
++gcoSURF_Set2DSource(
++ gcoSURF Surface,
++ gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_Set2DTarget(
++ gcoSURF Surface,
++ gceSURF_ROTATION Rotation
++ );
++
++/******************************************************************************\
++********************************** gco2D Object *********************************
++\******************************************************************************/
++
++/* Construct a new gco2D object. */
++gceSTATUS
++gco2D_Construct(
++ IN gcoHAL Hal,
++ OUT gco2D * Hardware
++ );
++
++/* Destroy an gco2D object. */
++gceSTATUS
++gco2D_Destroy(
++ IN gco2D Hardware
++ );
++
++/* Sets the maximum number of brushes in the brush cache. */
++gceSTATUS
++gco2D_SetBrushLimit(
++ IN gco2D Hardware,
++ IN gctUINT MaxCount
++ );
++
++/* Flush the brush. */
++gceSTATUS
++gco2D_FlushBrush(
++ IN gco2D Engine,
++ IN gcoBRUSH Brush,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Program the specified solid color brush. */
++gceSTATUS
++gco2D_LoadSolidBrush(
++ IN gco2D Engine,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask
++ );
++
++/* Configure monochrome source. */
++gceSTATUS
++gco2D_SetMonochromeSource(
++ IN gco2D Engine,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_MONOPACK DataPack,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source extension for full rotation. */
++gceSTATUS
++gco2D_SetColorSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSourceAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative
++ );
++
++gceSTATUS
++gco2D_SetColorSourceN(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctUINT32 SurfaceNumber
++ );
++
++/* Configure masked color source. */
++gceSTATUS
++gco2D_SetMaskedSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack
++ );
++
++/* Configure masked color source extension for full rotation. */
++gceSTATUS
++gco2D_SetMaskedSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Setup the source rectangle. */
++gceSTATUS
++gco2D_SetSource(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect
++ );
++
++/* Set clipping rectangle. */
++gceSTATUS
++gco2D_SetClipping(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++/* Configure destination. */
++gceSTATUS
++gco2D_SetTarget(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth
++ );
++
++/* Configure destination extension for full rotation. */
++gceSTATUS
++gco2D_SetTargetEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Calculate and program the stretch factors. */
++gceSTATUS
++gco2D_CalcStretchFactor(
++ IN gco2D Engine,
++ IN gctINT32 SrcSize,
++ IN gctINT32 DestSize,
++ OUT gctUINT32_PTR Factor
++ );
++
++gceSTATUS
++gco2D_SetStretchFactors(
++ IN gco2D Engine,
++ IN gctUINT32 HorFactor,
++ IN gctUINT32 VerFactor
++ );
++
++/* Calculate and program the stretch factors based on the rectangles. */
++gceSTATUS
++gco2D_SetStretchRectFactors(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect
++ );
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructSingleColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gco2D_Clear(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gco2D_Line(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines based on the 32-bit color. */
++gceSTATUS
++gco2D_ColorLine(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Generic blit. */
++gceSTATUS
++gco2D_Blit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_Blend(
++ IN gco2D Engine,
++ IN gctUINT32 SrcCount,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Batch blit. */
++gceSTATUS
++gco2D_BatchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Stretch blit. */
++gceSTATUS
++gco2D_StretchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gco2D_MonoBlit(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gcsPOINT_PTR StreamSize,
++ IN gcsRECT_PTR StreamRect,
++ IN gceSURF_MONOPACK SrcStreamPack,
++ IN gceSURF_MONOPACK DestStreamPack,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 FgRop,
++ IN gctUINT32 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_MonoBlitEx(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gctINT32 StreamStride,
++ IN gctINT32 StreamWidth,
++ IN gctINT32 StreamHeight,
++ IN gctINT32 StreamX,
++ IN gctINT32 StreamY,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DstRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Set kernel size. */
++gceSTATUS
++gco2D_SetKernelSize(
++ IN gco2D Engine,
++ IN gctUINT8 HorKernelSize,
++ IN gctUINT8 VerKernelSize
++ );
++
++/* Set filter type. */
++gceSTATUS
++gco2D_SetFilterType(
++ IN gco2D Engine,
++ IN gceFILTER_TYPE FilterType
++ );
++
++/* Set the filter kernel by user. */
++gceSTATUS
++gco2D_SetUserFilterKernel(
++ IN gco2D Engine,
++ IN gceFILTER_PASS_TYPE PassType,
++ IN gctUINT16_PTR KernelArray
++ );
++
++/* Select the pass(es) to be done for user defined filter. */
++gceSTATUS
++gco2D_EnableUserFilterPasses(
++ IN gco2D Engine,
++ IN gctBOOL HorPass,
++ IN gctBOOL VerPass
++ );
++
++/* Frees the temporary buffer allocated by filter blit operation. */
++gceSTATUS
++gco2D_FreeFilterBuffer(
++ IN gco2D Engine
++ );
++
++/* Filter blit. */
++gceSTATUS
++gco2D_FilterBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Filter blit extension for full rotation. */
++gceSTATUS
++gco2D_FilterBlitEx(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++gceSTATUS
++gco2D_FilterBlitEx2(
++ IN gco2D Engine,
++ IN gctUINT32_PTR SrcAddresses,
++ IN gctUINT32 SrcAddressNum,
++ IN gctUINT32_PTR SrcStrides,
++ IN gctUINT32 SrcStrideNum,
++ IN gceTILING SrcTiling,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32_PTR DestAddresses,
++ IN gctUINT32 DestAddressNum,
++ IN gctUINT32_PTR DestStrides,
++ IN gctUINT32 DestStrideNum,
++ IN gceTILING DestTiling,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gco2D_EnableAlphaBlend(
++ IN gco2D Engine,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Enable alpha blending engine in the hardware. */
++gceSTATUS
++gco2D_EnableAlphaBlendAdvanced(
++ IN gco2D Engine,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode
++ );
++
++/* Enable alpha blending engine with Porter Duff rule. */
++gceSTATUS
++gco2D_SetPorterDuffBlending(
++ IN gco2D Engine,
++ IN gce2D_PORTER_DUFF_RULE Rule
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gco2D_DisableAlphaBlend(
++ IN gco2D Engine
++ );
++
++/* Retrieve the maximum number of 32-bit data chunks for a single DE command. */
++gctUINT32
++gco2D_GetMaximumDataCount(
++ void
++ );
++
++/* Retrieve the maximum number of rectangles, that can be passed in a single DE command. */
++gctUINT32
++gco2D_GetMaximumRectCount(
++ void
++ );
++
++/* Returns the pixel alignment of the surface. */
++gceSTATUS
++gco2D_GetPixelAlignment(
++ gceSURF_FORMAT Format,
++ gcsPOINT_PTR Alignment
++ );
++
++/* Retrieve monochrome stream pack size. */
++gceSTATUS
++gco2D_GetPackSize(
++ IN gceSURF_MONOPACK StreamPack,
++ OUT gctUINT32 * PackWidth,
++ OUT gctUINT32 * PackHeight
++ );
++
++/* Flush the 2D pipeline. */
++gceSTATUS
++gco2D_Flush(
++ IN gco2D Engine
++ );
++
++/* Load 256-entry color table for INDEX8 source surfaces. */
++gceSTATUS
++gco2D_LoadPalette(
++ IN gco2D Engine,
++ IN gctUINT FirstIndex,
++ IN gctUINT IndexCount,
++ IN gctPOINTER ColorTable,
++ IN gctBOOL ColorConvert
++ );
++
++/* Enable/disable 2D BitBlt mirrorring. */
++gceSTATUS
++gco2D_SetBitBlitMirror(
++ IN gco2D Engine,
++ IN gctBOOL HorizontalMirror,
++ IN gctBOOL VerticalMirror
++ );
++
++/*
++ * Set the transparency for source, destination and pattern.
++ * It also enable or disable the DFB color key mode.
++ */
++gceSTATUS
++gco2D_SetTransparencyAdvancedEx(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency,
++ IN gctBOOL EnableDFBColorKeyMode
++ );
++
++/* Set the transparency for source, destination and pattern. */
++gceSTATUS
++gco2D_SetTransparencyAdvanced(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency
++ );
++
++/* Set the source color key. */
++gceSTATUS
++gco2D_SetSourceColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the source color key range. */
++gceSTATUS
++gco2D_SetSourceColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the target color key. */
++gceSTATUS
++gco2D_SetTargetColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the target color key range. */
++gceSTATUS
++gco2D_SetTargetColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the YUV color space mode. */
++gceSTATUS
++gco2D_SetYUVColorMode(
++ IN gco2D Engine,
++ IN gce2D_YUV_COLOR_MODE Mode
++ );
++
++/* Setup the source global color value in ARGB8 format. */
++gceSTATUS gco2D_SetSourceGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the target global color value in ARGB8 format. */
++gceSTATUS gco2D_SetTargetGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the source and target pixel multiply modes. */
++gceSTATUS
++gco2D_SetPixelMultiplyModeAdvanced(
++ IN gco2D Engine,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE SrcPremultiplySrcAlpha,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstPremultiplyDstAlpha,
++ IN gce2D_GLOBAL_COLOR_MULTIPLY_MODE SrcPremultiplyGlobalMode,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstDemultiplyDstAlpha
++ );
++
++/* Set the GPU clock cycles after which the idle engine will keep auto-flushing. */
++gceSTATUS
++gco2D_SetAutoFlushCycles(
++ IN gco2D Engine,
++ IN gctUINT32 Cycles
++ );
++
++#if VIVANTE_PROFILER
++/* Read the profile registers available in the 2D engine and sets them in the profile.
++ The function will also reset the pixelsRendered counter every time.
++*/
++gceSTATUS
++gco2D_ProfileEngine(
++ IN gco2D Engine,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ );
++#endif
++
++/* Enable or disable 2D dithering. */
++gceSTATUS
++gco2D_EnableDither(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetGenericSource(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetGenericTarget(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetCurrentSourceIndex(
++ IN gco2D Engine,
++ IN gctUINT32 SrcIndex
++ );
++
++gceSTATUS
++gco2D_MultiSourceBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SourceMask,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 RectCount
++ );
++
++gceSTATUS
++gco2D_SetROP(
++ IN gco2D Engine,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++gceSTATUS
++gco2D_SetGdiStretchMode(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetSourceTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TSControl,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_SetTargetTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TileStatusConfig,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_QueryU32(
++ IN gco2D Engine,
++ IN gce2D_QUERY Item,
++ OUT gctUINT32_PTR Value
++ );
++
++gceSTATUS
++gco2D_SetStateU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32 Value
++ );
++
++gceSTATUS
++gco2D_SetStateArrayI32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetStateArrayU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetTargetRect(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++gceSTATUS
++gco2D_Set2DEngine(
++ IN gco2D Engine
++ );
++
++gceSTATUS
++gco2D_UnSet2DEngine(
++ IN gco2D Engine
++ );
++
++gceSTATUS
++gco2D_Get2DEngine(
++ OUT gco2D * Engine
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_raster_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_rename.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_rename.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_rename.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_rename.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,243 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_rename_h_
++#define __gc_hal_rename_h_
++
++
++#if defined(_HAL2D_APPENDIX)
++
++#define _HAL2D_RENAME_2(api, appendix) api ## appendix
++#define _HAL2D_RENAME_1(api, appendix) _HAL2D_RENAME_2(api, appendix)
++#define gcmHAL2D(api) _HAL2D_RENAME_1(api, _HAL2D_APPENDIX)
++
++
++#define gckOS_Construct gcmHAL2D(gckOS_Construct)
++#define gckOS_Destroy gcmHAL2D(gckOS_Destroy)
++#define gckOS_QueryVideoMemory gcmHAL2D(gckOS_QueryVideoMemory)
++#define gckOS_Allocate gcmHAL2D(gckOS_Allocate)
++#define gckOS_Free gcmHAL2D(gckOS_Free)
++#define gckOS_AllocateMemory gcmHAL2D(gckOS_AllocateMemory)
++#define gckOS_FreeMemory gcmHAL2D(gckOS_FreeMemory)
++#define gckOS_AllocatePagedMemory gcmHAL2D(gckOS_AllocatePagedMemory)
++#define gckOS_AllocatePagedMemoryEx gcmHAL2D(gckOS_AllocatePagedMemoryEx)
++#define gckOS_LockPages gcmHAL2D(gckOS_LockPages)
++#define gckOS_MapPages gcmHAL2D(gckOS_MapPages)
++#define gckOS_UnlockPages gcmHAL2D(gckOS_UnlockPages)
++#define gckOS_FreePagedMemory gcmHAL2D(gckOS_FreePagedMemory)
++#define gckOS_AllocateNonPagedMemory gcmHAL2D(gckOS_AllocateNonPagedMemory)
++#define gckOS_FreeNonPagedMemory gcmHAL2D(gckOS_FreeNonPagedMemory)
++#define gckOS_AllocateContiguous gcmHAL2D(gckOS_AllocateContiguous)
++#define gckOS_FreeContiguous gcmHAL2D(gckOS_FreeContiguous)
++#define gckOS_GetPageSize gcmHAL2D(gckOS_GetPageSize)
++#define gckOS_GetPhysicalAddress gcmHAL2D(gckOS_GetPhysicalAddress)
++#define gckOS_UserLogicalToPhysical gcmHAL2D(gckOS_UserLogicalToPhysical)
++#define gckOS_GetPhysicalAddressProcess gcmHAL2D(gckOS_GetPhysicalAddressProcess)
++#define gckOS_MapPhysical gcmHAL2D(gckOS_MapPhysical)
++#define gckOS_UnmapPhysical gcmHAL2D(gckOS_UnmapPhysical)
++#define gckOS_ReadRegister gcmHAL2D(gckOS_ReadRegister)
++#define gckOS_WriteRegister gcmHAL2D(gckOS_WriteRegister)
++#define gckOS_WriteMemory gcmHAL2D(gckOS_WriteMemory)
++#define gckOS_MapMemory gcmHAL2D(gckOS_MapMemory)
++#define gckOS_UnmapMemory gcmHAL2D(gckOS_UnmapMemory)
++#define gckOS_UnmapMemoryEx gcmHAL2D(gckOS_UnmapMemoryEx)
++#define gckOS_CreateMutex gcmHAL2D(gckOS_CreateMutex)
++#define gckOS_DeleteMutex gcmHAL2D(gckOS_DeleteMutex)
++#define gckOS_AcquireMutex gcmHAL2D(gckOS_AcquireMutex)
++#define gckOS_ReleaseMutex gcmHAL2D(gckOS_ReleaseMutex)
++#define gckOS_AtomicExchange gcmHAL2D(gckOS_AtomicExchange)
++#define gckOS_AtomicExchangePtr gcmHAL2D(gckOS_AtomicExchangePtr)
++#define gckOS_AtomConstruct gcmHAL2D(gckOS_AtomConstruct)
++#define gckOS_AtomDestroy gcmHAL2D(gckOS_AtomDestroy)
++#define gckOS_AtomGet gcmHAL2D(gckOS_AtomGet)
++#define gckOS_AtomIncrement gcmHAL2D(gckOS_AtomIncrement)
++#define gckOS_AtomDecrement gcmHAL2D(gckOS_AtomDecrement)
++#define gckOS_Delay gcmHAL2D(gckOS_Delay)
++#define gckOS_GetTime gcmHAL2D(gckOS_GetTime)
++#define gckOS_MemoryBarrier gcmHAL2D(gckOS_MemoryBarrier)
++#define gckOS_MapUserPointer gcmHAL2D(gckOS_MapUserPointer)
++#define gckOS_UnmapUserPointer gcmHAL2D(gckOS_UnmapUserPointer)
++#define gckOS_QueryNeedCopy gcmHAL2D(gckOS_QueryNeedCopy)
++#define gckOS_CopyFromUserData gcmHAL2D(gckOS_CopyFromUserData)
++#define gckOS_CopyToUserData gcmHAL2D(gckOS_CopyToUserData)
++#define gckOS_SuspendInterrupt gcmHAL2D(gckOS_SuspendInterrupt)
++#define gckOS_ResumeInterrupt gcmHAL2D(gckOS_ResumeInterrupt)
++#define gckOS_GetBaseAddress gcmHAL2D(gckOS_GetBaseAddress)
++#define gckOS_MemCopy gcmHAL2D(gckOS_MemCopy)
++#define gckOS_ZeroMemory gcmHAL2D(gckOS_ZeroMemory)
++#define gckOS_DeviceControl gcmHAL2D(gckOS_DeviceControl)
++#define gckOS_GetProcessID gcmHAL2D(gckOS_GetProcessID)
++#define gckOS_GetThreadID gcmHAL2D(gckOS_GetThreadID)
++#define gckOS_CreateSignal gcmHAL2D(gckOS_CreateSignal)
++#define gckOS_DestroySignal gcmHAL2D(gckOS_DestroySignal)
++#define gckOS_Signal gcmHAL2D(gckOS_Signal)
++#define gckOS_WaitSignal gcmHAL2D(gckOS_WaitSignal)
++#define gckOS_MapSignal gcmHAL2D(gckOS_MapSignal)
++#define gckOS_MapUserMemory gcmHAL2D(gckOS_MapUserMemory)
++#define gckOS_UnmapUserMemory gcmHAL2D(gckOS_UnmapUserMemory)
++#define gckOS_CreateUserSignal gcmHAL2D(gckOS_CreateUserSignal)
++#define gckOS_DestroyUserSignal gcmHAL2D(gckOS_DestroyUserSignal)
++#define gckOS_WaitUserSignal gcmHAL2D(gckOS_WaitUserSignal)
++#define gckOS_SignalUserSignal gcmHAL2D(gckOS_SignalUserSignal)
++#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal)
++#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal)
++#define gckOS_CacheClean gcmHAL2D(gckOS_CacheClean)
++#define gckOS_CacheFlush gcmHAL2D(gckOS_CacheFlush)
++#define gckOS_SetDebugLevel gcmHAL2D(gckOS_SetDebugLevel)
++#define gckOS_SetDebugZone gcmHAL2D(gckOS_SetDebugZone)
++#define gckOS_SetDebugLevelZone gcmHAL2D(gckOS_SetDebugLevelZone)
++#define gckOS_SetDebugZones gcmHAL2D(gckOS_SetDebugZones)
++#define gckOS_SetDebugFile gcmHAL2D(gckOS_SetDebugFile)
++#define gckOS_Broadcast gcmHAL2D(gckOS_Broadcast)
++#define gckOS_SetGPUPower gcmHAL2D(gckOS_SetGPUPower)
++#define gckOS_CreateSemaphore gcmHAL2D(gckOS_CreateSemaphore)
++#define gckOS_DestroySemaphore gcmHAL2D(gckOS_DestroySemaphore)
++#define gckOS_AcquireSemaphore gcmHAL2D(gckOS_AcquireSemaphore)
++#define gckOS_ReleaseSemaphore gcmHAL2D(gckOS_ReleaseSemaphore)
++#define gckHEAP_Construct gcmHAL2D(gckHEAP_Construct)
++#define gckHEAP_Destroy gcmHAL2D(gckHEAP_Destroy)
++#define gckHEAP_Allocate gcmHAL2D(gckHEAP_Allocate)
++#define gckHEAP_Free gcmHAL2D(gckHEAP_Free)
++#define gckHEAP_ProfileStart gcmHAL2D(gckHEAP_ProfileStart)
++#define gckHEAP_ProfileEnd gcmHAL2D(gckHEAP_ProfileEnd)
++#define gckHEAP_Test gcmHAL2D(gckHEAP_Test)
++#define gckVIDMEM_Construct gcmHAL2D(gckVIDMEM_Construct)
++#define gckVIDMEM_Destroy gcmHAL2D(gckVIDMEM_Destroy)
++#define gckVIDMEM_Allocate gcmHAL2D(gckVIDMEM_Allocate)
++#define gckVIDMEM_AllocateLinear gcmHAL2D(gckVIDMEM_AllocateLinear)
++#define gckVIDMEM_Free gcmHAL2D(gckVIDMEM_Free)
++#define gckVIDMEM_Lock gcmHAL2D(gckVIDMEM_Lock)
++#define gckVIDMEM_Unlock gcmHAL2D(gckVIDMEM_Unlock)
++#define gckVIDMEM_ConstructVirtual gcmHAL2D(gckVIDMEM_ConstructVirtual)
++#define gckVIDMEM_DestroyVirtual gcmHAL2D(gckVIDMEM_DestroyVirtual)
++#define gckKERNEL_Construct gcmHAL2D(gckKERNEL_Construct)
++#define gckKERNEL_Destroy gcmHAL2D(gckKERNEL_Destroy)
++#define gckKERNEL_Dispatch gcmHAL2D(gckKERNEL_Dispatch)
++#define gckKERNEL_QueryVideoMemory gcmHAL2D(gckKERNEL_QueryVideoMemory)
++#define gckKERNEL_GetVideoMemoryPool gcmHAL2D(gckKERNEL_GetVideoMemoryPool)
++#define gckKERNEL_MapVideoMemory gcmHAL2D(gckKERNEL_MapVideoMemory)
++#define gckKERNEL_UnmapVideoMemory gcmHAL2D(gckKERNEL_UnmapVideoMemory)
++#define gckKERNEL_MapMemory gcmHAL2D(gckKERNEL_MapMemory)
++#define gckKERNEL_UnmapMemory gcmHAL2D(gckKERNEL_UnmapMemory)
++#define gckKERNEL_Notify gcmHAL2D(gckKERNEL_Notify)
++#define gckKERNEL_QuerySettings gcmHAL2D(gckKERNEL_QuerySettings)
++#define gckKERNEL_Recovery gcmHAL2D(gckKERNEL_Recovery)
++#define gckKERNEL_OpenUserData gcmHAL2D(gckKERNEL_OpenUserData)
++#define gckKERNEL_CloseUserData gcmHAL2D(gckKERNEL_CloseUserData)
++#define gckHARDWARE_Construct gcmHAL2D(gckHARDWARE_Construct)
++#define gckHARDWARE_Destroy gcmHAL2D(gckHARDWARE_Destroy)
++#define gckHARDWARE_QuerySystemMemory gcmHAL2D(gckHARDWARE_QuerySystemMemory)
++#define gckHARDWARE_BuildVirtualAddress gcmHAL2D(gckHARDWARE_BuildVirtualAddress)
++#define gckHARDWARE_QueryCommandBuffer gcmHAL2D(gckHARDWARE_QueryCommandBuffer)
++#define gckHARDWARE_WaitLink gcmHAL2D(gckHARDWARE_WaitLink)
++#define gckHARDWARE_Execute gcmHAL2D(gckHARDWARE_Execute)
++#define gckHARDWARE_End gcmHAL2D(gckHARDWARE_End)
++#define gckHARDWARE_Nop gcmHAL2D(gckHARDWARE_Nop)
++#define gckHARDWARE_PipeSelect gcmHAL2D(gckHARDWARE_PipeSelect)
++#define gckHARDWARE_Link gcmHAL2D(gckHARDWARE_Link)
++#define gckHARDWARE_Event gcmHAL2D(gckHARDWARE_Event)
++#define gckHARDWARE_QueryMemory gcmHAL2D(gckHARDWARE_QueryMemory)
++#define gckHARDWARE_QueryChipIdentity gcmHAL2D(gckHARDWARE_QueryChipIdentity)
++#define gckHARDWARE_QueryChipSpecs gcmHAL2D(gckHARDWARE_QueryChipSpecs)
++#define gckHARDWARE_QueryShaderCaps gcmHAL2D(gckHARDWARE_QueryShaderCaps)
++#define gckHARDWARE_ConvertFormat gcmHAL2D(gckHARDWARE_ConvertFormat)
++#define gckHARDWARE_SplitMemory gcmHAL2D(gckHARDWARE_SplitMemory)
++#define gckHARDWARE_AlignToTile gcmHAL2D(gckHARDWARE_AlignToTile)
++#define gckHARDWARE_UpdateQueueTail gcmHAL2D(gckHARDWARE_UpdateQueueTail)
++#define gckHARDWARE_ConvertLogical gcmHAL2D(gckHARDWARE_ConvertLogical)
++#define gckHARDWARE_Interrupt gcmHAL2D(gckHARDWARE_Interrupt)
++#define gckHARDWARE_SetMMU gcmHAL2D(gckHARDWARE_SetMMU)
++#define gckHARDWARE_FlushMMU gcmHAL2D(gckHARDWARE_FlushMMU)
++#define gckHARDWARE_GetIdle gcmHAL2D(gckHARDWARE_GetIdle)
++#define gckHARDWARE_Flush gcmHAL2D(gckHARDWARE_Flush)
++#define gckHARDWARE_SetFastClear gcmHAL2D(gckHARDWARE_SetFastClear)
++#define gckHARDWARE_ReadInterrupt gcmHAL2D(gckHARDWARE_ReadInterrupt)
++#define gckHARDWARE_SetPowerManagementState gcmHAL2D(gckHARDWARE_SetPowerManagementState)
++#define gckHARDWARE_QueryPowerManagementState gcmHAL2D(gckHARDWARE_QueryPowerManagementState)
++#define gckHARDWARE_ProfileEngine2D gcmHAL2D(gckHARDWARE_ProfileEngine2D)
++#define gckHARDWARE_InitializeHardware gcmHAL2D(gckHARDWARE_InitializeHardware)
++#define gckHARDWARE_Reset gcmHAL2D(gckHARDWARE_Reset)
++#define gckINTERRUPT_Construct gcmHAL2D(gckINTERRUPT_Construct)
++#define gckINTERRUPT_Destroy gcmHAL2D(gckINTERRUPT_Destroy)
++#define gckINTERRUPT_SetHandler gcmHAL2D(gckINTERRUPT_SetHandler)
++#define gckINTERRUPT_Notify gcmHAL2D(gckINTERRUPT_Notify)
++#define gckEVENT_Construct gcmHAL2D(gckEVENT_Construct)
++#define gckEVENT_Destroy gcmHAL2D(gckEVENT_Destroy)
++#define gckEVENT_AddList gcmHAL2D(gckEVENT_AddList)
++#define gckEVENT_FreeNonPagedMemory gcmHAL2D(gckEVENT_FreeNonPagedMemory)
++#define gckEVENT_FreeContiguousMemory gcmHAL2D(gckEVENT_FreeContiguousMemory)
++#define gckEVENT_FreeVideoMemory gcmHAL2D(gckEVENT_FreeVideoMemory)
++#define gckEVENT_Signal gcmHAL2D(gckEVENT_Signal)
++#define gckEVENT_Unlock gcmHAL2D(gckEVENT_Unlock)
++#define gckEVENT_Submit gcmHAL2D(gckEVENT_Submit)
++#define gckEVENT_Commit gcmHAL2D(gckEVENT_Commit)
++#define gckEVENT_Notify gcmHAL2D(gckEVENT_Notify)
++#define gckEVENT_Interrupt gcmHAL2D(gckEVENT_Interrupt)
++#define gckCOMMAND_Construct gcmHAL2D(gckCOMMAND_Construct)
++#define gckCOMMAND_Destroy gcmHAL2D(gckCOMMAND_Destroy)
++#define gckCOMMAND_EnterCommit gcmHAL2D(gckCOMMAND_EnterCommit)
++#define gckCOMMAND_ExitCommit gcmHAL2D(gckCOMMAND_ExitCommit)
++#define gckCOMMAND_Start gcmHAL2D(gckCOMMAND_Start)
++#define gckCOMMAND_Stop gcmHAL2D(gckCOMMAND_Stop)
++#define gckCOMMAND_Commit gcmHAL2D(gckCOMMAND_Commit)
++#define gckCOMMAND_Reserve gcmHAL2D(gckCOMMAND_Reserve)
++#define gckCOMMAND_Execute gcmHAL2D(gckCOMMAND_Execute)
++#define gckCOMMAND_Stall gcmHAL2D(gckCOMMAND_Stall)
++#define gckCOMMAND_Attach gcmHAL2D(gckCOMMAND_Attach)
++#define gckCOMMAND_Detach gcmHAL2D(gckCOMMAND_Detach)
++#define gckMMU_Construct gcmHAL2D(gckMMU_Construct)
++#define gckMMU_Destroy gcmHAL2D(gckMMU_Destroy)
++#define gckMMU_AllocatePages gcmHAL2D(gckMMU_AllocatePages)
++#define gckMMU_FreePages gcmHAL2D(gckMMU_FreePages)
++#define gckMMU_Test gcmHAL2D(gckMMU_Test)
++#define gckHARDWARE_QueryProfileRegisters gcmHAL2D(gckHARDWARE_QueryProfileRegisters)
++
++
++#define FindMdlMap gcmHAL2D(FindMdlMap)
++#define OnProcessExit gcmHAL2D(OnProcessExit)
++
++#define gckGALDEVICE_Destroy gcmHAL2D(gckGALDEVICE_Destroy)
++#define gckOS_Print gcmHAL2D(gckOS_Print)
++#define gckGALDEVICE_FreeMemory gcmHAL2D(gckGALDEVICE_FreeMemory)
++#define gckGALDEVICE_AllocateMemory gcmHAL2D(gckGALDEVICE_AllocateMemory)
++#define gckOS_DebugBreak gcmHAL2D(gckOS_DebugBreak)
++#define gckGALDEVICE_Release_ISR gcmHAL2D(gckGALDEVICE_Release_ISR)
++#define gckOS_Verify gcmHAL2D(gckOS_Verify)
++#define gckCOMMAND_Release gcmHAL2D(gckCOMMAND_Release)
++#define gckGALDEVICE_Stop gcmHAL2D(gckGALDEVICE_Stop)
++#define gckGALDEVICE_Construct gcmHAL2D(gckGALDEVICE_Construct)
++#define gckOS_DebugFatal gcmHAL2D(gckOS_DebugFatal)
++#define gckOS_DebugTrace gcmHAL2D(gckOS_DebugTrace)
++#define gckHARDWARE_GetBaseAddress gcmHAL2D(gckHARDWARE_GetBaseAddress)
++#define gckGALDEVICE_Setup_ISR gcmHAL2D(gckGALDEVICE_Setup_ISR)
++#define gckKERNEL_AttachProcess gcmHAL2D(gckKERNEL_AttachProcess)
++#define gckKERNEL_AttachProcessEx gcmHAL2D(gckKERNEL_AttachProcessEx)
++#define gckGALDEVICE_Start_Thread gcmHAL2D(gckGALDEVICE_Start_Thread)
++#define gckHARDWARE_QueryIdle gcmHAL2D(gckHARDWARE_QueryIdle)
++#define gckGALDEVICE_Start gcmHAL2D(gckGALDEVICE_Start)
++#define gckOS_GetKernelLogical gcmHAL2D(gckOS_GetKernelLogical)
++#define gckOS_DebugTraceZone gcmHAL2D(gckOS_DebugTraceZone)
++#define gckGALDEVICE_Stop_Thread gcmHAL2D(gckGALDEVICE_Stop_Thread)
++#define gckHARDWARE_NeedBaseAddress gcmHAL2D(gckHARDWARE_NeedBaseAddress)
++
++#endif
++
++#endif /* __gc_hal_rename_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_security_interface.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_security_interface.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_security_interface.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_security_interface.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,137 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef _GC_HAL_SECURITY_INTERFACE_H_
++#define _GC_HAL_SECURITY_INTERFACE_H_
++/*!
++ @brief Command codes between kernel module and TrustZone
++ @discussion
++ Critical services must be done in TrustZone to avoid sensitive content leak. Most of kernel module is kept in non-Secure os to minimize
++ code in TrustZone.
++ */
++typedef enum kernel_packet_command {
++ KERNEL_START_COMMAND,
++ KERNEL_SUBMIT,
++ KERNEL_MAP_MEMORY, /* */
++ KERNEL_UNMAP_MEMORY,
++ KERNEL_ALLOCATE_SECRUE_MEMORY, /*! Security memory management. */
++ KERNEL_FREE_SECURE_MEMORY,
++ KERNEL_EXECUTE, /* Execute a command buffer. */
++} kernel_packet_command_t;
++
++/*!
++ @brief gckCOMMAND Object requests TrustZone to start FE.
++ @discussion
++ DMA enabled register can only be written in TrustZone to avoid GPU from jumping to a hacked code.
++ Kernel module need use these command to ask TrustZone start command parser.
++ */
++struct kernel_start_command {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++};
++
++/*!
++ @brief gckCOMMAND Object requests TrustZone to submit command buffer.
++ @discussion
++ Code in trustzone will check content of command buffer after copying command buffer to TrustZone.
++ */
++struct kernel_submit {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++ gctUINT8 kernel_command; /*! Whether it is a kernel command. */
++ gctUINT32 command_buffer_handle; /*! Handle to command buffer. */
++ gctUINT32 offset; /* Offset in command buffer. */
++ gctUINT32 * command_buffer; /*! Content of command buffer need to be submit. */
++ gctUINT32 command_buffer_length; /*! Length of command buffer. */
++};
++
++
++/*!
++ @brief gckVIDMEM Object requests TrustZone to allocate security memory.
++ @discussion
++ Allocate a buffer from security GPU memory.
++ */
++struct kernel_allocate_security_memory {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT32 bytes; /*! Requested bytes. */
++ gctUINT32 memory_handle; /*! Handle of allocated memory. */
++};
++
++/*!
++ @brief gckVIDMEM Object requests TrustZone to allocate security memory.
++ @discussion
++ Free a video memory buffer from security GPU memory.
++ */
++struct kernel_free_security_memory {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT32 memory_handle; /*! Handle of allocated memory. */
++};
++
++struct kernel_execute {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++ gctUINT8 kernel_command; /*! Whether it is a kernel command. */
++ gctUINT32 * command_buffer; /*! Content of command buffer need to be submit. */
++ gctUINT32 command_buffer_length; /*! Length of command buffer. */
++};
++
++typedef struct kernel_map_scatter_gather {
++ gctUINT32 bytes;
++ gctUINT32 physical;
++ struct kernel_map_scatter_gather *next;
++}
++kernel_map_scatter_gather_t;
++
++struct kernel_map_memory {
++ kernel_packet_command_t command;
++ kernel_map_scatter_gather_t *scatter;
++ gctUINT32 *physicals;
++ gctUINT32 pageCount;
++ gctUINT32 gpuAddress;
++};
++
++struct kernel_unmap_memory {
++ gctUINT32 gpuAddress;
++ gctUINT32 pageCount;
++};
++
++typedef struct _gcsTA_INTERFACE {
++ kernel_packet_command_t command;
++ union {
++ struct kernel_submit Submit;
++ struct kernel_start_command StartCommand;
++ struct kernel_allocate_security_memory AllocateSecurityMemory;
++ struct kernel_execute Execute;
++ struct kernel_map_memory MapMemory;
++ struct kernel_unmap_memory UnmapMemory;
++ } u;
++ gceSTATUS result;
++} gcsTA_INTERFACE;
++
++enum {
++ gcvTA_COMMAND_INIT,
++ gcvTA_COMMAND_DISPATCH,
++
++ gcvTA_CALLBACK_ALLOC_SECURE_MEM,
++ gcvTA_CALLBACK_FREE_SECURE_MEM,
++};
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_statistics.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_statistics.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_statistics.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_statistics.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,99 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_statistics_h_
++#define __gc_hal_statistics_h_
++
++
++#define VIV_STAT_ENABLE_STATISTICS 0
++
++/* Toal number of frames for which the frame time is accounted. We have storage
++ to keep frame times for last this many frames.
++*/
++#define VIV_STAT_FRAME_BUFFER_SIZE 30
++
++
++/*
++ Total number of frames sampled for a mode. This means
++
++ # of frames for HZ Current : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ # of frames for HZ Switched : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ +
++ --------------------------------------------------------
++ : (2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES) frames needed
++
++ IMPORTANT: This total must be smaller than VIV_STAT_FRAME_BUFFER_SIZE
++*/
++#define VIV_STAT_EARLY_Z_SAMPLE_FRAMES 7
++#define VIV_STAT_EARLY_Z_LATENCY_FRAMES 2
++
++/* Multiplication factor for previous Hz off mode. Make it more than 1.0 to advertise HZ on.*/
++#define VIV_STAT_EARLY_Z_FACTOR (1.05f)
++
++/* Defines the statistical data keys monitored by the statistics module */
++typedef enum _gceSTATISTICS
++{
++ gcvFRAME_FPS = 1,
++}
++gceSTATISTICS;
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS_EARLYZ
++{
++ gctUINT switchBackCount;
++ gctUINT nextCheckPoint;
++ gctBOOL disabled;
++}
++gcsSTATISTICS_EARLYZ;
++
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS
++{
++ gctUINT64 frameTime[VIV_STAT_FRAME_BUFFER_SIZE];
++ gctUINT64 previousFrameTime;
++ gctUINT frame;
++ gcsSTATISTICS_EARLYZ earlyZ;
++}
++gcsSTATISTICS;
++
++
++/* Add a frame based data into current statistics. */
++void
++gcfSTATISTICS_AddData(
++ IN gceSTATISTICS Key,
++ IN gctUINT Value
++ );
++
++/* Marks the frame end and triggers statistical calculations and decisions.*/
++void
++gcfSTATISTICS_MarkFrameEnd (
++ void
++ );
++
++/* Sets whether the dynmaic HZ is disabled or not .*/
++void
++gcfSTATISTICS_DisableDynamicEarlyZ (
++ IN gctBOOL Disabled
++ );
++
++#endif /*__gc_hal_statistics_h_ */
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_types.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_types.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_types.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_types.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,932 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_types_h_
++#define __gc_hal_types_h_
++
++#include "gc_hal_version.h"
++#include "gc_hal_options.h"
++
++#if !defined(VIV_KMD)
++#if defined(__KERNEL__)
++#include "linux/version.h"
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++ typedef unsigned long uintptr_t;
++# endif
++# include "linux/types.h"
++#elif defined(UNDER_CE)
++#include <crtdefs.h>
++#elif defined(_MSC_VER) && (_MSC_VER <= 1500)
++#include <crtdefs.h>
++#include "vadefs.h"
++#elif defined(__QNXNTO__)
++#define _QNX_SOURCE
++#include <stdint.h>
++#include <stddef.h>
++#else
++#include <stdlib.h>
++#include <stddef.h>
++#include <stdint.h>
++#endif
++#endif
++
++#ifdef _WIN32
++#pragma warning(disable:4127) /* Conditional expression is constant (do { }
++ ** while(0)). */
++#pragma warning(disable:4100) /* Unreferenced formal parameter. */
++#pragma warning(disable:4204) /* Non-constant aggregate initializer (C99). */
++#pragma warning(disable:4131) /* Uses old-style declarator (for Bison and
++ ** Flex generated files). */
++#pragma warning(disable:4206) /* Translation unit is empty. */
++#pragma warning(disable:4214) /* Nonstandard extension used :
++ ** bit field types other than int. */
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++** Platform macros.
++*/
++
++#if defined(__GNUC__)
++# define gcdHAS_ELLIPSIS 1 /* GCC always has it. */
++#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
++# define gcdHAS_ELLIPSIS 1 /* C99 has it. */
++#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
++# define gcdHAS_ELLIPSIS 1 /* MSVC 2007+ has it. */
++#elif defined(UNDER_CE)
++#if UNDER_CE >= 600
++# define gcdHAS_ELLIPSIS 1
++# else
++# define gcdHAS_ELLIPSIS 0
++# endif
++#else
++# error "gcdHAS_ELLIPSIS: Platform could not be determined"
++#endif
++
++/******************************************************************************\
++************************************ Keyword ***********************************
++\******************************************************************************/
++#if defined(ANDROID) && defined(__BIONIC_FORTIFY)
++# define gcmINLINE __inline__ __attribute__ ((always_inline)) __attribute__ ((gnu_inline)) __attribute__ ((artificial))
++#elif ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || defined(__APPLE__))
++# define gcmINLINE inline /* C99 keyword. */
++#elif defined(__GNUC__)
++# define gcmINLINE __inline__ /* GNU keyword. */
++#elif defined(_MSC_VER) || defined(UNDER_CE)
++# define gcmINLINE __inline /* Internal keyword. */
++#else
++# error "gcmINLINE: Platform could not be determined"
++#endif
++
++/* Possible debug flags. */
++#define gcdDEBUG_NONE 0
++#define gcdDEBUG_ALL (1 << 0)
++#define gcdDEBUG_FATAL (1 << 1)
++#define gcdDEBUG_TRACE (1 << 2)
++#define gcdDEBUG_BREAK (1 << 3)
++#define gcdDEBUG_ASSERT (1 << 4)
++#define gcdDEBUG_CODE (1 << 5)
++#define gcdDEBUG_STACK (1 << 6)
++
++#define gcmIS_DEBUG(flag) ( gcdDEBUG & (flag | gcdDEBUG_ALL) )
++
++#ifndef gcdDEBUG
++#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG)
++# define gcdDEBUG gcdDEBUG_ALL
++# else
++# define gcdDEBUG gcdDEBUG_NONE
++# endif
++#endif
++
++#ifdef _USRDLL
++#ifdef _MSC_VER
++#ifdef HAL_EXPORTS
++# define HALAPI __declspec(dllexport)
++# else
++# define HALAPI __declspec(dllimport)
++# endif
++# define HALDECL __cdecl
++# else
++#ifdef HAL_EXPORTS
++# define HALAPI
++# else
++# define HALAPI extern
++# endif
++# endif
++#else
++# define HALAPI
++# define HALDECL
++#endif
++
++/******************************************************************************\
++********************************** Common Types ********************************
++\******************************************************************************/
++
++#define gcvFALSE 0
++#define gcvTRUE 1
++
++#define gcvINFINITE ((gctUINT32) ~0U)
++
++#define gcvINVALID_HANDLE ((gctHANDLE) ~0U)
++
++typedef int gctBOOL;
++typedef gctBOOL * gctBOOL_PTR;
++
++typedef int gctINT;
++typedef signed char gctINT8;
++typedef signed short gctINT16;
++typedef signed int gctINT32;
++typedef signed long long gctINT64;
++
++typedef gctINT * gctINT_PTR;
++typedef gctINT8 * gctINT8_PTR;
++typedef gctINT16 * gctINT16_PTR;
++typedef gctINT32 * gctINT32_PTR;
++typedef gctINT64 * gctINT64_PTR;
++
++typedef unsigned int gctUINT;
++typedef unsigned char gctUINT8;
++typedef unsigned short gctUINT16;
++typedef unsigned int gctUINT32;
++typedef unsigned long long gctUINT64;
++typedef uintptr_t gctUINTPTR_T;
++
++typedef gctUINT * gctUINT_PTR;
++typedef gctUINT8 * gctUINT8_PTR;
++typedef gctUINT16 * gctUINT16_PTR;
++typedef gctUINT32 * gctUINT32_PTR;
++typedef gctUINT64 * gctUINT64_PTR;
++
++typedef size_t gctSIZE_T;
++typedef gctSIZE_T * gctSIZE_T_PTR;
++typedef gctUINT32 gctTRACE;
++
++#ifdef __cplusplus
++# define gcvNULL 0
++#else
++# define gcvNULL ((void *) 0)
++#endif
++
++#define gcvMAXINT8 0x7f
++#define gcvMININT8 0x80
++#define gcvMAXINT16 0x7fff
++#define gcvMININT16 0x8000
++#define gcvMAXINT32 0x7fffffff
++#define gcvMININT32 0x80000000
++#define gcvMAXINT64 0x7fffffffffffffff
++#define gcvMININT64 0x8000000000000000
++#define gcvMAXUINT8 0xff
++#define gcvMINUINT8 0x0
++#define gcvMAXUINT16 0xffff
++#define gcvMINUINT16 0x8000
++#define gcvMAXUINT32 0xffffffff
++#define gcvMINUINT32 0x80000000
++#define gcvMAXUINT64 0xffffffffffffffff
++#define gcvMINUINT64 0x8000000000000000
++#define gcvMAXUINTPTR_T (~(gctUINTPTR_T)0)
++
++typedef float gctFLOAT;
++typedef signed int gctFIXED_POINT;
++typedef float * gctFLOAT_PTR;
++
++typedef void * gctPHYS_ADDR;
++typedef void * gctHANDLE;
++typedef void * gctFILE;
++typedef void * gctSIGNAL;
++typedef void * gctWINDOW;
++typedef void * gctIMAGE;
++typedef void * gctSYNC_POINT;
++typedef void * gctSHBUF;
++
++typedef void * gctSEMAPHORE;
++
++typedef void * gctPOINTER;
++typedef const void * gctCONST_POINTER;
++
++typedef char gctCHAR;
++typedef char * gctSTRING;
++typedef const char * gctCONST_STRING;
++
++typedef struct _gcsCOUNT_STRING
++{
++ gctSIZE_T Length;
++ gctCONST_STRING String;
++}
++gcsCOUNT_STRING;
++
++typedef union _gcuFLOAT_UINT32
++{
++ gctFLOAT f;
++ gctUINT32 u;
++}
++gcuFLOAT_UINT32;
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
++
++
++
++#define gcmFIXEDCLAMP_NEG1_TO_1(_x) \
++ (((_x) < gcvNEGONE_X) \
++ ? gcvNEGONE_X \
++ : (((_x) > gcvONE_X) \
++ ? gcvONE_X \
++ : (_x)))
++
++#define gcmFLOATCLAMP_NEG1_TO_1(_f) \
++ (((_f) < -1.0f) \
++ ? -1.0f \
++ : (((_f) > 1.0f) \
++ ? 1.0f \
++ : (_f)))
++
++
++#define gcmFIXEDCLAMP_0_TO_1(_x) \
++ (((_x) < 0) \
++ ? 0 \
++ : (((_x) > gcvONE_X) \
++ ? gcvONE_X \
++ : (_x)))
++
++#define gcmFLOATCLAMP_0_TO_1(_f) \
++ (((_f) < 0.0f) \
++ ? 0.0f \
++ : (((_f) > 1.0f) \
++ ? 1.0f \
++ : (_f)))
++
++
++/******************************************************************************\
++******************************* Multicast Values *******************************
++\******************************************************************************/
++
++/* Value types. */
++typedef enum _gceVALUE_TYPE
++{
++ gcvVALUE_UINT = 0x0,
++ gcvVALUE_FIXED,
++ gcvVALUE_FLOAT,
++ gcvVALUE_INT,
++
++ /*
++ ** The value need be unsigned denormalized. clamp (0.0-1.0) should be done first.
++ */
++ gcvVALUE_FLAG_UNSIGNED_DENORM = 0x00010000,
++
++ /*
++ ** The value need be signed denormalized. clamp (-1.0-1.0) should be done first.
++ */
++ gcvVALUE_FLAG_SIGNED_DENORM = 0x00020000,
++
++ /*
++ ** The value need to gammar
++ */
++ gcvVALUE_FLAG_GAMMAR = 0x00040000,
++
++ /*
++ ** The value need to convert from float to float16
++ */
++ gcvVALUE_FLAG_FLOAT_TO_FLOAT16 = 0x0080000,
++
++ /*
++ ** Mask for flag field.
++ */
++ gcvVALUE_FLAG_MASK = 0xFFFF0000,
++}
++gceVALUE_TYPE;
++
++/* Value unions. */
++typedef union _gcuVALUE
++{
++ gctUINT uintValue;
++ gctFIXED_POINT fixedValue;
++ gctFLOAT floatValue;
++ gctINT intValue;
++}
++gcuVALUE;
++
++
++
++
++/* Stringizing macro. */
++#define gcmSTRING(Value) #Value
++
++/******************************************************************************\
++******************************* Fixed Point Math *******************************
++\******************************************************************************/
++
++#define gcmXMultiply(x1, x2) gcoMATH_MultiplyFixed(x1, x2)
++#define gcmXDivide(x1, x2) gcoMATH_DivideFixed(x1, x2)
++#define gcmXMultiplyDivide(x1, x2, x3) gcoMATH_MultiplyDivideFixed(x1, x2, x3)
++
++/* 2D Engine profile. */
++typedef struct _gcs2D_PROFILE
++{
++ /* Cycle count.
++ 32bit counter incremented every 2D clock cycle.
++ Wraps back to 0 when the counter overflows.
++ */
++ gctUINT32 cycleCount;
++
++ /* Pixels rendered by the 2D engine.
++ Resets to 0 every time it is read. */
++ gctUINT32 pixelsRendered;
++}
++gcs2D_PROFILE;
++
++/* Macro to combine four characters into a Charcater Code. */
++#define gcmCC(c1, c2, c3, c4) \
++( \
++ (char) (c1) \
++ | \
++ ((char) (c2) << 8) \
++ | \
++ ((char) (c3) << 16) \
++ | \
++ ((char) (c4) << 24) \
++)
++
++#define gcmPRINTABLE(c) ((((c) >= ' ') && ((c) <= '}')) ? ((c) != '%' ? (c) : ' ') : ' ')
++
++#define gcmCC_PRINT(cc) \
++ gcmPRINTABLE((char) ( (cc) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 8) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 16) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 24) & 0xFF))
++
++/******************************************************************************\
++****************************** Function Parameters *****************************
++\******************************************************************************/
++
++#define IN
++#define OUT
++#define INOUT
++#define OPTIONAL
++
++/******************************************************************************\
++********************************* Status Codes *********************************
++\******************************************************************************/
++
++typedef enum _gceSTATUS
++{
++ gcvSTATUS_OK = 0,
++ gcvSTATUS_FALSE = 0,
++ gcvSTATUS_TRUE = 1,
++ gcvSTATUS_NO_MORE_DATA = 2,
++ gcvSTATUS_CACHED = 3,
++ gcvSTATUS_MIPMAP_TOO_LARGE = 4,
++ gcvSTATUS_NAME_NOT_FOUND = 5,
++ gcvSTATUS_NOT_OUR_INTERRUPT = 6,
++ gcvSTATUS_MISMATCH = 7,
++ gcvSTATUS_MIPMAP_TOO_SMALL = 8,
++ gcvSTATUS_LARGER = 9,
++ gcvSTATUS_SMALLER = 10,
++ gcvSTATUS_CHIP_NOT_READY = 11,
++ gcvSTATUS_NEED_CONVERSION = 12,
++ gcvSTATUS_SKIP = 13,
++ gcvSTATUS_DATA_TOO_LARGE = 14,
++ gcvSTATUS_INVALID_CONFIG = 15,
++ gcvSTATUS_CHANGED = 16,
++ gcvSTATUS_NOT_SUPPORT_DITHER = 17,
++ gcvSTATUS_EXECUTED = 18,
++ gcvSTATUS_TERMINATE = 19,
++
++ gcvSTATUS_INVALID_ARGUMENT = -1,
++ gcvSTATUS_INVALID_OBJECT = -2,
++ gcvSTATUS_OUT_OF_MEMORY = -3,
++ gcvSTATUS_MEMORY_LOCKED = -4,
++ gcvSTATUS_MEMORY_UNLOCKED = -5,
++ gcvSTATUS_HEAP_CORRUPTED = -6,
++ gcvSTATUS_GENERIC_IO = -7,
++ gcvSTATUS_INVALID_ADDRESS = -8,
++ gcvSTATUS_CONTEXT_LOSSED = -9,
++ gcvSTATUS_TOO_COMPLEX = -10,
++ gcvSTATUS_BUFFER_TOO_SMALL = -11,
++ gcvSTATUS_INTERFACE_ERROR = -12,
++ gcvSTATUS_NOT_SUPPORTED = -13,
++ gcvSTATUS_MORE_DATA = -14,
++ gcvSTATUS_TIMEOUT = -15,
++ gcvSTATUS_OUT_OF_RESOURCES = -16,
++ gcvSTATUS_INVALID_DATA = -17,
++ gcvSTATUS_INVALID_MIPMAP = -18,
++ gcvSTATUS_NOT_FOUND = -19,
++ gcvSTATUS_NOT_ALIGNED = -20,
++ gcvSTATUS_INVALID_REQUEST = -21,
++ gcvSTATUS_GPU_NOT_RESPONDING = -22,
++ gcvSTATUS_TIMER_OVERFLOW = -23,
++ gcvSTATUS_VERSION_MISMATCH = -24,
++ gcvSTATUS_LOCKED = -25,
++ gcvSTATUS_INTERRUPTED = -26,
++ gcvSTATUS_DEVICE = -27,
++ gcvSTATUS_NOT_MULTI_PIPE_ALIGNED = -28,
++
++ /* Linker errors. */
++ gcvSTATUS_GLOBAL_TYPE_MISMATCH = -1000,
++ gcvSTATUS_TOO_MANY_ATTRIBUTES = -1001,
++ gcvSTATUS_TOO_MANY_UNIFORMS = -1002,
++ gcvSTATUS_TOO_MANY_VARYINGS = -1003,
++ gcvSTATUS_UNDECLARED_VARYING = -1004,
++ gcvSTATUS_VARYING_TYPE_MISMATCH = -1005,
++ gcvSTATUS_MISSING_MAIN = -1006,
++ gcvSTATUS_NAME_MISMATCH = -1007,
++ gcvSTATUS_INVALID_INDEX = -1008,
++ gcvSTATUS_UNIFORM_MISMATCH = -1009,
++ gcvSTATUS_UNSAT_LIB_SYMBOL = -1010,
++ gcvSTATUS_TOO_MANY_SHADERS = -1011,
++ gcvSTATUS_LINK_INVALID_SHADERS = -1012,
++ gcvSTATUS_CS_NO_WORKGROUP_SIZE = -1013,
++ gcvSTATUS_LINK_LIB_ERROR = -1014,
++ gcvSTATUS_SHADER_VERSION_MISMATCH = -1015,
++ gcvSTATUS_TOO_MANY_INSTRUCTION = -1016,
++ gcvSTATUS_SSBO_MISMATCH = -1017,
++ gcvSTATUS_TOO_MANY_OUTPUT = -1018,
++ gcvSTATUS_TOO_MANY_INPUT = -1019,
++ gcvSTATUS_NOT_SUPPORT_CL = -1020,
++ gcvSTATUS_NOT_SUPPORT_INTEGER = -1021,
++ gcvSTATUS_UNIFORM_TYPE_MISMATCH = -1022,
++ gcvSTATUS_TOO_MANY_SAMPLER = -1023,
++
++ /* Compiler errors. */
++ gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR = -2000,
++ gcvSTATUS_COMPILER_FE_PARSER_ERROR = -2001,
++
++ /* Recompilation Errors */
++ gcvSTATUS_RECOMPILER_CONVERT_UNIMPLEMENTED = -3000,
++}
++gceSTATUS;
++
++/******************************************************************************\
++********************************* Status Macros ********************************
++\******************************************************************************/
++
++#define gcmIS_ERROR(status) (status < 0)
++#define gcmNO_ERROR(status) (status >= 0)
++#define gcmIS_SUCCESS(status) (status == gcvSTATUS_OK)
++
++/******************************************************************************\
++********************************* Field Macros *********************************
++\******************************************************************************/
++
++#define __gcmSTART(reg_field) \
++ (0 ? reg_field)
++
++#define __gcmEND(reg_field) \
++ (1 ? reg_field)
++
++#define __gcmGETSIZE(reg_field) \
++ (__gcmEND(reg_field) - __gcmSTART(reg_field) + 1)
++
++#define __gcmALIGN(data, reg_field) \
++ (((gctUINT32) (data)) << __gcmSTART(reg_field))
++
++#define __gcmMASK(reg_field) \
++ ((gctUINT32) ((__gcmGETSIZE(reg_field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg_field)))))
++
++/*******************************************************************************
++**
++** gcmFIELDMASK
++**
++** Get aligned field mask.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMASK(reg, field) \
++( \
++ __gcmALIGN(__gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETFIELD
++**
++** Extract the value of a field from specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETFIELD(data, reg, field) \
++( \
++ ((((gctUINT32) (data)) >> __gcmSTART(reg##_##field)) \
++ & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELD
++**
++** Set the value of a field within specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETFIELD(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN((gctUINT32) (value) \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELDVALUE
++**
++** Set the value of a field within specified data with a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmSETFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN(reg##_##field##_##value \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETMASKEDFIELDMASK
++**
++** Determine field mask of a masked field.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETMASKEDFIELDMASK(reg, field) \
++( \
++ gcmSETFIELD(0, reg, field, ~0) | \
++ gcmSETFIELD(0, reg, MASK_ ## field, ~0) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELD
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELD(reg, field, value) \
++( \
++ gcmSETFIELD (~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELDVALUE
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELDVALUE(reg, field, value) \
++( \
++ gcmSETFIELDVALUE(~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDVALUE
++**
++** Verify if the value of a field within specified data equals a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmVERIFYFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) >> __gcmSTART(reg##_##field) & \
++ __gcmMASK(reg##_##field)) \
++ == \
++ (reg##_##field##_##value & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++** Bit field macros.
++*/
++
++#define __gcmSTARTBIT(Field) \
++ ( 1 ? Field )
++
++#define __gcmBITSIZE(Field) \
++ ( 0 ? Field )
++
++#define __gcmBITMASK(Field) \
++( \
++ (1 << __gcmBITSIZE(Field)) - 1 \
++)
++
++#define gcmGETBITS(Value, Type, Field) \
++( \
++ ( ((Type) (Value)) >> __gcmSTARTBIT(Field) ) \
++ & \
++ __gcmBITMASK(Field) \
++)
++
++#define gcmSETBITS(Value, Type, Field, NewValue) \
++( \
++ ( ((Type) (Value)) \
++ & ~(__gcmBITMASK(Field) << __gcmSTARTBIT(Field)) \
++ ) \
++ | \
++ ( ( ((Type) (NewValue)) \
++ & __gcmBITMASK(Field) \
++ ) << __gcmSTARTBIT(Field) \
++ ) \
++)
++
++/*******************************************************************************
++**
++** gcmISINREGRANGE
++**
++** Verify whether the specified address is in the register range.
++**
++** ARGUMENTS:
++**
++** Address Address to be verified.
++** Name Name of a register.
++*/
++
++#define gcmISINREGRANGE(Address, Name) \
++( \
++ ((Address & (~0U << Name ## _LSB)) == (Name ## _Address >> 2)) \
++)
++
++/******************************************************************************\
++******************************** Ceiling Macro ********************************
++\******************************************************************************/
++#define gcmCEIL(x) ((x - (gctUINT32)x) == 0 ? (gctUINT32)x : (gctUINT32)x + 1)
++
++/******************************************************************************\
++******************************** Min/Max Macros ********************************
++\******************************************************************************/
++
++#define gcmMIN(x, y) (((x) <= (y)) ? (x) : (y))
++#define gcmMAX(x, y) (((x) >= (y)) ? (x) : (y))
++#define gcmCLAMP(x, min, max) (((x) < (min)) ? (min) : \
++ ((x) > (max)) ? (max) : (x))
++#define gcmABS(x) (((x) < 0) ? -(x) : (x))
++#define gcmNEG(x) (((x) < 0) ? (x) : -(x))
++
++/******************************************************************************\
++******************************** Bit Macro ********************************
++\******************************************************************************/
++#define gcmBITSET(x, y) ((x) & (y))
++/*******************************************************************************
++**
++** gcmPTR2INT
++**
++** Convert a pointer to an integer value.
++**
++** ARGUMENTS:
++**
++** p Pointer value.
++*/
++#define gcmPTR2INT(p) \
++( \
++ (gctUINTPTR_T) (p) \
++)
++
++#define gcmPTR2INT32(p) \
++( \
++ (gctUINT32)(gctUINTPTR_T) (p) \
++)
++
++/*******************************************************************************
++**
++** gcmINT2PTR
++**
++** Convert an integer value into a pointer.
++**
++** ARGUMENTS:
++**
++** v Integer value.
++*/
++
++#define gcmINT2PTR(i) \
++( \
++ (gctPOINTER) (gctUINTPTR_T)(i) \
++)
++
++/*******************************************************************************
++**
++** gcmOFFSETOF
++**
++** Compute the byte offset of a field inside a structure.
++**
++** ARGUMENTS:
++**
++** s Structure name.
++** field Field name.
++*/
++#define gcmOFFSETOF(s, field) \
++( \
++ gcmPTR2INT32(& (((struct s *) 0)->field)) \
++)
++
++/*******************************************************************************
++**
++** gcmSWAB32
++**
++** Return a value with all bytes in the 32 bit argument swapped.
++*/
++#define gcmSWAB32(x) ((gctUINT32)( \
++ (((gctUINT32)(x) & (gctUINT32)0x000000FFUL) << 24) | \
++ (((gctUINT32)(x) & (gctUINT32)0x0000FF00UL) << 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0x00FF0000UL) >> 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0xFF000000UL) >> 24)))
++
++/*******************************************************************************
++***** Database ****************************************************************/
++
++typedef struct _gcsDATABASE_COUNTERS
++{
++ /* Number of currently allocated bytes. */
++ gctUINT64 bytes;
++
++ /* Maximum number of bytes allocated (memory footprint). */
++ gctUINT64 maxBytes;
++
++ /* Total number of bytes allocated. */
++ gctUINT64 totalBytes;
++}
++gcsDATABASE_COUNTERS;
++
++typedef struct _gcuDATABASE_INFO
++{
++ /* Counters. */
++ gcsDATABASE_COUNTERS counters;
++
++ /* Time value. */
++ gctUINT64 time;
++}
++gcuDATABASE_INFO;
++
++/*******************************************************************************
++***** Frame database **********************************************************/
++
++/* gcsHAL_FRAME_INFO */
++typedef struct _gcsHAL_FRAME_INFO
++{
++ /* Current timer tick. */
++ OUT gctUINT64 ticks;
++
++ /* Bandwidth counters. */
++ OUT gctUINT readBytes8[8];
++ OUT gctUINT writeBytes8[8];
++
++ /* Counters. */
++ OUT gctUINT cycles[8];
++ OUT gctUINT idleCycles[8];
++ OUT gctUINT mcCycles[8];
++ OUT gctUINT readRequests[8];
++ OUT gctUINT writeRequests[8];
++
++ /* 3D counters. */
++ OUT gctUINT vertexCount;
++ OUT gctUINT primitiveCount;
++ OUT gctUINT rejectedPrimitives;
++ OUT gctUINT culledPrimitives;
++ OUT gctUINT clippedPrimitives;
++ OUT gctUINT outPrimitives;
++ OUT gctUINT inPrimitives;
++ OUT gctUINT culledQuadCount;
++ OUT gctUINT totalQuadCount;
++ OUT gctUINT quadCount;
++ OUT gctUINT totalPixelCount;
++
++ /* PE counters. */
++ OUT gctUINT colorKilled[8];
++ OUT gctUINT colorDrawn[8];
++ OUT gctUINT depthKilled[8];
++ OUT gctUINT depthDrawn[8];
++
++ /* Shader counters. */
++ OUT gctUINT shaderCycles;
++ OUT gctUINT vsInstructionCount;
++ OUT gctUINT vsTextureCount;
++ OUT gctUINT psInstructionCount;
++ OUT gctUINT psTextureCount;
++
++ /* Texture counters. */
++ OUT gctUINT bilinearRequests;
++ OUT gctUINT trilinearRequests;
++ OUT gctUINT txBytes8;
++ OUT gctUINT txHitCount;
++ OUT gctUINT txMissCount;
++}
++gcsHAL_FRAME_INFO;
++
++#if gcdLINK_QUEUE_SIZE
++typedef struct _gckLINKDATA * gckLINKDATA;
++struct _gckLINKDATA
++{
++ gctUINT32 start;
++ gctUINT32 end;
++ gctUINT32 pid;
++};
++
++typedef struct _gckLINKQUEUE * gckLINKQUEUE;
++struct _gckLINKQUEUE
++{
++ struct _gckLINKDATA data[gcdLINK_QUEUE_SIZE];
++ gctUINT32 rear;
++ gctUINT32 front;
++ gctUINT32 count;
++};
++#endif
++
++#define gcdENTRY_QUEUE_SIZE 256
++typedef struct _gckENTRYDATA * gckENTRYDATA;
++struct _gckENTRYDATA
++{
++ gctUINT32 physical;
++ gctUINT32 bytes;
++};
++
++typedef struct _gckENTRYQUEUE * gckENTRYQUEUE;
++struct _gckENTRYQUEUE
++{
++ struct _gckENTRYDATA data[gcdENTRY_QUEUE_SIZE];
++ gctUINT32 rear;
++ gctUINT32 front;
++ gctUINT32 count;
++};
++
++typedef enum _gceTRACEMODE
++{
++ gcvTRACEMODE_NONE = 0,
++ gcvTRACEMODE_FULL = 1,
++ gcvTRACEMODE_LOGGER = 2,
++ gcvTRACEMODE_PRE = 3,
++ gcvTRACEMODE_POST = 4,
++ gcvTRACEMODE_SYSTRACE = 5,
++
++} gceTRACEMODE;
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_types_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_version.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_version.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_version.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_version.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,39 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_version_h_
++#define __gc_hal_version_h_
++
++#define gcvVERSION_MAJOR 5
++
++#define gcvVERSION_MINOR 0
++
++#define gcvVERSION_PATCH 11
++
++#define gcvVERSION_BUILD 25762
++
++#define gcvVERSION_STRING "5.0.11.p4.25762"
++
++#define gcvVERSION_DATE __DATE__
++
++#define gcvVERSION_TIME __TIME__
++
++#endif /* __gc_hal_version_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_vg.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_vg.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_vg.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,896 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_vg_h_
++#define __gc_hal_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++
++#if gcdENABLE_VG
++
++/* Thread routine type. */
++#if defined(LINUX)
++ typedef gctINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#elif defined(WIN32)
++ typedef gctUINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE __stdcall
++#elif defined(__QNXNTO__)
++ typedef void * gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#endif
++
++typedef gctTHREADFUNCRESULT (gctTHREADFUNCTYPE * gctTHREADFUNC) (
++ gctTHREADFUNCPARAMETER ThreadParameter
++ );
++
++
++#if defined(gcvDEBUG)
++# undef gcvDEBUG
++#endif
++
++#define gcdFORCE_DEBUG 0
++#define gcdFORCE_MESSAGES 0
++
++
++#if DBG || defined(DEBUG) || defined(_DEBUG) || gcdFORCE_DEBUG
++# define gcvDEBUG 1
++#else
++# define gcvDEBUG 0
++#endif
++
++#define _gcmERROR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++
++#define gcmERROR_RETURN(func) _gcmERROR_RETURN(gcm, func)
++
++#define gcmLOG_LOCATION()
++
++#define gcmkIS_ERROR(status) (status < 0)
++
++#define gcmALIGNDOWN(n, align) \
++( \
++ (n) & ~((align) - 1) \
++)
++
++#define gcmIS_VALID_INDEX(Index, Array) \
++ (((gctUINT) (Index)) < gcmCOUNTOF(Array))
++
++
++#define gcmIS_NAN(x) \
++( \
++ ((* (gctUINT32_PTR) &(x)) & 0x7FFFFFFF) == 0x7FFFFFFF \
++)
++
++#define gcmLERP(v1, v2, w) \
++ ((v1) * (w) + (v2) * (1.0f - (w)))
++
++#define gcmINTERSECT(Start1, Start2, Length) \
++ (gcmABS((Start1) - (Start2)) < (Length))
++
++/*******************************************************************************
++**
++** gcmERR_GOTO
++**
++** Prints a message and terminates the current loop on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** Function
++** Function to evaluate.
++*/
++
++#define gcmERR_GOTO(Function) \
++ status = Function; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmERR_GOTO: status=%d @ line=%d in function %s.\n", \
++ status, __LINE__, __FUNCTION__ \
++ ); \
++ goto ErrorHandler; \
++ }
++
++#if gcvDEBUG || gcdFORCE_MESSAGES
++# define gcmVERIFY_BOOLEAN(Expression) \
++ gcmASSERT( \
++ ( (Expression) == gcvFALSE ) || \
++ ( (Expression) == gcvTRUE ) \
++ )
++#else
++# define gcmVERIFY_BOOLEAN(Expression)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDFIT
++**
++** Verify whether the value fits in the field.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmVERIFYFIELDFIT(reg, field, value) \
++ gcmASSERT( \
++ (value) <= gcmFIELDMAX(reg, field) \
++ )
++/*******************************************************************************
++**
++** gcmFIELDMAX
++**
++** Get field maximum value.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMAX(reg, field) \
++( \
++ (gctUINT32) \
++ ( \
++ (__gcmGETSIZE(reg##_##field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg##_##field))) \
++ ) \
++)
++
++
++/* ANSI C does not have the 'f' functions, define replacements here. */
++#define gcmSINF(x) ((gctFLOAT) sin(x))
++#define gcmCOSF(x) ((gctFLOAT) cos(x))
++#define gcmASINF(x) ((gctFLOAT) asin(x))
++#define gcmACOSF(x) ((gctFLOAT) acos(x))
++#define gcmSQRTF(x) ((gctFLOAT) sqrt(x))
++#define gcmFABSF(x) ((gctFLOAT) fabs(x))
++#define gcmFMODF(x, y) ((gctFLOAT) fmod((x), (y)))
++#define gcmCEILF(x) ((gctFLOAT) ceil(x))
++#define gcmFLOORF(x) ((gctFLOAT) floor(x))
++
++
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
++
++/* Integer constants. */
++#define gcvMAX_POS_INT ((gctINT) 0x7FFFFFFF)
++#define gcvMAX_NEG_INT ((gctINT) 0x80000000)
++
++/* Float constants. */
++#define gcvMAX_POS_FLOAT ((gctFLOAT) 3.4028235e+038)
++#define gcvMAX_NEG_FLOAT ((gctFLOAT) -3.4028235e+038)
++
++/******************************************************************************\
++***************************** Miscellaneous Macro ******************************
++\******************************************************************************/
++
++#define gcmKB2BYTES(Kilobyte) \
++( \
++ (Kilobyte) << 10 \
++)
++
++#define gcmMB2BYTES(Megabyte) \
++( \
++ (Megabyte) << 20 \
++)
++
++#define gcmMAT(Matrix, Row, Column) \
++( \
++ (Matrix) [(Row) * 3 + (Column)] \
++)
++
++#define gcmMAKE2CHAR(Char1, Char2) \
++( \
++ ((gctUINT16) (gctUINT8) (Char1) << 0) | \
++ ((gctUINT16) (gctUINT8) (Char2) << 8) \
++)
++
++#define gcmMAKE4CHAR(Char1, Char2, Char3, Char4) \
++( \
++ ((gctUINT32)(gctUINT8) (Char1) << 0) | \
++ ((gctUINT32)(gctUINT8) (Char2) << 8) | \
++ ((gctUINT32)(gctUINT8) (Char3) << 16) | \
++ ((gctUINT32)(gctUINT8) (Char4) << 24) \
++)
++
++/* some platforms need to fix the physical address for HW to access*/
++#define gcmFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++#define gcmkFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++/******************************************************************************\
++****************************** Kernel Debug Macro ******************************
++\******************************************************************************/
++
++/* Set signal to signaled state for specified process. */
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----------------------------- Semaphore Object -----------------------------*/
++
++/* Increment the value of a semaphore. */
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++/* Decrement the value of a semaphore (waiting might occur). */
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- Thread Object ------------------------------*/
++
++/* Start a thread. */
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ );
++
++/* Stop a thread. */
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++/* Verify whether the thread is still running. */
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++
++/* Construct a new gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ );
++
++/* Destroy an gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ );
++
++/* Allocate linear video memory. */
++gceSTATUS
++gckVGKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++/******************************************************************************\
++******************************* gckVGHARDWARE Object ******************************
++\******************************************************************************/
++
++/* Construct a new gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ );
++
++/* Destroy an gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures1
++ );
++
++/* Convert an API format. */
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ );
++
++/* Split a harwdare specific address into API stuff. */
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Align size to tile boundary. */
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Get idle register. */
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckVGHARDWARE_Flush(
++ IN gckVGHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ );
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++/******************************************************************************\
++*************************** Command Buffer Structures **************************
++\******************************************************************************/
++
++/* Vacant command buffer marker. */
++#define gcvVACANT_BUFFER ((gcsCOMPLETION_SIGNAL_PTR) ((gctSIZE_T)1))
++
++/* Command buffer header. */
++typedef struct _gcsCMDBUFFER * gcsCMDBUFFER_PTR;
++typedef struct _gcsCMDBUFFER
++{
++ /* Pointer to the completion signal. */
++ gcsCOMPLETION_SIGNAL_PTR completion;
++
++ /* The user sets this to the node of the container buffer whitin which
++ this particular command buffer resides. The kernel sets this to the
++ node of the internally allocated buffer. */
++ gcuVIDMEM_NODE_PTR node;
++
++ /* Command buffer hardware address. */
++ gctUINT32 address;
++
++ /* The offset of the buffer from the beginning of the header. */
++ gctUINT32 bufferOffset;
++
++ /* Size of the area allocated for the data portion of this particular
++ command buffer (headers and tail reserves are excluded). */
++ gctUINT32 size;
++
++ /* Offset into the buffer [0..size]; reflects exactly how much data has
++ been put into the command buffer. */
++ gctUINT offset;
++
++ /* The number of command units in the buffer for the hardware to
++ execute. */
++ gctUINT32 dataCount;
++
++ /* MANAGED BY : user HAL (gcoBUFFER object).
++ USED BY : user HAL (gcoBUFFER object).
++ Points to the immediate next allocated command buffer. */
++ gcsCMDBUFFER_PTR nextAllocated;
++
++ /* MANAGED BY : user layers (HAL and drivers).
++ USED BY : kernel HAL (gcoBUFFER object).
++ Points to the next subbuffer if any. A family of subbuffers are chained
++ together and are meant to be executed inseparably as a unit. Meaning
++ that context switching cannot occur while a chain of subbuffers is being
++ executed. */
++ gcsCMDBUFFER_PTR nextSubBuffer;
++}
++gcsCMDBUFFER;
++
++/* Command queue element. */
++typedef struct _gcsVGCMDQUEUE
++{
++ /* Pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Dynamic vs. static command buffer state. */
++ gctBOOL dynamic;
++}
++gcsVGCMDQUEUE;
++
++/* Context map entry. */
++typedef struct _gcsVGCONTEXT_MAP
++{
++ /* State index. */
++ gctUINT32 index;
++
++ /* New state value. */
++ gctUINT32 data;
++
++ /* Points to the next entry in the mod list. */
++ gcsVGCONTEXT_MAP_PTR next;
++}
++gcsVGCONTEXT_MAP;
++
++/* gcsVGCONTEXT structure that holds the current context. */
++typedef struct _gcsVGCONTEXT
++{
++ /* Context ID. */
++ gctUINT64 id;
++
++ /* State caching ebable flag. */
++ gctBOOL stateCachingEnabled;
++
++ /* Current pipe. */
++ gctUINT32 currentPipe;
++
++ /* State map/mod buffer. */
++ gctUINT32 mapFirst;
++ gctUINT32 mapLast;
++ gcsVGCONTEXT_MAP_PTR mapContainer;
++ gcsVGCONTEXT_MAP_PTR mapPrev;
++ gcsVGCONTEXT_MAP_PTR mapCurr;
++ gcsVGCONTEXT_MAP_PTR firstPrevMap;
++ gcsVGCONTEXT_MAP_PTR firstCurrMap;
++
++ /* Main context buffer. */
++ gcsCMDBUFFER_PTR header;
++ gctUINT32_PTR buffer;
++
++ /* Completion signal. */
++ gctHANDLE process;
++ gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsVGCONTEXT;
++
++/* User space task header. */
++typedef struct _gcsTASK * gcsTASK_PTR;
++typedef struct _gcsTASK
++{
++ /* Pointer to the next task for the same interrupt in user space. */
++ gcsTASK_PTR next;
++
++ /* Size of the task data that immediately follows the structure. */
++ gctUINT size;
++
++ /* Task data starts here. */
++ /* ... */
++}
++gcsTASK;
++
++/* User space task master table entry. */
++typedef struct _gcsTASK_MASTER_ENTRY * gcsTASK_MASTER_ENTRY_PTR;
++typedef struct _gcsTASK_MASTER_ENTRY
++{
++ /* Pointers to the head and to the tail of the task chain. */
++ gcsTASK_PTR head;
++ gcsTASK_PTR tail;
++}
++gcsTASK_MASTER_ENTRY;
++
++/* User space task master table entry. */
++typedef struct _gcsTASK_MASTER_TABLE
++{
++ /* Table with one entry per block. */
++ gcsTASK_MASTER_ENTRY table[gcvBLOCK_COUNT];
++
++ /* The total number of tasks sckeduled. */
++ gctUINT count;
++
++ /* The total size of event data in bytes. */
++ gctUINT size;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsTASK_MASTER_TABLE;
++
++/******************************************************************************\
++***************************** gckVGINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVGINTERRUPT * gckVGINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckVGKERNEL Kernel
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ );
++
++#ifndef __QNXNTO__
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++#else
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ );
++
++#endif
++
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++typedef struct _gckVGCOMMAND * gckVGCOMMAND;
++
++/* Construct a new gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ );
++
++/* Destroy an gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ IN gckVGCOMMAND Command
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++/* Allocate a command queue. */
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ );
++
++/* Release memory held by the command queue. */
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Schedule the command queue for execution. */
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ );
++
++/******************************************************************************\
++********************************* gckVGMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckVGMMU * gckVGMMU;
++
++/* Construct a new gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT32 MmuSize,
++ OUT gckVGMMU * Mmu
++ );
++
++/* Destroy an gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++/* Flush MMU */
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ );
++
++#endif /* gcdENABLE_VG */
++
++#ifdef __cplusplus
++} /* extern "C" */
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,34 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++extern gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++
++gcsALLOCATOR_DESC allocatorArray[] =
++{
++ /* Default allocator. */
++ gcmkDEFINE_ALLOCATOR_DESC("default", _DefaultAlloctorInit),
++};
++
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,45 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++extern gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++
++#if LINUX_CMA_FSL
++gceSTATUS
++_CMAFSLAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++#endif
++
++gcsALLOCATOR_DESC allocatorArray[] =
++{
++#if LINUX_CMA_FSL
++ gcmkDEFINE_ALLOCATOR_DESC("cmafsl", _CMAFSLAlloctorInit),
++#endif
++ /* Default allocator. */
++ gcmkDEFINE_ALLOCATOR_DESC("default", _DefaultAlloctorInit),
++};
++
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,412 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_allocator.h"
++
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsCMA_PRIV * gcsCMA_PRIV_PTR;
++typedef struct _gcsCMA_PRIV {
++ gctUINT32 cmasize;
++}
++gcsCMA_PRIV;
++
++struct mdl_cma_priv {
++ gctPOINTER kvaddr;
++ dma_addr_t physical;
++};
++
++int gc_cma_usage_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckALLOCATOR Allocator = node->device;
++ gcsCMA_PRIV_PTR priv = Allocator->privateData;
++
++ seq_printf(m, "cma: %u bytes\n", priv->cmasize);
++
++ return 0;
++}
++
++static gcsINFO InfoList[] =
++{
++ {"cmausage", gc_cma_usage_show},
++};
++
++static void
++_DefaultAllocatorDebugfsInit(
++ IN gckALLOCATOR Allocator,
++ IN gckDEBUGFS_DIR Root
++ )
++{
++ gcmkVERIFY_OK(
++ gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "cma"));
++
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList),
++ Allocator
++ ));
++}
++
++static void
++_DefaultAllocatorDebugfsCleanup(
++ IN gckALLOCATOR Allocator
++ )
++{
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList)
++ ));
++
++ gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir);
++}
++
++static gceSTATUS
++_CMAFSLAlloc(
++ IN gckALLOCATOR Allocator,
++ INOUT PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flags
++ )
++{
++ gceSTATUS status;
++ gcsCMA_PRIV_PTR priv = (gcsCMA_PRIV_PTR)Allocator->privateData;
++
++ struct mdl_cma_priv *mdl_priv=gcvNULL;
++ gckOS os = Allocator->os;
++
++ gcmkHEADER_ARG("Mdl=%p NumPages=%d", Mdl, NumPages);
++
++ gcmkONERROR(gckOS_Allocate(os, sizeof(struct mdl_cma_priv), (gctPOINTER *)&mdl_priv));
++ mdl_priv->kvaddr = gcvNULL;
++
++ mdl_priv->kvaddr = dma_alloc_writecombine(gcvNULL,
++ NumPages * PAGE_SIZE,
++ &mdl_priv->physical,
++ GFP_KERNEL | gcdNOWARN);
++
++ if (mdl_priv->kvaddr == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ Mdl->priv = mdl_priv;
++ priv->cmasize += NumPages * PAGE_SIZE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if(mdl_priv)
++ gckOS_Free(os, mdl_priv);
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_CMAFSLFree(
++ IN gckALLOCATOR Allocator,
++ IN OUT PLINUX_MDL Mdl
++ )
++{
++ gckOS os = Allocator->os;
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ gcsCMA_PRIV_PTR priv = (gcsCMA_PRIV_PTR)Allocator->privateData;
++ dma_free_writecombine(gcvNULL,
++ Mdl->numPages * PAGE_SIZE,
++ mdl_priv->kvaddr,
++ mdl_priv->physical);
++ gckOS_Free(os, mdl_priv);
++ priv->cmasize -= Mdl->numPages * PAGE_SIZE;
++}
++
++gctINT
++_CMAFSLMapUser(
++ gckALLOCATOR Allocator,
++ PLINUX_MDL Mdl,
++ PLINUX_MDL_MAP MdlMap,
++ gctBOOL Cacheable
++ )
++{
++
++ PLINUX_MDL mdl = Mdl;
++ PLINUX_MDL_MAP mdlMap = MdlMap;
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++
++ gcmkHEADER_ARG("Allocator=%p Mdl=%p MdlMap=%p gctBOOL=%d", Allocator, Mdl, MdlMap, Cacheable);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): vmaAddr->0x%X for phys_addr->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Now map all the vmalloc pages to this user address. */
++ if (mdl->contiguous)
++ {
++ /* map kernel memory to user space.. */
++ if (dma_mmap_writecombine(gcvNULL,
++ mdlMap->vma,
++ mdl_priv->kvaddr,
++ mdl_priv->physical,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): dma_mmap_attrs error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++ gckOS_Print("incorrect mdl:conti%d\n",mdl->contiguous);
++ }
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_CMAUnmapUser(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ )
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++gceSTATUS
++_CMAMapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ )
++{
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ *Logical =mdl_priv->kvaddr;
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_CMAUnmapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++extern gceSTATUS
++_DefaultLogicalToPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ );
++
++extern gceSTATUS
++_DefaultCache(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++gceSTATUS
++_CMAPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ gcmkASSERT(!Offset);
++ *Physical = mdl_priv->physical;
++
++ return gcvSTATUS_OK;
++}
++
++
++extern void
++_DefaultAllocatorDestructor(
++ IN void* PrivateData
++ );
++
++/* Default allocator operations. */
++gcsALLOCATOR_OPERATIONS CMAFSLAllocatorOperations = {
++ .Alloc = _CMAFSLAlloc,
++ .Free = _CMAFSLFree,
++ .MapUser = _CMAFSLMapUser,
++ .UnmapUser = _CMAUnmapUser,
++ .MapKernel = _CMAMapKernel,
++ .UnmapKernel = _CMAUnmapKernel,
++ .LogicalToPhysical = _DefaultLogicalToPhysical,
++ .Cache = _DefaultCache,
++ .Physical = _CMAPhysical,
++};
++
++/* Default allocator entry. */
++gceSTATUS
++_CMAFSLAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++ gcsCMA_PRIV_PTR priv = gcvNULL;
++
++ gcmkONERROR(
++ gckALLOCATOR_Construct(Os, &CMAFSLAllocatorOperations, &allocator));
++
++ priv = kzalloc(gcmSIZEOF(gcsCMA_PRIV), GFP_KERNEL | gcdNOWARN);
++
++ if (!priv)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Register private data. */
++ allocator->privateData = priv;
++ allocator->privateDataDestructor = _DefaultAllocatorDestructor;
++
++ allocator->debugfsInit = _DefaultAllocatorDebugfsInit;
++ allocator->debugfsCleanup = _DefaultAllocatorDebugfsCleanup;
++
++ allocator->capability = gcvALLOC_FLAG_CONTIGUOUS;
++
++ *Allocator = allocator;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,938 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_allocator.h"
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++
++#include "gc_hal_kernel_allocator_array.h"
++#include "gc_hal_kernel_platform.h"
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsDEFAULT_PRIV * gcsDEFAULT_PRIV_PTR;
++typedef struct _gcsDEFAULT_PRIV {
++ gctUINT32 low;
++ gctUINT32 high;
++}
++gcsDEFAULT_PRIV;
++
++/******************************************************************************\
++************************** Default Allocator Debugfs ***************************
++\******************************************************************************/
++
++int gc_usage_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckALLOCATOR Allocator = node->device;
++ gcsDEFAULT_PRIV_PTR priv = Allocator->privateData;
++
++ seq_printf(m, "low: %u bytes\n", priv->low);
++ seq_printf(m, "high: %u bytes\n", priv->high);
++
++ return 0;
++}
++
++static gcsINFO InfoList[] =
++{
++ {"lowHighUsage", gc_usage_show},
++};
++
++static void
++_DefaultAllocatorDebugfsInit(
++ IN gckALLOCATOR Allocator,
++ IN gckDEBUGFS_DIR Root
++ )
++{
++ gcmkVERIFY_OK(
++ gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "default"));
++
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList),
++ Allocator
++ ));
++}
++
++static void
++_DefaultAllocatorDebugfsCleanup(
++ IN gckALLOCATOR Allocator
++ )
++{
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList)
++ ));
++
++ gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir);
++}
++
++
++static void
++_NonContiguousFree(
++ IN struct page ** Pages,
++ IN gctUINT32 NumPages
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Pages=0x%X, NumPages=%d", Pages, NumPages);
++
++ gcmkASSERT(Pages != gcvNULL);
++
++ for (i = 0; i < NumPages; i++)
++ {
++ __free_page(Pages[i]);
++ }
++
++ if (is_vmalloc_addr(Pages))
++ {
++ vfree(Pages);
++ }
++ else
++ {
++ kfree(Pages);
++ }
++
++ gcmkFOOTER_NO();
++}
++
++static struct page **
++_NonContiguousAlloc(
++ IN gctUINT32 NumPages
++ )
++{
++ struct page ** pages;
++ struct page *p;
++ gctINT i, size;
++
++ gcmkHEADER_ARG("NumPages=%lu", NumPages);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
++ if (NumPages > totalram_pages)
++#else
++ if (NumPages > num_physpages)
++#endif
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ size = NumPages * sizeof(struct page *);
++
++ pages = kmalloc(size, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ pages = vmalloc(size);
++
++ if (!pages)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ }
++
++ for (i = 0; i < NumPages; i++)
++ {
++ p = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN);
++
++ if (!p)
++ {
++ _NonContiguousFree(pages, i);
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ pages[i] = p;
++ }
++
++ gcmkFOOTER_ARG("pages=0x%X", pages);
++ return pages;
++}
++
++gctSTRING
++_CreateKernelVirtualMapping(
++ IN PLINUX_MDL Mdl
++ )
++{
++ gctSTRING addr = 0;
++ gctINT numPages = Mdl->numPages;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ if (Mdl->contiguous)
++ {
++ addr = page_address(Mdl->u.contiguousPages);
++ }
++ else
++ {
++ addr = vmap(Mdl->u.nonContiguousPages,
++ numPages,
++ 0,
++ PAGE_KERNEL);
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++ }
++#else
++ struct page ** pages;
++ gctBOOL free = gcvFALSE;
++ gctINT i;
++
++ if (Mdl->contiguous)
++ {
++ pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ return gcvNULL;
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ pages[i] = nth_page(Mdl->u.contiguousPages, i);
++ }
++
++ free = gcvTRUE;
++ }
++ else
++ {
++ pages = Mdl->u.nonContiguousPages;
++ }
++
++ /* ioremap() can't work on system memory since 2.6.38. */
++ addr = vmap(pages, numPages, 0, gcmkNONPAGED_MEMROY_PROT(PAGE_KERNEL));
++
++ if (free)
++ {
++ kfree(pages);
++ }
++
++#endif
++
++ return addr;
++}
++
++void
++_DestoryKernelVirtualMapping(
++ IN gctSTRING Addr
++ )
++{
++#if !gcdNONPAGED_MEMORY_CACHEABLE
++ vunmap(Addr);
++#endif
++}
++
++void
++_UnmapUserLogical(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++)
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++/***************************************************************************\
++************************ Default Allocator **********************************
++\***************************************************************************/
++#define C_MAX_PAGENUM (50*1024)
++static gceSTATUS
++_DefaultAlloc(
++ IN gckALLOCATOR Allocator,
++ INOUT PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flags
++ )
++{
++ gceSTATUS status;
++ gctUINT32 order;
++ gctSIZE_T bytes;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctPOINTER addr = gcvNULL;
++#endif
++ gctUINT32 numPages;
++ gctUINT i = 0;
++ gctBOOL contiguous = Flags & gcvALLOC_FLAG_CONTIGUOUS;
++ struct sysinfo temsysinfo;
++ gcsDEFAULT_PRIV_PTR priv = (gcsDEFAULT_PRIV_PTR)Allocator->privateData;
++
++ gcmkHEADER_ARG("Mdl=%p NumPages=%d", Mdl, NumPages);
++
++ numPages = NumPages;
++ bytes = NumPages * PAGE_SIZE;
++ order = get_order(bytes);
++
++ si_meminfo(&temsysinfo);
++
++ if (Flags & gcvALLOC_FLAG_MEMLIMIT)
++ {
++ if ( (temsysinfo.freeram < NumPages) || ((temsysinfo.freeram-NumPages) < C_MAX_PAGENUM) )
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ if (contiguous)
++ {
++ if (order >= MAX_ORDER)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ addr =
++ alloc_pages_exact(bytes, GFP_KERNEL | gcdNOWARN | __GFP_NORETRY);
++
++ Mdl->u.contiguousPages = addr
++ ? virt_to_page(addr)
++ : gcvNULL;
++
++ Mdl->exact = gcvTRUE;
++#else
++ Mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | gcdNOWARN | __GFP_NORETRY, order);
++#endif
++
++ if (Mdl->u.contiguousPages == gcvNULL)
++ {
++ Mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN, order);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ Mdl->exact = gcvFALSE;
++#endif
++ }
++ }
++ else
++ {
++ Mdl->u.nonContiguousPages = _NonContiguousAlloc(numPages);
++ }
++
++ if (Mdl->u.contiguousPages == gcvNULL && Mdl->u.nonContiguousPages == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ struct page *page;
++
++ if (contiguous)
++ {
++ page = nth_page(Mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(Mdl->u.nonContiguousPages, i);
++ }
++
++ SetPageReserved(page);
++
++ if (!PageHighMem(page) && page_to_phys(page))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CacheFlush(Allocator->os, _GetProcessID(), gcvNULL,
++ page_to_phys(page),
++ page_address(page),
++ PAGE_SIZE));
++
++ priv->low += PAGE_SIZE;
++ }
++ else
++ {
++ flush_dcache_page(page);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED && defined(CONFIG_OUTER_CACHE) && gcdENABLE_OUTER_CACHE_PATCH
++ if (page_to_phys(page))
++ {
++ _HandleOuterCache(
++ Allocator->os,
++ page_to_phys(page),
++ gcvNULL,
++ PAGE_SIZE,
++ gcvCACHE_FLUSH
++ );
++ }
++#endif
++
++ priv->high += PAGE_SIZE;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_DefaultFree(
++ IN gckALLOCATOR Allocator,
++ IN OUT PLINUX_MDL Mdl
++ )
++{
++ gctINT i;
++ struct page * page;
++ gcsDEFAULT_PRIV_PTR priv = (gcsDEFAULT_PRIV_PTR)Allocator->privateData;
++
++ for (i = 0; i < Mdl->numPages; i++)
++ {
++ if (Mdl->contiguous)
++ {
++ page = nth_page(Mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(Mdl->u.nonContiguousPages, i);
++ }
++
++ ClearPageReserved(page);
++
++ if (PageHighMem(page))
++ {
++ priv->high -= PAGE_SIZE;
++ }
++ else
++ {
++ priv->low -= PAGE_SIZE;
++ }
++ }
++
++ if (Mdl->contiguous)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ if (Mdl->exact == gcvTRUE)
++ {
++ free_pages_exact(page_address(Mdl->u.contiguousPages), Mdl->numPages * PAGE_SIZE);
++ }
++ else
++#endif
++ {
++ __free_pages(Mdl->u.contiguousPages, get_order(Mdl->numPages * PAGE_SIZE));
++ }
++ }
++ else
++ {
++ _NonContiguousFree(Mdl->u.nonContiguousPages, Mdl->numPages);
++ }
++}
++
++gctINT
++_DefaultMapUser(
++ gckALLOCATOR Allocator,
++ PLINUX_MDL Mdl,
++ PLINUX_MDL_MAP MdlMap,
++ gctBOOL Cacheable
++ )
++{
++
++ gctSTRING addr;
++ unsigned long start;
++ unsigned long pfn;
++ gctINT i;
++ gckOS os = Allocator->os;
++ gcsPLATFORM * platform = os->device->platform;
++
++ PLINUX_MDL mdl = Mdl;
++ PLINUX_MDL_MAP mdlMap = MdlMap;
++
++ gcmkHEADER_ARG("Allocator=%p Mdl=%p MdlMap=%p gctBOOL=%d", Allocator, Mdl, MdlMap, Cacheable);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): vmaAddr->0x%X for phys_addr->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++
++ if (Cacheable == gcvFALSE)
++ {
++ /* Make this mapping non-cached. */
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ }
++
++ if (platform && platform->ops->adjustProt)
++ {
++ platform->ops->adjustProt(mdlMap->vma);
++ }
++
++ addr = mdl->addr;
++
++ /* Now map all the vmalloc pages to this user address. */
++ if (mdl->contiguous)
++ {
++ /* map kernel memory to user space.. */
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ page_to_pfn(mdl->u.contiguousPages),
++ mdlMap->vma->vm_end - mdlMap->vma->vm_start,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): unable to mmap ret",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++ start = mdlMap->vma->vm_start;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ pfn = _NonContiguousToPfn(mdl->u.nonContiguousPages, i);
++
++ if (remap_pfn_range(mdlMap->vma,
++ start,
++ pfn,
++ PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ start += PAGE_SIZE;
++ addr += PAGE_SIZE;
++ }
++ }
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_DefaultUnmapUser(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ )
++{
++ _UnmapUserLogical(Logical, Size);
++}
++
++gceSTATUS
++_DefaultMapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ )
++{
++ *Logical = _CreateKernelVirtualMapping(Mdl);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultUnmapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ )
++{
++ _DestoryKernelVirtualMapping(Logical);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultLogicalToPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ return _ConvertLogical2Physical(
++ Allocator->os, Logical, ProcessID, Mdl, Physical);
++}
++
++gceSTATUS
++_DefaultCache(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ gcmkASSERT(Mdl->pagedMem && !Mdl->contiguous);
++ *Physical = _NonContiguousToPhys(Mdl->u.nonContiguousPages, Offset);
++
++ return gcvSTATUS_OK;
++}
++
++void
++_DefaultAllocatorDestructor(
++ IN void* PrivateData
++ )
++{
++ kfree(PrivateData);
++}
++
++/* Default allocator operations. */
++gcsALLOCATOR_OPERATIONS DefaultAllocatorOperations = {
++ .Alloc = _DefaultAlloc,
++ .Free = _DefaultFree,
++ .MapUser = _DefaultMapUser,
++ .UnmapUser = _DefaultUnmapUser,
++ .MapKernel = _DefaultMapKernel,
++ .UnmapKernel = _DefaultUnmapKernel,
++ .LogicalToPhysical = _DefaultLogicalToPhysical,
++ .Cache = _DefaultCache,
++ .Physical = _DefaultPhysical,
++};
++
++/* Default allocator entry. */
++gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++ gcsDEFAULT_PRIV_PTR priv = gcvNULL;
++
++ gcmkONERROR(
++ gckALLOCATOR_Construct(Os, &DefaultAllocatorOperations, &allocator));
++
++ priv = kzalloc(gcmSIZEOF(gcsDEFAULT_PRIV), GFP_KERNEL | gcdNOWARN);
++
++ if (!priv)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Register private data. */
++ allocator->privateData = priv;
++ allocator->privateDataDestructor = _DefaultAllocatorDestructor;
++
++ allocator->debugfsInit = _DefaultAllocatorDebugfsInit;
++ allocator->debugfsCleanup = _DefaultAllocatorDebugfsCleanup;
++
++ *Allocator = allocator;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++/***************************************************************************\
++************************ Allocator helper ***********************************
++\***************************************************************************/
++
++gceSTATUS
++gckALLOCATOR_Construct(
++ IN gckOS Os,
++ IN gcsALLOCATOR_OPERATIONS * Operations,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=%p, Operations=%p, Allocator=%p",
++ Os, Operations, Allocator);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Allocator != gcvNULL);
++ gcmkVERIFY_ARGUMENT
++ ( Operations
++ && Operations->Alloc
++ && Operations->Free
++ && Operations->MapUser
++ && Operations->UnmapUser
++ && Operations->MapKernel
++ && Operations->UnmapKernel
++ && Operations->LogicalToPhysical
++ && Operations->Cache
++ && Operations->Physical
++ );
++
++ gcmkONERROR(
++ gckOS_Allocate(Os, gcmSIZEOF(gcsALLOCATOR), (gctPOINTER *)&allocator));
++
++ gckOS_ZeroMemory(allocator, gcmSIZEOF(gcsALLOCATOR));
++
++ /* Record os. */
++ allocator->os = Os;
++
++ /* Set operations. */
++ allocator->ops = Operations;
++
++ allocator->capability = gcvALLOC_FLAG_CONTIGUOUS
++ | gcvALLOC_FLAG_NON_CONTIGUOUS
++ | gcvALLOC_FLAG_CACHEABLE
++ | gcvALLOC_FLAG_MEMLIMIT;
++ ;
++
++ *Allocator = allocator;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/******************************************************************************\
++******************************** Debugfs Support *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_AllocatorDebugfsInit(
++ IN gckOS Os
++ )
++{
++ gceSTATUS status;
++ gckGALDEVICE device = Os->device;
++
++ gckDEBUGFS_DIR dir = &Os->allocatorDebugfsDir;
++
++ gcmkONERROR(gckDEBUGFS_DIR_Init(dir, device->debugfsDir.root, "allocators"));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++static void
++_AllocatorDebugfsCleanup(
++ IN gckOS Os
++ )
++{
++ gckDEBUGFS_DIR dir = &Os->allocatorDebugfsDir;
++
++ gckDEBUGFS_DIR_Deinit(dir);
++}
++
++/***************************************************************************\
++************************ Allocator management *******************************
++\***************************************************************************/
++
++gceSTATUS
++gckOS_ImportAllocators(
++ gckOS Os
++ )
++{
++ gceSTATUS status;
++ gctUINT i;
++ gckALLOCATOR allocator;
++
++ _AllocatorDebugfsInit(Os);
++
++ INIT_LIST_HEAD(&Os->allocatorList);
++
++ for (i = 0; i < gcmCOUNTOF(allocatorArray); i++)
++ {
++ if (allocatorArray[i].construct)
++ {
++ /* Construct allocator. */
++ status = allocatorArray[i].construct(Os, &allocator);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkPRINT("["DEVICE_NAME"]: Can't construct allocator(%s)",
++ allocatorArray[i].name);
++
++ continue;
++ }
++
++ allocator->name = allocatorArray[i].name;
++
++ if (allocator->debugfsInit)
++ {
++ /* Init allocator's debugfs. */
++ allocator->debugfsInit(allocator, &Os->allocatorDebugfsDir);
++ }
++
++ list_add_tail(&allocator->head, &Os->allocatorList);
++ }
++ }
++
++#if gcdDEBUG
++ list_for_each_entry(allocator, &Os->allocatorList, head)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d) Allocator: %s",
++ __FUNCTION__, __LINE__,
++ allocator->name
++ );
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_FreeAllocators(
++ gckOS Os
++ )
++{
++ gckALLOCATOR allocator;
++ gckALLOCATOR temp;
++
++ list_for_each_entry_safe(allocator, temp, &Os->allocatorList, head)
++ {
++ list_del(&allocator->head);
++
++ if (allocator->debugfsCleanup)
++ {
++ /* Clean up allocator's debugfs. */
++ allocator->debugfsCleanup(allocator);
++ }
++
++ /* Free private data. */
++ if (allocator->privateDataDestructor && allocator->privateData)
++ {
++ allocator->privateDataDestructor(allocator->privateData);
++ }
++
++ gckOS_Free(Os, allocator);
++ }
++
++ _AllocatorDebugfsCleanup(Os);
++
++ return gcvSTATUS_OK;
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,400 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_allocator_h_
++#define __gc_hal_kernel_allocator_h_
++
++#include "gc_hal_kernel_linux.h"
++
++typedef struct _gcsALLOCATOR * gckALLOCATOR;
++
++typedef struct _gcsALLOCATOR_OPERATIONS
++{
++ /**************************************************************************
++ **
++ ** Alloc
++ **
++ ** Allocte memory, request size is page aligned.
++ **
++ ** INPUT:
++ **
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** PLINUX_Mdl
++ ** Pointer to Mdl whichs stores information
++ ** about allocated memory.
++ **
++ ** gctSIZE_T NumPages
++ ** Number of pages need to allocate.
++ **
++ ** gctUINT32 Flag
++ ** Allocation option.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS
++ (*Alloc)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flag
++ );
++
++ /**************************************************************************
++ **
++ ** Free
++ **
++ ** Free memory.
++ **
++ ** INPUT:
++ **
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Mdl which stores information.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ void
++ (*Free)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl
++ );
++
++ /**************************************************************************
++ **
++ ** MapUser
++ **
++ ** Map memory to user space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl.
++ **
++ ** PLINUX_MDL_MAP MdlMap
++ ** Pointer to a MdlMap, mapped address is stored
++ ** in MdlMap->vmaAddr
++ **
++ ** gctBOOL Cacheable
++ ** Whether this mapping is cacheable.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gctINT
++ (*MapUser)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap,
++ IN gctBOOL Cacheable
++ );
++
++ /**************************************************************************
++ **
++ ** UnmapUser
++ **
++ ** Unmap address from user address space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** gctPOINTER Logical
++ ** Address to be unmap
++ **
++ ** gctUINT32 Size
++ ** Size of address space
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ void
++ (*UnmapUser)(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ );
++
++ /**************************************************************************
++ **
++ ** MapKernel
++ **
++ ** Map memory to kernel space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** OUTPUT:
++ ** gctPOINTER * Logical
++ ** Mapped kernel address.
++ */
++ gceSTATUS
++ (*MapKernel)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ );
++
++ /**************************************************************************
++ **
++ ** UnmapKernel
++ **
++ ** Unmap memory from kernel space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++ ** Mapped kernel address.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS
++ (*UnmapKernel)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ );
++
++ /**************************************************************************
++ **
++ ** LogicalToPhysical
++ **
++ ** Get physical address from logical address, logical
++ ** address could be user virtual address or kernel
++ ** virtual address.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++ ** Mapped kernel address.
++ **
++ ** gctUINT32 ProcessID
++ ** pid of current process.
++ ** OUTPUT:
++ **
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ */
++ gceSTATUS
++ (*LogicalToPhysical)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ );
++
++ /**************************************************************************
++ **
++ ** Cache
++ **
++ ** Maintain cache coherency.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++ ** Logical address, could be user address or kernel address
++ **
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ ** gctUINT32 Bytes
++ ** Size of memory region.
++ **
++ ** gceCACHEOPERATION Opertaion
++ ** Cache operation.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS (*Cache)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++ /**************************************************************************
++ **
++ ** Physical
++ **
++ ** Get physical address from a offset in memory region.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to an gckALLOCATOER object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctUINT32 Offset
++ ** Offset in this memory region.
++ **
++ ** OUTPUT:
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ */
++ gceSTATUS (*Physical)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ );
++}
++gcsALLOCATOR_OPERATIONS;
++
++typedef struct _gcsALLOCATOR
++{
++ /* Pointer to gckOS Object. */
++ gckOS os;
++
++ /* Name. */
++ gctSTRING name;
++
++ /* Operations. */
++ gcsALLOCATOR_OPERATIONS* ops;
++
++ /* Capability of this allocator. */
++ gctUINT32 capability;
++
++ struct list_head head;
++
++ /* Debugfs entry of this allocator. */
++ gcsDEBUGFS_DIR debugfsDir;
++
++ /* Init allocator debugfs. */
++ void (*debugfsInit)(gckALLOCATOR, gckDEBUGFS_DIR);
++
++ /* Cleanup allocator debugfs. */
++ void (*debugfsCleanup)(gckALLOCATOR);
++
++ /* Private data used by customer allocator. */
++ void * privateData;
++
++ /* Private data destructor. */
++ void (*privateDataDestructor)(void *);
++}
++gcsALLOCATOR;
++
++typedef struct _gcsALLOCATOR_DESC
++{
++ /* Name of a allocator. */
++ char * name;
++
++ /* Entry function to construct a allocator. */
++ gceSTATUS (*construct)(gckOS, gckALLOCATOR *);
++}
++gcsALLOCATOR_DESC;
++
++/*
++* Helpers
++*/
++
++/* Fill a gcsALLOCATOR_DESC structure. */
++#define gcmkDEFINE_ALLOCATOR_DESC(Name, Construct) \
++ { \
++ .name = Name, \
++ .construct = Construct, \
++ }
++
++/* Construct a allocator. */
++gceSTATUS
++gckALLOCATOR_Construct(
++ IN gckOS Os,
++ IN gcsALLOCATOR_OPERATIONS * Operations,
++ OUT gckALLOCATOR * Allocator
++ );
++
++/*
++ How to implement customer allocator
++
++ Build in customer alloctor
++
++ It is recommanded that customer allocator is implmented in independent
++ source file(s) which is specified by CUSOMTER_ALLOCATOR_OBJS in Kbuld.
++
++ Register gcsALLOCATOR
++
++ For each customer specified allocator, a desciption entry must be added
++ to allocatorArray defined in gc_hal_kernel_allocator_array.h.
++
++ An entry in allocatorArray is a gcsALLOCATOR_DESC structure which describes
++ name and constructor of a gckALLOCATOR object.
++
++
++ Implement gcsALLOCATOR_DESC.init()
++
++ In gcsALLOCATOR_DESC.init(), gckALLOCATOR_Construct should be called
++ to create a gckALLOCATOR object, customer specified private data can
++ be put in gcsALLOCATOR.privateData.
++
++
++ Implement gcsALLOCATOR_OPERATIONS
++
++ When call gckALLOCATOR_Construct to create a gckALLOCATOR object, a
++ gcsALLOCATOR_OPERATIONS structure must be provided whose all members
++ implemented.
++
++*/
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1166 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifdef MODULE
++#include <linux/module.h>
++#endif
++#include <linux/init.h>
++#include <linux/debugfs.h>
++#include <linux/slab.h>
++#ifdef MODVERSIONS
++#include <linux/modversions.h>
++#endif
++#include <linux/stddef.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/mutex.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <linux/completion.h>
++#include <linux/seq_file.h>
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel.h"
++
++/*
++ Prequsite:
++
++ 1) Debugfs feature must be enabled in the kernel.
++ 1.a) You can enable this, in the compilation of the uImage, all you have to do is, In the "make menuconfig" part,
++ you have to enable the debugfs in the kernel hacking part of the menu.
++
++ HOW TO USE:
++ 1) insert the driver with the following option logFileSize, Ex: insmod galcore.ko ...... logFileSize=10240
++ This gives a circular buffer of 10 MB
++
++ 2)Usually after inserting the driver, the debug file system is mounted under /sys/kernel/debug/
++
++ 2.a)If the debugfs is not mounted, you must do "mount -t debugfs none /sys/kernel/debug"
++
++ 3) To read what is being printed in the debugfs file system:
++ Ex : cat /sys/kernel/debug/gc/galcore_trace
++
++ 4)To write into the debug file system from user side :
++ Ex: echo "hello" > cat /sys/kernel/debug/gc/galcore_trace
++
++ 5)To write into debugfs from kernel side, Use the function called gckDEBUGFS_Print
++
++ How to Get Video Memory Usage:
++ 1) Select a process whose video memory usage can be dump, no need to reset it until <pid> is needed to be change.
++ echo <pid> > /sys/kernel/debug/gc/vidmem
++
++ 2) Get video memory usage.
++ cat /sys/kernel/debug/gc/vidmem
++
++ USECASE Kernel Dump:
++
++ 1) Go to /hal/inc/gc_hal_options.h, and enable the following flags:
++ - # define gcdDUMP 1
++ - # define gcdDUMP_IN_KERNEL 1
++ - # define gcdDUMP_COMMAND 1
++
++ 2) Go to /hal/kernel/gc_hal_kernel_command.c and disable the following flag
++ -#define gcdSIMPLE_COMMAND_DUMP 0
++
++ 3) Compile the driver
++ 4) insmod it with the logFileSize option
++ 5) Run an application
++ 6) You can get the dump by cat /sys/kernel/debug/gpu/galcore_trace
++
++ */
++
++/**/
++typedef va_list gctDBGARGS ;
++#define gcmkARGS_START(argument, pointer) va_start(argument, pointer)
++#define gcmkARGS_END(argument) va_end(argument)
++
++#define gcmkDEBUGFS_PRINT(ArgumentSize, Message) \
++ { \
++ gctDBGARGS __arguments__; \
++ gcmkARGS_START(__arguments__, Message); \
++ _debugfs_res = _DebugFSPrint(ArgumentSize, Message, &__arguments__);\
++ gcmkARGS_END(__arguments__); \
++ }
++
++/* Debug File System Node Struct. */
++struct _gcsDEBUGFS_Node
++{
++ /*wait queues for read and write operations*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ wait_queue_head_t read_q , write_q ;
++#else
++ struct wait_queue *read_q , *write_q ;
++#endif
++ struct dentry *parent ; /*parent directory*/
++ struct dentry *filen ; /*filename*/
++ struct dentry *vidmem;
++ struct semaphore sem ; /* mutual exclusion semaphore */
++ char *data ; /* The circular buffer data */
++ int size ; /* Size of the buffer pointed to by 'data' */
++ int refcount ; /* Files that have this buffer open */
++ int read_point ; /* Offset in circ. buffer of oldest data */
++ int write_point ; /* Offset in circ. buffer of newest data */
++ int offset ; /* Byte number of read_point in the stream */
++ struct _gcsDEBUGFS_Node *next ;
++};
++
++/* amount of data in the queue */
++#define gcmkNODE_QLEN(node) ( (node)->write_point >= (node)->read_point ? \
++ (node)->write_point - (node)->read_point : \
++ (node)->size - (node)->read_point + (node)->write_point)
++
++/* byte number of the last byte in the queue */
++#define gcmkNODE_FIRST_EMPTY_BYTE(node) ((node)->offset + gcmkNODE_QLEN(node))
++
++/*Synchronization primitives*/
++#define gcmkNODE_READQ(node) (&((node)->read_q))
++#define gcmkNODE_WRITEQ(node) (&((node)->write_q))
++#define gcmkNODE_SEM(node) (&((node)->sem))
++
++/*Utilities*/
++#define gcmkMIN(x, y) ((x) < (y) ? (x) : y)
++
++/*Debug File System Struct*/
++typedef struct _gcsDEBUGFS_
++{
++ gcsDEBUGFS_Node* linkedlist ;
++ gcsDEBUGFS_Node* currentNode ;
++ int isInited ;
++} gcsDEBUGFS_ ;
++
++/*debug file system*/
++static gcsDEBUGFS_ gc_dbgfs ;
++
++static int gc_debugfs_open(struct inode *inode, struct file *file)
++{
++ gcsINFO_NODE *node = inode->i_private;
++
++ return single_open(file, node->info->show, node);
++}
++
++static const struct file_operations gc_debugfs_operations = {
++ .owner = THIS_MODULE,
++ .open = gc_debugfs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++gceSTATUS
++gckDEBUGFS_DIR_Init(
++ IN gckDEBUGFS_DIR Dir,
++ IN struct dentry *root,
++ IN gctCONST_STRING Name
++ )
++{
++ Dir->root = debugfs_create_dir(Name, root);
++
++ if (!Dir->root)
++ {
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ INIT_LIST_HEAD(&Dir->nodeList);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDEBUGFS_DIR_CreateFiles(
++ IN gckDEBUGFS_DIR Dir,
++ IN gcsINFO * List,
++ IN int count,
++ IN gctPOINTER Data
++ )
++{
++ int i;
++ gcsINFO_NODE * node;
++ gceSTATUS status;
++
++ for (i = 0; i < count; i++)
++ {
++ /* Create a node. */
++ node = (gcsINFO_NODE *)kzalloc(sizeof(gcsINFO_NODE), GFP_KERNEL);
++
++ node->info = &List[i];
++ node->device = Data;
++
++ /* Bind to a file. TODO: clean up when fail. */
++ node->entry = debugfs_create_file(
++ List[i].name, S_IRUGO|S_IWUSR, Dir->root, node, &gc_debugfs_operations);
++
++ if (!node->entry)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ list_add(&(node->head), &(Dir->nodeList));
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(Dir, List, count));
++ return status;
++}
++
++gceSTATUS
++gckDEBUGFS_DIR_RemoveFiles(
++ IN gckDEBUGFS_DIR Dir,
++ IN gcsINFO * List,
++ IN int count
++ )
++{
++ int i;
++ gcsINFO_NODE * node;
++ gcsINFO_NODE * temp;
++
++ for (i = 0; i < count; i++)
++ {
++ list_for_each_entry_safe(node, temp, &Dir->nodeList, head)
++ {
++ if (node->info == &List[i])
++ {
++ debugfs_remove(node->entry);
++ list_del(&node->head);
++ kfree(node);
++ }
++ }
++ }
++
++ return gcvSTATUS_OK;
++}
++
++void
++gckDEBUGFS_DIR_Deinit(
++ IN gckDEBUGFS_DIR Dir
++ )
++{
++ if (Dir->root != NULL)
++ {
++ debugfs_remove(Dir->root);
++ Dir->root = NULL;
++ }
++}
++
++/*******************************************************************************
++ **
++ ** READ & WRITE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _ReadFromNode
++ **
++ ** 1) reading bytes out of a circular buffer with wraparound.
++ ** 2)returns caddr_t, pointer to data read, which the caller must free.
++ ** 3) length is (a pointer to) the number of bytes to be read, which will be set by this function to
++ ** be the number of bytes actually returned
++ **
++ *******************************************************************************/
++static caddr_t
++_ReadFromNode (
++ gcsDEBUGFS_Node* Node ,
++ size_t *Length ,
++ loff_t *Offset
++ )
++{
++ caddr_t retval ;
++ int bytes_copied = 0 , n , start_point , remaining ;
++
++ /* is the user trying to read data that has already scrolled off? */
++ if ( *Offset < Node->offset )
++ {
++ *Offset = Node->offset ;
++ }
++
++ /* is the user trying to read past EOF? */
++ if ( *Offset >= gcmkNODE_FIRST_EMPTY_BYTE ( Node ) )
++ {
++ return NULL ;
++ }
++
++ /* find the smaller of the total bytes we have available and what
++ * the user is asking for */
++
++ *Length = gcmkMIN ( *Length , gcmkNODE_FIRST_EMPTY_BYTE ( Node ) - *Offset ) ;
++
++ remaining = * Length ;
++
++ /* figure out where to start based on user's Offset */
++ start_point = Node->read_point + ( *Offset - Node->offset ) ;
++
++ start_point = start_point % Node->size ;
++
++ /* allocate memory to return */
++ if ( ( retval = kmalloc ( sizeof (char ) * remaining , GFP_KERNEL ) ) == NULL )
++ return NULL ;
++
++ /* copy the (possibly noncontiguous) data to our buffer */
++ while ( remaining )
++ {
++ n = gcmkMIN ( remaining , Node->size - start_point ) ;
++ memcpy ( retval + bytes_copied , Node->data + start_point , n ) ;
++ bytes_copied += n ;
++ remaining -= n ;
++ start_point = ( start_point + n ) % Node->size ;
++ }
++
++ /* advance user's file pointer */
++ *Offset += * Length ;
++
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ ** _WriteToNode
++ **
++ ** 1) writes to a circular buffer with wraparound.
++ ** 2)in case of an overflow, it overwrites the oldest unread data.
++ **
++ *********************************************************************************/
++static void
++_WriteToNode (
++ gcsDEBUGFS_Node* Node ,
++ caddr_t Buf ,
++ int Length
++ )
++{
++ int bytes_copied = 0 ;
++ int overflow = 0 ;
++ int n ;
++
++ if ( Length + gcmkNODE_QLEN ( Node ) >= ( Node->size - 1 ) )
++ {
++ overflow = 1 ;
++
++ /* in case of overflow, figure out where the new buffer will
++ * begin. we start by figuring out where the current buffer ENDS:
++ * node->parent->offset + gcmkNODE_QLEN. we then advance the end-offset
++ * by the Length of the current write, and work backwards to
++ * figure out what the oldest unoverwritten data will be (i.e.,
++ * size of the buffer). */
++ Node->offset = Node->offset + gcmkNODE_QLEN ( Node ) + Length
++ - Node->size + 1 ;
++ }
++
++ while ( Length )
++ {
++ /* how many contiguous bytes are available from the write point to
++ * the end of the circular buffer? */
++ n = gcmkMIN ( Length , Node->size - Node->write_point ) ;
++ memcpy ( Node->data + Node->write_point , Buf + bytes_copied , n ) ;
++ bytes_copied += n ;
++ Length -= n ;
++ Node->write_point = ( Node->write_point + n ) % Node->size ;
++ }
++
++ /* if there is an overflow, reset the read point to read whatever is
++ * the oldest data that we have, that has not yet been
++ * overwritten. */
++ if ( overflow )
++ {
++ Node->read_point = ( Node->write_point + 1 ) % Node->size ;
++ }
++}
++
++/*******************************************************************************
++ **
++ ** PRINTING UTILITY (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _GetArgumentSize
++ **
++ **
++ *******************************************************************************/
++static gctINT
++_GetArgumentSize (
++ IN gctCONST_STRING Message
++ )
++{
++ gctINT i , count ;
++
++ for ( i = 0 , count = 0 ; Message[i] ; i += 1 )
++ {
++ if ( Message[i] == '%' )
++ {
++ count += 1 ;
++ }
++ }
++ return count * sizeof (unsigned int ) ;
++}
++
++/*******************************************************************************
++ **
++ ** _AppendString
++ **
++ **
++ *******************************************************************************/
++static ssize_t
++_AppendString (
++ IN gcsDEBUGFS_Node* Node ,
++ IN gctCONST_STRING String ,
++ IN int Length
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( Length , Node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ return - ENOMEM ;
++
++ /* copy into our temp buffer */
++ memcpy ( message , String , n ) ;
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( Node , message , n ) ;
++ kfree ( message ) ;
++ return n ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSPrint
++ **
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSPrint (
++ IN unsigned int ArgumentSize ,
++ IN const char* Message ,
++ IN gctDBGARGS * Arguments
++
++ )
++{
++ char buffer[MAX_LINE_SIZE] ;
++ int len ;
++ ssize_t res=0;
++
++ if(in_interrupt())
++ {
++ return - ERESTARTSYS ;
++ }
++
++ if(down_interruptible( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++ len = vsnprintf ( buffer , sizeof (buffer ) , Message , *( va_list * ) Arguments ) ;
++ buffer[len] = '\0' ;
++
++ /* Add end-of-line if missing. */
++ if ( buffer[len - 1] != '\n' )
++ {
++ buffer[len ++] = '\n' ;
++ buffer[len] = '\0' ;
++ }
++ res = _AppendString ( gc_dbgfs.currentNode , buffer , len ) ;
++ up ( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) ;
++ wake_up_interruptible ( gcmkNODE_READQ ( gc_dbgfs.currentNode ) ) ; /* blocked in read*/
++ return res;
++}
++
++/*******************************************************************************
++ **
++ ** LINUX SYSTEM FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** find the vivlog structure associated with an inode.
++ ** returns a pointer to the structure if found, NULL if not found
++ **
++ *******************************************************************************/
++static gcsDEBUGFS_Node*
++_GetNodeInfo (
++ IN struct inode *Inode
++ )
++{
++ gcsDEBUGFS_Node* node ;
++
++ if ( Inode == NULL )
++ return NULL ;
++
++ for ( node = gc_dbgfs.linkedlist ; node != NULL ; node = node->next )
++ if ( node->filen->d_inode->i_ino == Inode->i_ino )
++ return node ;
++
++ return NULL ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSRead
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSRead (
++ struct file *file ,
++ char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ int retval ;
++ caddr_t data_to_return ;
++ gcsDEBUGFS_Node* node ;
++ /* get the metadata about this emlog */
++ if ( ( node = _GetNodeInfo ( file->f_dentry->d_inode ) ) == NULL )
++ {
++ printk ( "debugfs_read: record not found\n" ) ;
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* wait until there's data available (unless we do nonblocking reads) */
++ while ( *offset >= gcmkNODE_FIRST_EMPTY_BYTE ( node ) )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ if ( file->f_flags & O_NONBLOCK )
++ {
++ return - EAGAIN ;
++ }
++ if ( wait_event_interruptible ( ( *( gcmkNODE_READQ ( node ) ) ) , ( *offset < gcmkNODE_FIRST_EMPTY_BYTE ( node ) ) ) )
++ {
++ return - ERESTARTSYS ; /* signal: tell the fs layer to handle it */
++ }
++ /* otherwise loop, but first reacquire the lock */
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++ }
++ data_to_return = _ReadFromNode ( node , &length , offset ) ;
++ if ( data_to_return == NULL )
++ {
++ retval = 0 ;
++ goto unlock ;
++ }
++ if ( copy_to_user ( buffer , data_to_return , length ) > 0 )
++ {
++ retval = - EFAULT ;
++ }
++ else
++ {
++ retval = length ;
++ }
++ kfree ( data_to_return ) ;
++unlock:
++ up ( gcmkNODE_SEM ( node ) ) ;
++ wake_up_interruptible ( gcmkNODE_WRITEQ ( node ) ) ;
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ **_DebugFSWrite
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSWrite (
++ struct file *file ,
++ const char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++ gcsDEBUGFS_Node*node ;
++
++ /* get the metadata about this log */
++ if ( ( node = _GetNodeInfo ( file->f_dentry->d_inode ) ) == NULL )
++ {
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( length , node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ return - ENOMEM ;
++ }
++
++
++ /* copy into our temp buffer */
++ if ( copy_from_user ( message , buffer , n ) > 0 )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ kfree ( message ) ;
++ return - EFAULT ;
++ }
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( node , message , n ) ;
++
++ kfree ( message ) ;
++ up ( gcmkNODE_SEM ( node ) ) ;
++
++ /* wake up any readers that might be waiting for the data. we call
++ * schedule in the vague hope that a reader will run before the
++ * writer's next write, to avoid losing data. */
++ wake_up_interruptible ( gcmkNODE_READQ ( node ) ) ;
++
++ return n ;
++}
++
++int dumpProcess = 0;
++
++void
++_PrintCounter(
++ struct seq_file *file,
++ gcsDATABASE_COUNTERS * counter,
++ gctCONST_STRING Name
++ )
++{
++ seq_printf(file,"Counter: %s\n", Name);
++
++ seq_printf(file,"%-9s%10s","", "All");
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Current");
++
++ seq_printf(file,"%10lld", counter->bytes);
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Maximum");
++
++ seq_printf(file,"%10lld", counter->maxBytes);
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Total");
++
++ seq_printf(file,"%10lld", counter->totalBytes);
++
++ seq_printf(file, "\n");
++}
++
++void
++_ShowCounters(
++ struct seq_file *file,
++ gcsDATABASE_PTR database
++ )
++{
++ gctUINT i = 0;
++ gcsDATABASE_COUNTERS * counter;
++ gcsDATABASE_COUNTERS * nonPaged;
++
++ static gctCONST_STRING surfaceTypes[] = {
++ "UNKNOWN",
++ "Index",
++ "Vertex",
++ "Texture",
++ "RT",
++ "Depth",
++ "Bitmap",
++ "TS",
++ "Image",
++ "Mask",
++ "Scissor",
++ "HZDepth",
++ };
++
++ /* Get pointer to counters. */
++ counter = &database->vidMem;
++
++ nonPaged = &database->nonPaged;
++
++ seq_printf(file,"Counter: vidMem (for each surface type)\n");
++
++ seq_printf(file,"%-9s%10s","", "All");
++
++ for (i = 1; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ seq_printf(file, "%10s",surfaceTypes[i]);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Current");
++
++ seq_printf(file,"%10lld", database->vidMem.bytes);
++
++ for (i = 1; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ seq_printf(file,"%10lld", counter->bytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Maximum");
++
++ seq_printf(file,"%10lld", database->vidMem.maxBytes);
++
++ for (i = 1; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ seq_printf(file,"%10lld", counter->maxBytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Total");
++
++ seq_printf(file,"%10lld", database->vidMem.totalBytes);
++
++ for (i = 1; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ seq_printf(file,"%10lld", counter->totalBytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"Counter: vidMem (for each pool)\n");
++
++ seq_printf(file,"%-9s%10s","", "All");
++
++ for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ seq_printf(file, "%10d", i);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Current");
++
++ seq_printf(file,"%10lld", database->vidMem.bytes);
++
++ for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ counter = &database->vidMemPool[i];
++
++ seq_printf(file,"%10lld", counter->bytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Maximum");
++
++ seq_printf(file,"%10lld", database->vidMem.maxBytes);
++
++ for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ counter = &database->vidMemPool[i];
++
++ seq_printf(file,"%10lld", counter->maxBytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Total");
++
++ seq_printf(file,"%10lld", database->vidMem.totalBytes);
++
++ for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ counter = &database->vidMemPool[i];
++
++ seq_printf(file,"%10lld", counter->totalBytes);
++ }
++
++ seq_printf(file, "\n");
++
++ /* Print nonPaged. */
++ _PrintCounter(file, &database->nonPaged, "nonPaged");
++ _PrintCounter(file, &database->contiguous, "contiguous");
++ _PrintCounter(file, &database->mapUserMemory, "mapUserMemory");
++ _PrintCounter(file, &database->mapMemory, "mapMemory");
++}
++
++gckKERNEL
++_GetValidKernel(
++ gckGALDEVICE Device
++);
++static int vidmem_show(struct seq_file *file, void *unused)
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gckGALDEVICE device = file->private;
++
++ gckKERNEL kernel = _GetValidKernel(device);
++ if(kernel == gcvNULL)
++ {
++ return 0;
++ }
++
++ /* Find the database. */
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(kernel, dumpProcess, gcvFALSE, &database));
++
++ seq_printf(file, "VidMem Usage (Process %d):\n", dumpProcess);
++
++ _ShowCounters(file, database);
++
++ return 0;
++
++OnError:
++ return 0;
++}
++
++static int
++vidmem_open(
++ struct inode *inode,
++ struct file *file
++ )
++{
++ return single_open(file, vidmem_show, inode->i_private);
++}
++
++static ssize_t
++vidmem_write(
++ struct file *file,
++ const char __user *buf,
++ size_t count,
++ loff_t *pos
++ )
++{
++ dumpProcess = simple_strtol(buf, NULL, 0);
++ return count;
++}
++
++/*******************************************************************************
++ **
++ ** File Operations Table
++ **
++ *******************************************************************************/
++static const struct file_operations debugfs_operations = {
++ .owner = THIS_MODULE ,
++ .read = _DebugFSRead ,
++ .write = _DebugFSWrite ,
++} ;
++
++static const struct file_operations vidmem_operations = {
++ .owner = THIS_MODULE ,
++ .open = vidmem_open,
++ .read = seq_read,
++ .write = vidmem_write,
++ .llseek = seq_lseek,
++} ;
++
++/*******************************************************************************
++ **
++ ** INTERFACE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_IsEnabled
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++
++gctINT
++gckDEBUGFS_IsEnabled ( void )
++{
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_Initialize
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDEBUGFS_Initialize ( void )
++{
++ if ( ! gc_dbgfs.isInited )
++ {
++ gc_dbgfs.linkedlist = gcvNULL ;
++ gc_dbgfs.currentNode = gcvNULL ;
++ gc_dbgfs.isInited = 1 ;
++ }
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_Terminate
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDEBUGFS_Terminate ( void )
++{
++ gcsDEBUGFS_Node * next = gcvNULL ;
++ gcsDEBUGFS_Node * temp = gcvNULL ;
++ if ( gc_dbgfs.isInited )
++ {
++ temp = gc_dbgfs.linkedlist ;
++ while ( temp != gcvNULL )
++ {
++ next = temp->next ;
++ gckDEBUGFS_FreeNode ( temp ) ;
++ kfree ( temp ) ;
++ temp = next ;
++ }
++ gc_dbgfs.isInited = 0 ;
++ }
++ return 0 ;
++}
++
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_CreateNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ ** gckDEBUGFS_FreeNode * Device
++ ** Pointer to a variable receiving the gcsDEBUGFS_Node object pointer on
++ ** success.
++ *********************************************************************************/
++
++gctINT
++gckDEBUGFS_CreateNode (
++ IN gctPOINTER Device,
++ IN gctINT SizeInKB ,
++ IN struct dentry * Root ,
++ IN gctCONST_STRING NodeName ,
++ OUT gcsDEBUGFS_Node **Node
++ )
++{
++ gcsDEBUGFS_Node*node ;
++ /* allocate space for our metadata and initialize it */
++ if ( ( node = kmalloc ( sizeof (gcsDEBUGFS_Node ) , GFP_KERNEL ) ) == NULL )
++ goto struct_malloc_failed ;
++
++ /*Zero it out*/
++ memset ( node , 0 , sizeof (gcsDEBUGFS_Node ) ) ;
++
++ /*Init the sync primitives*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ init_waitqueue_head ( gcmkNODE_READQ ( node ) ) ;
++#else
++ init_waitqueue ( gcmkNODE_READQ ( node ) ) ;
++#endif
++
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ init_waitqueue_head ( gcmkNODE_WRITEQ ( node ) ) ;
++#else
++ init_waitqueue ( gcmkNODE_WRITEQ ( node ) ) ;
++#endif
++ sema_init ( gcmkNODE_SEM ( node ) , 1 ) ;
++ /*End the sync primitives*/
++
++ /*creating the debug file system*/
++ node->parent = Root;
++
++ if (SizeInKB)
++ {
++ /* figure out how much of a buffer this should be and allocate the buffer */
++ node->size = 1024 * SizeInKB ;
++ if ( ( node->data = ( char * ) vmalloc ( sizeof (char ) * node->size ) ) == NULL )
++ goto data_malloc_failed ;
++
++ /*creating the file*/
++ node->filen = debugfs_create_file(NodeName, S_IRUGO|S_IWUSR, node->parent, NULL,
++ &debugfs_operations);
++ }
++
++ node->vidmem
++ = debugfs_create_file("vidmem", S_IRUGO|S_IWUSR, node->parent, Device, &vidmem_operations);
++
++ /* add it to our linked list */
++ node->next = gc_dbgfs.linkedlist ;
++ gc_dbgfs.linkedlist = node ;
++
++
++ /* pass the struct back */
++ *Node = node ;
++ return 0 ;
++
++
++data_malloc_failed:
++ kfree ( node ) ;
++struct_malloc_failed:
++ return - ENOMEM ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_FreeNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDEBUGFS_FreeNode (
++ IN gcsDEBUGFS_Node * Node
++ )
++{
++
++ gcsDEBUGFS_Node **ptr ;
++
++ if ( Node == NULL )
++ {
++ printk ( "null passed to free_vinfo\n" ) ;
++ return ;
++ }
++
++ down ( gcmkNODE_SEM ( Node ) ) ;
++ /*free data*/
++ vfree ( Node->data ) ;
++
++ /*Close Debug fs*/
++ if (Node->vidmem)
++ {
++ debugfs_remove(Node->vidmem);
++ }
++
++ if ( Node->filen )
++ {
++ debugfs_remove ( Node->filen ) ;
++ }
++
++ /* now delete the node from the linked list */
++ ptr = & ( gc_dbgfs.linkedlist ) ;
++ while ( *ptr != Node )
++ {
++ if ( ! *ptr )
++ {
++ printk ( "corrupt info list!\n" ) ;
++ break ;
++ }
++ else
++ ptr = & ( ( **ptr ).next ) ;
++ }
++ *ptr = Node->next ;
++ up ( gcmkNODE_SEM ( Node ) ) ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_SetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDEBUGFS_SetCurrentNode (
++ IN gcsDEBUGFS_Node * Node
++ )
++{
++ gc_dbgfs.currentNode = Node ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_GetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDEBUGFS_GetCurrentNode (
++ OUT gcsDEBUGFS_Node ** Node
++ )
++{
++ *Node = gc_dbgfs.currentNode ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_Print
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++ssize_t
++gckDEBUGFS_Print (
++ IN gctCONST_STRING Message ,
++ ...
++ )
++{
++ ssize_t _debugfs_res;
++ gcmkDEBUGFS_PRINT ( _GetArgumentSize ( Message ) , Message ) ;
++ return _debugfs_res;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,135 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <stdarg.h>
++
++#ifndef __gc_hal_kernel_debugfs_h_
++#define __gc_hal_kernel_debugfs_h_
++
++ #define MAX_LINE_SIZE 768 /* Max bytes for a line of debug info */
++
++
++ typedef struct _gcsDEBUGFS_Node gcsDEBUGFS_Node;
++
++typedef struct _gcsDEBUGFS_DIR *gckDEBUGFS_DIR;
++typedef struct _gcsDEBUGFS_DIR
++{
++ struct dentry * root;
++ struct list_head nodeList;
++}
++gcsDEBUGFS_DIR;
++
++typedef struct _gcsINFO
++{
++ const char * name;
++ int (*show)(struct seq_file*, void*);
++}
++gcsINFO;
++
++typedef struct _gcsINFO_NODE
++{
++ gcsINFO * info;
++ gctPOINTER device;
++ struct dentry * entry;
++ struct list_head head;
++}
++gcsINFO_NODE;
++
++gceSTATUS
++gckDEBUGFS_DIR_Init(
++ IN gckDEBUGFS_DIR Dir,
++ IN struct dentry *root,
++ IN gctCONST_STRING Name
++ );
++
++gceSTATUS
++gckDEBUGFS_DIR_CreateFiles(
++ IN gckDEBUGFS_DIR Dir,
++ IN gcsINFO * List,
++ IN int count,
++ IN gctPOINTER Data
++ );
++
++gceSTATUS
++gckDEBUGFS_DIR_RemoveFiles(
++ IN gckDEBUGFS_DIR Dir,
++ IN gcsINFO * List,
++ IN int count
++ );
++
++void
++gckDEBUGFS_DIR_Deinit(
++ IN gckDEBUGFS_DIR Dir
++ );
++
++/*******************************************************************************
++ **
++ ** System Related
++ **
++ *******************************************************************************/
++
++gctINT gckDEBUGFS_IsEnabled(void);
++
++gctINT gckDEBUGFS_Initialize(void);
++
++gctINT gckDEBUGFS_Terminate(void);
++
++
++/*******************************************************************************
++ **
++ ** Node Related
++ **
++ *******************************************************************************/
++
++gctINT
++gckDEBUGFS_CreateNode(
++ IN gctPOINTER Device,
++ IN gctINT SizeInKB,
++ IN struct dentry * Root,
++ IN gctCONST_STRING NodeName,
++ OUT gcsDEBUGFS_Node **Node
++ );
++
++void gckDEBUGFS_FreeNode(
++ IN gcsDEBUGFS_Node * Node
++ );
++
++
++
++void gckDEBUGFS_SetCurrentNode(
++ IN gcsDEBUGFS_Node * Node
++ );
++
++
++
++void gckDEBUGFS_GetCurrentNode(
++ OUT gcsDEBUGFS_Node ** Node
++ );
++
++
++ssize_t gckDEBUGFS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#endif
++
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debug.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debug.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debug.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debug.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,113 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_debug_h_
++#define __gc_hal_kernel_debug_h_
++
++#include <gc_hal_kernel_linux.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <stdarg.h>
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** OS-dependent Macros *****************************
++\******************************************************************************/
++
++typedef va_list gctARGUMENTS;
++
++#define gcmkARGUMENTS_START(Arguments, Pointer) \
++ va_start(Arguments, Pointer)
++
++#define gcmkARGUMENTS_END(Arguments) \
++ va_end(Arguments)
++
++#define gcmkARGUMENTS_ARG(Arguments, Type) \
++ va_arg(Arguments, Type)
++
++#define gcmkDECLARE_LOCK(__spinLock__) \
++ static DEFINE_SPINLOCK(__spinLock__); \
++ unsigned long __spinLock__##flags = 0;
++
++#define gcmkLOCKSECTION(__spinLock__) \
++ spin_lock_irqsave(&__spinLock__, __spinLock__##flags)
++
++#define gcmkUNLOCKSECTION(__spinLock__) \
++ spin_unlock_irqrestore(&__spinLock__, __spinLock__##flags)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETPROCESSID() \
++ task_tgid_vnr(current)
++#else
++# define gcmkGETPROCESSID() \
++ current->tgid
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETTHREADID() \
++ task_pid_vnr(current)
++#else
++# define gcmkGETTHREADID() \
++ current->pid
++#endif
++
++#define gcmkOUTPUT_STRING(String) \
++ if(gckDEBUGFS_IsEnabled()) {\
++ while(-ERESTARTSYS == gckDEBUGFS_Print(String));\
++ }else{\
++ printk(String); \
++ }\
++ touch_softlockup_watchdog()
++
++
++#define gcmkSPRINTF(Destination, Size, Message, Value) \
++ snprintf(Destination, Size, Message, Value)
++
++#define gcmkSPRINTF2(Destination, Size, Message, Value1, Value2) \
++ snprintf(Destination, Size, Message, Value1, Value2)
++
++#define gcmkSPRINTF3(Destination, Size, Message, Value1, Value2, Value3) \
++ snprintf(Destination, Size, Message, Value1, Value2, Value3)
++
++#define gcmkVSPRINTF(Destination, Size, Message, Arguments) \
++ vsnprintf(Destination, Size, Message, *((va_list*)Arguments))
++
++#define gcmkSTRCAT(Destination, Size, String) \
++ strncat(Destination, String, Size)
++
++#define gcmkMEMCPY(Destination, Source, Size) \
++ memcpy(Destination, Source, Size)
++
++#define gcmkSTRLEN(String) \
++ strlen(String)
++
++/* If not zero, forces data alignment in the variable argument list
++ by its individual size. */
++#define gcdALIGNBYSIZE 1
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_debug_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2760 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <linux/slab.h>
++
++#define _GC_OBJ_ZONE gcvZONE_DEVICE
++
++#define DEBUG_FILE "galcore_trace"
++#define PARENT_FILE "gpu"
++
++
++#ifdef FLAREON
++ static struct dove_gpio_irq_handler gc500_handle;
++#endif
++
++gckKERNEL
++_GetValidKernel(
++ gckGALDEVICE Device
++ )
++{
++ if (Device->kernels[gcvCORE_MAJOR])
++ {
++ return Device->kernels[gcvCORE_MAJOR];
++ }
++ else
++ if (Device->kernels[gcvCORE_2D])
++ {
++ return Device->kernels[gcvCORE_2D];
++ }
++ else
++ if (Device->kernels[gcvCORE_VG])
++ {
++ return Device->kernels[gcvCORE_VG];
++ }
++ else
++ {
++ return gcvNULL;
++ }
++}
++
++/******************************************************************************\
++******************************** Debugfs Support *******************************
++\******************************************************************************/
++
++/******************************************************************************\
++***************************** DEBUG SHOW FUNCTIONS *****************************
++\******************************************************************************/
++
++int gc_info_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++ int i = 0;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->irqLines[i] != -1)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ chipModel = device->kernels[i]->vg->hardware->chipModel;
++ chipRevision = device->kernels[i]->vg->hardware->chipRevision;
++ }
++ else
++#endif
++ {
++ chipModel = device->kernels[i]->hardware->identity.chipModel;
++ chipRevision = device->kernels[i]->hardware->identity.chipRevision;
++ }
++
++ seq_printf(m, "gpu : %d\n", i);
++ seq_printf(m, "model : %4x\n", chipModel);
++ seq_printf(m, "revision : %4x\n", chipRevision);
++ seq_printf(m, "\n");
++ }
++ }
++
++ return 0;
++}
++
++int gc_clients_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++
++ gckKERNEL kernel = _GetValidKernel(device);
++
++ gcsDATABASE_PTR database;
++ gctINT i, pid;
++ gctUINT8 name[24];
++
++ seq_printf(m, "%-8s%s\n", "PID", "NAME");
++ seq_printf(m, "------------------------\n");
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(kernel->os, kernel->db->dbMutex, gcvINFINITE));
++
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ for (database = kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ pid = database->processID;
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ seq_printf(m, "%-8d%s\n", pid, name);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(kernel->os, kernel->db->dbMutex));
++
++ /* Success. */
++ return 0;
++}
++
++static void
++_CounterAdd(
++ gcsDATABASE_COUNTERS * Dest,
++ gcsDATABASE_COUNTERS * Src
++ )
++{
++ Dest->bytes += Src->bytes;
++ Dest->maxBytes += Src->maxBytes;
++ Dest->totalBytes += Src->totalBytes;
++}
++
++static void
++_CounterPrint(
++ gcsDATABASE_COUNTERS * Counter,
++ gctCONST_STRING Name,
++ struct seq_file* m
++ )
++{
++ seq_printf(m, " %s:\n", Name);
++ seq_printf(m, " Used : %10llu B\n", Counter->bytes);
++}
++
++int gc_meminfo_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++ gckKERNEL kernel = _GetValidKernel(device);
++ gckVIDMEM memory;
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctUINT32 i;
++
++ gctUINT32 free = 0, used = 0, total = 0;
++
++ gcsDATABASE_COUNTERS contiguousCounter = {0, 0, 0};
++ gcsDATABASE_COUNTERS virtualCounter = {0, 0, 0};
++ gcsDATABASE_COUNTERS nonPagedCounter = {0, 0, 0};
++
++ status = gckKERNEL_GetVideoMemoryPool(kernel, gcvPOOL_SYSTEM, &memory);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));
++
++ free = memory->freeBytes;
++ used = memory->bytes - memory->freeBytes;
++ total = memory->bytes;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
++ }
++
++ seq_printf(m, "VIDEO MEMORY:\n");
++ seq_printf(m, " gcvPOOL_SYSTEM:\n");
++ seq_printf(m, " Free : %10u B\n", free);
++ seq_printf(m, " Used : %10u B\n", used);
++ seq_printf(m, " Total : %10u B\n", total);
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(kernel->os, kernel->db->dbMutex, gcvINFINITE));
++
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ for (database = kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ gcsDATABASE_COUNTERS * counter = &database->vidMemPool[gcvPOOL_CONTIGUOUS];
++ _CounterAdd(&contiguousCounter, counter);
++
++ counter = &database->vidMemPool[gcvPOOL_VIRTUAL];
++ _CounterAdd(&virtualCounter, counter);
++
++
++ counter = &database->nonPaged;
++ _CounterAdd(&nonPagedCounter, counter);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(kernel->os, kernel->db->dbMutex));
++
++ _CounterPrint(&contiguousCounter, "gcvPOOL_CONTIGUOUS", m);
++ _CounterPrint(&virtualCounter, "gcvPOOL_VIRTUAL", m);
++
++ seq_printf(m, "\n");
++
++ seq_printf(m, "NON PAGED MEMORY:\n");
++ seq_printf(m, " Used : %10llu B\n", nonPagedCounter.bytes);
++
++ return 0;
++}
++
++static int
++_ShowRecord(
++ IN struct seq_file *file,
++ IN gcsDATABASE_RECORD_PTR record
++ )
++{
++ seq_printf(file, "%4d%8d%16p%16p%16zu\n",
++ record->type,
++ record->kernel->core,
++ record->data,
++ record->physical,
++ record->bytes
++ );
++
++ return 0;
++}
++
++static int
++_ShowRecords(
++ IN struct seq_file *File,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gctUINT i;
++
++ seq_printf(File, "Records:\n");
++
++ seq_printf(File, "%s%8s%16s%16s%16s\n",
++ "Type", "GPU", "Data", "Physical", "Bytes");
++
++ for (i = 0; i < gcmCOUNTOF(Database->list); i++)
++ {
++ gcsDATABASE_RECORD_PTR record = Database->list[i];
++
++ while (record != NULL)
++ {
++ _ShowRecord(File, record);
++ record = record->next;
++ }
++ }
++
++ return 0;
++}
++
++void
++_ShowCounters(
++ struct seq_file *File,
++ gcsDATABASE_PTR Database
++ );
++
++static void
++_ShowProcess(
++ IN struct seq_file *File,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gctINT pid;
++ gctUINT8 name[24];
++
++ /* Process ID and name */
++ pid = Database->processID;
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ seq_printf(File, "--------------------------------------------------------------------------------\n");
++ seq_printf(File, "Process: %-8d %s\n", pid, name);
++
++ /* Detailed records */
++ _ShowRecords(File, Database);
++
++ seq_printf(File, "Counters:\n");
++
++ _ShowCounters(File, Database);
++}
++
++static void
++_ShowProcesses(
++ IN struct seq_file * file,
++ IN gckKERNEL Kernel
++ )
++{
++ gcsDATABASE_PTR database;
++ gctINT i;
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++
++ /* Idle time since last call */
++ seq_printf(file, "GPU Idle: %llu ns\n", Kernel->db->idleTime);
++ Kernel->db->idleTime = 0;
++
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ for (database = Kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ _ShowProcess(file, database);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++}
++
++static int
++gc_db_show(struct seq_file *m, void *data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++ gckKERNEL kernel = _GetValidKernel(device);
++ _ShowProcesses(m, kernel);
++ return 0 ;
++}
++
++static int
++gc_version_show(struct seq_file *m, void *data)
++{
++ seq_printf(m, "%s\n", gcvVERSION_STRING);
++
++ return 0 ;
++}
++
++int gc_idle_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++ gckKERNEL kernel = _GetValidKernel(device);
++ gcuDATABASE_INFO info;
++
++ gckKERNEL_QueryProcessDB(kernel, 0, gcvFALSE, gcvDB_IDLE, &info);
++
++ seq_printf(m, "GPU idle time since last query: %llu ns\n", info.time);
++
++ return 0;
++}
++
++static gcsINFO InfoList[] =
++{
++ {"info", gc_info_show},
++ {"clients", gc_clients_show},
++ {"meminfo", gc_meminfo_show},
++ {"idle", gc_idle_show},
++ {"database", gc_db_show},
++ {"version", gc_version_show},
++};
++
++static gceSTATUS
++_DebugfsInit(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gckDEBUGFS_DIR dir = &Device->debugfsDir;
++
++ gcmkONERROR(gckDEBUGFS_DIR_Init(dir, gcvNULL, "gc"));
++
++ gcmkONERROR(gckDEBUGFS_DIR_CreateFiles(dir, InfoList, gcmCOUNTOF(InfoList), Device));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++static void
++_DebugfsCleanup(
++ IN gckGALDEVICE Device
++ )
++{
++ gckDEBUGFS_DIR dir = &Device->debugfsDir;
++
++ if (Device->debugfsDir.root)
++ {
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(dir, InfoList, gcmCOUNTOF(InfoList)));
++
++ gckDEBUGFS_DIR_Deinit(dir);
++ }
++}
++
++
++/******************************************************************************\
++*************************** Memory Allocation Wrappers *************************
++\******************************************************************************/
++
++static gceSTATUS
++_AllocateMemory(
++ IN gckGALDEVICE Device,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER *Logical,
++ OUT gctPHYS_ADDR *Physical,
++ OUT gctUINT32 *PhysAddr
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Bytes=%lu", Device, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++ gcmkVERIFY_ARGUMENT(Physical != NULL);
++ gcmkVERIFY_ARGUMENT(PhysAddr != NULL);
++
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Device->os, gcvFALSE, &Bytes, Physical, Logical
++ ));
++
++ *PhysAddr = ((PLINUX_MDL)*Physical)->dmaHandle;
++
++ /* Success. */
++ gcmkFOOTER_ARG(
++ "*Logical=0x%x *Physical=0x%x *PhysAddr=0x%08x",
++ *Logical, *Physical, *PhysAddr
++ );
++
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_FreeMemory(
++ IN gckGALDEVICE Device,
++ IN gctPOINTER Logical,
++ IN gctPHYS_ADDR Physical)
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Logical=0x%x Physical=0x%x",
++ Device, Logical, Physical);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ status = gckOS_FreeContiguous(
++ Device->os, Physical, Logical,
++ ((PLINUX_MDL) Physical)->numPages * PAGE_SIZE
++ );
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++
++/******************************************************************************\
++******************************* Interrupt Handler ******************************
++\******************************************************************************/
++#if gcdMULTI_GPU
++static irqreturn_t isrRoutine3D0(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvCORE_3D_0_ID,
++ gcvNOTIFY_INTERRUPT,
++ gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Wake up the threadRoutine to process events. */
++ device->dataReady3D[gcvCORE_3D_0_ID] = gcvTRUE;
++ wake_up_interruptible(&device->intrWaitQueue3D[gcvCORE_3D_0_ID]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine3D0(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ /* Sleep until being awaken by the interrupt handler. */
++ wait_event_interruptible(device->intrWaitQueue3D[gcvCORE_3D_0_ID],
++ device->dataReady3D[gcvCORE_3D_0_ID] == gcvTRUE);
++ device->dataReady3D[gcvCORE_3D_0_ID] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvCORE_3D_0_ID,
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++
++#if gcdMULTI_GPU > 1
++static irqreturn_t isrRoutine3D1(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvCORE_3D_1_ID,
++ gcvNOTIFY_INTERRUPT,
++ gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Wake up the worker thread to process events. */
++ device->dataReady3D[gcvCORE_3D_1_ID] = gcvTRUE;
++ wake_up_interruptible(&device->intrWaitQueue3D[gcvCORE_3D_1_ID]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine3D1(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ /* Sleep until being awaken by the interrupt handler. */
++ wait_event_interruptible(device->intrWaitQueue3D[gcvCORE_3D_1_ID],
++ device->dataReady3D[gcvCORE_3D_1_ID] == gcvTRUE);
++ device->dataReady3D[gcvCORE_3D_1_ID] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvCORE_3D_1_ID,
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++#endif
++#elif gcdMULTI_GPU_AFFINITY
++static irqreturn_t isrRoutine3D0(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ up(&device->semas[gcvCORE_MAJOR]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine3D0(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_MAJOR]);
++ if (down); /*To make gcc 4.6 happye*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutine3D1(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_OCL], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ up(&device->semas[gcvCORE_OCL]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine3D1(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_OCL]);
++ if (down); /*To make gcc 4.6 happye*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_OCL],
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++#else
++static irqreturn_t isrRoutine(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ up(&device->semas[gcvCORE_MAJOR]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_MAJOR]);
++ if (down); /*To make gcc 4.6 happye*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++#endif
++
++static irqreturn_t isrRoutine2D(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_2D],
++#if gcdMULTI_GPU
++ 0,
++#endif
++ gcvNOTIFY_INTERRUPT,
++ gcvTRUE);
++ if (gcmIS_SUCCESS(status))
++ {
++ up(&device->semas[gcvCORE_2D]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine2D(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_2D]);
++ if (down); /*To make gcc 4.6 happye*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++ gckKERNEL_Notify(device->kernels[gcvCORE_2D],
++#if gcdMULTI_GPU
++ 0,
++#endif
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutineVG(int irq, void *ctxt)
++{
++#if gcdENABLE_VG
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Serve the interrupt. */
++ status = gckVGINTERRUPT_Enque(device->kernels[gcvCORE_VG]->vg->interrupt);
++
++ /* Determine the return value. */
++ return (status == gcvSTATUS_NOT_OUR_INTERRUPT)
++ ? IRQ_RETVAL(0)
++ : IRQ_RETVAL(1);
++#else
++ return IRQ_NONE;
++#endif
++}
++
++static int threadRoutineVG(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_VG]);
++ if (down); /*To make gcc 4.6 happye*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++ gckKERNEL_Notify(device->kernels[gcvCORE_VG],
++#if gcdMULTI_GPU
++ 0,
++#endif
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++
++/******************************************************************************\
++******************************* gckGALDEVICE Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Construct
++**
++** Constructor.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gckGALDEVICE * Device
++** Pointer to a variable receiving the gckGALDEVICE object pointer on
++** success.
++*/
++gceSTATUS
++gckGALDEVICE_Construct(
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ IN gctINT IrqLine3D0,
++ IN gctUINT32 RegisterMemBase3D0,
++ IN gctSIZE_T RegisterMemSize3D0,
++ IN gctINT IrqLine3D1,
++ IN gctUINT32 RegisterMemBase3D1,
++ IN gctSIZE_T RegisterMemSize3D1,
++#else
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++#endif
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ IN gcsDEVICE_CONSTRUCT_ARGS * Args,
++ OUT gckGALDEVICE *Device
++ )
++{
++ gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
++ gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
++ gctUINT32 horizontalTileSize, verticalTileSize;
++ struct resource* mem_region;
++ gctUINT32 physAddr;
++ gctUINT32 physical;
++ gckGALDEVICE device;
++ gceSTATUS status;
++ gctINT32 i;
++#if gcdMULTI_GPU
++ gctINT32 j;
++#endif
++ gceHARDWARE_TYPE type;
++ gckDB sharedDB = gcvNULL;
++ gckKERNEL kernel = gcvNULL;
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ gcmkHEADER_ARG("IrqLine3D0=%d RegisterMemBase3D0=0x%08x RegisterMemSize3D0=%u "
++ "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
++ "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
++ "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
++ "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
++ IrqLine3D0, RegisterMemBase3D0, RegisterMemSize3D0,
++ IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
++ IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
++ ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
++ PhysBaseAddr, PhysSize, Signal);
++#else
++ gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
++ "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
++ "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
++ "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
++ "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
++ IrqLine, RegisterMemBase, RegisterMemSize,
++ IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
++ IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
++ ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
++ PhysBaseAddr, PhysSize, Signal);
++#endif
++
++#if gcdDISABLE_CORES_2D3D
++ IrqLine = -1;
++ IrqLine2D = -1;
++#endif
++
++ /* Allocate device structure. */
++ device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN);
++
++ if (!device)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ memset(device, 0, sizeof(struct _gckGALDEVICE));
++
++ device->dbgNode = gcvNULL;
++
++ device->platform = Args->platform;
++
++ gcmkONERROR(_DebugfsInit(device));
++
++ if (gckDEBUGFS_CreateNode(
++ device, LogFileSize, device->debugfsDir.root ,DEBUG_FILE, &(device->dbgNode)))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the debug file system %s/%s \n",
++ __FUNCTION__, __LINE__,
++ PARENT_FILE, DEBUG_FILE
++ );
++ }
++ else if (LogFileSize)
++ {
++ gckDEBUGFS_SetCurrentNode(device->dbgNode);
++ }
++
++#if gcdMULTI_GPU
++ if (IrqLine3D0 != -1)
++ {
++ device->requestedRegisterMemBase3D[gcvCORE_3D_0_ID] = RegisterMemBase3D0;
++ device->requestedRegisterMemSize3D[gcvCORE_3D_0_ID] = RegisterMemSize3D0;
++ }
++
++ if (IrqLine3D1 != -1)
++ {
++ device->requestedRegisterMemBase3D[gcvCORE_3D_1_ID] = RegisterMemBase3D1;
++ device->requestedRegisterMemSize3D[gcvCORE_3D_1_ID] = RegisterMemSize3D1;
++ }
++#elif gcdMULTI_GPU_AFFINITY
++ if (IrqLine3D0 != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase3D0;
++ device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize3D0;
++ }
++
++ if (IrqLine3D1 != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_OCL] = RegisterMemBase3D1;
++ device->requestedRegisterMemSizes[gcvCORE_OCL] = RegisterMemSize3D1;
++ }
++#else
++ if (IrqLine != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase;
++ device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize;
++ }
++#endif
++
++ if (IrqLine2D != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_2D] = RegisterMemBase2D;
++ device->requestedRegisterMemSizes[gcvCORE_2D] = RegisterMemSize2D;
++ }
++
++ if (IrqLineVG != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_VG] = RegisterMemBaseVG;
++ device->requestedRegisterMemSizes[gcvCORE_VG] = RegisterMemSizeVG;
++ }
++
++ device->requestedContiguousBase = 0;
++ device->requestedContiguousSize = 0;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ physical = device->requestedRegisterMemBase3D[j];
++
++ /* Set up register memory region. */
++ if (physical != 0)
++ {
++ mem_region = request_mem_region(physical,
++ device->requestedRegisterMemSize3D[j],
++ "galcore register region");
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSize3D[j]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->registerBase3D[j] = (gctPOINTER) ioremap_nocache(
++ physical, device->requestedRegisterMemSize3D[j]);
++
++ if (device->registerBase3D[j] == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSize3D[j]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ physical += device->requestedRegisterMemSize3D[j];
++ }
++ else
++ {
++ device->registerBase3D[j] = gcvNULL;
++ }
++ }
++ }
++ else
++#endif
++ {
++ physical = device->requestedRegisterMemBases[i];
++
++ /* Set up register memory region. */
++ if (physical != 0)
++ {
++ mem_region = request_mem_region(physical,
++ device->requestedRegisterMemSizes[i],
++ "galcore register region");
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->registerBases[i] = (gctPOINTER) ioremap_nocache(
++ physical, device->requestedRegisterMemSizes[i]);
++
++ if (device->registerBases[i] == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ physical += device->requestedRegisterMemSizes[i];
++ }
++ }
++ }
++
++ /* Set the base address */
++ device->baseAddress = device->physBase = PhysBaseAddr;
++ device->physSize = PhysSize;
++ device->mmu = Args->mmu;
++
++ /* Construct the gckOS object. */
++ gcmkONERROR(gckOS_Construct(device, &device->os));
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ if (IrqLine3D0 != -1)
++#else
++ if (IrqLine != -1)
++#endif
++ {
++ /* Construct the gckKERNEL object. */
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_MAJOR, device,
++ gcvNULL, &device->kernels[gcvCORE_MAJOR]));
++
++ sharedDB = device->kernels[gcvCORE_MAJOR]->db;
++
++ /* Initialize core mapping */
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_MAJOR;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_MAJOR]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(
++ device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
++ ));
++
++ if(PowerManagement != -1)
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_MAJOR]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_MAJOR]->hardware, gcvTRUE
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_MAJOR]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_MAJOR]->hardware, gcvTRUE
++ ));
++ }
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gcmkONERROR(gckHARDWARE_SetMinFscaleValue(
++ device->kernels[gcvCORE_MAJOR]->hardware, Args->gpu3DMinClock
++ ));
++#endif
++
++ gcmkONERROR(gckHARDWARE_SetGpuProfiler(
++ device->kernels[gcvCORE_MAJOR]->hardware, GpuProfiler
++ ));
++
++ gcmkVERIFY_OK(gckKERNEL_SetRecovery(
++ device->kernels[gcvCORE_MAJOR], Args->recovery, Args->stuckDump
++ ));
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_MAJOR] = gcvNULL;
++ }
++
++#if gcdMULTI_GPU_AFFINITY
++ if (IrqLine3D1 != -1)
++ {
++ /* Construct the gckKERNEL object. */
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_OCL, device,
++ gcvNULL, &device->kernels[gcvCORE_OCL]));
++
++ if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_OCL]->db;
++
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_OCL;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_OCL] = gcvCORE_OCL;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_OCL]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(
++ device->kernels[gcvCORE_OCL]->hardware, FastClear, Compression
++ ));
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gcmkONERROR(gckHARDWARE_SetMinFscaleValue(
++ device->kernels[gcvCORE_OCL]->hardware, Args->gpu3DMinClock
++ ));
++#endif
++ if(PowerManagement != -1)
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_OCL]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_OCL]->hardware, PowerManagement
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_OCL]->hardware, gcvTRUE
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_OCL]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_OCL]->hardware, gcvTRUE
++ ));
++ }
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_OCL]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_OCL] = gcvNULL;
++ }
++#endif
++
++ if (IrqLine2D != -1)
++ {
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_2D, device,
++ sharedDB, &device->kernels[gcvCORE_2D]));
++
++ if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;
++
++ /* Verify the hardware type */
++ gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));
++
++ if (type != gcvHARDWARE_2D)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unexpected hardware type: %d\n",
++ __FUNCTION__, __LINE__,
++ type
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL
++#if gcdMULTI_GPU_AFFINITY
++ && device->kernels[gcvCORE_OCL] == gcvNULL
++#endif
++ )
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_2D;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_2D]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR_2D,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR_2D,
++ device
++ ));
++
++ if(PowerManagement != -1)
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_2D]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_2D]->hardware, PowerManagement
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_2D]->hardware, gcvTRUE
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_2D]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_2D]->hardware, gcvTRUE
++ ));
++ }
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gcmkONERROR(gckHARDWARE_SetMinFscaleValue(
++ device->kernels[gcvCORE_2D]->hardware, 1
++ ));
++#endif
++
++ gcmkVERIFY_OK(gckKERNEL_SetRecovery(
++ device->kernels[gcvCORE_2D], Args->recovery, Args->stuckDump
++ ));
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_2D] = gcvNULL;
++ }
++
++ if (IrqLineVG != -1)
++ {
++#if gcdENABLE_VG
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_VG, device,
++ sharedDB, &device->kernels[gcvCORE_VG]));
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL
++ && device->kernels[gcvCORE_2D] == gcvNULL
++#if gcdMULTI_GPU_AFFINITY
++ && device->kernels[gcvCORE_OCL] == gcvNULL
++#endif
++ )
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_VG;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
++ }
++
++ if(PowerManagement != -1)
++ {
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_VG]->vg->hardware,
++ PowerManagement
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_VG]->vg->hardware,
++ gcvTRUE
++ ));
++ }
++
++
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_VG] = gcvNULL;
++ }
++
++ /* Initialize the ISR. */
++#if gcdMULTI_GPU
++ device->irqLine3D[gcvCORE_3D_0_ID] = IrqLine3D0;
++#if gcdMULTI_GPU > 1
++ device->irqLine3D[gcvCORE_3D_1_ID] = IrqLine3D1;
++#endif
++#elif gcdMULTI_GPU_AFFINITY
++ device->irqLines[gcvCORE_MAJOR] = IrqLine3D0;
++ device->irqLines[gcvCORE_OCL] = IrqLine3D1;
++#else
++ device->irqLines[gcvCORE_MAJOR] = IrqLine;
++#endif
++ device->irqLines[gcvCORE_2D] = IrqLine2D;
++ device->irqLines[gcvCORE_VG] = IrqLineVG;
++
++ /* Initialize the kernel thread semaphores. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (device->irqLine3D[j] != -1) init_waitqueue_head(&device->intrWaitQueue3D[j]);
++ }
++ }
++ else
++#endif
++ {
++ if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
++ }
++ }
++
++ device->signal = Signal;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL) break;
++ }
++
++ if (i == gcdMAX_GPU_COUNT)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
++ device->kernels[i]->vg->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++ /* query the amount of video memory */
++ gcmkONERROR(gckVGHARDWARE_QueryMemory(
++ device->kernels[i]->vg->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++ else
++#endif
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckHARDWARE_QuerySystemMemory(
++ device->kernels[i]->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++
++ /* query the amount of video memory */
++ gcmkONERROR(gckHARDWARE_QueryMemory(
++ device->kernels[i]->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++
++
++ /* Grab the first availiable kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (device->irqLine3D[j] != -1)
++ {
++ kernel = device->kernels[i];
++ break;
++ }
++ }
++ }
++ else
++#endif
++ {
++ if (device->irqLines[i] != -1)
++ {
++ kernel = device->kernels[i];
++ break;
++ }
++ }
++ }
++
++ /* Set up the internal memory region. */
++ if (device->internalSize > 0)
++ {
++ status = gckVIDMEM_Construct(
++ device->os,
++ internalBaseAddress, device->internalSize, internalAlignment,
++ 0, &device->internalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable internal heap. */
++ device->internalSize = 0;
++ }
++ else
++ {
++ /* Map internal memory. */
++ device->internalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->internalSize);
++
++ if (device->internalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical);
++ physical += device->internalSize;
++ }
++ }
++
++ if (device->externalSize > 0)
++ {
++ /* create the external memory heap */
++ status = gckVIDMEM_Construct(
++ device->os,
++ externalBaseAddress, device->externalSize, externalAlignment,
++ 0, &device->externalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable internal heap. */
++ device->externalSize = 0;
++ }
++ else
++ {
++ /* Map external memory. */
++ device->externalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->externalSize);
++
++ if (device->externalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical);
++ physical += device->externalSize;
++ }
++ }
++
++ /* set up the contiguous memory */
++ device->contiguousSize = ContiguousSize;
++
++ if (ContiguousSize > 0)
++ {
++ if (ContiguousBase == 0)
++ {
++ while (device->contiguousSize > 0)
++ {
++ /* Allocate contiguous memory. */
++ status = _AllocateMemory(
++ device,
++ device->contiguousSize,
++ &device->contiguousBase,
++ &device->contiguousPhysical,
++ &physAddr
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical);
++ status = gckVIDMEM_Construct(
++ device->os,
++ physAddr | device->systemMemoryBaseAddress,
++ device->contiguousSize,
++ 64,
++ BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++
++ gcmkONERROR(_FreeMemory(
++ device,
++ device->contiguousBase,
++ device->contiguousPhysical
++ ));
++
++ gcmRELEASE_NAME(device->contiguousPhysicalName);
++ device->contiguousBase = gcvNULL;
++ device->contiguousPhysical = gcvNULL;
++ }
++
++ if (device->contiguousSize <= (4 << 20))
++ {
++ device->contiguousSize = 0;
++ }
++ else
++ {
++ device->contiguousSize -= (4 << 20);
++ }
++ }
++ }
++ else
++ {
++ /* Create the contiguous memory heap. */
++ status = gckVIDMEM_Construct(
++ device->os,
++ ContiguousBase | device->systemMemoryBaseAddress,
++ ContiguousSize,
++ 64, BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable contiguous memory pool. */
++ device->contiguousVidMem = gcvNULL;
++ device->contiguousSize = 0;
++ }
++ else
++ {
++ if (Args->contiguousRequested == gcvFALSE)
++ {
++ mem_region = request_mem_region(
++ ContiguousBase, ContiguousSize, "galcore managed memory"
++ );
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ ContiguousSize, ContiguousBase
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ device->requestedContiguousBase = ContiguousBase;
++ device->requestedContiguousSize = ContiguousSize;
++ device->contiguousRequested = Args->contiguousRequested;
++
++ device->contiguousPhysical = gcvNULL;
++ device->contiguousPhysicalName = 0;
++ device->contiguousSize = ContiguousSize;
++ device->contiguousMapped = gcvTRUE;
++ }
++ }
++ }
++
++ /* Return pointer to the device. */
++ *Device = device;
++
++ gcmkFOOTER_ARG("*Device=0x%x", * Device);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Destroy
++**
++** Class destructor.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Destroy(
++ gckGALDEVICE Device)
++{
++ gctINT i;
++#if gcdMULTI_GPU
++ gctINT j;
++#endif
++ gckKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ if (Device != gcvNULL)
++ {
++ /* Grab the first availiable kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (Device->irqLine3D[j] != -1)
++ {
++ kernel = Device->kernels[i];
++ break;
++ }
++ }
++ }
++ else
++#endif
++ {
++ if (Device->irqLines[i] != -1)
++ {
++ kernel = Device->kernels[i];
++ break;
++ }
++ }
++ }
++
++ if (Device->internalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->internalPhysicalName);
++ Device->internalPhysicalName = 0;
++ }
++ if (Device->externalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->externalPhysicalName);
++ Device->externalPhysicalName = 0;
++ }
++ if (Device->contiguousPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->contiguousPhysicalName);
++ Device->contiguousPhysicalName = 0;
++ }
++
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->kernels[i] != gcvNULL)
++ {
++ /* Destroy the gckKERNEL object. */
++ gcmkVERIFY_OK(gckKERNEL_Destroy(Device->kernels[i]));
++ Device->kernels[i] = gcvNULL;
++ }
++ }
++
++ if (Device->internalLogical != gcvNULL)
++ {
++ /* Unmap the internal memory. */
++ iounmap(Device->internalLogical);
++ Device->internalLogical = gcvNULL;
++ }
++
++ if (Device->internalVidMem != gcvNULL)
++ {
++ /* Destroy the internal heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->internalVidMem));
++ Device->internalVidMem = gcvNULL;
++ }
++
++ if (Device->externalLogical != gcvNULL)
++ {
++ /* Unmap the external memory. */
++ iounmap(Device->externalLogical);
++ Device->externalLogical = gcvNULL;
++ }
++
++ if (Device->externalVidMem != gcvNULL)
++ {
++ /* destroy the external heap */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->externalVidMem));
++ Device->externalVidMem = gcvNULL;
++ }
++
++ if (Device->contiguousBase != gcvNULL)
++ {
++ if (Device->contiguousMapped == gcvFALSE)
++ {
++ gcmkVERIFY_OK(_FreeMemory(
++ Device,
++ Device->contiguousBase,
++ Device->contiguousPhysical
++ ));
++ }
++
++ Device->contiguousBase = gcvNULL;
++ Device->contiguousPhysical = gcvNULL;
++ }
++
++ if (Device->requestedContiguousBase != 0
++ && Device->contiguousRequested == gcvFALSE
++ )
++ {
++ release_mem_region(Device->requestedContiguousBase, Device->requestedContiguousSize);
++ Device->requestedContiguousBase = 0;
++ Device->requestedContiguousSize = 0;
++ }
++
++ if (Device->contiguousVidMem != gcvNULL)
++ {
++ /* Destroy the contiguous heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->contiguousVidMem));
++ Device->contiguousVidMem = gcvNULL;
++ }
++
++ if (Device->dbgNode)
++ {
++ gckDEBUGFS_FreeNode(Device->dbgNode);
++
++ if(Device->dbgNode != gcvNULL)
++ {
++ kfree(Device->dbgNode);
++ Device->dbgNode = gcvNULL;
++ }
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (Device->registerBase3D[j] != gcvNULL)
++ {
++ /* Unmap register memory. */
++ iounmap(Device->registerBase3D[j]);
++ if (Device->requestedRegisterMemBase3D[j] != 0)
++ {
++ release_mem_region(Device->requestedRegisterMemBase3D[j],
++ Device->requestedRegisterMemSize3D[j]);
++ }
++
++ Device->registerBase3D[j] = gcvNULL;
++ Device->requestedRegisterMemBase3D[j] = 0;
++ Device->requestedRegisterMemSize3D[j] = 0;
++ }
++ }
++ }
++ else
++#endif
++ {
++ if (Device->registerBases[i] != gcvNULL)
++ {
++ /* Unmap register memory. */
++ iounmap(Device->registerBases[i]);
++ if (Device->requestedRegisterMemBases[i] != 0)
++ {
++ release_mem_region(Device->requestedRegisterMemBases[i],
++ Device->requestedRegisterMemSizes[i]);
++ }
++
++ Device->registerBases[i] = gcvNULL;
++ Device->requestedRegisterMemBases[i] = 0;
++ Device->requestedRegisterMemSizes[i] = 0;
++ }
++ }
++ }
++
++ /* Destroy the gckOS object. */
++ if (Device->os != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Destroy(Device->os));
++ Device->os = gcvNULL;
++ }
++
++ _DebugfsCleanup(Device);
++
++ /* Free the device. */
++ kfree(Device);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Setup_ISR
++**
++** Start the ISR routine.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Setup successfully.
++** gcvSTATUS_GENERIC_IO
++** Setup failed.
++*/
++gceSTATUS
++gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ gctINT ret = 0;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[gcvCORE_MAJOR] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ gc500_handle.handler = isrRoutine;
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++#if gcdMULTI_GPU
++ ret = request_irq(
++ Device->irqLine3D[gcvCORE_3D_0_ID], isrRoutine3D0, IRQF_DISABLED,
++ "galcore_3d_0", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLine3D[gcvCORE_3D_0_ID], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitialized3D[gcvCORE_3D_0_ID] = gcvTRUE;
++
++#if gcdMULTI_GPU > 1
++ ret = request_irq(
++ Device->irqLine3D[gcvCORE_3D_1_ID], isrRoutine3D1, IRQF_DISABLED,
++ "galcore_3d_1", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLine3D[gcvCORE_3D_1_ID], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitialized3D[gcvCORE_3D_1_ID] = gcvTRUE;
++#endif
++#elif gcdMULTI_GPU_AFFINITY
++ ret = request_irq(
++ Device->irqLines[gcvCORE_MAJOR], isrRoutine3D0, IRQF_DISABLED,
++ "galcore_3d_0", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_MAJOR], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++
++ ret = request_irq(
++ Device->irqLines[gcvCORE_OCL], isrRoutine3D1, IRQF_DISABLED,
++ "galcore_3d_1", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_OCL], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_OCL] = gcvTRUE;
++#else
++ ret = request_irq(
++ Device->irqLines[gcvCORE_MAJOR], isrRoutine, IRQF_DISABLED,
++ "galcore interrupt service", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_MAJOR], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++#endif
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckGALDEVICE_Setup_ISR_2D(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ gctINT ret;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[gcvCORE_2D] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ gc500_handle.handler = isrRoutine2D;
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++ ret = request_irq(
++ Device->irqLines[gcvCORE_2D], isrRoutine2D, IRQF_DISABLED,
++ "galcore interrupt service for 2D", Device
++ );
++#endif
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_2D], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_2D] = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckGALDEVICE_Setup_ISR_VG(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ gctINT ret;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[gcvCORE_VG] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ gc500_handle.handler = isrRoutineVG;
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++ ret = request_irq(
++ Device->irqLines[gcvCORE_VG], isrRoutineVG, IRQF_DISABLED,
++ "galcore interrupt service for 2D", Device
++ );
++#endif
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_VG], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_VG] = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Release_ISR
++**
++** Release the irq line.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++#if gcdMULTI_GPU
++ /* release the irq */
++ if (Device->isrInitialized3D[gcvCORE_3D_0_ID])
++ {
++ free_irq(Device->irqLine3D[gcvCORE_3D_0_ID], Device);
++ Device->isrInitialized3D[gcvCORE_3D_0_ID] = gcvFALSE;
++ }
++#if gcdMULTI_GPU > 1
++ /* release the irq */
++ if (Device->isrInitialized3D[gcvCORE_3D_1_ID])
++ {
++ free_irq(Device->irqLine3D[gcvCORE_3D_1_ID], Device);
++ Device->isrInitialized3D[gcvCORE_3D_1_ID] = gcvFALSE;
++ }
++#endif
++#else
++ /* release the irq */
++ if (Device->isrInitializeds[gcvCORE_MAJOR])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[gcvCORE_MAJOR], Device);
++#endif
++ Device->isrInitializeds[gcvCORE_MAJOR] = gcvFALSE;
++ }
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckGALDEVICE_Release_ISR_2D(
++ IN gckGALDEVICE Device
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* release the irq */
++ if (Device->isrInitializeds[gcvCORE_2D])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[gcvCORE_2D], Device);
++#endif
++
++ Device->isrInitializeds[gcvCORE_2D] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckGALDEVICE_Release_ISR_VG(
++ IN gckGALDEVICE Device
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* release the irq */
++ if (Device->isrInitializeds[gcvCORE_VG])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[gcvCORE_VG], Device);
++#endif
++
++ Device->isrInitializeds[gcvCORE_VG] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start_Threads
++**
++** Start the daemon threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++** gcvSTATUS_GENERIC_IO
++** Start failed.
++*/
++gceSTATUS
++gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ struct task_struct * task;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++#if gcdMULTI_GPU
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine3D0, Device, "galcore_3d_0");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxt3D[gcvCORE_3D_0_ID] = task;
++ Device->threadInitialized3D[gcvCORE_3D_0_ID] = gcvTRUE;
++
++#if gcdMULTI_GPU > 1
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine3D1, Device, "galcore_3d_1");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxt3D[gcvCORE_3D_1_ID] = task;
++ Device->threadInitialized3D[gcvCORE_3D_1_ID] = gcvTRUE;
++#endif
++ }
++#elif gcdMULTI_GPU_AFFINITY
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine3D0, Device, "galcore_3d_0");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_MAJOR] = task;
++ Device->threadInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++ }
++
++ if (Device->kernels[gcvCORE_OCL] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine3D1, Device, "galcore_3d_1");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_OCL] = task;
++ Device->threadInitializeds[gcvCORE_OCL] = gcvTRUE;
++ }
++#else
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine, Device, "galcore daemon thread");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_MAJOR] = task;
++ Device->threadInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++ }
++#endif
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine2D, Device, "galcore daemon thread for 2D");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_2D] = task;
++ Device->threadInitializeds[gcvCORE_2D] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_2D] = gcvFALSE;
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutineVG, Device, "galcore daemon thread for VG");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_VG] = task;
++ Device->threadInitializeds[gcvCORE_VG] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_VG] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop_Threads
++**
++** Stop the gal device, including the following actions: stop the daemon
++** thread, release the irq.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ )
++{
++ gctINT i;
++#if gcdMULTI_GPU
++ gctINT j;
++#endif
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ /* Stop the kernel threads. */
++ if (Device->threadInitialized3D[j])
++ {
++ Device->killThread = gcvTRUE;
++ Device->dataReady3D[j] = gcvTRUE;
++ wake_up_interruptible(&Device->intrWaitQueue3D[j]);
++
++ kthread_stop(Device->threadCtxt3D[j]);
++ Device->threadCtxt3D[j] = gcvNULL;
++ Device->threadInitialized3D[j] = gcvFALSE;
++ }
++ }
++ }
++ else
++#endif
++ {
++ /* Stop the kernel threads. */
++ if (Device->threadInitializeds[i])
++ {
++ Device->killThread = gcvTRUE;
++ up(&Device->semas[i]);
++
++ kthread_stop(Device->threadCtxts[i]);
++ Device->threadCtxts[i] = gcvNULL;
++ Device->threadInitializeds[i] = gcvFALSE;
++ }
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start
++**
++** Start the gal device, including the following actions: setup the isr routine
++** and start the daemoni thread.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++*/
++gceSTATUS
++gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ /* Start the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Start_Threads(Device));
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device));
++
++ /* Switch to SUSPEND power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR_2D(Device));
++
++ /* Switch to SUSPEND power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR_VG(Device));
++
++#if gcdENABLE_VG
++ /* Switch to SUSPEND power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++#endif
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop
++**
++** Stop the gal device, including the following actions: stop the daemon
++** thread, release the irq.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF
++ ));
++
++ /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR_2D(Device));
++
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR_VG(Device));
++
++#if gcdENABLE_VG
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF
++ ));
++#endif
++ }
++
++ /* Stop the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Stop_Threads(Device));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,215 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_device_h_
++#define __gc_hal_kernel_device_h_
++
++#include "gc_hal_kernel_debugfs.h"
++
++/******************************************************************************\
++******************************* gckGALDEVICE Structure *******************************
++\******************************************************************************/
++
++typedef struct _gckGALDEVICE
++{
++ /* Objects. */
++ gckOS os;
++ gckKERNEL kernels[gcdMAX_GPU_COUNT];
++
++ gcsPLATFORM* platform;
++
++ /* Attributes. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctUINT32 internalPhysicalName;
++ gctPOINTER internalLogical;
++ gckVIDMEM internalVidMem;
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctUINT32 externalPhysicalName;
++ gctPOINTER externalLogical;
++ gckVIDMEM externalVidMem;
++ gckVIDMEM contiguousVidMem;
++ gctPOINTER contiguousBase;
++ gctPHYS_ADDR contiguousPhysical;
++ gctUINT32 contiguousPhysicalName;
++ gctSIZE_T contiguousSize;
++ gctBOOL contiguousMapped;
++ gctPOINTER contiguousMappedUser;
++ gctBOOL contiguousRequested;
++ gctSIZE_T systemMemorySize;
++ gctUINT32 systemMemoryBaseAddress;
++#if gcdMULTI_GPU
++ gctPOINTER registerBase3D[gcdMULTI_GPU];
++ gctSIZE_T registerSize3D[gcdMULTI_GPU];
++#endif
++ gctPOINTER registerBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T registerSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 baseAddress;
++ gctUINT32 physBase;
++ gctUINT32 physSize;
++ gctBOOL mmu;
++#if gcdMULTI_GPU
++ gctUINT32 requestedRegisterMemBase3D[gcdMULTI_GPU];
++ gctSIZE_T requestedRegisterMemSize3D[gcdMULTI_GPU];
++#endif
++ gctUINT32 requestedRegisterMemBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T requestedRegisterMemSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 requestedContiguousBase;
++ gctSIZE_T requestedContiguousSize;
++
++ /* IRQ management. */
++#if gcdMULTI_GPU
++ gctINT irqLine3D[gcdMULTI_GPU];
++ gctBOOL isrInitialized3D[gcdMULTI_GPU];
++ gctBOOL dataReady3D[gcdMULTI_GPU];
++#endif
++ gctINT irqLines[gcdMAX_GPU_COUNT];
++ gctBOOL isrInitializeds[gcdMAX_GPU_COUNT];
++
++ /* Thread management. */
++#if gcdMULTI_GPU
++ struct task_struct *threadCtxt3D[gcdMULTI_GPU];
++ wait_queue_head_t intrWaitQueue3D[gcdMULTI_GPU];
++ gctBOOL threadInitialized3D[gcdMULTI_GPU];
++#endif
++ struct task_struct *threadCtxts[gcdMAX_GPU_COUNT];
++ struct semaphore semas[gcdMAX_GPU_COUNT];
++ gctBOOL threadInitializeds[gcdMAX_GPU_COUNT];
++ gctBOOL killThread;
++
++ /* Signal management. */
++ gctINT signal;
++
++ /* Core mapping */
++ gceCORE coreMapping[8];
++
++ /* States before suspend. */
++ gceCHIPPOWERSTATE statesStored[gcdMAX_GPU_COUNT];
++
++ /* Device Debug File System Entry in kernel. */
++ struct _gcsDEBUGFS_Node * dbgNode;
++
++ gcsDEBUGFS_DIR debugfsDir;
++}
++* gckGALDEVICE;
++
++typedef struct _gcsHAL_PRIVATE_DATA
++{
++ gckGALDEVICE device;
++ gctPOINTER mappedMemory;
++ gctPOINTER contiguousLogical;
++ /* The process opening the device may not be the same as the one that closes it. */
++ gctUINT32 pidOpen;
++}
++gcsHAL_PRIVATE_DATA, * gcsHAL_PRIVATE_DATA_PTR;
++
++typedef struct _gcsDEVICE_CONSTRUCT_ARGS
++{
++ gctBOOL recovery;
++ gctUINT stuckDump;
++ gctUINT gpu3DMinClock;
++
++ gctBOOL contiguousRequested;
++ gcsPLATFORM* platform;
++ gctBOOL mmu;
++}
++gcsDEVICE_CONSTRUCT_ARGS;
++
++gceSTATUS gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Setup_ISR_2D(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Setup_ISR_VG(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR_2D(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR_VG(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Construct(
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ IN gctINT IrqLine3D0,
++ IN gctUINT32 RegisterMemBase3D0,
++ IN gctSIZE_T RegisterMemSize3D0,
++ IN gctINT IrqLine3D1,
++ IN gctUINT32 RegisterMemBase3D1,
++ IN gctSIZE_T RegisterMemSize3D1,
++#else
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++#endif
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ IN gcsDEVICE_CONSTRUCT_ARGS * Args,
++ OUT gckGALDEVICE *Device
++ );
++
++gceSTATUS gckGALDEVICE_Destroy(
++ IN gckGALDEVICE Device
++ );
++
++#endif /* __gc_hal_kernel_device_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_iommu.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_iommu.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_iommu.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_iommu.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,216 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_device.h"
++
++#include <linux/iommu.h>
++#include <linux/platform_device.h>
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsIOMMU
++{
++ struct iommu_domain * domain;
++ struct device * device;
++}
++gcsIOMMU;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static int
++_IOMMU_Fault_Handler(
++ struct iommu_domain * Domain,
++ struct device * Dev,
++ unsigned long DomainAddress,
++ int flags,
++ void * args
++ )
++#else
++static int
++_IOMMU_Fault_Handler(
++ struct iommu_domain * Domain,
++ struct device * Dev,
++ unsigned long DomainAddress,
++ int flags
++ )
++#endif
++{
++ return 0;
++}
++
++static int
++_FlatMapping(
++ IN gckIOMMU Iommu
++ )
++{
++ gceSTATUS status;
++ gctUINT32 physical;
++
++ for (physical = 0; physical < 0x80000000; physical += PAGE_SIZE)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "Map %x => %x bytes = %d",
++ physical, physical, PAGE_SIZE
++ );
++
++ gcmkONERROR(gckIOMMU_Map(Iommu, physical, physical, PAGE_SIZE));
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++void
++gckIOMMU_Destory(
++ IN gckOS Os,
++ IN gckIOMMU Iommu
++ )
++{
++ gcmkHEADER();
++
++ if (Iommu->domain && Iommu->device)
++ {
++ iommu_attach_device(Iommu->domain, Iommu->device);
++ }
++
++ if (Iommu->domain)
++ {
++ iommu_domain_free(Iommu->domain);
++ }
++
++ if (Iommu)
++ {
++ gcmkOS_SAFE_FREE(Os, Iommu);
++ }
++
++ gcmkFOOTER_NO();
++}
++
++gceSTATUS
++gckIOMMU_Construct(
++ IN gckOS Os,
++ OUT gckIOMMU * Iommu
++ )
++{
++ gceSTATUS status;
++ gckIOMMU iommu = gcvNULL;
++ struct device *dev;
++ int ret;
++
++ gcmkHEADER();
++
++ dev = &Os->device->platform->device->dev;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsIOMMU), (gctPOINTER *)&iommu));
++
++ gckOS_ZeroMemory(iommu, gcmSIZEOF(gcsIOMMU));
++
++ iommu->domain = iommu_domain_alloc(&platform_bus_type);
++
++ if (!iommu->domain)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "iommu_domain_alloc() fail");
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler, dev);
++#else
++ iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler);
++#endif
++
++ ret = iommu_attach_device(iommu->domain, dev);
++
++ if (ret)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS, "iommu_attach_device() fail %d", ret);
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ iommu->device = dev;
++
++ _FlatMapping(iommu);
++
++ *Iommu = iommu;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ gckIOMMU_Destory(Os, iommu);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckIOMMU_Map(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes
++ )
++{
++ gceSTATUS status;
++ int ret;
++
++ gcmkHEADER_ARG("DomainAddress=%#X, Physical=%#X, Bytes=%d",
++ DomainAddress, Physical, Bytes);
++
++ ret = iommu_map(Iommu->domain, DomainAddress, Physical, Bytes, 0);
++
++ if (ret)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ gcmkFOOTER();
++ return status;
++
++}
++
++gceSTATUS
++gckIOMMU_Unmap(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Bytes
++ )
++{
++ gcmkHEADER();
++
++ iommu_unmap(Iommu->domain, DomainAddress, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,497 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryVideoMemory
++**
++** Query the amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to an gcsHAL_INTERFACE structure that will be filled in with
++** the memory information.
++*/
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Get internal memory size and physical address. */
++ Interface->u.QueryVideoMemory.internalSize = device->internalSize;
++ Interface->u.QueryVideoMemory.internalPhysical = device->internalPhysicalName;
++
++ /* Get external memory size and physical address. */
++ Interface->u.QueryVideoMemory.externalSize = device->externalSize;
++ Interface->u.QueryVideoMemory.externalPhysical = device->externalPhysicalName;
++
++ /* Get contiguous memory size and physical address. */
++ Interface->u.QueryVideoMemory.contiguousSize = device->contiguousSize;
++ Interface->u.QueryVideoMemory.contiguousPhysical = device->contiguousPhysicalName;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_GetVideoMemoryPool
++**
++** Get the gckVIDMEM object belonging to the specified pool.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcePOOL Pool
++** Pool to query gckVIDMEM object for.
++**
++** OUTPUT:
++**
++** gckVIDMEM * VideoMemory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object belonging to the requested pool.
++*/
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ )
++{
++ gckGALDEVICE device;
++ gckVIDMEM videoMemory;
++
++ gcmkHEADER_ARG("Kernel=%p Pool=%d", Kernel, Pool);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(VideoMemory != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Dispatch on pool. */
++ switch (Pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ videoMemory = device->internalVidMem;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ videoMemory = device->externalVidMem;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ videoMemory = device->contiguousVidMem;
++ break;
++
++ default:
++ /* Unknown pool. */
++ videoMemory = NULL;
++ }
++
++ /* Return pointer to the gckVIDMEM object. */
++ *VideoMemory = videoMemory;
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*VideoMemory=%p", *VideoMemory);
++ return (videoMemory == NULL) ? gcvSTATUS_OUT_OF_MEMORY : gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapMemory
++**
++** Map video memory into the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the base address of the mapped
++** memory region.
++*/
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_MapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_UnmapMemory
++**
++** Unmap video memory from the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** gctPOINTER Logical
++** Base address of the mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_UnmapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapVideoMemory
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckGALDEVICE device = gcvNULL;
++ PLINUX_MDL mdl = gcvNULL;
++ PLINUX_MDL_MAP mdlMap = gcvNULL;
++ gcePOOL pool = gcvPOOL_UNKNOWN;
++ gctUINT32 offset = 0;
++ gctUINT32 base = 0;
++ gceSTATUS status;
++ gctPOINTER logical = gcvNULL;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Kernel=%p InUserSpace=%d Address=%08x",
++ Kernel, InUserSpace, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware, Address, &pool, &offset));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckHARDWARE_SplitMemory(Kernel->hardware, Address, &pool, &offset));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ if (device->contiguousMapped)
++ {
++ logical = device->contiguousBase;
++ }
++ else
++ {
++ gctINT processID;
++ gckOS_GetProcessID(&processID);
++
++ mdl = (PLINUX_MDL) device->contiguousPhysical;
++
++ mdlMap = FindMdlMap(mdl, processID);
++ gcmkASSERT(mdlMap);
++
++ logical = (gctPOINTER) mdlMap->vmaAddr;
++ }
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
++ device->contiguousVidMem->baseAddress,
++ &pool,
++ &base));
++ }
++ else
++#endif
++ {
++ gctUINT32 systemBaseAddress = 0;
++
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &systemBaseAddress));
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_CPUPhysicalToGPUPhysical(
++ Kernel->os,
++ device->contiguousVidMem->baseAddress - systemBaseAddress,
++ &baseAddress
++ ));
++
++ gcmkVERIFY_OK(
++ gckHARDWARE_SplitMemory(Kernel->hardware,
++ baseAddress,
++ &pool,
++ &base));
++ }
++ offset -= base;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Build logical address of specified address. */
++ *Logical = (gctPOINTER) ((gctUINT8_PTR) logical + offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=%p", *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Retunn the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapVideoMemory
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ return gckKERNEL_MapVideoMemoryEx(Kernel, gcvCORE_MAJOR, InUserSpace, Address, Logical);
++}
++/*******************************************************************************
++**
++** gckKERNEL_Notify
++**
++**  This function is called by clients to notify the gckKERNEL object of an event.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gceNOTIFY Notification
++** Notification event.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gceNOTIFY Notification,
++ IN gctBOOL Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=%p Notification=%d Data=%d",
++ Kernel, Notification, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++    /* Dispatch on notification. */
++ switch (Notification)
++ {
++ case gcvNOTIFY_INTERRUPT:
++ /* Process the interrupt. */
++#if COMMAND_PROCESSOR_VERSION > 1
++ status = gckINTERRUPT_Notify(Kernel->interrupt, Data);
++#else
++ status = gckHARDWARE_Interrupt(Kernel->hardware,
++#if gcdMULTI_GPU
++ CoreId,
++#endif
++ Data);
++#endif
++ break;
++
++ default:
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Settings != gcvNULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Fill in signal. */
++ Settings->signal = device->signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("Settings->signal=%d", Settings->signal);
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,399 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_linux_h_
++#define __gc_hal_kernel_linux_h_
++
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#ifdef FLAREON
++# include <asm/arch-realview/dove_gpio_irq.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <linux/dma-mapping.h>
++#include <linux/kthread.h>
++
++#include <linux/idr.h>
++
++#ifdef MODVERSIONS
++# include <linux/modversions.h>
++#endif
++#include <asm/io.h>
++#include <asm/uaccess.h>
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++#include <linux/clk.h>
++#endif
++
++#define NTSTRSAFE_NO_CCH_FUNCTIONS
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_platform.h"
++#include "gc_hal_kernel_device.h"
++#include "gc_hal_kernel_os.h"
++#include "gc_hal_kernel_debugfs.h"
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
++#define FIND_TASK_BY_PID(x) pid_task(find_vpid(x), PIDTYPE_PID)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#define FIND_TASK_BY_PID(x) find_task_by_vpid(x)
++#else
++#define FIND_TASK_BY_PID(x) find_task_by_pid(x)
++#endif
++
++#define _WIDE(string) L##string
++#define WIDE(string) _WIDE(string)
++
++#define countof(a) (sizeof(a) / sizeof(a[0]))
++
++#ifndef DEVICE_NAME
++#ifdef CONFIG_DOVE_GPU
++# define DEVICE_NAME "dove_gpu"
++#else
++# define DEVICE_NAME "galcore"
++#endif
++#endif
++
++#define GetPageCount(size, offset) ((((size) + ((offset) & ~PAGE_CACHE_MASK)) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION (3,7,0)
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP)
++#else
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
++#endif
++
++/* Protection bit when mapping memory to user space */
++#define gcmkPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE
++#define gcmkIOREMAP ioremap_wc
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++#elif !gcdNONPAGED_MEMORY_CACHEABLE
++#define gcmkIOREMAP ioremap_nocache
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_noncached(x)
++#endif
++
++#define gcdSUPPRESS_OOM_MESSAGE 1
++
++#if gcdSUPPRESS_OOM_MESSAGE
++#define gcdNOWARN __GFP_NOWARN
++#else
++#define gcdNOWARN 0
++#endif
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++typedef struct _gcsIOMMU * gckIOMMU;
++
++typedef struct _gcsUSER_MAPPING * gcsUSER_MAPPING_PTR;
++typedef struct _gcsUSER_MAPPING
++{
++ /* Pointer to next mapping structure. */
++ gcsUSER_MAPPING_PTR next;
++
++ /* Physical address of this mapping. */
++ gctUINT32 physical;
++
++ /* Logical address of this mapping. */
++ gctPOINTER logical;
++
++ /* Number of bytes of this mapping. */
++ gctSIZE_T bytes;
++
++ /* Starting address of this mapping. */
++ gctINT8_PTR start;
++
++ /* Ending address of this mapping. */
++ gctINT8_PTR end;
++}
++gcsUSER_MAPPING;
++
++typedef struct _gcsINTEGER_DB * gcsINTEGER_DB_PTR;
++typedef struct _gcsINTEGER_DB
++{
++ struct idr idr;
++ spinlock_t lock;
++ gctINT curr;
++}
++gcsINTEGER_DB;
++
++struct _gckOS
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to device */
++ gckGALDEVICE device;
++
++ /* Memory management */
++ gctPOINTER memoryLock;
++ gctPOINTER memoryMapLock;
++
++ struct _LINUX_MDL *mdlHead;
++ struct _LINUX_MDL *mdlTail;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* Signal management. */
++
++ /* Lock. */
++ gctPOINTER signalMutex;
++
++ /* signal id database. */
++ gcsINTEGER_DB signalDB;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /* Lock. */
++ gctPOINTER syncPointMutex;
++
++ /* sync point id database. */
++ gcsINTEGER_DB syncPointDB;
++#endif
++
++ gcsUSER_MAPPING_PTR userMap;
++ gctPOINTER debugLock;
++
++ /* workqueue for os timer. */
++ struct workqueue_struct * workqueue;
++
++ /* Allocate extra page to avoid cache overflow */
++ struct page* paddingPage;
++
++ /* Detect unfreed allocation. */
++ atomic_t allocateCount;
++
++ struct list_head allocatorList;
++
++ gcsDEBUGFS_DIR allocatorDebugfsDir;
++
++ /* Lock for register access check. */
++ struct mutex registerAccessLocks[gcdMAX_GPU_COUNT];
++
++ /* External power states. */
++ gctBOOL powerStates[gcdMAX_GPU_COUNT];
++
++ /* External clock states. */
++ gctBOOL clockStates[gcdMAX_GPU_COUNT];
++
++ /* IOMMU. */
++ gckIOMMU iommu;
++};
++
++typedef struct _gcsSIGNAL * gcsSIGNAL_PTR;
++typedef struct _gcsSIGNAL
++{
++ /* Kernel sync primitive. */
++ struct completion obj;
++
++ /* Manual reset flag. */
++ gctBOOL manualReset;
++
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* The owner of the signal. */
++ gctHANDLE process;
++
++ gckHARDWARE hardware;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSIGNAL;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++typedef struct _gcsSYNC_POINT * gcsSYNC_POINT_PTR;
++typedef struct _gcsSYNC_POINT
++{
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* State. */
++ atomic_t state;
++
++ /* timeline. */
++ struct sync_timeline * timeline;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSYNC_POINT;
++#endif
++
++typedef struct _gcsPageInfo * gcsPageInfo_PTR;
++typedef struct _gcsPageInfo
++{
++ struct page **pages;
++ gctUINT32_PTR pageTable;
++ gctUINT32 extraPage;
++ gctUINT32 address;
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU mmu;
++#endif
++}
++gcsPageInfo;
++
++typedef struct _gcsOSTIMER * gcsOSTIMER_PTR;
++typedef struct _gcsOSTIMER
++{
++ struct delayed_work work;
++ gctTIMERFUNCTION function;
++ gctPOINTER data;
++} gcsOSTIMER;
++
++gceSTATUS
++gckOS_ImportAllocators(
++ gckOS Os
++ );
++
++gceSTATUS
++gckOS_FreeAllocators(
++ gckOS Os
++ );
++
++gceSTATUS
++_HandleOuterCache(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Type
++ );
++
++gceSTATUS
++_ConvertLogical2Physical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ IN PLINUX_MDL Mdl,
++ OUT gctUINT32_PTR Physical
++ );
++
++gctSTRING
++_CreateKernelVirtualMapping(
++ IN PLINUX_MDL Mdl
++ );
++
++void
++_DestoryKernelVirtualMapping(
++ IN gctSTRING Addr
++ );
++
++void
++_UnmapUserLogical(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ );
++
++static inline gctINT
++_GetProcessID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_tgid_vnr(current);
++#else
++ return current->tgid;
++#endif
++}
++
++static inline struct page *
++_NonContiguousToPage(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return Pages[Index];
++}
++
++static inline unsigned long
++_NonContiguousToPfn(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_pfn(_NonContiguousToPage(Pages, Index));
++}
++
++static inline unsigned long
++_NonContiguousToPhys(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_phys(_NonContiguousToPage(Pages, Index));
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++static inline int
++is_vmalloc_addr(
++ void *Addr
++ )
++{
++ unsigned long addr = (unsigned long)Addr;
++
++ return addr >= VMALLOC_START && addr < VMALLOC_END;
++}
++#endif
++
++#ifdef CONFIG_IOMMU_SUPPORT
++void
++gckIOMMU_Destory(
++ IN gckOS Os,
++ IN gckIOMMU Iommu
++ );
++
++gceSTATUS
++gckIOMMU_Construct(
++ IN gckOS Os,
++ OUT gckIOMMU * Iommu
++ );
++
++gceSTATUS
++gckIOMMU_Map(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes
++ );
++
++gceSTATUS
++gckIOMMU_Unmap(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Bytes
++ );
++#endif
++
++#endif /* __gc_hal_kernel_linux_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_math.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_math.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_math.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_math.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,32 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ )
++{
++ if(Y ==0) {return 0;}
++ else {return X % Y;}
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,8740 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++#include <linux/irqflags.h>
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++#include <linux/math64.h>
++#endif
++#include <linux/delay.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++#include <linux/anon_inodes.h>
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++#include <linux/file.h>
++#include "gc_hal_kernel_sync.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++#include "gc_hal_kernel_allocator.h"
++
++#define MEMORY_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryLock, \
++ gcvINFINITE))
++
++#define MEMORY_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryLock))
++
++#define MEMORY_MAP_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryMapLock, \
++ gcvINFINITE))
++
++#define MEMORY_MAP_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryMapLock))
++
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++static gctINT
++_GetThreadID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_pid_vnr(current);
++#else
++ return current->pid;
++#endif
++}
++
++static PLINUX_MDL
++_CreateMdl(
++ void
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER();
++
++ mdl = (PLINUX_MDL)kzalloc(sizeof(struct _LINUX_MDL), GFP_KERNEL | gcdNOWARN);
++
++ gcmkFOOTER_ARG("0x%X", mdl);
++ return mdl;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ );
++
++static gceSTATUS
++_DestroyMdl(
++ IN PLINUX_MDL Mdl
++ )
++{
++ PLINUX_MDL_MAP mdlMap, next;
++
++ gcmkHEADER_ARG("Mdl=0x%X", Mdl);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Mdl != gcvNULL);
++
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ next = mdlMap->next;
++
++ gcmkVERIFY_OK(_DestroyMdlMap(Mdl, mdlMap));
++
++ mdlMap = next;
++ }
++
++ kfree(Mdl);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++static PLINUX_MDL_MAP
++_CreateMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++
++ mdlMap = (PLINUX_MDL_MAP)kmalloc(sizeof(struct _LINUX_MDL_MAP), GFP_KERNEL | gcdNOWARN);
++ if (mdlMap == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ mdlMap->pid = ProcessID;
++ mdlMap->vmaAddr = gcvNULL;
++ mdlMap->vma = gcvNULL;
++ mdlMap->count = 0;
++
++ mdlMap->next = Mdl->maps;
++ Mdl->maps = mdlMap;
++
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ )
++{
++ PLINUX_MDL_MAP prevMdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X MdlMap=0x%X", Mdl, MdlMap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(MdlMap != gcvNULL);
++ gcmkASSERT(Mdl->maps != gcvNULL);
++
++ if (Mdl->maps == MdlMap)
++ {
++ Mdl->maps = MdlMap->next;
++ }
++ else
++ {
++ prevMdlMap = Mdl->maps;
++
++ while (prevMdlMap->next != MdlMap)
++ {
++ prevMdlMap = prevMdlMap->next;
++
++ gcmkASSERT(prevMdlMap != gcvNULL);
++ }
++
++ prevMdlMap->next = MdlMap->next;
++ }
++
++ kfree(MdlMap);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++ if(Mdl == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if (mdlMap->pid == ProcessID)
++ {
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvNULL;
++}
++
++/*******************************************************************************
++** Integer Id Management.
++*/
++gceSTATUS
++_AllocateIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctPOINTER KernelPointer,
++ OUT gctUINT32 *Id
++ )
++{
++ int result;
++ gctINT next;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ idr_preload(GFP_KERNEL | gcdNOWARN);
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++
++ result = idr_alloc(&Database->idr, KernelPointer, next, 0, GFP_ATOMIC);
++
++ /* ID allocated should not be 0. */
++ gcmkASSERT(result != 0);
++
++ if (result > 0)
++ {
++ Database->curr = *Id = result;
++ }
++
++ spin_unlock(&Database->lock);
++
++ idr_preload_end();
++
++ if (result < 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#else
++again:
++ if (idr_pre_get(&Database->idr, GFP_KERNEL | gcdNOWARN) == 0)
++ {
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++
++ /* Try to get a id greater than 0. */
++ result = idr_get_new_above(&Database->idr, KernelPointer, next, Id);
++
++ if (!result)
++ {
++ Database->curr = *Id;
++ }
++
++ spin_unlock(&Database->lock);
++
++ if (result == -EAGAIN)
++ {
++ goto again;
++ }
++
++ if (result != 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_QueryIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER pointer;
++
++ spin_lock(&Database->lock);
++
++ pointer = idr_find(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++ if(pointer)
++ {
++ *KernelPointer = pointer;
++ return gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_OS,
++ "%s(%d) Id = %d is not found",
++ __FUNCTION__, __LINE__, Id);
++
++ return gcvSTATUS_NOT_FOUND;
++ }
++}
++
++gceSTATUS
++_DestroyIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id
++ )
++{
++ spin_lock(&Database->lock);
++
++ idr_remove(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_QueryProcessPageTable(
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ spinlock_t *lock;
++ gctUINTPTR_T logical = (gctUINTPTR_T)Logical;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if (!current->mm)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pgd = pgd_offset(current->mm, logical);
++ if (pgd_none(*pgd) || pgd_bad(*pgd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pud = pud_offset(pgd, logical);
++ if (pud_none(*pud) || pud_bad(*pud))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pmd = pmd_offset(pud, logical);
++ if (pmd_none(*pmd) || pmd_bad(*pmd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &lock);
++ if (!pte)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ if (!pte_present(*pte))
++ {
++ pte_unmap_unlock(pte, lock);
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ *Address = (pte_pfn(*pte) << PAGE_SHIFT) | (logical & ~PAGE_MASK);
++ pte_unmap_unlock(pte, lock);
++
++ return gcvSTATUS_OK;
++}
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED && defined(CONFIG_OUTER_CACHE)
++static inline gceSTATUS
++outer_func(
++ gceCACHEOPERATION Type,
++ unsigned long Start,
++ unsigned long End
++ )
++{
++ switch (Type)
++ {
++ case gcvCACHE_CLEAN:
++ outer_clean_range(Start, End);
++ break;
++ case gcvCACHE_INVALIDATE:
++ outer_inv_range(Start, End);
++ break;
++ case gcvCACHE_FLUSH:
++ outer_flush_range(Start, End);
++ break;
++ default:
++ return gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_OUTER_CACHE_PATCH
++/*******************************************************************************
++** _HandleOuterCache
++**
++** Handle the outer cache for the specified addresses.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++**
++** gceOUTERCACHE_OPERATION Type
++** Operation need to be execute.
++*/
++gceSTATUS
++_HandleOuterCache(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Type
++ )
++{
++ gceSTATUS status;
++ unsigned long paddr;
++ gctPOINTER vaddr;
++ gctUINT32 offset, bytes, left;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu",
++ Os, Logical, Bytes);
++
++ if (Physical != gcvINVALID_ADDRESS)
++ {
++ /* Non paged memory or gcvPOOL_USER surface */
++ paddr = (unsigned long) Physical;
++ gcmkONERROR(outer_func(Type, paddr, paddr + Bytes));
++ }
++ else
++ {
++ /* Non contiguous virtual memory */
++ vaddr = Logical;
++ left = Bytes;
++
++ while (left)
++ {
++ /* Handle (part of) current page. */
++ offset = (gctUINTPTR_T)vaddr & ~PAGE_MASK;
++
++ bytes = gcmMIN(left, PAGE_SIZE - offset);
++
++ gcmkONERROR(_QueryProcessPageTable(vaddr, (gctUINT32*)&paddr));
++ gcmkONERROR(outer_func(Type, paddr, paddr + bytes));
++
++ vaddr = (gctUINT8_PTR)vaddr + bytes;
++ left -= bytes;
++ }
++ }
++
++ mb();
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++#endif
++
++gctBOOL
++_AllowAccess(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address
++ )
++{
++ gctUINT32 data;
++
++ /* Check external clock state. */
++ if (Os->clockStates[Core] == gcvFALSE)
++ {
++ gcmkPRINT("[galcore]: %s(%d) External clock off", __FUNCTION__, __LINE__);
++ return gcvFALSE;
++ }
++
++ /* Check internal clock state. */
++ if (Address == 0)
++ {
++ return gcvTRUE;
++ }
++
++#if gcdMULTI_GPU
++ if (Core == gcvCORE_MAJOR)
++ {
++ data = readl((gctUINT8 *)Os->device->registerBases[gcvCORE_3D_0_ID] + 0x0);
++ }
++ else
++#endif
++ {
++ data = readl((gctUINT8 *)Os->device->registerBases[Core] + 0x0);
++ }
++
++ if ((data & 0x3) == 0x3)
++ {
++ gcmkPRINT("[galcore]: %s(%d) Internal clock off", __FUNCTION__, __LINE__);
++ return gcvFALSE;
++ }
++
++ return gcvTRUE;
++}
++
++static gceSTATUS
++_ShrinkMemory(
++ IN gckOS Os
++ )
++{
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->shrinkMemory)
++ {
++ platform->ops->shrinkMemory(platform);
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Construct
++**
++** Construct a new gckOS object.
++**
++** INPUT:
++**
++** gctPOINTER Context
++** Pointer to the gckGALDEVICE class.
++**
++** OUTPUT:
++**
++** gckOS * Os
++** Pointer to a variable that will hold the pointer to the gckOS object.
++*/
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gctINT i;
++
++ gcmkHEADER_ARG("Context=0x%X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Os != gcvNULL);
++
++ /* Allocate the gckOS object. */
++ os = (gckOS) kmalloc(gcmSIZEOF(struct _gckOS), GFP_KERNEL | gcdNOWARN);
++
++ if (os == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ /* Zero the memory. */
++ gckOS_ZeroMemory(os, gcmSIZEOF(struct _gckOS));
++
++ /* Initialize the gckOS object. */
++ os->object.type = gcvOBJ_OS;
++
++ /* Set device device. */
++ os->device = Context;
++
++ /* Set allocateCount to 0, gckOS_Allocate has not been used yet. */
++ atomic_set(&os->allocateCount, 0);
++
++ /* Initialize the memory lock. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryLock));
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryMapLock));
++
++ /* Create debug lock mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->debugLock));
++
++ os->mdlHead = os->mdlTail = gcvNULL;
++
++ /* Get the kernel process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&os->kernelProcessID));
++
++ /*
++ * Initialize the signal manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->signalMutex));
++
++ /* Initialize signal id database lock. */
++ spin_lock_init(&os->signalDB.lock);
++
++ /* Initialize signal id database. */
++ idr_init(&os->signalDB.idr);
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Initialize the sync point manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->syncPointMutex));
++
++ /* Initialize sync point id database lock. */
++ spin_lock_init(&os->syncPointDB.lock);
++
++ /* Initialize sync point id database. */
++ idr_init(&os->syncPointDB.idr);
++#endif
++
++ /* Create a workqueue for os timer. */
++ os->workqueue = create_singlethread_workqueue("galcore workqueue");
++
++ if (os->workqueue == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ os->paddingPage = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN);
++ if (os->paddingPage == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ else
++ {
++ SetPageReserved(os->paddingPage);
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ mutex_init(&os->registerAccessLocks[i]);
++ }
++
++ gckOS_ImportAllocators(os);
++
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (((gckGALDEVICE)(os->device))->mmu == gcvFALSE)
++ {
++ /* Only use IOMMU when internal MMU is not enabled. */
++ status = gckIOMMU_Construct(os, &os->iommu);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Fail to setup IOMMU",
++ __FUNCTION__, __LINE__
++ );
++ }
++ }
++#endif
++
++ /* Return pointer to the gckOS object. */
++ *Os = os;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Os=0x%X", *Os);
++ return gcvSTATUS_OK;
++
++OnError:
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (os->syncPointMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->syncPointMutex));
++ }
++#endif
++
++ if (os->signalMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->signalMutex));
++ }
++
++ if (os->memoryMapLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryMapLock));
++ }
++
++ if (os->memoryLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryLock));
++ }
++
++ if (os->debugLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->debugLock));
++ }
++
++ if (os->workqueue != gcvNULL)
++ {
++ destroy_workqueue(os->workqueue);
++ }
++
++ kfree(os);
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Destroy
++**
++** Destroy an gckOS object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ if (Os->paddingPage != gcvNULL)
++ {
++ ClearPageReserved(Os->paddingPage);
++ __free_page(Os->paddingPage);
++ Os->paddingPage = gcvNULL;
++ }
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Destroy the sync point manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->syncPointMutex));
++#endif
++
++ /*
++ * Destroy the signal manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->signalMutex));
++
++ /* Destroy the memory lock. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryMapLock));
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryLock));
++
++ /* Destroy debug lock mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->debugLock));
++
++ /* Wait for all works done. */
++ flush_workqueue(Os->workqueue);
++
++ /* Destory work queue. */
++ destroy_workqueue(Os->workqueue);
++
++ gckOS_FreeAllocators(Os);
++
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (Os->iommu)
++ {
++ gckIOMMU_Destory(Os, Os->iommu);
++ }
++#endif
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++
++ /* Mark the gckOS object as unknown. */
++ Os->object.type = gcvOBJ_UNKNOWN;
++
++
++ /* Free the gckOS object. */
++ kfree(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ gceSTATUS status;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++ gckALLOCATOR allocator = mdl->allocator;
++
++ gcmkHEADER();
++
++ *PageCount = mdl->numPages;
++
++ gcmkONERROR(allocator->ops->MapKernel(allocator, mdl, Logical));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++ gckALLOCATOR allocator = mdl->allocator;
++
++ gcmkHEADER();
++
++ allocator->ops->UnmapKernel(allocator, mdl, Logical);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateUserVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ return gckOS_LockPages(Os, Physical, Bytes, gcvFALSE, Logical, PageCount);
++}
++
++gceSTATUS
++gckOS_DestroyUserVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ return gckOS_UnlockPages(Os, Physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckOS_Allocate
++**
++** Allocate memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ gcmkONERROR(gckOS_AllocateMemory(Os, Bytes, Memory));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Free
++**
++** Free allocated memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Memory=0x%X", Os, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ gcmkONERROR(gckOS_FreeMemory(Os, Memory));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateMemory
++**
++** Allocate memory wrapper.
++**
++** INPUT:
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctPOINTER memory;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ if (Bytes > PAGE_SIZE)
++ {
++ memory = (gctPOINTER) vmalloc(Bytes);
++ }
++ else
++ {
++ memory = (gctPOINTER) kmalloc(Bytes, GFP_KERNEL | gcdNOWARN);
++ }
++
++ if (memory == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Increase count. */
++ atomic_inc(&Os->allocateCount);
++
++ /* Return pointer to the memory allocation. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeMemory
++**
++** Free allocated memory wrapper.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Free the memory from the OS pool. */
++ if (is_vmalloc_addr(Memory))
++ {
++ vfree(Memory);
++ }
++ else
++ {
++ kfree(Memory);
++ }
++
++ /* Decrease count. */
++ atomic_dec(&Os->allocateCount);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapMemory
++**
++** Map physical memory into the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the logical address of the
++** mapped memory.
++*/
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (char *)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (char *)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): mdl->numPages: %d mdl->vmaAddr: 0x%X",
++ __FUNCTION__, __LINE__,
++ mdl->numPages,
++ mdlMap->vmaAddr
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (!mdlMap->vma)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): find_vma error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ up_write(&current->mm->mmap_sem);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_writecombine(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): dma_mmap_coherent error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#else
++#if !gcdPAGED_MEMORY_CACHEABLE
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++# endif
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages*PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): remap_pfn_range error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ up_write(&current->mm->mmap_sem);
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ *Logical = mdlMap->vmaAddr;
++
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemory
++**
++** Unmap physical memory out of the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemoryEx(Os, Physical, Bytes, Logical, _GetProcessID());
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemoryEx
++**
++** Unmap physical memory in the specified process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** gctUINT32 PID
++** Pid of the process that opened the device and mapped this memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X PID=%d",
++ Os, Physical, Bytes, Logical, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PID != 0);
++
++ MEMORY_LOCK(Os);
++
++ if (Logical)
++ {
++ mdlMap = FindMdlMap(mdl, PID);
++
++ if (mdlMap == gcvNULL || mdlMap->vmaAddr == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ _UnmapUserLogical(mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
++
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserLogical
++**
++** Unmap user logical memory out of physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemory(Os, Physical, Bytes, Logical);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateNonPagedMemory
++**
++** Allocate a number of pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that holds the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that hold the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that will hold the physical address of the
++** allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** allocation.
++*/
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctSIZE_T bytes;
++ gctINT numPages;
++ PLINUX_MDL mdl = gcvNULL;
++ PLINUX_MDL_MAP mdlMap = gcvNULL;
++ gctSTRING addr;
++ gckKERNEL kernel;
++#ifdef NO_DMA_COHERENT
++ struct page * page;
++ long size, order;
++ gctPOINTER vaddr;
++#endif
++ gctBOOL locked = gcvFALSE;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Align number of bytes to page size. */
++ bytes = gcmALIGN(*Bytes, PAGE_SIZE);
++
++ /* Get total number of pages.. */
++ numPages = GetPageCount(bytes, 0);
++
++ /* Allocate mdl+vector structure */
++ mdl = _CreateMdl();
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->pagedMem = 0;
++ mdl->numPages = numPages;
++
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++
++#ifndef NO_DMA_COHERENT
++#ifdef CONFIG_ARM64
++ addr = dma_alloc_coherent(gcvNULL,
++#else
++ addr = dma_alloc_writecombine(gcvNULL,
++#endif
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle,
++ GFP_KERNEL | gcdNOWARN);
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ order = get_order(size);
++
++ page = alloc_pages(GFP_KERNEL | gcdNOWARN, order);
++
++ if (page == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ vaddr = (gctPOINTER)page_address(page);
++ mdl->contiguous = gcvTRUE;
++ mdl->u.contiguousPages = page;
++ addr = _CreateKernelVirtualMapping(mdl);
++ mdl->dmaHandle = virt_to_phys(vaddr);
++ mdl->kaddr = vaddr;
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++
++#if !defined(CONFIG_PPC)
++ /* Cache invalidate. */
++ dma_sync_single_for_device(
++ gcvNULL,
++ page_to_phys(page),
++ bytes,
++ DMA_FROM_DEVICE);
++#endif
++
++ while (size > 0)
++ {
++ SetPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++#endif
++
++ if (addr == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ kernel = Os->device->kernels[gcvCORE_MAJOR] != gcvNULL ?
++ Os->device->kernels[gcvCORE_MAJOR] : Os->device->kernels[gcvCORE_2D];
++ if (((Os->device->baseAddress & 0x80000000) != (mdl->dmaHandle & 0x80000000)) &&
++ kernel->hardware->mmuVersion == 0)
++ {
++ mdl->dmaHandle = (mdl->dmaHandle & ~0x80000000)
++ | (Os->device->baseAddress & 0x80000000);
++ }
++
++ mdl->addr = addr;
++
++ if (InUserSpace)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Only after mmap this will be valid. */
++
++ /* We need to map this to user space. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING) vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING) do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_coherent(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): dma_mmap_coherent error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#else
++#if !gcdSECURITY
++ mdlMap->vma->vm_page_prot = gcmkNONPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++#endif
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages * PAGE_SIZE,
++ mdlMap->vma->vm_page_prot))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): remap_pfn_range error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#endif /* NO_DMA_COHERENT */
++
++ up_write(&current->mm->mmap_sem);
++
++ *Logical = mdlMap->vmaAddr;
++ }
++ else
++ {
++#if gcdSECURITY
++ *Logical = (gctPOINTER)mdl->kaddr;
++#else
++ *Logical = (gctPOINTER)mdl->addr;
++#endif
++ }
++
++ /*
++ * Add this to a global list.
++ * Will be used by get physical address
++ * and mapuser pointer functions.
++ */
++
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to the tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Return allocated memory. */
++ *Bytes = bytes;
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdlMap != gcvNULL)
++ {
++ /* Free LINUX_MDL_MAP. */
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ if (mdl != gcvNULL)
++ {
++ /* Free LINUX_MDL. */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++ }
++ *Physical = gcvNULL;
++ *Bytes = 0;
++
++ if (locked)
++ {
++ /* Unlock memory. */
++ MEMORY_UNLOCK(Os);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeNonPagedMemory
++**
++** Free previously allocated and mapped pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes allocated.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocated memory.
++**
++** gctPOINTER Logical
++** Logical address of the allocated memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++#ifdef NO_DMA_COHERENT
++ unsigned size;
++ gctPOINTER vaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu Physical=0x%X Logical=0x%X",
++ Os, Bytes, Physical, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert physical address into a pointer to a MDL. */
++ mdl = (PLINUX_MDL) Physical;
++
++ MEMORY_LOCK(Os);
++
++#ifndef NO_DMA_COHERENT
++#ifdef CONFIG_ARM64
++ dma_free_coherent(gcvNULL,
++#else
++ dma_free_writecombine(gcvNULL,
++#endif
++ mdl->numPages * PAGE_SIZE,
++ mdl->addr,
++ mdl->dmaHandle);
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ vaddr = mdl->kaddr;
++
++ while (size > 0)
++ {
++ ClearPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++
++ free_pages((unsigned long)mdl->kaddr, get_order(mdl->numPages * PAGE_SIZE));
++
++ _DestoryKernelVirtualMapping(mdl->addr);
++#endif /* NO_DMA_COHERENT */
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++        /* No mapped memory should exist when freeing non-paged memory. */
++ gcmkASSERT(mdlMap->vmaAddr == gcvNULL);
++
++ mdlMap = mdlMap->next;
++ }
++
++    /* Remove the node from the global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReadRegister
++**
++** Read data from a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** OUTPUT:
++**
++** gctUINT32 * Data
++** Pointer to a variable that receives the data read from the register.
++*/
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ return gckOS_ReadRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X", Os, Core, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++#if !gcdMULTI_GPU
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++#endif
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ if (!in_interrupt())
++ {
++ mutex_lock(&Os->registerAccessLocks[Core]);
++ }
++
++ BUG_ON(!_AllowAccess(Os, Core, Address));
++
++#if gcdMULTI_GPU
++ if (Core == gcvCORE_MAJOR)
++ {
++ *Data = readl((gctUINT8 *)Os->device->registerBase3D[gcvCORE_3D_0_ID] + Address);
++ }
++ else
++#endif
++ {
++ *Data = readl((gctUINT8 *)Os->device->registerBases[Core] + Address);
++ }
++
++ if (!in_interrupt())
++ {
++ mutex_unlock(&Os->registerAccessLocks[Core]);
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_ReadRegisterByCoreId(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 CoreId,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d CoreId=%d Address=0x%X",
++ Os, Core, CoreId, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ *Data = readl((gctUINT8 *)Os->device->registerBase3D[CoreId] + Address);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_WriteRegister
++**
++** Write data to a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ return gckOS_WriteRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X Data=0x%08x", Os, Core, Address, Data);
++
++#if !gcdMULTI_GPU
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++#endif
++
++ if (!in_interrupt())
++ {
++ mutex_lock(&Os->registerAccessLocks[Core]);
++ }
++
++ BUG_ON(!_AllowAccess(Os, Core, Address));
++
++#if gcdMULTI_GPU
++ if (Core == gcvCORE_MAJOR)
++ {
++ writel(Data, (gctUINT8 *)Os->device->registerBase3D[gcvCORE_3D_0_ID] + Address);
++#if gcdMULTI_GPU > 1
++ writel(Data, (gctUINT8 *)Os->device->registerBase3D[gcvCORE_3D_1_ID] + Address);
++#endif
++ }
++ else
++#endif
++ {
++ writel(Data, (gctUINT8 *)Os->device->registerBases[Core] + Address);
++ }
++
++ if (!in_interrupt())
++ {
++ mutex_unlock(&Os->registerAccessLocks[Core]);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_WriteRegisterByCoreId(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 CoreId,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d CoreId=%d Address=0x%X Data=0x%08x",
++ Os, Core, CoreId, Address, Data);
++
++ writel(Data, (gctUINT8 *)Os->device->registerBase3D[CoreId] + Address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_GetPageSize
++**
++** Get the system's page size.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * PageSize
++** Pointer to a variable that will receive the system's page size.
++*/
++gceSTATUS gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(PageSize != gcvNULL);
++
++ /* Return the page size. */
++ *PageSize = (gctSIZE_T) PAGE_SIZE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageSize", *PageSize);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddress
++**
++** Get the physical system address of a corresponding virtual address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X", Os, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Query page table of current process first. */
++ status = _QueryProcessPageTable(Logical, Address);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Get current process ID. */
++ processID = _GetProcessID();
++
++ /* Route through other function. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddressProcess(Os, Logical, processID, Address));
++ }
++
++ gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Os, *Address, Address));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UserLogicalToPhysical
++**
++** Get the physical system address of a corresponding user virtual address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS gckOS_UserLogicalToPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ return gckOS_GetPhysicalAddress(Os, Logical, Address);
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++gckOS_AddMapping(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcsUSER_MAPPING),
++ (gctPOINTER *) &map));
++
++ map->next = Os->userMap;
++ map->physical = Physical - Os->device->baseAddress;
++ map->logical = Logical;
++ map->bytes = Bytes;
++ map->start = (gctINT8_PTR) Logical;
++ map->end = map->start + Bytes;
++
++ Os->userMap = map;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckOS_RemoveMapping(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map, prev;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ for (map = Os->userMap, prev = gcvNULL; map != gcvNULL; map = map->next)
++ {
++ if ((map->logical == Logical)
++ && (map->bytes == Bytes)
++ )
++ {
++ break;
++ }
++
++ prev = map;
++ }
++
++ if (map == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++
++ if (prev == gcvNULL)
++ {
++ Os->userMap = map->next;
++ }
++ else
++ {
++ prev->next = map->next;
++ }
++
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, map));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++_ConvertLogical2Physical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ IN PLINUX_MDL Mdl,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ gctINT8_PTR base, vBase;
++ gctUINT32 offset;
++ PLINUX_MDL_MAP map;
++ gcsUSER_MAPPING_PTR userMap;
++
++#if gcdSECURITY
++ base = (Mdl == gcvNULL) ? gcvNULL : (gctINT8_PTR) Mdl->kaddr;
++#else
++ base = (Mdl == gcvNULL) ? gcvNULL : (gctINT8_PTR) Mdl->addr;
++#endif
++
++ /* Check for the logical address match. */
++ if ((base != gcvNULL)
++ && ((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - base;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ /* paged memory is not mapped to kernel space. */
++ return gcvSTATUS_INVALID_ADDRESS;
++ }
++ else
++ {
++ *Physical = gcmPTR2INT32(virt_to_phys(base)) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++
++ /* Walk user maps. */
++ for (userMap = Os->userMap; userMap != gcvNULL; userMap = userMap->next)
++ {
++ if (((gctINT8_PTR) Logical >= userMap->start)
++ && ((gctINT8_PTR) Logical < userMap->end)
++ )
++ {
++ *Physical = userMap->physical
++ + (gctUINT32) ((gctINT8_PTR) Logical - userMap->start);
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ if (ProcessID != Os->kernelProcessID)
++ {
++ map = FindMdlMap(Mdl, (gctINT) ProcessID);
++ vBase = (map == gcvNULL) ? gcvNULL : (gctINT8_PTR) map->vmaAddr;
++
++ /* Is the given address within that range. */
++ if ((vBase != gcvNULL)
++ && ((gctINT8_PTR) Logical >= vBase)
++ && ((gctINT8_PTR) Logical < vBase + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - vBase;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ *Physical = _NonContiguousToPhys(Mdl->u.nonContiguousPages, offset/PAGE_SIZE);
++ }
++ else
++ {
++ *Physical = page_to_phys(Mdl->u.contiguousPages) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ /* Address not yet found. */
++ return gcvSTATUS_INVALID_ADDRESS;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddressProcess
++**
++** Get the physical system address of a corresponding virtual address for a
++** given process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** gctUINT32 ProcessID
++** Process ID.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ )
++{
++ PLINUX_MDL mdl;
++ gctINT8_PTR base;
++ gckALLOCATOR allocator = gcvNULL;
++ gceSTATUS status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X ProcessID=%d", Os, Logical, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* First try the contiguous memory pool. */
++ if (Os->device->contiguousMapped)
++ {
++ base = (gctINT8_PTR) Os->device->contiguousBase;
++
++ if (((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Os->device->contiguousSize)
++ )
++ {
++ /* Convert logical address into physical. */
++ *Address = Os->device->contiguousVidMem->baseAddress
++ + (gctINT8_PTR) Logical - base;
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Try the contiguous memory pool. */
++ mdl = (PLINUX_MDL) Os->device->contiguousPhysical;
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Walk all MDLs. */
++ for (mdl = Os->mdlHead; mdl != gcvNULL; mdl = mdl->next)
++ {
++ /* Try this MDL. */
++ allocator = mdl->allocator;
++
++ if (allocator)
++ {
++ status = allocator->ops->LogicalToPhysical(
++ allocator,
++ mdl,
++ Logical,
++ ProcessID,
++ Address
++ );
++ }
++ else
++ {
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ }
++
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkONERROR(status);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPhysical
++**
++** Map a physical address into kernel space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Physical
++** Physical address of the memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the base address of the mapped
++** memory.
++*/
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctPOINTER logical;
++ PLINUX_MDL mdl;
++ gctUINT32 physical = Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* Go through our mapping to see if we know this physical address already. */
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->dmaHandle != 0)
++ {
++ if ((physical >= mdl->dmaHandle)
++ && (physical < mdl->dmaHandle + mdl->numPages * PAGE_SIZE)
++ )
++ {
++ *Logical = mdl->addr + (physical - mdl->dmaHandle);
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ if (mdl == gcvNULL)
++ {
++ struct page * page = pfn_to_page(physical >> PAGE_SHIFT);
++
++ if (pfn_valid(page_to_pfn(page)))
++ {
++ gctUINT32 offset = physical & ~PAGE_MASK;
++ struct page ** pages;
++ gctUINT numPages;
++ gctINT i;
++
++ numPages = GetPageCount(PAGE_ALIGN(offset + Bytes), 0);
++
++ pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ pages[i] = nth_page(page, i);
++ }
++
++ logical = vmap(pages, numPages, 0, gcmkNONPAGED_MEMROY_PROT(PAGE_KERNEL));
++
++ kfree(pages);
++
++ if (logical == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Failed to vmap",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Out of resources. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ logical += offset;
++ }
++ else
++ {
++            /* Map memory as non-cached memory. */
++ request_mem_region(physical, Bytes, "MapRegion");
++ logical = (gctPOINTER) ioremap_nocache(physical, Bytes);
++
++ if (logical == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Failed to ioremap",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Out of resources. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++ }
++
++ /* Return pointer to mapped memory. */
++ *Logical = logical;
++ }
++
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapPhysical
++**
++** Unmap a previously mapped memory region from kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Pointer to the base address of the memory to unmap.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ MEMORY_LOCK(Os);
++
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->addr != gcvNULL)
++ {
++ if (Logical >= (gctPOINTER)mdl->addr
++ && Logical < (gctPOINTER)((gctSTRING)mdl->addr + mdl->numPages * PAGE_SIZE))
++ {
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ if (mdl == gcvNULL)
++ {
++ /* Unmap the memory. */
++ vunmap((void *)((unsigned long)Logical & PAGE_MASK));
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateMutex
++**
++** Create a new mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Mutex
++** Pointer to a variable that will hold a pointer to the mutex.
++*/
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Allocate the mutex structure. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct mutex), Mutex));
++
++ /* Initialize the mutex. */
++ mutex_init(*Mutex);
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Mutex=0x%X", *Mutex);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DeleteMutex
++**
++** Delete a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++**          Pointer to the mutex to be deleted.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%X", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Destroy the mutex. */
++ mutex_destroy((struct mutex *)Mutex);
++
++ /* Free the mutex structure. */
++ gcmkONERROR(gckOS_Free(Os, Mutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireMutex
++**
++** Acquire a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be acquired.
++**
++** gctUINT32 Timeout
++** Timeout value specified in milliseconds.
++** Specify the value of gcvINFINITE to keep the thread suspended
++** until the mutex has been acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x Timeout=%u", Os, Mutex, Timeout);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ if (Timeout == gcvINFINITE)
++ {
++ /* Lock the mutex. */
++ mutex_lock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ for (;;)
++ {
++ /* Try to acquire the mutex. */
++ if (mutex_trylock(Mutex))
++ {
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ if (Timeout-- == 0)
++ {
++ break;
++ }
++
++ /* Wait for 1 millisecond. */
++ gcmkVERIFY_OK(gckOS_Delay(Os, 1));
++ }
++
++ /* Timeout. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_TIMEOUT);
++ return gcvSTATUS_TIMEOUT;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseMutex
++**
++** Release an acquired mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Release the mutex. */
++ mutex_unlock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchange
++**
++** Atomically exchange a pair of 32-bit values.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN OUT gctINT32_PTR Target
++** Pointer to the 32-bit value to exchange.
++**
++** IN gctINT32 NewValue
++** Specifies a new value for the 32-bit value pointed to by Target.
++**
++** OUT gctINT32_PTR OldValue
++** The old value of the 32-bit value pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=%u", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(OldValue != gcvNULL);
++
++ /* Exchange the pair of 32-bit values. */
++ *OldValue = (gctUINT32) atomic_xchg((atomic_t *) Target, (int) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=%u", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchangePtr
++**
++** Atomically exchange a pair of pointers.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN OUT gctPOINTER * Target
++** Pointer to the 32-bit value to exchange.
++**
++** IN gctPOINTER NewValue
++** Specifies a new value for the pointer pointed to by Target.
++**
++** OUT gctPOINTER * OldValue
++** The old value of the pointer pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=0x%X", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(OldValue != gcvNULL);
++
++ /* Exchange the pair of pointers. */
++ *OldValue = (gctPOINTER)(gctUINTPTR_T) atomic_xchg((atomic_t *) Target, (int)(gctUINTPTR_T) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=0x%X", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicSetMask
++**
++** Atomically set mask to Atom
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to set.
++**
++** IN gctUINT32 Mask
++** Mask to set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval | Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomClearMask
++**
++** Atomically clear mask from Atom
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to clear.
++**
++** IN gctUINT32 Mask
++** Mask to clear.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval & ~Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Allocate the atom. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(atomic_t), Atom));
++
++ /* Initialize the atom. */
++ atomic_set((atomic_t *) *Atom, 0);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Atom=0x%X", *Atom);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Free the atom. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, Atom));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable the receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Return the current value of atom. */
++ *Value = atomic_read((atomic_t *) Atom);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x Value=%d", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Set the current value of atom. */
++ atomic_set((atomic_t *) Atom, Value);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Increment the atom. */
++ *Value = atomic_inc_return((atomic_t *) Atom) - 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Decrement the atom. */
++ *Value = atomic_dec_return((atomic_t *) Atom) + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Delay
++**
++** Delay execution of the current thread for a number of milliseconds.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Delay
++** Delay to sleep, specified in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Delay=%u", Os, Delay);
++
++ if (Delay > 0)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ ktime_t delay = ktime_set((Delay / MSEC_PER_SEC), (Delay % MSEC_PER_SEC) * NSEC_PER_MSEC);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
++#else
++ msleep(Delay);
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetTicks
++**
++** Get the number of milliseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT32_PTR Time
++** Pointer to a variable to get time.
++**
++*/
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ )
++{
++ gcmkHEADER();
++
++ *Time = jiffies_to_msecs(jiffies);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_TicksAfter
++**
++** Compare time values got from gckOS_GetTicks.
++**
++** INPUT:
++** gctUINT32 Time1
++** First time value to be compared.
++**
++** gctUINT32 Time2
++** Second time value to be compared.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR IsAfter
++** Pointer to a variable to result.
++**
++*/
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ )
++{
++ gcmkHEADER();
++
++ *IsAfter = time_after((unsigned long)Time1, (unsigned long)Time2);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetTime
++**
++** Get the number of microseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT64_PTR Time
++** Pointer to a variable to get time.
++**
++*/
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ )
++{
++ struct timeval tv;
++ gcmkHEADER();
++
++ /* Return the time of day in microseconds. */
++ do_gettimeofday(&tv);
++ *Time = (tv.tv_sec * 1000000ULL) + tv.tv_usec;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MemoryBarrier
++**
++** Make sure the CPU has executed everything up to this point and the data got
++** written to the specified pointer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Address
++** Address of memory that needs to be barriered.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X", Os, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE \
++ && defined (CONFIG_ARM) \
++ && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ /* drain write buffer */
++ dsb();
++
++ /* drain outer cache's write buffer? */
++#else
++ mb();
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemory
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ /* Allocate the memory. */
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(Os, gcvALLOC_FLAG_NONE, Bytes, gcvNULL, Physical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemoryEx
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Flag
++** Allocation attribute.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctUINT32 * Gid
++** Save the global ID for the piece of allocated memory.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctUINT32 Flag,
++ IN gctSIZE_T Bytes,
++ OUT gctUINT32 * Gid,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gctINT numPages;
++ PLINUX_MDL mdl = gcvNULL;
++ gctSIZE_T bytes;
++ gceSTATUS status = gcvSTATUS_OUT_OF_MEMORY;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Flag=%x Bytes=%lu", Os, Flag, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ bytes = gcmALIGN(Bytes, PAGE_SIZE);
++
++ numPages = GetPageCount(bytes, 0);
++
++ mdl = _CreateMdl();
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Walk all allocators. */
++ list_for_each_entry(allocator, &Os->allocatorList, head)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d) flag = %x allocator->capability = %x",
++ __FUNCTION__, __LINE__, Flag, allocator->capability);
++
++ if ((Flag & allocator->capability) != Flag)
++ {
++ continue;
++ }
++
++ status = allocator->ops->Alloc(allocator, mdl, numPages, Flag);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ mdl->allocator = allocator;
++ break;
++ }
++ }
++
++ /* Check status. */
++ gcmkONERROR(status);
++
++ mdl->dmaHandle = 0;
++ mdl->addr = 0;
++ mdl->numPages = numPages;
++ mdl->pagedMem = 1;
++ mdl->contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS;
++
++ if (Gid != gcvNULL)
++ {
++ *Gid = mdl->gid;
++ }
++
++ MEMORY_LOCK(Os);
++
++ /*
++ * Add this to a global list.
++ * Will be used by get physical address
++ * and mapuser pointer functions.
++ */
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Return physical address. */
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdl != gcvNULL)
++ {
++ /* Free the memory. */
++ _DestroyMdl(mdl);
++ }
++ *Physical = gcvNULL;
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("Os=0x%X Flag=%x Bytes=%lu", Os, Flag, Bytes);
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreePagedMemory
++**
++** Free memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL) Physical;
++ gckALLOCATOR allocator = (gckALLOCATOR)mdl->allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ MEMORY_LOCK(Os);
++
++ /* Remove the node from global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ allocator->ops->Free(allocator, mdl);
++
++ /* Free the structure... */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_LockPages
++**
++** Lock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctBOOL Cacheable
++** Cache mode of mapping.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the address of the mapped
++** memory.
++**
++** gctSIZE_T * PageCount
++** Pointer to a variable that receives the number of pages required for
++** the page table according to the GPU page size.
++*/
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ gceSTATUS status;
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount != gcvNULL);
++
++ mdl = (PLINUX_MDL) Physical;
++ allocator = mdl->allocator;
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++ status = allocator->ops->MapUser(allocator, mdl, mdlMap, Cacheable);
++
++ if (gcmIS_ERROR(status))
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", status);
++ return status;
++ }
++ }
++
++ mdlMap->count++;
++
++ /* Convert pointer to MDL. */
++ *Logical = mdlMap->vmaAddr;
++
++ /* Return the page number according to the GPU page size. */
++ gcmkASSERT((PAGE_SIZE % 4096) == 0);
++ gcmkASSERT((PAGE_SIZE / 4096) >= 1);
++
++ *PageCount = mdl->numPages * (PAGE_SIZE / 4096);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(gckOS_CacheFlush(
++ Os,
++ _GetProcessID(),
++ Physical,
++ gcvINVALID_ADDRESS,
++ (gctPOINTER)mdlMap->vmaAddr,
++ mdl->numPages * PAGE_SIZE
++ ));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X *PageCount=%lu", *Logical, *PageCount);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPages
++**
++** Map paged memory into a page table.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T PageCount
++** Number of pages required for the physical address.
++**
++** gctPOINTER PageTable
++** Pointer to the page table to fill in.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ )
++{
++ return gckOS_MapPagesEx(Os,
++ gcvCORE_MAJOR,
++ Physical,
++ PageCount,
++ 0,
++ PageTable);
++}
++
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctUINT32 Address,
++ IN gctPOINTER PageTable
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ PLINUX_MDL mdl;
++ gctUINT32* table;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gckMMU mmu;
++ PLINUX_MDL mmuMdl;
++ gctUINT32 bytes;
++ gctPHYS_ADDR pageTablePhysical;
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gckKERNEL kernel = Os->device->kernels[Core];
++ gckMMU mmu;
++#endif
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Physical=0x%X PageCount=%u PageTable=0x%X",
++ Os, Core, Physical, PageCount, PageTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ /* Convert pointer to MDL. */
++ mdl = (PLINUX_MDL)Physical;
++
++ allocator = mdl->allocator;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Physical->0x%X PageCount->0x%X PagedMemory->?%d",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)Physical,
++ (gctUINT32)(gctUINTPTR_T)PageCount,
++ mdl->pagedMem
++ );
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckKERNEL_GetProcessMMU(kernel, &mmu));
++#endif
++
++ table = (gctUINT32 *)PageTable;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ mmu = Os->device->kernels[Core]->mmu;
++ bytes = PageCount * sizeof(*table);
++ mmuMdl = (PLINUX_MDL)mmu->pageTablePhysical;
++#endif
++
++ /* Get all the physical addresses and store them in the page table. */
++
++ offset = 0;
++ PageCount = PageCount / (PAGE_SIZE / 4096);
++
++ /* Try to get the user pages so DMA can happen. */
++ while (PageCount-- > 0)
++ {
++ gctUINT i;
++ gctUINT32 phys = ~0;
++
++ if (mdl->pagedMem && !mdl->contiguous)
++ {
++ allocator->ops->Physical(allocator, mdl, offset, &phys);
++ }
++ else
++ {
++ if (!mdl->pagedMem)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): we should not get this call for Non Paged Memory!",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ phys = page_to_phys(nth_page(mdl->u.contiguousPages, offset));
++ }
++
++ gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Os, phys, &phys));
++
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (Os->iommu)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Setup mapping in IOMMU %x => %x",
++ __FUNCTION__, __LINE__,
++ Address + (offset * PAGE_SIZE), phys
++ );
++
++ /* When use IOMMU, GPU use system PAGE_SIZE. */
++ gcmkONERROR(gckIOMMU_Map(
++ Os->iommu, Address + (offset * PAGE_SIZE), phys, PAGE_SIZE));
++ }
++ else
++#endif
++ {
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ for (i = 0; i < (PAGE_SIZE / 4096); i++)
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ phys + (i * 4096),
++ table++));
++ }
++ }
++ else
++#endif
++ {
++ for (i = 0; i < (PAGE_SIZE / 4096); i++)
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ gctUINT32_PTR pageTableEntry;
++ gckMMU_GetPageEntry(mmu, Address + (offset * 4096), &pageTableEntry);
++ gcmkONERROR(
++ gckMMU_SetPage(mmu,
++ phys + (i * 4096),
++ pageTableEntry));
++#else
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ phys + (i * 4096),
++ table++));
++#endif
++ }
++ }
++ }
++
++ offset += 1;
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Get physical address of pageTable */
++ pageTablePhysical = (gctPHYS_ADDR)(mmuMdl->dmaHandle +
++ ((gctUINT32 *)PageTable - mmu->pageTableLogical));
++
++ /* Flush the mmu page table cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Os,
++ _GetProcessID(),
++ gcvNULL,
++ pageTablePhysical,
++ PageTable,
++ bytes
++ ));
++#endif
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_UnmapPages(
++ IN gckOS Os,
++ IN gctSIZE_T PageCount,
++ IN gctUINT32 Address
++ )
++{
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (Os->iommu)
++ {
++ gcmkVERIFY_OK(gckIOMMU_Unmap(
++ Os->iommu, Address, PageCount * PAGE_SIZE));
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnlockPages
++**
++** Unlock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctPOINTER Logical
++** Address of the mapped memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++ gckALLOCATOR allocator = mdl->allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%u Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if ((mdlMap->vmaAddr != gcvNULL) && (_GetProcessID() == mdlMap->pid))
++ {
++ if (--mdlMap->count == 0)
++ {
++ allocator->ops->UnmapUser(
++ allocator,
++ mdlMap->vmaAddr,
++ mdl->numPages * PAGE_SIZE);
++
++ mdlMap->vmaAddr = gcvNULL;
++ }
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckOS_AllocateContiguous
++**
++** Allocate memory from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that receives the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the logical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Same as non-paged memory for now. */
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(Os,
++ InUserSpace,
++ Bytes,
++ Physical,
++ Logical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeContiguous
++**
++** Free memory allocated from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctPOINTER Logical
++** Logicval address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Same of non-paged memory for now. */
++ gcmkONERROR(gckOS_FreeNonPagedMemory(Os, Bytes, Physical, Logical));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++/******************************************************************************
++**
++** gckOS_GetKernelLogical
++**
++** Return the kernel logical pointer that corresponods to the specified
++** hardware address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Hardware physical address.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the pointer in kernel address space.
++*/
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ return gckOS_GetKernelLogicalEx(Os, gcvCORE_MAJOR, Address, KernelPointer);
++}
++
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%08x", Os, Core, Address);
++
++ do
++ {
++ gckGALDEVICE device;
++ gckKERNEL kernel;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctPOINTER logical;
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Os->device;
++
++ /* Kernel shortcut. */
++ kernel = device->kernels[Core];
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_SplitMemory(
++ kernel->vg->hardware, Address, &pool, &offset
++ ));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkERR_BREAK(gckHARDWARE_SplitMemory(
++ kernel->hardware, Address, &pool, &offset
++ ));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ logical = device->contiguousBase;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkFOOTER();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Build logical address of specified address. */
++ * KernelPointer = ((gctUINT8_PTR) logical) + offset;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_MapUserPointer
++**
++** Map a pointer from the user process into the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be mapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be mapped.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the mapped pointer in kernel address
++** space.
++*/
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER buf = gcvNULL;
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu", Os, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ buf = kmalloc(Size, GFP_KERNEL | gcdNOWARN);
++ if (buf == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to allocate memory.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ len = copy_from_user(buf, Pointer, Size);
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data from user.",
++ __FUNCTION__, __LINE__
++ );
++
++ if (buf != gcvNULL)
++ {
++ kfree(buf);
++ }
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ *KernelPointer = buf;
++
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserPointer
++**
++** Unmap a user process pointer from the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be unmapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be unmapped.
++**
++** gctPOINTER KernelPointer
++** Pointer in kernel address space that needs to be unmapped.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ )
++{
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu KernelPointer=0x%X",
++ Os, Pointer, Size, KernelPointer);
++
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ len = copy_to_user(Pointer, KernelPointer, Size);
++
++ kfree(KernelPointer);
++
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data to user.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++** Query whether the memory can be accessed or mapped directly or it has to be
++** copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++** gcvFALSE if the memory can be accessed or mapped dircetly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d", Os, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(NeedCopy != gcvNULL);
++
++ /* We need to copy data. */
++ *NeedCopy = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedCopy=%d", *NeedCopy);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data from user. */
++ if (copy_from_user(KernelPointer, Pointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data to user. */
++ if (copy_to_user(Pointer, KernelPointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WriteMemory
++**
++** Write data to a memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Address
++** Address of the memory to write to.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X Data=%u", Os, Address, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Write memory. */
++ if (access_ok(VERIFY_WRITE, Address, 4))
++ {
++ /* User address. */
++ if(put_user(Data, (gctUINT32*)Address))
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++ }
++ else
++ {
++ /* Kernel address. */
++ *(gctUINT32 *)Address = Data;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapUserMemory
++**
++** Lock down a user buffer and return an DMA'able address to be used by the
++** hardware to access it.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to lock down.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to lock down.
++**
++** OUTPUT:
++**
++** gctPOINTER * Info
++** Pointer to variable receiving the information record required by
++** gckOS_UnmapUserMemory.
++**
++** gctUINT32_PTR Address
++** Pointer to a variable that will receive the address DMA'able by the
++** hardware.
++*/
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x Core=%d Memory=0x%x Size=%lu", Os, Core, Memory, Size);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_AddMapping(Os, *Address, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctSIZE_T pageCount, i, j;
++ gctUINT32_PTR pageTable;
++ gctUINT32 address = 0, physical = ~0U;
++ gctUINTPTR_T start, end, memory;
++ gctUINT32 offset;
++ gctINT result = 0;
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU mmu;
++#endif
++
++ gcsPageInfo_PTR info = gcvNULL;
++ struct page **pages = gcvNULL;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL || Physical != ~0U);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ gctSIZE_T extraPage;
++
++ memory = (gctUINTPTR_T) Memory;
++
++ /* Get the number of required pages. */
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ /* Allocate extra 64 bytes to avoid cache overflow */
++ extraPage = (((memory + gcmALIGN(Size + 64, 64) + PAGE_SIZE - 1) >> PAGE_SHIFT) > end) ? 1 : 0;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageCount: %d.",
++ __FUNCTION__, __LINE__,
++ pageCount
++ );
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ MEMORY_MAP_LOCK(Os);
++
++ /* Allocate the Info struct. */
++ info = (gcsPageInfo_PTR)kmalloc(sizeof(gcsPageInfo), GFP_KERNEL | gcdNOWARN);
++
++ if (info == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ info->extraPage = 0;
++
++ /* Allocate the array of page addresses. */
++ pages = (struct page **)kmalloc((pageCount + extraPage) * sizeof(struct page *), GFP_KERNEL | gcdNOWARN);
++
++ if (pages == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ if (Physical != ~0U)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ pages[i] = pfn_to_page((Physical >> PAGE_SHIFT) + i);
++
++ if (pfn_valid(page_to_pfn(pages[i])))
++ {
++ get_page(pages[i]);
++ }
++ }
++ }
++ else
++ {
++ /* Get the user pages. */
++ down_read(&current->mm->mmap_sem);
++
++ result = get_user_pages(current,
++ current->mm,
++ memory & PAGE_MASK,
++ pageCount,
++ 1,
++ 0,
++ pages,
++ gcvNULL
++ );
++
++ up_read(&current->mm->mmap_sem);
++
++ if (result <=0 || result < pageCount)
++ {
++ struct vm_area_struct *vma;
++
++ /* Release the pages if any. */
++ if (result > 0)
++ {
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++
++ page_cache_release(pages[i]);
++ pages[i] = gcvNULL;
++ }
++
++ result = 0;
++ }
++
++ vma = find_vma(current->mm, memory);
++
++ if (vma && (vma->vm_flags & VM_PFNMAP))
++ {
++ pte_t * pte;
++ spinlock_t * ptl;
++ gctUINTPTR_T logical = memory;
++
++ for (i = 0; i < pageCount; i++)
++ {
++ pgd_t * pgd = pgd_offset(current->mm, logical);
++ pud_t * pud = pud_offset(pgd, logical);
++
++ if (pud)
++ {
++ pmd_t * pmd = pmd_offset(pud, logical);
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &ptl);
++ if (!pte)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ pages[i] = pte_page(*pte);
++ pte_unmap_unlock(pte, ptl);
++
++ /* Advance to next. */
++ logical += PAGE_SIZE;
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Check if this memory is contiguous for old mmu. */
++ if (Os->device->kernels[Core]->hardware->mmuVersion == 0)
++ {
++ for (i = 1; i < pageCount; i++)
++ {
++ if (pages[i] != nth_page(pages[0], i))
++ {
++ /* Non-contiguous. */
++ break;
++ }
++ }
++
++ if (i == pageCount)
++ {
++ /* Contiguous memory. */
++ physical = page_to_phys(pages[0]) | (memory & ~PAGE_MASK);
++
++ if (!((physical - Os->device->baseAddress) & 0x80000000))
++ {
++ kfree(pages);
++ pages = gcvNULL;
++
++ info->pages = gcvNULL;
++ info->pageTable = gcvNULL;
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ *Address = physical - Os->device->baseAddress;
++ *Info = info;
++
++ gcmkVERIFY_OK(
++ gckOS_CPUPhysicalToGPUPhysical(Os, *Address, Address));
++
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x",
++ *Info, *Address);
++
++ return gcvSTATUS_OK;
++ }
++ }
++ }
++
++ /* Reference pages. */
++ for (i = 0; i < pageCount; i++)
++ {
++ if (pfn_valid(page_to_pfn(pages[i])))
++ {
++ get_page(pages[i]);
++ }
++ }
++ }
++ }
++
++ for (i = 0; i < pageCount; i++)
++ {
++#ifdef CONFIG_ARM
++ gctUINT32 data;
++ get_user(data, (gctUINT32*)((memory & PAGE_MASK) + i * PAGE_SIZE));
++#endif
++
++ /* Flush(clean) the data cache. */
++ gcmkONERROR(gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
++ page_to_phys(pages[i]),
++ (gctPOINTER)(memory & PAGE_MASK) + i*PAGE_SIZE,
++ PAGE_SIZE));
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Os->device->kernels[Core], &mmu));
++#endif
++
++ if (extraPage)
++ {
++ pages[pageCount++] = Os->paddingPage;
++ info->extraPage = 1;
++ }
++
++#if gcdSECURITY
++ {
++ gctPHYS_ADDR physicalArrayPhysical;
++ gctPOINTER physicalArrayLogical;
++ gctUINT32_PTR logical;
++ gctSIZE_T bytes = pageCount * gcmSIZEOF(gctUINT32);
++ pageTable = gcvNULL;
++
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ Os,
++ gcvFALSE,
++ &bytes,
++ &physicalArrayPhysical,
++ &physicalArrayLogical
++ ));
++
++ logical = physicalArrayLogical;
++
++ /* Fill the page table. */
++ for (i = 0; i < pageCount; i++)
++ {
++ gctUINT32 phys;
++ phys = page_to_phys(pages[i]);
++
++ logical[i] = phys;
++ }
++ j = 0;
++
++
++ gcmkONERROR(gckKERNEL_SecurityMapMemory(
++ Os->device->kernels[Core],
++ physicalArrayLogical,
++ pageCount,
++ &address
++ ));
++
++ gcmkONERROR(gckOS_FreeNonPagedMemory(
++ Os,
++ 1,
++ physicalArrayPhysical,
++ physicalArrayLogical
++ ));
++ }
++
++#else
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckVGMMU_AllocatePages(Os->device->kernels[Core]->vg->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++ }
++ else
++#endif
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckMMU_AllocatePages(mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++#else
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckMMU_AllocatePages(Os->device->kernels[Core]->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++#endif
++ }
++
++ /* Fill the page table. */
++ for (i = 0; i < pageCount; i++)
++ {
++ gctUINT32 phys;
++ gctUINT32_PTR tab = pageTable + i * (PAGE_SIZE/4096);
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU_GetPageEntry(mmu, address + i * 4096, &tab);
++#endif
++ phys = page_to_phys(pages[i]);
++
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (Os->iommu)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Setup mapping in IOMMU %x => %x",
++ __FUNCTION__, __LINE__,
++ Address + (i * PAGE_SIZE), phys
++ );
++
++ gcmkONERROR(gckIOMMU_Map(
++ Os->iommu, address + i * PAGE_SIZE, phys, PAGE_SIZE));
++ }
++ else
++#endif
++ {
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkVERIFY_OK(
++ gckOS_CPUPhysicalToGPUPhysical(Os, phys, &phys));
++
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ phys,
++ tab));
++ }
++ else
++#endif
++ {
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ phys,
++ tab));
++ }
++
++ for (j = 1; j < (PAGE_SIZE/4096); j++)
++ {
++ pageTable[i * (PAGE_SIZE/4096) + j] = pageTable[i * (PAGE_SIZE/4096)] + 4096 * j;
++ }
++ }
++
++#if !gcdPROCESS_ADDRESS_SPACE
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageTable[%d]: 0x%X 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, phys, pageTable[i]);
++#endif
++ }
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Os->device->kernels[Core]->vg->mmu));
++ }
++ else
++#endif
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ info->mmu = mmu;
++ gcmkONERROR(gckMMU_Flush(mmu));
++#else
++ gcmkONERROR(gckMMU_Flush(Os->device->kernels[Core]->mmu, gcvSURF_TYPE_UNKNOWN));
++#endif
++ }
++#endif
++ info->address = address;
++
++ /* Save pointer to page table. */
++ info->pageTable = pageTable;
++ info->pages = pages;
++
++ *Info = (gctPOINTER) info;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info->pages: 0x%X, info->pageTable: 0x%X, info: 0x%X.",
++ __FUNCTION__, __LINE__,
++ info->pages,
++ info->pageTable,
++ info
++ );
++
++ offset = (Physical != ~0U)
++ ? (Physical & ~PAGE_MASK)
++ : (memory & ~PAGE_MASK);
++
++ /* Return address. */
++ *Address = address + offset;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Address: 0x%X.",
++ __FUNCTION__, __LINE__,
++ *Address
++ );
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++OnError:
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error occured: %d.",
++ __FUNCTION__, __LINE__,
++ status
++ );
++
++ /* Release page array. */
++ if (result > 0 && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: page table is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++ page_cache_release(pages[i]);
++ }
++ }
++
++ if (info!= gcvNULL && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: pages is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page table. */
++ kfree(pages);
++ info->pages = gcvNULL;
++ }
++
++ /* Release page info struct. */
++ if (info != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: info is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page info struct. */
++ kfree(info);
++ *Info = gcvNULL;
++ }
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x", *Info, *Address);
++ }
++ else
++ {
++ gcmkFOOTER();
++ }
++
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserMemory
++**
++** Unlock a user buffer that was previously locked down by
++** gckOS_MapUserMemory.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to unlock.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to unlock.
++**
++** gctPOINTER Info
++** Information record returned by gckOS_MapUserMemory.
++**
++** gctUINT32_PTR Address
++** The address returned by gckOS_MapUserMemory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Memory=0x%X Size=%lu Info=0x%X Address0x%08x",
++ Os, Core, Memory, Size, Info, Address);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_RemoveMapping(Os, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctUINTPTR_T memory, start, end;
++ gcsPageInfo_PTR info;
++ gctSIZE_T pageCount, i;
++ struct page **pages;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ do
++ {
++ info = (gcsPageInfo_PTR) Info;
++
++ pages = info->pages;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info=0x%X, pages=0x%X.",
++ __FUNCTION__, __LINE__,
++ info, pages
++ );
++
++ /* Invalid page array. */
++ if (pages == gcvNULL && info->pageTable == gcvNULL)
++ {
++ kfree(info);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ memory = (gctUINTPTR_T)Memory;
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): memory: 0x%X, pageCount: %d, pageTable: 0x%X.",
++ __FUNCTION__, __LINE__,
++ memory, pageCount, info->pageTable
++ );
++
++ MEMORY_MAP_LOCK(Os);
++
++#if !gcdSECURITY
++ gcmkASSERT(info->pageTable != gcvNULL);
++#endif
++
++ if (info->extraPage)
++ {
++ pageCount += 1;
++ }
++
++#if gcdSECURITY
++ if (info->address > 0x80000000)
++ {
++ gckKERNEL_SecurityUnmapMemory(
++ Os->device->kernels[Core],
++ info->address,
++ pageCount
++ );
++ }
++ else
++ {
++ gcmkPRINT("Wrong address %s(%d) %x", __FUNCTION__, __LINE__, info->address);
++ }
++#else
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Free the pages from the MMU. */
++ gcmkERR_BREAK(gckVGMMU_FreePages(Os->device->kernels[Core]->vg->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkERR_BREAK(gckMMU_FreePagesEx(info->mmu,
++ info->address,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++
++#else
++ gcmkERR_BREAK(gckMMU_FreePages(Os->device->kernels[Core]->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++#endif
++
++ gcmkERR_BREAK(gckOS_UnmapPages(
++ Os,
++ pageCount * (PAGE_SIZE/4096),
++ info->address
++ ));
++ }
++#endif
++
++ if (info->extraPage)
++ {
++ pageCount -= 1;
++ info->extraPage = 0;
++ }
++
++ /* Release the page cache. */
++ if (pages)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pages[%d]: 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, pages[i]
++ );
++
++ if (!PageReserved(pages[i]))
++ {
++ SetPageDirty(pages[i]);
++ }
++
++ if (pfn_valid(page_to_pfn(pages[i])))
++ {
++ page_cache_release(pages[i]);
++ }
++ }
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ if (info != gcvNULL)
++ {
++ /* Free the page array. */
++ if (info->pages != gcvNULL)
++ {
++ kfree(info->pages);
++ }
++
++ kfree(info);
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_GetBaseAddress
++**
++** Get the base address for the physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR BaseAddress
++** Pointer to a variable that will receive the base address.
++*/
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Return base address. */
++ *BaseAddress = Os->device->baseAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_SuspendInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ if (Core == gcvCORE_MAJOR)
++ {
++ disable_irq(Os->device->irqLine3D[gcvCORE_3D_0_ID]);
++ disable_irq(Os->device->irqLine3D[gcvCORE_3D_1_ID]);
++ }
++ else
++ {
++ disable_irq(Os->device->irqLines[Core]);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#else
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ disable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_ResumeInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ if (Core == gcvCORE_MAJOR)
++ {
++ enable_irq(Os->device->irqLine3D[gcvCORE_3D_0_ID]);
++ enable_irq(Os->device->irqLine3D[gcvCORE_3D_1_ID]);
++ }
++ else
++ {
++ enable_irq(Os->device->irqLines[Core]);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#else
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ enable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Destination=0x%X Source=0x%X Bytes=%lu",
++ Destination, Source, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Destination != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Source != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memcpy(Destination, Source, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X Bytes=%lu", Memory, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memset(Memory, 0, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************* Cache Control ********************************
++*******************************************************************************/
++
++/*******************************************************************************
++** gckOS_CacheClean
++**
++** Clean the cache for the specified addresses. The GPU is going to need the
++** data. If the system is allocating memory as non-cachable, this function can
++** be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheClean(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->cache)
++ {
++ platform->ops->cache(
++ platform,
++ ProcessID,
++ Handle,
++ Physical,
++ Logical,
++ Bytes,
++ gcvCACHE_CLEAN
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_TO_DEVICE);
++# else
++ dmac_clean_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, Physical, Logical, Bytes, gcvCACHE_CLEAN);
++#else
++ outer_clean_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++
++ dma_cache_wback((unsigned long) Logical, Bytes);
++
++#elif defined(CONFIG_PPC)
++
++ /* TODO */
++
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_TO_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheInvalidate
++**
++** Invalidate the cache for the specified addresses. The GPU is going to need
++** data. If the system is allocating memory as non-cachable, this function can
++** be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheInvalidate(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->cache)
++ {
++ platform->ops->cache(
++ platform,
++ ProcessID,
++ Handle,
++ Physical,
++ Logical,
++ Bytes,
++ gcvCACHE_INVALIDATE
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_FROM_DEVICE);
++# else
++ dmac_inv_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, Physical, Logical, Bytes, gcvCACHE_INVALIDATE);
++#else
++ outer_inv_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_FROM_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheFlush
++**
++** Clean the cache for the specified addresses and invalidate the lines as
++** well. The GPU is going to need and modify the data. If the system is
++** allocating memory as non-cachable, this function can be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheFlush(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->cache)
++ {
++ platform->ops->cache(
++ platform,
++ ProcessID,
++ Handle,
++ Physical,
++ Logical,
++ Bytes,
++ gcvCACHE_FLUSH
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++ /* Inner cache. */
++ dmac_flush_range(Logical, Logical + Bytes);
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, Physical, Logical, Bytes, gcvCACHE_FLUSH);
++#else
++ outer_flush_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_wback_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_BIDIRECTIONAL);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************* Broadcasting *********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Broadcast
++**
++** System hook for broadcast events from the kernel driver.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gceBROADCAST Reason
++** Reason for the broadcast. Can be one of the following values:
++**
++** gcvBROADCAST_GPU_IDLE
++** Broadcasted when the kernel driver thinks the GPU might be
++** idle. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_COMMIT
++** Broadcasted when any client process commits a command
++** buffer. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_STUCK
++** Broadcasted when the kernel driver hits the timeout waiting
++** for the GPU.
++**
++** gcvBROADCAST_FIRST_PROCESS
++** First process is trying to connect to the kernel.
++**
++** gcvBROADCAST_LAST_PROCESS
++** Last process has detached from the kernel.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Hardware=0x%X Reason=%d", Os, Hardware, Reason);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ switch (Reason)
++ {
++ case gcvBROADCAST_FIRST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "First process has attached");
++ break;
++
++ case gcvBROADCAST_LAST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "Last process has detached");
++
++ /* Put GPU OFF. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++ gcvPOWER_OFF_BROADCAST));
++ break;
++
++ case gcvBROADCAST_GPU_IDLE:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "GPU idle.");
++
++ /* Put GPU IDLE. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++#if gcdPOWER_SUSPEND_WHEN_IDLE
++ gcvPOWER_SUSPEND_BROADCAST));
++#else
++ gcvPOWER_IDLE_BROADCAST));
++#endif
++
++ /* Add idle process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 1,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++ break;
++
++ case gcvBROADCAST_GPU_COMMIT:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "COMMIT has arrived.");
++
++ /* Add busy process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 0,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++
++ /* Put GPU ON. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware, gcvPOWER_ON_AUTO));
++ break;
++
++ case gcvBROADCAST_GPU_STUCK:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_GPU_STUCK\n");
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++
++ case gcvBROADCAST_AXI_BUS_ERROR:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_AXI_BUS_ERROR\n");
++ gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware));
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++
++ case gcvBROADCAST_OUT_OF_MEMORY:
++ gcmkTRACE_N(gcvLEVEL_INFO, 0, "gcvBROADCAST_OUT_OF_MEMORY\n");
++
++ status = _ShrinkMemory(Os);
++
++ if (status == gcvSTATUS_NOT_SUPPORTED)
++ {
++ goto OnError;
++ }
++
++ gcmkONERROR(status);
++
++ break;
++
++ default:
++ /* Skip unimplemented broadcast. */
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_BroadcastHurry
++**
++** The GPU is running too slow.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Urgency
++** The higher the number, the higher the urgency to speed up the GPU.
++** The maximum value is defined by the gcdDYNAMIC_EVENT_THRESHOLD.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Urgency=%u", Os, Hardware, Urgency);
++
++ /* Do whatever you need to do to speed up the GPU now. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_BroadcastCalibrateSpeed
++**
++** Calibrate the speed of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Idle, Time
++** Idle/Time will give the percentage the GPU is idle, so you can use
++** this to calibrate the working point of the GPU.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Idle=%u Time=%u",
++ Os, Hardware, Idle, Time);
++
++    /* Do whatever you need to do to calibrate the GPU speed. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************** Semaphores **********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_CreateSemaphore
++**
++** Create a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Semaphore
++** Pointer to the variable that will receive the created semaphore.
++*/
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore *sem = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Allocate the semaphore structure. */
++ sem = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (sem == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(sem, 1);
++
++ /* Return to caller. */
++ *Semaphore = (gctPOINTER) sem;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireSemaphore
++**
++** Acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore that needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%08X Semaphore=0x%08X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_interruptible((struct semaphore *) Semaphore))
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_TryAcquireSemaphore
++**
++** Try to acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore that needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_trylock((struct semaphore *) Semaphore))
++ {
++ /* Timeout. */
++ status = gcvSTATUS_TIMEOUT;
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseSemaphore
++**
++** Release a previously acquired semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore that needs to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Release the semaphore. */
++ up((struct semaphore *) Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySemaphore
++**
++** Destroy a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore thet needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Free the sempahore structure. */
++ kfree(Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ )
++{
++ /* Get process ID. */
++ if (ProcessID != gcvNULL)
++ {
++ *ProcessID = _GetProcessID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ )
++{
++ /* Get thread ID. */
++ if (ThreadID != gcvNULL)
++ {
++ *ThreadID = _GetThreadID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ )
++{
++ gcsPLATFORM * platform;
++
++ gctBOOL powerChange = gcvFALSE;
++ gctBOOL clockChange = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Clock=%d Power=%d", Os, Core, Clock, Power);
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ platform = Os->device->platform;
++
++ powerChange = (Power != Os->powerStates[Core]);
++
++ clockChange = (Clock != Os->clockStates[Core]);
++
++ if (powerChange && (Power == gcvTRUE))
++ {
++ if (platform && platform->ops->setPower)
++ {
++ gcmkVERIFY_OK(platform->ops->setPower(platform, Core, Power));
++ }
++
++ Os->powerStates[Core] = Power;
++ }
++
++ if (clockChange)
++ {
++ mutex_lock(&Os->registerAccessLocks[Core]);
++
++ if (platform && platform->ops->setClock)
++ {
++ gcmkVERIFY_OK(platform->ops->setClock(platform, Core, Clock));
++ }
++
++ Os->clockStates[Core] = Clock;
++
++ mutex_unlock(&Os->registerAccessLocks[Core]);
++ }
++
++ if (powerChange && (Power == gcvFALSE))
++ {
++ if (platform && platform->ops->setPower)
++ {
++ gcmkVERIFY_OK(platform->ops->setPower(platform, Core, Power));
++ }
++
++ Os->powerStates[Core] = Power;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ResetGPU
++**
++** Reset the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose power is set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gceSTATUS status = gcvSTATUS_NOT_SUPPORTED;
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->reset)
++ {
++ status = platform->ops->reset(platform, Core);
++ }
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_PrepareGPUFrequency
++**
++** Prepare to set GPU frequency and voltage.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose frequency and voltage will be set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_FinishGPUFrequency
++**
++** Finish GPU frequency setting.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose frequency and voltage is set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryGPUFrequency
++**
++** Query the current frequency of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose power is set.
++**
++** gctUINT32 * Frequency
++** Pointer to a gctUINT32 to obtain current frequency, in MHz.
++**
++** gctUINT8 * Scale
++** Pointer to a gctUINT8 to obtain current scale(1 - 64).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUFrequency
++**
++** Set frequency and voltage of the GPU.
++**
++** 1. DVFS manager gives the target scale of full frequency, BSP must find
++** a real frequency according to this scale and board's configure.
++**
++** 2. BSP should find a suitable voltage for this frequency.
++**
++** 3. BSP must make sure setting take effect before this function returns.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gckCORE Core
++** GPU whose power is set.
++**
++** gctUINT8 Scale
++** Target scale of full frequency, range is [1, 64]. 1 means 1/64 of
++** full frequency and 64 means 64/64 of full frequency.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ )
++{
++ struct timespec time;
++
++ ktime_get_ts(&time);
++
++ *Tick = time.tv_nsec + time.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ )
++{
++ struct timespec res;
++
++ hrtimer_get_res(CLOCK_MONOTONIC, &res);
++
++ *TickRate = res.tv_nsec + res.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ )
++{
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++ return div_u64(Ticks, 1000000);
++#else
++ gctUINT64 rem = Ticks;
++ gctUINT64 b = 1000000;
++ gctUINT64 res, d = 1;
++ gctUINT32 high = rem >> 32;
++
++ /* Reduce the thing a bit first */
++ res = 0;
++ if (high >= 1000000)
++ {
++ high /= 1000000;
++ res = (gctUINT64) high << 32;
++ rem -= (gctUINT64) (high * 1000000) << 32;
++ }
++
++ while (((gctINT64) b > 0) && (b < rem))
++ {
++ b <<= 1;
++ d <<= 1;
++ }
++
++ do
++ {
++ if (rem >= b)
++ {
++ rem -= b;
++ res += d;
++ }
++
++ b >>= 1;
++ d >>= 1;
++ }
++ while (d);
++
++ return (gctUINT32) res;
++#endif
++}
++
++/******************************************************************************\
++******************************* Signal Management ******************************
++\******************************************************************************/
++
++#undef _GC_OBJ_ZONE
++#define _GC_OBJ_ZONE gcvZONE_SIGNAL
++
++/*******************************************************************************
++**
++** gckOS_CreateSignal
++**
++** Create a new signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctSIGNAL * Signal
++** Pointer to a variable receiving the created gctSIGNAL.
++*/
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X ManualReset=%d", Os, ManualReset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Create an event structure. */
++ signal = (gcsSIGNAL_PTR) kmalloc(sizeof(gcsSIGNAL), GFP_KERNEL | gcdNOWARN);
++
++ if (signal == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Save the process ID. */
++ signal->process = (gctHANDLE)(gctUINTPTR_T) _GetProcessID();
++ signal->manualReset = ManualReset;
++ signal->hardware = gcvNULL;
++ init_completion(&signal->obj);
++ atomic_set(&signal->ref, 1);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->signalDB, signal, &signal->id));
++
++ *Signal = (gctSIGNAL)(gctUINTPTR_T)signal->id;
++
++ gcmkFOOTER_ARG("*Signal=0x%X", *Signal);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ kfree(signal);
++ }
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ *Hardware = signal->hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ signal->hardware = Hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySignal
++**
++** Destroy a signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X", Os, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (atomic_dec_and_test(&signal->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->signalDB, signal->id));
++
++ /* Free the sgianl. */
++ kfree(signal);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Signal
++**
++** Set a state of the specified signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X State=%d", Os, Signal, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (State)
++ {
++ /* unbind the signal from hardware. */
++ signal->hardware = gcvNULL;
++
++ /* Set the event to a signaled state. */
++ complete(&signal->obj);
++ }
++ else
++ {
++ /* Set the event to an unsignaled state. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)
++ reinit_completion(&signal->obj);
++#else
++ INIT_COMPLETION(signal->obj);
++#endif
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_SetSignalVG(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signals with numbers between 32 and 63 are real-time,
++ send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ printk("gckOS_SetSignalVG:0x%x\n", result);
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_UserSignal
++**
++** Set the specified signal which is owned by a process to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ )
++{
++ gceSTATUS status;
++ gctSIGNAL signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=%d",
++ Os, Signal, (gctINT32)(gctUINTPTR_T)Process);
++
++ /* Map the signal into kernel space. */
++ gcmkONERROR(gckOS_MapSignal(Os, Signal, Process, &signal));
++
++ /* Signal. */
++ status = gckOS_Signal(Os, signal, gcvTRUE);
++
++ /* Unmap the signal */
++ gcmkVERIFY_OK(gckOS_UnmapSignal(Os, Signal));
++
++ gcmkFOOTER();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitSignal
++**
++** Wait for a signal to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Wait=0x%08X", Os, Signal, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ might_sleep();
++
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ else if (Wait == 0)
++ {
++ status = gcvSTATUS_TIMEOUT;
++ }
++ else
++ {
++ /* Convert wait to milliseconds. */
++ long timeout = (Wait == gcvINFINITE)
++ ? MAX_SCHEDULE_TIMEOUT
++ : Wait * HZ / 1000;
++
++ DECLARE_WAITQUEUE(wait, current);
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&signal->obj.wait, &wait);
++
++ while (gcvTRUE)
++ {
++ if (signal_pending(current))
++ {
++ /* Interrupt received. */
++ status = gcvSTATUS_INTERRUPTED;
++ break;
++ }
++
++ __set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(&signal->obj.wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ if (timeout == 0)
++ {
++
++ status = gcvSTATUS_TIMEOUT;
++ break;
++ }
++ }
++
++ __remove_wait_queue(&signal->obj.wait, &wait);
++ }
++
++ spin_unlock_irq(&signal->obj.wait.lock);
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER_ARG("Signal=0x%X status=%d", Signal, status);
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapSignal
++**
++** Map a signal in to the current process space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to tha gctSIGNAL to map.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** gctSIGNAL * MappedSignal
++** Pointer to a variable receiving the mapped gctSIGNAL.
++*/
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=0x%X", Os, Signal, Process);
++
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(MappedSignal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ if(atomic_inc_return(&signal->ref) <= 1)
++ {
++ /* The previous value is 0, it has been deleted. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ *MappedSignal = (gctSIGNAL) Signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*MappedSignal=0x%X", *MappedSignal);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapSignal
++**
++** Unmap a signal .
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to that gctSIGNAL mapped.
++*/
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ return gckOS_DestroySignal(Os, Signal);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateUserSignal
++**
++** Create a new signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctINT * SignalID
++** Pointer to a variable receiving the created signal's ID.
++*/
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T signal;
++
++ /* Create a new signal. */
++ gcmkONERROR(gckOS_CreateSignal(Os, ManualReset, (gctSIGNAL *) &signal));
++ *SignalID = (gctINT) signal;
++
++OnError:
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyUserSignal
++**
++** Destroy a signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctINT SignalID
++** The signal's ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ )
++{
++ return gckOS_DestroySignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID);
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitUserSignal
++**
++** Wait for a signal used in the user mode to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctINT SignalID
++** Signal ID.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ )
++{
++ return gckOS_WaitSignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, Wait);
++}
++
++/*******************************************************************************
++**
++** gckOS_SignalUserSignal
++**
++** Set a state of the specified signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctINT SignalID
++** SignalID.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ )
++{
++ return gckOS_Signal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, State);
++}
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctSEMAPHORE * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore * newSemaphore;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Allocate the semaphore structure. */
++ newSemaphore = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (newSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(newSemaphore, 0);
++
++ /* Set the handle. */
++ * Semaphore = (gctSEMAPHORE) newSemaphore;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Increment the semaphore's count. */
++ up((struct semaphore *) Semaphore);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Decrement the semaphore's count. If the count is zero, wait
++ until it gets incremented. */
++ result = down_interruptible((struct semaphore *) Semaphore);
++
++ /* Signal received? */
++ if (result != 0)
++ {
++ status = gcvSTATUS_TERMINATE;
++ break;
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetSignal
++**
++** Set the specified signal to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signals with numbers between 32 and 63 are real-time,
++ send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++******************************** Thread Object *********************************
++\******************************************************************************/
++
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ )
++{
++ gceSTATUS status;
++ struct task_struct * thread;
++
++ gcmkHEADER_ARG("Os=0x%X ", Os);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(ThreadFunction != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ do
++ {
++ /* Create the thread. */
++ thread = kthread_create(
++ ThreadFunction,
++ ThreadParameter,
++ "Vivante Kernel Thread"
++ );
++
++ /* Failed? */
++ if (IS_ERR(thread))
++ {
++ status = gcvSTATUS_GENERIC_IO;
++ break;
++ }
++
++ /* Start the thread. */
++ wake_up_process(thread);
++
++ /* Set the thread handle. */
++ * Thread = (gctTHREAD) thread;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ /* Thread should have already been enabled to terminate. */
++ kthread_stop((struct task_struct *) Thread);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++/******************************************************************************\
++******************************** Software Timer ********************************
++\******************************************************************************/
++
++void
++_TimerFunction(
++ struct work_struct * work
++ )
++{
++ gcsOSTIMER_PTR timer = (gcsOSTIMER_PTR)work;
++
++ gctTIMERFUNCTION function = timer->function;
++
++ function(timer->data);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateTimer
++**
++** Create a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctTIMERFUNCTION Function.
++** Pointer to a call back function which will be called when timer is
++** expired.
++**
++** gctPOINTER Data.
++** Private data which will be passed to call back function.
++**
++** OUTPUT:
++**
++** gctPOINTER * Timer
++** Pointer to a variable receiving the created timer.
++*/
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ )
++{
++ gceSTATUS status;
++ gcsOSTIMER_PTR pointer;
++ gcmkHEADER_ARG("Os=0x%X Function=0x%X Data=0x%X", Os, Function, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(Os, sizeof(gcsOSTIMER), (gctPOINTER)&pointer));
++
++ pointer->function = Function;
++ pointer->data = Data;
++
++ INIT_DELAYED_WORK(&pointer->work, _TimerFunction);
++
++ *Timer = pointer;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyTimer
++**
++** Destory a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be destoryed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cancel_delayed_work_sync(&timer->work);
++#else
++ cancel_delayed_work(&timer->work);
++ flush_workqueue(Os->workqueue);
++#endif
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, Timer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StartTimer
++**
++** Schedule a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be scheduled.
++**
++** gctUINT32 Delay
++** Delay in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ )
++{
++ gcsOSTIMER_PTR timer;
++
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X Delay=%u", Os, Timer, Delay);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Delay != 0);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
++ mod_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay));
++#else
++ if (unlikely(delayed_work_pending(&timer->work)))
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cancel_delayed_work_sync(&timer->work);
++#else
++ cancel_delayed_work(&timer->work);
++ flush_workqueue(Os->workqueue);
++#endif
++ }
++
++ queue_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay));
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StopTimer
++**
++** Cancel a scheduled timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be cancelled.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++ cancel_delayed_work(&timer->work);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ )
++{
++ struct task_struct *task;
++
++ /* Get the task_struct of the task with pid. */
++ rcu_read_lock();
++
++ task = FIND_TASK_BY_PID(Pid);
++
++ if (task == gcvNULL)
++ {
++ rcu_read_unlock();
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ /* Get name of process. */
++ strncpy(String, task->comm, Length);
++
++ rcu_read_unlock();
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ dump_stack();
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_DetectProcessByName
++**
++** task->comm maybe part of process name, so this function
++** can only be used for debugging.
++**
++** INPUT:
++**
++** gctCONST_POINTER Name
++** Pointer to a string to hold name to be check. If the length
++** of name is longer than TASK_COMM_LEN (16), use part of name
++** to detect.
++**
++** OUTPUT:
++**
++** gcvSTATUS_TRUE if name of current process matches Name.
++**
++*/
++gceSTATUS
++gckOS_DetectProcessByName(
++ IN gctCONST_POINTER Name
++ )
++{
++ char comm[sizeof(current->comm)];
++
++ memset(comm, 0, sizeof(comm));
++
++ gcmkVERIFY_OK(
++ gckOS_GetProcessNameByPid(_GetProcessID(), sizeof(current->comm), comm));
++
++ return strstr(comm, Name) ? gcvSTATUS_TRUE
++ : gcvSTATUS_FALSE;
++}
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Create an sync point structure. */
++ syncPoint = (gcsSYNC_POINT_PTR) kmalloc(
++ sizeof(gcsSYNC_POINT), GFP_KERNEL | gcdNOWARN);
++
++ if (syncPoint == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the sync point. */
++ atomic_set(&syncPoint->ref, 1);
++ atomic_set(&syncPoint->state, 0);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->syncPointDB, syncPoint, &syncPoint->id));
++
++ *SyncPoint = (gctSYNC_POINT)(gctUINTPTR_T)syncPoint->id;
++
++ gcmkFOOTER_ARG("*SyncPonint=%d", syncPoint->id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (syncPoint != gcvNULL)
++ {
++ kfree(syncPoint);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Initialize the sync point. */
++ atomic_inc(&syncPoint->ref);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ if (atomic_dec_and_test(&syncPoint->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->syncPointDB, syncPoint->id));
++
++    /* Free the signal. */
++ syncPoint->timeline = gcvNULL;
++ kfree(syncPoint);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ struct sync_timeline * timeline;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Set signaled state. */
++ atomic_set(&syncPoint->state, 1);
++
++ /* Get parent timeline. */
++ timeline = syncPoint->timeline;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Signal timeline. */
++ if (timeline)
++ {
++ sync_timeline_signal(timeline);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Get state. */
++ *State = atomic_read(&syncPoint->state);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++
++ /* Create viv sync timeline. */
++ timeline = viv_sync_timeline_create("viv timeline", Os);
++
++ if (timeline == gcvNULL)
++ {
++ /* Out of memory. */
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ *Timeline = (gctHANDLE) timeline;
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++ gcmkASSERT(Timeline != gcvNULL);
++
++ /* Destroy timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++ sync_timeline_destroy(&timeline->obj);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ )
++{
++ int fd = -1;
++ struct viv_sync_timeline *timeline;
++ struct sync_pt * pt = gcvNULL;
++ struct sync_fence * fence;
++ char name[32];
++ gcsSYNC_POINT_PTR syncPoint;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Timeline=0x%X SyncPoint=%d",
++ Os, Timeline, (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Cast timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++
++ fd = get_unused_fd();
++
++ if (fd < 0)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Create viv_sync_pt. */
++ pt = viv_sync_pt_create(timeline, SyncPoint);
++
++ if (pt == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Reference sync_timeline. */
++ syncPoint->timeline = &timeline->obj;
++
++ /* Build fence name. */
++ snprintf(name, 32, "viv sync_fence-%u", (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ /* Create sync_fence. */
++ fence = sync_fence_create(name, pt);
++
++ if (fence == NULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Install fence to fd. */
++ sync_fence_install(fence, fd);
++
++ *FenceFD = fd;
++ gcmkFOOTER_ARG("*FenceFD=%d", fd);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Error roll back. */
++ if (pt)
++ {
++ sync_pt_free(pt);
++ }
++
++ if (fd > 0)
++ {
++ put_unused_fd(fd);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if gcdSECURITY
++gceSTATUS
++gckOS_AllocatePageArray(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageArrayLogical,
++ OUT gctPHYS_ADDR * PageArrayPhysical
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ PLINUX_MDL mdl;
++ gctUINT32* table;
++ gctUINT32 offset;
++ gctSIZE_T bytes;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X PageCount=%u",
++ Os, Physical, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ bytes = PageCount * gcmSIZEOF(gctUINT32);
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ Os,
++ gcvFALSE,
++ &bytes,
++ PageArrayPhysical,
++ PageArrayLogical
++ ));
++
++ table = *PageArrayLogical;
++
++ /* Convert pointer to MDL. */
++ mdl = (PLINUX_MDL)Physical;
++
++ allocator = mdl->allocator;
++
++ /* Get all the physical addresses and store them in the page table. */
++
++ offset = 0;
++ PageCount = PageCount / (PAGE_SIZE / 4096);
++
++ /* Try to get the user pages so DMA can happen. */
++ while (PageCount-- > 0)
++ {
++ unsigned long phys = ~0;
++
++ if (mdl->pagedMem && !mdl->contiguous)
++ {
++ if (allocator)
++ {
++ gctUINT32 phys_addr;
++ allocator->ops->Physical(allocator, mdl, offset, &phys_addr);
++ phys = (unsigned long)phys_addr;
++ }
++ }
++ else
++ {
++ if (!mdl->pagedMem)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): we should not get this call for Non Paged Memory!",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ phys = page_to_phys(nth_page(mdl->u.contiguousPages, offset));
++ }
++
++ table[offset] = phys;
++
++ offset += 1;
++ }
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckOS_CPUPhysicalToGPUPhysical(
++ IN gckOS Os,
++ IN gctUINT32 CPUPhysical,
++ IN gctUINT32_PTR GPUPhysical
++ )
++{
++ gcsPLATFORM * platform;
++ gcmkHEADER_ARG("CPUPhysical=0x%X", CPUPhysical);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->getGPUPhysical)
++ {
++ gcmkVERIFY_OK(
++ platform->ops->getGPUPhysical(platform, CPUPhysical, GPUPhysical));
++ }
++ else
++ {
++ *GPUPhysical = CPUPhysical;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_GPUPhysicalToCPUPhysical(
++ IN gckOS Os,
++ IN gctUINT32 GPUPhysical,
++ IN gctUINT32_PTR CPUPhysical
++ )
++{
++ gcmkHEADER_ARG("GPUPhysical=0x%X", GPUPhysical);
++
++ *CPUPhysical = GPUPhysical;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_PhysicalToPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Physical,
++ OUT gctUINT32 * PhysicalAddress
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++ gckALLOCATOR allocator = mdl->allocator;
++
++ if (allocator)
++ {
++ return allocator->ops->Physical(allocator, mdl, 0, PhysicalAddress);
++ }
++
++ return gcvSTATUS_NOT_SUPPORTED;
++}
++
++gceSTATUS
++gckOS_QueryOption(
++ IN gckOS Os,
++ IN gctCONST_STRING Option,
++ OUT gctUINT32 * Value
++ )
++{
++ gckGALDEVICE device = Os->device;
++
++ if (!strcmp(Option, "physBase"))
++ {
++ *Value = device->physBase;
++ return gcvSTATUS_OK;
++ }
++ else if (!strcmp(Option, "physSize"))
++ {
++ *Value = device->physSize;
++ return gcvSTATUS_OK;
++ }
++ else if (!strcmp(Option, "mmu"))
++ {
++#if gcdSECURITY
++ *Value = 0;
++#else
++ *Value = device->mmu;
++#endif
++ return gcvSTATUS_OK;
++ }
++
++ return gcvSTATUS_NOT_SUPPORTED;
++}
++
++static int
++fd_release(
++ struct inode *inode,
++ struct file *file
++ )
++{
++ gcsFDPRIVATE_PTR private = (gcsFDPRIVATE_PTR)file->private_data;
++
++ if (private && private->release)
++ {
++ return private->release(private);
++ }
++
++ return 0;
++}
++
++static const struct file_operations fd_fops = {
++ .release = fd_release,
++};
++
++gceSTATUS
++gckOS_GetFd(
++ IN gctSTRING Name,
++ IN gcsFDPRIVATE_PTR Private,
++ OUT gctINT *Fd
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ *Fd = anon_inode_getfd(Name, &fd_fops, Private, O_RDWR);
++
++ if (*Fd < 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ return gcvSTATUS_OK;
++#else
++ return gcvSTATUS_NOT_SUPPORTED;
++#endif
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,90 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_os_h_
++#define __gc_hal_kernel_os_h_
++
++typedef struct _LINUX_MDL_MAP
++{
++ gctINT pid;
++ gctPOINTER vmaAddr;
++ gctUINT32 count;
++ struct vm_area_struct * vma;
++ struct _LINUX_MDL_MAP * next;
++}
++LINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL_MAP * PLINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL
++{
++ char * addr;
++
++ union _pages
++ {
++ /* Pointer to a array of pages. */
++ struct page * contiguousPages;
++ /* Pointer to a array of pointers to page. */
++ struct page ** nonContiguousPages;
++ }
++ u;
++
++#ifdef NO_DMA_COHERENT
++ gctPOINTER kaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gctINT numPages;
++ gctINT pagedMem;
++ gctBOOL contiguous;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctBOOL exact;
++#endif
++ dma_addr_t dmaHandle;
++ PLINUX_MDL_MAP maps;
++ struct _LINUX_MDL * prev;
++ struct _LINUX_MDL * next;
++
++ /* Pointer to allocator which allocates memory for this mdl. */
++ void * allocator;
++
++ /* Private data used by allocator. */
++ void * priv;
++
++ uint gid;
++}
++LINUX_MDL, *PLINUX_MDL;
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT PID
++ );
++
++typedef struct _DRIVER_ARGS
++{
++ gctUINT64 InputBuffer;
++ gctUINT64 InputBufferSize;
++ gctUINT64 OutputBuffer;
++ gctUINT64 OutputBufferSize;
++}
++DRIVER_ARGS;
++
++#endif /* __gc_hal_kernel_os_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_platform.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_platform.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_platform.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_platform.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,279 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef _gc_hal_kernel_platform_h_
++#define _gc_hal_kernel_platform_h_
++#include <linux/mm.h>
++
++typedef struct _gcsMODULE_PARAMETERS
++{
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ gctINT irqLine3D0;
++ gctUINT registerMemBase3D0;
++ gctUINT registerMemSize3D0;
++ gctINT irqLine3D1;
++ gctUINT registerMemBase3D1;
++ gctUINT registerMemSize3D1;
++#else
++ gctINT irqLine;
++ gctUINT registerMemBase;
++ gctUINT registerMemSize;
++#endif
++ gctINT irqLine2D;
++ gctUINT registerMemBase2D;
++ gctUINT registerMemSize2D;
++ gctINT irqLineVG;
++ gctUINT registerMemBaseVG;
++ gctUINT registerMemSizeVG;
++ gctUINT contiguousSize;
++ gctUINT contiguousBase;
++ gctUINT contiguousRequested;
++ gctUINT bankSize;
++ gctINT fastClear;
++ gctINT compression;
++ gctINT powerManagement;
++ gctINT gpuProfiler;
++ gctINT signal;
++ gctUINT baseAddress;
++ gctUINT physSize;
++ gctUINT logFileSize;
++ gctUINT recovery;
++ gctUINT stuckDump;
++ gctUINT showArgs;
++ gctUINT gpu3DMinClock;
++}
++gcsMODULE_PARAMETERS;
++
++typedef struct _gcsPLATFORM * gckPLATFORM;
++
++typedef struct _gcsPLATFORM_OPERATIONS
++{
++ /*******************************************************************************
++ **
++ ** needAddDevice
++ **
++ ** Determine whether platform_device is created by initialization code.
++    ** If platform_device is created by BSP, return gcvFALSE here.
++ */
++ gctBOOL
++ (*needAddDevice)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustParam
++ **
++ ** Override content of arguments, if a argument is not changed here, it will
++ ** keep as default value or value set by insmod command line.
++ */
++ gceSTATUS
++ (*adjustParam)(
++ IN gckPLATFORM Platform,
++ OUT gcsMODULE_PARAMETERS *Args
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustDriver
++ **
++ ** Override content of platform_driver which will be registered.
++ */
++ gceSTATUS
++ (*adjustDriver)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** getPower
++ **
++ ** Prepare power and clock operation.
++ */
++ gceSTATUS
++ (*getPower)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** putPower
++ **
++ ** Finish power and clock operation.
++ */
++ gceSTATUS
++ (*putPower)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** allocPriv
++ **
++ ** Construct platform private data.
++ */
++ gceSTATUS
++ (*allocPriv)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** freePriv
++ **
++ ** free platform private data.
++ */
++ gceSTATUS
++ (*freePriv)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** setPower
++ **
++ ** Set power state of specified GPU.
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++ ** GPU neeed to config.
++ **
++ ** gceBOOL Enable
++ ** Enable or disable power.
++ */
++ gceSTATUS
++ (*setPower)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ );
++
++ /*******************************************************************************
++ **
++ ** setClock
++ **
++ ** Set clock state of specified GPU.
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++ ** GPU neeed to config.
++ **
++ ** gceBOOL Enable
++ ** Enable or disable clock.
++ */
++ gceSTATUS
++ (*setClock)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ );
++
++ /*******************************************************************************
++ **
++ ** reset
++ **
++ ** Reset GPU outside.
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++ ** GPU neeed to reset.
++ */
++ gceSTATUS
++ (*reset)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU
++ );
++
++ /*******************************************************************************
++ **
++ ** getGPUPhysical
++ **
++ ** Convert CPU physical address to GPU physical address if they are
++ ** different.
++ */
++ gceSTATUS
++ (*getGPUPhysical)(
++ IN gckPLATFORM Platform,
++ IN gctUINT32 CPUPhysical,
++ OUT gctUINT32_PTR GPUPhysical
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustProt
++ **
++ ** Override Prot flag when mapping paged memory to userspace.
++ */
++ gceSTATUS
++ (*adjustProt)(
++ IN struct vm_area_struct * vma
++ );
++
++ /*******************************************************************************
++ **
++ ** shrinkMemory
++ **
++ ** Do something to collect memory, eg, act as oom killer.
++ */
++ gceSTATUS
++ (*shrinkMemory)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** cache
++ **
++ ** Cache operation.
++ */
++ gceSTATUS
++ (*cache)(
++ IN gckPLATFORM Platform,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++}
++gcsPLATFORM_OPERATIONS;
++
++typedef struct _gcsPLATFORM
++{
++ struct platform_device* device;
++ struct platform_driver* driver;
++
++ gcsPLATFORM_OPERATIONS* ops;
++
++ void* priv;
++}
++gcsPLATFORM;
++
++void
++gckPLATFORM_QueryOperations(
++ IN gcsPLATFORM_OPERATIONS ** Operations
++ );
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_probe.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_probe.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_probe.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_probe.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1347 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <linux/device.h>
++#include <linux/slab.h>
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_driver.h"
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#ifdef CONFIG_PXA_DVFM
++# include <mach/dvfm.h>
++# include <mach/pxa3xx_dvfm.h>
++#endif
++
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_DRIVER
++
++MODULE_DESCRIPTION("Vivante Graphics Driver");
++MODULE_LICENSE("GPL");
++
++static struct class* gpuClass;
++
++static gcsPLATFORM platform;
++
++static gckGALDEVICE galDevice;
++
++static uint major = 199;
++module_param(major, uint, 0644);
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++static int irqLine3D0 = -1;
++module_param(irqLine3D0, int, 0644);
++
++static ulong registerMemBase3D0 = 0;
++module_param(registerMemBase3D0, ulong, 0644);
++
++static ulong registerMemSize3D0 = 2 << 10;
++module_param(registerMemSize3D0, ulong, 0644);
++
++static int irqLine3D1 = -1;
++module_param(irqLine3D1, int, 0644);
++
++static ulong registerMemBase3D1 = 0;
++module_param(registerMemBase3D1, ulong, 0644);
++
++static ulong registerMemSize3D1 = 2 << 10;
++module_param(registerMemSize3D1, ulong, 0644);
++#else
++static int irqLine = -1;
++module_param(irqLine, int, 0644);
++
++static ulong registerMemBase = 0x80000000;
++module_param(registerMemBase, ulong, 0644);
++
++static ulong registerMemSize = 2 << 10;
++module_param(registerMemSize, ulong, 0644);
++#endif
++
++static int irqLine2D = -1;
++module_param(irqLine2D, int, 0644);
++
++static ulong registerMemBase2D = 0x00000000;
++module_param(registerMemBase2D, ulong, 0644);
++
++static ulong registerMemSize2D = 2 << 10;
++module_param(registerMemSize2D, ulong, 0644);
++
++static int irqLineVG = -1;
++module_param(irqLineVG, int, 0644);
++
++static ulong registerMemBaseVG = 0x00000000;
++module_param(registerMemBaseVG, ulong, 0644);
++
++static ulong registerMemSizeVG = 2 << 10;
++module_param(registerMemSizeVG, ulong, 0644);
++
++#ifndef gcdDEFAULT_CONTIGUOUS_SIZE
++#define gcdDEFAULT_CONTIGUOUS_SIZE (4 << 20)
++#endif
++static ulong contiguousSize = gcdDEFAULT_CONTIGUOUS_SIZE;
++module_param(contiguousSize, ulong, 0644);
++
++static ulong contiguousBase = 0;
++module_param(contiguousBase, ulong, 0644);
++
++static ulong bankSize = 0;
++module_param(bankSize, ulong, 0644);
++
++static int fastClear = -1;
++module_param(fastClear, int, 0644);
++
++static int compression = -1;
++module_param(compression, int, 0644);
++
++static int powerManagement = -1;
++module_param(powerManagement, int, 0644);
++
++static int gpuProfiler = 0;
++module_param(gpuProfiler, int, 0644);
++
++static int signal = 48;
++module_param(signal, int, 0644);
++
++static ulong baseAddress = 0;
++module_param(baseAddress, ulong, 0644);
++
++static ulong physSize = 0;
++module_param(physSize, ulong, 0644);
++
++static uint logFileSize = 0;
++module_param(logFileSize,uint, 0644);
++
++static uint recovery = 1;
++module_param(recovery, uint, 0644);
++MODULE_PARM_DESC(recovery, "Recover GPU from stuck (1: Enable, 0: Disable)");
++
++/* Middle needs about 40KB buffer, Maximal may need more than 200KB buffer. */
++static uint stuckDump = 1;
++module_param(stuckDump, uint, 0644);
++MODULE_PARM_DESC(stuckDump, "Level of stuck dump content (1: Minimal, 2: Middle, 3: Maximal)");
++
++static int showArgs = 0;
++module_param(showArgs, int, 0644);
++
++static int mmu = 1;
++module_param(mmu, int, 0644);
++
++static int gpu3DMinClock = 1;
++
++static int contiguousRequested = 0;
++
++static int drv_open(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static int drv_release(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ );
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ );
++
++static struct file_operations driver_fops =
++{
++ .owner = THIS_MODULE,
++ .open = drv_open,
++ .release = drv_release,
++ .unlocked_ioctl = drv_ioctl,
++#ifdef HAVE_COMPAT_IOCTL
++ .compat_ioctl = drv_ioctl,
++#endif
++ .mmap = drv_mmap,
++};
++
++void
++_UpdateModuleParam(
++ gcsMODULE_PARAMETERS *Param
++ )
++{
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++#else
++ irqLine = Param->irqLine ;
++ registerMemBase = Param->registerMemBase;
++ registerMemSize = Param->registerMemSize;
++#endif
++ irqLine2D = Param->irqLine2D ;
++ registerMemBase2D = Param->registerMemBase2D;
++ registerMemSize2D = Param->registerMemSize2D;
++ irqLineVG = Param->irqLineVG;
++ registerMemBaseVG = Param->registerMemBaseVG;
++ registerMemSizeVG = Param->registerMemSizeVG;
++ contiguousSize = Param->contiguousSize;
++ contiguousBase = Param->contiguousBase;
++ bankSize = Param->bankSize;
++ fastClear = Param->fastClear;
++ compression = Param->compression;
++ powerManagement = Param->powerManagement;
++ gpuProfiler = Param->gpuProfiler;
++ signal = Param->signal;
++ baseAddress = Param->baseAddress;
++ physSize = Param->physSize;
++ logFileSize = Param->logFileSize;
++ recovery = Param->recovery;
++ stuckDump = Param->stuckDump;
++ showArgs = Param->showArgs;
++ contiguousRequested = Param->contiguousRequested;
++ gpu3DMinClock = Param->gpu3DMinClock;
++}
++
++void
++gckOS_DumpParam(
++ void
++ )
++{
++ printk("Galcore options:\n");
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ printk(" irqLine3D0 = %d\n", irqLine3D0);
++ printk(" registerMemBase3D0 = 0x%08lX\n", registerMemBase3D0);
++ printk(" registerMemSize3D0 = 0x%08lX\n", registerMemSize3D0);
++
++ if (irqLine3D1 != -1)
++ {
++ printk(" irqLine3D1 = %d\n", irqLine3D1);
++ printk(" registerMemBase3D1 = 0x%08lX\n", registerMemBase3D1);
++ printk(" registerMemSize3D1 = 0x%08lX\n", registerMemSize3D1);
++ }
++#else
++ printk(" irqLine = %d\n", irqLine);
++ printk(" registerMemBase = 0x%08lX\n", registerMemBase);
++ printk(" registerMemSize = 0x%08lX\n", registerMemSize);
++#endif
++
++ if (irqLine2D != -1)
++ {
++ printk(" irqLine2D = %d\n", irqLine2D);
++ printk(" registerMemBase2D = 0x%08lX\n", registerMemBase2D);
++ printk(" registerMemSize2D = 0x%08lX\n", registerMemSize2D);
++ }
++
++ if (irqLineVG != -1)
++ {
++ printk(" irqLineVG = %d\n", irqLineVG);
++ printk(" registerMemBaseVG = 0x%08lX\n", registerMemBaseVG);
++ printk(" registerMemSizeVG = 0x%08lX\n", registerMemSizeVG);
++ }
++
++ printk(" contiguousSize = %ld\n", contiguousSize);
++ printk(" contiguousBase = 0x%08lX\n", contiguousBase);
++ printk(" bankSize = 0x%08lX\n", bankSize);
++ printk(" fastClear = %d\n", fastClear);
++ printk(" compression = %d\n", compression);
++ printk(" signal = %d\n", signal);
++ printk(" powerManagement = %d\n", powerManagement);
++ printk(" baseAddress = 0x%08lX\n", baseAddress);
++ printk(" physSize = 0x%08lX\n", physSize);
++ printk(" logFileSize = %d KB \n", logFileSize);
++ printk(" recovery = %d\n", recovery);
++ printk(" stuckDump = %d\n", stuckDump);
++ printk(" gpuProfiler = %d\n", gpuProfiler);
++}
++
++int drv_open(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gctBOOL attached = gcvFALSE;
++ gcsHAL_PRIVATE_DATA_PTR data = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL | __GFP_NOWARN);
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ data->device = galDevice;
++ data->mappedMemory = gcvNULL;
++ data->contiguousLogical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&data->pidOpen));
++
++ /* Attached the process. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE));
++ }
++ }
++ attached = gcvTRUE;
++
++ if (!galDevice->contiguousMapped)
++ {
++ if (galDevice->contiguousPhysical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_MapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ &data->contiguousLogical
++ ));
++ }
++ }
++
++ filp->private_data = data;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ if (data != gcvNULL)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical
++ ));
++ }
++
++ kfree(data);
++ }
++
++ if (attached)
++ {
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE));
++ }
++ }
++ }
++
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++int drv_release(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (!device->contiguousMapped)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_UnmapMemoryEx(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical,
++ data->pidOpen
++ ));
++
++ data->contiguousLogical = gcvNULL;
++ }
++ }
++
++ /* A process gets detached. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen));
++ }
++ }
++
++ kfree(data);
++ filp->private_data = NULL;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gctUINT32 copyLen;
++ DRIVER_ARGS drvArgs;
++ gckGALDEVICE device;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gctINT32 i, count;
++ gckVIDMEM_NODE nodeObject;
++
++ gcmkHEADER_ARG(
++ "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
++ filp, ioctlCode, arg
++ );
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
++ && (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown command %d\n",
++ __FUNCTION__, __LINE__,
++ ioctlCode
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get the drvArgs. */
++ copyLen = copy_from_user(
++ &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of the input arguments.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Now bring in the gcsHAL_INTERFACE structure. */
++ if ((drvArgs.InputBufferSize != sizeof(gcsHAL_INTERFACE))
++ || (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): input or/and output structures are invalid.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ copyLen = copy_from_user(
++ &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of input HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (iface.command == gcvHAL_CHIP_INFO)
++ {
++ count = 0;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ iface.u.ChipInfo.types[count] = gcvHARDWARE_VG;
++ }
++ else
++#endif
++ {
++ gcmkVERIFY_OK(gckHARDWARE_GetType(device->kernels[i]->hardware,
++ &iface.u.ChipInfo.types[count]));
++ }
++ count++;
++ }
++ }
++
++ iface.u.ChipInfo.count = count;
++ iface.status = status = gcvSTATUS_OK;
++ }
++ else
++ {
++ if (iface.hardwareType > 7)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown hardwareType %d\n",
++ __FUNCTION__, __LINE__,
++ iface.hardwareType
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (device->coreMapping[iface.hardwareType] == gcvCORE_VG)
++ {
++ status = gckVGKERNEL_Dispatch(device->kernels[gcvCORE_VG],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ else
++#endif
++ {
++ status = gckKERNEL_Dispatch(device->kernels[device->coreMapping[iface.hardwareType]],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ }
++
++ /* Redo system call after pending signal is handled. */
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkFOOTER();
++ return -ERESTARTSYS;
++ }
++
++ if (gcmIS_SUCCESS(status) && (iface.command == gcvHAL_LOCK_VIDEO_MEMORY))
++ {
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 processID;
++
++ gckOS_GetProcessID(&processID);
++
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(device->kernels[device->coreMapping[iface.hardwareType]],
++ processID,
++ (gctUINT32)iface.u.LockVideoMemory.node,
++ &nodeObject));
++ node = nodeObject->node;
++
++ /* Special case for mapped memory. */
++ if ((data->mappedMemory != gcvNULL)
++ && (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ )
++ {
++ /* Compute offset into mapped memory. */
++ gctUINT32 offset
++ = (gctUINT8 *) gcmUINT64_TO_PTR(iface.u.LockVideoMemory.memory)
++ - (gctUINT8 *) device->contiguousBase;
++
++ /* Compute offset into user-mapped region. */
++ iface.u.LockVideoMemory.memory =
++ gcmPTR_TO_UINT64((gctUINT8 *) data->mappedMemory + offset);
++ }
++ }
++
++ /* Copy data back to the user. */
++ copyLen = copy_to_user(
++ gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of output HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if !gcdPAGED_MEMORY_CACHEABLE
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_flags |= gcdVM_FLAGS;
++#endif
++ vma->vm_pgoff = 0;
++
++ if (device->contiguousMapped)
++ {
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret = 0;
++
++ if (size > device->contiguousSize)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Invalid mapping size.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ ret = io_remap_pfn_range(
++ vma,
++ vma->vm_start,
++ device->requestedContiguousBase >> PAGE_SHIFT,
++ size,
++ vma->vm_page_prot
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): io_remap_pfn_range failed %d\n",
++ __FUNCTION__, __LINE__,
++ ret
++ );
++
++ data->mappedMemory = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ data->mappedMemory = (gctPOINTER) vma->vm_start;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++ }
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++
++#if !USE_PLATFORM_DRIVER
++static int __init drv_init(void)
++#else
++static int drv_init(void)
++#endif
++{
++ int ret;
++ int result = -EINVAL;
++ gceSTATUS status;
++ gckGALDEVICE device = gcvNULL;
++ struct class* device_class = gcvNULL;
++
++ gcsDEVICE_CONSTRUCT_ARGS args = {
++ .recovery = recovery,
++ .stuckDump = stuckDump,
++ .gpu3DMinClock = gpu3DMinClock,
++ .contiguousRequested = contiguousRequested,
++ .platform = &platform,
++ .mmu = mmu,
++ };
++
++ gcmkHEADER();
++
++ printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);
++
++#if !VIVANTE_PROFILER_PM
++ /* when enable gpu profiler, we need to turn off gpu powerMangement */
++ if (gpuProfiler)
++ {
++ powerManagement = 0;
++ }
++#endif
++
++ if (showArgs)
++ {
++ gckOS_DumpParam();
++ }
++
++ if (logFileSize != 0)
++ {
++ gckDEBUGFS_Initialize();
++ }
++
++ /* Create the GAL device. */
++ status = gckGALDEVICE_Construct(
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ irqLine3D0,
++ registerMemBase3D0, registerMemSize3D0,
++ irqLine3D1,
++ registerMemBase3D1, registerMemSize3D1,
++#else
++ irqLine,
++ registerMemBase, registerMemSize,
++#endif
++ irqLine2D,
++ registerMemBase2D, registerMemSize2D,
++ irqLineVG,
++ registerMemBaseVG, registerMemSizeVG,
++ contiguousBase, contiguousSize,
++ bankSize, fastClear, compression, baseAddress, physSize, signal,
++ logFileSize,
++ powerManagement,
++ gpuProfiler,
++ &args,
++ &device
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the GAL device: status=%d\n",
++ __FUNCTION__, __LINE__, status);
++
++ goto OnError;
++ }
++
++ /* Start the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Start(device));
++
++ if ((physSize != 0)
++ && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
++ {
++ /* Reset the base address */
++ device->baseAddress = 0;
++ }
++
++ /* Register the character device. */
++ ret = register_chrdev(major, DEVICE_NAME, &driver_fops);
++
++ if (ret < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not allocate major number for mmap.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (major == 0)
++ {
++ major = ret;
++ }
++
++ /* Create the device class. */
++ device_class = class_create(THIS_MODULE, "graphics_class");
++
++ if (IS_ERR(device_class))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the class.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++ device_create(device_class, NULL, MKDEV(major, 0), NULL, DEVICE_NAME);
++#else
++ device_create(device_class, NULL, MKDEV(major, 0), DEVICE_NAME);
++#endif
++
++ galDevice = device;
++ gpuClass = device_class;
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine3D0=%d, contiguousSize=%lu, memBase3D0=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine3D0, contiguousSize, registerMemBase3D0
++ );
++#else
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine, contiguousSize, registerMemBase
++ );
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ /* Roll back. */
++ if (device_class != gcvNULL)
++ {
++ device_destroy(device_class, MKDEV(major, 0));
++ class_destroy(device_class);
++ }
++
++ if (device != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++ }
++
++ gcmkFOOTER();
++ return result;
++}
++
++#if !USE_PLATFORM_DRIVER
++static void __exit drv_exit(void)
++#else
++static void drv_exit(void)
++#endif
++{
++ gcmkHEADER();
++
++ gcmkASSERT(gpuClass != gcvNULL);
++ device_destroy(gpuClass, MKDEV(major, 0));
++ class_destroy(gpuClass);
++
++ unregister_chrdev(major, DEVICE_NAME);
++
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));
++
++ if(gckDEBUGFS_IsEnabled())
++ {
++ gckDEBUGFS_Terminate();
++ }
++
++ gcmkFOOTER_NO();
++}
++
++#if !USE_PLATFORM_DRIVER
++ module_init(drv_init);
++ module_exit(drv_exit);
++#else
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_probe(struct platform_device *pdev)
++#else
++static int __devinit gpu_probe(struct platform_device *pdev)
++#endif
++{
++ int ret = -ENODEV;
++ gcsMODULE_PARAMETERS moduleParam = {
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++#else
++ .irqLine = irqLine,
++ .registerMemBase = registerMemBase,
++ .registerMemSize = registerMemSize,
++#endif
++ .irqLine2D = irqLine2D,
++ .registerMemBase2D = registerMemBase2D,
++ .registerMemSize2D = registerMemSize2D,
++ .irqLineVG = irqLineVG,
++ .registerMemBaseVG = registerMemBaseVG,
++ .registerMemSizeVG = registerMemSizeVG,
++ .contiguousSize = contiguousSize,
++ .contiguousBase = contiguousBase,
++ .bankSize = bankSize,
++ .fastClear = fastClear,
++ .compression = compression,
++ .powerManagement = powerManagement,
++ .gpuProfiler = gpuProfiler,
++ .signal = signal,
++ .baseAddress = baseAddress,
++ .physSize = physSize,
++ .logFileSize = logFileSize,
++ .recovery = recovery,
++ .stuckDump = stuckDump,
++ .showArgs = showArgs,
++ .gpu3DMinClock = gpu3DMinClock,
++ };
++
++ gcmkHEADER();
++
++ platform.device = pdev;
++
++ if (platform.ops->getPower)
++ {
++ if (gcmIS_ERROR(platform.ops->getPower(&platform)))
++ {
++ gcmkFOOTER_NO();
++ return ret;
++ }
++ }
++
++ if (platform.ops->adjustParam)
++ {
++ /* Override default module param. */
++ platform.ops->adjustParam(&platform, &moduleParam);
++
++ /* Update module param because drv_init() uses them directly. */
++ _UpdateModuleParam(&moduleParam);
++ }
++
++ ret = drv_init();
++
++ if (!ret)
++ {
++ platform_set_drvdata(pdev, galDevice);
++
++ gcmkFOOTER_NO();
++ return ret;
++ }
++
++ gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
++ return ret;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_remove(struct platform_device *pdev)
++#else
++static int __devexit gpu_remove(struct platform_device *pdev)
++#endif
++{
++ gcmkHEADER();
++
++ drv_exit();
++
++ if (platform.ops->putPower)
++ {
++ platform.ops->putPower(&platform);
++ }
++
++ gcmkFOOTER_NO();
++ return 0;
++}
++
++static int gpu_suspend(struct platform_device *dev, pm_message_t state)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++
++ device = platform_get_drvdata(dev);
++
++ if (!device)
++ {
++ return -1;
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++ /* Store states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_QueryPowerManagementState(device->kernels[i]->vg->hardware, &device->statesStored[i]);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_QueryPowerManagementState(device->kernels[i]->hardware, &device->statesStored[i]);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_OFF);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_OFF);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++ }
++ }
++
++ return 0;
++}
++
++static int gpu_resume(struct platform_device *dev)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++ gceCHIPPOWERSTATE statesStored;
++
++ device = platform_get_drvdata(dev);
++
++ if (!device)
++ {
++ return -1;
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_ON);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_ON);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++ /* Convert global state to crossponding internal state. */
++ switch(device->statesStored[i])
++ {
++ case gcvPOWER_OFF:
++ statesStored = gcvPOWER_OFF_BROADCAST;
++ break;
++ case gcvPOWER_IDLE:
++ statesStored = gcvPOWER_IDLE_BROADCAST;
++ break;
++ case gcvPOWER_SUSPEND:
++ statesStored = gcvPOWER_SUSPEND_BROADCAST;
++ break;
++ case gcvPOWER_ON:
++ statesStored = gcvPOWER_ON_AUTO;
++ break;
++ default:
++ statesStored = device->statesStored[i];
++ break;
++ }
++
++ /* Restore states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, statesStored);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, statesStored);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++ }
++ }
++
++ return 0;
++}
++
++#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
++#ifdef CONFIG_PM_SLEEP
++static int gpu_system_suspend(struct device *dev)
++{
++ pm_message_t state={0};
++ return gpu_suspend(to_platform_device(dev), state);
++}
++
++static int gpu_system_resume(struct device *dev)
++{
++ return gpu_resume(to_platform_device(dev));
++}
++#endif
++
++static const struct dev_pm_ops gpu_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(gpu_system_suspend, gpu_system_resume)
++};
++#endif
++
++static struct platform_driver gpu_driver = {
++ .probe = gpu_probe,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ .remove = gpu_remove,
++#else
++ .remove = __devexit_p(gpu_remove),
++#endif
++
++ .suspend = gpu_suspend,
++ .resume = gpu_resume,
++
++ .driver = {
++ .name = DEVICE_NAME,
++#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
++ .pm = &gpu_pm_ops,
++#endif
++ }
++};
++
++static int __init gpu_init(void)
++{
++ int ret = 0;
++
++ memset(&platform, 0, sizeof(gcsPLATFORM));
++
++ gckPLATFORM_QueryOperations(&platform.ops);
++
++ if (platform.ops == gcvNULL)
++ {
++ printk(KERN_ERR "galcore: No platform specific operations.\n");
++ ret = -ENODEV;
++ goto out;
++ }
++
++ if (platform.ops->allocPriv)
++ {
++ /* Allocate platform private data. */
++ if (gcmIS_ERROR(platform.ops->allocPriv(&platform)))
++ {
++ ret = -ENOMEM;
++ goto out;
++ }
++ }
++
++ if (platform.ops->needAddDevice
++ && platform.ops->needAddDevice(&platform))
++ {
++ /* Allocate device */
++ platform.device = platform_device_alloc(DEVICE_NAME, -1);
++ if (!platform.device)
++ {
++ printk(KERN_ERR "galcore: platform_device_alloc failed.\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ /* Add device */
++ ret = platform_device_add(platform.device);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add failed.\n");
++ goto put_dev;
++ }
++ }
++
++ platform.driver = &gpu_driver;
++
++ if (platform.ops->adjustDriver)
++ {
++ /* Override default platform_driver struct. */
++ platform.ops->adjustDriver(&platform);
++ }
++
++ ret = platform_driver_register(&gpu_driver);
++ if (!ret)
++ {
++ goto out;
++ }
++
++ platform_device_del(platform.device);
++put_dev:
++ platform_device_put(platform.device);
++
++out:
++ return ret;
++}
++
++static void __exit gpu_exit(void)
++{
++ platform_driver_unregister(&gpu_driver);
++
++ if (platform.ops->needAddDevice
++ && platform.ops->needAddDevice(&platform))
++ {
++ platform_device_unregister(platform.device);
++ }
++
++ if (platform.priv)
++ {
++ /* Free platform private data. */
++ platform.ops->freePriv(&platform);
++ }
++}
++
++module_init(gpu_init);
++module_exit(gpu_exit);
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_security_channel.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_security_channel.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_security_channel.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_security_channel.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,385 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include <linux/slab.h>
++
++#include "tee_client_api.h"
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++#define GPU3D_UUID { 0xcc9f80ea, 0xa836, 0x11e3, { 0x9b, 0x07, 0x78, 0x2b, 0xcb, 0x5c, 0xf3, 0xe3 } }
++
++static const TEEC_UUID gpu3d_uuid = GPU3D_UUID;
++TEEC_Context teecContext;
++
++typedef struct _gcsSecurityChannel {
++ gckOS os;
++ TEEC_Session session;
++ int * virtual;
++ TEEC_SharedMemory inputBuffer;
++ gctUINT32 bytes;
++ gctPOINTER mutex;
++} gcsSecurityChannel;
++
++TEEC_SharedMemory *
++gpu3d_allocate_secure_mem(
++ gckOS Os,
++ unsigned int size
++ )
++{
++ TEEC_Result result;
++ TEEC_Context *context = &teecContext;
++ TEEC_SharedMemory *shm = NULL;
++ void *handle = NULL;
++ unsigned int phyAddr = 0xFFFFFFFF;
++ gceSTATUS status;
++ gctSIZE_T bytes = size;
++
++ shm = kmalloc(sizeof(TEEC_SharedMemory), GFP_KERNEL);
++
++ if (NULL == shm)
++ {
++ return NULL;
++ }
++
++ memset(shm, 0, sizeof(TEEC_SharedMemory));
++
++ status = gckOS_AllocatePagedMemoryEx(
++ Os,
++ gcvALLOC_FLAG_SECURITY,
++ bytes,
++ gcvNULL,
++ (gctPHYS_ADDR *)&handle);
++
++ if (gcmIS_ERROR(status))
++ {
++ kfree(shm);
++ return NULL;
++ }
++
++ status = gckOS_PhysicalToPhysicalAddress(
++ Os,
++ handle,
++ &phyAddr);
++
++ if (gcmIS_ERROR(status))
++ {
++ kfree(shm);
++ return NULL;
++ }
++
++ /* record the handle into shm->user_data */
++ shm->userdata = handle;
++
++ /* [b] Bulk input buffer. */
++ shm->size = size;
++ shm->flags = TEEC_MEM_INPUT;
++
++ /* Use TEE Client API to register the underlying memory buffer. */
++ shm->phyAddr = (void *)phyAddr;
++
++ result = TEEC_RegisterSharedMemory(
++ context,
++ shm);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gckOS_FreePagedMemory(Os, (gctPHYS_ADDR)handle, shm->size);
++ kfree(shm);
++ return NULL;
++ }
++
++ return shm;
++}
++
++void gpu3d_release_secure_mem(
++ gckOS Os,
++ void *shm_handle
++ )
++{
++ TEEC_SharedMemory *shm = shm_handle;
++ void * handle;
++
++ if (!shm)
++ {
++ return;
++ }
++
++ handle = shm->userdata;
++
++ TEEC_ReleaseSharedMemory(shm);
++ gckOS_FreePagedMemory(Os, (gctPHYS_ADDR)handle, shm->size);
++
++ kfree(shm);
++
++ return;
++}
++
++static TEEC_Result gpu3d_session_callback(
++ TEEC_Session* session,
++ uint32_t commandID,
++ TEEC_Operation* operation,
++ void* userdata
++ )
++{
++ gcsSecurityChannel *channel = userdata;
++
++ if (channel == gcvNULL)
++ {
++ return TEEC_ERROR_BAD_PARAMETERS;
++ }
++
++ switch(commandID)
++ {
++ case gcvTA_CALLBACK_ALLOC_SECURE_MEM:
++ {
++ uint32_t size = operation->params[0].value.a;
++ TEEC_SharedMemory *shm = NULL;
++
++ shm = gpu3d_allocate_secure_mem(channel->os, size);
++
++ /* use the value to save the pointer in client side */
++ operation->params[0].value.a = (uint32_t)shm;
++ operation->params[0].value.b = (uint32_t)shm->phyAddr;
++
++ break;
++ }
++ case gcvTA_CALLBACK_FREE_SECURE_MEM:
++ {
++ TEEC_SharedMemory *shm = (TEEC_SharedMemory *)operation->params[0].value.a;
++
++ gpu3d_release_secure_mem(channel->os, shm);
++ break;
++ }
++ default:
++ break;
++ }
++
++ return TEEC_SUCCESS;
++}
++
++gceSTATUS
++gckOS_OpenSecurityChannel(
++ IN gckOS Os,
++ IN gceCORE GPU,
++ OUT gctUINT32 *Channel
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ static bool initialized = gcvFALSE;
++ gcsSecurityChannel *channel = gcvNULL;
++
++ TEEC_Operation operation = {0};
++
++ /* Connect to TEE. */
++ if (initialized == gcvFALSE)
++ {
++ result = TEEC_InitializeContext(NULL, &teecContext);
++
++ if (result != TEEC_SUCCESS) {
++ gcmkONERROR(gcvSTATUS_CHIP_NOT_READY);
++ }
++
++ initialized = gcvTRUE;
++ }
++
++ /* Construct channel. */
++ gcmkONERROR(
++ gckOS_Allocate(Os, gcmSIZEOF(*channel), (gctPOINTER *)&channel));
++
++ gckOS_ZeroMemory(channel, gcmSIZEOF(gcsSecurityChannel));
++
++ channel->os = Os;
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &channel->mutex));
++
++ /* Allocate shared memory for passing gcTA_INTERFACE. */
++ channel->bytes = gcmSIZEOF(gcsTA_INTERFACE);
++ channel->virtual = kmalloc(channel->bytes, GFP_KERNEL | __GFP_NOWARN);
++
++ if (!channel->virtual)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ channel->inputBuffer.size = channel->bytes;
++ channel->inputBuffer.flags = TEEC_MEM_INPUT | TEEC_MEM_OUTPUT;
++ channel->inputBuffer.phyAddr = (void *)virt_to_phys(channel->virtual);
++
++ result = TEEC_RegisterSharedMemory(&teecContext, &channel->inputBuffer);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_VALUE_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ operation.params[0].value.a = GPU;
++
++ /* Open session with TEE application. */
++ result = TEEC_OpenSession(
++ &teecContext,
++ &channel->session,
++ &gpu3d_uuid,
++ TEEC_LOGIN_USER,
++ NULL,
++ &operation,
++ NULL);
++
++ /* Prepare callback. */
++ TEEC_RegisterCallback(&channel->session, gpu3d_session_callback, channel);
++
++ *Channel = (gctUINT32)channel;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (channel)
++ {
++ if (channel->virtual)
++ {
++ }
++
++ if (channel->mutex)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, channel->mutex));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(Os, channel));
++ }
++
++ return status;
++}
++
++gceSTATUS
++gckOS_CloseSecurityChannel(
++ IN gctUINT32 Channel
++ )
++{
++ /* TODO . */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CallSecurityService(
++ IN gctUINT32 Channel,
++ IN gcsTA_INTERFACE *Interface
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ gcsSecurityChannel *channel = (gcsSecurityChannel *)Channel;
++ TEEC_Operation operation = {0};
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(Channel != 0);
++
++ gckOS_AcquireMutex(channel->os, channel->mutex, gcvINFINITE);
++
++ gckOS_MemCopy(channel->virtual, Interface, channel->bytes);
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_MEMREF_PARTIAL_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ /* Note: we use the updated size in the MemRef output by the encryption. */
++ operation.params[0].memref.parent = &channel->inputBuffer;
++ operation.params[0].memref.offset = 0;
++ operation.params[0].memref.size = sizeof(gcsTA_INTERFACE);
++ operation.started = true;
++
++ /* Start the commit command within the TEE application. */
++ result = TEEC_InvokeCommand(
++ &channel->session,
++ gcvTA_COMMAND_DISPATCH,
++ &operation,
++ NULL);
++
++ gckOS_MemCopy(Interface, channel->virtual, channel->bytes);
++
++ gckOS_ReleaseMutex(channel->os, channel->mutex);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_InitSecurityChannel(
++ IN gctUINT32 Channel
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ gcsSecurityChannel *channel = (gcsSecurityChannel *)Channel;
++ TEEC_Operation operation = {0};
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(Channel != 0);
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_MEMREF_PARTIAL_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ /* Note: we use the updated size in the MemRef output by the encryption. */
++ operation.params[0].memref.parent = &channel->inputBuffer;
++ operation.params[0].memref.offset = 0;
++ operation.params[0].memref.size = gcmSIZEOF(gcsTA_INTERFACE);
++ operation.started = true;
++
++ /* Start the commit command within the TEE application. */
++ result = TEEC_InvokeCommand(
++ &channel->session,
++ gcvTA_COMMAND_INIT,
++ &operation,
++ NULL);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,177 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <gc_hal.h>
++#include <gc_hal_base.h>
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++#include <linux/kernel.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/uaccess.h>
++
++#include "gc_hal_kernel_sync.h"
++
++static struct sync_pt *
++viv_sync_pt_dup(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt *pt;
++ struct viv_sync_pt *src;
++ struct viv_sync_timeline *obj;
++
++ src = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ /* Create the new sync_pt. */
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = src->stamp;
++ pt->sync = src->sync;
++
++ /* Reference sync point. */
++ status = gckOS_ReferenceSyncPoint(obj->os, pt->sync);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *)pt;
++}
++
++static int
++viv_sync_pt_has_signaled(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ gctBOOL state;
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *)sync_pt;
++ obj = (struct viv_sync_timeline *)sync_pt->parent;
++
++ status = gckOS_QuerySyncPoint(obj->os, pt->sync, &state);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error. */
++ return -1;
++ }
++
++ return state;
++}
++
++static int
++viv_sync_pt_compare(
++ struct sync_pt * a,
++ struct sync_pt * b
++ )
++{
++ int ret;
++ struct viv_sync_pt * pt1 = (struct viv_sync_pt *) a;
++ struct viv_sync_pt * pt2 = (struct viv_sync_pt *) b;
++
++ ret = (pt1->stamp < pt2->stamp) ? -1
++ : (pt1->stamp == pt2->stamp) ? 0
++ : 1;
++
++ return ret;
++}
++
++static void
++viv_sync_pt_free(
++ struct sync_pt * sync_pt
++ )
++{
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ gckOS_DestroySyncPoint(obj->os, pt->sync);
++}
++
++static struct sync_timeline_ops viv_timeline_ops =
++{
++ .driver_name = "viv_sync",
++ .dup = viv_sync_pt_dup,
++ .has_signaled = viv_sync_pt_has_signaled,
++ .compare = viv_sync_pt_compare,
++ .free_pt = viv_sync_pt_free,
++};
++
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * name,
++ gckOS os
++ )
++{
++ struct viv_sync_timeline * obj;
++
++ obj = (struct viv_sync_timeline *)
++ sync_timeline_create(&viv_timeline_ops, sizeof(struct viv_sync_timeline), name);
++
++ obj->os = os;
++ obj->stamp = 0;
++
++ return obj;
++}
++
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * obj,
++ gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt * pt;
++
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = obj->stamp++;
++ pt->sync = SyncPoint;
++
++ /* Dup signal. */
++ status = gckOS_ReferenceSyncPoint(obj->os, SyncPoint);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *) pt;
++}
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.h linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.h
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,72 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_sync_h_
++#define __gc_hal_kernel_sync_h_
++
++#include <linux/types.h>
++
++/* sync.h is in drivers/staging/android/ for now. */
++#include <sync.h>
++
++#include <gc_hal.h>
++#include <gc_hal_base.h>
++
++struct viv_sync_timeline
++{
++ /* Parent object. */
++ struct sync_timeline obj;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++
++ /* Pointer to os struct. */
++ gckOS os;
++};
++
++
++struct viv_sync_pt
++{
++ /* Parent object. */
++ struct sync_pt pt;
++
++ /* Reference sync point*/
++ gctSYNC_POINT sync;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++};
++
++/* Create viv_sync_timeline object. */
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * Name,
++ gckOS Os
++ );
++
++/* Create viv_sync_pt object. */
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * Obj,
++ gctSYNC_POINT SyncPoint
++ );
++
++#endif /* __gc_hal_kernel_sync_h_ */
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,880 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_platform.h"
++#include "gc_hal_kernel_device.h"
++#include "gc_hal_driver.h"
++#include <linux/slab.h>
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#include <mach/viv_gpu.h>
++#else
++#include <linux/pm_runtime.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <mach/busfreq.h>
++#else
++#include <linux/busfreq-imx6.h>
++#include <linux/reset.h>
++#endif
++#endif
++
++#include <linux/clk.h>
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/pm_runtime.h>
++
++#include <linux/regulator/consumer.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/device_cooling.h>
++#define REG_THERMAL_NOTIFIER(a) register_devfreq_cooling_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_devfreq_cooling_notifier(a);
++#else
++extern int register_thermal_notifier(struct notifier_block *nb);
++extern int unregister_thermal_notifier(struct notifier_block *nb);
++#define REG_THERMAL_NOTIFIER(a) register_thermal_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_thermal_notifier(a);
++#endif
++
++static int initgpu3DMinClock = 1;
++module_param(initgpu3DMinClock, int, 0644);
++
++struct platform_device *pdevice;
++
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++# include <linux/kernel.h>
++# include <linux/mm.h>
++# include <linux/oom.h>
++# include <linux/sched.h>
++
++struct task_struct *lowmem_deathpending;
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data);
++
++static struct notifier_block task_nb = {
++ .notifier_call = task_notify_func,
++};
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data)
++{
++ struct task_struct *task = data;
++
++ if (task == lowmem_deathpending)
++ lowmem_deathpending = NULL;
++
++ return NOTIFY_OK;
++}
++
++extern struct task_struct *lowmem_deathpending;
++static unsigned long lowmem_deathpending_timeout;
++
++static int force_contiguous_lowmem_shrink(IN gckKERNEL Kernel)
++{
++ struct task_struct *p;
++ struct task_struct *selected = NULL;
++ int tasksize;
++ int ret = -1;
++ int min_adj = 0;
++ int selected_tasksize = 0;
++ int selected_oom_adj;
++ /*
++ * If we already have a death outstanding, then
++ * bail out right away; indicating to vmscan
++ * that we have nothing further to offer on
++ * this pass.
++ *
++ */
++ if (lowmem_deathpending &&
++ time_before_eq(jiffies, lowmem_deathpending_timeout))
++ return 0;
++ selected_oom_adj = min_adj;
++
++ rcu_read_lock();
++ for_each_process(p) {
++ struct mm_struct *mm;
++ struct signal_struct *sig;
++ gcuDATABASE_INFO info;
++ int oom_adj;
++
++ task_lock(p);
++ mm = p->mm;
++ sig = p->signal;
++ if (!mm || !sig) {
++ task_unlock(p);
++ continue;
++ }
++ oom_adj = sig->oom_score_adj;
++ if (oom_adj < min_adj) {
++ task_unlock(p);
++ continue;
++ }
++
++ tasksize = 0;
++ task_unlock(p);
++ rcu_read_unlock();
++
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_VIDEO_MEMORY, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_CONTIGUOUS, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++
++ rcu_read_lock();
++
++ if (tasksize <= 0)
++ continue;
++
++ gckOS_Print("<gpu> pid %d (%s), adj %d, size %d \n", p->pid, p->comm, oom_adj, tasksize);
++
++ if (selected) {
++ if (oom_adj < selected_oom_adj)
++ continue;
++ if (oom_adj == selected_oom_adj &&
++ tasksize <= selected_tasksize)
++ continue;
++ }
++ selected = p;
++ selected_tasksize = tasksize;
++ selected_oom_adj = oom_adj;
++ }
++ if (selected) {
++ gckOS_Print("<gpu> send sigkill to %d (%s), adj %d, size %d\n",
++ selected->pid, selected->comm,
++ selected_oom_adj, selected_tasksize);
++ lowmem_deathpending = selected;
++ lowmem_deathpending_timeout = jiffies + HZ;
++ force_sig(SIGKILL, selected);
++ ret = 0;
++ }
++ rcu_read_unlock();
++ return ret;
++}
++
++
++gceSTATUS
++_ShrinkMemory(
++ IN gckPLATFORM Platform
++ )
++{
++ struct platform_device *pdev;
++ gckGALDEVICE galDevice;
++ gckKERNEL kernel;
++
++ pdev = Platform->device;
++
++ galDevice = platform_get_drvdata(pdev);
++
++ kernel = galDevice->kernels[gcvCORE_MAJOR];
++
++ if (kernel != gcvNULL)
++ {
++ force_contiguous_lowmem_shrink(kernel);
++ }
++ else
++ {
++ gcmkPRINT("%s(%d) can't find kernel! ", __FUNCTION__, __LINE__);
++ }
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++static int thermal_hot_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ static gctUINT orgFscale, minFscale, maxFscale;
++ static gctBOOL bAlreadyTooHot = gcvFALSE;
++ gckHARDWARE hardware;
++ gckGALDEVICE galDevice;
++
++ galDevice = platform_get_drvdata(pdevice);
++ if (!galDevice)
++ {
++ /* GPU is not ready, so it is meaningless to change GPU freq. */
++ return NOTIFY_OK;
++ }
++
++ if (!galDevice->kernels[gcvCORE_MAJOR])
++ {
++ return NOTIFY_OK;
++ }
++
++ hardware = galDevice->kernels[gcvCORE_MAJOR]->hardware;
++
++ if (!hardware)
++ {
++ return NOTIFY_OK;
++ }
++
++ if (event && !bAlreadyTooHot) {
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, minFscale);
++ bAlreadyTooHot = gcvTRUE;
++ gckOS_Print("System is too hot. GPU3D will work at %d/64 clock.\n", minFscale);
++ } else if (!event && bAlreadyTooHot) {
++ gckHARDWARE_SetFscaleValue(hardware, orgFscale);
++ gckOS_Print("Hot alarm is canceled. GPU3D clock will return to %d/64\n", orgFscale);
++ bAlreadyTooHot = gcvFALSE;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block thermal_hot_pm_notifier = {
++ .notifier_call = thermal_hot_pm_notify,
++ };
++
++static ssize_t show_gpu3DMinClock(struct device_driver *dev, char *buf)
++{
++ gctUINT currentf,minf,maxf;
++ gckGALDEVICE galDevice;
++
++ galDevice = platform_get_drvdata(pdevice);
++ if(galDevice->kernels[gcvCORE_MAJOR])
++ {
++ gckHARDWARE_GetFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware,
++ &currentf, &minf, &maxf);
++ }
++ snprintf(buf, PAGE_SIZE, "%d\n", minf);
++ return strlen(buf);
++}
++
++static ssize_t update_gpu3DMinClock(struct device_driver *dev, const char *buf, size_t count)
++{
++
++ gctINT fields;
++ gctUINT MinFscaleValue;
++ gckGALDEVICE galDevice;
++
++ galDevice = platform_get_drvdata(pdevice);
++ if(galDevice->kernels[gcvCORE_MAJOR])
++ {
++ fields = sscanf(buf, "%d", &MinFscaleValue);
++ if (fields < 1)
++ return -EINVAL;
++
++ gckHARDWARE_SetMinFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware,MinFscaleValue);
++ }
++
++ return count;
++}
++
++static DRIVER_ATTR(gpu3DMinClock, S_IRUGO | S_IWUSR, show_gpu3DMinClock, update_gpu3DMinClock);
++#endif
++
++
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static const struct of_device_id mxs_gpu_dt_ids[] = {
++ { .compatible = "fsl,imx6q-gpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, mxs_gpu_dt_ids);
++#endif
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++struct contiguous_mem_pool {
++ struct dma_attrs attrs;
++ dma_addr_t phys;
++ void *virt;
++ size_t size;
++};
++#endif
++
++struct imx_priv {
++ /* Clock management.*/
++ struct clk *clk_3d_core;
++ struct clk *clk_3d_shader;
++ struct clk *clk_3d_axi;
++ struct clk *clk_2d_core;
++ struct clk *clk_2d_axi;
++ struct clk *clk_vg_axi;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ /*Power management.*/
++ struct regulator *gpu_regulator;
++#endif
++#endif
++ /*Run time pm*/
++ struct device *pmdev;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc[gcdMAX_GPU_COUNT];
++#endif
++};
++
++static struct imx_priv imxPriv;
++
++gceSTATUS
++gckPLATFORM_AdjustParam(
++ IN gckPLATFORM Platform,
++ OUT gcsMODULE_PARAMETERS *Args
++ )
++{
++ struct resource* res;
++ struct platform_device* pdev = Platform->device;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct device_node *dn =pdev->dev.of_node;
++ const u32 *prop;
++#else
++ struct viv_gpu_platform_data *pdata;
++#endif
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
++ if (res)
++ Args->baseAddress = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
++ if (res)
++ Args->irqLine = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
++ if (res)
++ {
++ Args->registerMemBase = res->start;
++ Args->registerMemSize = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
++ if (res)
++ Args->irqLine2D = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
++ if (res)
++ {
++ Args->registerMemBase2D = res->start;
++ Args->registerMemSize2D = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
++ if (res)
++ Args->irqLineVG = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
++ if (res)
++ {
++ Args->registerMemBaseVG = res->start;
++ Args->registerMemSizeVG = res->end - res->start + 1;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ Args->contiguousBase = 0;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ prop = of_get_property(dn, "contiguousbase", NULL);
++ if(prop)
++ Args->contiguousBase = *prop;
++ of_property_read_u32(dn,"contiguoussize", (u32 *)&contiguousSize);
++#else
++ pdata = pdev->dev.platform_data;
++ if (pdata) {
++ Args->contiguousBase = pdata->reserved_mem_base;
++ Args->contiguousSize = pdata->reserved_mem_size;
++ }
++#endif
++ if (Args->contiguousSize == 0)
++ gckOS_Print("Warning: No contiguous memory is reserverd for gpu.!\n ");
++
++ Args->gpu3DMinClock = initgpu3DMinClock;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_AllocPriv(
++ IN gckPLATFORM Platform
++ )
++{
++ Platform->priv = &imxPriv;
++
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ task_free_register(&task_nb);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_FreePriv(
++ IN gckPLATFORM Platform
++ )
++{
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ task_free_unregister(&task_nb);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_GetPower(
++ IN gckPLATFORM Platform
++ )
++{
++ struct device* pdev = &Platform->device->dev;
++ struct imx_priv *priv = Platform->priv;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct reset_control *rstc;
++#endif
++
++#ifdef CONFIG_PM
++ /*Init runtime pm for gpu*/
++ pm_runtime_enable(pdev);
++ priv->pmdev = pdev;
++#endif
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ rstc = devm_reset_control_get(pdev, "gpu3d");
++ priv->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;
++ rstc = devm_reset_control_get(pdev, "gpu2d");
++ priv->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
++ rstc = devm_reset_control_get(pdev, "gpuvg");
++ priv->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ /*get gpu regulator*/
++ priv->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ priv->gpu_regulator = devm_regulator_get(pdev, "pu");
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if (IS_ERR(priv->gpu_regulator)) {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to get gpu regulator \n",
++ __FUNCTION__, __LINE__);
++ return gcvSTATUS_NOT_FOUND;
++ }
++#endif
++#endif
++
++ /*Initialize the clock structure*/
++ priv->clk_3d_core = clk_get(pdev, "gpu3d_clk");
++ if (!IS_ERR(priv->clk_3d_core)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (cpu_is_mx6q()) {
++ priv->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(priv->clk_3d_shader)) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ priv->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++ }
++#else
++ priv->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
++ priv->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(priv->clk_3d_shader)) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ priv->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++#endif
++ } else {
++ priv->clk_3d_core = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
++ }
++
++ priv->clk_2d_core = clk_get(pdev, "gpu2d_clk");
++ if (IS_ERR(priv->clk_2d_core)) {
++ priv->clk_2d_core = NULL;
++ gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
++ } else {
++ priv->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
++ if (IS_ERR(priv->clk_2d_axi)) {
++ priv->clk_2d_axi = NULL;
++ gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
++ }
++
++ priv->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
++ if (IS_ERR(priv->clk_vg_axi)) {
++ priv->clk_vg_axi = NULL;
++ gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
++ }
++ }
++
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ pdevice = Platform->device;
++ REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++ {
++ int ret = 0;
++ ret = driver_create_file(pdevice->dev.driver, &driver_attr_gpu3DMinClock);
++ if(ret)
++ dev_err(&pdevice->dev, "create gpu3DMinClock attr failed (%d)\n", ret);
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_PutPower(
++ IN gckPLATFORM Platform
++ )
++{
++ struct imx_priv *priv = Platform->priv;
++
++ /*Disable clock*/
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (priv->clk_3d_axi) {
++ clk_put(priv->clk_3d_axi);
++ priv->clk_3d_axi = NULL;
++ }
++#endif
++ if (priv->clk_3d_core) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ }
++ if (priv->clk_3d_shader) {
++ clk_put(priv->clk_3d_shader);
++ priv->clk_3d_shader = NULL;
++ }
++ if (priv->clk_2d_core) {
++ clk_put(priv->clk_2d_core);
++ priv->clk_2d_core = NULL;
++ }
++ if (priv->clk_2d_axi) {
++ clk_put(priv->clk_2d_axi);
++ priv->clk_2d_axi = NULL;
++ }
++ if (priv->clk_vg_axi) {
++ clk_put(priv->clk_vg_axi);
++ priv->clk_vg_axi = NULL;
++ }
++
++#ifdef CONFIG_PM
++ if(priv->pmdev)
++ pm_runtime_disable(priv->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (priv->gpu_regulator) {
++ regulator_put(priv->gpu_regulator);
++ priv->gpu_regulator = NULL;
++ }
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++
++ driver_remove_file(pdevice->dev.driver, &driver_attr_gpu3DMinClock);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_SetPower(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ )
++{
++ struct imx_priv* priv = Platform->priv;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ int ret;
++#endif
++#endif
++
++ if (Enable)
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(priv->gpu_regulator)) {
++ ret = regulator_enable(priv->gpu_regulator);
++ if (ret != 0)
++ gckOS_Print("%s(%d): fail to enable pu regulator %d!\n",
++ __FUNCTION__, __LINE__, ret);
++ }
++#else
++ imx_gpc_power_up_pu(true);
++#endif
++#endif
++
++#ifdef CONFIG_PM
++ pm_runtime_get_sync(priv->pmdev);
++#endif
++ }
++ else
++ {
++#ifdef CONFIG_PM
++ pm_runtime_put_sync(priv->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(priv->gpu_regulator))
++ regulator_disable(priv->gpu_regulator);
++#else
++ imx_gpc_power_up_pu(false);
++#endif
++#endif
++
++ }
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_SetClock(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ )
++{
++ struct imx_priv* priv = Platform->priv;
++ struct clk *clk_3dcore = priv->clk_3d_core;
++ struct clk *clk_3dshader = priv->clk_3d_shader;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct clk *clk_3d_axi = priv->clk_3d_axi;
++#endif
++ struct clk *clk_2dcore = priv->clk_2d_core;
++ struct clk *clk_2d_axi = priv->clk_2d_axi;
++ struct clk *clk_vg_axi = priv->clk_vg_axi;
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Enable) {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_enable(clk_3dcore);
++ if (cpu_is_mx6q())
++ clk_enable(clk_3dshader);
++ break;
++ case gcvCORE_2D:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ } else {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ if (cpu_is_mx6q())
++ clk_disable(clk_3dshader);
++ clk_disable(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++#else
++ if (Enable) {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_prepare(clk_3dcore);
++ clk_enable(clk_3dcore);
++ clk_prepare(clk_3dshader);
++ clk_enable(clk_3dshader);
++ clk_prepare(clk_3d_axi);
++ clk_enable(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_prepare(clk_2dcore);
++ clk_enable(clk_2dcore);
++ clk_prepare(clk_2d_axi);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_prepare(clk_2dcore);
++ clk_enable(clk_2dcore);
++ clk_prepare(clk_vg_axi);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ } else {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_disable(clk_3dshader);
++ clk_unprepare(clk_3dshader);
++ clk_disable(clk_3dcore);
++ clk_unprepare(clk_3dcore);
++ clk_disable(clk_3d_axi);
++ clk_unprepare(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_unprepare(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ clk_unprepare(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_unprepare(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ clk_unprepare(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++#ifdef CONFIG_PM
++static int gpu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int gpu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static struct dev_pm_ops gpu_pm_ops;
++#endif
++#endif
++
++gceSTATUS
++_AdjustDriver(
++ IN gckPLATFORM Platform
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct platform_driver * driver = Platform->driver;
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ driver->driver.of_match_table = mxs_gpu_dt_ids;
++#endif
++
++ /* Override PM callbacks to add runtime PM callbacks. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ /* Fill local structure with original value. */
++ memcpy(&gpu_pm_ops, driver->driver.pm, sizeof(struct dev_pm_ops));
++
++ /* Add runtime PM callback. */
++#ifdef CONFIG_PM_RUNTIME
++ gpu_pm_ops.runtime_suspend = gpu_runtime_suspend;
++ gpu_pm_ops.runtime_resume = gpu_runtime_resume;
++ gpu_pm_ops.runtime_idle = NULL;
++#endif
++
++ /* Replace callbacks. */
++ driver->driver.pm = &gpu_pm_ops;
++#endif
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_Reset(
++ IN gckPLATFORM Platform,
++ gceCORE GPU
++ )
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#define SRC_SCR_OFFSET 0
++#define BP_SRC_SCR_GPU3D_RST 1
++#define BP_SRC_SCR_GPU2D_RST 4
++ void __iomem *src_base = IO_ADDRESS(SRC_BASE_ADDR);
++ gctUINT32 bit_offset,val;
++
++ if(GPU == gcvCORE_MAJOR) {
++ bit_offset = BP_SRC_SCR_GPU3D_RST;
++ } else if((GPU == gcvCORE_VG)
++ ||(GPU == gcvCORE_2D)) {
++ bit_offset = BP_SRC_SCR_GPU2D_RST;
++ } else {
++ return gcvSTATUS_INVALID_CONFIG;
++ }
++ val = __raw_readl(src_base + SRC_SCR_OFFSET);
++ val &= ~(1 << (bit_offset));
++ val |= (1 << (bit_offset));
++ __raw_writel(val, src_base + SRC_SCR_OFFSET);
++
++ while ((__raw_readl(src_base + SRC_SCR_OFFSET) &
++ (1 << (bit_offset))) != 0) {
++ }
++
++ return gcvSTATUS_NOT_SUPPORTED;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct imx_priv* priv = Platform->priv;
++ struct reset_control *rstc = priv->rstc[GPU];
++ if (rstc)
++ reset_control_reset(rstc);
++#else
++ imx_src_reset_gpu((int)GPU);
++#endif
++ return gcvSTATUS_OK;
++}
++
++gcsPLATFORM_OPERATIONS platformOperations = {
++ .adjustParam = gckPLATFORM_AdjustParam,
++ .allocPriv = _AllocPriv,
++ .freePriv = _FreePriv,
++ .getPower = _GetPower,
++ .putPower = _PutPower,
++ .setPower = _SetPower,
++ .setClock = _SetClock,
++ .adjustDriver = _AdjustDriver,
++ .reset = _Reset,
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ .shrinkMemory = _ShrinkMemory,
++#endif
++};
++
++void
++gckPLATFORM_QueryOperations(
++ IN gcsPLATFORM_OPERATIONS ** Operations
++ )
++{
++ *Operations = &platformOperations;
++}
++
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,15 @@
++EXTRA_CFLAGS += -DgcdDEFAULT_CONTIGUOUS_SIZE=134217728
++
++ifneq ($(CONFIG_ANDROID),)
++# build for android
++EXTRA_CFLAGS += -DgcdANDROID_NATIVE_FENCE_SYNC=3
++
++ifeq ($(CONFIG_SYNC),)
++$(warn CONFIG_SYNC is not set in kernel config)
++$(warn Android native fence sync needs CONFIG_SYNC)
++endif
++endif
++
++EXTRA_CFLAGS += -DLINUX_CMA_FSL=1
++ALLOCATOR_ARRAY_H_LOCATION := $(OS_KERNEL_DIR)/allocator/freescale
++CUSTOMER_ALLOCATOR_OBJS := $(ALLOCATOR_ARRAY_H_LOCATION)/gc_hal_kernel_allocator_cma.o
+diff -Nur linux-3.14.36/drivers/mxc/gpu-viv/v5/Kbuild linux-openelec/drivers/mxc/gpu-viv/v5/Kbuild
+--- linux-3.14.36/drivers/mxc/gpu-viv/v5/Kbuild 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/gpu-viv/v5/Kbuild 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,272 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2014 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++#
++# Linux build file for kernel HAL driver.
++#
++
++AQROOT := $(srctree)/drivers/mxc/gpu-viv/v5
++
++include $(AQROOT)/config
++
++KERNEL_DIR ?= $(TOOL_DIR)/kernel
++
++OS_KERNEL_DIR := hal/os/linux/kernel
++ARCH_KERNEL_DIR := hal/kernel/arch
++ARCH_VG_KERNEL_DIR := hal/kernel/archvg
++HAL_KERNEL_DIR := hal/kernel
++
++# Check and include platform config.
++ifneq ($(PLATFORM),)
++
++# Get platform config path.
++PLATFORM_CONFIG ?= $(AQROOT)/$(OS_KERNEL_DIR)/platform/$(PLATFORM).config
++
++# Check whether it exists.
++PLATFORM_CONFIG := $(wildcard $(PLATFORM_CONFIG))
++
++# Include it if exists.
++ifneq ($(PLATFORM_CONFIG),)
++include $(PLATFORM_CONFIG)
++endif
++
++endif
++
++MODULE_NAME ?= galcore
++CUSTOMER_ALLOCATOR_OBJS ?=
++ALLOCATOR_ARRAY_H_LOCATION ?= $(OS_KERNEL_DIR)/allocator/default/
++
++EXTRA_CFLAGS += -Werror
++
++OBJS := $(OS_KERNEL_DIR)/gc_hal_kernel_device.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_linux.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_math.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_os.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_debugfs.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_allocator.o \
++
++ifneq ($(CONFIG_IOMMU_SUPPORT),)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_iommu.o
++endif
++
++ifneq ($(PLATFORM),)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_probe.o
++OBJS += $(OS_KERNEL_DIR)/platform/$(PLATFORM).o
++else
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_driver.o
++endif
++
++OBJS += $(HAL_KERNEL_DIR)/gc_hal_kernel.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_db.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_debug.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_event.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_heap.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_video_memory.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_power.o
++
++OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_context.o \
++ $(ARCH_KERNEL_DIR)/gc_hal_kernel_hardware.o
++
++ifeq ($(VIVANTE_ENABLE_3D), 1)
++OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_recorder.o
++endif
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++OBJS +=\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_interrupt_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_command_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_vg.o
++endif
++
++ifneq ($(CONFIG_SYNC),)
++EXTRA_CFLAGS += -Idrivers/staging/android
++
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_sync.o
++endif
++
++ifeq ($(SECURITY), 1)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_security_channel.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_security.o
++endif
++
++ifneq ($(CUSTOMER_ALLOCATOR_OBJS),)
++OBJS += $(CUSTOMER_ALLOCATOR_OBJS)
++endif
++
++ifeq ($(KERNELRELEASE), )
++
++.PHONY: all clean install
++
++# Define targets.
++all:
++ @make V=$(V) ARCH=$(ARCH_TYPE) -C $(KERNEL_DIR) SUBDIRS=`pwd` modules
++
++clean:
++ @rm -rf $(OBJS)
++ @rm -rf modules.order Module.symvers
++ @find $(AQROOT) -name ".gc_*.cmd" | xargs rm -f
++
++install: all
++ @mkdir -p $(SDK_DIR)/drivers
++
++else
++
++
++EXTRA_CFLAGS += -DLINUX -DDRIVER
++
++ifeq ($(FLAREON),1)
++EXTRA_CFLAGS += -DFLAREON
++endif
++
++ifeq ($(DEBUG), 1)
++EXTRA_CFLAGS += -DDBG=1 -DDEBUG -D_DEBUG
++else
++EXTRA_CFLAGS += -DDBG=0
++endif
++
++ifeq ($(NO_DMA_COHERENT), 1)
++EXTRA_CFLAGS += -DNO_DMA_COHERENT
++endif
++
++ifeq ($(CONFIG_DOVE_GPU), 1)
++EXTRA_CFLAGS += -DCONFIG_DOVE_GPU=1
++endif
++
++ifneq ($(USE_PLATFORM_DRIVER), 0)
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=1
++else
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=0
++endif
++
++EXTRA_CFLAGS += -DVIVANTE_PROFILER=1
++EXTRA_CFLAGS += -DVIVANTE_PROFILER_CONTEXT=1
++
++ifeq ($(ENABLE_GPU_CLOCK_BY_DRIVER), 1)
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=1
++else
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=0
++endif
++
++ifeq ($(USE_NEW_LINUX_SIGNAL), 1)
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=1
++else
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=0
++endif
++
++ifeq ($(FORCE_ALL_VIDEO_MEMORY_CACHED), 1)
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_CACHEABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_BUFFERABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=0
++endif
++
++ifeq ($(CACHE_FUNCTION_UNIMPLEMENTED), 1)
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=1
++else
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=0
++endif
++
++ifeq ($(CONFIG_SMP), y)
++EXTRA_CFLAGS += -DgcdSMP=1
++else
++EXTRA_CFLAGS += -DgcdSMP=0
++endif
++
++ifeq ($(VIVANTE_ENABLE_3D),0)
++EXTRA_CFLAGS += -DgcdENABLE_3D=0
++else
++EXTRA_CFLAGS += -DgcdENABLE_3D=1
++endif
++
++ifeq ($(VIVANTE_ENABLE_2D),0)
++EXTRA_CFLAGS += -DgcdENABLE_2D=0
++else
++EXTRA_CFLAGS += -DgcdENABLE_2D=1
++endif
++
++ifeq ($(VIVANTE_ENABLE_VG),0)
++EXTRA_CFLAGS += -DgcdENABLE_VG=0
++else
++EXTRA_CFLAGS += -DgcdENABLE_VG=1
++endif
++
++ifeq ($(ENABLE_OUTER_CACHE_PATCH), 1)
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=1
++else
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=0
++endif
++
++ifeq ($(USE_BANK_ALIGNMENT), 1)
++ EXTRA_CFLAGS += -DgcdENABLE_BANK_ALIGNMENT=1
++ ifneq ($(BANK_BIT_START), 0)
++ ifneq ($(BANK_BIT_END), 0)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_START=$(BANK_BIT_START)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_END=$(BANK_BIT_END)
++ endif
++ endif
++
++ ifneq ($(BANK_CHANNEL_BIT), 0)
++ EXTRA_CFLAGS += -DgcdBANK_CHANNEL_BIT=$(BANK_CHANNEL_BIT)
++ endif
++endif
++
++ifeq ($(gcdFPGA_BUILD), 1)
++EXTRA_CFLAGS += -DgcdFPGA_BUILD=1
++else
++EXTRA_CFLAGS += -DgcdFPGA_BUILD=0
++endif
++
++ifeq ($(SECURITY), 1)
++EXTRA_CFLAGS += -DgcdSECURITY=1
++endif
++
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/arch
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc
++EXTRA_CFLAGS += -I$(AQROOT)/hal/os/linux/kernel
++EXTRA_CFLAGS += -I$(AQROOT)/$(ALLOCATOR_ARRAY_H_LOCATION)
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/archvg
++endif
++
++obj-$(CONFIG_MXC_GPU_VIV) += galcore.o
++
++galcore-objs := $(OBJS)
++
++endif
+diff -Nur linux-3.14.36/drivers/mxc/hdmi-cec/Kconfig linux-openelec/drivers/mxc/hdmi-cec/Kconfig
+--- linux-3.14.36/drivers/mxc/hdmi-cec/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/hdmi-cec/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,11 @@
++
++menu "MXC HDMI CEC (Consumer Electronics Control) support"
++
++config MXC_HDMI_CEC
++ tristate "Support for MXC HDMI CEC (Consumer Electronics Control)"
++ depends on MFD_MXC_HDMI
++ depends on FB_MXC_HDMI
++ help
++ The HDMI CEC device implement low level protocol on i.MX6x platforms.
++
++endmenu
+diff -Nur linux-3.14.36/drivers/mxc/hdmi-cec/Makefile linux-openelec/drivers/mxc/hdmi-cec/Makefile
+--- linux-3.14.36/drivers/mxc/hdmi-cec/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/hdmi-cec/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1 @@
++obj-$(CONFIG_MXC_HDMI_CEC) += mxc_hdmi-cec.o
+diff -Nur linux-3.14.36/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c linux-openelec/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c
+--- linux-3.14.36/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c 2015-07-24 18:03:30.348842002 -0500
+@@ -0,0 +1,608 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_hdmi-cec.c
++ *
++ * @brief HDMI CEC system initialization and file operation implementation
++ *
++ * @ingroup HDMI
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/workqueue.h>
++#include <linux/sizes.h>
++
++#include <linux/console.h>
++#include <linux/types.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/pinctrl/consumer.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "mxc_hdmi-cec.h"
++
++
++#define MAX_MESSAGE_LEN 17
++
++#define MESSAGE_TYPE_RECEIVE_SUCCESS 1
++#define MESSAGE_TYPE_NOACK 2
++#define MESSAGE_TYPE_DISCONNECTED 3
++#define MESSAGE_TYPE_CONNECTED 4
++#define MESSAGE_TYPE_SEND_SUCCESS 5
++
++#define CEC_TX_INPROGRESS -1
++#define CEC_TX_AVAIL 0
++
++struct hdmi_cec_priv {
++ int receive_error;
++ int send_error;
++ u8 Logical_address;
++ bool cec_state;
++ u8 last_msg[MAX_MESSAGE_LEN];
++ u8 msg_len;
++ int tx_answer;
++ u16 latest_cec_stat;
++ spinlock_t irq_lock;
++ struct delayed_work hdmi_cec_work;
++ struct mutex lock;
++};
++
++struct hdmi_cec_event {
++ int event_type;
++ int msg_len;
++ u8 msg[MAX_MESSAGE_LEN];
++ struct list_head list;
++};
++
++
++static LIST_HEAD(head);
++
++static int hdmi_cec_major;
++static struct class *hdmi_cec_class;
++static struct hdmi_cec_priv hdmi_cec_data;
++static u8 open_count;
++
++static wait_queue_head_t hdmi_cec_queue;
++static wait_queue_head_t tx_cec_queue;
++
++static irqreturn_t mxc_hdmi_cec_isr(int irq, void *data)
++{
++ struct hdmi_cec_priv *hdmi_cec = data;
++ u16 cec_stat = 0;
++ unsigned long flags;
++ irqreturn_t ret = IRQ_HANDLED;
++
++ spin_lock_irqsave(&hdmi_cec->irq_lock, flags);
++
++ hdmi_writeb(0x7f, HDMI_IH_MUTE_CEC_STAT0);
++
++ cec_stat = hdmi_readb(HDMI_IH_CEC_STAT0);
++ hdmi_writeb(cec_stat, HDMI_IH_CEC_STAT0);
++
++ if ((cec_stat & (HDMI_IH_CEC_STAT0_ERROR_INIT | \
++ HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM | \
++ HDMI_IH_CEC_STAT0_DONE)) == 0) {
++ ret = IRQ_NONE;
++ cec_stat = 0;
++ }
++
++ pr_debug("HDMI CEC interrupt received\n");
++ hdmi_cec->latest_cec_stat = cec_stat ;
++
++ schedule_delayed_work(&(hdmi_cec->hdmi_cec_work), msecs_to_jiffies(20));
++
++ spin_unlock_irqrestore(&hdmi_cec->irq_lock, flags);
++
++ return ret;
++}
++
++void mxc_hdmi_cec_handle(u16 cec_stat)
++{
++ u8 val = 0, i = 0;
++ struct hdmi_cec_event *event = NULL;
++ /*The current transmission is successful (for initiator only).*/
++ if (!open_count)
++ return;
++
++ if (cec_stat & HDMI_IH_CEC_STAT0_DONE) {
++ hdmi_cec_data.tx_answer = cec_stat;
++ wake_up(&tx_cec_queue);
++ }
++ /*EOM is detected so that the received data is ready in the receiver data buffer*/
++ if (cec_stat & HDMI_IH_CEC_STAT0_EOM) {
++ hdmi_writeb(0x02, HDMI_IH_CEC_STAT0);
++ event = vmalloc(sizeof(struct hdmi_cec_event));
++ if (NULL == event) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return;
++ }
++ memset(event, 0, sizeof(struct hdmi_cec_event));
++ event->msg_len = hdmi_readb(HDMI_CEC_RX_CNT);
++ if (!event->msg_len) {
++ pr_err("%s: Invalid CEC message length!\n", __func__);
++ return;
++ }
++ event->event_type = MESSAGE_TYPE_RECEIVE_SUCCESS;
++ for (i = 0; i < event->msg_len; i++)
++ event->msg[i] = hdmi_readb(HDMI_CEC_RX_DATA0+i);
++ hdmi_writeb(0x0, HDMI_CEC_LOCK);
++ mutex_lock(&hdmi_cec_data.lock);
++ list_add_tail(&event->list, &head);
++ mutex_unlock(&hdmi_cec_data.lock);
++ wake_up(&hdmi_cec_queue);
++ }
++ /*An error is detected on cec line (for initiator only). */
++ if (cec_stat & HDMI_IH_CEC_STAT0_ERROR_INIT) {
++ mutex_lock(&hdmi_cec_data.lock);
++ hdmi_cec_data.send_error++;
++ if (hdmi_cec_data.send_error > 2) {
++ pr_err("%s:Re-transmission is attempted more than 2 times!\n", __func__);
++ hdmi_cec_data.send_error = 0;
++ mutex_unlock(&hdmi_cec_data.lock);
++ hdmi_cec_data.tx_answer = cec_stat;
++ wake_up(&tx_cec_queue);
++ return;
++ }
++ for (i = 0; i < hdmi_cec_data.msg_len; i++)
++ hdmi_writeb(hdmi_cec_data.last_msg[i], HDMI_CEC_TX_DATA0+i);
++ hdmi_writeb(hdmi_cec_data.msg_len, HDMI_CEC_TX_CNT);
++ val = hdmi_readb(HDMI_CEC_CTRL);
++ val |= 0x01;
++ hdmi_writeb(val, HDMI_CEC_CTRL);
++ mutex_unlock(&hdmi_cec_data.lock);
++ }
++ /*A frame is not acknowledged in a directly addressed message. Or a frame is negatively acknowledged in
++ a broadcast message (for initiator only).*/
++ if (cec_stat & HDMI_IH_CEC_STAT0_NACK) {
++ hdmi_cec_data.tx_answer = cec_stat;
++ wake_up(&tx_cec_queue);
++ }
++ /*An error is notified by a follower. Abnormal logic data bit error (for follower).*/
++ if (cec_stat & HDMI_IH_CEC_STAT0_ERROR_FOLL) {
++ hdmi_cec_data.receive_error++;
++ }
++ /*HDMI cable connected*/
++ if (cec_stat & 0x80) {
++ pr_info("HDMI link connected\n");
++ event = vmalloc(sizeof(struct hdmi_cec_event));
++ if (NULL == event) {
++ pr_err("%s: Not enough memory\n", __func__);
++ return;
++ }
++ memset(event, 0, sizeof(struct hdmi_cec_event));
++ event->event_type = MESSAGE_TYPE_CONNECTED;
++ mutex_lock(&hdmi_cec_data.lock);
++ list_add_tail(&event->list, &head);
++ mutex_unlock(&hdmi_cec_data.lock);
++ wake_up(&hdmi_cec_queue);
++ }
++ /*HDMI cable disconnected*/
++ if (cec_stat & 0x100) {
++ pr_info("HDMI link disconnected\n");
++ event = vmalloc(sizeof(struct hdmi_cec_event));
++ if (NULL == event) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return;
++ }
++ memset(event, 0, sizeof(struct hdmi_cec_event));
++ event->event_type = MESSAGE_TYPE_DISCONNECTED;
++ mutex_lock(&hdmi_cec_data.lock);
++ list_add_tail(&event->list, &head);
++ mutex_unlock(&hdmi_cec_data.lock);
++ wake_up(&hdmi_cec_queue);
++ }
++ return;
++}
++EXPORT_SYMBOL(mxc_hdmi_cec_handle);
++static void mxc_hdmi_cec_worker(struct work_struct *work)
++{
++ u8 val;
++ mxc_hdmi_cec_handle(hdmi_cec_data.latest_cec_stat);
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL | HDMI_IH_CEC_STAT0_ARB_LOST;
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++}
++
++/*!
++ * @brief open function for cec file operation
++ *
++ * @return 0 on success or negative error code on error
++ */
++static int hdmi_cec_open(struct inode *inode, struct file *filp)
++{
++ mutex_lock(&hdmi_cec_data.lock);
++ if (open_count) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EBUSY;
++ }
++ open_count = 1;
++ filp->private_data = (void *)(&hdmi_cec_data);
++ hdmi_cec_data.Logical_address = 15;
++ hdmi_cec_data.cec_state = false;
++ mutex_unlock(&hdmi_cec_data.lock);
++ return 0;
++}
++
++static ssize_t hdmi_cec_read(struct file *file, char __user *buf, size_t count,
++ loff_t *ppos)
++{
++ struct hdmi_cec_event *event = NULL;
++ pr_debug("function : %s\n", __func__);
++
++ if (!open_count)
++ return -ENODEV;
++ mutex_lock(&hdmi_cec_data.lock);
++ if (false == hdmi_cec_data.cec_state) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EACCES;
++ }
++
++ if (list_empty(&head)) {
++ if (file->f_flags & O_NONBLOCK) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EAGAIN;
++ } else {
++ do {
++ mutex_unlock(&hdmi_cec_data.lock);
++ if (wait_event_interruptible(hdmi_cec_queue, (!list_empty(&head))))
++ return -ERESTARTSYS;
++ mutex_lock(&hdmi_cec_data.lock);
++ } while (list_empty(&head));
++ }
++ }
++
++ event = list_first_entry(&head, struct hdmi_cec_event, list);
++ list_del(&event->list);
++ mutex_unlock(&hdmi_cec_data.lock);
++ if (copy_to_user(buf, event,
++ sizeof(struct hdmi_cec_event) - sizeof(struct list_head))) {
++ vfree(event);
++ return -EFAULT;
++ }
++ vfree(event);
++ return (sizeof(struct hdmi_cec_event) - sizeof(struct list_head));
++}
++
++static ssize_t hdmi_cec_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int ret = 0 , i = 0;
++ u8 msg[MAX_MESSAGE_LEN];
++ u8 msg_len = 0, val = 0;
++
++ pr_debug("function : %s\n", __func__);
++
++ if (!open_count)
++ return -ENODEV;
++ mutex_lock(&hdmi_cec_data.lock);
++ if (false == hdmi_cec_data.cec_state) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EACCES;
++ }
++ /* Ensure that there is only one writer who is the unique listener of tx_cec_queue */
++ if (hdmi_cec_data.tx_answer != CEC_TX_AVAIL) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EBUSY;
++ }
++ mutex_unlock(&hdmi_cec_data.lock);
++ if (count > MAX_MESSAGE_LEN)
++ return -EINVAL;
++ memset(&msg, 0, MAX_MESSAGE_LEN);
++ ret = copy_from_user(&msg, buf, count);
++ if (ret)
++ return -EACCES;
++ mutex_lock(&hdmi_cec_data.lock);
++ hdmi_cec_data.send_error = 0;
++ hdmi_cec_data.tx_answer = CEC_TX_INPROGRESS;
++ msg_len = count;
++ hdmi_writeb(msg_len, HDMI_CEC_TX_CNT);
++ for (i = 0; i < msg_len; i++)
++ hdmi_writeb(msg[i], HDMI_CEC_TX_DATA0+i);
++ val = hdmi_readb(HDMI_CEC_CTRL);
++ val |= 0x01;
++ hdmi_writeb(val, HDMI_CEC_CTRL);
++ memcpy(hdmi_cec_data.last_msg, msg, msg_len);
++ hdmi_cec_data.msg_len = msg_len;
++ mutex_unlock(&hdmi_cec_data.lock);
++
++ ret = wait_event_interruptible_timeout(tx_cec_queue, hdmi_cec_data.tx_answer != CEC_TX_INPROGRESS, HZ);
++
++ if (ret < 0) {
++ ret = -ERESTARTSYS;
++ goto tx_out;
++ }
++
++ if (hdmi_cec_data.tx_answer & HDMI_IH_CEC_STAT0_DONE)
++ /* msg correctly sent */
++ ret = msg_len;
++ else
++ ret = -EIO;
++
++ tx_out:
++ hdmi_cec_data.tx_answer = CEC_TX_AVAIL;
++ return ret;
++}
++
++void hdmi_cec_start_device(void)
++{
++ u8 val;
++
++ val = hdmi_readb(HDMI_MC_CLKDIS);
++ val &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ hdmi_writeb(val, HDMI_MC_CLKDIS);
++ hdmi_writeb(0x02, HDMI_CEC_CTRL);
++ /* Force read unlock */
++ hdmi_writeb(0x0, HDMI_CEC_LOCK);
++ val = HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM | HDMI_IH_CEC_STAT0_DONE;
++ hdmi_writeb(val, HDMI_CEC_POLARITY);
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL | HDMI_IH_CEC_STAT0_ARB_LOST;
++ hdmi_writeb(val, HDMI_CEC_MASK);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_cec_data.cec_state = true;
++}
++EXPORT_SYMBOL(hdmi_cec_start_device);
++
++void hdmi_cec_stop_device(void)
++{
++ u8 val;
++
++ hdmi_writeb(0x10, HDMI_CEC_CTRL);
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL | HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_ARB_LOST | \
++ HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM | HDMI_IH_CEC_STAT0_DONE;
++ hdmi_writeb(val, HDMI_CEC_MASK);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(0x0, HDMI_CEC_POLARITY);
++ val = hdmi_readb(HDMI_MC_CLKDIS);
++ val |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ hdmi_writeb(val, HDMI_MC_CLKDIS);
++ hdmi_cec_data.cec_state = false;
++}
++EXPORT_SYMBOL(hdmi_cec_stop_device);
++
++/*!
++ * @brief IO ctrl function for vpu file operation
++ * @param cmd IO ctrl command
++ * @return 0 on success or negative error code on error
++ */
++static long hdmi_cec_ioctl(struct file *filp, u_int cmd,
++ u_long arg)
++{
++ int ret = 0, status = 0;
++ u8 val = 0, msg = 0;
++ struct mxc_edid_cfg hdmi_edid_cfg;
++ pr_debug("function : %s\n", __func__);
++ if (!open_count)
++ return -ENODEV;
++ switch (cmd) {
++ case HDMICEC_IOC_SETLOGICALADDRESS:
++ mutex_lock(&hdmi_cec_data.lock);
++ if (false == hdmi_cec_data.cec_state) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ pr_err("Trying to set logical address while not started\n");
++ return -EACCES;
++ }
++ hdmi_cec_data.Logical_address = (u8)arg;
++ if (hdmi_cec_data.Logical_address <= 7) {
++ val = 1 << hdmi_cec_data.Logical_address;
++ hdmi_writeb(val, HDMI_CEC_ADDR_L);
++ hdmi_writeb(0, HDMI_CEC_ADDR_H);
++ } else if (hdmi_cec_data.Logical_address > 7 && hdmi_cec_data.Logical_address <= 15) {
++ val = 1 << (hdmi_cec_data.Logical_address - 8);
++ hdmi_writeb(val, HDMI_CEC_ADDR_H);
++ hdmi_writeb(0, HDMI_CEC_ADDR_L);
++ } else
++ ret = -EINVAL;
++ /*Send Polling message with same source and destination address*/
++ if (0 == ret && 15 != hdmi_cec_data.Logical_address) {
++ msg = (hdmi_cec_data.Logical_address << 4)|hdmi_cec_data.Logical_address;
++ hdmi_writeb(1, HDMI_CEC_TX_CNT);
++ hdmi_writeb(msg, HDMI_CEC_TX_DATA0);
++ val = hdmi_readb(HDMI_CEC_CTRL);
++ val |= 0x01;
++ hdmi_writeb(val, HDMI_CEC_CTRL);
++ }
++ mutex_unlock(&hdmi_cec_data.lock);
++ break;
++ case HDMICEC_IOC_STARTDEVICE:
++ hdmi_cec_start_device();
++ break;
++ case HDMICEC_IOC_STOPDEVICE:
++ hdmi_cec_stop_device();
++ break;
++ case HDMICEC_IOC_GETPHYADDRESS:
++ hdmi_get_edid_cfg(&hdmi_edid_cfg);
++ status = copy_to_user((void __user *)arg,
++ &hdmi_edid_cfg.physical_address,
++ 4*sizeof(u8));
++ if (status)
++ ret = -EFAULT;
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ return ret;
++}
++
++/*!
++ * @brief Release function for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int hdmi_cec_release(struct inode *inode, struct file *filp)
++{
++ struct hdmi_cec_event *event, *tmp_event;
++ mutex_lock(&hdmi_cec_data.lock);
++ if (open_count) {
++ open_count = 0;
++ hdmi_cec_data.cec_state = false;
++ hdmi_cec_data.Logical_address = 15;
++
++ /* Flush eventual events which have not been read by user space */
++ list_for_each_entry_safe(event, tmp_event, &head, list) {
++ list_del(&event->list);
++ vfree(event);
++ }
++ }
++ mutex_unlock(&hdmi_cec_data.lock);
++
++ return 0;
++}
++
++static unsigned int hdmi_cec_poll(struct file *file, poll_table *wait)
++{
++ unsigned int mask = 0;
++
++ pr_debug("function : %s\n", __func__);
++
++ poll_wait(file, &hdmi_cec_queue, wait);
++
++ mutex_lock(&hdmi_cec_data.lock);
++ if (hdmi_cec_data.tx_answer == CEC_TX_AVAIL)
++ mask = (POLLOUT | POLLWRNORM);
++ if (!list_empty(&head))
++ mask |= (POLLIN | POLLRDNORM);
++ mutex_unlock(&hdmi_cec_data.lock);
++ return mask;
++}
++
++
++const struct file_operations hdmi_cec_fops = {
++ .owner = THIS_MODULE,
++ .read = hdmi_cec_read,
++ .write = hdmi_cec_write,
++ .open = hdmi_cec_open,
++ .unlocked_ioctl = hdmi_cec_ioctl,
++ .release = hdmi_cec_release,
++ .poll = hdmi_cec_poll,
++};
++
++static int hdmi_cec_dev_probe(struct platform_device *pdev)
++{
++ int err = 0;
++ struct device *temp_class;
++ struct resource *res;
++ struct pinctrl *pinctrl;
++ int irq = platform_get_irq(pdev, 0);
++
++ hdmi_cec_major = register_chrdev(hdmi_cec_major, "mxc_hdmi_cec", &hdmi_cec_fops);
++ if (hdmi_cec_major < 0) {
++ dev_err(&pdev->dev, "hdmi_cec: unable to get a major for HDMI CEC\n");
++ err = -EBUSY;
++ goto out;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (unlikely(res == NULL)) {
++ dev_err(&pdev->dev, "hdmi_cec:No HDMI irq line provided\n");
++ goto err_out_chrdev;
++ }
++ spin_lock_init(&hdmi_cec_data.irq_lock);
++
++ err = devm_request_irq(&pdev->dev, irq, mxc_hdmi_cec_isr, IRQF_SHARED,
++ dev_name(&pdev->dev), &hdmi_cec_data);
++ if (err < 0) {
++ dev_err(&pdev->dev, "hdmi_cec:Unable to request irq: %d\n", err);
++ goto err_out_chrdev;
++ }
++
++ hdmi_cec_class = class_create(THIS_MODULE, "mxc_hdmi_cec");
++ if (IS_ERR(hdmi_cec_class)) {
++ err = PTR_ERR(hdmi_cec_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(hdmi_cec_class, NULL, MKDEV(hdmi_cec_major, 0),
++ NULL, "mxc_hdmi_cec");
++ if (IS_ERR(temp_class)) {
++ err = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(&pdev->dev, "can't get/select CEC pinctrl\n");
++ goto err_out_class;
++ }
++
++ init_waitqueue_head(&hdmi_cec_queue);
++ init_waitqueue_head(&tx_cec_queue);
++
++ INIT_LIST_HEAD(&head);
++
++ mutex_init(&hdmi_cec_data.lock);
++ hdmi_cec_data.Logical_address = 15;
++ hdmi_cec_data.tx_answer = CEC_TX_AVAIL;
++ platform_set_drvdata(pdev, &hdmi_cec_data);
++ INIT_DELAYED_WORK(&hdmi_cec_data.hdmi_cec_work, mxc_hdmi_cec_worker);
++
++ dev_info(&pdev->dev, "HDMI CEC initialized\n");
++ goto out;
++
++err_out_class:
++ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
++ class_destroy(hdmi_cec_class);
++err_out_chrdev:
++ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
++out:
++ return err;
++}
++
++static int hdmi_cec_dev_remove(struct platform_device *pdev)
++{
++ if (hdmi_cec_data.cec_state)
++ hdmi_cec_stop_device();
++ if (hdmi_cec_major > 0) {
++ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
++ class_destroy(hdmi_cec_class);
++ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
++ hdmi_cec_major = 0;
++}
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_cec_match[] = {
++ { .compatible = "fsl,imx6q-hdmi-cec", },
++ { .compatible = "fsl,imx6dl-hdmi-cec", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_hdmi_cec_driver = {
++ .probe = hdmi_cec_dev_probe,
++ .remove = hdmi_cec_dev_remove,
++ .driver = {
++ .name = "mxc_hdmi_cec",
++ .of_match_table = imx_hdmi_cec_match,
++ },
++};
++
++module_platform_driver(mxc_hdmi_cec_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Linux HDMI CEC driver for Freescale i.MX/MXC");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:mxc_hdmi_cec");
++
+diff -Nur linux-3.14.36/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h linux-openelec/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h
+--- linux-3.14.36/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,38 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef _HDMICEC_H_
++#define _HDMICEC_H_
++#include <linux/ioctl.h>
++
++/*
++ * Ioctl definitions
++ */
++
++/* Use 'k' as magic number */
++#define HDMICEC_IOC_MAGIC 'H'
++/*
++ * S means "Set" through a ptr,
++ * T means "Tell" directly with the argument value
++ * G means "Get": reply by setting through a pointer
++ * Q means "Query": response is on the return value
++ * X means "eXchange": G and S atomically
++ * H means "sHift": T and Q atomically
++ */
++#define HDMICEC_IOC_SETLOGICALADDRESS \
++ _IOW(HDMICEC_IOC_MAGIC, 1, unsigned char)
++#define HDMICEC_IOC_STARTDEVICE _IO(HDMICEC_IOC_MAGIC, 2)
++#define HDMICEC_IOC_STOPDEVICE _IO(HDMICEC_IOC_MAGIC, 3)
++#define HDMICEC_IOC_GETPHYADDRESS \
++ _IOR(HDMICEC_IOC_MAGIC, 4, unsigned char[4])
++
++#endif /* !_HDMICEC_H_ */
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c linux-openelec/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,495 @@
++/*
++ * Copyright 2009-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_calc_stripes_sizes.c
++ *
++ * @brief IPU IC functions
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/math64.h>
++
++#define BPP_32 0
++#define BPP_16 3
++#define BPP_8 5
++#define BPP_24 1
++#define BPP_12 4
++#define BPP_18 2
++
++static u32 truncate(u32 up, /* 0: down; else: up */
++ u64 a, /* must be non-negative */
++ u32 b)
++{
++ u32 d;
++ u64 div;
++ div = div_u64(a, b);
++ d = b * (div >> 32);
++ if (up && (a > (((u64)d) << 32)))
++ return d+b;
++ else
++ return d;
++}
++
++static unsigned int f_calc(unsigned int pfs, unsigned int bpp, unsigned int *write)
++{/* return input_f */
++ unsigned int f_calculated = 0;
++ switch (pfs) {
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV444P:
++ f_calculated = 16;
++ break;
++
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ f_calculated = 8;
++ break;
++
++ case IPU_PIX_FMT_NV12:
++ f_calculated = 8;
++ break;
++
++ default:
++ f_calculated = 0;
++ break;
++
++ }
++ if (!f_calculated) {
++ switch (bpp) {
++ case BPP_32:
++ f_calculated = 2;
++ break;
++
++ case BPP_16:
++ f_calculated = 4;
++ break;
++
++ case BPP_8:
++ case BPP_24:
++ f_calculated = 8;
++ break;
++
++ case BPP_12:
++ f_calculated = 16;
++ break;
++
++ case BPP_18:
++ f_calculated = 32;
++ break;
++
++ default:
++ f_calculated = 0;
++ break;
++ }
++ }
++ return f_calculated;
++}
++
++
++static unsigned int m_calc(unsigned int pfs)
++{
++ unsigned int m_calculated = 0;
++ switch (pfs) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV444P:
++ m_calculated = 16;
++ break;
++
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ m_calculated = 8;
++ break;
++
++ default:
++ m_calculated = 8;
++ break;
++
++ }
++ return m_calculated;
++}
++
++static int calc_split_resize_coeffs(unsigned int inSize, unsigned int outSize,
++ unsigned int *resizeCoeff,
++ unsigned int *downsizeCoeff)
++{
++ uint32_t tempSize;
++ uint32_t tempDownsize;
++
++ if (inSize > 4096) {
++ pr_debug("IC input size(%d) cannot exceed 4096\n",
++ inSize);
++ return -EINVAL;
++ }
++
++ if (outSize > 1024) {
++ pr_debug("IC output size(%d) cannot exceed 1024\n",
++ outSize);
++ return -EINVAL;
++ }
++
++ if ((outSize << 3) < inSize) {
++ pr_debug("IC cannot downsize more than 8:1\n");
++ return -EINVAL;
++ }
++
++ /* Compute downsizing coefficient */
++ /* Output of downsizing unit cannot be more than 1024 */
++ tempDownsize = 0;
++ tempSize = inSize;
++ while (((tempSize > 1024) || (tempSize >= outSize * 2)) &&
++ (tempDownsize < 2)) {
++ tempSize >>= 1;
++ tempDownsize++;
++ }
++ *downsizeCoeff = tempDownsize;
++
++ /* compute resizing coefficient using the following equation:
++ resizeCoeff = M*(SI -1)/(SO - 1)
++ where M = 2^13, SI - input size, SO - output size */
++ *resizeCoeff = (8192L * (tempSize - 1)) / (outSize - 1);
++ if (*resizeCoeff >= 16384L) {
++ pr_debug("Overflow on IC resize coefficient.\n");
++ return -EINVAL;
++ }
++
++ pr_debug("resizing from %u -> %u pixels, "
++ "downsize=%u, resize=%u.%lu (reg=%u)\n", inSize, outSize,
++ *downsizeCoeff, (*resizeCoeff >= 8192L) ? 1 : 0,
++ ((*resizeCoeff & 0x1FFF) * 10000L) / 8192L, *resizeCoeff);
++
++ return 0;
++}
++
++/* Stripe parameters calculator */
++/**************************************************************************
++Notes:
++MSW = the maximal width allowed for a stripe
++ i.MX31: 720, i.MX35: 800, i.MX37/51/53: 1024
++cirr = the maximal inverse resizing ratio for which overlap in the input
++ is requested; typically cirr~2
++flags
++ bit 0 - equal_stripes
++ 0 each stripe is allowed to have independent parameters
++ for maximal image quality
++ 1 the stripes are requested to have identical parameters
++ (except the base address), for maximal performance
++ bit 1 - vertical/horizontal
++ 0 horizontal
++ 1 vertical
++
++If performance is the top priority (above image quality)
++ Avoid overlap, by setting CIRR = 0
++ This will also force effectively identical_stripes = 1
++ Choose IF & OF that corresponds to the same IOX/SX for both stripes
++ Choose IFW & OFW such that
++ IFW/IM, IFW/IF, OFW/OM, OFW/OF are even integers
++ The function returns an error status:
++ 0: no error
++ 1: invalid input parameters -> aborted without result
++ Valid parameters should satisfy the following conditions
++ IFW <= OFW, otherwise downsizing is required
++ - which is not supported yet
++ 4 <= IFW,OFW, so some interpolation may be needed even without overlap
++ IM, OM, IF, OF should not vanish
++ 2*IF <= IFW
++ so the frame can be split to two equal stripes, even without overlap
++ 2*(OF+IF/irr_opt) <= OFW
++ so a valid positive INW exists even for equal stripes
++ OF <= MSW, otherwise, the left stripe cannot be sufficiently large
++ MSW < OFW, so splitting to stripes is required
++ OFW <= 2*MSW, so two stripes are sufficient
++ (this also implies that 2<=MSW)
++ 2: OF is not a multiple of OM - not fully-supported yet
++ Output is produced but OW is not guaranteed to be a multiple of OM
++ 4: OFW reduced to be a multiple of OM
++ 8: CIRR > 1: truncated to 1
++ Overlap is not supported (and is not needed) for upsizing
++**************************************************************************/
++int ipu_calc_stripes_sizes(const unsigned int input_frame_width,
++ /* input frame width;>1 */
++ unsigned int output_frame_width, /* output frame width; >1 */
++ const unsigned int maximal_stripe_width,
++ /* the maximal width allowed for a stripe */
++ const unsigned long long cirr, /* see above */
++ const unsigned int flags, /* see above */
++ u32 input_pixelformat,/* pixel format after of read channel*/
++ u32 output_pixelformat,/* pixel format after of write channel*/
++ struct stripe_param *left,
++ struct stripe_param *right)
++{
++ const unsigned int irr_frac_bits = 13;
++ const unsigned long irr_steps = 1 << irr_frac_bits;
++ const u64 dirr = ((u64)1) << (32 - 2);
++ /* The maximum relative difference allowed between the irrs */
++ const u64 cr = ((u64)4) << 32;
++ /* The importance ratio between the two terms in the cost function below */
++
++ unsigned int status;
++ unsigned int temp;
++ unsigned int onw_min;
++ unsigned int inw = 0, onw = 0, inw_best = 0;
++ /* number of pixels in the left stripe NOT hidden by the right stripe */
++ u64 irr_opt; /* the optimal inverse resizing ratio */
++ u64 rr_opt; /* the optimal resizing ratio = 1/irr_opt*/
++ u64 dinw; /* the misalignment between the stripes */
++ /* (measured in units of input columns) */
++ u64 difwl, difwr = 0;
++ /* The number of input columns not reflected in the output */
++ /* the resizing ratio used for the right stripe is */
++ /* left->irr and right->irr respectively */
++ u64 cost, cost_min;
++ u64 div; /* result of division */
++ bool equal_stripes = (flags & 0x1) != 0;
++ bool vertical = (flags & 0x2) != 0;
++
++ unsigned int input_m, input_f, output_m, output_f; /* parameters for upsizing by stripes */
++ unsigned int resize_coeff;
++ unsigned int downsize_coeff;
++
++ status = 0;
++
++ if (vertical) {
++ input_f = 2;
++ input_m = 8;
++ output_f = 8;
++ output_m = 2;
++ } else {
++ input_f = f_calc(input_pixelformat, 0, NULL);
++ input_m = m_calc(input_pixelformat);
++ output_f = input_m;
++ output_m = m_calc(output_pixelformat);
++ }
++ if ((input_frame_width < 4) || (output_frame_width < 4))
++ return 1;
++
++ irr_opt = div_u64((((u64)(input_frame_width - 1)) << 32),
++ (output_frame_width - 1));
++ rr_opt = div_u64((((u64)(output_frame_width - 1)) << 32),
++ (input_frame_width - 1));
++
++ if ((input_m == 0) || (output_m == 0) || (input_f == 0) || (output_f == 0)
++ || (input_frame_width < (2 * input_f))
++ || ((((u64)output_frame_width) << 32) <
++ (2 * ((((u64)output_f) << 32) + (input_f * rr_opt))))
++ || (maximal_stripe_width < output_f)
++ || ((output_frame_width <= maximal_stripe_width)
++ && (equal_stripes == 0))
++ || ((2 * maximal_stripe_width) < output_frame_width))
++ return 1;
++
++ if (output_f % output_m)
++ status += 2;
++
++ temp = truncate(0, (((u64)output_frame_width) << 32), output_m);
++ if (temp < output_frame_width) {
++ output_frame_width = temp;
++ status += 4;
++ }
++
++ pr_debug("---------------->\n"
++ "if = %d\n"
++ "im = %d\n"
++ "of = %d\n"
++ "om = %d\n"
++ "irr_opt = %llu\n"
++ "rr_opt = %llu\n"
++ "cirr = %llu\n"
++ "pixel in = %08x\n"
++ "pixel out = %08x\n"
++ "ifw = %d\n"
++ "ofwidth = %d\n",
++ input_f,
++ input_m,
++ output_f,
++ output_m,
++ irr_opt,
++ rr_opt,
++ cirr,
++ input_pixelformat,
++ output_pixelformat,
++ input_frame_width,
++ output_frame_width
++ );
++
++ if (equal_stripes) {
++ if ((irr_opt > cirr) /* overlap in the input is not requested */
++ && ((input_frame_width % (input_m << 1)) == 0)
++ && ((input_frame_width % (input_f << 1)) == 0)
++ && ((output_frame_width % (output_m << 1)) == 0)
++ && ((output_frame_width % (output_f << 1)) == 0)) {
++ /* without overlap */
++ left->input_width = right->input_width = right->input_column =
++ input_frame_width >> 1;
++ left->output_width = right->output_width = right->output_column =
++ output_frame_width >> 1;
++ left->input_column = 0;
++ left->output_column = 0;
++ div = div_u64(((((u64)irr_steps) << 32) *
++ (right->input_width - 1)), (right->output_width - 1));
++ left->irr = right->irr = truncate(0, div, 1);
++ } else { /* with overlap */
++ onw = truncate(0, (((u64)output_frame_width - 1) << 32) >> 1,
++ output_f);
++ inw = truncate(0, onw * irr_opt, input_f);
++ /* this is the maximal inw which allows the same resizing ratio */
++ /* in both stripes */
++ onw = truncate(1, (inw * rr_opt), output_f);
++ div = div_u64((((u64)(irr_steps * inw)) <<
++ 32), onw);
++ left->irr = right->irr = truncate(0, div, 1);
++ left->output_width = right->output_width =
++ output_frame_width - onw;
++ /* These are valid assignments for output_width, */
++ /* assuming output_f is a multiple of output_m */
++ div = (((u64)(left->output_width-1) * (left->irr)) << 32);
++ div = (((u64)1) << 32) + div_u64(div, irr_steps);
++
++ left->input_width = right->input_width = truncate(1, div, input_m);
++
++ div = div_u64((((u64)((right->output_width - 1) * right->irr)) <<
++ 32), irr_steps);
++ difwr = (((u64)(input_frame_width - 1 - inw)) << 32) - div;
++ div = div_u64((difwr + (((u64)input_f) << 32)), 2);
++ left->input_column = truncate(0, div, input_f);
++
++
++ /* This splits the truncated input columns evenly */
++ /* between the left and right margins */
++ right->input_column = left->input_column + inw;
++ left->output_column = 0;
++ right->output_column = onw;
++ }
++ if (left->input_width > left->output_width) {
++ if (calc_split_resize_coeffs(left->input_width,
++ left->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++
++ if (downsize_coeff > 0) {
++ left->irr = right->irr =
++ (downsize_coeff << 14) | resize_coeff;
++ }
++ }
++ pr_debug("inw %d, onw %d, ilw %d, ilc %d, olw %d,"
++ " irw %d, irc %d, orw %d, orc %d, "
++ "difwr %llu, lirr %u\n",
++ inw, onw, left->input_width,
++ left->input_column, left->output_width,
++ right->input_width, right->input_column,
++ right->output_width,
++ right->output_column, difwr, left->irr);
++ } else { /* independent stripes */
++ onw_min = output_frame_width - maximal_stripe_width;
++ /* onw is a multiple of output_f, in the range */
++ /* [max(output_f,output_frame_width-maximal_stripe_width),*/
++ /*min(output_frame_width-2,maximal_stripe_width)] */
++ /* definitely beyond the cost of any valid setting */
++ cost_min = (((u64)input_frame_width) << 32) + cr;
++ onw = truncate(0, ((u64)maximal_stripe_width), output_f);
++ if (output_frame_width - onw == 1)
++ onw -= output_f; /* => onw and output_frame_width-1-onw are positive */
++ inw = truncate(0, onw * irr_opt, input_f);
++ /* this is the maximal inw which allows the same resizing ratio */
++ /* in both stripes */
++ onw = truncate(1, inw * rr_opt, output_f);
++ do {
++ div = div_u64((((u64)(irr_steps * inw)) << 32), onw);
++ left->irr = truncate(0, div, 1);
++ div = div_u64((((u64)(onw * left->irr)) << 32),
++ irr_steps);
++ dinw = (((u64)inw) << 32) - div;
++
++ div = div_u64((((u64)((output_frame_width - 1 - onw) * left->irr)) <<
++ 32), irr_steps);
++
++ difwl = (((u64)(input_frame_width - 1 - inw)) << 32) - div;
++
++ cost = difwl + (((u64)(cr * dinw)) >> 32);
++
++ if (cost < cost_min) {
++ inw_best = inw;
++ cost_min = cost;
++ }
++
++ inw -= input_f;
++ onw = truncate(1, inw * rr_opt, output_f);
++ /* This is the minimal onw which allows the same resizing ratio */
++ /* in both stripes */
++ } while (onw >= onw_min);
++
++ inw = inw_best;
++ onw = truncate(1, inw * rr_opt, output_f);
++ div = div_u64((((u64)(irr_steps * inw)) << 32), onw);
++ left->irr = truncate(0, div, 1);
++
++ left->output_width = onw;
++ right->output_width = output_frame_width - onw;
++ /* These are valid assignments for output_width, */
++ /* assuming output_f is a multiple of output_m */
++ left->input_width = truncate(1, ((u64)(inw + 1)) << 32, input_m);
++ right->input_width = truncate(1, ((u64)(input_frame_width - inw)) <<
++ 32, input_m);
++
++ div = div_u64((((u64)(irr_steps * (input_frame_width - 1 - inw))) <<
++ 32), (right->output_width - 1));
++ right->irr = truncate(0, div, 1);
++ temp = truncate(0, ((u64)left->irr) * ((((u64)1) << 32) + dirr), 1);
++ if (temp < right->irr)
++ right->irr = temp;
++ div = div_u64(((u64)((right->output_width - 1) * right->irr) <<
++ 32), irr_steps);
++ difwr = (u64)(input_frame_width - 1 - inw) - div;
++
++
++ div = div_u64((difwr + (((u64)input_f) << 32)), 2);
++ left->input_column = truncate(0, div, input_f);
++
++ /* This splits the truncated input columns evenly */
++ /* between the left and right margins */
++ right->input_column = left->input_column + inw;
++ left->output_column = 0;
++ right->output_column = onw;
++ if (left->input_width > left->output_width) {
++ if (calc_split_resize_coeffs(left->input_width,
++ left->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++ left->irr = (downsize_coeff << 14) | resize_coeff;
++ }
++ if (right->input_width > right->output_width) {
++ if (calc_split_resize_coeffs(right->input_width,
++ right->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++ right->irr = (downsize_coeff << 14) | resize_coeff;
++ }
++ }
++ return status;
++}
++EXPORT_SYMBOL(ipu_calc_stripes_sizes);
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_capture.c linux-openelec/drivers/mxc/ipu3/ipu_capture.c
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_capture.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_capture.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,816 @@
++/*
++ * Copyright 2008-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_capture.c
++ *
++ * @brief IPU capture base functions
++ *
++ * @ingroup IPU
++ */
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include "ipu_prv.h"
++#include "ipu_regs.h"
++
++/*!
++ * _ipu_csi_mclk_set
++ *
++ * @param ipu ipu handler
++ * @param pixel_clk desired pixel clock frequency in Hz
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_mclk_set(struct ipu_soc *ipu, uint32_t pixel_clk, uint32_t csi)
++{
++ uint32_t temp;
++ uint32_t div_ratio;
++
++ div_ratio = (clk_get_rate(ipu->ipu_clk) / pixel_clk) - 1;
++
++ if (div_ratio > 0xFF || div_ratio < 0) {
++ dev_dbg(ipu->dev, "value of pixel_clk extends normal range\n");
++ return -EINVAL;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SENS_CONF);
++ temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
++ ipu_csi_write(ipu, csi, temp |
++ (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
++ CSI_SENS_CONF);
++
++ return 0;
++}
++
++/*!
++ * ipu_csi_init_interface
++ * Sets initial values for the CSI registers.
++ * The width and height of the sensor and the actual frame size will be
++ * set to the same values.
++ * @param ipu ipu handler
++ * @param width Sensor width
++ * @param height Sensor height
++ * @param pixel_fmt pixel format
++ * @param cfg_param ipu_csi_signal_cfg_t structure
++ * @param csi csi 0 or csi 1
++ *
++ * @return 0 for success, -EINVAL for error
++ */
++int32_t
++ipu_csi_init_interface(struct ipu_soc *ipu, uint16_t width, uint16_t height,
++ uint32_t pixel_fmt, ipu_csi_signal_cfg_t cfg_param)
++{
++ uint32_t data = 0;
++ uint32_t csi = cfg_param.csi;
++
++ /* Set SENS_DATA_FORMAT bits (8, 9 and 10)
++ RGB or YUV444 is 0 which is current value in data so not set
++ explicitly
++ This is also the default value if attempts are made to set it to
++ something invalid. */
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_YUYV:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
++ break;
++ case IPU_PIX_FMT_UYVY:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
++ break;
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_BGR24:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
++ break;
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_GENERIC_16:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++ break;
++ case IPU_PIX_FMT_RGB565:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
++ break;
++ case IPU_PIX_FMT_RGB555:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Set the CSI_SENS_CONF register remaining fields */
++ data |= cfg_param.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
++ cfg_param.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
++ cfg_param.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
++ cfg_param.Vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
++ cfg_param.Hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
++ cfg_param.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
++ cfg_param.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
++ cfg_param.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
++ cfg_param.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
++ cfg_param.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
++ cfg_param.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu_csi_write(ipu, csi, data, CSI_SENS_CONF);
++
++ /* Setup sensor frame size */
++ ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_SENS_FRM_SIZE);
++
++ /* Set CCIR registers */
++ if (cfg_param.clk_mode == IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE) {
++ ipu_csi_write(ipu, csi, 0x40030, CSI_CCIR_CODE_1);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ } else if (cfg_param.clk_mode == IPU_CSI_CLK_MODE_CCIR656_INTERLACED) {
++ if (width == 720 && height == 625) {
++ /* PAL case */
++ /*
++ * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
++ * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
++ */
++ ipu_csi_write(ipu, csi, 0x40596, CSI_CCIR_CODE_1);
++ /*
++ * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
++ * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
++ */
++ ipu_csi_write(ipu, csi, 0xD07DF, CSI_CCIR_CODE_2);
++
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++
++ } else if (width == 720 && height == 525) {
++ /* NTSC case */
++ /*
++ * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
++ * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
++ */
++ ipu_csi_write(ipu, csi, 0xD07DF, CSI_CCIR_CODE_1);
++ /*
++ * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
++ * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
++ */
++ ipu_csi_write(ipu, csi, 0x40596, CSI_CCIR_CODE_2);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ } else {
++ dev_err(ipu->dev, "Unsupported CCIR656 interlaced "
++ "video mode\n");
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return -EINVAL;
++ }
++ _ipu_csi_ccir_err_detection_enable(ipu, csi);
++ } else if ((cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR)) {
++ ipu_csi_write(ipu, csi, 0x40030, CSI_CCIR_CODE_1);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ _ipu_csi_ccir_err_detection_enable(ipu, csi);
++ } else if ((cfg_param.clk_mode == IPU_CSI_CLK_MODE_GATED_CLK) ||
++ (cfg_param.clk_mode == IPU_CSI_CLK_MODE_NONGATED_CLK)) {
++ _ipu_csi_ccir_err_detection_disable(ipu, csi);
++ }
++
++ dev_dbg(ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
++ ipu_csi_read(ipu, csi, CSI_SENS_CONF));
++ dev_dbg(ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
++ ipu_csi_read(ipu, csi, CSI_ACT_FRM_SIZE));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_csi_init_interface);
++
++/*!
++ * ipu_csi_get_sensor_protocol
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns sensor protocol
++ */
++int32_t ipu_csi_get_sensor_protocol(struct ipu_soc *ipu, uint32_t csi)
++{
++ int ret;
++ _ipu_get(ipu);
++ ret = (ipu_csi_read(ipu, csi, CSI_SENS_CONF) &
++ CSI_SENS_CONF_SENS_PRTCL_MASK) >>
++ CSI_SENS_CONF_SENS_PRTCL_SHIFT;
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_csi_get_sensor_protocol);
++
++/*!
++ * ipu_csi_enable_mclk
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ * @param flag true to enable mclk, false to disable mclk
++ * @param wait true to wait 100ms make clock stable, false not wait
++ *
++ * @return Returns 0 on success
++ */
++int ipu_csi_enable_mclk(struct ipu_soc *ipu, int csi, bool flag, bool wait)
++{
++ /* Return immediately if there is no csi_clk to manage */
++ if (ipu->csi_clk[csi] == NULL)
++ return 0;
++
++ if (flag) {
++ clk_enable(ipu->csi_clk[csi]);
++ if (wait == true)
++ msleep(10);
++ } else {
++ clk_disable(ipu->csi_clk[csi]);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_csi_enable_mclk);
++
++/*!
++ * ipu_csi_get_window_size
++ *
++ * @param ipu ipu handler
++ * @param width pointer to window width
++ * @param height pointer to window height
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_get_window_size(struct ipu_soc *ipu, uint32_t *width, uint32_t *height, uint32_t csi)
++{
++ uint32_t reg;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ reg = ipu_csi_read(ipu, csi, CSI_ACT_FRM_SIZE);
++ *width = (reg & 0xFFFF) + 1;
++ *height = (reg >> 16 & 0xFFFF) + 1;
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_get_window_size);
++
++/*!
++ * ipu_csi_set_window_size
++ *
++ * @param ipu ipu handler
++ * @param width window width
++ * @param height window height
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_set_window_size(struct ipu_soc *ipu, uint32_t width, uint32_t height, uint32_t csi)
++{
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_ACT_FRM_SIZE);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_set_window_size);
++
++/*!
++ * ipu_csi_set_window_pos
++ *
++ * @param ipu ipu handler
++ * @param left uint32 window x start
++ * @param top uint32 window y start
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_set_window_pos(struct ipu_soc *ipu, uint32_t left, uint32_t top, uint32_t csi)
++{
++ uint32_t temp;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
++ temp |= ((top << CSI_VSC_SHIFT) | (left << CSI_HSC_SHIFT));
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_set_window_pos);
++
++/*!
++ * _ipu_csi_horizontal_downsize_enable
++ * Enable horizontal downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_horizontal_downsize_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp |= CSI_HORI_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_horizontal_downsize_disable
++ * Disable horizontal downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_horizontal_downsize_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~CSI_HORI_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_vertical_downsize_enable
++ * Enable vertical downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_vertical_downsize_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp |= CSI_VERT_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_vertical_downsize_disable
++ * Disable vertical downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_vertical_downsize_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~CSI_VERT_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_set_test_generator
++ *
++ * @param ipu ipu handler
++ * @param active 1 for active and 0 for inactive
++ * @param r_value red value for the generated pattern of even pixel
++ * @param g_value green value for the generated pattern of even
++ * pixel
++ * @param b_value blue value for the generated pattern of even pixel
++ * @param pixel_clk desired pixel clock frequency in Hz
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_set_test_generator(struct ipu_soc *ipu, bool active, uint32_t r_value,
++ uint32_t g_value, uint32_t b_value, uint32_t pix_clk, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_TST_CTRL);
++
++ if (active == false) {
++ temp &= ~CSI_TEST_GEN_MODE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_TST_CTRL);
++ } else {
++ /* Set sensb_mclk div_ratio*/
++ _ipu_csi_mclk_set(ipu, pix_clk, csi);
++
++ temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
++ CSI_TEST_GEN_B_MASK);
++ temp |= CSI_TEST_GEN_MODE_EN;
++ temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
++ (g_value << CSI_TEST_GEN_G_SHIFT) |
++ (b_value << CSI_TEST_GEN_B_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_TST_CTRL);
++ }
++}
++
++/*!
++ * _ipu_csi_ccir_err_detection_en
++ * Enable error detection and correction for
++ * CCIR interlaced mode with protection bit.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_ccir_err_detection_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_CCIR_CODE_1);
++ temp |= CSI_CCIR_ERR_DET_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_CCIR_CODE_1);
++
++}
++
++/*!
++ * _ipu_csi_ccir_err_detection_disable
++ * Disable error detection and correction for
++ * CCIR interlaced mode with protection bit.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_ccir_err_detection_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_CCIR_CODE_1);
++ temp &= ~CSI_CCIR_ERR_DET_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_CCIR_CODE_1);
++
++}
++
++/*!
++ * _ipu_csi_set_mipi_di
++ *
++ * @param ipu ipu handler
++ * @param num MIPI data identifier 0-3 handled by CSI
++ * @param di_val data identifier value
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_mipi_di(struct ipu_soc *ipu, uint32_t num, uint32_t di_val, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (di_val > 0xFFL) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_MIPI_DI);
++
++ switch (num) {
++ case IPU_CSI_MIPI_DI0:
++ temp &= ~CSI_MIPI_DI0_MASK;
++ temp |= (di_val << CSI_MIPI_DI0_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI1:
++ temp &= ~CSI_MIPI_DI1_MASK;
++ temp |= (di_val << CSI_MIPI_DI1_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI2:
++ temp &= ~CSI_MIPI_DI2_MASK;
++ temp |= (di_val << CSI_MIPI_DI2_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI3:
++ temp &= ~CSI_MIPI_DI3_MASK;
++ temp |= (di_val << CSI_MIPI_DI3_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ default:
++ retval = -EINVAL;
++ }
++
++err:
++ return retval;
++}
++
++/*!
++ * _ipu_csi_set_skip_isp
++ *
++ * @param ipu ipu handler
++ * @param skip select frames to be skipped and set the
++ * correspond bits to 1
++ * @param max_ratio number of frames in a skipping set and the
++ * maximum value of max_ratio is 5
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_skip_isp(struct ipu_soc *ipu, uint32_t skip, uint32_t max_ratio, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (max_ratio > 5) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SKIP);
++ temp &= ~(CSI_MAX_RATIO_SKIP_ISP_MASK | CSI_SKIP_ISP_MASK);
++ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_ISP_SHIFT) |
++ (skip << CSI_SKIP_ISP_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_SKIP);
++
++err:
++ return retval;
++}
++
++/*!
++ * _ipu_csi_set_skip_smfc
++ *
++ * @param ipu ipu handler
++ * @param skip select frames to be skipped and set the
++ * correspond bits to 1
++ * @param max_ratio number of frames in a skipping set and the
++ * maximum value of max_ratio is 5
++ * @param id csi to smfc skipping id
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_skip_smfc(struct ipu_soc *ipu, uint32_t skip,
++ uint32_t max_ratio, uint32_t id, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (max_ratio > 5 || id > 3) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SKIP);
++ temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
++ CSI_SKIP_SMFC_MASK);
++ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
++ (id << CSI_ID_2_SKIP_SHIFT) |
++ (skip << CSI_SKIP_SMFC_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_SKIP);
++
++err:
++ return retval;
++}
++
++/*!
++ * _ipu_smfc_init
++ * Map CSI frames to IDMAC channels.
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param mipi_id mipi id number 0-3
++ * @param csi csi0 or csi1
++ */
++void _ipu_smfc_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t mipi_id, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_MAP);
++
++ switch (channel) {
++ case CSI_MEM0:
++ temp &= ~SMFC_MAP_CH0_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH0_SHIFT;
++ break;
++ case CSI_MEM1:
++ temp &= ~SMFC_MAP_CH1_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH1_SHIFT;
++ break;
++ case CSI_MEM2:
++ temp &= ~SMFC_MAP_CH2_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH2_SHIFT;
++ break;
++ case CSI_MEM3:
++ temp &= ~SMFC_MAP_CH3_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH3_SHIFT;
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_MAP);
++}
++
++/*!
++ * _ipu_smfc_set_wmc
++ * Caution: The number of required channels, the enabled channels
++ * and the FIFO size per channel are configured restrictedly.
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param set set 1 or clear 0
++ * @param level water mark level when FIFO is on the
++ * relative size
++ */
++void _ipu_smfc_set_wmc(struct ipu_soc *ipu, ipu_channel_t channel, bool set, uint32_t level)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_WMC);
++
++ switch (channel) {
++ case CSI_MEM0:
++ if (set == true) {
++ temp &= ~SMFC_WM0_SET_MASK;
++ temp |= level << SMFC_WM0_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM0_CLR_MASK;
++ temp |= level << SMFC_WM0_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM1:
++ if (set == true) {
++ temp &= ~SMFC_WM1_SET_MASK;
++ temp |= level << SMFC_WM1_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM1_CLR_MASK;
++ temp |= level << SMFC_WM1_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM2:
++ if (set == true) {
++ temp &= ~SMFC_WM2_SET_MASK;
++ temp |= level << SMFC_WM2_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM2_CLR_MASK;
++ temp |= level << SMFC_WM2_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM3:
++ if (set == true) {
++ temp &= ~SMFC_WM3_SET_MASK;
++ temp |= level << SMFC_WM3_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM3_CLR_MASK;
++ temp |= level << SMFC_WM3_CLR_SHIFT;
++ }
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_WMC);
++}
++
++/*!
++ * _ipu_smfc_set_burst_size
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param bs burst size of IDMAC channel,
++ * the value programmed here should be BURST_SIZE-1
++ */
++void _ipu_smfc_set_burst_size(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t bs)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_BS);
++
++ switch (channel) {
++ case CSI_MEM0:
++ temp &= ~SMFC_BS0_MASK;
++ temp |= bs << SMFC_BS0_SHIFT;
++ break;
++ case CSI_MEM1:
++ temp &= ~SMFC_BS1_MASK;
++ temp |= bs << SMFC_BS1_SHIFT;
++ break;
++ case CSI_MEM2:
++ temp &= ~SMFC_BS2_MASK;
++ temp |= bs << SMFC_BS2_SHIFT;
++ break;
++ case CSI_MEM3:
++ temp &= ~SMFC_BS3_MASK;
++ temp |= bs << SMFC_BS3_SHIFT;
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_BS);
++}
++
++/*!
++ * _ipu_csi_init
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t csi)
++{
++ uint32_t csi_sens_conf, csi_dest;
++ int retval = 0;
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ csi_dest = CSI_DATA_DEST_IDMAC;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case CSI_PRP_VF_MEM:
++ csi_dest = CSI_DATA_DEST_IC;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ csi_sens_conf = ipu_csi_read(ipu, csi, CSI_SENS_CONF);
++ csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
++ ipu_csi_write(ipu, csi, csi_sens_conf | (csi_dest <<
++ CSI_SENS_CONF_DATA_DEST_SHIFT), CSI_SENS_CONF);
++err:
++ return retval;
++}
++
++/*!
++ * csi_irq_handler
++ *
++ * @param irq interrupt id
++ * @param dev_id pointer to ipu handler
++ *
++ * @return Returns if irq is handled
++ */
++static irqreturn_t csi_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_soc *ipu = dev_id;
++ struct completion *comp = &ipu->csi_comp;
++
++ complete(comp);
++ return IRQ_HANDLED;
++}
++
++/*!
++ * _ipu_csi_wait4eof
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel
++ *
++ */
++void _ipu_csi_wait4eof(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int ret;
++ int irq = 0;
++
++ if (channel == CSI_MEM0)
++ irq = IPU_IRQ_CSI0_OUT_EOF;
++ else if (channel == CSI_MEM1)
++ irq = IPU_IRQ_CSI1_OUT_EOF;
++ else if (channel == CSI_MEM2)
++ irq = IPU_IRQ_CSI2_OUT_EOF;
++ else if (channel == CSI_MEM3)
++ irq = IPU_IRQ_CSI3_OUT_EOF;
++ else if (channel == CSI_PRP_ENC_MEM)
++ irq = IPU_IRQ_PRP_ENC_OUT_EOF;
++ else if (channel == CSI_PRP_VF_MEM)
++ irq = IPU_IRQ_PRP_VF_OUT_EOF;
++ else{
++ dev_err(ipu->dev, "Not a CSI channel\n");
++ return;
++ }
++
++ init_completion(&ipu->csi_comp);
++ ret = ipu_request_irq(ipu, irq, csi_irq_handler, 0, NULL, ipu);
++ if (ret < 0) {
++ dev_err(ipu->dev, "CSI irq %d in use\n", irq);
++ return;
++ }
++ ret = wait_for_completion_timeout(&ipu->csi_comp, msecs_to_jiffies(500));
++ ipu_free_irq(ipu, irq, ipu);
++ dev_dbg(ipu->dev, "CSI stop timeout - %d * 10ms\n", 5 - ret);
++}
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_common.c linux-openelec/drivers/mxc/ipu3/ipu_common.c
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_common.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_common.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3134 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_common.c
++ *
++ * @brief This file contains the IPU driver common API functions.
++ *
++ * @ingroup IPU
++ */
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/irq.h>
++#include <linux/irqdesc.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
++#include <linux/reset.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include <asm/cacheflush.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
++int g_ipu_hw_rev;
++
++/* Static functions */
++static irqreturn_t ipu_sync_irq_handler(int irq, void *desc);
++static irqreturn_t ipu_err_irq_handler(int irq, void *desc);
++
++static inline uint32_t channel_2_dma(ipu_channel_t ch, ipu_buffer_t type)
++{
++ return ((uint32_t) ch >> (6 * type)) & 0x3F;
++};
++
++static inline int _ipu_is_ic_chan(uint32_t dma_chan)
++{
++ return (((dma_chan >= 11) && (dma_chan <= 22) && (dma_chan != 17) &&
++ (dma_chan != 18)));
++}
++
++static inline int _ipu_is_vdi_out_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 5);
++}
++
++static inline int _ipu_is_ic_graphic_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 14 || dma_chan == 15);
++}
++
++/* Either DP BG or DP FG can be graphic window */
++static inline int _ipu_is_dp_graphic_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 23 || dma_chan == 27);
++}
++
++static inline int _ipu_is_irt_chan(uint32_t dma_chan)
++{
++ return ((dma_chan >= 45) && (dma_chan <= 50));
++}
++
++static inline int _ipu_is_dmfc_chan(uint32_t dma_chan)
++{
++ return ((dma_chan >= 23) && (dma_chan <= 29));
++}
++
++static inline int _ipu_is_smfc_chan(uint32_t dma_chan)
++{
++ return ((dma_chan >= 0) && (dma_chan <= 3));
++}
++
++static inline int _ipu_is_trb_chan(uint32_t dma_chan)
++{
++ return (((dma_chan == 8) || (dma_chan == 9) ||
++ (dma_chan == 10) || (dma_chan == 13) ||
++ (dma_chan == 21) || (dma_chan == 23) ||
++ (dma_chan == 27) || (dma_chan == 28)) &&
++ (g_ipu_hw_rev >= IPU_V3DEX));
++}
++
++/*
++ * We usually use IDMAC 23 as full plane and IDMAC 27 as partial
++ * plane.
++ * IDMAC 23/24/28/41 can drive a display respectively - primary
++ * IDMAC 27 depends on IDMAC 23 - nonprimary
++ */
++static inline int _ipu_is_primary_disp_chan(uint32_t dma_chan)
++{
++ return ((dma_chan == 23) || (dma_chan == 24) ||
++ (dma_chan == 28) || (dma_chan == 41));
++}
++
++static inline int _ipu_is_sync_irq(uint32_t irq)
++{
++ /* sync interrupt register number */
++ int reg_num = irq / 32 + 1;
++
++ return ((reg_num == 1) || (reg_num == 2) || (reg_num == 3) ||
++ (reg_num == 4) || (reg_num == 7) || (reg_num == 8) ||
++ (reg_num == 11) || (reg_num == 12) || (reg_num == 13) ||
++ (reg_num == 14) || (reg_num == 15));
++}
++
++#define idma_is_valid(ch) (ch != NO_DMA)
++#define idma_mask(ch) (idma_is_valid(ch) ? (1UL << (ch & 0x1F)) : 0)
++#define idma_is_set(ipu, reg, dma) (ipu_idmac_read(ipu, reg(dma)) & idma_mask(dma))
++#define tri_cur_buf_mask(ch) (idma_mask(ch*2) * 3)
++#define tri_cur_buf_shift(ch) (ffs(idma_mask(ch*2)) - 1)
++
++static int ipu_clk_setup_enable(struct ipu_soc *ipu,
++ struct ipu_pltfm_data *pdata)
++{
++ char pixel_clk_0[] = "ipu1_pclk_0";
++ char pixel_clk_1[] = "ipu1_pclk_1";
++ char pixel_clk_0_sel[] = "ipu1_pclk0_sel";
++ char pixel_clk_1_sel[] = "ipu1_pclk1_sel";
++ char pixel_clk_0_div[] = "ipu1_pclk0_div";
++ char pixel_clk_1_div[] = "ipu1_pclk1_div";
++ char *ipu_pixel_clk_sel[] = { "ipu1", "ipu1_di0", "ipu1_di1", };
++ char *pclk_sel;
++ struct clk *clk;
++ int ret;
++ int i;
++
++ pixel_clk_0[3] += pdata->id;
++ pixel_clk_1[3] += pdata->id;
++ pixel_clk_0_sel[3] += pdata->id;
++ pixel_clk_1_sel[3] += pdata->id;
++ pixel_clk_0_div[3] += pdata->id;
++ pixel_clk_1_div[3] += pdata->id;
++ for (i = 0; i < ARRAY_SIZE(ipu_pixel_clk_sel); i++) {
++ pclk_sel = ipu_pixel_clk_sel[i];
++ pclk_sel[3] += pdata->id;
++ }
++ dev_dbg(ipu->dev, "ipu_clk = %lu\n", clk_get_rate(ipu->ipu_clk));
++
++ clk = clk_register_mux_pix_clk(ipu->dev, pixel_clk_0_sel,
++ (const char **)ipu_pixel_clk_sel,
++ ARRAY_SIZE(ipu_pixel_clk_sel),
++ 0, pdata->id, 0, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk_register mux di0 failed");
++ return PTR_ERR(clk);
++ }
++ ipu->pixel_clk_sel[0] = clk;
++ clk = clk_register_mux_pix_clk(ipu->dev, pixel_clk_1_sel,
++ (const char **)ipu_pixel_clk_sel,
++ ARRAY_SIZE(ipu_pixel_clk_sel),
++ 0, pdata->id, 1, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk_register mux di1 failed");
++ return PTR_ERR(clk);
++ }
++ ipu->pixel_clk_sel[1] = clk;
++
++ clk = clk_register_div_pix_clk(ipu->dev, pixel_clk_0_div,
++ pixel_clk_0_sel, 0, pdata->id, 0, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk register di0 div failed");
++ return PTR_ERR(clk);
++ }
++ clk = clk_register_div_pix_clk(ipu->dev, pixel_clk_1_div,
++ pixel_clk_1_sel, CLK_SET_RATE_PARENT, pdata->id, 1, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk register di1 div failed");
++ return PTR_ERR(clk);
++ }
++
++ ipu->pixel_clk[0] = clk_register_gate_pix_clk(ipu->dev, pixel_clk_0,
++ pixel_clk_0_div, CLK_SET_RATE_PARENT,
++ pdata->id, 0, 0);
++ if (IS_ERR(ipu->pixel_clk[0])) {
++ dev_err(ipu->dev, "clk register di0 gate failed");
++ return PTR_ERR(ipu->pixel_clk[0]);
++ }
++ ipu->pixel_clk[1] = clk_register_gate_pix_clk(ipu->dev, pixel_clk_1,
++ pixel_clk_1_div, CLK_SET_RATE_PARENT,
++ pdata->id, 1, 0);
++ if (IS_ERR(ipu->pixel_clk[1])) {
++ dev_err(ipu->dev, "clk register di1 gate failed");
++ return PTR_ERR(ipu->pixel_clk[1]);
++ }
++
++ ret = clk_set_parent(ipu->pixel_clk_sel[0], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "clk set parent failed");
++ return ret;
++ }
++
++ ret = clk_set_parent(ipu->pixel_clk_sel[1], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "clk set parent failed");
++ return ret;
++ }
++
++ ipu->di_clk[0] = devm_clk_get(ipu->dev, "di0");
++ if (IS_ERR(ipu->di_clk[0])) {
++ dev_err(ipu->dev, "clk_get di0 failed");
++ return PTR_ERR(ipu->di_clk[0]);
++ }
++ ipu->di_clk[1] = devm_clk_get(ipu->dev, "di1");
++ if (IS_ERR(ipu->di_clk[1])) {
++ dev_err(ipu->dev, "clk_get di1 failed");
++ return PTR_ERR(ipu->di_clk[1]);
++ }
++
++ ipu->di_clk_sel[0] = devm_clk_get(ipu->dev, "di0_sel");
++ if (IS_ERR(ipu->di_clk_sel[0])) {
++ dev_err(ipu->dev, "clk_get di0_sel failed");
++ return PTR_ERR(ipu->di_clk_sel[0]);
++ }
++ ipu->di_clk_sel[1] = devm_clk_get(ipu->dev, "di1_sel");
++ if (IS_ERR(ipu->di_clk_sel[1])) {
++ dev_err(ipu->dev, "clk_get di1_sel failed");
++ return PTR_ERR(ipu->di_clk_sel[1]);
++ }
++
++ return 0;
++}
++
++static int ipu_mem_reset(struct ipu_soc *ipu)
++{
++ int timeout = 1000;
++
++ ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
++
++ while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
++ if (!timeout--)
++ return -ETIME;
++ msleep(1);
++ }
++
++ return 0;
++}
++
++struct ipu_soc *ipu_get_soc(int id)
++{
++ if (id >= MXC_IPU_MAX_NUM)
++ return ERR_PTR(-ENODEV);
++ else if (!ipu_array[id].online)
++ return ERR_PTR(-ENODEV);
++ else
++ return &(ipu_array[id]);
++}
++EXPORT_SYMBOL_GPL(ipu_get_soc);
++
++void _ipu_get(struct ipu_soc *ipu)
++{
++ int ret;
++
++ ret = clk_enable(ipu->ipu_clk);
++ if (ret < 0)
++ BUG();
++}
++
++void _ipu_put(struct ipu_soc *ipu)
++{
++ clk_disable(ipu->ipu_clk);
++}
++
++void ipu_disable_hsp_clk(struct ipu_soc *ipu)
++{
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_disable_hsp_clk);
++
++static struct platform_device_id imx_ipu_type[] = {
++ {
++ .name = "ipu-imx6q",
++ .driver_data = IPU_V3H,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_ipu_type);
++
++static const struct of_device_id imx_ipuv3_dt_ids[] = {
++ { .compatible = "fsl,imx6q-ipu", .data = &imx_ipu_type[IMX6Q_IPU], },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_ipuv3_dt_ids);
++
++/*!
++ * This function is called by the driver framework to initialize the IPU
++ * hardware.
++ *
++ * @param dev The device structure for the IPU passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int ipu_probe(struct platform_device *pdev)
++{
++ struct ipu_soc *ipu;
++ struct resource *res;
++ unsigned long ipu_base;
++ const struct of_device_id *of_id =
++ of_match_device(imx_ipuv3_dt_ids, &pdev->dev);
++ struct ipu_pltfm_data *pltfm_data;
++ int ret = 0;
++ u32 bypass_reset;
++
++ dev_dbg(&pdev->dev, "<%s>\n", __func__);
++
++ pltfm_data = devm_kzalloc(&pdev->dev, sizeof(struct ipu_pltfm_data),
++ GFP_KERNEL);
++ if (!pltfm_data)
++ return -ENOMEM;
++
++ ret = of_property_read_u32(pdev->dev.of_node,
++ "bypass_reset", &bypass_reset);
++ if (ret < 0) {
++ dev_dbg(&pdev->dev, "can not get bypass_reset\n");
++ return ret;
++ }
++ pltfm_data->bypass_reset = (bool)bypass_reset;
++
++ pltfm_data->id = of_alias_get_id(pdev->dev.of_node, "ipu");
++ if (pltfm_data->id < 0) {
++ dev_dbg(&pdev->dev, "can not get alias id\n");
++ return pltfm_data->id;
++ }
++
++ if (of_id)
++ pdev->id_entry = of_id->data;
++ pltfm_data->devtype = pdev->id_entry->driver_data;
++ g_ipu_hw_rev = pltfm_data->devtype;
++
++ ipu = &ipu_array[pltfm_data->id];
++ memset(ipu, 0, sizeof(struct ipu_soc));
++ ipu->dev = &pdev->dev;
++ ipu->pdata = pltfm_data;
++ dev_dbg(ipu->dev, "IPU rev:%d\n", g_ipu_hw_rev);
++ spin_lock_init(&ipu->int_reg_spin_lock);
++ spin_lock_init(&ipu->rdy_reg_spin_lock);
++ mutex_init(&ipu->mutex_lock);
++
++ ipu->irq_sync = platform_get_irq(pdev, 0);
++ ipu->irq_err = platform_get_irq(pdev, 1);
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (!res || ipu->irq_sync < 0 || ipu->irq_err < 0) {
++ dev_err(&pdev->dev, "can't get device resources\n");
++ return -ENODEV;
++ }
++
++ if (!devm_request_mem_region(&pdev->dev, res->start,
++ resource_size(res), pdev->name))
++ return -EBUSY;
++
++ ret = devm_request_irq(&pdev->dev, ipu->irq_sync,
++ ipu_sync_irq_handler, 0, pdev->name, ipu);
++ if (ret) {
++ dev_err(ipu->dev, "request SYNC interrupt failed\n");
++ return ret;
++ }
++ ret = devm_request_irq(&pdev->dev, ipu->irq_err,
++ ipu_err_irq_handler, 0, pdev->name, ipu);
++ if (ret) {
++ dev_err(ipu->dev, "request ERR interrupt failed\n");
++ return ret;
++ }
++
++ ipu_base = res->start;
++ /* base fixup */
++ if (g_ipu_hw_rev == IPU_V3H) /* IPUv3H */
++ ipu_base += IPUV3H_REG_BASE;
++ else if (g_ipu_hw_rev == IPU_V3M) /* IPUv3M */
++ ipu_base += IPUV3M_REG_BASE;
++ else /* IPUv3D, v3E, v3EX */
++ ipu_base += IPUV3DEX_REG_BASE;
++
++ ipu->cm_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CM_REG_BASE, PAGE_SIZE);
++ ipu->ic_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_IC_REG_BASE, PAGE_SIZE);
++ ipu->idmac_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_IDMAC_REG_BASE, PAGE_SIZE);
++ /* DP Registers are accessed thru the SRM */
++ ipu->dp_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_SRM_REG_BASE, PAGE_SIZE);
++ ipu->dc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DC_REG_BASE, PAGE_SIZE);
++ ipu->dmfc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DMFC_REG_BASE, PAGE_SIZE);
++ ipu->di_reg[0] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DI0_REG_BASE, PAGE_SIZE);
++ ipu->di_reg[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DI1_REG_BASE, PAGE_SIZE);
++ ipu->smfc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_SMFC_REG_BASE, PAGE_SIZE);
++ ipu->csi_reg[0] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CSI0_REG_BASE, PAGE_SIZE);
++ ipu->csi_reg[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CSI1_REG_BASE, PAGE_SIZE);
++ ipu->cpmem_base = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CPMEM_REG_BASE, SZ_128K);
++ ipu->tpmem_base = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_TPM_REG_BASE, SZ_64K);
++ ipu->dc_tmpl_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DC_TMPL_REG_BASE, SZ_128K);
++ ipu->vdi_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_VDI_REG_BASE, PAGE_SIZE);
++ ipu->disp_base[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DISP1_BASE, SZ_4K);
++ if (!ipu->cm_reg || !ipu->ic_reg || !ipu->idmac_reg ||
++ !ipu->dp_reg || !ipu->dc_reg || !ipu->dmfc_reg ||
++ !ipu->di_reg[0] || !ipu->di_reg[1] || !ipu->smfc_reg ||
++ !ipu->csi_reg[0] || !ipu->csi_reg[1] || !ipu->cpmem_base ||
++ !ipu->tpmem_base || !ipu->dc_tmpl_reg || !ipu->disp_base[1]
++ || !ipu->vdi_reg)
++ return -ENOMEM;
++
++ dev_dbg(ipu->dev, "IPU CM Regs = %p\n", ipu->cm_reg);
++ dev_dbg(ipu->dev, "IPU IC Regs = %p\n", ipu->ic_reg);
++ dev_dbg(ipu->dev, "IPU IDMAC Regs = %p\n", ipu->idmac_reg);
++ dev_dbg(ipu->dev, "IPU DP Regs = %p\n", ipu->dp_reg);
++ dev_dbg(ipu->dev, "IPU DC Regs = %p\n", ipu->dc_reg);
++ dev_dbg(ipu->dev, "IPU DMFC Regs = %p\n", ipu->dmfc_reg);
++ dev_dbg(ipu->dev, "IPU DI0 Regs = %p\n", ipu->di_reg[0]);
++ dev_dbg(ipu->dev, "IPU DI1 Regs = %p\n", ipu->di_reg[1]);
++ dev_dbg(ipu->dev, "IPU SMFC Regs = %p\n", ipu->smfc_reg);
++ dev_dbg(ipu->dev, "IPU CSI0 Regs = %p\n", ipu->csi_reg[0]);
++ dev_dbg(ipu->dev, "IPU CSI1 Regs = %p\n", ipu->csi_reg[1]);
++ dev_dbg(ipu->dev, "IPU CPMem = %p\n", ipu->cpmem_base);
++ dev_dbg(ipu->dev, "IPU TPMem = %p\n", ipu->tpmem_base);
++ dev_dbg(ipu->dev, "IPU DC Template Mem = %p\n", ipu->dc_tmpl_reg);
++ dev_dbg(ipu->dev, "IPU Display Region 1 Mem = %p\n", ipu->disp_base[1]);
++ dev_dbg(ipu->dev, "IPU VDI Regs = %p\n", ipu->vdi_reg);
++
++ ipu->ipu_clk = devm_clk_get(ipu->dev, "bus");
++ if (IS_ERR(ipu->ipu_clk)) {
++ dev_err(ipu->dev, "clk_get ipu failed");
++ return PTR_ERR(ipu->ipu_clk);
++ }
++
++ /* ipu_clk is always prepared */
++ ret = clk_prepare_enable(ipu->ipu_clk);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ipu clk enable failed\n");
++ return ret;
++ }
++
++ ipu->online = true;
++
++ platform_set_drvdata(pdev, ipu);
++
++ if (!pltfm_data->bypass_reset) {
++ ret = device_reset(&pdev->dev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
++ return ret;
++ }
++
++ ipu_mem_reset(ipu);
++
++ ipu_disp_init(ipu);
++
++ /* Set MCU_T to divide MCU access window into 2 */
++ ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
++ IPU_DISP_GEN);
++ }
++
++ /* setup ipu clk tree after ipu reset */
++ ret = ipu_clk_setup_enable(ipu, pltfm_data);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ipu clk setup failed\n");
++ ipu->online = false;
++ return ret;
++ }
++
++ /* Set sync refresh channels and CSI->mem channel as high priority */
++ ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
++
++ /* Enable error interrupts by default */
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
++
++ if (!pltfm_data->bypass_reset)
++ clk_disable(ipu->ipu_clk);
++
++ register_ipu_device(ipu, ipu->pdata->id);
++
++ pm_runtime_enable(&pdev->dev);
++
++ return ret;
++}
++
++int ipu_remove(struct platform_device *pdev)
++{
++ struct ipu_soc *ipu = platform_get_drvdata(pdev);
++
++ unregister_ipu_device(ipu, ipu->pdata->id);
++
++ clk_put(ipu->ipu_clk);
++
++ return 0;
++}
++
++void ipu_dump_registers(struct ipu_soc *ipu)
++{
++ dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n", ipu_cm_read(ipu, IPU_CONF));
++ dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n", ipu_idmac_read(ipu, IDMAC_CONF));
++ dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
++ dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
++ dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
++ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
++ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
++ if (g_ipu_hw_rev >= IPU_V3DEX) {
++ dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL0 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0)));
++ dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32)));
++ }
++ dev_dbg(ipu->dev, "DMFC_WR_CHAN = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_WR_CHAN));
++ dev_dbg(ipu->dev, "DMFC_WR_CHAN_DEF = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_WR_CHAN_DEF));
++ dev_dbg(ipu->dev, "DMFC_DP_CHAN = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_DP_CHAN));
++ dev_dbg(ipu->dev, "DMFC_DP_CHAN_DEF = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_DP_CHAN_DEF));
++ dev_dbg(ipu->dev, "DMFC_IC_CTRL = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_IC_CTRL));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
++ dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
++ dev_dbg(ipu->dev, "IPU_VDIC_VDI_FSIZE = \t0x%08X\n",
++ ipu_vdi_read(ipu, VDI_FSIZE));
++ dev_dbg(ipu->dev, "IPU_VDIC_VDI_C = \t0x%08X\n",
++ ipu_vdi_read(ipu, VDI_C));
++ dev_dbg(ipu->dev, "IPU_IC_CONF = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_CONF));
++}
++
++/*!
++ * This function is called to initialize a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID to init.
++ *
++ * @param params Input parameter containing union of channel
++ * initialization parameters.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
++{
++ int ret = 0;
++ bool bad_pixfmt;
++ uint32_t ipu_conf, reg, in_g_pixel_fmt, sec_dma;
++
++ dev_dbg(ipu->dev, "init channel = %d\n", IPU_CHAN_ID(channel));
++
++ ret = pm_runtime_get_sync(ipu->dev);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ch = %d, pm_runtime_get failed:%d!\n",
++ IPU_CHAN_ID(channel), ret);
++ dump_stack();
++ return ret;
++ }
++ /*
++ * Here, ret could be 1 if the device's runtime PM status was
++ * already 'active', so clear it to be 0.
++ */
++ ret = 0;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* Re-enable error interrupts every time a channel is initialized */
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
++
++ if (ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) {
++ dev_warn(ipu->dev, "Warning: channel already initialized %d\n",
++ IPU_CHAN_ID(channel));
++ }
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ if (params->csi_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (params->csi_mem.interlaced)
++ ipu->chan_is_interlaced[channel_2_dma(channel,
++ IPU_OUTPUT_BUFFER)] = true;
++ else
++ ipu->chan_is_interlaced[channel_2_dma(channel,
++ IPU_OUTPUT_BUFFER)] = false;
++
++ ipu->smfc_use_count++;
++ ipu->csi_channel[params->csi_mem.csi] = channel;
++
++ /*SMFC setting*/
++ if (params->csi_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_mem.csi));
++ _ipu_smfc_init(ipu, channel, params->csi_mem.mipi_vc,
++ params->csi_mem.csi);
++ _ipu_csi_set_mipi_di(ipu, params->csi_mem.mipi_vc,
++ params->csi_mem.mipi_id, params->csi_mem.csi);
++ } else {
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_mem.csi));
++ _ipu_smfc_init(ipu, channel, 0, params->csi_mem.csi);
++ }
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_mem.csi);
++ break;
++ case CSI_PRP_ENC_MEM:
++ if (params->csi_prp_enc_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++ if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = CSI_PRP_ENC_MEM;
++
++ ipu->ic_use_count++;
++ ipu->csi_channel[params->csi_prp_enc_mem.csi] = channel;
++
++ if (params->csi_prp_enc_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_enc_mem.csi));
++ _ipu_csi_set_mipi_di(ipu,
++ params->csi_prp_enc_mem.mipi_vc,
++ params->csi_prp_enc_mem.mipi_id,
++ params->csi_prp_enc_mem.csi);
++ } else
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_enc_mem.csi));
++
++ /*CSI0/1 feed into IC*/
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ if (params->csi_prp_enc_mem.csi)
++ ipu_conf |= IPU_CONF_CSI_SEL;
++ else
++ ipu_conf &= ~IPU_CONF_CSI_SEL;
++
++ /*PRP skip buffer in memory, only valid when RWS_EN is true*/
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_prp_enc_mem.csi);
++ _ipu_ic_init_prpenc(ipu, params, true);
++ break;
++ case CSI_PRP_VF_MEM:
++ if (params->csi_prp_vf_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++ if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = CSI_PRP_VF_MEM;
++
++ ipu->ic_use_count++;
++ ipu->csi_channel[params->csi_prp_vf_mem.csi] = channel;
++
++ if (params->csi_prp_vf_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_vf_mem.csi));
++ _ipu_csi_set_mipi_di(ipu,
++ params->csi_prp_vf_mem.mipi_vc,
++ params->csi_prp_vf_mem.mipi_id,
++ params->csi_prp_vf_mem.csi);
++ } else
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_vf_mem.csi));
++
++ /*CSI0/1 feed into IC*/
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ if (params->csi_prp_vf_mem.csi)
++ ipu_conf |= IPU_CONF_CSI_SEL;
++ else
++ ipu_conf &= ~IPU_CONF_CSI_SEL;
++
++ /*PRP skip buffer in memory, only valid when RWS_EN is true*/
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_prp_vf_mem.csi);
++ _ipu_ic_init_prpvf(ipu, params, true);
++ break;
++ case MEM_PRP_VF_MEM:
++ if (params->mem_prp_vf_mem.graphics_combine_en) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ in_g_pixel_fmt = params->mem_prp_vf_mem.in_g_pixel_fmt;
++ bad_pixfmt =
++ _ipu_ch_param_bad_alpha_pos(in_g_pixel_fmt);
++
++ if (params->mem_prp_vf_mem.alpha_chan_en) {
++ if (bad_pixfmt) {
++ dev_err(ipu->dev, "bad pixel format "
++ "for graphics plane from "
++ "ch%d\n", sec_dma);
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg | FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ _ipu_ic_init_prpvf(ipu, params, false);
++ ipu->ic_use_count++;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM) ||
++ (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = MEM_VDI_PRP_VF_MEM;
++ ipu->ic_use_count++;
++ ipu->vdi_use_count++;
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ reg &= ~FS_VDI_SRC_SEL_MASK;
++ ipu_cm_write(ipu, reg , IPU_FS_PROC_FLOW1);
++
++ if (params->mem_prp_vf_mem.graphics_combine_en)
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ _ipu_ic_init_prpvf(ipu, params, false);
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ case MEM_VDI_PRP_VF_MEM_N:
++ case MEM_VDI_MEM_P:
++ case MEM_VDI_MEM_N:
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_VDI_MEM:
++ if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = MEM_VDI_MEM;
++ ipu->ic_use_count++;
++ ipu->vdi_use_count++;
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_ROT_VF_MEM:
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ _ipu_ic_init_rotate_vf(ipu, params);
++ break;
++ case MEM_PRP_ENC_MEM:
++ ipu->ic_use_count++;
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg | FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++ _ipu_ic_init_prpenc(ipu, params, false);
++ break;
++ case MEM_ROT_ENC_MEM:
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ _ipu_ic_init_rotate_enc(ipu, params);
++ break;
++ case MEM_PP_MEM:
++ if (params->mem_pp_mem.graphics_combine_en) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ in_g_pixel_fmt = params->mem_pp_mem.in_g_pixel_fmt;
++ bad_pixfmt =
++ _ipu_ch_param_bad_alpha_pos(in_g_pixel_fmt);
++
++ if (params->mem_pp_mem.alpha_chan_en) {
++ if (bad_pixfmt) {
++ dev_err(ipu->dev, "bad pixel format "
++ "for graphics plane from "
++ "ch%d\n", sec_dma);
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ _ipu_ic_init_pp(ipu, params);
++ ipu->ic_use_count++;
++ break;
++ case MEM_ROT_PP_MEM:
++ _ipu_ic_init_rotate_pp(ipu, params);
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ break;
++ case MEM_DC_SYNC:
++ if (params->mem_dc_sync.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[1] = params->mem_dc_sync.di;
++ _ipu_dc_init(ipu, 1, params->mem_dc_sync.di,
++ params->mem_dc_sync.interlaced,
++ params->mem_dc_sync.out_pixel_fmt);
++ ipu->di_use_count[params->mem_dc_sync.di]++;
++ ipu->dc_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case MEM_BG_SYNC:
++ if (params->mem_dp_bg_sync.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (params->mem_dp_bg_sync.alpha_chan_en)
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++
++ ipu->dc_di_assignment[5] = params->mem_dp_bg_sync.di;
++ _ipu_dp_init(ipu, channel, params->mem_dp_bg_sync.in_pixel_fmt,
++ params->mem_dp_bg_sync.out_pixel_fmt);
++ _ipu_dc_init(ipu, 5, params->mem_dp_bg_sync.di,
++ params->mem_dp_bg_sync.interlaced,
++ params->mem_dp_bg_sync.out_pixel_fmt);
++ ipu->di_use_count[params->mem_dp_bg_sync.di]++;
++ ipu->dc_use_count++;
++ ipu->dp_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case MEM_FG_SYNC:
++ _ipu_dp_init(ipu, channel, params->mem_dp_fg_sync.in_pixel_fmt,
++ params->mem_dp_fg_sync.out_pixel_fmt);
++
++ if (params->mem_dp_fg_sync.alpha_chan_en)
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++
++ ipu->dc_use_count++;
++ ipu->dp_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case DIRECT_ASYNC0:
++ if (params->direct_async.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[8] = params->direct_async.di;
++ _ipu_dc_init(ipu, 8, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
++ ipu->di_use_count[params->direct_async.di]++;
++ ipu->dc_use_count++;
++ break;
++ case DIRECT_ASYNC1:
++ if (params->direct_async.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[9] = params->direct_async.di;
++ _ipu_dc_init(ipu, 9, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
++ ipu->di_use_count[params->direct_async.di]++;
++ ipu->dc_use_count++;
++ break;
++ default:
++ dev_err(ipu->dev, "Missing channel initialization\n");
++ break;
++ }
++
++ ipu->channel_init_mask |= 1L << IPU_CHAN_ID(channel);
++
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_init_channel);
++
++/*!
++ * This function is called to uninitialize a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID to uninit.
++ */
++void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t in_dma, out_dma = 0;
++ uint32_t ipu_conf;
++ uint32_t dc_chan = 0;
++ int ret;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if ((ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
++ dev_dbg(ipu->dev, "Channel already uninitialized %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return;
++ }
++
++ /* Make sure channel is disabled */
++ /* Get input and output dma channels */
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++
++ if (idma_is_set(ipu, IDMAC_CHA_EN, in_dma) ||
++ idma_is_set(ipu, IDMAC_CHA_EN, out_dma)) {
++ dev_err(ipu->dev,
++ "Channel %d is not disabled, disable first\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return;
++ }
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++
++ /* Reset the double buffer */
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(in_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_DB_MODE_SEL(in_dma));
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(out_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_DB_MODE_SEL(out_dma));
++
++ /* Reset the triple buffer */
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(in_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_TRB_MODE_SEL(in_dma));
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(out_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_TRB_MODE_SEL(out_dma));
++
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_dp_graphic_chan(in_dma)) {
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = false;
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = false;
++ }
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ ipu->smfc_use_count--;
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case CSI_PRP_ENC_MEM:
++ ipu->ic_use_count--;
++ if (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpenc(ipu);
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case CSI_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ if (ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpvf(ipu);
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case MEM_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_prpvf(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ ipu->vdi_use_count--;
++ if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpvf(ipu);
++ _ipu_vdi_uninit(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_VDI_MEM:
++ ipu->ic_use_count--;
++ ipu->vdi_use_count--;
++ if (ipu->using_ic_dirct_ch == MEM_VDI_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_vdi_uninit(ipu);
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ case MEM_VDI_PRP_VF_MEM_N:
++ case MEM_VDI_MEM_P:
++ case MEM_VDI_MEM_N:
++ break;
++ case MEM_ROT_VF_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_vf(ipu);
++ break;
++ case MEM_PRP_ENC_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_prpenc(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_ROT_ENC_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_enc(ipu);
++ break;
++ case MEM_PP_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_pp(ipu);
++ break;
++ case MEM_ROT_PP_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_pp(ipu);
++ break;
++ case MEM_DC_SYNC:
++ dc_chan = 1;
++ _ipu_dc_uninit(ipu, 1);
++ ipu->di_use_count[ipu->dc_di_assignment[1]]--;
++ ipu->dc_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case MEM_BG_SYNC:
++ dc_chan = 5;
++ _ipu_dp_uninit(ipu, channel);
++ _ipu_dc_uninit(ipu, 5);
++ ipu->di_use_count[ipu->dc_di_assignment[5]]--;
++ ipu->dc_use_count--;
++ ipu->dp_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case MEM_FG_SYNC:
++ _ipu_dp_uninit(ipu, channel);
++ ipu->dc_use_count--;
++ ipu->dp_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case DIRECT_ASYNC0:
++ dc_chan = 8;
++ _ipu_dc_uninit(ipu, 8);
++ ipu->di_use_count[ipu->dc_di_assignment[8]]--;
++ ipu->dc_use_count--;
++ break;
++ case DIRECT_ASYNC1:
++ dc_chan = 9;
++ _ipu_dc_uninit(ipu, 9);
++ ipu->di_use_count[ipu->dc_di_assignment[9]]--;
++ ipu->dc_use_count--;
++ break;
++ default:
++ break;
++ }
++
++ if (ipu->ic_use_count == 0)
++ ipu_conf &= ~IPU_CONF_IC_EN;
++ if (ipu->vdi_use_count == 0) {
++ ipu_conf &= ~IPU_CONF_ISP_EN;
++ ipu_conf &= ~IPU_CONF_VDI_EN;
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ }
++ if (ipu->rot_use_count == 0)
++ ipu_conf &= ~IPU_CONF_ROT_EN;
++ if (ipu->dc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DC_EN;
++ if (ipu->dp_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DP_EN;
++ if (ipu->dmfc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DMFC_EN;
++ if (ipu->di_use_count[0] == 0) {
++ ipu_conf &= ~IPU_CONF_DI0_EN;
++ }
++ if (ipu->di_use_count[1] == 0) {
++ ipu_conf &= ~IPU_CONF_DI1_EN;
++ }
++ if (ipu->smfc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_SMFC_EN;
++
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++ ipu->channel_init_mask &= ~(1L << IPU_CHAN_ID(channel));
++
++ /*
++ * Disable pixel clk and its parent clock(if the parent clock
++ * usecount is 1) after clearing DC/DP/DI bits in IPU_CONF
++ * register to prevent LVDS display channel starvation.
++ */
++ if (_ipu_is_primary_disp_chan(in_dma))
++ clk_disable_unprepare(ipu->pixel_clk[ipu->dc_di_assignment[dc_chan]]);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ ret = pm_runtime_put_sync_suspend(ipu->dev);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ch = %d, pm_runtime_put failed:%d!\n",
++ IPU_CHAN_ID(channel), ret);
++ dump_stack();
++ }
++
++ WARN_ON(ipu->ic_use_count < 0);
++ WARN_ON(ipu->vdi_use_count < 0);
++ WARN_ON(ipu->rot_use_count < 0);
++ WARN_ON(ipu->dc_use_count < 0);
++ WARN_ON(ipu->dp_use_count < 0);
++ WARN_ON(ipu->dmfc_use_count < 0);
++ WARN_ON(ipu->smfc_use_count < 0);
++}
++EXPORT_SYMBOL(ipu_uninit_channel);
++
++/*!
++ * This function is called to initialize buffer(s) for logical IPU channel.
++ *
++ * @param ipu ipu handler
++ *
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to initialize.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width Input parameter for width of buffer in pixels.
++ *
++ * @param height Input parameter for height of buffer in pixels.
++ *
++ * @param stride Input parameter for stride length of buffer
++ * in pixels.
++ *
++ * @param rot_mode Input parameter for rotation setting of buffer.
++ * A rotation setting other than
++ * IPU_ROTATE_VERT_FLIP
++ * should only be used for input buffers of
++ * rotation channels.
++ *
++ * @param phyaddr_0 Input parameter buffer 0 physical address.
++ *
++ * @param phyaddr_1 Input parameter buffer 1 physical address.
++ * Setting this to a value other than NULL enables
++ * double buffering mode.
++ *
++ * @param phyaddr_2 Input parameter buffer 2 physical address.
++ * Setting this to a value other than NULL enables
++ * triple buffering mode, phyaddr_1 should not be
++ * NULL then.
++ *
++ * @param u private u offset for additional cropping,
++ * zero if not used.
++ *
++ * @param v private v offset for additional cropping,
++ * zero if not used.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ ipu_rotate_mode_t rot_mode,
++ dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
++ dma_addr_t phyaddr_2,
++ uint32_t u, uint32_t v)
++{
++ uint32_t reg;
++ uint32_t dma_chan;
++ uint32_t burst_size;
++
++ dma_chan = channel_2_dma(channel, type);
++ if (!idma_is_valid(dma_chan))
++ return -EINVAL;
++
++ if (stride < width * bytes_per_pixel(pixel_fmt))
++ stride = width * bytes_per_pixel(pixel_fmt);
++
++ if (stride % 4) {
++ dev_err(ipu->dev,
++ "Stride not 32-bit aligned, stride = %d\n", stride);
++ return -EINVAL;
++ }
++ /* IC & IRT channels' width must be multiple of 8 pixels */
++ if ((_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan))
++ && (width % 8)) {
++ dev_err(ipu->dev, "Width must be 8 pixel multiple\n");
++ return -EINVAL;
++ }
++
++ if (_ipu_is_vdi_out_chan(dma_chan) &&
++ ((width < 16) || (height < 16) || (width % 2) || (height % 4))) {
++ dev_err(ipu->dev, "vdi width/height limited err\n");
++ return -EINVAL;
++ }
++
++ /* IPUv3EX and IPUv3M support triple buffer */
++ if ((!_ipu_is_trb_chan(dma_chan)) && phyaddr_2) {
++ dev_err(ipu->dev, "Chan%d doesn't support triple buffer "
++ "mode\n", dma_chan);
++ return -EINVAL;
++ }
++ if (!phyaddr_1 && phyaddr_2) {
++ dev_err(ipu->dev, "Chan%d's buf1 physical addr is NULL for "
++ "triple buffer mode\n", dma_chan);
++ return -EINVAL;
++ }
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* Build parameter memory data for DMA channel */
++ _ipu_ch_param_init(ipu, dma_chan, pixel_fmt, width, height, stride, u, v, 0,
++ phyaddr_0, phyaddr_1, phyaddr_2);
++
++ /* Set correlative channel parameter of local alpha channel */
++ if ((_ipu_is_ic_graphic_chan(dma_chan) ||
++ _ipu_is_dp_graphic_chan(dma_chan)) &&
++ (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] == true)) {
++ _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, true);
++ _ipu_ch_param_set_alpha_buffer_memory(ipu, dma_chan);
++ _ipu_ch_param_set_alpha_condition_read(ipu, dma_chan);
++ /* fix alpha width as 8 and burst size as 16*/
++ _ipu_ch_params_set_alpha_width(ipu, dma_chan, 8);
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
++ } else if (_ipu_is_ic_graphic_chan(dma_chan) &&
++ ipu_pixel_format_has_alpha(pixel_fmt))
++ _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, false);
++
++ if (rot_mode)
++ _ipu_ch_param_set_rotation(ipu, dma_chan, rot_mode);
++
++ /* IC and ROT channels have restriction of 8 or 16 pix burst length */
++ if (_ipu_is_ic_chan(dma_chan) || _ipu_is_vdi_out_chan(dma_chan)) {
++ if ((width % 16) == 0)
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
++ else
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
++ } else if (_ipu_is_irt_chan(dma_chan)) {
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
++ _ipu_ch_param_set_block_mode(ipu, dma_chan);
++ } else if (_ipu_is_dmfc_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ _ipu_dmfc_set_wait4eot(ipu, dma_chan, width);
++ _ipu_dmfc_set_burst_size(ipu, dma_chan, burst_size);
++ }
++
++ if (_ipu_disp_chan_is_interlaced(ipu, channel) ||
++ ipu->chan_is_interlaced[dma_chan])
++ _ipu_ch_param_set_interlaced_scan(ipu, dma_chan);
++
++ if (_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan) ||
++ _ipu_is_vdi_out_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ _ipu_ic_idma_init(ipu, dma_chan, width, height, burst_size,
++ rot_mode);
++ } else if (_ipu_is_smfc_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ /*
++ * This is different from IPUv3 spec, but it is confirmed
++ * in IPUforum that SMFC burst size should be NPB[6:3]
++ * when IDMAC works in 16-bit generic data mode.
++ */
++ if (pixel_fmt == IPU_PIX_FMT_GENERIC)
++ /* 8 bits per pixel */
++ burst_size = burst_size >> 4;
++ else if (pixel_fmt == IPU_PIX_FMT_GENERIC_16)
++ /* 16 bits per pixel */
++ burst_size = burst_size >> 3;
++ else
++ burst_size = burst_size >> 2;
++ _ipu_smfc_set_burst_size(ipu, channel, burst_size-1);
++ }
++
++ /* AXI-id */
++ if (idma_is_set(ipu, IDMAC_CHA_PRI, dma_chan)) {
++ unsigned reg = IDMAC_CH_LOCK_EN_1;
++ uint32_t value = 0;
++ if (ipu->pdata->devtype == IPU_V3H) {
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 0);
++ switch (dma_chan) {
++ case 5:
++ value = 0x3;
++ break;
++ case 11:
++ value = 0x3 << 2;
++ break;
++ case 12:
++ value = 0x3 << 4;
++ break;
++ case 14:
++ value = 0x3 << 6;
++ break;
++ case 15:
++ value = 0x3 << 8;
++ break;
++ case 20:
++ value = 0x3 << 10;
++ break;
++ case 21:
++ value = 0x3 << 12;
++ break;
++ case 22:
++ value = 0x3 << 14;
++ break;
++ case 23:
++ value = 0x3 << 16;
++ break;
++ case 27:
++ value = 0x3 << 18;
++ break;
++ case 28:
++ value = 0x3 << 20;
++ break;
++ case 45:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 0;
++ break;
++ case 46:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 2;
++ break;
++ case 47:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 4;
++ break;
++ case 48:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 6;
++ break;
++ case 49:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 8;
++ break;
++ case 50:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 10;
++ break;
++ default:
++ break;
++ }
++ value |= ipu_idmac_read(ipu, reg);
++ ipu_idmac_write(ipu, value, reg);
++ } else
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 1);
++ } else {
++ if (ipu->pdata->devtype == IPU_V3H)
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 1);
++ }
++
++ _ipu_ch_param_dump(ipu, dma_chan);
++
++ if (phyaddr_2 && g_ipu_hw_rev >= IPU_V3DEX) {
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
++
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ reg |= idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
++
++ /* Set IDMAC third buffer's cpmem number */
++ /* See __ipu_ch_get_third_buf_cpmem_num() for mapping */
++ ipu_idmac_write(ipu, 0x00444047L, IDMAC_SUB_ADDR_4);
++ ipu_idmac_write(ipu, 0x46004241L, IDMAC_SUB_ADDR_3);
++ ipu_idmac_write(ipu, 0x00000045L, IDMAC_SUB_ADDR_1);
++
++ /* Reset to buffer 0 */
++ ipu_cm_write(ipu, tri_cur_buf_mask(dma_chan),
++ IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
++ } else {
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
++
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
++ if (phyaddr_1)
++ reg |= idma_mask(dma_chan);
++ else
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
++
++ /* Reset to buffer 0 */
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_CUR_BUF(dma_chan));
++
++ }
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_channel_buffer);
++
++/*!
++ * This function is called to update the physical address of a buffer for
++ * a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to initialize.
++ *
++ * @param bufNum Input parameter for buffer number to update.
++ * 0 or 1 are the only valid values.
++ *
++ * @param phyaddr Input parameter buffer physical address.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail. This function will fail if the buffer is set to ready.
++ */
++int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum, dma_addr_t phyaddr)
++{
++ uint32_t reg;
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if (bufNum == 0)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
++
++ if ((reg & idma_mask(dma_chan)) == 0)
++ _ipu_ch_param_set_buffer(ipu, dma_chan, bufNum, phyaddr);
++ else
++ ret = -EACCES;
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_update_channel_buffer);
++
++/*!
++ * This function is called to update the band mode setting for
++ * a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ *
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to initialize.
++ *
++ * @param band_height Input parameter for band lines:
++ * shoule be log2(4/8/16/32/64/128/256).
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t band_height)
++{
++ uint32_t reg;
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++
++ if ((2 > band_height) || (8 < band_height))
++ return -EINVAL;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ reg = ipu_idmac_read(ipu, IDMAC_BAND_EN(dma_chan));
++ reg |= 1 << (dma_chan % 32);
++ ipu_idmac_write(ipu, reg, IDMAC_BAND_EN(dma_chan));
++
++ _ipu_ch_param_set_bandmode(ipu, dma_chan, band_height);
++ dev_dbg(ipu->dev, "dma_chan:%d, band_height:%d.\n\n",
++ dma_chan, 1 << band_height);
++ mutex_unlock(&ipu->mutex_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_set_channel_bandmode);
++
++/*!
++ * This function is called to initialize a buffer for logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to initialize.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width Input parameter for width of buffer in pixels.
++ *
++ * @param height Input parameter for height of buffer in pixels.
++ *
++ * @param stride Input parameter for stride length of buffer
++ * in pixels.
++ *
++ * @param u predefined private u offset for additional cropping,
++ * zero if not used.
++ *
++ * @param v predefined private v offset for additional cropping,
++ * zero if not used.
++ *
++ * @param vertical_offset vertical offset for Y coordinate
++ * in the existed frame
++ *
++ *
++ * @param horizontal_offset horizontal offset for X coordinate
++ * in the existed frame
++ *
++ *
++ * @return Returns 0 on success or negative error code on fail
++ * This function will fail if any buffer is set to ready.
++ */
++
++int32_t ipu_update_channel_offset(struct ipu_soc *ipu,
++ ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t vertical_offset, uint32_t horizontal_offset)
++{
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if ((ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan)) & idma_mask(dma_chan)) ||
++ (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan)) & idma_mask(dma_chan)) ||
++ ((ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan)) & idma_mask(dma_chan)) &&
++ (ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan)) & idma_mask(dma_chan)) &&
++ _ipu_is_trb_chan(dma_chan)))
++ ret = -EACCES;
++ else
++ _ipu_ch_offset_update(ipu, dma_chan, pixel_fmt, width, height, stride,
++ u, v, 0, vertical_offset, horizontal_offset);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_update_channel_offset);
++
++
++/*!
++ * This function is called to set a channel's buffer as ready.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to initialize.
++ *
++ * @param bufNum Input parameter for which buffer number set to
++ * ready state.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum)
++{
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Mark buffer to be ready. */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF2_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_select_buffer);
++
++/*!
++ * This function is called to set a channel's buffer as ready.
++ *
++ * @param ipu ipu handler
++ * @param bufNum Input parameter for which buffer number set to
++ * ready state.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum)
++{
++
++ uint32_t dma_chan = channel_2_dma(MEM_VDI_PRP_VF_MEM, IPU_INPUT_BUFFER);
++ uint32_t mask_bit =
++ idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_P, IPU_INPUT_BUFFER))|
++ idma_mask(dma_chan)|
++ idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_N, IPU_INPUT_BUFFER));
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Mark buffers to be ready. */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF0_RDY(dma_chan));
++ else
++ ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF1_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_select_multi_vdi_buffer);
++
++#define NA -1
++static int proc_dest_sel[] = {
++ 0, 1, 1, 3, 5, 5, 4, 7, 8, 9, 10, 11, 12, 14, 15, 16,
++ 0, 1, 1, 5, 5, 5, 5, 5, 7, 8, 9, 10, 11, 12, 14, 31 };
++static int proc_src_sel[] = { 0, 6, 7, 6, 7, 8, 5, NA, NA, NA,
++ NA, NA, NA, NA, NA, 1, 2, 3, 4, 7, 8, NA, 8, NA };
++static int disp_src_sel[] = { 0, 6, 7, 8, 3, 4, 5, NA, NA, NA,
++ NA, NA, NA, NA, NA, 1, NA, 2, NA, 3, 4, 4, 4, 4 };
++
++
++/*!
++ * This function links 2 channels together for automatic frame
++ * synchronization. The output of the source channel is linked to the input of
++ * the destination channel.
++ *
++ * @param ipu ipu handler
++ * @param src_ch Input parameter for the logical channel ID of
++ * the source channel.
++ *
++ * @param dest_ch Input parameter for the logical channel ID of
++ * the destination channel.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
++{
++ int retval = 0;
++ uint32_t fs_proc_flow1;
++ uint32_t fs_proc_flow2;
++ uint32_t fs_proc_flow3;
++ uint32_t fs_disp_flow1;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
++ fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
++ fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
++
++ switch (src_ch) {
++ case CSI_MEM0:
++ fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC0_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM1:
++ fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC1_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM2:
++ fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC2_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM3:
++ fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC3_DEST_SEL_OFFSET;
++ break;
++ case CSI_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_DEST_SEL_OFFSET;
++ break;
++ case CSI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_DEST_SEL_OFFSET;
++ break;
++ case MEM_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PP_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PP_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_DEST_SEL_OFFSET;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_VDOA_MEM:
++ fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
++ if (MEM_VDI_MEM == dest_ch)
++ fs_proc_flow3 |= FS_VDOA_DEST_SEL_VDI;
++ else if (MEM_PP_MEM == dest_ch)
++ fs_proc_flow3 |= FS_VDOA_DEST_SEL_IC;
++ else {
++ retval = -EINVAL;
++ goto err;
++ }
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ switch (dest_ch) {
++ case MEM_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
++ if (MEM_VDOA_MEM == src_ch)
++ fs_proc_flow1 |= FS_PP_SRC_SEL_VDOA;
++ else
++ fs_proc_flow1 |= proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PP_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PRPENC_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PRPVF_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_DC_SYNC:
++ fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_BG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_SYNC0_SRC_SEL_OFFSET;
++ break;
++ case MEM_FG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_SYNC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_DC_ASYNC:
++ fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC2_SRC_SEL_OFFSET;
++ break;
++ case MEM_BG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_ASYNC0_SRC_SEL_OFFSET;
++ break;
++ case MEM_FG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_ASYNC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_VDI_MEM:
++ fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
++ if (MEM_VDOA_MEM == src_ch)
++ fs_proc_flow1 |= FS_VDI_SRC_SEL_VDOA;
++ else {
++ retval = -EINVAL;
++ goto err;
++ }
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
++ ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
++ ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return retval;
++}
++EXPORT_SYMBOL(ipu_link_channels);
++
++/*!
++ * This function unlinks 2 channels and disables automatic frame
++ * synchronization.
++ *
++ * @param ipu ipu handler
++ * @param src_ch Input parameter for the logical channel ID of
++ * the source channel.
++ *
++ * @param dest_ch Input parameter for the logical channel ID of
++ * the destination channel.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
++{
++ int retval = 0;
++ uint32_t fs_proc_flow1;
++ uint32_t fs_proc_flow2;
++ uint32_t fs_proc_flow3;
++ uint32_t fs_disp_flow1;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
++ fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
++ fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
++
++ switch (src_ch) {
++ case CSI_MEM0:
++ fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
++ break;
++ case CSI_MEM1:
++ fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
++ break;
++ case CSI_MEM2:
++ fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
++ break;
++ case CSI_MEM3:
++ fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
++ break;
++ case CSI_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ break;
++ case CSI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_VDOA_MEM:
++ fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ switch (dest_ch) {
++ case MEM_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_DC_SYNC:
++ fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
++ break;
++ case MEM_BG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
++ break;
++ case MEM_FG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
++ break;
++ case MEM_DC_ASYNC:
++ fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
++ break;
++ case MEM_BG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
++ break;
++ case MEM_FG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
++ break;
++ case MEM_VDI_MEM:
++ fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
++ ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
++ ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return retval;
++}
++EXPORT_SYMBOL(ipu_unlink_channels);
++
++/*!
++ * This function check whether a logical channel was enabled.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return This function returns 1 while request channel is enabled or
++ * 0 for not enabled.
++ */
++int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t in_dma;
++ uint32_t out_dma;
++
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ if (reg & idma_mask(in_dma))
++ return 1;
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ if (reg & idma_mask(out_dma))
++ return 1;
++ return 0;
++}
++EXPORT_SYMBOL(ipu_is_channel_busy);
++
++/*!
++ * This function enables a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t ipu_conf;
++ uint32_t in_dma;
++ uint32_t out_dma;
++ uint32_t sec_dma;
++ uint32_t thrd_dma;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) {
++ dev_err(ipu->dev, "Warning: channel already enabled %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return -EACCES;
++ }
++
++ /* Get input and output dma channels */
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++ if (ipu->di_use_count[0] > 0) {
++ ipu_conf |= IPU_CONF_DI0_EN;
++ }
++ if (ipu->di_use_count[1] > 0) {
++ ipu_conf |= IPU_CONF_DI1_EN;
++ }
++ if (ipu->dp_use_count > 0)
++ ipu_conf |= IPU_CONF_DP_EN;
++ if (ipu->dc_use_count > 0)
++ ipu_conf |= IPU_CONF_DC_EN;
++ if (ipu->dmfc_use_count > 0)
++ ipu_conf |= IPU_CONF_DMFC_EN;
++ if (ipu->ic_use_count > 0)
++ ipu_conf |= IPU_CONF_IC_EN;
++ if (ipu->vdi_use_count > 0) {
++ ipu_conf |= IPU_CONF_ISP_EN;
++ ipu_conf |= IPU_CONF_VDI_EN;
++ ipu_conf |= IPU_CONF_IC_INPUT;
++ }
++ if (ipu->rot_use_count > 0)
++ ipu_conf |= IPU_CONF_ROT_EN;
++ if (ipu->smfc_use_count > 0)
++ ipu_conf |= IPU_CONF_SMFC_EN;
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++ if (idma_is_valid(in_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
++ }
++ if (idma_is_valid(out_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
++ }
++
++ if ((ipu->sec_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM) ||
++ (channel == MEM_VDI_PRP_VF_MEM))) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
++ }
++ if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM))) {
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_SEP_ALPHA);
++ } else if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))) {
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_SEP_ALPHA);
++ }
++
++ if ((channel == MEM_DC_SYNC) || (channel == MEM_BG_SYNC) ||
++ (channel == MEM_FG_SYNC)) {
++ reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_WM_EN(in_dma));
++
++ _ipu_dp_dc_enable(ipu, channel);
++ }
++
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
++ _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
++ _ipu_is_vdi_out_chan(out_dma))
++ _ipu_ic_enable_task(ipu, channel);
++
++ ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(channel);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_enable_channel);
++
++/*!
++ * This function check buffer ready for a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to clear.
++ *
++ * @param bufNum Input parameter for which buffer number clear
++ * ready state.
++ *
++ */
++int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if (bufNum == 0)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ if (reg & idma_mask(dma_chan))
++ return 1;
++ else
++ return 0;
++}
++EXPORT_SYMBOL(ipu_check_buffer_ready);
++
++/*!
++ * This function clear buffer ready for a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to clear.
++ *
++ * @param bufNum Input parameter for which buffer number clear
++ * ready state.
++ *
++ */
++void _ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ uint32_t dma_ch = channel_2_dma(channel, type);
++
++ if (!idma_is_valid(dma_ch))
++ return;
++
++ ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF0_RDY(dma_ch));
++ else if (bufNum == 1)
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF1_RDY(dma_ch));
++ else
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF2_RDY(dma_ch));
++ ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
++}
++
++void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ _ipu_clear_buffer_ready(ipu, channel, type, bufNum);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++}
++EXPORT_SYMBOL(ipu_clear_buffer_ready);
++
++/*!
++ * This function disables a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param wait_for_stop Flag to set whether to wait for channel end
++ * of frame or return immediately.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop)
++{
++ uint32_t reg;
++ uint32_t in_dma;
++ uint32_t out_dma;
++ uint32_t sec_dma = NO_DMA;
++ uint32_t thrd_dma = NO_DMA;
++ uint16_t fg_pos_x, fg_pos_y;
++ unsigned long lock_flags;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if ((ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
++ dev_dbg(ipu->dev, "Channel already disabled %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return -EACCES;
++ }
++
++ /* Get input and output dma channels */
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ if ((idma_is_valid(in_dma) &&
++ !idma_is_set(ipu, IDMAC_CHA_EN, in_dma))
++ && (idma_is_valid(out_dma) &&
++ !idma_is_set(ipu, IDMAC_CHA_EN, out_dma))) {
++ mutex_unlock(&ipu->mutex_lock);
++ return -EINVAL;
++ }
++
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)])
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ }
++
++ if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
++ (channel == MEM_DC_SYNC)) {
++ if (channel == MEM_FG_SYNC) {
++ _ipu_disp_get_window_pos(ipu, channel, &fg_pos_x, &fg_pos_y);
++ _ipu_disp_set_window_pos(ipu, channel, 0, 0);
++ }
++
++ _ipu_dp_dc_disable(ipu, channel, false);
++
++ /*
++ * wait for BG channel EOF then disable FG-IDMAC,
++ * it avoid FG NFB4EOF error.
++ */
++ if ((channel == MEM_FG_SYNC) && (ipu_is_channel_busy(ipu, MEM_BG_SYNC))) {
++ int timeout = 50;
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF),
++ IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF));
++ while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF)) &
++ IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF)) == 0) {
++ msleep(10);
++ timeout -= 10;
++ if (timeout <= 0) {
++ dev_err(ipu->dev, "warning: wait for bg sync eof timeout\n");
++ break;
++ }
++ }
++ }
++ } else if (wait_for_stop && !_ipu_is_smfc_chan(out_dma) &&
++ channel != CSI_PRP_VF_MEM && channel != CSI_PRP_ENC_MEM) {
++ while (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma) ||
++ idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma) ||
++ (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma)) ||
++ (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))) {
++ uint32_t irq = 0xffffffff;
++ int timeout = 50000;
++
++ if (idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma))
++ irq = out_dma;
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma))
++ irq = sec_dma;
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))
++ irq = thrd_dma;
++ if (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma))
++ irq = in_dma;
++
++ if (irq == 0xffffffff) {
++ dev_dbg(ipu->dev, "warning: no channel busy, break\n");
++ break;
++ }
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq),
++ IPUIRQ_2_STATREG(irq));
++
++ dev_dbg(ipu->dev, "warning: channel %d busy, need wait\n", irq);
++
++ while (((ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq))
++ & IPUIRQ_2_MASK(irq)) == 0) &&
++ (idma_is_set(ipu, IDMAC_CHA_BUSY, irq))) {
++ udelay(10);
++ timeout -= 10;
++ if (timeout <= 0) {
++ ipu_dump_registers(ipu);
++ dev_err(ipu->dev, "warning: disable ipu dma channel %d during its busy state\n", irq);
++ break;
++ }
++ }
++ dev_dbg(ipu->dev, "wait_time:%d\n", 50000 - timeout);
++
++ }
++ }
++
++ if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
++ (channel == MEM_DC_SYNC)) {
++ reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_WM_EN(in_dma));
++ }
++
++ /* Disable IC task */
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
++ _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
++ _ipu_is_vdi_out_chan(out_dma))
++ _ipu_ic_disable_task(ipu, channel);
++
++ /* Disable DMA channel(s) */
++ if (idma_is_valid(in_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
++ ipu_cm_write(ipu, idma_mask(in_dma), IPU_CHA_CUR_BUF(in_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(in_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(in_dma));
++ }
++ if (idma_is_valid(out_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
++ ipu_cm_write(ipu, idma_mask(out_dma), IPU_CHA_CUR_BUF(out_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(out_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(out_dma));
++ }
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
++ ipu_cm_write(ipu, idma_mask(sec_dma), IPU_CHA_CUR_BUF(sec_dma));
++ }
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC) {
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_SEP_ALPHA);
++ } else {
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_SEP_ALPHA);
++ }
++ ipu_cm_write(ipu, idma_mask(thrd_dma), IPU_CHA_CUR_BUF(thrd_dma));
++ }
++
++ if (channel == MEM_FG_SYNC)
++ _ipu_disp_set_window_pos(ipu, channel, fg_pos_x, fg_pos_y);
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Set channel buffers NOT to be ready */
++ if (idma_is_valid(in_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 1);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 2);
++ }
++ if (idma_is_valid(out_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 1);
++ }
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 1);
++ }
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 1);
++ }
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(channel));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disable_channel);
++
++/*!
++ * This function enables CSI.
++ *
++ * @param ipu ipu handler
++ * @param csi csi num 0 or 1
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t reg;
++
++ if (csi > 1) {
++ dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
++ return -EINVAL;
++ }
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ipu->csi_use_count[csi]++;
++
++ if (ipu->csi_use_count[csi] == 1) {
++ reg = ipu_cm_read(ipu, IPU_CONF);
++ if (csi == 0)
++ ipu_cm_write(ipu, reg | IPU_CONF_CSI0_EN, IPU_CONF);
++ else
++ ipu_cm_write(ipu, reg | IPU_CONF_CSI1_EN, IPU_CONF);
++ }
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_enable_csi);
++
++/*!
++ * This function disables CSI.
++ *
++ * @param ipu ipu handler
++ * @param csi csi num 0 or 1
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t reg;
++
++ if (csi > 1) {
++ dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
++ return -EINVAL;
++ }
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ipu->csi_use_count[csi]--;
++ if (ipu->csi_use_count[csi] == 0) {
++ _ipu_csi_wait4eof(ipu, ipu->csi_channel[csi]);
++ reg = ipu_cm_read(ipu, IPU_CONF);
++ if (csi == 0)
++ ipu_cm_write(ipu, reg & ~IPU_CONF_CSI0_EN, IPU_CONF);
++ else
++ ipu_cm_write(ipu, reg & ~IPU_CONF_CSI1_EN, IPU_CONF);
++ }
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disable_csi);
++
++static irqreturn_t ipu_sync_irq_handler(int irq, void *desc)
++{
++ struct ipu_soc *ipu = desc;
++ int i;
++ uint32_t line, bit, int_stat, int_ctrl;
++ irqreturn_t result = IRQ_NONE;
++ const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
++
++ spin_lock(&ipu->int_reg_spin_lock);
++
++ for (i = 0; int_reg[i] != 0; i++) {
++ int_stat = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
++ int_ctrl = ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
++ int_stat &= int_ctrl;
++ ipu_cm_write(ipu, int_stat, IPU_INT_STAT(int_reg[i]));
++ while ((line = ffs(int_stat)) != 0) {
++ bit = --line;
++ int_stat &= ~(1UL << line);
++ line += (int_reg[i] - 1) * 32;
++ result |=
++ ipu->irq_list[line].handler(line,
++ ipu->irq_list[line].
++ dev_id);
++ if (ipu->irq_list[line].flags & IPU_IRQF_ONESHOT) {
++ int_ctrl &= ~(1UL << bit);
++ ipu_cm_write(ipu, int_ctrl,
++ IPU_INT_CTRL(int_reg[i]));
++ }
++ }
++ }
++
++ spin_unlock(&ipu->int_reg_spin_lock);
++
++ return result;
++}
++
++static irqreturn_t ipu_err_irq_handler(int irq, void *desc)
++{
++ struct ipu_soc *ipu = desc;
++ int i;
++ uint32_t int_stat;
++ const int err_reg[] = { 5, 6, 9, 10, 0 };
++
++ spin_lock(&ipu->int_reg_spin_lock);
++
++ for (i = 0; err_reg[i] != 0; i++) {
++ int_stat = ipu_cm_read(ipu, IPU_INT_STAT(err_reg[i]));
++ int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i]));
++ if (int_stat) {
++ ipu_cm_write(ipu, int_stat, IPU_INT_STAT(err_reg[i]));
++ dev_warn(ipu->dev,
++ "IPU Warning - IPU_INT_STAT_%d = 0x%08X\n",
++ err_reg[i], int_stat);
++ /* Disable interrupts so we only get error once */
++ int_stat = ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i])) &
++ ~int_stat;
++ ipu_cm_write(ipu, int_stat, IPU_INT_CTRL(err_reg[i]));
++ }
++ }
++
++ spin_unlock(&ipu->int_reg_spin_lock);
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * This function enables the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to enable interrupt for.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int ret = 0;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ /*
++ * Check sync interrupt handler only, since we do nothing for
++ * error interrupts but than print out register values in the
++ * error interrupt source handler.
++ */
++ if (_ipu_is_sync_irq(irq) && (ipu->irq_list[irq].handler == NULL)) {
++ dev_err(ipu->dev, "handler hasn't been registered on sync "
++ "irq %d\n", irq);
++ ret = -EACCES;
++ goto out;
++ }
++
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg |= IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++out:
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_enable_irq);
++
++/*!
++ * This function disables the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to disable interrupt for.
++ *
++ */
++void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg &= ~IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_disable_irq);
++
++/*!
++ * This function clears the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to clear interrupt for.
++ *
++ */
++void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_clear_irq);
++
++/*!
++ * This function returns the current interrupt status for the specified
++ * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @return Returns true if the interrupt is pending/asserted or false if
++ * the interrupt is not pending.
++ */
++bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++ reg = ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq));
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ if (reg & IPUIRQ_2_MASK(irq))
++ return true;
++ else
++ return false;
++}
++EXPORT_SYMBOL(ipu_get_irq_status);
++
++/*!
++ * This function registers an interrupt handler function for the specified
++ * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @param handler Input parameter for address of the handler
++ * function.
++ *
++ * @param irq_flags Flags for interrupt mode. Currently not used.
++ *
++ * @param devname Input parameter for string name of driver
++ * registering the handler.
++ *
++ * @param dev_id Input parameter for pointer of data to be
++ * passed to the handler.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
++ irqreturn_t(*handler) (int, void *),
++ uint32_t irq_flags, const char *devname, void *dev_id)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int ret = 0;
++
++ BUG_ON(irq >= IPU_IRQ_COUNT);
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ if (ipu->irq_list[irq].handler != NULL) {
++ dev_err(ipu->dev,
++ "handler already installed on irq %d\n", irq);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * Check sync interrupt handler only, since we do nothing for
++ * error interrupts but than print out register values in the
++ * error interrupt source handler.
++ */
++ if (_ipu_is_sync_irq(irq) && (handler == NULL)) {
++ dev_err(ipu->dev, "handler is NULL for sync irq %d\n", irq);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ ipu->irq_list[irq].handler = handler;
++ ipu->irq_list[irq].flags = irq_flags;
++ ipu->irq_list[irq].dev_id = dev_id;
++ ipu->irq_list[irq].name = devname;
++
++ /* clear irq stat for previous use */
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
++ /* enable the interrupt */
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg |= IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++out:
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_request_irq);
++
++/*!
++ * This function unregisters an interrupt handler for the specified interrupt
++ * line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @param dev_id Input parameter for pointer of data to be passed
++ * to the handler. This must match value passed to
++ * ipu_request_irq().
++ *
++ */
++void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ /* disable the interrupt */
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg &= ~IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++ if (ipu->irq_list[irq].dev_id == dev_id)
++ memset(&ipu->irq_list[irq], 0, sizeof(ipu->irq_list[irq]));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_free_irq);
++
++uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
++{
++ uint32_t reg, dma_chan;
++
++ dma_chan = channel_2_dma(channel, type);
++ if (!idma_is_valid(dma_chan))
++ return -EINVAL;
++
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ if ((reg & idma_mask(dma_chan)) && _ipu_is_trb_chan(dma_chan)) {
++ reg = ipu_cm_read(ipu, IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
++ return (reg & tri_cur_buf_mask(dma_chan)) >>
++ tri_cur_buf_shift(dma_chan);
++ } else {
++ reg = ipu_cm_read(ipu, IPU_CHA_CUR_BUF(dma_chan));
++ if (reg & idma_mask(dma_chan))
++ return 1;
++ else
++ return 0;
++ }
++}
++EXPORT_SYMBOL(ipu_get_cur_buffer_idx);
++
++uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t stat = 0;
++ uint32_t task_stat_reg = ipu_cm_read(ipu, IPU_PROC_TASK_STAT);
++
++ switch (channel) {
++ case MEM_PRP_VF_MEM:
++ stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ stat =
++ (task_stat_reg & TSTAT_VF_ROT_MASK) >> TSTAT_VF_ROT_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ stat = (task_stat_reg & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ stat =
++ (task_stat_reg & TSTAT_ENC_ROT_MASK) >>
++ TSTAT_ENC_ROT_OFFSET;
++ break;
++ case MEM_PP_MEM:
++ stat = (task_stat_reg & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ stat =
++ (task_stat_reg & TSTAT_PP_ROT_MASK) >> TSTAT_PP_ROT_OFFSET;
++ break;
++
++ default:
++ stat = TASK_STAT_IDLE;
++ break;
++ }
++ return stat;
++}
++
++/*!
++ * This function check for a logical channel status
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return This function returns 0 on idle and 1 on busy.
++ *
++ */
++uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t dma_status;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ dma_status = ipu_is_channel_busy(ipu, channel);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++
++ dev_dbg(ipu->dev, "%s, dma_status:%d.\n", __func__, dma_status);
++
++ return dma_status;
++}
++EXPORT_SYMBOL(ipu_channel_status);
++
++int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int from_dma = channel_2_dma(from_ch, IPU_INPUT_BUFFER);
++ int to_dma = channel_2_dma(to_ch, IPU_INPUT_BUFFER);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* enable target channel */
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(to_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(to_dma), IDMAC_CHA_EN(to_dma));
++
++ ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(to_ch);
++
++ /* switch dp dc */
++ _ipu_dp_dc_disable(ipu, from_ch, true);
++
++ /* disable source channel */
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(from_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(from_dma), IDMAC_CHA_EN(from_dma));
++ ipu_cm_write(ipu, idma_mask(from_dma), IPU_CHA_CUR_BUF(from_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(from_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(from_dma));
++
++ ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(from_ch));
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 1);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 2);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_swap_channel);
++
++uint32_t bytes_per_pixel(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_GENERIC: /*generic data */
++ case IPU_PIX_FMT_RGB332:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YUV444P:
++ return 1;
++ break;
++ case IPU_PIX_FMT_GENERIC_16: /* generic data */
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ return 2;
++ break;
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ return 3;
++ break;
++ case IPU_PIX_FMT_GENERIC_32: /*generic data */
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_ABGR32:
++ return 4;
++ break;
++ default:
++ return 1;
++ break;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(bytes_per_pixel);
++
++ipu_color_space_t format_to_colorspace(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_RGB666:
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_GBR24:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_ABGR32:
++ case IPU_PIX_FMT_LVDS666:
++ case IPU_PIX_FMT_LVDS888:
++ return RGB;
++ break;
++
++ default:
++ return YCbCr;
++ break;
++ }
++ return RGB;
++}
++
++bool ipu_pixel_format_has_alpha(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_ABGR32:
++ return true;
++ break;
++ default:
++ return false;
++ break;
++ }
++ return false;
++}
++
++bool ipu_ch_param_bad_alpha_pos(uint32_t pixel_fmt)
++{
++ return _ipu_ch_param_bad_alpha_pos(pixel_fmt);
++}
++EXPORT_SYMBOL(ipu_ch_param_bad_alpha_pos);
++
++#ifdef CONFIG_PM
++static int ipu_suspend(struct device *dev)
++{
++ struct ipu_soc *ipu = dev_get_drvdata(dev);
++
++ /* All IDMAC channel and IPU clock should be disabled.*/
++ if (ipu->pdata->pg)
++ ipu->pdata->pg(1);
++
++ dev_dbg(dev, "ipu suspend.\n");
++ return 0;
++}
++
++static int ipu_resume(struct device *dev)
++{
++ struct ipu_soc *ipu = dev_get_drvdata(dev);
++
++ if (ipu->pdata->pg) {
++ ipu->pdata->pg(0);
++
++ _ipu_get(ipu);
++ _ipu_dmfc_init(ipu, dmfc_type_setup, 1);
++ /* Set sync refresh channels as high priority */
++ ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
++ _ipu_put(ipu);
++ }
++ dev_dbg(dev, "ipu resume.\n");
++ return 0;
++}
++
++int ipu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ dev_dbg(dev, "ipu busfreq high release.\n");
++
++ return 0;
++}
++
++int ipu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ dev_dbg(dev, "ipu busfreq high requst.\n");
++
++ return 0;
++}
++
++static const struct dev_pm_ops ipu_pm_ops = {
++ SET_RUNTIME_PM_OPS(ipu_runtime_suspend, ipu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(ipu_suspend, ipu_resume)
++};
++#endif
++
++/*!
++ * This structure contains pointers to the power management callback functions.
++ */
++static struct platform_driver mxcipu_driver = {
++ .driver = {
++ .name = "imx-ipuv3",
++ .of_match_table = imx_ipuv3_dt_ids,
++ #ifdef CONFIG_PM
++ .pm = &ipu_pm_ops,
++ #endif
++ },
++ .probe = ipu_probe,
++ .id_table = imx_ipu_type,
++ .remove = ipu_remove,
++};
++
++int32_t __init ipu_gen_init(void)
++{
++ int32_t ret;
++
++ ret = platform_driver_register(&mxcipu_driver);
++ return 0;
++}
++
++subsys_initcall(ipu_gen_init);
++
++static void __exit ipu_gen_uninit(void)
++{
++ platform_driver_unregister(&mxcipu_driver);
++}
++
++module_exit(ipu_gen_uninit);
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_device.c linux-openelec/drivers/mxc/ipu3/ipu_device.c
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_device.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_device.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3717 @@
++/*
++ * Copyright 2005-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_device.c
++ *
++ * @brief This file contains the IPUv3 driver device interface and fops functions.
++ *
++ * @ingroup IPU
++ */
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/kernel.h>
++#include <linux/kthread.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/sched.h>
++#include <linux/sched/rt.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++
++#include <asm/cacheflush.h>
++#include <asm/outercache.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++#include "vdoa.h"
++
++#define CHECK_RETCODE(cont, str, err, label, ret) \
++do { \
++ if (cont) { \
++ dev_err(t->dev, "ERR:[0x%p]-no:0x%x "#str" ret:%d," \
++ "line:%d\n", t, t->task_no, ret, __LINE__);\
++ if (ret != -EACCES) { \
++ t->state = err; \
++ goto label; \
++ } \
++ } \
++} while (0)
++
++#define CHECK_RETCODE_CONT(cont, str, err, ret) \
++do { \
++ if (cont) { \
++ dev_err(t->dev, "ERR:[0x%p]-no:0x%x"#str" ret:%d," \
++ "line:%d\n", t, t->task_no, ret, __LINE__);\
++ if (ret != -EACCES) { \
++ if (t->state == STATE_OK) \
++ t->state = err; \
++ } \
++ } \
++} while (0)
++
++#undef DBG_IPU_PERF
++#ifdef DBG_IPU_PERF
++#define CHECK_PERF(ts) \
++do { \
++ getnstimeofday(ts); \
++} while (0)
++
++#define DECLARE_PERF_VAR \
++ struct timespec ts_queue; \
++ struct timespec ts_dotask; \
++ struct timespec ts_waitirq; \
++ struct timespec ts_sche; \
++ struct timespec ts_rel; \
++ struct timespec ts_frame
++
++#define PRINT_TASK_STATISTICS \
++do { \
++ ts_queue = timespec_sub(tsk->ts_dotask, tsk->ts_queue); \
++ ts_dotask = timespec_sub(tsk->ts_waitirq, tsk->ts_dotask); \
++ ts_waitirq = timespec_sub(tsk->ts_inirq, tsk->ts_waitirq); \
++ ts_sche = timespec_sub(tsk->ts_wakeup, tsk->ts_inirq); \
++ ts_rel = timespec_sub(tsk->ts_rel, tsk->ts_wakeup); \
++ ts_frame = timespec_sub(tsk->ts_rel, tsk->ts_queue); \
++ dev_dbg(tsk->dev, "[0x%p] no-0x%x, ts_q:%ldus, ts_do:%ldus," \
++ "ts_waitirq:%ldus,ts_sche:%ldus, ts_rel:%ldus," \
++ "ts_frame: %ldus\n", tsk, tsk->task_no, \
++ ts_queue.tv_nsec / NSEC_PER_USEC + ts_queue.tv_sec * USEC_PER_SEC,\
++ ts_dotask.tv_nsec / NSEC_PER_USEC + ts_dotask.tv_sec * USEC_PER_SEC,\
++ ts_waitirq.tv_nsec / NSEC_PER_USEC + ts_waitirq.tv_sec * USEC_PER_SEC,\
++ ts_sche.tv_nsec / NSEC_PER_USEC + ts_sche.tv_sec * USEC_PER_SEC,\
++ ts_rel.tv_nsec / NSEC_PER_USEC + ts_rel.tv_sec * USEC_PER_SEC,\
++ ts_frame.tv_nsec / NSEC_PER_USEC + ts_frame.tv_sec * USEC_PER_SEC); \
++ if ((ts_frame.tv_nsec/NSEC_PER_USEC + ts_frame.tv_sec*USEC_PER_SEC) > \
++ 80000) \
++ dev_dbg(tsk->dev, "ts_frame larger than 80ms [0x%p] no-0x%x.\n"\
++ , tsk, tsk->task_no); \
++} while (0)
++#else
++#define CHECK_PERF(ts)
++#define DECLARE_PERF_VAR
++#define PRINT_TASK_STATISTICS
++#endif
++
++#define IPU_PP_CH_VF (IPU_TASK_ID_VF - 1)
++#define IPU_PP_CH_PP (IPU_TASK_ID_PP - 1)
++#define MAX_PP_CH (IPU_TASK_ID_MAX - 1)
++#define VDOA_DEF_TIMEOUT_MS (HZ/2)
++
++/* Strucutures and variables for exporting MXC IPU as device*/
++typedef enum {
++ STATE_OK = 0,
++ STATE_QUEUE,
++ STATE_IN_PROGRESS,
++ STATE_ERR,
++ STATE_TIMEOUT,
++ STATE_RES_TIMEOUT,
++ STATE_NO_IPU,
++ STATE_NO_IRQ,
++ STATE_IPU_BUSY,
++ STATE_IRQ_FAIL,
++ STATE_IRQ_TIMEOUT,
++ STATE_ENABLE_CHAN_FAIL,
++ STATE_DISABLE_CHAN_FAIL,
++ STATE_SEL_BUF_FAIL,
++ STATE_INIT_CHAN_FAIL,
++ STATE_LINK_CHAN_FAIL,
++ STATE_UNLINK_CHAN_FAIL,
++ STATE_INIT_CHAN_BUF_FAIL,
++ STATE_INIT_CHAN_BAND_FAIL,
++ STATE_SYS_NO_MEM,
++ STATE_VDOA_IRQ_TIMEOUT,
++ STATE_VDOA_IRQ_FAIL,
++ STATE_VDOA_TASK_FAIL,
++} ipu_state_t;
++
++enum {
++ INPUT_CHAN_VDI_P = 1,
++ INPUT_CHAN,
++ INPUT_CHAN_VDI_N,
++};
++
++struct ipu_state_msg {
++ int state;
++ char *msg;
++} state_msg[] = {
++ {STATE_OK, "ok"},
++ {STATE_QUEUE, "split queue"},
++ {STATE_IN_PROGRESS, "split in progress"},
++ {STATE_ERR, "error"},
++ {STATE_TIMEOUT, "split task timeout"},
++ {STATE_RES_TIMEOUT, "wait resource timeout"},
++ {STATE_NO_IPU, "no ipu found"},
++ {STATE_NO_IRQ, "no irq found for task"},
++ {STATE_IPU_BUSY, "ipu busy"},
++ {STATE_IRQ_FAIL, "request irq failed"},
++ {STATE_IRQ_TIMEOUT, "wait for irq timeout"},
++ {STATE_ENABLE_CHAN_FAIL, "ipu enable channel fail"},
++ {STATE_DISABLE_CHAN_FAIL, "ipu disable channel fail"},
++ {STATE_SEL_BUF_FAIL, "ipu select buf fail"},
++ {STATE_INIT_CHAN_FAIL, "ipu init channel fail"},
++ {STATE_LINK_CHAN_FAIL, "ipu link channel fail"},
++ {STATE_UNLINK_CHAN_FAIL, "ipu unlink channel fail"},
++ {STATE_INIT_CHAN_BUF_FAIL, "ipu init channel buffer fail"},
++ {STATE_INIT_CHAN_BAND_FAIL, "ipu init channel band mode fail"},
++ {STATE_SYS_NO_MEM, "sys no mem: -ENOMEM"},
++ {STATE_VDOA_IRQ_TIMEOUT, "wait for vdoa irq timeout"},
++ {STATE_VDOA_IRQ_FAIL, "vdoa irq fail"},
++ {STATE_VDOA_TASK_FAIL, "vdoa task fail"},
++};
++
++struct stripe_setting {
++ u32 iw;
++ u32 ih;
++ u32 ow;
++ u32 oh;
++ u32 outh_resize_ratio;
++ u32 outv_resize_ratio;
++ u32 i_left_pos;
++ u32 i_right_pos;
++ u32 i_top_pos;
++ u32 i_bottom_pos;
++ u32 o_left_pos;
++ u32 o_right_pos;
++ u32 o_top_pos;
++ u32 o_bottom_pos;
++ u32 rl_split_line;
++ u32 ud_split_line;
++};
++
++struct task_set {
++#define NULL_MODE 0x0
++#define IC_MODE 0x1
++#define ROT_MODE 0x2
++#define VDI_MODE 0x4
++#define IPU_PREPROCESS_MODE_MASK (IC_MODE | ROT_MODE | VDI_MODE)
++/* VDOA_MODE means this task use vdoa, and VDOA has two modes:
++ * BAND MODE and non-BAND MODE. Non-band mode will do transfer data
++ * to memory. BAND mode needs hareware sync with IPU, it is used default
++ * if connected to VDIC.
++ */
++#define VDOA_MODE 0x8
++#define VDOA_BAND_MODE 0x10
++ u8 mode;
++#define IC_VF 0x1
++#define IC_PP 0x2
++#define ROT_VF 0x4
++#define ROT_PP 0x8
++#define VDI_VF 0x10
++#define VDOA_ONLY 0x20
++ u8 task;
++#define NO_SPLIT 0x0
++#define RL_SPLIT 0x1
++#define UD_SPLIT 0x2
++#define LEFT_STRIPE 0x1
++#define RIGHT_STRIPE 0x2
++#define UP_STRIPE 0x4
++#define DOWN_STRIPE 0x8
++#define SPLIT_MASK 0xF
++ u8 split_mode;
++ u8 band_lines;
++ ipu_channel_t ic_chan;
++ ipu_channel_t rot_chan;
++ ipu_channel_t vdi_ic_p_chan;
++ ipu_channel_t vdi_ic_n_chan;
++
++ u32 i_off;
++ u32 i_uoff;
++ u32 i_voff;
++ u32 istride;
++
++ u32 ov_off;
++ u32 ov_uoff;
++ u32 ov_voff;
++ u32 ovstride;
++
++ u32 ov_alpha_off;
++ u32 ov_alpha_stride;
++
++ u32 o_off;
++ u32 o_uoff;
++ u32 o_voff;
++ u32 ostride;
++
++ u32 r_fmt;
++ u32 r_width;
++ u32 r_height;
++ u32 r_stride;
++ dma_addr_t r_paddr;
++
++ struct stripe_setting sp_setting;
++};
++
++struct ipu_split_task {
++ struct ipu_task task;
++ struct ipu_task_entry *parent_task;
++ struct ipu_task_entry *child_task;
++ u32 task_no;
++};
++
++struct ipu_task_entry {
++ struct ipu_input input;
++ struct ipu_output output;
++
++ bool overlay_en;
++ struct ipu_overlay overlay;
++#define DEF_TIMEOUT_MS 1000
++#define DEF_DELAY_MS 20
++ int timeout;
++ int irq;
++
++ u8 task_id;
++ u8 ipu_id;
++ u8 task_in_list;
++ u8 split_done;
++ struct mutex split_lock;
++ struct mutex vdic_lock;
++ wait_queue_head_t split_waitq;
++
++ struct list_head node;
++ struct list_head split_list;
++ struct ipu_soc *ipu;
++ struct device *dev;
++ struct task_set set;
++ wait_queue_head_t task_waitq;
++ struct completion irq_comp;
++ struct kref refcount;
++ ipu_state_t state;
++ u32 task_no;
++ atomic_t done;
++ atomic_t res_free;
++ atomic_t res_get;
++
++ struct ipu_task_entry *parent;
++ char *vditmpbuf[2];
++ u32 old_save_lines;
++ u32 old_size;
++ bool buf1filled;
++ bool buf0filled;
++
++ vdoa_handle_t vdoa_handle;
++ struct vdoa_output_mem {
++ void *vaddr;
++ dma_addr_t paddr;
++ int size;
++ } vdoa_dma;
++
++#ifdef DBG_IPU_PERF
++ struct timespec ts_queue;
++ struct timespec ts_dotask;
++ struct timespec ts_waitirq;
++ struct timespec ts_inirq;
++ struct timespec ts_wakeup;
++ struct timespec ts_rel;
++#endif
++};
++
++struct ipu_channel_tabel {
++ struct mutex lock;
++ u8 used[MXC_IPU_MAX_NUM][MAX_PP_CH];
++ u8 vdoa_used;
++};
++
++struct ipu_thread_data {
++ struct ipu_soc *ipu;
++ u32 id;
++ u32 is_vdoa;
++};
++
++struct ipu_alloc_list {
++ struct list_head list;
++ dma_addr_t phy_addr;
++ void *cpu_addr;
++ u32 size;
++ void *file_index;
++};
++
++static LIST_HEAD(ipu_alloc_list);
++static DEFINE_MUTEX(ipu_alloc_lock);
++static struct ipu_channel_tabel ipu_ch_tbl;
++static LIST_HEAD(ipu_task_list);
++static DEFINE_SPINLOCK(ipu_task_list_lock);
++static DECLARE_WAIT_QUEUE_HEAD(thread_waitq);
++static DECLARE_WAIT_QUEUE_HEAD(res_waitq);
++static atomic_t req_cnt;
++static atomic_t file_index = ATOMIC_INIT(1);
++static int major;
++static int max_ipu_no;
++static int thread_id;
++static atomic_t frame_no;
++static struct class *ipu_class;
++static struct device *ipu_dev;
++static int debug;
++module_param(debug, int, 0600);
++#ifdef DBG_IPU_PERF
++static struct timespec ts_frame_max;
++static u32 ts_frame_avg;
++static atomic_t frame_cnt;
++#endif
++
++static bool deinterlace_3_field(struct ipu_task_entry *t)
++{
++ return ((t->set.mode & VDI_MODE) &&
++ (t->input.deinterlace.motion != HIGH_MOTION));
++}
++
++static u32 tiled_filed_size(struct ipu_task_entry *t)
++{
++ u32 field_size;
++
++ /* note: page_align is required by VPU hw ouput buffer */
++ field_size = TILED_NV12_FRAME_SIZE(t->input.width, t->input.height/2);
++ return field_size;
++}
++
++static bool only_ic(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return ((mode == IC_MODE) || (mode == VDI_MODE));
++}
++
++static bool only_rot(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return (mode == ROT_MODE);
++}
++
++static bool ic_and_rot(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return ((mode == (IC_MODE | ROT_MODE)) ||
++ (mode == (VDI_MODE | ROT_MODE)));
++}
++
++static bool need_split(struct ipu_task_entry *t)
++{
++ return ((t->set.split_mode != NO_SPLIT) || (t->task_no & SPLIT_MASK));
++}
++
++unsigned int fmt_to_bpp(unsigned int pixelformat)
++{
++ u32 bpp;
++
++ switch (pixelformat) {
++ case IPU_PIX_FMT_RGB565:
++ /*interleaved 422*/
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ /*non-interleaved 422*/
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ bpp = 16;
++ break;
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_YUV444P:
++ bpp = 24;
++ break;
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_ABGR32:
++ bpp = 32;
++ break;
++ /*non-interleaved 420*/
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_NV12:
++ bpp = 12;
++ break;
++ default:
++ bpp = 8;
++ break;
++ }
++ return bpp;
++}
++EXPORT_SYMBOL_GPL(fmt_to_bpp);
++
++cs_t colorspaceofpixel(int fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_ABGR32:
++ return RGB_CS;
++ break;
++ case IPU_PIX_FMT_UYVY:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_YUV444P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_TILED_NV12:
++ case IPU_PIX_FMT_TILED_NV12F:
++ return YUV_CS;
++ break;
++ default:
++ return NULL_CS;
++ }
++}
++EXPORT_SYMBOL_GPL(colorspaceofpixel);
++
++int need_csc(int ifmt, int ofmt)
++{
++ cs_t ics, ocs;
++
++ ics = colorspaceofpixel(ifmt);
++ ocs = colorspaceofpixel(ofmt);
++
++ if ((ics == NULL_CS) || (ocs == NULL_CS))
++ return -1;
++ else if (ics != ocs)
++ return 1;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(need_csc);
++
++static int soc_max_in_width(u32 is_vdoa)
++{
++ return is_vdoa ? 8192 : 4096;
++}
++
++static int soc_max_vdi_in_width(void)
++{
++ return IPU_MAX_VDI_IN_WIDTH;
++}
++static int soc_max_in_height(void)
++{
++ return 4096;
++}
++
++static int soc_max_out_width(void)
++{
++ /* mx51/mx53/mx6q is 1024*/
++ return 1024;
++}
++
++static int soc_max_out_height(void)
++{
++ /* mx51/mx53/mx6q is 1024*/
++ return 1024;
++}
++
++static void dump_task_info(struct ipu_task_entry *t)
++{
++ if (!debug)
++ return;
++ dev_dbg(t->dev, "[0x%p]input:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->input.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->input.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->input.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n", (void *)t, t->input.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n", (void *)t, t->input.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->input.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->input.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]input buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->input.paddr);
++ dev_dbg(t->dev, "[0x%p]\ti_off = 0x%x\n", (void *)t, t->set.i_off);
++ dev_dbg(t->dev, "[0x%p]\ti_uoff = 0x%x\n", (void *)t, t->set.i_uoff);
++ dev_dbg(t->dev, "[0x%p]\ti_voff = 0x%x\n", (void *)t, t->set.i_voff);
++ dev_dbg(t->dev, "[0x%p]\tistride = %d\n", (void *)t, t->set.istride);
++ if (t->input.deinterlace.enable) {
++ dev_dbg(t->dev, "[0x%p]deinterlace enabled with:\n", (void *)t);
++ if (t->input.deinterlace.motion != HIGH_MOTION) {
++ dev_dbg(t->dev, "[0x%p]\tlow/medium motion\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr_n = 0x%x\n",
++ (void *)t, t->input.paddr_n);
++ } else
++ dev_dbg(t->dev, "[0x%p]\thigh motion\n", (void *)t);
++ }
++
++ dev_dbg(t->dev, "[0x%p]output:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->output.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->output.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->output.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n", (void *)t, t->output.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n", (void *)t, t->output.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->output.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->output.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]\trotate = %d\n", (void *)t, t->output.rotate);
++ dev_dbg(t->dev, "[0x%p]output buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->output.paddr);
++ dev_dbg(t->dev, "[0x%p]\to_off = 0x%x\n", (void *)t, t->set.o_off);
++ dev_dbg(t->dev, "[0x%p]\to_uoff = 0x%x\n", (void *)t, t->set.o_uoff);
++ dev_dbg(t->dev, "[0x%p]\to_voff = 0x%x\n", (void *)t, t->set.o_voff);
++ dev_dbg(t->dev, "[0x%p]\tostride = %d\n", (void *)t, t->set.ostride);
++
++ if (t->overlay_en) {
++ dev_dbg(t->dev, "[0x%p]overlay:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n",
++ (void *)t, t->overlay.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n",
++ (void *)t, t->overlay.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n",
++ (void *)t, t->overlay.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n",
++ (void *)t, t->overlay.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n",
++ (void *)t, t->overlay.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->overlay.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->overlay.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]overlay buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n",
++ (void *)t, t->overlay.paddr);
++ dev_dbg(t->dev, "[0x%p]\tov_off = 0x%x\n",
++ (void *)t, t->set.ov_off);
++ dev_dbg(t->dev, "[0x%p]\tov_uoff = 0x%x\n",
++ (void *)t, t->set.ov_uoff);
++ dev_dbg(t->dev, "[0x%p]\tov_voff = 0x%x\n",
++ (void *)t, t->set.ov_voff);
++ dev_dbg(t->dev, "[0x%p]\tovstride = %d\n",
++ (void *)t, t->set.ovstride);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ dev_dbg(t->dev, "[0x%p]local alpha enabled with:\n",
++ (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n",
++ (void *)t, t->overlay.alpha.loc_alp_paddr);
++ dev_dbg(t->dev, "[0x%p]\tov_alpha_off = 0x%x\n",
++ (void *)t, t->set.ov_alpha_off);
++ dev_dbg(t->dev, "[0x%p]\tov_alpha_stride = %d\n",
++ (void *)t, t->set.ov_alpha_stride);
++ } else
++ dev_dbg(t->dev, "[0x%p]globle alpha enabled with value 0x%x\n",
++ (void *)t, t->overlay.alpha.gvalue);
++ if (t->overlay.colorkey.enable)
++ dev_dbg(t->dev, "[0x%p]colorkey enabled with value 0x%x\n",
++ (void *)t, t->overlay.colorkey.value);
++ }
++
++ dev_dbg(t->dev, "[0x%p]want task_id = %d\n", (void *)t, t->task_id);
++ dev_dbg(t->dev, "[0x%p]want task mode is 0x%x\n",
++ (void *)t, t->set.mode);
++ dev_dbg(t->dev, "[0x%p]\tIC_MODE = 0x%x\n", (void *)t, IC_MODE);
++ dev_dbg(t->dev, "[0x%p]\tROT_MODE = 0x%x\n", (void *)t, ROT_MODE);
++ dev_dbg(t->dev, "[0x%p]\tVDI_MODE = 0x%x\n", (void *)t, VDI_MODE);
++ dev_dbg(t->dev, "[0x%p]\tTask_no = 0x%x\n\n\n", (void *)t, t->task_no);
++}
++
++static void dump_check_err(struct device *dev, int err)
++{
++ switch (err) {
++ case IPU_CHECK_ERR_INPUT_CROP:
++ dev_err(dev, "input crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_OUTPUT_CROP:
++ dev_err(dev, "output crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_OVERLAY_CROP:
++ dev_err(dev, "overlay crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_INPUT_OVER_LIMIT:
++ dev_err(dev, "input over limitation\n");
++ break;
++ case IPU_CHECK_ERR_OVERLAY_WITH_VDI:
++ dev_err(dev, "do not support overlay with deinterlace\n");
++ break;
++ case IPU_CHECK_ERR_OV_OUT_NO_FIT:
++ dev_err(dev,
++ "width/height of overlay and ic output should be same\n");
++ break;
++ case IPU_CHECK_ERR_PROC_NO_NEED:
++ dev_err(dev, "no ipu processing need\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_INPUTW_OVER:
++ dev_err(dev, "split mode input width overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_INPUTH_OVER:
++ dev_err(dev, "split mode input height overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER:
++ dev_err(dev, "split mode output width overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER:
++ dev_err(dev, "split mode output height overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_WITH_ROT:
++ dev_err(dev, "not support split mode with rotation\n");
++ break;
++ case IPU_CHECK_ERR_W_DOWNSIZE_OVER:
++ dev_err(dev, "horizontal downsizing ratio overflow\n");
++ break;
++ case IPU_CHECK_ERR_H_DOWNSIZE_OVER:
++ dev_err(dev, "vertical downsizing ratio overflow\n");
++ break;
++ default:
++ break;
++ }
++}
++
++static void dump_check_warn(struct device *dev, int warn)
++{
++ if (warn & IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN)
++ dev_warn(dev, "input u/v offset not 8 align\n");
++ if (warn & IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN)
++ dev_warn(dev, "output u/v offset not 8 align\n");
++ if (warn & IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN)
++ dev_warn(dev, "overlay u/v offset not 8 align\n");
++}
++
++static int set_crop(struct ipu_crop *crop, int width, int height, int fmt)
++{
++ if ((width == 0) || (height == 0)) {
++ pr_err("Invalid param: width=%d, height=%d\n", width, height);
++ return -EINVAL;
++ }
++
++ if ((IPU_PIX_FMT_TILED_NV12 == fmt) ||
++ (IPU_PIX_FMT_TILED_NV12F == fmt)) {
++ if (crop->w || crop->h) {
++ if (((crop->w + crop->pos.x) > width)
++ || ((crop->h + crop->pos.y) > height)
++ || (0 != (crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->pos.x % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->pos.y % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ ) {
++ pr_err("set_crop error MB align.\n");
++ return -EINVAL;
++ }
++ } else {
++ crop->pos.x = 0;
++ crop->pos.y = 0;
++ crop->w = width;
++ crop->h = height;
++ if ((0 != (crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))) {
++ pr_err("set_crop error w/h MB align.\n");
++ return -EINVAL;
++ }
++ }
++ } else {
++ if (crop->w || crop->h) {
++ if (((crop->w + crop->pos.x) > (width + 16))
++ || ((crop->h + crop->pos.y) > height + 16)) {
++ pr_err("set_crop error exceeds width/height.\n");
++ return -EINVAL;
++ }
++ } else {
++ crop->pos.x = 0;
++ crop->pos.y = 0;
++ crop->w = width;
++ crop->h = height;
++ }
++ crop->w -= crop->w%8;
++ crop->h -= crop->h%8;
++ }
++
++ if ((crop->w == 0) || (crop->h == 0)) {
++ pr_err("Invalid crop param: crop.w=%d, crop.h=%d\n",
++ crop->w, crop->h);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void update_offset(unsigned int fmt,
++ unsigned int width, unsigned int height,
++ unsigned int pos_x, unsigned int pos_y,
++ int *off, int *uoff, int *voff, int *stride)
++{
++ /* NOTE: u v offset should based on start point of off*/
++ switch (fmt) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + (width/2) * (pos_y/2) + pos_x/2;
++ /* In case height is odd, round up to even */
++ *voff = *uoff + (width/2) * ((height+1)/2);
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ *off = pos_y * width + pos_x;
++ *voff = (width * (height - pos_y) - pos_x)
++ + (width/2) * (pos_y/2) + pos_x/2;
++ /* In case height is odd, round up to even */
++ *uoff = *voff + (width/2) * ((height+1)/2);
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ *off = pos_y * width + pos_x;
++ *voff = (width * (height - pos_y) - pos_x)
++ + (width/2) * pos_y + pos_x/2;
++ *uoff = *voff + (width/2) * height;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + (width/2) * pos_y + pos_x/2;
++ *voff = *uoff + (width/2) * height;
++ break;
++ case IPU_PIX_FMT_YUV444P:
++ *off = pos_y * width + pos_x;
++ *uoff = width * height;
++ *voff = width * height * 2;
++ break;
++ case IPU_PIX_FMT_NV12:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + width * (pos_y/2) + pos_x;
++ break;
++ case IPU_PIX_FMT_TILED_NV12:
++ /*
++ * tiled format, progressive:
++ * assuming that line is aligned with MB height (aligned to 16)
++ * offset = line * stride + (pixel / MB_width) * pixels_in_MB
++ * = line * stride + (pixel / 16) * 256
++ * = line * stride + pixel * 16
++ */
++ *off = pos_y * width + (pos_x << 4);
++ *uoff = ALIGN(width * height, SZ_4K) + (*off >> 1) - *off;
++ break;
++ case IPU_PIX_FMT_TILED_NV12F:
++ /*
++ * tiled format, interlaced:
++ * same as above, only number of pixels in MB is 128,
++ * instead of 256
++ */
++ *off = (pos_y >> 1) * width + (pos_x << 3);
++ *uoff = ALIGN(width * height/2, SZ_4K) + (*off >> 1) - *off;
++ break;
++ default:
++ *off = (pos_y * width + pos_x) * fmt_to_bpp(fmt)/8;
++ break;
++ }
++ *stride = width * bytes_per_pixel(fmt);
++}
++
++static int update_split_setting(struct ipu_task_entry *t, bool vdi_split)
++{
++ struct stripe_param left_stripe;
++ struct stripe_param right_stripe;
++ struct stripe_param up_stripe;
++ struct stripe_param down_stripe;
++ u32 iw, ih, ow, oh;
++ u32 max_width;
++ int ret;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT)
++ return IPU_CHECK_ERR_SPLIT_WITH_ROT;
++
++ iw = t->input.crop.w;
++ ih = t->input.crop.h;
++
++ ow = t->output.crop.w;
++ oh = t->output.crop.h;
++
++ memset(&left_stripe, 0, sizeof(left_stripe));
++ memset(&right_stripe, 0, sizeof(right_stripe));
++ memset(&up_stripe, 0, sizeof(up_stripe));
++ memset(&down_stripe, 0, sizeof(down_stripe));
++
++ if (t->set.split_mode & RL_SPLIT) {
++ /*
++ * We do want equal strips: initialize stripes in case
++ * calc_stripes returns before actually doing the calculation
++ */
++ left_stripe.input_width = iw / 2;
++ left_stripe.output_width = ow / 2;
++ right_stripe.input_column = iw / 2;
++ right_stripe.output_column = ow / 2;
++
++ if (vdi_split)
++ max_width = soc_max_vdi_in_width();
++ else
++ max_width = soc_max_out_width();
++ ret = ipu_calc_stripes_sizes(iw,
++ ow,
++ max_width,
++ (((unsigned long long)1) << 32), /* 32bit for fractional*/
++ 1, /* equal stripes */
++ t->input.format,
++ t->output.format,
++ &left_stripe,
++ &right_stripe);
++ if (ret < 0)
++ return IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++ else if (ret)
++ dev_dbg(t->dev, "Warn: no:0x%x,calc_stripes ret:%d\n",
++ t->task_no, ret);
++ t->set.sp_setting.iw = left_stripe.input_width;
++ t->set.sp_setting.ow = left_stripe.output_width;
++ t->set.sp_setting.outh_resize_ratio = left_stripe.irr;
++ t->set.sp_setting.i_left_pos = left_stripe.input_column;
++ t->set.sp_setting.o_left_pos = left_stripe.output_column;
++ t->set.sp_setting.i_right_pos = right_stripe.input_column;
++ t->set.sp_setting.o_right_pos = right_stripe.output_column;
++ } else {
++ t->set.sp_setting.iw = iw;
++ t->set.sp_setting.ow = ow;
++ t->set.sp_setting.outh_resize_ratio = 0;
++ t->set.sp_setting.i_left_pos = 0;
++ t->set.sp_setting.o_left_pos = 0;
++ t->set.sp_setting.i_right_pos = 0;
++ t->set.sp_setting.o_right_pos = 0;
++ }
++ if ((t->set.sp_setting.iw + t->set.sp_setting.i_right_pos) > (iw+16))
++ return IPU_CHECK_ERR_SPLIT_INPUTW_OVER;
++ if (((t->set.sp_setting.ow + t->set.sp_setting.o_right_pos) > ow)
++ || (t->set.sp_setting.ow > soc_max_out_width()))
++ return IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER;
++ if (rounddown(t->set.sp_setting.ow, 8) * 8 <=
++ rounddown(t->set.sp_setting.iw, 8))
++ return IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++
++ if (t->set.split_mode & UD_SPLIT) {
++ /*
++ * We do want equal strips: initialize stripes in case
++ * calc_stripes returns before actually doing the calculation
++ */
++ up_stripe.input_width = ih / 2;
++ up_stripe.output_width = oh / 2;
++ down_stripe.input_column = ih / 2;
++ down_stripe.output_column = oh / 2;
++ ret = ipu_calc_stripes_sizes(ih,
++ oh,
++ soc_max_out_height(),
++ (((unsigned long long)1) << 32), /* 32bit for fractional*/
++ 0x1 | 0x2, /* equal stripes and vertical */
++ t->input.format,
++ t->output.format,
++ &up_stripe,
++ &down_stripe);
++ if (ret < 0)
++ return IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++ else if (ret)
++ dev_err(t->dev, "Warn: no:0x%x,calc_stripes ret:%d\n",
++ t->task_no, ret);
++ t->set.sp_setting.ih = up_stripe.input_width;
++ t->set.sp_setting.oh = up_stripe.output_width;
++ t->set.sp_setting.outv_resize_ratio = up_stripe.irr;
++ t->set.sp_setting.i_top_pos = up_stripe.input_column;
++ t->set.sp_setting.o_top_pos = up_stripe.output_column;
++ t->set.sp_setting.i_bottom_pos = down_stripe.input_column;
++ t->set.sp_setting.o_bottom_pos = down_stripe.output_column;
++ } else {
++ t->set.sp_setting.ih = ih;
++ t->set.sp_setting.oh = oh;
++ t->set.sp_setting.outv_resize_ratio = 0;
++ t->set.sp_setting.i_top_pos = 0;
++ t->set.sp_setting.o_top_pos = 0;
++ t->set.sp_setting.i_bottom_pos = 0;
++ t->set.sp_setting.o_bottom_pos = 0;
++ }
++
++ /* downscale case: enforce limits */
++ if (((t->set.sp_setting.ih + t->set.sp_setting.i_bottom_pos) > (ih))
++ && (t->set.sp_setting.ih >= t->set.sp_setting.oh))
++ return IPU_CHECK_ERR_SPLIT_INPUTH_OVER;
++ /* upscale case: relax limits because ipu_calc_stripes_sizes() may
++ create input stripe that falls just outside of the input window */
++ else if ((t->set.sp_setting.ih + t->set.sp_setting.i_bottom_pos)
++ > (ih+16))
++ return IPU_CHECK_ERR_SPLIT_INPUTH_OVER;
++ if (((t->set.sp_setting.oh + t->set.sp_setting.o_bottom_pos) > oh)
++ || (t->set.sp_setting.oh > soc_max_out_height()))
++ return IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER;
++ if (rounddown(t->set.sp_setting.oh, 8) * 8 <=
++ rounddown(t->set.sp_setting.ih, 8))
++ return IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++
++ return IPU_CHECK_OK;
++}
++
++static int check_task(struct ipu_task_entry *t)
++{
++ int tmp;
++ int ret = IPU_CHECK_OK;
++ int timeout;
++ bool vdi_split = false;
++ int ocw, och;
++
++ if ((IPU_PIX_FMT_TILED_NV12 == t->overlay.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->overlay.format) ||
++ (IPU_PIX_FMT_TILED_NV12 == t->output.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->output.format) ||
++ ((IPU_PIX_FMT_TILED_NV12F == t->input.format) &&
++ !t->input.deinterlace.enable)) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++
++ /* check input */
++ ret = set_crop(&t->input.crop, t->input.width, t->input.height,
++ t->input.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_INPUT_CROP;
++ goto done;
++ } else
++ update_offset(t->input.format, t->input.width, t->input.height,
++ t->input.crop.pos.x, t->input.crop.pos.y,
++ &t->set.i_off, &t->set.i_uoff,
++ &t->set.i_voff, &t->set.istride);
++
++ /* check output */
++ ret = set_crop(&t->output.crop, t->output.width, t->output.height,
++ t->output.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_OUTPUT_CROP;
++ goto done;
++ } else
++ update_offset(t->output.format,
++ t->output.width, t->output.height,
++ t->output.crop.pos.x, t->output.crop.pos.y,
++ &t->set.o_off, &t->set.o_uoff,
++ &t->set.o_voff, &t->set.ostride);
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*
++ * Cache output width and height and
++ * swap them so that we may check
++ * downsize overflow correctly.
++ */
++ ocw = t->output.crop.h;
++ och = t->output.crop.w;
++ } else {
++ ocw = t->output.crop.w;
++ och = t->output.crop.h;
++ }
++
++ if (ocw * 8 <= t->input.crop.w) {
++ ret = IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++ goto done;
++ }
++
++ if (och * 8 <= t->input.crop.h) {
++ ret = IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++ goto done;
++ }
++
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ if ((t->input.crop.w > soc_max_in_width(1)) ||
++ (t->input.crop.h > soc_max_in_height())) {
++ ret = IPU_CHECK_ERR_INPUT_OVER_LIMIT;
++ goto done;
++ }
++ /* output fmt: NV12 and YUYV, now don't support resize */
++ if (((IPU_PIX_FMT_NV12 != t->output.format) &&
++ (IPU_PIX_FMT_YUYV != t->output.format)) ||
++ (t->input.crop.w != t->output.crop.w) ||
++ (t->input.crop.h != t->output.crop.h)) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++ }
++
++ /* check overlay if there is */
++ if (t->overlay_en) {
++ if (t->input.deinterlace.enable) {
++ ret = IPU_CHECK_ERR_OVERLAY_WITH_VDI;
++ goto done;
++ }
++
++ ret = set_crop(&t->overlay.crop, t->overlay.width,
++ t->overlay.height, t->overlay.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_OVERLAY_CROP;
++ goto done;
++ } else {
++ ocw = t->output.crop.w;
++ och = t->output.crop.h;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ ocw = t->output.crop.h;
++ och = t->output.crop.w;
++ }
++ if ((t->overlay.crop.w != ocw) ||
++ (t->overlay.crop.h != och)) {
++ ret = IPU_CHECK_ERR_OV_OUT_NO_FIT;
++ goto done;
++ }
++
++ update_offset(t->overlay.format,
++ t->overlay.width, t->overlay.height,
++ t->overlay.crop.pos.x, t->overlay.crop.pos.y,
++ &t->set.ov_off, &t->set.ov_uoff,
++ &t->set.ov_voff, &t->set.ovstride);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ t->set.ov_alpha_stride = t->overlay.width;
++ t->set.ov_alpha_off = t->overlay.crop.pos.y *
++ t->overlay.width + t->overlay.crop.pos.x;
++ }
++ }
++ }
++
++ /* input overflow? */
++ if (!((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format))) {
++ if ((t->input.crop.w > soc_max_in_width(0)) ||
++ (t->input.crop.h > soc_max_in_height())) {
++ ret = IPU_CHECK_ERR_INPUT_OVER_LIMIT;
++ goto done;
++ }
++ }
++
++ /* check task mode */
++ t->set.mode = NULL_MODE;
++ t->set.split_mode = NO_SPLIT;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*output swap*/
++ tmp = t->output.crop.w;
++ t->output.crop.w = t->output.crop.h;
++ t->output.crop.h = tmp;
++ }
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT)
++ t->set.mode |= ROT_MODE;
++
++ /*need resize or CSC?*/
++ if ((t->input.crop.w != t->output.crop.w) ||
++ (t->input.crop.h != t->output.crop.h) ||
++ need_csc(t->input.format, t->output.format))
++ t->set.mode |= IC_MODE;
++
++ /*need flip?*/
++ if ((t->set.mode == NULL_MODE) && (t->output.rotate > IPU_ROTATE_NONE))
++ t->set.mode |= IC_MODE;
++
++ /*need IDMAC do format(same color space)?*/
++ if ((t->set.mode == NULL_MODE) && (t->input.format != t->output.format))
++ t->set.mode |= IC_MODE;
++
++ /*overlay support*/
++ if (t->overlay_en)
++ t->set.mode |= IC_MODE;
++
++ /*deinterlace*/
++ if (t->input.deinterlace.enable) {
++ t->set.mode &= ~IC_MODE;
++ t->set.mode |= VDI_MODE;
++ }
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ if (t->set.mode & ROT_MODE) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++ t->set.mode |= VDOA_MODE;
++ if (IPU_PIX_FMT_TILED_NV12F == t->input.format)
++ t->set.mode |= VDOA_BAND_MODE;
++ t->set.mode &= ~IC_MODE;
++ }
++
++ if ((t->set.mode & (IC_MODE | VDI_MODE)) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ if (t->output.crop.w > soc_max_out_width())
++ t->set.split_mode |= RL_SPLIT;
++ if (t->output.crop.h > soc_max_out_height())
++ t->set.split_mode |= UD_SPLIT;
++ if (!t->set.split_mode && (t->set.mode & VDI_MODE) &&
++ (t->input.crop.w > soc_max_vdi_in_width())) {
++ t->set.split_mode |= RL_SPLIT;
++ vdi_split = true;
++ }
++ if (t->set.split_mode) {
++ if ((t->set.split_mode == RL_SPLIT) ||
++ (t->set.split_mode == UD_SPLIT))
++ timeout = DEF_TIMEOUT_MS * 2 + DEF_DELAY_MS;
++ else
++ timeout = DEF_TIMEOUT_MS * 4 + DEF_DELAY_MS;
++ if (t->timeout < timeout)
++ t->timeout = timeout;
++
++ ret = update_split_setting(t, vdi_split);
++ if (ret > IPU_CHECK_ERR_MIN)
++ goto done;
++ }
++ }
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*output swap*/
++ tmp = t->output.crop.w;
++ t->output.crop.w = t->output.crop.h;
++ t->output.crop.h = tmp;
++ }
++
++ if (t->set.mode == NULL_MODE) {
++ ret = IPU_CHECK_ERR_PROC_NO_NEED;
++ goto done;
++ }
++
++ if ((t->set.i_uoff % 8) || (t->set.i_voff % 8))
++ ret |= IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN;
++ if ((t->set.o_uoff % 8) || (t->set.o_voff % 8))
++ ret |= IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN;
++ if (t->overlay_en && ((t->set.ov_uoff % 8) || (t->set.ov_voff % 8)))
++ ret |= IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN;
++
++done:
++ /* dump msg */
++ if (debug) {
++ if (ret > IPU_CHECK_ERR_MIN)
++ dump_check_err(t->dev, ret);
++ else if (ret != IPU_CHECK_OK)
++ dump_check_warn(t->dev, ret);
++ }
++
++ return ret;
++}
++
++static int prepare_task(struct ipu_task_entry *t)
++{
++ int ret = 0;
++
++ ret = check_task(t);
++ if (ret > IPU_CHECK_ERR_MIN)
++ return -EINVAL;
++
++ if (t->set.mode & VDI_MODE) {
++ t->task_id = IPU_TASK_ID_VF;
++ t->set.task = VDI_VF;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_VF;
++ }
++
++ if (VDOA_MODE == t->set.mode) {
++ if (t->set.task != 0) {
++ dev_err(t->dev, "ERR: vdoa only task:0x%x, [0x%p].\n",
++ t->set.task, t);
++ return -EINVAL;
++ }
++ t->set.task |= VDOA_ONLY;
++ }
++
++ if (VDOA_BAND_MODE & t->set.mode) {
++ /* to save band size: 1<<3 = 8 lines */
++ t->set.band_lines = 3;
++ }
++
++ dump_task_info(t);
++
++ return ret;
++}
++
++static uint32_t ic_vf_pp_is_busy(struct ipu_soc *ipu, bool is_vf)
++{
++ uint32_t status;
++ uint32_t status_vf;
++ uint32_t status_rot;
++
++ if (is_vf) {
++ status = ipu_channel_status(ipu, MEM_VDI_PRP_VF_MEM);
++ status_vf = ipu_channel_status(ipu, MEM_PRP_VF_MEM);
++ status_rot = ipu_channel_status(ipu, MEM_ROT_VF_MEM);
++ return status || status_vf || status_rot;
++ } else {
++ status = ipu_channel_status(ipu, MEM_PP_MEM);
++ status_rot = ipu_channel_status(ipu, MEM_ROT_PP_MEM);
++ return status || status_rot;
++ }
++}
++
++static int _get_vdoa_ipu_res(struct ipu_task_entry *t)
++{
++ int i;
++ struct ipu_soc *ipu;
++ u8 *used;
++ uint32_t found_ipu = 0;
++ uint32_t found_vdoa = 0;
++ struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
++
++ mutex_lock(&tbl->lock);
++ if (t->set.mode & VDOA_MODE) {
++ if (NULL != t->vdoa_handle)
++ found_vdoa = 1;
++ else {
++ found_vdoa = tbl->vdoa_used ? 0 : 1;
++ if (found_vdoa) {
++ tbl->vdoa_used = 1;
++ vdoa_get_handle(&t->vdoa_handle);
++ } else
++ /* first get vdoa->ipu resource sequence */
++ goto out;
++ if (t->set.task & VDOA_ONLY)
++ goto out;
++ }
++ }
++
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu = ipu_get_soc(i);
++ if (IS_ERR(ipu))
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, ipu:%d\n",
++ t->task_no, found_vdoa, i);
++
++ used = &tbl->used[i][IPU_PP_CH_VF];
++ if (t->set.mode & VDI_MODE) {
++ if (0 == *used) {
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ } else if ((t->set.mode & IC_MODE) || only_rot(t->set.mode)) {
++ if (0 == *used) {
++ t->task_id = IPU_TASK_ID_VF;
++ if (t->set.mode & IC_MODE)
++ t->set.task |= IC_VF;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_VF;
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ } else
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, mode:0x%x\n",
++ t->task_no, found_vdoa, t->set.mode);
++ }
++ if (found_ipu)
++ goto next;
++
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu = ipu_get_soc(i);
++ if (IS_ERR(ipu))
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, ipu:%d\n",
++ t->task_no, found_vdoa, i);
++
++ if ((t->set.mode & IC_MODE) || only_rot(t->set.mode)) {
++ used = &tbl->used[i][IPU_PP_CH_PP];
++ if (0 == *used) {
++ t->task_id = IPU_TASK_ID_PP;
++ if (t->set.mode & IC_MODE)
++ t->set.task |= IC_PP;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_PP;
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ }
++ }
++
++next:
++ if (found_ipu) {
++ t->ipu = ipu;
++ t->ipu_id = i;
++ t->dev = ipu->dev;
++ if (atomic_inc_return(&t->res_get) == 2)
++ dev_err(t->dev,
++ "ERR no:0x%x,found_vdoa:%d,get ipu twice\n",
++ t->task_no, found_vdoa);
++ }
++out:
++ dev_dbg(t->dev,
++ "%s:no:0x%x,found_vdoa:%d, found_ipu:%d\n",
++ __func__, t->task_no, found_vdoa, found_ipu);
++ mutex_unlock(&tbl->lock);
++ if (t->set.task & VDOA_ONLY)
++ return found_vdoa;
++ else if (t->set.mode & VDOA_MODE)
++ return found_vdoa && found_ipu;
++ else
++ return found_ipu;
++}
++
++static void put_vdoa_ipu_res(struct ipu_task_entry *tsk, int vdoa_only)
++{
++ int ret;
++ int rel_vdoa = 0, rel_ipu = 0;
++ struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
++
++ mutex_lock(&tbl->lock);
++ if (tsk->set.mode & VDOA_MODE) {
++ if (!tbl->vdoa_used && tsk->vdoa_handle)
++ dev_err(tsk->dev,
++ "ERR no:0x%x,vdoa not used,mode:0x%x\n",
++ tsk->task_no, tsk->set.mode);
++ if (tbl->vdoa_used && tsk->vdoa_handle) {
++ tbl->vdoa_used = 0;
++ vdoa_put_handle(&tsk->vdoa_handle);
++ if (tsk->ipu)
++ tsk->ipu->vdoa_en = 0;
++ rel_vdoa = 1;
++ if (vdoa_only || (tsk->set.task & VDOA_ONLY))
++ goto out;
++ }
++ }
++
++ tbl->used[tsk->ipu_id][tsk->task_id - 1] = 0;
++ rel_ipu = 1;
++ ret = atomic_inc_return(&tsk->res_free);
++ if (ret == 2)
++ dev_err(tsk->dev,
++ "ERR no:0x%x,rel_vdoa:%d,put ipu twice\n",
++ tsk->task_no, rel_vdoa);
++out:
++ dev_dbg(tsk->dev,
++ "%s:no:0x%x,rel_vdoa:%d, rel_ipu:%d\n",
++ __func__, tsk->task_no, rel_vdoa, rel_ipu);
++ mutex_unlock(&tbl->lock);
++}
++
++static int get_vdoa_ipu_res(struct ipu_task_entry *t)
++{
++ int ret;
++ uint32_t found = 0;
++
++ found = _get_vdoa_ipu_res(t);
++ if (!found) {
++ t->ipu_id = -1;
++ t->ipu = NULL;
++ /* blocking to get resource */
++ ret = atomic_inc_return(&req_cnt);
++ dev_dbg(t->dev,
++ "wait_res:no:0x%x,req_cnt:%d\n", t->task_no, ret);
++ ret = wait_event_timeout(res_waitq, _get_vdoa_ipu_res(t),
++ msecs_to_jiffies(t->timeout - DEF_DELAY_MS));
++ if (ret == 0) {
++ dev_err(t->dev, "ERR[0x%p,no-0x%x] wait_res timeout:%dms!\n",
++ t, t->task_no, t->timeout - DEF_DELAY_MS);
++ ret = -ETIMEDOUT;
++ t->state = STATE_RES_TIMEOUT;
++ goto out;
++ } else {
++ if (!(t->set.task & VDOA_ONLY) && (!t->ipu))
++ dev_err(t->dev,
++ "ERR[no-0x%x] can not get ipu!\n",
++ t->task_no);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0)
++ ret = atomic_dec_return(&req_cnt);
++ else
++ dev_err(t->dev,
++ "ERR[no-0x%x] req_cnt:%d mismatch!\n",
++ t->task_no, ret);
++ dev_dbg(t->dev, "no-0x%x,[0x%p],req_cnt:%d, got_res!\n",
++ t->task_no, t, ret);
++ found = 1;
++ }
++ }
++
++out:
++ return found;
++}
++
++static struct ipu_task_entry *create_task_entry(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++
++ tsk = kzalloc(sizeof(struct ipu_task_entry), GFP_KERNEL);
++ if (!tsk)
++ return ERR_PTR(-ENOMEM);
++ kref_init(&tsk->refcount);
++ tsk->state = -EINVAL;
++ tsk->ipu_id = -1;
++ tsk->dev = ipu_dev;
++ tsk->input = task->input;
++ tsk->output = task->output;
++ tsk->overlay_en = task->overlay_en;
++ if (tsk->overlay_en)
++ tsk->overlay = task->overlay;
++ if (task->timeout > DEF_TIMEOUT_MS)
++ tsk->timeout = task->timeout;
++ else
++ tsk->timeout = DEF_TIMEOUT_MS;
++
++ return tsk;
++}
++
++static void task_mem_free(struct kref *ref)
++{
++ struct ipu_task_entry *tsk =
++ container_of(ref, struct ipu_task_entry, refcount);
++ kfree(tsk);
++}
++
++int create_split_child_task(struct ipu_split_task *sp_task)
++{
++ int ret = 0;
++ struct ipu_task_entry *tsk;
++
++ tsk = create_task_entry(&sp_task->task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ sp_task->child_task = tsk;
++ tsk->task_no = sp_task->task_no;
++
++ ret = prepare_task(tsk);
++ if (ret < 0)
++ goto err;
++
++ tsk->parent = sp_task->parent_task;
++ tsk->set.sp_setting = sp_task->parent_task->set.sp_setting;
++
++ list_add(&tsk->node, &tsk->parent->split_list);
++ dev_dbg(tsk->dev, "[0x%p] sp_tsk Q list,no-0x%x\n", tsk, tsk->task_no);
++ tsk->state = STATE_QUEUE;
++ CHECK_PERF(&tsk->ts_queue);
++err:
++ return ret;
++}
++
++static inline int sp_task_check_done(struct ipu_split_task *sp_task,
++ struct ipu_task_entry *parent, int num, int *idx)
++{
++ int i;
++ int ret = 0;
++ struct ipu_task_entry *tsk;
++ struct mutex *lock = &parent->split_lock;
++
++ *idx = -EINVAL;
++ mutex_lock(lock);
++ for (i = 0; i < num; i++) {
++ tsk = sp_task[i].child_task;
++ if (tsk && tsk->split_done) {
++ *idx = i;
++ ret = 1;
++ goto out;
++ }
++ }
++
++out:
++ mutex_unlock(lock);
++ return ret;
++}
++
++static int create_split_task(
++ int stripe,
++ struct ipu_split_task *sp_task)
++{
++ struct ipu_task *task = &(sp_task->task);
++ struct ipu_task_entry *t = sp_task->parent_task;
++ int ret;
++
++ sp_task->task_no |= stripe;
++
++ task->input = t->input;
++ task->output = t->output;
++ task->overlay_en = t->overlay_en;
++ if (task->overlay_en)
++ task->overlay = t->overlay;
++ task->task_id = t->task_id;
++ if ((t->set.split_mode == RL_SPLIT) ||
++ (t->set.split_mode == UD_SPLIT))
++ task->timeout = t->timeout / 2;
++ else
++ task->timeout = t->timeout / 4;
++
++ task->input.crop.w = t->set.sp_setting.iw;
++ task->input.crop.h = t->set.sp_setting.ih;
++ if (task->overlay_en) {
++ task->overlay.crop.w = t->set.sp_setting.ow;
++ task->overlay.crop.h = t->set.sp_setting.oh;
++ }
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ task->output.crop.w = t->set.sp_setting.oh;
++ task->output.crop.h = t->set.sp_setting.ow;
++ t->set.sp_setting.rl_split_line = t->set.sp_setting.o_bottom_pos;
++ t->set.sp_setting.ud_split_line = t->set.sp_setting.o_right_pos;
++
++ } else {
++ task->output.crop.w = t->set.sp_setting.ow;
++ task->output.crop.h = t->set.sp_setting.oh;
++ t->set.sp_setting.rl_split_line = t->set.sp_setting.o_right_pos;
++ t->set.sp_setting.ud_split_line = t->set.sp_setting.o_bottom_pos;
++ }
++
++ if (stripe & LEFT_STRIPE)
++ task->input.crop.pos.x += t->set.sp_setting.i_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->input.crop.pos.x += t->set.sp_setting.i_right_pos;
++ if (stripe & UP_STRIPE)
++ task->input.crop.pos.y += t->set.sp_setting.i_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->input.crop.pos.y += t->set.sp_setting.i_bottom_pos;
++
++ if (task->overlay_en) {
++ if (stripe & LEFT_STRIPE)
++ task->overlay.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->overlay.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->overlay.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->overlay.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ }
++
++ switch (t->output.rotate) {
++ case IPU_ROTATE_NONE:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ break;
++ case IPU_ROTATE_VERT_FLIP:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ break;
++ case IPU_ROTATE_HORIZ_FLIP:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ break;
++ case IPU_ROTATE_180:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ break;
++ case IPU_ROTATE_90_RIGHT:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_right_pos;
++ break;
++ case IPU_ROTATE_90_RIGHT_HFLIP:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_bottom_pos;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_right_pos;
++ break;
++ case IPU_ROTATE_90_RIGHT_VFLIP:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ break;
++ case IPU_ROTATE_90_LEFT:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_bottom_pos;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ break;
++ default:
++ dev_err(t->dev, "ERR:should not be here\n");
++ break;
++ }
++
++ ret = create_split_child_task(sp_task);
++ if (ret < 0)
++ dev_err(t->dev, "ERR:create_split_child_task() ret:%d\n", ret);
++ return ret;
++}
++
++static int queue_split_task(struct ipu_task_entry *t,
++ struct ipu_split_task *sp_task, uint32_t size)
++{
++ int err[4];
++ int ret = 0;
++ int i, j;
++ struct ipu_task_entry *tsk = NULL;
++ struct mutex *lock = &t->split_lock;
++ struct mutex *vdic_lock = &t->vdic_lock;
++
++ dev_dbg(t->dev, "Split task 0x%p, no-0x%x, size:%d\n",
++ t, t->task_no, size);
++ mutex_init(lock);
++ mutex_init(vdic_lock);
++ init_waitqueue_head(&t->split_waitq);
++ INIT_LIST_HEAD(&t->split_list);
++ for (j = 0; j < size; j++) {
++ memset(&sp_task[j], 0, sizeof(*sp_task));
++ sp_task[j].parent_task = t;
++ sp_task[j].task_no = t->task_no;
++ }
++
++ if (t->set.split_mode == RL_SPLIT) {
++ i = 0;
++ err[i] = create_split_task(RIGHT_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(LEFT_STRIPE, &sp_task[i]);
++ } else if (t->set.split_mode == UD_SPLIT) {
++ i = 0;
++ err[i] = create_split_task(DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(UP_STRIPE, &sp_task[i]);
++ } else {
++ i = 0;
++ err[i] = create_split_task(RIGHT_STRIPE | DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(LEFT_STRIPE | DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 2;
++ err[i] = create_split_task(RIGHT_STRIPE | UP_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 3;
++ err[i] = create_split_task(LEFT_STRIPE | UP_STRIPE, &sp_task[i]);
++ }
++
++err_start:
++ for (j = 0; j < (i + 1); j++) {
++ if (err[j] < 0) {
++ if (sp_task[j].child_task)
++ dev_err(t->dev,
++ "sp_task[%d],no-0x%x fail state:%d, queue err:%d.\n",
++ j, sp_task[j].child_task->task_no,
++ sp_task[j].child_task->state, err[j]);
++ goto err_exit;
++ }
++ dev_dbg(t->dev, "[0x%p] sp_task[%d], no-0x%x state:%s, queue ret:%d.\n",
++ sp_task[j].child_task, j, sp_task[j].child_task->task_no,
++ state_msg[sp_task[j].child_task->state].msg, err[j]);
++ }
++
++ return ret;
++
++err_exit:
++ for (j = 0; j < (i + 1); j++) {
++ if (err[j] < 0 && !ret)
++ ret = err[j];
++ tsk = sp_task[j].child_task;
++ if (!tsk)
++ continue;
++ kfree(tsk);
++ }
++ t->state = STATE_ERR;
++ return ret;
++
++}
++
++static int init_tiled_buf(struct ipu_soc *ipu, struct ipu_task_entry *t,
++ ipu_channel_t channel, uint32_t ch_type)
++{
++ int ret = 0;
++ int i;
++ uint32_t ipu_fmt;
++ dma_addr_t inbuf_base = 0;
++ u32 field_size;
++ struct vdoa_params param;
++ struct vdoa_ipu_buf buf;
++ struct ipu_soc *ipu_idx;
++ u32 ipu_stride, obuf_size;
++ u32 height, width;
++ ipu_buffer_t type;
++
++ if ((IPU_PIX_FMT_YUYV != t->output.format) &&
++ (IPU_PIX_FMT_NV12 != t->output.format)) {
++ dev_err(t->dev, "ERR:[0x%d] output format\n", t->task_no);
++ return -EINVAL;
++ }
++
++ memset(&param, 0, sizeof(param));
++ /* init channel tiled bufs */
++ if (deinterlace_3_field(t) &&
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ field_size = tiled_filed_size(t);
++ if (INPUT_CHAN_VDI_P == ch_type) {
++ inbuf_base = t->input.paddr + field_size;
++ param.vfield_buf.prev_veba = inbuf_base + t->set.i_off;
++ } else if (INPUT_CHAN == ch_type) {
++ inbuf_base = t->input.paddr_n;
++ param.vfield_buf.cur_veba = inbuf_base + t->set.i_off;
++ } else if (INPUT_CHAN_VDI_N == ch_type) {
++ inbuf_base = t->input.paddr_n + field_size;
++ param.vfield_buf.next_veba = inbuf_base + t->set.i_off;
++ } else
++ return -EINVAL;
++ height = t->input.crop.h >> 1; /* field format for vdoa */
++ width = t->input.crop.w;
++ param.vfield_buf.vubo = t->set.i_uoff;
++ param.interlaced = 1;
++ param.scan_order = 1;
++ type = IPU_INPUT_BUFFER;
++ } else if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) &&
++ (INPUT_CHAN == ch_type)) {
++ height = t->input.crop.h;
++ width = t->input.crop.w;
++ param.vframe_buf.veba = t->input.paddr + t->set.i_off;
++ param.vframe_buf.vubo = t->set.i_uoff;
++ type = IPU_INPUT_BUFFER;
++ } else
++ return -EINVAL;
++
++ param.band_mode = (t->set.mode & VDOA_BAND_MODE) ? 1 : 0;
++ if (param.band_mode && (t->set.band_lines != 3) &&
++ (t->set.band_lines != 4) && (t->set.band_lines != 5))
++ return -EINVAL;
++ else if (param.band_mode)
++ param.band_lines = (1 << t->set.band_lines);
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu_idx = ipu_get_soc(i);
++ if (!IS_ERR(ipu_idx) && ipu_idx == ipu)
++ break;
++ }
++ if (t->set.task & VDOA_ONLY)
++ /* dummy, didn't need ipu res */
++ i = 0;
++ if (max_ipu_no == i) {
++ dev_err(t->dev, "ERR:[0x%p] get ipu num\n", t);
++ return -EINVAL;
++ }
++
++ param.ipu_num = i;
++ param.vpu_stride = t->input.width;
++ param.height = height;
++ param.width = width;
++ if (IPU_PIX_FMT_NV12 == t->output.format)
++ param.pfs = VDOA_PFS_NV12;
++ else
++ param.pfs = VDOA_PFS_YUYV;
++ ipu_fmt = (param.pfs == VDOA_PFS_YUYV) ? IPU_PIX_FMT_YUYV :
++ IPU_PIX_FMT_NV12;
++ ipu_stride = param.width * bytes_per_pixel(ipu_fmt);
++ obuf_size = PAGE_ALIGN(param.width * param.height *
++ fmt_to_bpp(ipu_fmt)/8);
++ dev_dbg(t->dev, "band_mode:%d, band_lines:%d\n",
++ param.band_mode, param.band_lines);
++ if (!param.band_mode) {
++ /* note: if only for tiled -> raster convert and
++ no other post-processing, we don't need alloc buf
++ and use output buffer directly.
++ */
++ if (t->set.task & VDOA_ONLY)
++ param.ieba0 = t->output.paddr;
++ else {
++ dev_err(t->dev, "ERR:[0x%d] vdoa task\n", t->task_no);
++ return -EINVAL;
++ }
++ } else {
++ if (IPU_PIX_FMT_TILED_NV12F != t->input.format) {
++ dev_err(t->dev, "ERR [0x%d] vdoa task\n", t->task_no);
++ return -EINVAL;
++ }
++ }
++ ret = vdoa_setup(t->vdoa_handle, &param);
++ if (ret)
++ goto done;
++ vdoa_get_output_buf(t->vdoa_handle, &buf);
++ if (t->set.task & VDOA_ONLY)
++ goto done;
++
++ ret = ipu_init_channel_buffer(ipu,
++ channel,
++ type,
++ ipu_fmt,
++ width,
++ height,
++ ipu_stride,
++ IPU_ROTATE_NONE,
++ buf.ieba0,
++ buf.ieba1,
++ 0,
++ buf.iubo,
++ 0);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ if (param.band_mode) {
++ ret = ipu_set_channel_bandmode(ipu, channel,
++ type, t->set.band_lines);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BAND_FAIL;
++ goto done;
++ }
++ }
++done:
++ return ret;
++}
++
++static int init_tiled_ch_bufs(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++
++ if (IPU_PIX_FMT_TILED_NV12 == t->input.format) {
++ ret = init_tiled_buf(ipu, t, t->set.ic_chan, INPUT_CHAN);
++ CHECK_RETCODE(ret < 0, "init tiled_ch", t->state, done, ret);
++ } else if (IPU_PIX_FMT_TILED_NV12F == t->input.format) {
++ ret = init_tiled_buf(ipu, t, t->set.ic_chan, INPUT_CHAN);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-c", t->state, done, ret);
++ ret = init_tiled_buf(ipu, t, t->set.vdi_ic_p_chan,
++ INPUT_CHAN_VDI_P);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-p", t->state, done, ret);
++ ret = init_tiled_buf(ipu, t, t->set.vdi_ic_n_chan,
++ INPUT_CHAN_VDI_N);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-n", t->state, done, ret);
++ } else {
++ ret = -EINVAL;
++ dev_err(t->dev, "ERR[no-0x%x] invalid fmt:0x%x!\n",
++ t->task_no, t->input.format);
++ }
++
++done:
++ return ret;
++}
++
++static int init_ic(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++ ipu_channel_params_t params;
++ dma_addr_t inbuf = 0, ovbuf = 0, ov_alp_buf = 0;
++ dma_addr_t inbuf_p = 0, inbuf_n = 0;
++ dma_addr_t outbuf = 0;
++ int out_uoff = 0, out_voff = 0, out_rot;
++ int out_w = 0, out_h = 0, out_stride;
++ int out_fmt;
++ u32 vdi_frame_idx = 0;
++
++ memset(&params, 0, sizeof(params));
++
++ /* is it need link a rot channel */
++ if (ic_and_rot(t->set.mode)) {
++ outbuf = t->set.r_paddr;
++ out_w = t->set.r_width;
++ out_h = t->set.r_height;
++ out_stride = t->set.r_stride;
++ out_fmt = t->set.r_fmt;
++ out_uoff = 0;
++ out_voff = 0;
++ out_rot = IPU_ROTATE_NONE;
++ } else {
++ outbuf = t->output.paddr + t->set.o_off;
++ out_w = t->output.crop.w;
++ out_h = t->output.crop.h;
++ out_stride = t->set.ostride;
++ out_fmt = t->output.format;
++ out_uoff = t->set.o_uoff;
++ out_voff = t->set.o_voff;
++ out_rot = t->output.rotate;
++ }
++
++ /* settings */
++ params.mem_prp_vf_mem.in_width = t->input.crop.w;
++ params.mem_prp_vf_mem.out_width = out_w;
++ params.mem_prp_vf_mem.in_height = t->input.crop.h;
++ params.mem_prp_vf_mem.out_height = out_h;
++ params.mem_prp_vf_mem.in_pixel_fmt = t->input.format;
++ params.mem_prp_vf_mem.out_pixel_fmt = out_fmt;
++ params.mem_prp_vf_mem.motion_sel = t->input.deinterlace.motion;
++
++ params.mem_prp_vf_mem.outh_resize_ratio =
++ t->set.sp_setting.outh_resize_ratio;
++ params.mem_prp_vf_mem.outv_resize_ratio =
++ t->set.sp_setting.outv_resize_ratio;
++
++ if (t->overlay_en) {
++ params.mem_prp_vf_mem.in_g_pixel_fmt = t->overlay.format;
++ params.mem_prp_vf_mem.graphics_combine_en = 1;
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_GLOBAL)
++ params.mem_prp_vf_mem.global_alpha_en = 1;
++ else if (t->overlay.alpha.loc_alp_paddr)
++ params.mem_prp_vf_mem.alpha_chan_en = 1;
++ /* otherwise, alpha bending per pixel is used. */
++ params.mem_prp_vf_mem.alpha = t->overlay.alpha.gvalue;
++ if (t->overlay.colorkey.enable) {
++ params.mem_prp_vf_mem.key_color_en = 1;
++ params.mem_prp_vf_mem.key_color = t->overlay.colorkey.value;
++ }
++ }
++
++ if (t->input.deinterlace.enable) {
++ if (t->input.deinterlace.field_fmt & IPU_DEINTERLACE_FIELD_MASK)
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ else
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++
++ if (t->input.deinterlace.field_fmt & IPU_DEINTERLACE_RATE_EN)
++ vdi_frame_idx = t->input.deinterlace.field_fmt &
++ IPU_DEINTERLACE_RATE_FRAME1;
++ }
++
++ if (t->set.mode & VDOA_MODE)
++ ipu->vdoa_en = 1;
++
++ /* init channels */
++ if (!(t->set.task & VDOA_ONLY)) {
++ ret = ipu_init_channel(ipu, t->set.ic_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ }
++
++ if (deinterlace_3_field(t)) {
++ ret = ipu_init_channel(ipu, t->set.vdi_ic_p_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ ret = ipu_init_channel(ipu, t->set.vdi_ic_n_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ }
++
++ /* init channel bufs */
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ ret = init_tiled_ch_bufs(ipu, t);
++ if (ret < 0)
++ goto done;
++ } else {
++ if ((deinterlace_3_field(t)) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ if (params.mem_prp_vf_mem.field_fmt ==
++ IPU_DEINTERLACE_FIELD_TOP) {
++ if (vdi_frame_idx) {
++ inbuf_p = t->input.paddr + t->set.istride +
++ t->set.i_off;
++ inbuf = t->input.paddr_n + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.istride +
++ t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ } else {
++ inbuf_p = t->input.paddr + t->set.i_off;
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.i_off;
++ }
++ } else {
++ if (vdi_frame_idx) {
++ inbuf_p = t->input.paddr + t->set.i_off;
++ inbuf = t->input.paddr_n + t->set.istride + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++ } else {
++ inbuf_p = t->input.paddr + t->set.istride +
++ t->set.i_off;
++ inbuf = t->input.paddr + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.istride +
++ t->set.i_off;
++ }
++ }
++ } else {
++ if (t->input.deinterlace.enable) {
++ if (params.mem_prp_vf_mem.field_fmt ==
++ IPU_DEINTERLACE_FIELD_TOP) {
++ if (vdi_frame_idx) {
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ } else
++ inbuf = t->input.paddr + t->set.i_off;
++ } else {
++ if (vdi_frame_idx) {
++ inbuf = t->input.paddr + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++ } else
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ }
++ } else
++ inbuf = t->input.paddr + t->set.i_off;
++ }
++
++ if (t->overlay_en)
++ ovbuf = t->overlay.paddr + t->set.ov_off;
++ }
++ if (t->overlay_en && (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL))
++ ov_alp_buf = t->overlay.alpha.loc_alp_paddr
++ + t->set.ov_alpha_off;
++
++ if ((IPU_PIX_FMT_TILED_NV12 != t->input.format) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++ if (deinterlace_3_field(t) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.vdi_ic_p_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf_p,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.vdi_ic_n_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf_n,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (t->overlay_en) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER,
++ t->overlay.format,
++ t->overlay.crop.w,
++ t->overlay.crop.h,
++ t->set.ovstride,
++ IPU_ROTATE_NONE,
++ ovbuf,
++ 0,
++ 0,
++ t->set.ov_uoff,
++ t->set.ov_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER,
++ IPU_PIX_FMT_GENERIC,
++ t->overlay.crop.w,
++ t->overlay.crop.h,
++ t->set.ov_alpha_stride,
++ IPU_ROTATE_NONE,
++ ov_alp_buf,
++ 0,
++ 0,
++ 0, 0);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (!(t->set.task & VDOA_ONLY)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_OUTPUT_BUFFER,
++ out_fmt,
++ out_w,
++ out_h,
++ out_stride,
++ out_rot,
++ outbuf,
++ 0,
++ 0,
++ out_uoff,
++ out_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if ((t->set.mode & VDOA_BAND_MODE) && (t->set.task & VDI_VF)) {
++ ret = ipu_link_channels(ipu, MEM_VDOA_MEM, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_link_ch vdoa_ic",
++ STATE_LINK_CHAN_FAIL, done, ret);
++ }
++
++done:
++ return ret;
++}
++
++static void uninit_ic(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret;
++
++ if ((t->set.mode & VDOA_BAND_MODE) && (t->set.task & VDI_VF)) {
++ ret = ipu_unlink_channels(ipu, MEM_VDOA_MEM, t->set.ic_chan);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_unlink_ch vdoa_ic",
++ STATE_UNLINK_CHAN_FAIL, ret);
++ }
++ ipu_uninit_channel(ipu, t->set.ic_chan);
++ if (deinterlace_3_field(t)) {
++ ipu_uninit_channel(ipu, t->set.vdi_ic_p_chan);
++ ipu_uninit_channel(ipu, t->set.vdi_ic_n_chan);
++ }
++}
++
++static int init_rot(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++ dma_addr_t inbuf = 0, outbuf = 0;
++ int in_uoff = 0, in_voff = 0;
++ int in_fmt, in_width, in_height, in_stride;
++
++ /* init channel */
++ ret = ipu_init_channel(ipu, t->set.rot_chan, NULL);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++
++ /* init channel buf */
++ /* is it need link to a ic channel */
++ if (ic_and_rot(t->set.mode)) {
++ in_fmt = t->set.r_fmt;
++ in_width = t->set.r_width;
++ in_height = t->set.r_height;
++ in_stride = t->set.r_stride;
++ inbuf = t->set.r_paddr;
++ in_uoff = 0;
++ in_voff = 0;
++ } else {
++ in_fmt = t->input.format;
++ in_width = t->input.crop.w;
++ in_height = t->input.crop.h;
++ in_stride = t->set.istride;
++ inbuf = t->input.paddr + t->set.i_off;
++ in_uoff = t->set.i_uoff;
++ in_voff = t->set.i_voff;
++ }
++ outbuf = t->output.paddr + t->set.o_off;
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.rot_chan,
++ IPU_INPUT_BUFFER,
++ in_fmt,
++ in_width,
++ in_height,
++ in_stride,
++ t->output.rotate,
++ inbuf,
++ 0,
++ 0,
++ in_uoff,
++ in_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.rot_chan,
++ IPU_OUTPUT_BUFFER,
++ t->output.format,
++ t->output.crop.w,
++ t->output.crop.h,
++ t->set.ostride,
++ IPU_ROTATE_NONE,
++ outbuf,
++ 0,
++ 0,
++ t->set.o_uoff,
++ t->set.o_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++done:
++ return ret;
++}
++
++static void uninit_rot(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ ipu_uninit_channel(ipu, t->set.rot_chan);
++}
++
++static int get_irq(struct ipu_task_entry *t)
++{
++ int irq;
++ ipu_channel_t chan;
++
++ if (only_ic(t->set.mode))
++ chan = t->set.ic_chan;
++ else
++ chan = t->set.rot_chan;
++
++ switch (chan) {
++ case MEM_ROT_VF_MEM:
++ irq = IPU_IRQ_PRP_VF_ROT_OUT_EOF;
++ break;
++ case MEM_ROT_PP_MEM:
++ irq = IPU_IRQ_PP_ROT_OUT_EOF;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ irq = IPU_IRQ_PRP_VF_OUT_EOF;
++ break;
++ case MEM_PP_MEM:
++ irq = IPU_IRQ_PP_OUT_EOF;
++ break;
++ case MEM_VDI_MEM:
++ irq = IPU_IRQ_VDIC_OUT_EOF;
++ break;
++ default:
++ irq = -EINVAL;
++ }
++
++ return irq;
++}
++
++static irqreturn_t task_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_task_entry *prev_tsk = dev_id;
++
++ CHECK_PERF(&prev_tsk->ts_inirq);
++ complete(&prev_tsk->irq_comp);
++ dev_dbg(prev_tsk->dev, "[0x%p] no-0x%x in-irq!",
++ prev_tsk, prev_tsk->task_no);
++
++ return IRQ_HANDLED;
++}
++
++/* Fix deinterlace up&down split mode medium line */
++static void vdi_split_process(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ u32 vdi_size;
++ u32 vdi_save_lines;
++ u32 stripe_mode;
++ u32 task_no;
++ u32 i, offset_addr;
++ u32 line_size;
++ unsigned char *base_off;
++ struct ipu_task_entry *parent = t->parent;
++ struct mutex *lock = &parent->vdic_lock;
++
++ if (!parent) {
++ dev_err(t->dev, "ERR[0x%x]invalid parent\n", t->task_no);
++ return;
++ }
++ mutex_lock(lock);
++ stripe_mode = t->task_no & 0xf;
++ task_no = t->task_no >> 4;
++
++ /* Save both luma and chroma part for interleaved YUV(e.g. YUYV).
++ * Save luma part for non-interleaved and partial-interleaved
++ * YUV format (e.g NV12 and YV12). */
++ if (t->output.format == IPU_PIX_FMT_YUYV ||
++ t->output.format == IPU_PIX_FMT_UYVY)
++ line_size = t->output.crop.w * fmt_to_bpp(t->output.format)/8;
++ else
++ line_size = t->output.crop.w;
++
++ vdi_save_lines = (t->output.crop.h - t->set.sp_setting.ud_split_line)/2;
++ vdi_size = vdi_save_lines * line_size;
++ if (vdi_save_lines <= 0) {
++ dev_err(t->dev, "[0x%p] vdi_save_line error\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++
++ /*check vditmpbuf buffer have alloced or buffer size is changed */
++ if ((vdi_save_lines != parent->old_save_lines) ||
++ (vdi_size != parent->old_size)) {
++ if (parent->vditmpbuf[0] != NULL)
++ kfree(parent->vditmpbuf[0]);
++ if (parent->vditmpbuf[1] != NULL)
++ kfree(parent->vditmpbuf[1]);
++
++ parent->vditmpbuf[0] = kmalloc(vdi_size, GFP_KERNEL);
++ if (parent->vditmpbuf[0] == NULL) {
++ dev_err(t->dev,
++ "[0x%p]Falied Alloc vditmpbuf[0]\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++ memset(parent->vditmpbuf[0], 0, vdi_size);
++
++ parent->vditmpbuf[1] = kmalloc(vdi_size, GFP_KERNEL);
++ if (parent->vditmpbuf[1] == NULL) {
++ dev_err(t->dev,
++ "[0x%p]Falied Alloc vditmpbuf[1]\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++ memset(parent->vditmpbuf[1], 0, vdi_size);
++
++ parent->old_save_lines = vdi_save_lines;
++ parent->old_size = vdi_size;
++ }
++
++ if (pfn_valid(t->output.paddr >> PAGE_SHIFT)) {
++ base_off = page_address(pfn_to_page(t->output.paddr >> PAGE_SHIFT));
++ base_off += t->output.paddr & ((1 << PAGE_SHIFT) - 1);
++ } else {
++ base_off = (char *)ioremap_nocache(t->output.paddr,
++ t->output.width * t->output.height *
++ fmt_to_bpp(t->output.format)/8);
++ }
++ if (base_off == NULL) {
++ dev_err(t->dev, "ERR[0x%p]Failed get virtual address\n", t);
++ mutex_unlock(lock);
++ return;
++ }
++
++ /* UP stripe or UP&LEFT stripe */
++ if ((stripe_mode == UP_STRIPE) ||
++ (stripe_mode == (UP_STRIPE | LEFT_STRIPE))) {
++ if (!parent->buf0filled) {
++ offset_addr = t->set.o_off +
++ t->set.sp_setting.ud_split_line*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[0] + i*line_size,
++ base_off + offset_addr +
++ i*t->set.ostride, line_size);
++ parent->buf0filled = true;
++ } else {
++ offset_addr = t->set.o_off + (t->output.crop.h -
++ vdi_save_lines) * t->set.ostride;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[0] + i*line_size, line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf0filled = false;
++ }
++ }
++ /*Down stripe or Down&Left stripe*/
++ else if ((stripe_mode == DOWN_STRIPE) ||
++ (stripe_mode == (DOWN_STRIPE | LEFT_STRIPE))) {
++ if (!parent->buf0filled) {
++ offset_addr = t->set.o_off + vdi_save_lines*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[0] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf0filled = true;
++ } else {
++ offset_addr = t->set.o_off;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[0] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf0filled = false;
++ }
++ }
++ /*Up&Right stripe*/
++ else if (stripe_mode == (UP_STRIPE | RIGHT_STRIPE)) {
++ if (!parent->buf1filled) {
++ offset_addr = t->set.o_off +
++ t->set.sp_setting.ud_split_line*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[1] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf1filled = true;
++ } else {
++ offset_addr = t->set.o_off +
++ (t->output.crop.h - vdi_save_lines)*t->set.ostride;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[1] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf1filled = false;
++ }
++ }
++ /*Down stripe or Down&Right stript*/
++ else if (stripe_mode == (DOWN_STRIPE | RIGHT_STRIPE)) {
++ if (!parent->buf1filled) {
++ offset_addr = t->set.o_off + vdi_save_lines*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_save_lines*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_save_lines*t->set.ostride);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[1] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf1filled = true;
++ } else {
++ offset_addr = t->set.o_off;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[1] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_save_lines*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_save_lines*t->set.ostride);
++ parent->buf1filled = false;
++ }
++ }
++ if (!pfn_valid(t->output.paddr >> PAGE_SHIFT))
++ iounmap(base_off);
++ mutex_unlock(lock);
++}
++
++static void do_task_release(struct ipu_task_entry *t, int fail)
++{
++ int ret;
++ struct ipu_soc *ipu = t->ipu;
++
++ if (t->input.deinterlace.enable && !fail &&
++ (t->task_no & (UP_STRIPE | DOWN_STRIPE)))
++ vdi_split_process(ipu, t);
++
++ ipu_free_irq(ipu, t->irq, t);
++
++ if (t->vdoa_dma.vaddr)
++ dma_free_coherent(t->dev,
++ t->vdoa_dma.size,
++ t->vdoa_dma.vaddr,
++ t->vdoa_dma.paddr);
++
++ if (only_ic(t->set.mode)) {
++ ret = ipu_disable_channel(ipu, t->set.ic_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_p_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic_p",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_n_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic_n",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ }
++ } else if (only_rot(t->set.mode)) {
++ ret = ipu_disable_channel(ipu, t->set.rot_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_rot",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ ret = ipu_unlink_channels(ipu, t->set.ic_chan, t->set.rot_chan);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_unlink_ch",
++ STATE_UNLINK_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.rot_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch ic_and_rot-rot",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.ic_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch ic_and_rot-ic",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_p_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch icrot-ic-p",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_n_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch icrot-ic-n",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ }
++ }
++
++ if (only_ic(t->set.mode))
++ uninit_ic(ipu, t);
++ else if (only_rot(t->set.mode))
++ uninit_rot(ipu, t);
++ else if (ic_and_rot(t->set.mode)) {
++ uninit_ic(ipu, t);
++ uninit_rot(ipu, t);
++ }
++
++ t->state = STATE_OK;
++ CHECK_PERF(&t->ts_rel);
++ return;
++}
++
++static void do_task_vdoa_only(struct ipu_task_entry *t)
++{
++ int ret;
++
++ ret = init_tiled_ch_bufs(NULL, t);
++ CHECK_RETCODE(ret < 0, "do_vdoa_only", STATE_ERR, out, ret);
++ ret = vdoa_start(t->vdoa_handle, VDOA_DEF_TIMEOUT_MS);
++ vdoa_stop(t->vdoa_handle);
++ CHECK_RETCODE(ret < 0, "vdoa_wait4complete, do_vdoa_only",
++ STATE_VDOA_IRQ_TIMEOUT, out, ret);
++
++ t->state = STATE_OK;
++out:
++ return;
++}
++
++static void do_task(struct ipu_task_entry *t)
++{
++ int r_size;
++ int irq;
++ int ret;
++ uint32_t busy;
++ struct ipu_soc *ipu = t->ipu;
++
++ CHECK_PERF(&t->ts_dotask);
++
++ if (!ipu) {
++ t->state = STATE_NO_IPU;
++ return;
++ }
++
++ init_completion(&t->irq_comp);
++ dev_dbg(ipu->dev, "[0x%p]Do task no:0x%x: id %d\n", (void *)t,
++ t->task_no, t->task_id);
++ dump_task_info(t);
++
++ if (t->set.task & IC_PP) {
++ t->set.ic_chan = MEM_PP_MEM;
++ dev_dbg(ipu->dev, "[0x%p]ic channel MEM_PP_MEM\n", (void *)t);
++ } else if (t->set.task & IC_VF) {
++ t->set.ic_chan = MEM_PRP_VF_MEM;
++ dev_dbg(ipu->dev, "[0x%p]ic channel MEM_PRP_VF_MEM\n", (void *)t);
++ } else if (t->set.task & VDI_VF) {
++ if (t->set.mode & VDOA_BAND_MODE) {
++ t->set.ic_chan = MEM_VDI_MEM;
++ if (deinterlace_3_field(t)) {
++ t->set.vdi_ic_p_chan = MEM_VDI_MEM_P;
++ t->set.vdi_ic_n_chan = MEM_VDI_MEM_N;
++ }
++ dev_dbg(ipu->dev, "[0x%p]ic ch MEM_VDI_MEM\n",
++ (void *)t);
++ } else {
++ t->set.ic_chan = MEM_VDI_PRP_VF_MEM;
++ if (deinterlace_3_field(t)) {
++ t->set.vdi_ic_p_chan = MEM_VDI_PRP_VF_MEM_P;
++ t->set.vdi_ic_n_chan = MEM_VDI_PRP_VF_MEM_N;
++ }
++ dev_dbg(ipu->dev,
++ "[0x%p]ic ch MEM_VDI_PRP_VF_MEM\n", t);
++ }
++ }
++
++ if (t->set.task & ROT_PP) {
++ t->set.rot_chan = MEM_ROT_PP_MEM;
++ dev_dbg(ipu->dev, "[0x%p]rot channel MEM_ROT_PP_MEM\n", (void *)t);
++ } else if (t->set.task & ROT_VF) {
++ t->set.rot_chan = MEM_ROT_VF_MEM;
++ dev_dbg(ipu->dev, "[0x%p]rot channel MEM_ROT_VF_MEM\n", (void *)t);
++ }
++
++ if (t->task_id == IPU_TASK_ID_VF)
++ busy = ic_vf_pp_is_busy(ipu, true);
++ else if (t->task_id == IPU_TASK_ID_PP)
++ busy = ic_vf_pp_is_busy(ipu, false);
++ else {
++ dev_err(ipu->dev, "ERR[no:0x%x]ipu task_id:%d invalid!\n",
++ t->task_no, t->task_id);
++ return;
++ }
++ if (busy) {
++ dev_err(ipu->dev, "ERR[0x%p-no:0x%x]ipu task_id:%d busy!\n",
++ (void *)t, t->task_no, t->task_id);
++ t->state = STATE_IPU_BUSY;
++ return;
++ }
++
++ irq = get_irq(t);
++ if (irq < 0) {
++ t->state = STATE_NO_IRQ;
++ return;
++ }
++ t->irq = irq;
++
++ /* channel setup */
++ if (only_ic(t->set.mode)) {
++ dev_dbg(t->dev, "[0x%p]only ic mode\n", (void *)t);
++ ret = init_ic(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_ic only_ic",
++ t->state, chan_setup, ret);
++ } else if (only_rot(t->set.mode)) {
++ dev_dbg(t->dev, "[0x%p]only rot mode\n", (void *)t);
++ ret = init_rot(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_rot only_rot",
++ t->state, chan_setup, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ int rot_idx = (t->task_id == IPU_TASK_ID_VF) ? 0 : 1;
++
++ dev_dbg(t->dev, "[0x%p]ic + rot mode\n", (void *)t);
++ t->set.r_fmt = t->output.format;
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ t->set.r_width = t->output.crop.h;
++ t->set.r_height = t->output.crop.w;
++ } else {
++ t->set.r_width = t->output.crop.w;
++ t->set.r_height = t->output.crop.h;
++ }
++ t->set.r_stride = t->set.r_width *
++ bytes_per_pixel(t->set.r_fmt);
++ r_size = PAGE_ALIGN(t->set.r_width * t->set.r_height
++ * fmt_to_bpp(t->set.r_fmt)/8);
++
++ if (r_size > ipu->rot_dma[rot_idx].size) {
++ dev_dbg(t->dev, "[0x%p]realloc rot buffer\n", (void *)t);
++
++ if (ipu->rot_dma[rot_idx].vaddr)
++ dma_free_coherent(t->dev,
++ ipu->rot_dma[rot_idx].size,
++ ipu->rot_dma[rot_idx].vaddr,
++ ipu->rot_dma[rot_idx].paddr);
++
++ ipu->rot_dma[rot_idx].size = r_size;
++ ipu->rot_dma[rot_idx].vaddr = dma_alloc_coherent(t->dev,
++ r_size,
++ &ipu->rot_dma[rot_idx].paddr,
++ GFP_DMA | GFP_KERNEL);
++ CHECK_RETCODE(ipu->rot_dma[rot_idx].vaddr == NULL,
++ "ic_and_rot", STATE_SYS_NO_MEM,
++ chan_setup, -ENOMEM);
++ }
++ t->set.r_paddr = ipu->rot_dma[rot_idx].paddr;
++
++ dev_dbg(t->dev, "[0x%p]rotation:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->set.r_fmt);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->set.r_width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->set.r_height);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->set.r_paddr);
++ dev_dbg(t->dev, "[0x%p]\trstride = %d\n", (void *)t, t->set.r_stride);
++
++ ret = init_ic(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_ic ic_and_rot",
++ t->state, chan_setup, ret);
++ ret = init_rot(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_rot ic_and_rot",
++ t->state, chan_setup, ret);
++ ret = ipu_link_channels(ipu, t->set.ic_chan,
++ t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_link_ch ic_and_rot",
++ STATE_LINK_CHAN_FAIL, chan_setup, ret);
++ } else {
++ dev_err(t->dev, "ERR [0x%p]do task: should not be here\n", t);
++ t->state = STATE_ERR;
++ return;
++ }
++
++ ret = ipu_request_irq(ipu, irq, task_irq_handler, 0, NULL, t);
++ CHECK_RETCODE(ret < 0, "ipu_req_irq",
++ STATE_IRQ_FAIL, chan_setup, ret);
++
++ /* enable/start channel */
++ if (only_ic(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_p_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic_p",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_n_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic_n",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ }
++
++ ret = ipu_select_buffer(ipu, t->set.ic_chan, IPU_OUTPUT_BUFFER,
++ 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay_en) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_g",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_a",
++ STATE_SEL_BUF_FAIL, chan_buf,
++ ret);
++ }
++ }
++ if (!(t->set.mode & VDOA_BAND_MODE)) {
++ if (deinterlace_3_field(t))
++ ipu_select_multi_vdi_buffer(ipu, 0);
++ else {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ }
++ }
++ } else if (only_rot(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_rot",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_rot_o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_rot_i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-rot",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-ic",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_p_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-p",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_n_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-n",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ }
++
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-rot-o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay_en) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-g",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf icrot-ic-a",
++ STATE_SEL_BUF_FAIL,
++ chan_buf, ret);
++ }
++ }
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (deinterlace_3_field(t))
++ ipu_select_multi_vdi_buffer(ipu, 0);
++ else {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ }
++ }
++
++ if (need_split(t))
++ t->state = STATE_IN_PROGRESS;
++
++ if (t->set.mode & VDOA_BAND_MODE) {
++ ret = vdoa_start(t->vdoa_handle, VDOA_DEF_TIMEOUT_MS);
++ CHECK_RETCODE(ret < 0, "vdoa_wait4complete, do_vdoa_band",
++ STATE_VDOA_IRQ_TIMEOUT, chan_rel, ret);
++ }
++
++ CHECK_PERF(&t->ts_waitirq);
++ ret = wait_for_completion_timeout(&t->irq_comp,
++ msecs_to_jiffies(t->timeout - DEF_DELAY_MS));
++ CHECK_PERF(&t->ts_wakeup);
++ CHECK_RETCODE(ret == 0, "wait_for_comp_timeout",
++ STATE_IRQ_TIMEOUT, chan_rel, ret);
++ dev_dbg(t->dev, "[0x%p] no-0x%x ipu irq done!", t, t->task_no);
++
++chan_rel:
++chan_buf:
++chan_en:
++chan_setup:
++ if (t->set.mode & VDOA_BAND_MODE)
++ vdoa_stop(t->vdoa_handle);
++ do_task_release(t, t->state >= STATE_ERR);
++ return;
++}
++
++static void do_task_vdoa_vdi(struct ipu_task_entry *t)
++{
++ int i;
++ int ret;
++ u32 stripe_width;
++
++ /* FIXME: crop mode not support now */
++ stripe_width = t->input.width >> 1;
++ t->input.crop.pos.x = 0;
++ t->input.crop.pos.y = 0;
++ t->input.crop.w = stripe_width;
++ t->input.crop.h = t->input.height;
++ t->output.crop.w = stripe_width;
++ t->output.crop.h = t->input.height;
++
++ for (i = 0; i < 2; i++) {
++ t->input.crop.pos.x = t->input.crop.pos.x + i * stripe_width;
++ t->output.crop.pos.x = t->output.crop.pos.x + i * stripe_width;
++ /* check input */
++ ret = set_crop(&t->input.crop, t->input.width, t->input.height,
++ t->input.format);
++ if (ret < 0) {
++ ret = STATE_ERR;
++ goto done;
++ } else
++ update_offset(t->input.format,
++ t->input.width, t->input.height,
++ t->input.crop.pos.x,
++ t->input.crop.pos.y,
++ &t->set.i_off, &t->set.i_uoff,
++ &t->set.i_voff, &t->set.istride);
++ dev_dbg(t->dev, "i_off:0x%x, i_uoff:0x%x, istride:%d.\n",
++ t->set.i_off, t->set.i_uoff, t->set.istride);
++ /* check output */
++ ret = set_crop(&t->output.crop, t->input.width,
++ t->output.height, t->output.format);
++ if (ret < 0) {
++ ret = STATE_ERR;
++ goto done;
++ } else
++ update_offset(t->output.format,
++ t->output.width, t->output.height,
++ t->output.crop.pos.x,
++ t->output.crop.pos.y,
++ &t->set.o_off, &t->set.o_uoff,
++ &t->set.o_voff, &t->set.ostride);
++
++ dev_dbg(t->dev, "o_off:0x%x, o_uoff:0x%x, ostride:%d.\n",
++ t->set.o_off, t->set.o_uoff, t->set.ostride);
++
++ do_task(t);
++ }
++
++ return;
++done:
++ dev_err(t->dev, "ERR %s set_crop.\n", __func__);
++ t->state = ret;
++ return;
++}
++
++static void get_res_do_task(struct ipu_task_entry *t)
++{
++ uint32_t found;
++ uint32_t split_child;
++ struct mutex *lock;
++
++ found = get_vdoa_ipu_res(t);
++ if (!found) {
++ dev_err(t->dev, "ERR:[0x%p] no-0x%x can not get res\n",
++ t, t->task_no);
++ return;
++ } else {
++ if (t->set.task & VDOA_ONLY)
++ do_task_vdoa_only(t);
++ else if ((IPU_PIX_FMT_TILED_NV12F == t->input.format) &&
++ (t->set.mode & VDOA_BAND_MODE) &&
++ (t->input.crop.w > soc_max_vdi_in_width()))
++ do_task_vdoa_vdi(t);
++ else
++ do_task(t);
++ put_vdoa_ipu_res(t, 0);
++ }
++ if (t->state != STATE_OK) {
++ dev_err(t->dev, "ERR:[0x%p] no-0x%x state: %s\n",
++ t, t->task_no, state_msg[t->state].msg);
++ }
++
++ split_child = need_split(t) && t->parent;
++ if (split_child) {
++ lock = &t->parent->split_lock;
++ mutex_lock(lock);
++ t->split_done = 1;
++ mutex_unlock(lock);
++ wake_up(&t->parent->split_waitq);
++ }
++
++ return;
++}
++
++static void wait_split_task_complete(struct ipu_task_entry *parent,
++ struct ipu_split_task *sp_task, uint32_t size)
++{
++ struct ipu_task_entry *tsk = NULL;
++ int ret = 0, rc;
++ int j, idx = -1;
++ unsigned long flags;
++ struct mutex *lock = &parent->split_lock;
++ int k, busy_vf, busy_pp;
++ struct ipu_soc *ipu;
++ DECLARE_PERF_VAR;
++
++ for (j = 0; j < size; j++) {
++ rc = wait_event_timeout(
++ parent->split_waitq,
++ sp_task_check_done(sp_task, parent, size, &idx),
++ msecs_to_jiffies(parent->timeout - DEF_DELAY_MS));
++ if (!rc) {
++ dev_err(parent->dev,
++ "ERR:[0x%p] no-0x%x, split_task timeout,j:%d,"
++ "size:%d.\n",
++ parent, parent->task_no, j, size);
++ ret = -ETIMEDOUT;
++ goto out;
++ } else {
++ if (idx < 0) {
++ dev_err(parent->dev,
++ "ERR:[0x%p] no-0x%x, invalid task idx:%d\n",
++ parent, parent->task_no, idx);
++ continue;
++ }
++ tsk = sp_task[idx].child_task;
++ mutex_lock(lock);
++ if (!tsk->split_done || !tsk->ipu)
++ dev_err(tsk->dev,
++ "ERR:no-0x%x,split not done:%d/null ipu:0x%p\n",
++ tsk->task_no, tsk->split_done, tsk->ipu);
++ tsk->split_done = 0;
++ mutex_unlock(lock);
++
++ dev_dbg(tsk->dev,
++ "[0x%p] no-0x%x sp_tsk[%d] done,state:%d.\n",
++ tsk, tsk->task_no, idx, tsk->state);
++ #ifdef DBG_IPU_PERF
++ CHECK_PERF(&tsk->ts_rel);
++ PRINT_TASK_STATISTICS;
++ #endif
++ }
++ }
++
++out:
++ if (ret == -ETIMEDOUT) {
++ /* debug */
++ for (k = 0; k < max_ipu_no; k++) {
++ ipu = ipu_get_soc(k);
++ if (IS_ERR(ipu)) {
++ dev_err(parent->dev, "no:0x%x, null ipu:%d\n",
++ parent->task_no, k);
++ } else {
++ busy_vf = ic_vf_pp_is_busy(ipu, true);
++ busy_pp = ic_vf_pp_is_busy(ipu, false);
++ dev_err(parent->dev,
++ "ERR:ipu[%d] busy_vf:%d, busy_pp:%d.\n",
++ k, busy_vf, busy_pp);
++ }
++ }
++ for (k = 0; k < size; k++) {
++ tsk = sp_task[k].child_task;
++ if (!tsk)
++ continue;
++ dev_err(parent->dev,
++ "ERR: sp_task[%d][0x%p] no-0x%x done:%d,"
++ "state:%s,on_list:%d, ipu:0x%p,timeout!\n",
++ k, tsk, tsk->task_no, tsk->split_done,
++ state_msg[tsk->state].msg, tsk->task_in_list,
++ tsk->ipu);
++ }
++ }
++
++ for (j = 0; j < size; j++) {
++ tsk = sp_task[j].child_task;
++ if (!tsk)
++ continue;
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ dev_dbg(tsk->dev,
++ "[0x%p] no-0x%x,id:%d sp_tsk timeout list_del.\n",
++ tsk, tsk->task_no, tsk->task_id);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++ if (!tsk->ipu)
++ continue;
++ if (tsk->state != STATE_OK) {
++ dev_err(tsk->dev,
++ "ERR:[0x%p] no-0x%x,id:%d, sp_tsk state: %s\n",
++ tsk, tsk->task_no, tsk->task_id,
++ state_msg[tsk->state].msg);
++ }
++ kref_put(&tsk->refcount, task_mem_free);
++ }
++
++ kfree(parent->vditmpbuf[0]);
++ kfree(parent->vditmpbuf[1]);
++
++ if (ret < 0)
++ parent->state = STATE_TIMEOUT;
++ else
++ parent->state = STATE_OK;
++ return;
++}
++
++static inline int find_task(struct ipu_task_entry **t, int thread_id)
++{
++ int found;
++ unsigned long flags;
++ struct ipu_task_entry *tsk;
++ struct list_head *task_list = &ipu_task_list;
++
++ *t = NULL;
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ found = !list_empty(task_list);
++ if (found) {
++ tsk = list_first_entry(task_list, struct ipu_task_entry, node);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ *t = tsk;
++ kref_get(&tsk->refcount);
++ dev_dbg(tsk->dev,
++ "thread_id:%d,[0x%p] task_no:0x%x,mode:0x%x list_del\n",
++ thread_id, tsk, tsk->task_no, tsk->set.mode);
++ } else
++ dev_err(tsk->dev,
++ "thread_id:%d,task_no:0x%x,mode:0x%x not on list_del\n",
++ thread_id, tsk->task_no, tsk->set.mode);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++
++ return found;
++}
++
++static int ipu_task_thread(void *argv)
++{
++ struct ipu_task_entry *tsk;
++ struct ipu_task_entry *sp_tsk0;
++ struct ipu_split_task sp_task[4];
++ /* priority lower than irq_thread */
++ const struct sched_param param = {
++ .sched_priority = MAX_USER_RT_PRIO/2 - 1,
++ };
++ int ret;
++ int curr_thread_id;
++ uint32_t size;
++ unsigned long flags;
++ unsigned int cpu;
++ struct cpumask cpu_mask;
++ struct ipu_thread_data *data = (struct ipu_thread_data *)argv;
++
++ thread_id++;
++ curr_thread_id = thread_id;
++ sched_setscheduler(current, SCHED_FIFO, &param);
++
++ if (!data->is_vdoa) {
++ cpu = cpumask_first(cpu_online_mask);
++ cpumask_set_cpu(cpu, &cpu_mask);
++ ret = sched_setaffinity(data->ipu->thread[data->id]->pid,
++ &cpu_mask);
++ if (ret < 0) {
++ pr_err("%s: sched_setaffinity fail:%d.\n", __func__, ret);
++ }
++ pr_debug("%s: sched_setaffinity cpu:%d.\n", __func__, cpu);
++ }
++
++ while (!kthread_should_stop()) {
++ int split_fail = 0;
++ int split_parent;
++ int split_child;
++
++ wait_event_interruptible(thread_waitq, find_task(&tsk, curr_thread_id));
++
++ if (!tsk) {
++ pr_err("thread:%d can not find task.\n",
++ curr_thread_id);
++ continue;
++ }
++
++ /* note: other threads run split child task */
++ split_parent = need_split(tsk) && !tsk->parent;
++ split_child = need_split(tsk) && tsk->parent;
++ if (split_parent) {
++ if ((tsk->set.split_mode == RL_SPLIT) ||
++ (tsk->set.split_mode == UD_SPLIT))
++ size = 2;
++ else
++ size = 4;
++ ret = queue_split_task(tsk, sp_task, size);
++ if (ret < 0) {
++ split_fail = 1;
++ } else {
++ struct list_head *pos;
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++
++ sp_tsk0 = list_first_entry(&tsk->split_list,
++ struct ipu_task_entry, node);
++ list_del(&sp_tsk0->node);
++
++ list_for_each(pos, &tsk->split_list) {
++ struct ipu_task_entry *tmp;
++
++ tmp = list_entry(pos,
++ struct ipu_task_entry, node);
++ tmp->task_in_list = 1;
++ dev_dbg(tmp->dev,
++ "[0x%p] no-0x%x,id:%d sp_tsk "
++ "add_to_list.\n", tmp,
++ tmp->task_no, tmp->task_id);
++ }
++ /* add to global list */
++ list_splice(&tsk->split_list, &ipu_task_list);
++
++ spin_unlock_irqrestore(&ipu_task_list_lock,
++ flags);
++ /* let the parent thread do the first sp_task */
++ /* FIXME: ensure the correct sequence for split
++ 4size: 5/6->9/a*/
++ if (!sp_tsk0)
++ dev_err(tsk->dev,
++ "ERR: no-0x%x,can not get split_tsk0\n",
++ tsk->task_no);
++ wake_up_interruptible(&thread_waitq);
++ get_res_do_task(sp_tsk0);
++ dev_dbg(sp_tsk0->dev,
++ "thread:%d complete tsk no:0x%x.\n",
++ curr_thread_id, sp_tsk0->task_no);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0) {
++ wake_up(&res_waitq);
++ dev_dbg(sp_tsk0->dev,
++ "sp_tsk0 sche thread:%d no:0x%x,"
++ "req_cnt:%d\n", curr_thread_id,
++ sp_tsk0->task_no, ret);
++ /* For other threads to get_res */
++ schedule();
++ }
++ }
++ } else
++ get_res_do_task(tsk);
++
++ /* wait for all 4 sp_task finished here or timeout
++ and then release all resources */
++ if (split_parent && !split_fail)
++ wait_split_task_complete(tsk, sp_task, size);
++
++ if (!split_child) {
++ atomic_inc(&tsk->done);
++ wake_up(&tsk->task_waitq);
++ }
++
++ dev_dbg(tsk->dev, "thread:%d complete tsk no:0x%x-[0x%p].\n",
++ curr_thread_id, tsk->task_no, tsk);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0) {
++ wake_up(&res_waitq);
++ dev_dbg(tsk->dev, "sche thread:%d no:0x%x,req_cnt:%d\n",
++ curr_thread_id, tsk->task_no, ret);
++ /* note: give cpu to other threads to get_res */
++ schedule();
++ }
++
++ kref_put(&tsk->refcount, task_mem_free);
++ }
++
++ pr_info("ERR %s exit.\n", __func__);
++ return 0;
++}
++
++int ipu_check_task(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++ int ret = 0;
++
++ tsk = create_task_entry(task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ ret = check_task(tsk);
++
++ task->input = tsk->input;
++ task->output = tsk->output;
++ task->overlay = tsk->overlay;
++ dump_task_info(tsk);
++
++ kref_put(&tsk->refcount, task_mem_free);
++ if (ret != 0)
++ pr_debug("%s ret:%d.\n", __func__, ret);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_check_task);
++
++int ipu_queue_task(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++ unsigned long flags;
++ int ret;
++ u32 tmp_task_no;
++ DECLARE_PERF_VAR;
++
++ tsk = create_task_entry(task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ CHECK_PERF(&tsk->ts_queue);
++ ret = prepare_task(tsk);
++ if (ret < 0)
++ goto done;
++
++ if (need_split(tsk)) {
++ CHECK_PERF(&tsk->ts_dotask);
++ CHECK_PERF(&tsk->ts_waitirq);
++ CHECK_PERF(&tsk->ts_inirq);
++ CHECK_PERF(&tsk->ts_wakeup);
++ }
++
++ /* task_no last four bits for split task type*/
++ tmp_task_no = atomic_inc_return(&frame_no);
++ tsk->task_no = tmp_task_no << 4;
++ init_waitqueue_head(&tsk->task_waitq);
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ list_add_tail(&tsk->node, &ipu_task_list);
++ tsk->task_in_list = 1;
++ dev_dbg(tsk->dev, "[0x%p,no-0x%x] list_add_tail\n", tsk, tsk->task_no);
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++ wake_up_interruptible(&thread_waitq);
++
++ ret = wait_event_timeout(tsk->task_waitq, atomic_read(&tsk->done),
++ msecs_to_jiffies(tsk->timeout));
++ if (0 == ret) {
++ /* note: the timeout should larger than the internal timeout!*/
++ ret = -ETIMEDOUT;
++ dev_err(tsk->dev, "ERR: [0x%p] no-0x%x, timeout:%dms!\n",
++ tsk, tsk->task_no, tsk->timeout);
++ } else {
++ if (STATE_OK != tsk->state) {
++ dev_err(tsk->dev, "ERR: [0x%p] no-0x%x,state %d: %s\n",
++ tsk, tsk->task_no, tsk->state,
++ state_msg[tsk->state].msg);
++ ret = -ECANCELED;
++ } else
++ ret = 0;
++ }
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ dev_dbg(tsk->dev, "[0x%p] no:0x%x list_del\n",
++ tsk, tsk->task_no);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++
++#ifdef DBG_IPU_PERF
++ CHECK_PERF(&tsk->ts_rel);
++ PRINT_TASK_STATISTICS;
++ if (ts_frame_avg == 0)
++ ts_frame_avg = ts_frame.tv_nsec / NSEC_PER_USEC +
++ ts_frame.tv_sec * USEC_PER_SEC;
++ else
++ ts_frame_avg = (ts_frame_avg + ts_frame.tv_nsec / NSEC_PER_USEC
++ + ts_frame.tv_sec * USEC_PER_SEC)/2;
++ if (timespec_compare(&ts_frame, &ts_frame_max) > 0)
++ ts_frame_max = ts_frame;
++
++ atomic_inc(&frame_cnt);
++
++ if ((atomic_read(&frame_cnt) % 1000) == 0)
++ pr_debug("ipu_dev: max frame time:%ldus, avg frame time:%dus,"
++ "frame_cnt:%d\n", ts_frame_max.tv_nsec / NSEC_PER_USEC
++ + ts_frame_max.tv_sec * USEC_PER_SEC,
++ ts_frame_avg, atomic_read(&frame_cnt));
++#endif
++done:
++ if (ret < 0)
++ dev_err(tsk->dev, "ERR: no-0x%x,ipu_queue_task err:%d\n",
++ tsk->task_no, ret);
++
++ kref_put(&tsk->refcount, task_mem_free);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_queue_task);
++
++static int mxc_ipu_open(struct inode *inode, struct file *file)
++{
++ file->private_data = (void *)atomic_inc_return(&file_index);
++ return 0;
++}
++
++static long mxc_ipu_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int __user *argp = (void __user *)arg;
++ int ret = 0;
++
++ switch (cmd) {
++ case IPU_CHECK_TASK:
++ {
++ struct ipu_task task;
++
++ if (copy_from_user
++ (&task, (struct ipu_task *) arg,
++ sizeof(struct ipu_task)))
++ return -EFAULT;
++ ret = ipu_check_task(&task);
++ if (copy_to_user((struct ipu_task *) arg,
++ &task, sizeof(struct ipu_task)))
++ return -EFAULT;
++ break;
++ }
++ case IPU_QUEUE_TASK:
++ {
++ struct ipu_task task;
++
++ if (copy_from_user
++ (&task, (struct ipu_task *) arg,
++ sizeof(struct ipu_task)))
++ return -EFAULT;
++ ret = ipu_queue_task(&task);
++ break;
++ }
++ case IPU_ALLOC:
++ {
++ int size;
++ struct ipu_alloc_list *mem;
++
++ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
++ if (mem == NULL)
++ return -ENOMEM;
++
++ if (get_user(size, argp))
++ return -EFAULT;
++
++ mem->size = PAGE_ALIGN(size);
++
++ mem->cpu_addr = dma_alloc_coherent(ipu_dev, size,
++ &mem->phy_addr,
++ GFP_DMA | GFP_KERNEL);
++ if (mem->cpu_addr == NULL) {
++ kfree(mem);
++ return -ENOMEM;
++ }
++ mem->file_index = file->private_data;
++ mutex_lock(&ipu_alloc_lock);
++ list_add(&mem->list, &ipu_alloc_list);
++ mutex_unlock(&ipu_alloc_lock);
++
++ dev_dbg(ipu_dev, "allocated %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++
++ if (put_user(mem->phy_addr, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case IPU_FREE:
++ {
++ unsigned long offset;
++ struct ipu_alloc_list *mem;
++
++ if (get_user(offset, argp))
++ return -EFAULT;
++
++ ret = -EINVAL;
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry(mem, &ipu_alloc_list, list) {
++ if (mem->phy_addr == offset) {
++ list_del(&mem->list);
++ dma_free_coherent(ipu_dev,
++ mem->size,
++ mem->cpu_addr,
++ mem->phy_addr);
++ kfree(mem);
++ ret = 0;
++ break;
++ }
++ }
++ mutex_unlock(&ipu_alloc_lock);
++ if (0 == ret)
++ dev_dbg(ipu_dev, "free %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++
++ break;
++ }
++ default:
++ break;
++ }
++ return ret;
++}
++
++static int mxc_ipu_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ bool found = false;
++ u32 len;
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ struct ipu_alloc_list *mem;
++
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry(mem, &ipu_alloc_list, list) {
++ if (offset == mem->phy_addr) {
++ found = true;
++ len = mem->size;
++ break;
++ }
++ }
++ mutex_unlock(&ipu_alloc_lock);
++ if (!found)
++ return -EINVAL;
++
++ if (vma->vm_end - vma->vm_start > len)
++ return -EINVAL;
++
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot)) {
++ printk(KERN_ERR
++ "mmap failed!\n");
++ return -ENOBUFS;
++ }
++ return 0;
++}
++
++static int mxc_ipu_release(struct inode *inode, struct file *file)
++{
++ struct ipu_alloc_list *mem;
++ struct ipu_alloc_list *n;
++
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry_safe(mem, n, &ipu_alloc_list, list) {
++ if ((mem->cpu_addr != 0) &&
++ (file->private_data == mem->file_index)) {
++ list_del(&mem->list);
++ dma_free_coherent(ipu_dev,
++ mem->size,
++ mem->cpu_addr,
++ mem->phy_addr);
++ dev_dbg(ipu_dev, "rel-free %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++ kfree(mem);
++ }
++ }
++ mutex_unlock(&ipu_alloc_lock);
++ atomic_dec(&file_index);
++
++ return 0;
++}
++
++static struct file_operations mxc_ipu_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_ipu_open,
++ .mmap = mxc_ipu_mmap,
++ .release = mxc_ipu_release,
++ .unlocked_ioctl = mxc_ipu_ioctl,
++};
++
++int register_ipu_device(struct ipu_soc *ipu, int id)
++{
++ int ret = 0;
++ static int idx;
++ static struct ipu_thread_data thread_data[5];
++
++ if (!major) {
++ major = register_chrdev(0, "mxc_ipu", &mxc_ipu_fops);
++ if (major < 0) {
++ printk(KERN_ERR "Unable to register mxc_ipu as a char device\n");
++ ret = major;
++ goto register_cdev_fail;
++ }
++
++ ipu_class = class_create(THIS_MODULE, "mxc_ipu");
++ if (IS_ERR(ipu_class)) {
++ ret = PTR_ERR(ipu_class);
++ goto ipu_class_fail;
++ }
++
++ ipu_dev = device_create(ipu_class, NULL, MKDEV(major, 0),
++ NULL, "mxc_ipu");
++ if (IS_ERR(ipu_dev)) {
++ ret = PTR_ERR(ipu_dev);
++ goto dev_create_fail;
++ }
++ ipu_dev->dma_mask = kmalloc(sizeof(*ipu_dev->dma_mask), GFP_KERNEL);
++ *ipu_dev->dma_mask = DMA_BIT_MASK(32);
++ ipu_dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ mutex_init(&ipu_ch_tbl.lock);
++ }
++ max_ipu_no = ++id;
++ ipu->rot_dma[0].size = 0;
++ ipu->rot_dma[1].size = 0;
++
++ thread_data[idx].ipu = ipu;
++ thread_data[idx].id = 0;
++ thread_data[idx].is_vdoa = 0;
++ ipu->thread[0] = kthread_run(ipu_task_thread, &thread_data[idx++],
++ "ipu%d_task", id);
++ if (IS_ERR(ipu->thread[0])) {
++ ret = PTR_ERR(ipu->thread[0]);
++ goto kthread0_fail;
++ }
++
++ thread_data[idx].ipu = ipu;
++ thread_data[idx].id = 1;
++ thread_data[idx].is_vdoa = 0;
++ ipu->thread[1] = kthread_run(ipu_task_thread, &thread_data[idx++],
++ "ipu%d_task", id);
++ if (IS_ERR(ipu->thread[1])) {
++ ret = PTR_ERR(ipu->thread[1]);
++ goto kthread1_fail;
++ }
++
++
++ return ret;
++
++kthread1_fail:
++ kthread_stop(ipu->thread[0]);
++kthread0_fail:
++ if (id == 0)
++ device_destroy(ipu_class, MKDEV(major, 0));
++dev_create_fail:
++ if (id == 0) {
++ class_destroy(ipu_class);
++ }
++ipu_class_fail:
++ if (id == 0)
++ unregister_chrdev(major, "mxc_ipu");
++register_cdev_fail:
++ return ret;
++}
++
++void unregister_ipu_device(struct ipu_soc *ipu, int id)
++{
++ int i;
++
++ kthread_stop(ipu->thread[0]);
++ kthread_stop(ipu->thread[1]);
++ for (i = 0; i < 2; i++) {
++ if (ipu->rot_dma[i].vaddr)
++ dma_free_coherent(ipu_dev,
++ ipu->rot_dma[i].size,
++ ipu->rot_dma[i].vaddr,
++ ipu->rot_dma[i].paddr);
++ }
++
++ if (major) {
++ device_destroy(ipu_class, MKDEV(major, 0));
++ class_destroy(ipu_class);
++ unregister_chrdev(major, "mxc_ipu");
++ major = 0;
++ }
++}
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_disp.c linux-openelec/drivers/mxc/ipu3/ipu_disp.c
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_disp.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_disp.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1962 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_disp.c
++ *
++ * @brief IPU display submodule API functions
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include <asm/atomic.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++struct dp_csc_param_t {
++ int mode;
++ void *coeff;
++};
++
++#define SYNC_WAVE 0
++#define NULL_WAVE (-1)
++#define ASYNC_SER_WAVE 6
++
++/* DC display ID assignments */
++#define DC_DISP_ID_SYNC(di) (di)
++#define DC_DISP_ID_SERIAL 2
++#define DC_DISP_ID_ASYNC 3
++
++int dmfc_type_setup;
++
++void _ipu_dmfc_init(struct ipu_soc *ipu, int dmfc_type, int first)
++{
++ u32 dmfc_wr_chan, dmfc_dp_chan;
++
++ if (first) {
++ if (dmfc_type_setup > dmfc_type)
++ dmfc_type = dmfc_type_setup;
++ else
++ dmfc_type_setup = dmfc_type;
++
++ /* disable DMFC-IC channel*/
++ ipu_dmfc_write(ipu, 0x2, DMFC_IC_CTRL);
++ } else if (dmfc_type_setup >= DMFC_HIGH_RESOLUTION_DC) {
++ dev_dbg(ipu->dev, "DMFC high resolution has set, will not change\n");
++ return;
++ } else
++ dmfc_type_setup = dmfc_type;
++
++ if (dmfc_type == DMFC_HIGH_RESOLUTION_DC) {
++ /* 1 - segment 0~3;
++ * 5B - segment 4, 5;
++ * 5F - segment 6, 7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC DC HIGH RESOLUTION: 1(0~3), 5B(4,5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000088;
++ dmfc_dp_chan = 0x00009694;
++ ipu->dmfc_size_28 = 256*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 128*4;
++ } else if (dmfc_type == DMFC_HIGH_RESOLUTION_DP) {
++ /* 1 - segment 0, 1;
++ * 5B - segment 2~5;
++ * 5F - segment 6,7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC DP HIGH RESOLUTION: 1(0,1), 5B(2~5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000090;
++ dmfc_dp_chan = 0x0000968a;
++ ipu->dmfc_size_28 = 128*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 256*4;
++ } else if (dmfc_type == DMFC_HIGH_RESOLUTION_ONLY_DP) {
++ /* 5B - segment 0~3;
++ * 5F - segment 4~7;
++ * 1, 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC ONLY-DP HIGH RESOLUTION: 5B(0~3), 5F(4~7)\n");
++ dmfc_wr_chan = 0x00000000;
++ dmfc_dp_chan = 0x00008c88;
++ ipu->dmfc_size_28 = 0;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 256*4;
++ ipu->dmfc_size_23 = 256*4;
++ } else {
++ /* 1 - segment 0, 1;
++ * 5B - segment 4, 5;
++ * 5F - segment 6, 7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC NORMAL mode: 1(0~1), 5B(4,5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000090;
++ dmfc_dp_chan = 0x00009694;
++ ipu->dmfc_size_28 = 128*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 128*4;
++ }
++ ipu_dmfc_write(ipu, dmfc_wr_chan, DMFC_WR_CHAN);
++ ipu_dmfc_write(ipu, 0x202020F6, DMFC_WR_CHAN_DEF);
++ ipu_dmfc_write(ipu, dmfc_dp_chan, DMFC_DP_CHAN);
++ /* Enable chan 5 watermark set at 5 bursts and clear at 7 bursts */
++ ipu_dmfc_write(ipu, 0x2020F6F6, DMFC_DP_CHAN_DEF);
++}
++
++static int __init dmfc_setup(char *options)
++{
++ get_option(&options, &dmfc_type_setup);
++ if (dmfc_type_setup > DMFC_HIGH_RESOLUTION_ONLY_DP)
++ dmfc_type_setup = DMFC_HIGH_RESOLUTION_ONLY_DP;
++ return 1;
++}
++__setup("dmfc=", dmfc_setup);
++
++void _ipu_dmfc_set_wait4eot(struct ipu_soc *ipu, int dma_chan, int width)
++{
++ u32 dmfc_gen1 = ipu_dmfc_read(ipu, DMFC_GENERAL1);
++
++ if (width >= HIGH_RESOLUTION_WIDTH) {
++ if (dma_chan == 23)
++ _ipu_dmfc_init(ipu, DMFC_HIGH_RESOLUTION_DP, 0);
++ else if (dma_chan == 28)
++ _ipu_dmfc_init(ipu, DMFC_HIGH_RESOLUTION_DC, 0);
++ }
++
++ if (dma_chan == 23) { /*5B*/
++ if (ipu->dmfc_size_23/width > 3)
++ dmfc_gen1 |= 1UL << 20;
++ else
++ dmfc_gen1 &= ~(1UL << 20);
++ } else if (dma_chan == 24) { /*6B*/
++ if (ipu->dmfc_size_24/width > 1)
++ dmfc_gen1 |= 1UL << 22;
++ else
++ dmfc_gen1 &= ~(1UL << 22);
++ } else if (dma_chan == 27) { /*5F*/
++ if (ipu->dmfc_size_27/width > 2)
++ dmfc_gen1 |= 1UL << 21;
++ else
++ dmfc_gen1 &= ~(1UL << 21);
++ } else if (dma_chan == 28) { /*1*/
++ if (ipu->dmfc_size_28/width > 2)
++ dmfc_gen1 |= 1UL << 16;
++ else
++ dmfc_gen1 &= ~(1UL << 16);
++ } else if (dma_chan == 29) { /*6F*/
++ if (ipu->dmfc_size_29/width > 1)
++ dmfc_gen1 |= 1UL << 23;
++ else
++ dmfc_gen1 &= ~(1UL << 23);
++ }
++
++ ipu_dmfc_write(ipu, dmfc_gen1, DMFC_GENERAL1);
++}
++
++void _ipu_dmfc_set_burst_size(struct ipu_soc *ipu, int dma_chan, int burst_size)
++{
++ u32 dmfc_wr_chan = ipu_dmfc_read(ipu, DMFC_WR_CHAN);
++ u32 dmfc_dp_chan = ipu_dmfc_read(ipu, DMFC_DP_CHAN);
++ int dmfc_bs = 0;
++
++ switch (burst_size) {
++ case 64:
++ dmfc_bs = 0x40;
++ break;
++ case 32:
++ case 20:
++ dmfc_bs = 0x80;
++ break;
++ case 16:
++ dmfc_bs = 0xc0;
++ break;
++ default:
++ dev_err(ipu->dev, "Unsupported burst size %d\n",
++ burst_size);
++ return;
++ }
++
++ if (dma_chan == 23) { /*5B*/
++ dmfc_dp_chan &= ~(0xc0);
++ dmfc_dp_chan |= dmfc_bs;
++ } else if (dma_chan == 27) { /*5F*/
++ dmfc_dp_chan &= ~(0xc000);
++ dmfc_dp_chan |= (dmfc_bs << 8);
++ } else if (dma_chan == 28) { /*1*/
++ dmfc_wr_chan &= ~(0xc0);
++ dmfc_wr_chan |= dmfc_bs;
++ }
++
++ ipu_dmfc_write(ipu, dmfc_wr_chan, DMFC_WR_CHAN);
++ ipu_dmfc_write(ipu, dmfc_dp_chan, DMFC_DP_CHAN);
++}
++
++static void _ipu_di_data_wave_config(struct ipu_soc *ipu,
++ int di, int wave_gen,
++ int access_size, int component_size)
++{
++ u32 reg;
++ reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
++ (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
++ ipu_di_write(ipu, di, reg, DI_DW_GEN(wave_gen));
++}
++
++static void _ipu_di_data_pin_config(struct ipu_soc *ipu,
++ int di, int wave_gen, int di_pin, int set,
++ int up, int down)
++{
++ u32 reg;
++
++ reg = ipu_di_read(ipu, di, DI_DW_GEN(wave_gen));
++ reg &= ~(0x3 << (di_pin * 2));
++ reg |= set << (di_pin * 2);
++ ipu_di_write(ipu, di, reg, DI_DW_GEN(wave_gen));
++
++ ipu_di_write(ipu, di, (down << 16) | up, DI_DW_SET(wave_gen, set));
++}
++
++static void _ipu_di_sync_config(struct ipu_soc *ipu,
++ int di, int wave_gen,
++ int run_count, int run_src,
++ int offset_count, int offset_src,
++ int repeat_count, int cnt_clr_src,
++ int cnt_polarity_gen_en,
++ int cnt_polarity_clr_src,
++ int cnt_polarity_trigger_src,
++ int cnt_up, int cnt_down)
++{
++ u32 reg;
++
++ if ((run_count >= 0x1000) || (offset_count >= 0x1000) || (repeat_count >= 0x1000) ||
++ (cnt_up >= 0x400) || (cnt_down >= 0x400)) {
++ dev_err(ipu->dev, "DI%d counters out of range.\n", di);
++ return;
++ }
++
++ reg = (run_count << 19) | (++run_src << 16) |
++ (offset_count << 3) | ++offset_src;
++ ipu_di_write(ipu, di, reg, DI_SW_GEN0(wave_gen));
++ reg = (cnt_polarity_gen_en << 29) | (++cnt_clr_src << 25) |
++ (++cnt_polarity_trigger_src << 12) | (++cnt_polarity_clr_src << 9);
++ reg |= (cnt_down << 16) | cnt_up;
++ if (repeat_count == 0) {
++ /* Enable auto reload */
++ reg |= 0x10000000;
++ }
++ ipu_di_write(ipu, di, reg, DI_SW_GEN1(wave_gen));
++ reg = ipu_di_read(ipu, di, DI_STP_REP(wave_gen));
++ reg &= ~(0xFFFF << (16 * ((wave_gen - 1) & 0x1)));
++ reg |= repeat_count << (16 * ((wave_gen - 1) & 0x1));
++ ipu_di_write(ipu, di, reg, DI_STP_REP(wave_gen));
++}
++
++static void _ipu_dc_map_link(struct ipu_soc *ipu,
++ int current_map,
++ int base_map_0, int buf_num_0,
++ int base_map_1, int buf_num_1,
++ int base_map_2, int buf_num_2)
++{
++ int ptr_0 = base_map_0 * 3 + buf_num_0;
++ int ptr_1 = base_map_1 * 3 + buf_num_1;
++ int ptr_2 = base_map_2 * 3 + buf_num_2;
++ int ptr;
++ u32 reg;
++ ptr = (ptr_2 << 10) + (ptr_1 << 5) + ptr_0;
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(current_map));
++ reg &= ~(0x1F << ((16 * (current_map & 0x1))));
++ reg |= ptr << ((16 * (current_map & 0x1)));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_PTR(current_map));
++}
++
++static void _ipu_dc_map_config(struct ipu_soc *ipu,
++ int map, int byte_num, int offset, int mask)
++{
++ int ptr = map * 3 + byte_num;
++ u32 reg;
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_VAL(ptr));
++ reg &= ~(0xFFFF << (16 * (ptr & 0x1)));
++ reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_VAL(ptr));
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(map));
++ reg &= ~(0x1F << ((16 * (map & 0x1)) + (5 * byte_num)));
++ reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_PTR(map));
++}
++
++static void _ipu_dc_map_clear(struct ipu_soc *ipu, int map)
++{
++ u32 reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(map));
++ ipu_dc_write(ipu, reg & ~(0xFFFF << (16 * (map & 0x1))),
++ DC_MAP_CONF_PTR(map));
++}
++
++static void _ipu_dc_write_tmpl(struct ipu_soc *ipu,
++ int word, u32 opcode, u32 operand, int map,
++ int wave, int glue, int sync, int stop)
++{
++ u32 reg;
++
++ if (opcode == WRG) {
++ reg = sync;
++ reg |= (glue << 4);
++ reg |= (++wave << 11);
++ reg |= ((operand & 0x1FFFF) << 15);
++ ipu_dc_tmpl_write(ipu, reg, word * 8);
++
++ reg = (operand >> 17);
++ reg |= opcode << 7;
++ reg |= (stop << 9);
++ ipu_dc_tmpl_write(ipu, reg, word * 8 + 4);
++ } else {
++ reg = sync;
++ reg |= (glue << 4);
++ reg |= (++wave << 11);
++ reg |= (++map << 15);
++ reg |= (operand << 20) & 0xFFF00000;
++ ipu_dc_tmpl_write(ipu, reg, word * 8);
++
++ reg = (operand >> 12);
++ reg |= opcode << 4;
++ reg |= (stop << 9);
++ ipu_dc_tmpl_write(ipu, reg, word * 8 + 4);
++ }
++}
++
++static void _ipu_dc_link_event(struct ipu_soc *ipu,
++ int chan, int event, int addr, int priority)
++{
++ u32 reg;
++ u32 address_shift;
++ if (event < DC_EVEN_UGDE0) {
++ reg = ipu_dc_read(ipu, DC_RL_CH(chan, event));
++ reg &= ~(0xFFFF << (16 * (event & 0x1)));
++ reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
++ ipu_dc_write(ipu, reg, DC_RL_CH(chan, event));
++ } else {
++ reg = ipu_dc_read(ipu, DC_UGDE_0((event - DC_EVEN_UGDE0) / 2));
++ if ((event - DC_EVEN_UGDE0) & 0x1) {
++ reg &= ~(0x2FF << 16);
++ reg |= (addr << 16);
++ reg |= priority ? (2 << 24) : 0x0;
++ } else {
++ reg &= ~0xFC00FFFF;
++ if (priority)
++ chan = (chan >> 1) +
++ ((((chan & 0x1) + ((chan & 0x2) >> 1))) | (chan >> 3));
++ else
++ chan = 0x7;
++ address_shift = ((event - DC_EVEN_UGDE0) >> 1) ? 7 : 8;
++ reg |= (addr << address_shift) | (priority << 3) | chan;
++ }
++ ipu_dc_write(ipu, reg, DC_UGDE_0((event - DC_EVEN_UGDE0) / 2));
++ }
++}
++
++/* Y = R *  .299 + G *  .587 + B *  .114;
++ U = R * -.169 + G * -.332 + B *  .500 + 128.;
++ V = R *  .500 + G * -.419 + B * -.081 + 128.;*/
++static const int rgb2ycbcr_coeff[5][3] = {
++ {0x4D, 0x96, 0x1D},
++ {-0x2B, -0x55, 0x80},
++ {0x80, -0x6B, -0x15},
++ {0x0000, 0x0200, 0x0200}, /* B0, B1, B2 */
++ {0x2, 0x2, 0x2}, /* S0, S1, S2 */
++};
++
++/* R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
++ G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
++ B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128)); */
++static const int ycbcr2rgb_coeff[5][3] = {
++ {0x095, 0x000, 0x0CC},
++ {0x095, 0x3CE, 0x398},
++ {0x095, 0x0FF, 0x000},
++ {0x3E42, 0x010A, 0x3DD6}, /*B0,B1,B2 */
++ {0x1, 0x1, 0x1}, /*S0,S1,S2 */
++};
++
++#define mask_a(a) ((u32)(a) & 0x3FF)
++#define mask_b(b) ((u32)(b) & 0x3FFF)
++
++/* Please keep S0, S1 and S2 as 0x2 when using this conversion */
++static int _rgb_to_yuv(int n, int red, int green, int blue)
++{
++ int c;
++ c = red * rgb2ycbcr_coeff[n][0];
++ c += green * rgb2ycbcr_coeff[n][1];
++ c += blue * rgb2ycbcr_coeff[n][2];
++ c /= 16;
++ c += rgb2ycbcr_coeff[3][n] * 4;
++ c += 8;
++ c /= 16;
++ if (c < 0)
++ c = 0;
++ if (c > 255)
++ c = 255;
++ return c;
++}
++
++/*
++ * Row is for BG: RGB2YUV YUV2RGB RGB2RGB YUV2YUV CSC_NONE
++ * Column is for FG: RGB2YUV YUV2RGB RGB2RGB YUV2YUV CSC_NONE
++ */
++static struct dp_csc_param_t dp_csc_array[CSC_NUM][CSC_NUM] = {
++{{DP_COM_CONF_CSC_DEF_BOTH, &rgb2ycbcr_coeff}, {0, 0}, {0, 0}, {DP_COM_CONF_CSC_DEF_BG, &rgb2ycbcr_coeff}, {DP_COM_CONF_CSC_DEF_BG, &rgb2ycbcr_coeff} },
++{{0, 0}, {DP_COM_CONF_CSC_DEF_BOTH, &ycbcr2rgb_coeff}, {DP_COM_CONF_CSC_DEF_BG, &ycbcr2rgb_coeff}, {0, 0}, {DP_COM_CONF_CSC_DEF_BG, &ycbcr2rgb_coeff} },
++{{0, 0}, {DP_COM_CONF_CSC_DEF_FG, &ycbcr2rgb_coeff}, {0, 0}, {0, 0}, {0, 0} },
++{{DP_COM_CONF_CSC_DEF_FG, &rgb2ycbcr_coeff}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
++{{DP_COM_CONF_CSC_DEF_FG, &rgb2ycbcr_coeff}, {DP_COM_CONF_CSC_DEF_FG, &ycbcr2rgb_coeff}, {0, 0}, {0, 0}, {0, 0} }
++};
++
++void __ipu_dp_csc_setup(struct ipu_soc *ipu,
++ int dp, struct dp_csc_param_t dp_csc_param,
++ bool srm_mode_update)
++{
++ u32 reg;
++ const int (*coeff)[5][3];
++
++ if (dp_csc_param.mode >= 0) {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(dp));
++ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
++ reg |= dp_csc_param.mode;
++ ipu_dp_write(ipu, reg, DP_COM_CONF(dp));
++ }
++
++ coeff = dp_csc_param.coeff;
++
++ if (coeff) {
++ ipu_dp_write(ipu, mask_a((*coeff)[0][0]) |
++ (mask_a((*coeff)[0][1]) << 16), DP_CSC_A_0(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[0][2]) |
++ (mask_a((*coeff)[1][0]) << 16), DP_CSC_A_1(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[1][1]) |
++ (mask_a((*coeff)[1][2]) << 16), DP_CSC_A_2(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[2][0]) |
++ (mask_a((*coeff)[2][1]) << 16), DP_CSC_A_3(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[2][2]) |
++ (mask_b((*coeff)[3][0]) << 16) |
++ ((*coeff)[4][0] << 30), DP_CSC_0(dp));
++ ipu_dp_write(ipu, mask_b((*coeff)[3][1]) | ((*coeff)[4][1] << 14) |
++ (mask_b((*coeff)[3][2]) << 16) |
++ ((*coeff)[4][2] << 30), DP_CSC_1(dp));
++ }
++
++ if (srm_mode_update) {
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++}
++
++int _ipu_dp_init(struct ipu_soc *ipu,
++ ipu_channel_t channel, uint32_t in_pixel_fmt,
++ uint32_t out_pixel_fmt)
++{
++ int in_fmt, out_fmt;
++ int dp;
++ int partial = false;
++ uint32_t reg;
++
++ if (channel == MEM_FG_SYNC) {
++ dp = DP_SYNC;
++ partial = true;
++ } else if (channel == MEM_BG_SYNC) {
++ dp = DP_SYNC;
++ partial = false;
++ } else if (channel == MEM_BG_ASYNC0) {
++ dp = DP_ASYNC0;
++ partial = false;
++ } else {
++ return -EINVAL;
++ }
++
++ in_fmt = format_to_colorspace(in_pixel_fmt);
++ out_fmt = format_to_colorspace(out_pixel_fmt);
++
++ if (partial) {
++ if (in_fmt == RGB) {
++ if (out_fmt == RGB)
++ ipu->fg_csc_type = RGB2RGB;
++ else
++ ipu->fg_csc_type = RGB2YUV;
++ } else {
++ if (out_fmt == RGB)
++ ipu->fg_csc_type = YUV2RGB;
++ else
++ ipu->fg_csc_type = YUV2YUV;
++ }
++ } else {
++ if (in_fmt == RGB) {
++ if (out_fmt == RGB)
++ ipu->bg_csc_type = RGB2RGB;
++ else
++ ipu->bg_csc_type = RGB2YUV;
++ } else {
++ if (out_fmt == RGB)
++ ipu->bg_csc_type = YUV2RGB;
++ else
++ ipu->bg_csc_type = YUV2YUV;
++ }
++ }
++
++ /* Transform color key from rgb to yuv if CSC is enabled */
++ reg = ipu_dp_read(ipu, DP_COM_CONF(dp));
++ if (ipu->color_key_4rgb && (reg & DP_COM_CONF_GWCKE) &&
++ (((ipu->fg_csc_type == RGB2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == RGB2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2RGB) && (ipu->bg_csc_type == YUV2RGB)))) {
++ int red, green, blue;
++ int y, u, v;
++ uint32_t color_key = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(dp)) & 0xFFFFFFL;
++
++ dev_dbg(ipu->dev, "_ipu_dp_init color key 0x%x need change to yuv fmt!\n", color_key);
++
++ red = (color_key >> 16) & 0xFF;
++ green = (color_key >> 8) & 0xFF;
++ blue = color_key & 0xFF;
++
++ y = _rgb_to_yuv(0, red, green, blue);
++ u = _rgb_to_yuv(1, red, green, blue);
++ v = _rgb_to_yuv(2, red, green, blue);
++ color_key = (y << 16) | (u << 8) | v;
++
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(dp)) & 0xFF000000L;
++ ipu_dp_write(ipu, reg | color_key, DP_GRAPH_WIND_CTRL(dp));
++ ipu->color_key_4rgb = false;
++
++ dev_dbg(ipu->dev, "_ipu_dp_init color key change to yuv fmt 0x%x!\n", color_key);
++ }
++
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_array[ipu->bg_csc_type][ipu->fg_csc_type], true);
++
++ return 0;
++}
++
++void _ipu_dp_uninit(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int dp;
++ int partial = false;
++
++ if (channel == MEM_FG_SYNC) {
++ dp = DP_SYNC;
++ partial = true;
++ } else if (channel == MEM_BG_SYNC) {
++ dp = DP_SYNC;
++ partial = false;
++ } else if (channel == MEM_BG_ASYNC0) {
++ dp = DP_ASYNC0;
++ partial = false;
++ } else {
++ return;
++ }
++
++ if (partial)
++ ipu->fg_csc_type = CSC_NONE;
++ else
++ ipu->bg_csc_type = CSC_NONE;
++
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_array[ipu->bg_csc_type][ipu->fg_csc_type], false);
++}
++
++void _ipu_dc_init(struct ipu_soc *ipu, int dc_chan, int di, bool interlaced, uint32_t pixel_fmt)
++{
++ u32 reg = 0;
++
++ if ((dc_chan == 1) || (dc_chan == 5)) {
++ if (interlaced) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 0, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 0, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 0, 1);
++ } else {
++ if (di) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 2, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 3, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 1, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 9, 5);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 8, 5);
++ }
++ } else {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 5, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 6, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 12, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 10, 5);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 11, 5);
++ }
++ }
++ }
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR, 0, 0);
++
++ reg = 0x2;
++ reg |= DC_DISP_ID_SYNC(di) << DC_WR_CH_CONF_PROG_DISP_ID_OFFSET;
++ reg |= di << 2;
++ if (interlaced)
++ reg |= DC_WR_CH_CONF_FIELD_MODE;
++ } else if ((dc_chan == 8) || (dc_chan == 9)) {
++ /* async channels */
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_0, 0x64, 1);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_1, 0x64, 1);
++
++ reg = 0x3;
++ reg |= DC_DISP_ID_SERIAL << DC_WR_CH_CONF_PROG_DISP_ID_OFFSET;
++ }
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ ipu_dc_write(ipu, 0x00000000, DC_WR_CH_ADDR(dc_chan));
++
++ ipu_dc_write(ipu, 0x00000084, DC_GEN);
++}
++
++void _ipu_dc_uninit(struct ipu_soc *ipu, int dc_chan)
++{
++ if ((dc_chan == 1) || (dc_chan == 5)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 0, 0);
++ } else if ((dc_chan == 8) || (dc_chan == 9)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_R_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_R_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_R_1, 0, 0);
++ }
++}
++
++int _ipu_disp_chan_is_interlaced(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ if (channel == MEM_DC_SYNC)
++ return !!(ipu_dc_read(ipu, DC_WR_CH_CONF_1) &
++ DC_WR_CH_CONF_FIELD_MODE);
++ else if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))
++ return !!(ipu_dc_read(ipu, DC_WR_CH_CONF_5) &
++ DC_WR_CH_CONF_FIELD_MODE);
++ return 0;
++}
++
++void _ipu_dp_dc_enable(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int di;
++ uint32_t reg;
++ uint32_t dc_chan;
++ int irq = 0;
++
++ if (channel == MEM_FG_SYNC)
++ irq = IPU_IRQ_DP_SF_END;
++ else if (channel == MEM_DC_SYNC)
++ dc_chan = 1;
++ else if (channel == MEM_BG_SYNC)
++ dc_chan = 5;
++ else
++ return;
++
++ if (channel == MEM_FG_SYNC) {
++ /* Enable FG channel */
++ reg = ipu_dp_read(ipu, DP_COM_CONF(DP_SYNC));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_FG_EN, DP_COM_CONF(DP_SYNC));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ return;
++ } else if (channel == MEM_BG_SYNC) {
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++
++ di = ipu->dc_di_assignment[dc_chan];
++
++ /* Make sure other DC sync channel is not assigned same DI */
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(6 - dc_chan));
++ if ((di << 2) == (reg & DC_WR_CH_CONF_PROG_DI_ID)) {
++ reg &= ~DC_WR_CH_CONF_PROG_DI_ID;
++ reg |= di ? 0 : DC_WR_CH_CONF_PROG_DI_ID;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(6 - dc_chan));
++ }
++
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ reg |= 4 << DC_WR_CH_CONF_PROG_TYPE_OFFSET;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ clk_prepare_enable(ipu->pixel_clk[di]);
++}
++
++static irqreturn_t dc_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_soc *ipu = dev_id;
++ struct completion *comp = &ipu->dc_comp;
++ uint32_t reg;
++ uint32_t dc_chan;
++
++ if (irq == IPU_IRQ_DC_FC_1)
++ dc_chan = 1;
++ else
++ dc_chan = 5;
++
++ if (!ipu->dc_swap) {
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ reg = ipu_cm_read(ipu, IPU_DISP_GEN);
++ if (ipu->dc_di_assignment[dc_chan])
++ reg &= ~DI1_COUNTER_RELEASE;
++ else
++ reg &= ~DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, reg, IPU_DISP_GEN);
++ }
++
++ complete(comp);
++ return IRQ_HANDLED;
++}
++
++void _ipu_dp_dc_disable(struct ipu_soc *ipu, ipu_channel_t channel, bool swap)
++{
++ int ret;
++ uint32_t reg;
++ uint32_t csc;
++ uint32_t dc_chan;
++ int irq = 0;
++ int timeout = 50;
++
++ ipu->dc_swap = swap;
++
++ if (channel == MEM_DC_SYNC) {
++ dc_chan = 1;
++ irq = IPU_IRQ_DC_FC_1;
++ } else if (channel == MEM_BG_SYNC) {
++ dc_chan = 5;
++ irq = IPU_IRQ_DP_SF_END;
++ } else if (channel == MEM_FG_SYNC) {
++ /* Disable FG channel */
++ dc_chan = 5;
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(DP_SYNC));
++ csc = reg & DP_COM_CONF_CSC_DEF_MASK;
++ if (csc == DP_COM_CONF_CSC_DEF_FG)
++ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
++
++ reg &= ~DP_COM_CONF_FG_EN;
++ ipu_dp_write(ipu, reg, DP_COM_CONF(DP_SYNC));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ if (ipu_is_channel_busy(ipu, MEM_BG_SYNC)) {
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_DP_SF_END),
++ IPUIRQ_2_STATREG(IPU_IRQ_DP_SF_END));
++ while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_DP_SF_END)) &
++ IPUIRQ_2_MASK(IPU_IRQ_DP_SF_END)) == 0) {
++ msleep(2);
++ timeout -= 2;
++ if (timeout <= 0)
++ break;
++ }
++ }
++ return;
++ } else {
++ return;
++ }
++
++ init_completion(&ipu->dc_comp);
++ ret = ipu_request_irq(ipu, irq, dc_irq_handler, 0, NULL, ipu);
++ if (ret < 0) {
++ dev_err(ipu->dev, "DC irq %d in use\n", irq);
++ return;
++ }
++ ret = wait_for_completion_timeout(&ipu->dc_comp, msecs_to_jiffies(50));
++ ipu_free_irq(ipu, irq, ipu);
++ dev_dbg(ipu->dev, "DC stop timeout - %d * 10ms\n", 5 - ret);
++
++ if (ipu->dc_swap) {
++ /* Swap DC channel 1 and 5 settings, and disable old dc chan */
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(6 - dc_chan));
++ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
++ reg ^= DC_WR_CH_CONF_PROG_DI_ID;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++ }
++}
++
++void _ipu_init_dc_mappings(struct ipu_soc *ipu)
++{
++ /* IPU_PIX_FMT_RGB24 */
++ _ipu_dc_map_clear(ipu, 0);
++ _ipu_dc_map_config(ipu, 0, 0, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 0, 1, 15, 0xFF);
++ _ipu_dc_map_config(ipu, 0, 2, 23, 0xFF);
++
++ /* IPU_PIX_FMT_RGB666 */
++ _ipu_dc_map_clear(ipu, 1);
++ _ipu_dc_map_config(ipu, 1, 0, 5, 0xFC);
++ _ipu_dc_map_config(ipu, 1, 1, 11, 0xFC);
++ _ipu_dc_map_config(ipu, 1, 2, 17, 0xFC);
++
++ /* IPU_PIX_FMT_YUV444 */
++ _ipu_dc_map_clear(ipu, 2);
++ _ipu_dc_map_config(ipu, 2, 0, 15, 0xFF);
++ _ipu_dc_map_config(ipu, 2, 1, 23, 0xFF);
++ _ipu_dc_map_config(ipu, 2, 2, 7, 0xFF);
++
++ /* IPU_PIX_FMT_RGB565 */
++ _ipu_dc_map_clear(ipu, 3);
++ _ipu_dc_map_config(ipu, 3, 0, 4, 0xF8);
++ _ipu_dc_map_config(ipu, 3, 1, 10, 0xFC);
++ _ipu_dc_map_config(ipu, 3, 2, 15, 0xF8);
++
++ /* IPU_PIX_FMT_LVDS666 */
++ _ipu_dc_map_clear(ipu, 4);
++ _ipu_dc_map_config(ipu, 4, 0, 5, 0xFC);
++ _ipu_dc_map_config(ipu, 4, 1, 13, 0xFC);
++ _ipu_dc_map_config(ipu, 4, 2, 21, 0xFC);
++
++ /* IPU_PIX_FMT_VYUY 16bit width */
++ _ipu_dc_map_clear(ipu, 5);
++ _ipu_dc_map_config(ipu, 5, 0, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 5, 1, 0, 0x0);
++ _ipu_dc_map_config(ipu, 5, 2, 15, 0xFF);
++ _ipu_dc_map_clear(ipu, 6);
++ _ipu_dc_map_config(ipu, 6, 0, 0, 0x0);
++ _ipu_dc_map_config(ipu, 6, 1, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 6, 2, 15, 0xFF);
++
++ /* IPU_PIX_FMT_UYUV 16bit width */
++ _ipu_dc_map_clear(ipu, 7);
++ _ipu_dc_map_link(ipu, 7, 6, 0, 6, 1, 6, 2);
++ _ipu_dc_map_clear(ipu, 8);
++ _ipu_dc_map_link(ipu, 8, 5, 0, 5, 1, 5, 2);
++
++ /* IPU_PIX_FMT_YUYV 16bit width */
++ _ipu_dc_map_clear(ipu, 9);
++ _ipu_dc_map_link(ipu, 9, 5, 2, 5, 1, 5, 0);
++ _ipu_dc_map_clear(ipu, 10);
++ _ipu_dc_map_link(ipu, 10, 5, 1, 5, 2, 5, 0);
++
++ /* IPU_PIX_FMT_YVYU 16bit width */
++ _ipu_dc_map_clear(ipu, 11);
++ _ipu_dc_map_link(ipu, 11, 5, 1, 5, 2, 5, 0);
++ _ipu_dc_map_clear(ipu, 12);
++ _ipu_dc_map_link(ipu, 12, 5, 2, 5, 1, 5, 0);
++
++ /* IPU_PIX_FMT_GBR24 */
++ /* IPU_PIX_FMT_VYU444 */
++ _ipu_dc_map_clear(ipu, 13);
++ _ipu_dc_map_link(ipu, 13, 0, 2, 0, 0, 0, 1);
++
++ /* IPU_PIX_FMT_BGR24 */
++ _ipu_dc_map_clear(ipu, 14);
++ _ipu_dc_map_link(ipu, 14, 0, 2, 0, 1, 0, 0);
++}
++
++int _ipu_pixfmt_to_map(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_RGB24:
++ return 0;
++ case IPU_PIX_FMT_RGB666:
++ return 1;
++ case IPU_PIX_FMT_YUV444:
++ return 2;
++ case IPU_PIX_FMT_RGB565:
++ return 3;
++ case IPU_PIX_FMT_LVDS666:
++ return 4;
++ case IPU_PIX_FMT_VYUY:
++ return 6;
++ case IPU_PIX_FMT_UYVY:
++ return 8;
++ case IPU_PIX_FMT_YUYV:
++ return 10;
++ case IPU_PIX_FMT_YVYU:
++ return 12;
++ case IPU_PIX_FMT_GBR24:
++ case IPU_PIX_FMT_VYU444:
++ return 13;
++ case IPU_PIX_FMT_BGR24:
++ return 14;
++ }
++
++ return -1;
++}
++
++/*!
++ * This function sets the colorspace for of dp.
++ * modes.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param param If it's not NULL, update the csc table
++ * with this parameter.
++ *
++ * @return N/A
++ */
++void _ipu_dp_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
++{
++ int dp;
++ struct dp_csc_param_t dp_csc_param;
++
++ if (channel == MEM_FG_SYNC)
++ dp = DP_SYNC;
++ else if (channel == MEM_BG_SYNC)
++ dp = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0)
++ dp = DP_ASYNC0;
++ else
++ return;
++
++ dp_csc_param.mode = -1;
++ dp_csc_param.coeff = param;
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_param, true);
++}
++
++void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
++{
++ _ipu_dp_set_csc_coefficients(ipu, channel, param);
++}
++EXPORT_SYMBOL(ipu_set_csc_coefficients);
++
++/*!
++ * This function is called to adapt synchronous LCD panel to IPU restriction.
++ *
++ */
++void adapt_panel_to_ipu_restricitions(struct ipu_soc *ipu, uint16_t *v_start_width,
++ uint16_t *v_sync_width,
++ uint16_t *v_end_width)
++{
++ if (*v_end_width < 2) {
++ uint16_t diff = 2 - *v_end_width;
++ if (*v_start_width >= diff) {
++ *v_end_width = 2;
++ *v_start_width = *v_start_width - diff;
++ } else if (*v_sync_width > diff) {
++ *v_end_width = 2;
++ *v_sync_width = *v_sync_width - diff;
++ } else
++ dev_err(ipu->dev, "WARNING: try to adapt timming, but failed\n");
++ dev_err(ipu->dev, "WARNING: adapt panel end blank lines\n");
++ }
++}
++
++/*!
++ * This function is called to initialize a synchronous LCD panel.
++ *
++ * @param ipu ipu handler
++ * @param disp The DI the panel is attached to.
++ *
++ * @param pixel_clk Desired pixel clock frequency in Hz.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width The width of panel in pixels.
++ *
++ * @param height The height of panel in pixels.
++ *
++ * @param hStartWidth The number of pixel clocks between the HSYNC
++ * signal pulse and the start of valid data.
++ *
++ * @param hSyncWidth The width of the HSYNC signal in units of pixel
++ * clocks.
++ *
++ * @param hEndWidth The number of pixel clocks between the end of
++ * valid data and the HSYNC signal for next line.
++ *
++ * @param vStartWidth The number of lines between the VSYNC
++ * signal pulse and the start of valid data.
++ *
++ * @param vSyncWidth The width of the VSYNC signal in units of lines
++ *
++ * @param vEndWidth The number of lines between the end of valid
++ * data and the VSYNC signal for next frame.
++ *
++ * @param sig Bitfield of signal polarities for LCD interface.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_init_sync_panel(struct ipu_soc *ipu, int disp, uint32_t pixel_clk,
++ uint16_t width, uint16_t height,
++ uint32_t pixel_fmt,
++ uint16_t h_start_width, uint16_t h_sync_width,
++ uint16_t h_end_width, uint16_t v_start_width,
++ uint16_t v_sync_width, uint16_t v_end_width,
++ uint32_t v_to_h_sync, ipu_di_signal_cfg_t sig)
++{
++ uint32_t field0_offset = 0;
++ uint32_t field1_offset;
++ uint32_t reg;
++ uint32_t di_gen, vsync_cnt;
++ uint32_t div, rounded_pixel_clk;
++ uint32_t h_total, v_total;
++ int map;
++ int ret;
++ struct clk *ldb_di0_clk, *ldb_di1_clk;
++ struct clk *di_parent;
++
++ dev_dbg(ipu->dev, "panel size = %d x %d\n", width, height);
++
++ if ((v_sync_width == 0) || (h_sync_width == 0))
++ return -EINVAL;
++
++ adapt_panel_to_ipu_restricitions(ipu, &v_start_width, &v_sync_width, &v_end_width);
++ h_total = width + h_sync_width + h_start_width + h_end_width;
++ v_total = height + v_sync_width + v_start_width + v_end_width;
++
++ /* Init clocking */
++ dev_dbg(ipu->dev, "pixel clk = %d\n", pixel_clk);
++
++ di_parent = clk_get_parent(ipu->di_clk_sel[disp]);
++ if (!di_parent) {
++ dev_err(ipu->dev, "get di clk parent fail\n");
++ return -EINVAL;
++ }
++ ldb_di0_clk = clk_get(ipu->dev, "ldb_di0");
++ if (IS_ERR(ldb_di0_clk)) {
++ dev_err(ipu->dev, "clk_get di0 failed");
++ return PTR_ERR(ldb_di0_clk);
++ }
++ ldb_di1_clk = clk_get(ipu->dev, "ldb_di1");
++ if (IS_ERR(ldb_di1_clk)) {
++ dev_err(ipu->dev, "clk_get di1 failed");
++ return PTR_ERR(ldb_di1_clk);
++ }
++
++ if (ldb_di0_clk == di_parent || ldb_di1_clk == di_parent) {
++ /* if di clk parent is tve/ldb, then keep it;*/
++ dev_dbg(ipu->dev, "use special clk parent\n");
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp], ipu->di_clk[disp]);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk error:%d\n", ret);
++ return ret;
++ }
++ clk_put(ldb_di0_clk);
++ clk_put(ldb_di1_clk);
++ } else {
++ /* try ipu clk first*/
++ dev_dbg(ipu->dev, "try ipu internal clk\n");
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk error:%d\n", ret);
++ return ret;
++ }
++ rounded_pixel_clk = clk_round_rate(ipu->pixel_clk[disp], pixel_clk);
++ dev_dbg(ipu->dev, "rounded pix clk:%d\n", rounded_pixel_clk);
++ /*
++ * we will only use 1/2 fraction for ipu clk,
++ * so if the clk rate is not fit, try ext clk.
++ */
++ if (!sig.int_clk &&
++ ((rounded_pixel_clk >= pixel_clk + pixel_clk/200) ||
++ (rounded_pixel_clk <= pixel_clk - pixel_clk/200))) {
++ dev_dbg(ipu->dev, "try ipu ext di clk\n");
++
++ rounded_pixel_clk =
++ clk_round_rate(ipu->di_clk[disp], pixel_clk);
++ ret = clk_set_rate(ipu->di_clk[disp],
++ rounded_pixel_clk);
++ if (ret) {
++ dev_err(ipu->dev,
++ "set di clk rate error:%d\n", ret);
++ return ret;
++ }
++ dev_dbg(ipu->dev, "di clk:%d\n", rounded_pixel_clk);
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp],
++ ipu->di_clk[disp]);
++ if (ret) {
++ dev_err(ipu->dev,
++ "set pixel clk parent error:%d\n", ret);
++ return ret;
++ }
++ }
++ }
++ rounded_pixel_clk = clk_round_rate(ipu->pixel_clk[disp], pixel_clk);
++ dev_dbg(ipu->dev, "round pixel clk:%d\n", rounded_pixel_clk);
++ ret = clk_set_rate(ipu->pixel_clk[disp], rounded_pixel_clk);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk rate error:%d\n", ret);
++ return ret;
++ }
++ msleep(5);
++ /* Get integer portion of divider */
++ div = clk_get_rate(clk_get_parent(ipu->pixel_clk_sel[disp])) / rounded_pixel_clk;
++ dev_dbg(ipu->dev, "div:%d\n", div);
++ if (!div) {
++ dev_err(ipu->dev, "invalid pixel clk div = 0\n");
++ return -EINVAL;
++ }
++
++
++ mutex_lock(&ipu->mutex_lock);
++
++ _ipu_di_data_wave_config(ipu, disp, SYNC_WAVE, div - 1, div - 1);
++ _ipu_di_data_pin_config(ipu, disp, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
++
++ map = _ipu_pixfmt_to_map(pixel_fmt);
++ if (map < 0) {
++ dev_dbg(ipu->dev, "IPU_DISP: No MAP\n");
++ mutex_unlock(&ipu->mutex_lock);
++ return -EINVAL;
++ }
++
++ /*clear DI*/
++ di_gen = ipu_di_read(ipu, disp, DI_GENERAL);
++ di_gen &= (0x3 << 20);
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ if (sig.interlaced) {
++ if (g_ipu_hw_rev >= IPU_V3DEX) {
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 1, /* counter */
++ h_total/2 - 1, /* run count */
++ DI_SYNC_CLK, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Field 1 VSYNC waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 2, /* counter */
++ h_total - 1, /* run count */
++ DI_SYNC_CLK, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 2*div /* COUNT DOWN */
++ );
++
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 3, /* counter */
++ v_total*2 - 1, /* run count */
++ DI_SYNC_INT_HSYNC, /* run_resolution */
++ 1, /* offset */
++ DI_SYNC_INT_HSYNC, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 2*div /* COUNT DOWN */
++ );
++
++ /* Active Field ? */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 4, /* counter */
++ v_total/2 - 1, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ v_start_width, /* offset */
++ DI_SYNC_HSYNC, /* offset resolution */
++ 2, /* repeat count */
++ DI_SYNC_VSYNC, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Active Line */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 5, /* counter */
++ 0, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ height/2, /* repeat count */
++ 4, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Field 0 VSYNC waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 6, /* counter */
++ v_total - 1, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* DC VSYNC waveform */
++ vsync_cnt = 7;
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 7, /* counter */
++ v_total/2 - 1, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ 9, /* offset */
++ DI_SYNC_HSYNC, /* offset resolution */
++ 2, /* repeat count */
++ DI_SYNC_VSYNC, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* active pixel waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 8, /* counter */
++ 0, /* run count */
++ DI_SYNC_CLK, /* run_resolution */
++ h_start_width, /* offset */
++ DI_SYNC_CLK, /* offset resolution */
++ width, /* repeat count */
++ 5, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Second VSYNC */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 9, /* counter */
++ v_total - 1, /* run count */
++ DI_SYNC_INT_HSYNC, /* run_resolution */
++ v_total/2, /* offset */
++ DI_SYNC_INT_HSYNC, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_HSYNC, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 2*div /* COUNT DOWN */
++ );
++
++ /* set gentime select and tag sel */
++ reg = ipu_di_read(ipu, disp, DI_SW_GEN1(9));
++ reg &= 0x1FFFFFFF;
++ reg |= (3-1)<<29 | 0x00008000;
++ ipu_di_write(ipu, disp, reg, DI_SW_GEN1(9));
++
++ ipu_di_write(ipu, disp, v_total / 2 - 1, DI_SCR_CONF);
++
++ /* set y_sel = 1 */
++ di_gen |= 0x10000000;
++ di_gen |= DI_GEN_POLARITY_5;
++ di_gen |= DI_GEN_POLARITY_8;
++ } else {
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 1, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ field1_offset = v_sync_width + v_start_width + height / 2 +
++ v_end_width;
++ if (sig.odd_field_first) {
++ field0_offset = field1_offset - 1;
++ field1_offset = 0;
++ }
++ v_total += v_start_width + v_end_width;
++
++ /* Field 1 VSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 2, v_total - 1, 1,
++ field0_offset,
++ field0_offset ? 1 : DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, DI_SYNC_NONE, 0, 4);
++
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 3, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, DI_SYNC_NONE, 0, 4);
++
++ /* Active Field ? */
++ _ipu_di_sync_config(ipu, disp, 4,
++ field0_offset ?
++ field0_offset : field1_offset - 2,
++ 1, v_start_width + v_sync_width, 1, 2, 2,
++ 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0);
++
++ /* Active Line */
++ _ipu_di_sync_config(ipu, disp, 5, 0, 1,
++ 0, DI_SYNC_NONE,
++ height / 2, 4, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* Field 0 VSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 6, v_total - 1, 1,
++ 0, DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* DC VSYNC waveform */
++ vsync_cnt = 7;
++ _ipu_di_sync_config(ipu, disp, 7, 0, 1,
++ field1_offset,
++ field1_offset ? 1 : DI_SYNC_NONE,
++ 1, 2, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0);
++
++ /* active pixel waveform */
++ _ipu_di_sync_config(ipu, disp, 8, 0, DI_SYNC_CLK,
++ h_sync_width + h_start_width, DI_SYNC_CLK,
++ width, 5, 0, DI_SYNC_NONE, DI_SYNC_NONE,
++ 0, 0);
++
++ /* ??? */
++ _ipu_di_sync_config(ipu, disp, 9, v_total - 1, 2,
++ 0, DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 6, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ reg = ipu_di_read(ipu, disp, DI_SW_GEN1(9));
++ reg |= 0x8000;
++ ipu_di_write(ipu, disp, reg, DI_SW_GEN1(9));
++
++ ipu_di_write(ipu, disp, v_sync_width + v_start_width +
++ v_end_width + height / 2 - 1, DI_SCR_CONF);
++ }
++
++ /* Init template microcode */
++ _ipu_dc_write_tmpl(ipu, 0, WROD(0), 0, map, SYNC_WAVE, 0, 8, 1);
++
++ if (sig.Hsync_pol)
++ di_gen |= DI_GEN_POLARITY_3;
++ if (sig.Vsync_pol)
++ di_gen |= DI_GEN_POLARITY_2;
++ } else {
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 1, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* Setup external (delayed) HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_HSYNC, h_total - 1,
++ DI_SYNC_CLK, div * v_to_h_sync, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 1, DI_SYNC_NONE,
++ DI_SYNC_CLK, 0, h_sync_width * 2);
++ /* Setup VSYNC waveform */
++ vsync_cnt = DI_SYNC_VSYNC;
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_VSYNC, v_total - 1,
++ DI_SYNC_INT_HSYNC, 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, 1, DI_SYNC_NONE,
++ DI_SYNC_INT_HSYNC, 0, v_sync_width * 2);
++ ipu_di_write(ipu, disp, v_total - 1, DI_SCR_CONF);
++
++ /* Setup active data waveform to sync with DC */
++ _ipu_di_sync_config(ipu, disp, 4, 0, DI_SYNC_HSYNC,
++ v_sync_width + v_start_width, DI_SYNC_HSYNC, height,
++ DI_SYNC_VSYNC, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++ _ipu_di_sync_config(ipu, disp, 5, 0, DI_SYNC_CLK,
++ h_sync_width + h_start_width, DI_SYNC_CLK,
++ width, 4, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0,
++ 0);
++
++ /* set VGA delayed hsync/vsync no matter VGA enabled */
++ if (disp) {
++ /* couter 7 for VGA delay HSYNC */
++ _ipu_di_sync_config(ipu, disp, 7,
++ h_total - 1, DI_SYNC_CLK,
++ 18, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE,
++ 1, DI_SYNC_NONE, DI_SYNC_CLK,
++ 0, h_sync_width * 2);
++
++ /* couter 8 for VGA delay VSYNC */
++ _ipu_di_sync_config(ipu, disp, 8,
++ v_total - 1, DI_SYNC_INT_HSYNC,
++ 1, DI_SYNC_INT_HSYNC,
++ 0, DI_SYNC_NONE,
++ 1, DI_SYNC_NONE, DI_SYNC_INT_HSYNC,
++ 0, v_sync_width * 2);
++ }
++
++ /* reset all unused counters */
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(6));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(6));
++ if (!disp) {
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(7));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(7));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(7));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(8));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(8));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(8));
++ }
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(9));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(9));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(9));
++
++ reg = ipu_di_read(ipu, disp, DI_STP_REP(6));
++ reg &= 0x0000FFFF;
++ ipu_di_write(ipu, disp, reg, DI_STP_REP(6));
++
++ /* Init template microcode */
++ if (disp) {
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_write_tmpl(ipu, 8, WROD(0), 0, (map - 1), SYNC_WAVE, 0, 5, 1);
++ _ipu_dc_write_tmpl(ipu, 9, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
++ /* configure user events according to DISP NUM */
++ ipu_dc_write(ipu, (width - 1), DC_UGDE_3(disp));
++ }
++ _ipu_dc_write_tmpl(ipu, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
++ _ipu_dc_write_tmpl(ipu, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
++ _ipu_dc_write_tmpl(ipu, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
++ _ipu_dc_write_tmpl(ipu, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
++
++ } else {
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_write_tmpl(ipu, 10, WROD(0), 0, (map - 1), SYNC_WAVE, 0, 5, 1);
++ _ipu_dc_write_tmpl(ipu, 11, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
++ /* configure user events according to DISP NUM */
++ ipu_dc_write(ipu, width - 1, DC_UGDE_3(disp));
++ }
++ _ipu_dc_write_tmpl(ipu, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
++ _ipu_dc_write_tmpl(ipu, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
++ _ipu_dc_write_tmpl(ipu, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1);
++ _ipu_dc_write_tmpl(ipu, 12, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
++ }
++
++ if (sig.Hsync_pol) {
++ di_gen |= DI_GEN_POLARITY_2;
++ if (disp)
++ di_gen |= DI_GEN_POLARITY_7;
++ }
++ if (sig.Vsync_pol) {
++ di_gen |= DI_GEN_POLARITY_3;
++ if (disp)
++ di_gen |= DI_GEN_POLARITY_8;
++ }
++ }
++ /* changinc DISP_CLK polarity: it can be wrong for some applications */
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY))
++ di_gen |= 0x00020000;
++
++ if (!sig.clk_pol)
++ di_gen |= DI_GEN_POLARITY_DISP_CLK;
++
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ ipu_di_write(ipu, disp, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) |
++ 0x00000002, DI_SYNC_AS_GEN);
++ reg = ipu_di_read(ipu, disp, DI_POL);
++ reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
++ if (sig.enable_pol)
++ reg |= DI_POL_DRDY_POLARITY_15;
++ if (sig.data_pol)
++ reg |= DI_POL_DRDY_DATA_POLARITY;
++ ipu_di_write(ipu, disp, reg, DI_POL);
++
++ ipu_dc_write(ipu, width, DC_DISP_CONF2(DC_DISP_ID_SYNC(disp)));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_sync_panel);
++
++void ipu_uninit_sync_panel(struct ipu_soc *ipu, int disp)
++{
++ uint32_t reg;
++ uint32_t di_gen;
++
++ if ((disp != 0) || (disp != 1))
++ return;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ di_gen = ipu_di_read(ipu, disp, DI_GENERAL);
++ di_gen |= 0x3ff | DI_GEN_POLARITY_DISP_CLK;
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ reg = ipu_di_read(ipu, disp, DI_POL);
++ reg |= 0x3ffffff;
++ ipu_di_write(ipu, disp, reg, DI_POL);
++
++ mutex_unlock(&ipu->mutex_lock);
++}
++EXPORT_SYMBOL(ipu_uninit_sync_panel);
++
++int ipu_init_async_panel(struct ipu_soc *ipu, int disp, int type, uint32_t cycle_time,
++ uint32_t pixel_fmt, ipu_adc_sig_cfg_t sig)
++{
++ int map;
++ u32 ser_conf = 0;
++ u32 div;
++ u32 di_clk = clk_get_rate(ipu->ipu_clk);
++
++ /* round up cycle_time, then calcalate the divider using scaled math */
++ cycle_time += (1000000000UL / di_clk) - 1;
++ div = (cycle_time * (di_clk / 256UL)) / (1000000000UL / 256UL);
++
++ map = _ipu_pixfmt_to_map(pixel_fmt);
++ if (map < 0)
++ return -EINVAL;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (type == IPU_PANEL_SERIAL) {
++ ipu_di_write(ipu, disp, (div << 24) | ((sig.ifc_width - 1) << 4),
++ DI_DW_GEN(ASYNC_SER_WAVE));
++
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_CS,
++ 0, 0, (div * 2) + 1);
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_SER_CLK,
++ 1, div, div * 2);
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_SER_RS,
++ 2, 0, 0);
++
++ _ipu_dc_write_tmpl(ipu, 0x64, WROD(0), 0, map, ASYNC_SER_WAVE, 0, 0, 1);
++
++ /* Configure DC for serial panel */
++ ipu_dc_write(ipu, 0x14, DC_DISP_CONF1(DC_DISP_ID_SERIAL));
++
++ if (sig.clk_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_CLK_POL;
++ if (sig.data_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_DATA_POL;
++ if (sig.rs_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_RS_POL;
++ if (sig.cs_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_CS_POL;
++ ipu_di_write(ipu, disp, ser_conf, DI_SER_CONF);
++ }
++
++ mutex_unlock(&ipu->mutex_lock);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_async_panel);
++
++/*!
++ * This function sets the foreground and background plane global alpha blending
++ * modes. This function also sets the DP graphic plane according to the
++ * parameter of IPUv3 DP channel.
++ *
++ * @param ipu ipu handler
++ * @param channel IPUv3 DP channel
++ *
++ * @param enable Boolean to enable or disable global alpha
++ * blending. If disabled, local blending is used.
++ *
++ * @param alpha Global alpha value.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_disp_set_global_alpha(struct ipu_soc *ipu, ipu_channel_t channel,
++ bool enable, uint8_t alpha)
++{
++ uint32_t reg;
++ uint32_t flow;
++ bool bg_chan;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_BG_ASYNC0 ||
++ channel == MEM_BG_ASYNC1)
++ bg_chan = true;
++ else
++ bg_chan = false;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (bg_chan) {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWSEL, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWSEL, DP_COM_CONF(flow));
++ }
++
++ if (enable) {
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(flow)) & 0x00FFFFFFL;
++ ipu_dp_write(ipu, reg | ((uint32_t) alpha << 24),
++ DP_GRAPH_WIND_CTRL(flow));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWAM, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWAM, DP_COM_CONF(flow));
++ }
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_global_alpha);
++
++/*!
++ * This function sets the transparent color key for SDC graphic plane.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param enable Boolean to enable or disable color key
++ *
++ * @param colorKey 24-bit RGB color for transparent color key.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_disp_set_color_key(struct ipu_soc *ipu, ipu_channel_t channel,
++ bool enable, uint32_t color_key)
++{
++ uint32_t reg, flow;
++ int y, u, v;
++ int red, green, blue;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu->color_key_4rgb = true;
++ /* Transform color key from rgb to yuv if CSC is enabled */
++ if (((ipu->fg_csc_type == RGB2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == RGB2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2RGB) && (ipu->bg_csc_type == YUV2RGB))) {
++
++ dev_dbg(ipu->dev, "color key 0x%x need change to yuv fmt\n", color_key);
++
++ red = (color_key >> 16) & 0xFF;
++ green = (color_key >> 8) & 0xFF;
++ blue = color_key & 0xFF;
++
++ y = _rgb_to_yuv(0, red, green, blue);
++ u = _rgb_to_yuv(1, red, green, blue);
++ v = _rgb_to_yuv(2, red, green, blue);
++ color_key = (y << 16) | (u << 8) | v;
++
++ ipu->color_key_4rgb = false;
++
++ dev_dbg(ipu->dev, "color key change to yuv fmt 0x%x\n", color_key);
++ }
++
++ if (enable) {
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(flow)) & 0xFF000000L;
++ ipu_dp_write(ipu, reg | color_key, DP_GRAPH_WIND_CTRL(flow));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWCKE, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWCKE, DP_COM_CONF(flow));
++ }
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_color_key);
++
++/*!
++ * This function sets the gamma correction for DP output.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param enable Boolean to enable or disable gamma correction.
++ *
++ * @param constk Gamma piecewise linear approximation constk coeff.
++ *
++ * @param slopek Gamma piecewise linear approximation slopek coeff.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_disp_set_gamma_correction(struct ipu_soc *ipu, ipu_channel_t channel, bool enable, int constk[], int slopek[])
++{
++ uint32_t reg, flow, i;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ for (i = 0; i < 8; i++)
++ ipu_dp_write(ipu, (constk[2*i] & 0x1ff) | ((constk[2*i+1] & 0x1ff) << 16), DP_GAMMA_C(flow, i));
++ for (i = 0; i < 4; i++)
++ ipu_dp_write(ipu, (slopek[4*i] & 0xff) | ((slopek[4*i+1] & 0xff) << 8) |
++ ((slopek[4*i+2] & 0xff) << 16) | ((slopek[4*i+3] & 0xff) << 24), DP_GAMMA_S(flow, i));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ if (enable) {
++ if ((ipu->bg_csc_type == RGB2YUV) || (ipu->bg_csc_type == YUV2YUV))
++ reg |= DP_COM_CONF_GAMMA_YUV_EN;
++ else
++ reg &= ~DP_COM_CONF_GAMMA_YUV_EN;
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GAMMA_EN, DP_COM_CONF(flow));
++ } else
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GAMMA_EN, DP_COM_CONF(flow));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_gamma_correction);
++
++/*!
++ * This function sets the window position of the foreground or background plane.
++ * modes.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param x_pos The X coordinate position to place window at.
++ * The position is relative to the top left corner.
++ *
++ * @param y_pos The Y coordinate position to place window at.
++ * The position is relative to the top left corner.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t _ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos)
++{
++ u32 reg;
++ uint32_t flow = 0;
++ uint32_t dp_srm_shift;
++
++ if ((channel == MEM_FG_SYNC) || (channel == MEM_BG_SYNC)) {
++ flow = DP_SYNC;
++ dp_srm_shift = 3;
++ } else if (channel == MEM_FG_ASYNC0) {
++ flow = DP_ASYNC0;
++ dp_srm_shift = 5;
++ } else if (channel == MEM_FG_ASYNC1) {
++ flow = DP_ASYNC1;
++ dp_srm_shift = 7;
++ } else
++ return -EINVAL;
++
++ ipu_dp_write(ipu, (x_pos << 16) | y_pos, DP_FG_POS(flow));
++
++ if (ipu_is_channel_busy(ipu, channel)) {
++ /* controled by FSU if channel enabled */
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) & (~(0x3 << dp_srm_shift));
++ reg |= (0x1 << dp_srm_shift);
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ } else {
++ /* disable auto swap, controled by MCU if channel disabled */
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) & (~(0x3 << dp_srm_shift));
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++
++ return 0;
++}
++
++int32_t ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos)
++{
++ int ret;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ret = _ipu_disp_set_window_pos(ipu, channel, x_pos, y_pos);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_disp_set_window_pos);
++
++int32_t _ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos)
++{
++ u32 reg;
++ uint32_t flow = 0;
++
++ if (channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ reg = ipu_dp_read(ipu, DP_FG_POS(flow));
++
++ *x_pos = (reg >> 16) & 0x7FF;
++ *y_pos = reg & 0x7FF;
++
++ return 0;
++}
++int32_t ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos)
++{
++ int ret;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ret = _ipu_disp_get_window_pos(ipu, channel, x_pos, y_pos);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_disp_get_window_pos);
++
++void ipu_disp_direct_write(struct ipu_soc *ipu, ipu_channel_t channel, u32 value, u32 offset)
++{
++ if (channel == DIRECT_ASYNC0)
++ writel(value, ipu->disp_base[0] + offset);
++ else if (channel == DIRECT_ASYNC1)
++ writel(value, ipu->disp_base[1] + offset);
++}
++EXPORT_SYMBOL(ipu_disp_direct_write);
++
++void ipu_reset_disp_panel(struct ipu_soc *ipu)
++{
++ uint32_t tmp;
++
++ tmp = ipu_di_read(ipu, 1, DI_GENERAL);
++ ipu_di_write(ipu, 1, tmp | 0x08, DI_GENERAL);
++ msleep(10); /* tRES >= 100us */
++ tmp = ipu_di_read(ipu, 1, DI_GENERAL);
++ ipu_di_write(ipu, 1, tmp & ~0x08, DI_GENERAL);
++ msleep(60);
++
++ return;
++}
++EXPORT_SYMBOL(ipu_reset_disp_panel);
++
++void ipu_disp_init(struct ipu_soc *ipu)
++{
++ ipu->fg_csc_type = ipu->bg_csc_type = CSC_NONE;
++ ipu->color_key_4rgb = true;
++ _ipu_init_dc_mappings(ipu);
++ _ipu_dmfc_init(ipu, DMFC_NORMAL, 1);
++}
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_ic.c linux-openelec/drivers/mxc/ipu3/ipu_ic.c
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_ic.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_ic.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,924 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_ic.c
++ *
++ * @brief IPU IC functions
++ *
++ * @ingroup IPU
++ */
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++#include <linux/videodev2.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++enum {
++ IC_TASK_VIEWFINDER,
++ IC_TASK_ENCODER,
++ IC_TASK_POST_PROCESSOR
++};
++
++static void _init_csc(struct ipu_soc *ipu, uint8_t ic_task, ipu_color_space_t in_format,
++ ipu_color_space_t out_format, int csc_index);
++
++static int _calc_resize_coeffs(struct ipu_soc *ipu,
++ uint32_t inSize, uint32_t outSize,
++ uint32_t *resizeCoeff,
++ uint32_t *downsizeCoeff);
++
++void _ipu_vdi_set_top_field_man(struct ipu_soc *ipu, bool top_field_0)
++{
++ uint32_t reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ if (top_field_0)
++ reg &= ~VDI_C_TOP_FIELD_MAN_1;
++ else
++ reg |= VDI_C_TOP_FIELD_MAN_1;
++ ipu_vdi_write(ipu, reg, VDI_C);
++}
++
++void _ipu_vdi_set_motion(struct ipu_soc *ipu, ipu_motion_sel motion_sel)
++{
++ uint32_t reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ reg &= ~(VDI_C_MOT_SEL_FULL | VDI_C_MOT_SEL_MED | VDI_C_MOT_SEL_LOW);
++ if (motion_sel == HIGH_MOTION)
++ reg |= VDI_C_MOT_SEL_FULL;
++ else if (motion_sel == MED_MOTION)
++ reg |= VDI_C_MOT_SEL_MED;
++ else
++ reg |= VDI_C_MOT_SEL_LOW;
++
++ ipu_vdi_write(ipu, reg, VDI_C);
++ dev_dbg(ipu->dev, "VDI_C = \t0x%08X\n", reg);
++}
++
++void ic_dump_register(struct ipu_soc *ipu)
++{
++ printk(KERN_DEBUG "IC_CONF = \t0x%08X\n", ipu_ic_read(ipu, IC_CONF));
++ printk(KERN_DEBUG "IC_PRP_ENC_RSC = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_PRP_ENC_RSC));
++ printk(KERN_DEBUG "IC_PRP_VF_RSC = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_PRP_VF_RSC));
++ printk(KERN_DEBUG "IC_PP_RSC = \t0x%08X\n", ipu_ic_read(ipu, IC_PP_RSC));
++ printk(KERN_DEBUG "IC_IDMAC_1 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_1));
++ printk(KERN_DEBUG "IC_IDMAC_2 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_2));
++ printk(KERN_DEBUG "IC_IDMAC_3 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_3));
++}
++
++void _ipu_ic_enable_task(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t ic_conf;
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++ switch (channel) {
++ case CSI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN | IC_CONF_RWS_EN ;
++ break;
++ case MEM_ROT_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_ROT_EN;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case MEM_PRP_ENC_MEM:
++ ic_conf |= IC_CONF_PRPENC_EN;
++ break;
++ case MEM_ROT_ENC_MEM:
++ ic_conf |= IC_CONF_PRPENC_ROT_EN;
++ break;
++ case MEM_PP_MEM:
++ ic_conf |= IC_CONF_PP_EN;
++ break;
++ case MEM_ROT_PP_MEM:
++ ic_conf |= IC_CONF_PP_ROT_EN;
++ break;
++ default:
++ break;
++ }
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++}
++
++void _ipu_ic_disable_task(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t ic_conf;
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++ switch (channel) {
++ case CSI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_MEM:
++ ic_conf &= ~(IC_CONF_PRPVF_EN | IC_CONF_RWS_EN);
++ break;
++ case MEM_ROT_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_ROT_EN;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case MEM_PRP_ENC_MEM:
++ ic_conf &= ~IC_CONF_PRPENC_EN;
++ break;
++ case MEM_ROT_ENC_MEM:
++ ic_conf &= ~IC_CONF_PRPENC_ROT_EN;
++ break;
++ case MEM_PP_MEM:
++ ic_conf &= ~IC_CONF_PP_EN;
++ break;
++ case MEM_ROT_PP_MEM:
++ ic_conf &= ~IC_CONF_PP_ROT_EN;
++ break;
++ default:
++ break;
++ }
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++}
++
++void _ipu_vdi_init(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
++{
++ uint32_t reg;
++ uint32_t pixel_fmt;
++ uint32_t pix_per_burst;
++
++ reg = ((params->mem_prp_vf_mem.in_height-1) << 16) |
++ (params->mem_prp_vf_mem.in_width-1);
++ ipu_vdi_write(ipu, reg, VDI_FSIZE);
++
++ /* Full motion, only vertical filter is used
++ Burst size is 4 accesses */
++ if (params->mem_prp_vf_mem.in_pixel_fmt ==
++ IPU_PIX_FMT_UYVY ||
++ params->mem_prp_vf_mem.in_pixel_fmt ==
++ IPU_PIX_FMT_YUYV) {
++ pixel_fmt = VDI_C_CH_422;
++ pix_per_burst = 32;
++ } else {
++ pixel_fmt = VDI_C_CH_420;
++ pix_per_burst = 64;
++ }
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ reg |= pixel_fmt;
++ switch (channel) {
++ case MEM_VDI_PRP_VF_MEM:
++ reg |= VDI_C_BURST_SIZE2_4;
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_SET_1 | VDI_C_VWM1_CLR_2;
++ break;
++ case MEM_VDI_PRP_VF_MEM_N:
++ reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_SET_1 | VDI_C_VWM3_CLR_2;
++ break;
++
++ case MEM_VDI_MEM:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE2_OFFSET;
++ break;
++ case MEM_VDI_MEM_P:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE1_OFFSET;
++ reg |= VDI_C_VWM1_SET_2 | VDI_C_VWM1_CLR_2;
++ break;
++ case MEM_VDI_MEM_N:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE3_OFFSET;
++ reg |= VDI_C_VWM3_SET_2 | VDI_C_VWM3_CLR_2;
++ break;
++ default:
++ break;
++ }
++ ipu_vdi_write(ipu, reg, VDI_C);
++
++ if (params->mem_prp_vf_mem.field_fmt == IPU_DEINTERLACE_FIELD_TOP)
++ _ipu_vdi_set_top_field_man(ipu, true);
++ else if (params->mem_prp_vf_mem.field_fmt == IPU_DEINTERLACE_FIELD_BOTTOM)
++ _ipu_vdi_set_top_field_man(ipu, false);
++
++ _ipu_vdi_set_motion(ipu, params->mem_prp_vf_mem.motion_sel);
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_RWS_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_vdi_uninit(struct ipu_soc *ipu)
++{
++ ipu_vdi_write(ipu, 0, VDI_FSIZE);
++ ipu_vdi_write(ipu, 0, VDI_C);
++}
++
++int _ipu_ic_init_prpvf(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_prp_vf_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_vf_mem.in_height,
++ params->mem_prp_vf_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpvf height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else
++ reg = (params->mem_prp_vf_mem.outv_resize_ratio) << 16;
++
++ /* Setup horizontal resizing */
++ if (!params->mem_prp_vf_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_vf_mem.in_width,
++ params->mem_prp_vf_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpvf width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else
++ reg |= params->mem_prp_vf_mem.outh_resize_ratio;
++
++ ipu_ic_write(ipu, reg, IC_PRP_VF_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_prp_vf_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_vf_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_prp_vf_mem.graphics_combine_en) {
++ ic_conf |= IC_CONF_PRPVF_CMB;
++
++ if (!(ic_conf & IC_CONF_PRPVF_CSC1)) {
++ /* need transparent CSC1 conversion */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, RGB, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1; /* Enable RGB->RGB CSC */
++ }
++ in_fmt = format_to_colorspace(params->mem_prp_vf_mem.in_g_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_vf_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC2 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, out_fmt, 2);
++ ic_conf |= IC_CONF_PRPVF_CSC2;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC2 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, YCbCr, RGB, 2);
++ ic_conf |= IC_CONF_PRPVF_CSC2;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_prp_vf_mem.global_alpha_en) {
++ ic_conf |= IC_CONF_IC_GLB_LOC_A;
++ reg = ipu_ic_read(ipu, IC_CMBP_1);
++ reg &= ~(0xff);
++ reg |= params->mem_prp_vf_mem.alpha;
++ ipu_ic_write(ipu, reg, IC_CMBP_1);
++ } else
++ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
++
++ if (params->mem_prp_vf_mem.key_color_en) {
++ ic_conf |= IC_CONF_KEY_COLOR_EN;
++ ipu_ic_write(ipu, params->mem_prp_vf_mem.key_color,
++ IC_CMBP_2);
++ } else
++ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
++ } else {
++ ic_conf &= ~IC_CONF_PRPVF_CMB;
++ }
++
++ if (src_is_csi)
++ ic_conf &= ~IC_CONF_RWS_EN;
++ else
++ ic_conf |= IC_CONF_RWS_EN;
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_prpvf(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPVF_EN | IC_CONF_PRPVF_CMB |
++ IC_CONF_PRPVF_CSC2 | IC_CONF_PRPVF_CSC1);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_vf(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_vf(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_PRPVF_ROT_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_init_prpenc(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_prp_enc_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu,
++ params->mem_prp_enc_mem.in_height,
++ params->mem_prp_enc_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpenc height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else
++ reg = (params->mem_prp_enc_mem.outv_resize_ratio) << 16;
++
++ /* Setup horizontal resizing */
++ if (!params->mem_prp_enc_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_enc_mem.in_width,
++ params->mem_prp_enc_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpenc width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else
++ reg |= params->mem_prp_enc_mem.outh_resize_ratio;
++
++ ipu_ic_write(ipu, reg, IC_PRP_ENC_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_prp_enc_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_enc_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_ENCODER, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PRPENC_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_ENCODER, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PRPENC_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (src_is_csi)
++ ic_conf &= ~IC_CONF_RWS_EN;
++ else
++ ic_conf |= IC_CONF_RWS_EN;
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_prpenc(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_enc(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_enc(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPENC_ROT_EN);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_init_pp(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_pp_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_pp_mem.in_height,
++ params->mem_pp_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate pp height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else {
++ reg = (params->mem_pp_mem.outv_resize_ratio) << 16;
++ }
++
++ /* Setup horizontal resizing */
++ if (!params->mem_pp_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_pp_mem.in_width,
++ params->mem_pp_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate pp width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else {
++ reg |= params->mem_pp_mem.outh_resize_ratio;
++ }
++
++ ipu_ic_write(ipu, reg, IC_PP_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_pp_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_pp_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PP_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PP_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_pp_mem.graphics_combine_en) {
++ ic_conf |= IC_CONF_PP_CMB;
++
++ if (!(ic_conf & IC_CONF_PP_CSC1)) {
++ /* need transparent CSC1 conversion */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, RGB, 1);
++ ic_conf |= IC_CONF_PP_CSC1; /* Enable RGB->RGB CSC */
++ }
++
++ in_fmt = format_to_colorspace(params->mem_pp_mem.in_g_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_pp_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC2 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, out_fmt, 2);
++ ic_conf |= IC_CONF_PP_CSC2;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC2 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, YCbCr, RGB, 2);
++ ic_conf |= IC_CONF_PP_CSC2;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_pp_mem.global_alpha_en) {
++ ic_conf |= IC_CONF_IC_GLB_LOC_A;
++ reg = ipu_ic_read(ipu, IC_CMBP_1);
++ reg &= ~(0xff00);
++ reg |= (params->mem_pp_mem.alpha << 8);
++ ipu_ic_write(ipu, reg, IC_CMBP_1);
++ } else
++ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
++
++ if (params->mem_pp_mem.key_color_en) {
++ ic_conf |= IC_CONF_KEY_COLOR_EN;
++ ipu_ic_write(ipu, params->mem_pp_mem.key_color,
++ IC_CMBP_2);
++ } else
++ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
++ } else {
++ ic_conf &= ~IC_CONF_PP_CMB;
++ }
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_pp(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PP_EN | IC_CONF_PP_CSC1 | IC_CONF_PP_CSC2 |
++ IC_CONF_PP_CMB);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_pp(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_pp(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_PP_ROT_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_idma_init(struct ipu_soc *ipu, int dma_chan,
++ uint16_t width, uint16_t height,
++ int burst_size, ipu_rotate_mode_t rot)
++{
++ u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
++ u32 temp_rot = bitrev8(rot) >> 5;
++ bool need_hor_flip = false;
++
++ if ((burst_size != 8) && (burst_size != 16)) {
++ dev_dbg(ipu->dev, "Illegal burst length for IC\n");
++ return -EINVAL;
++ }
++
++ width--;
++ height--;
++
++ if (temp_rot & 0x2) /* Need horizontal flip */
++ need_hor_flip = true;
++
++ ic_idmac_1 = ipu_ic_read(ipu, IC_IDMAC_1);
++ ic_idmac_2 = ipu_ic_read(ipu, IC_IDMAC_2);
++ ic_idmac_3 = ipu_ic_read(ipu, IC_IDMAC_3);
++ if (dma_chan == 22) { /* PP output - CB2 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
++ } else if (dma_chan == 11) { /* PP Input - CB5 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
++ } else if (dma_chan == 47) { /* PP Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
++ }
++
++ if (dma_chan == 12) { /* PRP Input - CB6 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
++ }
++
++ if (dma_chan == 20) { /* PRP ENC output - CB0 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
++
++ } else if (dma_chan == 45) { /* PRP ENC Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
++ }
++
++ if (dma_chan == 21) { /* PRP VF output - CB1 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
++
++ } else if (dma_chan == 46) { /* PRP VF Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
++ }
++
++ if (dma_chan == 14) { /* PRP VF graphics combining input - CB3 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
++ } else if (dma_chan == 15) { /* PP graphics combining input - CB4 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
++ } else if (dma_chan == 5) { /* VDIC OUTPUT - CB7 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
++ }
++
++ ipu_ic_write(ipu, ic_idmac_1, IC_IDMAC_1);
++ ipu_ic_write(ipu, ic_idmac_2, IC_IDMAC_2);
++ ipu_ic_write(ipu, ic_idmac_3, IC_IDMAC_3);
++ return 0;
++}
++
++static void _init_csc(struct ipu_soc *ipu, uint8_t ic_task, ipu_color_space_t in_format,
++ ipu_color_space_t out_format, int csc_index)
++{
++ /*
++ * Y = 0.257 * R + 0.504 * G + 0.098 * B + 16;
++ * U = -0.148 * R - 0.291 * G + 0.439 * B + 128;
++ * V = 0.439 * R - 0.368 * G - 0.071 * B + 128;
++ */
++ static const uint32_t rgb2ycbcr_coeff[4][3] = {
++ {0x0042, 0x0081, 0x0019},
++ {0x01DA, 0x01B6, 0x0070},
++ {0x0070, 0x01A2, 0x01EE},
++ {0x0040, 0x0200, 0x0200}, /* A0, A1, A2 */
++ };
++
++ /* transparent RGB->RGB matrix for combining
++ */
++ static const uint32_t rgb2rgb_coeff[4][3] = {
++ {0x0080, 0x0000, 0x0000},
++ {0x0000, 0x0080, 0x0000},
++ {0x0000, 0x0000, 0x0080},
++ {0x0000, 0x0000, 0x0000}, /* A0, A1, A2 */
++ };
++
++/* R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
++ G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
++ B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128); */
++ static const uint32_t ycbcr2rgb_coeff[4][3] = {
++ {149, 0, 204},
++ {149, 462, 408},
++ {149, 255, 0},
++ {8192 - 446, 266, 8192 - 554}, /* A0, A1, A2 */
++ };
++
++ uint32_t param;
++ uint32_t *base = NULL;
++
++ if (ic_task == IC_TASK_ENCODER) {
++ base = (uint32_t *)ipu->tpmem_base + 0x2008 / 4;
++ } else if (ic_task == IC_TASK_VIEWFINDER) {
++ if (csc_index == 1)
++ base = (uint32_t *)ipu->tpmem_base + 0x4028 / 4;
++ else
++ base = (uint32_t *)ipu->tpmem_base + 0x4040 / 4;
++ } else if (ic_task == IC_TASK_POST_PROCESSOR) {
++ if (csc_index == 1)
++ base = (uint32_t *)ipu->tpmem_base + 0x6060 / 4;
++ else
++ base = (uint32_t *)ipu->tpmem_base + 0x6078 / 4;
++ } else {
++ BUG();
++ }
++
++ if ((in_format == YCbCr) && (out_format == RGB)) {
++ /* Init CSC (YCbCr->RGB) */
++ param = (ycbcr2rgb_coeff[3][0] << 27) |
++ (ycbcr2rgb_coeff[0][0] << 18) |
++ (ycbcr2rgb_coeff[1][1] << 9) | ycbcr2rgb_coeff[2][2];
++ writel(param, base++);
++ /* scale = 2, sat = 0 */
++ param = (ycbcr2rgb_coeff[3][0] >> 5) | (2L << (40 - 32));
++ writel(param, base++);
++
++ param = (ycbcr2rgb_coeff[3][1] << 27) |
++ (ycbcr2rgb_coeff[0][1] << 18) |
++ (ycbcr2rgb_coeff[1][0] << 9) | ycbcr2rgb_coeff[2][0];
++ writel(param, base++);
++ param = (ycbcr2rgb_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param = (ycbcr2rgb_coeff[3][2] << 27) |
++ (ycbcr2rgb_coeff[0][2] << 18) |
++ (ycbcr2rgb_coeff[1][2] << 9) | ycbcr2rgb_coeff[2][1];
++ writel(param, base++);
++ param = (ycbcr2rgb_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else if ((in_format == RGB) && (out_format == YCbCr)) {
++ /* Init CSC (RGB->YCbCr) */
++ param = (rgb2ycbcr_coeff[3][0] << 27) |
++ (rgb2ycbcr_coeff[0][0] << 18) |
++ (rgb2ycbcr_coeff[1][1] << 9) | rgb2ycbcr_coeff[2][2];
++ writel(param, base++);
++ /* scale = 1, sat = 0 */
++ param = (rgb2ycbcr_coeff[3][0] >> 5) | (1UL << 8);
++ writel(param, base++);
++
++ param = (rgb2ycbcr_coeff[3][1] << 27) |
++ (rgb2ycbcr_coeff[0][1] << 18) |
++ (rgb2ycbcr_coeff[1][0] << 9) | rgb2ycbcr_coeff[2][0];
++ writel(param, base++);
++ param = (rgb2ycbcr_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param = (rgb2ycbcr_coeff[3][2] << 27) |
++ (rgb2ycbcr_coeff[0][2] << 18) |
++ (rgb2ycbcr_coeff[1][2] << 9) | rgb2ycbcr_coeff[2][1];
++ writel(param, base++);
++ param = (rgb2ycbcr_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else if ((in_format == RGB) && (out_format == RGB)) {
++ /* Init CSC */
++ param =
++ (rgb2rgb_coeff[3][0] << 27) | (rgb2rgb_coeff[0][0] << 18) |
++ (rgb2rgb_coeff[1][1] << 9) | rgb2rgb_coeff[2][2];
++ writel(param, base++);
++ /* scale = 2, sat = 0 */
++ param = (rgb2rgb_coeff[3][0] >> 5) | (2UL << 8);
++ writel(param, base++);
++
++ param =
++ (rgb2rgb_coeff[3][1] << 27) | (rgb2rgb_coeff[0][1] << 18) |
++ (rgb2rgb_coeff[1][0] << 9) | rgb2rgb_coeff[2][0];
++ writel(param, base++);
++ param = (rgb2rgb_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param =
++ (rgb2rgb_coeff[3][2] << 27) | (rgb2rgb_coeff[0][2] << 18) |
++ (rgb2rgb_coeff[1][2] << 9) | rgb2rgb_coeff[2][1];
++ writel(param, base++);
++ param = (rgb2rgb_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else {
++ dev_err(ipu->dev, "Unsupported color space conversion\n");
++ }
++}
++
++static int _calc_resize_coeffs(struct ipu_soc *ipu,
++ uint32_t inSize, uint32_t outSize,
++ uint32_t *resizeCoeff,
++ uint32_t *downsizeCoeff)
++{
++ uint32_t tempSize;
++ uint32_t tempDownsize;
++
++ if (inSize > 4096) {
++ dev_err(ipu->dev, "IC input size(%d) cannot exceed 4096\n",
++ inSize);
++ return -EINVAL;
++ }
++
++ if (outSize > 1024) {
++ dev_err(ipu->dev, "IC output size(%d) cannot exceed 1024\n",
++ outSize);
++ return -EINVAL;
++ }
++
++ if ((outSize << 3) < inSize) {
++ dev_err(ipu->dev, "IC cannot downsize more than 8:1\n");
++ return -EINVAL;
++ }
++
++ /* Compute downsizing coefficient */
++ /* Output of downsizing unit cannot be more than 1024 */
++ tempDownsize = 0;
++ tempSize = inSize;
++ while (((tempSize > 1024) || (tempSize >= outSize * 2)) &&
++ (tempDownsize < 2)) {
++ tempSize >>= 1;
++ tempDownsize++;
++ }
++ *downsizeCoeff = tempDownsize;
++
++ /* compute resizing coefficient using the following equation:
++ resizeCoeff = M*(SI -1)/(SO - 1)
++ where M = 2^13, SI - input size, SO - output size */
++ *resizeCoeff = (8192L * (tempSize - 1)) / (outSize - 1);
++ if (*resizeCoeff >= 16384L) {
++ dev_err(ipu->dev, "Overflow on IC resize coefficient.\n");
++ return -EINVAL;
++ }
++
++ dev_dbg(ipu->dev, "resizing from %u -> %u pixels, "
++ "downsize=%u, resize=%u.%lu (reg=%u)\n", inSize, outSize,
++ *downsizeCoeff, (*resizeCoeff >= 8192L) ? 1 : 0,
++ ((*resizeCoeff & 0x1FFF) * 10000L) / 8192L, *resizeCoeff);
++
++ return 0;
++}
++
++void _ipu_vdi_toggle_top_field_man(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ uint32_t mask_reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ mask_reg = reg & VDI_C_TOP_FIELD_MAN_1;
++ if (mask_reg == VDI_C_TOP_FIELD_MAN_1)
++ reg &= ~VDI_C_TOP_FIELD_MAN_1;
++ else
++ reg |= VDI_C_TOP_FIELD_MAN_1;
++
++ ipu_vdi_write(ipu, reg, VDI_C);
++}
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_param_mem.h linux-openelec/drivers/mxc/ipu3/ipu_param_mem.h
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_param_mem.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_param_mem.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,921 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __INCLUDE_IPU_PARAM_MEM_H__
++#define __INCLUDE_IPU_PARAM_MEM_H__
++
++#include <linux/bitrev.h>
++#include <linux/types.h>
++
++#include "ipu_prv.h"
++
++extern u32 *ipu_cpmem_base;
++
++struct ipu_ch_param_word {
++ uint32_t data[5];
++ uint32_t res[3];
++};
++
++struct ipu_ch_param {
++ struct ipu_ch_param_word word[2];
++};
++
++#define ipu_ch_param_addr(ipu, ch) (((struct ipu_ch_param *)ipu->cpmem_base) + (ch))
++
++#define _param_word(base, w) \
++ (((struct ipu_ch_param *)(base))->word[(w)].data)
++
++#define ipu_ch_param_set_field(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ _param_word(base, w)[i] |= (v) << off; \
++ if (((bit)+(size)-1)/32 > i) { \
++ _param_word(base, w)[i + 1] |= (v) >> (off ? (32 - off) : 0); \
++ } \
++}
++
++#define ipu_ch_param_set_field_io(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ unsigned reg_offset; \
++ u32 temp; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp |= (v) << off; \
++ writel(temp, (u32 *)base + reg_offset); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp |= (v) >> (off ? (32 - off) : 0); \
++ writel(temp, (u32 *)base + reg_offset); \
++ } \
++}
++
++#define ipu_ch_param_mod_field(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ u32 temp = _param_word(base, w)[i]; \
++ temp &= ~(mask << off); \
++ _param_word(base, w)[i] = temp | (v) << off; \
++ if (((bit)+(size)-1)/32 > i) { \
++ temp = _param_word(base, w)[i + 1]; \
++ temp &= ~(mask >> (32 - off)); \
++ _param_word(base, w)[i + 1] = \
++ temp | ((v) >> (off ? (32 - off) : 0)); \
++ } \
++}
++
++#define ipu_ch_param_mod_field_io(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ unsigned reg_offset; \
++ u32 temp; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp &= ~(mask << off); \
++ temp |= (v) << off; \
++ writel(temp, (u32 *)base + reg_offset); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp &= ~(mask >> (32 - off)); \
++ temp |= ((v) >> (off ? (32 - off) : 0)); \
++ writel(temp, (u32 *)base + reg_offset); \
++ } \
++}
++
++#define ipu_ch_param_read_field(base, w, bit, size) ({ \
++ u32 temp2; \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ u32 temp1 = _param_word(base, w)[i]; \
++ temp1 = mask & (temp1 >> off); \
++ if (((bit)+(size)-1)/32 > i) { \
++ temp2 = _param_word(base, w)[i + 1]; \
++ temp2 &= mask >> (off ? (32 - off) : 0); \
++ temp1 |= temp2 << (off ? (32 - off) : 0); \
++ } \
++ temp1; \
++})
++
++#define ipu_ch_param_read_field_io(base, w, bit, size) ({ \
++ u32 temp1, temp2; \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ unsigned reg_offset; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp1 = readl((u32 *)base + reg_offset); \
++ temp1 = mask & (temp1 >> off); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp2 = readl((u32 *)base + reg_offset); \
++ temp2 &= mask >> (off ? (32 - off) : 0); \
++ temp1 |= temp2 << (off ? (32 - off) : 0); \
++ } \
++ temp1; \
++})
++
++static inline int __ipu_ch_get_third_buf_cpmem_num(int ch)
++{
++ switch (ch) {
++ case 8:
++ return 64;
++ case 9:
++ return 65;
++ case 10:
++ return 66;
++ case 13:
++ return 67;
++ case 21:
++ return 68;
++ case 23:
++ return 69;
++ case 27:
++ return 70;
++ case 28:
++ return 71;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static inline void _ipu_ch_params_set_packing(struct ipu_ch_param *p,
++ int red_width, int red_offset,
++ int green_width, int green_offset,
++ int blue_width, int blue_offset,
++ int alpha_width, int alpha_offset)
++{
++ /* Setup red width and offset */
++ ipu_ch_param_set_field(p, 1, 116, 3, red_width - 1);
++ ipu_ch_param_set_field(p, 1, 128, 5, red_offset);
++ /* Setup green width and offset */
++ ipu_ch_param_set_field(p, 1, 119, 3, green_width - 1);
++ ipu_ch_param_set_field(p, 1, 133, 5, green_offset);
++ /* Setup blue width and offset */
++ ipu_ch_param_set_field(p, 1, 122, 3, blue_width - 1);
++ ipu_ch_param_set_field(p, 1, 138, 5, blue_offset);
++ /* Setup alpha width and offset */
++ ipu_ch_param_set_field(p, 1, 125, 3, alpha_width - 1);
++ ipu_ch_param_set_field(p, 1, 143, 5, alpha_offset);
++}
++
++static inline void _ipu_ch_param_dump(struct ipu_soc *ipu, int ch)
++{
++ struct ipu_ch_param *p = ipu_ch_param_addr(ipu, ch);
++ dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", ch,
++ p->word[0].data[0], p->word[0].data[1], p->word[0].data[2],
++ p->word[0].data[3], p->word[0].data[4]);
++ dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", ch,
++ p->word[1].data[0], p->word[1].data[1], p->word[1].data[2],
++ p->word[1].data[3], p->word[1].data[4]);
++ dev_dbg(ipu->dev, "PFS 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 85, 4));
++ dev_dbg(ipu->dev, "BPP 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 107, 3));
++ dev_dbg(ipu->dev, "NPB 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7));
++
++ dev_dbg(ipu->dev, "FW %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 125, 13));
++ dev_dbg(ipu->dev, "FH %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 138, 12));
++ dev_dbg(ipu->dev, "EBA0 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 0, 29) << 3);
++ dev_dbg(ipu->dev, "EBA1 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 29, 29) << 3);
++ dev_dbg(ipu->dev, "Stride %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14));
++ dev_dbg(ipu->dev, "scan_order %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 113, 1));
++ dev_dbg(ipu->dev, "uv_stride %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 128, 14));
++ dev_dbg(ipu->dev, "u_offset 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22) << 3);
++ dev_dbg(ipu->dev, "v_offset 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22) << 3);
++
++ dev_dbg(ipu->dev, "Width0 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 116, 3));
++ dev_dbg(ipu->dev, "Width1 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 119, 3));
++ dev_dbg(ipu->dev, "Width2 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 122, 3));
++ dev_dbg(ipu->dev, "Width3 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 125, 3));
++ dev_dbg(ipu->dev, "Offset0 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 128, 5));
++ dev_dbg(ipu->dev, "Offset1 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 133, 5));
++ dev_dbg(ipu->dev, "Offset2 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 138, 5));
++ dev_dbg(ipu->dev, "Offset3 %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 143, 5));
++}
++
++static inline void fill_cpmem(struct ipu_soc *ipu, int ch, struct ipu_ch_param *params)
++{
++ int i, w;
++ void *addr = ipu_ch_param_addr(ipu, ch);
++
++ /* 2 words, 5 valid data */
++ for (w = 0; w < 2; w++) {
++ for (i = 0; i < 5; i++) {
++ writel(params->word[w].data[i], addr);
++ addr += 4;
++ }
++ addr += 12;
++ }
++}
++
++static inline void _ipu_ch_param_init(struct ipu_soc *ipu, int ch,
++ uint32_t pixel_fmt, uint32_t width,
++ uint32_t height, uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t uv_stride, dma_addr_t addr0,
++ dma_addr_t addr1, dma_addr_t addr2)
++{
++ uint32_t u_offset = 0;
++ uint32_t v_offset = 0;
++ int32_t sub_ch = 0;
++ struct ipu_ch_param params;
++
++ memset(&params, 0, sizeof(params));
++
++ ipu_ch_param_set_field(&params, 0, 125, 13, width - 1);
++
++ if (((ch == 8) || (ch == 9) || (ch == 10)) && !ipu->vdoa_en) {
++ ipu_ch_param_set_field(&params, 0, 138, 12, (height / 2) - 1);
++ ipu_ch_param_set_field(&params, 1, 102, 14, (stride * 2) - 1);
++ } else {
++ /* note: for vdoa+vdi- ch8/9/10, always use band mode */
++ ipu_ch_param_set_field(&params, 0, 138, 12, height - 1);
++ ipu_ch_param_set_field(&params, 1, 102, 14, stride - 1);
++ }
++
++ /* EBA is 8-byte aligned */
++ ipu_ch_param_set_field(&params, 1, 0, 29, addr0 >> 3);
++ ipu_ch_param_set_field(&params, 1, 29, 29, addr1 >> 3);
++ if (addr0%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's EBA0 is not 8-byte aligned\n", ch);
++ if (addr1%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's EBA1 is not 8-byte aligned\n", ch);
++
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_GENERIC:
++ /*Represents 8-bit Generic data */
++ ipu_ch_param_set_field(&params, 0, 107, 3, 5); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 6); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 63); /* burst size */
++
++ break;
++ case IPU_PIX_FMT_GENERIC_16:
++ /* Represents 16-bit generic data */
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 6); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ break;
++ case IPU_PIX_FMT_GENERIC_32:
++ /*Represents 32-bit Generic data */
++ break;
++ case IPU_PIX_FMT_RGB565:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 5, 0, 6, 5, 5, 11, 8, 16);
++ break;
++ case IPU_PIX_FMT_BGR24:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 0, 8, 8, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 16, 8, 8, 8, 0, 8, 24);
++ break;
++ case IPU_PIX_FMT_VYU444:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 8, 8, 0, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 8, 8, 16, 8, 24, 8, 0);
++ break;
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 24, 8, 16, 8, 8, 8, 0);
++ break;
++ case IPU_PIX_FMT_ABGR32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 0, 8, 8, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_UYVY:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0xA); /* pix format */
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YUYV:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0x8); /* pix format */
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ if (ipu->vdoa_en) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31);
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15);
++ }
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ ipu_ch_param_set_field(&params, 1, 85, 4, 2); /* pix format */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * height;
++ v_offset = u_offset + (uv_stride * height / 2);
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ uv_stride = uv_stride*2;
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ ipu_ch_param_set_field(&params, 1, 85, 4, 2); /* pix format */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * height;
++ u_offset = v_offset + (uv_stride * height / 2);
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ uv_stride = uv_stride*2;
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = (v == 0) ? stride * height : v;
++ u_offset = (u == 0) ? v_offset + v_offset / 2 : u;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = (u == 0) ? stride * height : u;
++ v_offset = (v == 0) ? u_offset + u_offset / 2 : v;
++ break;
++ case IPU_PIX_FMT_YUV444P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ uv_stride = stride;
++ u_offset = (u == 0) ? stride * height : u;
++ v_offset = (v == 0) ? u_offset * 2 : v;
++ break;
++ case IPU_PIX_FMT_NV12:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 4); /* pix format */
++ uv_stride = stride;
++ u_offset = (u == 0) ? stride * height : u;
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ if (ipu->vdoa_en) {
++ /* one field buffer, memory width 64bits */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 63);
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15);
++ /* top/bottom field in one buffer*/
++ uv_stride = uv_stride*2;
++ }
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ default:
++ dev_err(ipu->dev, "mxc ipu: unimplemented pixel format\n");
++ break;
++ }
++ /*set burst size to 16*/
++
++
++ if (uv_stride)
++ ipu_ch_param_set_field(&params, 1, 128, 14, uv_stride - 1);
++
++ /* Get the uv offset from user when need cropping */
++ if (u || v) {
++ u_offset = u;
++ v_offset = v;
++ }
++
++ /* UBO and VBO are 22-bit and 8-byte aligned */
++ if (u_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset exceeds IPU limitation\n", ch);
++ if (v_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset exceeds IPU limitation\n", ch);
++ if (u_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset is not 8-byte aligned\n", ch);
++ if (v_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset is not 8-byte aligned\n", ch);
++
++ ipu_ch_param_set_field(&params, 0, 46, 22, u_offset / 8);
++ ipu_ch_param_set_field(&params, 0, 68, 22, v_offset / 8);
++
++ dev_dbg(ipu->dev, "initializing idma ch %d @ %p\n", ch, ipu_ch_param_addr(ipu, ch));
++ fill_cpmem(ipu, ch, &params);
++ if (addr2) {
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++
++ ipu_ch_param_set_field(&params, 1, 0, 29, addr2 >> 3);
++ ipu_ch_param_set_field(&params, 1, 29, 29, 0);
++ if (addr2%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's sub-CPMEM entry%d EBA0 is not "
++ "8-byte aligned\n", ch, sub_ch);
++
++ dev_dbg(ipu->dev, "initializing idma ch %d @ %p sub cpmem\n", ch,
++ ipu_ch_param_addr(ipu, sub_ch));
++ fill_cpmem(ipu, sub_ch, &params);
++ }
++};
++
++static inline void _ipu_ch_param_set_burst_size(struct ipu_soc *ipu,
++ uint32_t ch,
++ uint16_t burst_pixels)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7,
++ burst_pixels - 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 78, 7,
++ burst_pixels - 1);
++};
++
++static inline int _ipu_ch_param_get_burst_size(struct ipu_soc *ipu, uint32_t ch)
++{
++ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7) + 1;
++};
++
++static inline int _ipu_ch_param_get_bpp(struct ipu_soc *ipu, uint32_t ch)
++{
++ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 107, 3);
++};
++
++static inline void _ipu_ch_param_set_buffer(struct ipu_soc *ipu, uint32_t ch,
++ int bufNum, dma_addr_t phyaddr)
++{
++ if (bufNum == 2) {
++ ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (ch <= 0)
++ return;
++ bufNum = 0;
++ }
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 29 * bufNum, 29,
++ phyaddr / 8);
++};
++
++static inline void _ipu_ch_param_set_rotation(struct ipu_soc *ipu, uint32_t ch,
++ ipu_rotate_mode_t rot)
++{
++ u32 temp_rot = bitrev8(rot) >> 5;
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 119, 3, temp_rot);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 119, 3, temp_rot);
++};
++
++static inline void _ipu_ch_param_set_block_mode(struct ipu_soc *ipu, uint32_t ch)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 117, 2, 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 117, 2, 1);
++};
++
++static inline void _ipu_ch_param_set_alpha_use_separate_channel(struct ipu_soc *ipu,
++ uint32_t ch,
++ bool option)
++{
++ int32_t sub_ch = 0;
++
++ if (option) {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 89, 1, 1);
++ } else {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 89, 1, 0);
++ }
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++
++ if (option) {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 89, 1, 1);
++ } else {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 89, 1, 0);
++ }
++};
++
++static inline void _ipu_ch_param_set_alpha_condition_read(struct ipu_soc *ipu, uint32_t ch)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 149, 1, 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 149, 1, 1);
++};
++
++static inline void _ipu_ch_param_set_alpha_buffer_memory(struct ipu_soc *ipu, uint32_t ch)
++{
++ int alp_mem_idx;
++ int32_t sub_ch = 0;
++
++ switch (ch) {
++ case 14: /* PRP graphic */
++ alp_mem_idx = 0;
++ break;
++ case 15: /* PP graphic */
++ alp_mem_idx = 1;
++ break;
++ case 23: /* DP BG SYNC graphic */
++ alp_mem_idx = 4;
++ break;
++ case 27: /* DP FG SYNC graphic */
++ alp_mem_idx = 2;
++ break;
++ default:
++ dev_err(ipu->dev, "unsupported correlative channel of local "
++ "alpha channel\n");
++ return;
++ }
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 90, 3, alp_mem_idx);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 90, 3, alp_mem_idx);
++};
++
++static inline void _ipu_ch_param_set_interlaced_scan(struct ipu_soc *ipu, uint32_t ch)
++{
++ u32 stride;
++ int32_t sub_ch = 0;
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch), 0, 113, 1, 1);
++ if (sub_ch > 0)
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 113, 1, 1);
++ stride = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14) + 1;
++ /* ILO is 20-bit and 8-byte aligned */
++ if (stride/8 > 0xfffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's ILO exceeds IPU limitation\n", ch);
++ if (stride%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's ILO is not 8-byte aligned\n", ch);
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 58, 20, stride / 8);
++ if (sub_ch > 0)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 58, 20,
++ stride / 8);
++ stride *= 2;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14, stride - 1);
++ if (sub_ch > 0)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 102, 14,
++ stride - 1);
++};
++
++static inline void _ipu_ch_param_set_axi_id(struct ipu_soc *ipu, uint32_t ch, uint32_t id)
++{
++ int32_t sub_ch = 0;
++
++ id %= 4;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 93, 2, id);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 93, 2, id);
++};
++
++/* IDMAC U/V offset changing support */
++/* U and V input is not affected, */
++/* the update is done by new calculation according to */
++/* vertical_offset and horizontal_offset */
++static inline void _ipu_ch_offset_update(struct ipu_soc *ipu,
++ int ch,
++ uint32_t pixel_fmt,
++ uint32_t width,
++ uint32_t height,
++ uint32_t stride,
++ uint32_t u,
++ uint32_t v,
++ uint32_t uv_stride,
++ uint32_t vertical_offset,
++ uint32_t horizontal_offset)
++{
++ uint32_t u_offset = 0;
++ uint32_t v_offset = 0;
++ uint32_t old_offset = 0;
++ uint32_t u_fix = 0;
++ uint32_t v_fix = 0;
++ int32_t sub_ch = 0;
++
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_GENERIC_16:
++ case IPU_PIX_FMT_GENERIC_32:
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_ABGR32:
++ case IPU_PIX_FMT_UYVY:
++ case IPU_PIX_FMT_YUYV:
++ break;
++
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset / 2;
++ v_offset = u_offset + (uv_stride * height / 2);
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset / 2;
++ u_offset = v_offset + (uv_stride * height / 2);
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset / 2;
++ u_offset = v_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset / 2;
++ v_offset = u_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++ break;
++
++ case IPU_PIX_FMT_YUV444P:
++ uv_stride = stride;
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset;
++ v_offset = u_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset -
++ (stride * vertical_offset) -
++ (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset -
++ (stride * vertical_offset) -
++ (horizontal_offset)) :
++ v_offset;
++ break;
++ case IPU_PIX_FMT_NV12:
++ uv_stride = stride;
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset;
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ horizontal_offset -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++
++ break;
++ default:
++ dev_err(ipu->dev, "mxc ipu: unimplemented pixel format\n");
++ break;
++ }
++
++
++
++ if (u_fix > u_offset)
++ u_offset = u_fix;
++
++ if (v_fix > v_offset)
++ v_offset = v_fix;
++
++ /* UBO and VBO are 22-bit and 8-byte aligned */
++ if (u_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset exceeds IPU limitation\n", ch);
++ if (v_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset exceeds IPU limitation\n", ch);
++ if (u_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset is not 8-byte aligned\n", ch);
++ if (v_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset is not 8-byte aligned\n", ch);
++
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22);
++ if (old_offset != u_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22, u_offset / 8);
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22);
++ if (old_offset != v_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22, v_offset / 8);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 46, 22);
++ if (old_offset != u_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 46, 22, u_offset / 8);
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 68, 22);
++ if (old_offset != v_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 68, 22, v_offset / 8);
++};
++
++static inline void _ipu_ch_params_set_alpha_width(struct ipu_soc *ipu, uint32_t ch, int alpha_width)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch), 1, 125, 3, alpha_width - 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 125, 3, alpha_width - 1);
++};
++
++static inline void _ipu_ch_param_set_bandmode(struct ipu_soc *ipu,
++ uint32_t ch, uint32_t band_height)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch),
++ 0, 114, 3, band_height - 1);
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch),
++ 0, 114, 3, band_height - 1);
++
++ dev_dbg(ipu->dev, "BNDM 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 114, 3));
++}
++
++/*
++ * The IPUv3 IDMAC has a bug to read 32bpp pixels from a graphics plane
++ * whose alpha component is at the most significant 8 bits. The bug only
++ * impacts on cases in which the relevant separate alpha channel is enabled.
++ *
++ * Return true on bad alpha component position, otherwise, return false.
++ */
++static inline bool _ipu_ch_param_bad_alpha_pos(uint32_t pixel_fmt)
++{
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ return true;
++ }
++
++ return false;
++}
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_pixel_clk.c linux-openelec/drivers/mxc/ipu3/ipu_pixel_clk.c
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_pixel_clk.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_pixel_clk.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,317 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_pixel_clk.c
++ *
++ * @brief IPU pixel clock implementation
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++
++#include "ipu_prv.h"
++#include "ipu_regs.h"
++
++ /*
++ * muxd clock implementation
++ */
++struct clk_di_mux {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++ u8 index;
++};
++#define to_clk_di_mux(_hw) container_of(_hw, struct clk_di_mux, hw)
++
++static int _ipu_pixel_clk_set_parent(struct clk_hw *hw, u8 index)
++{
++ struct clk_di_mux *mux = to_clk_di_mux(hw);
++ struct ipu_soc *ipu = ipu_get_soc(mux->ipu_id);
++ u32 di_gen;
++
++ di_gen = ipu_di_read(ipu, mux->di_id, DI_GENERAL);
++ if (index == 0)
++ /* ipu1_clk or ipu2_clk internal clk */
++ di_gen &= ~DI_GEN_DI_CLK_EXT;
++ else
++ di_gen |= DI_GEN_DI_CLK_EXT;
++
++ ipu_di_write(ipu, mux->di_id, di_gen, DI_GENERAL);
++ mux->index = index;
++ pr_debug("ipu_pixel_clk: di_clk_ext:0x%x, di_gen reg:0x%x.\n",
++ !(di_gen & DI_GEN_DI_CLK_EXT), di_gen);
++ return 0;
++}
++
++static u8 _ipu_pixel_clk_get_parent(struct clk_hw *hw)
++{
++ struct clk_di_mux *mux = to_clk_di_mux(hw);
++
++ return mux->index;
++}
++
++const struct clk_ops clk_mux_di_ops = {
++ .get_parent = _ipu_pixel_clk_get_parent,
++ .set_parent = _ipu_pixel_clk_set_parent,
++};
++
++struct clk *clk_register_mux_pix_clk(struct device *dev, const char *name,
++ const char **parent_names, u8 num_parents, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_mux_flags)
++{
++ struct clk_di_mux *mux;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ mux = kzalloc(sizeof(struct clk_di_mux), GFP_KERNEL);
++ if (!mux)
++ return ERR_PTR(-ENOMEM);
++
++ init.name = name;
++ init.ops = &clk_mux_di_ops;
++ init.flags = flags;
++ init.parent_names = parent_names;
++ init.num_parents = num_parents;
++
++ mux->ipu_id = ipu_id;
++ mux->di_id = di_id;
++ mux->flags = clk_mux_flags | CLK_SET_RATE_PARENT;
++ mux->hw.init = &init;
++
++ clk = clk_register(dev, &mux->hw);
++ if (IS_ERR(clk))
++ kfree(mux);
++
++ return clk;
++}
++
++/*
++ * Gated clock implementation
++ */
++struct clk_di_div {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++};
++#define to_clk_di_div(_hw) container_of(_hw, struct clk_di_div, hw)
++
++static unsigned long _ipu_pixel_clk_div_recalc_rate(struct clk_hw *hw,
++ unsigned long parent_rate)
++{
++ struct clk_di_div *di_div = to_clk_di_div(hw);
++ struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
++ u32 div;
++ u64 final_rate = (unsigned long long)parent_rate * 16;
++
++ _ipu_get(ipu);
++ div = ipu_di_read(ipu, di_div->di_id, DI_BS_CLKGEN0);
++ _ipu_put(ipu);
++ pr_debug("ipu_di%d read BS_CLKGEN0 div:%d, final_rate:%lld, prate:%ld\n",
++ di_div->di_id, div, final_rate, parent_rate);
++
++ if (div == 0)
++ return 0;
++ do_div(final_rate, div);
++
++ return (unsigned long)final_rate;
++}
++
++static long _ipu_pixel_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long *parent_clk_rate)
++{
++ u64 div, final_rate;
++ u32 remainder;
++ u64 parent_rate = (unsigned long long)(*parent_clk_rate) * 16;
++
++ /*
++ * Calculate divider
++ * Fractional part is 4 bits,
++ * so simply multiply by 2^4 to get fractional part.
++ */
++ div = parent_rate;
++ remainder = do_div(div, rate);
++ /* Round the divider value */
++ if (remainder > (rate/2))
++ div++;
++ if (div < 0x10) /* Min DI disp clock divider is 1 */
++ div = 0x10;
++ if (div & ~0xFEF)
++ div &= 0xFF8;
++ else {
++ /* Round up divider if it gets us closer to desired pix clk */
++ if ((div & 0xC) == 0xC) {
++ div += 0x10;
++ div &= ~0xF;
++ }
++ }
++ final_rate = parent_rate;
++ do_div(final_rate, div);
++
++ return final_rate;
++}
++
++static int _ipu_pixel_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long parent_clk_rate)
++{
++ struct clk_di_div *di_div = to_clk_di_div(hw);
++ struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
++ u64 div, parent_rate;
++ u32 remainder;
++
++ parent_rate = (unsigned long long)parent_clk_rate * 16;
++ div = parent_rate;
++ remainder = do_div(div, rate);
++ /* Round the divider value */
++ if (remainder > (rate/2))
++ div++;
++
++ /* Round up divider if it gets us closer to desired pix clk */
++ if ((div & 0xC) == 0xC) {
++ div += 0x10;
++ div &= ~0xF;
++ }
++ if (div > 0x1000)
++ pr_err("Overflow, di:%d, DI_BS_CLKGEN0 div:0x%x\n",
++ di_div->di_id, (u32)div);
++ _ipu_get(ipu);
++ ipu_di_write(ipu, di_div->di_id, (u32)div, DI_BS_CLKGEN0);
++
++ /* Setup pixel clock timing */
++ /* FIXME: needs to be more flexible */
++ /* Down time is half of period */
++ ipu_di_write(ipu, di_div->di_id, ((u32)div / 16) << 16, DI_BS_CLKGEN1);
++ _ipu_put(ipu);
++
++ return 0;
++}
++
++static struct clk_ops clk_div_ops = {
++ .recalc_rate = _ipu_pixel_clk_div_recalc_rate,
++ .round_rate = _ipu_pixel_clk_div_round_rate,
++ .set_rate = _ipu_pixel_clk_div_set_rate,
++};
++
++struct clk *clk_register_div_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_div_flags)
++{
++ struct clk_di_div *di_div;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ di_div = kzalloc(sizeof(struct clk_di_div), GFP_KERNEL);
++ if (!di_div)
++ return ERR_PTR(-ENOMEM);
++
++ /* struct clk_di_div assignments */
++ di_div->ipu_id = ipu_id;
++ di_div->di_id = di_id;
++ di_div->flags = clk_div_flags;
++
++ init.name = name;
++ init.ops = &clk_div_ops;
++ init.flags = flags | CLK_SET_RATE_PARENT;
++ init.parent_names = parent_name ? &parent_name : NULL;
++ init.num_parents = parent_name ? 1 : 0;
++
++ di_div->hw.init = &init;
++
++ clk = clk_register(dev, &di_div->hw);
++ if (IS_ERR(clk))
++ kfree(clk);
++
++ return clk;
++}
++
++/*
++ * Gated clock implementation
++ */
++struct clk_di_gate {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++};
++#define to_clk_di_gate(_hw) container_of(_hw, struct clk_di_gate, hw)
++
++static int _ipu_pixel_clk_enable(struct clk_hw *hw)
++{
++ struct clk_di_gate *gate = to_clk_di_gate(hw);
++ struct ipu_soc *ipu = ipu_get_soc(gate->ipu_id);
++ u32 disp_gen;
++
++ disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
++ disp_gen |= gate->di_id ? DI1_COUNTER_RELEASE : DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
++
++ return 0;
++}
++
++static void _ipu_pixel_clk_disable(struct clk_hw *hw)
++{
++ struct clk_di_gate *gate = to_clk_di_gate(hw);
++ struct ipu_soc *ipu = ipu_get_soc(gate->ipu_id);
++ u32 disp_gen;
++
++ disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
++ disp_gen &= gate->di_id ? ~DI1_COUNTER_RELEASE : ~DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
++
++}
++
++
++static struct clk_ops clk_gate_di_ops = {
++ .enable = _ipu_pixel_clk_enable,
++ .disable = _ipu_pixel_clk_disable,
++};
++
++struct clk *clk_register_gate_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_gate_flags)
++{
++ struct clk_di_gate *gate;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ gate = kzalloc(sizeof(struct clk_di_gate), GFP_KERNEL);
++ if (!gate)
++ return ERR_PTR(-ENOMEM);
++
++ gate->ipu_id = ipu_id;
++ gate->di_id = di_id;
++ gate->flags = clk_gate_flags;
++
++ init.name = name;
++ init.ops = &clk_gate_di_ops;
++ init.flags = flags | CLK_SET_RATE_PARENT;
++ init.parent_names = parent_name ? &parent_name : NULL;
++ init.num_parents = parent_name ? 1 : 0;
++
++ gate->hw.init = &init;
++
++ clk = clk_register(dev, &gate->hw);
++ if (IS_ERR(clk))
++ kfree(clk);
++
++ return clk;
++}
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_prv.h linux-openelec/drivers/mxc/ipu3/ipu_prv.h
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_prv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_prv.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,356 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __INCLUDE_IPU_PRV_H__
++#define __INCLUDE_IPU_PRV_H__
++
++#include <linux/clkdev.h>
++#include <linux/device.h>
++#include <linux/fsl_devices.h>
++#include <linux/interrupt.h>
++#include <linux/types.h>
++
++#define MXC_IPU_MAX_NUM 2
++#define MXC_DI_NUM_PER_IPU 2
++
++/* Globals */
++extern int dmfc_type_setup;
++
++#define IDMA_CHAN_INVALID 0xFF
++#define HIGH_RESOLUTION_WIDTH 1024
++
++struct ipu_irq_node {
++ irqreturn_t(*handler) (int, void *); /*!< the ISR */
++ const char *name; /*!< device associated with the interrupt */
++ void *dev_id; /*!< some unique information for the ISR */
++ __u32 flags; /*!< not used */
++};
++
++enum csc_type_t {
++ RGB2YUV = 0,
++ YUV2RGB,
++ RGB2RGB,
++ YUV2YUV,
++ CSC_NONE,
++ CSC_NUM
++};
++
++enum imx_ipu_type {
++ IMX6Q_IPU,
++};
++
++struct ipu_pltfm_data {
++ u32 id;
++ u32 devtype;
++ int (*init) (int);
++ void (*pg) (int);
++
++ /*
++ * Bypass reset to avoid display channel being
++ * stopped by probe since it may starts to work
++ * in bootloader.
++ */
++ bool bypass_reset;
++};
++
++struct ipu_soc {
++ bool online;
++ struct ipu_pltfm_data *pdata;
++
++ /*clk*/
++ struct clk *ipu_clk;
++ struct clk *di_clk[2];
++ struct clk *di_clk_sel[2];
++ struct clk *pixel_clk[2];
++ struct clk *pixel_clk_sel[2];
++ struct clk *csi_clk[2];
++
++ /*irq*/
++ int irq_sync;
++ int irq_err;
++ struct ipu_irq_node irq_list[IPU_IRQ_COUNT];
++
++ /*reg*/
++ void __iomem *cm_reg;
++ void __iomem *idmac_reg;
++ void __iomem *dp_reg;
++ void __iomem *ic_reg;
++ void __iomem *dc_reg;
++ void __iomem *dc_tmpl_reg;
++ void __iomem *dmfc_reg;
++ void __iomem *di_reg[2];
++ void __iomem *smfc_reg;
++ void __iomem *csi_reg[2];
++ void __iomem *cpmem_base;
++ void __iomem *tpmem_base;
++ void __iomem *disp_base[2];
++ void __iomem *vdi_reg;
++
++ struct device *dev;
++
++ ipu_channel_t csi_channel[2];
++ ipu_channel_t using_ic_dirct_ch;
++ unsigned char dc_di_assignment[10];
++ bool sec_chan_en[24];
++ bool thrd_chan_en[24];
++ bool chan_is_interlaced[52];
++ uint32_t channel_init_mask;
++ uint32_t channel_enable_mask;
++
++ /*use count*/
++ int dc_use_count;
++ int dp_use_count;
++ int dmfc_use_count;
++ int smfc_use_count;
++ int ic_use_count;
++ int rot_use_count;
++ int vdi_use_count;
++ int di_use_count[2];
++ int csi_use_count[2];
++
++ struct mutex mutex_lock;
++ spinlock_t int_reg_spin_lock;
++ spinlock_t rdy_reg_spin_lock;
++
++ int dmfc_size_28;
++ int dmfc_size_29;
++ int dmfc_size_24;
++ int dmfc_size_27;
++ int dmfc_size_23;
++
++ enum csc_type_t fg_csc_type;
++ enum csc_type_t bg_csc_type;
++ bool color_key_4rgb;
++ bool dc_swap;
++ struct completion dc_comp;
++ struct completion csi_comp;
++
++ struct rot_mem {
++ void *vaddr;
++ dma_addr_t paddr;
++ int size;
++ } rot_dma[2];
++
++ int vdoa_en;
++ struct task_struct *thread[2];
++
++};
++
++struct ipu_channel {
++ u8 video_in_dma;
++ u8 alpha_in_dma;
++ u8 graph_in_dma;
++ u8 out_dma;
++};
++
++enum ipu_dmfc_type {
++ DMFC_NORMAL = 0,
++ DMFC_HIGH_RESOLUTION_DC,
++ DMFC_HIGH_RESOLUTION_DP,
++ DMFC_HIGH_RESOLUTION_ONLY_DP,
++};
++
++static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->cm_reg + offset);
++}
++
++static inline void ipu_cm_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->cm_reg + offset);
++}
++
++static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->idmac_reg + offset);
++}
++
++static inline void ipu_idmac_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->idmac_reg + offset);
++}
++
++static inline u32 ipu_dc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dc_reg + offset);
++}
++
++static inline void ipu_dc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dc_reg + offset);
++}
++
++static inline u32 ipu_dc_tmpl_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dc_tmpl_reg + offset);
++}
++
++static inline void ipu_dc_tmpl_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dc_tmpl_reg + offset);
++}
++
++static inline u32 ipu_dmfc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dmfc_reg + offset);
++}
++
++static inline void ipu_dmfc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dmfc_reg + offset);
++}
++
++static inline u32 ipu_dp_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dp_reg + offset);
++}
++
++static inline void ipu_dp_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dp_reg + offset);
++}
++
++static inline u32 ipu_di_read(struct ipu_soc *ipu, int di, unsigned offset)
++{
++ return readl(ipu->di_reg[di] + offset);
++}
++
++static inline void ipu_di_write(struct ipu_soc *ipu, int di,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->di_reg[di] + offset);
++}
++
++static inline u32 ipu_csi_read(struct ipu_soc *ipu, int csi, unsigned offset)
++{
++ return readl(ipu->csi_reg[csi] + offset);
++}
++
++static inline void ipu_csi_write(struct ipu_soc *ipu, int csi,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->csi_reg[csi] + offset);
++}
++
++static inline u32 ipu_smfc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->smfc_reg + offset);
++}
++
++static inline void ipu_smfc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->smfc_reg + offset);
++}
++
++static inline u32 ipu_vdi_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->vdi_reg + offset);
++}
++
++static inline void ipu_vdi_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->vdi_reg + offset);
++}
++
++static inline u32 ipu_ic_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->ic_reg + offset);
++}
++
++static inline void ipu_ic_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->ic_reg + offset);
++}
++
++int register_ipu_device(struct ipu_soc *ipu, int id);
++void unregister_ipu_device(struct ipu_soc *ipu, int id);
++ipu_color_space_t format_to_colorspace(uint32_t fmt);
++bool ipu_pixel_format_has_alpha(uint32_t fmt);
++
++void ipu_dump_registers(struct ipu_soc *ipu);
++
++uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel);
++
++void ipu_disp_init(struct ipu_soc *ipu);
++void _ipu_init_dc_mappings(struct ipu_soc *ipu);
++int _ipu_dp_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t in_pixel_fmt,
++ uint32_t out_pixel_fmt);
++void _ipu_dp_uninit(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_dc_init(struct ipu_soc *ipu, int dc_chan, int di, bool interlaced, uint32_t pixel_fmt);
++void _ipu_dc_uninit(struct ipu_soc *ipu, int dc_chan);
++void _ipu_dp_dc_enable(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_dp_dc_disable(struct ipu_soc *ipu, ipu_channel_t channel, bool swap);
++void _ipu_dmfc_init(struct ipu_soc *ipu, int dmfc_type, int first);
++void _ipu_dmfc_set_wait4eot(struct ipu_soc *ipu, int dma_chan, int width);
++void _ipu_dmfc_set_burst_size(struct ipu_soc *ipu, int dma_chan, int burst_size);
++int _ipu_disp_chan_is_interlaced(struct ipu_soc *ipu, ipu_channel_t channel);
++
++void _ipu_ic_enable_task(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_ic_disable_task(struct ipu_soc *ipu, ipu_channel_t channel);
++int _ipu_ic_init_prpvf(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi);
++void _ipu_vdi_init(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params);
++void _ipu_vdi_uninit(struct ipu_soc *ipu);
++void _ipu_ic_uninit_prpvf(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_vf(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_vf(struct ipu_soc *ipu);
++void _ipu_ic_init_csi(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_csi(struct ipu_soc *ipu);
++int _ipu_ic_init_prpenc(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi);
++void _ipu_ic_uninit_prpenc(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_enc(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_enc(struct ipu_soc *ipu);
++int _ipu_ic_init_pp(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_pp(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_pp(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_pp(struct ipu_soc *ipu);
++int _ipu_ic_idma_init(struct ipu_soc *ipu, int dma_chan, uint16_t width, uint16_t height,
++ int burst_size, ipu_rotate_mode_t rot);
++void _ipu_vdi_toggle_top_field_man(struct ipu_soc *ipu);
++int _ipu_csi_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t csi);
++int _ipu_csi_set_mipi_di(struct ipu_soc *ipu, uint32_t num, uint32_t di_val, uint32_t csi);
++void ipu_csi_set_test_generator(struct ipu_soc *ipu, bool active, uint32_t r_value,
++ uint32_t g_value, uint32_t b_value,
++ uint32_t pix_clk, uint32_t csi);
++void _ipu_csi_ccir_err_detection_enable(struct ipu_soc *ipu, uint32_t csi);
++void _ipu_csi_ccir_err_detection_disable(struct ipu_soc *ipu, uint32_t csi);
++void _ipu_csi_wait4eof(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_smfc_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t mipi_id, uint32_t csi);
++void _ipu_smfc_set_burst_size(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t bs);
++void _ipu_dp_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3]);
++int32_t _ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos);
++int32_t _ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos);
++void _ipu_get(struct ipu_soc *ipu);
++void _ipu_put(struct ipu_soc *ipu);
++
++struct clk *clk_register_mux_pix_clk(struct device *dev, const char *name,
++ const char **parent_names, u8 num_parents, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_mux_flags);
++struct clk *clk_register_div_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_div_flags);
++struct clk *clk_register_gate_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_gate_flags);
++#endif /* __INCLUDE_IPU_PRV_H__ */
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/ipu_regs.h linux-openelec/drivers/mxc/ipu3/ipu_regs.h
+--- linux-3.14.36/drivers/mxc/ipu3/ipu_regs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/ipu_regs.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,743 @@
++/*
++ * Copyright (C) 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_regs.h
++ *
++ * @brief IPU Register definitions
++ *
++ * @ingroup IPU
++ */
++#ifndef __IPU_REGS_INCLUDED__
++#define __IPU_REGS_INCLUDED__
++
++enum imx_ipu_rev {
++ IPU_V3DEX = 2,
++ IPU_V3M,
++ IPU_V3H,
++};
++
++/*
++ * hw_rev 2: IPUV3DEX
++ * hw_rev 3: IPUV3M
++ * hw_rev 4: IPUV3H
++ */
++extern int g_ipu_hw_rev;
++
++#define IPU_MAX_VDI_IN_WIDTH ({g_ipu_hw_rev >= 3 ? \
++ (968) : \
++ (720); })
++#define IPU_DISP0_BASE 0x00000000
++#define IPU_MCU_T_DEFAULT 8
++#define IPU_DISP1_BASE ({g_ipu_hw_rev < 4 ? \
++ (IPU_MCU_T_DEFAULT << 25) : \
++ (0x00000000); })
++#define IPUV3DEX_REG_BASE 0x1E000000
++#define IPUV3M_REG_BASE 0x06000000
++#define IPUV3H_REG_BASE 0x00200000
++
++#define IPU_CM_REG_BASE 0x00000000
++#define IPU_IDMAC_REG_BASE 0x00008000
++#define IPU_ISP_REG_BASE 0x00010000
++#define IPU_DP_REG_BASE 0x00018000
++#define IPU_IC_REG_BASE 0x00020000
++#define IPU_IRT_REG_BASE 0x00028000
++#define IPU_CSI0_REG_BASE 0x00030000
++#define IPU_CSI1_REG_BASE 0x00038000
++#define IPU_DI0_REG_BASE 0x00040000
++#define IPU_DI1_REG_BASE 0x00048000
++#define IPU_SMFC_REG_BASE 0x00050000
++#define IPU_DC_REG_BASE 0x00058000
++#define IPU_DMFC_REG_BASE 0x00060000
++#define IPU_VDI_REG_BASE 0x00068000
++#define IPU_CPMEM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00100000) : \
++ (0x01000000); })
++#define IPU_LUT_REG_BASE 0x01020000
++#define IPU_SRM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00140000) : \
++ (0x01040000); })
++#define IPU_TPM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00160000) : \
++ (0x01060000); })
++#define IPU_DC_TMPL_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00180000) : \
++ (0x01080000); })
++#define IPU_ISP_TBPR_REG_BASE 0x010C0000
++
++/* Register addresses */
++/* IPU Common registers */
++#define IPU_CM_REG(offset) (offset)
++
++#define IPU_CONF IPU_CM_REG(0)
++#define IPU_SRM_PRI1 IPU_CM_REG(0x00A0)
++#define IPU_SRM_PRI2 IPU_CM_REG(0x00A4)
++#define IPU_FS_PROC_FLOW1 IPU_CM_REG(0x00A8)
++#define IPU_FS_PROC_FLOW2 IPU_CM_REG(0x00AC)
++#define IPU_FS_PROC_FLOW3 IPU_CM_REG(0x00B0)
++#define IPU_FS_DISP_FLOW1 IPU_CM_REG(0x00B4)
++#define IPU_FS_DISP_FLOW2 IPU_CM_REG(0x00B8)
++#define IPU_SKIP IPU_CM_REG(0x00BC)
++#define IPU_DISP_ALT_CONF IPU_CM_REG(0x00C0)
++#define IPU_DISP_GEN IPU_CM_REG(0x00C4)
++#define IPU_DISP_ALT1 IPU_CM_REG(0x00C8)
++#define IPU_DISP_ALT2 IPU_CM_REG(0x00CC)
++#define IPU_DISP_ALT3 IPU_CM_REG(0x00D0)
++#define IPU_DISP_ALT4 IPU_CM_REG(0x00D4)
++#define IPU_SNOOP IPU_CM_REG(0x00D8)
++#define IPU_MEM_RST IPU_CM_REG(0x00DC)
++#define IPU_PM IPU_CM_REG(0x00E0)
++#define IPU_GPR IPU_CM_REG(0x00E4)
++#define IPU_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
++#define IPU_ALT_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
++/*
++ * IPUv3D doesn't support triple buffer, so point
++ * IPU_CHA_TRB_MODE_SEL, IPU_CHA_TRIPLE_CUR_BUF and
++ * IPU_CHA_BUF2_RDY to readonly
++ * IPU_ALT_CUR_BUF0 for IPUv3D.
++ */
++#define IPU_CHA_TRB_MODE_SEL(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0178 + 4 * ((ch) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_TRIPLE_CUR_BUF(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0258 + \
++ 4 * (((ch) * 2) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_BUF2_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0288 + 4 * ((ch) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_CUR_BUF(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x023C + 4 * ((ch) / 32)) : \
++ (0x0124 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CUR_BUF0 IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0244) : \
++ (0x012C); })
++#define IPU_ALT_CUR_BUF1 IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0248) : \
++ (0x0130); })
++#define IPU_SRM_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x024C) : \
++ (0x0134); })
++#define IPU_PROC_TASK_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0250) : \
++ (0x0138); })
++#define IPU_DISP_TASK_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0254) : \
++ (0x013C); })
++#define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0268 + 4 * ((ch) / 32)) : \
++ (0x0140 + 4 * ((ch) / 32)); })
++#define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0270 + 4 * ((ch) / 32)) : \
++ (0x0148 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0278 + 4 * ((ch) / 32)) : \
++ (0x0158 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0280 + 4 * ((ch) / 32)) : \
++ (0x0160 + 4 * ((ch) / 32)); })
++
++#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * ((n) - 1))
++#define IPU_INT_STAT(n) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0200 + 4 * ((n) - 1)) : \
++ (0x00E8 + 4 * ((n) - 1)); })
++
++#define IPUIRQ_2_STATREG(irq) IPU_CM_REG(IPU_INT_STAT(1) + 4 * ((irq) / 32))
++#define IPUIRQ_2_CTRLREG(irq) IPU_CM_REG(IPU_INT_CTRL(1) + 4 * ((irq) / 32))
++#define IPUIRQ_2_MASK(irq) (1UL << ((irq) & 0x1F))
++
++/* IPU VDI registers */
++#define IPU_VDI_REG(offset) (offset)
++
++#define VDI_FSIZE IPU_VDI_REG(0)
++#define VDI_C IPU_VDI_REG(0x0004)
++
++/* IPU CSI Registers */
++#define IPU_CSI_REG(offset) (offset)
++
++#define CSI_SENS_CONF IPU_CSI_REG(0)
++#define CSI_SENS_FRM_SIZE IPU_CSI_REG(0x0004)
++#define CSI_ACT_FRM_SIZE IPU_CSI_REG(0x0008)
++#define CSI_OUT_FRM_CTRL IPU_CSI_REG(0x000C)
++#define CSI_TST_CTRL IPU_CSI_REG(0x0010)
++#define CSI_CCIR_CODE_1 IPU_CSI_REG(0x0014)
++#define CSI_CCIR_CODE_2 IPU_CSI_REG(0x0018)
++#define CSI_CCIR_CODE_3 IPU_CSI_REG(0x001C)
++#define CSI_MIPI_DI IPU_CSI_REG(0x0020)
++#define CSI_SKIP IPU_CSI_REG(0x0024)
++#define CSI_CPD_CTRL IPU_CSI_REG(0x0028)
++#define CSI_CPD_RC(n) IPU_CSI_REG(0x002C + 4 * (n))
++#define CSI_CPD_RS(n) IPU_CSI_REG(0x004C + 4 * (n))
++#define CSI_CPD_GRC(n) IPU_CSI_REG(0x005C + 4 * (n))
++#define CSI_CPD_GRS(n) IPU_CSI_REG(0x007C + 4 * (n))
++#define CSI_CPD_GBC(n) IPU_CSI_REG(0x008C + 4 * (n))
++#define CSI_CPD_GBS(n) IPU_CSI_REG(0x00AC + 4 * (n))
++#define CSI_CPD_BC(n) IPU_CSI_REG(0x00BC + 4 * (n))
++#define CSI_CPD_BS(n) IPU_CSI_REG(0x00DC + 4 * (n))
++#define CSI_CPD_OFFSET1 IPU_CSI_REG(0x00EC)
++#define CSI_CPD_OFFSET2 IPU_CSI_REG(0x00F0)
++
++/* IPU SMFC Registers */
++#define IPU_SMFC_REG(offset) (offset)
++
++#define SMFC_MAP IPU_SMFC_REG(0)
++#define SMFC_WMC IPU_SMFC_REG(0x0004)
++#define SMFC_BS IPU_SMFC_REG(0x0008)
++
++/* IPU IC Registers */
++#define IPU_IC_REG(offset) (offset)
++
++#define IC_CONF IPU_IC_REG(0)
++#define IC_PRP_ENC_RSC IPU_IC_REG(0x0004)
++#define IC_PRP_VF_RSC IPU_IC_REG(0x0008)
++#define IC_PP_RSC IPU_IC_REG(0x000C)
++#define IC_CMBP_1 IPU_IC_REG(0x0010)
++#define IC_CMBP_2 IPU_IC_REG(0x0014)
++#define IC_IDMAC_1 IPU_IC_REG(0x0018)
++#define IC_IDMAC_2 IPU_IC_REG(0x001C)
++#define IC_IDMAC_3 IPU_IC_REG(0x0020)
++#define IC_IDMAC_4 IPU_IC_REG(0x0024)
++
++/* IPU IDMAC Registers */
++#define IPU_IDMAC_REG(offset) (offset)
++
++#define IDMAC_CONF IPU_IDMAC_REG(0x0000)
++#define IDMAC_CHA_EN(ch) IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
++#define IDMAC_SEP_ALPHA IPU_IDMAC_REG(0x000C)
++#define IDMAC_ALT_SEP_ALPHA IPU_IDMAC_REG(0x0010)
++#define IDMAC_CHA_PRI(ch) IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
++#define IDMAC_WM_EN(ch) IPU_IDMAC_REG(0x001C + 4 * ((ch) / 32))
++#define IDMAC_CH_LOCK_EN_1 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0024) : 0; })
++#define IDMAC_CH_LOCK_EN_2 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0028) : \
++ (0x0024); })
++#define IDMAC_SUB_ADDR_0 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x002C) : \
++ (0x0028); })
++#define IDMAC_SUB_ADDR_1 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0030) : \
++ (0x002C); })
++#define IDMAC_SUB_ADDR_2 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0034) : \
++ (0x0030); })
++/*
++ * IPUv3D doesn't support IDMAC_SUB_ADDR_3 and IDMAC_SUB_ADDR_4,
++ * so point them to readonly IDMAC_CHA_BUSY1 for IPUv3D.
++ */
++#define IDMAC_SUB_ADDR_3 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0038) : \
++ (0x0040); })
++#define IDMAC_SUB_ADDR_4 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x003C) : \
++ (0x0040); })
++#define IDMAC_BAND_EN(ch) IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0040 + 4 * ((ch) / 32)) : \
++ (0x0034 + 4 * ((ch) / 32)); })
++#define IDMAC_CHA_BUSY(ch) IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0100 + 4 * ((ch) / 32)) : \
++ (0x0040 + 4 * ((ch) / 32)); })
++
++/* IPU DI Registers */
++#define IPU_DI_REG(offset) (offset)
++
++#define DI_GENERAL IPU_DI_REG(0)
++#define DI_BS_CLKGEN0 IPU_DI_REG(0x0004)
++#define DI_BS_CLKGEN1 IPU_DI_REG(0x0008)
++#define DI_SW_GEN0(gen) IPU_DI_REG(0x000C + 4 * ((gen) - 1))
++#define DI_SW_GEN1(gen) IPU_DI_REG(0x0030 + 4 * ((gen) - 1))
++#define DI_STP_REP(gen) IPU_DI_REG(0x0148 + 4 * (((gen) - 1) / 2))
++#define DI_SYNC_AS_GEN IPU_DI_REG(0x0054)
++#define DI_DW_GEN(gen) IPU_DI_REG(0x0058 + 4 * (gen))
++#define DI_DW_SET(gen, set) IPU_DI_REG(0x0088 + 4 * ((gen) + 0xC * (set)))
++#define DI_SER_CONF IPU_DI_REG(0x015C)
++#define DI_SSC IPU_DI_REG(0x0160)
++#define DI_POL IPU_DI_REG(0x0164)
++#define DI_AW0 IPU_DI_REG(0x0168)
++#define DI_AW1 IPU_DI_REG(0x016C)
++#define DI_SCR_CONF IPU_DI_REG(0x0170)
++#define DI_STAT IPU_DI_REG(0x0174)
++
++/* IPU DMFC Registers */
++#define IPU_DMFC_REG(offset) (offset)
++
++#define DMFC_RD_CHAN IPU_DMFC_REG(0)
++#define DMFC_WR_CHAN IPU_DMFC_REG(0x0004)
++#define DMFC_WR_CHAN_DEF IPU_DMFC_REG(0x0008)
++#define DMFC_DP_CHAN IPU_DMFC_REG(0x000C)
++#define DMFC_DP_CHAN_DEF IPU_DMFC_REG(0x0010)
++#define DMFC_GENERAL1 IPU_DMFC_REG(0x0014)
++#define DMFC_GENERAL2 IPU_DMFC_REG(0x0018)
++#define DMFC_IC_CTRL IPU_DMFC_REG(0x001C)
++#define DMFC_STAT IPU_DMFC_REG(0x0020)
++
++/* IPU DC Registers */
++#define IPU_DC_REG(offset) (offset)
++
++#define DC_MAP_CONF_PTR(n) IPU_DC_REG(0x0108 + ((n) & ~0x1) * 2)
++#define DC_MAP_CONF_VAL(n) IPU_DC_REG(0x0144 + ((n) & ~0x1) * 2)
++
++#define _RL_CH_2_OFFSET(ch) (((ch) == 0) ? 8 : ( \
++ ((ch) == 1) ? 0x24 : ( \
++ ((ch) == 2) ? 0x40 : ( \
++ ((ch) == 5) ? 0x64 : ( \
++ ((ch) == 6) ? 0x80 : ( \
++ ((ch) == 8) ? 0x9C : ( \
++ ((ch) == 9) ? 0xBC : (-1))))))))
++#define DC_RL_CH(ch, evt) IPU_DC_REG(_RL_CH_2_OFFSET(ch) + \
++ ((evt) & ~0x1) * 2)
++
++#define DC_EVT_NF 0
++#define DC_EVT_NL 1
++#define DC_EVT_EOF 2
++#define DC_EVT_NFIELD 3
++#define DC_EVT_EOL 4
++#define DC_EVT_EOFIELD 5
++#define DC_EVT_NEW_ADDR 6
++#define DC_EVT_NEW_CHAN 7
++#define DC_EVT_NEW_DATA 8
++
++#define DC_EVT_NEW_ADDR_W_0 0
++#define DC_EVT_NEW_ADDR_W_1 1
++#define DC_EVT_NEW_CHAN_W_0 2
++#define DC_EVT_NEW_CHAN_W_1 3
++#define DC_EVT_NEW_DATA_W_0 4
++#define DC_EVT_NEW_DATA_W_1 5
++#define DC_EVT_NEW_ADDR_R_0 6
++#define DC_EVT_NEW_ADDR_R_1 7
++#define DC_EVT_NEW_CHAN_R_0 8
++#define DC_EVT_NEW_CHAN_R_1 9
++#define DC_EVT_NEW_DATA_R_0 10
++#define DC_EVT_NEW_DATA_R_1 11
++#define DC_EVEN_UGDE0 12
++#define DC_ODD_UGDE0 13
++#define DC_EVEN_UGDE1 14
++#define DC_ODD_UGDE1 15
++#define DC_EVEN_UGDE2 16
++#define DC_ODD_UGDE2 17
++#define DC_EVEN_UGDE3 18
++#define DC_ODD_UGDE3 19
++
++#define dc_ch_offset(ch) \
++({ \
++ const u8 _offset[] = { \
++ 0, 0x1C, 0x38, 0x54, 0x58, 0x5C, 0x78, 0, 0x94, 0xB4}; \
++ _offset[ch]; \
++})
++#define DC_WR_CH_CONF(ch) IPU_DC_REG(dc_ch_offset(ch))
++#define DC_WR_CH_ADDR(ch) IPU_DC_REG(dc_ch_offset(ch) + 4)
++
++#define DC_WR_CH_CONF_1 IPU_DC_REG(0x001C)
++#define DC_WR_CH_ADDR_1 IPU_DC_REG(0x0020)
++#define DC_WR_CH_CONF_5 IPU_DC_REG(0x005C)
++#define DC_WR_CH_ADDR_5 IPU_DC_REG(0x0060)
++#define DC_GEN IPU_DC_REG(0x00D4)
++#define DC_DISP_CONF1(disp) IPU_DC_REG(0x00D8 + 4 * (disp))
++#define DC_DISP_CONF2(disp) IPU_DC_REG(0x00E8 + 4 * (disp))
++#define DC_STAT IPU_DC_REG(0x01C8)
++#define DC_UGDE_0(evt) IPU_DC_REG(0x0174 + 16 * (evt))
++#define DC_UGDE_1(evt) IPU_DC_REG(0x0178 + 16 * (evt))
++#define DC_UGDE_2(evt) IPU_DC_REG(0x017C + 16 * (evt))
++#define DC_UGDE_3(evt) IPU_DC_REG(0x0180 + 16 * (evt))
++
++/* IPU DP Registers */
++#define IPU_DP_REG(offset) (offset)
++
++#define DP_SYNC 0
++#define DP_ASYNC0 0x60
++#define DP_ASYNC1 0xBC
++#define DP_COM_CONF(flow) IPU_DP_REG(flow)
++#define DP_GRAPH_WIND_CTRL(flow) IPU_DP_REG(0x0004 + (flow))
++#define DP_FG_POS(flow) IPU_DP_REG(0x0008 + (flow))
++#define DP_GAMMA_C(flow, i) IPU_DP_REG(0x0014 + (flow) + 4 * (i))
++#define DP_GAMMA_S(flow, i) IPU_DP_REG(0x0034 + (flow) + 4 * (i))
++#define DP_CSC_A_0(flow) IPU_DP_REG(0x0044 + (flow))
++#define DP_CSC_A_1(flow) IPU_DP_REG(0x0048 + (flow))
++#define DP_CSC_A_2(flow) IPU_DP_REG(0x004C + (flow))
++#define DP_CSC_A_3(flow) IPU_DP_REG(0x0050 + (flow))
++#define DP_CSC_0(flow) IPU_DP_REG(0x0054 + (flow))
++#define DP_CSC_1(flow) IPU_DP_REG(0x0058 + (flow))
++
++enum {
++ IPU_CONF_CSI0_EN = 0x00000001,
++ IPU_CONF_CSI1_EN = 0x00000002,
++ IPU_CONF_IC_EN = 0x00000004,
++ IPU_CONF_ROT_EN = 0x00000008,
++ IPU_CONF_ISP_EN = 0x00000010,
++ IPU_CONF_DP_EN = 0x00000020,
++ IPU_CONF_DI0_EN = 0x00000040,
++ IPU_CONF_DI1_EN = 0x00000080,
++ IPU_CONF_DMFC_EN = 0x00000400,
++ IPU_CONF_SMFC_EN = 0x00000100,
++ IPU_CONF_DC_EN = 0x00000200,
++ IPU_CONF_VDI_EN = 0x00001000,
++ IPU_CONF_IDMAC_DIS = 0x00400000,
++ IPU_CONF_IC_DMFC_SEL = 0x02000000,
++ IPU_CONF_IC_DMFC_SYNC = 0x04000000,
++ IPU_CONF_VDI_DMFC_SYNC = 0x08000000,
++ IPU_CONF_CSI0_DATA_SOURCE = 0x10000000,
++ IPU_CONF_CSI0_DATA_SOURCE_OFFSET = 28,
++ IPU_CONF_CSI1_DATA_SOURCE = 0x20000000,
++ IPU_CONF_IC_INPUT = 0x40000000,
++ IPU_CONF_CSI_SEL = 0x80000000,
++
++ DI0_COUNTER_RELEASE = 0x01000000,
++ DI1_COUNTER_RELEASE = 0x02000000,
++
++ FS_PRPVF_ROT_SRC_SEL_MASK = 0x00000F00,
++ FS_PRPVF_ROT_SRC_SEL_OFFSET = 8,
++ FS_PRPENC_ROT_SRC_SEL_MASK = 0x0000000F,
++ FS_PRPENC_ROT_SRC_SEL_OFFSET = 0,
++ FS_PP_ROT_SRC_SEL_MASK = 0x000F0000,
++ FS_PP_ROT_SRC_SEL_OFFSET = 16,
++ FS_PP_SRC_SEL_MASK = 0x0000F000,
++ FS_PP_SRC_SEL_VDOA = 0x00008000,
++ FS_PP_SRC_SEL_OFFSET = 12,
++ FS_PRP_SRC_SEL_MASK = 0x0F000000,
++ FS_PRP_SRC_SEL_OFFSET = 24,
++ FS_VF_IN_VALID = 0x80000000,
++ FS_ENC_IN_VALID = 0x40000000,
++ FS_VDI_SRC_SEL_MASK = 0x30000000,
++ FS_VDI_SRC_SEL_VDOA = 0x20000000,
++ FS_VDOA_DEST_SEL_MASK = 0x00030000,
++ FS_VDOA_DEST_SEL_VDI = 0x00020000,
++ FS_VDOA_DEST_SEL_IC = 0x00010000,
++ FS_VDI_SRC_SEL_OFFSET = 28,
++
++
++ FS_PRPENC_DEST_SEL_MASK = 0x0000000F,
++ FS_PRPENC_DEST_SEL_OFFSET = 0,
++ FS_PRPVF_DEST_SEL_MASK = 0x000000F0,
++ FS_PRPVF_DEST_SEL_OFFSET = 4,
++ FS_PRPVF_ROT_DEST_SEL_MASK = 0x00000F00,
++ FS_PRPVF_ROT_DEST_SEL_OFFSET = 8,
++ FS_PP_DEST_SEL_MASK = 0x0000F000,
++ FS_PP_DEST_SEL_OFFSET = 12,
++ FS_PP_ROT_DEST_SEL_MASK = 0x000F0000,
++ FS_PP_ROT_DEST_SEL_OFFSET = 16,
++ FS_PRPENC_ROT_DEST_SEL_MASK = 0x00F00000,
++ FS_PRPENC_ROT_DEST_SEL_OFFSET = 20,
++
++ FS_SMFC0_DEST_SEL_MASK = 0x0000000F,
++ FS_SMFC0_DEST_SEL_OFFSET = 0,
++ FS_SMFC1_DEST_SEL_MASK = 0x00000070,
++ FS_SMFC1_DEST_SEL_OFFSET = 4,
++ FS_SMFC2_DEST_SEL_MASK = 0x00000780,
++ FS_SMFC2_DEST_SEL_OFFSET = 7,
++ FS_SMFC3_DEST_SEL_MASK = 0x00003800,
++ FS_SMFC3_DEST_SEL_OFFSET = 11,
++
++ FS_DC1_SRC_SEL_MASK = 0x00F00000,
++ FS_DC1_SRC_SEL_OFFSET = 20,
++ FS_DC2_SRC_SEL_MASK = 0x000F0000,
++ FS_DC2_SRC_SEL_OFFSET = 16,
++ FS_DP_SYNC0_SRC_SEL_MASK = 0x0000000F,
++ FS_DP_SYNC0_SRC_SEL_OFFSET = 0,
++ FS_DP_SYNC1_SRC_SEL_MASK = 0x000000F0,
++ FS_DP_SYNC1_SRC_SEL_OFFSET = 4,
++ FS_DP_ASYNC0_SRC_SEL_MASK = 0x00000F00,
++ FS_DP_ASYNC0_SRC_SEL_OFFSET = 8,
++ FS_DP_ASYNC1_SRC_SEL_MASK = 0x0000F000,
++ FS_DP_ASYNC1_SRC_SEL_OFFSET = 12,
++
++ FS_AUTO_REF_PER_MASK = 0,
++ FS_AUTO_REF_PER_OFFSET = 16,
++
++ TSTAT_VF_MASK = 0x0000000C,
++ TSTAT_VF_OFFSET = 2,
++ TSTAT_VF_ROT_MASK = 0x00000300,
++ TSTAT_VF_ROT_OFFSET = 8,
++ TSTAT_ENC_MASK = 0x00000003,
++ TSTAT_ENC_OFFSET = 0,
++ TSTAT_ENC_ROT_MASK = 0x000000C0,
++ TSTAT_ENC_ROT_OFFSET = 6,
++ TSTAT_PP_MASK = 0x00000030,
++ TSTAT_PP_OFFSET = 4,
++ TSTAT_PP_ROT_MASK = 0x00000C00,
++ TSTAT_PP_ROT_OFFSET = 10,
++
++ TASK_STAT_IDLE = 0,
++ TASK_STAT_ACTIVE = 1,
++ TASK_STAT_WAIT4READY = 2,
++
++ /* Image Converter Register bits */
++ IC_CONF_PRPENC_EN = 0x00000001,
++ IC_CONF_PRPENC_CSC1 = 0x00000002,
++ IC_CONF_PRPENC_ROT_EN = 0x00000004,
++ IC_CONF_PRPVF_EN = 0x00000100,
++ IC_CONF_PRPVF_CSC1 = 0x00000200,
++ IC_CONF_PRPVF_CSC2 = 0x00000400,
++ IC_CONF_PRPVF_CMB = 0x00000800,
++ IC_CONF_PRPVF_ROT_EN = 0x00001000,
++ IC_CONF_PP_EN = 0x00010000,
++ IC_CONF_PP_CSC1 = 0x00020000,
++ IC_CONF_PP_CSC2 = 0x00040000,
++ IC_CONF_PP_CMB = 0x00080000,
++ IC_CONF_PP_ROT_EN = 0x00100000,
++ IC_CONF_IC_GLB_LOC_A = 0x10000000,
++ IC_CONF_KEY_COLOR_EN = 0x20000000,
++ IC_CONF_RWS_EN = 0x40000000,
++ IC_CONF_CSI_MEM_WR_EN = 0x80000000,
++
++ IC_RSZ_MAX_RESIZE_RATIO = 0x00004000,
++
++ IC_IDMAC_1_CB0_BURST_16 = 0x00000001,
++ IC_IDMAC_1_CB1_BURST_16 = 0x00000002,
++ IC_IDMAC_1_CB2_BURST_16 = 0x00000004,
++ IC_IDMAC_1_CB3_BURST_16 = 0x00000008,
++ IC_IDMAC_1_CB4_BURST_16 = 0x00000010,
++ IC_IDMAC_1_CB5_BURST_16 = 0x00000020,
++ IC_IDMAC_1_CB6_BURST_16 = 0x00000040,
++ IC_IDMAC_1_CB7_BURST_16 = 0x00000080,
++ IC_IDMAC_1_PRPENC_ROT_MASK = 0x00003800,
++ IC_IDMAC_1_PRPENC_ROT_OFFSET = 11,
++ IC_IDMAC_1_PRPVF_ROT_MASK = 0x0001C000,
++ IC_IDMAC_1_PRPVF_ROT_OFFSET = 14,
++ IC_IDMAC_1_PP_ROT_MASK = 0x000E0000,
++ IC_IDMAC_1_PP_ROT_OFFSET = 17,
++ IC_IDMAC_1_PP_FLIP_RS = 0x00400000,
++ IC_IDMAC_1_PRPVF_FLIP_RS = 0x00200000,
++ IC_IDMAC_1_PRPENC_FLIP_RS = 0x00100000,
++
++ IC_IDMAC_2_PRPENC_HEIGHT_MASK = 0x000003FF,
++ IC_IDMAC_2_PRPENC_HEIGHT_OFFSET = 0,
++ IC_IDMAC_2_PRPVF_HEIGHT_MASK = 0x000FFC00,
++ IC_IDMAC_2_PRPVF_HEIGHT_OFFSET = 10,
++ IC_IDMAC_2_PP_HEIGHT_MASK = 0x3FF00000,
++ IC_IDMAC_2_PP_HEIGHT_OFFSET = 20,
++
++ IC_IDMAC_3_PRPENC_WIDTH_MASK = 0x000003FF,
++ IC_IDMAC_3_PRPENC_WIDTH_OFFSET = 0,
++ IC_IDMAC_3_PRPVF_WIDTH_MASK = 0x000FFC00,
++ IC_IDMAC_3_PRPVF_WIDTH_OFFSET = 10,
++ IC_IDMAC_3_PP_WIDTH_MASK = 0x3FF00000,
++ IC_IDMAC_3_PP_WIDTH_OFFSET = 20,
++
++ CSI_SENS_CONF_DATA_FMT_SHIFT = 8,
++ CSI_SENS_CONF_DATA_FMT_MASK = 0x00000700,
++ CSI_SENS_CONF_DATA_FMT_RGB_YUV444 = 0L,
++ CSI_SENS_CONF_DATA_FMT_YUV422_YUYV = 1L,
++ CSI_SENS_CONF_DATA_FMT_YUV422_UYVY = 2L,
++ CSI_SENS_CONF_DATA_FMT_BAYER = 3L,
++ CSI_SENS_CONF_DATA_FMT_RGB565 = 4L,
++ CSI_SENS_CONF_DATA_FMT_RGB555 = 5L,
++ CSI_SENS_CONF_DATA_FMT_RGB444 = 6L,
++ CSI_SENS_CONF_DATA_FMT_JPEG = 7L,
++
++ CSI_SENS_CONF_VSYNC_POL_SHIFT = 0,
++ CSI_SENS_CONF_HSYNC_POL_SHIFT = 1,
++ CSI_SENS_CONF_DATA_POL_SHIFT = 2,
++ CSI_SENS_CONF_PIX_CLK_POL_SHIFT = 3,
++ CSI_SENS_CONF_SENS_PRTCL_MASK = 0x00000070L,
++ CSI_SENS_CONF_SENS_PRTCL_SHIFT = 4,
++ CSI_SENS_CONF_PACK_TIGHT_SHIFT = 7,
++ CSI_SENS_CONF_DATA_WIDTH_SHIFT = 11,
++ CSI_SENS_CONF_EXT_VSYNC_SHIFT = 15,
++ CSI_SENS_CONF_DIVRATIO_SHIFT = 16,
++
++ CSI_SENS_CONF_DIVRATIO_MASK = 0x00FF0000L,
++ CSI_SENS_CONF_DATA_DEST_SHIFT = 24,
++ CSI_SENS_CONF_DATA_DEST_MASK = 0x07000000L,
++ CSI_SENS_CONF_JPEG8_EN_SHIFT = 27,
++ CSI_SENS_CONF_JPEG_EN_SHIFT = 28,
++ CSI_SENS_CONF_FORCE_EOF_SHIFT = 29,
++ CSI_SENS_CONF_DATA_EN_POL_SHIFT = 31,
++
++ CSI_DATA_DEST_ISP = 1L,
++ CSI_DATA_DEST_IC = 2L,
++ CSI_DATA_DEST_IDMAC = 4L,
++
++ CSI_CCIR_ERR_DET_EN = 0x01000000L,
++ CSI_HORI_DOWNSIZE_EN = 0x80000000L,
++ CSI_VERT_DOWNSIZE_EN = 0x40000000L,
++ CSI_TEST_GEN_MODE_EN = 0x01000000L,
++
++ CSI_HSC_MASK = 0x1FFF0000,
++ CSI_HSC_SHIFT = 16,
++ CSI_VSC_MASK = 0x00000FFF,
++ CSI_VSC_SHIFT = 0,
++
++ CSI_TEST_GEN_R_MASK = 0x000000FFL,
++ CSI_TEST_GEN_R_SHIFT = 0,
++ CSI_TEST_GEN_G_MASK = 0x0000FF00L,
++ CSI_TEST_GEN_G_SHIFT = 8,
++ CSI_TEST_GEN_B_MASK = 0x00FF0000L,
++ CSI_TEST_GEN_B_SHIFT = 16,
++
++ CSI_MIPI_DI0_MASK = 0x000000FFL,
++ CSI_MIPI_DI0_SHIFT = 0,
++ CSI_MIPI_DI1_MASK = 0x0000FF00L,
++ CSI_MIPI_DI1_SHIFT = 8,
++ CSI_MIPI_DI2_MASK = 0x00FF0000L,
++ CSI_MIPI_DI2_SHIFT = 16,
++ CSI_MIPI_DI3_MASK = 0xFF000000L,
++ CSI_MIPI_DI3_SHIFT = 24,
++
++ CSI_MAX_RATIO_SKIP_ISP_MASK = 0x00070000L,
++ CSI_MAX_RATIO_SKIP_ISP_SHIFT = 16,
++ CSI_SKIP_ISP_MASK = 0x00F80000L,
++ CSI_SKIP_ISP_SHIFT = 19,
++ CSI_MAX_RATIO_SKIP_SMFC_MASK = 0x00000007L,
++ CSI_MAX_RATIO_SKIP_SMFC_SHIFT = 0,
++ CSI_SKIP_SMFC_MASK = 0x000000F8L,
++ CSI_SKIP_SMFC_SHIFT = 3,
++ CSI_ID_2_SKIP_MASK = 0x00000300L,
++ CSI_ID_2_SKIP_SHIFT = 8,
++
++ CSI_COLOR_FIRST_ROW_MASK = 0x00000002L,
++ CSI_COLOR_FIRST_COMP_MASK = 0x00000001L,
++
++ SMFC_MAP_CH0_MASK = 0x00000007L,
++ SMFC_MAP_CH0_SHIFT = 0,
++ SMFC_MAP_CH1_MASK = 0x00000038L,
++ SMFC_MAP_CH1_SHIFT = 3,
++ SMFC_MAP_CH2_MASK = 0x000001C0L,
++ SMFC_MAP_CH2_SHIFT = 6,
++ SMFC_MAP_CH3_MASK = 0x00000E00L,
++ SMFC_MAP_CH3_SHIFT = 9,
++
++ SMFC_WM0_SET_MASK = 0x00000007L,
++ SMFC_WM0_SET_SHIFT = 0,
++ SMFC_WM1_SET_MASK = 0x000001C0L,
++ SMFC_WM1_SET_SHIFT = 6,
++ SMFC_WM2_SET_MASK = 0x00070000L,
++ SMFC_WM2_SET_SHIFT = 16,
++ SMFC_WM3_SET_MASK = 0x01C00000L,
++ SMFC_WM3_SET_SHIFT = 22,
++
++ SMFC_WM0_CLR_MASK = 0x00000038L,
++ SMFC_WM0_CLR_SHIFT = 3,
++ SMFC_WM1_CLR_MASK = 0x00000E00L,
++ SMFC_WM1_CLR_SHIFT = 9,
++ SMFC_WM2_CLR_MASK = 0x00380000L,
++ SMFC_WM2_CLR_SHIFT = 19,
++ SMFC_WM3_CLR_MASK = 0x0E000000L,
++ SMFC_WM3_CLR_SHIFT = 25,
++
++ SMFC_BS0_MASK = 0x0000000FL,
++ SMFC_BS0_SHIFT = 0,
++ SMFC_BS1_MASK = 0x000000F0L,
++ SMFC_BS1_SHIFT = 4,
++ SMFC_BS2_MASK = 0x00000F00L,
++ SMFC_BS2_SHIFT = 8,
++ SMFC_BS3_MASK = 0x0000F000L,
++ SMFC_BS3_SHIFT = 12,
++
++ PF_CONF_TYPE_MASK = 0x00000007,
++ PF_CONF_TYPE_SHIFT = 0,
++ PF_CONF_PAUSE_EN = 0x00000010,
++ PF_CONF_RESET = 0x00008000,
++ PF_CONF_PAUSE_ROW_MASK = 0x00FF0000,
++ PF_CONF_PAUSE_ROW_SHIFT = 16,
++
++ DI_DW_GEN_ACCESS_SIZE_OFFSET = 24,
++ DI_DW_GEN_COMPONENT_SIZE_OFFSET = 16,
++
++ DI_GEN_DI_CLK_EXT = 0x100000,
++ DI_GEN_POLARITY_DISP_CLK = 0x00020000,
++ DI_GEN_POLARITY_1 = 0x00000001,
++ DI_GEN_POLARITY_2 = 0x00000002,
++ DI_GEN_POLARITY_3 = 0x00000004,
++ DI_GEN_POLARITY_4 = 0x00000008,
++ DI_GEN_POLARITY_5 = 0x00000010,
++ DI_GEN_POLARITY_6 = 0x00000020,
++ DI_GEN_POLARITY_7 = 0x00000040,
++ DI_GEN_POLARITY_8 = 0x00000080,
++
++ DI_POL_DRDY_DATA_POLARITY = 0x00000080,
++ DI_POL_DRDY_POLARITY_15 = 0x00000010,
++
++ DI_VSYNC_SEL_OFFSET = 13,
++
++ DC_WR_CH_CONF_FIELD_MODE = 0x00000200,
++ DC_WR_CH_CONF_PROG_TYPE_OFFSET = 5,
++ DC_WR_CH_CONF_PROG_TYPE_MASK = 0x000000E0,
++ DC_WR_CH_CONF_PROG_DI_ID = 0x00000004,
++ DC_WR_CH_CONF_PROG_DISP_ID_OFFSET = 3,
++ DC_WR_CH_CONF_PROG_DISP_ID_MASK = 0x00000018,
++
++ DC_UGDE_0_ODD_EN = 0x02000000,
++ DC_UGDE_0_ID_CODED_MASK = 0x00000007,
++ DC_UGDE_0_ID_CODED_OFFSET = 0,
++ DC_UGDE_0_EV_PRIORITY_MASK = 0x00000078,
++ DC_UGDE_0_EV_PRIORITY_OFFSET = 3,
++
++ DP_COM_CONF_FG_EN = 0x00000001,
++ DP_COM_CONF_GWSEL = 0x00000002,
++ DP_COM_CONF_GWAM = 0x00000004,
++ DP_COM_CONF_GWCKE = 0x00000008,
++ DP_COM_CONF_CSC_DEF_MASK = 0x00000300,
++ DP_COM_CONF_CSC_DEF_OFFSET = 8,
++ DP_COM_CONF_CSC_DEF_FG = 0x00000300,
++ DP_COM_CONF_CSC_DEF_BG = 0x00000200,
++ DP_COM_CONF_CSC_DEF_BOTH = 0x00000100,
++ DP_COM_CONF_GAMMA_EN = 0x00001000,
++ DP_COM_CONF_GAMMA_YUV_EN = 0x00002000,
++
++ DI_SER_CONF_LLA_SER_ACCESS = 0x00000020,
++ DI_SER_CONF_SERIAL_CLK_POL = 0x00000010,
++ DI_SER_CONF_SERIAL_DATA_POL = 0x00000008,
++ DI_SER_CONF_SERIAL_RS_POL = 0x00000004,
++ DI_SER_CONF_SERIAL_CS_POL = 0x00000002,
++ DI_SER_CONF_WAIT4SERIAL = 0x00000001,
++
++ VDI_C_CH_420 = 0x00000000,
++ VDI_C_CH_422 = 0x00000002,
++ VDI_C_MOT_SEL_FULL = 0x00000008,
++ VDI_C_MOT_SEL_LOW = 0x00000004,
++ VDI_C_MOT_SEL_MED = 0x00000000,
++ VDI_C_BURST_SIZE1_4 = 0x00000030,
++ VDI_C_BURST_SIZE2_4 = 0x00000300,
++ VDI_C_BURST_SIZE3_4 = 0x00003000,
++ VDI_C_BURST_SIZE_MASK = 0xF,
++ VDI_C_BURST_SIZE1_OFFSET = 4,
++ VDI_C_BURST_SIZE2_OFFSET = 8,
++ VDI_C_BURST_SIZE3_OFFSET = 12,
++ VDI_C_VWM1_SET_1 = 0x00000000,
++ VDI_C_VWM1_SET_2 = 0x00010000,
++ VDI_C_VWM1_CLR_2 = 0x00080000,
++ VDI_C_VWM3_SET_1 = 0x00000000,
++ VDI_C_VWM3_SET_2 = 0x00400000,
++ VDI_C_VWM3_CLR_2 = 0x02000000,
++ VDI_C_TOP_FIELD_MAN_1 = 0x40000000,
++ VDI_C_TOP_FIELD_AUTO_1 = 0x80000000,
++};
++
++enum di_pins {
++ DI_PIN11 = 0,
++ DI_PIN12 = 1,
++ DI_PIN13 = 2,
++ DI_PIN14 = 3,
++ DI_PIN15 = 4,
++ DI_PIN16 = 5,
++ DI_PIN17 = 6,
++ DI_PIN_CS = 7,
++
++ DI_PIN_SER_CLK = 0,
++ DI_PIN_SER_RS = 1,
++};
++
++enum di_sync_wave {
++ DI_SYNC_NONE = -1,
++ DI_SYNC_CLK = 0,
++ DI_SYNC_INT_HSYNC = 1,
++ DI_SYNC_HSYNC = 2,
++ DI_SYNC_VSYNC = 3,
++ DI_SYNC_DE = 5,
++};
++
++/* DC template opcodes */
++#define WROD(lf) (0x18 | (lf << 1))
++#define WRG (0x01)
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/Kconfig linux-openelec/drivers/mxc/ipu3/Kconfig
+--- linux-3.14.36/drivers/mxc/ipu3/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2 @@
++config MXC_IPU_V3
++ bool
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/Makefile linux-openelec/drivers/mxc/ipu3/Makefile
+--- linux-3.14.36/drivers/mxc/ipu3/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,4 @@
++obj-$(CONFIG_MXC_IPU_V3) = mxc_ipu.o
++
++mxc_ipu-objs := ipu_common.o ipu_ic.o ipu_disp.o ipu_capture.o ipu_device.o \
++ ipu_calc_stripes_sizes.o vdoa.o ipu_pixel_clk.o
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/vdoa.c linux-openelec/drivers/mxc/ipu3/vdoa.c
+--- linux-3.14.36/drivers/mxc/ipu3/vdoa.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/vdoa.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,543 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/ipu.h>
++#include <linux/genalloc.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++
++#include "vdoa.h"
++/* 6band(3field* double buffer) * (width*2) * bandline(8)
++ = 6x1024x2x8 = 96k or 72k(1.5byte) */
++#define MAX_VDOA_IRAM_SIZE (1024*96)
++#define VDOA_IRAM_SIZE (1024*72)
++
++#define VDOAC_BAND_HEIGHT_32LINES (32)
++#define VDOAC_BAND_HEIGHT_16LINES (16)
++#define VDOAC_BAND_HEIGHT_8LINES (8)
++#define VDOAC_THREE_FRAMES (0x1 << 2)
++#define VDOAC_SYNC_BAND_MODE (0x1 << 3)
++#define VDOAC_SCAN_ORDER_INTERLACED (0x1 << 4)
++#define VDOAC_PFS_YUYV (0x1 << 5)
++#define VDOAC_IPU_SEL_1 (0x1 << 6)
++#define VDOAFP_FH_MASK (0x1FFF)
++#define VDOAFP_FH_SHIFT (16)
++#define VDOAFP_FW_MASK (0x3FFF)
++#define VDOAFP_FW_SHIFT (0)
++#define VDOASL_VSLY_MASK (0x3FFF)
++#define VDOASL_VSLY_SHIFT (16)
++#define VDOASL_ISLY_MASK (0x7FFF)
++#define VDOASL_ISLY_SHIFT (0)
++#define VDOASRR_START_XFER (0x2)
++#define VDOASRR_SWRST (0x1)
++#define VDOAIEIST_TRANSFER_ERR (0x2)
++#define VDOAIEIST_TRANSFER_END (0x1)
++
++#define VDOAC (0x0) /* Control Register */
++#define VDOASRR (0x4) /* Start and Reset Register */
++#define VDOAIE (0x8) /* Interrupt Enable Register */
++#define VDOAIST (0xc) /* Interrupt Status Register */
++#define VDOAFP (0x10) /* Frame Parameters Register */
++#define VDOAIEBA00 (0x14) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA01 (0x18) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA02 (0x1c) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA10 (0x20) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA11 (0x24) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA12 (0x28) /* External Buffer n Frame m Address Register */
++#define VDOASL (0x2c) /* IPU Stride Line Register */
++#define VDOAIUBO (0x30) /* IPU Chroma Buffer Offset Register */
++#define VDOAVEBA0 (0x34) /* External Buffer m Address Register */
++#define VDOAVEBA1 (0x38) /* External Buffer m Address Register */
++#define VDOAVEBA2 (0x3c) /* External Buffer m Address Register */
++#define VDOAVUBO (0x40) /* VPU Chroma Buffer Offset */
++#define VDOASR (0x44) /* Status Register */
++#define VDOATD (0x48) /* Test Debug Register */
++
++
++enum {
++ VDOA_INIT = 0x1,
++ VDOA_GET = 0x2,
++ VDOA_SETUP = 0x4,
++ VDOA_GET_OBUF = 0x8,
++ VDOA_START = 0x10,
++ VDOA_INIRQ = 0x20,
++ VDOA_STOP = 0x40,
++ VDOA_PUT = VDOA_INIT,
++};
++
++enum {
++ VDOA_NULL = 0,
++ VDOA_FRAME = 1,
++ VDOA_PREV_FIELD = 2,
++ VDOA_CURR_FIELD = 3,
++ VDOA_NEXT_FIELD = 4,
++};
++
++#define CHECK_STATE(expect, retcode) \
++do { \
++ if (!((expect) & vdoa->state)) { \
++ dev_err(vdoa->dev, "ERR: %s state:0x%x, expect:0x%x.\n",\
++ __func__, vdoa->state, (expect)); \
++ retcode; \
++ } \
++} while (0)
++
++#define CHECK_NULL_PTR(ptr) \
++do { \
++ pr_debug("vdoa_ptr:0x%p in %s state:0x%x.\n", \
++ vdoa, __func__, vdoa->state); \
++ if (NULL == (ptr)) { \
++ pr_err("ERR vdoa: %s state:0x%x null ptr.\n", \
++ __func__, vdoa->state); \
++ } \
++} while (0)
++
++struct vdoa_info {
++ int state;
++ struct device *dev;
++ struct clk *vdoa_clk;
++ void __iomem *reg_base;
++ struct gen_pool *iram_pool;
++ unsigned long iram_base;
++ unsigned long iram_paddr;
++ int irq;
++ int field;
++ struct completion comp;
++};
++
++static struct vdoa_info *g_vdoa;
++static unsigned long iram_size;
++static DEFINE_MUTEX(vdoa_lock);
++
++static inline void vdoa_read_register(struct vdoa_info *vdoa,
++ u32 reg, u32 *val)
++{
++ *val = ioread32(vdoa->reg_base + reg);
++ dev_dbg(vdoa->dev, "read_reg:0x%02x, val:0x%08x.\n", reg, *val);
++}
++
++static inline void vdoa_write_register(struct vdoa_info *vdoa,
++ u32 reg, u32 val)
++{
++ iowrite32(val, vdoa->reg_base + reg);
++ dev_dbg(vdoa->dev, "\t\twrite_reg:0x%02x, val:0x%08x.\n", reg, val);
++}
++
++static void dump_registers(struct vdoa_info *vdoa)
++{
++ int i;
++ u32 data;
++
++ for (i = VDOAC; i < VDOATD; i += 4)
++ vdoa_read_register(vdoa, i, &data);
++}
++
++int vdoa_setup(vdoa_handle_t handle, struct vdoa_params *params)
++{
++ int band_size;
++ int total_band_size = 0;
++ int ipu_stride;
++ u32 data;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET | VDOA_GET_OBUF | VDOA_STOP, return -EINVAL);
++ if (VDOA_GET == vdoa->state) {
++ dev_dbg(vdoa->dev, "w:%d, h:%d.\n",
++ params->width, params->height);
++ data = (params->band_lines == VDOAC_BAND_HEIGHT_32LINES) ? 2 :
++ ((params->band_lines == VDOAC_BAND_HEIGHT_16LINES) ?
++ 1 : 0);
++ data |= params->scan_order ? VDOAC_SCAN_ORDER_INTERLACED : 0;
++ data |= params->band_mode ? VDOAC_SYNC_BAND_MODE : 0;
++ data |= params->pfs ? VDOAC_PFS_YUYV : 0;
++ data |= params->ipu_num ? VDOAC_IPU_SEL_1 : 0;
++ vdoa_write_register(vdoa, VDOAC, data);
++
++ data = ((params->width & VDOAFP_FW_MASK) << VDOAFP_FW_SHIFT) |
++ ((params->height & VDOAFP_FH_MASK) << VDOAFP_FH_SHIFT);
++ vdoa_write_register(vdoa, VDOAFP, data);
++
++ ipu_stride = params->pfs ? params->width << 1 : params->width;
++ data = ((params->vpu_stride & VDOASL_VSLY_MASK) <<
++ VDOASL_VSLY_SHIFT) |
++ ((ipu_stride & VDOASL_ISLY_MASK) << VDOASL_ISLY_SHIFT);
++ vdoa_write_register(vdoa, VDOASL, data);
++
++ dev_dbg(vdoa->dev, "band_mode:%d, band_line:%d, base:0x%lx.\n",
++ params->band_mode, params->band_lines, vdoa->iram_paddr);
++ }
++ /*
++ * band size = (luma_per_line + chroma_per_line) * bandLines
++ * = width * (3/2 or 2) * bandLines
++ * double buffer mode used.
++ */
++ if (params->pfs)
++ band_size = (params->width << 1) * params->band_lines;
++ else
++ band_size = ((params->width * 3) >> 1) *
++ params->band_lines;
++ if (params->interlaced) {
++ total_band_size = 6 * band_size; /* 3 frames*double buffer */
++ if (iram_size < total_band_size) {
++ dev_err(vdoa->dev, "iram_size:0x%lx is smaller than "
++ "request:0x%x!\n", iram_size, total_band_size);
++ return -EINVAL;
++ }
++ if (params->vfield_buf.prev_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA00,
++ vdoa->iram_paddr);
++ vdoa_write_register(vdoa, VDOAIEBA10,
++ vdoa->iram_paddr + band_size);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA00,
++ params->ieba0);
++ vdoa_write_register(vdoa, VDOAVEBA0,
++ params->vfield_buf.prev_veba);
++ vdoa->field = VDOA_PREV_FIELD;
++ }
++ if (params->vfield_buf.cur_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA01,
++ vdoa->iram_paddr + band_size * 2);
++ vdoa_write_register(vdoa, VDOAIEBA11,
++ vdoa->iram_paddr + band_size * 3);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA01,
++ params->ieba1);
++ vdoa_write_register(vdoa, VDOAVEBA1,
++ params->vfield_buf.cur_veba);
++ vdoa->field = VDOA_CURR_FIELD;
++ }
++ if (params->vfield_buf.next_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA02,
++ vdoa->iram_paddr + band_size * 4);
++ vdoa_write_register(vdoa, VDOAIEBA12,
++ vdoa->iram_paddr + band_size * 5);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA02,
++ params->ieba2);
++ vdoa_write_register(vdoa, VDOAVEBA2,
++ params->vfield_buf.next_veba);
++ vdoa->field = VDOA_NEXT_FIELD;
++ vdoa_read_register(vdoa, VDOAC, &data);
++ data |= VDOAC_THREE_FRAMES;
++ vdoa_write_register(vdoa, VDOAC, data);
++ }
++
++ if (!params->pfs)
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->band_lines);
++ vdoa_write_register(vdoa, VDOAVUBO,
++ params->vfield_buf.vubo);
++ dev_dbg(vdoa->dev, "total band_size:0x%x.\n", band_size*6);
++ } else if (params->band_mode) {
++ /* used for progressive frame resize on PrP channel */
++ BUG(); /* currently not support */
++ /* progressvie frame: band mode */
++ vdoa_write_register(vdoa, VDOAIEBA00, vdoa->iram_paddr);
++ vdoa_write_register(vdoa, VDOAIEBA10,
++ vdoa->iram_paddr + band_size);
++ if (!params->pfs)
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->band_lines);
++ dev_dbg(vdoa->dev, "total band_size:0x%x\n", band_size*2);
++ } else {
++ /* progressive frame: mem->mem, non-band mode */
++ vdoa->field = VDOA_FRAME;
++ vdoa_write_register(vdoa, VDOAVEBA0, params->vframe_buf.veba);
++ vdoa_write_register(vdoa, VDOAVUBO, params->vframe_buf.vubo);
++ vdoa_write_register(vdoa, VDOAIEBA00, params->ieba0);
++ if (!params->pfs)
++ /* note: iubo is relative value, based on ieba0 */
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->height);
++ }
++ vdoa->state = VDOA_SETUP;
++ return 0;
++}
++
++void vdoa_get_output_buf(vdoa_handle_t handle, struct vdoa_ipu_buf *buf)
++{
++ u32 data;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_SETUP, return);
++ vdoa->state = VDOA_GET_OBUF;
++ memset(buf, 0, sizeof(*buf));
++
++ vdoa_read_register(vdoa, VDOAC, &data);
++ switch (vdoa->field) {
++ case VDOA_FRAME:
++ case VDOA_PREV_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA00, &buf->ieba0);
++ if (data & VDOAC_SYNC_BAND_MODE)
++ vdoa_read_register(vdoa, VDOAIEBA10, &buf->ieba1);
++ break;
++ case VDOA_CURR_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA01, &buf->ieba0);
++ vdoa_read_register(vdoa, VDOAIEBA11, &buf->ieba1);
++ break;
++ case VDOA_NEXT_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA02, &buf->ieba0);
++ vdoa_read_register(vdoa, VDOAIEBA12, &buf->ieba1);
++ break;
++ default:
++ BUG();
++ break;
++ }
++ if (!(data & VDOAC_PFS_YUYV))
++ vdoa_read_register(vdoa, VDOAIUBO, &buf->iubo);
++}
++
++int vdoa_start(vdoa_handle_t handle, int timeout_ms)
++{
++ int ret;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET_OBUF, return -EINVAL);
++ vdoa->state = VDOA_START;
++ init_completion(&vdoa->comp);
++ vdoa_write_register(vdoa, VDOAIST,
++ VDOAIEIST_TRANSFER_ERR | VDOAIEIST_TRANSFER_END);
++ vdoa_write_register(vdoa, VDOAIE,
++ VDOAIEIST_TRANSFER_ERR | VDOAIEIST_TRANSFER_END);
++
++ enable_irq(vdoa->irq);
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_START_XFER);
++ dump_registers(vdoa);
++
++ ret = wait_for_completion_timeout(&vdoa->comp,
++ msecs_to_jiffies(timeout_ms));
++
++ return ret > 0 ? 0 : -ETIMEDOUT;
++}
++
++void vdoa_stop(vdoa_handle_t handle)
++{
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET | VDOA_START | VDOA_INIRQ, return);
++ vdoa->state = VDOA_STOP;
++
++ disable_irq(vdoa->irq);
++
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_SWRST);
++}
++
++void vdoa_get_handle(vdoa_handle_t *handle)
++{
++ struct vdoa_info *vdoa = g_vdoa;
++
++ CHECK_NULL_PTR(handle);
++ *handle = (vdoa_handle_t *)NULL;
++ CHECK_STATE(VDOA_INIT, return);
++ mutex_lock(&vdoa_lock);
++ clk_prepare_enable(vdoa->vdoa_clk);
++ vdoa->state = VDOA_GET;
++ vdoa->field = VDOA_NULL;
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_SWRST);
++
++ *handle = (vdoa_handle_t *)vdoa;
++}
++
++void vdoa_put_handle(vdoa_handle_t *handle)
++{
++ struct vdoa_info *vdoa = (struct vdoa_info *)(*handle);
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_STOP, return);
++ if (vdoa != g_vdoa)
++ BUG();
++
++ clk_disable_unprepare(vdoa->vdoa_clk);
++ vdoa->state = VDOA_PUT;
++ *handle = (vdoa_handle_t *)NULL;
++ mutex_unlock(&vdoa_lock);
++}
++
++static irqreturn_t vdoa_irq_handler(int irq, void *data)
++{
++ u32 status, mask, val;
++ struct vdoa_info *vdoa = data;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_START, return IRQ_HANDLED);
++ vdoa->state = VDOA_INIRQ;
++ vdoa_read_register(vdoa, VDOAIST, &status);
++ vdoa_read_register(vdoa, VDOAIE, &mask);
++ val = status & mask;
++ vdoa_write_register(vdoa, VDOAIST, val);
++ if (VDOAIEIST_TRANSFER_ERR & val)
++ dev_err(vdoa->dev, "vdoa Transfer err irq!\n");
++ if (VDOAIEIST_TRANSFER_END & val)
++ dev_dbg(vdoa->dev, "vdoa Transfer end irq!\n");
++ if (0 == val) {
++ dev_err(vdoa->dev, "vdoa unknown irq!\n");
++ BUG();
++ }
++
++ complete(&vdoa->comp);
++ return IRQ_HANDLED;
++}
++
++/* IRAM Size in Kbytes, example:vdoa_iram_size=64, 64KBytes */
++static int __init vdoa_iram_size_setup(char *options)
++{
++ int ret;
++
++ ret = strict_strtoul(options, 0, &iram_size);
++ if (ret)
++ iram_size = 0;
++ else
++ iram_size *= SZ_1K;
++
++ return 1;
++}
++__setup("vdoa_iram_size=", vdoa_iram_size_setup);
++
++static const struct of_device_id imx_vdoa_dt_ids[] = {
++ { .compatible = "fsl,imx6q-vdoa", },
++ { /* sentinel */ }
++};
++
++static int vdoa_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct vdoa_info *vdoa;
++ struct resource *res;
++ struct resource *res_irq;
++ struct device *dev = &pdev->dev;
++ struct device_node *np = pdev->dev.of_node;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(dev, "can't get device resources\n");
++ return -ENOENT;
++ }
++
++ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (!res_irq) {
++ dev_err(dev, "failed to get irq resource\n");
++ return -ENOENT;
++ }
++
++ vdoa = devm_kzalloc(dev, sizeof(struct vdoa_info), GFP_KERNEL);
++ if (!vdoa)
++ return -ENOMEM;
++ vdoa->dev = dev;
++
++ vdoa->reg_base = devm_request_and_ioremap(&pdev->dev, res);
++ if (!vdoa->reg_base)
++ return -EBUSY;
++
++ vdoa->irq = res_irq->start;
++ ret = devm_request_irq(dev, vdoa->irq, vdoa_irq_handler, 0,
++ "vdoa", vdoa);
++ if (ret) {
++ dev_err(dev, "can't claim irq %d\n", vdoa->irq);
++ return ret;
++ }
++ disable_irq(vdoa->irq);
++
++ vdoa->vdoa_clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(vdoa->vdoa_clk)) {
++ dev_err(dev, "failed to get vdoa_clk\n");
++ return PTR_ERR(vdoa->vdoa_clk);
++ }
++
++ vdoa->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!vdoa->iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ return -ENOMEM;
++ }
++
++ if ((iram_size == 0) || (iram_size > MAX_VDOA_IRAM_SIZE))
++ iram_size = VDOA_IRAM_SIZE;
++
++ vdoa->iram_base = gen_pool_alloc(vdoa->iram_pool, iram_size);
++ if (!vdoa->iram_base) {
++ dev_err(&pdev->dev, "unable to alloc iram\n");
++ return -ENOMEM;
++ }
++
++ vdoa->iram_paddr = gen_pool_virt_to_phys(vdoa->iram_pool,
++ vdoa->iram_base);
++
++ dev_dbg(dev, "iram_base:0x%lx,iram_paddr:0x%lx,size:0x%lx\n",
++ vdoa->iram_base, vdoa->iram_paddr, iram_size);
++
++ vdoa->state = VDOA_INIT;
++ dev_set_drvdata(dev, vdoa);
++ g_vdoa = vdoa;
++ dev_info(dev, "i.MX Video Data Order Adapter(VDOA) driver probed\n");
++ return 0;
++}
++
++static int vdoa_remove(struct platform_device *pdev)
++{
++ struct vdoa_info *vdoa = dev_get_drvdata(&pdev->dev);
++
++ gen_pool_free(vdoa->iram_pool, vdoa->iram_base, iram_size);
++ kfree(vdoa);
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static struct platform_driver vdoa_driver = {
++ .driver = {
++ .name = "mxc_vdoa",
++ .of_match_table = imx_vdoa_dt_ids,
++ },
++ .probe = vdoa_probe,
++ .remove = vdoa_remove,
++};
++
++static int __init vdoa_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&vdoa_driver);
++ if (err) {
++ pr_err("vdoa_driver register failed\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++
++static void __exit vdoa_cleanup(void)
++{
++ platform_driver_unregister(&vdoa_driver);
++}
++
++module_init(vdoa_init);
++module_exit(vdoa_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX Video Data Order Adapter(VDOA) driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/mxc/ipu3/vdoa.h linux-openelec/drivers/mxc/ipu3/vdoa.h
+--- linux-3.14.36/drivers/mxc/ipu3/vdoa.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/ipu3/vdoa.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,69 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __VDOA_H__
++#define __VDOA_H__
++
++#define VDOA_PFS_YUYV (1)
++#define VDOA_PFS_NV12 (0)
++
++
++struct vfield_buf {
++ u32 prev_veba;
++ u32 cur_veba;
++ u32 next_veba;
++ u32 vubo;
++};
++
++struct vframe_buf {
++ u32 veba;
++ u32 vubo;
++};
++
++struct vdoa_params {
++ u32 width;
++ u32 height;
++ int vpu_stride;
++ int interlaced;
++ int scan_order;
++ int ipu_num;
++ int band_lines;
++ int band_mode;
++ int pfs;
++ u32 ieba0;
++ u32 ieba1;
++ u32 ieba2;
++ struct vframe_buf vframe_buf;
++ struct vfield_buf vfield_buf;
++};
++struct vdoa_ipu_buf {
++ u32 ieba0;
++ u32 ieba1;
++ u32 iubo;
++};
++
++struct vdoa_info;
++typedef void *vdoa_handle_t;
++
++int vdoa_setup(vdoa_handle_t handle, struct vdoa_params *params);
++void vdoa_get_output_buf(vdoa_handle_t handle, struct vdoa_ipu_buf *buf);
++int vdoa_start(vdoa_handle_t handle, int timeout_ms);
++void vdoa_stop(vdoa_handle_t handle);
++void vdoa_get_handle(vdoa_handle_t *handle);
++void vdoa_put_handle(vdoa_handle_t *handle);
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/Kconfig linux-openelec/drivers/mxc/Kconfig
+--- linux-3.14.36/drivers/mxc/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,24 @@
++# drivers/mxc/Kconfig
++
++if ARCH_MXC
++
++menu "MXC support drivers"
++
++config MXC_IPU
++ bool "Image Processing Unit Driver"
++ select MXC_IPU_V3
++ help
++ If you plan to use the Image Processing unit, say
++ Y here. IPU is needed by Framebuffer and V4L2 drivers.
++
++source "drivers/mxc/gpu-viv/Kconfig"
++source "drivers/mxc/ipu3/Kconfig"
++source "drivers/mxc/asrc/Kconfig"
++source "drivers/mxc/vpu/Kconfig"
++source "drivers/mxc/hdmi-cec/Kconfig"
++source "drivers/mxc/mipi/Kconfig"
++source "drivers/mxc/mlb/Kconfig"
++
++endmenu
++
++endif
+diff -Nur linux-3.14.36/drivers/mxc/Makefile linux-openelec/drivers/mxc/Makefile
+--- linux-3.14.36/drivers/mxc/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,11 @@
++ifeq ($(CONFIG_MXC_GPU_VIV_V5),y)
++obj-$(CONFIG_MXC_GPU_VIV) += gpu-viv/v5/
++else
++obj-$(CONFIG_MXC_GPU_VIV) += gpu-viv/v4/
++endif
++obj-$(CONFIG_MXC_IPU_V3) += ipu3/
++obj-$(CONFIG_MXC_ASRC) += asrc/
++obj-$(CONFIG_MXC_VPU) += vpu/
++obj-$(CONFIG_MXC_HDMI_CEC) += hdmi-cec/
++obj-$(CONFIG_MXC_MIPI_CSI2) += mipi/
++obj-$(CONFIG_MXC_MLB) += mlb/
+diff -Nur linux-3.14.36/drivers/mxc/mipi/Kconfig linux-openelec/drivers/mxc/mipi/Kconfig
+--- linux-3.14.36/drivers/mxc/mipi/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/mipi/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,14 @@
++#
++# MIPI configuration
++#
++
++menu "MXC MIPI Support"
++
++config MXC_MIPI_CSI2
++ tristate "MIPI CSI2 support"
++ depends on SOC_IMX6Q
++ default n
++ ---help---
++ Say Y to get the MIPI CSI2 support.
++
++endmenu
+diff -Nur linux-3.14.36/drivers/mxc/mipi/Makefile linux-openelec/drivers/mxc/mipi/Makefile
+--- linux-3.14.36/drivers/mxc/mipi/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/mipi/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,4 @@
++#
++# Makefile for the mipi interface driver
++#
++obj-$(CONFIG_MXC_MIPI_CSI2) += mxc_mipi_csi2.o
+diff -Nur linux-3.14.36/drivers/mxc/mipi/mxc_mipi_csi2.c linux-openelec/drivers/mxc/mipi/mxc_mipi_csi2.c
+--- linux-3.14.36/drivers/mxc/mipi/mxc_mipi_csi2.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/mipi/mxc_mipi_csi2.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,540 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/irqdesc.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/slab.h>
++#include <linux/of.h>
++
++#include <linux/mipi_csi2.h>
++
++#include "mxc_mipi_csi2.h"
++
++static struct mipi_csi2_info *gmipi_csi2;
++
++void _mipi_csi2_lock(struct mipi_csi2_info *info)
++{
++ if (!in_irq() && !in_softirq())
++ mutex_lock(&info->mutex_lock);
++}
++
++void _mipi_csi2_unlock(struct mipi_csi2_info *info)
++{
++ if (!in_irq() && !in_softirq())
++ mutex_unlock(&info->mutex_lock);
++}
++
++static inline void mipi_csi2_write(struct mipi_csi2_info *info,
++ unsigned value, unsigned offset)
++{
++ writel(value, info->mipi_csi2_base + offset);
++}
++
++static inline unsigned int mipi_csi2_read(struct mipi_csi2_info *info,
++ unsigned offset)
++{
++ return readl(info->mipi_csi2_base + offset);
++}
++
++/*!
++ * This function is called to enable the mipi csi2 interface.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns setted value
++ */
++bool mipi_csi2_enable(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++
++ if (!info->mipi_en) {
++ info->mipi_en = true;
++ clk_prepare_enable(info->cfg_clk);
++ clk_prepare_enable(info->dphy_clk);
++ } else
++ mipi_dbg("mipi csi2 already enabled!\n");
++
++ status = info->mipi_en;
++
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_enable);
++
++/*!
++ * This function is called to disable the mipi csi2 interface.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns setted value
++ */
++bool mipi_csi2_disable(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++
++ if (info->mipi_en) {
++ info->mipi_en = false;
++ clk_disable_unprepare(info->dphy_clk);
++ clk_disable_unprepare(info->cfg_clk);
++ } else
++ mipi_dbg("mipi csi2 already disabled!\n");
++
++ status = info->mipi_en;
++
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_disable);
++
++/*!
++ * This function is called to get mipi csi2 disable/enable status.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns mipi csi2 status
++ */
++bool mipi_csi2_get_status(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++ status = info->mipi_en;
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_get_status);
++
++/*!
++ * This function is called to set mipi lanes.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns setted value
++ */
++unsigned int mipi_csi2_set_lanes(struct mipi_csi2_info *info)
++{
++ unsigned int lanes;
++
++ _mipi_csi2_lock(info);
++ mipi_csi2_write(info, info->lanes - 1, MIPI_CSI2_N_LANES);
++ lanes = mipi_csi2_read(info, MIPI_CSI2_N_LANES);
++ _mipi_csi2_unlock(info);
++
++ return lanes;
++}
++EXPORT_SYMBOL(mipi_csi2_set_lanes);
++
++/*!
++ * This function is called to set mipi data type.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns setted value
++ */
++unsigned int mipi_csi2_set_datatype(struct mipi_csi2_info *info,
++ unsigned int datatype)
++{
++ unsigned int dtype;
++
++ _mipi_csi2_lock(info);
++ info->datatype = datatype;
++ dtype = info->datatype;
++ _mipi_csi2_unlock(info);
++
++ return dtype;
++}
++EXPORT_SYMBOL(mipi_csi2_set_datatype);
++
++/*!
++ * This function is called to get mipi data type.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns mipi data type
++ */
++unsigned int mipi_csi2_get_datatype(struct mipi_csi2_info *info)
++{
++ unsigned int dtype;
++
++ _mipi_csi2_lock(info);
++ dtype = info->datatype;
++ _mipi_csi2_unlock(info);
++
++ return dtype;
++}
++EXPORT_SYMBOL(mipi_csi2_get_datatype);
++
++/*!
++ * This function is called to get mipi csi2 dphy status.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns dphy status
++ */
++unsigned int mipi_csi2_dphy_status(struct mipi_csi2_info *info)
++{
++ unsigned int status;
++
++ _mipi_csi2_lock(info);
++ status = mipi_csi2_read(info, MIPI_CSI2_PHY_STATE);
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_dphy_status);
++
++/*!
++ * This function is called to get mipi csi2 error1 status.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns error1 value
++ */
++unsigned int mipi_csi2_get_error1(struct mipi_csi2_info *info)
++{
++ unsigned int err1;
++
++ _mipi_csi2_lock(info);
++ err1 = mipi_csi2_read(info, MIPI_CSI2_ERR1);
++ _mipi_csi2_unlock(info);
++
++ return err1;
++}
++EXPORT_SYMBOL(mipi_csi2_get_error1);
++
++/*!
++ * This function is called to get mipi csi2 error1 status.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns error1 value
++ */
++unsigned int mipi_csi2_get_error2(struct mipi_csi2_info *info)
++{
++ unsigned int err2;
++
++ _mipi_csi2_lock(info);
++ err2 = mipi_csi2_read(info, MIPI_CSI2_ERR2);
++ _mipi_csi2_unlock(info);
++
++ return err2;
++}
++EXPORT_SYMBOL(mipi_csi2_get_error2);
++
++/*!
++ * This function is called to enable mipi to ipu pixel clock.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns 0 on success or negative error code on fail
++ */
++int mipi_csi2_pixelclk_enable(struct mipi_csi2_info *info)
++{
++ return clk_prepare_enable(info->pixel_clk);
++}
++EXPORT_SYMBOL(mipi_csi2_pixelclk_enable);
++
++/*!
++ * This function is called to disable mipi to ipu pixel clock.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns 0 on success or negative error code on fail
++ */
++void mipi_csi2_pixelclk_disable(struct mipi_csi2_info *info)
++{
++ clk_disable_unprepare(info->pixel_clk);
++}
++EXPORT_SYMBOL(mipi_csi2_pixelclk_disable);
++
++/*!
++ * This function is called to power on mipi csi2.
++ *
++ * @param info mipi csi2 hander
++ * @return Returns 0 on success or negative error code on fail
++ */
++int mipi_csi2_reset(struct mipi_csi2_info *info)
++{
++ _mipi_csi2_lock(info);
++
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_PHY_SHUTDOWNZ);
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_DPHY_RSTZ);
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_CSI2_RESETN);
++
++ mipi_csi2_write(info, 0x00000001, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000002, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00010044, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000014, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000002, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_PHY_SHUTDOWNZ);
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_DPHY_RSTZ);
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_CSI2_RESETN);
++
++ _mipi_csi2_unlock(info);
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_csi2_reset);
++
++/*!
++ * This function is called to get mipi csi2 info.
++ *
++ * @return Returns mipi csi2 info struct pointor
++ */
++struct mipi_csi2_info *mipi_csi2_get_info(void)
++{
++ return gmipi_csi2;
++}
++EXPORT_SYMBOL(mipi_csi2_get_info);
++
++/*!
++ * This function is called to get mipi csi2 bind ipu num.
++ *
++ * @return Returns mipi csi2 bind ipu num
++ */
++int mipi_csi2_get_bind_ipu(struct mipi_csi2_info *info)
++{
++ int ipu_id;
++
++ _mipi_csi2_lock(info);
++ ipu_id = info->ipu_id;
++ _mipi_csi2_unlock(info);
++
++ return ipu_id;
++}
++EXPORT_SYMBOL(mipi_csi2_get_bind_ipu);
++
++/*!
++ * This function is called to get mipi csi2 bind csi num.
++ *
++ * @return Returns mipi csi2 bind csi num
++ */
++unsigned int mipi_csi2_get_bind_csi(struct mipi_csi2_info *info)
++{
++ unsigned int csi_id;
++
++ _mipi_csi2_lock(info);
++ csi_id = info->csi_id;
++ _mipi_csi2_unlock(info);
++
++ return csi_id;
++}
++EXPORT_SYMBOL(mipi_csi2_get_bind_csi);
++
++/*!
++ * This function is called to get mipi csi2 virtual channel.
++ *
++ * @return Returns mipi csi2 virtual channel num
++ */
++unsigned int mipi_csi2_get_virtual_channel(struct mipi_csi2_info *info)
++{
++ unsigned int v_channel;
++
++ _mipi_csi2_lock(info);
++ v_channel = info->v_channel;
++ _mipi_csi2_unlock(info);
++
++ return v_channel;
++}
++EXPORT_SYMBOL(mipi_csi2_get_virtual_channel);
++
++/**
++ * This function is called by the driver framework to initialize the MIPI CSI2
++ * device.
++ *
++ * @param pdev The device structure for the MIPI CSI2 passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int mipi_csi2_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct device_node *np = pdev->dev.of_node;
++ struct resource *res;
++ u32 mipi_csi2_dphy_ver;
++ int ret;
++
++ gmipi_csi2 = kmalloc(sizeof(struct mipi_csi2_info), GFP_KERNEL);
++ if (!gmipi_csi2) {
++ ret = -ENOMEM;
++ goto alloc_failed;
++ }
++
++ ret = of_property_read_u32(np, "ipu_id", &(gmipi_csi2->ipu_id));
++ if (ret) {
++ dev_err(&pdev->dev, "ipu_id missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "csi_id", &(gmipi_csi2->csi_id));
++ if (ret) {
++ dev_err(&pdev->dev, "csi_id missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "v_channel", &(gmipi_csi2->v_channel));
++ if (ret) {
++ dev_err(&pdev->dev, "v_channel missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "lanes", &(gmipi_csi2->lanes));
++ if (ret) {
++ dev_err(&pdev->dev, "lanes missing or invalid\n");
++ goto err;
++ }
++
++ if ((gmipi_csi2->ipu_id < 0) || (gmipi_csi2->ipu_id > 1) ||
++ (gmipi_csi2->csi_id > 1) || (gmipi_csi2->v_channel > 3) ||
++ (gmipi_csi2->lanes > 4)) {
++ dev_err(&pdev->dev, "invalid param for mipi csi2!\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
++ /* initialize mutex */
++ mutex_init(&gmipi_csi2->mutex_lock);
++
++ /* get mipi csi2 informaiton */
++ gmipi_csi2->pdev = pdev;
++ gmipi_csi2->mipi_en = false;
++
++ gmipi_csi2->cfg_clk = devm_clk_get(dev, "cfg_clk");
++ if (IS_ERR(gmipi_csi2->cfg_clk)) {
++ dev_err(&pdev->dev, "failed to get cfg_clk\n");
++ ret = PTR_ERR(gmipi_csi2->cfg_clk);
++ goto err;
++ }
++
++ /* get mipi dphy clk */
++ gmipi_csi2->dphy_clk = devm_clk_get(dev, "dphy_clk");
++ if (IS_ERR(gmipi_csi2->dphy_clk)) {
++ dev_err(&pdev->dev, "failed to get dphy pll_ref_clk\n");
++ ret = PTR_ERR(gmipi_csi2->dphy_clk);
++ goto err;
++ }
++
++ /* get mipi to ipu pixel clk */
++ gmipi_csi2->pixel_clk = devm_clk_get(dev, "pixel_clk");
++ if (IS_ERR(gmipi_csi2->pixel_clk)) {
++ dev_err(&pdev->dev, "failed to get mipi pixel clk\n");
++ ret = PTR_ERR(gmipi_csi2->pixel_clk);
++ goto err;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ ret = -ENODEV;
++ goto err;
++ }
++
++ /* mipi register mapping */
++ gmipi_csi2->mipi_csi2_base = ioremap(res->start, PAGE_SIZE);
++ if (!gmipi_csi2->mipi_csi2_base) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ /* mipi dphy clk enable for register access */
++ clk_prepare_enable(gmipi_csi2->dphy_clk);
++ /* get mipi csi2 dphy version */
++ mipi_csi2_dphy_ver = mipi_csi2_read(gmipi_csi2, MIPI_CSI2_VERSION);
++
++ clk_disable_unprepare(gmipi_csi2->dphy_clk);
++
++ platform_set_drvdata(pdev, gmipi_csi2);
++
++ dev_info(&pdev->dev, "i.MX MIPI CSI2 driver probed\n");
++ dev_info(&pdev->dev, "i.MX MIPI CSI2 dphy version is 0x%x\n",
++ mipi_csi2_dphy_ver);
++
++ return 0;
++
++err:
++ kfree(gmipi_csi2);
++alloc_failed:
++ dev_err(&pdev->dev, "i.MX MIPI CSI2 driver probed - error\n");
++ return ret;
++}
++
++static int mipi_csi2_remove(struct platform_device *pdev)
++{
++ /* unmapping mipi register */
++ iounmap(gmipi_csi2->mipi_csi2_base);
++
++ kfree(gmipi_csi2);
++
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static const struct of_device_id imx_mipi_csi2_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mipi-csi2", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mipi_csi2_driver = {
++ .driver = {
++ .name = "mxc_mipi_csi2",
++ .of_match_table = imx_mipi_csi2_dt_ids,
++ },
++ .probe = mipi_csi2_probe,
++ .remove = mipi_csi2_remove,
++};
++
++static int __init mipi_csi2_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&mipi_csi2_driver);
++ if (err) {
++ pr_err("mipi_csi2_driver register failed\n");
++ return -ENODEV;
++ }
++
++ pr_info("MIPI CSI2 driver module loaded\n");
++
++ return 0;
++}
++
++static void __exit mipi_csi2_cleanup(void)
++{
++ platform_driver_unregister(&mipi_csi2_driver);
++}
++
++subsys_initcall(mipi_csi2_init);
++module_exit(mipi_csi2_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX MIPI CSI2 driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/mxc/mipi/mxc_mipi_csi2.h linux-openelec/drivers/mxc/mipi/mxc_mipi_csi2.h
+--- linux-3.14.36/drivers/mxc/mipi/mxc_mipi_csi2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/mipi/mxc_mipi_csi2.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,46 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MXC_MIPI_CSI2_H__
++#define __MXC_MIPI_CSI2_H__
++
++#ifdef DEBUG
++#define mipi_dbg(fmt, ...) \
++ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
++#else
++#define mipi_dbg(fmt, ...)
++#endif
++
++/* driver private data */
++struct mipi_csi2_info {
++ bool mipi_en;
++ int ipu_id;
++ unsigned int csi_id;
++ unsigned int v_channel;
++ unsigned int lanes;
++ unsigned int datatype;
++ struct clk *cfg_clk;
++ struct clk *dphy_clk;
++ struct clk *pixel_clk;
++ void __iomem *mipi_csi2_base;
++ struct platform_device *pdev;
++
++ struct mutex mutex_lock;
++};
++
++#endif
+diff -Nur linux-3.14.36/drivers/mxc/mlb/Kconfig linux-openelec/drivers/mxc/mlb/Kconfig
+--- linux-3.14.36/drivers/mxc/mlb/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/mlb/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,17 @@
++#
++# MLB150 configuration
++#
++
++menu "MXC Media Local Bus Driver"
++
++config MXC_MLB
++ boolean
++
++config MXC_MLB150
++ tristate "MLB150 support"
++ depends on SOC_IMX6Q
++ select MXC_MLB
++ ---help---
++ Say Y to get the MLB150 support.
++
++endmenu
+diff -Nur linux-3.14.36/drivers/mxc/mlb/Makefile linux-openelec/drivers/mxc/mlb/Makefile
+--- linux-3.14.36/drivers/mxc/mlb/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/mlb/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,5 @@
++#
++# Makefile for the i.MX6Q/DL MLB150 driver
++#
++
++obj-$(CONFIG_MXC_MLB150) += mxc_mlb150.o
+diff -Nur linux-3.14.36/drivers/mxc/mlb/mxc_mlb150.c linux-openelec/drivers/mxc/mlb/mxc_mlb150.c
+--- linux-3.14.36/drivers/mxc/mlb/mxc_mlb150.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/mlb/mxc_mlb150.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2778 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/cdev.h>
++#include <linux/circ_buf.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/genalloc.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxc_mlb.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/regulator/consumer.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/uaccess.h>
++
++#define DRIVER_NAME "mxc_mlb150"
++
++/*
++ * MLB module memory map registers define
++ */
++#define REG_MLBC0 0x0
++#define MLBC0_MLBEN (0x1)
++#define MLBC0_MLBCLK_MASK (0x7 << 2)
++#define MLBC0_MLBCLK_SHIFT (2)
++#define MLBC0_MLBPEN (0x1 << 5)
++#define MLBC0_MLBLK (0x1 << 7)
++#define MLBC0_ASYRETRY (0x1 << 12)
++#define MLBC0_CTLRETRY (0x1 << 12)
++#define MLBC0_FCNT_MASK (0x7 << 15)
++#define MLBC0_FCNT_SHIFT (15)
++
++#define REG_MLBPC0 0x8
++#define MLBPC0_MCLKHYS (0x1 << 11)
++
++#define REG_MS0 0xC
++#define REG_MS1 0x14
++
++#define REG_MSS 0x20
++#define MSS_RSTSYSCMD (0x1)
++#define MSS_LKSYSCMD (0x1 << 1)
++#define MSS_ULKSYSCMD (0x1 << 2)
++#define MSS_CSSYSCMD (0x1 << 3)
++#define MSS_SWSYSCMD (0x1 << 4)
++#define MSS_SERVREQ (0x1 << 5)
++
++#define REG_MSD 0x24
++
++#define REG_MIEN 0x2C
++#define MIEN_ISOC_PE (0x1)
++#define MIEN_ISOC_BUFO (0x1 << 1)
++#define MIEN_SYNC_PE (0x1 << 16)
++#define MIEN_ARX_DONE (0x1 << 17)
++#define MIEN_ARX_PE (0x1 << 18)
++#define MIEN_ARX_BREAK (0x1 << 19)
++#define MIEN_ATX_DONE (0x1 << 20)
++#define MIEN_ATX_PE (0x1 << 21)
++#define MIEN_ATX_BREAK (0x1 << 22)
++#define MIEN_CRX_DONE (0x1 << 24)
++#define MIEN_CRX_PE (0x1 << 25)
++#define MIEN_CRX_BREAK (0x1 << 26)
++#define MIEN_CTX_DONE (0x1 << 27)
++#define MIEN_CTX_PE (0x1 << 28)
++#define MIEN_CTX_BREAK (0x1 << 29)
++
++#define REG_MLBPC2 0x34
++#define REG_MLBPC1 0x38
++#define MLBPC1_VAL (0x00000888)
++
++#define REG_MLBC1 0x3C
++#define MLBC1_LOCK (0x1 << 6)
++#define MLBC1_CLKM (0x1 << 7)
++#define MLBC1_NDA_MASK (0xFF << 8)
++#define MLBC1_NDA_SHIFT (8)
++
++#define REG_HCTL 0x80
++#define HCTL_RST0 (0x1)
++#define HCTL_RST1 (0x1 << 1)
++#define HCTL_EN (0x1 << 15)
++
++#define REG_HCMR0 0x88
++#define REG_HCMR1 0x8C
++#define REG_HCER0 0x90
++#define REG_HCER1 0x94
++#define REG_HCBR0 0x98
++#define REG_HCBR1 0x9C
++
++#define REG_MDAT0 0xC0
++#define REG_MDAT1 0xC4
++#define REG_MDAT2 0xC8
++#define REG_MDAT3 0xCC
++
++#define REG_MDWE0 0xD0
++#define REG_MDWE1 0xD4
++#define REG_MDWE2 0xD8
++#define REG_MDWE3 0xDC
++
++#define REG_MCTL 0xE0
++#define MCTL_XCMP (0x1)
++
++#define REG_MADR 0xE4
++#define MADR_WNR (0x1 << 31)
++#define MADR_TB (0x1 << 30)
++#define MADR_ADDR_MASK (0x7f << 8)
++#define MADR_ADDR_SHIFT (0)
++
++#define REG_ACTL 0x3C0
++#define ACTL_MPB (0x1 << 4)
++#define ACTL_DMAMODE (0x1 << 2)
++#define ACTL_SMX (0x1 << 1)
++#define ACTL_SCE (0x1)
++
++#define REG_ACSR0 0x3D0
++#define REG_ACSR1 0x3D4
++#define REG_ACMR0 0x3D8
++#define REG_ACMR1 0x3DC
++
++#define REG_CAT_MDATn(ch) (REG_MDAT0 + ((ch % 8) >> 1) * 4)
++#define REG_CAT_MDWEn(ch) (REG_MDWE0 + ((ch % 8) >> 1) * 4)
++
++#define INT_AHB0_CH_START (0)
++#define INT_AHB1_CH_START (32)
++
++#define LOGIC_CH_NUM (64)
++#define BUF_CDT_OFFSET (0x0)
++#define BUF_ADT_OFFSET (0x40)
++#define BUF_CAT_MLB_OFFSET (0x80)
++#define BUF_CAT_HBI_OFFSET (0x88)
++#define BUF_CTR_END_OFFSET (0x8F)
++
++#define CAT_MODE_RX (0x1 << 0)
++#define CAT_MODE_TX (0x1 << 1)
++#define CAT_MODE_INBOUND_DMA (0x1 << 8)
++#define CAT_MODE_OUTBOUND_DMA (0x1 << 9)
++
++#define CH_SYNC_DEFAULT_QUAD (1)
++#define CH_SYNC_MAX_QUAD (15)
++#define CH_SYNC_CDT_BUF_DEP (CH_SYNC_DEFAULT_QUAD * 4 * 4)
++#define CH_SYNC_ADT_BUF_MULTI (4)
++#define CH_SYNC_ADT_BUF_DEP (CH_SYNC_CDT_BUF_DEP * CH_SYNC_ADT_BUF_MULTI)
++#define CH_SYNC_BUF_SZ (CH_SYNC_MAX_QUAD * 4 * 4 * \
++ CH_SYNC_ADT_BUF_MULTI)
++#define CH_CTRL_CDT_BUF_DEP (64)
++#define CH_CTRL_ADT_BUF_DEP (CH_CTRL_CDT_BUF_DEP)
++#define CH_CTRL_BUF_SZ (CH_CTRL_ADT_BUF_DEP)
++#define CH_ASYNC_MDP_PACKET_LEN (1024)
++#define CH_ASYNC_MEP_PACKET_LEN (1536)
++#define CH_ASYNC_CDT_BUF_DEP (CH_ASYNC_MEP_PACKET_LEN)
++#define CH_ASYNC_ADT_BUF_DEP (CH_ASYNC_CDT_BUF_DEP)
++#define CH_ASYNC_BUF_SZ (CH_ASYNC_ADT_BUF_DEP)
++#define CH_ISOC_BLK_SIZE_188 (188)
++#define CH_ISOC_BLK_SIZE_196 (196)
++#define CH_ISOC_BLK_SIZE (CH_ISOC_BLK_SIZE_188)
++#define CH_ISOC_BLK_NUM (1)
++#define CH_ISOC_CDT_BUF_DEP (CH_ISOC_BLK_SIZE * CH_ISOC_BLK_NUM)
++#define CH_ISOC_ADT_BUF_DEP (CH_ISOC_CDT_BUF_DEP)
++#define CH_ISOC_BUF_SZ (1024)
++
++#define CH_SYNC_DBR_BUF_OFFSET (0x0)
++#define CH_CTRL_DBR_BUF_OFFSET (CH_SYNC_DBR_BUF_OFFSET + \
++ 2 * (CH_SYNC_MAX_QUAD * 4 * 4))
++#define CH_ASYNC_DBR_BUF_OFFSET (CH_CTRL_DBR_BUF_OFFSET + \
++ 2 * CH_CTRL_CDT_BUF_DEP)
++#define CH_ISOC_DBR_BUF_OFFSET (CH_ASYNC_DBR_BUF_OFFSET + \
++ 2 * CH_ASYNC_CDT_BUF_DEP)
++
++#define DBR_BUF_START 0x00000
++
++#define CDT_LEN (16)
++#define ADT_LEN (16)
++#define CAT_LEN (2)
++
++#define CDT_SZ (CDT_LEN * LOGIC_CH_NUM)
++#define ADT_SZ (ADT_LEN * LOGIC_CH_NUM)
++#define CAT_SZ (CAT_LEN * LOGIC_CH_NUM * 2)
++
++#define CDT_BASE(base) (base + BUF_CDT_OFFSET)
++#define ADT_BASE(base) (base + BUF_ADT_OFFSET)
++#define CAT_MLB_BASE(base) (base + BUF_CAT_MLB_OFFSET)
++#define CAT_HBI_BASE(base) (base + BUF_CAT_HBI_OFFSET)
++
++#define CDTn_ADDR(base, n) (base + BUF_CDT_OFFSET + n * CDT_LEN)
++#define ADTn_ADDR(base, n) (base + BUF_ADT_OFFSET + n * ADT_LEN)
++#define CATn_MLB_ADDR(base, n) (base + BUF_CAT_MLB_OFFSET + n * CAT_LEN)
++#define CATn_HBI_ADDR(base, n) (base + BUF_CAT_HBI_OFFSET + n * CAT_LEN)
++
++#define CAT_CL_SHIFT (0x0)
++#define CAT_CT_SHIFT (8)
++#define CAT_CE (0x1 << 11)
++#define CAT_RNW (0x1 << 12)
++#define CAT_MT (0x1 << 13)
++#define CAT_FCE (0x1 << 14)
++#define CAT_MFE (0x1 << 14)
++
++#define CDT_WSBC_SHIFT (14)
++#define CDT_WPC_SHIFT (11)
++#define CDT_RSBC_SHIFT (30)
++#define CDT_RPC_SHIFT (27)
++#define CDT_WPC_1_SHIFT (12)
++#define CDT_RPC_1_SHIFT (28)
++#define CDT_WPTR_SHIFT (0)
++#define CDT_SYNC_WSTS_MASK (0x0000f000)
++#define CDT_SYNC_WSTS_SHIFT (12)
++#define CDT_CTRL_ASYNC_WSTS_MASK (0x0000f000)
++#define CDT_CTRL_ASYNC_WSTS_SHIFT (12)
++#define CDT_ISOC_WSTS_MASK (0x0000e000)
++#define CDT_ISOC_WSTS_SHIFT (13)
++#define CDT_RPTR_SHIFT (16)
++#define CDT_SYNC_RSTS_MASK (0xf0000000)
++#define CDT_SYNC_RSTS_SHIFT (28)
++#define CDT_CTRL_ASYNC_RSTS_MASK (0xf0000000)
++#define CDT_CTRL_ASYNC_RSTS_SHIFT (28)
++#define CDT_ISOC_RSTS_MASK (0xe0000000)
++#define CDT_ISOC_RSTS_SHIFT (29)
++#define CDT_CTRL_ASYNC_WSTS_1 (0x1 << 14)
++#define CDT_CTRL_ASYNC_RSTS_1 (0x1 << 15)
++#define CDT_BD_SHIFT (0)
++#define CDT_BA_SHIFT (16)
++#define CDT_BS_SHIFT (0)
++#define CDT_BF_SHIFT (31)
++
++#define ADT_PG (0x1 << 13)
++#define ADT_LE (0x1 << 14)
++#define ADT_CE (0x1 << 15)
++#define ADT_BD1_SHIFT (0)
++#define ADT_ERR1 (0x1 << 13)
++#define ADT_DNE1 (0x1 << 14)
++#define ADT_RDY1 (0x1 << 15)
++#define ADT_BD2_SHIFT (16)
++#define ADT_ERR2 (0x1 << 29)
++#define ADT_DNE2 (0x1 << 30)
++#define ADT_RDY2 (0x1 << 31)
++#define ADT_BA1_SHIFT (0x0)
++#define ADT_BA2_SHIFT (0x0)
++#define ADT_PS1 (0x1 << 12)
++#define ADT_PS2 (0x1 << 28)
++#define ADT_MEP1 (0x1 << 11)
++#define ADT_MEP2 (0x1 << 27)
++
++#define MLB_MINOR_DEVICES 4
++#define MLB_CONTROL_DEV_NAME "ctrl"
++#define MLB_ASYNC_DEV_NAME "async"
++#define MLB_SYNC_DEV_NAME "sync"
++#define MLB_ISOC_DEV_NAME "isoc"
++
++#define TX_CHANNEL 0
++#define RX_CHANNEL 1
++
++#define TRANS_RING_NODES (1 << 3)
++
++enum MLB_CTYPE {
++ MLB_CTYPE_SYNC,
++ MLB_CTYPE_CTRL,
++ MLB_CTYPE_ASYNC,
++ MLB_CTYPE_ISOC,
++};
++
++enum CLK_SPEED {
++ CLK_256FS,
++ CLK_512FS,
++ CLK_1024FS,
++ CLK_2048FS,
++ CLK_3072FS,
++ CLK_4096FS,
++ CLK_6144FS,
++ CLK_8192FS,
++};
++
++struct mlb_ringbuf {
++ s8 *virt_bufs[TRANS_RING_NODES];
++ u32 phy_addrs[TRANS_RING_NODES];
++ s32 head;
++ s32 tail;
++ s32 unit_size;
++ s32 total_size;
++ rwlock_t rb_lock ____cacheline_aligned; /* ring index lock */
++};
++
++struct mlb_channel_info {
++ /* Input MLB channel address */
++ u32 address;
++ /* Internal AHB channel label */
++ u32 cl;
++ /* DBR buf head */
++ u32 dbr_buf_head;
++};
++
++struct mlb_dev_info {
++ /* device node name */
++ const char dev_name[20];
++ /* channel type */
++ const unsigned int channel_type;
++ /* ch fps */
++ enum CLK_SPEED fps;
++ /* channel info for tx/rx */
++ struct mlb_channel_info channels[2];
++ /* ring buffer */
++ u8 *rbuf_base_virt;
++ u32 rbuf_base_phy;
++ struct mlb_ringbuf rx_rbuf;
++ struct mlb_ringbuf tx_rbuf;
++ /* exception event */
++ unsigned long ex_event;
++ /* tx busy indicator */
++ unsigned long tx_busy;
++ /* channel started up or not */
++ atomic_t on;
++ /* device open count */
++ atomic_t opencnt;
++ /* wait queue head for channel */
++ wait_queue_head_t rx_wq;
++ wait_queue_head_t tx_wq;
++ /* TX OK */
++ s32 tx_ok;
++ /* spinlock for event access */
++ spinlock_t event_lock;
++ /*
++ * Block size for isoc mode
++ * This variable can be configured in ioctl
++ */
++ u32 isoc_blksz;
++ /*
++ * Quads number for sync mode
++ * This variable can be confifured in ioctl
++ */
++ u32 sync_quad;
++ /* Buffer depth in cdt */
++ u32 cdt_buf_dep;
++ /* Buffer depth in adt */
++ u32 adt_buf_dep;
++ /* Buffer size to hold data */
++ u32 buf_size;
++};
++
++struct mlb_data {
++ struct mlb_dev_info *devinfo;
++ struct clk *clk_mlb3p;
++ struct clk *clk_mlb6p;
++ struct cdev cdev;
++ struct class *class; /* device class */
++ dev_t firstdev;
++#ifdef CONFIG_REGULATOR
++ struct regulator *nvcc;
++#endif
++ void __iomem *membase; /* mlb module base address */
++ struct gen_pool *iram_pool;
++ u32 iram_size;
++ u32 irq_ahb0;
++ u32 irq_ahb1;
++ u32 irq_mlb;
++};
++
++/*
++ * For optimization, we use fixed channel label for
++ * input channels of each mode
++ * SYNC: CL = 0 for RX, CL = 64 for TX
++ * CTRL: CL = 1 for RX, CL = 65 for TX
++ * ASYNC: CL = 2 for RX, CL = 66 for TX
++ * ISOC: CL = 3 for RX, CL = 67 for TX
++ */
++#define SYNC_RX_CL_AHB0 0
++#define CTRL_RX_CL_AHB0 1
++#define ASYNC_RX_CL_AHB0 2
++#define ISOC_RX_CL_AHB0 3
++#define SYNC_TX_CL_AHB0 4
++#define CTRL_TX_CL_AHB0 5
++#define ASYNC_TX_CL_AHB0 6
++#define ISOC_TX_CL_AHB0 7
++
++#define SYNC_RX_CL_AHB1 32
++#define CTRL_RX_CL_AHB1 33
++#define ASYNC_RX_CL_AHB1 34
++#define ISOC_RX_CL_AHB1 35
++#define SYNC_TX_CL_AHB1 36
++#define CTRL_TX_CL_AHB1 37
++#define ASYNC_TX_CL_AHB1 38
++#define ISOC_TX_CL_AHB1 39
++
++#define SYNC_RX_CL SYNC_RX_CL_AHB0
++#define CTRL_RX_CL CTRL_RX_CL_AHB0
++#define ASYNC_RX_CL ASYNC_RX_CL_AHB0
++#define ISOC_RX_CL ISOC_RX_CL_AHB0
++
++#define SYNC_TX_CL SYNC_TX_CL_AHB0
++#define CTRL_TX_CL CTRL_TX_CL_AHB0
++#define ASYNC_TX_CL ASYNC_TX_CL_AHB0
++#define ISOC_TX_CL ISOC_TX_CL_AHB0
++
++static struct mlb_dev_info mlb_devinfo[MLB_MINOR_DEVICES] = {
++ {
++ .dev_name = MLB_SYNC_DEV_NAME,
++ .channel_type = MLB_CTYPE_SYNC,
++ .channels = {
++ [0] = {
++ .cl = SYNC_TX_CL,
++ .dbr_buf_head = CH_SYNC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = SYNC_RX_CL,
++ .dbr_buf_head = CH_SYNC_DBR_BUF_OFFSET
++ + CH_SYNC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_SYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[0].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_SYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[0].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_SYNC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_SYNC_ADT_BUF_DEP,
++ .buf_size = CH_SYNC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[0].event_lock),
++ },
++ {
++ .dev_name = MLB_CONTROL_DEV_NAME,
++ .channel_type = MLB_CTYPE_CTRL,
++ .channels = {
++ [0] = {
++ .cl = CTRL_TX_CL,
++ .dbr_buf_head = CH_CTRL_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = CTRL_RX_CL,
++ .dbr_buf_head = CH_CTRL_DBR_BUF_OFFSET
++ + CH_CTRL_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_CTRL_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[1].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_CTRL_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[1].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_CTRL_CDT_BUF_DEP,
++ .adt_buf_dep = CH_CTRL_ADT_BUF_DEP,
++ .buf_size = CH_CTRL_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[1].event_lock),
++ },
++ {
++ .dev_name = MLB_ASYNC_DEV_NAME,
++ .channel_type = MLB_CTYPE_ASYNC,
++ .channels = {
++ [0] = {
++ .cl = ASYNC_TX_CL,
++ .dbr_buf_head = CH_ASYNC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = ASYNC_RX_CL,
++ .dbr_buf_head = CH_ASYNC_DBR_BUF_OFFSET
++ + CH_ASYNC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_ASYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[2].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_ASYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[2].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_ASYNC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_ASYNC_ADT_BUF_DEP,
++ .buf_size = CH_ASYNC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[2].event_lock),
++ },
++ {
++ .dev_name = MLB_ISOC_DEV_NAME,
++ .channel_type = MLB_CTYPE_ISOC,
++ .channels = {
++ [0] = {
++ .cl = ISOC_TX_CL,
++ .dbr_buf_head = CH_ISOC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = ISOC_RX_CL,
++ .dbr_buf_head = CH_ISOC_DBR_BUF_OFFSET
++ + CH_ISOC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_ISOC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[3].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_ISOC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[3].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_ISOC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_ISOC_ADT_BUF_DEP,
++ .buf_size = CH_ISOC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[3].event_lock),
++ .isoc_blksz = CH_ISOC_BLK_SIZE_188,
++ },
++};
++
++static void __iomem *mlb_base;
++
++DEFINE_SPINLOCK(ctr_lock);
++
++#ifdef DEBUG
++#define DUMP_REG(reg) pr_debug(#reg": 0x%08x\n", __raw_readl(mlb_base + reg))
++
++static void mlb150_dev_dump_reg(void)
++{
++ pr_debug("mxc_mlb150: Dump registers:\n");
++ DUMP_REG(REG_MLBC0);
++ DUMP_REG(REG_MLBPC0);
++ DUMP_REG(REG_MS0);
++ DUMP_REG(REG_MS1);
++ DUMP_REG(REG_MSS);
++ DUMP_REG(REG_MSD);
++ DUMP_REG(REG_MIEN);
++ DUMP_REG(REG_MLBPC2);
++ DUMP_REG(REG_MLBPC1);
++ DUMP_REG(REG_MLBC1);
++ DUMP_REG(REG_HCTL);
++ DUMP_REG(REG_HCMR0);
++ DUMP_REG(REG_HCMR1);
++ DUMP_REG(REG_HCER0);
++ DUMP_REG(REG_HCER1);
++ DUMP_REG(REG_HCBR0);
++ DUMP_REG(REG_HCBR1);
++ DUMP_REG(REG_MDAT0);
++ DUMP_REG(REG_MDAT1);
++ DUMP_REG(REG_MDAT2);
++ DUMP_REG(REG_MDAT3);
++ DUMP_REG(REG_MDWE0);
++ DUMP_REG(REG_MDWE1);
++ DUMP_REG(REG_MDWE2);
++ DUMP_REG(REG_MDWE3);
++ DUMP_REG(REG_MCTL);
++ DUMP_REG(REG_MADR);
++ DUMP_REG(REG_ACTL);
++ DUMP_REG(REG_ACSR0);
++ DUMP_REG(REG_ACSR1);
++ DUMP_REG(REG_ACMR0);
++ DUMP_REG(REG_ACMR1);
++}
++
++static void mlb150_dev_dump_hex(const u8 *buf, u32 len)
++{
++ print_hex_dump(KERN_DEBUG, "CTR DUMP:",
++ DUMP_PREFIX_OFFSET, 8, 1, buf, len, 0);
++}
++#endif
++
++static inline void mlb150_dev_enable_ctr_write(u32 mdat0_bits_en,
++ u32 mdat1_bits_en, u32 mdat2_bits_en, u32 mdat3_bits_en)
++{
++ __raw_writel(mdat0_bits_en, mlb_base + REG_MDWE0);
++ __raw_writel(mdat1_bits_en, mlb_base + REG_MDWE1);
++ __raw_writel(mdat2_bits_en, mlb_base + REG_MDWE2);
++ __raw_writel(mdat3_bits_en, mlb_base + REG_MDWE3);
++}
++
++#ifdef DEBUG
++static inline u8 mlb150_dev_dbr_read(u32 dbr_addr)
++{
++ s32 timeout = 1000;
++ u8 dbr_val = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(MADR_TB | dbr_addr,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (0 == timeout) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ return -ETIME;
++ }
++
++ dbr_val = __raw_readl(mlb_base + REG_MDAT0) & 0x000000ff;
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return dbr_val;
++}
++
++static inline s32 mlb150_dev_dbr_write(u32 dbr_addr, u8 dbr_val)
++{
++ s32 timeout = 1000;
++ u32 mdat0 = dbr_val & 0x000000ff;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(mdat0, mlb_base + REG_MDAT0);
++
++ __raw_writel(MADR_WNR | MADR_TB | dbr_addr,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ return -ETIME;
++ }
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return 0;
++}
++
++static inline s32 mlb150_dev_dbr_dump(u32 addr, u32 size)
++{
++ u8 *dump_buf = NULL;
++ u8 *buf_ptr = NULL;
++ s32 i;
++
++ dump_buf = kzalloc(size, GFP_KERNEL);
++ if (!dump_buf) {
++ pr_err("can't allocate enough memory\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0, buf_ptr = dump_buf;
++ i < size; ++i, ++buf_ptr)
++ *buf_ptr = mlb150_dev_dbr_read(addr + i);
++
++ mlb150_dev_dump_hex(dump_buf, size);
++
++ kfree(dump_buf);
++
++ return 0;
++}
++#endif
++
++static s32 mlb150_dev_ctr_read(u32 ctr_offset, u32 *ctr_val)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(ctr_offset, mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Read CTR timeout\n");
++ return -ETIME;
++ }
++
++ ctr_val[0] = __raw_readl(mlb_base + REG_MDAT0);
++ ctr_val[1] = __raw_readl(mlb_base + REG_MDAT1);
++ ctr_val[2] = __raw_readl(mlb_base + REG_MDAT2);
++ ctr_val[3] = __raw_readl(mlb_base + REG_MDAT3);
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return 0;
++}
++
++static s32 mlb150_dev_ctr_write(u32 ctr_offset, const u32 *ctr_val)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++
++ __raw_writel(ctr_val[0], mlb_base + REG_MDAT0);
++ __raw_writel(ctr_val[1], mlb_base + REG_MDAT1);
++ __raw_writel(ctr_val[2], mlb_base + REG_MDAT2);
++ __raw_writel(ctr_val[3], mlb_base + REG_MDAT3);
++
++ __raw_writel(MADR_WNR | ctr_offset,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Write CTR timeout\n");
++ return -ETIME;
++ }
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++#ifdef DEBUG_CTR
++ {
++ u32 ctr_rd[4] = { 0 };
++
++ if (!mlb150_dev_ctr_read(ctr_offset, ctr_rd)) {
++ if (ctr_val[0] == ctr_rd[0] &&
++ ctr_val[1] == ctr_rd[1] &&
++ ctr_val[2] == ctr_rd[2] &&
++ ctr_val[3] == ctr_rd[3])
++ return 0;
++ else {
++ pr_debug("mxc_mlb150: ctr write failed\n");
++ pr_debug("offset: 0x%x\n", ctr_offset);
++ pr_debug("Write: 0x%x 0x%x 0x%x 0x%x\n",
++ ctr_val[3], ctr_val[2],
++ ctr_val[1], ctr_val[0]);
++ pr_debug("Read: 0x%x 0x%x 0x%x 0x%x\n",
++ ctr_rd[3], ctr_rd[2],
++ ctr_rd[1], ctr_rd[0]);
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: ctr read failed\n");
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
++
++#ifdef DEBUG
++static s32 mlb150_dev_cat_read(u32 ctr_offset, u32 ch, u16 *cat_val)
++{
++ u16 ctr_val[8] = { 0 };
++
++ if (mlb150_dev_ctr_read(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++ /*
++ * Use u16 array to get u32 array value,
++ * need to convert
++ */
++ cat_val = ctr_val[ch % 8];
++
++ return 0;
++}
++#endif
++
++static s32 mlb150_dev_cat_write(u32 ctr_offset, u32 ch, const u16 cat_val)
++{
++ u16 ctr_val[8] = { 0 };
++
++ if (mlb150_dev_ctr_read(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++ ctr_val[ch % 8] = cat_val;
++ if (mlb150_dev_ctr_write(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++#define mlb150_dev_cat_mlb_read(ch, cat_val) \
++ mlb150_dev_cat_read(BUF_CAT_MLB_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_mlb_write(ch, cat_val) \
++ mlb150_dev_cat_write(BUF_CAT_MLB_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_hbi_read(ch, cat_val) \
++ mlb150_dev_cat_read(BUF_CAT_HBI_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_hbi_write(ch, cat_val) \
++ mlb150_dev_cat_write(BUF_CAT_HBI_OFFSET + (ch >> 3), ch, cat_val)
++
++#define mlb150_dev_cdt_read(ch, cdt_val) \
++ mlb150_dev_ctr_read(BUF_CDT_OFFSET + ch, cdt_val)
++#define mlb150_dev_cdt_write(ch, cdt_val) \
++ mlb150_dev_ctr_write(BUF_CDT_OFFSET + ch, cdt_val)
++#define mlb150_dev_adt_read(ch, adt_val) \
++ mlb150_dev_ctr_read(BUF_ADT_OFFSET + ch, adt_val)
++#define mlb150_dev_adt_write(ch, adt_val) \
++ mlb150_dev_ctr_write(BUF_ADT_OFFSET + ch, adt_val)
++
++static s32 mlb150_dev_get_adt_sts(u32 ch)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++ u32 reg;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(BUF_ADT_OFFSET + ch,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Read CTR timeout\n");
++ return -ETIME;
++ }
++
++ reg = __raw_readl(mlb_base + REG_MDAT1);
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++#ifdef DEBUG_ADT
++ pr_debug("mxc_mlb150: Get ch %d adt sts: 0x%08x\n", ch, reg);
++#endif
++
++ return reg;
++}
++
++#ifdef DEBUG
++static void mlb150_dev_dump_ctr_tbl(u32 ch_start, u32 ch_end)
++{
++ u32 i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ pr_debug("mxc_mlb150: CDT Table");
++ for (i = BUF_CDT_OFFSET + ch_start;
++ i < BUF_CDT_OFFSET + ch_end;
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: ADT Table");
++ for (i = BUF_ADT_OFFSET + ch_start;
++ i < BUF_ADT_OFFSET + ch_end;
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: CAT MLB Table");
++ for (i = BUF_CAT_MLB_OFFSET + (ch_start >> 3);
++ i <= BUF_CAT_MLB_OFFSET + ((ch_end + 8) >> 3);
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: CAT HBI Table");
++ for (i = BUF_CAT_HBI_OFFSET + (ch_start >> 3);
++ i <= BUF_CAT_HBI_OFFSET + ((ch_end + 8) >> 3);
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++}
++#endif
++
++/*
++ * Initial the MLB module device
++ */
++static inline void mlb150_dev_enable_dma_irq(u32 enable)
++{
++ u32 ch_rx_mask = (1 << SYNC_RX_CL_AHB0) | (1 << CTRL_RX_CL_AHB0)
++ | (1 << ASYNC_RX_CL_AHB0) | (1 << ISOC_RX_CL_AHB0)
++ | (1 << SYNC_TX_CL_AHB0) | (1 << CTRL_TX_CL_AHB0)
++ | (1 << ASYNC_TX_CL_AHB0) | (1 << ISOC_TX_CL_AHB0);
++ u32 ch_tx_mask = (1 << (SYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (SYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_TX_CL_AHB1 - INT_AHB1_CH_START));
++
++ if (enable) {
++ __raw_writel(ch_rx_mask, mlb_base + REG_ACMR0);
++ __raw_writel(ch_tx_mask, mlb_base + REG_ACMR1);
++ } else {
++ __raw_writel(0x0, mlb_base + REG_ACMR0);
++ __raw_writel(0x0, mlb_base + REG_ACMR1);
++ }
++}
++
++
++static void mlb150_dev_init_ir_amba_ahb(void)
++{
++ u32 reg = 0;
++
++ /*
++ * Step 1. Program the ACMRn registers to enable interrupts from all
++ * active DMA channels
++ */
++ mlb150_dev_enable_dma_irq(1);
++
++ /*
++ * Step 2. Select the status clear method:
++ * ACTL.SCE = 0, hardware clears on read
++ * ACTL.SCE = 1, software writes a '1' to clear
++ * We only support DMA MODE 1
++ */
++ reg = __raw_readl(mlb_base + REG_ACTL);
++ reg |= ACTL_DMAMODE;
++#ifdef MULTIPLE_PACKAGE_MODE
++ reg |= REG_ACTL_MPB;
++#endif
++
++ /*
++ * Step 3. Select 1 or 2 interrupt signals:
++ * ACTL.SMX = 0: one interrupt for channels 0 - 31 on ahb_init[0]
++ * and another interrupt for channels 32 - 63 on ahb_init[1]
++ * ACTL.SMX = 1: singel interrupt all channels on ahb_init[0]
++ */
++ reg &= ~ACTL_SMX;
++
++ __raw_writel(reg, mlb_base + REG_ACTL);
++}
++
++static inline void mlb150_dev_enable_ir_mlb(u32 enable)
++{
++ /*
++ * Step 1, Select the MSn to be cleared by software,
++ * writing a '0' to the appropriate bits
++ */
++ __raw_writel(0, mlb_base + REG_MS0);
++ __raw_writel(0, mlb_base + REG_MS1);
++
++ /*
++ * Step 1, Program MIEN to enable protocol error
++ * interrupts for all active MLB channels
++ */
++ if (enable)
++ __raw_writel(MIEN_CTX_PE |
++ MIEN_CRX_PE | MIEN_ATX_PE |
++ MIEN_ARX_PE | MIEN_SYNC_PE |
++ MIEN_ISOC_PE,
++ mlb_base + REG_MIEN);
++ else
++ __raw_writel(0, mlb_base + REG_MIEN);
++}
++
++static inline void mlb150_enable_pll(struct mlb_data *drvdata)
++{
++ u32 c0_val;
++
++ __raw_writel(MLBPC1_VAL,
++ drvdata->membase + REG_MLBPC1);
++
++ c0_val = __raw_readl(drvdata->membase + REG_MLBC0);
++ if (c0_val & MLBC0_MLBPEN) {
++ c0_val &= ~MLBC0_MLBPEN;
++ __raw_writel(c0_val,
++ drvdata->membase + REG_MLBC0);
++ }
++
++ clk_prepare_enable(drvdata->clk_mlb6p);
++
++ c0_val |= (MLBC0_MLBPEN);
++ __raw_writel(c0_val, drvdata->membase + REG_MLBC0);
++}
++
++static inline void mlb150_disable_pll(struct mlb_data *drvdata)
++{
++ u32 c0_val;
++
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ c0_val = __raw_readl(drvdata->membase + REG_MLBC0);
++
++ __raw_writel(0x0, drvdata->membase + REG_MLBPC1);
++
++ c0_val &= ~MLBC0_MLBPEN;
++ __raw_writel(c0_val, drvdata->membase + REG_MLBC0);
++}
++
++static void mlb150_dev_reset_cdt(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM); ++i)
++ mlb150_dev_ctr_write(BUF_CDT_OFFSET + i, ctr_val);
++}
++
++static s32 mlb150_dev_init_ch_cdt(struct mlb_dev_info *pdevinfo, u32 ch,
++ enum MLB_CTYPE ctype, u32 ch_func)
++{
++ u32 cdt_val[4] = { 0 };
++
++ /* a. Set the 14-bit base address (BA) */
++ pr_debug("mxc_mlb150: ctype: %d, ch: %d, dbr_buf_head: 0x%08x",
++ ctype, ch, pdevinfo->channels[ch_func].dbr_buf_head);
++ cdt_val[3] = (pdevinfo->channels[ch_func].dbr_buf_head)
++ << CDT_BA_SHIFT;
++ /*
++ * b. Set the 12-bit or 13-bit buffer depth (BD)
++ * BD = buffer depth in bytes - 1
++ * For synchronous channels: (BD + 1) = 4 * m * bpf
++ * For control channels: (BD + 1) >= max packet length (64)
++ * For asynchronous channels: (BD + 1) >= max packet length
++ * 1024 for a MOST Data packet (MDP);
++ * 1536 for a MOST Ethernet Packet (MEP)
++ * For isochronous channels: (BD + 1) mod (BS + 1) = 0
++ * BS
++ */
++ if (MLB_CTYPE_ISOC == ctype)
++ cdt_val[1] |= (pdevinfo->isoc_blksz - 1);
++ /* BD */
++ cdt_val[3] |= (pdevinfo->cdt_buf_dep - 1) << CDT_BD_SHIFT;
++
++ pr_debug("mxc_mlb150: Set CDT val of channel %d, type: %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ ch, ctype, cdt_val[3], cdt_val[2], cdt_val[1], cdt_val[0]);
++
++ if (mlb150_dev_cdt_write(ch, cdt_val))
++ return -ETIME;
++
++#ifdef DEBUG_CTR
++ {
++ u32 cdt_rd[4] = { 0 };
++ if (!mlb150_dev_cdt_read(ch, cdt_rd)) {
++ pr_debug("mxc_mlb150: CDT val of channel %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ ch, cdt_rd[3], cdt_rd[2], cdt_rd[1], cdt_rd[0]);
++ if (cdt_rd[3] == cdt_val[3] &&
++ cdt_rd[2] == cdt_val[2] &&
++ cdt_rd[1] == cdt_val[1] &&
++ cdt_rd[0] == cdt_val[0]) {
++ pr_debug("mxc_mlb150: set cdt succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set cdt failed!\n");
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: Read CDT val of channel %d failed\n",
++ ch);
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
++
++static s32 mlb150_dev_init_ch_cat(u32 ch, u32 cl,
++ u32 cat_mode, enum MLB_CTYPE ctype)
++{
++ u16 cat_val = 0;
++#ifdef DEBUG_CTR
++ u16 cat_rd = 0;
++#endif
++
++ cat_val = CAT_CE | (ctype << CAT_CT_SHIFT) | cl;
++
++ if (cat_mode & CAT_MODE_OUTBOUND_DMA)
++ cat_val |= CAT_RNW;
++
++ if (MLB_CTYPE_SYNC == ctype)
++ cat_val |= CAT_MT;
++
++ switch (cat_mode) {
++ case CAT_MODE_RX | CAT_MODE_INBOUND_DMA:
++ case CAT_MODE_TX | CAT_MODE_OUTBOUND_DMA:
++ pr_debug("mxc_mlb150: set CAT val of channel %d, type: %d: 0x%04x\n",
++ ch, ctype, cat_val);
++
++ if (mlb150_dev_cat_mlb_write(ch, cat_val))
++ return -ETIME;
++#ifdef DEBUG_CTR
++ if (!mlb150_dev_cat_mlb_read(ch, &cat_rd))
++ pr_debug("mxc_mlb150: CAT val of mlb channel %d: 0x%04x",
++ ch, cat_rd);
++ else {
++ pr_debug("mxc_mlb150: Read CAT of mlb channel %d failed\n",
++ ch);
++ return -EBADE;
++ }
++#endif
++ break;
++ case CAT_MODE_TX | CAT_MODE_INBOUND_DMA:
++ case CAT_MODE_RX | CAT_MODE_OUTBOUND_DMA:
++ pr_debug("mxc_mlb150: set CAT val of channel %d, type: %d: 0x%04x\n",
++ cl, ctype, cat_val);
++
++ if (mlb150_dev_cat_hbi_write(cl, cat_val))
++ return -ETIME;
++#ifdef DEBUG_CTR
++ if (!mlb150_dev_cat_hbi_read(cl, &cat_rd))
++ pr_debug("mxc_mlb150: CAT val of hbi channel %d: 0x%04x",
++ cl, cat_rd);
++ else {
++ pr_debug("mxc_mlb150: Read CAT of hbi channel %d failed\n",
++ cl);
++ return -EBADE;
++ }
++#endif
++ break;
++ default:
++ return EBADRQC;
++ }
++
++#ifdef DEBUG_CTR
++ {
++ if (cat_val == cat_rd) {
++ pr_debug("mxc_mlb150: set cat succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set cat failed!\n");
++ return -EBADE;
++ }
++ }
++#endif
++ return 0;
++}
++
++static void mlb150_dev_reset_cat(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM >> 3); ++i) {
++ mlb150_dev_ctr_write(BUF_CAT_MLB_OFFSET + i, ctr_val);
++ mlb150_dev_ctr_write(BUF_CAT_HBI_OFFSET + i, ctr_val);
++ }
++}
++
++static void mlb150_dev_init_rfb(struct mlb_dev_info *pdevinfo, u32 rx_ch,
++ u32 tx_ch, enum MLB_CTYPE ctype)
++{
++ u32 rx_cl = pdevinfo->channels[RX_CHANNEL].cl;
++ u32 tx_cl = pdevinfo->channels[TX_CHANNEL].cl;
++ /* Step 1, Initialize all bits of CAT to '0' */
++ mlb150_dev_reset_cat();
++ mlb150_dev_reset_cdt();
++ /*
++ * Step 2, Initialize logical channel
++ * Step 3, Program the CDT for channel N
++ */
++ mlb150_dev_init_ch_cdt(pdevinfo, rx_cl, ctype, RX_CHANNEL);
++ mlb150_dev_init_ch_cdt(pdevinfo, tx_cl, ctype, TX_CHANNEL);
++
++ /* Step 4&5, Program the CAT for the inbound and outbound DMA */
++ mlb150_dev_init_ch_cat(rx_ch, rx_cl,
++ CAT_MODE_RX | CAT_MODE_INBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(rx_ch, rx_cl,
++ CAT_MODE_RX | CAT_MODE_OUTBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(tx_ch, tx_cl,
++ CAT_MODE_TX | CAT_MODE_INBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(tx_ch, tx_cl,
++ CAT_MODE_TX | CAT_MODE_OUTBOUND_DMA,
++ ctype);
++}
++
++static void mlb150_dev_reset_adt(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM); ++i)
++ mlb150_dev_ctr_write(BUF_ADT_OFFSET + i, ctr_val);
++}
++
++static void mlb150_dev_reset_whole_ctr(void)
++{
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++ mlb150_dev_reset_cdt();
++ mlb150_dev_reset_adt();
++ mlb150_dev_reset_cat();
++}
++
++#define CLR_REG(reg) __raw_writel(0x0, mlb_base + reg)
++
++static void mlb150_dev_reset_all_regs(void)
++{
++ CLR_REG(REG_MLBC0);
++ CLR_REG(REG_MLBPC0);
++ CLR_REG(REG_MS0);
++ CLR_REG(REG_MS1);
++ CLR_REG(REG_MSS);
++ CLR_REG(REG_MSD);
++ CLR_REG(REG_MIEN);
++ CLR_REG(REG_MLBPC2);
++ CLR_REG(REG_MLBPC1);
++ CLR_REG(REG_MLBC1);
++ CLR_REG(REG_HCTL);
++ CLR_REG(REG_HCMR0);
++ CLR_REG(REG_HCMR1);
++ CLR_REG(REG_HCER0);
++ CLR_REG(REG_HCER1);
++ CLR_REG(REG_HCBR0);
++ CLR_REG(REG_HCBR1);
++ CLR_REG(REG_MDAT0);
++ CLR_REG(REG_MDAT1);
++ CLR_REG(REG_MDAT2);
++ CLR_REG(REG_MDAT3);
++ CLR_REG(REG_MDWE0);
++ CLR_REG(REG_MDWE1);
++ CLR_REG(REG_MDWE2);
++ CLR_REG(REG_MDWE3);
++ CLR_REG(REG_MCTL);
++ CLR_REG(REG_MADR);
++ CLR_REG(REG_ACTL);
++ CLR_REG(REG_ACSR0);
++ CLR_REG(REG_ACSR1);
++ CLR_REG(REG_ACMR0);
++ CLR_REG(REG_ACMR1);
++}
++
++static inline s32 mlb150_dev_pipo_start(struct mlb_ringbuf *rbuf,
++ u32 ahb_ch, u32 buf_addr)
++{
++ u32 ctr_val[4] = { 0 };
++
++ ctr_val[1] |= ADT_RDY1;
++ ctr_val[2] = buf_addr;
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++static inline s32 mlb150_dev_pipo_next(u32 ahb_ch, enum MLB_CTYPE ctype,
++ u32 dne_sts, u32 buf_addr)
++{
++ u32 ctr_val[4] = { 0 };
++
++ if (MLB_CTYPE_ASYNC == ctype ||
++ MLB_CTYPE_CTRL == ctype) {
++ ctr_val[1] |= ADT_PS1;
++ ctr_val[1] |= ADT_PS2;
++ }
++
++ /*
++ * Clear DNE1 and ERR1
++ * Set the page ready bit (RDY1)
++ */
++ if (dne_sts & ADT_DNE1) {
++ ctr_val[1] |= ADT_RDY2;
++ ctr_val[3] = buf_addr;
++ } else {
++ ctr_val[1] |= ADT_RDY1;
++ ctr_val[2] = buf_addr;
++ }
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++static inline s32 mlb150_dev_pipo_stop(struct mlb_ringbuf *rbuf, u32 ahb_ch)
++{
++ u32 ctr_val[4] = { 0 };
++ unsigned long flags;
++
++ write_lock_irqsave(&rbuf->rb_lock, flags);
++ rbuf->head = rbuf->tail = 0;
++ write_unlock_irqrestore(&rbuf->rb_lock, flags);
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++static s32 mlb150_dev_init_ch_amba_ahb(struct mlb_dev_info *pdevinfo,
++ struct mlb_channel_info *chinfo,
++ enum MLB_CTYPE ctype)
++{
++ u32 ctr_val[4] = { 0 };
++
++ /* a. Set the 32-bit base address (BA1) */
++ ctr_val[3] = 0;
++ ctr_val[2] = 0;
++ ctr_val[1] = (pdevinfo->adt_buf_dep - 1) << ADT_BD1_SHIFT;
++ ctr_val[1] |= (pdevinfo->adt_buf_dep - 1) << ADT_BD2_SHIFT;
++ if (MLB_CTYPE_ASYNC == ctype ||
++ MLB_CTYPE_CTRL == ctype) {
++ ctr_val[1] |= ADT_PS1;
++ ctr_val[1] |= ADT_PS2;
++ }
++
++ ctr_val[0] |= (ADT_LE | ADT_CE);
++
++ pr_debug("mxc_mlb150: Set ADT val of channel %d, ctype: %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ chinfo->cl, ctype, ctr_val[3], ctr_val[2],
++ ctr_val[1], ctr_val[0]);
++
++ if (mlb150_dev_adt_write(chinfo->cl, ctr_val))
++ return -ETIME;
++
++#ifdef DEBUG_CTR
++ {
++ u32 ctr_rd[4] = { 0 };
++ if (!mlb150_dev_adt_read(chinfo->cl, ctr_rd)) {
++ pr_debug("mxc_mlb150: ADT val of channel %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ chinfo->cl, ctr_rd[3], ctr_rd[2],
++ ctr_rd[1], ctr_rd[0]);
++ if (ctr_rd[3] == ctr_val[3] &&
++ ctr_rd[2] == ctr_val[2] &&
++ ctr_rd[1] == ctr_val[1] &&
++ ctr_rd[0] == ctr_val[0]) {
++ pr_debug("mxc_mlb150: set adt succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set adt failed!\n");
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: Read ADT val of channel %d failed\n",
++ chinfo->cl);
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
++
++static void mlb150_dev_init_amba_ahb(struct mlb_dev_info *pdevinfo,
++ enum MLB_CTYPE ctype)
++{
++ struct mlb_channel_info *tx_chinfo = &pdevinfo->channels[TX_CHANNEL];
++ struct mlb_channel_info *rx_chinfo = &pdevinfo->channels[RX_CHANNEL];
++
++ /* Step 1, Initialize all bits of the ADT to '0' */
++ mlb150_dev_reset_adt();
++
++ /*
++ * Step 2, Select a logic channel
++ * Step 3, Program the AMBA AHB block ping page for channel N
++ * Step 4, Program the AMBA AHB block pong page for channel N
++ */
++ mlb150_dev_init_ch_amba_ahb(pdevinfo, rx_chinfo, ctype);
++ mlb150_dev_init_ch_amba_ahb(pdevinfo, tx_chinfo, ctype);
++}
++
++static void mlb150_dev_exit(void)
++{
++ u32 c0_val, hctl_val;
++
++ /* Disable EN bits */
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~(MLBC0_MLBEN | MLBC0_MLBPEN);
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ hctl_val = __raw_readl(mlb_base + REG_HCTL);
++ hctl_val &= ~HCTL_EN;
++ __raw_writel(hctl_val, mlb_base + REG_HCTL);
++
++ __raw_writel(0x0, mlb_base + REG_HCMR0);
++ __raw_writel(0x0, mlb_base + REG_HCMR1);
++
++ mlb150_dev_enable_dma_irq(0);
++ mlb150_dev_enable_ir_mlb(0);
++}
++
++static void mlb150_dev_init(void)
++{
++ u32 c0_val;
++ u32 ch_rx_mask = (1 << SYNC_RX_CL_AHB0) | (1 << CTRL_RX_CL_AHB0)
++ | (1 << ASYNC_RX_CL_AHB0) | (1 << ISOC_RX_CL_AHB0)
++ | (1 << SYNC_TX_CL_AHB0) | (1 << CTRL_TX_CL_AHB0)
++ | (1 << ASYNC_TX_CL_AHB0) | (1 << ISOC_TX_CL_AHB0);
++ u32 ch_tx_mask = (1 << (SYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (SYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_TX_CL_AHB1 - INT_AHB1_CH_START));
++
++ /* Disable EN bits */
++ mlb150_dev_exit();
++
++ /*
++ * Step 1. Initialize CTR and registers
++ * a. Set all bit of the CTR (CAT, CDT, and ADT) to 0.
++ */
++ mlb150_dev_reset_whole_ctr();
++
++ /* a. Set all bit of the CTR (CAT, CDT, and ADT) to 0. */
++ mlb150_dev_reset_all_regs();
++
++ /*
++ * Step 2, Configure the MediaLB interface
++ * Select pin mode and clock, 3-pin and 256fs
++ */
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~(MLBC0_MLBPEN | MLBC0_MLBCLK_MASK);
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ c0_val |= MLBC0_MLBEN;
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ /* Step 3, Configure the HBI interface */
++ __raw_writel(ch_rx_mask, mlb_base + REG_HCMR0);
++ __raw_writel(ch_tx_mask, mlb_base + REG_HCMR1);
++ __raw_writel(HCTL_EN, mlb_base + REG_HCTL);
++
++ mlb150_dev_init_ir_amba_ahb();
++
++ mlb150_dev_enable_ir_mlb(1);
++}
++
++static s32 mlb150_dev_unmute_syn_ch(u32 rx_ch, u32 rx_cl, u32 tx_ch, u32 tx_cl)
++{
++ u32 timeout = 10000;
++
++ /*
++ * Check that MediaLB clock is running (MLBC1.CLKM = 0)
++ * If MLBC1.CLKM = 1, clear the register bit, wait one
++ * APB or I/O clock cycle and repeat the check
++ */
++ while ((__raw_readl(mlb_base + REG_MLBC1) & MLBC1_CLKM)
++ && --timeout)
++ __raw_writel(~MLBC1_CLKM, mlb_base + REG_MLBC1);
++
++ if (0 == timeout)
++ return -ETIME;
++
++ timeout = 10000;
++ /* Poll for MLB lock (MLBC0.MLBLK = 1) */
++ while (!(__raw_readl(mlb_base + REG_MLBC0) & MLBC0_MLBLK)
++ && --timeout)
++ ;
++
++ if (0 == timeout)
++ return -ETIME;
++
++ /* Unmute synchronous channel(s) */
++ mlb150_dev_cat_mlb_write(rx_ch, CAT_CE | rx_cl);
++ mlb150_dev_cat_mlb_write(tx_ch,
++ CAT_CE | tx_cl | CAT_RNW);
++ mlb150_dev_cat_hbi_write(rx_cl,
++ CAT_CE | rx_cl | CAT_RNW);
++ mlb150_dev_cat_hbi_write(tx_cl, CAT_CE | tx_cl);
++
++ return 0;
++}
++
++/* In case the user calls channel shutdown, but rx or tx is not completed yet */
++static s32 mlb150_trans_complete_check(struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ s32 timeout = 1024;
++
++ while (timeout--) {
++ read_lock(&tx_rbuf->rb_lock);
++ if (!CIRC_CNT(tx_rbuf->head, tx_rbuf->tail, TRANS_RING_NODES)) {
++ read_unlock(&tx_rbuf->rb_lock);
++ break;
++ } else
++ read_unlock(&tx_rbuf->rb_lock);
++ }
++
++ if (timeout <= 0) {
++ pr_debug("TX complete check timeout!\n");
++ return -ETIME;
++ }
++
++ timeout = 1024;
++ while (timeout--) {
++ read_lock(&rx_rbuf->rb_lock);
++ if (!CIRC_CNT(rx_rbuf->head, rx_rbuf->tail, TRANS_RING_NODES)) {
++ read_unlock(&rx_rbuf->rb_lock);
++ break;
++ } else
++ read_unlock(&rx_rbuf->rb_lock);
++ }
++
++ if (timeout <= 0) {
++ pr_debug("RX complete check timeout!\n");
++ return -ETIME;
++ }
++
++ /*
++ * Interrupt from TX can only inform that the data is sent
++ * to AHB bus, not mean that it is sent to MITB. Thus we add
++ * a delay here for data to be completed sent.
++ */
++ udelay(1000);
++
++ return 0;
++}
++
++/*
++ * Enable/Disable the MLB IRQ
++ */
++static void mxc_mlb150_irq_enable(struct mlb_data *drvdata, u8 enable)
++{
++ if (enable) {
++ enable_irq(drvdata->irq_ahb0);
++ enable_irq(drvdata->irq_ahb1);
++ enable_irq(drvdata->irq_mlb);
++ } else {
++ disable_irq(drvdata->irq_ahb0);
++ disable_irq(drvdata->irq_ahb1);
++ disable_irq(drvdata->irq_mlb);
++ }
++}
++
++/*
++ * Enable the MLB channel
++ */
++static s32 mlb_channel_enable(struct mlb_data *drvdata,
++ int chan_dev_id, int on)
++{
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_channel_info *tx_chinfo = &pdevinfo->channels[TX_CHANNEL];
++ struct mlb_channel_info *rx_chinfo = &pdevinfo->channels[RX_CHANNEL];
++ u32 tx_ch = tx_chinfo->address;
++ u32 rx_ch = rx_chinfo->address;
++ u32 tx_cl = tx_chinfo->cl;
++ u32 rx_cl = rx_chinfo->cl;
++ s32 ret = 0;
++
++ /*
++ * setup the direction, enable, channel type,
++ * mode select, channel address and mask buf start
++ */
++ if (on) {
++ u32 ctype = pdevinfo->channel_type;
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++ mlb150_dev_init_rfb(pdevinfo, rx_ch, tx_ch, ctype);
++
++ mlb150_dev_init_amba_ahb(pdevinfo, ctype);
++
++#ifdef DEBUG
++ mlb150_dev_dump_ctr_tbl(0, tx_chinfo->cl + 1);
++#endif
++ /* Synchronize and unmute synchrouous channel */
++ if (MLB_CTYPE_SYNC == ctype) {
++ ret = mlb150_dev_unmute_syn_ch(rx_ch, rx_cl,
++ tx_ch, tx_cl);
++ if (ret)
++ return ret;
++ }
++
++ mlb150_dev_enable_ctr_write(0x0, ADT_RDY1 | ADT_DNE1 |
++ ADT_ERR1 | ADT_PS1 |
++ ADT_RDY2 | ADT_DNE2 | ADT_ERR2 | ADT_PS2,
++ 0xffffffff, 0xffffffff);
++
++ if (pdevinfo->fps >= CLK_2048FS)
++ mlb150_enable_pll(drvdata);
++
++ atomic_set(&pdevinfo->on, 1);
++
++#ifdef DEBUG
++ mlb150_dev_dump_reg();
++ mlb150_dev_dump_ctr_tbl(0, tx_chinfo->cl + 1);
++#endif
++ /* Init RX ADT */
++ mlb150_dev_pipo_start(&pdevinfo->rx_rbuf, rx_cl,
++ pdevinfo->rx_rbuf.phy_addrs[0]);
++ } else {
++ mlb150_dev_pipo_stop(&pdevinfo->rx_rbuf, rx_cl);
++
++ mlb150_dev_enable_dma_irq(0);
++ mlb150_dev_enable_ir_mlb(0);
++
++ mlb150_dev_reset_cat();
++
++ atomic_set(&pdevinfo->on, 0);
++
++ if (pdevinfo->fps >= CLK_2048FS)
++ mlb150_disable_pll(drvdata);
++ }
++
++ return 0;
++}
++
++/*
++ * MLB interrupt handler
++ */
++static void mlb_rx_isr(s32 ctype, u32 ahb_ch, struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ s32 head, tail, adt_sts;
++ u32 rx_buf_ptr;
++
++#ifdef DEBUG_RX
++ pr_debug("mxc_mlb150: mlb_rx_isr\n");
++#endif
++
++ read_lock(&rx_rbuf->rb_lock);
++
++ head = (rx_rbuf->head + 1) & (TRANS_RING_NODES - 1);
++ tail = ACCESS_ONCE(rx_rbuf->tail);
++ read_unlock(&rx_rbuf->rb_lock);
++
++ if (CIRC_SPACE(head, tail, TRANS_RING_NODES) >= 1) {
++ rx_buf_ptr = rx_rbuf->phy_addrs[head];
++
++ /* commit the item before incrementing the head */
++ smp_wmb();
++
++ write_lock(&rx_rbuf->rb_lock);
++ rx_rbuf->head = head;
++ write_unlock(&rx_rbuf->rb_lock);
++
++ /* wake up the reader */
++ wake_up_interruptible(&pdevinfo->rx_wq);
++ } else {
++ rx_buf_ptr = rx_rbuf->phy_addrs[head];
++ pr_debug("drop RX package, due to no space, (%d,%d)\n",
++ head, tail);
++ }
++
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++ /* Set ADT for RX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, rx_buf_ptr);
++}
++
++static void mlb_tx_isr(s32 ctype, u32 ahb_ch, struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ s32 head, tail, adt_sts;
++ u32 tx_buf_ptr;
++
++ read_lock(&tx_rbuf->rb_lock);
++
++ head = ACCESS_ONCE(tx_rbuf->head);
++ tail = (tx_rbuf->tail + 1) & (TRANS_RING_NODES - 1);
++ read_unlock(&tx_rbuf->rb_lock);
++
++ smp_mb();
++ write_lock(&tx_rbuf->rb_lock);
++ tx_rbuf->tail = tail;
++ write_unlock(&tx_rbuf->rb_lock);
++
++ /* check the current tx buffer is available or not */
++ if (CIRC_CNT(head, tail, TRANS_RING_NODES) >= 1) {
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ tx_buf_ptr = tx_rbuf->phy_addrs[tail];
++
++ wake_up_interruptible(&pdevinfo->tx_wq);
++
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++ /* Set ADT for TX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, tx_buf_ptr);
++ }
++}
++
++static irqreturn_t mlb_ahb_isr(int irq, void *dev_id)
++{
++ u32 acsr0, hcer0;
++ u32 ch_mask = (1 << SYNC_RX_CL) | (1 << CTRL_RX_CL)
++ | (1 << ASYNC_RX_CL) | (1 << ISOC_RX_CL)
++ | (1 << SYNC_TX_CL) | (1 << CTRL_TX_CL)
++ | (1 << ASYNC_TX_CL) | (1 << ISOC_TX_CL);
++
++ /*
++ * Step 5, Read the ACSRn registers to determine which channel or
++ * channels are causing the interrupt
++ */
++ acsr0 = __raw_readl(mlb_base + REG_ACSR0);
++
++ hcer0 = __raw_readl(mlb_base + REG_HCER0);
++
++ /*
++ * Step 6, If ACTL.SCE = 1, write the result of step 5 back to ACSR0
++ * and ACSR1 to clear the interrupt
++ * We'll not set ACTL_SCE
++ */
++
++ if (ch_mask & hcer0)
++ pr_err("CH encounters an AHB error: 0x%x\n", hcer0);
++
++ if ((1 << SYNC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_SYNC, SYNC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_SYNC]);
++
++ if ((1 << CTRL_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_CTRL, CTRL_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_CTRL]);
++
++ if ((1 << ASYNC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_ASYNC, ASYNC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_ASYNC]);
++
++ if ((1 << ISOC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_ISOC, ISOC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_ISOC]);
++
++ if ((1 << SYNC_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_SYNC, SYNC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_SYNC]);
++
++ if ((1 << CTRL_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_CTRL, CTRL_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_CTRL]);
++
++ if ((1 << ASYNC_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_ASYNC, ASYNC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_ASYNC]);
++
++ if ((1 << ISOC_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_ASYNC, ISOC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_ISOC]);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mlb_isr(int irq, void *dev_id)
++{
++ u32 rx_int_sts, tx_int_sts, ms0,
++ ms1, tx_cis, rx_cis, ctype;
++ int minor;
++ u32 cdt_val[4] = { 0 };
++
++ /*
++ * Step 4, Read the MSn register to determine which channel(s)
++ * are causing the interrupt
++ */
++ ms0 = __raw_readl(mlb_base + REG_MS0);
++ ms1 = __raw_readl(mlb_base + REG_MS1);
++
++ /*
++ * The MLB150_MS0, MLB150_MS1 registers need to be cleared. In
++ * the spec description, the registers should be cleared when
++ * enabling interrupt. In fact, we also should clear it in ISR.
++ */
++ __raw_writel(0, mlb_base + REG_MS0);
++ __raw_writel(0, mlb_base + REG_MS1);
++
++ pr_debug("mxc_mlb150: mlb interrupt:0x%08x 0x%08x\n",
++ (u32)ms0, (u32)ms1);
++
++ for (minor = 0; minor < MLB_MINOR_DEVICES; minor++) {
++ struct mlb_dev_info *pdevinfo = &mlb_devinfo[minor];
++ u32 rx_mlb_ch = pdevinfo->channels[RX_CHANNEL].address;
++ u32 tx_mlb_ch = pdevinfo->channels[TX_CHANNEL].address;
++ u32 rx_mlb_cl = pdevinfo->channels[RX_CHANNEL].cl;
++ u32 tx_mlb_cl = pdevinfo->channels[TX_CHANNEL].cl;
++
++ tx_cis = rx_cis = 0;
++
++ ctype = pdevinfo->channel_type;
++ rx_int_sts = (rx_mlb_ch < 31) ? ms0 : ms1;
++ tx_int_sts = (tx_mlb_ch < 31) ? ms0 : ms1;
++
++ pr_debug("mxc_mlb150: channel interrupt: "
++ "tx %d: 0x%08x, rx %d: 0x%08x\n",
++ tx_mlb_ch, (u32)tx_int_sts, rx_mlb_ch, (u32)rx_int_sts);
++
++ /* Get tx channel interrupt status */
++ if (tx_int_sts & (1 << (tx_mlb_ch % 32))) {
++ mlb150_dev_cdt_read(tx_mlb_cl, cdt_val);
++ pr_debug("mxc_mlb150: TX_CH: %d, cdt_val[3]: 0x%08x, "
++ "cdt_val[2]: 0x%08x, "
++ "cdt_val[1]: 0x%08x, "
++ "cdt_val[0]: 0x%08x\n",
++ tx_mlb_ch, cdt_val[3], cdt_val[2],
++ cdt_val[1], cdt_val[0]);
++ switch (ctype) {
++ case MLB_CTYPE_SYNC:
++ tx_cis = (cdt_val[2] & ~CDT_SYNC_WSTS_MASK)
++ >> CDT_SYNC_WSTS_SHIFT;
++ /*
++ * Clear RSTS/WSTS errors to resume
++ * channel operation
++ * a. For synchronous channels: WSTS[3] = 0
++ */
++ cdt_val[2] &= ~(0x8 << CDT_SYNC_WSTS_SHIFT);
++ break;
++ case MLB_CTYPE_CTRL:
++ case MLB_CTYPE_ASYNC:
++ tx_cis = (cdt_val[2] &
++ ~CDT_CTRL_ASYNC_WSTS_MASK)
++ >> CDT_CTRL_ASYNC_WSTS_SHIFT;
++ tx_cis = (cdt_val[3] & CDT_CTRL_ASYNC_WSTS_1) ?
++ (tx_cis | (0x1 << 4)) : tx_cis;
++ /*
++ * b. For async and ctrl channels:
++ * RSTS[4]/WSTS[4] = 0
++ * and RSTS[2]/WSTS[2] = 0
++ */
++ cdt_val[3] &= ~CDT_CTRL_ASYNC_WSTS_1;
++ cdt_val[2] &=
++ ~(0x4 << CDT_CTRL_ASYNC_WSTS_SHIFT);
++ break;
++ case MLB_CTYPE_ISOC:
++ tx_cis = (cdt_val[2] & ~CDT_ISOC_WSTS_MASK)
++ >> CDT_ISOC_WSTS_SHIFT;
++ /* c. For isoc channels: WSTS[2:1] = 0x00 */
++ cdt_val[2] &= ~(0x6 << CDT_ISOC_WSTS_SHIFT);
++ break;
++ default:
++ break;
++ }
++ mlb150_dev_cdt_write(tx_mlb_ch, cdt_val);
++ }
++
++ /* Get rx channel interrupt status */
++ if (rx_int_sts & (1 << (rx_mlb_ch % 32))) {
++ mlb150_dev_cdt_read(rx_mlb_cl, cdt_val);
++ pr_debug("mxc_mlb150: RX_CH: %d, cdt_val[3]: 0x%08x, "
++ "cdt_val[2]: 0x%08x, "
++ "cdt_val[1]: 0x%08x, "
++ "cdt_val[0]: 0x%08x\n",
++ rx_mlb_ch, cdt_val[3], cdt_val[2],
++ cdt_val[1], cdt_val[0]);
++ switch (ctype) {
++ case MLB_CTYPE_SYNC:
++ tx_cis = (cdt_val[2] & ~CDT_SYNC_RSTS_MASK)
++ >> CDT_SYNC_RSTS_SHIFT;
++ cdt_val[2] &= ~(0x8 << CDT_SYNC_WSTS_SHIFT);
++ break;
++ case MLB_CTYPE_CTRL:
++ case MLB_CTYPE_ASYNC:
++ tx_cis =
++ (cdt_val[2] & ~CDT_CTRL_ASYNC_RSTS_MASK)
++ >> CDT_CTRL_ASYNC_RSTS_SHIFT;
++ tx_cis = (cdt_val[3] & CDT_CTRL_ASYNC_RSTS_1) ?
++ (tx_cis | (0x1 << 4)) : tx_cis;
++ cdt_val[3] &= ~CDT_CTRL_ASYNC_RSTS_1;
++ cdt_val[2] &=
++ ~(0x4 << CDT_CTRL_ASYNC_RSTS_SHIFT);
++ break;
++ case MLB_CTYPE_ISOC:
++ tx_cis = (cdt_val[2] & ~CDT_ISOC_RSTS_MASK)
++ >> CDT_ISOC_RSTS_SHIFT;
++ cdt_val[2] &= ~(0x6 << CDT_ISOC_WSTS_SHIFT);
++ break;
++ default:
++ break;
++ }
++ mlb150_dev_cdt_write(rx_mlb_ch, cdt_val);
++ }
++
++ if (!tx_cis && !rx_cis)
++ continue;
++
++ /* fill exception event */
++ spin_lock(&pdevinfo->event_lock);
++ pdevinfo->ex_event |= (rx_cis << 16) | tx_cis;
++ spin_unlock(&pdevinfo->event_lock);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int mxc_mlb150_open(struct inode *inode, struct file *filp)
++{
++ int minor, ring_buf_size, buf_size, j, ret;
++ void __iomem *buf_addr;
++ ulong phy_addr;
++ struct mlb_dev_info *pdevinfo = NULL;
++ struct mlb_channel_info *pchinfo = NULL;
++ struct mlb_data *drvdata;
++
++ minor = MINOR(inode->i_rdev);
++ drvdata = container_of(inode->i_cdev, struct mlb_data, cdev);
++
++ if (minor < 0 || minor >= MLB_MINOR_DEVICES) {
++ pr_err("no device\n");
++ return -ENODEV;
++ }
++
++ /* open for each channel device */
++ if (atomic_cmpxchg(&mlb_devinfo[minor].opencnt, 0, 1) != 0) {
++ pr_err("busy\n");
++ return -EBUSY;
++ }
++
++ clk_prepare_enable(drvdata->clk_mlb3p);
++
++ /* initial MLB module */
++ mlb150_dev_init();
++
++ pdevinfo = &mlb_devinfo[minor];
++ pchinfo = &pdevinfo->channels[TX_CHANNEL];
++
++ ring_buf_size = pdevinfo->buf_size;
++ buf_size = ring_buf_size * (TRANS_RING_NODES * 2);
++ buf_addr = (void __iomem *)gen_pool_alloc(drvdata->iram_pool, buf_size);
++ if (buf_addr == NULL) {
++ ret = -ENOMEM;
++ pr_err("can not alloc rx/tx buffers: %d\n", buf_size);
++ return ret;
++ }
++ phy_addr = gen_pool_virt_to_phys(drvdata->iram_pool, (ulong)buf_addr);
++ pr_debug("IRAM Range: Virt 0x%p - 0x%p, Phys 0x%x - 0x%x, size: 0x%x\n",
++ buf_addr, (buf_addr + buf_size - 1), (u32)phy_addr,
++ (u32)(phy_addr + buf_size - 1), buf_size);
++ pdevinfo->rbuf_base_virt = buf_addr;
++ pdevinfo->rbuf_base_phy = phy_addr;
++ drvdata->iram_size = buf_size;
++
++ memset(buf_addr, 0, buf_size);
++
++ for (j = 0; j < (TRANS_RING_NODES);
++ ++j, buf_addr += ring_buf_size, phy_addr += ring_buf_size) {
++ pdevinfo->rx_rbuf.virt_bufs[j] = buf_addr;
++ pdevinfo->rx_rbuf.phy_addrs[j] = phy_addr;
++ pr_debug("RX Ringbuf[%d]: 0x%p 0x%x\n",
++ j, buf_addr, (u32)phy_addr);
++ }
++ pdevinfo->rx_rbuf.unit_size = ring_buf_size;
++ pdevinfo->rx_rbuf.total_size = buf_size;
++ for (j = 0; j < (TRANS_RING_NODES);
++ ++j, buf_addr += ring_buf_size, phy_addr += ring_buf_size) {
++ pdevinfo->tx_rbuf.virt_bufs[j] = buf_addr;
++ pdevinfo->tx_rbuf.phy_addrs[j] = phy_addr;
++ pr_debug("TX Ringbuf[%d]: 0x%p 0x%x\n",
++ j, buf_addr, (u32)phy_addr);
++ }
++
++ pdevinfo->tx_rbuf.unit_size = ring_buf_size;
++ pdevinfo->tx_rbuf.total_size = buf_size;
++
++ /* reset the buffer read/write ptr */
++ pdevinfo->rx_rbuf.head = pdevinfo->rx_rbuf.tail = 0;
++ pdevinfo->tx_rbuf.head = pdevinfo->tx_rbuf.tail = 0;
++ pdevinfo->ex_event = 0;
++ pdevinfo->tx_ok = 0;
++
++ init_waitqueue_head(&pdevinfo->rx_wq);
++ init_waitqueue_head(&pdevinfo->tx_wq);
++
++ drvdata = container_of(inode->i_cdev, struct mlb_data, cdev);
++ drvdata->devinfo = pdevinfo;
++ mxc_mlb150_irq_enable(drvdata, 1);
++ filp->private_data = drvdata;
++
++ return 0;
++}
++
++static int mxc_mlb150_release(struct inode *inode, struct file *filp)
++{
++ int minor;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ minor = MINOR(inode->i_rdev);
++ mxc_mlb150_irq_enable(drvdata, 0);
++
++#ifdef DEBUG
++ mlb150_dev_dump_reg();
++ mlb150_dev_dump_ctr_tbl(0, pdevinfo->channels[TX_CHANNEL].cl + 1);
++#endif
++
++ gen_pool_free(drvdata->iram_pool,
++ (ulong)pdevinfo->rbuf_base_virt, drvdata->iram_size);
++
++ mlb150_dev_exit();
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ atomic_set(&pdevinfo->on, 0);
++
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++ /* decrease the open count */
++ atomic_set(&pdevinfo->opencnt, 0);
++
++ drvdata->devinfo = NULL;
++
++ return 0;
++}
++
++static long mxc_mlb150_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ void __user *argp = (void __user *)arg;
++ unsigned long flags, event;
++ int minor;
++
++ minor = MINOR(inode->i_rdev);
++
++ switch (cmd) {
++ case MLB_CHAN_SETADDR:
++ {
++ unsigned int caddr;
++ /* get channel address from user space */
++ if (copy_from_user(&caddr, argp, sizeof(caddr))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++ pdevinfo->channels[TX_CHANNEL].address =
++ (caddr >> 16) & 0xFFFF;
++ pdevinfo->channels[RX_CHANNEL].address = caddr & 0xFFFF;
++ pr_debug("mxc_mlb150: set ch addr, tx: %d, rx: %d\n",
++ pdevinfo->channels[TX_CHANNEL].address,
++ pdevinfo->channels[RX_CHANNEL].address);
++ break;
++ }
++
++ case MLB_CHAN_STARTUP:
++ if (atomic_read(&pdevinfo->on)) {
++ pr_debug("mxc_mlb150: channel alreadly startup\n");
++ break;
++ }
++ if (mlb_channel_enable(drvdata, minor, 1))
++ return -EFAULT;
++ break;
++ case MLB_CHAN_SHUTDOWN:
++ if (atomic_read(&pdevinfo->on) == 0) {
++ pr_debug("mxc_mlb150: channel areadly shutdown\n");
++ break;
++ }
++ mlb150_trans_complete_check(pdevinfo);
++ mlb_channel_enable(drvdata, minor, 0);
++ break;
++ case MLB_CHAN_GETEVENT:
++ /* get and clear the ex_event */
++ spin_lock_irqsave(&pdevinfo->event_lock, flags);
++ event = pdevinfo->ex_event;
++ pdevinfo->ex_event = 0;
++ spin_unlock_irqrestore(&pdevinfo->event_lock, flags);
++
++ if (event) {
++ if (copy_to_user(argp, &event, sizeof(event))) {
++ pr_err("mxc_mlb150: copy to user failed\n");
++ return -EFAULT;
++ }
++ } else
++ return -EAGAIN;
++ break;
++ case MLB_SET_ISOC_BLKSIZE_188:
++ pdevinfo->isoc_blksz = 188;
++ pdevinfo->cdt_buf_dep = pdevinfo->adt_buf_dep =
++ pdevinfo->isoc_blksz * CH_ISOC_BLK_NUM;
++ break;
++ case MLB_SET_ISOC_BLKSIZE_196:
++ pdevinfo->isoc_blksz = 196;
++ pdevinfo->cdt_buf_dep = pdevinfo->adt_buf_dep =
++ pdevinfo->isoc_blksz * CH_ISOC_BLK_NUM;
++ break;
++ case MLB_SET_SYNC_QUAD:
++ {
++ u32 quad;
++
++ if (copy_from_user(&quad, argp, sizeof(quad))) {
++ pr_err("mxc_mlb150: get quad number "
++ "from user failed\n");
++ return -EFAULT;
++ }
++ if (quad <= 0 || quad > 3) {
++ pr_err("mxc_mlb150: Invalid Quadlets!"
++ "Quadlets in Sync mode can "
++ "only be 1, 2, 3\n");
++ return -EINVAL;
++ }
++ pdevinfo->sync_quad = quad;
++ /* Each quadlets is 4 bytes */
++ pdevinfo->cdt_buf_dep = quad * 4 * 4;
++ pdevinfo->adt_buf_dep =
++ pdevinfo->cdt_buf_dep * CH_SYNC_ADT_BUF_MULTI;
++ }
++ break;
++ case MLB_SET_FPS:
++ {
++ u32 fps, c0_val;
++
++ /* get fps from user space */
++ if (copy_from_user(&fps, argp, sizeof(fps))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~MLBC0_MLBCLK_MASK;
++
++ /* check fps value */
++ switch (fps) {
++ case 256:
++ case 512:
++ case 1024:
++ pdevinfo->fps = fps >> 9;
++ c0_val &= ~MLBC0_MLBPEN;
++ c0_val |= (fps >> 9)
++ << MLBC0_MLBCLK_SHIFT;
++
++ if (1024 == fps) {
++ /*
++ * Invert output clock phase
++ * in 1024 fps
++ */
++ __raw_writel(0x1,
++ mlb_base + REG_MLBPC2);
++ }
++ break;
++ case 2048:
++ case 3072:
++ case 4096:
++ pdevinfo->fps = (fps >> 10) + 1;
++ c0_val |= ((fps >> 10) + 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ case 6144:
++ pdevinfo->fps = fps >> 10;
++ c0_val |= ((fps >> 10) + 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ case 8192:
++ pdevinfo->fps = (fps >> 10) - 1;
++ c0_val |= ((fps >> 10) - 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ default:
++ pr_debug("mxc_mlb150: invalid fps argument: %d\n",
++ fps);
++ return -EINVAL;
++ }
++
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ pr_debug("mxc_mlb150: set fps to %d, MLBC0: 0x%08x\n",
++ fps,
++ (u32)__raw_readl(mlb_base + REG_MLBC0));
++
++ break;
++ }
++
++ case MLB_GET_VER:
++ {
++ u32 version;
++
++ /* get MLB device module version */
++ version = 0x03030003;
++
++ pr_debug("mxc_mlb150: get version: 0x%08x\n",
++ version);
++
++ if (copy_to_user(argp, &version, sizeof(version))) {
++ pr_err("mxc_mlb150: copy to user failed\n");
++ return -EFAULT;
++ }
++ break;
++ }
++
++ case MLB_SET_DEVADDR:
++ {
++ u32 c1_val;
++ u8 devaddr;
++
++ /* get MLB device address from user space */
++ if (copy_from_user
++ (&devaddr, argp, sizeof(unsigned char))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++
++ c1_val = __raw_readl(mlb_base + REG_MLBC1);
++ c1_val &= ~MLBC1_NDA_MASK;
++ c1_val |= devaddr << MLBC1_NDA_SHIFT;
++ __raw_writel(c1_val, mlb_base + REG_MLBC1);
++ pr_debug("mxc_mlb150: set dev addr, dev addr: %d, "
++ "MLBC1: 0x%08x\n", devaddr,
++ (u32)__raw_readl(mlb_base + REG_MLBC1));
++
++ break;
++ }
++
++ case MLB_IRQ_DISABLE:
++ {
++ disable_irq(drvdata->irq_mlb);
++ break;
++ }
++
++ case MLB_IRQ_ENABLE:
++ {
++ enable_irq(drvdata->irq_mlb);
++ break;
++ }
++ default:
++ pr_info("mxc_mlb150: Invalid ioctl command\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/*
++ * MLB read routine
++ * Read the current received data from queued buffer,
++ * and free this buffer for hw to fill ingress data.
++ */
++static ssize_t mxc_mlb150_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ int size;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++
++ head = ACCESS_ONCE(rx_rbuf->head);
++ tail = rx_rbuf->tail;
++
++ read_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++ /* check the current rx buffer is available or not */
++ if (0 == CIRC_CNT(head, tail, TRANS_RING_NODES)) {
++
++ if (filp->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ do {
++ DEFINE_WAIT(__wait);
++
++ for (;;) {
++ prepare_to_wait(&pdevinfo->rx_wq,
++ &__wait, TASK_INTERRUPTIBLE);
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ if (CIRC_CNT(rx_rbuf->head, rx_rbuf->tail,
++ TRANS_RING_NODES) > 0) {
++ read_unlock_irqrestore(&rx_rbuf->rb_lock,
++ flags);
++ break;
++ }
++ read_unlock_irqrestore(&rx_rbuf->rb_lock,
++ flags);
++
++ if (!signal_pending(current)) {
++ schedule();
++ continue;
++ }
++ return -ERESTARTSYS;
++ }
++ finish_wait(&pdevinfo->rx_wq, &__wait);
++ } while (0);
++ }
++
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ size = pdevinfo->adt_buf_dep;
++ if (size > count) {
++ /* the user buffer is too small */
++ pr_warning
++ ("mxc_mlb150: received data size is bigger than "
++ "size: %d, count: %d\n", size, count);
++ return -EINVAL;
++ }
++
++ /* extract one item from the buffer */
++ if (copy_to_user(buf, rx_rbuf->virt_bufs[tail], size)) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++
++ /* finish reading descriptor before incrementing tail */
++ smp_mb();
++
++ write_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ rx_rbuf->tail = (tail + 1) & (TRANS_RING_NODES - 1);
++ write_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++ *f_pos = 0;
++
++ return size;
++}
++
++/*
++ * MLB write routine
++ * Copy the user data to tx channel buffer,
++ * and prepare the channel current/next buffer ptr.
++ */
++static ssize_t mxc_mlb150_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ s32 ret = 0;
++ struct mlb_channel_info *pchinfo = NULL;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++ /*
++ * minor = MINOR(filp->f_dentry->d_inode->i_rdev);
++ */
++ pchinfo = &pdevinfo->channels[TX_CHANNEL];
++
++ if (count > pdevinfo->buf_size) {
++ /* too many data to write */
++ pr_warning("mxc_mlb150: overflow write data\n");
++ return -EFBIG;
++ }
++
++ *f_pos = 0;
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++
++ head = tx_rbuf->head;
++ tail = ACCESS_ONCE(tx_rbuf->tail);
++ read_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
++ if (0 == CIRC_SPACE(head, tail, TRANS_RING_NODES)) {
++ if (filp->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++ do {
++ DEFINE_WAIT(__wait);
++
++ for (;;) {
++ prepare_to_wait(&pdevinfo->tx_wq,
++ &__wait, TASK_INTERRUPTIBLE);
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ if (CIRC_SPACE(tx_rbuf->head, tx_rbuf->tail,
++ TRANS_RING_NODES) > 0) {
++ read_unlock_irqrestore(&tx_rbuf->rb_lock,
++ flags);
++ break;
++ }
++ read_unlock_irqrestore(&tx_rbuf->rb_lock,
++ flags);
++
++ if (!signal_pending(current)) {
++ schedule();
++ continue;
++ }
++ return -ERESTARTSYS;
++ }
++ finish_wait(&pdevinfo->tx_wq, &__wait);
++ } while (0);
++ }
++
++ if (copy_from_user((void *)tx_rbuf->virt_bufs[head], buf, count)) {
++ read_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++ pr_err("mxc_mlb: copy from user failed\n");
++ ret = -EFAULT;
++ goto out;
++ }
++
++ write_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ smp_wmb();
++ tx_rbuf->head = (head + 1) & (TRANS_RING_NODES - 1);
++ write_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
++ if (0 == CIRC_CNT(head, tail, TRANS_RING_NODES)) {
++ u32 tx_buf_ptr, ahb_ch;
++ s32 adt_sts;
++ u32 ctype = pdevinfo->channel_type;
++
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ tx_buf_ptr = tx_rbuf->phy_addrs[tail];
++
++ ahb_ch = pdevinfo->channels[TX_CHANNEL].cl;
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++
++ /* Set ADT for TX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, tx_buf_ptr);
++ }
++
++ ret = count;
++out:
++ return ret;
++}
++
++static unsigned int mxc_mlb150_poll(struct file *filp,
++ struct poll_table_struct *wait)
++{
++ int minor;
++ unsigned int ret = 0;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++
++ minor = MINOR(filp->f_dentry->d_inode->i_rdev);
++
++ poll_wait(filp, &pdevinfo->rx_wq, wait);
++ poll_wait(filp, &pdevinfo->tx_wq, wait);
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ head = tx_rbuf->head;
++ tail = tx_rbuf->tail;
++ read_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
++ /* check the tx buffer is avaiable or not */
++ if (CIRC_SPACE(head, tail, TRANS_RING_NODES) >= 1)
++ ret |= POLLOUT | POLLWRNORM;
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ head = rx_rbuf->head;
++ tail = rx_rbuf->tail;
++ read_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++ /* check the rx buffer filled or not */
++ if (CIRC_CNT(head, tail, TRANS_RING_NODES) >= 1)
++ ret |= POLLIN | POLLRDNORM;
++
++
++ /* check the exception event */
++ if (pdevinfo->ex_event)
++ ret |= POLLIN | POLLRDNORM;
++
++ return ret;
++}
++
++/*
++ * char dev file operations structure
++ */
++static const struct file_operations mxc_mlb150_fops = {
++
++ .owner = THIS_MODULE,
++ .open = mxc_mlb150_open,
++ .release = mxc_mlb150_release,
++ .unlocked_ioctl = mxc_mlb150_ioctl,
++ .poll = mxc_mlb150_poll,
++ .read = mxc_mlb150_read,
++ .write = mxc_mlb150_write,
++};
++
++static struct platform_device_id imx_mlb150_devtype[] = {
++ {
++ .name = "imx6q-mlb150",
++ .driver_data = 0,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_mlb150_devtype);
++
++static const struct of_device_id mlb150_imx_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mlb150", .data = &imx_mlb150_devtype[0], },
++ { /* sentinel */ }
++};
++
++/*
++ * This function is called whenever the MLB device is detected.
++ */
++static int mxc_mlb150_probe(struct platform_device *pdev)
++{
++ int ret, mlb_major, i;
++ struct mlb_data *drvdata;
++ struct resource *res;
++ struct device_node *np = pdev->dev.of_node;
++
++ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct mlb_data),
++ GFP_KERNEL);
++ if (!drvdata) {
++ dev_err(&pdev->dev, "can't allocate enough memory\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Register MLB lld as four character devices
++ */
++ ret = alloc_chrdev_region(&drvdata->firstdev, 0,
++ MLB_MINOR_DEVICES, "mxc_mlb150");
++ if (ret < 0) {
++ dev_err(&pdev->dev, "alloc region error\n");
++ goto err_reg;
++ }
++ mlb_major = MAJOR(drvdata->firstdev);
++ dev_dbg(&pdev->dev, "MLB device major: %d\n", mlb_major);
++
++ cdev_init(&drvdata->cdev, &mxc_mlb150_fops);
++ drvdata->cdev.owner = THIS_MODULE;
++
++ ret = cdev_add(&drvdata->cdev, drvdata->firstdev, MLB_MINOR_DEVICES);
++ if (ret) {
++ dev_err(&pdev->dev, "can't add cdev\n");
++ goto err_reg;
++ }
++
++ /* create class and device for udev information */
++ drvdata->class = class_create(THIS_MODULE, "mlb150");
++ if (IS_ERR(drvdata->class)) {
++ dev_err(&pdev->dev, "failed to create device class\n");
++ ret = -ENOMEM;
++ goto err_class;
++ }
++
++ for (i = 0; i < MLB_MINOR_DEVICES; i++) {
++ struct device *class_dev;
++
++ class_dev = device_create(drvdata->class, NULL,
++ MKDEV(mlb_major, i),
++ NULL, mlb_devinfo[i].dev_name);
++ if (IS_ERR(class_dev)) {
++ dev_err(&pdev->dev, "failed to create mlb150 %s"
++ " class device\n", mlb_devinfo[i].dev_name);
++ ret = -ENOMEM;
++ goto err_dev;
++ }
++ }
++
++ /* ahb0 irq */
++ drvdata->irq_ahb0 = platform_get_irq(pdev, 1);
++ if (drvdata->irq_ahb0 < 0) {
++ dev_err(&pdev->dev, "No ahb0 irq line provided\n");
++ goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "ahb0_irq: %d\n", drvdata->irq_ahb0);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_ahb0, mlb_ahb_isr,
++ 0, "mlb_ahb0", NULL)) {
++ dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_ahb0);
++ goto err_dev;
++ }
++
++ /* ahb1 irq */
++ drvdata->irq_ahb1 = platform_get_irq(pdev, 2);
++ if (drvdata->irq_ahb1 < 0) {
++ dev_err(&pdev->dev, "No ahb1 irq line provided\n");
++ goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "ahb1_irq: %d\n", drvdata->irq_ahb1);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_ahb1, mlb_ahb_isr,
++ 0, "mlb_ahb1", NULL)) {
++ dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_ahb1);
++ goto err_dev;
++ }
++
++ /* mlb irq */
++ drvdata->irq_mlb = platform_get_irq(pdev, 0);
++ if (drvdata->irq_mlb < 0) {
++ dev_err(&pdev->dev, "No mlb irq line provided\n");
++ goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "mlb_irq: %d\n", drvdata->irq_mlb);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_mlb, mlb_isr,
++ 0, "mlb", NULL)) {
++ dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_mlb);
++ goto err_dev;
++ }
++
++ /* ioremap from phy mlb to kernel space */
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "can't get device resources\n");
++ ret = -ENOENT;
++ goto err_dev;
++ }
++ mlb_base = devm_request_and_ioremap(&pdev->dev, res);
++ dev_dbg(&pdev->dev, "mapped base address: 0x%08x\n", (u32)mlb_base);
++ if (IS_ERR(mlb_base)) {
++ dev_err(&pdev->dev,
++ "failed to get ioremap base\n");
++ ret = PTR_ERR(mlb_base);
++ goto err_dev;
++ }
++ drvdata->membase = mlb_base;
++
++#ifdef CONFIG_REGULATOR
++ drvdata->nvcc = devm_regulator_get(&pdev->dev, "reg_nvcc");
++ if (!IS_ERR(drvdata->nvcc)) {
++ regulator_set_voltage(drvdata->nvcc, 2500000, 2500000);
++ dev_err(&pdev->dev, "enalbe regulator\n");
++ ret = regulator_enable(drvdata->nvcc);
++ if (ret) {
++ dev_err(&pdev->dev, "vdd set voltage error\n");
++ goto err_dev;
++ }
++ }
++#endif
++
++ /* enable clock */
++ drvdata->clk_mlb3p = devm_clk_get(&pdev->dev, "mlb");
++ if (IS_ERR(drvdata->clk_mlb3p)) {
++ dev_err(&pdev->dev, "unable to get mlb clock\n");
++ ret = PTR_ERR(drvdata->clk_mlb3p);
++ goto err_dev;
++ }
++
++ drvdata->clk_mlb6p = devm_clk_get(&pdev->dev, "pll8_mlb");
++ if (IS_ERR(drvdata->clk_mlb6p)) {
++ dev_err(&pdev->dev, "unable to get mlb pll clock\n");
++ ret = PTR_ERR(drvdata->clk_mlb6p);
++ goto err_dev;
++ }
++
++
++ drvdata->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!drvdata->iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ ret = -ENOMEM;
++ goto err_dev;
++ }
++
++ drvdata->devinfo = NULL;
++ mxc_mlb150_irq_enable(drvdata, 0);
++ platform_set_drvdata(pdev, drvdata);
++ return 0;
++
++err_dev:
++ for (--i; i >= 0; i--)
++ device_destroy(drvdata->class, MKDEV(mlb_major, i));
++
++ class_destroy(drvdata->class);
++err_class:
++ cdev_del(&drvdata->cdev);
++err_reg:
++ unregister_chrdev_region(drvdata->firstdev, MLB_MINOR_DEVICES);
++
++ return ret;
++}
++
++static int mxc_mlb150_remove(struct platform_device *pdev)
++{
++ int i;
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt))
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++
++ /* disable mlb power */
++#ifdef CONFIG_REGULATOR
++ if (!IS_ERR(drvdata->nvcc))
++ regulator_disable(drvdata->nvcc);
++#endif
++
++ /* destroy mlb device class */
++ for (i = MLB_MINOR_DEVICES - 1; i >= 0; i--)
++ device_destroy(drvdata->class,
++ MKDEV(MAJOR(drvdata->firstdev), i));
++ class_destroy(drvdata->class);
++
++ cdev_del(&drvdata->cdev);
++
++ /* Unregister the two MLB devices */
++ unregister_chrdev_region(drvdata->firstdev, MLB_MINOR_DEVICES);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int mxc_mlb150_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt)) {
++ mlb150_dev_exit();
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++ }
++
++ return 0;
++}
++
++static int mxc_mlb150_resume(struct platform_device *pdev)
++{
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt)) {
++ clk_prepare_enable(drvdata->clk_mlb3p);
++ mlb150_dev_init();
++ }
++
++ if (pdevinfo && atomic_read(&pdevinfo->on) &&
++ (pdevinfo->fps >= CLK_2048FS))
++ clk_prepare_enable(drvdata->clk_mlb6p);
++
++ return 0;
++}
++#else
++#define mxc_mlb150_suspend NULL
++#define mxc_mlb150_resume NULL
++#endif
++
++/*
++ * platform driver structure for MLB
++ */
++static struct platform_driver mxc_mlb150_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = mlb150_imx_dt_ids,
++ },
++ .probe = mxc_mlb150_probe,
++ .remove = mxc_mlb150_remove,
++ .suspend = mxc_mlb150_suspend,
++ .resume = mxc_mlb150_resume,
++ .id_table = imx_mlb150_devtype,
++};
++
++static int __init mxc_mlb150_init(void)
++{
++ return platform_driver_register(&mxc_mlb150_driver);
++}
++
++static void __exit mxc_mlb150_exit(void)
++{
++ platform_driver_unregister(&mxc_mlb150_driver);
++}
++
++module_init(mxc_mlb150_init);
++module_exit(mxc_mlb150_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MLB150 low level driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/mxc/vpu/Kconfig linux-openelec/drivers/mxc/vpu/Kconfig
+--- linux-3.14.36/drivers/mxc/vpu/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/vpu/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,31 @@
++#
++# Codec configuration
++#
++
++menu "MXC VPU(Video Processing Unit) support"
++
++config MXC_VPU
++ tristate "Support for MXC VPU(Video Processing Unit)"
++ depends on (SOC_IMX27 || SOC_IMX5 || SOC_IMX6Q)
++ default y
++ ---help---
++ The VPU codec device provides codec function for H.264/MPEG4/H.263,
++ as well as MPEG2/VC-1/DivX on some platforms.
++
++config MXC_VPU_DEBUG
++ bool "MXC VPU debugging"
++ depends on MXC_VPU != n
++ help
++ This is an option for the developers; most people should
++ say N here. This enables MXC VPU driver debugging.
++
++config MX6_VPU_352M
++ bool "MX6 VPU 352M"
++ depends on MXC_VPU
++ default n
++ help
++ Increase VPU frequncy to 352M, the config will disable bus frequency
++ adjust dynamic, and CPU lowest setpoint will be 352Mhz.
++ This config is used for special VPU use case.
++
++endmenu
+diff -Nur linux-3.14.36/drivers/mxc/vpu/Makefile linux-openelec/drivers/mxc/vpu/Makefile
+--- linux-3.14.36/drivers/mxc/vpu/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/vpu/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,9 @@
++#
++# Makefile for the VPU drivers.
++#
++
++obj-$(CONFIG_MXC_VPU) += mxc_vpu.o
++
++ifeq ($(CONFIG_MXC_VPU_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+diff -Nur linux-3.14.36/drivers/mxc/vpu/mxc_vpu.c linux-openelec/drivers/mxc/vpu/mxc_vpu.c
+--- linux-3.14.36/drivers/mxc/vpu/mxc_vpu.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/mxc/vpu/mxc_vpu.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1342 @@
++/*
++ * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_vpu.c
++ *
++ * @brief VPU system initialization and file operation implementation
++ *
++ * @ingroup VPU
++ */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/ioport.h>
++#include <linux/stat.h>
++#include <linux/platform_device.h>
++#include <linux/kdev_t.h>
++#include <linux/dma-mapping.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++#include <linux/sched.h>
++#include <linux/vmalloc.h>
++#include <linux/regulator/consumer.h>
++#include <linux/page-flags.h>
++#include <linux/mm_types.h>
++#include <linux/types.h>
++#include <linux/memblock.h>
++#include <linux/memory.h>
++#include <linux/version.h>
++#include <asm/page.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++#include <linux/module.h>
++#include <linux/pm_runtime.h>
++#include <linux/sizes.h>
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <linux/iram_alloc.h>
++#include <mach/clock.h>
++#include <mach/hardware.h>
++#include <mach/mxc_vpu.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/genalloc.h>
++#include <linux/mxc_vpu.h>
++#include <linux/of.h>
++#include <linux/reset.h>
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++#include <mach/busfreq.h>
++#include <mach/common.h>
++#else
++#include <asm/sizes.h>
++#endif
++
++/* Define one new pgprot which combined uncached and XN(never executable) */
++#define pgprot_noncachedxn(prot) \
++ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
++
++struct vpu_priv {
++ struct fasync_struct *async_queue;
++ struct work_struct work;
++ struct workqueue_struct *workqueue;
++ struct mutex lock;
++};
++
++/* To track the allocated memory buffer */
++struct memalloc_record {
++ struct list_head list;
++ struct vpu_mem_desc mem;
++};
++
++struct iram_setting {
++ u32 start;
++ u32 end;
++};
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static struct gen_pool *iram_pool;
++static u32 iram_base;
++#endif
++
++static LIST_HEAD(head);
++
++static int vpu_major;
++static int vpu_clk_usercount;
++static struct class *vpu_class;
++static struct vpu_priv vpu_data;
++static u8 open_count;
++static struct clk *vpu_clk;
++static struct vpu_mem_desc bitwork_mem = { 0 };
++static struct vpu_mem_desc pic_para_mem = { 0 };
++static struct vpu_mem_desc user_data_mem = { 0 };
++static struct vpu_mem_desc share_mem = { 0 };
++static struct vpu_mem_desc vshare_mem = { 0 };
++
++static void __iomem *vpu_base;
++static int vpu_ipi_irq;
++static u32 phy_vpu_base_addr;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++static phys_addr_t top_address_DRAM;
++static struct mxc_vpu_platform_data *vpu_plat;
++#endif
++
++static struct device *vpu_dev;
++
++/* IRAM setting */
++static struct iram_setting iram;
++
++/* implement the blocking ioctl */
++static int irq_status;
++static int codec_done;
++static wait_queue_head_t vpu_queue;
++
++#ifdef CONFIG_SOC_IMX6Q
++#define MXC_VPU_HAS_JPU
++#endif
++
++#ifdef MXC_VPU_HAS_JPU
++static int vpu_jpu_irq;
++#endif
++
++#ifdef CONFIG_PM
++static unsigned int regBk[64];
++static unsigned int pc_before_suspend;
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static struct regulator *vpu_regulator;
++#endif
++static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);
++
++#define READ_REG(x) readl_relaxed(vpu_base + x)
++#define WRITE_REG(val, x) writel_relaxed(val, vpu_base + x)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++/* redirect to static functions */
++static int cpu_is_mx6dl(void)
++{
++ int ret;
++ ret = of_machine_is_compatible("fsl,imx6dl");
++ return ret;
++}
++
++static int cpu_is_mx6q(void)
++{
++ int ret;
++ ret = of_machine_is_compatible("fsl,imx6q");
++ return ret;
++}
++#endif
++
++static void vpu_reset(void)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ device_reset(vpu_dev);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ imx_src_reset_vpu();
++#else
++ if (vpu_plat->reset)
++ vpu_plat->reset();
++#endif
++}
++
++static long vpu_power_get(bool on)
++{
++ long ret = 0;
++
++ if (on) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
++ ret = IS_ERR(vpu_regulator);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ vpu_regulator = devm_regulator_get(vpu_dev, "pu");
++ ret = IS_ERR(vpu_regulator);
++#endif
++ } else {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (!IS_ERR(vpu_regulator))
++ regulator_put(vpu_regulator);
++#endif
++ }
++ return ret;
++}
++
++static void vpu_power_up(bool on)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ int ret = 0;
++
++ if (on) {
++ if (!IS_ERR(vpu_regulator)) {
++ ret = regulator_enable(vpu_regulator);
++ if (ret)
++ dev_err(vpu_dev, "failed to power up vpu\n");
++ }
++ } else {
++ if (!IS_ERR(vpu_regulator)) {
++ ret = regulator_disable(vpu_regulator);
++ if (ret)
++ dev_err(vpu_dev, "failed to power down vpu\n");
++ }
++ }
++#else
++ imx_gpc_power_up_pu(on);
++#endif
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static int cpu_is_mx53(void)
++{
++ return 0;
++}
++
++static int cpu_is_mx51(void)
++{
++ return 0;
++}
++
++#define VM_RESERVED 0
++#endif
++
++/*!
++ * Private function to alloc dma buffer
++ * @return status 0 success.
++ */
++static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
++{
++ mem->cpu_addr = (unsigned long)
++ dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
++ (dma_addr_t *) (&mem->phy_addr),
++ GFP_DMA | GFP_KERNEL);
++ dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
++ if ((void *)(mem->cpu_addr) == NULL) {
++ dev_err(vpu_dev, "Physical memory allocation error!\n");
++ return -1;
++ }
++ return 0;
++}
++
++/*!
++ * Private function to free dma buffer
++ */
++static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
++{
++ if (mem->cpu_addr != 0) {
++ dma_free_coherent(0, PAGE_ALIGN(mem->size),
++ (void *)mem->cpu_addr, mem->phy_addr);
++ }
++}
++
++/*!
++ * Private function to free buffers
++ * @return status 0 success.
++ */
++static int vpu_free_buffers(void)
++{
++ struct memalloc_record *rec, *n;
++ struct vpu_mem_desc mem;
++
++ list_for_each_entry_safe(rec, n, &head, list) {
++ mem = rec->mem;
++ if (mem.cpu_addr != 0) {
++ vpu_free_dma_buffer(&mem);
++ dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
++ /* delete from list */
++ list_del(&rec->list);
++ kfree(rec);
++ }
++ }
++
++ return 0;
++}
++
++static inline void vpu_worker_callback(struct work_struct *w)
++{
++ struct vpu_priv *dev = container_of(w, struct vpu_priv,
++ work);
++
++ if (dev->async_queue)
++ kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
++
++ irq_status = 1;
++ /*
++ * Clock is gated on when dec/enc started, gate it off when
++ * codec is done.
++ */
++ if (codec_done)
++ codec_done = 0;
++
++ wake_up_interruptible(&vpu_queue);
++}
++
++/*!
++ * @brief vpu interrupt handler
++ */
++static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
++{
++ struct vpu_priv *dev = dev_id;
++ unsigned long reg;
++
++ reg = READ_REG(BIT_INT_REASON);
++ if (reg & 0x8)
++ codec_done = 1;
++ WRITE_REG(0x1, BIT_INT_CLEAR);
++
++ queue_work(dev->workqueue, &dev->work);
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * @brief vpu jpu interrupt handler
++ */
++#ifdef MXC_VPU_HAS_JPU
++static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
++{
++ struct vpu_priv *dev = dev_id;
++ unsigned long reg;
++
++ reg = READ_REG(MJPEG_PIC_STATUS_REG);
++ if (reg & 0x3)
++ codec_done = 1;
++
++ queue_work(dev->workqueue, &dev->work);
++
++ return IRQ_HANDLED;
++}
++#endif
++
++/*!
++ * @brief check phy memory prepare to pass to vpu is valid or not, we
++ * already address some issue that if pass a wrong address to vpu
++ * (like virtual address), system will hang.
++ *
++ * @return true return is a valid phy memory address, false return not.
++ */
++bool vpu_is_valid_phy_memory(u32 paddr)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (paddr > top_address_DRAM)
++ return false;
++#endif
++
++ return true;
++}
++
++/*!
++ * @brief open function for vpu file operation
++ *
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_open(struct inode *inode, struct file *filp)
++{
++
++ mutex_lock(&vpu_data.lock);
++
++ if (open_count++ == 0) {
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_get_sync(vpu_dev);
++#endif
++ vpu_power_up(true);
++
++#ifdef CONFIG_SOC_IMX6Q
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_CUR_PC))
++ dev_dbg(vpu_dev, "Not power off before vpu open!\n");
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++#endif
++ }
++
++ filp->private_data = (void *)(&vpu_data);
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++/*!
++ * @brief IO ctrl function for vpu file operation
++ * @param cmd IO ctrl command
++ * @return 0 on success or negative error code on error
++ */
++static long vpu_ioctl(struct file *filp, u_int cmd,
++ u_long arg)
++{
++ int ret = 0;
++
++ switch (cmd) {
++ case VPU_IOC_PHYMEM_ALLOC:
++ {
++ struct memalloc_record *rec;
++
++ rec = kzalloc(sizeof(*rec), GFP_KERNEL);
++ if (!rec)
++ return -ENOMEM;
++
++ ret = copy_from_user(&(rec->mem),
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ kfree(rec);
++ return -EFAULT;
++ }
++
++ dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
++ rec->mem.size);
++
++ ret = vpu_alloc_dma_buffer(&(rec->mem));
++ if (ret == -1) {
++ kfree(rec);
++ dev_err(vpu_dev,
++ "Physical memory allocation error!\n");
++ break;
++ }
++ ret = copy_to_user((void __user *)arg, &(rec->mem),
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ kfree(rec);
++ ret = -EFAULT;
++ break;
++ }
++
++ mutex_lock(&vpu_data.lock);
++ list_add(&rec->list, &head);
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ case VPU_IOC_PHYMEM_FREE:
++ {
++ struct memalloc_record *rec, *n;
++ struct vpu_mem_desc vpu_mem;
++
++ ret = copy_from_user(&vpu_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret)
++ return -EACCES;
++
++ dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = 0x%x\n",
++ vpu_mem.cpu_addr);
++ if ((void *)vpu_mem.cpu_addr != NULL)
++ vpu_free_dma_buffer(&vpu_mem);
++
++ mutex_lock(&vpu_data.lock);
++ list_for_each_entry_safe(rec, n, &head, list) {
++ if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
++ /* delete from list */
++ list_del(&rec->list);
++ kfree(rec);
++ break;
++ }
++ }
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ case VPU_IOC_WAIT4INT:
++ {
++ u_long timeout = (u_long) arg;
++ if (!wait_event_interruptible_timeout
++ (vpu_queue, irq_status != 0,
++ msecs_to_jiffies(timeout))) {
++ dev_warn(vpu_dev, "VPU blocking: timeout.\n");
++ ret = -ETIME;
++ } else if (signal_pending(current)) {
++ dev_warn(vpu_dev, "VPU interrupt received.\n");
++ ret = -ERESTARTSYS;
++ } else
++ irq_status = 0;
++ break;
++ }
++ case VPU_IOC_IRAM_SETTING:
++ {
++ ret = copy_to_user((void __user *)arg, &iram,
++ sizeof(struct iram_setting));
++ if (ret)
++ ret = -EFAULT;
++
++ break;
++ }
++ case VPU_IOC_CLKGATE_SETTING:
++ {
++ u32 clkgate_en;
++
++ if (get_user(clkgate_en, (u32 __user *) arg))
++ return -EFAULT;
++
++ if (clkgate_en) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ atomic_inc(&clk_cnt_from_ioc);
++ } else {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ atomic_dec(&clk_cnt_from_ioc);
++ }
++
++ break;
++ }
++ case VPU_IOC_GET_SHARE_MEM:
++ {
++ mutex_lock(&vpu_data.lock);
++ if (share_mem.cpu_addr != 0) {
++ ret = copy_to_user((void __user *)arg,
++ &share_mem,
++ sizeof(struct vpu_mem_desc));
++ mutex_unlock(&vpu_data.lock);
++ break;
++ } else {
++ if (copy_from_user(&share_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc))) {
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++ if (vpu_alloc_dma_buffer(&share_mem) == -1)
++ ret = -EFAULT;
++ else {
++ if (copy_to_user((void __user *)arg,
++ &share_mem,
++ sizeof(struct
++ vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ }
++ mutex_unlock(&vpu_data.lock);
++ break;
++ }
++ case VPU_IOC_REQ_VSHARE_MEM:
++ {
++ mutex_lock(&vpu_data.lock);
++ if (vshare_mem.cpu_addr != 0) {
++ ret = copy_to_user((void __user *)arg,
++ &vshare_mem,
++ sizeof(struct vpu_mem_desc));
++ mutex_unlock(&vpu_data.lock);
++ break;
++ } else {
++ if (copy_from_user(&vshare_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct
++ vpu_mem_desc))) {
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++ /* vmalloc shared memory if not allocated */
++ if (!vshare_mem.cpu_addr)
++ vshare_mem.cpu_addr =
++ (unsigned long)
++ vmalloc_user(vshare_mem.size);
++ if (copy_to_user
++ ((void __user *)arg, &vshare_mem,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ mutex_unlock(&vpu_data.lock);
++ break;
++ }
++ case VPU_IOC_GET_WORK_ADDR:
++ {
++ if (bitwork_mem.cpu_addr != 0) {
++ ret =
++ copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct vpu_mem_desc));
++ break;
++ } else {
++ if (copy_from_user(&bitwork_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc)))
++ return -EFAULT;
++
++ if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
++ ret = -EFAULT;
++ else if (copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct
++ vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ break;
++ }
++ /*
++ * The following two ioctl is used when user allocates working buffer
++ * and register it to vpu driver.
++ */
++ case VPU_IOC_QUERY_BITWORK_MEM:
++ {
++ if (copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ break;
++ }
++ case VPU_IOC_SET_BITWORK_MEM:
++ {
++ if (copy_from_user(&bitwork_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ break;
++ }
++ case VPU_IOC_SYS_SW_RESET:
++ {
++ vpu_reset();
++ break;
++ }
++ case VPU_IOC_REG_DUMP:
++ break;
++ case VPU_IOC_PHYMEM_DUMP:
++ break;
++ case VPU_IOC_PHYMEM_CHECK:
++ {
++ struct vpu_mem_desc check_memory;
++ ret = copy_from_user(&check_memory,
++ (void __user *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret != 0) {
++ dev_err(vpu_dev, "copy from user failure:%d\n", ret);
++ ret = -EFAULT;
++ break;
++ }
++ ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);
++
++ dev_dbg(vpu_dev, "vpu: memory phy:0x%x %s phy memory\n",
++ check_memory.phy_addr, (ret ? "is" : "isn't"));
++ /* borrow .size to pass back the result. */
++ check_memory.size = ret;
++ ret = copy_to_user((void __user *)arg, &check_memory,
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ ret = -EFAULT;
++ break;
++ }
++ break;
++ }
++ case VPU_IOC_LOCK_DEV:
++ {
++ u32 lock_en;
++
++ if (get_user(lock_en, (u32 __user *) arg))
++ return -EFAULT;
++
++ if (lock_en)
++ mutex_lock(&vpu_data.lock);
++ else
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ default:
++ {
++ dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
++ ret = -EINVAL;
++ break;
++ }
++ }
++ return ret;
++}
++
++/*!
++ * @brief Release function for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_release(struct inode *inode, struct file *filp)
++{
++ int i;
++ unsigned long timeout;
++
++ mutex_lock(&vpu_data.lock);
++
++ if (open_count > 0 && !(--open_count)) {
++
++ /* Wait for vpu go to idle state */
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_CUR_PC)) {
++
++ timeout = jiffies + HZ;
++ while (READ_REG(BIT_BUSY_FLAG)) {
++ msleep(1);
++ if (time_after(jiffies, timeout)) {
++ dev_warn(vpu_dev, "VPU timeout during release\n");
++ break;
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ /* Clean up interrupt */
++ cancel_work_sync(&vpu_data.work);
++ flush_workqueue(vpu_data.workqueue);
++ irq_status = 0;
++
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_BUSY_FLAG)) {
++
++ if (cpu_is_mx51() || cpu_is_mx53()) {
++ dev_err(vpu_dev,
++ "fatal error: can't gate/power off when VPU is busy\n");
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++
++#ifdef CONFIG_SOC_IMX6Q
++ if (cpu_is_mx6dl() || cpu_is_mx6q()) {
++ WRITE_REG(0x11, 0x10F0);
++ timeout = jiffies + HZ;
++ while (READ_REG(0x10F4) != 0x77) {
++ msleep(1);
++ if (time_after(jiffies, timeout))
++ break;
++ }
++
++ if (READ_REG(0x10F4) != 0x77) {
++ dev_err(vpu_dev,
++ "fatal error: can't gate/power off when VPU is busy\n");
++ WRITE_REG(0x0, 0x10F0);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ } else
++ vpu_reset();
++ }
++#endif
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ vpu_free_buffers();
++
++ /* Free shared memory when vpu device is idle */
++ vpu_free_dma_buffer(&share_mem);
++ share_mem.cpu_addr = 0;
++ vfree((void *)vshare_mem.cpu_addr);
++ vshare_mem.cpu_addr = 0;
++
++ vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ atomic_dec(&clk_cnt_from_ioc);
++ }
++
++ vpu_power_up(false);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_put_sync_suspend(vpu_dev);
++#endif
++
++ }
++ mutex_unlock(&vpu_data.lock);
++
++ return 0;
++}
++
++/*!
++ * @brief fasync function for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_fasync(int fd, struct file *filp, int mode)
++{
++ struct vpu_priv *dev = (struct vpu_priv *)filp->private_data;
++ return fasync_helper(fd, filp, mode, &dev->async_queue);
++}
++
++/*!
++ * @brief memory map function of harware registers for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
++{
++ unsigned long pfn;
++
++ vm->vm_flags |= VM_IO | VM_RESERVED;
++ /*
++ * Since vpu registers have been mapped with ioremap() at probe
++ * which L_PTE_XN is 1, and the same physical address must be
++ * mapped multiple times with same type, so set L_PTE_XN to 1 here.
++ * Otherwise, there may be unexpected result in video codec.
++ */
++ vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
++ pfn = phy_vpu_base_addr >> PAGE_SHIFT;
++ dev_dbg(vpu_dev, "size=0x%x, page no.=0x%x\n",
++ (int)(vm->vm_end - vm->vm_start), (int)pfn);
++ return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
++ vm->vm_page_prot) ? -EAGAIN : 0;
++}
++
++/*!
++ * @brief memory map function of memory for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
++{
++ int request_size;
++ request_size = vm->vm_end - vm->vm_start;
++
++ dev_dbg(vpu_dev, "start=0x%x, pgoff=0x%x, size=0x%x\n",
++ (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
++ request_size);
++
++ vm->vm_flags |= VM_IO | VM_RESERVED;
++ vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
++
++ return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
++ request_size, vm->vm_page_prot) ? -EAGAIN : 0;
++
++}
++
++/* !
++ * @brief memory map function of vmalloced share memory
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
++{
++ int ret = -EINVAL;
++
++ ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
++ vm->vm_flags |= VM_IO;
++
++ return ret;
++}
++/*!
++ * @brief memory map interface for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
++{
++ unsigned long offset;
++
++ offset = vshare_mem.cpu_addr >> PAGE_SHIFT;
++
++ if (vm->vm_pgoff && (vm->vm_pgoff == offset))
++ return vpu_map_vshare_mem(fp, vm);
++ else if (vm->vm_pgoff)
++ return vpu_map_dma_mem(fp, vm);
++ else
++ return vpu_map_hwregs(fp, vm);
++}
++
++const struct file_operations vpu_fops = {
++ .owner = THIS_MODULE,
++ .open = vpu_open,
++ .unlocked_ioctl = vpu_ioctl,
++ .release = vpu_release,
++ .fasync = vpu_fasync,
++ .mmap = vpu_mmap,
++};
++
++/*!
++ * This function is called by the driver framework to initialize the vpu device.
++ * @param dev The device structure for the vpu passed in by the framework.
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_dev_probe(struct platform_device *pdev)
++{
++ int err = 0;
++ struct device *temp_class;
++ struct resource *res;
++ unsigned long addr = 0;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ struct device_node *np = pdev->dev.of_node;
++ u32 iramsize;
++
++ err = of_property_read_u32(np, "iramsize", (u32 *)&iramsize);
++ if (!err && iramsize)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ {
++ iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ return -ENOMEM;
++ }
++
++ iram_base = gen_pool_alloc(iram_pool, iramsize);
++ if (!iram_base) {
++ dev_err(&pdev->dev, "unable to alloc iram\n");
++ return -ENOMEM;
++ }
++
++ addr = gen_pool_virt_to_phys(iram_pool, iram_base);
++ }
++#else
++ iram_alloc(iramsize, &addr);
++#endif
++ if (addr == 0)
++ iram.start = iram.end = 0;
++ else {
++ iram.start = addr;
++ iram.end = addr + iramsize - 1;
++ }
++#else
++
++ vpu_plat = pdev->dev.platform_data;
++
++ if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
++ iram_alloc(vpu_plat->iram_size, &addr);
++ if (addr == 0)
++ iram.start = iram.end = 0;
++ else {
++ iram.start = addr;
++ iram.end = addr + vpu_plat->iram_size - 1;
++ }
++#endif
++
++ vpu_dev = &pdev->dev;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
++ if (!res) {
++ dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
++ return -ENODEV;
++ }
++ phy_vpu_base_addr = res->start;
++ vpu_base = ioremap(res->start, res->end - res->start);
++
++ vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
++ if (vpu_major < 0) {
++ dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
++ err = -EBUSY;
++ goto error;
++ }
++
++ vpu_class = class_create(THIS_MODULE, "mxc_vpu");
++ if (IS_ERR(vpu_class)) {
++ err = PTR_ERR(vpu_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
++ NULL, "mxc_vpu");
++ if (IS_ERR(temp_class)) {
++ err = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ vpu_clk = clk_get(&pdev->dev, "vpu_clk");
++ if (IS_ERR(vpu_clk)) {
++ err = -ENOENT;
++ goto err_out_class;
++ }
++
++ vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
++ if (vpu_ipi_irq < 0) {
++ dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
++ err = -ENXIO;
++ goto err_out_class;
++ }
++ err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
++ (void *)(&vpu_data));
++ if (err)
++ goto err_out_class;
++ if (vpu_power_get(true)) {
++ if (!(cpu_is_mx51() || cpu_is_mx53())) {
++ dev_err(vpu_dev, "failed to get vpu power\n");
++ goto err_out_class;
++ } else {
++ /* regulator_get will return error on MX5x,
++ * just igore it everywhere*/
++ dev_warn(vpu_dev, "failed to get vpu power\n");
++ }
++ }
++
++#ifdef MXC_VPU_HAS_JPU
++ vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
++ if (vpu_jpu_irq < 0) {
++ dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
++ err = -ENXIO;
++ free_irq(vpu_ipi_irq, &vpu_data);
++ goto err_out_class;
++ }
++ err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
++ "VPU_JPG_IRQ", (void *)(&vpu_data));
++ if (err) {
++ free_irq(vpu_ipi_irq, &vpu_data);
++ goto err_out_class;
++ }
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_enable(&pdev->dev);
++#endif
++
++ vpu_data.workqueue = create_workqueue("vpu_wq");
++ INIT_WORK(&vpu_data.work, vpu_worker_callback);
++ mutex_init(&vpu_data.lock);
++ dev_info(vpu_dev, "VPU initialized\n");
++ goto out;
++
++err_out_class:
++ device_destroy(vpu_class, MKDEV(vpu_major, 0));
++ class_destroy(vpu_class);
++err_out_chrdev:
++ unregister_chrdev(vpu_major, "mxc_vpu");
++error:
++ iounmap(vpu_base);
++out:
++ return err;
++}
++
++static int vpu_dev_remove(struct platform_device *pdev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_disable(&pdev->dev);
++#endif
++ free_irq(vpu_ipi_irq, &vpu_data);
++#ifdef MXC_VPU_HAS_JPU
++ free_irq(vpu_jpu_irq, &vpu_data);
++#endif
++ cancel_work_sync(&vpu_data.work);
++ flush_workqueue(vpu_data.workqueue);
++ destroy_workqueue(vpu_data.workqueue);
++
++ iounmap(vpu_base);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ if (iram.start)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
++#else
++ iram_free(iram.start, iram.end-iram.start+1);
++#endif
++#else
++ if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
++ iram_free(iram.start, vpu_plat->iram_size);
++#endif
++
++ vpu_power_get(false);
++ return 0;
++}
++
++#ifdef CONFIG_PM
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_suspend(struct device *dev)
++#else
++static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
++#endif
++{
++ int i;
++ unsigned long timeout;
++
++ mutex_lock(&vpu_data.lock);
++ if (open_count == 0) {
++ /* VPU is released (all instances are freed),
++ * clock is already off, context is no longer needed,
++ * power is already off on MX6,
++ * gate power on MX51 */
++ if (cpu_is_mx51()) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(1);
++#endif
++ }
++ } else {
++ /* Wait for vpu go to idle state, suspect vpu cannot be changed
++ to idle state after about 1 sec */
++ timeout = jiffies + HZ;
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ while (READ_REG(BIT_BUSY_FLAG)) {
++ msleep(1);
++ if (time_after(jiffies, timeout)) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EAGAIN;
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ /* Make sure clock is disabled before suspend */
++ vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++ if (cpu_is_mx53()) {
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++ }
++
++ if (bitwork_mem.cpu_addr != 0) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ /* Save 64 registers from BIT_CODE_BUF_ADDR */
++ for (i = 0; i < 64; i++)
++ regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
++ pc_before_suspend = READ_REG(BIT_CUR_PC);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(1);
++#endif
++
++ /* If VPU is working before suspend, disable
++ * regulator to make usecount right. */
++ vpu_power_up(false);
++ }
++
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_resume(struct device *dev)
++#else
++static int vpu_resume(struct platform_device *pdev)
++#endif
++{
++ int i;
++
++ mutex_lock(&vpu_data.lock);
++ if (open_count == 0) {
++ /* VPU is released (all instances are freed),
++ * clock should be kept off, context is no longer needed,
++ * power should be kept off on MX6,
++ * disable power gating on MX51 */
++ if (cpu_is_mx51()) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(0);
++#endif
++ }
++ } else {
++ if (cpu_is_mx53())
++ goto recover_clk;
++
++ /* If VPU is working before suspend, enable
++ * regulator to make usecount right. */
++ vpu_power_up(true);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(0);
++#endif
++
++ if (bitwork_mem.cpu_addr != 0) {
++ u32 *p = (u32 *) bitwork_mem.cpu_addr;
++ u32 data, pc;
++ u16 data_hi;
++ u16 data_lo;
++
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++
++ pc = READ_REG(BIT_CUR_PC);
++ if (pc) {
++ dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ goto recover_clk;
++ }
++
++ /* Restore registers */
++ for (i = 0; i < 64; i++)
++ WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
++
++ WRITE_REG(0x0, BIT_RESET_CTRL);
++ WRITE_REG(0x0, BIT_CODE_RUN);
++ /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
++#ifdef CONFIG_SOC_IMX6Q
++ WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
++#endif
++
++ /*
++ * Re-load boot code, from the codebuffer in external RAM.
++ * Thankfully, we only need 4096 bytes, same for all platforms.
++ */
++ for (i = 0; i < 2048; i += 4) {
++ data = p[(i / 2) + 1];
++ data_hi = (data >> 16) & 0xFFFF;
++ data_lo = data & 0xFFFF;
++ WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
++ WRITE_REG(((i + 1) << 16) | data_lo,
++ BIT_CODE_DOWN);
++
++ data = p[i / 2];
++ data_hi = (data >> 16) & 0xFFFF;
++ data_lo = data & 0xFFFF;
++ WRITE_REG(((i + 2) << 16) | data_hi,
++ BIT_CODE_DOWN);
++ WRITE_REG(((i + 3) << 16) | data_lo,
++ BIT_CODE_DOWN);
++ }
++
++ if (pc_before_suspend) {
++ WRITE_REG(0x1, BIT_BUSY_FLAG);
++ WRITE_REG(0x1, BIT_CODE_RUN);
++ while (READ_REG(BIT_BUSY_FLAG))
++ ;
++ } else {
++ dev_warn(vpu_dev, "PC=0 before suspend\n");
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++recover_clk:
++ /* Recover vpu clock */
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ }
++ }
++
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int vpu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static const struct dev_pm_ops vpu_pm_ops = {
++ SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
++};
++#endif
++
++#else
++#define vpu_suspend NULL
++#define vpu_resume NULL
++#endif /* !CONFIG_PM */
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static const struct of_device_id vpu_of_match[] = {
++ { .compatible = "fsl,imx6-vpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, vpu_of_match);
++#endif
++
++/*! Driver definition
++ *
++ */
++static struct platform_driver mxcvpu_driver = {
++ .driver = {
++ .name = "mxc_vpu",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ .of_match_table = vpu_of_match,
++#ifdef CONFIG_PM
++ .pm = &vpu_pm_ops,
++#endif
++#endif
++ },
++ .probe = vpu_dev_probe,
++ .remove = vpu_dev_remove,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ .suspend = vpu_suspend,
++ .resume = vpu_resume,
++#endif
++};
++
++static int __init vpu_init(void)
++{
++ int ret = platform_driver_register(&mxcvpu_driver);
++
++ init_waitqueue_head(&vpu_queue);
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ memblock_analyze();
++ top_address_DRAM = memblock_end_of_DRAM_with_reserved();
++#endif
++
++ return ret;
++}
++
++static void __exit vpu_exit(void)
++{
++ if (vpu_major > 0) {
++ device_destroy(vpu_class, MKDEV(vpu_major, 0));
++ class_destroy(vpu_class);
++ unregister_chrdev(vpu_major, "mxc_vpu");
++ vpu_major = 0;
++ }
++
++ vpu_free_dma_buffer(&bitwork_mem);
++ vpu_free_dma_buffer(&pic_para_mem);
++ vpu_free_dma_buffer(&user_data_mem);
++
++ /* reset VPU state */
++ vpu_power_up(true);
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ vpu_reset();
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ vpu_power_up(false);
++
++ clk_put(vpu_clk);
++
++ platform_driver_unregister(&mxcvpu_driver);
++ return;
++}
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
++MODULE_LICENSE("GPL");
++
++module_init(vpu_init);
++module_exit(vpu_exit);
+diff -Nur linux-3.14.36/drivers/net/bonding/bonding.h linux-openelec/drivers/net/bonding/bonding.h
+--- linux-3.14.36/drivers/net/bonding/bonding.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/bonding/bonding.h 2015-05-06 12:05:42.000000000 -0500
+@@ -188,7 +188,8 @@
+ struct net_device *dev; /* first - useful for panic debug */
+ struct bonding *bond; /* our master */
+ int delay;
+- unsigned long jiffies;
++ /* all three in jiffies */
++ unsigned long last_link_up;
+ unsigned long last_arp_rx;
+ unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
+ s8 link; /* one of BOND_LINK_XXXX */
+diff -Nur linux-3.14.36/drivers/net/bonding/bond_main.c linux-openelec/drivers/net/bonding/bond_main.c
+--- linux-3.14.36/drivers/net/bonding/bond_main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/bonding/bond_main.c 2015-07-24 18:03:29.080842002 -0500
+@@ -798,7 +798,7 @@
+ return;
+
+ if (new_active) {
+- new_active->jiffies = jiffies;
++ new_active->last_link_up = jiffies;
+
+ if (new_active->link == BOND_LINK_BACK) {
+ if (USES_PRIMARY(bond->params.mode)) {
+@@ -1457,7 +1457,7 @@
+ }
+
+ if (new_slave->link != BOND_LINK_DOWN)
+- new_slave->jiffies = jiffies;
++ new_slave->last_link_up = jiffies;
+ pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
+ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+@@ -1908,7 +1908,7 @@
+ * recovered before downdelay expired
+ */
+ slave->link = BOND_LINK_UP;
+- slave->jiffies = jiffies;
++ slave->last_link_up = jiffies;
+ pr_info("%s: link status up again after %d ms for interface %s.\n",
+ bond->dev->name,
+ (bond->params.downdelay - slave->delay) *
+@@ -1983,7 +1983,7 @@
+
+ case BOND_LINK_UP:
+ slave->link = BOND_LINK_UP;
+- slave->jiffies = jiffies;
++ slave->last_link_up = jiffies;
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ /* prevent it from being the active one */
+@@ -2268,6 +2268,7 @@
+ struct slave *slave)
+ {
+ struct arphdr *arp = (struct arphdr *)skb->data;
++ struct slave *curr_active_slave;
+ unsigned char *arp_ptr;
+ __be32 sip, tip;
+ int alen;
+@@ -2312,6 +2313,8 @@
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ &sip, &tip);
+
++ curr_active_slave = rcu_dereference(bond->curr_active_slave);
++
+ /*
+ * Backup slaves won't see the ARP reply, but do come through
+ * here for each ARP probe (so we swap the sip/tip to validate
+@@ -2325,11 +2328,12 @@
+ * is done to avoid endless looping when we can't reach the
+ * arp_ip_target and fool ourselves with our own arp requests.
+ */
++
+ if (bond_is_active_slave(slave))
+ bond_validate_arp(bond, slave, sip, tip);
+- else if (bond->curr_active_slave &&
+- time_after(slave_last_rx(bond, bond->curr_active_slave),
+- bond->curr_active_slave->jiffies))
++ else if (curr_active_slave &&
++ time_after(slave_last_rx(bond, curr_active_slave),
++ curr_active_slave->last_link_up))
+ bond_validate_arp(bond, slave, tip, sip);
+
+ out_unlock:
+@@ -2376,9 +2380,9 @@
+ oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
+ /* see if any of the previous devices are up now (i.e. they have
+ * xmt and rcv traffic). the curr_active_slave does not come into
+- * the picture unless it is null. also, slave->jiffies is not needed
+- * here because we send an arp on each slave and give a slave as
+- * long as it needs to get the tx/rx within the delta.
++ * the picture unless it is null. also, slave->last_link_up is not
++ * needed here because we send an arp on each slave and give a slave
++ * as long as it needs to get the tx/rx within the delta.
+ * TODO: what about up/down delay in arp mode? it wasn't here before
+ * so it can wait
+ */
+@@ -2505,7 +2509,7 @@
+ * active. This avoids bouncing, as the last receive
+ * times need a full ARP monitor cycle to be updated.
+ */
+- if (bond_time_in_interval(bond, slave->jiffies, 2))
++ if (bond_time_in_interval(bond, slave->last_link_up, 2))
+ continue;
+
+ /*
+@@ -2698,7 +2702,7 @@
+ new_slave->link = BOND_LINK_BACK;
+ bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
+ bond_arp_send_all(bond, new_slave);
+- new_slave->jiffies = jiffies;
++ new_slave->last_link_up = jiffies;
+ rcu_assign_pointer(bond->current_arp_slave, new_slave);
+
+ check_state:
+diff -Nur linux-3.14.36/drivers/net/bonding/bond_main.c.orig linux-openelec/drivers/net/bonding/bond_main.c.orig
+--- linux-3.14.36/drivers/net/bonding/bond_main.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/bonding/bond_main.c.orig 2015-07-24 18:03:28.588842002 -0500
+@@ -0,0 +1,4584 @@
++/*
++ * originally based on the dummy device.
++ *
++ * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
++ * Licensed under the GPL. Based on dummy.c, and eql.c devices.
++ *
++ * bonding.c: an Ethernet Bonding driver
++ *
++ * This is useful to talk to a Cisco EtherChannel compatible equipment:
++ * Cisco 5500
++ * Sun Trunking (Solaris)
++ * Alteon AceDirector Trunks
++ * Linux Bonding
++ * and probably many L2 switches ...
++ *
++ * How it works:
++ * ifconfig bond0 ipaddress netmask up
++ * will setup a network device, with an ip address. No mac address
++ * will be assigned at this time. The hw mac address will come from
++ * the first slave bonded to the channel. All slaves will then use
++ * this hw mac address.
++ *
++ * ifconfig bond0 down
++ * will release all slaves, marking them as down.
++ *
++ * ifenslave bond0 eth0
++ * will attach eth0 to bond0 as a slave. eth0 hw mac address will either
++ * a: be used as initial mac address
++ * b: if a hw mac address already is there, eth0's hw mac address
++ * will then be set from bond0.
++ *
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fcntl.h>
++#include <linux/interrupt.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/in.h>
++#include <net/ip.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/socket.h>
++#include <linux/ctype.h>
++#include <linux/inet.h>
++#include <linux/bitops.h>
++#include <linux/io.h>
++#include <asm/dma.h>
++#include <linux/uaccess.h>
++#include <linux/errno.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/igmp.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <net/sock.h>
++#include <linux/rtnetlink.h>
++#include <linux/smp.h>
++#include <linux/if_ether.h>
++#include <net/arp.h>
++#include <linux/mii.h>
++#include <linux/ethtool.h>
++#include <linux/if_vlan.h>
++#include <linux/if_bonding.h>
++#include <linux/jiffies.h>
++#include <linux/preempt.h>
++#include <net/route.h>
++#include <net/net_namespace.h>
++#include <net/netns/generic.h>
++#include <net/pkt_sched.h>
++#include <linux/rculist.h>
++#include <net/flow_keys.h>
++#include "bonding.h"
++#include "bond_3ad.h"
++#include "bond_alb.h"
++
++/*---------------------------- Module parameters ----------------------------*/
++
++/* monitor all links that often (in milliseconds). <=0 disables monitoring */
++
++static int max_bonds = BOND_DEFAULT_MAX_BONDS;
++static int tx_queues = BOND_DEFAULT_TX_QUEUES;
++static int num_peer_notif = 1;
++static int miimon;
++static int updelay;
++static int downdelay;
++static int use_carrier = 1;
++static char *mode;
++static char *primary;
++static char *primary_reselect;
++static char *lacp_rate;
++static int min_links;
++static char *ad_select;
++static char *xmit_hash_policy;
++static int arp_interval;
++static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
++static char *arp_validate;
++static char *arp_all_targets;
++static char *fail_over_mac;
++static int all_slaves_active;
++static struct bond_params bonding_defaults;
++static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
++static int packets_per_slave = 1;
++static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
++
++module_param(max_bonds, int, 0);
++MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
++module_param(tx_queues, int, 0);
++MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
++module_param_named(num_grat_arp, num_peer_notif, int, 0644);
++MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
++ "failover event (alias of num_unsol_na)");
++module_param_named(num_unsol_na, num_peer_notif, int, 0644);
++MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
++ "failover event (alias of num_grat_arp)");
++module_param(miimon, int, 0);
++MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
++module_param(updelay, int, 0);
++MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
++module_param(downdelay, int, 0);
++MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
++ "in milliseconds");
++module_param(use_carrier, int, 0);
++MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
++ "0 for off, 1 for on (default)");
++module_param(mode, charp, 0);
++MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
++ "1 for active-backup, 2 for balance-xor, "
++ "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
++ "6 for balance-alb");
++module_param(primary, charp, 0);
++MODULE_PARM_DESC(primary, "Primary network device to use");
++module_param(primary_reselect, charp, 0);
++MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
++ "once it comes up; "
++ "0 for always (default), "
++ "1 for only if speed of primary is "
++ "better, "
++ "2 for only on active slave "
++ "failure");
++module_param(lacp_rate, charp, 0);
++MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
++ "0 for slow, 1 for fast");
++module_param(ad_select, charp, 0);
++MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
++ "0 for stable (default), 1 for bandwidth, "
++ "2 for count");
++module_param(min_links, int, 0);
++MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
++
++module_param(xmit_hash_policy, charp, 0);
++MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
++ "0 for layer 2 (default), 1 for layer 3+4, "
++ "2 for layer 2+3, 3 for encap layer 2+3, "
++ "4 for encap layer 3+4");
++module_param(arp_interval, int, 0);
++MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
++module_param_array(arp_ip_target, charp, NULL, 0);
++MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
++module_param(arp_validate, charp, 0);
++MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
++ "0 for none (default), 1 for active, "
++ "2 for backup, 3 for all");
++module_param(arp_all_targets, charp, 0);
++MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
++module_param(fail_over_mac, charp, 0);
++MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
++ "the same MAC; 0 for none (default), "
++ "1 for active, 2 for follow");
++module_param(all_slaves_active, int, 0);
++MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
++ "by setting active flag for all slaves; "
++ "0 for never (default), 1 for always.");
++module_param(resend_igmp, int, 0);
++MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
++ "link failure");
++module_param(packets_per_slave, int, 0);
++MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
++ "mode; 0 for a random slave, 1 packet per "
++ "slave (default), >1 packets per slave.");
++module_param(lp_interval, uint, 0);
++MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
++ "the bonding driver sends learning packets to "
++ "each slaves peer switch. The default is 1.");
++
++/*----------------------------- Global variables ----------------------------*/
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++atomic_t netpoll_block_tx = ATOMIC_INIT(0);
++#endif
++
++int bond_net_id __read_mostly;
++
++static __be32 arp_target[BOND_MAX_ARP_TARGETS];
++static int arp_ip_count;
++static int bond_mode = BOND_MODE_ROUNDROBIN;
++static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
++static int lacp_fast;
++
++/*-------------------------- Forward declarations ---------------------------*/
++
++static int bond_init(struct net_device *bond_dev);
++static void bond_uninit(struct net_device *bond_dev);
++
++/*---------------------------- General routines -----------------------------*/
++
++const char *bond_mode_name(int mode)
++{
++ static const char *names[] = {
++ [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
++ [BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
++ [BOND_MODE_XOR] = "load balancing (xor)",
++ [BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
++ [BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
++ [BOND_MODE_TLB] = "transmit load balancing",
++ [BOND_MODE_ALB] = "adaptive load balancing",
++ };
++
++ if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
++ return "unknown";
++
++ return names[mode];
++}
++
++/*---------------------------------- VLAN -----------------------------------*/
++
++/**
++ * bond_dev_queue_xmit - Prepare skb for xmit.
++ *
++ * @bond: bond device that got this skb for tx.
++ * @skb: hw accel VLAN tagged skb to transmit
++ * @slave_dev: slave that is supposed to xmit this skbuff
++ */
++void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
++ struct net_device *slave_dev)
++{
++ skb->dev = slave_dev;
++
++ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
++ sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
++ skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
++
++ if (unlikely(netpoll_tx_running(bond->dev)))
++ bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
++ else
++ dev_queue_xmit(skb);
++}
++
++/*
++ * In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
++ * We don't protect the slave list iteration with a lock because:
++ * a. This operation is performed in IOCTL context,
++ * b. The operation is protected by the RTNL semaphore in the 8021q code,
++ * c. Holding a lock with BH disabled while directly calling a base driver
++ * entry point is generally a BAD idea.
++ *
++ * The design of synchronization/protection for this operation in the 8021q
++ * module is good for one or more VLAN devices over a single physical device
++ * and cannot be extended for a teaming solution like bonding, so there is a
++ * potential race condition here where a net device from the vlan group might
++ * be referenced (either by a base driver or the 8021q code) while it is being
++ * removed from the system. However, it turns out we're not making matters
++ * worse, and if it works for regular VLAN usage it will work here too.
++*/
++
++/**
++ * bond_vlan_rx_add_vid - Propagates adding an id to slaves
++ * @bond_dev: bonding net device that got called
++ * @vid: vlan id being added
++ */
++static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
++ __be16 proto, u16 vid)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct slave *slave, *rollback_slave;
++ struct list_head *iter;
++ int res;
++
++ bond_for_each_slave(bond, slave, iter) {
++ res = vlan_vid_add(slave->dev, proto, vid);
++ if (res)
++ goto unwind;
++ }
++
++ return 0;
++
++unwind:
++ /* unwind to the slave that failed */
++ bond_for_each_slave(bond, rollback_slave, iter) {
++ if (rollback_slave == slave)
++ break;
++
++ vlan_vid_del(rollback_slave->dev, proto, vid);
++ }
++
++ return res;
++}
++
++/**
++ * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
++ * @bond_dev: bonding net device that got called
++ * @vid: vlan id being removed
++ */
++static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
++ __be16 proto, u16 vid)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct list_head *iter;
++ struct slave *slave;
++
++ bond_for_each_slave(bond, slave, iter)
++ vlan_vid_del(slave->dev, proto, vid);
++
++ if (bond_is_lb(bond))
++ bond_alb_clear_vlan(bond, vid);
++
++ return 0;
++}
++
++/*------------------------------- Link status -------------------------------*/
++
++/*
++ * Set the carrier state for the master according to the state of its
++ * slaves. If any slaves are up, the master is up. In 802.3ad mode,
++ * do special 802.3ad magic.
++ *
++ * Returns zero if carrier state does not change, nonzero if it does.
++ */
++static int bond_set_carrier(struct bonding *bond)
++{
++ struct list_head *iter;
++ struct slave *slave;
++
++ if (!bond_has_slaves(bond))
++ goto down;
++
++ if (bond->params.mode == BOND_MODE_8023AD)
++ return bond_3ad_set_carrier(bond);
++
++ bond_for_each_slave(bond, slave, iter) {
++ if (slave->link == BOND_LINK_UP) {
++ if (!netif_carrier_ok(bond->dev)) {
++ netif_carrier_on(bond->dev);
++ return 1;
++ }
++ return 0;
++ }
++ }
++
++down:
++ if (netif_carrier_ok(bond->dev)) {
++ netif_carrier_off(bond->dev);
++ return 1;
++ }
++ return 0;
++}
++
++/*
++ * Get link speed and duplex from the slave's base driver
++ * using ethtool. If for some reason the call fails or the
++ * values are invalid, set speed and duplex to -1,
++ * and return.
++ */
++static void bond_update_speed_duplex(struct slave *slave)
++{
++ struct net_device *slave_dev = slave->dev;
++ struct ethtool_cmd ecmd;
++ u32 slave_speed;
++ int res;
++
++ slave->speed = SPEED_UNKNOWN;
++ slave->duplex = DUPLEX_UNKNOWN;
++
++ res = __ethtool_get_settings(slave_dev, &ecmd);
++ if (res < 0)
++ return;
++
++ slave_speed = ethtool_cmd_speed(&ecmd);
++ if (slave_speed == 0 || slave_speed == ((__u32) -1))
++ return;
++
++ switch (ecmd.duplex) {
++ case DUPLEX_FULL:
++ case DUPLEX_HALF:
++ break;
++ default:
++ return;
++ }
++
++ slave->speed = slave_speed;
++ slave->duplex = ecmd.duplex;
++
++ return;
++}
++
++const char *bond_slave_link_status(s8 link)
++{
++ switch (link) {
++ case BOND_LINK_UP:
++ return "up";
++ case BOND_LINK_FAIL:
++ return "going down";
++ case BOND_LINK_DOWN:
++ return "down";
++ case BOND_LINK_BACK:
++ return "going back";
++ default:
++ return "unknown";
++ }
++}
++
++/*
++ * if <dev> supports MII link status reporting, check its link status.
++ *
++ * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
++ * depending upon the setting of the use_carrier parameter.
++ *
++ * Return either BMSR_LSTATUS, meaning that the link is up (or we
++ * can't tell and just pretend it is), or 0, meaning that the link is
++ * down.
++ *
++ * If reporting is non-zero, instead of faking link up, return -1 if
++ * both ETHTOOL and MII ioctls fail (meaning the device does not
++ * support them). If use_carrier is set, return whatever it says.
++ * It'd be nice if there was a good way to tell if a driver supports
++ * netif_carrier, but there really isn't.
++ */
++static int bond_check_dev_link(struct bonding *bond,
++ struct net_device *slave_dev, int reporting)
++{
++ const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
++ int (*ioctl)(struct net_device *, struct ifreq *, int);
++ struct ifreq ifr;
++ struct mii_ioctl_data *mii;
++
++ if (!reporting && !netif_running(slave_dev))
++ return 0;
++
++ if (bond->params.use_carrier)
++ return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
++
++ /* Try to get link status using Ethtool first. */
++ if (slave_dev->ethtool_ops->get_link)
++ return slave_dev->ethtool_ops->get_link(slave_dev) ?
++ BMSR_LSTATUS : 0;
++
++ /* Ethtool can't be used, fallback to MII ioctls. */
++ ioctl = slave_ops->ndo_do_ioctl;
++ if (ioctl) {
++ /* TODO: set pointer to correct ioctl on a per team member */
++ /* bases to make this more efficient. that is, once */
++ /* we determine the correct ioctl, we will always */
++ /* call it and not the others for that team */
++ /* member. */
++
++ /*
++ * We cannot assume that SIOCGMIIPHY will also read a
++ * register; not all network drivers (e.g., e100)
++ * support that.
++ */
++
++ /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
++ strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
++ mii = if_mii(&ifr);
++ if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
++ mii->reg_num = MII_BMSR;
++ if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0)
++ return mii->val_out & BMSR_LSTATUS;
++ }
++ }
++
++ /*
++ * If reporting, report that either there's no dev->do_ioctl,
++ * or both SIOCGMIIREG and get_link failed (meaning that we
++ * cannot report link status). If not reporting, pretend
++ * we're ok.
++ */
++ return reporting ? -1 : BMSR_LSTATUS;
++}
++
++/*----------------------------- Multicast list ------------------------------*/
++
++/*
++ * Push the promiscuity flag down to appropriate slaves
++ */
++static int bond_set_promiscuity(struct bonding *bond, int inc)
++{
++ struct list_head *iter;
++ int err = 0;
++
++ if (USES_PRIMARY(bond->params.mode)) {
++ /* write lock already acquired */
++ if (bond->curr_active_slave) {
++ err = dev_set_promiscuity(bond->curr_active_slave->dev,
++ inc);
++ }
++ } else {
++ struct slave *slave;
++
++ bond_for_each_slave(bond, slave, iter) {
++ err = dev_set_promiscuity(slave->dev, inc);
++ if (err)
++ return err;
++ }
++ }
++ return err;
++}
++
++/*
++ * Push the allmulti flag down to all slaves
++ */
++static int bond_set_allmulti(struct bonding *bond, int inc)
++{
++ struct list_head *iter;
++ int err = 0;
++
++ if (USES_PRIMARY(bond->params.mode)) {
++ /* write lock already acquired */
++ if (bond->curr_active_slave) {
++ err = dev_set_allmulti(bond->curr_active_slave->dev,
++ inc);
++ }
++ } else {
++ struct slave *slave;
++
++ bond_for_each_slave(bond, slave, iter) {
++ err = dev_set_allmulti(slave->dev, inc);
++ if (err)
++ return err;
++ }
++ }
++ return err;
++}
++
++/*
++ * Retrieve the list of registered multicast addresses for the bonding
++ * device and retransmit an IGMP JOIN request to the current active
++ * slave.
++ */
++static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
++{
++ struct bonding *bond = container_of(work, struct bonding,
++ mcast_work.work);
++
++ if (!rtnl_trylock()) {
++ queue_delayed_work(bond->wq, &bond->mcast_work, 1);
++ return;
++ }
++ call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
++
++ if (bond->igmp_retrans > 1) {
++ bond->igmp_retrans--;
++ queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
++ }
++ rtnl_unlock();
++}
++
++/* Flush bond's hardware addresses from slave
++ */
++static void bond_hw_addr_flush(struct net_device *bond_dev,
++ struct net_device *slave_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++
++ dev_uc_unsync(slave_dev, bond_dev);
++ dev_mc_unsync(slave_dev, bond_dev);
++
++ if (bond->params.mode == BOND_MODE_8023AD) {
++ /* del lacpdu mc addr from mc list */
++ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
++
++ dev_mc_del(slave_dev, lacpdu_multicast);
++ }
++}
++
++/*--------------------------- Active slave change ---------------------------*/
++
++/* Update the hardware address list and promisc/allmulti for the new and
++ * old active slaves (if any). Modes that are !USES_PRIMARY keep all
++ * slaves up date at all times; only the USES_PRIMARY modes need to call
++ * this function to swap these settings during a failover.
++ */
++static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
++ struct slave *old_active)
++{
++ ASSERT_RTNL();
++
++ if (old_active) {
++ if (bond->dev->flags & IFF_PROMISC)
++ dev_set_promiscuity(old_active->dev, -1);
++
++ if (bond->dev->flags & IFF_ALLMULTI)
++ dev_set_allmulti(old_active->dev, -1);
++
++ bond_hw_addr_flush(bond->dev, old_active->dev);
++ }
++
++ if (new_active) {
++ /* FIXME: Signal errors upstream. */
++ if (bond->dev->flags & IFF_PROMISC)
++ dev_set_promiscuity(new_active->dev, 1);
++
++ if (bond->dev->flags & IFF_ALLMULTI)
++ dev_set_allmulti(new_active->dev, 1);
++
++ netif_addr_lock_bh(bond->dev);
++ dev_uc_sync(new_active->dev, bond->dev);
++ dev_mc_sync(new_active->dev, bond->dev);
++ netif_addr_unlock_bh(bond->dev);
++ }
++}
++
++/**
++ * bond_set_dev_addr - clone slave's address to bond
++ * @bond_dev: bond net device
++ * @slave_dev: slave net device
++ *
++ * Should be called with RTNL held.
++ */
++static void bond_set_dev_addr(struct net_device *bond_dev,
++ struct net_device *slave_dev)
++{
++ pr_debug("bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
++ bond_dev, slave_dev, slave_dev->addr_len);
++ memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
++ bond_dev->addr_assign_type = NET_ADDR_STOLEN;
++ call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
++}
++
++/*
++ * bond_do_fail_over_mac
++ *
++ * Perform special MAC address swapping for fail_over_mac settings
++ *
++ * Called with RTNL, curr_slave_lock for write_bh.
++ */
++static void bond_do_fail_over_mac(struct bonding *bond,
++ struct slave *new_active,
++ struct slave *old_active)
++ __releases(&bond->curr_slave_lock)
++ __acquires(&bond->curr_slave_lock)
++{
++ u8 tmp_mac[ETH_ALEN];
++ struct sockaddr saddr;
++ int rv;
++
++ switch (bond->params.fail_over_mac) {
++ case BOND_FOM_ACTIVE:
++ if (new_active) {
++ write_unlock_bh(&bond->curr_slave_lock);
++ bond_set_dev_addr(bond->dev, new_active->dev);
++ write_lock_bh(&bond->curr_slave_lock);
++ }
++ break;
++ case BOND_FOM_FOLLOW:
++ /*
++ * if new_active && old_active, swap them
++ * if just old_active, do nothing (going to no active slave)
++ * if just new_active, set new_active to bond's MAC
++ */
++ if (!new_active)
++ return;
++
++ write_unlock_bh(&bond->curr_slave_lock);
++
++ if (old_active) {
++ memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
++ memcpy(saddr.sa_data, old_active->dev->dev_addr,
++ ETH_ALEN);
++ saddr.sa_family = new_active->dev->type;
++ } else {
++ memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
++ saddr.sa_family = bond->dev->type;
++ }
++
++ rv = dev_set_mac_address(new_active->dev, &saddr);
++ if (rv) {
++ pr_err("%s: Error %d setting MAC of slave %s\n",
++ bond->dev->name, -rv, new_active->dev->name);
++ goto out;
++ }
++
++ if (!old_active)
++ goto out;
++
++ memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
++ saddr.sa_family = old_active->dev->type;
++
++ rv = dev_set_mac_address(old_active->dev, &saddr);
++ if (rv)
++ pr_err("%s: Error %d setting MAC of slave %s\n",
++ bond->dev->name, -rv, new_active->dev->name);
++out:
++ write_lock_bh(&bond->curr_slave_lock);
++ break;
++ default:
++ pr_err("%s: bond_do_fail_over_mac impossible: bad policy %d\n",
++ bond->dev->name, bond->params.fail_over_mac);
++ break;
++ }
++
++}
++
++static bool bond_should_change_active(struct bonding *bond)
++{
++ struct slave *prim = bond->primary_slave;
++ struct slave *curr = bond->curr_active_slave;
++
++ if (!prim || !curr || curr->link != BOND_LINK_UP)
++ return true;
++ if (bond->force_primary) {
++ bond->force_primary = false;
++ return true;
++ }
++ if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
++ (prim->speed < curr->speed ||
++ (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
++ return false;
++ if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
++ return false;
++ return true;
++}
++
++/**
++ * find_best_interface - select the best available slave to be the active one
++ * @bond: our bonding struct
++ */
++static struct slave *bond_find_best_slave(struct bonding *bond)
++{
++ struct slave *slave, *bestslave = NULL;
++ struct list_head *iter;
++ int mintime = bond->params.updelay;
++
++ if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
++ bond_should_change_active(bond))
++ return bond->primary_slave;
++
++ bond_for_each_slave(bond, slave, iter) {
++ if (slave->link == BOND_LINK_UP)
++ return slave;
++ if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
++ slave->delay < mintime) {
++ mintime = slave->delay;
++ bestslave = slave;
++ }
++ }
++
++ return bestslave;
++}
++
++static bool bond_should_notify_peers(struct bonding *bond)
++{
++ struct slave *slave;
++
++ rcu_read_lock();
++ slave = rcu_dereference(bond->curr_active_slave);
++ rcu_read_unlock();
++
++ pr_debug("bond_should_notify_peers: bond %s slave %s\n",
++ bond->dev->name, slave ? slave->dev->name : "NULL");
++
++ if (!slave || !bond->send_peer_notif ||
++ test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
++ return false;
++
++ return true;
++}
++
++/**
++ * change_active_interface - change the active slave into the specified one
++ * @bond: our bonding struct
++ * @new: the new slave to make the active one
++ *
++ * Set the new slave to the bond's settings and unset them on the old
++ * curr_active_slave.
++ * Setting include flags, mc-list, promiscuity, allmulti, etc.
++ *
++ * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
++ * because it is apparently the best available slave we have, even though its
++ * updelay hasn't timed out yet.
++ *
++ * If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
++ */
++void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
++{
++ struct slave *old_active = bond->curr_active_slave;
++
++ if (old_active == new_active)
++ return;
++
++ if (new_active) {
++ new_active->last_link_up = jiffies;
++
++ if (new_active->link == BOND_LINK_BACK) {
++ if (USES_PRIMARY(bond->params.mode)) {
++ pr_info("%s: making interface %s the new active one %d ms earlier.\n",
++ bond->dev->name, new_active->dev->name,
++ (bond->params.updelay - new_active->delay) * bond->params.miimon);
++ }
++
++ new_active->delay = 0;
++ new_active->link = BOND_LINK_UP;
++
++ if (bond->params.mode == BOND_MODE_8023AD)
++ bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
++
++ if (bond_is_lb(bond))
++ bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
++ } else {
++ if (USES_PRIMARY(bond->params.mode)) {
++ pr_info("%s: making interface %s the new active one.\n",
++ bond->dev->name, new_active->dev->name);
++ }
++ }
++ }
++
++ if (USES_PRIMARY(bond->params.mode))
++ bond_hw_addr_swap(bond, new_active, old_active);
++
++ if (bond_is_lb(bond)) {
++ bond_alb_handle_active_change(bond, new_active);
++ if (old_active)
++ bond_set_slave_inactive_flags(old_active,
++ BOND_SLAVE_NOTIFY_NOW);
++ if (new_active)
++ bond_set_slave_active_flags(new_active,
++ BOND_SLAVE_NOTIFY_NOW);
++ } else {
++ rcu_assign_pointer(bond->curr_active_slave, new_active);
++ }
++
++ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
++ if (old_active)
++ bond_set_slave_inactive_flags(old_active,
++ BOND_SLAVE_NOTIFY_NOW);
++
++ if (new_active) {
++ bool should_notify_peers = false;
++
++ bond_set_slave_active_flags(new_active,
++ BOND_SLAVE_NOTIFY_NOW);
++
++ if (bond->params.fail_over_mac)
++ bond_do_fail_over_mac(bond, new_active,
++ old_active);
++
++ if (netif_running(bond->dev)) {
++ bond->send_peer_notif =
++ bond->params.num_peer_notif;
++ should_notify_peers =
++ bond_should_notify_peers(bond);
++ }
++
++ write_unlock_bh(&bond->curr_slave_lock);
++
++ call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
++ if (should_notify_peers)
++ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
++ bond->dev);
++
++ write_lock_bh(&bond->curr_slave_lock);
++ }
++ }
++
++ /* resend IGMP joins since active slave has changed or
++ * all were sent on curr_active_slave.
++ * resend only if bond is brought up with the affected
++ * bonding modes and the retransmission is enabled */
++ if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
++ ((USES_PRIMARY(bond->params.mode) && new_active) ||
++ bond->params.mode == BOND_MODE_ROUNDROBIN)) {
++ bond->igmp_retrans = bond->params.resend_igmp;
++ queue_delayed_work(bond->wq, &bond->mcast_work, 1);
++ }
++}
++
++/**
++ * bond_select_active_slave - select a new active slave, if needed
++ * @bond: our bonding struct
++ *
++ * This functions should be called when one of the following occurs:
++ * - The old curr_active_slave has been released or lost its link.
++ * - The primary_slave has got its link back.
++ * - A slave has got its link back and there's no old curr_active_slave.
++ *
++ * Caller must hold curr_slave_lock for write_bh.
++ */
++void bond_select_active_slave(struct bonding *bond)
++{
++ struct slave *best_slave;
++ int rv;
++
++ best_slave = bond_find_best_slave(bond);
++ if (best_slave != bond->curr_active_slave) {
++ bond_change_active_slave(bond, best_slave);
++ rv = bond_set_carrier(bond);
++ if (!rv)
++ return;
++
++ if (netif_carrier_ok(bond->dev)) {
++ pr_info("%s: first active interface up!\n",
++ bond->dev->name);
++ } else {
++ pr_info("%s: now running without any active interface !\n",
++ bond->dev->name);
++ }
++ }
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++static inline int slave_enable_netpoll(struct slave *slave)
++{
++ struct netpoll *np;
++ int err = 0;
++
++ np = kzalloc(sizeof(*np), GFP_ATOMIC);
++ err = -ENOMEM;
++ if (!np)
++ goto out;
++
++ err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
++ if (err) {
++ kfree(np);
++ goto out;
++ }
++ slave->np = np;
++out:
++ return err;
++}
++static inline void slave_disable_netpoll(struct slave *slave)
++{
++ struct netpoll *np = slave->np;
++
++ if (!np)
++ return;
++
++ slave->np = NULL;
++ __netpoll_free_async(np);
++}
++static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
++{
++ if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
++ return false;
++ if (!slave_dev->netdev_ops->ndo_poll_controller)
++ return false;
++ return true;
++}
++
++static void bond_poll_controller(struct net_device *bond_dev)
++{
++}
++
++static void bond_netpoll_cleanup(struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct list_head *iter;
++ struct slave *slave;
++
++ bond_for_each_slave(bond, slave, iter)
++ if (IS_UP(slave->dev))
++ slave_disable_netpoll(slave);
++}
++
++static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
++{
++ struct bonding *bond = netdev_priv(dev);
++ struct list_head *iter;
++ struct slave *slave;
++ int err = 0;
++
++ bond_for_each_slave(bond, slave, iter) {
++ err = slave_enable_netpoll(slave);
++ if (err) {
++ bond_netpoll_cleanup(dev);
++ break;
++ }
++ }
++ return err;
++}
++#else
++static inline int slave_enable_netpoll(struct slave *slave)
++{
++ return 0;
++}
++static inline void slave_disable_netpoll(struct slave *slave)
++{
++}
++static void bond_netpoll_cleanup(struct net_device *bond_dev)
++{
++}
++#endif
++
++/*---------------------------------- IOCTL ----------------------------------*/
++
++static netdev_features_t bond_fix_features(struct net_device *dev,
++ netdev_features_t features)
++{
++ struct bonding *bond = netdev_priv(dev);
++ struct list_head *iter;
++ netdev_features_t mask;
++ struct slave *slave;
++
++ if (!bond_has_slaves(bond)) {
++ /* Disable adding VLANs to empty bond. But why? --mq */
++ features |= NETIF_F_VLAN_CHALLENGED;
++ return features;
++ }
++
++ mask = features;
++ features &= ~NETIF_F_ONE_FOR_ALL;
++ features |= NETIF_F_ALL_FOR_ALL;
++
++ bond_for_each_slave(bond, slave, iter) {
++ features = netdev_increment_features(features,
++ slave->dev->features,
++ mask);
++ }
++ features = netdev_add_tso_features(features, mask);
++
++ return features;
++}
++
++#define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
++ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
++ NETIF_F_HIGHDMA | NETIF_F_LRO)
++
++static void bond_compute_features(struct bonding *bond)
++{
++ unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
++ netdev_features_t vlan_features = BOND_VLAN_FEATURES;
++ struct net_device *bond_dev = bond->dev;
++ struct list_head *iter;
++ struct slave *slave;
++ unsigned short max_hard_header_len = ETH_HLEN;
++ unsigned int gso_max_size = GSO_MAX_SIZE;
++ u16 gso_max_segs = GSO_MAX_SEGS;
++
++ if (!bond_has_slaves(bond))
++ goto done;
++
++ bond_for_each_slave(bond, slave, iter) {
++ vlan_features = netdev_increment_features(vlan_features,
++ slave->dev->vlan_features, BOND_VLAN_FEATURES);
++
++ dst_release_flag &= slave->dev->priv_flags;
++ if (slave->dev->hard_header_len > max_hard_header_len)
++ max_hard_header_len = slave->dev->hard_header_len;
++
++ gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
++ gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
++ }
++
++done:
++ bond_dev->vlan_features = vlan_features;
++ bond_dev->hard_header_len = max_hard_header_len;
++ bond_dev->gso_max_segs = gso_max_segs;
++ netif_set_gso_max_size(bond_dev, gso_max_size);
++
++ flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
++ bond_dev->priv_flags = flags | dst_release_flag;
++
++ netdev_change_features(bond_dev);
++}
++
++static void bond_setup_by_slave(struct net_device *bond_dev,
++ struct net_device *slave_dev)
++{
++ bond_dev->header_ops = slave_dev->header_ops;
++
++ bond_dev->type = slave_dev->type;
++ bond_dev->hard_header_len = slave_dev->hard_header_len;
++ bond_dev->addr_len = slave_dev->addr_len;
++
++ memcpy(bond_dev->broadcast, slave_dev->broadcast,
++ slave_dev->addr_len);
++}
++
++/* On bonding slaves other than the currently active slave, suppress
++ * duplicates except for alb non-mcast/bcast.
++ */
++static bool bond_should_deliver_exact_match(struct sk_buff *skb,
++ struct slave *slave,
++ struct bonding *bond)
++{
++ if (bond_is_slave_inactive(slave)) {
++ if (bond->params.mode == BOND_MODE_ALB &&
++ skb->pkt_type != PACKET_BROADCAST &&
++ skb->pkt_type != PACKET_MULTICAST)
++ return false;
++ return true;
++ }
++ return false;
++}
++
++static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
++{
++ struct sk_buff *skb = *pskb;
++ struct slave *slave;
++ struct bonding *bond;
++ int (*recv_probe)(const struct sk_buff *, struct bonding *,
++ struct slave *);
++ int ret = RX_HANDLER_ANOTHER;
++
++ skb = skb_share_check(skb, GFP_ATOMIC);
++ if (unlikely(!skb))
++ return RX_HANDLER_CONSUMED;
++
++ *pskb = skb;
++
++ slave = bond_slave_get_rcu(skb->dev);
++ bond = slave->bond;
++
++ if (bond->params.arp_interval)
++ slave->dev->last_rx = jiffies;
++
++ recv_probe = ACCESS_ONCE(bond->recv_probe);
++ if (recv_probe) {
++ ret = recv_probe(skb, bond, slave);
++ if (ret == RX_HANDLER_CONSUMED) {
++ consume_skb(skb);
++ return ret;
++ }
++ }
++
++ if (bond_should_deliver_exact_match(skb, slave, bond)) {
++ return RX_HANDLER_EXACT;
++ }
++
++ skb->dev = bond->dev;
++
++ if (bond->params.mode == BOND_MODE_ALB &&
++ bond->dev->priv_flags & IFF_BRIDGE_PORT &&
++ skb->pkt_type == PACKET_HOST) {
++
++ if (unlikely(skb_cow_head(skb,
++ skb->data - skb_mac_header(skb)))) {
++ kfree_skb(skb);
++ return RX_HANDLER_CONSUMED;
++ }
++ memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
++ }
++
++ return ret;
++}
++
++static int bond_master_upper_dev_link(struct net_device *bond_dev,
++ struct net_device *slave_dev,
++ struct slave *slave)
++{
++ int err;
++
++ err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
++ if (err)
++ return err;
++ slave_dev->flags |= IFF_SLAVE;
++ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
++ return 0;
++}
++
++static void bond_upper_dev_unlink(struct net_device *bond_dev,
++ struct net_device *slave_dev)
++{
++ netdev_upper_dev_unlink(slave_dev, bond_dev);
++ slave_dev->flags &= ~IFF_SLAVE;
++ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
++}
++
++/* enslave device <slave> to bond device <master> */
++int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
++ struct slave *new_slave = NULL, *prev_slave;
++ struct sockaddr addr;
++ int link_reporting;
++ int res = 0, i;
++
++ if (!bond->params.use_carrier &&
++ slave_dev->ethtool_ops->get_link == NULL &&
++ slave_ops->ndo_do_ioctl == NULL) {
++ pr_warning("%s: Warning: no link monitoring support for %s\n",
++ bond_dev->name, slave_dev->name);
++ }
++
++ /* already enslaved */
++ if (slave_dev->flags & IFF_SLAVE) {
++ pr_debug("Error, Device was already enslaved\n");
++ return -EBUSY;
++ }
++
++ if (bond_dev == slave_dev) {
++ pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name);
++ return -EPERM;
++ }
++
++ /* vlan challenged mutual exclusion */
++ /* no need to lock since we're protected by rtnl_lock */
++ if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
++ pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
++ if (vlan_uses_dev(bond_dev)) {
++ pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
++ bond_dev->name, slave_dev->name, bond_dev->name);
++ return -EPERM;
++ } else {
++ pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
++ bond_dev->name, slave_dev->name,
++ slave_dev->name, bond_dev->name);
++ }
++ } else {
++ pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
++ }
++
++ /*
++ * Old ifenslave binaries are no longer supported. These can
++ * be identified with moderate accuracy by the state of the slave:
++ * the current ifenslave will set the interface down prior to
++ * enslaving it; the old ifenslave will not.
++ */
++ if ((slave_dev->flags & IFF_UP)) {
++ pr_err("%s is up. This may be due to an out of date ifenslave.\n",
++ slave_dev->name);
++ res = -EPERM;
++ goto err_undo_flags;
++ }
++
++ /* set bonding device ether type by slave - bonding netdevices are
++ * created with ether_setup, so when the slave type is not ARPHRD_ETHER
++ * there is a need to override some of the type dependent attribs/funcs.
++ *
++ * bond ether type mutual exclusion - don't allow slaves of dissimilar
++ * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
++ */
++ if (!bond_has_slaves(bond)) {
++ if (bond_dev->type != slave_dev->type) {
++ pr_debug("%s: change device type from %d to %d\n",
++ bond_dev->name,
++ bond_dev->type, slave_dev->type);
++
++ res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
++ bond_dev);
++ res = notifier_to_errno(res);
++ if (res) {
++ pr_err("%s: refused to change device type\n",
++ bond_dev->name);
++ res = -EBUSY;
++ goto err_undo_flags;
++ }
++
++ /* Flush unicast and multicast addresses */
++ dev_uc_flush(bond_dev);
++ dev_mc_flush(bond_dev);
++
++ if (slave_dev->type != ARPHRD_ETHER)
++ bond_setup_by_slave(bond_dev, slave_dev);
++ else {
++ ether_setup(bond_dev);
++ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
++ }
++
++ call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
++ bond_dev);
++ }
++ } else if (bond_dev->type != slave_dev->type) {
++ pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
++ slave_dev->name,
++ slave_dev->type, bond_dev->type);
++ res = -EINVAL;
++ goto err_undo_flags;
++ }
++
++ if (slave_ops->ndo_set_mac_address == NULL) {
++ if (!bond_has_slaves(bond)) {
++ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
++ bond_dev->name);
++ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
++ bond->params.fail_over_mac = BOND_FOM_ACTIVE;
++ pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
++ bond_dev->name);
++ }
++ } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
++ pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
++ bond_dev->name);
++ res = -EOPNOTSUPP;
++ goto err_undo_flags;
++ }
++ }
++
++ call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
++
++ /* If this is the first slave, then we need to set the master's hardware
++ * address to be the same as the slave's. */
++ if (!bond_has_slaves(bond) &&
++ bond->dev->addr_assign_type == NET_ADDR_RANDOM)
++ bond_set_dev_addr(bond->dev, slave_dev);
++
++ new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
++ if (!new_slave) {
++ res = -ENOMEM;
++ goto err_undo_flags;
++ }
++ /*
++ * Set the new_slave's queue_id to be zero. Queue ID mapping
++ * is set via sysfs or module option if desired.
++ */
++ new_slave->queue_id = 0;
++
++ /* Save slave's original mtu and then set it to match the bond */
++ new_slave->original_mtu = slave_dev->mtu;
++ res = dev_set_mtu(slave_dev, bond->dev->mtu);
++ if (res) {
++ pr_debug("Error %d calling dev_set_mtu\n", res);
++ goto err_free;
++ }
++
++ /*
++ * Save slave's original ("permanent") mac address for modes
++ * that need it, and for restoring it upon release, and then
++ * set it to the master's address
++ */
++ memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
++
++ if (!bond->params.fail_over_mac ||
++ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
++ /*
++ * Set slave to master's mac address. The application already
++ * set the master's mac address to that of the first slave
++ */
++ memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
++ addr.sa_family = slave_dev->type;
++ res = dev_set_mac_address(slave_dev, &addr);
++ if (res) {
++ pr_debug("Error %d calling set_mac_address\n", res);
++ goto err_restore_mtu;
++ }
++ }
++
++ /* open the slave since the application closed it */
++ res = dev_open(slave_dev);
++ if (res) {
++ pr_debug("Opening slave %s failed\n", slave_dev->name);
++ goto err_restore_mac;
++ }
++
++ new_slave->bond = bond;
++ new_slave->dev = slave_dev;
++ slave_dev->priv_flags |= IFF_BONDING;
++
++ if (bond_is_lb(bond)) {
++ /* bond_alb_init_slave() must be called before all other stages since
++ * it might fail and we do not want to have to undo everything
++ */
++ res = bond_alb_init_slave(bond, new_slave);
++ if (res)
++ goto err_close;
++ }
++
++ /* If the mode USES_PRIMARY, then the following is handled by
++ * bond_change_active_slave().
++ */
++ if (!USES_PRIMARY(bond->params.mode)) {
++ /* set promiscuity level to new slave */
++ if (bond_dev->flags & IFF_PROMISC) {
++ res = dev_set_promiscuity(slave_dev, 1);
++ if (res)
++ goto err_close;
++ }
++
++ /* set allmulti level to new slave */
++ if (bond_dev->flags & IFF_ALLMULTI) {
++ res = dev_set_allmulti(slave_dev, 1);
++ if (res)
++ goto err_close;
++ }
++
++ netif_addr_lock_bh(bond_dev);
++
++ dev_mc_sync_multiple(slave_dev, bond_dev);
++ dev_uc_sync_multiple(slave_dev, bond_dev);
++
++ netif_addr_unlock_bh(bond_dev);
++ }
++
++ if (bond->params.mode == BOND_MODE_8023AD) {
++ /* add lacpdu mc addr to mc list */
++ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
++
++ dev_mc_add(slave_dev, lacpdu_multicast);
++ }
++
++ res = vlan_vids_add_by_dev(slave_dev, bond_dev);
++ if (res) {
++ pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
++ bond_dev->name, slave_dev->name);
++ goto err_close;
++ }
++
++ prev_slave = bond_last_slave(bond);
++
++ new_slave->delay = 0;
++ new_slave->link_failure_count = 0;
++
++ bond_update_speed_duplex(new_slave);
++
++ new_slave->last_arp_rx = jiffies -
++ (msecs_to_jiffies(bond->params.arp_interval) + 1);
++ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
++ new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
++
++ if (bond->params.miimon && !bond->params.use_carrier) {
++ link_reporting = bond_check_dev_link(bond, slave_dev, 1);
++
++ if ((link_reporting == -1) && !bond->params.arp_interval) {
++ /*
++ * miimon is set but a bonded network driver
++ * does not support ETHTOOL/MII and
++ * arp_interval is not set. Note: if
++ * use_carrier is enabled, we will never go
++ * here (because netif_carrier is always
++ * supported); thus, we don't need to change
++ * the messages for netif_carrier.
++ */
++ pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
++ bond_dev->name, slave_dev->name);
++ } else if (link_reporting == -1) {
++ /* unable get link status using mii/ethtool */
++ pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
++ bond_dev->name, slave_dev->name);
++ }
++ }
++
++ /* check for initial state */
++ if (bond->params.miimon) {
++ if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
++ if (bond->params.updelay) {
++ new_slave->link = BOND_LINK_BACK;
++ new_slave->delay = bond->params.updelay;
++ } else {
++ new_slave->link = BOND_LINK_UP;
++ }
++ } else {
++ new_slave->link = BOND_LINK_DOWN;
++ }
++ } else if (bond->params.arp_interval) {
++ new_slave->link = (netif_carrier_ok(slave_dev) ?
++ BOND_LINK_UP : BOND_LINK_DOWN);
++ } else {
++ new_slave->link = BOND_LINK_UP;
++ }
++
++ if (new_slave->link != BOND_LINK_DOWN)
++ new_slave->last_link_up = jiffies;
++ pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
++ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
++ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
++
++ if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
++ /* if there is a primary slave, remember it */
++ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
++ bond->primary_slave = new_slave;
++ bond->force_primary = true;
++ }
++ }
++
++ switch (bond->params.mode) {
++ case BOND_MODE_ACTIVEBACKUP:
++ bond_set_slave_inactive_flags(new_slave,
++ BOND_SLAVE_NOTIFY_NOW);
++ break;
++ case BOND_MODE_8023AD:
++ /* in 802.3ad mode, the internal mechanism
++ * will activate the slaves in the selected
++ * aggregator
++ */
++ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
++ /* if this is the first slave */
++ if (!prev_slave) {
++ SLAVE_AD_INFO(new_slave).id = 1;
++ /* Initialize AD with the number of times that the AD timer is called in 1 second
++ * can be called only after the mac address of the bond is set
++ */
++ bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
++ } else {
++ SLAVE_AD_INFO(new_slave).id =
++ SLAVE_AD_INFO(prev_slave).id + 1;
++ }
++
++ bond_3ad_bind_slave(new_slave);
++ break;
++ case BOND_MODE_TLB:
++ case BOND_MODE_ALB:
++ bond_set_active_slave(new_slave);
++ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
++ break;
++ default:
++ pr_debug("This slave is always active in trunk mode\n");
++
++ /* always active in trunk mode */
++ bond_set_active_slave(new_slave);
++
++ /* In trunking mode there is little meaning to curr_active_slave
++ * anyway (it holds no special properties of the bond device),
++ * so we can change it without calling change_active_interface()
++ */
++ if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
++ rcu_assign_pointer(bond->curr_active_slave, new_slave);
++
++ break;
++ } /* switch(bond_mode) */
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ slave_dev->npinfo = bond->dev->npinfo;
++ if (slave_dev->npinfo) {
++ if (slave_enable_netpoll(new_slave)) {
++ pr_info("Error, %s: master_dev is using netpoll, "
++ "but new slave device does not support netpoll.\n",
++ bond_dev->name);
++ res = -EBUSY;
++ goto err_detach;
++ }
++ }
++#endif
++
++ res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
++ new_slave);
++ if (res) {
++ pr_debug("Error %d calling netdev_rx_handler_register\n", res);
++ goto err_detach;
++ }
++
++ res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
++ if (res) {
++ pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
++ goto err_unregister;
++ }
++
++ res = bond_sysfs_slave_add(new_slave);
++ if (res) {
++ pr_debug("Error %d calling bond_sysfs_slave_add\n", res);
++ goto err_upper_unlink;
++ }
++
++ bond->slave_cnt++;
++ bond_compute_features(bond);
++ bond_set_carrier(bond);
++
++ if (USES_PRIMARY(bond->params.mode)) {
++ block_netpoll_tx();
++ write_lock_bh(&bond->curr_slave_lock);
++ bond_select_active_slave(bond);
++ write_unlock_bh(&bond->curr_slave_lock);
++ unblock_netpoll_tx();
++ }
++
++ pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
++ bond_dev->name, slave_dev->name,
++ bond_is_active_slave(new_slave) ? "n active" : " backup",
++ new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
++
++ /* enslave is successful */
++ return 0;
++
++/* Undo stages on error */
++err_upper_unlink:
++ bond_upper_dev_unlink(bond_dev, slave_dev);
++
++err_unregister:
++ netdev_rx_handler_unregister(slave_dev);
++
++err_detach:
++ if (!USES_PRIMARY(bond->params.mode))
++ bond_hw_addr_flush(bond_dev, slave_dev);
++
++ vlan_vids_del_by_dev(slave_dev, bond_dev);
++ if (bond->primary_slave == new_slave)
++ bond->primary_slave = NULL;
++ if (bond->curr_active_slave == new_slave) {
++ block_netpoll_tx();
++ write_lock_bh(&bond->curr_slave_lock);
++ bond_change_active_slave(bond, NULL);
++ bond_select_active_slave(bond);
++ write_unlock_bh(&bond->curr_slave_lock);
++ unblock_netpoll_tx();
++ }
++ slave_disable_netpoll(new_slave);
++
++err_close:
++ slave_dev->priv_flags &= ~IFF_BONDING;
++ dev_close(slave_dev);
++
++err_restore_mac:
++ if (!bond->params.fail_over_mac ||
++ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
++ /* XXX TODO - fom follow mode needs to change master's
++ * MAC if this slave's MAC is in use by the bond, or at
++ * least print a warning.
++ */
++ memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
++ addr.sa_family = slave_dev->type;
++ dev_set_mac_address(slave_dev, &addr);
++ }
++
++err_restore_mtu:
++ dev_set_mtu(slave_dev, new_slave->original_mtu);
++
++err_free:
++ kfree(new_slave);
++
++err_undo_flags:
++ /* Enslave of first slave has failed and we need to fix master's mac */
++ if (!bond_has_slaves(bond) &&
++ ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
++ eth_hw_addr_random(bond_dev);
++
++ return res;
++}
++
++/*
++ * Try to release the slave device <slave> from the bond device <master>
++ * It is legal to access curr_active_slave without a lock because all the function
++ * is write-locked. If "all" is true it means that the function is being called
++ * while destroying a bond interface and all slaves are being released.
++ *
++ * The rules for slave state should be:
++ * for Active/Backup:
++ * Active stays on all backups go down
++ * for Bonded connections:
++ * The first up interface should be left on and all others downed.
++ */
++static int __bond_release_one(struct net_device *bond_dev,
++ struct net_device *slave_dev,
++ bool all)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct slave *slave, *oldcurrent;
++ struct sockaddr addr;
++ int old_flags = bond_dev->flags;
++ netdev_features_t old_features = bond_dev->features;
++
++ /* slave is not a slave or master is not master of this slave */
++ if (!(slave_dev->flags & IFF_SLAVE) ||
++ !netdev_has_upper_dev(slave_dev, bond_dev)) {
++ pr_err("%s: Error: cannot release %s.\n",
++ bond_dev->name, slave_dev->name);
++ return -EINVAL;
++ }
++
++ block_netpoll_tx();
++
++ slave = bond_get_slave_by_dev(bond, slave_dev);
++ if (!slave) {
++ /* not a slave of this bond */
++ pr_info("%s: %s not enslaved\n",
++ bond_dev->name, slave_dev->name);
++ unblock_netpoll_tx();
++ return -EINVAL;
++ }
++
++ bond_sysfs_slave_del(slave);
++
++ bond_upper_dev_unlink(bond_dev, slave_dev);
++ /* unregister rx_handler early so bond_handle_frame wouldn't be called
++ * for this slave anymore.
++ */
++ netdev_rx_handler_unregister(slave_dev);
++ write_lock_bh(&bond->lock);
++
++ /* Inform AD package of unbinding of slave. */
++ if (bond->params.mode == BOND_MODE_8023AD)
++ bond_3ad_unbind_slave(slave);
++
++ write_unlock_bh(&bond->lock);
++
++ pr_info("%s: releasing %s interface %s\n",
++ bond_dev->name,
++ bond_is_active_slave(slave) ? "active" : "backup",
++ slave_dev->name);
++
++ oldcurrent = bond->curr_active_slave;
++
++ bond->current_arp_slave = NULL;
++
++ if (!all && (!bond->params.fail_over_mac ||
++ bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
++ if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
++ bond_has_slaves(bond))
++ pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
++ bond_dev->name, slave_dev->name,
++ slave->perm_hwaddr,
++ bond_dev->name, slave_dev->name);
++ }
++
++ if (bond->primary_slave == slave)
++ bond->primary_slave = NULL;
++
++ if (oldcurrent == slave) {
++ write_lock_bh(&bond->curr_slave_lock);
++ bond_change_active_slave(bond, NULL);
++ write_unlock_bh(&bond->curr_slave_lock);
++ }
++
++ if (bond_is_lb(bond)) {
++ /* Must be called only after the slave has been
++ * detached from the list and the curr_active_slave
++ * has been cleared (if our_slave == old_current),
++ * but before a new active slave is selected.
++ */
++ bond_alb_deinit_slave(bond, slave);
++ }
++
++ if (all) {
++ RCU_INIT_POINTER(bond->curr_active_slave, NULL);
++ } else if (oldcurrent == slave) {
++ /*
++ * Note that we hold RTNL over this sequence, so there
++ * is no concern that another slave add/remove event
++ * will interfere.
++ */
++ write_lock_bh(&bond->curr_slave_lock);
++
++ bond_select_active_slave(bond);
++
++ write_unlock_bh(&bond->curr_slave_lock);
++ }
++
++ if (!bond_has_slaves(bond)) {
++ bond_set_carrier(bond);
++ eth_hw_addr_random(bond_dev);
++
++ if (vlan_uses_dev(bond_dev)) {
++ pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
++ bond_dev->name, bond_dev->name);
++ pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
++ bond_dev->name);
++ }
++ }
++
++ unblock_netpoll_tx();
++ synchronize_rcu();
++ bond->slave_cnt--;
++
++ if (!bond_has_slaves(bond)) {
++ call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
++ call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
++ }
++
++ bond_compute_features(bond);
++ if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
++ (old_features & NETIF_F_VLAN_CHALLENGED))
++ pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
++ bond_dev->name, slave_dev->name, bond_dev->name);
++
++ /* must do this from outside any spinlocks */
++ vlan_vids_del_by_dev(slave_dev, bond_dev);
++
++ /* If the mode USES_PRIMARY, then this cases was handled above by
++ * bond_change_active_slave(..., NULL)
++ */
++ if (!USES_PRIMARY(bond->params.mode)) {
++ /* unset promiscuity level from slave
++ * NOTE: The NETDEV_CHANGEADDR call above may change the value
++ * of the IFF_PROMISC flag in the bond_dev, but we need the
++ * value of that flag before that change, as that was the value
++ * when this slave was attached, so we cache at the start of the
++ * function and use it here. Same goes for ALLMULTI below
++ */
++ if (old_flags & IFF_PROMISC)
++ dev_set_promiscuity(slave_dev, -1);
++
++ /* unset allmulti level from slave */
++ if (old_flags & IFF_ALLMULTI)
++ dev_set_allmulti(slave_dev, -1);
++
++ bond_hw_addr_flush(bond_dev, slave_dev);
++ }
++
++ slave_disable_netpoll(slave);
++
++ /* close slave before restoring its mac address */
++ dev_close(slave_dev);
++
++ if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
++ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
++ /* restore original ("permanent") mac address */
++ memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
++ addr.sa_family = slave_dev->type;
++ dev_set_mac_address(slave_dev, &addr);
++ }
++
++ dev_set_mtu(slave_dev, slave->original_mtu);
++
++ slave_dev->priv_flags &= ~IFF_BONDING;
++
++ kfree(slave);
++
++ return 0; /* deletion OK */
++}
++
++/* A wrapper used because of ndo_del_link */
++int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
++{
++ return __bond_release_one(bond_dev, slave_dev, false);
++}
++
++/*
++* First release a slave and then destroy the bond if no more slaves are left.
++* Must be under rtnl_lock when this function is called.
++*/
++static int bond_release_and_destroy(struct net_device *bond_dev,
++ struct net_device *slave_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ int ret;
++
++ ret = bond_release(bond_dev, slave_dev);
++ if (ret == 0 && !bond_has_slaves(bond)) {
++ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
++ pr_info("%s: destroying bond %s.\n",
++ bond_dev->name, bond_dev->name);
++ unregister_netdevice(bond_dev);
++ }
++ return ret;
++}
++
++static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++
++ info->bond_mode = bond->params.mode;
++ info->miimon = bond->params.miimon;
++
++ read_lock(&bond->lock);
++ info->num_slaves = bond->slave_cnt;
++ read_unlock(&bond->lock);
++
++ return 0;
++}
++
++static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct list_head *iter;
++ int i = 0, res = -ENODEV;
++ struct slave *slave;
++
++ read_lock(&bond->lock);
++ bond_for_each_slave(bond, slave, iter) {
++ if (i++ == (int)info->slave_id) {
++ res = 0;
++ strcpy(info->slave_name, slave->dev->name);
++ info->link = slave->link;
++ info->state = bond_slave_state(slave);
++ info->link_failure_count = slave->link_failure_count;
++ break;
++ }
++ }
++ read_unlock(&bond->lock);
++
++ return res;
++}
++
++/*-------------------------------- Monitoring -------------------------------*/
++
++
++static int bond_miimon_inspect(struct bonding *bond)
++{
++ int link_state, commit = 0;
++ struct list_head *iter;
++ struct slave *slave;
++ bool ignore_updelay;
++
++ ignore_updelay = !bond->curr_active_slave ? true : false;
++
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ slave->new_link = BOND_LINK_NOCHANGE;
++
++ link_state = bond_check_dev_link(bond, slave->dev, 0);
++
++ switch (slave->link) {
++ case BOND_LINK_UP:
++ if (link_state)
++ continue;
++
++ slave->link = BOND_LINK_FAIL;
++ slave->delay = bond->params.downdelay;
++ if (slave->delay) {
++ pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
++ bond->dev->name,
++ (bond->params.mode ==
++ BOND_MODE_ACTIVEBACKUP) ?
++ (bond_is_active_slave(slave) ?
++ "active " : "backup ") : "",
++ slave->dev->name,
++ bond->params.downdelay * bond->params.miimon);
++ }
++ /*FALLTHRU*/
++ case BOND_LINK_FAIL:
++ if (link_state) {
++ /*
++ * recovered before downdelay expired
++ */
++ slave->link = BOND_LINK_UP;
++ slave->last_link_up = jiffies;
++ pr_info("%s: link status up again after %d ms for interface %s.\n",
++ bond->dev->name,
++ (bond->params.downdelay - slave->delay) *
++ bond->params.miimon,
++ slave->dev->name);
++ continue;
++ }
++
++ if (slave->delay <= 0) {
++ slave->new_link = BOND_LINK_DOWN;
++ commit++;
++ continue;
++ }
++
++ slave->delay--;
++ break;
++
++ case BOND_LINK_DOWN:
++ if (!link_state)
++ continue;
++
++ slave->link = BOND_LINK_BACK;
++ slave->delay = bond->params.updelay;
++
++ if (slave->delay) {
++ pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
++ bond->dev->name, slave->dev->name,
++ ignore_updelay ? 0 :
++ bond->params.updelay *
++ bond->params.miimon);
++ }
++ /*FALLTHRU*/
++ case BOND_LINK_BACK:
++ if (!link_state) {
++ slave->link = BOND_LINK_DOWN;
++ pr_info("%s: link status down again after %d ms for interface %s.\n",
++ bond->dev->name,
++ (bond->params.updelay - slave->delay) *
++ bond->params.miimon,
++ slave->dev->name);
++
++ continue;
++ }
++
++ if (ignore_updelay)
++ slave->delay = 0;
++
++ if (slave->delay <= 0) {
++ slave->new_link = BOND_LINK_UP;
++ commit++;
++ ignore_updelay = false;
++ continue;
++ }
++
++ slave->delay--;
++ break;
++ }
++ }
++
++ return commit;
++}
++
++static void bond_miimon_commit(struct bonding *bond)
++{
++ struct list_head *iter;
++ struct slave *slave;
++
++ bond_for_each_slave(bond, slave, iter) {
++ switch (slave->new_link) {
++ case BOND_LINK_NOCHANGE:
++ continue;
++
++ case BOND_LINK_UP:
++ slave->link = BOND_LINK_UP;
++ slave->last_link_up = jiffies;
++
++ if (bond->params.mode == BOND_MODE_8023AD) {
++ /* prevent it from being the active one */
++ bond_set_backup_slave(slave);
++ } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
++ /* make it immediately active */
++ bond_set_active_slave(slave);
++ } else if (slave != bond->primary_slave) {
++ /* prevent it from being the active one */
++ bond_set_backup_slave(slave);
++ }
++
++ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
++ bond->dev->name, slave->dev->name,
++ slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
++ slave->duplex ? "full" : "half");
++
++ /* notify ad that the link status has changed */
++ if (bond->params.mode == BOND_MODE_8023AD)
++ bond_3ad_handle_link_change(slave, BOND_LINK_UP);
++
++ if (bond_is_lb(bond))
++ bond_alb_handle_link_change(bond, slave,
++ BOND_LINK_UP);
++
++ if (!bond->curr_active_slave ||
++ (slave == bond->primary_slave))
++ goto do_failover;
++
++ continue;
++
++ case BOND_LINK_DOWN:
++ if (slave->link_failure_count < UINT_MAX)
++ slave->link_failure_count++;
++
++ slave->link = BOND_LINK_DOWN;
++
++ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
++ bond->params.mode == BOND_MODE_8023AD)
++ bond_set_slave_inactive_flags(slave,
++ BOND_SLAVE_NOTIFY_NOW);
++
++ pr_info("%s: link status definitely down for interface %s, disabling it\n",
++ bond->dev->name, slave->dev->name);
++
++ if (bond->params.mode == BOND_MODE_8023AD)
++ bond_3ad_handle_link_change(slave,
++ BOND_LINK_DOWN);
++
++ if (bond_is_lb(bond))
++ bond_alb_handle_link_change(bond, slave,
++ BOND_LINK_DOWN);
++
++ if (slave == bond->curr_active_slave)
++ goto do_failover;
++
++ continue;
++
++ default:
++ pr_err("%s: invalid new link %d on slave %s\n",
++ bond->dev->name, slave->new_link,
++ slave->dev->name);
++ slave->new_link = BOND_LINK_NOCHANGE;
++
++ continue;
++ }
++
++do_failover:
++ ASSERT_RTNL();
++ block_netpoll_tx();
++ write_lock_bh(&bond->curr_slave_lock);
++ bond_select_active_slave(bond);
++ write_unlock_bh(&bond->curr_slave_lock);
++ unblock_netpoll_tx();
++ }
++
++ bond_set_carrier(bond);
++}
++
++/*
++ * bond_mii_monitor
++ *
++ * Really a wrapper that splits the mii monitor into two phases: an
++ * inspection, then (if inspection indicates something needs to be done)
++ * an acquisition of appropriate locks followed by a commit phase to
++ * implement whatever link state changes are indicated.
++ */
++static void bond_mii_monitor(struct work_struct *work)
++{
++ struct bonding *bond = container_of(work, struct bonding,
++ mii_work.work);
++ bool should_notify_peers = false;
++ unsigned long delay;
++
++ delay = msecs_to_jiffies(bond->params.miimon);
++
++ if (!bond_has_slaves(bond))
++ goto re_arm;
++
++ rcu_read_lock();
++
++ should_notify_peers = bond_should_notify_peers(bond);
++
++ if (bond_miimon_inspect(bond)) {
++ rcu_read_unlock();
++
++ /* Race avoidance with bond_close cancel of workqueue */
++ if (!rtnl_trylock()) {
++ delay = 1;
++ should_notify_peers = false;
++ goto re_arm;
++ }
++
++ bond_miimon_commit(bond);
++
++ rtnl_unlock(); /* might sleep, hold no other locks */
++ } else
++ rcu_read_unlock();
++
++re_arm:
++ if (bond->params.miimon)
++ queue_delayed_work(bond->wq, &bond->mii_work, delay);
++
++ if (should_notify_peers) {
++ if (!rtnl_trylock())
++ return;
++ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
++ rtnl_unlock();
++ }
++}
++
++static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
++{
++ struct net_device *upper;
++ struct list_head *iter;
++ bool ret = false;
++
++ if (ip == bond_confirm_addr(bond->dev, 0, ip))
++ return true;
++
++ rcu_read_lock();
++ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
++ if (ip == bond_confirm_addr(upper, 0, ip)) {
++ ret = true;
++ break;
++ }
++ }
++ rcu_read_unlock();
++
++ return ret;
++}
++
++/*
++ * We go to the (large) trouble of VLAN tagging ARP frames because
++ * switches in VLAN mode (especially if ports are configured as
++ * "native" to a VLAN) might not pass non-tagged frames.
++ */
++static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id)
++{
++ struct sk_buff *skb;
++
++ pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
++ slave_dev->name, &dest_ip, &src_ip, vlan_id);
++
++ skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
++ NULL, slave_dev->dev_addr, NULL);
++
++ if (!skb) {
++ pr_err("ARP packet allocation failed\n");
++ return;
++ }
++ if (vlan_id) {
++ skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
++ if (!skb) {
++ pr_err("failed to insert VLAN tag\n");
++ return;
++ }
++ }
++ arp_xmit(skb);
++}
++
++
++static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
++{
++ struct net_device *upper, *vlan_upper;
++ struct list_head *iter, *vlan_iter;
++ struct rtable *rt;
++ __be32 *targets = bond->params.arp_targets, addr;
++ int i, vlan_id;
++
++ for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
++ pr_debug("basa: target %pI4\n", &targets[i]);
++
++ /* Find out through which dev should the packet go */
++ rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
++ RTO_ONLINK, 0);
++ if (IS_ERR(rt)) {
++ pr_debug("%s: no route to arp_ip_target %pI4\n",
++ bond->dev->name, &targets[i]);
++ continue;
++ }
++
++ vlan_id = 0;
++
++ /* bond device itself */
++ if (rt->dst.dev == bond->dev)
++ goto found;
++
++ rcu_read_lock();
++ /* first we search only for vlan devices. for every vlan
++ * found we verify its upper dev list, searching for the
++ * rt->dst.dev. If found we save the tag of the vlan and
++ * proceed to send the packet.
++ *
++ * TODO: QinQ?
++ */
++ netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
++ vlan_iter) {
++ if (!is_vlan_dev(vlan_upper))
++ continue;
++ netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
++ iter) {
++ if (upper == rt->dst.dev) {
++ vlan_id = vlan_dev_vlan_id(vlan_upper);
++ rcu_read_unlock();
++ goto found;
++ }
++ }
++ }
++
++ /* if the device we're looking for is not on top of any of
++ * our upper vlans, then just search for any dev that
++ * matches, and in case it's a vlan - save the id
++ */
++ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
++ if (upper == rt->dst.dev) {
++ /* if it's a vlan - get its VID */
++ if (is_vlan_dev(upper))
++ vlan_id = vlan_dev_vlan_id(upper);
++
++ rcu_read_unlock();
++ goto found;
++ }
++ }
++ rcu_read_unlock();
++
++ /* Not our device - skip */
++ pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
++ bond->dev->name, &targets[i],
++ rt->dst.dev ? rt->dst.dev->name : "NULL");
++
++ ip_rt_put(rt);
++ continue;
++
++found:
++ addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
++ ip_rt_put(rt);
++ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
++ addr, vlan_id);
++ }
++}
++
++static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
++{
++ int i;
++
++ if (!sip || !bond_has_this_ip(bond, tip)) {
++ pr_debug("bva: sip %pI4 tip %pI4 not found\n", &sip, &tip);
++ return;
++ }
++
++ i = bond_get_targets_ip(bond->params.arp_targets, sip);
++ if (i == -1) {
++ pr_debug("bva: sip %pI4 not found in targets\n", &sip);
++ return;
++ }
++ slave->last_arp_rx = jiffies;
++ slave->target_last_arp_rx[i] = jiffies;
++}
++
++int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
++ struct slave *slave)
++{
++ struct arphdr *arp = (struct arphdr *)skb->data;
++ struct slave *curr_active_slave;
++ unsigned char *arp_ptr;
++ __be32 sip, tip;
++ int alen;
++
++ if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
++ return RX_HANDLER_ANOTHER;
++
++ read_lock(&bond->lock);
++
++ if (!slave_do_arp_validate(bond, slave))
++ goto out_unlock;
++
++ alen = arp_hdr_len(bond->dev);
++
++ pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
++ bond->dev->name, skb->dev->name);
++
++ if (alen > skb_headlen(skb)) {
++ arp = kmalloc(alen, GFP_ATOMIC);
++ if (!arp)
++ goto out_unlock;
++ if (skb_copy_bits(skb, 0, arp, alen) < 0)
++ goto out_unlock;
++ }
++
++ if (arp->ar_hln != bond->dev->addr_len ||
++ skb->pkt_type == PACKET_OTHERHOST ||
++ skb->pkt_type == PACKET_LOOPBACK ||
++ arp->ar_hrd != htons(ARPHRD_ETHER) ||
++ arp->ar_pro != htons(ETH_P_IP) ||
++ arp->ar_pln != 4)
++ goto out_unlock;
++
++ arp_ptr = (unsigned char *)(arp + 1);
++ arp_ptr += bond->dev->addr_len;
++ memcpy(&sip, arp_ptr, 4);
++ arp_ptr += 4 + bond->dev->addr_len;
++ memcpy(&tip, arp_ptr, 4);
++
++ pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
++ bond->dev->name, slave->dev->name, bond_slave_state(slave),
++ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
++ &sip, &tip);
++
++ curr_active_slave = rcu_dereference(bond->curr_active_slave);
++
++ /*
++ * Backup slaves won't see the ARP reply, but do come through
++ * here for each ARP probe (so we swap the sip/tip to validate
++ * the probe). In a "redundant switch, common router" type of
++ * configuration, the ARP probe will (hopefully) travel from
++ * the active, through one switch, the router, then the other
++ * switch before reaching the backup.
++ *
++ * We 'trust' the arp requests if there is an active slave and
++ * it received valid arp reply(s) after it became active. This
++ * is done to avoid endless looping when we can't reach the
++ * arp_ip_target and fool ourselves with our own arp requests.
++ */
++
++ if (bond_is_active_slave(slave))
++ bond_validate_arp(bond, slave, sip, tip);
++ else if (curr_active_slave &&
++ time_after(slave_last_rx(bond, curr_active_slave),
++ curr_active_slave->last_link_up))
++ bond_validate_arp(bond, slave, tip, sip);
++
++out_unlock:
++ read_unlock(&bond->lock);
++ if (arp != (struct arphdr *)skb->data)
++ kfree(arp);
++ return RX_HANDLER_ANOTHER;
++}
++
++/* function to verify if we're in the arp_interval timeslice, returns true if
++ * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
++ * arp_interval/2) . the arp_interval/2 is needed for really fast networks.
++ */
++static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
++ int mod)
++{
++ int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
++
++ return time_in_range(jiffies,
++ last_act - delta_in_ticks,
++ last_act + mod * delta_in_ticks + delta_in_ticks/2);
++}
++
++/*
++ * this function is called regularly to monitor each slave's link
++ * ensuring that traffic is being sent and received when arp monitoring
++ * is used in load-balancing mode. if the adapter has been dormant, then an
++ * arp is transmitted to generate traffic. see activebackup_arp_monitor for
++ * arp monitoring in active backup mode.
++ */
++static void bond_loadbalance_arp_mon(struct work_struct *work)
++{
++ struct bonding *bond = container_of(work, struct bonding,
++ arp_work.work);
++ struct slave *slave, *oldcurrent;
++ struct list_head *iter;
++ int do_failover = 0, slave_state_changed = 0;
++
++ if (!bond_has_slaves(bond))
++ goto re_arm;
++
++ rcu_read_lock();
++
++ oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
++ /* see if any of the previous devices are up now (i.e. they have
++ * xmt and rcv traffic). the curr_active_slave does not come into
++ * the picture unless it is null. also, slave->last_link_up is not
++ * needed here because we send an arp on each slave and give a slave
++ * as long as it needs to get the tx/rx within the delta.
++ * TODO: what about up/down delay in arp mode? it wasn't here before
++ * so it can wait
++ */
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ unsigned long trans_start = dev_trans_start(slave->dev);
++
++ if (slave->link != BOND_LINK_UP) {
++ if (bond_time_in_interval(bond, trans_start, 1) &&
++ bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
++
++ slave->link = BOND_LINK_UP;
++ slave_state_changed = 1;
++
++ /* primary_slave has no meaning in round-robin
++ * mode. the window of a slave being up and
++ * curr_active_slave being null after enslaving
++ * is closed.
++ */
++ if (!oldcurrent) {
++ pr_info("%s: link status definitely up for interface %s, ",
++ bond->dev->name,
++ slave->dev->name);
++ do_failover = 1;
++ } else {
++ pr_info("%s: interface %s is now up\n",
++ bond->dev->name,
++ slave->dev->name);
++ }
++ }
++ } else {
++ /* slave->link == BOND_LINK_UP */
++
++ /* not all switches will respond to an arp request
++ * when the source ip is 0, so don't take the link down
++ * if we don't know our ip yet
++ */
++ if (!bond_time_in_interval(bond, trans_start, 2) ||
++ !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
++
++ slave->link = BOND_LINK_DOWN;
++ slave_state_changed = 1;
++
++ if (slave->link_failure_count < UINT_MAX)
++ slave->link_failure_count++;
++
++ pr_info("%s: interface %s is now down.\n",
++ bond->dev->name,
++ slave->dev->name);
++
++ if (slave == oldcurrent)
++ do_failover = 1;
++ }
++ }
++
++ /* note: if switch is in round-robin mode, all links
++ * must tx arp to ensure all links rx an arp - otherwise
++ * links may oscillate or not come up at all; if switch is
++ * in something like xor mode, there is nothing we can
++ * do - all replies will be rx'ed on same link causing slaves
++ * to be unstable during low/no traffic periods
++ */
++ if (IS_UP(slave->dev))
++ bond_arp_send_all(bond, slave);
++ }
++
++ rcu_read_unlock();
++
++ if (do_failover || slave_state_changed) {
++ if (!rtnl_trylock())
++ goto re_arm;
++
++ if (slave_state_changed) {
++ bond_slave_state_change(bond);
++ } else if (do_failover) {
++ /* the bond_select_active_slave must hold RTNL
++ * and curr_slave_lock for write.
++ */
++ block_netpoll_tx();
++ write_lock_bh(&bond->curr_slave_lock);
++
++ bond_select_active_slave(bond);
++
++ write_unlock_bh(&bond->curr_slave_lock);
++ unblock_netpoll_tx();
++ }
++ rtnl_unlock();
++ }
++
++re_arm:
++ if (bond->params.arp_interval)
++ queue_delayed_work(bond->wq, &bond->arp_work,
++ msecs_to_jiffies(bond->params.arp_interval));
++}
++
++/*
++ * Called to inspect slaves for active-backup mode ARP monitor link state
++ * changes. Sets new_link in slaves to specify what action should take
++ * place for the slave. Returns 0 if no changes are found, >0 if changes
++ * to link states must be committed.
++ *
++ * Called with rcu_read_lock held.
++ */
++static int bond_ab_arp_inspect(struct bonding *bond)
++{
++ unsigned long trans_start, last_rx;
++ struct list_head *iter;
++ struct slave *slave;
++ int commit = 0;
++
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ slave->new_link = BOND_LINK_NOCHANGE;
++ last_rx = slave_last_rx(bond, slave);
++
++ if (slave->link != BOND_LINK_UP) {
++ if (bond_time_in_interval(bond, last_rx, 1)) {
++ slave->new_link = BOND_LINK_UP;
++ commit++;
++ }
++ continue;
++ }
++
++ /*
++ * Give slaves 2*delta after being enslaved or made
++ * active. This avoids bouncing, as the last receive
++ * times need a full ARP monitor cycle to be updated.
++ */
++ if (bond_time_in_interval(bond, slave->last_link_up, 2))
++ continue;
++
++ /*
++ * Backup slave is down if:
++ * - No current_arp_slave AND
++ * - more than 3*delta since last receive AND
++ * - the bond has an IP address
++ *
++ * Note: a non-null current_arp_slave indicates
++ * the curr_active_slave went down and we are
++ * searching for a new one; under this condition
++ * we only take the curr_active_slave down - this
++ * gives each slave a chance to tx/rx traffic
++ * before being taken out
++ */
++ if (!bond_is_active_slave(slave) &&
++ !bond->current_arp_slave &&
++ !bond_time_in_interval(bond, last_rx, 3)) {
++ slave->new_link = BOND_LINK_DOWN;
++ commit++;
++ }
++
++ /*
++ * Active slave is down if:
++ * - more than 2*delta since transmitting OR
++ * - (more than 2*delta since receive AND
++ * the bond has an IP address)
++ */
++ trans_start = dev_trans_start(slave->dev);
++ if (bond_is_active_slave(slave) &&
++ (!bond_time_in_interval(bond, trans_start, 2) ||
++ !bond_time_in_interval(bond, last_rx, 2))) {
++ slave->new_link = BOND_LINK_DOWN;
++ commit++;
++ }
++ }
++
++ return commit;
++}
++
++/*
++ * Called to commit link state changes noted by inspection step of
++ * active-backup mode ARP monitor.
++ *
++ * Called with RTNL held.
++ */
++static void bond_ab_arp_commit(struct bonding *bond)
++{
++ unsigned long trans_start;
++ struct list_head *iter;
++ struct slave *slave;
++
++ bond_for_each_slave(bond, slave, iter) {
++ switch (slave->new_link) {
++ case BOND_LINK_NOCHANGE:
++ continue;
++
++ case BOND_LINK_UP:
++ trans_start = dev_trans_start(slave->dev);
++ if (bond->curr_active_slave != slave ||
++ (!bond->curr_active_slave &&
++ bond_time_in_interval(bond, trans_start, 1))) {
++ slave->link = BOND_LINK_UP;
++ if (bond->current_arp_slave) {
++ bond_set_slave_inactive_flags(
++ bond->current_arp_slave,
++ BOND_SLAVE_NOTIFY_NOW);
++ bond->current_arp_slave = NULL;
++ }
++
++ pr_info("%s: link status definitely up for interface %s.\n",
++ bond->dev->name, slave->dev->name);
++
++ if (!bond->curr_active_slave ||
++ (slave == bond->primary_slave))
++ goto do_failover;
++
++ }
++
++ continue;
++
++ case BOND_LINK_DOWN:
++ if (slave->link_failure_count < UINT_MAX)
++ slave->link_failure_count++;
++
++ slave->link = BOND_LINK_DOWN;
++ bond_set_slave_inactive_flags(slave,
++ BOND_SLAVE_NOTIFY_NOW);
++
++ pr_info("%s: link status definitely down for interface %s, disabling it\n",
++ bond->dev->name, slave->dev->name);
++
++ if (slave == bond->curr_active_slave) {
++ bond->current_arp_slave = NULL;
++ goto do_failover;
++ }
++
++ continue;
++
++ default:
++ pr_err("%s: impossible: new_link %d on slave %s\n",
++ bond->dev->name, slave->new_link,
++ slave->dev->name);
++ continue;
++ }
++
++do_failover:
++ ASSERT_RTNL();
++ block_netpoll_tx();
++ write_lock_bh(&bond->curr_slave_lock);
++ bond_select_active_slave(bond);
++ write_unlock_bh(&bond->curr_slave_lock);
++ unblock_netpoll_tx();
++ }
++
++ bond_set_carrier(bond);
++}
++
++/*
++ * Send ARP probes for active-backup mode ARP monitor.
++ *
++ * Called with rcu_read_lock held.
++ */
++static bool bond_ab_arp_probe(struct bonding *bond)
++{
++ struct slave *slave, *before = NULL, *new_slave = NULL,
++ *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
++ *curr_active_slave = rcu_dereference(bond->curr_active_slave);
++ struct list_head *iter;
++ bool found = false;
++ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
++
++ if (curr_arp_slave && curr_active_slave)
++ pr_info("PROBE: c_arp %s && cas %s BAD\n",
++ curr_arp_slave->dev->name,
++ curr_active_slave->dev->name);
++
++ if (curr_active_slave) {
++ bond_arp_send_all(bond, curr_active_slave);
++ return should_notify_rtnl;
++ }
++
++ /* if we don't have a curr_active_slave, search for the next available
++ * backup slave from the current_arp_slave and make it the candidate
++ * for becoming the curr_active_slave
++ */
++
++ if (!curr_arp_slave) {
++ curr_arp_slave = bond_first_slave_rcu(bond);
++ if (!curr_arp_slave)
++ return should_notify_rtnl;
++ }
++
++ bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
++
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ if (!found && !before && IS_UP(slave->dev))
++ before = slave;
++
++ if (found && !new_slave && IS_UP(slave->dev))
++ new_slave = slave;
++ /* if the link state is up at this point, we
++ * mark it down - this can happen if we have
++ * simultaneous link failures and
++ * reselect_active_interface doesn't make this
++ * one the current slave so it is still marked
++ * up when it is actually down
++ */
++ if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
++ slave->link = BOND_LINK_DOWN;
++ if (slave->link_failure_count < UINT_MAX)
++ slave->link_failure_count++;
++
++ bond_set_slave_inactive_flags(slave,
++ BOND_SLAVE_NOTIFY_LATER);
++
++ pr_info("%s: backup interface %s is now down.\n",
++ bond->dev->name, slave->dev->name);
++ }
++ if (slave == curr_arp_slave)
++ found = true;
++ }
++
++ if (!new_slave && before)
++ new_slave = before;
++
++ if (!new_slave)
++ goto check_state;
++
++ new_slave->link = BOND_LINK_BACK;
++ bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
++ bond_arp_send_all(bond, new_slave);
++ new_slave->last_link_up = jiffies;
++ rcu_assign_pointer(bond->current_arp_slave, new_slave);
++
++check_state:
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ if (slave->should_notify) {
++ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
++ break;
++ }
++ }
++ return should_notify_rtnl;
++}
++
++static void bond_activebackup_arp_mon(struct work_struct *work)
++{
++ struct bonding *bond = container_of(work, struct bonding,
++ arp_work.work);
++ bool should_notify_peers = false;
++ bool should_notify_rtnl = false;
++ int delta_in_ticks;
++
++ delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
++
++ if (!bond_has_slaves(bond))
++ goto re_arm;
++
++ rcu_read_lock();
++
++ should_notify_peers = bond_should_notify_peers(bond);
++
++ if (bond_ab_arp_inspect(bond)) {
++ rcu_read_unlock();
++
++ /* Race avoidance with bond_close flush of workqueue */
++ if (!rtnl_trylock()) {
++ delta_in_ticks = 1;
++ should_notify_peers = false;
++ goto re_arm;
++ }
++
++ bond_ab_arp_commit(bond);
++
++ rtnl_unlock();
++ rcu_read_lock();
++ }
++
++ should_notify_rtnl = bond_ab_arp_probe(bond);
++ rcu_read_unlock();
++
++re_arm:
++ if (bond->params.arp_interval)
++ queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
++
++ if (should_notify_peers || should_notify_rtnl) {
++ if (!rtnl_trylock())
++ return;
++
++ if (should_notify_peers)
++ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
++ bond->dev);
++ if (should_notify_rtnl)
++ bond_slave_state_notify(bond);
++
++ rtnl_unlock();
++ }
++}
++
++/*-------------------------- netdev event handling --------------------------*/
++
++/*
++ * Change device name
++ */
++static int bond_event_changename(struct bonding *bond)
++{
++ bond_remove_proc_entry(bond);
++ bond_create_proc_entry(bond);
++
++ bond_debug_reregister(bond);
++
++ return NOTIFY_DONE;
++}
++
++static int bond_master_netdev_event(unsigned long event,
++ struct net_device *bond_dev)
++{
++ struct bonding *event_bond = netdev_priv(bond_dev);
++
++ switch (event) {
++ case NETDEV_CHANGENAME:
++ return bond_event_changename(event_bond);
++ case NETDEV_UNREGISTER:
++ bond_remove_proc_entry(event_bond);
++ break;
++ case NETDEV_REGISTER:
++ bond_create_proc_entry(event_bond);
++ break;
++ case NETDEV_NOTIFY_PEERS:
++ if (event_bond->send_peer_notif)
++ event_bond->send_peer_notif--;
++ break;
++ default:
++ break;
++ }
++
++ return NOTIFY_DONE;
++}
++
++static int bond_slave_netdev_event(unsigned long event,
++ struct net_device *slave_dev)
++{
++ struct slave *slave = bond_slave_get_rtnl(slave_dev);
++ struct bonding *bond;
++ struct net_device *bond_dev;
++ u32 old_speed;
++ u8 old_duplex;
++
++ /* A netdev event can be generated while enslaving a device
++ * before netdev_rx_handler_register is called in which case
++ * slave will be NULL
++ */
++ if (!slave)
++ return NOTIFY_DONE;
++ bond_dev = slave->bond->dev;
++ bond = slave->bond;
++
++ switch (event) {
++ case NETDEV_UNREGISTER:
++ if (bond_dev->type != ARPHRD_ETHER)
++ bond_release_and_destroy(bond_dev, slave_dev);
++ else
++ bond_release(bond_dev, slave_dev);
++ break;
++ case NETDEV_UP:
++ case NETDEV_CHANGE:
++ old_speed = slave->speed;
++ old_duplex = slave->duplex;
++
++ bond_update_speed_duplex(slave);
++
++ if (bond->params.mode == BOND_MODE_8023AD) {
++ if (old_speed != slave->speed)
++ bond_3ad_adapter_speed_changed(slave);
++ if (old_duplex != slave->duplex)
++ bond_3ad_adapter_duplex_changed(slave);
++ }
++ break;
++ case NETDEV_DOWN:
++ /*
++ * ... Or is it this?
++ */
++ break;
++ case NETDEV_CHANGEMTU:
++ /*
++ * TODO: Should slaves be allowed to
++ * independently alter their MTU? For
++ * an active-backup bond, slaves need
++ * not be the same type of device, so
++ * MTUs may vary. For other modes,
++ * slaves arguably should have the
++ * same MTUs. To do this, we'd need to
++ * take over the slave's change_mtu
++ * function for the duration of their
++ * servitude.
++ */
++ break;
++ case NETDEV_CHANGENAME:
++ /* we don't care if we don't have primary set */
++ if (!USES_PRIMARY(bond->params.mode) ||
++ !bond->params.primary[0])
++ break;
++
++ if (slave == bond->primary_slave) {
++ /* slave's name changed - he's no longer primary */
++ bond->primary_slave = NULL;
++ } else if (!strcmp(slave_dev->name, bond->params.primary)) {
++ /* we have a new primary slave */
++ bond->primary_slave = slave;
++ } else { /* we didn't change primary - exit */
++ break;
++ }
++
++ pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
++ bond->dev->name, bond->primary_slave ? slave_dev->name :
++ "none");
++
++ block_netpoll_tx();
++ write_lock_bh(&bond->curr_slave_lock);
++ bond_select_active_slave(bond);
++ write_unlock_bh(&bond->curr_slave_lock);
++ unblock_netpoll_tx();
++ break;
++ case NETDEV_FEAT_CHANGE:
++ bond_compute_features(bond);
++ break;
++ case NETDEV_RESEND_IGMP:
++ /* Propagate to master device */
++ call_netdevice_notifiers(event, slave->bond->dev);
++ break;
++ default:
++ break;
++ }
++
++ return NOTIFY_DONE;
++}
++
++/*
++ * bond_netdev_event: handle netdev notifier chain events.
++ *
++ * This function receives events for the netdev chain. The caller (an
++ * ioctl handler calling blocking_notifier_call_chain) holds the necessary
++ * locks for us to safely manipulate the slave devices (RTNL lock,
++ * dev_probe_lock).
++ */
++static int bond_netdev_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
++
++ pr_debug("event_dev: %s, event: %lx\n",
++ event_dev ? event_dev->name : "None",
++ event);
++
++ if (!(event_dev->priv_flags & IFF_BONDING))
++ return NOTIFY_DONE;
++
++ if (event_dev->flags & IFF_MASTER) {
++ pr_debug("IFF_MASTER\n");
++ return bond_master_netdev_event(event, event_dev);
++ }
++
++ if (event_dev->flags & IFF_SLAVE) {
++ pr_debug("IFF_SLAVE\n");
++ return bond_slave_netdev_event(event, event_dev);
++ }
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block bond_netdev_notifier = {
++ .notifier_call = bond_netdev_event,
++};
++
++/*---------------------------- Hashing Policies -----------------------------*/
++
++/* L2 hash helper */
++static inline u32 bond_eth_hash(struct sk_buff *skb)
++{
++ struct ethhdr *data = (struct ethhdr *)skb->data;
++
++ if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
++ return data->h_dest[5] ^ data->h_source[5];
++
++ return 0;
++}
++
++/* Extract the appropriate headers based on bond's xmit policy */
++static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
++ struct flow_keys *fk)
++{
++ const struct ipv6hdr *iph6;
++ const struct iphdr *iph;
++ int noff, proto = -1;
++
++ if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
++ return skb_flow_dissect(skb, fk);
++
++ fk->ports = 0;
++ noff = skb_network_offset(skb);
++ if (skb->protocol == htons(ETH_P_IP)) {
++ if (!pskb_may_pull(skb, noff + sizeof(*iph)))
++ return false;
++ iph = ip_hdr(skb);
++ fk->src = iph->saddr;
++ fk->dst = iph->daddr;
++ noff += iph->ihl << 2;
++ if (!ip_is_fragment(iph))
++ proto = iph->protocol;
++ } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
++ return false;
++ iph6 = ipv6_hdr(skb);
++ fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
++ fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
++ noff += sizeof(*iph6);
++ proto = iph6->nexthdr;
++ } else {
++ return false;
++ }
++ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
++ fk->ports = skb_flow_get_ports(skb, noff, proto);
++
++ return true;
++}
++
++/**
++ * bond_xmit_hash - generate a hash value based on the xmit policy
++ * @bond: bonding device
++ * @skb: buffer to use for headers
++ * @count: modulo value
++ *
++ * This function will extract the necessary headers from the skb buffer and use
++ * them to generate a hash based on the xmit_policy set in the bonding device
++ * which will be reduced modulo count before returning.
++ */
++int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
++{
++ struct flow_keys flow;
++ u32 hash;
++
++ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
++ !bond_flow_dissect(bond, skb, &flow))
++ return bond_eth_hash(skb) % count;
++
++ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
++ bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
++ hash = bond_eth_hash(skb);
++ else
++ hash = (__force u32)flow.ports;
++ hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
++ hash ^= (hash >> 16);
++ hash ^= (hash >> 8);
++
++ return hash % count;
++}
++
++/*-------------------------- Device entry points ----------------------------*/
++
++static void bond_work_init_all(struct bonding *bond)
++{
++ INIT_DELAYED_WORK(&bond->mcast_work,
++ bond_resend_igmp_join_requests_delayed);
++ INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
++ INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
++ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
++ INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
++ else
++ INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
++ INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
++}
++
++static void bond_work_cancel_all(struct bonding *bond)
++{
++ cancel_delayed_work_sync(&bond->mii_work);
++ cancel_delayed_work_sync(&bond->arp_work);
++ cancel_delayed_work_sync(&bond->alb_work);
++ cancel_delayed_work_sync(&bond->ad_work);
++ cancel_delayed_work_sync(&bond->mcast_work);
++}
++
++static int bond_open(struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct list_head *iter;
++ struct slave *slave;
++
++ /* reset slave->backup and slave->inactive */
++ read_lock(&bond->lock);
++ if (bond_has_slaves(bond)) {
++ read_lock(&bond->curr_slave_lock);
++ bond_for_each_slave(bond, slave, iter) {
++ if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
++ && (slave != bond->curr_active_slave)) {
++ bond_set_slave_inactive_flags(slave,
++ BOND_SLAVE_NOTIFY_NOW);
++ } else {
++ bond_set_slave_active_flags(slave,
++ BOND_SLAVE_NOTIFY_NOW);
++ }
++ }
++ read_unlock(&bond->curr_slave_lock);
++ }
++ read_unlock(&bond->lock);
++
++ bond_work_init_all(bond);
++
++ if (bond_is_lb(bond)) {
++ /* bond_alb_initialize must be called before the timer
++ * is started.
++ */
++ if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
++ return -ENOMEM;
++ queue_delayed_work(bond->wq, &bond->alb_work, 0);
++ }
++
++ if (bond->params.miimon) /* link check interval, in milliseconds. */
++ queue_delayed_work(bond->wq, &bond->mii_work, 0);
++
++ if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
++ queue_delayed_work(bond->wq, &bond->arp_work, 0);
++ if (bond->params.arp_validate)
++ bond->recv_probe = bond_arp_rcv;
++ }
++
++ if (bond->params.mode == BOND_MODE_8023AD) {
++ queue_delayed_work(bond->wq, &bond->ad_work, 0);
++ /* register to receive LACPDUs */
++ bond->recv_probe = bond_3ad_lacpdu_recv;
++ bond_3ad_initiate_agg_selection(bond, 1);
++ }
++
++ return 0;
++}
++
++static int bond_close(struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++
++ bond_work_cancel_all(bond);
++ bond->send_peer_notif = 0;
++ if (bond_is_lb(bond))
++ bond_alb_deinitialize(bond);
++ bond->recv_probe = NULL;
++
++ return 0;
++}
++
++static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct rtnl_link_stats64 temp;
++ struct list_head *iter;
++ struct slave *slave;
++
++ memset(stats, 0, sizeof(*stats));
++
++ read_lock_bh(&bond->lock);
++ bond_for_each_slave(bond, slave, iter) {
++ const struct rtnl_link_stats64 *sstats =
++ dev_get_stats(slave->dev, &temp);
++
++ stats->rx_packets += sstats->rx_packets;
++ stats->rx_bytes += sstats->rx_bytes;
++ stats->rx_errors += sstats->rx_errors;
++ stats->rx_dropped += sstats->rx_dropped;
++
++ stats->tx_packets += sstats->tx_packets;
++ stats->tx_bytes += sstats->tx_bytes;
++ stats->tx_errors += sstats->tx_errors;
++ stats->tx_dropped += sstats->tx_dropped;
++
++ stats->multicast += sstats->multicast;
++ stats->collisions += sstats->collisions;
++
++ stats->rx_length_errors += sstats->rx_length_errors;
++ stats->rx_over_errors += sstats->rx_over_errors;
++ stats->rx_crc_errors += sstats->rx_crc_errors;
++ stats->rx_frame_errors += sstats->rx_frame_errors;
++ stats->rx_fifo_errors += sstats->rx_fifo_errors;
++ stats->rx_missed_errors += sstats->rx_missed_errors;
++
++ stats->tx_aborted_errors += sstats->tx_aborted_errors;
++ stats->tx_carrier_errors += sstats->tx_carrier_errors;
++ stats->tx_fifo_errors += sstats->tx_fifo_errors;
++ stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
++ stats->tx_window_errors += sstats->tx_window_errors;
++ }
++ read_unlock_bh(&bond->lock);
++
++ return stats;
++}
++
++static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct net_device *slave_dev = NULL;
++ struct ifbond k_binfo;
++ struct ifbond __user *u_binfo = NULL;
++ struct ifslave k_sinfo;
++ struct ifslave __user *u_sinfo = NULL;
++ struct mii_ioctl_data *mii = NULL;
++ struct bond_opt_value newval;
++ struct net *net;
++ int res = 0;
++
++ pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
++
++ switch (cmd) {
++ case SIOCGMIIPHY:
++ mii = if_mii(ifr);
++ if (!mii)
++ return -EINVAL;
++
++ mii->phy_id = 0;
++ /* Fall Through */
++ case SIOCGMIIREG:
++ /*
++ * We do this again just in case we were called by SIOCGMIIREG
++ * instead of SIOCGMIIPHY.
++ */
++ mii = if_mii(ifr);
++ if (!mii)
++ return -EINVAL;
++
++
++ if (mii->reg_num == 1) {
++ mii->val_out = 0;
++ read_lock(&bond->lock);
++ read_lock(&bond->curr_slave_lock);
++ if (netif_carrier_ok(bond->dev))
++ mii->val_out = BMSR_LSTATUS;
++
++ read_unlock(&bond->curr_slave_lock);
++ read_unlock(&bond->lock);
++ }
++
++ return 0;
++ case BOND_INFO_QUERY_OLD:
++ case SIOCBONDINFOQUERY:
++ u_binfo = (struct ifbond __user *)ifr->ifr_data;
++
++ if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
++ return -EFAULT;
++
++ res = bond_info_query(bond_dev, &k_binfo);
++ if (res == 0 &&
++ copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
++ return -EFAULT;
++
++ return res;
++ case BOND_SLAVE_INFO_QUERY_OLD:
++ case SIOCBONDSLAVEINFOQUERY:
++ u_sinfo = (struct ifslave __user *)ifr->ifr_data;
++
++ if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
++ return -EFAULT;
++
++ res = bond_slave_info_query(bond_dev, &k_sinfo);
++ if (res == 0 &&
++ copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
++ return -EFAULT;
++
++ return res;
++ default:
++ /* Go on */
++ break;
++ }
++
++ net = dev_net(bond_dev);
++
++ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
++ return -EPERM;
++
++ slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
++
++ pr_debug("slave_dev=%p:\n", slave_dev);
++
++ if (!slave_dev)
++ return -ENODEV;
++
++ pr_debug("slave_dev->name=%s:\n", slave_dev->name);
++ switch (cmd) {
++ case BOND_ENSLAVE_OLD:
++ case SIOCBONDENSLAVE:
++ res = bond_enslave(bond_dev, slave_dev);
++ break;
++ case BOND_RELEASE_OLD:
++ case SIOCBONDRELEASE:
++ res = bond_release(bond_dev, slave_dev);
++ break;
++ case BOND_SETHWADDR_OLD:
++ case SIOCBONDSETHWADDR:
++ bond_set_dev_addr(bond_dev, slave_dev);
++ res = 0;
++ break;
++ case BOND_CHANGE_ACTIVE_OLD:
++ case SIOCBONDCHANGEACTIVE:
++ bond_opt_initstr(&newval, slave_dev->name);
++ res = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
++ break;
++ default:
++ res = -EOPNOTSUPP;
++ }
++
++ return res;
++}
++
++static void bond_change_rx_flags(struct net_device *bond_dev, int change)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++
++ if (change & IFF_PROMISC)
++ bond_set_promiscuity(bond,
++ bond_dev->flags & IFF_PROMISC ? 1 : -1);
++
++ if (change & IFF_ALLMULTI)
++ bond_set_allmulti(bond,
++ bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
++}
++
++static void bond_set_rx_mode(struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct list_head *iter;
++ struct slave *slave;
++
++
++ rcu_read_lock();
++ if (USES_PRIMARY(bond->params.mode)) {
++ slave = rcu_dereference(bond->curr_active_slave);
++ if (slave) {
++ dev_uc_sync(slave->dev, bond_dev);
++ dev_mc_sync(slave->dev, bond_dev);
++ }
++ } else {
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ dev_uc_sync_multiple(slave->dev, bond_dev);
++ dev_mc_sync_multiple(slave->dev, bond_dev);
++ }
++ }
++ rcu_read_unlock();
++}
++
++static int bond_neigh_init(struct neighbour *n)
++{
++ struct bonding *bond = netdev_priv(n->dev);
++ const struct net_device_ops *slave_ops;
++ struct neigh_parms parms;
++ struct slave *slave;
++ int ret;
++
++ slave = bond_first_slave(bond);
++ if (!slave)
++ return 0;
++ slave_ops = slave->dev->netdev_ops;
++ if (!slave_ops->ndo_neigh_setup)
++ return 0;
++
++ parms.neigh_setup = NULL;
++ parms.neigh_cleanup = NULL;
++ ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
++ if (ret)
++ return ret;
++
++ /*
++ * Assign slave's neigh_cleanup to neighbour in case cleanup is called
++ * after the last slave has been detached. Assumes that all slaves
++ * utilize the same neigh_cleanup (true at this writing as only user
++ * is ipoib).
++ */
++ n->parms->neigh_cleanup = parms.neigh_cleanup;
++
++ if (!parms.neigh_setup)
++ return 0;
++
++ return parms.neigh_setup(n);
++}
++
++/*
++ * The bonding ndo_neigh_setup is called at init time before any
++ * slave exists. So we must declare proxy setup function which will
++ * be used at run time to resolve the actual slave neigh param setup.
++ *
++ * It's also called by master devices (such as vlans) to setup their
++ * underlying devices. In that case - do nothing, we're already set up from
++ * our init.
++ */
++static int bond_neigh_setup(struct net_device *dev,
++ struct neigh_parms *parms)
++{
++ /* modify only our neigh_parms */
++ if (parms->dev == dev)
++ parms->neigh_setup = bond_neigh_init;
++
++ return 0;
++}
++
++/*
++ * Change the MTU of all of a master's slaves to match the master
++ */
++static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct slave *slave, *rollback_slave;
++ struct list_head *iter;
++ int res = 0;
++
++ pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
++ (bond_dev ? bond_dev->name : "None"), new_mtu);
++
++ /* Can't hold bond->lock with bh disabled here since
++ * some base drivers panic. On the other hand we can't
++ * hold bond->lock without bh disabled because we'll
++ * deadlock. The only solution is to rely on the fact
++ * that we're under rtnl_lock here, and the slaves
++ * list won't change. This doesn't solve the problem
++ * of setting the slave's MTU while it is
++ * transmitting, but the assumption is that the base
++ * driver can handle that.
++ *
++ * TODO: figure out a way to safely iterate the slaves
++ * list, but without holding a lock around the actual
++ * call to the base driver.
++ */
++
++ bond_for_each_slave(bond, slave, iter) {
++ pr_debug("s %p c_m %p\n",
++ slave,
++ slave->dev->netdev_ops->ndo_change_mtu);
++
++ res = dev_set_mtu(slave->dev, new_mtu);
++
++ if (res) {
++ /* If we failed to set the slave's mtu to the new value
++ * we must abort the operation even in ACTIVE_BACKUP
++ * mode, because if we allow the backup slaves to have
++ * different mtu values than the active slave we'll
++ * need to change their mtu when doing a failover. That
++ * means changing their mtu from timer context, which
++ * is probably not a good idea.
++ */
++ pr_debug("err %d %s\n", res, slave->dev->name);
++ goto unwind;
++ }
++ }
++
++ bond_dev->mtu = new_mtu;
++
++ return 0;
++
++unwind:
++ /* unwind from head to the slave that failed */
++ bond_for_each_slave(bond, rollback_slave, iter) {
++ int tmp_res;
++
++ if (rollback_slave == slave)
++ break;
++
++ tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
++ if (tmp_res) {
++ pr_debug("unwind err %d dev %s\n",
++ tmp_res, rollback_slave->dev->name);
++ }
++ }
++
++ return res;
++}
++
++/*
++ * Change HW address
++ *
++ * Note that many devices must be down to change the HW address, and
++ * downing the master releases all slaves. We can make bonds full of
++ * bonding devices to test this, however.
++ */
++static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct slave *slave, *rollback_slave;
++ struct sockaddr *sa = addr, tmp_sa;
++ struct list_head *iter;
++ int res = 0;
++
++ if (bond->params.mode == BOND_MODE_ALB)
++ return bond_alb_set_mac_address(bond_dev, addr);
++
++
++ pr_debug("bond=%p, name=%s\n",
++ bond, bond_dev ? bond_dev->name : "None");
++
++ /* If fail_over_mac is enabled, do nothing and return success.
++ * Returning an error causes ifenslave to fail.
++ */
++ if (bond->params.fail_over_mac &&
++ bond->params.mode == BOND_MODE_ACTIVEBACKUP)
++ return 0;
++
++ if (!is_valid_ether_addr(sa->sa_data))
++ return -EADDRNOTAVAIL;
++
++ /* Can't hold bond->lock with bh disabled here since
++ * some base drivers panic. On the other hand we can't
++ * hold bond->lock without bh disabled because we'll
++ * deadlock. The only solution is to rely on the fact
++ * that we're under rtnl_lock here, and the slaves
++ * list won't change. This doesn't solve the problem
++ * of setting the slave's hw address while it is
++ * transmitting, but the assumption is that the base
++ * driver can handle that.
++ *
++ * TODO: figure out a way to safely iterate the slaves
++ * list, but without holding a lock around the actual
++ * call to the base driver.
++ */
++
++ bond_for_each_slave(bond, slave, iter) {
++ const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
++ pr_debug("slave %p %s\n", slave, slave->dev->name);
++
++ if (slave_ops->ndo_set_mac_address == NULL) {
++ res = -EOPNOTSUPP;
++ pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
++ goto unwind;
++ }
++
++ res = dev_set_mac_address(slave->dev, addr);
++ if (res) {
++ /* TODO: consider downing the slave
++ * and retry ?
++ * User should expect communications
++ * breakage anyway until ARP finish
++ * updating, so...
++ */
++ pr_debug("err %d %s\n", res, slave->dev->name);
++ goto unwind;
++ }
++ }
++
++ /* success */
++ memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
++ return 0;
++
++unwind:
++ memcpy(tmp_sa.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
++ tmp_sa.sa_family = bond_dev->type;
++
++ /* unwind from head to the slave that failed */
++ bond_for_each_slave(bond, rollback_slave, iter) {
++ int tmp_res;
++
++ if (rollback_slave == slave)
++ break;
++
++ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
++ if (tmp_res) {
++ pr_debug("unwind err %d dev %s\n",
++ tmp_res, rollback_slave->dev->name);
++ }
++ }
++
++ return res;
++}
++
++/**
++ * bond_xmit_slave_id - transmit skb through slave with slave_id
++ * @bond: bonding device that is transmitting
++ * @skb: buffer to transmit
++ * @slave_id: slave id up to slave_cnt-1 through which to transmit
++ *
++ * This function tries to transmit through slave with slave_id but in case
++ * it fails, it tries to find the first available slave for transmission.
++ * The skb is consumed in all cases, thus the function is void.
++ */
++static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
++{
++ struct list_head *iter;
++ struct slave *slave;
++ int i = slave_id;
++
++ /* Here we start from the slave with slave_id */
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ if (--i < 0) {
++ if (slave_can_tx(slave)) {
++ bond_dev_queue_xmit(bond, skb, slave->dev);
++ return;
++ }
++ }
++ }
++
++ /* Here we start from the first slave up to slave_id */
++ i = slave_id;
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ if (--i < 0)
++ break;
++ if (slave_can_tx(slave)) {
++ bond_dev_queue_xmit(bond, skb, slave->dev);
++ return;
++ }
++ }
++ /* no slave that can tx has been found */
++ kfree_skb(skb);
++}
++
++/**
++ * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
++ * @bond: bonding device to use
++ *
++ * Based on the value of the bonding device's packets_per_slave parameter
++ * this function generates a slave id, which is usually used as the next
++ * slave to transmit through.
++ */
++static u32 bond_rr_gen_slave_id(struct bonding *bond)
++{
++ u32 slave_id;
++ struct reciprocal_value reciprocal_packets_per_slave;
++ int packets_per_slave = bond->params.packets_per_slave;
++
++ switch (packets_per_slave) {
++ case 0:
++ slave_id = prandom_u32();
++ break;
++ case 1:
++ slave_id = bond->rr_tx_counter;
++ break;
++ default:
++ reciprocal_packets_per_slave =
++ bond->params.reciprocal_packets_per_slave;
++ slave_id = reciprocal_divide(bond->rr_tx_counter,
++ reciprocal_packets_per_slave);
++ break;
++ }
++ bond->rr_tx_counter++;
++
++ return slave_id;
++}
++
++static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct iphdr *iph = ip_hdr(skb);
++ struct slave *slave;
++ u32 slave_id;
++
++ /* Start with the curr_active_slave that joined the bond as the
++ * default for sending IGMP traffic. For failover purposes one
++ * needs to maintain some consistency for the interface that will
++ * send the join/membership reports. The curr_active_slave found
++ * will send all of this type of traffic.
++ */
++ if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
++ slave = rcu_dereference(bond->curr_active_slave);
++ if (slave && slave_can_tx(slave))
++ bond_dev_queue_xmit(bond, skb, slave->dev);
++ else
++ bond_xmit_slave_id(bond, skb, 0);
++ } else {
++ int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
++
++ if (likely(slave_cnt)) {
++ slave_id = bond_rr_gen_slave_id(bond);
++ bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
++ } else {
++ dev_kfree_skb_any(skb);
++ }
++ }
++
++ return NETDEV_TX_OK;
++}
++
++/*
++ * in active-backup mode, we know that bond->curr_active_slave is always valid if
++ * the bond has a usable interface.
++ */
++static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct slave *slave;
++
++ slave = rcu_dereference(bond->curr_active_slave);
++ if (slave)
++ bond_dev_queue_xmit(bond, skb, slave->dev);
++ else
++ kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++}
++
++/* In bond_xmit_xor() , we determine the output device by using a pre-
++ * determined xmit_hash_policy(), If the selected device is not enabled,
++ * find the next active slave.
++ */
++static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
++
++ if (likely(slave_cnt))
++ bond_xmit_slave_id(bond, skb,
++ bond_xmit_hash(bond, skb, bond->slave_cnt));
++ else
++ dev_kfree_skb_any(skb);
++
++ return NETDEV_TX_OK;
++}
++
++/* in broadcast mode, we send everything to all usable interfaces. */
++static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct slave *slave = NULL;
++ struct list_head *iter;
++
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ if (bond_is_last_slave(bond, slave))
++ break;
++ if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
++ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
++
++ if (!skb2) {
++ pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
++ bond_dev->name);
++ continue;
++ }
++ /* bond_dev_queue_xmit always returns 0 */
++ bond_dev_queue_xmit(bond, skb2, slave->dev);
++ }
++ }
++ if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
++ bond_dev_queue_xmit(bond, skb, slave->dev);
++ else
++ kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++}
++
++/*------------------------- Device initialization ---------------------------*/
++
++/*
++ * Lookup the slave that corresponds to a qid
++ */
++static inline int bond_slave_override(struct bonding *bond,
++ struct sk_buff *skb)
++{
++ struct slave *slave = NULL;
++ struct list_head *iter;
++
++ if (!skb->queue_mapping)
++ return 1;
++
++ /* Find out if any slaves have the same mapping as this skb. */
++ bond_for_each_slave_rcu(bond, slave, iter) {
++ if (slave->queue_id == skb->queue_mapping) {
++ if (slave_can_tx(slave)) {
++ bond_dev_queue_xmit(bond, skb, slave->dev);
++ return 0;
++ }
++ /* If the slave isn't UP, use default transmit policy. */
++ break;
++ }
++ }
++
++ return 1;
++}
++
++
++static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
++ void *accel_priv, select_queue_fallback_t fallback)
++{
++ /*
++ * This helper function exists to help dev_pick_tx get the correct
++ * destination queue. Using a helper function skips a call to
++ * skb_tx_hash and will put the skbs in the queue we expect on their
++ * way down to the bonding driver.
++ */
++ u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
++
++ /*
++ * Save the original txq to restore before passing to the driver
++ */
++ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
++
++ if (unlikely(txq >= dev->real_num_tx_queues)) {
++ do {
++ txq -= dev->real_num_tx_queues;
++ } while (txq >= dev->real_num_tx_queues);
++ }
++ return txq;
++}
++
++static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct bonding *bond = netdev_priv(dev);
++
++ if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
++ if (!bond_slave_override(bond, skb))
++ return NETDEV_TX_OK;
++ }
++
++ switch (bond->params.mode) {
++ case BOND_MODE_ROUNDROBIN:
++ return bond_xmit_roundrobin(skb, dev);
++ case BOND_MODE_ACTIVEBACKUP:
++ return bond_xmit_activebackup(skb, dev);
++ case BOND_MODE_XOR:
++ return bond_xmit_xor(skb, dev);
++ case BOND_MODE_BROADCAST:
++ return bond_xmit_broadcast(skb, dev);
++ case BOND_MODE_8023AD:
++ return bond_3ad_xmit_xor(skb, dev);
++ case BOND_MODE_ALB:
++ case BOND_MODE_TLB:
++ return bond_alb_xmit(skb, dev);
++ default:
++ /* Should never happen, mode already checked */
++ pr_err("%s: Error: Unknown bonding mode %d\n",
++ dev->name, bond->params.mode);
++ WARN_ON_ONCE(1);
++ kfree_skb(skb);
++ return NETDEV_TX_OK;
++ }
++}
++
++static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct bonding *bond = netdev_priv(dev);
++ netdev_tx_t ret = NETDEV_TX_OK;
++
++ /*
++ * If we risk deadlock from transmitting this in the
++ * netpoll path, tell netpoll to queue the frame for later tx
++ */
++ if (is_netpoll_tx_blocked(dev))
++ return NETDEV_TX_BUSY;
++
++ rcu_read_lock();
++ if (bond_has_slaves(bond))
++ ret = __bond_start_xmit(skb, dev);
++ else
++ kfree_skb(skb);
++ rcu_read_unlock();
++
++ return ret;
++}
++
++static int bond_ethtool_get_settings(struct net_device *bond_dev,
++ struct ethtool_cmd *ecmd)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ unsigned long speed = 0;
++ struct list_head *iter;
++ struct slave *slave;
++
++ ecmd->duplex = DUPLEX_UNKNOWN;
++ ecmd->port = PORT_OTHER;
++
++ /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
++ * do not need to check mode. Though link speed might not represent
++ * the true receive or transmit bandwidth (not all modes are symmetric)
++ * this is an accurate maximum.
++ */
++ read_lock(&bond->lock);
++ bond_for_each_slave(bond, slave, iter) {
++ if (SLAVE_IS_OK(slave)) {
++ if (slave->speed != SPEED_UNKNOWN)
++ speed += slave->speed;
++ if (ecmd->duplex == DUPLEX_UNKNOWN &&
++ slave->duplex != DUPLEX_UNKNOWN)
++ ecmd->duplex = slave->duplex;
++ }
++ }
++ ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
++ read_unlock(&bond->lock);
++
++ return 0;
++}
++
++static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
++ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
++ BOND_ABI_VERSION);
++}
++
++static const struct ethtool_ops bond_ethtool_ops = {
++ .get_drvinfo = bond_ethtool_get_drvinfo,
++ .get_settings = bond_ethtool_get_settings,
++ .get_link = ethtool_op_get_link,
++};
++
++static const struct net_device_ops bond_netdev_ops = {
++ .ndo_init = bond_init,
++ .ndo_uninit = bond_uninit,
++ .ndo_open = bond_open,
++ .ndo_stop = bond_close,
++ .ndo_start_xmit = bond_start_xmit,
++ .ndo_select_queue = bond_select_queue,
++ .ndo_get_stats64 = bond_get_stats,
++ .ndo_do_ioctl = bond_do_ioctl,
++ .ndo_change_rx_flags = bond_change_rx_flags,
++ .ndo_set_rx_mode = bond_set_rx_mode,
++ .ndo_change_mtu = bond_change_mtu,
++ .ndo_set_mac_address = bond_set_mac_address,
++ .ndo_neigh_setup = bond_neigh_setup,
++ .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
++ .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ .ndo_netpoll_setup = bond_netpoll_setup,
++ .ndo_netpoll_cleanup = bond_netpoll_cleanup,
++ .ndo_poll_controller = bond_poll_controller,
++#endif
++ .ndo_add_slave = bond_enslave,
++ .ndo_del_slave = bond_release,
++ .ndo_fix_features = bond_fix_features,
++};
++
++static const struct device_type bond_type = {
++ .name = "bond",
++};
++
++static void bond_destructor(struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ if (bond->wq)
++ destroy_workqueue(bond->wq);
++ free_netdev(bond_dev);
++}
++
++void bond_setup(struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++
++ /* initialize rwlocks */
++ rwlock_init(&bond->lock);
++ rwlock_init(&bond->curr_slave_lock);
++ bond->params = bonding_defaults;
++
++ /* Initialize pointers */
++ bond->dev = bond_dev;
++
++ /* Initialize the device entry points */
++ ether_setup(bond_dev);
++ bond_dev->netdev_ops = &bond_netdev_ops;
++ bond_dev->ethtool_ops = &bond_ethtool_ops;
++
++ bond_dev->destructor = bond_destructor;
++
++ SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
++
++ /* Initialize the device options */
++ bond_dev->tx_queue_len = 0;
++ bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
++ bond_dev->priv_flags |= IFF_BONDING;
++ bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
++
++ /* At first, we block adding VLANs. That's the only way to
++ * prevent problems that occur when adding VLANs over an
++ * empty bond. The block will be removed once non-challenged
++ * slaves are enslaved.
++ */
++ bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
++
++ /* don't acquire bond device's netif_tx_lock when
++ * transmitting */
++ bond_dev->features |= NETIF_F_LLTX;
++
++ /* By default, we declare the bond to be fully
++ * VLAN hardware accelerated capable. Special
++ * care is taken in the various xmit functions
++ * when there are slaves that are not hw accel
++ * capable
++ */
++
++ /* Don't allow bond devices to change network namespaces. */
++ bond_dev->features |= NETIF_F_NETNS_LOCAL;
++
++ bond_dev->hw_features = BOND_VLAN_FEATURES |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_RX |
++ NETIF_F_HW_VLAN_CTAG_FILTER;
++
++ bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
++ bond_dev->features |= bond_dev->hw_features;
++}
++
++/*
++* Destroy a bonding device.
++* Must be under rtnl_lock when this function is called.
++*/
++static void bond_uninit(struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct list_head *iter;
++ struct slave *slave;
++
++ bond_netpoll_cleanup(bond_dev);
++
++ /* Release the bonded slaves */
++ bond_for_each_slave(bond, slave, iter)
++ __bond_release_one(bond_dev, slave->dev, true);
++ pr_info("%s: released all slaves\n", bond_dev->name);
++
++ list_del(&bond->bond_list);
++
++ bond_debug_unregister(bond);
++}
++
++/*------------------------- Module initialization ---------------------------*/
++
++int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
++{
++ int i;
++
++ for (i = 0; tbl[i].modename; i++)
++ if (mode == tbl[i].mode)
++ return tbl[i].mode;
++
++ return -1;
++}
++
++static int bond_parm_tbl_lookup_name(const char *modename,
++ const struct bond_parm_tbl *tbl)
++{
++ int i;
++
++ for (i = 0; tbl[i].modename; i++)
++ if (strcmp(modename, tbl[i].modename) == 0)
++ return tbl[i].mode;
++
++ return -1;
++}
++
++/*
++ * Convert string input module parms. Accept either the
++ * number of the mode or its string name. A bit complicated because
++ * some mode names are substrings of other names, and calls from sysfs
++ * may have whitespace in the name (trailing newlines, for example).
++ */
++int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
++{
++ int modeint;
++ char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
++
++ for (p = (char *)buf; *p; p++)
++ if (!(isdigit(*p) || isspace(*p)))
++ break;
++
++ if (*p && sscanf(buf, "%20s", modestr) != 0)
++ return bond_parm_tbl_lookup_name(modestr, tbl);
++ else if (sscanf(buf, "%d", &modeint) != 0)
++ return bond_parm_tbl_lookup(modeint, tbl);
++
++ return -1;
++}
++
++static int bond_check_params(struct bond_params *params)
++{
++ int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
++ struct bond_opt_value newval, *valptr;
++ int arp_all_targets_value;
++
++ /*
++ * Convert string parameters.
++ */
++ if (mode) {
++ bond_opt_initstr(&newval, mode);
++ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
++ if (!valptr) {
++ pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
++ return -EINVAL;
++ }
++ bond_mode = valptr->value;
++ }
++
++ if (xmit_hash_policy) {
++ if ((bond_mode != BOND_MODE_XOR) &&
++ (bond_mode != BOND_MODE_8023AD)) {
++ pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
++ bond_mode_name(bond_mode));
++ } else {
++ bond_opt_initstr(&newval, xmit_hash_policy);
++ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
++ &newval);
++ if (!valptr) {
++ pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
++ xmit_hash_policy);
++ return -EINVAL;
++ }
++ xmit_hashtype = valptr->value;
++ }
++ }
++
++ if (lacp_rate) {
++ if (bond_mode != BOND_MODE_8023AD) {
++ pr_info("lacp_rate param is irrelevant in mode %s\n",
++ bond_mode_name(bond_mode));
++ } else {
++ bond_opt_initstr(&newval, lacp_rate);
++ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
++ &newval);
++ if (!valptr) {
++ pr_err("Error: Invalid lacp rate \"%s\"\n",
++ lacp_rate);
++ return -EINVAL;
++ }
++ lacp_fast = valptr->value;
++ }
++ }
++
++ if (ad_select) {
++ bond_opt_initstr(&newval, ad_select);
++ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
++ &newval);
++ if (!valptr) {
++ pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
++ return -EINVAL;
++ }
++ params->ad_select = valptr->value;
++ if (bond_mode != BOND_MODE_8023AD)
++ pr_warning("ad_select param only affects 802.3ad mode\n");
++ } else {
++ params->ad_select = BOND_AD_STABLE;
++ }
++
++ if (max_bonds < 0) {
++ pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
++ max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
++ max_bonds = BOND_DEFAULT_MAX_BONDS;
++ }
++
++ if (miimon < 0) {
++ pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
++ miimon, INT_MAX);
++ miimon = 0;
++ }
++
++ if (updelay < 0) {
++ pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
++ updelay, INT_MAX);
++ updelay = 0;
++ }
++
++ if (downdelay < 0) {
++ pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
++ downdelay, INT_MAX);
++ downdelay = 0;
++ }
++
++ if ((use_carrier != 0) && (use_carrier != 1)) {
++ pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
++ use_carrier);
++ use_carrier = 1;
++ }
++
++ if (num_peer_notif < 0 || num_peer_notif > 255) {
++ pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
++ num_peer_notif);
++ num_peer_notif = 1;
++ }
++
++ /* reset values for 802.3ad/TLB/ALB */
++ if (BOND_NO_USES_ARP(bond_mode)) {
++ if (!miimon) {
++ pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
++ pr_warning("Forcing miimon to 100msec\n");
++ miimon = BOND_DEFAULT_MIIMON;
++ }
++ }
++
++ if (tx_queues < 1 || tx_queues > 255) {
++ pr_warning("Warning: tx_queues (%d) should be between "
++ "1 and 255, resetting to %d\n",
++ tx_queues, BOND_DEFAULT_TX_QUEUES);
++ tx_queues = BOND_DEFAULT_TX_QUEUES;
++ }
++
++ if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
++ pr_warning("Warning: all_slaves_active module parameter (%d), "
++ "not of valid value (0/1), so it was set to "
++ "0\n", all_slaves_active);
++ all_slaves_active = 0;
++ }
++
++ if (resend_igmp < 0 || resend_igmp > 255) {
++ pr_warning("Warning: resend_igmp (%d) should be between "
++ "0 and 255, resetting to %d\n",
++ resend_igmp, BOND_DEFAULT_RESEND_IGMP);
++ resend_igmp = BOND_DEFAULT_RESEND_IGMP;
++ }
++
++ bond_opt_initval(&newval, packets_per_slave);
++ if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
++ pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
++ packets_per_slave, USHRT_MAX);
++ packets_per_slave = 1;
++ }
++
++ if (bond_mode == BOND_MODE_ALB) {
++ pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
++ updelay);
++ }
++
++ if (!miimon) {
++ if (updelay || downdelay) {
++ /* just warn the user the up/down delay will have
++ * no effect since miimon is zero...
++ */
++ pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
++ updelay, downdelay);
++ }
++ } else {
++ /* don't allow arp monitoring */
++ if (arp_interval) {
++ pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
++ miimon, arp_interval);
++ arp_interval = 0;
++ }
++
++ if ((updelay % miimon) != 0) {
++ pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
++ updelay, miimon,
++ (updelay / miimon) * miimon);
++ }
++
++ updelay /= miimon;
++
++ if ((downdelay % miimon) != 0) {
++ pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
++ downdelay, miimon,
++ (downdelay / miimon) * miimon);
++ }
++
++ downdelay /= miimon;
++ }
++
++ if (arp_interval < 0) {
++ pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n",
++ arp_interval, INT_MAX);
++ arp_interval = 0;
++ }
++
++ for (arp_ip_count = 0, i = 0;
++ (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
++ /* not complete check, but should be good enough to
++ catch mistakes */
++ __be32 ip;
++ if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
++ IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
++ pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
++ arp_ip_target[i]);
++ arp_interval = 0;
++ } else {
++ if (bond_get_targets_ip(arp_target, ip) == -1)
++ arp_target[arp_ip_count++] = ip;
++ else
++ pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
++ &ip);
++ }
++ }
++
++ if (arp_interval && !arp_ip_count) {
++ /* don't allow arping if no arp_ip_target given... */
++ pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
++ arp_interval);
++ arp_interval = 0;
++ }
++
++ if (arp_validate) {
++ if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
++ pr_err("arp_validate only supported in active-backup mode\n");
++ return -EINVAL;
++ }
++ if (!arp_interval) {
++ pr_err("arp_validate requires arp_interval\n");
++ return -EINVAL;
++ }
++
++ bond_opt_initstr(&newval, arp_validate);
++ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
++ &newval);
++ if (!valptr) {
++ pr_err("Error: invalid arp_validate \"%s\"\n",
++ arp_validate);
++ return -EINVAL;
++ }
++ arp_validate_value = valptr->value;
++ } else {
++ arp_validate_value = 0;
++ }
++
++ arp_all_targets_value = 0;
++ if (arp_all_targets) {
++ bond_opt_initstr(&newval, arp_all_targets);
++ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
++ &newval);
++ if (!valptr) {
++ pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
++ arp_all_targets);
++ arp_all_targets_value = 0;
++ } else {
++ arp_all_targets_value = valptr->value;
++ }
++ }
++
++ if (miimon) {
++ pr_info("MII link monitoring set to %d ms\n", miimon);
++ } else if (arp_interval) {
++ valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
++ arp_validate_value);
++ pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
++ arp_interval, valptr->string, arp_ip_count);
++
++ for (i = 0; i < arp_ip_count; i++)
++ pr_info(" %s", arp_ip_target[i]);
++
++ pr_info("\n");
++
++ } else if (max_bonds) {
++ /* miimon and arp_interval not set, we need one so things
++ * work as expected, see bonding.txt for details
++ */
++ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
++ }
++
++ if (primary && !USES_PRIMARY(bond_mode)) {
++ /* currently, using a primary only makes sense
++ * in active backup, TLB or ALB modes
++ */
++ pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
++ primary, bond_mode_name(bond_mode));
++ primary = NULL;
++ }
++
++ if (primary && primary_reselect) {
++ bond_opt_initstr(&newval, primary_reselect);
++ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
++ &newval);
++ if (!valptr) {
++ pr_err("Error: Invalid primary_reselect \"%s\"\n",
++ primary_reselect);
++ return -EINVAL;
++ }
++ primary_reselect_value = valptr->value;
++ } else {
++ primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
++ }
++
++ if (fail_over_mac) {
++ bond_opt_initstr(&newval, fail_over_mac);
++ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
++ &newval);
++ if (!valptr) {
++ pr_err("Error: invalid fail_over_mac \"%s\"\n",
++ fail_over_mac);
++ return -EINVAL;
++ }
++ fail_over_mac_value = valptr->value;
++ if (bond_mode != BOND_MODE_ACTIVEBACKUP)
++ pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
++ } else {
++ fail_over_mac_value = BOND_FOM_NONE;
++ }
++
++ if (lp_interval == 0) {
++ pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
++ INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
++ lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
++ }
++
++ /* fill params struct with the proper values */
++ params->mode = bond_mode;
++ params->xmit_policy = xmit_hashtype;
++ params->miimon = miimon;
++ params->num_peer_notif = num_peer_notif;
++ params->arp_interval = arp_interval;
++ params->arp_validate = arp_validate_value;
++ params->arp_all_targets = arp_all_targets_value;
++ params->updelay = updelay;
++ params->downdelay = downdelay;
++ params->use_carrier = use_carrier;
++ params->lacp_fast = lacp_fast;
++ params->primary[0] = 0;
++ params->primary_reselect = primary_reselect_value;
++ params->fail_over_mac = fail_over_mac_value;
++ params->tx_queues = tx_queues;
++ params->all_slaves_active = all_slaves_active;
++ params->resend_igmp = resend_igmp;
++ params->min_links = min_links;
++ params->lp_interval = lp_interval;
++ params->packets_per_slave = packets_per_slave;
++ if (packets_per_slave > 0) {
++ params->reciprocal_packets_per_slave =
++ reciprocal_value(packets_per_slave);
++ } else {
++ /* reciprocal_packets_per_slave is unused if
++ * packets_per_slave is 0 or 1, just initialize it
++ */
++ params->reciprocal_packets_per_slave =
++ (struct reciprocal_value) { 0 };
++ }
++
++ if (primary) {
++ strncpy(params->primary, primary, IFNAMSIZ);
++ params->primary[IFNAMSIZ - 1] = 0;
++ }
++
++ memcpy(params->arp_targets, arp_target, sizeof(arp_target));
++
++ return 0;
++}
++
++static struct lock_class_key bonding_netdev_xmit_lock_key;
++static struct lock_class_key bonding_netdev_addr_lock_key;
++static struct lock_class_key bonding_tx_busylock_key;
++
++static void bond_set_lockdep_class_one(struct net_device *dev,
++ struct netdev_queue *txq,
++ void *_unused)
++{
++ lockdep_set_class(&txq->_xmit_lock,
++ &bonding_netdev_xmit_lock_key);
++}
++
++static void bond_set_lockdep_class(struct net_device *dev)
++{
++ lockdep_set_class(&dev->addr_list_lock,
++ &bonding_netdev_addr_lock_key);
++ netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
++ dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
++}
++
++/*
++ * Called from registration process
++ */
++static int bond_init(struct net_device *bond_dev)
++{
++ struct bonding *bond = netdev_priv(bond_dev);
++ struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
++ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
++
++ pr_debug("Begin bond_init for %s\n", bond_dev->name);
++
++ /*
++ * Initialize locks that may be required during
++ * en/deslave operations. All of the bond_open work
++ * (of which this is part) should really be moved to
++ * a phase prior to dev_open
++ */
++ spin_lock_init(&(bond_info->tx_hashtbl_lock));
++ spin_lock_init(&(bond_info->rx_hashtbl_lock));
++
++ bond->wq = create_singlethread_workqueue(bond_dev->name);
++ if (!bond->wq)
++ return -ENOMEM;
++
++ bond_set_lockdep_class(bond_dev);
++
++ list_add_tail(&bond->bond_list, &bn->dev_list);
++
++ bond_prepare_sysfs_group(bond);
++
++ bond_debug_register(bond);
++
++ /* Ensure valid dev_addr */
++ if (is_zero_ether_addr(bond_dev->dev_addr) &&
++ bond_dev->addr_assign_type == NET_ADDR_PERM)
++ eth_hw_addr_random(bond_dev);
++
++ return 0;
++}
++
++unsigned int bond_get_num_tx_queues(void)
++{
++ return tx_queues;
++}
++
++/* Create a new bond based on the specified name and bonding parameters.
++ * If name is NULL, obtain a suitable "bond%d" name for us.
++ * Caller must NOT hold rtnl_lock; we need to release it here before we
++ * set up our sysfs entries.
++ */
++int bond_create(struct net *net, const char *name)
++{
++ struct net_device *bond_dev;
++ int res;
++
++ rtnl_lock();
++
++ bond_dev = alloc_netdev_mq(sizeof(struct bonding),
++ name ? name : "bond%d",
++ bond_setup, tx_queues);
++ if (!bond_dev) {
++ pr_err("%s: eek! can't alloc netdev!\n", name);
++ rtnl_unlock();
++ return -ENOMEM;
++ }
++
++ dev_net_set(bond_dev, net);
++ bond_dev->rtnl_link_ops = &bond_link_ops;
++
++ res = register_netdevice(bond_dev);
++
++ netif_carrier_off(bond_dev);
++
++ rtnl_unlock();
++ if (res < 0)
++ bond_destructor(bond_dev);
++ return res;
++}
++
++static int __net_init bond_net_init(struct net *net)
++{
++ struct bond_net *bn = net_generic(net, bond_net_id);
++
++ bn->net = net;
++ INIT_LIST_HEAD(&bn->dev_list);
++
++ bond_create_proc_dir(bn);
++ bond_create_sysfs(bn);
++
++ return 0;
++}
++
++static void __net_exit bond_net_exit(struct net *net)
++{
++ struct bond_net *bn = net_generic(net, bond_net_id);
++ struct bonding *bond, *tmp_bond;
++ LIST_HEAD(list);
++
++ bond_destroy_sysfs(bn);
++ bond_destroy_proc_dir(bn);
++
++ /* Kill off any bonds created after unregistering bond rtnl ops */
++ rtnl_lock();
++ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
++ unregister_netdevice_queue(bond->dev, &list);
++ unregister_netdevice_many(&list);
++ rtnl_unlock();
++}
++
++static struct pernet_operations bond_net_ops = {
++ .init = bond_net_init,
++ .exit = bond_net_exit,
++ .id = &bond_net_id,
++ .size = sizeof(struct bond_net),
++};
++
++static int __init bonding_init(void)
++{
++ int i;
++ int res;
++
++ pr_info("%s", bond_version);
++
++ res = bond_check_params(&bonding_defaults);
++ if (res)
++ goto out;
++
++ res = register_pernet_subsys(&bond_net_ops);
++ if (res)
++ goto out;
++
++ res = bond_netlink_init();
++ if (res)
++ goto err_link;
++
++ bond_create_debugfs();
++
++ for (i = 0; i < max_bonds; i++) {
++ res = bond_create(&init_net, NULL);
++ if (res)
++ goto err;
++ }
++
++ register_netdevice_notifier(&bond_netdev_notifier);
++out:
++ return res;
++err:
++ bond_destroy_debugfs();
++ bond_netlink_fini();
++err_link:
++ unregister_pernet_subsys(&bond_net_ops);
++ goto out;
++
++}
++
++static void __exit bonding_exit(void)
++{
++ unregister_netdevice_notifier(&bond_netdev_notifier);
++
++ bond_destroy_debugfs();
++
++ bond_netlink_fini();
++ unregister_pernet_subsys(&bond_net_ops);
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ /*
++ * Make sure we don't have an imbalance on our netpoll blocking
++ */
++ WARN_ON(atomic_read(&netpoll_block_tx));
++#endif
++}
++
++module_init(bonding_init);
++module_exit(bonding_exit);
++MODULE_LICENSE("GPL");
++MODULE_VERSION(DRV_VERSION);
++MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
++MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
+diff -Nur linux-3.14.36/drivers/net/can/flexcan.c linux-openelec/drivers/net/can/flexcan.c
+--- linux-3.14.36/drivers/net/can/flexcan.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/can/flexcan.c 2015-05-06 12:05:42.000000000 -0500
+@@ -125,7 +125,8 @@
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+
+ /* FLEXCAN interrupt flag register (IFLAG) bits */
+-#define FLEXCAN_TX_BUF_ID 8
++#define FLEXCAN_RESERVED_BUF_ID 8
++#define FLEXCAN_TX_BUF_ID 13
+ #define FLEXCAN_IFLAG_BUF(x) BIT(x)
+ #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
+ #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
+@@ -162,6 +163,7 @@
+ */
+ #define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */
+ #define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* [TR]WRN_INT not connected */
++#define FLEXCAN_HAS_ERR005829 BIT(3) /* have errata ERR005829 */
+
+ /* Structure of the message buffer */
+ struct flexcan_mb {
+@@ -221,7 +223,7 @@
+ };
+ static struct flexcan_devtype_data fsl_imx28_devtype_data;
+ static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+- .features = FLEXCAN_HAS_V10_FEATURES,
++ .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_ERR005829,
+ };
+
+ static const struct can_bittiming_const flexcan_bittiming_const = {
+@@ -428,6 +430,11 @@
+ flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
+ flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
++ if (priv->devtype_data->features & FLEXCAN_HAS_ERR005829) {
++ writel(0x0, &regs->cantxfg[FLEXCAN_RESERVED_BUF_ID].can_ctrl);
++ writel(0x0, &regs->cantxfg[FLEXCAN_RESERVED_BUF_ID].can_ctrl);
++ }
++
+ return NETDEV_TX_OK;
+ }
+
+diff -Nur linux-3.14.36/drivers/net/ethernet/adi/bfin_mac.c linux-openelec/drivers/net/ethernet/adi/bfin_mac.c
+--- linux-3.14.36/drivers/net/ethernet/adi/bfin_mac.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/adi/bfin_mac.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1040,6 +1040,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = bfin_ptp_adjfreq,
+ .adjtime = bfin_ptp_adjtime,
+diff -Nur linux-3.14.36/drivers/net/ethernet/broadcom/tg3.c linux-openelec/drivers/net/ethernet/broadcom/tg3.c
+--- linux-3.14.36/drivers/net/ethernet/broadcom/tg3.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/broadcom/tg3.c 2015-07-24 18:03:29.396842002 -0500
+@@ -6322,6 +6322,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 1,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = tg3_ptp_adjfreq,
+ .adjtime = tg3_ptp_adjtime,
+diff -Nur linux-3.14.36/drivers/net/ethernet/broadcom/tg3.c.orig linux-openelec/drivers/net/ethernet/broadcom/tg3.c.orig
+--- linux-3.14.36/drivers/net/ethernet/broadcom/tg3.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/ethernet/broadcom/tg3.c.orig 2015-07-24 18:03:29.228842002 -0500
+@@ -0,0 +1,18193 @@
++/*
++ * tg3.c: Broadcom Tigon3 ethernet driver.
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
++ * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
++ * Copyright (C) 2004 Sun Microsystems Inc.
++ * Copyright (C) 2005-2013 Broadcom Corporation.
++ *
++ * Firmware is:
++ * Derived from proprietary unpublished source code,
++ * Copyright (C) 2000-2003 Broadcom Corporation.
++ *
++ * Permission is hereby granted for the distribution of this firmware
++ * data in hexadecimal or equivalent format, provided this copyright
++ * notice is accompanying it.
++ */
++
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/stringify.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/compiler.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/in.h>
++#include <linux/interrupt.h>
++#include <linux/ioport.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <linux/mdio.h>
++#include <linux/mii.h>
++#include <linux/phy.h>
++#include <linux/brcmphy.h>
++#include <linux/if.h>
++#include <linux/if_vlan.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/workqueue.h>
++#include <linux/prefetch.h>
++#include <linux/dma-mapping.h>
++#include <linux/firmware.h>
++#include <linux/ssb/ssb_driver_gige.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++
++#include <net/checksum.h>
++#include <net/ip.h>
++
++#include <linux/io.h>
++#include <asm/byteorder.h>
++#include <linux/uaccess.h>
++
++#include <uapi/linux/net_tstamp.h>
++#include <linux/ptp_clock_kernel.h>
++
++#ifdef CONFIG_SPARC
++#include <asm/idprom.h>
++#include <asm/prom.h>
++#endif
++
++#define BAR_0 0
++#define BAR_2 2
++
++#include "tg3.h"
++
++/* Functions & macros to verify TG3_FLAGS types */
++
++static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
++{
++ return test_bit(flag, bits);
++}
++
++static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
++{
++ set_bit(flag, bits);
++}
++
++static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
++{
++ clear_bit(flag, bits);
++}
++
++#define tg3_flag(tp, flag) \
++ _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
++#define tg3_flag_set(tp, flag) \
++ _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
++#define tg3_flag_clear(tp, flag) \
++ _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
++
++#define DRV_MODULE_NAME "tg3"
++#define TG3_MAJ_NUM 3
++#define TG3_MIN_NUM 136
++#define DRV_MODULE_VERSION \
++ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
++#define DRV_MODULE_RELDATE "Jan 03, 2014"
++
++#define RESET_KIND_SHUTDOWN 0
++#define RESET_KIND_INIT 1
++#define RESET_KIND_SUSPEND 2
++
++#define TG3_DEF_RX_MODE 0
++#define TG3_DEF_TX_MODE 0
++#define TG3_DEF_MSG_ENABLE \
++ (NETIF_MSG_DRV | \
++ NETIF_MSG_PROBE | \
++ NETIF_MSG_LINK | \
++ NETIF_MSG_TIMER | \
++ NETIF_MSG_IFDOWN | \
++ NETIF_MSG_IFUP | \
++ NETIF_MSG_RX_ERR | \
++ NETIF_MSG_TX_ERR)
++
++#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
++
++/* length of time before we decide the hardware is borked,
++ * and dev->tx_timeout() should be called to fix the problem
++ */
++
++#define TG3_TX_TIMEOUT (5 * HZ)
++
++/* hardware minimum and maximum for a single frame's data payload */
++#define TG3_MIN_MTU 60
++#define TG3_MAX_MTU(tp) \
++ (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
++
++/* These numbers seem to be hard coded in the NIC firmware somehow.
++ * You can't change the ring sizes, but you can change where you place
++ * them in the NIC onboard memory.
++ */
++#define TG3_RX_STD_RING_SIZE(tp) \
++ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
++ TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
++#define TG3_DEF_RX_RING_PENDING 200
++#define TG3_RX_JMB_RING_SIZE(tp) \
++ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
++ TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
++#define TG3_DEF_RX_JUMBO_RING_PENDING 100
++
++/* Do not place this n-ring entries value into the tp struct itself,
++ * we really want to expose these constants to GCC so that modulo et
++ * al. operations are done with shifts and masks instead of with
++ * hw multiply/modulo instructions. Another solution would be to
++ * replace things like '% foo' with '& (foo - 1)'.
++ */
++
++#define TG3_TX_RING_SIZE 512
++#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
++
++#define TG3_RX_STD_RING_BYTES(tp) \
++ (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
++#define TG3_RX_JMB_RING_BYTES(tp) \
++ (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
++#define TG3_RX_RCB_RING_BYTES(tp) \
++ (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
++#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
++ TG3_TX_RING_SIZE)
++#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
++
++#define TG3_DMA_BYTE_ENAB 64
++
++#define TG3_RX_STD_DMA_SZ 1536
++#define TG3_RX_JMB_DMA_SZ 9046
++
++#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
++
++#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
++#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
++
++#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
++ (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
++
++#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
++ (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
++
++/* Due to a hardware bug, the 5701 can only DMA to memory addresses
++ * that are at least dword aligned when used in PCIX mode. The driver
++ * works around this bug by double copying the packet. This workaround
++ * is built into the normal double copy length check for efficiency.
++ *
++ * However, the double copy is only necessary on those architectures
++ * where unaligned memory accesses are inefficient. For those architectures
++ * where unaligned memory accesses incur little penalty, we can reintegrate
++ * the 5701 in the normal rx path. Doing so saves a device structure
++ * dereference by hardcoding the double copy threshold in place.
++ */
++#define TG3_RX_COPY_THRESHOLD 256
++#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
++ #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
++#else
++ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
++#endif
++
++#if (NET_IP_ALIGN != 0)
++#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
++#else
++#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
++#endif
++
++/* minimum number of free TX descriptors required to wake up TX process */
++#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
++#define TG3_TX_BD_DMA_MAX_2K 2048
++#define TG3_TX_BD_DMA_MAX_4K 4096
++
++#define TG3_RAW_IP_ALIGN 2
++
++#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
++#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
++
++#define TG3_FW_UPDATE_TIMEOUT_SEC 5
++#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
++
++#define FIRMWARE_TG3 "tigon/tg3.bin"
++#define FIRMWARE_TG357766 "tigon/tg357766.bin"
++#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
++#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
++
++static char version[] =
++ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
++
++MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
++MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(DRV_MODULE_VERSION);
++MODULE_FIRMWARE(FIRMWARE_TG3);
++MODULE_FIRMWARE(FIRMWARE_TG3TSO);
++MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
++
++static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
++module_param(tg3_debug, int, 0);
++MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
++
++#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
++#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
++
++static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
++ TG3_DRV_DATA_FLAG_5705_10_100},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
++ TG3_DRV_DATA_FLAG_5705_10_100},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
++ TG3_DRV_DATA_FLAG_5705_10_100},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
++ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
++ PCI_VENDOR_ID_LENOVO,
++ TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
++ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
++ PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
++ PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
++ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
++ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
++ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
++ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
++ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
++ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
++ {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
++ {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
++ {}
++};
++
++MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
++
++static const struct {
++ const char string[ETH_GSTRING_LEN];
++} ethtool_stats_keys[] = {
++ { "rx_octets" },
++ { "rx_fragments" },
++ { "rx_ucast_packets" },
++ { "rx_mcast_packets" },
++ { "rx_bcast_packets" },
++ { "rx_fcs_errors" },
++ { "rx_align_errors" },
++ { "rx_xon_pause_rcvd" },
++ { "rx_xoff_pause_rcvd" },
++ { "rx_mac_ctrl_rcvd" },
++ { "rx_xoff_entered" },
++ { "rx_frame_too_long_errors" },
++ { "rx_jabbers" },
++ { "rx_undersize_packets" },
++ { "rx_in_length_errors" },
++ { "rx_out_length_errors" },
++ { "rx_64_or_less_octet_packets" },
++ { "rx_65_to_127_octet_packets" },
++ { "rx_128_to_255_octet_packets" },
++ { "rx_256_to_511_octet_packets" },
++ { "rx_512_to_1023_octet_packets" },
++ { "rx_1024_to_1522_octet_packets" },
++ { "rx_1523_to_2047_octet_packets" },
++ { "rx_2048_to_4095_octet_packets" },
++ { "rx_4096_to_8191_octet_packets" },
++ { "rx_8192_to_9022_octet_packets" },
++
++ { "tx_octets" },
++ { "tx_collisions" },
++
++ { "tx_xon_sent" },
++ { "tx_xoff_sent" },
++ { "tx_flow_control" },
++ { "tx_mac_errors" },
++ { "tx_single_collisions" },
++ { "tx_mult_collisions" },
++ { "tx_deferred" },
++ { "tx_excessive_collisions" },
++ { "tx_late_collisions" },
++ { "tx_collide_2times" },
++ { "tx_collide_3times" },
++ { "tx_collide_4times" },
++ { "tx_collide_5times" },
++ { "tx_collide_6times" },
++ { "tx_collide_7times" },
++ { "tx_collide_8times" },
++ { "tx_collide_9times" },
++ { "tx_collide_10times" },
++ { "tx_collide_11times" },
++ { "tx_collide_12times" },
++ { "tx_collide_13times" },
++ { "tx_collide_14times" },
++ { "tx_collide_15times" },
++ { "tx_ucast_packets" },
++ { "tx_mcast_packets" },
++ { "tx_bcast_packets" },
++ { "tx_carrier_sense_errors" },
++ { "tx_discards" },
++ { "tx_errors" },
++
++ { "dma_writeq_full" },
++ { "dma_write_prioq_full" },
++ { "rxbds_empty" },
++ { "rx_discards" },
++ { "rx_errors" },
++ { "rx_threshold_hit" },
++
++ { "dma_readq_full" },
++ { "dma_read_prioq_full" },
++ { "tx_comp_queue_full" },
++
++ { "ring_set_send_prod_index" },
++ { "ring_status_update" },
++ { "nic_irqs" },
++ { "nic_avoided_irqs" },
++ { "nic_tx_threshold_hit" },
++
++ { "mbuf_lwm_thresh_hit" },
++};
++
++#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
++#define TG3_NVRAM_TEST 0
++#define TG3_LINK_TEST 1
++#define TG3_REGISTER_TEST 2
++#define TG3_MEMORY_TEST 3
++#define TG3_MAC_LOOPB_TEST 4
++#define TG3_PHY_LOOPB_TEST 5
++#define TG3_EXT_LOOPB_TEST 6
++#define TG3_INTERRUPT_TEST 7
++
++
++static const struct {
++ const char string[ETH_GSTRING_LEN];
++} ethtool_test_keys[] = {
++ [TG3_NVRAM_TEST] = { "nvram test (online) " },
++ [TG3_LINK_TEST] = { "link test (online) " },
++ [TG3_REGISTER_TEST] = { "register test (offline)" },
++ [TG3_MEMORY_TEST] = { "memory test (offline)" },
++ [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
++ [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
++ [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
++ [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
++};
++
++#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
++
++
++static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
++{
++ writel(val, tp->regs + off);
++}
++
++static u32 tg3_read32(struct tg3 *tp, u32 off)
++{
++ return readl(tp->regs + off);
++}
++
++static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
++{
++ writel(val, tp->aperegs + off);
++}
++
++static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
++{
++ return readl(tp->aperegs + off);
++}
++
++static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&tp->indirect_lock, flags);
++ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
++ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
++ spin_unlock_irqrestore(&tp->indirect_lock, flags);
++}
++
++static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
++{
++ writel(val, tp->regs + off);
++ readl(tp->regs + off);
++}
++
++static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
++{
++ unsigned long flags;
++ u32 val;
++
++ spin_lock_irqsave(&tp->indirect_lock, flags);
++ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
++ pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
++ spin_unlock_irqrestore(&tp->indirect_lock, flags);
++ return val;
++}
++
++static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
++{
++ unsigned long flags;
++
++ if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
++ pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
++ TG3_64BIT_REG_LOW, val);
++ return;
++ }
++ if (off == TG3_RX_STD_PROD_IDX_REG) {
++ pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
++ TG3_64BIT_REG_LOW, val);
++ return;
++ }
++
++ spin_lock_irqsave(&tp->indirect_lock, flags);
++ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
++ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
++ spin_unlock_irqrestore(&tp->indirect_lock, flags);
++
++ /* In indirect mode when disabling interrupts, we also need
++ * to clear the interrupt bit in the GRC local ctrl register.
++ */
++ if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
++ (val == 0x1)) {
++ pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
++ tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
++ }
++}
++
++static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
++{
++ unsigned long flags;
++ u32 val;
++
++ spin_lock_irqsave(&tp->indirect_lock, flags);
++ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
++ pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
++ spin_unlock_irqrestore(&tp->indirect_lock, flags);
++ return val;
++}
++
++/* usec_wait specifies the wait time in usec when writing to certain registers
++ * where it is unsafe to read back the register without some delay.
++ * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
++ * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
++ */
++static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
++{
++ if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
++ /* Non-posted methods */
++ tp->write32(tp, off, val);
++ else {
++ /* Posted method */
++ tg3_write32(tp, off, val);
++ if (usec_wait)
++ udelay(usec_wait);
++ tp->read32(tp, off);
++ }
++ /* Wait again after the read for the posted method to guarantee that
++ * the wait time is met.
++ */
++ if (usec_wait)
++ udelay(usec_wait);
++}
++
++static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
++{
++ tp->write32_mbox(tp, off, val);
++ if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
++ (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
++ !tg3_flag(tp, ICH_WORKAROUND)))
++ tp->read32_mbox(tp, off);
++}
++
++static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
++{
++ void __iomem *mbox = tp->regs + off;
++ writel(val, mbox);
++ if (tg3_flag(tp, TXD_MBOX_HWBUG))
++ writel(val, mbox);
++ if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
++ tg3_flag(tp, FLUSH_POSTED_WRITES))
++ readl(mbox);
++}
++
++static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
++{
++ return readl(tp->regs + off + GRCMBOX_BASE);
++}
++
++static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
++{
++ writel(val, tp->regs + off + GRCMBOX_BASE);
++}
++
++#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
++#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
++#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
++#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
++#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
++
++#define tw32(reg, val) tp->write32(tp, reg, val)
++#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
++#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
++#define tr32(reg) tp->read32(tp, reg)
++
++static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
++{
++ unsigned long flags;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
++ (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
++ return;
++
++ spin_lock_irqsave(&tp->indirect_lock, flags);
++ if (tg3_flag(tp, SRAM_USE_CONFIG)) {
++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
++
++ /* Always leave this as zero. */
++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
++ } else {
++ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
++ tw32_f(TG3PCI_MEM_WIN_DATA, val);
++
++ /* Always leave this as zero. */
++ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
++ }
++ spin_unlock_irqrestore(&tp->indirect_lock, flags);
++}
++
++static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
++{
++ unsigned long flags;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
++ (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
++ *val = 0;
++ return;
++ }
++
++ spin_lock_irqsave(&tp->indirect_lock, flags);
++ if (tg3_flag(tp, SRAM_USE_CONFIG)) {
++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
++ pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
++
++ /* Always leave this as zero. */
++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
++ } else {
++ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
++ *val = tr32(TG3PCI_MEM_WIN_DATA);
++
++ /* Always leave this as zero. */
++ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
++ }
++ spin_unlock_irqrestore(&tp->indirect_lock, flags);
++}
++
++static void tg3_ape_lock_init(struct tg3 *tp)
++{
++ int i;
++ u32 regbase, bit;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5761)
++ regbase = TG3_APE_LOCK_GRANT;
++ else
++ regbase = TG3_APE_PER_LOCK_GRANT;
++
++ /* Make sure the driver hasn't any stale locks. */
++ for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
++ switch (i) {
++ case TG3_APE_LOCK_PHY0:
++ case TG3_APE_LOCK_PHY1:
++ case TG3_APE_LOCK_PHY2:
++ case TG3_APE_LOCK_PHY3:
++ bit = APE_LOCK_GRANT_DRIVER;
++ break;
++ default:
++ if (!tp->pci_fn)
++ bit = APE_LOCK_GRANT_DRIVER;
++ else
++ bit = 1 << tp->pci_fn;
++ }
++ tg3_ape_write32(tp, regbase + 4 * i, bit);
++ }
++
++}
++
++static int tg3_ape_lock(struct tg3 *tp, int locknum)
++{
++ int i, off;
++ int ret = 0;
++ u32 status, req, gnt, bit;
++
++ if (!tg3_flag(tp, ENABLE_APE))
++ return 0;
++
++ switch (locknum) {
++ case TG3_APE_LOCK_GPIO:
++ if (tg3_asic_rev(tp) == ASIC_REV_5761)
++ return 0;
++ case TG3_APE_LOCK_GRC:
++ case TG3_APE_LOCK_MEM:
++ if (!tp->pci_fn)
++ bit = APE_LOCK_REQ_DRIVER;
++ else
++ bit = 1 << tp->pci_fn;
++ break;
++ case TG3_APE_LOCK_PHY0:
++ case TG3_APE_LOCK_PHY1:
++ case TG3_APE_LOCK_PHY2:
++ case TG3_APE_LOCK_PHY3:
++ bit = APE_LOCK_REQ_DRIVER;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5761) {
++ req = TG3_APE_LOCK_REQ;
++ gnt = TG3_APE_LOCK_GRANT;
++ } else {
++ req = TG3_APE_PER_LOCK_REQ;
++ gnt = TG3_APE_PER_LOCK_GRANT;
++ }
++
++ off = 4 * locknum;
++
++ tg3_ape_write32(tp, req + off, bit);
++
++ /* Wait for up to 1 millisecond to acquire lock. */
++ for (i = 0; i < 100; i++) {
++ status = tg3_ape_read32(tp, gnt + off);
++ if (status == bit)
++ break;
++ if (pci_channel_offline(tp->pdev))
++ break;
++
++ udelay(10);
++ }
++
++ if (status != bit) {
++ /* Revoke the lock request. */
++ tg3_ape_write32(tp, gnt + off, bit);
++ ret = -EBUSY;
++ }
++
++ return ret;
++}
++
++static void tg3_ape_unlock(struct tg3 *tp, int locknum)
++{
++ u32 gnt, bit;
++
++ if (!tg3_flag(tp, ENABLE_APE))
++ return;
++
++ switch (locknum) {
++ case TG3_APE_LOCK_GPIO:
++ if (tg3_asic_rev(tp) == ASIC_REV_5761)
++ return;
++ case TG3_APE_LOCK_GRC:
++ case TG3_APE_LOCK_MEM:
++ if (!tp->pci_fn)
++ bit = APE_LOCK_GRANT_DRIVER;
++ else
++ bit = 1 << tp->pci_fn;
++ break;
++ case TG3_APE_LOCK_PHY0:
++ case TG3_APE_LOCK_PHY1:
++ case TG3_APE_LOCK_PHY2:
++ case TG3_APE_LOCK_PHY3:
++ bit = APE_LOCK_GRANT_DRIVER;
++ break;
++ default:
++ return;
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5761)
++ gnt = TG3_APE_LOCK_GRANT;
++ else
++ gnt = TG3_APE_PER_LOCK_GRANT;
++
++ tg3_ape_write32(tp, gnt + 4 * locknum, bit);
++}
++
++static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
++{
++ u32 apedata;
++
++ while (timeout_us) {
++ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
++ return -EBUSY;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
++ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
++ break;
++
++ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
++
++ udelay(10);
++ timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
++ }
++
++ return timeout_us ? 0 : -EBUSY;
++}
++
++static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
++{
++ u32 i, apedata;
++
++ for (i = 0; i < timeout_us / 10; i++) {
++ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
++
++ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
++ break;
++
++ udelay(10);
++ }
++
++ return i == timeout_us / 10;
++}
++
++static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
++ u32 len)
++{
++ int err;
++ u32 i, bufoff, msgoff, maxlen, apedata;
++
++ if (!tg3_flag(tp, APE_HAS_NCSI))
++ return 0;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
++ if (apedata != APE_SEG_SIG_MAGIC)
++ return -ENODEV;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
++ if (!(apedata & APE_FW_STATUS_READY))
++ return -EAGAIN;
++
++ bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
++ TG3_APE_SHMEM_BASE;
++ msgoff = bufoff + 2 * sizeof(u32);
++ maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
++
++ while (len) {
++ u32 length;
++
++ /* Cap xfer sizes to scratchpad limits. */
++ length = (len > maxlen) ? maxlen : len;
++ len -= length;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
++ if (!(apedata & APE_FW_STATUS_READY))
++ return -EAGAIN;
++
++ /* Wait for up to 1 msec for APE to service previous event. */
++ err = tg3_ape_event_lock(tp, 1000);
++ if (err)
++ return err;
++
++ apedata = APE_EVENT_STATUS_DRIVER_EVNT |
++ APE_EVENT_STATUS_SCRTCHPD_READ |
++ APE_EVENT_STATUS_EVENT_PENDING;
++ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
++
++ tg3_ape_write32(tp, bufoff, base_off);
++ tg3_ape_write32(tp, bufoff + sizeof(u32), length);
++
++ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
++ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
++
++ base_off += length;
++
++ if (tg3_ape_wait_for_event(tp, 30000))
++ return -EAGAIN;
++
++ for (i = 0; length; i += 4, length -= 4) {
++ u32 val = tg3_ape_read32(tp, msgoff + i);
++ memcpy(data, &val, sizeof(u32));
++ data++;
++ }
++ }
++
++ return 0;
++}
++
++static int tg3_ape_send_event(struct tg3 *tp, u32 event)
++{
++ int err;
++ u32 apedata;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
++ if (apedata != APE_SEG_SIG_MAGIC)
++ return -EAGAIN;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
++ if (!(apedata & APE_FW_STATUS_READY))
++ return -EAGAIN;
++
++ /* Wait for up to 1 millisecond for APE to service previous event. */
++ err = tg3_ape_event_lock(tp, 1000);
++ if (err)
++ return err;
++
++ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
++ event | APE_EVENT_STATUS_EVENT_PENDING);
++
++ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
++ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
++
++ return 0;
++}
++
++static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
++{
++ u32 event;
++ u32 apedata;
++
++ if (!tg3_flag(tp, ENABLE_APE))
++ return;
++
++ switch (kind) {
++ case RESET_KIND_INIT:
++ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
++ APE_HOST_SEG_SIG_MAGIC);
++ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
++ APE_HOST_SEG_LEN_MAGIC);
++ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
++ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
++ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
++ APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
++ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
++ APE_HOST_BEHAV_NO_PHYLOCK);
++ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
++ TG3_APE_HOST_DRVR_STATE_START);
++
++ event = APE_EVENT_STATUS_STATE_START;
++ break;
++ case RESET_KIND_SHUTDOWN:
++ /* With the interface we are currently using,
++ * APE does not track driver state. Wiping
++ * out the HOST SEGMENT SIGNATURE forces
++ * the APE to assume OS absent status.
++ */
++ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
++
++ if (device_may_wakeup(&tp->pdev->dev) &&
++ tg3_flag(tp, WOL_ENABLE)) {
++ tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
++ TG3_APE_HOST_WOL_SPEED_AUTO);
++ apedata = TG3_APE_HOST_DRVR_STATE_WOL;
++ } else
++ apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
++
++ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
++
++ event = APE_EVENT_STATUS_STATE_UNLOAD;
++ break;
++ default:
++ return;
++ }
++
++ event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
++
++ tg3_ape_send_event(tp, event);
++}
++
++static void tg3_disable_ints(struct tg3 *tp)
++{
++ int i;
++
++ tw32(TG3PCI_MISC_HOST_CTRL,
++ (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
++ for (i = 0; i < tp->irq_max; i++)
++ tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
++}
++
++static void tg3_enable_ints(struct tg3 *tp)
++{
++ int i;
++
++ tp->irq_sync = 0;
++ wmb();
++
++ tw32(TG3PCI_MISC_HOST_CTRL,
++ (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
++
++ tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
++ if (tg3_flag(tp, 1SHOT_MSI))
++ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
++
++ tp->coal_now |= tnapi->coal_now;
++ }
++
++ /* Force an initial interrupt */
++ if (!tg3_flag(tp, TAGGED_STATUS) &&
++ (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
++ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
++ else
++ tw32(HOSTCC_MODE, tp->coal_now);
++
++ tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
++}
++
++static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
++{
++ struct tg3 *tp = tnapi->tp;
++ struct tg3_hw_status *sblk = tnapi->hw_status;
++ unsigned int work_exists = 0;
++
++ /* check for phy events */
++ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
++ if (sblk->status & SD_STATUS_LINK_CHG)
++ work_exists = 1;
++ }
++
++ /* check for TX work to do */
++ if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
++ work_exists = 1;
++
++ /* check for RX work to do */
++ if (tnapi->rx_rcb_prod_idx &&
++ *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
++ work_exists = 1;
++
++ return work_exists;
++}
++
++/* tg3_int_reenable
++ * similar to tg3_enable_ints, but it accurately determines whether there
++ * is new work pending and can return without flushing the PIO write
++ * which reenables interrupts
++ */
++static void tg3_int_reenable(struct tg3_napi *tnapi)
++{
++ struct tg3 *tp = tnapi->tp;
++
++ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
++ mmiowb();
++
++ /* When doing tagged status, this work check is unnecessary.
++ * The last_tag we write above tells the chip which piece of
++ * work we've completed.
++ */
++ if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
++ tw32(HOSTCC_MODE, tp->coalesce_mode |
++ HOSTCC_MODE_ENABLE | tnapi->coal_now);
++}
++
++static void tg3_switch_clocks(struct tg3 *tp)
++{
++ u32 clock_ctrl;
++ u32 orig_clock_ctrl;
++
++ if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
++ return;
++
++ clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
++
++ orig_clock_ctrl = clock_ctrl;
++ clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
++ CLOCK_CTRL_CLKRUN_OENABLE |
++ 0x1f);
++ tp->pci_clock_ctrl = clock_ctrl;
++
++ if (tg3_flag(tp, 5705_PLUS)) {
++ if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
++ tw32_wait_f(TG3PCI_CLOCK_CTRL,
++ clock_ctrl | CLOCK_CTRL_625_CORE, 40);
++ }
++ } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
++ tw32_wait_f(TG3PCI_CLOCK_CTRL,
++ clock_ctrl |
++ (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
++ 40);
++ tw32_wait_f(TG3PCI_CLOCK_CTRL,
++ clock_ctrl | (CLOCK_CTRL_ALTCLK),
++ 40);
++ }
++ tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
++}
++
++#define PHY_BUSY_LOOPS 5000
++
++static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
++ u32 *val)
++{
++ u32 frame_val;
++ unsigned int loops;
++ int ret;
++
++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
++ tw32_f(MAC_MI_MODE,
++ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
++ udelay(80);
++ }
++
++ tg3_ape_lock(tp, tp->phy_ape_lock);
++
++ *val = 0x0;
++
++ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
++ MI_COM_PHY_ADDR_MASK);
++ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
++ MI_COM_REG_ADDR_MASK);
++ frame_val |= (MI_COM_CMD_READ | MI_COM_START);
++
++ tw32_f(MAC_MI_COM, frame_val);
++
++ loops = PHY_BUSY_LOOPS;
++ while (loops != 0) {
++ udelay(10);
++ frame_val = tr32(MAC_MI_COM);
++
++ if ((frame_val & MI_COM_BUSY) == 0) {
++ udelay(5);
++ frame_val = tr32(MAC_MI_COM);
++ break;
++ }
++ loops -= 1;
++ }
++
++ ret = -EBUSY;
++ if (loops != 0) {
++ *val = frame_val & MI_COM_DATA_MASK;
++ ret = 0;
++ }
++
++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
++ tw32_f(MAC_MI_MODE, tp->mi_mode);
++ udelay(80);
++ }
++
++ tg3_ape_unlock(tp, tp->phy_ape_lock);
++
++ return ret;
++}
++
++static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
++{
++ return __tg3_readphy(tp, tp->phy_addr, reg, val);
++}
++
++static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
++ u32 val)
++{
++ u32 frame_val;
++ unsigned int loops;
++ int ret;
++
++ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
++ (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
++ return 0;
++
++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
++ tw32_f(MAC_MI_MODE,
++ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
++ udelay(80);
++ }
++
++ tg3_ape_lock(tp, tp->phy_ape_lock);
++
++ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
++ MI_COM_PHY_ADDR_MASK);
++ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
++ MI_COM_REG_ADDR_MASK);
++ frame_val |= (val & MI_COM_DATA_MASK);
++ frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
++
++ tw32_f(MAC_MI_COM, frame_val);
++
++ loops = PHY_BUSY_LOOPS;
++ while (loops != 0) {
++ udelay(10);
++ frame_val = tr32(MAC_MI_COM);
++ if ((frame_val & MI_COM_BUSY) == 0) {
++ udelay(5);
++ frame_val = tr32(MAC_MI_COM);
++ break;
++ }
++ loops -= 1;
++ }
++
++ ret = -EBUSY;
++ if (loops != 0)
++ ret = 0;
++
++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
++ tw32_f(MAC_MI_MODE, tp->mi_mode);
++ udelay(80);
++ }
++
++ tg3_ape_unlock(tp, tp->phy_ape_lock);
++
++ return ret;
++}
++
++static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
++{
++ return __tg3_writephy(tp, tp->phy_addr, reg, val);
++}
++
++static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
++{
++ int err;
++
++ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
++ if (err)
++ goto done;
++
++ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
++ if (err)
++ goto done;
++
++ err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
++ MII_TG3_MMD_CTRL_DATA_NOINC | devad);
++ if (err)
++ goto done;
++
++ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
++
++done:
++ return err;
++}
++
++static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
++{
++ int err;
++
++ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
++ if (err)
++ goto done;
++
++ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
++ if (err)
++ goto done;
++
++ err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
++ MII_TG3_MMD_CTRL_DATA_NOINC | devad);
++ if (err)
++ goto done;
++
++ err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
++
++done:
++ return err;
++}
++
++static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
++{
++ int err;
++
++ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
++ if (!err)
++ err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
++
++ return err;
++}
++
++static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
++{
++ int err;
++
++ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
++ if (!err)
++ err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
++
++ return err;
++}
++
++static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
++{
++ int err;
++
++ err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
++ (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
++ MII_TG3_AUXCTL_SHDWSEL_MISC);
++ if (!err)
++ err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
++
++ return err;
++}
++
++static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
++{
++ if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
++ set |= MII_TG3_AUXCTL_MISC_WREN;
++
++ return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
++}
++
++static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
++{
++ u32 val;
++ int err;
++
++ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
++
++ if (err)
++ return err;
++
++ if (enable)
++ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++ else
++ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++
++ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
++ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
++
++ return err;
++}
++
++static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
++{
++ return tg3_writephy(tp, MII_TG3_MISC_SHDW,
++ reg | val | MII_TG3_MISC_SHDW_WREN);
++}
++
++static int tg3_bmcr_reset(struct tg3 *tp)
++{
++ u32 phy_control;
++ int limit, err;
++
++ /* OK, reset it, and poll the BMCR_RESET bit until it
++ * clears or we time out.
++ */
++ phy_control = BMCR_RESET;
++ err = tg3_writephy(tp, MII_BMCR, phy_control);
++ if (err != 0)
++ return -EBUSY;
++
++ limit = 5000;
++ while (limit--) {
++ err = tg3_readphy(tp, MII_BMCR, &phy_control);
++ if (err != 0)
++ return -EBUSY;
++
++ if ((phy_control & BMCR_RESET) == 0) {
++ udelay(40);
++ break;
++ }
++ udelay(10);
++ }
++ if (limit < 0)
++ return -EBUSY;
++
++ return 0;
++}
++
++static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
++{
++ struct tg3 *tp = bp->priv;
++ u32 val;
++
++ spin_lock_bh(&tp->lock);
++
++ if (__tg3_readphy(tp, mii_id, reg, &val))
++ val = -EIO;
++
++ spin_unlock_bh(&tp->lock);
++
++ return val;
++}
++
++static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
++{
++ struct tg3 *tp = bp->priv;
++ u32 ret = 0;
++
++ spin_lock_bh(&tp->lock);
++
++ if (__tg3_writephy(tp, mii_id, reg, val))
++ ret = -EIO;
++
++ spin_unlock_bh(&tp->lock);
++
++ return ret;
++}
++
++static int tg3_mdio_reset(struct mii_bus *bp)
++{
++ return 0;
++}
++
++static void tg3_mdio_config_5785(struct tg3 *tp)
++{
++ u32 val;
++ struct phy_device *phydev;
++
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++ switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
++ case PHY_ID_BCM50610:
++ case PHY_ID_BCM50610M:
++ val = MAC_PHYCFG2_50610_LED_MODES;
++ break;
++ case PHY_ID_BCMAC131:
++ val = MAC_PHYCFG2_AC131_LED_MODES;
++ break;
++ case PHY_ID_RTL8211C:
++ val = MAC_PHYCFG2_RTL8211C_LED_MODES;
++ break;
++ case PHY_ID_RTL8201E:
++ val = MAC_PHYCFG2_RTL8201E_LED_MODES;
++ break;
++ default:
++ return;
++ }
++
++ if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
++ tw32(MAC_PHYCFG2, val);
++
++ val = tr32(MAC_PHYCFG1);
++ val &= ~(MAC_PHYCFG1_RGMII_INT |
++ MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
++ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
++ tw32(MAC_PHYCFG1, val);
++
++ return;
++ }
++
++ if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
++ val |= MAC_PHYCFG2_EMODE_MASK_MASK |
++ MAC_PHYCFG2_FMODE_MASK_MASK |
++ MAC_PHYCFG2_GMODE_MASK_MASK |
++ MAC_PHYCFG2_ACT_MASK_MASK |
++ MAC_PHYCFG2_QUAL_MASK_MASK |
++ MAC_PHYCFG2_INBAND_ENABLE;
++
++ tw32(MAC_PHYCFG2, val);
++
++ val = tr32(MAC_PHYCFG1);
++ val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
++ MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
++ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
++ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
++ val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
++ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
++ val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
++ }
++ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
++ MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
++ tw32(MAC_PHYCFG1, val);
++
++ val = tr32(MAC_EXT_RGMII_MODE);
++ val &= ~(MAC_RGMII_MODE_RX_INT_B |
++ MAC_RGMII_MODE_RX_QUALITY |
++ MAC_RGMII_MODE_RX_ACTIVITY |
++ MAC_RGMII_MODE_RX_ENG_DET |
++ MAC_RGMII_MODE_TX_ENABLE |
++ MAC_RGMII_MODE_TX_LOWPWR |
++ MAC_RGMII_MODE_TX_RESET);
++ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
++ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
++ val |= MAC_RGMII_MODE_RX_INT_B |
++ MAC_RGMII_MODE_RX_QUALITY |
++ MAC_RGMII_MODE_RX_ACTIVITY |
++ MAC_RGMII_MODE_RX_ENG_DET;
++ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
++ val |= MAC_RGMII_MODE_TX_ENABLE |
++ MAC_RGMII_MODE_TX_LOWPWR |
++ MAC_RGMII_MODE_TX_RESET;
++ }
++ tw32(MAC_EXT_RGMII_MODE, val);
++}
++
++static void tg3_mdio_start(struct tg3 *tp)
++{
++ tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
++ tw32_f(MAC_MI_MODE, tp->mi_mode);
++ udelay(80);
++
++ if (tg3_flag(tp, MDIOBUS_INITED) &&
++ tg3_asic_rev(tp) == ASIC_REV_5785)
++ tg3_mdio_config_5785(tp);
++}
++
++static int tg3_mdio_init(struct tg3 *tp)
++{
++ int i;
++ u32 reg;
++ struct phy_device *phydev;
++
++ if (tg3_flag(tp, 5717_PLUS)) {
++ u32 is_serdes;
++
++ tp->phy_addr = tp->pci_fn + 1;
++
++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
++ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
++ else
++ is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
++ TG3_CPMU_PHY_STRAP_IS_SERDES;
++ if (is_serdes)
++ tp->phy_addr += 7;
++ } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
++ int addr;
++
++ addr = ssb_gige_get_phyaddr(tp->pdev);
++ if (addr < 0)
++ return addr;
++ tp->phy_addr = addr;
++ } else
++ tp->phy_addr = TG3_PHY_MII_ADDR;
++
++ tg3_mdio_start(tp);
++
++ if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
++ return 0;
++
++ tp->mdio_bus = mdiobus_alloc();
++ if (tp->mdio_bus == NULL)
++ return -ENOMEM;
++
++ tp->mdio_bus->name = "tg3 mdio bus";
++ snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
++ (tp->pdev->bus->number << 8) | tp->pdev->devfn);
++ tp->mdio_bus->priv = tp;
++ tp->mdio_bus->parent = &tp->pdev->dev;
++ tp->mdio_bus->read = &tg3_mdio_read;
++ tp->mdio_bus->write = &tg3_mdio_write;
++ tp->mdio_bus->reset = &tg3_mdio_reset;
++ tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
++ tp->mdio_bus->irq = &tp->mdio_irq[0];
++
++ for (i = 0; i < PHY_MAX_ADDR; i++)
++ tp->mdio_bus->irq[i] = PHY_POLL;
++
++ /* The bus registration will look for all the PHYs on the mdio bus.
++ * Unfortunately, it does not ensure the PHY is powered up before
++ * accessing the PHY ID registers. A chip reset is the
++ * quickest way to bring the device back to an operational state..
++ */
++ if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
++ tg3_bmcr_reset(tp);
++
++ i = mdiobus_register(tp->mdio_bus);
++ if (i) {
++ dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
++ mdiobus_free(tp->mdio_bus);
++ return i;
++ }
++
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++
++ if (!phydev || !phydev->drv) {
++ dev_warn(&tp->pdev->dev, "No PHY devices\n");
++ mdiobus_unregister(tp->mdio_bus);
++ mdiobus_free(tp->mdio_bus);
++ return -ENODEV;
++ }
++
++ switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
++ case PHY_ID_BCM57780:
++ phydev->interface = PHY_INTERFACE_MODE_GMII;
++ phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
++ break;
++ case PHY_ID_BCM50610:
++ case PHY_ID_BCM50610M:
++ phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
++ PHY_BRCM_RX_REFCLK_UNUSED |
++ PHY_BRCM_DIS_TXCRXC_NOENRGY |
++ PHY_BRCM_AUTO_PWRDWN_ENABLE;
++ if (tg3_flag(tp, RGMII_INBAND_DISABLE))
++ phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
++ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
++ phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
++ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
++ phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
++ /* fallthru */
++ case PHY_ID_RTL8211C:
++ phydev->interface = PHY_INTERFACE_MODE_RGMII;
++ break;
++ case PHY_ID_RTL8201E:
++ case PHY_ID_BCMAC131:
++ phydev->interface = PHY_INTERFACE_MODE_MII;
++ phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
++ tp->phy_flags |= TG3_PHYFLG_IS_FET;
++ break;
++ }
++
++ tg3_flag_set(tp, MDIOBUS_INITED);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5785)
++ tg3_mdio_config_5785(tp);
++
++ return 0;
++}
++
++static void tg3_mdio_fini(struct tg3 *tp)
++{
++ if (tg3_flag(tp, MDIOBUS_INITED)) {
++ tg3_flag_clear(tp, MDIOBUS_INITED);
++ mdiobus_unregister(tp->mdio_bus);
++ mdiobus_free(tp->mdio_bus);
++ }
++}
++
++/* tp->lock is held. */
++static inline void tg3_generate_fw_event(struct tg3 *tp)
++{
++ u32 val;
++
++ val = tr32(GRC_RX_CPU_EVENT);
++ val |= GRC_RX_CPU_DRIVER_EVENT;
++ tw32_f(GRC_RX_CPU_EVENT, val);
++
++ tp->last_event_jiffies = jiffies;
++}
++
++#define TG3_FW_EVENT_TIMEOUT_USEC 2500
++
++/* tp->lock is held. */
++static void tg3_wait_for_event_ack(struct tg3 *tp)
++{
++ int i;
++ unsigned int delay_cnt;
++ long time_remain;
++
++ /* If enough time has passed, no wait is necessary. */
++ time_remain = (long)(tp->last_event_jiffies + 1 +
++ usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
++ (long)jiffies;
++ if (time_remain < 0)
++ return;
++
++ /* Check if we can shorten the wait time. */
++ delay_cnt = jiffies_to_usecs(time_remain);
++ if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
++ delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
++ delay_cnt = (delay_cnt >> 3) + 1;
++
++ for (i = 0; i < delay_cnt; i++) {
++ if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
++ break;
++ if (pci_channel_offline(tp->pdev))
++ break;
++
++ udelay(8);
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
++{
++ u32 reg, val;
++
++ val = 0;
++ if (!tg3_readphy(tp, MII_BMCR, &reg))
++ val = reg << 16;
++ if (!tg3_readphy(tp, MII_BMSR, &reg))
++ val |= (reg & 0xffff);
++ *data++ = val;
++
++ val = 0;
++ if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
++ val = reg << 16;
++ if (!tg3_readphy(tp, MII_LPA, &reg))
++ val |= (reg & 0xffff);
++ *data++ = val;
++
++ val = 0;
++ if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
++ if (!tg3_readphy(tp, MII_CTRL1000, &reg))
++ val = reg << 16;
++ if (!tg3_readphy(tp, MII_STAT1000, &reg))
++ val |= (reg & 0xffff);
++ }
++ *data++ = val;
++
++ if (!tg3_readphy(tp, MII_PHYADDR, &reg))
++ val = reg << 16;
++ else
++ val = 0;
++ *data++ = val;
++}
++
++/* tp->lock is held. */
++static void tg3_ump_link_report(struct tg3 *tp)
++{
++ u32 data[4];
++
++ if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
++ return;
++
++ tg3_phy_gather_ump_data(tp, data);
++
++ tg3_wait_for_event_ack(tp);
++
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
++
++ tg3_generate_fw_event(tp);
++}
++
++/* tp->lock is held. */
++static void tg3_stop_fw(struct tg3 *tp)
++{
++ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
++ /* Wait for RX cpu to ACK the previous event. */
++ tg3_wait_for_event_ack(tp);
++
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
++
++ tg3_generate_fw_event(tp);
++
++ /* Wait for RX cpu to ACK this event. */
++ tg3_wait_for_event_ack(tp);
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
++{
++ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
++ NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
++
++ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
++ switch (kind) {
++ case RESET_KIND_INIT:
++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
++ DRV_STATE_START);
++ break;
++
++ case RESET_KIND_SHUTDOWN:
++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
++ DRV_STATE_UNLOAD);
++ break;
++
++ case RESET_KIND_SUSPEND:
++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
++ DRV_STATE_SUSPEND);
++ break;
++
++ default:
++ break;
++ }
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
++{
++ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
++ switch (kind) {
++ case RESET_KIND_INIT:
++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
++ DRV_STATE_START_DONE);
++ break;
++
++ case RESET_KIND_SHUTDOWN:
++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
++ DRV_STATE_UNLOAD_DONE);
++ break;
++
++ default:
++ break;
++ }
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
++{
++ if (tg3_flag(tp, ENABLE_ASF)) {
++ switch (kind) {
++ case RESET_KIND_INIT:
++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
++ DRV_STATE_START);
++ break;
++
++ case RESET_KIND_SHUTDOWN:
++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
++ DRV_STATE_UNLOAD);
++ break;
++
++ case RESET_KIND_SUSPEND:
++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
++ DRV_STATE_SUSPEND);
++ break;
++
++ default:
++ break;
++ }
++ }
++}
++
++static int tg3_poll_fw(struct tg3 *tp)
++{
++ int i;
++ u32 val;
++
++ if (tg3_flag(tp, NO_FWARE_REPORTED))
++ return 0;
++
++ if (tg3_flag(tp, IS_SSB_CORE)) {
++ /* We don't use firmware. */
++ return 0;
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ /* Wait up to 20ms for init done. */
++ for (i = 0; i < 200; i++) {
++ if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
++ return 0;
++ if (pci_channel_offline(tp->pdev))
++ return -ENODEV;
++
++ udelay(100);
++ }
++ return -ENODEV;
++ }
++
++ /* Wait for firmware initialization to complete. */
++ for (i = 0; i < 100000; i++) {
++ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
++ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
++ break;
++ if (pci_channel_offline(tp->pdev)) {
++ if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
++ tg3_flag_set(tp, NO_FWARE_REPORTED);
++ netdev_info(tp->dev, "No firmware running\n");
++ }
++
++ break;
++ }
++
++ udelay(10);
++ }
++
++ /* Chip might not be fitted with firmware. Some Sun onboard
++ * parts are configured like that. So don't signal the timeout
++ * of the above loop as an error, but do report the lack of
++ * running firmware once.
++ */
++ if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
++ tg3_flag_set(tp, NO_FWARE_REPORTED);
++
++ netdev_info(tp->dev, "No firmware running\n");
++ }
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
++ /* The 57765 A0 needs a little more
++ * time to do some important work.
++ */
++ mdelay(10);
++ }
++
++ return 0;
++}
++
++static void tg3_link_report(struct tg3 *tp)
++{
++ if (!netif_carrier_ok(tp->dev)) {
++ netif_info(tp, link, tp->dev, "Link is down\n");
++ tg3_ump_link_report(tp);
++ } else if (netif_msg_link(tp)) {
++ netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
++ (tp->link_config.active_speed == SPEED_1000 ?
++ 1000 :
++ (tp->link_config.active_speed == SPEED_100 ?
++ 100 : 10)),
++ (tp->link_config.active_duplex == DUPLEX_FULL ?
++ "full" : "half"));
++
++ netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
++ (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
++ "on" : "off",
++ (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
++ "on" : "off");
++
++ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
++ netdev_info(tp->dev, "EEE is %s\n",
++ tp->setlpicnt ? "enabled" : "disabled");
++
++ tg3_ump_link_report(tp);
++ }
++
++ tp->link_up = netif_carrier_ok(tp->dev);
++}
++
++static u32 tg3_decode_flowctrl_1000T(u32 adv)
++{
++ u32 flowctrl = 0;
++
++ if (adv & ADVERTISE_PAUSE_CAP) {
++ flowctrl |= FLOW_CTRL_RX;
++ if (!(adv & ADVERTISE_PAUSE_ASYM))
++ flowctrl |= FLOW_CTRL_TX;
++ } else if (adv & ADVERTISE_PAUSE_ASYM)
++ flowctrl |= FLOW_CTRL_TX;
++
++ return flowctrl;
++}
++
++static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
++{
++ u16 miireg;
++
++ if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
++ miireg = ADVERTISE_1000XPAUSE;
++ else if (flow_ctrl & FLOW_CTRL_TX)
++ miireg = ADVERTISE_1000XPSE_ASYM;
++ else if (flow_ctrl & FLOW_CTRL_RX)
++ miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
++ else
++ miireg = 0;
++
++ return miireg;
++}
++
++static u32 tg3_decode_flowctrl_1000X(u32 adv)
++{
++ u32 flowctrl = 0;
++
++ if (adv & ADVERTISE_1000XPAUSE) {
++ flowctrl |= FLOW_CTRL_RX;
++ if (!(adv & ADVERTISE_1000XPSE_ASYM))
++ flowctrl |= FLOW_CTRL_TX;
++ } else if (adv & ADVERTISE_1000XPSE_ASYM)
++ flowctrl |= FLOW_CTRL_TX;
++
++ return flowctrl;
++}
++
++static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
++{
++ u8 cap = 0;
++
++ if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
++ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
++ } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
++ if (lcladv & ADVERTISE_1000XPAUSE)
++ cap = FLOW_CTRL_RX;
++ if (rmtadv & ADVERTISE_1000XPAUSE)
++ cap = FLOW_CTRL_TX;
++ }
++
++ return cap;
++}
++
++static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
++{
++ u8 autoneg;
++ u8 flowctrl = 0;
++ u32 old_rx_mode = tp->rx_mode;
++ u32 old_tx_mode = tp->tx_mode;
++
++ if (tg3_flag(tp, USE_PHYLIB))
++ autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
++ else
++ autoneg = tp->link_config.autoneg;
++
++ if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
++ flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
++ else
++ flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
++ } else
++ flowctrl = tp->link_config.flowctrl;
++
++ tp->link_config.active_flowctrl = flowctrl;
++
++ if (flowctrl & FLOW_CTRL_RX)
++ tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
++ else
++ tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
++
++ if (old_rx_mode != tp->rx_mode)
++ tw32_f(MAC_RX_MODE, tp->rx_mode);
++
++ if (flowctrl & FLOW_CTRL_TX)
++ tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
++ else
++ tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
++
++ if (old_tx_mode != tp->tx_mode)
++ tw32_f(MAC_TX_MODE, tp->tx_mode);
++}
++
++static void tg3_adjust_link(struct net_device *dev)
++{
++ u8 oldflowctrl, linkmesg = 0;
++ u32 mac_mode, lcl_adv, rmt_adv;
++ struct tg3 *tp = netdev_priv(dev);
++ struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++
++ spin_lock_bh(&tp->lock);
++
++ mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
++ MAC_MODE_HALF_DUPLEX);
++
++ oldflowctrl = tp->link_config.active_flowctrl;
++
++ if (phydev->link) {
++ lcl_adv = 0;
++ rmt_adv = 0;
++
++ if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
++ mac_mode |= MAC_MODE_PORT_MODE_MII;
++ else if (phydev->speed == SPEED_1000 ||
++ tg3_asic_rev(tp) != ASIC_REV_5785)
++ mac_mode |= MAC_MODE_PORT_MODE_GMII;
++ else
++ mac_mode |= MAC_MODE_PORT_MODE_MII;
++
++ if (phydev->duplex == DUPLEX_HALF)
++ mac_mode |= MAC_MODE_HALF_DUPLEX;
++ else {
++ lcl_adv = mii_advertise_flowctrl(
++ tp->link_config.flowctrl);
++
++ if (phydev->pause)
++ rmt_adv = LPA_PAUSE_CAP;
++ if (phydev->asym_pause)
++ rmt_adv |= LPA_PAUSE_ASYM;
++ }
++
++ tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
++ } else
++ mac_mode |= MAC_MODE_PORT_MODE_GMII;
++
++ if (mac_mode != tp->mac_mode) {
++ tp->mac_mode = mac_mode;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5785) {
++ if (phydev->speed == SPEED_10)
++ tw32(MAC_MI_STAT,
++ MAC_MI_STAT_10MBPS_MODE |
++ MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
++ else
++ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
++ }
++
++ if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
++ tw32(MAC_TX_LENGTHS,
++ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
++ (6 << TX_LENGTHS_IPG_SHIFT) |
++ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
++ else
++ tw32(MAC_TX_LENGTHS,
++ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
++ (6 << TX_LENGTHS_IPG_SHIFT) |
++ (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
++
++ if (phydev->link != tp->old_link ||
++ phydev->speed != tp->link_config.active_speed ||
++ phydev->duplex != tp->link_config.active_duplex ||
++ oldflowctrl != tp->link_config.active_flowctrl)
++ linkmesg = 1;
++
++ tp->old_link = phydev->link;
++ tp->link_config.active_speed = phydev->speed;
++ tp->link_config.active_duplex = phydev->duplex;
++
++ spin_unlock_bh(&tp->lock);
++
++ if (linkmesg)
++ tg3_link_report(tp);
++}
++
++static int tg3_phy_init(struct tg3 *tp)
++{
++ struct phy_device *phydev;
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
++ return 0;
++
++ /* Bring the PHY back to a known state. */
++ tg3_bmcr_reset(tp);
++
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++
++ /* Attach the MAC to the PHY. */
++ phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
++ tg3_adjust_link, phydev->interface);
++ if (IS_ERR(phydev)) {
++ dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
++ return PTR_ERR(phydev);
++ }
++
++ /* Mask with MAC supported features. */
++ switch (phydev->interface) {
++ case PHY_INTERFACE_MODE_GMII:
++ case PHY_INTERFACE_MODE_RGMII:
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
++ phydev->supported &= (PHY_GBIT_FEATURES |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++ }
++ /* fallthru */
++ case PHY_INTERFACE_MODE_MII:
++ phydev->supported &= (PHY_BASIC_FEATURES |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++ default:
++ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
++ return -EINVAL;
++ }
++
++ tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
++
++ phydev->advertising = phydev->supported;
++
++ return 0;
++}
++
++static void tg3_phy_start(struct tg3 *tp)
++{
++ struct phy_device *phydev;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
++ return;
++
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
++ tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
++ phydev->speed = tp->link_config.speed;
++ phydev->duplex = tp->link_config.duplex;
++ phydev->autoneg = tp->link_config.autoneg;
++ phydev->advertising = tp->link_config.advertising;
++ }
++
++ phy_start(phydev);
++
++ phy_start_aneg(phydev);
++}
++
++static void tg3_phy_stop(struct tg3 *tp)
++{
++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
++ return;
++
++ phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
++}
++
++static void tg3_phy_fini(struct tg3 *tp)
++{
++ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
++ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
++ tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
++ }
++}
++
++static int tg3_phy_set_extloopbk(struct tg3 *tp)
++{
++ int err;
++ u32 val;
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_FET)
++ return 0;
++
++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
++ /* Cannot do read-modify-write on 5401 */
++ err = tg3_phy_auxctl_write(tp,
++ MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
++ MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
++ 0x4c20);
++ goto done;
++ }
++
++ err = tg3_phy_auxctl_read(tp,
++ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
++ if (err)
++ return err;
++
++ val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
++ err = tg3_phy_auxctl_write(tp,
++ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
++
++done:
++ return err;
++}
++
++static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
++{
++ u32 phytest;
++
++ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
++ u32 phy;
++
++ tg3_writephy(tp, MII_TG3_FET_TEST,
++ phytest | MII_TG3_FET_SHADOW_EN);
++ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
++ if (enable)
++ phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
++ else
++ phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
++ tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
++ }
++ tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
++ }
++}
++
++static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
++{
++ u32 reg;
++
++ if (!tg3_flag(tp, 5705_PLUS) ||
++ (tg3_flag(tp, 5717_PLUS) &&
++ (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
++ return;
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
++ tg3_phy_fet_toggle_apd(tp, enable);
++ return;
++ }
++
++ reg = MII_TG3_MISC_SHDW_SCR5_LPED |
++ MII_TG3_MISC_SHDW_SCR5_DLPTLM |
++ MII_TG3_MISC_SHDW_SCR5_SDTL |
++ MII_TG3_MISC_SHDW_SCR5_C125OE;
++ if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
++ reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
++
++ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
++
++
++ reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
++ if (enable)
++ reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
++
++ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
++}
++
++static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
++{
++ u32 phy;
++
++ if (!tg3_flag(tp, 5705_PLUS) ||
++ (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
++ return;
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
++ u32 ephy;
++
++ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
++ u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
++
++ tg3_writephy(tp, MII_TG3_FET_TEST,
++ ephy | MII_TG3_FET_SHADOW_EN);
++ if (!tg3_readphy(tp, reg, &phy)) {
++ if (enable)
++ phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
++ else
++ phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
++ tg3_writephy(tp, reg, phy);
++ }
++ tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
++ }
++ } else {
++ int ret;
++
++ ret = tg3_phy_auxctl_read(tp,
++ MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
++ if (!ret) {
++ if (enable)
++ phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
++ else
++ phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
++ tg3_phy_auxctl_write(tp,
++ MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
++ }
++ }
++}
++
++static void tg3_phy_set_wirespeed(struct tg3 *tp)
++{
++ int ret;
++ u32 val;
++
++ if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
++ return;
++
++ ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
++ if (!ret)
++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
++ val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
++}
++
++static void tg3_phy_apply_otp(struct tg3 *tp)
++{
++ u32 otp, phy;
++
++ if (!tp->phy_otp)
++ return;
++
++ otp = tp->phy_otp;
++
++ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
++ return;
++
++ phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
++ phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
++ tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
++
++ phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
++ ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
++ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
++
++ phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
++ phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
++ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
++
++ phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
++
++ phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
++
++ phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
++ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
++
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
++}
++
++static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
++{
++ u32 val;
++ struct ethtool_eee *dest = &tp->eee;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
++ return;
++
++ if (eee)
++ dest = eee;
++
++ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
++ return;
++
++ /* Pull eee_active */
++ if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
++ val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
++ dest->eee_active = 1;
++ } else
++ dest->eee_active = 0;
++
++ /* Pull lp advertised settings */
++ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
++ return;
++ dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
++
++ /* Pull advertised and eee_enabled settings */
++ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
++ return;
++ dest->eee_enabled = !!val;
++ dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
++
++ /* Pull tx_lpi_enabled */
++ val = tr32(TG3_CPMU_EEE_MODE);
++ dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
++
++ /* Pull lpi timer value */
++ dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
++}
++
++static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
++{
++ u32 val;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
++ return;
++
++ tp->setlpicnt = 0;
++
++ if (tp->link_config.autoneg == AUTONEG_ENABLE &&
++ current_link_up &&
++ tp->link_config.active_duplex == DUPLEX_FULL &&
++ (tp->link_config.active_speed == SPEED_100 ||
++ tp->link_config.active_speed == SPEED_1000)) {
++ u32 eeectl;
++
++ if (tp->link_config.active_speed == SPEED_1000)
++ eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
++ else
++ eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
++
++ tw32(TG3_CPMU_EEE_CTRL, eeectl);
++
++ tg3_eee_pull_config(tp, NULL);
++ if (tp->eee.eee_active)
++ tp->setlpicnt = 2;
++ }
++
++ if (!tp->setlpicnt) {
++ if (current_link_up &&
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
++ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
++ }
++
++ val = tr32(TG3_CPMU_EEE_MODE);
++ tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
++ }
++}
++
++static void tg3_phy_eee_enable(struct tg3 *tp)
++{
++ u32 val;
++
++ if (tp->link_config.active_speed == SPEED_1000 &&
++ (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_flag(tp, 57765_CLASS)) &&
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
++ val = MII_TG3_DSP_TAP26_ALNOKO |
++ MII_TG3_DSP_TAP26_RMRXSTO;
++ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
++ }
++
++ val = tr32(TG3_CPMU_EEE_MODE);
++ tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
++}
++
++static int tg3_wait_macro_done(struct tg3 *tp)
++{
++ int limit = 100;
++
++ while (limit--) {
++ u32 tmp32;
++
++ if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
++ if ((tmp32 & 0x1000) == 0)
++ break;
++ }
++ }
++ if (limit < 0)
++ return -EBUSY;
++
++ return 0;
++}
++
++static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
++{
++ static const u32 test_pat[4][6] = {
++ { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
++ { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
++ { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
++ { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
++ };
++ int chan;
++
++ for (chan = 0; chan < 4; chan++) {
++ int i;
++
++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
++ (chan * 0x2000) | 0x0200);
++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
++
++ for (i = 0; i < 6; i++)
++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
++ test_pat[chan][i]);
++
++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
++ if (tg3_wait_macro_done(tp)) {
++ *resetp = 1;
++ return -EBUSY;
++ }
++
++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
++ (chan * 0x2000) | 0x0200);
++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
++ if (tg3_wait_macro_done(tp)) {
++ *resetp = 1;
++ return -EBUSY;
++ }
++
++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
++ if (tg3_wait_macro_done(tp)) {
++ *resetp = 1;
++ return -EBUSY;
++ }
++
++ for (i = 0; i < 6; i += 2) {
++ u32 low, high;
++
++ if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
++ tg3_wait_macro_done(tp)) {
++ *resetp = 1;
++ return -EBUSY;
++ }
++ low &= 0x7fff;
++ high &= 0x000f;
++ if (low != test_pat[chan][i] ||
++ high != test_pat[chan][i+1]) {
++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
++
++ return -EBUSY;
++ }
++ }
++ }
++
++ return 0;
++}
++
++static int tg3_phy_reset_chanpat(struct tg3 *tp)
++{
++ int chan;
++
++ for (chan = 0; chan < 4; chan++) {
++ int i;
++
++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
++ (chan * 0x2000) | 0x0200);
++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
++ for (i = 0; i < 6; i++)
++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
++ if (tg3_wait_macro_done(tp))
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
++{
++ u32 reg32, phy9_orig;
++ int retries, do_phy_reset, err;
++
++ retries = 10;
++ do_phy_reset = 1;
++ do {
++ if (do_phy_reset) {
++ err = tg3_bmcr_reset(tp);
++ if (err)
++ return err;
++ do_phy_reset = 0;
++ }
++
++ /* Disable transmitter and interrupt. */
++ if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
++ continue;
++
++ reg32 |= 0x3000;
++ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
++
++ /* Set full-duplex, 1000 mbps. */
++ tg3_writephy(tp, MII_BMCR,
++ BMCR_FULLDPLX | BMCR_SPEED1000);
++
++ /* Set to master mode. */
++ if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
++ continue;
++
++ tg3_writephy(tp, MII_CTRL1000,
++ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
++
++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
++ if (err)
++ return err;
++
++ /* Block the PHY control access. */
++ tg3_phydsp_write(tp, 0x8005, 0x0800);
++
++ err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
++ if (!err)
++ break;
++ } while (--retries);
++
++ err = tg3_phy_reset_chanpat(tp);
++ if (err)
++ return err;
++
++ tg3_phydsp_write(tp, 0x8005, 0x0000);
++
++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
++
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
++
++ tg3_writephy(tp, MII_CTRL1000, phy9_orig);
++
++ err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
++ if (err)
++ return err;
++
++ reg32 &= ~0x3000;
++ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
++
++ return 0;
++}
++
++static void tg3_carrier_off(struct tg3 *tp)
++{
++ netif_carrier_off(tp->dev);
++ tp->link_up = false;
++}
++
++static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
++{
++ if (tg3_flag(tp, ENABLE_ASF))
++ netdev_warn(tp->dev,
++ "Management side-band traffic will be interrupted during phy settings change\n");
++}
++
++/* This will reset the tigon3 PHY if there is no valid
++ * link unless the FORCE argument is non-zero.
++ */
++static int tg3_phy_reset(struct tg3 *tp)
++{
++ u32 val, cpmuctrl;
++ int err;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ val = tr32(GRC_MISC_CFG);
++ tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
++ udelay(40);
++ }
++ err = tg3_readphy(tp, MII_BMSR, &val);
++ err |= tg3_readphy(tp, MII_BMSR, &val);
++ if (err != 0)
++ return -EBUSY;
++
++ if (netif_running(tp->dev) && tp->link_up) {
++ netif_carrier_off(tp->dev);
++ tg3_link_report(tp);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
++ tg3_asic_rev(tp) == ASIC_REV_5704 ||
++ tg3_asic_rev(tp) == ASIC_REV_5705) {
++ err = tg3_phy_reset_5703_4_5(tp);
++ if (err)
++ return err;
++ goto out;
++ }
++
++ cpmuctrl = 0;
++ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
++ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
++ cpmuctrl = tr32(TG3_CPMU_CTRL);
++ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
++ tw32(TG3_CPMU_CTRL,
++ cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
++ }
++
++ err = tg3_bmcr_reset(tp);
++ if (err)
++ return err;
++
++ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
++ val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
++
++ tw32(TG3_CPMU_CTRL, cpmuctrl);
++ }
++
++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
++ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
++ val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
++ if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
++ CPMU_LSPD_1000MB_MACCLK_12_5) {
++ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
++ udelay(40);
++ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
++ }
++ }
++
++ if (tg3_flag(tp, 5717_PLUS) &&
++ (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
++ return 0;
++
++ tg3_phy_apply_otp(tp);
++
++ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
++ tg3_phy_toggle_apd(tp, true);
++ else
++ tg3_phy_toggle_apd(tp, false);
++
++out:
++ if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
++ tg3_phydsp_write(tp, 0x201f, 0x2aaa);
++ tg3_phydsp_write(tp, 0x000a, 0x0323);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
++ }
++
++ if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
++ }
++
++ if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
++ tg3_phydsp_write(tp, 0x000a, 0x310b);
++ tg3_phydsp_write(tp, 0x201f, 0x9506);
++ tg3_phydsp_write(tp, 0x401f, 0x14e2);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
++ }
++ } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
++ if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
++ tg3_writephy(tp, MII_TG3_TEST1,
++ MII_TG3_TEST1_TRIM_EN | 0x4);
++ } else
++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
++
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
++ }
++ }
++
++ /* Set Extended packet length bit (bit 14) on all chips that */
++ /* support jumbo frames */
++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
++ /* Cannot do read-modify-write on 5401 */
++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
++ } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
++ /* Set bit 14 with read-modify-write to preserve other bits */
++ err = tg3_phy_auxctl_read(tp,
++ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
++ if (!err)
++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
++ val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
++ }
++
++ /* Set phy register 0x10 bit 0 to high fifo elasticity to support
++ * jumbo frames transmission.
++ */
++ if (tg3_flag(tp, JUMBO_CAPABLE)) {
++ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
++ tg3_writephy(tp, MII_TG3_EXT_CTRL,
++ val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ /* adjust output voltage */
++ tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
++ }
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
++ tg3_phydsp_write(tp, 0xffb, 0x4000);
++
++ tg3_phy_toggle_automdix(tp, true);
++ tg3_phy_set_wirespeed(tp);
++ return 0;
++}
++
++#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
++#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
++#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
++ TG3_GPIO_MSG_NEED_VAUX)
++#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
++ ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
++ (TG3_GPIO_MSG_DRVR_PRES << 4) | \
++ (TG3_GPIO_MSG_DRVR_PRES << 8) | \
++ (TG3_GPIO_MSG_DRVR_PRES << 12))
++
++#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
++ ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
++ (TG3_GPIO_MSG_NEED_VAUX << 4) | \
++ (TG3_GPIO_MSG_NEED_VAUX << 8) | \
++ (TG3_GPIO_MSG_NEED_VAUX << 12))
++
++static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
++{
++ u32 status, shift;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719)
++ status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
++ else
++ status = tr32(TG3_CPMU_DRV_STATUS);
++
++ shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
++ status &= ~(TG3_GPIO_MSG_MASK << shift);
++ status |= (newstat << shift);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719)
++ tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
++ else
++ tw32(TG3_CPMU_DRV_STATUS, status);
++
++ return status >> TG3_APE_GPIO_MSG_SHIFT;
++}
++
++static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
++{
++ if (!tg3_flag(tp, IS_NIC))
++ return 0;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720) {
++ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
++ return -EIO;
++
++ tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
++
++ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++
++ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
++ } else {
++ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++ }
++
++ return 0;
++}
++
++static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
++{
++ u32 grc_local_ctrl;
++
++ if (!tg3_flag(tp, IS_NIC) ||
++ tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701)
++ return;
++
++ grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
++
++ tw32_wait_f(GRC_LOCAL_CTRL,
++ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++
++ tw32_wait_f(GRC_LOCAL_CTRL,
++ grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++
++ tw32_wait_f(GRC_LOCAL_CTRL,
++ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++}
++
++static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
++{
++ if (!tg3_flag(tp, IS_NIC))
++ return;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701) {
++ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
++ (GRC_LCLCTRL_GPIO_OE0 |
++ GRC_LCLCTRL_GPIO_OE1 |
++ GRC_LCLCTRL_GPIO_OE2 |
++ GRC_LCLCTRL_GPIO_OUTPUT0 |
++ GRC_LCLCTRL_GPIO_OUTPUT1),
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++ } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
++ /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
++ u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
++ GRC_LCLCTRL_GPIO_OE1 |
++ GRC_LCLCTRL_GPIO_OE2 |
++ GRC_LCLCTRL_GPIO_OUTPUT0 |
++ GRC_LCLCTRL_GPIO_OUTPUT1 |
++ tp->grc_local_ctrl;
++ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++
++ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
++ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++
++ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
++ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++ } else {
++ u32 no_gpio2;
++ u32 grc_local_ctrl = 0;
++
++ /* Workaround to prevent overdrawing Amps. */
++ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
++ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
++ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
++ grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++ }
++
++ /* On 5753 and variants, GPIO2 cannot be used. */
++ no_gpio2 = tp->nic_sram_data_cfg &
++ NIC_SRAM_DATA_CFG_NO_GPIO2;
++
++ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
++ GRC_LCLCTRL_GPIO_OE1 |
++ GRC_LCLCTRL_GPIO_OE2 |
++ GRC_LCLCTRL_GPIO_OUTPUT1 |
++ GRC_LCLCTRL_GPIO_OUTPUT2;
++ if (no_gpio2) {
++ grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
++ GRC_LCLCTRL_GPIO_OUTPUT2);
++ }
++ tw32_wait_f(GRC_LOCAL_CTRL,
++ tp->grc_local_ctrl | grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++
++ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
++
++ tw32_wait_f(GRC_LOCAL_CTRL,
++ tp->grc_local_ctrl | grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++
++ if (!no_gpio2) {
++ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
++ tw32_wait_f(GRC_LOCAL_CTRL,
++ tp->grc_local_ctrl | grc_local_ctrl,
++ TG3_GRC_LCLCTL_PWRSW_DELAY);
++ }
++ }
++}
++
++static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
++{
++ u32 msg = 0;
++
++ /* Serialize power state transitions */
++ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
++ return;
++
++ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
++ msg = TG3_GPIO_MSG_NEED_VAUX;
++
++ msg = tg3_set_function_status(tp, msg);
++
++ if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
++ goto done;
++
++ if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
++ tg3_pwrsrc_switch_to_vaux(tp);
++ else
++ tg3_pwrsrc_die_with_vmain(tp);
++
++done:
++ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
++}
++
++static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
++{
++ bool need_vaux = false;
++
++ /* The GPIOs do something completely different on 57765. */
++ if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
++ return;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720) {
++ tg3_frob_aux_power_5717(tp, include_wol ?
++ tg3_flag(tp, WOL_ENABLE) != 0 : 0);
++ return;
++ }
++
++ if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
++ struct net_device *dev_peer;
++
++ dev_peer = pci_get_drvdata(tp->pdev_peer);
++
++ /* remove_one() may have been run on the peer. */
++ if (dev_peer) {
++ struct tg3 *tp_peer = netdev_priv(dev_peer);
++
++ if (tg3_flag(tp_peer, INIT_COMPLETE))
++ return;
++
++ if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
++ tg3_flag(tp_peer, ENABLE_ASF))
++ need_vaux = true;
++ }
++ }
++
++ if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
++ tg3_flag(tp, ENABLE_ASF))
++ need_vaux = true;
++
++ if (need_vaux)
++ tg3_pwrsrc_switch_to_vaux(tp);
++ else
++ tg3_pwrsrc_die_with_vmain(tp);
++}
++
++static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
++{
++ if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
++ return 1;
++ else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
++ if (speed != SPEED_10)
++ return 1;
++ } else if (speed == SPEED_10)
++ return 1;
++
++ return 0;
++}
++
++static bool tg3_phy_power_bug(struct tg3 *tp)
++{
++ switch (tg3_asic_rev(tp)) {
++ case ASIC_REV_5700:
++ case ASIC_REV_5704:
++ return true;
++ case ASIC_REV_5780:
++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
++ return true;
++ return false;
++ case ASIC_REV_5717:
++ if (!tp->pci_fn)
++ return true;
++ return false;
++ case ASIC_REV_5719:
++ case ASIC_REV_5720:
++ if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
++ !tp->pci_fn)
++ return true;
++ return false;
++ }
++
++ return false;
++}
++
++static bool tg3_phy_led_bug(struct tg3 *tp)
++{
++ switch (tg3_asic_rev(tp)) {
++ case ASIC_REV_5719:
++ case ASIC_REV_5720:
++ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
++ !tp->pci_fn)
++ return true;
++ return false;
++ }
++
++ return false;
++}
++
++static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
++{
++ u32 val;
++
++ if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
++ return;
++
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
++ if (tg3_asic_rev(tp) == ASIC_REV_5704) {
++ u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
++ u32 serdes_cfg = tr32(MAC_SERDES_CFG);
++
++ sg_dig_ctrl |=
++ SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
++ tw32(SG_DIG_CTRL, sg_dig_ctrl);
++ tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
++ }
++ return;
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ tg3_bmcr_reset(tp);
++ val = tr32(GRC_MISC_CFG);
++ tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
++ udelay(40);
++ return;
++ } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
++ u32 phytest;
++ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
++ u32 phy;
++
++ tg3_writephy(tp, MII_ADVERTISE, 0);
++ tg3_writephy(tp, MII_BMCR,
++ BMCR_ANENABLE | BMCR_ANRESTART);
++
++ tg3_writephy(tp, MII_TG3_FET_TEST,
++ phytest | MII_TG3_FET_SHADOW_EN);
++ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
++ phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
++ tg3_writephy(tp,
++ MII_TG3_FET_SHDW_AUXMODE4,
++ phy);
++ }
++ tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
++ }
++ return;
++ } else if (do_low_power) {
++ if (!tg3_phy_led_bug(tp))
++ tg3_writephy(tp, MII_TG3_EXT_CTRL,
++ MII_TG3_EXT_CTRL_FORCE_LED_OFF);
++
++ val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
++ MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
++ MII_TG3_AUXCTL_PCTL_VREG_11V;
++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
++ }
++
++ /* The PHY should not be powered down on some chips because
++ * of bugs.
++ */
++ if (tg3_phy_power_bug(tp))
++ return;
++
++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
++ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
++ val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
++ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
++ val |= CPMU_LSPD_1000MB_MACCLK_12_5;
++ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
++ }
++
++ tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
++}
++
++/* tp->lock is held. */
++static int tg3_nvram_lock(struct tg3 *tp)
++{
++ if (tg3_flag(tp, NVRAM)) {
++ int i;
++
++ if (tp->nvram_lock_cnt == 0) {
++ tw32(NVRAM_SWARB, SWARB_REQ_SET1);
++ for (i = 0; i < 8000; i++) {
++ if (tr32(NVRAM_SWARB) & SWARB_GNT1)
++ break;
++ udelay(20);
++ }
++ if (i == 8000) {
++ tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
++ return -ENODEV;
++ }
++ }
++ tp->nvram_lock_cnt++;
++ }
++ return 0;
++}
++
++/* tp->lock is held. */
++static void tg3_nvram_unlock(struct tg3 *tp)
++{
++ if (tg3_flag(tp, NVRAM)) {
++ if (tp->nvram_lock_cnt > 0)
++ tp->nvram_lock_cnt--;
++ if (tp->nvram_lock_cnt == 0)
++ tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_enable_nvram_access(struct tg3 *tp)
++{
++ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
++ u32 nvaccess = tr32(NVRAM_ACCESS);
++
++ tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_disable_nvram_access(struct tg3 *tp)
++{
++ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
++ u32 nvaccess = tr32(NVRAM_ACCESS);
++
++ tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
++ }
++}
++
++static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
++ u32 offset, u32 *val)
++{
++ u32 tmp;
++ int i;
++
++ if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
++ return -EINVAL;
++
++ tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
++ EEPROM_ADDR_DEVID_MASK |
++ EEPROM_ADDR_READ);
++ tw32(GRC_EEPROM_ADDR,
++ tmp |
++ (0 << EEPROM_ADDR_DEVID_SHIFT) |
++ ((offset << EEPROM_ADDR_ADDR_SHIFT) &
++ EEPROM_ADDR_ADDR_MASK) |
++ EEPROM_ADDR_READ | EEPROM_ADDR_START);
++
++ for (i = 0; i < 1000; i++) {
++ tmp = tr32(GRC_EEPROM_ADDR);
++
++ if (tmp & EEPROM_ADDR_COMPLETE)
++ break;
++ msleep(1);
++ }
++ if (!(tmp & EEPROM_ADDR_COMPLETE))
++ return -EBUSY;
++
++ tmp = tr32(GRC_EEPROM_DATA);
++
++ /*
++ * The data will always be opposite the native endian
++ * format. Perform a blind byteswap to compensate.
++ */
++ *val = swab32(tmp);
++
++ return 0;
++}
++
++#define NVRAM_CMD_TIMEOUT 10000
++
++static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
++{
++ int i;
++
++ tw32(NVRAM_CMD, nvram_cmd);
++ for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
++ udelay(10);
++ if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
++ udelay(10);
++ break;
++ }
++ }
++
++ if (i == NVRAM_CMD_TIMEOUT)
++ return -EBUSY;
++
++ return 0;
++}
++
++static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
++{
++ if (tg3_flag(tp, NVRAM) &&
++ tg3_flag(tp, NVRAM_BUFFERED) &&
++ tg3_flag(tp, FLASH) &&
++ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
++ (tp->nvram_jedecnum == JEDEC_ATMEL))
++
++ addr = ((addr / tp->nvram_pagesize) <<
++ ATMEL_AT45DB0X1B_PAGE_POS) +
++ (addr % tp->nvram_pagesize);
++
++ return addr;
++}
++
++static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
++{
++ if (tg3_flag(tp, NVRAM) &&
++ tg3_flag(tp, NVRAM_BUFFERED) &&
++ tg3_flag(tp, FLASH) &&
++ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
++ (tp->nvram_jedecnum == JEDEC_ATMEL))
++
++ addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
++ tp->nvram_pagesize) +
++ (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
++
++ return addr;
++}
++
++/* NOTE: Data read in from NVRAM is byteswapped according to
++ * the byteswapping settings for all other register accesses.
++ * tg3 devices are BE devices, so on a BE machine, the data
++ * returned will be exactly as it is seen in NVRAM. On a LE
++ * machine, the 32-bit value will be byteswapped.
++ */
++static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
++{
++ int ret;
++
++ if (!tg3_flag(tp, NVRAM))
++ return tg3_nvram_read_using_eeprom(tp, offset, val);
++
++ offset = tg3_nvram_phys_addr(tp, offset);
++
++ if (offset > NVRAM_ADDR_MSK)
++ return -EINVAL;
++
++ ret = tg3_nvram_lock(tp);
++ if (ret)
++ return ret;
++
++ tg3_enable_nvram_access(tp);
++
++ tw32(NVRAM_ADDR, offset);
++ ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
++ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
++
++ if (ret == 0)
++ *val = tr32(NVRAM_RDDATA);
++
++ tg3_disable_nvram_access(tp);
++
++ tg3_nvram_unlock(tp);
++
++ return ret;
++}
++
++/* Ensures NVRAM data is in bytestream format. */
++static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
++{
++ u32 v;
++ int res = tg3_nvram_read(tp, offset, &v);
++ if (!res)
++ *val = cpu_to_be32(v);
++ return res;
++}
++
++static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
++ u32 offset, u32 len, u8 *buf)
++{
++ int i, j, rc = 0;
++ u32 val;
++
++ for (i = 0; i < len; i += 4) {
++ u32 addr;
++ __be32 data;
++
++ addr = offset + i;
++
++ memcpy(&data, buf + i, 4);
++
++ /*
++ * The SEEPROM interface expects the data to always be opposite
++ * the native endian format. We accomplish this by reversing
++ * all the operations that would have been performed on the
++ * data from a call to tg3_nvram_read_be32().
++ */
++ tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
++
++ val = tr32(GRC_EEPROM_ADDR);
++ tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
++
++ val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
++ EEPROM_ADDR_READ);
++ tw32(GRC_EEPROM_ADDR, val |
++ (0 << EEPROM_ADDR_DEVID_SHIFT) |
++ (addr & EEPROM_ADDR_ADDR_MASK) |
++ EEPROM_ADDR_START |
++ EEPROM_ADDR_WRITE);
++
++ for (j = 0; j < 1000; j++) {
++ val = tr32(GRC_EEPROM_ADDR);
++
++ if (val & EEPROM_ADDR_COMPLETE)
++ break;
++ msleep(1);
++ }
++ if (!(val & EEPROM_ADDR_COMPLETE)) {
++ rc = -EBUSY;
++ break;
++ }
++ }
++
++ return rc;
++}
++
++/* offset and length are dword aligned */
++static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
++ u8 *buf)
++{
++ int ret = 0;
++ u32 pagesize = tp->nvram_pagesize;
++ u32 pagemask = pagesize - 1;
++ u32 nvram_cmd;
++ u8 *tmp;
++
++ tmp = kmalloc(pagesize, GFP_KERNEL);
++ if (tmp == NULL)
++ return -ENOMEM;
++
++ while (len) {
++ int j;
++ u32 phy_addr, page_off, size;
++
++ phy_addr = offset & ~pagemask;
++
++ for (j = 0; j < pagesize; j += 4) {
++ ret = tg3_nvram_read_be32(tp, phy_addr + j,
++ (__be32 *) (tmp + j));
++ if (ret)
++ break;
++ }
++ if (ret)
++ break;
++
++ page_off = offset & pagemask;
++ size = pagesize;
++ if (len < size)
++ size = len;
++
++ len -= size;
++
++ memcpy(tmp + page_off, buf, size);
++
++ offset = offset + (pagesize - page_off);
++
++ tg3_enable_nvram_access(tp);
++
++ /*
++ * Before we can erase the flash page, we need
++ * to issue a special "write enable" command.
++ */
++ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
++
++ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
++ break;
++
++ /* Erase the target page */
++ tw32(NVRAM_ADDR, phy_addr);
++
++ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
++ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
++
++ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
++ break;
++
++ /* Issue another write enable to start the write. */
++ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
++
++ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
++ break;
++
++ for (j = 0; j < pagesize; j += 4) {
++ __be32 data;
++
++ data = *((__be32 *) (tmp + j));
++
++ tw32(NVRAM_WRDATA, be32_to_cpu(data));
++
++ tw32(NVRAM_ADDR, phy_addr + j);
++
++ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
++ NVRAM_CMD_WR;
++
++ if (j == 0)
++ nvram_cmd |= NVRAM_CMD_FIRST;
++ else if (j == (pagesize - 4))
++ nvram_cmd |= NVRAM_CMD_LAST;
++
++ ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
++ if (ret)
++ break;
++ }
++ if (ret)
++ break;
++ }
++
++ nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
++ tg3_nvram_exec_cmd(tp, nvram_cmd);
++
++ kfree(tmp);
++
++ return ret;
++}
++
++/* offset and length are dword aligned */
++static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
++ u8 *buf)
++{
++ int i, ret = 0;
++
++ for (i = 0; i < len; i += 4, offset += 4) {
++ u32 page_off, phy_addr, nvram_cmd;
++ __be32 data;
++
++ memcpy(&data, buf + i, 4);
++ tw32(NVRAM_WRDATA, be32_to_cpu(data));
++
++ page_off = offset % tp->nvram_pagesize;
++
++ phy_addr = tg3_nvram_phys_addr(tp, offset);
++
++ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
++
++ if (page_off == 0 || i == 0)
++ nvram_cmd |= NVRAM_CMD_FIRST;
++ if (page_off == (tp->nvram_pagesize - 4))
++ nvram_cmd |= NVRAM_CMD_LAST;
++
++ if (i == (len - 4))
++ nvram_cmd |= NVRAM_CMD_LAST;
++
++ if ((nvram_cmd & NVRAM_CMD_FIRST) ||
++ !tg3_flag(tp, FLASH) ||
++ !tg3_flag(tp, 57765_PLUS))
++ tw32(NVRAM_ADDR, phy_addr);
++
++ if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
++ !tg3_flag(tp, 5755_PLUS) &&
++ (tp->nvram_jedecnum == JEDEC_ST) &&
++ (nvram_cmd & NVRAM_CMD_FIRST)) {
++ u32 cmd;
++
++ cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
++ ret = tg3_nvram_exec_cmd(tp, cmd);
++ if (ret)
++ break;
++ }
++ if (!tg3_flag(tp, FLASH)) {
++ /* We always do complete word writes to eeprom. */
++ nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
++ }
++
++ ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
++ if (ret)
++ break;
++ }
++ return ret;
++}
++
++/* offset and length are dword aligned */
++static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
++{
++ int ret;
++
++ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
++ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
++ ~GRC_LCLCTRL_GPIO_OUTPUT1);
++ udelay(40);
++ }
++
++ if (!tg3_flag(tp, NVRAM)) {
++ ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
++ } else {
++ u32 grc_mode;
++
++ ret = tg3_nvram_lock(tp);
++ if (ret)
++ return ret;
++
++ tg3_enable_nvram_access(tp);
++ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
++ tw32(NVRAM_WRITE1, 0x406);
++
++ grc_mode = tr32(GRC_MODE);
++ tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
++
++ if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
++ ret = tg3_nvram_write_block_buffered(tp, offset, len,
++ buf);
++ } else {
++ ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
++ buf);
++ }
++
++ grc_mode = tr32(GRC_MODE);
++ tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
++
++ tg3_disable_nvram_access(tp);
++ tg3_nvram_unlock(tp);
++ }
++
++ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
++ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
++ udelay(40);
++ }
++
++ return ret;
++}
++
++#define RX_CPU_SCRATCH_BASE 0x30000
++#define RX_CPU_SCRATCH_SIZE 0x04000
++#define TX_CPU_SCRATCH_BASE 0x34000
++#define TX_CPU_SCRATCH_SIZE 0x04000
++
++/* tp->lock is held. */
++static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
++{
++ int i;
++ const int iters = 10000;
++
++ for (i = 0; i < iters; i++) {
++ tw32(cpu_base + CPU_STATE, 0xffffffff);
++ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
++ if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
++ break;
++ if (pci_channel_offline(tp->pdev))
++ return -EBUSY;
++ }
++
++ return (i == iters) ? -EBUSY : 0;
++}
++
++/* tp->lock is held. */
++static int tg3_rxcpu_pause(struct tg3 *tp)
++{
++ int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
++
++ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
++ tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
++ udelay(10);
++
++ return rc;
++}
++
++/* tp->lock is held. */
++static int tg3_txcpu_pause(struct tg3 *tp)
++{
++ return tg3_pause_cpu(tp, TX_CPU_BASE);
++}
++
++/* tp->lock is held. */
++static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
++{
++ tw32(cpu_base + CPU_STATE, 0xffffffff);
++ tw32_f(cpu_base + CPU_MODE, 0x00000000);
++}
++
++/* tp->lock is held. */
++static void tg3_rxcpu_resume(struct tg3 *tp)
++{
++ tg3_resume_cpu(tp, RX_CPU_BASE);
++}
++
++/* tp->lock is held. */
++static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
++{
++ int rc;
++
++ BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ u32 val = tr32(GRC_VCPU_EXT_CTRL);
++
++ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
++ return 0;
++ }
++ if (cpu_base == RX_CPU_BASE) {
++ rc = tg3_rxcpu_pause(tp);
++ } else {
++ /*
++ * There is only an Rx CPU for the 5750 derivative in the
++ * BCM4785.
++ */
++ if (tg3_flag(tp, IS_SSB_CORE))
++ return 0;
++
++ rc = tg3_txcpu_pause(tp);
++ }
++
++ if (rc) {
++ netdev_err(tp->dev, "%s timed out, %s CPU\n",
++ __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
++ return -ENODEV;
++ }
++
++ /* Clear firmware's nvram arbitration. */
++ if (tg3_flag(tp, NVRAM))
++ tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
++ return 0;
++}
++
++static int tg3_fw_data_len(struct tg3 *tp,
++ const struct tg3_firmware_hdr *fw_hdr)
++{
++ int fw_len;
++
++ /* Non fragmented firmware have one firmware header followed by a
++ * contiguous chunk of data to be written. The length field in that
++ * header is not the length of data to be written but the complete
++ * length of the bss. The data length is determined based on
++ * tp->fw->size minus headers.
++ *
++ * Fragmented firmware have a main header followed by multiple
++ * fragments. Each fragment is identical to non fragmented firmware
++ * with a firmware header followed by a contiguous chunk of data. In
++ * the main header, the length field is unused and set to 0xffffffff.
++ * In each fragment header the length is the entire size of that
++ * fragment i.e. fragment data + header length. Data length is
++ * therefore length field in the header minus TG3_FW_HDR_LEN.
++ */
++ if (tp->fw_len == 0xffffffff)
++ fw_len = be32_to_cpu(fw_hdr->len);
++ else
++ fw_len = tp->fw->size;
++
++ return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
++}
++
++/* tp->lock is held. */
++static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
++ u32 cpu_scratch_base, int cpu_scratch_size,
++ const struct tg3_firmware_hdr *fw_hdr)
++{
++ int err, i;
++ void (*write_op)(struct tg3 *, u32, u32);
++ int total_len = tp->fw->size;
++
++ if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
++ netdev_err(tp->dev,
++ "%s: Trying to load TX cpu firmware which is 5705\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
++ write_op = tg3_write_mem;
++ else
++ write_op = tg3_write_indirect_reg32;
++
++ if (tg3_asic_rev(tp) != ASIC_REV_57766) {
++ /* It is possible that bootcode is still loading at this point.
++ * Get the nvram lock first before halting the cpu.
++ */
++ int lock_err = tg3_nvram_lock(tp);
++ err = tg3_halt_cpu(tp, cpu_base);
++ if (!lock_err)
++ tg3_nvram_unlock(tp);
++ if (err)
++ goto out;
++
++ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
++ write_op(tp, cpu_scratch_base + i, 0);
++ tw32(cpu_base + CPU_STATE, 0xffffffff);
++ tw32(cpu_base + CPU_MODE,
++ tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
++ } else {
++ /* Subtract additional main header for fragmented firmware and
++ * advance to the first fragment
++ */
++ total_len -= TG3_FW_HDR_LEN;
++ fw_hdr++;
++ }
++
++ do {
++ u32 *fw_data = (u32 *)(fw_hdr + 1);
++ for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
++ write_op(tp, cpu_scratch_base +
++ (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
++ (i * sizeof(u32)),
++ be32_to_cpu(fw_data[i]));
++
++ total_len -= be32_to_cpu(fw_hdr->len);
++
++ /* Advance to next fragment */
++ fw_hdr = (struct tg3_firmware_hdr *)
++ ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
++ } while (total_len > 0);
++
++ err = 0;
++
++out:
++ return err;
++}
++
++/* tp->lock is held. */
++static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
++{
++ int i;
++ const int iters = 5;
++
++ tw32(cpu_base + CPU_STATE, 0xffffffff);
++ tw32_f(cpu_base + CPU_PC, pc);
++
++ for (i = 0; i < iters; i++) {
++ if (tr32(cpu_base + CPU_PC) == pc)
++ break;
++ tw32(cpu_base + CPU_STATE, 0xffffffff);
++ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
++ tw32_f(cpu_base + CPU_PC, pc);
++ udelay(1000);
++ }
++
++ return (i == iters) ? -EBUSY : 0;
++}
++
++/* tp->lock is held. */
++static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
++{
++ const struct tg3_firmware_hdr *fw_hdr;
++ int err;
++
++ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
++
++ /* Firmware blob starts with version numbers, followed by
++ start address and length. We are setting complete length.
++ length = end_address_of_bss - start_address_of_text.
++ Remainder is the blob to be loaded contiguously
++ from start address. */
++
++ err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
++ RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
++ fw_hdr);
++ if (err)
++ return err;
++
++ err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
++ TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
++ fw_hdr);
++ if (err)
++ return err;
++
++ /* Now startup only the RX cpu. */
++ err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
++ be32_to_cpu(fw_hdr->base_addr));
++ if (err) {
++ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
++ "should be %08x\n", __func__,
++ tr32(RX_CPU_BASE + CPU_PC),
++ be32_to_cpu(fw_hdr->base_addr));
++ return -ENODEV;
++ }
++
++ tg3_rxcpu_resume(tp);
++
++ return 0;
++}
++
++static int tg3_validate_rxcpu_state(struct tg3 *tp)
++{
++ const int iters = 1000;
++ int i;
++ u32 val;
++
++ /* Wait for boot code to complete initialization and enter service
++ * loop. It is then safe to download service patches
++ */
++ for (i = 0; i < iters; i++) {
++ if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
++ break;
++
++ udelay(10);
++ }
++
++ if (i == iters) {
++ netdev_err(tp->dev, "Boot code not ready for service patches\n");
++ return -EBUSY;
++ }
++
++ val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
++ if (val & 0xff) {
++ netdev_warn(tp->dev,
++ "Other patches exist. Not downloading EEE patch\n");
++ return -EEXIST;
++ }
++
++ return 0;
++}
++
++/* tp->lock is held. */
++static void tg3_load_57766_firmware(struct tg3 *tp)
++{
++ struct tg3_firmware_hdr *fw_hdr;
++
++ if (!tg3_flag(tp, NO_NVRAM))
++ return;
++
++ if (tg3_validate_rxcpu_state(tp))
++ return;
++
++ if (!tp->fw)
++ return;
++
++ /* This firmware blob has a different format than older firmware
++ * releases as given below. The main difference is we have fragmented
++ * data to be written to non-contiguous locations.
++ *
++ * In the beginning we have a firmware header identical to other
++ * firmware which consists of version, base addr and length. The length
++ * here is unused and set to 0xffffffff.
++ *
++ * This is followed by a series of firmware fragments which are
++ * individually identical to previous firmware. i.e. they have the
++ * firmware header and followed by data for that fragment. The version
++ * field of the individual fragment header is unused.
++ */
++
++ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
++ if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
++ return;
++
++ if (tg3_rxcpu_pause(tp))
++ return;
++
++ /* tg3_load_firmware_cpu() will always succeed for the 57766 */
++ tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
++
++ tg3_rxcpu_resume(tp);
++}
++
++/* tp->lock is held. */
++static int tg3_load_tso_firmware(struct tg3 *tp)
++{
++ const struct tg3_firmware_hdr *fw_hdr;
++ unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
++ int err;
++
++ if (!tg3_flag(tp, FW_TSO))
++ return 0;
++
++ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
++
++ /* Firmware blob starts with version numbers, followed by
++ start address and length. We are setting complete length.
++ length = end_address_of_bss - start_address_of_text.
++ Remainder is the blob to be loaded contiguously
++ from start address. */
++
++ cpu_scratch_size = tp->fw_len;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
++ cpu_base = RX_CPU_BASE;
++ cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
++ } else {
++ cpu_base = TX_CPU_BASE;
++ cpu_scratch_base = TX_CPU_SCRATCH_BASE;
++ cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
++ }
++
++ err = tg3_load_firmware_cpu(tp, cpu_base,
++ cpu_scratch_base, cpu_scratch_size,
++ fw_hdr);
++ if (err)
++ return err;
++
++ /* Now startup the cpu. */
++ err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
++ be32_to_cpu(fw_hdr->base_addr));
++ if (err) {
++ netdev_err(tp->dev,
++ "%s fails to set CPU PC, is %08x should be %08x\n",
++ __func__, tr32(cpu_base + CPU_PC),
++ be32_to_cpu(fw_hdr->base_addr));
++ return -ENODEV;
++ }
++
++ tg3_resume_cpu(tp, cpu_base);
++ return 0;
++}
++
++/* tp->lock is held. */
++static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
++{
++ u32 addr_high, addr_low;
++
++ addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
++ addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
++ (mac_addr[4] << 8) | mac_addr[5]);
++
++ if (index < 4) {
++ tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
++ tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
++ } else {
++ index -= 4;
++ tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
++ tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
++ }
++}
++
++/* tp->lock is held. */
++static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
++{
++ u32 addr_high;
++ int i;
++
++ for (i = 0; i < 4; i++) {
++ if (i == 1 && skip_mac_1)
++ continue;
++ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
++ tg3_asic_rev(tp) == ASIC_REV_5704) {
++ for (i = 4; i < 16; i++)
++ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
++ }
++
++ addr_high = (tp->dev->dev_addr[0] +
++ tp->dev->dev_addr[1] +
++ tp->dev->dev_addr[2] +
++ tp->dev->dev_addr[3] +
++ tp->dev->dev_addr[4] +
++ tp->dev->dev_addr[5]) &
++ TX_BACKOFF_SEED_MASK;
++ tw32(MAC_TX_BACKOFF_SEED, addr_high);
++}
++
++static void tg3_enable_register_access(struct tg3 *tp)
++{
++ /*
++ * Make sure register accesses (indirect or otherwise) will function
++ * correctly.
++ */
++ pci_write_config_dword(tp->pdev,
++ TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
++}
++
++static int tg3_power_up(struct tg3 *tp)
++{
++ int err;
++
++ tg3_enable_register_access(tp);
++
++ err = pci_set_power_state(tp->pdev, PCI_D0);
++ if (!err) {
++ /* Switch out of Vaux if it is a NIC */
++ tg3_pwrsrc_switch_to_vmain(tp);
++ } else {
++ netdev_err(tp->dev, "Transition to D0 failed\n");
++ }
++
++ return err;
++}
++
++static int tg3_setup_phy(struct tg3 *, bool);
++
++static int tg3_power_down_prepare(struct tg3 *tp)
++{
++ u32 misc_host_ctrl;
++ bool device_should_wake, do_low_power;
++
++ tg3_enable_register_access(tp);
++
++ /* Restore the CLKREQ setting. */
++ if (tg3_flag(tp, CLKREQ_BUG))
++ pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_CLKREQ_EN);
++
++ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
++ tw32(TG3PCI_MISC_HOST_CTRL,
++ misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
++
++ device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
++ tg3_flag(tp, WOL_ENABLE);
++
++ if (tg3_flag(tp, USE_PHYLIB)) {
++ do_low_power = false;
++ if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
++ !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
++ struct phy_device *phydev;
++ u32 phyid, advertising;
++
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++
++ tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
++
++ tp->link_config.speed = phydev->speed;
++ tp->link_config.duplex = phydev->duplex;
++ tp->link_config.autoneg = phydev->autoneg;
++ tp->link_config.advertising = phydev->advertising;
++
++ advertising = ADVERTISED_TP |
++ ADVERTISED_Pause |
++ ADVERTISED_Autoneg |
++ ADVERTISED_10baseT_Half;
++
++ if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
++ if (tg3_flag(tp, WOL_SPEED_100MB))
++ advertising |=
++ ADVERTISED_100baseT_Half |
++ ADVERTISED_100baseT_Full |
++ ADVERTISED_10baseT_Full;
++ else
++ advertising |= ADVERTISED_10baseT_Full;
++ }
++
++ phydev->advertising = advertising;
++
++ phy_start_aneg(phydev);
++
++ phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
++ if (phyid != PHY_ID_BCMAC131) {
++ phyid &= PHY_BCM_OUI_MASK;
++ if (phyid == PHY_BCM_OUI_1 ||
++ phyid == PHY_BCM_OUI_2 ||
++ phyid == PHY_BCM_OUI_3)
++ do_low_power = true;
++ }
++ }
++ } else {
++ do_low_power = true;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
++ tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
++ tg3_setup_phy(tp, false);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ u32 val;
++
++ val = tr32(GRC_VCPU_EXT_CTRL);
++ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
++ } else if (!tg3_flag(tp, ENABLE_ASF)) {
++ int i;
++ u32 val;
++
++ for (i = 0; i < 200; i++) {
++ tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
++ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
++ break;
++ msleep(1);
++ }
++ }
++ if (tg3_flag(tp, WOL_CAP))
++ tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
++ WOL_DRV_STATE_SHUTDOWN |
++ WOL_DRV_WOL |
++ WOL_SET_MAGIC_PKT);
++
++ if (device_should_wake) {
++ u32 mac_mode;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
++ if (do_low_power &&
++ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
++ tg3_phy_auxctl_write(tp,
++ MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
++ MII_TG3_AUXCTL_PCTL_WOL_EN |
++ MII_TG3_AUXCTL_PCTL_100TX_LPWR |
++ MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
++ udelay(40);
++ }
++
++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
++ mac_mode = MAC_MODE_PORT_MODE_GMII;
++ else if (tp->phy_flags &
++ TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
++ if (tp->link_config.active_speed == SPEED_1000)
++ mac_mode = MAC_MODE_PORT_MODE_GMII;
++ else
++ mac_mode = MAC_MODE_PORT_MODE_MII;
++ } else
++ mac_mode = MAC_MODE_PORT_MODE_MII;
++
++ mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
++ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
++ u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
++ SPEED_100 : SPEED_10;
++ if (tg3_5700_link_polarity(tp, speed))
++ mac_mode |= MAC_MODE_LINK_POLARITY;
++ else
++ mac_mode &= ~MAC_MODE_LINK_POLARITY;
++ }
++ } else {
++ mac_mode = MAC_MODE_PORT_MODE_TBI;
++ }
++
++ if (!tg3_flag(tp, 5750_PLUS))
++ tw32(MAC_LED_CTRL, tp->led_ctrl);
++
++ mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
++ if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
++ (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
++ mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
++
++ if (tg3_flag(tp, ENABLE_APE))
++ mac_mode |= MAC_MODE_APE_TX_EN |
++ MAC_MODE_APE_RX_EN |
++ MAC_MODE_TDE_ENABLE;
++
++ tw32_f(MAC_MODE, mac_mode);
++ udelay(100);
++
++ tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
++ udelay(10);
++ }
++
++ if (!tg3_flag(tp, WOL_SPEED_100MB) &&
++ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701)) {
++ u32 base_val;
++
++ base_val = tp->pci_clock_ctrl;
++ base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
++ CLOCK_CTRL_TXCLK_DISABLE);
++
++ tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
++ CLOCK_CTRL_PWRDOWN_PLL133, 40);
++ } else if (tg3_flag(tp, 5780_CLASS) ||
++ tg3_flag(tp, CPMU_PRESENT) ||
++ tg3_asic_rev(tp) == ASIC_REV_5906) {
++ /* do nothing */
++ } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
++ u32 newbits1, newbits2;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701) {
++ newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
++ CLOCK_CTRL_TXCLK_DISABLE |
++ CLOCK_CTRL_ALTCLK);
++ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
++ } else if (tg3_flag(tp, 5705_PLUS)) {
++ newbits1 = CLOCK_CTRL_625_CORE;
++ newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
++ } else {
++ newbits1 = CLOCK_CTRL_ALTCLK;
++ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
++ }
++
++ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
++ 40);
++
++ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
++ 40);
++
++ if (!tg3_flag(tp, 5705_PLUS)) {
++ u32 newbits3;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701) {
++ newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
++ CLOCK_CTRL_TXCLK_DISABLE |
++ CLOCK_CTRL_44MHZ_CORE);
++ } else {
++ newbits3 = CLOCK_CTRL_44MHZ_CORE;
++ }
++
++ tw32_wait_f(TG3PCI_CLOCK_CTRL,
++ tp->pci_clock_ctrl | newbits3, 40);
++ }
++ }
++
++ if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
++ tg3_power_down_phy(tp, do_low_power);
++
++ tg3_frob_aux_power(tp, true);
++
++ /* Workaround for unstable PLL clock */
++ if ((!tg3_flag(tp, IS_SSB_CORE)) &&
++ ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
++ (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
++ u32 val = tr32(0x7d00);
++
++ val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
++ tw32(0x7d00, val);
++ if (!tg3_flag(tp, ENABLE_ASF)) {
++ int err;
++
++ err = tg3_nvram_lock(tp);
++ tg3_halt_cpu(tp, RX_CPU_BASE);
++ if (!err)
++ tg3_nvram_unlock(tp);
++ }
++ }
++
++ tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
++
++ tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
++
++ return 0;
++}
++
++static void tg3_power_down(struct tg3 *tp)
++{
++ pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
++ pci_set_power_state(tp->pdev, PCI_D3hot);
++}
++
++static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
++{
++ switch (val & MII_TG3_AUX_STAT_SPDMASK) {
++ case MII_TG3_AUX_STAT_10HALF:
++ *speed = SPEED_10;
++ *duplex = DUPLEX_HALF;
++ break;
++
++ case MII_TG3_AUX_STAT_10FULL:
++ *speed = SPEED_10;
++ *duplex = DUPLEX_FULL;
++ break;
++
++ case MII_TG3_AUX_STAT_100HALF:
++ *speed = SPEED_100;
++ *duplex = DUPLEX_HALF;
++ break;
++
++ case MII_TG3_AUX_STAT_100FULL:
++ *speed = SPEED_100;
++ *duplex = DUPLEX_FULL;
++ break;
++
++ case MII_TG3_AUX_STAT_1000HALF:
++ *speed = SPEED_1000;
++ *duplex = DUPLEX_HALF;
++ break;
++
++ case MII_TG3_AUX_STAT_1000FULL:
++ *speed = SPEED_1000;
++ *duplex = DUPLEX_FULL;
++ break;
++
++ default:
++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
++ *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
++ SPEED_10;
++ *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
++ DUPLEX_HALF;
++ break;
++ }
++ *speed = SPEED_UNKNOWN;
++ *duplex = DUPLEX_UNKNOWN;
++ break;
++ }
++}
++
++static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
++{
++ int err = 0;
++ u32 val, new_adv;
++
++ new_adv = ADVERTISE_CSMA;
++ new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
++ new_adv |= mii_advertise_flowctrl(flowctrl);
++
++ err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
++ if (err)
++ goto done;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
++ new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
++ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
++
++ err = tg3_writephy(tp, MII_CTRL1000, new_adv);
++ if (err)
++ goto done;
++ }
++
++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
++ goto done;
++
++ tw32(TG3_CPMU_EEE_MODE,
++ tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
++
++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
++ if (!err) {
++ u32 err2;
++
++ val = 0;
++ /* Advertise 100-BaseTX EEE ability */
++ if (advertise & ADVERTISED_100baseT_Full)
++ val |= MDIO_AN_EEE_ADV_100TX;
++ /* Advertise 1000-BaseT EEE ability */
++ if (advertise & ADVERTISED_1000baseT_Full)
++ val |= MDIO_AN_EEE_ADV_1000T;
++
++ if (!tp->eee.eee_enabled) {
++ val = 0;
++ tp->eee.advertised = 0;
++ } else {
++ tp->eee.advertised = advertise &
++ (ADVERTISED_100baseT_Full |
++ ADVERTISED_1000baseT_Full);
++ }
++
++ err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
++ if (err)
++ val = 0;
++
++ switch (tg3_asic_rev(tp)) {
++ case ASIC_REV_5717:
++ case ASIC_REV_57765:
++ case ASIC_REV_57766:
++ case ASIC_REV_5719:
++ /* If we advertised any eee advertisements above... */
++ if (val)
++ val = MII_TG3_DSP_TAP26_ALNOKO |
++ MII_TG3_DSP_TAP26_RMRXSTO |
++ MII_TG3_DSP_TAP26_OPCSINPT;
++ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
++ /* Fall through */
++ case ASIC_REV_5720:
++ case ASIC_REV_5762:
++ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
++ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
++ MII_TG3_DSP_CH34TP2_HIBW01);
++ }
++
++ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
++ if (!err)
++ err = err2;
++ }
++
++done:
++ return err;
++}
++
++static void tg3_phy_copper_begin(struct tg3 *tp)
++{
++ if (tp->link_config.autoneg == AUTONEG_ENABLE ||
++ (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
++ u32 adv, fc;
++
++ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
++ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
++ adv = ADVERTISED_10baseT_Half |
++ ADVERTISED_10baseT_Full;
++ if (tg3_flag(tp, WOL_SPEED_100MB))
++ adv |= ADVERTISED_100baseT_Half |
++ ADVERTISED_100baseT_Full;
++ if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
++ if (!(tp->phy_flags &
++ TG3_PHYFLG_DISABLE_1G_HD_ADV))
++ adv |= ADVERTISED_1000baseT_Half;
++ adv |= ADVERTISED_1000baseT_Full;
++ }
++
++ fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
++ } else {
++ adv = tp->link_config.advertising;
++ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
++ adv &= ~(ADVERTISED_1000baseT_Half |
++ ADVERTISED_1000baseT_Full);
++
++ fc = tp->link_config.flowctrl;
++ }
++
++ tg3_phy_autoneg_cfg(tp, adv, fc);
++
++ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
++ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
++ /* Normally during power down we want to autonegotiate
++ * the lowest possible speed for WOL. However, to avoid
++ * link flap, we leave it untouched.
++ */
++ return;
++ }
++
++ tg3_writephy(tp, MII_BMCR,
++ BMCR_ANENABLE | BMCR_ANRESTART);
++ } else {
++ int i;
++ u32 bmcr, orig_bmcr;
++
++ tp->link_config.active_speed = tp->link_config.speed;
++ tp->link_config.active_duplex = tp->link_config.duplex;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
++ /* With autoneg disabled, 5715 only links up when the
++ * advertisement register has the configured speed
++ * enabled.
++ */
++ tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
++ }
++
++ bmcr = 0;
++ switch (tp->link_config.speed) {
++ default:
++ case SPEED_10:
++ break;
++
++ case SPEED_100:
++ bmcr |= BMCR_SPEED100;
++ break;
++
++ case SPEED_1000:
++ bmcr |= BMCR_SPEED1000;
++ break;
++ }
++
++ if (tp->link_config.duplex == DUPLEX_FULL)
++ bmcr |= BMCR_FULLDPLX;
++
++ if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
++ (bmcr != orig_bmcr)) {
++ tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
++ for (i = 0; i < 1500; i++) {
++ u32 tmp;
++
++ udelay(10);
++ if (tg3_readphy(tp, MII_BMSR, &tmp) ||
++ tg3_readphy(tp, MII_BMSR, &tmp))
++ continue;
++ if (!(tmp & BMSR_LSTATUS)) {
++ udelay(40);
++ break;
++ }
++ }
++ tg3_writephy(tp, MII_BMCR, bmcr);
++ udelay(40);
++ }
++ }
++}
++
++static int tg3_phy_pull_config(struct tg3 *tp)
++{
++ int err;
++ u32 val;
++
++ err = tg3_readphy(tp, MII_BMCR, &val);
++ if (err)
++ goto done;
++
++ if (!(val & BMCR_ANENABLE)) {
++ tp->link_config.autoneg = AUTONEG_DISABLE;
++ tp->link_config.advertising = 0;
++ tg3_flag_clear(tp, PAUSE_AUTONEG);
++
++ err = -EIO;
++
++ switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
++ case 0:
++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
++ goto done;
++
++ tp->link_config.speed = SPEED_10;
++ break;
++ case BMCR_SPEED100:
++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
++ goto done;
++
++ tp->link_config.speed = SPEED_100;
++ break;
++ case BMCR_SPEED1000:
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
++ tp->link_config.speed = SPEED_1000;
++ break;
++ }
++ /* Fall through */
++ default:
++ goto done;
++ }
++
++ if (val & BMCR_FULLDPLX)
++ tp->link_config.duplex = DUPLEX_FULL;
++ else
++ tp->link_config.duplex = DUPLEX_HALF;
++
++ tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
++
++ err = 0;
++ goto done;
++ }
++
++ tp->link_config.autoneg = AUTONEG_ENABLE;
++ tp->link_config.advertising = ADVERTISED_Autoneg;
++ tg3_flag_set(tp, PAUSE_AUTONEG);
++
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
++ u32 adv;
++
++ err = tg3_readphy(tp, MII_ADVERTISE, &val);
++ if (err)
++ goto done;
++
++ adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
++ tp->link_config.advertising |= adv | ADVERTISED_TP;
++
++ tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
++ } else {
++ tp->link_config.advertising |= ADVERTISED_FIBRE;
++ }
++
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
++ u32 adv;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
++ err = tg3_readphy(tp, MII_CTRL1000, &val);
++ if (err)
++ goto done;
++
++ adv = mii_ctrl1000_to_ethtool_adv_t(val);
++ } else {
++ err = tg3_readphy(tp, MII_ADVERTISE, &val);
++ if (err)
++ goto done;
++
++ adv = tg3_decode_flowctrl_1000X(val);
++ tp->link_config.flowctrl = adv;
++
++ val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
++ adv = mii_adv_to_ethtool_adv_x(val);
++ }
++
++ tp->link_config.advertising |= adv;
++ }
++
++done:
++ return err;
++}
++
++static int tg3_init_5401phy_dsp(struct tg3 *tp)
++{
++ int err;
++
++ /* Turn off tap power management. */
++ /* Set Extended packet length bit */
++ err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
++
++ err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
++ err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
++ err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
++ err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
++ err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
++
++ udelay(40);
++
++ return err;
++}
++
++static bool tg3_phy_eee_config_ok(struct tg3 *tp)
++{
++ struct ethtool_eee eee;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
++ return true;
++
++ tg3_eee_pull_config(tp, &eee);
++
++ if (tp->eee.eee_enabled) {
++ if (tp->eee.advertised != eee.advertised ||
++ tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
++ tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
++ return false;
++ } else {
++ /* EEE is disabled but we're advertising */
++ if (eee.advertised)
++ return false;
++ }
++
++ return true;
++}
++
++static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
++{
++ u32 advmsk, tgtadv, advertising;
++
++ advertising = tp->link_config.advertising;
++ tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
++
++ advmsk = ADVERTISE_ALL;
++ if (tp->link_config.active_duplex == DUPLEX_FULL) {
++ tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
++ advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
++ }
++
++ if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
++ return false;
++
++ if ((*lcladv & advmsk) != tgtadv)
++ return false;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
++ u32 tg3_ctrl;
++
++ tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
++
++ if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
++ return false;
++
++ if (tgtadv &&
++ (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
++ tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
++ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
++ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
++ } else {
++ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
++ }
++
++ if (tg3_ctrl != tgtadv)
++ return false;
++ }
++
++ return true;
++}
++
++static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
++{
++ u32 lpeth = 0;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
++ u32 val;
++
++ if (tg3_readphy(tp, MII_STAT1000, &val))
++ return false;
++
++ lpeth = mii_stat1000_to_ethtool_lpa_t(val);
++ }
++
++ if (tg3_readphy(tp, MII_LPA, rmtadv))
++ return false;
++
++ lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
++ tp->link_config.rmt_adv = lpeth;
++
++ return true;
++}
++
++static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
++{
++ if (curr_link_up != tp->link_up) {
++ if (curr_link_up) {
++ netif_carrier_on(tp->dev);
++ } else {
++ netif_carrier_off(tp->dev);
++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
++ }
++
++ tg3_link_report(tp);
++ return true;
++ }
++
++ return false;
++}
++
++static void tg3_clear_mac_status(struct tg3 *tp)
++{
++ tw32(MAC_EVENT, 0);
++
++ tw32_f(MAC_STATUS,
++ MAC_STATUS_SYNC_CHANGED |
++ MAC_STATUS_CFG_CHANGED |
++ MAC_STATUS_MI_COMPLETION |
++ MAC_STATUS_LNKSTATE_CHANGED);
++ udelay(40);
++}
++
++static void tg3_setup_eee(struct tg3 *tp)
++{
++ u32 val;
++
++ val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
++ TG3_CPMU_EEE_LNKIDL_UART_IDL;
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
++ val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
++
++ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
++
++ tw32_f(TG3_CPMU_EEE_CTRL,
++ TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
++
++ val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
++ (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
++ TG3_CPMU_EEEMD_LPI_IN_RX |
++ TG3_CPMU_EEEMD_EEE_ENABLE;
++
++ if (tg3_asic_rev(tp) != ASIC_REV_5717)
++ val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
++
++ if (tg3_flag(tp, ENABLE_APE))
++ val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
++
++ tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
++
++ tw32_f(TG3_CPMU_EEE_DBTMR1,
++ TG3_CPMU_DBTMR1_PCIEXIT_2047US |
++ (tp->eee.tx_lpi_timer & 0xffff));
++
++ tw32_f(TG3_CPMU_EEE_DBTMR2,
++ TG3_CPMU_DBTMR2_APE_TX_2047US |
++ TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
++}
++
++static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
++{
++ bool current_link_up;
++ u32 bmsr, val;
++ u32 lcl_adv, rmt_adv;
++ u16 current_speed;
++ u8 current_duplex;
++ int i, err;
++
++ tg3_clear_mac_status(tp);
++
++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
++ tw32_f(MAC_MI_MODE,
++ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
++ udelay(80);
++ }
++
++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
++
++ /* Some third-party PHYs need to be reset on link going
++ * down.
++ */
++ if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
++ tg3_asic_rev(tp) == ASIC_REV_5704 ||
++ tg3_asic_rev(tp) == ASIC_REV_5705) &&
++ tp->link_up) {
++ tg3_readphy(tp, MII_BMSR, &bmsr);
++ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
++ !(bmsr & BMSR_LSTATUS))
++ force_reset = true;
++ }
++ if (force_reset)
++ tg3_phy_reset(tp);
++
++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
++ tg3_readphy(tp, MII_BMSR, &bmsr);
++ if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
++ !tg3_flag(tp, INIT_COMPLETE))
++ bmsr = 0;
++
++ if (!(bmsr & BMSR_LSTATUS)) {
++ err = tg3_init_5401phy_dsp(tp);
++ if (err)
++ return err;
++
++ tg3_readphy(tp, MII_BMSR, &bmsr);
++ for (i = 0; i < 1000; i++) {
++ udelay(10);
++ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
++ (bmsr & BMSR_LSTATUS)) {
++ udelay(40);
++ break;
++ }
++ }
++
++ if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
++ TG3_PHY_REV_BCM5401_B0 &&
++ !(bmsr & BMSR_LSTATUS) &&
++ tp->link_config.active_speed == SPEED_1000) {
++ err = tg3_phy_reset(tp);
++ if (!err)
++ err = tg3_init_5401phy_dsp(tp);
++ if (err)
++ return err;
++ }
++ }
++ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
++ /* 5701 {A0,B0} CRC bug workaround */
++ tg3_writephy(tp, 0x15, 0x0a75);
++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
++ }
++
++ /* Clear pending interrupts... */
++ tg3_readphy(tp, MII_TG3_ISTAT, &val);
++ tg3_readphy(tp, MII_TG3_ISTAT, &val);
++
++ if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
++ tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
++ else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
++ tg3_writephy(tp, MII_TG3_IMASK, ~0);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701) {
++ if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
++ tg3_writephy(tp, MII_TG3_EXT_CTRL,
++ MII_TG3_EXT_CTRL_LNK3_LED_MODE);
++ else
++ tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
++ }
++
++ current_link_up = false;
++ current_speed = SPEED_UNKNOWN;
++ current_duplex = DUPLEX_UNKNOWN;
++ tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
++ tp->link_config.rmt_adv = 0;
++
++ if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
++ err = tg3_phy_auxctl_read(tp,
++ MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
++ &val);
++ if (!err && !(val & (1 << 10))) {
++ tg3_phy_auxctl_write(tp,
++ MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
++ val | (1 << 10));
++ goto relink;
++ }
++ }
++
++ bmsr = 0;
++ for (i = 0; i < 100; i++) {
++ tg3_readphy(tp, MII_BMSR, &bmsr);
++ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
++ (bmsr & BMSR_LSTATUS))
++ break;
++ udelay(40);
++ }
++
++ if (bmsr & BMSR_LSTATUS) {
++ u32 aux_stat, bmcr;
++
++ tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
++ for (i = 0; i < 2000; i++) {
++ udelay(10);
++ if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
++ aux_stat)
++ break;
++ }
++
++ tg3_aux_stat_to_speed_duplex(tp, aux_stat,
++ &current_speed,
++ &current_duplex);
++
++ bmcr = 0;
++ for (i = 0; i < 200; i++) {
++ tg3_readphy(tp, MII_BMCR, &bmcr);
++ if (tg3_readphy(tp, MII_BMCR, &bmcr))
++ continue;
++ if (bmcr && bmcr != 0x7fff)
++ break;
++ udelay(10);
++ }
++
++ lcl_adv = 0;
++ rmt_adv = 0;
++
++ tp->link_config.active_speed = current_speed;
++ tp->link_config.active_duplex = current_duplex;
++
++ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
++ bool eee_config_ok = tg3_phy_eee_config_ok(tp);
++
++ if ((bmcr & BMCR_ANENABLE) &&
++ eee_config_ok &&
++ tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
++ tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
++ current_link_up = true;
++
++ /* EEE settings changes take effect only after a phy
++ * reset. If we have skipped a reset due to Link Flap
++ * Avoidance being enabled, do it now.
++ */
++ if (!eee_config_ok &&
++ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
++ !force_reset) {
++ tg3_setup_eee(tp);
++ tg3_phy_reset(tp);
++ }
++ } else {
++ if (!(bmcr & BMCR_ANENABLE) &&
++ tp->link_config.speed == current_speed &&
++ tp->link_config.duplex == current_duplex) {
++ current_link_up = true;
++ }
++ }
++
++ if (current_link_up &&
++ tp->link_config.active_duplex == DUPLEX_FULL) {
++ u32 reg, bit;
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
++ reg = MII_TG3_FET_GEN_STAT;
++ bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
++ } else {
++ reg = MII_TG3_EXT_STAT;
++ bit = MII_TG3_EXT_STAT_MDIX;
++ }
++
++ if (!tg3_readphy(tp, reg, &val) && (val & bit))
++ tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
++
++ tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
++ }
++ }
++
++relink:
++ if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
++ tg3_phy_copper_begin(tp);
++
++ if (tg3_flag(tp, ROBOSWITCH)) {
++ current_link_up = true;
++ /* FIXME: when BCM5325 switch is used use 100 MBit/s */
++ current_speed = SPEED_1000;
++ current_duplex = DUPLEX_FULL;
++ tp->link_config.active_speed = current_speed;
++ tp->link_config.active_duplex = current_duplex;
++ }
++
++ tg3_readphy(tp, MII_BMSR, &bmsr);
++ if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
++ (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
++ current_link_up = true;
++ }
++
++ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
++ if (current_link_up) {
++ if (tp->link_config.active_speed == SPEED_100 ||
++ tp->link_config.active_speed == SPEED_10)
++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
++ else
++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
++ } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
++ else
++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
++
++ /* In order for the 5750 core in BCM4785 chip to work properly
++ * in RGMII mode, the Led Control Register must be set up.
++ */
++ if (tg3_flag(tp, RGMII_MODE)) {
++ u32 led_ctrl = tr32(MAC_LED_CTRL);
++ led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
++
++ if (tp->link_config.active_speed == SPEED_10)
++ led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
++ else if (tp->link_config.active_speed == SPEED_100)
++ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
++ LED_CTRL_100MBPS_ON);
++ else if (tp->link_config.active_speed == SPEED_1000)
++ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
++ LED_CTRL_1000MBPS_ON);
++
++ tw32(MAC_LED_CTRL, led_ctrl);
++ udelay(40);
++ }
++
++ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
++ if (tp->link_config.active_duplex == DUPLEX_HALF)
++ tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
++ if (current_link_up &&
++ tg3_5700_link_polarity(tp, tp->link_config.active_speed))
++ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
++ else
++ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
++ }
++
++ /* ??? Without this setting Netgear GA302T PHY does not
++ * ??? send/receive packets...
++ */
++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
++ tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
++ tw32_f(MAC_MI_MODE, tp->mi_mode);
++ udelay(80);
++ }
++
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ tg3_phy_eee_adjust(tp, current_link_up);
++
++ if (tg3_flag(tp, USE_LINKCHG_REG)) {
++ /* Polled via timer. */
++ tw32_f(MAC_EVENT, 0);
++ } else {
++ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
++ }
++ udelay(40);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
++ current_link_up &&
++ tp->link_config.active_speed == SPEED_1000 &&
++ (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
++ udelay(120);
++ tw32_f(MAC_STATUS,
++ (MAC_STATUS_SYNC_CHANGED |
++ MAC_STATUS_CFG_CHANGED));
++ udelay(40);
++ tg3_write_mem(tp,
++ NIC_SRAM_FIRMWARE_MBOX,
++ NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
++ }
++
++ /* Prevent send BD corruption. */
++ if (tg3_flag(tp, CLKREQ_BUG)) {
++ if (tp->link_config.active_speed == SPEED_100 ||
++ tp->link_config.active_speed == SPEED_10)
++ pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_CLKREQ_EN);
++ else
++ pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_CLKREQ_EN);
++ }
++
++ tg3_test_and_report_link_chg(tp, current_link_up);
++
++ return 0;
++}
++
++struct tg3_fiber_aneginfo {
++ int state;
++#define ANEG_STATE_UNKNOWN 0
++#define ANEG_STATE_AN_ENABLE 1
++#define ANEG_STATE_RESTART_INIT 2
++#define ANEG_STATE_RESTART 3
++#define ANEG_STATE_DISABLE_LINK_OK 4
++#define ANEG_STATE_ABILITY_DETECT_INIT 5
++#define ANEG_STATE_ABILITY_DETECT 6
++#define ANEG_STATE_ACK_DETECT_INIT 7
++#define ANEG_STATE_ACK_DETECT 8
++#define ANEG_STATE_COMPLETE_ACK_INIT 9
++#define ANEG_STATE_COMPLETE_ACK 10
++#define ANEG_STATE_IDLE_DETECT_INIT 11
++#define ANEG_STATE_IDLE_DETECT 12
++#define ANEG_STATE_LINK_OK 13
++#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
++#define ANEG_STATE_NEXT_PAGE_WAIT 15
++
++ u32 flags;
++#define MR_AN_ENABLE 0x00000001
++#define MR_RESTART_AN 0x00000002
++#define MR_AN_COMPLETE 0x00000004
++#define MR_PAGE_RX 0x00000008
++#define MR_NP_LOADED 0x00000010
++#define MR_TOGGLE_TX 0x00000020
++#define MR_LP_ADV_FULL_DUPLEX 0x00000040
++#define MR_LP_ADV_HALF_DUPLEX 0x00000080
++#define MR_LP_ADV_SYM_PAUSE 0x00000100
++#define MR_LP_ADV_ASYM_PAUSE 0x00000200
++#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
++#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
++#define MR_LP_ADV_NEXT_PAGE 0x00001000
++#define MR_TOGGLE_RX 0x00002000
++#define MR_NP_RX 0x00004000
++
++#define MR_LINK_OK 0x80000000
++
++ unsigned long link_time, cur_time;
++
++ u32 ability_match_cfg;
++ int ability_match_count;
++
++ char ability_match, idle_match, ack_match;
++
++ u32 txconfig, rxconfig;
++#define ANEG_CFG_NP 0x00000080
++#define ANEG_CFG_ACK 0x00000040
++#define ANEG_CFG_RF2 0x00000020
++#define ANEG_CFG_RF1 0x00000010
++#define ANEG_CFG_PS2 0x00000001
++#define ANEG_CFG_PS1 0x00008000
++#define ANEG_CFG_HD 0x00004000
++#define ANEG_CFG_FD 0x00002000
++#define ANEG_CFG_INVAL 0x00001f06
++
++};
++#define ANEG_OK 0
++#define ANEG_DONE 1
++#define ANEG_TIMER_ENAB 2
++#define ANEG_FAILED -1
++
++#define ANEG_STATE_SETTLE_TIME 10000
++
++static int tg3_fiber_aneg_smachine(struct tg3 *tp,
++ struct tg3_fiber_aneginfo *ap)
++{
++ u16 flowctrl;
++ unsigned long delta;
++ u32 rx_cfg_reg;
++ int ret;
++
++ if (ap->state == ANEG_STATE_UNKNOWN) {
++ ap->rxconfig = 0;
++ ap->link_time = 0;
++ ap->cur_time = 0;
++ ap->ability_match_cfg = 0;
++ ap->ability_match_count = 0;
++ ap->ability_match = 0;
++ ap->idle_match = 0;
++ ap->ack_match = 0;
++ }
++ ap->cur_time++;
++
++ if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
++ rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
++
++ if (rx_cfg_reg != ap->ability_match_cfg) {
++ ap->ability_match_cfg = rx_cfg_reg;
++ ap->ability_match = 0;
++ ap->ability_match_count = 0;
++ } else {
++ if (++ap->ability_match_count > 1) {
++ ap->ability_match = 1;
++ ap->ability_match_cfg = rx_cfg_reg;
++ }
++ }
++ if (rx_cfg_reg & ANEG_CFG_ACK)
++ ap->ack_match = 1;
++ else
++ ap->ack_match = 0;
++
++ ap->idle_match = 0;
++ } else {
++ ap->idle_match = 1;
++ ap->ability_match_cfg = 0;
++ ap->ability_match_count = 0;
++ ap->ability_match = 0;
++ ap->ack_match = 0;
++
++ rx_cfg_reg = 0;
++ }
++
++ ap->rxconfig = rx_cfg_reg;
++ ret = ANEG_OK;
++
++ switch (ap->state) {
++ case ANEG_STATE_UNKNOWN:
++ if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
++ ap->state = ANEG_STATE_AN_ENABLE;
++
++ /* fallthru */
++ case ANEG_STATE_AN_ENABLE:
++ ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
++ if (ap->flags & MR_AN_ENABLE) {
++ ap->link_time = 0;
++ ap->cur_time = 0;
++ ap->ability_match_cfg = 0;
++ ap->ability_match_count = 0;
++ ap->ability_match = 0;
++ ap->idle_match = 0;
++ ap->ack_match = 0;
++
++ ap->state = ANEG_STATE_RESTART_INIT;
++ } else {
++ ap->state = ANEG_STATE_DISABLE_LINK_OK;
++ }
++ break;
++
++ case ANEG_STATE_RESTART_INIT:
++ ap->link_time = ap->cur_time;
++ ap->flags &= ~(MR_NP_LOADED);
++ ap->txconfig = 0;
++ tw32(MAC_TX_AUTO_NEG, 0);
++ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ ret = ANEG_TIMER_ENAB;
++ ap->state = ANEG_STATE_RESTART;
++
++ /* fallthru */
++ case ANEG_STATE_RESTART:
++ delta = ap->cur_time - ap->link_time;
++ if (delta > ANEG_STATE_SETTLE_TIME)
++ ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
++ else
++ ret = ANEG_TIMER_ENAB;
++ break;
++
++ case ANEG_STATE_DISABLE_LINK_OK:
++ ret = ANEG_DONE;
++ break;
++
++ case ANEG_STATE_ABILITY_DETECT_INIT:
++ ap->flags &= ~(MR_TOGGLE_TX);
++ ap->txconfig = ANEG_CFG_FD;
++ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
++ if (flowctrl & ADVERTISE_1000XPAUSE)
++ ap->txconfig |= ANEG_CFG_PS1;
++ if (flowctrl & ADVERTISE_1000XPSE_ASYM)
++ ap->txconfig |= ANEG_CFG_PS2;
++ tw32(MAC_TX_AUTO_NEG, ap->txconfig);
++ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ ap->state = ANEG_STATE_ABILITY_DETECT;
++ break;
++
++ case ANEG_STATE_ABILITY_DETECT:
++ if (ap->ability_match != 0 && ap->rxconfig != 0)
++ ap->state = ANEG_STATE_ACK_DETECT_INIT;
++ break;
++
++ case ANEG_STATE_ACK_DETECT_INIT:
++ ap->txconfig |= ANEG_CFG_ACK;
++ tw32(MAC_TX_AUTO_NEG, ap->txconfig);
++ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ ap->state = ANEG_STATE_ACK_DETECT;
++
++ /* fallthru */
++ case ANEG_STATE_ACK_DETECT:
++ if (ap->ack_match != 0) {
++ if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
++ (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
++ ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
++ } else {
++ ap->state = ANEG_STATE_AN_ENABLE;
++ }
++ } else if (ap->ability_match != 0 &&
++ ap->rxconfig == 0) {
++ ap->state = ANEG_STATE_AN_ENABLE;
++ }
++ break;
++
++ case ANEG_STATE_COMPLETE_ACK_INIT:
++ if (ap->rxconfig & ANEG_CFG_INVAL) {
++ ret = ANEG_FAILED;
++ break;
++ }
++ ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
++ MR_LP_ADV_HALF_DUPLEX |
++ MR_LP_ADV_SYM_PAUSE |
++ MR_LP_ADV_ASYM_PAUSE |
++ MR_LP_ADV_REMOTE_FAULT1 |
++ MR_LP_ADV_REMOTE_FAULT2 |
++ MR_LP_ADV_NEXT_PAGE |
++ MR_TOGGLE_RX |
++ MR_NP_RX);
++ if (ap->rxconfig & ANEG_CFG_FD)
++ ap->flags |= MR_LP_ADV_FULL_DUPLEX;
++ if (ap->rxconfig & ANEG_CFG_HD)
++ ap->flags |= MR_LP_ADV_HALF_DUPLEX;
++ if (ap->rxconfig & ANEG_CFG_PS1)
++ ap->flags |= MR_LP_ADV_SYM_PAUSE;
++ if (ap->rxconfig & ANEG_CFG_PS2)
++ ap->flags |= MR_LP_ADV_ASYM_PAUSE;
++ if (ap->rxconfig & ANEG_CFG_RF1)
++ ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
++ if (ap->rxconfig & ANEG_CFG_RF2)
++ ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
++ if (ap->rxconfig & ANEG_CFG_NP)
++ ap->flags |= MR_LP_ADV_NEXT_PAGE;
++
++ ap->link_time = ap->cur_time;
++
++ ap->flags ^= (MR_TOGGLE_TX);
++ if (ap->rxconfig & 0x0008)
++ ap->flags |= MR_TOGGLE_RX;
++ if (ap->rxconfig & ANEG_CFG_NP)
++ ap->flags |= MR_NP_RX;
++ ap->flags |= MR_PAGE_RX;
++
++ ap->state = ANEG_STATE_COMPLETE_ACK;
++ ret = ANEG_TIMER_ENAB;
++ break;
++
++ case ANEG_STATE_COMPLETE_ACK:
++ if (ap->ability_match != 0 &&
++ ap->rxconfig == 0) {
++ ap->state = ANEG_STATE_AN_ENABLE;
++ break;
++ }
++ delta = ap->cur_time - ap->link_time;
++ if (delta > ANEG_STATE_SETTLE_TIME) {
++ if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
++ ap->state = ANEG_STATE_IDLE_DETECT_INIT;
++ } else {
++ if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
++ !(ap->flags & MR_NP_RX)) {
++ ap->state = ANEG_STATE_IDLE_DETECT_INIT;
++ } else {
++ ret = ANEG_FAILED;
++ }
++ }
++ }
++ break;
++
++ case ANEG_STATE_IDLE_DETECT_INIT:
++ ap->link_time = ap->cur_time;
++ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ ap->state = ANEG_STATE_IDLE_DETECT;
++ ret = ANEG_TIMER_ENAB;
++ break;
++
++ case ANEG_STATE_IDLE_DETECT:
++ if (ap->ability_match != 0 &&
++ ap->rxconfig == 0) {
++ ap->state = ANEG_STATE_AN_ENABLE;
++ break;
++ }
++ delta = ap->cur_time - ap->link_time;
++ if (delta > ANEG_STATE_SETTLE_TIME) {
++ /* XXX another gem from the Broadcom driver :( */
++ ap->state = ANEG_STATE_LINK_OK;
++ }
++ break;
++
++ case ANEG_STATE_LINK_OK:
++ ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
++ ret = ANEG_DONE;
++ break;
++
++ case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
++ /* ??? unimplemented */
++ break;
++
++ case ANEG_STATE_NEXT_PAGE_WAIT:
++ /* ??? unimplemented */
++ break;
++
++ default:
++ ret = ANEG_FAILED;
++ break;
++ }
++
++ return ret;
++}
++
++static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
++{
++ int res = 0;
++ struct tg3_fiber_aneginfo aninfo;
++ int status = ANEG_FAILED;
++ unsigned int tick;
++ u32 tmp;
++
++ tw32_f(MAC_TX_AUTO_NEG, 0);
++
++ tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
++ tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
++ udelay(40);
++
++ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
++ udelay(40);
++
++ memset(&aninfo, 0, sizeof(aninfo));
++ aninfo.flags |= MR_AN_ENABLE;
++ aninfo.state = ANEG_STATE_UNKNOWN;
++ aninfo.cur_time = 0;
++ tick = 0;
++ while (++tick < 195000) {
++ status = tg3_fiber_aneg_smachine(tp, &aninfo);
++ if (status == ANEG_DONE || status == ANEG_FAILED)
++ break;
++
++ udelay(1);
++ }
++
++ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ *txflags = aninfo.txconfig;
++ *rxflags = aninfo.flags;
++
++ if (status == ANEG_DONE &&
++ (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
++ MR_LP_ADV_FULL_DUPLEX)))
++ res = 1;
++
++ return res;
++}
++
++static void tg3_init_bcm8002(struct tg3 *tp)
++{
++ u32 mac_status = tr32(MAC_STATUS);
++ int i;
++
++ /* Reset when initting first time or we have a link. */
++ if (tg3_flag(tp, INIT_COMPLETE) &&
++ !(mac_status & MAC_STATUS_PCS_SYNCED))
++ return;
++
++ /* Set PLL lock range. */
++ tg3_writephy(tp, 0x16, 0x8007);
++
++ /* SW reset */
++ tg3_writephy(tp, MII_BMCR, BMCR_RESET);
++
++ /* Wait for reset to complete. */
++ /* XXX schedule_timeout() ... */
++ for (i = 0; i < 500; i++)
++ udelay(10);
++
++ /* Config mode; select PMA/Ch 1 regs. */
++ tg3_writephy(tp, 0x10, 0x8411);
++
++ /* Enable auto-lock and comdet, select txclk for tx. */
++ tg3_writephy(tp, 0x11, 0x0a10);
++
++ tg3_writephy(tp, 0x18, 0x00a0);
++ tg3_writephy(tp, 0x16, 0x41ff);
++
++ /* Assert and deassert POR. */
++ tg3_writephy(tp, 0x13, 0x0400);
++ udelay(40);
++ tg3_writephy(tp, 0x13, 0x0000);
++
++ tg3_writephy(tp, 0x11, 0x0a50);
++ udelay(40);
++ tg3_writephy(tp, 0x11, 0x0a10);
++
++ /* Wait for signal to stabilize */
++ /* XXX schedule_timeout() ... */
++ for (i = 0; i < 15000; i++)
++ udelay(10);
++
++ /* Deselect the channel register so we can read the PHYID
++ * later.
++ */
++ tg3_writephy(tp, 0x10, 0x8011);
++}
++
++static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
++{
++ u16 flowctrl;
++ bool current_link_up;
++ u32 sg_dig_ctrl, sg_dig_status;
++ u32 serdes_cfg, expected_sg_dig_ctrl;
++ int workaround, port_a;
++
++ serdes_cfg = 0;
++ expected_sg_dig_ctrl = 0;
++ workaround = 0;
++ port_a = 1;
++ current_link_up = false;
++
++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
++ workaround = 1;
++ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
++ port_a = 0;
++
++ /* preserve bits 0-11,13,14 for signal pre-emphasis */
++ /* preserve bits 20-23 for voltage regulator */
++ serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
++ }
++
++ sg_dig_ctrl = tr32(SG_DIG_CTRL);
++
++ if (tp->link_config.autoneg != AUTONEG_ENABLE) {
++ if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
++ if (workaround) {
++ u32 val = serdes_cfg;
++
++ if (port_a)
++ val |= 0xc010000;
++ else
++ val |= 0x4010000;
++ tw32_f(MAC_SERDES_CFG, val);
++ }
++
++ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
++ }
++ if (mac_status & MAC_STATUS_PCS_SYNCED) {
++ tg3_setup_flow_control(tp, 0, 0);
++ current_link_up = true;
++ }
++ goto out;
++ }
++
++ /* Want auto-negotiation. */
++ expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
++
++ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
++ if (flowctrl & ADVERTISE_1000XPAUSE)
++ expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
++ if (flowctrl & ADVERTISE_1000XPSE_ASYM)
++ expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
++
++ if (sg_dig_ctrl != expected_sg_dig_ctrl) {
++ if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
++ tp->serdes_counter &&
++ ((mac_status & (MAC_STATUS_PCS_SYNCED |
++ MAC_STATUS_RCVD_CFG)) ==
++ MAC_STATUS_PCS_SYNCED)) {
++ tp->serdes_counter--;
++ current_link_up = true;
++ goto out;
++ }
++restart_autoneg:
++ if (workaround)
++ tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
++ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
++ udelay(5);
++ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
++
++ tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
++ } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
++ MAC_STATUS_SIGNAL_DET)) {
++ sg_dig_status = tr32(SG_DIG_STATUS);
++ mac_status = tr32(MAC_STATUS);
++
++ if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
++ (mac_status & MAC_STATUS_PCS_SYNCED)) {
++ u32 local_adv = 0, remote_adv = 0;
++
++ if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
++ local_adv |= ADVERTISE_1000XPAUSE;
++ if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
++ local_adv |= ADVERTISE_1000XPSE_ASYM;
++
++ if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
++ remote_adv |= LPA_1000XPAUSE;
++ if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
++ remote_adv |= LPA_1000XPAUSE_ASYM;
++
++ tp->link_config.rmt_adv =
++ mii_adv_to_ethtool_adv_x(remote_adv);
++
++ tg3_setup_flow_control(tp, local_adv, remote_adv);
++ current_link_up = true;
++ tp->serdes_counter = 0;
++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
++ } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
++ if (tp->serdes_counter)
++ tp->serdes_counter--;
++ else {
++ if (workaround) {
++ u32 val = serdes_cfg;
++
++ if (port_a)
++ val |= 0xc010000;
++ else
++ val |= 0x4010000;
++
++ tw32_f(MAC_SERDES_CFG, val);
++ }
++
++ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
++ udelay(40);
++
++ /* Link parallel detection - link is up */
++ /* only if we have PCS_SYNC and not */
++ /* receiving config code words */
++ mac_status = tr32(MAC_STATUS);
++ if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
++ !(mac_status & MAC_STATUS_RCVD_CFG)) {
++ tg3_setup_flow_control(tp, 0, 0);
++ current_link_up = true;
++ tp->phy_flags |=
++ TG3_PHYFLG_PARALLEL_DETECT;
++ tp->serdes_counter =
++ SERDES_PARALLEL_DET_TIMEOUT;
++ } else
++ goto restart_autoneg;
++ }
++ }
++ } else {
++ tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
++ }
++
++out:
++ return current_link_up;
++}
++
++static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
++{
++ bool current_link_up = false;
++
++ if (!(mac_status & MAC_STATUS_PCS_SYNCED))
++ goto out;
++
++ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
++ u32 txflags, rxflags;
++ int i;
++
++ if (fiber_autoneg(tp, &txflags, &rxflags)) {
++ u32 local_adv = 0, remote_adv = 0;
++
++ if (txflags & ANEG_CFG_PS1)
++ local_adv |= ADVERTISE_1000XPAUSE;
++ if (txflags & ANEG_CFG_PS2)
++ local_adv |= ADVERTISE_1000XPSE_ASYM;
++
++ if (rxflags & MR_LP_ADV_SYM_PAUSE)
++ remote_adv |= LPA_1000XPAUSE;
++ if (rxflags & MR_LP_ADV_ASYM_PAUSE)
++ remote_adv |= LPA_1000XPAUSE_ASYM;
++
++ tp->link_config.rmt_adv =
++ mii_adv_to_ethtool_adv_x(remote_adv);
++
++ tg3_setup_flow_control(tp, local_adv, remote_adv);
++
++ current_link_up = true;
++ }
++ for (i = 0; i < 30; i++) {
++ udelay(20);
++ tw32_f(MAC_STATUS,
++ (MAC_STATUS_SYNC_CHANGED |
++ MAC_STATUS_CFG_CHANGED));
++ udelay(40);
++ if ((tr32(MAC_STATUS) &
++ (MAC_STATUS_SYNC_CHANGED |
++ MAC_STATUS_CFG_CHANGED)) == 0)
++ break;
++ }
++
++ mac_status = tr32(MAC_STATUS);
++ if (!current_link_up &&
++ (mac_status & MAC_STATUS_PCS_SYNCED) &&
++ !(mac_status & MAC_STATUS_RCVD_CFG))
++ current_link_up = true;
++ } else {
++ tg3_setup_flow_control(tp, 0, 0);
++
++ /* Forcing 1000FD link up. */
++ current_link_up = true;
++
++ tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
++ udelay(40);
++
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++ }
++
++out:
++ return current_link_up;
++}
++
++static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
++{
++ u32 orig_pause_cfg;
++ u16 orig_active_speed;
++ u8 orig_active_duplex;
++ u32 mac_status;
++ bool current_link_up;
++ int i;
++
++ orig_pause_cfg = tp->link_config.active_flowctrl;
++ orig_active_speed = tp->link_config.active_speed;
++ orig_active_duplex = tp->link_config.active_duplex;
++
++ if (!tg3_flag(tp, HW_AUTONEG) &&
++ tp->link_up &&
++ tg3_flag(tp, INIT_COMPLETE)) {
++ mac_status = tr32(MAC_STATUS);
++ mac_status &= (MAC_STATUS_PCS_SYNCED |
++ MAC_STATUS_SIGNAL_DET |
++ MAC_STATUS_CFG_CHANGED |
++ MAC_STATUS_RCVD_CFG);
++ if (mac_status == (MAC_STATUS_PCS_SYNCED |
++ MAC_STATUS_SIGNAL_DET)) {
++ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
++ MAC_STATUS_CFG_CHANGED));
++ return 0;
++ }
++ }
++
++ tw32_f(MAC_TX_AUTO_NEG, 0);
++
++ tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
++ tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ if (tp->phy_id == TG3_PHY_ID_BCM8002)
++ tg3_init_bcm8002(tp);
++
++ /* Enable link change event even when serdes polling. */
++ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
++ udelay(40);
++
++ current_link_up = false;
++ tp->link_config.rmt_adv = 0;
++ mac_status = tr32(MAC_STATUS);
++
++ if (tg3_flag(tp, HW_AUTONEG))
++ current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
++ else
++ current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
++
++ tp->napi[0].hw_status->status =
++ (SD_STATUS_UPDATED |
++ (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
++
++ for (i = 0; i < 100; i++) {
++ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
++ MAC_STATUS_CFG_CHANGED));
++ udelay(5);
++ if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
++ MAC_STATUS_CFG_CHANGED |
++ MAC_STATUS_LNKSTATE_CHANGED)) == 0)
++ break;
++ }
++
++ mac_status = tr32(MAC_STATUS);
++ if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
++ current_link_up = false;
++ if (tp->link_config.autoneg == AUTONEG_ENABLE &&
++ tp->serdes_counter == 0) {
++ tw32_f(MAC_MODE, (tp->mac_mode |
++ MAC_MODE_SEND_CONFIGS));
++ udelay(1);
++ tw32_f(MAC_MODE, tp->mac_mode);
++ }
++ }
++
++ if (current_link_up) {
++ tp->link_config.active_speed = SPEED_1000;
++ tp->link_config.active_duplex = DUPLEX_FULL;
++ tw32(MAC_LED_CTRL, (tp->led_ctrl |
++ LED_CTRL_LNKLED_OVERRIDE |
++ LED_CTRL_1000MBPS_ON));
++ } else {
++ tp->link_config.active_speed = SPEED_UNKNOWN;
++ tp->link_config.active_duplex = DUPLEX_UNKNOWN;
++ tw32(MAC_LED_CTRL, (tp->led_ctrl |
++ LED_CTRL_LNKLED_OVERRIDE |
++ LED_CTRL_TRAFFIC_OVERRIDE));
++ }
++
++ if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
++ u32 now_pause_cfg = tp->link_config.active_flowctrl;
++ if (orig_pause_cfg != now_pause_cfg ||
++ orig_active_speed != tp->link_config.active_speed ||
++ orig_active_duplex != tp->link_config.active_duplex)
++ tg3_link_report(tp);
++ }
++
++ return 0;
++}
++
++static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
++{
++ int err = 0;
++ u32 bmsr, bmcr;
++ u16 current_speed = SPEED_UNKNOWN;
++ u8 current_duplex = DUPLEX_UNKNOWN;
++ bool current_link_up = false;
++ u32 local_adv, remote_adv, sgsr;
++
++ if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720) &&
++ !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
++ (sgsr & SERDES_TG3_SGMII_MODE)) {
++
++ if (force_reset)
++ tg3_phy_reset(tp);
++
++ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
++
++ if (!(sgsr & SERDES_TG3_LINK_UP)) {
++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
++ } else {
++ current_link_up = true;
++ if (sgsr & SERDES_TG3_SPEED_1000) {
++ current_speed = SPEED_1000;
++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
++ } else if (sgsr & SERDES_TG3_SPEED_100) {
++ current_speed = SPEED_100;
++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
++ } else {
++ current_speed = SPEED_10;
++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
++ }
++
++ if (sgsr & SERDES_TG3_FULL_DUPLEX)
++ current_duplex = DUPLEX_FULL;
++ else
++ current_duplex = DUPLEX_HALF;
++ }
++
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ tg3_clear_mac_status(tp);
++
++ goto fiber_setup_done;
++ }
++
++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ tg3_clear_mac_status(tp);
++
++ if (force_reset)
++ tg3_phy_reset(tp);
++
++ tp->link_config.rmt_adv = 0;
++
++ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
++ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
++ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
++ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
++ bmsr |= BMSR_LSTATUS;
++ else
++ bmsr &= ~BMSR_LSTATUS;
++ }
++
++ err |= tg3_readphy(tp, MII_BMCR, &bmcr);
++
++ if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
++ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
++ /* do nothing, just check for link up at the end */
++ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
++ u32 adv, newadv;
++
++ err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
++ newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
++ ADVERTISE_1000XPAUSE |
++ ADVERTISE_1000XPSE_ASYM |
++ ADVERTISE_SLCT);
++
++ newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
++ newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
++
++ if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
++ tg3_writephy(tp, MII_ADVERTISE, newadv);
++ bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
++ tg3_writephy(tp, MII_BMCR, bmcr);
++
++ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
++ tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
++
++ return err;
++ }
++ } else {
++ u32 new_bmcr;
++
++ bmcr &= ~BMCR_SPEED1000;
++ new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
++
++ if (tp->link_config.duplex == DUPLEX_FULL)
++ new_bmcr |= BMCR_FULLDPLX;
++
++ if (new_bmcr != bmcr) {
++ /* BMCR_SPEED1000 is a reserved bit that needs
++ * to be set on write.
++ */
++ new_bmcr |= BMCR_SPEED1000;
++
++ /* Force a linkdown */
++ if (tp->link_up) {
++ u32 adv;
++
++ err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
++ adv &= ~(ADVERTISE_1000XFULL |
++ ADVERTISE_1000XHALF |
++ ADVERTISE_SLCT);
++ tg3_writephy(tp, MII_ADVERTISE, adv);
++ tg3_writephy(tp, MII_BMCR, bmcr |
++ BMCR_ANRESTART |
++ BMCR_ANENABLE);
++ udelay(10);
++ tg3_carrier_off(tp);
++ }
++ tg3_writephy(tp, MII_BMCR, new_bmcr);
++ bmcr = new_bmcr;
++ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
++ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
++ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
++ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
++ bmsr |= BMSR_LSTATUS;
++ else
++ bmsr &= ~BMSR_LSTATUS;
++ }
++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
++ }
++ }
++
++ if (bmsr & BMSR_LSTATUS) {
++ current_speed = SPEED_1000;
++ current_link_up = true;
++ if (bmcr & BMCR_FULLDPLX)
++ current_duplex = DUPLEX_FULL;
++ else
++ current_duplex = DUPLEX_HALF;
++
++ local_adv = 0;
++ remote_adv = 0;
++
++ if (bmcr & BMCR_ANENABLE) {
++ u32 common;
++
++ err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
++ err |= tg3_readphy(tp, MII_LPA, &remote_adv);
++ common = local_adv & remote_adv;
++ if (common & (ADVERTISE_1000XHALF |
++ ADVERTISE_1000XFULL)) {
++ if (common & ADVERTISE_1000XFULL)
++ current_duplex = DUPLEX_FULL;
++ else
++ current_duplex = DUPLEX_HALF;
++
++ tp->link_config.rmt_adv =
++ mii_adv_to_ethtool_adv_x(remote_adv);
++ } else if (!tg3_flag(tp, 5780_CLASS)) {
++ /* Link is up via parallel detect */
++ } else {
++ current_link_up = false;
++ }
++ }
++ }
++
++fiber_setup_done:
++ if (current_link_up && current_duplex == DUPLEX_FULL)
++ tg3_setup_flow_control(tp, local_adv, remote_adv);
++
++ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
++ if (tp->link_config.active_duplex == DUPLEX_HALF)
++ tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
++
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
++
++ tp->link_config.active_speed = current_speed;
++ tp->link_config.active_duplex = current_duplex;
++
++ tg3_test_and_report_link_chg(tp, current_link_up);
++ return err;
++}
++
++static void tg3_serdes_parallel_detect(struct tg3 *tp)
++{
++ if (tp->serdes_counter) {
++ /* Give autoneg time to complete. */
++ tp->serdes_counter--;
++ return;
++ }
++
++ if (!tp->link_up &&
++ (tp->link_config.autoneg == AUTONEG_ENABLE)) {
++ u32 bmcr;
++
++ tg3_readphy(tp, MII_BMCR, &bmcr);
++ if (bmcr & BMCR_ANENABLE) {
++ u32 phy1, phy2;
++
++ /* Select shadow register 0x1f */
++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
++ tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
++
++ /* Select expansion interrupt status register */
++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
++ MII_TG3_DSP_EXP1_INT_STAT);
++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
++
++ if ((phy1 & 0x10) && !(phy2 & 0x20)) {
++ /* We have signal detect and not receiving
++ * config code words, link is up by parallel
++ * detection.
++ */
++
++ bmcr &= ~BMCR_ANENABLE;
++ bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
++ tg3_writephy(tp, MII_BMCR, bmcr);
++ tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
++ }
++ }
++ } else if (tp->link_up &&
++ (tp->link_config.autoneg == AUTONEG_ENABLE) &&
++ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
++ u32 phy2;
++
++ /* Select expansion interrupt status register */
++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
++ MII_TG3_DSP_EXP1_INT_STAT);
++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
++ if (phy2 & 0x20) {
++ u32 bmcr;
++
++ /* Config code words received, turn on autoneg. */
++ tg3_readphy(tp, MII_BMCR, &bmcr);
++ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
++
++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
++
++ }
++ }
++}
++
++static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
++{
++ u32 val;
++ int err;
++
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
++ err = tg3_setup_fiber_phy(tp, force_reset);
++ else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
++ err = tg3_setup_fiber_mii_phy(tp, force_reset);
++ else
++ err = tg3_setup_copper_phy(tp, force_reset);
++
++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
++ u32 scale;
++
++ val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
++ if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
++ scale = 65;
++ else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
++ scale = 6;
++ else
++ scale = 12;
++
++ val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
++ val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
++ tw32(GRC_MISC_CFG, val);
++ }
++
++ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
++ (6 << TX_LENGTHS_IPG_SHIFT);
++ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ val |= tr32(MAC_TX_LENGTHS) &
++ (TX_LENGTHS_JMB_FRM_LEN_MSK |
++ TX_LENGTHS_CNT_DWN_VAL_MSK);
++
++ if (tp->link_config.active_speed == SPEED_1000 &&
++ tp->link_config.active_duplex == DUPLEX_HALF)
++ tw32(MAC_TX_LENGTHS, val |
++ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
++ else
++ tw32(MAC_TX_LENGTHS, val |
++ (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
++
++ if (!tg3_flag(tp, 5705_PLUS)) {
++ if (tp->link_up) {
++ tw32(HOSTCC_STAT_COAL_TICKS,
++ tp->coal.stats_block_coalesce_usecs);
++ } else {
++ tw32(HOSTCC_STAT_COAL_TICKS, 0);
++ }
++ }
++
++ if (tg3_flag(tp, ASPM_WORKAROUND)) {
++ val = tr32(PCIE_PWR_MGMT_THRESH);
++ if (!tp->link_up)
++ val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
++ tp->pwrmgmt_thresh;
++ else
++ val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
++ tw32(PCIE_PWR_MGMT_THRESH, val);
++ }
++
++ return err;
++}
++
++/* tp->lock must be held */
++static u64 tg3_refclk_read(struct tg3 *tp)
++{
++ u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
++ return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
++}
++
++/* tp->lock must be held */
++static void tg3_refclk_write(struct tg3 *tp, u64 newval)
++{
++ u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
++
++ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
++ tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
++ tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
++ tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
++}
++
++static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
++static inline void tg3_full_unlock(struct tg3 *tp);
++static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
++ SOF_TIMESTAMPING_RX_SOFTWARE |
++ SOF_TIMESTAMPING_SOFTWARE;
++
++ if (tg3_flag(tp, PTP_CAPABLE)) {
++ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
++ SOF_TIMESTAMPING_RX_HARDWARE |
++ SOF_TIMESTAMPING_RAW_HARDWARE;
++ }
++
++ if (tp->ptp_clock)
++ info->phc_index = ptp_clock_index(tp->ptp_clock);
++ else
++ info->phc_index = -1;
++
++ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
++
++ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
++ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
++ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
++ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
++ return 0;
++}
++
++static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
++{
++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
++ bool neg_adj = false;
++ u32 correction = 0;
++
++ if (ppb < 0) {
++ neg_adj = true;
++ ppb = -ppb;
++ }
++
++ /* Frequency adjustment is performed using hardware with a 24 bit
++ * accumulator and a programmable correction value. On each clk, the
++ * correction value gets added to the accumulator and when it
++ * overflows, the time counter is incremented/decremented.
++ *
++ * So conversion from ppb to correction value is
++ * ppb * (1 << 24) / 1000000000
++ */
++ correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
++ TG3_EAV_REF_CLK_CORRECT_MASK;
++
++ tg3_full_lock(tp, 0);
++
++ if (correction)
++ tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
++ TG3_EAV_REF_CLK_CORRECT_EN |
++ (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
++ else
++ tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
++
++ tg3_full_unlock(tp);
++
++ return 0;
++}
++
++static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
++{
++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
++
++ tg3_full_lock(tp, 0);
++ tp->ptp_adjust += delta;
++ tg3_full_unlock(tp);
++
++ return 0;
++}
++
++static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
++{
++ u64 ns;
++ u32 remainder;
++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
++
++ tg3_full_lock(tp, 0);
++ ns = tg3_refclk_read(tp);
++ ns += tp->ptp_adjust;
++ tg3_full_unlock(tp);
++
++ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
++ ts->tv_nsec = remainder;
++
++ return 0;
++}
++
++static int tg3_ptp_settime(struct ptp_clock_info *ptp,
++ const struct timespec *ts)
++{
++ u64 ns;
++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
++
++ ns = timespec_to_ns(ts);
++
++ tg3_full_lock(tp, 0);
++ tg3_refclk_write(tp, ns);
++ tp->ptp_adjust = 0;
++ tg3_full_unlock(tp);
++
++ return 0;
++}
++
++static int tg3_ptp_enable(struct ptp_clock_info *ptp,
++ struct ptp_clock_request *rq, int on)
++{
++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
++ u32 clock_ctl;
++ int rval = 0;
++
++ switch (rq->type) {
++ case PTP_CLK_REQ_PEROUT:
++ if (rq->perout.index != 0)
++ return -EINVAL;
++
++ tg3_full_lock(tp, 0);
++ clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
++ clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
++
++ if (on) {
++ u64 nsec;
++
++ nsec = rq->perout.start.sec * 1000000000ULL +
++ rq->perout.start.nsec;
++
++ if (rq->perout.period.sec || rq->perout.period.nsec) {
++ netdev_warn(tp->dev,
++ "Device supports only a one-shot timesync output, period must be 0\n");
++ rval = -EINVAL;
++ goto err_out;
++ }
++
++ if (nsec & (1ULL << 63)) {
++ netdev_warn(tp->dev,
++ "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
++ rval = -EINVAL;
++ goto err_out;
++ }
++
++ tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
++ tw32(TG3_EAV_WATCHDOG0_MSB,
++ TG3_EAV_WATCHDOG0_EN |
++ ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
++
++ tw32(TG3_EAV_REF_CLCK_CTL,
++ clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
++ } else {
++ tw32(TG3_EAV_WATCHDOG0_MSB, 0);
++ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
++ }
++
++err_out:
++ tg3_full_unlock(tp);
++ return rval;
++
++ default:
++ break;
++ }
++
++ return -EOPNOTSUPP;
++}
++
++static const struct ptp_clock_info tg3_ptp_caps = {
++ .owner = THIS_MODULE,
++ .name = "tg3 clock",
++ .max_adj = 250000000,
++ .n_alarm = 0,
++ .n_ext_ts = 0,
++ .n_per_out = 1,
++ .n_pins = 0,
++ .pps = 0,
++ .adjfreq = tg3_ptp_adjfreq,
++ .adjtime = tg3_ptp_adjtime,
++ .gettime = tg3_ptp_gettime,
++ .settime = tg3_ptp_settime,
++ .enable = tg3_ptp_enable,
++};
++
++static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
++ struct skb_shared_hwtstamps *timestamp)
++{
++ memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
++ timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
++ tp->ptp_adjust);
++}
++
++/* tp->lock must be held */
++static void tg3_ptp_init(struct tg3 *tp)
++{
++ if (!tg3_flag(tp, PTP_CAPABLE))
++ return;
++
++ /* Initialize the hardware clock to the system time. */
++ tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
++ tp->ptp_adjust = 0;
++ tp->ptp_info = tg3_ptp_caps;
++}
++
++/* tp->lock must be held */
++static void tg3_ptp_resume(struct tg3 *tp)
++{
++ if (!tg3_flag(tp, PTP_CAPABLE))
++ return;
++
++ tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
++ tp->ptp_adjust = 0;
++}
++
++static void tg3_ptp_fini(struct tg3 *tp)
++{
++ if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
++ return;
++
++ ptp_clock_unregister(tp->ptp_clock);
++ tp->ptp_clock = NULL;
++ tp->ptp_adjust = 0;
++}
++
++static inline int tg3_irq_sync(struct tg3 *tp)
++{
++ return tp->irq_sync;
++}
++
++static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
++{
++ int i;
++
++ dst = (u32 *)((u8 *)dst + off);
++ for (i = 0; i < len; i += sizeof(u32))
++ *dst++ = tr32(off + i);
++}
++
++static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
++{
++ tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
++ tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
++ tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
++ tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
++ tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
++ tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
++ tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
++ tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
++ tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
++ tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
++ tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
++ tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
++ tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
++ tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
++ tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
++ tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
++ tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
++ tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
++ tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
++
++ if (tg3_flag(tp, SUPPORT_MSIX))
++ tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
++
++ tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
++ tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
++ tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
++ tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
++ tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
++ tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
++ tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
++ tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
++
++ if (!tg3_flag(tp, 5705_PLUS)) {
++ tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
++ tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
++ tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
++ }
++
++ tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
++ tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
++ tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
++ tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
++ tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
++
++ if (tg3_flag(tp, NVRAM))
++ tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
++}
++
++static void tg3_dump_state(struct tg3 *tp)
++{
++ int i;
++ u32 *regs;
++
++ regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
++ if (!regs)
++ return;
++
++ if (tg3_flag(tp, PCI_EXPRESS)) {
++ /* Read up to but not including private PCI registers */
++ for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
++ regs[i / sizeof(u32)] = tr32(i);
++ } else
++ tg3_dump_legacy_regs(tp, regs);
++
++ for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
++ if (!regs[i + 0] && !regs[i + 1] &&
++ !regs[i + 2] && !regs[i + 3])
++ continue;
++
++ netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i * 4,
++ regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
++ }
++
++ kfree(regs);
++
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ /* SW status block */
++ netdev_err(tp->dev,
++ "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
++ i,
++ tnapi->hw_status->status,
++ tnapi->hw_status->status_tag,
++ tnapi->hw_status->rx_jumbo_consumer,
++ tnapi->hw_status->rx_consumer,
++ tnapi->hw_status->rx_mini_consumer,
++ tnapi->hw_status->idx[0].rx_producer,
++ tnapi->hw_status->idx[0].tx_consumer);
++
++ netdev_err(tp->dev,
++ "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
++ i,
++ tnapi->last_tag, tnapi->last_irq_tag,
++ tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
++ tnapi->rx_rcb_ptr,
++ tnapi->prodring.rx_std_prod_idx,
++ tnapi->prodring.rx_std_cons_idx,
++ tnapi->prodring.rx_jmb_prod_idx,
++ tnapi->prodring.rx_jmb_cons_idx);
++ }
++}
++
++/* This is called whenever we suspect that the system chipset is re-
++ * ordering the sequence of MMIO to the tx send mailbox. The symptom
++ * is bogus tx completions. We try to recover by setting the
++ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
++ * in the workqueue.
++ */
++static void tg3_tx_recover(struct tg3 *tp)
++{
++ BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
++ tp->write32_tx_mbox == tg3_write_indirect_mbox);
++
++ netdev_warn(tp->dev,
++ "The system may be re-ordering memory-mapped I/O "
++ "cycles to the network device, attempting to recover. "
++ "Please report the problem to the driver maintainer "
++ "and include system chipset information.\n");
++
++ tg3_flag_set(tp, TX_RECOVERY_PENDING);
++}
++
++static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
++{
++ /* Tell compiler to fetch tx indices from memory. */
++ barrier();
++ return tnapi->tx_pending -
++ ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
++}
++
++/* Tigon3 never reports partial packet sends. So we do not
++ * need special logic to handle SKBs that have not had all
++ * of their frags sent yet, like SunGEM does.
++ */
++static void tg3_tx(struct tg3_napi *tnapi)
++{
++ struct tg3 *tp = tnapi->tp;
++ u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
++ u32 sw_idx = tnapi->tx_cons;
++ struct netdev_queue *txq;
++ int index = tnapi - tp->napi;
++ unsigned int pkts_compl = 0, bytes_compl = 0;
++
++ if (tg3_flag(tp, ENABLE_TSS))
++ index--;
++
++ txq = netdev_get_tx_queue(tp->dev, index);
++
++ while (sw_idx != hw_idx) {
++ struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
++ struct sk_buff *skb = ri->skb;
++ int i, tx_bug = 0;
++
++ if (unlikely(skb == NULL)) {
++ tg3_tx_recover(tp);
++ return;
++ }
++
++ if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
++ struct skb_shared_hwtstamps timestamp;
++ u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
++ hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
++
++ tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
++
++ skb_tstamp_tx(skb, &timestamp);
++ }
++
++ pci_unmap_single(tp->pdev,
++ dma_unmap_addr(ri, mapping),
++ skb_headlen(skb),
++ PCI_DMA_TODEVICE);
++
++ ri->skb = NULL;
++
++ while (ri->fragmented) {
++ ri->fragmented = false;
++ sw_idx = NEXT_TX(sw_idx);
++ ri = &tnapi->tx_buffers[sw_idx];
++ }
++
++ sw_idx = NEXT_TX(sw_idx);
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ ri = &tnapi->tx_buffers[sw_idx];
++ if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
++ tx_bug = 1;
++
++ pci_unmap_page(tp->pdev,
++ dma_unmap_addr(ri, mapping),
++ skb_frag_size(&skb_shinfo(skb)->frags[i]),
++ PCI_DMA_TODEVICE);
++
++ while (ri->fragmented) {
++ ri->fragmented = false;
++ sw_idx = NEXT_TX(sw_idx);
++ ri = &tnapi->tx_buffers[sw_idx];
++ }
++
++ sw_idx = NEXT_TX(sw_idx);
++ }
++
++ pkts_compl++;
++ bytes_compl += skb->len;
++
++ dev_kfree_skb(skb);
++
++ if (unlikely(tx_bug)) {
++ tg3_tx_recover(tp);
++ return;
++ }
++ }
++
++ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
++
++ tnapi->tx_cons = sw_idx;
++
++ /* Need to make the tx_cons update visible to tg3_start_xmit()
++ * before checking for netif_queue_stopped(). Without the
++ * memory barrier, there is a small possibility that tg3_start_xmit()
++ * will miss it and cause the queue to be stopped forever.
++ */
++ smp_mb();
++
++ if (unlikely(netif_tx_queue_stopped(txq) &&
++ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
++ __netif_tx_lock(txq, smp_processor_id());
++ if (netif_tx_queue_stopped(txq) &&
++ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
++ netif_tx_wake_queue(txq);
++ __netif_tx_unlock(txq);
++ }
++}
++
++static void tg3_frag_free(bool is_frag, void *data)
++{
++ if (is_frag)
++ put_page(virt_to_head_page(data));
++ else
++ kfree(data);
++}
++
++static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
++{
++ unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++
++ if (!ri->data)
++ return;
++
++ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
++ map_sz, PCI_DMA_FROMDEVICE);
++ tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
++ ri->data = NULL;
++}
++
++
++/* Returns size of skb allocated or < 0 on error.
++ *
++ * We only need to fill in the address because the other members
++ * of the RX descriptor are invariant, see tg3_init_rings.
++ *
++ * Note the purposeful assymetry of cpu vs. chip accesses. For
++ * posting buffers we only dirty the first cache line of the RX
++ * descriptor (containing the address). Whereas for the RX status
++ * buffers the cpu only reads the last cacheline of the RX descriptor
++ * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
++ */
++static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
++ u32 opaque_key, u32 dest_idx_unmasked,
++ unsigned int *frag_size)
++{
++ struct tg3_rx_buffer_desc *desc;
++ struct ring_info *map;
++ u8 *data;
++ dma_addr_t mapping;
++ int skb_size, data_size, dest_idx;
++
++ switch (opaque_key) {
++ case RXD_OPAQUE_RING_STD:
++ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
++ desc = &tpr->rx_std[dest_idx];
++ map = &tpr->rx_std_buffers[dest_idx];
++ data_size = tp->rx_pkt_map_sz;
++ break;
++
++ case RXD_OPAQUE_RING_JUMBO:
++ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
++ desc = &tpr->rx_jmb[dest_idx].std;
++ map = &tpr->rx_jmb_buffers[dest_idx];
++ data_size = TG3_RX_JMB_MAP_SZ;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ /* Do not overwrite any of the map or rp information
++ * until we are sure we can commit to a new buffer.
++ *
++ * Callers depend upon this behavior and assume that
++ * we leave everything unchanged if we fail.
++ */
++ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++ if (skb_size <= PAGE_SIZE) {
++ data = netdev_alloc_frag(skb_size);
++ *frag_size = skb_size;
++ } else {
++ data = kmalloc(skb_size, GFP_ATOMIC);
++ *frag_size = 0;
++ }
++ if (!data)
++ return -ENOMEM;
++
++ mapping = pci_map_single(tp->pdev,
++ data + TG3_RX_OFFSET(tp),
++ data_size,
++ PCI_DMA_FROMDEVICE);
++ if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
++ tg3_frag_free(skb_size <= PAGE_SIZE, data);
++ return -EIO;
++ }
++
++ map->data = data;
++ dma_unmap_addr_set(map, mapping, mapping);
++
++ desc->addr_hi = ((u64)mapping >> 32);
++ desc->addr_lo = ((u64)mapping & 0xffffffff);
++
++ return data_size;
++}
++
++/* We only need to move over in the address because the other
++ * members of the RX descriptor are invariant. See notes above
++ * tg3_alloc_rx_data for full details.
++ */
++static void tg3_recycle_rx(struct tg3_napi *tnapi,
++ struct tg3_rx_prodring_set *dpr,
++ u32 opaque_key, int src_idx,
++ u32 dest_idx_unmasked)
++{
++ struct tg3 *tp = tnapi->tp;
++ struct tg3_rx_buffer_desc *src_desc, *dest_desc;
++ struct ring_info *src_map, *dest_map;
++ struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
++ int dest_idx;
++
++ switch (opaque_key) {
++ case RXD_OPAQUE_RING_STD:
++ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
++ dest_desc = &dpr->rx_std[dest_idx];
++ dest_map = &dpr->rx_std_buffers[dest_idx];
++ src_desc = &spr->rx_std[src_idx];
++ src_map = &spr->rx_std_buffers[src_idx];
++ break;
++
++ case RXD_OPAQUE_RING_JUMBO:
++ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
++ dest_desc = &dpr->rx_jmb[dest_idx].std;
++ dest_map = &dpr->rx_jmb_buffers[dest_idx];
++ src_desc = &spr->rx_jmb[src_idx].std;
++ src_map = &spr->rx_jmb_buffers[src_idx];
++ break;
++
++ default:
++ return;
++ }
++
++ dest_map->data = src_map->data;
++ dma_unmap_addr_set(dest_map, mapping,
++ dma_unmap_addr(src_map, mapping));
++ dest_desc->addr_hi = src_desc->addr_hi;
++ dest_desc->addr_lo = src_desc->addr_lo;
++
++ /* Ensure that the update to the skb happens after the physical
++ * addresses have been transferred to the new BD location.
++ */
++ smp_wmb();
++
++ src_map->data = NULL;
++}
++
++/* The RX ring scheme is composed of multiple rings which post fresh
++ * buffers to the chip, and one special ring the chip uses to report
++ * status back to the host.
++ *
++ * The special ring reports the status of received packets to the
++ * host. The chip does not write into the original descriptor the
++ * RX buffer was obtained from. The chip simply takes the original
++ * descriptor as provided by the host, updates the status and length
++ * field, then writes this into the next status ring entry.
++ *
++ * Each ring the host uses to post buffers to the chip is described
++ * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
++ * it is first placed into the on-chip ram. When the packet's length
++ * is known, it walks down the TG3_BDINFO entries to select the ring.
++ * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
++ * which is within the range of the new packet's length is chosen.
++ *
++ * The "separate ring for rx status" scheme may sound queer, but it makes
++ * sense from a cache coherency perspective. If only the host writes
++ * to the buffer post rings, and only the chip writes to the rx status
++ * rings, then cache lines never move beyond shared-modified state.
++ * If both the host and chip were to write into the same ring, cache line
++ * eviction could occur since both entities want it in an exclusive state.
++ */
++static int tg3_rx(struct tg3_napi *tnapi, int budget)
++{
++ struct tg3 *tp = tnapi->tp;
++ u32 work_mask, rx_std_posted = 0;
++ u32 std_prod_idx, jmb_prod_idx;
++ u32 sw_idx = tnapi->rx_rcb_ptr;
++ u16 hw_idx;
++ int received;
++ struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
++
++ hw_idx = *(tnapi->rx_rcb_prod_idx);
++ /*
++ * We need to order the read of hw_idx and the read of
++ * the opaque cookie.
++ */
++ rmb();
++ work_mask = 0;
++ received = 0;
++ std_prod_idx = tpr->rx_std_prod_idx;
++ jmb_prod_idx = tpr->rx_jmb_prod_idx;
++ while (sw_idx != hw_idx && budget > 0) {
++ struct ring_info *ri;
++ struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
++ unsigned int len;
++ struct sk_buff *skb;
++ dma_addr_t dma_addr;
++ u32 opaque_key, desc_idx, *post_ptr;
++ u8 *data;
++ u64 tstamp = 0;
++
++ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
++ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
++ if (opaque_key == RXD_OPAQUE_RING_STD) {
++ ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
++ dma_addr = dma_unmap_addr(ri, mapping);
++ data = ri->data;
++ post_ptr = &std_prod_idx;
++ rx_std_posted++;
++ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
++ ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
++ dma_addr = dma_unmap_addr(ri, mapping);
++ data = ri->data;
++ post_ptr = &jmb_prod_idx;
++ } else
++ goto next_pkt_nopost;
++
++ work_mask |= opaque_key;
++
++ if (desc->err_vlan & RXD_ERR_MASK) {
++ drop_it:
++ tg3_recycle_rx(tnapi, tpr, opaque_key,
++ desc_idx, *post_ptr);
++ drop_it_no_recycle:
++ /* Other statistics kept track of by card. */
++ tp->rx_dropped++;
++ goto next_pkt;
++ }
++
++ prefetch(data + TG3_RX_OFFSET(tp));
++ len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
++ ETH_FCS_LEN;
++
++ if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
++ RXD_FLAG_PTPSTAT_PTPV1 ||
++ (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
++ RXD_FLAG_PTPSTAT_PTPV2) {
++ tstamp = tr32(TG3_RX_TSTAMP_LSB);
++ tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
++ }
++
++ if (len > TG3_RX_COPY_THRESH(tp)) {
++ int skb_size;
++ unsigned int frag_size;
++
++ skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
++ *post_ptr, &frag_size);
++ if (skb_size < 0)
++ goto drop_it;
++
++ pci_unmap_single(tp->pdev, dma_addr, skb_size,
++ PCI_DMA_FROMDEVICE);
++
++ /* Ensure that the update to the data happens
++ * after the usage of the old DMA mapping.
++ */
++ smp_wmb();
++
++ ri->data = NULL;
++
++ skb = build_skb(data, frag_size);
++ if (!skb) {
++ tg3_frag_free(frag_size != 0, data);
++ goto drop_it_no_recycle;
++ }
++ skb_reserve(skb, TG3_RX_OFFSET(tp));
++ } else {
++ tg3_recycle_rx(tnapi, tpr, opaque_key,
++ desc_idx, *post_ptr);
++
++ skb = netdev_alloc_skb(tp->dev,
++ len + TG3_RAW_IP_ALIGN);
++ if (skb == NULL)
++ goto drop_it_no_recycle;
++
++ skb_reserve(skb, TG3_RAW_IP_ALIGN);
++ pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
++ memcpy(skb->data,
++ data + TG3_RX_OFFSET(tp),
++ len);
++ pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
++ }
++
++ skb_put(skb, len);
++ if (tstamp)
++ tg3_hwclock_to_timestamp(tp, tstamp,
++ skb_hwtstamps(skb));
++
++ if ((tp->dev->features & NETIF_F_RXCSUM) &&
++ (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
++ (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
++ >> RXD_TCPCSUM_SHIFT) == 0xffff))
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ else
++ skb_checksum_none_assert(skb);
++
++ skb->protocol = eth_type_trans(skb, tp->dev);
++
++ if (len > (tp->dev->mtu + ETH_HLEN) &&
++ skb->protocol != htons(ETH_P_8021Q) &&
++ skb->protocol != htons(ETH_P_8021AD)) {
++ dev_kfree_skb(skb);
++ goto drop_it_no_recycle;
++ }
++
++ if (desc->type_flags & RXD_FLAG_VLAN &&
++ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
++ desc->err_vlan & RXD_VLAN_MASK);
++
++ napi_gro_receive(&tnapi->napi, skb);
++
++ received++;
++ budget--;
++
++next_pkt:
++ (*post_ptr)++;
++
++ if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
++ tpr->rx_std_prod_idx = std_prod_idx &
++ tp->rx_std_ring_mask;
++ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
++ tpr->rx_std_prod_idx);
++ work_mask &= ~RXD_OPAQUE_RING_STD;
++ rx_std_posted = 0;
++ }
++next_pkt_nopost:
++ sw_idx++;
++ sw_idx &= tp->rx_ret_ring_mask;
++
++ /* Refresh hw_idx to see if there is new work */
++ if (sw_idx == hw_idx) {
++ hw_idx = *(tnapi->rx_rcb_prod_idx);
++ rmb();
++ }
++ }
++
++ /* ACK the status ring. */
++ tnapi->rx_rcb_ptr = sw_idx;
++ tw32_rx_mbox(tnapi->consmbox, sw_idx);
++
++ /* Refill RX ring(s). */
++ if (!tg3_flag(tp, ENABLE_RSS)) {
++ /* Sync BD data before updating mailbox */
++ wmb();
++
++ if (work_mask & RXD_OPAQUE_RING_STD) {
++ tpr->rx_std_prod_idx = std_prod_idx &
++ tp->rx_std_ring_mask;
++ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
++ tpr->rx_std_prod_idx);
++ }
++ if (work_mask & RXD_OPAQUE_RING_JUMBO) {
++ tpr->rx_jmb_prod_idx = jmb_prod_idx &
++ tp->rx_jmb_ring_mask;
++ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
++ tpr->rx_jmb_prod_idx);
++ }
++ mmiowb();
++ } else if (work_mask) {
++ /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
++ * updated before the producer indices can be updated.
++ */
++ smp_wmb();
++
++ tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
++ tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
++
++ if (tnapi != &tp->napi[1]) {
++ tp->rx_refill = true;
++ napi_schedule(&tp->napi[1].napi);
++ }
++ }
++
++ return received;
++}
++
++static void tg3_poll_link(struct tg3 *tp)
++{
++ /* handle link change and other phy events */
++ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
++ struct tg3_hw_status *sblk = tp->napi[0].hw_status;
++
++ if (sblk->status & SD_STATUS_LINK_CHG) {
++ sblk->status = SD_STATUS_UPDATED |
++ (sblk->status & ~SD_STATUS_LINK_CHG);
++ spin_lock(&tp->lock);
++ if (tg3_flag(tp, USE_PHYLIB)) {
++ tw32_f(MAC_STATUS,
++ (MAC_STATUS_SYNC_CHANGED |
++ MAC_STATUS_CFG_CHANGED |
++ MAC_STATUS_MI_COMPLETION |
++ MAC_STATUS_LNKSTATE_CHANGED));
++ udelay(40);
++ } else
++ tg3_setup_phy(tp, false);
++ spin_unlock(&tp->lock);
++ }
++ }
++}
++
++static int tg3_rx_prodring_xfer(struct tg3 *tp,
++ struct tg3_rx_prodring_set *dpr,
++ struct tg3_rx_prodring_set *spr)
++{
++ u32 si, di, cpycnt, src_prod_idx;
++ int i, err = 0;
++
++ while (1) {
++ src_prod_idx = spr->rx_std_prod_idx;
++
++ /* Make sure updates to the rx_std_buffers[] entries and the
++ * standard producer index are seen in the correct order.
++ */
++ smp_rmb();
++
++ if (spr->rx_std_cons_idx == src_prod_idx)
++ break;
++
++ if (spr->rx_std_cons_idx < src_prod_idx)
++ cpycnt = src_prod_idx - spr->rx_std_cons_idx;
++ else
++ cpycnt = tp->rx_std_ring_mask + 1 -
++ spr->rx_std_cons_idx;
++
++ cpycnt = min(cpycnt,
++ tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
++
++ si = spr->rx_std_cons_idx;
++ di = dpr->rx_std_prod_idx;
++
++ for (i = di; i < di + cpycnt; i++) {
++ if (dpr->rx_std_buffers[i].data) {
++ cpycnt = i - di;
++ err = -ENOSPC;
++ break;
++ }
++ }
++
++ if (!cpycnt)
++ break;
++
++ /* Ensure that updates to the rx_std_buffers ring and the
++ * shadowed hardware producer ring from tg3_recycle_skb() are
++ * ordered correctly WRT the skb check above.
++ */
++ smp_rmb();
++
++ memcpy(&dpr->rx_std_buffers[di],
++ &spr->rx_std_buffers[si],
++ cpycnt * sizeof(struct ring_info));
++
++ for (i = 0; i < cpycnt; i++, di++, si++) {
++ struct tg3_rx_buffer_desc *sbd, *dbd;
++ sbd = &spr->rx_std[si];
++ dbd = &dpr->rx_std[di];
++ dbd->addr_hi = sbd->addr_hi;
++ dbd->addr_lo = sbd->addr_lo;
++ }
++
++ spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
++ tp->rx_std_ring_mask;
++ dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
++ tp->rx_std_ring_mask;
++ }
++
++ while (1) {
++ src_prod_idx = spr->rx_jmb_prod_idx;
++
++ /* Make sure updates to the rx_jmb_buffers[] entries and
++ * the jumbo producer index are seen in the correct order.
++ */
++ smp_rmb();
++
++ if (spr->rx_jmb_cons_idx == src_prod_idx)
++ break;
++
++ if (spr->rx_jmb_cons_idx < src_prod_idx)
++ cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
++ else
++ cpycnt = tp->rx_jmb_ring_mask + 1 -
++ spr->rx_jmb_cons_idx;
++
++ cpycnt = min(cpycnt,
++ tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
++
++ si = spr->rx_jmb_cons_idx;
++ di = dpr->rx_jmb_prod_idx;
++
++ for (i = di; i < di + cpycnt; i++) {
++ if (dpr->rx_jmb_buffers[i].data) {
++ cpycnt = i - di;
++ err = -ENOSPC;
++ break;
++ }
++ }
++
++ if (!cpycnt)
++ break;
++
++ /* Ensure that updates to the rx_jmb_buffers ring and the
++ * shadowed hardware producer ring from tg3_recycle_skb() are
++ * ordered correctly WRT the skb check above.
++ */
++ smp_rmb();
++
++ memcpy(&dpr->rx_jmb_buffers[di],
++ &spr->rx_jmb_buffers[si],
++ cpycnt * sizeof(struct ring_info));
++
++ for (i = 0; i < cpycnt; i++, di++, si++) {
++ struct tg3_rx_buffer_desc *sbd, *dbd;
++ sbd = &spr->rx_jmb[si].std;
++ dbd = &dpr->rx_jmb[di].std;
++ dbd->addr_hi = sbd->addr_hi;
++ dbd->addr_lo = sbd->addr_lo;
++ }
++
++ spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
++ tp->rx_jmb_ring_mask;
++ dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
++ tp->rx_jmb_ring_mask;
++ }
++
++ return err;
++}
++
++static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
++{
++ struct tg3 *tp = tnapi->tp;
++
++ /* run TX completion thread */
++ if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
++ tg3_tx(tnapi);
++ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
++ return work_done;
++ }
++
++ if (!tnapi->rx_rcb_prod_idx)
++ return work_done;
++
++ /* run RX thread, within the bounds set by NAPI.
++ * All RX "locking" is done by ensuring outside
++ * code synchronizes with tg3->napi.poll()
++ */
++ if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
++ work_done += tg3_rx(tnapi, budget - work_done);
++
++ if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
++ struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
++ int i, err = 0;
++ u32 std_prod_idx = dpr->rx_std_prod_idx;
++ u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
++
++ tp->rx_refill = false;
++ for (i = 1; i <= tp->rxq_cnt; i++)
++ err |= tg3_rx_prodring_xfer(tp, dpr,
++ &tp->napi[i].prodring);
++
++ wmb();
++
++ if (std_prod_idx != dpr->rx_std_prod_idx)
++ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
++ dpr->rx_std_prod_idx);
++
++ if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
++ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
++ dpr->rx_jmb_prod_idx);
++
++ mmiowb();
++
++ if (err)
++ tw32_f(HOSTCC_MODE, tp->coal_now);
++ }
++
++ return work_done;
++}
++
++static inline void tg3_reset_task_schedule(struct tg3 *tp)
++{
++ if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
++ schedule_work(&tp->reset_task);
++}
++
++static inline void tg3_reset_task_cancel(struct tg3 *tp)
++{
++ cancel_work_sync(&tp->reset_task);
++ tg3_flag_clear(tp, RESET_TASK_PENDING);
++ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
++}
++
++static int tg3_poll_msix(struct napi_struct *napi, int budget)
++{
++ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
++ struct tg3 *tp = tnapi->tp;
++ int work_done = 0;
++ struct tg3_hw_status *sblk = tnapi->hw_status;
++
++ while (1) {
++ work_done = tg3_poll_work(tnapi, work_done, budget);
++
++ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
++ goto tx_recovery;
++
++ if (unlikely(work_done >= budget))
++ break;
++
++ /* tp->last_tag is used in tg3_int_reenable() below
++ * to tell the hw how much work has been processed,
++ * so we must read it before checking for more work.
++ */
++ tnapi->last_tag = sblk->status_tag;
++ tnapi->last_irq_tag = tnapi->last_tag;
++ rmb();
++
++ /* check for RX/TX work to do */
++ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
++ *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
++
++ /* This test here is not race free, but will reduce
++ * the number of interrupts by looping again.
++ */
++ if (tnapi == &tp->napi[1] && tp->rx_refill)
++ continue;
++
++ napi_complete(napi);
++ /* Reenable interrupts. */
++ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
++
++ /* This test here is synchronized by napi_schedule()
++ * and napi_complete() to close the race condition.
++ */
++ if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
++ tw32(HOSTCC_MODE, tp->coalesce_mode |
++ HOSTCC_MODE_ENABLE |
++ tnapi->coal_now);
++ }
++ mmiowb();
++ break;
++ }
++ }
++
++ return work_done;
++
++tx_recovery:
++ /* work_done is guaranteed to be less than budget. */
++ napi_complete(napi);
++ tg3_reset_task_schedule(tp);
++ return work_done;
++}
++
++static void tg3_process_error(struct tg3 *tp)
++{
++ u32 val;
++ bool real_error = false;
++
++ if (tg3_flag(tp, ERROR_PROCESSED))
++ return;
++
++ /* Check Flow Attention register */
++ val = tr32(HOSTCC_FLOW_ATTN);
++ if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
++ netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
++ real_error = true;
++ }
++
++ if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
++ netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
++ real_error = true;
++ }
++
++ if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
++ netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
++ real_error = true;
++ }
++
++ if (!real_error)
++ return;
++
++ tg3_dump_state(tp);
++
++ tg3_flag_set(tp, ERROR_PROCESSED);
++ tg3_reset_task_schedule(tp);
++}
++
++static int tg3_poll(struct napi_struct *napi, int budget)
++{
++ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
++ struct tg3 *tp = tnapi->tp;
++ int work_done = 0;
++ struct tg3_hw_status *sblk = tnapi->hw_status;
++
++ while (1) {
++ if (sblk->status & SD_STATUS_ERROR)
++ tg3_process_error(tp);
++
++ tg3_poll_link(tp);
++
++ work_done = tg3_poll_work(tnapi, work_done, budget);
++
++ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
++ goto tx_recovery;
++
++ if (unlikely(work_done >= budget))
++ break;
++
++ if (tg3_flag(tp, TAGGED_STATUS)) {
++ /* tp->last_tag is used in tg3_int_reenable() below
++ * to tell the hw how much work has been processed,
++ * so we must read it before checking for more work.
++ */
++ tnapi->last_tag = sblk->status_tag;
++ tnapi->last_irq_tag = tnapi->last_tag;
++ rmb();
++ } else
++ sblk->status &= ~SD_STATUS_UPDATED;
++
++ if (likely(!tg3_has_work(tnapi))) {
++ napi_complete(napi);
++ tg3_int_reenable(tnapi);
++ break;
++ }
++ }
++
++ return work_done;
++
++tx_recovery:
++ /* work_done is guaranteed to be less than budget. */
++ napi_complete(napi);
++ tg3_reset_task_schedule(tp);
++ return work_done;
++}
++
++static void tg3_napi_disable(struct tg3 *tp)
++{
++ int i;
++
++ for (i = tp->irq_cnt - 1; i >= 0; i--)
++ napi_disable(&tp->napi[i].napi);
++}
++
++static void tg3_napi_enable(struct tg3 *tp)
++{
++ int i;
++
++ for (i = 0; i < tp->irq_cnt; i++)
++ napi_enable(&tp->napi[i].napi);
++}
++
++static void tg3_napi_init(struct tg3 *tp)
++{
++ int i;
++
++ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
++ for (i = 1; i < tp->irq_cnt; i++)
++ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
++}
++
++static void tg3_napi_fini(struct tg3 *tp)
++{
++ int i;
++
++ for (i = 0; i < tp->irq_cnt; i++)
++ netif_napi_del(&tp->napi[i].napi);
++}
++
++static inline void tg3_netif_stop(struct tg3 *tp)
++{
++ tp->dev->trans_start = jiffies; /* prevent tx timeout */
++ tg3_napi_disable(tp);
++ netif_carrier_off(tp->dev);
++ netif_tx_disable(tp->dev);
++}
++
++/* tp->lock must be held */
++static inline void tg3_netif_start(struct tg3 *tp)
++{
++ tg3_ptp_resume(tp);
++
++ /* NOTE: unconditional netif_tx_wake_all_queues is only
++ * appropriate so long as all callers are assured to
++ * have free tx slots (such as after tg3_init_hw)
++ */
++ netif_tx_wake_all_queues(tp->dev);
++
++ if (tp->link_up)
++ netif_carrier_on(tp->dev);
++
++ tg3_napi_enable(tp);
++ tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
++ tg3_enable_ints(tp);
++}
++
++static void tg3_irq_quiesce(struct tg3 *tp)
++{
++ int i;
++
++ BUG_ON(tp->irq_sync);
++
++ tp->irq_sync = 1;
++ smp_mb();
++
++ for (i = 0; i < tp->irq_cnt; i++)
++ synchronize_irq(tp->napi[i].irq_vec);
++}
++
++/* Fully shutdown all tg3 driver activity elsewhere in the system.
++ * If irq_sync is non-zero, then the IRQ handler must be synchronized
++ * with as well. Most of the time, this is not necessary except when
++ * shutting down the device.
++ */
++static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
++{
++ spin_lock_bh(&tp->lock);
++ if (irq_sync)
++ tg3_irq_quiesce(tp);
++}
++
++static inline void tg3_full_unlock(struct tg3 *tp)
++{
++ spin_unlock_bh(&tp->lock);
++}
++
++/* One-shot MSI handler - Chip automatically disables interrupt
++ * after sending MSI so driver doesn't have to do it.
++ */
++static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
++{
++ struct tg3_napi *tnapi = dev_id;
++ struct tg3 *tp = tnapi->tp;
++
++ prefetch(tnapi->hw_status);
++ if (tnapi->rx_rcb)
++ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
++
++ if (likely(!tg3_irq_sync(tp)))
++ napi_schedule(&tnapi->napi);
++
++ return IRQ_HANDLED;
++}
++
++/* MSI ISR - No need to check for interrupt sharing and no need to
++ * flush status block and interrupt mailbox. PCI ordering rules
++ * guarantee that MSI will arrive after the status block.
++ */
++static irqreturn_t tg3_msi(int irq, void *dev_id)
++{
++ struct tg3_napi *tnapi = dev_id;
++ struct tg3 *tp = tnapi->tp;
++
++ prefetch(tnapi->hw_status);
++ if (tnapi->rx_rcb)
++ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
++ /*
++ * Writing any value to intr-mbox-0 clears PCI INTA# and
++ * chip-internal interrupt pending events.
++ * Writing non-zero to intr-mbox-0 additional tells the
++ * NIC to stop sending us irqs, engaging "in-intr-handler"
++ * event coalescing.
++ */
++ tw32_mailbox(tnapi->int_mbox, 0x00000001);
++ if (likely(!tg3_irq_sync(tp)))
++ napi_schedule(&tnapi->napi);
++
++ return IRQ_RETVAL(1);
++}
++
++static irqreturn_t tg3_interrupt(int irq, void *dev_id)
++{
++ struct tg3_napi *tnapi = dev_id;
++ struct tg3 *tp = tnapi->tp;
++ struct tg3_hw_status *sblk = tnapi->hw_status;
++ unsigned int handled = 1;
++
++ /* In INTx mode, it is possible for the interrupt to arrive at
++ * the CPU before the status block posted prior to the interrupt.
++ * Reading the PCI State register will confirm whether the
++ * interrupt is ours and will flush the status block.
++ */
++ if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
++ if (tg3_flag(tp, CHIP_RESETTING) ||
++ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
++ handled = 0;
++ goto out;
++ }
++ }
++
++ /*
++ * Writing any value to intr-mbox-0 clears PCI INTA# and
++ * chip-internal interrupt pending events.
++ * Writing non-zero to intr-mbox-0 additional tells the
++ * NIC to stop sending us irqs, engaging "in-intr-handler"
++ * event coalescing.
++ *
++ * Flush the mailbox to de-assert the IRQ immediately to prevent
++ * spurious interrupts. The flush impacts performance but
++ * excessive spurious interrupts can be worse in some cases.
++ */
++ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
++ if (tg3_irq_sync(tp))
++ goto out;
++ sblk->status &= ~SD_STATUS_UPDATED;
++ if (likely(tg3_has_work(tnapi))) {
++ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
++ napi_schedule(&tnapi->napi);
++ } else {
++ /* No work, shared interrupt perhaps? re-enable
++ * interrupts, and flush that PCI write
++ */
++ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
++ 0x00000000);
++ }
++out:
++ return IRQ_RETVAL(handled);
++}
++
++static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
++{
++ struct tg3_napi *tnapi = dev_id;
++ struct tg3 *tp = tnapi->tp;
++ struct tg3_hw_status *sblk = tnapi->hw_status;
++ unsigned int handled = 1;
++
++ /* In INTx mode, it is possible for the interrupt to arrive at
++ * the CPU before the status block posted prior to the interrupt.
++ * Reading the PCI State register will confirm whether the
++ * interrupt is ours and will flush the status block.
++ */
++ if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
++ if (tg3_flag(tp, CHIP_RESETTING) ||
++ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
++ handled = 0;
++ goto out;
++ }
++ }
++
++ /*
++ * writing any value to intr-mbox-0 clears PCI INTA# and
++ * chip-internal interrupt pending events.
++ * writing non-zero to intr-mbox-0 additional tells the
++ * NIC to stop sending us irqs, engaging "in-intr-handler"
++ * event coalescing.
++ *
++ * Flush the mailbox to de-assert the IRQ immediately to prevent
++ * spurious interrupts. The flush impacts performance but
++ * excessive spurious interrupts can be worse in some cases.
++ */
++ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
++
++ /*
++ * In a shared interrupt configuration, sometimes other devices'
++ * interrupts will scream. We record the current status tag here
++ * so that the above check can report that the screaming interrupts
++ * are unhandled. Eventually they will be silenced.
++ */
++ tnapi->last_irq_tag = sblk->status_tag;
++
++ if (tg3_irq_sync(tp))
++ goto out;
++
++ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
++
++ napi_schedule(&tnapi->napi);
++
++out:
++ return IRQ_RETVAL(handled);
++}
++
++/* ISR for interrupt test */
++static irqreturn_t tg3_test_isr(int irq, void *dev_id)
++{
++ struct tg3_napi *tnapi = dev_id;
++ struct tg3 *tp = tnapi->tp;
++ struct tg3_hw_status *sblk = tnapi->hw_status;
++
++ if ((sblk->status & SD_STATUS_UPDATED) ||
++ !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
++ tg3_disable_ints(tp);
++ return IRQ_RETVAL(1);
++ }
++ return IRQ_RETVAL(0);
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++static void tg3_poll_controller(struct net_device *dev)
++{
++ int i;
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (tg3_irq_sync(tp))
++ return;
++
++ for (i = 0; i < tp->irq_cnt; i++)
++ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
++}
++#endif
++
++static void tg3_tx_timeout(struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (netif_msg_tx_err(tp)) {
++ netdev_err(dev, "transmit timed out, resetting\n");
++ tg3_dump_state(tp);
++ }
++
++ tg3_reset_task_schedule(tp);
++}
++
++/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
++static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
++{
++ u32 base = (u32) mapping & 0xffffffff;
++
++ return base + len + 8 < base;
++}
++
++/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
++ * of any 4GB boundaries: 4G, 8G, etc
++ */
++static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
++ u32 len, u32 mss)
++{
++ if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
++ u32 base = (u32) mapping & 0xffffffff;
++
++ return ((base + len + (mss & 0x3fff)) < base);
++ }
++ return 0;
++}
++
++/* Test for DMA addresses > 40-bit */
++static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
++ int len)
++{
++#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
++ if (tg3_flag(tp, 40BIT_DMA_BUG))
++ return ((u64) mapping + len) > DMA_BIT_MASK(40);
++ return 0;
++#else
++ return 0;
++#endif
++}
++
++static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
++ dma_addr_t mapping, u32 len, u32 flags,
++ u32 mss, u32 vlan)
++{
++ txbd->addr_hi = ((u64) mapping >> 32);
++ txbd->addr_lo = ((u64) mapping & 0xffffffff);
++ txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
++ txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
++}
++
++static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
++ dma_addr_t map, u32 len, u32 flags,
++ u32 mss, u32 vlan)
++{
++ struct tg3 *tp = tnapi->tp;
++ bool hwbug = false;
++
++ if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
++ hwbug = true;
++
++ if (tg3_4g_overflow_test(map, len))
++ hwbug = true;
++
++ if (tg3_4g_tso_overflow_test(tp, map, len, mss))
++ hwbug = true;
++
++ if (tg3_40bit_overflow_test(tp, map, len))
++ hwbug = true;
++
++ if (tp->dma_limit) {
++ u32 prvidx = *entry;
++ u32 tmp_flag = flags & ~TXD_FLAG_END;
++ while (len > tp->dma_limit && *budget) {
++ u32 frag_len = tp->dma_limit;
++ len -= tp->dma_limit;
++
++ /* Avoid the 8byte DMA problem */
++ if (len <= 8) {
++ len += tp->dma_limit / 2;
++ frag_len = tp->dma_limit / 2;
++ }
++
++ tnapi->tx_buffers[*entry].fragmented = true;
++
++ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
++ frag_len, tmp_flag, mss, vlan);
++ *budget -= 1;
++ prvidx = *entry;
++ *entry = NEXT_TX(*entry);
++
++ map += frag_len;
++ }
++
++ if (len) {
++ if (*budget) {
++ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
++ len, flags, mss, vlan);
++ *budget -= 1;
++ *entry = NEXT_TX(*entry);
++ } else {
++ hwbug = true;
++ tnapi->tx_buffers[prvidx].fragmented = false;
++ }
++ }
++ } else {
++ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
++ len, flags, mss, vlan);
++ *entry = NEXT_TX(*entry);
++ }
++
++ return hwbug;
++}
++
++static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
++{
++ int i;
++ struct sk_buff *skb;
++ struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
++
++ skb = txb->skb;
++ txb->skb = NULL;
++
++ pci_unmap_single(tnapi->tp->pdev,
++ dma_unmap_addr(txb, mapping),
++ skb_headlen(skb),
++ PCI_DMA_TODEVICE);
++
++ while (txb->fragmented) {
++ txb->fragmented = false;
++ entry = NEXT_TX(entry);
++ txb = &tnapi->tx_buffers[entry];
++ }
++
++ for (i = 0; i <= last; i++) {
++ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++ entry = NEXT_TX(entry);
++ txb = &tnapi->tx_buffers[entry];
++
++ pci_unmap_page(tnapi->tp->pdev,
++ dma_unmap_addr(txb, mapping),
++ skb_frag_size(frag), PCI_DMA_TODEVICE);
++
++ while (txb->fragmented) {
++ txb->fragmented = false;
++ entry = NEXT_TX(entry);
++ txb = &tnapi->tx_buffers[entry];
++ }
++ }
++}
++
++/* Workaround 4GB and 40-bit hardware DMA bugs. */
++static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
++ struct sk_buff **pskb,
++ u32 *entry, u32 *budget,
++ u32 base_flags, u32 mss, u32 vlan)
++{
++ struct tg3 *tp = tnapi->tp;
++ struct sk_buff *new_skb, *skb = *pskb;
++ dma_addr_t new_addr = 0;
++ int ret = 0;
++
++ if (tg3_asic_rev(tp) != ASIC_REV_5701)
++ new_skb = skb_copy(skb, GFP_ATOMIC);
++ else {
++ int more_headroom = 4 - ((unsigned long)skb->data & 3);
++
++ new_skb = skb_copy_expand(skb,
++ skb_headroom(skb) + more_headroom,
++ skb_tailroom(skb), GFP_ATOMIC);
++ }
++
++ if (!new_skb) {
++ ret = -1;
++ } else {
++ /* New SKB is guaranteed to be linear. */
++ new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
++ PCI_DMA_TODEVICE);
++ /* Make sure the mapping succeeded */
++ if (pci_dma_mapping_error(tp->pdev, new_addr)) {
++ dev_kfree_skb(new_skb);
++ ret = -1;
++ } else {
++ u32 save_entry = *entry;
++
++ base_flags |= TXD_FLAG_END;
++
++ tnapi->tx_buffers[*entry].skb = new_skb;
++ dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
++ mapping, new_addr);
++
++ if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
++ new_skb->len, base_flags,
++ mss, vlan)) {
++ tg3_tx_skb_unmap(tnapi, save_entry, -1);
++ dev_kfree_skb(new_skb);
++ ret = -1;
++ }
++ }
++ }
++
++ dev_kfree_skb(skb);
++ *pskb = new_skb;
++ return ret;
++}
++
++static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
++
++/* Use GSO to workaround a rare TSO bug that may be triggered when the
++ * TSO header is greater than 80 bytes.
++ */
++static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
++{
++ struct sk_buff *segs, *nskb;
++ u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
++
++ /* Estimate the number of fragments in the worst case */
++ if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
++ netif_stop_queue(tp->dev);
++
++ /* netif_tx_stop_queue() must be done before checking
++ * checking tx index in tg3_tx_avail() below, because in
++ * tg3_tx(), we update tx index before checking for
++ * netif_tx_queue_stopped().
++ */
++ smp_mb();
++ if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
++ return NETDEV_TX_BUSY;
++
++ netif_wake_queue(tp->dev);
++ }
++
++ segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
++ if (IS_ERR(segs))
++ goto tg3_tso_bug_end;
++
++ do {
++ nskb = segs;
++ segs = segs->next;
++ nskb->next = NULL;
++ tg3_start_xmit(nskb, tp->dev);
++ } while (segs);
++
++tg3_tso_bug_end:
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++}
++
++/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
++ * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
++ */
++static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ u32 len, entry, base_flags, mss, vlan = 0;
++ u32 budget;
++ int i = -1, would_hit_hwbug;
++ dma_addr_t mapping;
++ struct tg3_napi *tnapi;
++ struct netdev_queue *txq;
++ unsigned int last;
++
++ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
++ tnapi = &tp->napi[skb_get_queue_mapping(skb)];
++ if (tg3_flag(tp, ENABLE_TSS))
++ tnapi++;
++
++ budget = tg3_tx_avail(tnapi);
++
++ /* We are running in BH disabled context with netif_tx_lock
++ * and TX reclaim runs via tp->napi.poll inside of a software
++ * interrupt. Furthermore, IRQ processing runs lockless so we have
++ * no IRQ context deadlocks to worry about either. Rejoice!
++ */
++ if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
++ if (!netif_tx_queue_stopped(txq)) {
++ netif_tx_stop_queue(txq);
++
++ /* This is a hard error, log it. */
++ netdev_err(dev,
++ "BUG! Tx Ring full when queue awake!\n");
++ }
++ return NETDEV_TX_BUSY;
++ }
++
++ entry = tnapi->tx_prod;
++ base_flags = 0;
++
++ mss = skb_shinfo(skb)->gso_size;
++ if (mss) {
++ struct iphdr *iph;
++ u32 tcp_opt_len, hdr_len;
++
++ if (skb_header_cloned(skb) &&
++ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++ goto drop;
++
++ iph = ip_hdr(skb);
++ tcp_opt_len = tcp_optlen(skb);
++
++ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
++
++ /* HW/FW can not correctly segment packets that have been
++ * vlan encapsulated.
++ */
++ if (skb->protocol == htons(ETH_P_8021Q) ||
++ skb->protocol == htons(ETH_P_8021AD))
++ return tg3_tso_bug(tp, skb);
++
++ if (!skb_is_gso_v6(skb)) {
++ iph->check = 0;
++ iph->tot_len = htons(mss + hdr_len);
++ }
++
++ if (unlikely((ETH_HLEN + hdr_len) > 80) &&
++ tg3_flag(tp, TSO_BUG))
++ return tg3_tso_bug(tp, skb);
++
++ base_flags |= (TXD_FLAG_CPU_PRE_DMA |
++ TXD_FLAG_CPU_POST_DMA);
++
++ if (tg3_flag(tp, HW_TSO_1) ||
++ tg3_flag(tp, HW_TSO_2) ||
++ tg3_flag(tp, HW_TSO_3)) {
++ tcp_hdr(skb)->check = 0;
++ base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
++ } else
++ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
++ iph->daddr, 0,
++ IPPROTO_TCP,
++ 0);
++
++ if (tg3_flag(tp, HW_TSO_3)) {
++ mss |= (hdr_len & 0xc) << 12;
++ if (hdr_len & 0x10)
++ base_flags |= 0x00000010;
++ base_flags |= (hdr_len & 0x3e0) << 5;
++ } else if (tg3_flag(tp, HW_TSO_2))
++ mss |= hdr_len << 9;
++ else if (tg3_flag(tp, HW_TSO_1) ||
++ tg3_asic_rev(tp) == ASIC_REV_5705) {
++ if (tcp_opt_len || iph->ihl > 5) {
++ int tsflags;
++
++ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
++ mss |= (tsflags << 11);
++ }
++ } else {
++ if (tcp_opt_len || iph->ihl > 5) {
++ int tsflags;
++
++ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
++ base_flags |= tsflags << 12;
++ }
++ }
++ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ /* HW/FW can not correctly checksum packets that have been
++ * vlan encapsulated.
++ */
++ if (skb->protocol == htons(ETH_P_8021Q) ||
++ skb->protocol == htons(ETH_P_8021AD)) {
++ if (skb_checksum_help(skb))
++ goto drop;
++ } else {
++ base_flags |= TXD_FLAG_TCPUDP_CSUM;
++ }
++ }
++
++ if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
++ !mss && skb->len > VLAN_ETH_FRAME_LEN)
++ base_flags |= TXD_FLAG_JMB_PKT;
++
++ if (vlan_tx_tag_present(skb)) {
++ base_flags |= TXD_FLAG_VLAN;
++ vlan = vlan_tx_tag_get(skb);
++ }
++
++ if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
++ tg3_flag(tp, TX_TSTAMP_EN)) {
++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
++ base_flags |= TXD_FLAG_HWTSTAMP;
++ }
++
++ len = skb_headlen(skb);
++
++ mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
++ if (pci_dma_mapping_error(tp->pdev, mapping))
++ goto drop;
++
++
++ tnapi->tx_buffers[entry].skb = skb;
++ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
++
++ would_hit_hwbug = 0;
++
++ if (tg3_flag(tp, 5701_DMA_BUG))
++ would_hit_hwbug = 1;
++
++ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
++ ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
++ mss, vlan)) {
++ would_hit_hwbug = 1;
++ } else if (skb_shinfo(skb)->nr_frags > 0) {
++ u32 tmp_mss = mss;
++
++ if (!tg3_flag(tp, HW_TSO_1) &&
++ !tg3_flag(tp, HW_TSO_2) &&
++ !tg3_flag(tp, HW_TSO_3))
++ tmp_mss = 0;
++
++ /* Now loop through additional data
++ * fragments, and queue them.
++ */
++ last = skb_shinfo(skb)->nr_frags - 1;
++ for (i = 0; i <= last; i++) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++ len = skb_frag_size(frag);
++ mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
++ len, DMA_TO_DEVICE);
++
++ tnapi->tx_buffers[entry].skb = NULL;
++ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
++ mapping);
++ if (dma_mapping_error(&tp->pdev->dev, mapping))
++ goto dma_error;
++
++ if (!budget ||
++ tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
++ len, base_flags |
++ ((i == last) ? TXD_FLAG_END : 0),
++ tmp_mss, vlan)) {
++ would_hit_hwbug = 1;
++ break;
++ }
++ }
++ }
++
++ if (would_hit_hwbug) {
++ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
++
++ /* If the workaround fails due to memory/mapping
++ * failure, silently drop this packet.
++ */
++ entry = tnapi->tx_prod;
++ budget = tg3_tx_avail(tnapi);
++ if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
++ base_flags, mss, vlan))
++ goto drop_nofree;
++ }
++
++ skb_tx_timestamp(skb);
++ netdev_tx_sent_queue(txq, skb->len);
++
++ /* Sync BD data before updating mailbox */
++ wmb();
++
++ /* Packets are ready, update Tx producer idx local and on card. */
++ tw32_tx_mbox(tnapi->prodmbox, entry);
++
++ tnapi->tx_prod = entry;
++ if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
++ netif_tx_stop_queue(txq);
++
++ /* netif_tx_stop_queue() must be done before checking
++ * checking tx index in tg3_tx_avail() below, because in
++ * tg3_tx(), we update tx index before checking for
++ * netif_tx_queue_stopped().
++ */
++ smp_mb();
++ if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
++ netif_tx_wake_queue(txq);
++ }
++
++ mmiowb();
++ return NETDEV_TX_OK;
++
++dma_error:
++ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
++ tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
++drop:
++ dev_kfree_skb(skb);
++drop_nofree:
++ tp->tx_dropped++;
++ return NETDEV_TX_OK;
++}
++
++static void tg3_mac_loopback(struct tg3 *tp, bool enable)
++{
++ if (enable) {
++ tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
++ MAC_MODE_PORT_MODE_MASK);
++
++ tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
++
++ if (!tg3_flag(tp, 5705_PLUS))
++ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
++
++ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
++ else
++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
++ } else {
++ tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
++
++ if (tg3_flag(tp, 5705_PLUS) ||
++ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
++ tg3_asic_rev(tp) == ASIC_REV_5700)
++ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
++ }
++
++ tw32(MAC_MODE, tp->mac_mode);
++ udelay(40);
++}
++
++static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
++{
++ u32 val, bmcr, mac_mode, ptest = 0;
++
++ tg3_phy_toggle_apd(tp, false);
++ tg3_phy_toggle_automdix(tp, false);
++
++ if (extlpbk && tg3_phy_set_extloopbk(tp))
++ return -EIO;
++
++ bmcr = BMCR_FULLDPLX;
++ switch (speed) {
++ case SPEED_10:
++ break;
++ case SPEED_100:
++ bmcr |= BMCR_SPEED100;
++ break;
++ case SPEED_1000:
++ default:
++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
++ speed = SPEED_100;
++ bmcr |= BMCR_SPEED100;
++ } else {
++ speed = SPEED_1000;
++ bmcr |= BMCR_SPEED1000;
++ }
++ }
++
++ if (extlpbk) {
++ if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
++ tg3_readphy(tp, MII_CTRL1000, &val);
++ val |= CTL1000_AS_MASTER |
++ CTL1000_ENABLE_MASTER;
++ tg3_writephy(tp, MII_CTRL1000, val);
++ } else {
++ ptest = MII_TG3_FET_PTEST_TRIM_SEL |
++ MII_TG3_FET_PTEST_TRIM_2;
++ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
++ }
++ } else
++ bmcr |= BMCR_LOOPBACK;
++
++ tg3_writephy(tp, MII_BMCR, bmcr);
++
++ /* The write needs to be flushed for the FETs */
++ if (tp->phy_flags & TG3_PHYFLG_IS_FET)
++ tg3_readphy(tp, MII_BMCR, &bmcr);
++
++ udelay(40);
++
++ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
++ tg3_asic_rev(tp) == ASIC_REV_5785) {
++ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
++ MII_TG3_FET_PTEST_FRC_TX_LINK |
++ MII_TG3_FET_PTEST_FRC_TX_LOCK);
++
++ /* The write needs to be flushed for the AC131 */
++ tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
++ }
++
++ /* Reset to prevent losing 1st rx packet intermittently */
++ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
++ tg3_flag(tp, 5780_CLASS)) {
++ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
++ udelay(10);
++ tw32_f(MAC_RX_MODE, tp->rx_mode);
++ }
++
++ mac_mode = tp->mac_mode &
++ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
++ if (speed == SPEED_1000)
++ mac_mode |= MAC_MODE_PORT_MODE_GMII;
++ else
++ mac_mode |= MAC_MODE_PORT_MODE_MII;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
++ u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
++
++ if (masked_phy_id == TG3_PHY_ID_BCM5401)
++ mac_mode &= ~MAC_MODE_LINK_POLARITY;
++ else if (masked_phy_id == TG3_PHY_ID_BCM5411)
++ mac_mode |= MAC_MODE_LINK_POLARITY;
++
++ tg3_writephy(tp, MII_TG3_EXT_CTRL,
++ MII_TG3_EXT_CTRL_LNK3_LED_MODE);
++ }
++
++ tw32(MAC_MODE, mac_mode);
++ udelay(40);
++
++ return 0;
++}
++
++static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (features & NETIF_F_LOOPBACK) {
++ if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
++ return;
++
++ spin_lock_bh(&tp->lock);
++ tg3_mac_loopback(tp, true);
++ netif_carrier_on(tp->dev);
++ spin_unlock_bh(&tp->lock);
++ netdev_info(dev, "Internal MAC loopback mode enabled.\n");
++ } else {
++ if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
++ return;
++
++ spin_lock_bh(&tp->lock);
++ tg3_mac_loopback(tp, false);
++ /* Force link status check */
++ tg3_setup_phy(tp, true);
++ spin_unlock_bh(&tp->lock);
++ netdev_info(dev, "Internal MAC loopback mode disabled.\n");
++ }
++}
++
++static netdev_features_t tg3_fix_features(struct net_device *dev,
++ netdev_features_t features)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
++ features &= ~NETIF_F_ALL_TSO;
++
++ return features;
++}
++
++static int tg3_set_features(struct net_device *dev, netdev_features_t features)
++{
++ netdev_features_t changed = dev->features ^ features;
++
++ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
++ tg3_set_loopback(dev, features);
++
++ return 0;
++}
++
++static void tg3_rx_prodring_free(struct tg3 *tp,
++ struct tg3_rx_prodring_set *tpr)
++{
++ int i;
++
++ if (tpr != &tp->napi[0].prodring) {
++ for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
++ i = (i + 1) & tp->rx_std_ring_mask)
++ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
++ tp->rx_pkt_map_sz);
++
++ if (tg3_flag(tp, JUMBO_CAPABLE)) {
++ for (i = tpr->rx_jmb_cons_idx;
++ i != tpr->rx_jmb_prod_idx;
++ i = (i + 1) & tp->rx_jmb_ring_mask) {
++ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
++ TG3_RX_JMB_MAP_SZ);
++ }
++ }
++
++ return;
++ }
++
++ for (i = 0; i <= tp->rx_std_ring_mask; i++)
++ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
++ tp->rx_pkt_map_sz);
++
++ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
++ for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
++ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
++ TG3_RX_JMB_MAP_SZ);
++ }
++}
++
++/* Initialize rx rings for packet processing.
++ *
++ * The chip has been shut down and the driver detached from
++ * the networking, so no interrupts or new tx packets will
++ * end up in the driver. tp->{tx,}lock are held and thus
++ * we may not sleep.
++ */
++static int tg3_rx_prodring_alloc(struct tg3 *tp,
++ struct tg3_rx_prodring_set *tpr)
++{
++ u32 i, rx_pkt_dma_sz;
++
++ tpr->rx_std_cons_idx = 0;
++ tpr->rx_std_prod_idx = 0;
++ tpr->rx_jmb_cons_idx = 0;
++ tpr->rx_jmb_prod_idx = 0;
++
++ if (tpr != &tp->napi[0].prodring) {
++ memset(&tpr->rx_std_buffers[0], 0,
++ TG3_RX_STD_BUFF_RING_SIZE(tp));
++ if (tpr->rx_jmb_buffers)
++ memset(&tpr->rx_jmb_buffers[0], 0,
++ TG3_RX_JMB_BUFF_RING_SIZE(tp));
++ goto done;
++ }
++
++ /* Zero out all descriptors. */
++ memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
++
++ rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
++ if (tg3_flag(tp, 5780_CLASS) &&
++ tp->dev->mtu > ETH_DATA_LEN)
++ rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
++ tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
++
++ /* Initialize invariants of the rings, we only set this
++ * stuff once. This works because the card does not
++ * write into the rx buffer posting rings.
++ */
++ for (i = 0; i <= tp->rx_std_ring_mask; i++) {
++ struct tg3_rx_buffer_desc *rxd;
++
++ rxd = &tpr->rx_std[i];
++ rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
++ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
++ rxd->opaque = (RXD_OPAQUE_RING_STD |
++ (i << RXD_OPAQUE_INDEX_SHIFT));
++ }
++
++ /* Now allocate fresh SKBs for each rx ring. */
++ for (i = 0; i < tp->rx_pending; i++) {
++ unsigned int frag_size;
++
++ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
++ &frag_size) < 0) {
++ netdev_warn(tp->dev,
++ "Using a smaller RX standard ring. Only "
++ "%d out of %d buffers were allocated "
++ "successfully\n", i, tp->rx_pending);
++ if (i == 0)
++ goto initfail;
++ tp->rx_pending = i;
++ break;
++ }
++ }
++
++ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
++ goto done;
++
++ memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
++
++ if (!tg3_flag(tp, JUMBO_RING_ENABLE))
++ goto done;
++
++ for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
++ struct tg3_rx_buffer_desc *rxd;
++
++ rxd = &tpr->rx_jmb[i].std;
++ rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
++ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
++ RXD_FLAG_JUMBO;
++ rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
++ (i << RXD_OPAQUE_INDEX_SHIFT));
++ }
++
++ for (i = 0; i < tp->rx_jumbo_pending; i++) {
++ unsigned int frag_size;
++
++ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
++ &frag_size) < 0) {
++ netdev_warn(tp->dev,
++ "Using a smaller RX jumbo ring. Only %d "
++ "out of %d buffers were allocated "
++ "successfully\n", i, tp->rx_jumbo_pending);
++ if (i == 0)
++ goto initfail;
++ tp->rx_jumbo_pending = i;
++ break;
++ }
++ }
++
++done:
++ return 0;
++
++initfail:
++ tg3_rx_prodring_free(tp, tpr);
++ return -ENOMEM;
++}
++
++static void tg3_rx_prodring_fini(struct tg3 *tp,
++ struct tg3_rx_prodring_set *tpr)
++{
++ kfree(tpr->rx_std_buffers);
++ tpr->rx_std_buffers = NULL;
++ kfree(tpr->rx_jmb_buffers);
++ tpr->rx_jmb_buffers = NULL;
++ if (tpr->rx_std) {
++ dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
++ tpr->rx_std, tpr->rx_std_mapping);
++ tpr->rx_std = NULL;
++ }
++ if (tpr->rx_jmb) {
++ dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
++ tpr->rx_jmb, tpr->rx_jmb_mapping);
++ tpr->rx_jmb = NULL;
++ }
++}
++
++static int tg3_rx_prodring_init(struct tg3 *tp,
++ struct tg3_rx_prodring_set *tpr)
++{
++ tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
++ GFP_KERNEL);
++ if (!tpr->rx_std_buffers)
++ return -ENOMEM;
++
++ tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
++ TG3_RX_STD_RING_BYTES(tp),
++ &tpr->rx_std_mapping,
++ GFP_KERNEL);
++ if (!tpr->rx_std)
++ goto err_out;
++
++ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
++ tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
++ GFP_KERNEL);
++ if (!tpr->rx_jmb_buffers)
++ goto err_out;
++
++ tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
++ TG3_RX_JMB_RING_BYTES(tp),
++ &tpr->rx_jmb_mapping,
++ GFP_KERNEL);
++ if (!tpr->rx_jmb)
++ goto err_out;
++ }
++
++ return 0;
++
++err_out:
++ tg3_rx_prodring_fini(tp, tpr);
++ return -ENOMEM;
++}
++
++/* Free up pending packets in all rx/tx rings.
++ *
++ * The chip has been shut down and the driver detached from
++ * the networking, so no interrupts or new tx packets will
++ * end up in the driver. tp->{tx,}lock is not held and we are not
++ * in an interrupt context and thus may sleep.
++ */
++static void tg3_free_rings(struct tg3 *tp)
++{
++ int i, j;
++
++ for (j = 0; j < tp->irq_cnt; j++) {
++ struct tg3_napi *tnapi = &tp->napi[j];
++
++ tg3_rx_prodring_free(tp, &tnapi->prodring);
++
++ if (!tnapi->tx_buffers)
++ continue;
++
++ for (i = 0; i < TG3_TX_RING_SIZE; i++) {
++ struct sk_buff *skb = tnapi->tx_buffers[i].skb;
++
++ if (!skb)
++ continue;
++
++ tg3_tx_skb_unmap(tnapi, i,
++ skb_shinfo(skb)->nr_frags - 1);
++
++ dev_kfree_skb_any(skb);
++ }
++ netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
++ }
++}
++
++/* Initialize tx/rx rings for packet processing.
++ *
++ * The chip has been shut down and the driver detached from
++ * the networking, so no interrupts or new tx packets will
++ * end up in the driver. tp->{tx,}lock are held and thus
++ * we may not sleep.
++ */
++static int tg3_init_rings(struct tg3 *tp)
++{
++ int i;
++
++ /* Free up all the SKBs. */
++ tg3_free_rings(tp);
++
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ tnapi->last_tag = 0;
++ tnapi->last_irq_tag = 0;
++ tnapi->hw_status->status = 0;
++ tnapi->hw_status->status_tag = 0;
++ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
++
++ tnapi->tx_prod = 0;
++ tnapi->tx_cons = 0;
++ if (tnapi->tx_ring)
++ memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
++
++ tnapi->rx_rcb_ptr = 0;
++ if (tnapi->rx_rcb)
++ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
++
++ if (tnapi->prodring.rx_std &&
++ tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
++ tg3_free_rings(tp);
++ return -ENOMEM;
++ }
++ }
++
++ return 0;
++}
++
++static void tg3_mem_tx_release(struct tg3 *tp)
++{
++ int i;
++
++ for (i = 0; i < tp->irq_max; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ if (tnapi->tx_ring) {
++ dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
++ tnapi->tx_ring, tnapi->tx_desc_mapping);
++ tnapi->tx_ring = NULL;
++ }
++
++ kfree(tnapi->tx_buffers);
++ tnapi->tx_buffers = NULL;
++ }
++}
++
++static int tg3_mem_tx_acquire(struct tg3 *tp)
++{
++ int i;
++ struct tg3_napi *tnapi = &tp->napi[0];
++
++ /* If multivector TSS is enabled, vector 0 does not handle
++ * tx interrupts. Don't allocate any resources for it.
++ */
++ if (tg3_flag(tp, ENABLE_TSS))
++ tnapi++;
++
++ for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
++ tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
++ TG3_TX_RING_SIZE, GFP_KERNEL);
++ if (!tnapi->tx_buffers)
++ goto err_out;
++
++ tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
++ TG3_TX_RING_BYTES,
++ &tnapi->tx_desc_mapping,
++ GFP_KERNEL);
++ if (!tnapi->tx_ring)
++ goto err_out;
++ }
++
++ return 0;
++
++err_out:
++ tg3_mem_tx_release(tp);
++ return -ENOMEM;
++}
++
++static void tg3_mem_rx_release(struct tg3 *tp)
++{
++ int i;
++
++ for (i = 0; i < tp->irq_max; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ tg3_rx_prodring_fini(tp, &tnapi->prodring);
++
++ if (!tnapi->rx_rcb)
++ continue;
++
++ dma_free_coherent(&tp->pdev->dev,
++ TG3_RX_RCB_RING_BYTES(tp),
++ tnapi->rx_rcb,
++ tnapi->rx_rcb_mapping);
++ tnapi->rx_rcb = NULL;
++ }
++}
++
++static int tg3_mem_rx_acquire(struct tg3 *tp)
++{
++ unsigned int i, limit;
++
++ limit = tp->rxq_cnt;
++
++ /* If RSS is enabled, we need a (dummy) producer ring
++ * set on vector zero. This is the true hw prodring.
++ */
++ if (tg3_flag(tp, ENABLE_RSS))
++ limit++;
++
++ for (i = 0; i < limit; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ if (tg3_rx_prodring_init(tp, &tnapi->prodring))
++ goto err_out;
++
++ /* If multivector RSS is enabled, vector 0
++ * does not handle rx or tx interrupts.
++ * Don't allocate any resources for it.
++ */
++ if (!i && tg3_flag(tp, ENABLE_RSS))
++ continue;
++
++ tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
++ TG3_RX_RCB_RING_BYTES(tp),
++ &tnapi->rx_rcb_mapping,
++ GFP_KERNEL);
++ if (!tnapi->rx_rcb)
++ goto err_out;
++ }
++
++ return 0;
++
++err_out:
++ tg3_mem_rx_release(tp);
++ return -ENOMEM;
++}
++
++/*
++ * Must not be invoked with interrupt sources disabled and
++ * the hardware shutdown down.
++ */
++static void tg3_free_consistent(struct tg3 *tp)
++{
++ int i;
++
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ if (tnapi->hw_status) {
++ dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
++ tnapi->hw_status,
++ tnapi->status_mapping);
++ tnapi->hw_status = NULL;
++ }
++ }
++
++ tg3_mem_rx_release(tp);
++ tg3_mem_tx_release(tp);
++
++ if (tp->hw_stats) {
++ dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
++ tp->hw_stats, tp->stats_mapping);
++ tp->hw_stats = NULL;
++ }
++}
++
++/*
++ * Must not be invoked with interrupt sources disabled and
++ * the hardware shutdown down. Can sleep.
++ */
++static int tg3_alloc_consistent(struct tg3 *tp)
++{
++ int i;
++
++ tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
++ sizeof(struct tg3_hw_stats),
++ &tp->stats_mapping, GFP_KERNEL);
++ if (!tp->hw_stats)
++ goto err_out;
++
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++ struct tg3_hw_status *sblk;
++
++ tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
++ TG3_HW_STATUS_SIZE,
++ &tnapi->status_mapping,
++ GFP_KERNEL);
++ if (!tnapi->hw_status)
++ goto err_out;
++
++ sblk = tnapi->hw_status;
++
++ if (tg3_flag(tp, ENABLE_RSS)) {
++ u16 *prodptr = NULL;
++
++ /*
++ * When RSS is enabled, the status block format changes
++ * slightly. The "rx_jumbo_consumer", "reserved",
++ * and "rx_mini_consumer" members get mapped to the
++ * other three rx return ring producer indexes.
++ */
++ switch (i) {
++ case 1:
++ prodptr = &sblk->idx[0].rx_producer;
++ break;
++ case 2:
++ prodptr = &sblk->rx_jumbo_consumer;
++ break;
++ case 3:
++ prodptr = &sblk->reserved;
++ break;
++ case 4:
++ prodptr = &sblk->rx_mini_consumer;
++ break;
++ }
++ tnapi->rx_rcb_prod_idx = prodptr;
++ } else {
++ tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
++ }
++ }
++
++ if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
++ goto err_out;
++
++ return 0;
++
++err_out:
++ tg3_free_consistent(tp);
++ return -ENOMEM;
++}
++
++#define MAX_WAIT_CNT 1000
++
++/* To stop a block, clear the enable bit and poll till it
++ * clears. tp->lock is held.
++ */
++static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
++{
++ unsigned int i;
++ u32 val;
++
++ if (tg3_flag(tp, 5705_PLUS)) {
++ switch (ofs) {
++ case RCVLSC_MODE:
++ case DMAC_MODE:
++ case MBFREE_MODE:
++ case BUFMGR_MODE:
++ case MEMARB_MODE:
++ /* We can't enable/disable these bits of the
++ * 5705/5750, just say success.
++ */
++ return 0;
++
++ default:
++ break;
++ }
++ }
++
++ val = tr32(ofs);
++ val &= ~enable_bit;
++ tw32_f(ofs, val);
++
++ for (i = 0; i < MAX_WAIT_CNT; i++) {
++ if (pci_channel_offline(tp->pdev)) {
++ dev_err(&tp->pdev->dev,
++ "tg3_stop_block device offline, "
++ "ofs=%lx enable_bit=%x\n",
++ ofs, enable_bit);
++ return -ENODEV;
++ }
++
++ udelay(100);
++ val = tr32(ofs);
++ if ((val & enable_bit) == 0)
++ break;
++ }
++
++ if (i == MAX_WAIT_CNT && !silent) {
++ dev_err(&tp->pdev->dev,
++ "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
++ ofs, enable_bit);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++/* tp->lock is held. */
++static int tg3_abort_hw(struct tg3 *tp, bool silent)
++{
++ int i, err;
++
++ tg3_disable_ints(tp);
++
++ if (pci_channel_offline(tp->pdev)) {
++ tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
++ tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
++ err = -ENODEV;
++ goto err_no_dev;
++ }
++
++ tp->rx_mode &= ~RX_MODE_ENABLE;
++ tw32_f(MAC_RX_MODE, tp->rx_mode);
++ udelay(10);
++
++ err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
++
++ err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
++
++ tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++
++ tp->tx_mode &= ~TX_MODE_ENABLE;
++ tw32_f(MAC_TX_MODE, tp->tx_mode);
++
++ for (i = 0; i < MAX_WAIT_CNT; i++) {
++ udelay(100);
++ if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
++ break;
++ }
++ if (i >= MAX_WAIT_CNT) {
++ dev_err(&tp->pdev->dev,
++ "%s timed out, TX_MODE_ENABLE will not clear "
++ "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
++ err |= -ENODEV;
++ }
++
++ err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
++
++ tw32(FTQ_RESET, 0xffffffff);
++ tw32(FTQ_RESET, 0x00000000);
++
++ err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
++ err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
++
++err_no_dev:
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++ if (tnapi->hw_status)
++ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
++ }
++
++ return err;
++}
++
++/* Save PCI command register before chip reset */
++static void tg3_save_pci_state(struct tg3 *tp)
++{
++ pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
++}
++
++/* Restore PCI state after chip reset */
++static void tg3_restore_pci_state(struct tg3 *tp)
++{
++ u32 val;
++
++ /* Re-enable indirect register accesses. */
++ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
++ tp->misc_host_ctrl);
++
++ /* Set MAX PCI retry to zero. */
++ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
++ tg3_flag(tp, PCIX_MODE))
++ val |= PCISTATE_RETRY_SAME_DMA;
++ /* Allow reads and writes to the APE register and memory space. */
++ if (tg3_flag(tp, ENABLE_APE))
++ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
++ PCISTATE_ALLOW_APE_SHMEM_WR |
++ PCISTATE_ALLOW_APE_PSPACE_WR;
++ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
++
++ pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
++
++ if (!tg3_flag(tp, PCI_EXPRESS)) {
++ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
++ tp->pci_cacheline_sz);
++ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
++ tp->pci_lat_timer);
++ }
++
++ /* Make sure PCI-X relaxed ordering bit is clear. */
++ if (tg3_flag(tp, PCIX_MODE)) {
++ u16 pcix_cmd;
++
++ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
++ &pcix_cmd);
++ pcix_cmd &= ~PCI_X_CMD_ERO;
++ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
++ pcix_cmd);
++ }
++
++ if (tg3_flag(tp, 5780_CLASS)) {
++
++ /* Chip reset on 5780 will reset MSI enable bit,
++ * so need to restore it.
++ */
++ if (tg3_flag(tp, USING_MSI)) {
++ u16 ctrl;
++
++ pci_read_config_word(tp->pdev,
++ tp->msi_cap + PCI_MSI_FLAGS,
++ &ctrl);
++ pci_write_config_word(tp->pdev,
++ tp->msi_cap + PCI_MSI_FLAGS,
++ ctrl | PCI_MSI_FLAGS_ENABLE);
++ val = tr32(MSGINT_MODE);
++ tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
++ }
++ }
++}
++
++static void tg3_override_clk(struct tg3 *tp)
++{
++ u32 val;
++
++ switch (tg3_asic_rev(tp)) {
++ case ASIC_REV_5717:
++ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
++ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
++ TG3_CPMU_MAC_ORIDE_ENABLE);
++ break;
++
++ case ASIC_REV_5719:
++ case ASIC_REV_5720:
++ tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
++ break;
++
++ default:
++ return;
++ }
++}
++
++static void tg3_restore_clk(struct tg3 *tp)
++{
++ u32 val;
++
++ switch (tg3_asic_rev(tp)) {
++ case ASIC_REV_5717:
++ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
++ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
++ val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
++ break;
++
++ case ASIC_REV_5719:
++ case ASIC_REV_5720:
++ val = tr32(TG3_CPMU_CLCK_ORIDE);
++ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
++ break;
++
++ default:
++ return;
++ }
++}
++
++/* tp->lock is held. */
++static int tg3_chip_reset(struct tg3 *tp)
++{
++ u32 val;
++ void (*write_op)(struct tg3 *, u32, u32);
++ int i, err;
++
++ if (!pci_device_is_present(tp->pdev))
++ return -ENODEV;
++
++ tg3_nvram_lock(tp);
++
++ tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
++
++ /* No matching tg3_nvram_unlock() after this because
++ * chip reset below will undo the nvram lock.
++ */
++ tp->nvram_lock_cnt = 0;
++
++ /* GRC_MISC_CFG core clock reset will clear the memory
++ * enable bit in PCI register 4 and the MSI enable bit
++ * on some chips, so we save relevant registers here.
++ */
++ tg3_save_pci_state(tp);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
++ tg3_flag(tp, 5755_PLUS))
++ tw32(GRC_FASTBOOT_PC, 0);
++
++ /*
++ * We must avoid the readl() that normally takes place.
++ * It locks machines, causes machine checks, and other
++ * fun things. So, temporarily disable the 5701
++ * hardware workaround, while we do the reset.
++ */
++ write_op = tp->write32;
++ if (write_op == tg3_write_flush_reg32)
++ tp->write32 = tg3_write32;
++
++ /* Prevent the irq handler from reading or writing PCI registers
++ * during chip reset when the memory enable bit in the PCI command
++ * register may be cleared. The chip does not generate interrupt
++ * at this time, but the irq handler may still be called due to irq
++ * sharing or irqpoll.
++ */
++ tg3_flag_set(tp, CHIP_RESETTING);
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++ if (tnapi->hw_status) {
++ tnapi->hw_status->status = 0;
++ tnapi->hw_status->status_tag = 0;
++ }
++ tnapi->last_tag = 0;
++ tnapi->last_irq_tag = 0;
++ }
++ smp_mb();
++
++ for (i = 0; i < tp->irq_cnt; i++)
++ synchronize_irq(tp->napi[i].irq_vec);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
++ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
++ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
++ }
++
++ /* do the reset */
++ val = GRC_MISC_CFG_CORECLK_RESET;
++
++ if (tg3_flag(tp, PCI_EXPRESS)) {
++ /* Force PCIe 1.0a mode */
++ if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
++ !tg3_flag(tp, 57765_PLUS) &&
++ tr32(TG3_PCIE_PHY_TSTCTL) ==
++ (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
++ tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
++
++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
++ tw32(GRC_MISC_CFG, (1 << 29));
++ val |= (1 << 29);
++ }
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
++ tw32(GRC_VCPU_EXT_CTRL,
++ tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
++ }
++
++ /* Set the clock to the highest frequency to avoid timeouts. With link
++ * aware mode, the clock speed could be slow and bootcode does not
++ * complete within the expected time. Override the clock to allow the
++ * bootcode to finish sooner and then restore it.
++ */
++ tg3_override_clk(tp);
++
++ /* Manage gphy power for all CPMU absent PCIe devices. */
++ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
++ val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
++
++ tw32(GRC_MISC_CFG, val);
++
++ /* restore 5701 hardware bug workaround write method */
++ tp->write32 = write_op;
++
++ /* Unfortunately, we have to delay before the PCI read back.
++ * Some 575X chips even will not respond to a PCI cfg access
++ * when the reset command is given to the chip.
++ *
++ * How do these hardware designers expect things to work
++ * properly if the PCI write is posted for a long period
++ * of time? It is always necessary to have some method by
++ * which a register read back can occur to push the write
++ * out which does the reset.
++ *
++ * For most tg3 variants the trick below was working.
++ * Ho hum...
++ */
++ udelay(120);
++
++ /* Flush PCI posted writes. The normal MMIO registers
++ * are inaccessible at this time so this is the only
++ * way to make this reliably (actually, this is no longer
++ * the case, see above). I tried to use indirect
++ * register read/write but this upset some 5701 variants.
++ */
++ pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
++
++ udelay(120);
++
++ if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
++ u16 val16;
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
++ int j;
++ u32 cfg_val;
++
++ /* Wait for link training to complete. */
++ for (j = 0; j < 5000; j++)
++ udelay(100);
++
++ pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
++ pci_write_config_dword(tp->pdev, 0xc4,
++ cfg_val | (1 << 15));
++ }
++
++ /* Clear the "no snoop" and "relaxed ordering" bits. */
++ val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
++ /*
++ * Older PCIe devices only support the 128 byte
++ * MPS setting. Enforce the restriction.
++ */
++ if (!tg3_flag(tp, CPMU_PRESENT))
++ val16 |= PCI_EXP_DEVCTL_PAYLOAD;
++ pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
++
++ /* Clear error status */
++ pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
++ PCI_EXP_DEVSTA_CED |
++ PCI_EXP_DEVSTA_NFED |
++ PCI_EXP_DEVSTA_FED |
++ PCI_EXP_DEVSTA_URD);
++ }
++
++ tg3_restore_pci_state(tp);
++
++ tg3_flag_clear(tp, CHIP_RESETTING);
++ tg3_flag_clear(tp, ERROR_PROCESSED);
++
++ val = 0;
++ if (tg3_flag(tp, 5780_CLASS))
++ val = tr32(MEMARB_MODE);
++ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
++ tg3_stop_fw(tp);
++ tw32(0x5000, 0x400);
++ }
++
++ if (tg3_flag(tp, IS_SSB_CORE)) {
++ /*
++ * BCM4785: In order to avoid repercussions from using
++ * potentially defective internal ROM, stop the Rx RISC CPU,
++ * which is not required.
++ */
++ tg3_stop_fw(tp);
++ tg3_halt_cpu(tp, RX_CPU_BASE);
++ }
++
++ err = tg3_poll_fw(tp);
++ if (err)
++ return err;
++
++ tw32(GRC_MODE, tp->grc_mode);
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
++ val = tr32(0xc4);
++
++ tw32(0xc4, val | (1 << 15));
++ }
++
++ if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
++ tg3_asic_rev(tp) == ASIC_REV_5705) {
++ tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
++ tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
++ tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
++ }
++
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
++ tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
++ val = tp->mac_mode;
++ } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
++ tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
++ val = tp->mac_mode;
++ } else
++ val = 0;
++
++ tw32_f(MAC_MODE, val);
++ udelay(40);
++
++ tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
++
++ tg3_mdio_start(tp);
++
++ if (tg3_flag(tp, PCI_EXPRESS) &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
++ tg3_asic_rev(tp) != ASIC_REV_5785 &&
++ !tg3_flag(tp, 57765_PLUS)) {
++ val = tr32(0x7c00);
++
++ tw32(0x7c00, val | (1 << 25));
++ }
++
++ tg3_restore_clk(tp);
++
++ /* Reprobe ASF enable state. */
++ tg3_flag_clear(tp, ENABLE_ASF);
++ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
++ TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
++
++ tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
++ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
++ if (val == NIC_SRAM_DATA_SIG_MAGIC) {
++ u32 nic_cfg;
++
++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
++ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
++ tg3_flag_set(tp, ENABLE_ASF);
++ tp->last_event_jiffies = jiffies;
++ if (tg3_flag(tp, 5750_PLUS))
++ tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
++
++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
++ if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
++ tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
++ if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
++ tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
++ }
++ }
++
++ return 0;
++}
++
++static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
++static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
++static void __tg3_set_rx_mode(struct net_device *);
++
++/* tp->lock is held. */
++static int tg3_halt(struct tg3 *tp, int kind, bool silent)
++{
++ int err;
++
++ tg3_stop_fw(tp);
++
++ tg3_write_sig_pre_reset(tp, kind);
++
++ tg3_abort_hw(tp, silent);
++ err = tg3_chip_reset(tp);
++
++ __tg3_set_mac_addr(tp, false);
++
++ tg3_write_sig_legacy(tp, kind);
++ tg3_write_sig_post_reset(tp, kind);
++
++ if (tp->hw_stats) {
++ /* Save the stats across chip resets... */
++ tg3_get_nstats(tp, &tp->net_stats_prev);
++ tg3_get_estats(tp, &tp->estats_prev);
++
++ /* And make sure the next sample is new data */
++ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
++ }
++
++ return err;
++}
++
++static int tg3_set_mac_addr(struct net_device *dev, void *p)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ struct sockaddr *addr = p;
++ int err = 0;
++ bool skip_mac_1 = false;
++
++ if (!is_valid_ether_addr(addr->sa_data))
++ return -EADDRNOTAVAIL;
++
++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
++
++ if (!netif_running(dev))
++ return 0;
++
++ if (tg3_flag(tp, ENABLE_ASF)) {
++ u32 addr0_high, addr0_low, addr1_high, addr1_low;
++
++ addr0_high = tr32(MAC_ADDR_0_HIGH);
++ addr0_low = tr32(MAC_ADDR_0_LOW);
++ addr1_high = tr32(MAC_ADDR_1_HIGH);
++ addr1_low = tr32(MAC_ADDR_1_LOW);
++
++ /* Skip MAC addr 1 if ASF is using it. */
++ if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
++ !(addr1_high == 0 && addr1_low == 0))
++ skip_mac_1 = true;
++ }
++ spin_lock_bh(&tp->lock);
++ __tg3_set_mac_addr(tp, skip_mac_1);
++ __tg3_set_rx_mode(dev);
++ spin_unlock_bh(&tp->lock);
++
++ return err;
++}
++
++/* tp->lock is held. */
++static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
++ dma_addr_t mapping, u32 maxlen_flags,
++ u32 nic_addr)
++{
++ tg3_write_mem(tp,
++ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
++ ((u64) mapping >> 32));
++ tg3_write_mem(tp,
++ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
++ ((u64) mapping & 0xffffffff));
++ tg3_write_mem(tp,
++ (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
++ maxlen_flags);
++
++ if (!tg3_flag(tp, 5705_PLUS))
++ tg3_write_mem(tp,
++ (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
++ nic_addr);
++}
++
++
++static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
++{
++ int i = 0;
++
++ if (!tg3_flag(tp, ENABLE_TSS)) {
++ tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
++ tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
++ tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
++ } else {
++ tw32(HOSTCC_TXCOL_TICKS, 0);
++ tw32(HOSTCC_TXMAX_FRAMES, 0);
++ tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
++
++ for (; i < tp->txq_cnt; i++) {
++ u32 reg;
++
++ reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
++ tw32(reg, ec->tx_coalesce_usecs);
++ reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
++ tw32(reg, ec->tx_max_coalesced_frames);
++ reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
++ tw32(reg, ec->tx_max_coalesced_frames_irq);
++ }
++ }
++
++ for (; i < tp->irq_max - 1; i++) {
++ tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
++ tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
++ tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
++ }
++}
++
++static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
++{
++ int i = 0;
++ u32 limit = tp->rxq_cnt;
++
++ if (!tg3_flag(tp, ENABLE_RSS)) {
++ tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
++ tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
++ tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
++ limit--;
++ } else {
++ tw32(HOSTCC_RXCOL_TICKS, 0);
++ tw32(HOSTCC_RXMAX_FRAMES, 0);
++ tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
++ }
++
++ for (; i < limit; i++) {
++ u32 reg;
++
++ reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
++ tw32(reg, ec->rx_coalesce_usecs);
++ reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
++ tw32(reg, ec->rx_max_coalesced_frames);
++ reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
++ tw32(reg, ec->rx_max_coalesced_frames_irq);
++ }
++
++ for (; i < tp->irq_max - 1; i++) {
++ tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
++ tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
++ tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
++ }
++}
++
++static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
++{
++ tg3_coal_tx_init(tp, ec);
++ tg3_coal_rx_init(tp, ec);
++
++ if (!tg3_flag(tp, 5705_PLUS)) {
++ u32 val = ec->stats_block_coalesce_usecs;
++
++ tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
++ tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
++
++ if (!tp->link_up)
++ val = 0;
++
++ tw32(HOSTCC_STAT_COAL_TICKS, val);
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_tx_rcbs_disable(struct tg3 *tp)
++{
++ u32 txrcb, limit;
++
++ /* Disable all transmit rings but the first. */
++ if (!tg3_flag(tp, 5705_PLUS))
++ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
++ else if (tg3_flag(tp, 5717_PLUS))
++ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
++ else if (tg3_flag(tp, 57765_CLASS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
++ else
++ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
++
++ for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
++ txrcb < limit; txrcb += TG3_BDINFO_SIZE)
++ tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
++ BDINFO_FLAGS_DISABLED);
++}
++
++/* tp->lock is held. */
++static void tg3_tx_rcbs_init(struct tg3 *tp)
++{
++ int i = 0;
++ u32 txrcb = NIC_SRAM_SEND_RCB;
++
++ if (tg3_flag(tp, ENABLE_TSS))
++ i++;
++
++ for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ if (!tnapi->tx_ring)
++ continue;
++
++ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
++ (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
++ NIC_SRAM_TX_BUFFER_DESC);
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
++{
++ u32 rxrcb, limit;
++
++ /* Disable all receive return rings but the first. */
++ if (tg3_flag(tp, 5717_PLUS))
++ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
++ else if (!tg3_flag(tp, 5705_PLUS))
++ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
++ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762 ||
++ tg3_flag(tp, 57765_CLASS))
++ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
++ else
++ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
++
++ for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
++ rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
++ tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
++ BDINFO_FLAGS_DISABLED);
++}
++
++/* tp->lock is held. */
++static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
++{
++ int i = 0;
++ u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
++
++ if (tg3_flag(tp, ENABLE_RSS))
++ i++;
++
++ for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ if (!tnapi->rx_rcb)
++ continue;
++
++ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
++ (tp->rx_ret_ring_mask + 1) <<
++ BDINFO_FLAGS_MAXLEN_SHIFT, 0);
++ }
++}
++
++/* tp->lock is held. */
++static void tg3_rings_reset(struct tg3 *tp)
++{
++ int i;
++ u32 stblk;
++ struct tg3_napi *tnapi = &tp->napi[0];
++
++ tg3_tx_rcbs_disable(tp);
++
++ tg3_rx_ret_rcbs_disable(tp);
++
++ /* Disable interrupts */
++ tw32_mailbox_f(tp->napi[0].int_mbox, 1);
++ tp->napi[0].chk_msi_cnt = 0;
++ tp->napi[0].last_rx_cons = 0;
++ tp->napi[0].last_tx_cons = 0;
++
++ /* Zero mailbox registers. */
++ if (tg3_flag(tp, SUPPORT_MSIX)) {
++ for (i = 1; i < tp->irq_max; i++) {
++ tp->napi[i].tx_prod = 0;
++ tp->napi[i].tx_cons = 0;
++ if (tg3_flag(tp, ENABLE_TSS))
++ tw32_mailbox(tp->napi[i].prodmbox, 0);
++ tw32_rx_mbox(tp->napi[i].consmbox, 0);
++ tw32_mailbox_f(tp->napi[i].int_mbox, 1);
++ tp->napi[i].chk_msi_cnt = 0;
++ tp->napi[i].last_rx_cons = 0;
++ tp->napi[i].last_tx_cons = 0;
++ }
++ if (!tg3_flag(tp, ENABLE_TSS))
++ tw32_mailbox(tp->napi[0].prodmbox, 0);
++ } else {
++ tp->napi[0].tx_prod = 0;
++ tp->napi[0].tx_cons = 0;
++ tw32_mailbox(tp->napi[0].prodmbox, 0);
++ tw32_rx_mbox(tp->napi[0].consmbox, 0);
++ }
++
++ /* Make sure the NIC-based send BD rings are disabled. */
++ if (!tg3_flag(tp, 5705_PLUS)) {
++ u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
++ for (i = 0; i < 16; i++)
++ tw32_tx_mbox(mbox + i * 8, 0);
++ }
++
++ /* Clear status block in ram. */
++ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
++
++ /* Set status block DMA address */
++ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
++ ((u64) tnapi->status_mapping >> 32));
++ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
++ ((u64) tnapi->status_mapping & 0xffffffff));
++
++ stblk = HOSTCC_STATBLCK_RING1;
++
++ for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
++ u64 mapping = (u64)tnapi->status_mapping;
++ tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
++ tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
++ stblk += 8;
++
++ /* Clear status block in ram. */
++ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
++ }
++
++ tg3_tx_rcbs_init(tp);
++ tg3_rx_ret_rcbs_init(tp);
++}
++
++static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
++{
++ u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
++
++ if (!tg3_flag(tp, 5750_PLUS) ||
++ tg3_flag(tp, 5780_CLASS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5750 ||
++ tg3_asic_rev(tp) == ASIC_REV_5752 ||
++ tg3_flag(tp, 57765_PLUS))
++ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
++ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
++ tg3_asic_rev(tp) == ASIC_REV_5787)
++ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
++ else
++ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
++
++ nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
++ host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
++
++ val = min(nic_rep_thresh, host_rep_thresh);
++ tw32(RCVBDI_STD_THRESH, val);
++
++ if (tg3_flag(tp, 57765_PLUS))
++ tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
++
++ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
++ return;
++
++ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
++
++ host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
++
++ val = min(bdcache_maxcnt / 2, host_rep_thresh);
++ tw32(RCVBDI_JUMBO_THRESH, val);
++
++ if (tg3_flag(tp, 57765_PLUS))
++ tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
++}
++
++static inline u32 calc_crc(unsigned char *buf, int len)
++{
++ u32 reg;
++ u32 tmp;
++ int j, k;
++
++ reg = 0xffffffff;
++
++ for (j = 0; j < len; j++) {
++ reg ^= buf[j];
++
++ for (k = 0; k < 8; k++) {
++ tmp = reg & 0x01;
++
++ reg >>= 1;
++
++ if (tmp)
++ reg ^= 0xedb88320;
++ }
++ }
++
++ return ~reg;
++}
++
++static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
++{
++ /* accept or reject all multicast frames */
++ tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
++ tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
++ tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
++ tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
++}
++
++static void __tg3_set_rx_mode(struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ u32 rx_mode;
++
++ rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
++ RX_MODE_KEEP_VLAN_TAG);
++
++#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
++ /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
++ * flag clear.
++ */
++ if (!tg3_flag(tp, ENABLE_ASF))
++ rx_mode |= RX_MODE_KEEP_VLAN_TAG;
++#endif
++
++ if (dev->flags & IFF_PROMISC) {
++ /* Promiscuous mode. */
++ rx_mode |= RX_MODE_PROMISC;
++ } else if (dev->flags & IFF_ALLMULTI) {
++ /* Accept all multicast. */
++ tg3_set_multi(tp, 1);
++ } else if (netdev_mc_empty(dev)) {
++ /* Reject all multicast. */
++ tg3_set_multi(tp, 0);
++ } else {
++ /* Accept one or more multicast(s). */
++ struct netdev_hw_addr *ha;
++ u32 mc_filter[4] = { 0, };
++ u32 regidx;
++ u32 bit;
++ u32 crc;
++
++ netdev_for_each_mc_addr(ha, dev) {
++ crc = calc_crc(ha->addr, ETH_ALEN);
++ bit = ~crc & 0x7f;
++ regidx = (bit & 0x60) >> 5;
++ bit &= 0x1f;
++ mc_filter[regidx] |= (1 << bit);
++ }
++
++ tw32(MAC_HASH_REG_0, mc_filter[0]);
++ tw32(MAC_HASH_REG_1, mc_filter[1]);
++ tw32(MAC_HASH_REG_2, mc_filter[2]);
++ tw32(MAC_HASH_REG_3, mc_filter[3]);
++ }
++
++ if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
++ rx_mode |= RX_MODE_PROMISC;
++ } else if (!(dev->flags & IFF_PROMISC)) {
++ /* Add all entries into to the mac addr filter list */
++ int i = 0;
++ struct netdev_hw_addr *ha;
++
++ netdev_for_each_uc_addr(ha, dev) {
++ __tg3_set_one_mac_addr(tp, ha->addr,
++ i + TG3_UCAST_ADDR_IDX(tp));
++ i++;
++ }
++ }
++
++ if (rx_mode != tp->rx_mode) {
++ tp->rx_mode = rx_mode;
++ tw32_f(MAC_RX_MODE, rx_mode);
++ udelay(10);
++ }
++}
++
++static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
++{
++ int i;
++
++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
++ tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
++}
++
++static void tg3_rss_check_indir_tbl(struct tg3 *tp)
++{
++ int i;
++
++ if (!tg3_flag(tp, SUPPORT_MSIX))
++ return;
++
++ if (tp->rxq_cnt == 1) {
++ memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
++ return;
++ }
++
++ /* Validate table against current IRQ count */
++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
++ if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
++ break;
++ }
++
++ if (i != TG3_RSS_INDIR_TBL_SIZE)
++ tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
++}
++
++static void tg3_rss_write_indir_tbl(struct tg3 *tp)
++{
++ int i = 0;
++ u32 reg = MAC_RSS_INDIR_TBL_0;
++
++ while (i < TG3_RSS_INDIR_TBL_SIZE) {
++ u32 val = tp->rss_ind_tbl[i];
++ i++;
++ for (; i % 8; i++) {
++ val <<= 4;
++ val |= tp->rss_ind_tbl[i];
++ }
++ tw32(reg, val);
++ reg += 4;
++ }
++}
++
++static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
++{
++ if (tg3_asic_rev(tp) == ASIC_REV_5719)
++ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
++ else
++ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
++}
++
++/* tp->lock is held. */
++static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
++{
++ u32 val, rdmac_mode;
++ int i, err, limit;
++ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
++
++ tg3_disable_ints(tp);
++
++ tg3_stop_fw(tp);
++
++ tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
++
++ if (tg3_flag(tp, INIT_COMPLETE))
++ tg3_abort_hw(tp, 1);
++
++ if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
++ !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
++ tg3_phy_pull_config(tp);
++ tg3_eee_pull_config(tp, NULL);
++ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
++ }
++
++ /* Enable MAC control of LPI */
++ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
++ tg3_setup_eee(tp);
++
++ if (reset_phy)
++ tg3_phy_reset(tp);
++
++ err = tg3_chip_reset(tp);
++ if (err)
++ return err;
++
++ tg3_write_sig_legacy(tp, RESET_KIND_INIT);
++
++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
++ val = tr32(TG3_CPMU_CTRL);
++ val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
++ tw32(TG3_CPMU_CTRL, val);
++
++ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
++ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
++ val |= CPMU_LSPD_10MB_MACCLK_6_25;
++ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
++
++ val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
++ val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
++ val |= CPMU_LNK_AWARE_MACCLK_6_25;
++ tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
++
++ val = tr32(TG3_CPMU_HST_ACC);
++ val &= ~CPMU_HST_ACC_MACCLK_MASK;
++ val |= CPMU_HST_ACC_MACCLK_6_25;
++ tw32(TG3_CPMU_HST_ACC, val);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
++ val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
++ val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
++ PCIE_PWR_MGMT_L1_THRESH_4MS;
++ tw32(PCIE_PWR_MGMT_THRESH, val);
++
++ val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
++ tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
++
++ tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
++
++ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
++ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
++ }
++
++ if (tg3_flag(tp, L1PLLPD_EN)) {
++ u32 grc_mode = tr32(GRC_MODE);
++
++ /* Access the lower 1K of PL PCIE block registers. */
++ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
++ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
++
++ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
++ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
++ val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
++
++ tw32(GRC_MODE, grc_mode);
++ }
++
++ if (tg3_flag(tp, 57765_CLASS)) {
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
++ u32 grc_mode = tr32(GRC_MODE);
++
++ /* Access the lower 1K of PL PCIE block registers. */
++ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
++ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
++
++ val = tr32(TG3_PCIE_TLDLPL_PORT +
++ TG3_PCIE_PL_LO_PHYCTL5);
++ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
++ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
++
++ tw32(GRC_MODE, grc_mode);
++ }
++
++ if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
++ u32 grc_mode;
++
++ /* Fix transmit hangs */
++ val = tr32(TG3_CPMU_PADRNG_CTL);
++ val |= TG3_CPMU_PADRNG_CTL_RDIV2;
++ tw32(TG3_CPMU_PADRNG_CTL, val);
++
++ grc_mode = tr32(GRC_MODE);
++
++ /* Access the lower 1K of DL PCIE block registers. */
++ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
++ tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
++
++ val = tr32(TG3_PCIE_TLDLPL_PORT +
++ TG3_PCIE_DL_LO_FTSMAX);
++ val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
++ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
++ val | TG3_PCIE_DL_LO_FTSMAX_VAL);
++
++ tw32(GRC_MODE, grc_mode);
++ }
++
++ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
++ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
++ val |= CPMU_LSPD_10MB_MACCLK_6_25;
++ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
++ }
++
++ /* This works around an issue with Athlon chipsets on
++ * B3 tigon3 silicon. This bit has no effect on any
++ * other revision. But do not set this on PCI Express
++ * chips and don't even touch the clocks if the CPMU is present.
++ */
++ if (!tg3_flag(tp, CPMU_PRESENT)) {
++ if (!tg3_flag(tp, PCI_EXPRESS))
++ tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
++ tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
++ }
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
++ tg3_flag(tp, PCIX_MODE)) {
++ val = tr32(TG3PCI_PCISTATE);
++ val |= PCISTATE_RETRY_SAME_DMA;
++ tw32(TG3PCI_PCISTATE, val);
++ }
++
++ if (tg3_flag(tp, ENABLE_APE)) {
++ /* Allow reads and writes to the
++ * APE register and memory space.
++ */
++ val = tr32(TG3PCI_PCISTATE);
++ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
++ PCISTATE_ALLOW_APE_SHMEM_WR |
++ PCISTATE_ALLOW_APE_PSPACE_WR;
++ tw32(TG3PCI_PCISTATE, val);
++ }
++
++ if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
++ /* Enable some hw fixes. */
++ val = tr32(TG3PCI_MSI_DATA);
++ val |= (1 << 26) | (1 << 28) | (1 << 29);
++ tw32(TG3PCI_MSI_DATA, val);
++ }
++
++ /* Descriptor ring init may make accesses to the
++ * NIC SRAM area to setup the TX descriptors, so we
++ * can only do this after the hardware has been
++ * successfully reset.
++ */
++ err = tg3_init_rings(tp);
++ if (err)
++ return err;
++
++ if (tg3_flag(tp, 57765_PLUS)) {
++ val = tr32(TG3PCI_DMA_RW_CTRL) &
++ ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
++ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
++ if (!tg3_flag(tp, 57765_CLASS) &&
++ tg3_asic_rev(tp) != ASIC_REV_5717 &&
++ tg3_asic_rev(tp) != ASIC_REV_5762)
++ val |= DMA_RWCTRL_TAGGED_STAT_WA;
++ tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
++ } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
++ tg3_asic_rev(tp) != ASIC_REV_5761) {
++ /* This value is determined during the probe time DMA
++ * engine test, tg3_test_dma.
++ */
++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
++ }
++
++ tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
++ GRC_MODE_4X_NIC_SEND_RINGS |
++ GRC_MODE_NO_TX_PHDR_CSUM |
++ GRC_MODE_NO_RX_PHDR_CSUM);
++ tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
++
++ /* Pseudo-header checksum is done by hardware logic and not
++ * the offload processers, so make the chip do the pseudo-
++ * header checksums on receive. For transmit it is more
++ * convenient to do the pseudo-header checksum in software
++ * as Linux does that on transmit for us in all cases.
++ */
++ tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
++
++ val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
++ if (tp->rxptpctl)
++ tw32(TG3_RX_PTP_CTL,
++ tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
++
++ if (tg3_flag(tp, PTP_CAPABLE))
++ val |= GRC_MODE_TIME_SYNC_ENABLE;
++
++ tw32(GRC_MODE, tp->grc_mode | val);
++
++ /* Setup the timer prescalar register. Clock is always 66Mhz. */
++ val = tr32(GRC_MISC_CFG);
++ val &= ~0xff;
++ val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
++ tw32(GRC_MISC_CFG, val);
++
++ /* Initialize MBUF/DESC pool. */
++ if (tg3_flag(tp, 5750_PLUS)) {
++ /* Do nothing. */
++ } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
++ tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
++ if (tg3_asic_rev(tp) == ASIC_REV_5704)
++ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
++ else
++ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
++ tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
++ tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
++ } else if (tg3_flag(tp, TSO_CAPABLE)) {
++ int fw_len;
++
++ fw_len = tp->fw_len;
++ fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
++ tw32(BUFMGR_MB_POOL_ADDR,
++ NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
++ tw32(BUFMGR_MB_POOL_SIZE,
++ NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
++ }
++
++ if (tp->dev->mtu <= ETH_DATA_LEN) {
++ tw32(BUFMGR_MB_RDMA_LOW_WATER,
++ tp->bufmgr_config.mbuf_read_dma_low_water);
++ tw32(BUFMGR_MB_MACRX_LOW_WATER,
++ tp->bufmgr_config.mbuf_mac_rx_low_water);
++ tw32(BUFMGR_MB_HIGH_WATER,
++ tp->bufmgr_config.mbuf_high_water);
++ } else {
++ tw32(BUFMGR_MB_RDMA_LOW_WATER,
++ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
++ tw32(BUFMGR_MB_MACRX_LOW_WATER,
++ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
++ tw32(BUFMGR_MB_HIGH_WATER,
++ tp->bufmgr_config.mbuf_high_water_jumbo);
++ }
++ tw32(BUFMGR_DMA_LOW_WATER,
++ tp->bufmgr_config.dma_low_water);
++ tw32(BUFMGR_DMA_HIGH_WATER,
++ tp->bufmgr_config.dma_high_water);
++
++ val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
++ if (tg3_asic_rev(tp) == ASIC_REV_5719)
++ val |= BUFMGR_MODE_NO_TX_UNDERRUN;
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
++ val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
++ tw32(BUFMGR_MODE, val);
++ for (i = 0; i < 2000; i++) {
++ if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
++ break;
++ udelay(10);
++ }
++ if (i >= 2000) {
++ netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
++ return -ENODEV;
++ }
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
++ tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
++
++ tg3_setup_rxbd_thresholds(tp);
++
++ /* Initialize TG3_BDINFO's at:
++ * RCVDBDI_STD_BD: standard eth size rx ring
++ * RCVDBDI_JUMBO_BD: jumbo frame rx ring
++ * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
++ *
++ * like so:
++ * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
++ * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
++ * ring attribute flags
++ * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
++ *
++ * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
++ * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
++ *
++ * The size of each ring is fixed in the firmware, but the location is
++ * configurable.
++ */
++ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
++ ((u64) tpr->rx_std_mapping >> 32));
++ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
++ ((u64) tpr->rx_std_mapping & 0xffffffff));
++ if (!tg3_flag(tp, 5717_PLUS))
++ tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
++ NIC_SRAM_RX_BUFFER_DESC);
++
++ /* Disable the mini ring */
++ if (!tg3_flag(tp, 5705_PLUS))
++ tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
++ BDINFO_FLAGS_DISABLED);
++
++ /* Program the jumbo buffer descriptor ring control
++ * blocks on those devices that have them.
++ */
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
++ (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
++
++ if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
++ ((u64) tpr->rx_jmb_mapping >> 32));
++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
++ ((u64) tpr->rx_jmb_mapping & 0xffffffff));
++ val = TG3_RX_JMB_RING_SIZE(tp) <<
++ BDINFO_FLAGS_MAXLEN_SHIFT;
++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
++ val | BDINFO_FLAGS_USE_EXT_RECV);
++ if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
++ tg3_flag(tp, 57765_CLASS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
++ NIC_SRAM_RX_JUMBO_BUFFER_DESC);
++ } else {
++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
++ BDINFO_FLAGS_DISABLED);
++ }
++
++ if (tg3_flag(tp, 57765_PLUS)) {
++ val = TG3_RX_STD_RING_SIZE(tp);
++ val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
++ val |= (TG3_RX_STD_DMA_SZ << 2);
++ } else
++ val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
++ } else
++ val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
++
++ tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
++
++ tpr->rx_std_prod_idx = tp->rx_pending;
++ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
++
++ tpr->rx_jmb_prod_idx =
++ tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
++ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
++
++ tg3_rings_reset(tp);
++
++ /* Initialize MAC address and backoff seed. */
++ __tg3_set_mac_addr(tp, false);
++
++ /* MTU + ethernet header + FCS + optional VLAN tag */
++ tw32(MAC_RX_MTU_SIZE,
++ tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
++
++ /* The slot time is changed by tg3_setup_phy if we
++ * run at gigabit with half duplex.
++ */
++ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
++ (6 << TX_LENGTHS_IPG_SHIFT) |
++ (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ val |= tr32(MAC_TX_LENGTHS) &
++ (TX_LENGTHS_JMB_FRM_LEN_MSK |
++ TX_LENGTHS_CNT_DWN_VAL_MSK);
++
++ tw32(MAC_TX_LENGTHS, val);
++
++ /* Receive rules. */
++ tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
++ tw32(RCVLPC_CONFIG, 0x0181);
++
++ /* Calculate RDMAC_MODE setting early, we need it to determine
++ * the RCVLPC_STATE_ENABLE mask.
++ */
++ rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
++ RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
++ RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
++ RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
++ RDMAC_MODE_LNGREAD_ENAB);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717)
++ rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
++ tg3_asic_rev(tp) == ASIC_REV_5785 ||
++ tg3_asic_rev(tp) == ASIC_REV_57780)
++ rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
++ RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
++ RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
++ if (tg3_flag(tp, TSO_CAPABLE) &&
++ tg3_asic_rev(tp) == ASIC_REV_5705) {
++ rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
++ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
++ !tg3_flag(tp, IS_5788)) {
++ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
++ }
++ }
++
++ if (tg3_flag(tp, PCI_EXPRESS))
++ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
++ tp->dma_limit = 0;
++ if (tp->dev->mtu <= ETH_DATA_LEN) {
++ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
++ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
++ }
++ }
++
++ if (tg3_flag(tp, HW_TSO_1) ||
++ tg3_flag(tp, HW_TSO_2) ||
++ tg3_flag(tp, HW_TSO_3))
++ rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
++
++ if (tg3_flag(tp, 57765_PLUS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5785 ||
++ tg3_asic_rev(tp) == ASIC_REV_57780)
++ rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
++ tg3_asic_rev(tp) == ASIC_REV_5784 ||
++ tg3_asic_rev(tp) == ASIC_REV_5785 ||
++ tg3_asic_rev(tp) == ASIC_REV_57780 ||
++ tg3_flag(tp, 57765_PLUS)) {
++ u32 tgtreg;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5762)
++ tgtreg = TG3_RDMA_RSRVCTRL_REG2;
++ else
++ tgtreg = TG3_RDMA_RSRVCTRL_REG;
++
++ val = tr32(tgtreg);
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762) {
++ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
++ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
++ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
++ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
++ TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
++ TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
++ }
++ tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762) {
++ u32 tgtreg;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5762)
++ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
++ else
++ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
++
++ val = tr32(tgtreg);
++ tw32(tgtreg, val |
++ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
++ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
++ }
++
++ /* Receive/send statistics. */
++ if (tg3_flag(tp, 5750_PLUS)) {
++ val = tr32(RCVLPC_STATS_ENABLE);
++ val &= ~RCVLPC_STATSENAB_DACK_FIX;
++ tw32(RCVLPC_STATS_ENABLE, val);
++ } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
++ tg3_flag(tp, TSO_CAPABLE)) {
++ val = tr32(RCVLPC_STATS_ENABLE);
++ val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
++ tw32(RCVLPC_STATS_ENABLE, val);
++ } else {
++ tw32(RCVLPC_STATS_ENABLE, 0xffffff);
++ }
++ tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
++ tw32(SNDDATAI_STATSENAB, 0xffffff);
++ tw32(SNDDATAI_STATSCTRL,
++ (SNDDATAI_SCTRL_ENABLE |
++ SNDDATAI_SCTRL_FASTUPD));
++
++ /* Setup host coalescing engine. */
++ tw32(HOSTCC_MODE, 0);
++ for (i = 0; i < 2000; i++) {
++ if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
++ break;
++ udelay(10);
++ }
++
++ __tg3_set_coalesce(tp, &tp->coal);
++
++ if (!tg3_flag(tp, 5705_PLUS)) {
++ /* Status/statistics block address. See tg3_timer,
++ * the tg3_periodic_fetch_stats call there, and
++ * tg3_get_stats to see how this works for 5705/5750 chips.
++ */
++ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
++ ((u64) tp->stats_mapping >> 32));
++ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
++ ((u64) tp->stats_mapping & 0xffffffff));
++ tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
++
++ tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
++
++ /* Clear statistics and status block memory areas */
++ for (i = NIC_SRAM_STATS_BLK;
++ i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
++ i += sizeof(u32)) {
++ tg3_write_mem(tp, i, 0);
++ udelay(40);
++ }
++ }
++
++ tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
++
++ tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
++ tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
++ if (!tg3_flag(tp, 5705_PLUS))
++ tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
++
++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
++ /* reset to prevent losing 1st rx packet intermittently */
++ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
++ udelay(10);
++ }
++
++ tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
++ MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
++ MAC_MODE_FHDE_ENABLE;
++ if (tg3_flag(tp, ENABLE_APE))
++ tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
++ if (!tg3_flag(tp, 5705_PLUS) &&
++ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
++ tg3_asic_rev(tp) != ASIC_REV_5700)
++ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
++ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
++ udelay(40);
++
++ /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
++ * If TG3_FLAG_IS_NIC is zero, we should read the
++ * register to preserve the GPIO settings for LOMs. The GPIOs,
++ * whether used as inputs or outputs, are set by boot code after
++ * reset.
++ */
++ if (!tg3_flag(tp, IS_NIC)) {
++ u32 gpio_mask;
++
++ gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
++ GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
++ GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5752)
++ gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
++ GRC_LCLCTRL_GPIO_OUTPUT3;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5755)
++ gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
++
++ tp->grc_local_ctrl &= ~gpio_mask;
++ tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
++
++ /* GPIO1 must be driven high for eeprom write protect */
++ if (tg3_flag(tp, EEPROM_WRITE_PROT))
++ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
++ GRC_LCLCTRL_GPIO_OUTPUT1);
++ }
++ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
++ udelay(100);
++
++ if (tg3_flag(tp, USING_MSIX)) {
++ val = tr32(MSGINT_MODE);
++ val |= MSGINT_MODE_ENABLE;
++ if (tp->irq_cnt > 1)
++ val |= MSGINT_MODE_MULTIVEC_EN;
++ if (!tg3_flag(tp, 1SHOT_MSI))
++ val |= MSGINT_MODE_ONE_SHOT_DISABLE;
++ tw32(MSGINT_MODE, val);
++ }
++
++ if (!tg3_flag(tp, 5705_PLUS)) {
++ tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
++ udelay(40);
++ }
++
++ val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
++ WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
++ WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
++ WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
++ WDMAC_MODE_LNGREAD_ENAB);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
++ if (tg3_flag(tp, TSO_CAPABLE) &&
++ (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
++ /* nothing */
++ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
++ !tg3_flag(tp, IS_5788)) {
++ val |= WDMAC_MODE_RX_ACCEL;
++ }
++ }
++
++ /* Enable host coalescing bug fix */
++ if (tg3_flag(tp, 5755_PLUS))
++ val |= WDMAC_MODE_STATUS_TAG_FIX;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5785)
++ val |= WDMAC_MODE_BURST_ALL_DATA;
++
++ tw32_f(WDMAC_MODE, val);
++ udelay(40);
++
++ if (tg3_flag(tp, PCIX_MODE)) {
++ u16 pcix_cmd;
++
++ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
++ &pcix_cmd);
++ if (tg3_asic_rev(tp) == ASIC_REV_5703) {
++ pcix_cmd &= ~PCI_X_CMD_MAX_READ;
++ pcix_cmd |= PCI_X_CMD_READ_2K;
++ } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
++ pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
++ pcix_cmd |= PCI_X_CMD_READ_2K;
++ }
++ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
++ pcix_cmd);
++ }
++
++ tw32_f(RDMAC_MODE, rdmac_mode);
++ udelay(40);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720) {
++ for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
++ if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
++ break;
++ }
++ if (i < TG3_NUM_RDMA_CHANNELS) {
++ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
++ val |= tg3_lso_rd_dma_workaround_bit(tp);
++ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
++ tg3_flag_set(tp, 5719_5720_RDMA_BUG);
++ }
++ }
++
++ tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
++ if (!tg3_flag(tp, 5705_PLUS))
++ tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5761)
++ tw32(SNDDATAC_MODE,
++ SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
++ else
++ tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
++
++ tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
++ tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
++ val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
++ if (tg3_flag(tp, LRG_PROD_RING_CAP))
++ val |= RCVDBDI_MODE_LRG_RING_SZ;
++ tw32(RCVDBDI_MODE, val);
++ tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
++ if (tg3_flag(tp, HW_TSO_1) ||
++ tg3_flag(tp, HW_TSO_2) ||
++ tg3_flag(tp, HW_TSO_3))
++ tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
++ val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
++ if (tg3_flag(tp, ENABLE_TSS))
++ val |= SNDBDI_MODE_MULTI_TXQ_EN;
++ tw32(SNDBDI_MODE, val);
++ tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
++ err = tg3_load_5701_a0_firmware_fix(tp);
++ if (err)
++ return err;
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
++ /* Ignore any errors for the firmware download. If download
++ * fails, the device will operate with EEE disabled
++ */
++ tg3_load_57766_firmware(tp);
++ }
++
++ if (tg3_flag(tp, TSO_CAPABLE)) {
++ err = tg3_load_tso_firmware(tp);
++ if (err)
++ return err;
++ }
++
++ tp->tx_mode = TX_MODE_ENABLE;
++
++ if (tg3_flag(tp, 5755_PLUS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5906)
++ tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762) {
++ val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
++ tp->tx_mode &= ~val;
++ tp->tx_mode |= tr32(MAC_TX_MODE) & val;
++ }
++
++ tw32_f(MAC_TX_MODE, tp->tx_mode);
++ udelay(100);
++
++ if (tg3_flag(tp, ENABLE_RSS)) {
++ tg3_rss_write_indir_tbl(tp);
++
++ /* Setup the "secret" hash key. */
++ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
++ tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
++ tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
++ tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
++ tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
++ tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
++ tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
++ tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
++ tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
++ tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
++ }
++
++ tp->rx_mode = RX_MODE_ENABLE;
++ if (tg3_flag(tp, 5755_PLUS))
++ tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5762)
++ tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
++
++ if (tg3_flag(tp, ENABLE_RSS))
++ tp->rx_mode |= RX_MODE_RSS_ENABLE |
++ RX_MODE_RSS_ITBL_HASH_BITS_7 |
++ RX_MODE_RSS_IPV6_HASH_EN |
++ RX_MODE_RSS_TCP_IPV6_HASH_EN |
++ RX_MODE_RSS_IPV4_HASH_EN |
++ RX_MODE_RSS_TCP_IPV4_HASH_EN;
++
++ tw32_f(MAC_RX_MODE, tp->rx_mode);
++ udelay(10);
++
++ tw32(MAC_LED_CTRL, tp->led_ctrl);
++
++ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
++ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
++ udelay(10);
++ }
++ tw32_f(MAC_RX_MODE, tp->rx_mode);
++ udelay(10);
++
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
++ if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
++ !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
++ /* Set drive transmission level to 1.2V */
++ /* only if the signal pre-emphasis bit is not set */
++ val = tr32(MAC_SERDES_CFG);
++ val &= 0xfffff000;
++ val |= 0x880;
++ tw32(MAC_SERDES_CFG, val);
++ }
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
++ tw32(MAC_SERDES_CFG, 0x616000);
++ }
++
++ /* Prevent chip from dropping frames when flow control
++ * is enabled.
++ */
++ if (tg3_flag(tp, 57765_CLASS))
++ val = 1;
++ else
++ val = 2;
++ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
++ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
++ /* Use hardware link auto-negotiation */
++ tg3_flag_set(tp, HW_AUTONEG);
++ }
++
++ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
++ tg3_asic_rev(tp) == ASIC_REV_5714) {
++ u32 tmp;
++
++ tmp = tr32(SERDES_RX_CTRL);
++ tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
++ tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
++ tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
++ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
++ }
++
++ if (!tg3_flag(tp, USE_PHYLIB)) {
++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
++ tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
++
++ err = tg3_setup_phy(tp, false);
++ if (err)
++ return err;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
++ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
++ u32 tmp;
++
++ /* Clear CRC stats. */
++ if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
++ tg3_writephy(tp, MII_TG3_TEST1,
++ tmp | MII_TG3_TEST1_CRC_EN);
++ tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
++ }
++ }
++ }
++
++ __tg3_set_rx_mode(tp->dev);
++
++ /* Initialize receive rules. */
++ tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
++ tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
++ tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
++ tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
++
++ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
++ limit = 8;
++ else
++ limit = 16;
++ if (tg3_flag(tp, ENABLE_ASF))
++ limit -= 4;
++ switch (limit) {
++ case 16:
++ tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
++ case 15:
++ tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
++ case 14:
++ tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
++ case 13:
++ tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
++ case 12:
++ tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
++ case 11:
++ tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
++ case 10:
++ tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
++ case 9:
++ tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
++ case 8:
++ tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
++ case 7:
++ tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
++ case 6:
++ tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
++ case 5:
++ tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
++ case 4:
++ /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
++ case 3:
++ /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
++ case 2:
++ case 1:
++
++ default:
++ break;
++ }
++
++ if (tg3_flag(tp, ENABLE_APE))
++ /* Write our heartbeat update interval to APE. */
++ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
++ APE_HOST_HEARTBEAT_INT_DISABLE);
++
++ tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
++
++ return 0;
++}
++
++/* Called at device open time to get the chip ready for
++ * packet processing. Invoked with tp->lock held.
++ */
++static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
++{
++ /* Chip may have been just powered on. If so, the boot code may still
++ * be running initialization. Wait for it to finish to avoid races in
++ * accessing the hardware.
++ */
++ tg3_enable_register_access(tp);
++ tg3_poll_fw(tp);
++
++ tg3_switch_clocks(tp);
++
++ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
++
++ return tg3_reset_hw(tp, reset_phy);
++}
++
++static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
++{
++ int i;
++
++ for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
++ u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
++
++ tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
++ off += len;
++
++ if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
++ !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
++ memset(ocir, 0, TG3_OCIR_LEN);
++ }
++}
++
++/* sysfs attributes for hwmon */
++static ssize_t tg3_show_temp(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ struct tg3 *tp = dev_get_drvdata(dev);
++ u32 temperature;
++
++ spin_lock_bh(&tp->lock);
++ tg3_ape_scratchpad_read(tp, &temperature, attr->index,
++ sizeof(temperature));
++ spin_unlock_bh(&tp->lock);
++ return sprintf(buf, "%u\n", temperature);
++}
++
++
++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
++ TG3_TEMP_SENSOR_OFFSET);
++static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
++ TG3_TEMP_CAUTION_OFFSET);
++static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
++ TG3_TEMP_MAX_OFFSET);
++
++static struct attribute *tg3_attrs[] = {
++ &sensor_dev_attr_temp1_input.dev_attr.attr,
++ &sensor_dev_attr_temp1_crit.dev_attr.attr,
++ &sensor_dev_attr_temp1_max.dev_attr.attr,
++ NULL
++};
++ATTRIBUTE_GROUPS(tg3);
++
++static void tg3_hwmon_close(struct tg3 *tp)
++{
++ if (tp->hwmon_dev) {
++ hwmon_device_unregister(tp->hwmon_dev);
++ tp->hwmon_dev = NULL;
++ }
++}
++
++static void tg3_hwmon_open(struct tg3 *tp)
++{
++ int i;
++ u32 size = 0;
++ struct pci_dev *pdev = tp->pdev;
++ struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
++
++ tg3_sd_scan_scratchpad(tp, ocirs);
++
++ for (i = 0; i < TG3_SD_NUM_RECS; i++) {
++ if (!ocirs[i].src_data_length)
++ continue;
++
++ size += ocirs[i].src_hdr_length;
++ size += ocirs[i].src_data_length;
++ }
++
++ if (!size)
++ return;
++
++ tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
++ tp, tg3_groups);
++ if (IS_ERR(tp->hwmon_dev)) {
++ tp->hwmon_dev = NULL;
++ dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
++ }
++}
++
++
++#define TG3_STAT_ADD32(PSTAT, REG) \
++do { u32 __val = tr32(REG); \
++ (PSTAT)->low += __val; \
++ if ((PSTAT)->low < __val) \
++ (PSTAT)->high += 1; \
++} while (0)
++
++static void tg3_periodic_fetch_stats(struct tg3 *tp)
++{
++ struct tg3_hw_stats *sp = tp->hw_stats;
++
++ if (!tp->link_up)
++ return;
++
++ TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
++ TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
++ TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
++ TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
++ TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
++ TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
++ TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
++ TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
++ TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
++ TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
++ TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
++ TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
++ TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
++ if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
++ (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
++ sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
++ u32 val;
++
++ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
++ val &= ~tg3_lso_rd_dma_workaround_bit(tp);
++ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
++ tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
++ }
++
++ TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
++ TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
++ TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
++ TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
++ TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
++ TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
++ TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
++ TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
++ TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
++ TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
++ TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
++ TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
++ TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
++ TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
++
++ TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
++ if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
++ tg3_asic_rev(tp) != ASIC_REV_5762 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
++ TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
++ } else {
++ u32 val = tr32(HOSTCC_FLOW_ATTN);
++ val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
++ if (val) {
++ tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
++ sp->rx_discards.low += val;
++ if (sp->rx_discards.low < val)
++ sp->rx_discards.high += 1;
++ }
++ sp->mbuf_lwm_thresh_hit = sp->rx_discards;
++ }
++ TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
++}
++
++static void tg3_chk_missed_msi(struct tg3 *tp)
++{
++ u32 i;
++
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ if (tg3_has_work(tnapi)) {
++ if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
++ tnapi->last_tx_cons == tnapi->tx_cons) {
++ if (tnapi->chk_msi_cnt < 1) {
++ tnapi->chk_msi_cnt++;
++ return;
++ }
++ tg3_msi(0, tnapi);
++ }
++ }
++ tnapi->chk_msi_cnt = 0;
++ tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
++ tnapi->last_tx_cons = tnapi->tx_cons;
++ }
++}
++
++static void tg3_timer(unsigned long __opaque)
++{
++ struct tg3 *tp = (struct tg3 *) __opaque;
++
++ if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
++ goto restart_timer;
++
++ spin_lock(&tp->lock);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_flag(tp, 57765_CLASS))
++ tg3_chk_missed_msi(tp);
++
++ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
++ /* BCM4785: Flush posted writes from GbE to host memory. */
++ tr32(HOSTCC_MODE);
++ }
++
++ if (!tg3_flag(tp, TAGGED_STATUS)) {
++ /* All of this garbage is because when using non-tagged
++ * IRQ status the mailbox/status_block protocol the chip
++ * uses with the cpu is race prone.
++ */
++ if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
++ tw32(GRC_LOCAL_CTRL,
++ tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
++ } else {
++ tw32(HOSTCC_MODE, tp->coalesce_mode |
++ HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
++ }
++
++ if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++ spin_unlock(&tp->lock);
++ tg3_reset_task_schedule(tp);
++ goto restart_timer;
++ }
++ }
++
++ /* This part only runs once per second. */
++ if (!--tp->timer_counter) {
++ if (tg3_flag(tp, 5705_PLUS))
++ tg3_periodic_fetch_stats(tp);
++
++ if (tp->setlpicnt && !--tp->setlpicnt)
++ tg3_phy_eee_enable(tp);
++
++ if (tg3_flag(tp, USE_LINKCHG_REG)) {
++ u32 mac_stat;
++ int phy_event;
++
++ mac_stat = tr32(MAC_STATUS);
++
++ phy_event = 0;
++ if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
++ if (mac_stat & MAC_STATUS_MI_INTERRUPT)
++ phy_event = 1;
++ } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
++ phy_event = 1;
++
++ if (phy_event)
++ tg3_setup_phy(tp, false);
++ } else if (tg3_flag(tp, POLL_SERDES)) {
++ u32 mac_stat = tr32(MAC_STATUS);
++ int need_setup = 0;
++
++ if (tp->link_up &&
++ (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
++ need_setup = 1;
++ }
++ if (!tp->link_up &&
++ (mac_stat & (MAC_STATUS_PCS_SYNCED |
++ MAC_STATUS_SIGNAL_DET))) {
++ need_setup = 1;
++ }
++ if (need_setup) {
++ if (!tp->serdes_counter) {
++ tw32_f(MAC_MODE,
++ (tp->mac_mode &
++ ~MAC_MODE_PORT_MODE_MASK));
++ udelay(40);
++ tw32_f(MAC_MODE, tp->mac_mode);
++ udelay(40);
++ }
++ tg3_setup_phy(tp, false);
++ }
++ } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
++ tg3_flag(tp, 5780_CLASS)) {
++ tg3_serdes_parallel_detect(tp);
++ } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
++ u32 cpmu = tr32(TG3_CPMU_STATUS);
++ bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
++ TG3_CPMU_STATUS_LINK_MASK);
++
++ if (link_up != tp->link_up)
++ tg3_setup_phy(tp, false);
++ }
++
++ tp->timer_counter = tp->timer_multiplier;
++ }
++
++ /* Heartbeat is only sent once every 2 seconds.
++ *
++ * The heartbeat is to tell the ASF firmware that the host
++ * driver is still alive. In the event that the OS crashes,
++ * ASF needs to reset the hardware to free up the FIFO space
++ * that may be filled with rx packets destined for the host.
++ * If the FIFO is full, ASF will no longer function properly.
++ *
++ * Unintended resets have been reported on real time kernels
++ * where the timer doesn't run on time. Netpoll will also have
++ * same problem.
++ *
++ * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
++ * to check the ring condition when the heartbeat is expiring
++ * before doing the reset. This will prevent most unintended
++ * resets.
++ */
++ if (!--tp->asf_counter) {
++ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
++ tg3_wait_for_event_ack(tp);
++
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
++ FWCMD_NICDRV_ALIVE3);
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
++ TG3_FW_UPDATE_TIMEOUT_SEC);
++
++ tg3_generate_fw_event(tp);
++ }
++ tp->asf_counter = tp->asf_multiplier;
++ }
++
++ spin_unlock(&tp->lock);
++
++restart_timer:
++ tp->timer.expires = jiffies + tp->timer_offset;
++ add_timer(&tp->timer);
++}
++
++static void tg3_timer_init(struct tg3 *tp)
++{
++ if (tg3_flag(tp, TAGGED_STATUS) &&
++ tg3_asic_rev(tp) != ASIC_REV_5717 &&
++ !tg3_flag(tp, 57765_CLASS))
++ tp->timer_offset = HZ;
++ else
++ tp->timer_offset = HZ / 10;
++
++ BUG_ON(tp->timer_offset > HZ);
++
++ tp->timer_multiplier = (HZ / tp->timer_offset);
++ tp->asf_multiplier = (HZ / tp->timer_offset) *
++ TG3_FW_UPDATE_FREQ_SEC;
++
++ init_timer(&tp->timer);
++ tp->timer.data = (unsigned long) tp;
++ tp->timer.function = tg3_timer;
++}
++
++static void tg3_timer_start(struct tg3 *tp)
++{
++ tp->asf_counter = tp->asf_multiplier;
++ tp->timer_counter = tp->timer_multiplier;
++
++ tp->timer.expires = jiffies + tp->timer_offset;
++ add_timer(&tp->timer);
++}
++
++static void tg3_timer_stop(struct tg3 *tp)
++{
++ del_timer_sync(&tp->timer);
++}
++
++/* Restart hardware after configuration changes, self-test, etc.
++ * Invoked with tp->lock held.
++ */
++static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
++ __releases(tp->lock)
++ __acquires(tp->lock)
++{
++ int err;
++
++ err = tg3_init_hw(tp, reset_phy);
++ if (err) {
++ netdev_err(tp->dev,
++ "Failed to re-initialize device, aborting\n");
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ tg3_full_unlock(tp);
++ tg3_timer_stop(tp);
++ tp->irq_sync = 0;
++ tg3_napi_enable(tp);
++ dev_close(tp->dev);
++ tg3_full_lock(tp, 0);
++ }
++ return err;
++}
++
++static void tg3_reset_task(struct work_struct *work)
++{
++ struct tg3 *tp = container_of(work, struct tg3, reset_task);
++ int err;
++
++ tg3_full_lock(tp, 0);
++
++ if (!netif_running(tp->dev)) {
++ tg3_flag_clear(tp, RESET_TASK_PENDING);
++ tg3_full_unlock(tp);
++ return;
++ }
++
++ tg3_full_unlock(tp);
++
++ tg3_phy_stop(tp);
++
++ tg3_netif_stop(tp);
++
++ tg3_full_lock(tp, 1);
++
++ if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
++ tp->write32_tx_mbox = tg3_write32_tx_mbox;
++ tp->write32_rx_mbox = tg3_write_flush_reg32;
++ tg3_flag_set(tp, MBOX_WRITE_REORDER);
++ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
++ }
++
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
++ err = tg3_init_hw(tp, true);
++ if (err)
++ goto out;
++
++ tg3_netif_start(tp);
++
++out:
++ tg3_full_unlock(tp);
++
++ if (!err)
++ tg3_phy_start(tp);
++
++ tg3_flag_clear(tp, RESET_TASK_PENDING);
++}
++
++static int tg3_request_irq(struct tg3 *tp, int irq_num)
++{
++ irq_handler_t fn;
++ unsigned long flags;
++ char *name;
++ struct tg3_napi *tnapi = &tp->napi[irq_num];
++
++ if (tp->irq_cnt == 1)
++ name = tp->dev->name;
++ else {
++ name = &tnapi->irq_lbl[0];
++ if (tnapi->tx_buffers && tnapi->rx_rcb)
++ snprintf(name, IFNAMSIZ,
++ "%s-txrx-%d", tp->dev->name, irq_num);
++ else if (tnapi->tx_buffers)
++ snprintf(name, IFNAMSIZ,
++ "%s-tx-%d", tp->dev->name, irq_num);
++ else if (tnapi->rx_rcb)
++ snprintf(name, IFNAMSIZ,
++ "%s-rx-%d", tp->dev->name, irq_num);
++ else
++ snprintf(name, IFNAMSIZ,
++ "%s-%d", tp->dev->name, irq_num);
++ name[IFNAMSIZ-1] = 0;
++ }
++
++ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
++ fn = tg3_msi;
++ if (tg3_flag(tp, 1SHOT_MSI))
++ fn = tg3_msi_1shot;
++ flags = 0;
++ } else {
++ fn = tg3_interrupt;
++ if (tg3_flag(tp, TAGGED_STATUS))
++ fn = tg3_interrupt_tagged;
++ flags = IRQF_SHARED;
++ }
++
++ return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
++}
++
++static int tg3_test_interrupt(struct tg3 *tp)
++{
++ struct tg3_napi *tnapi = &tp->napi[0];
++ struct net_device *dev = tp->dev;
++ int err, i, intr_ok = 0;
++ u32 val;
++
++ if (!netif_running(dev))
++ return -ENODEV;
++
++ tg3_disable_ints(tp);
++
++ free_irq(tnapi->irq_vec, tnapi);
++
++ /*
++ * Turn off MSI one shot mode. Otherwise this test has no
++ * observable way to know whether the interrupt was delivered.
++ */
++ if (tg3_flag(tp, 57765_PLUS)) {
++ val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
++ tw32(MSGINT_MODE, val);
++ }
++
++ err = request_irq(tnapi->irq_vec, tg3_test_isr,
++ IRQF_SHARED, dev->name, tnapi);
++ if (err)
++ return err;
++
++ tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
++ tg3_enable_ints(tp);
++
++ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
++ tnapi->coal_now);
++
++ for (i = 0; i < 5; i++) {
++ u32 int_mbox, misc_host_ctrl;
++
++ int_mbox = tr32_mailbox(tnapi->int_mbox);
++ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
++
++ if ((int_mbox != 0) ||
++ (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
++ intr_ok = 1;
++ break;
++ }
++
++ if (tg3_flag(tp, 57765_PLUS) &&
++ tnapi->hw_status->status_tag != tnapi->last_tag)
++ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
++
++ msleep(10);
++ }
++
++ tg3_disable_ints(tp);
++
++ free_irq(tnapi->irq_vec, tnapi);
++
++ err = tg3_request_irq(tp, 0);
++
++ if (err)
++ return err;
++
++ if (intr_ok) {
++ /* Reenable MSI one shot mode. */
++ if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
++ val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
++ tw32(MSGINT_MODE, val);
++ }
++ return 0;
++ }
++
++ return -EIO;
++}
++
++/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
++ * successfully restored
++ */
++static int tg3_test_msi(struct tg3 *tp)
++{
++ int err;
++ u16 pci_cmd;
++
++ if (!tg3_flag(tp, USING_MSI))
++ return 0;
++
++ /* Turn off SERR reporting in case MSI terminates with Master
++ * Abort.
++ */
++ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
++ pci_write_config_word(tp->pdev, PCI_COMMAND,
++ pci_cmd & ~PCI_COMMAND_SERR);
++
++ err = tg3_test_interrupt(tp);
++
++ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
++
++ if (!err)
++ return 0;
++
++ /* other failures */
++ if (err != -EIO)
++ return err;
++
++ /* MSI test failed, go back to INTx mode */
++ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
++ "to INTx mode. Please report this failure to the PCI "
++ "maintainer and include system chipset information\n");
++
++ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
++
++ pci_disable_msi(tp->pdev);
++
++ tg3_flag_clear(tp, USING_MSI);
++ tp->napi[0].irq_vec = tp->pdev->irq;
++
++ err = tg3_request_irq(tp, 0);
++ if (err)
++ return err;
++
++ /* Need to reset the chip because the MSI cycle may have terminated
++ * with Master Abort.
++ */
++ tg3_full_lock(tp, 1);
++
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ err = tg3_init_hw(tp, true);
++
++ tg3_full_unlock(tp);
++
++ if (err)
++ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
++
++ return err;
++}
++
++static int tg3_request_firmware(struct tg3 *tp)
++{
++ const struct tg3_firmware_hdr *fw_hdr;
++
++ if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
++ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
++ tp->fw_needed);
++ return -ENOENT;
++ }
++
++ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
++
++ /* Firmware blob starts with version numbers, followed by
++ * start address and _full_ length including BSS sections
++ * (which must be longer than the actual data, of course
++ */
++
++ tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
++ if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
++ netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
++ tp->fw_len, tp->fw_needed);
++ release_firmware(tp->fw);
++ tp->fw = NULL;
++ return -EINVAL;
++ }
++
++ /* We no longer need firmware; we have it. */
++ tp->fw_needed = NULL;
++ return 0;
++}
++
++static u32 tg3_irq_count(struct tg3 *tp)
++{
++ u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
++
++ if (irq_cnt > 1) {
++ /* We want as many rx rings enabled as there are cpus.
++ * In multiqueue MSI-X mode, the first MSI-X vector
++ * only deals with link interrupts, etc, so we add
++ * one to the number of vectors we are requesting.
++ */
++ irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
++ }
++
++ return irq_cnt;
++}
++
++static bool tg3_enable_msix(struct tg3 *tp)
++{
++ int i, rc;
++ struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
++
++ tp->txq_cnt = tp->txq_req;
++ tp->rxq_cnt = tp->rxq_req;
++ if (!tp->rxq_cnt)
++ tp->rxq_cnt = netif_get_num_default_rss_queues();
++ if (tp->rxq_cnt > tp->rxq_max)
++ tp->rxq_cnt = tp->rxq_max;
++
++ /* Disable multiple TX rings by default. Simple round-robin hardware
++ * scheduling of the TX rings can cause starvation of rings with
++ * small packets when other rings have TSO or jumbo packets.
++ */
++ if (!tp->txq_req)
++ tp->txq_cnt = 1;
++
++ tp->irq_cnt = tg3_irq_count(tp);
++
++ for (i = 0; i < tp->irq_max; i++) {
++ msix_ent[i].entry = i;
++ msix_ent[i].vector = 0;
++ }
++
++ rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
++ if (rc < 0) {
++ return false;
++ } else if (rc != 0) {
++ if (pci_enable_msix(tp->pdev, msix_ent, rc))
++ return false;
++ netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
++ tp->irq_cnt, rc);
++ tp->irq_cnt = rc;
++ tp->rxq_cnt = max(rc - 1, 1);
++ if (tp->txq_cnt)
++ tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
++ }
++
++ for (i = 0; i < tp->irq_max; i++)
++ tp->napi[i].irq_vec = msix_ent[i].vector;
++
++ if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
++ pci_disable_msix(tp->pdev);
++ return false;
++ }
++
++ if (tp->irq_cnt == 1)
++ return true;
++
++ tg3_flag_set(tp, ENABLE_RSS);
++
++ if (tp->txq_cnt > 1)
++ tg3_flag_set(tp, ENABLE_TSS);
++
++ netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
++
++ return true;
++}
++
++static void tg3_ints_init(struct tg3 *tp)
++{
++ if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
++ !tg3_flag(tp, TAGGED_STATUS)) {
++ /* All MSI supporting chips should support tagged
++ * status. Assert that this is the case.
++ */
++ netdev_warn(tp->dev,
++ "MSI without TAGGED_STATUS? Not using MSI\n");
++ goto defcfg;
++ }
++
++ if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
++ tg3_flag_set(tp, USING_MSIX);
++ else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
++ tg3_flag_set(tp, USING_MSI);
++
++ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
++ u32 msi_mode = tr32(MSGINT_MODE);
++ if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
++ msi_mode |= MSGINT_MODE_MULTIVEC_EN;
++ if (!tg3_flag(tp, 1SHOT_MSI))
++ msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
++ tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
++ }
++defcfg:
++ if (!tg3_flag(tp, USING_MSIX)) {
++ tp->irq_cnt = 1;
++ tp->napi[0].irq_vec = tp->pdev->irq;
++ }
++
++ if (tp->irq_cnt == 1) {
++ tp->txq_cnt = 1;
++ tp->rxq_cnt = 1;
++ netif_set_real_num_tx_queues(tp->dev, 1);
++ netif_set_real_num_rx_queues(tp->dev, 1);
++ }
++}
++
++static void tg3_ints_fini(struct tg3 *tp)
++{
++ if (tg3_flag(tp, USING_MSIX))
++ pci_disable_msix(tp->pdev);
++ else if (tg3_flag(tp, USING_MSI))
++ pci_disable_msi(tp->pdev);
++ tg3_flag_clear(tp, USING_MSI);
++ tg3_flag_clear(tp, USING_MSIX);
++ tg3_flag_clear(tp, ENABLE_RSS);
++ tg3_flag_clear(tp, ENABLE_TSS);
++}
++
++static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
++ bool init)
++{
++ struct net_device *dev = tp->dev;
++ int i, err;
++
++ /*
++ * Setup interrupts first so we know how
++ * many NAPI resources to allocate
++ */
++ tg3_ints_init(tp);
++
++ tg3_rss_check_indir_tbl(tp);
++
++ /* The placement of this call is tied
++ * to the setup and use of Host TX descriptors.
++ */
++ err = tg3_alloc_consistent(tp);
++ if (err)
++ goto out_ints_fini;
++
++ tg3_napi_init(tp);
++
++ tg3_napi_enable(tp);
++
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++ err = tg3_request_irq(tp, i);
++ if (err) {
++ for (i--; i >= 0; i--) {
++ tnapi = &tp->napi[i];
++ free_irq(tnapi->irq_vec, tnapi);
++ }
++ goto out_napi_fini;
++ }
++ }
++
++ tg3_full_lock(tp, 0);
++
++ if (init)
++ tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
++
++ err = tg3_init_hw(tp, reset_phy);
++ if (err) {
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ tg3_free_rings(tp);
++ }
++
++ tg3_full_unlock(tp);
++
++ if (err)
++ goto out_free_irq;
++
++ if (test_irq && tg3_flag(tp, USING_MSI)) {
++ err = tg3_test_msi(tp);
++
++ if (err) {
++ tg3_full_lock(tp, 0);
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ tg3_free_rings(tp);
++ tg3_full_unlock(tp);
++
++ goto out_napi_fini;
++ }
++
++ if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
++ u32 val = tr32(PCIE_TRANSACTION_CFG);
++
++ tw32(PCIE_TRANSACTION_CFG,
++ val | PCIE_TRANS_CFG_1SHOT_MSI);
++ }
++ }
++
++ tg3_phy_start(tp);
++
++ tg3_hwmon_open(tp);
++
++ tg3_full_lock(tp, 0);
++
++ tg3_timer_start(tp);
++ tg3_flag_set(tp, INIT_COMPLETE);
++ tg3_enable_ints(tp);
++
++ if (init)
++ tg3_ptp_init(tp);
++ else
++ tg3_ptp_resume(tp);
++
++
++ tg3_full_unlock(tp);
++
++ netif_tx_start_all_queues(dev);
++
++ /*
++ * Reset loopback feature if it was turned on while the device was down
++ * make sure that it's installed properly now.
++ */
++ if (dev->features & NETIF_F_LOOPBACK)
++ tg3_set_loopback(dev, dev->features);
++
++ return 0;
++
++out_free_irq:
++ for (i = tp->irq_cnt - 1; i >= 0; i--) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++ free_irq(tnapi->irq_vec, tnapi);
++ }
++
++out_napi_fini:
++ tg3_napi_disable(tp);
++ tg3_napi_fini(tp);
++ tg3_free_consistent(tp);
++
++out_ints_fini:
++ tg3_ints_fini(tp);
++
++ return err;
++}
++
++static void tg3_stop(struct tg3 *tp)
++{
++ int i;
++
++ tg3_reset_task_cancel(tp);
++ tg3_netif_stop(tp);
++
++ tg3_timer_stop(tp);
++
++ tg3_hwmon_close(tp);
++
++ tg3_phy_stop(tp);
++
++ tg3_full_lock(tp, 1);
++
++ tg3_disable_ints(tp);
++
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ tg3_free_rings(tp);
++ tg3_flag_clear(tp, INIT_COMPLETE);
++
++ tg3_full_unlock(tp);
++
++ for (i = tp->irq_cnt - 1; i >= 0; i--) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++ free_irq(tnapi->irq_vec, tnapi);
++ }
++
++ tg3_ints_fini(tp);
++
++ tg3_napi_fini(tp);
++
++ tg3_free_consistent(tp);
++}
++
++static int tg3_open(struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ int err;
++
++ if (tp->fw_needed) {
++ err = tg3_request_firmware(tp);
++ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
++ if (err) {
++ netdev_warn(tp->dev, "EEE capability disabled\n");
++ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
++ } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
++ netdev_warn(tp->dev, "EEE capability restored\n");
++ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
++ }
++ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
++ if (err)
++ return err;
++ } else if (err) {
++ netdev_warn(tp->dev, "TSO capability disabled\n");
++ tg3_flag_clear(tp, TSO_CAPABLE);
++ } else if (!tg3_flag(tp, TSO_CAPABLE)) {
++ netdev_notice(tp->dev, "TSO capability restored\n");
++ tg3_flag_set(tp, TSO_CAPABLE);
++ }
++ }
++
++ tg3_carrier_off(tp);
++
++ err = tg3_power_up(tp);
++ if (err)
++ return err;
++
++ tg3_full_lock(tp, 0);
++
++ tg3_disable_ints(tp);
++ tg3_flag_clear(tp, INIT_COMPLETE);
++
++ tg3_full_unlock(tp);
++
++ err = tg3_start(tp,
++ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
++ true, true);
++ if (err) {
++ tg3_frob_aux_power(tp, false);
++ pci_set_power_state(tp->pdev, PCI_D3hot);
++ }
++
++ if (tg3_flag(tp, PTP_CAPABLE)) {
++ tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
++ &tp->pdev->dev);
++ if (IS_ERR(tp->ptp_clock))
++ tp->ptp_clock = NULL;
++ }
++
++ return err;
++}
++
++static int tg3_close(struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ tg3_ptp_fini(tp);
++
++ tg3_stop(tp);
++
++ /* Clear stats across close / open calls */
++ memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
++ memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
++
++ if (pci_device_is_present(tp->pdev)) {
++ tg3_power_down_prepare(tp);
++
++ tg3_carrier_off(tp);
++ }
++ return 0;
++}
++
++static inline u64 get_stat64(tg3_stat64_t *val)
++{
++ return ((u64)val->high << 32) | ((u64)val->low);
++}
++
++static u64 tg3_calc_crc_errors(struct tg3 *tp)
++{
++ struct tg3_hw_stats *hw_stats = tp->hw_stats;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
++ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701)) {
++ u32 val;
++
++ if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
++ tg3_writephy(tp, MII_TG3_TEST1,
++ val | MII_TG3_TEST1_CRC_EN);
++ tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
++ } else
++ val = 0;
++
++ tp->phy_crc_errors += val;
++
++ return tp->phy_crc_errors;
++ }
++
++ return get_stat64(&hw_stats->rx_fcs_errors);
++}
++
++#define ESTAT_ADD(member) \
++ estats->member = old_estats->member + \
++ get_stat64(&hw_stats->member)
++
++static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
++{
++ struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
++ struct tg3_hw_stats *hw_stats = tp->hw_stats;
++
++ ESTAT_ADD(rx_octets);
++ ESTAT_ADD(rx_fragments);
++ ESTAT_ADD(rx_ucast_packets);
++ ESTAT_ADD(rx_mcast_packets);
++ ESTAT_ADD(rx_bcast_packets);
++ ESTAT_ADD(rx_fcs_errors);
++ ESTAT_ADD(rx_align_errors);
++ ESTAT_ADD(rx_xon_pause_rcvd);
++ ESTAT_ADD(rx_xoff_pause_rcvd);
++ ESTAT_ADD(rx_mac_ctrl_rcvd);
++ ESTAT_ADD(rx_xoff_entered);
++ ESTAT_ADD(rx_frame_too_long_errors);
++ ESTAT_ADD(rx_jabbers);
++ ESTAT_ADD(rx_undersize_packets);
++ ESTAT_ADD(rx_in_length_errors);
++ ESTAT_ADD(rx_out_length_errors);
++ ESTAT_ADD(rx_64_or_less_octet_packets);
++ ESTAT_ADD(rx_65_to_127_octet_packets);
++ ESTAT_ADD(rx_128_to_255_octet_packets);
++ ESTAT_ADD(rx_256_to_511_octet_packets);
++ ESTAT_ADD(rx_512_to_1023_octet_packets);
++ ESTAT_ADD(rx_1024_to_1522_octet_packets);
++ ESTAT_ADD(rx_1523_to_2047_octet_packets);
++ ESTAT_ADD(rx_2048_to_4095_octet_packets);
++ ESTAT_ADD(rx_4096_to_8191_octet_packets);
++ ESTAT_ADD(rx_8192_to_9022_octet_packets);
++
++ ESTAT_ADD(tx_octets);
++ ESTAT_ADD(tx_collisions);
++ ESTAT_ADD(tx_xon_sent);
++ ESTAT_ADD(tx_xoff_sent);
++ ESTAT_ADD(tx_flow_control);
++ ESTAT_ADD(tx_mac_errors);
++ ESTAT_ADD(tx_single_collisions);
++ ESTAT_ADD(tx_mult_collisions);
++ ESTAT_ADD(tx_deferred);
++ ESTAT_ADD(tx_excessive_collisions);
++ ESTAT_ADD(tx_late_collisions);
++ ESTAT_ADD(tx_collide_2times);
++ ESTAT_ADD(tx_collide_3times);
++ ESTAT_ADD(tx_collide_4times);
++ ESTAT_ADD(tx_collide_5times);
++ ESTAT_ADD(tx_collide_6times);
++ ESTAT_ADD(tx_collide_7times);
++ ESTAT_ADD(tx_collide_8times);
++ ESTAT_ADD(tx_collide_9times);
++ ESTAT_ADD(tx_collide_10times);
++ ESTAT_ADD(tx_collide_11times);
++ ESTAT_ADD(tx_collide_12times);
++ ESTAT_ADD(tx_collide_13times);
++ ESTAT_ADD(tx_collide_14times);
++ ESTAT_ADD(tx_collide_15times);
++ ESTAT_ADD(tx_ucast_packets);
++ ESTAT_ADD(tx_mcast_packets);
++ ESTAT_ADD(tx_bcast_packets);
++ ESTAT_ADD(tx_carrier_sense_errors);
++ ESTAT_ADD(tx_discards);
++ ESTAT_ADD(tx_errors);
++
++ ESTAT_ADD(dma_writeq_full);
++ ESTAT_ADD(dma_write_prioq_full);
++ ESTAT_ADD(rxbds_empty);
++ ESTAT_ADD(rx_discards);
++ ESTAT_ADD(rx_errors);
++ ESTAT_ADD(rx_threshold_hit);
++
++ ESTAT_ADD(dma_readq_full);
++ ESTAT_ADD(dma_read_prioq_full);
++ ESTAT_ADD(tx_comp_queue_full);
++
++ ESTAT_ADD(ring_set_send_prod_index);
++ ESTAT_ADD(ring_status_update);
++ ESTAT_ADD(nic_irqs);
++ ESTAT_ADD(nic_avoided_irqs);
++ ESTAT_ADD(nic_tx_threshold_hit);
++
++ ESTAT_ADD(mbuf_lwm_thresh_hit);
++}
++
++static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
++{
++ struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
++ struct tg3_hw_stats *hw_stats = tp->hw_stats;
++
++ stats->rx_packets = old_stats->rx_packets +
++ get_stat64(&hw_stats->rx_ucast_packets) +
++ get_stat64(&hw_stats->rx_mcast_packets) +
++ get_stat64(&hw_stats->rx_bcast_packets);
++
++ stats->tx_packets = old_stats->tx_packets +
++ get_stat64(&hw_stats->tx_ucast_packets) +
++ get_stat64(&hw_stats->tx_mcast_packets) +
++ get_stat64(&hw_stats->tx_bcast_packets);
++
++ stats->rx_bytes = old_stats->rx_bytes +
++ get_stat64(&hw_stats->rx_octets);
++ stats->tx_bytes = old_stats->tx_bytes +
++ get_stat64(&hw_stats->tx_octets);
++
++ stats->rx_errors = old_stats->rx_errors +
++ get_stat64(&hw_stats->rx_errors);
++ stats->tx_errors = old_stats->tx_errors +
++ get_stat64(&hw_stats->tx_errors) +
++ get_stat64(&hw_stats->tx_mac_errors) +
++ get_stat64(&hw_stats->tx_carrier_sense_errors) +
++ get_stat64(&hw_stats->tx_discards);
++
++ stats->multicast = old_stats->multicast +
++ get_stat64(&hw_stats->rx_mcast_packets);
++ stats->collisions = old_stats->collisions +
++ get_stat64(&hw_stats->tx_collisions);
++
++ stats->rx_length_errors = old_stats->rx_length_errors +
++ get_stat64(&hw_stats->rx_frame_too_long_errors) +
++ get_stat64(&hw_stats->rx_undersize_packets);
++
++ stats->rx_frame_errors = old_stats->rx_frame_errors +
++ get_stat64(&hw_stats->rx_align_errors);
++ stats->tx_aborted_errors = old_stats->tx_aborted_errors +
++ get_stat64(&hw_stats->tx_discards);
++ stats->tx_carrier_errors = old_stats->tx_carrier_errors +
++ get_stat64(&hw_stats->tx_carrier_sense_errors);
++
++ stats->rx_crc_errors = old_stats->rx_crc_errors +
++ tg3_calc_crc_errors(tp);
++
++ stats->rx_missed_errors = old_stats->rx_missed_errors +
++ get_stat64(&hw_stats->rx_discards);
++
++ stats->rx_dropped = tp->rx_dropped;
++ stats->tx_dropped = tp->tx_dropped;
++}
++
++static int tg3_get_regs_len(struct net_device *dev)
++{
++ return TG3_REG_BLK_SIZE;
++}
++
++static void tg3_get_regs(struct net_device *dev,
++ struct ethtool_regs *regs, void *_p)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ regs->version = 0;
++
++ memset(_p, 0, TG3_REG_BLK_SIZE);
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
++ return;
++
++ tg3_full_lock(tp, 0);
++
++ tg3_dump_legacy_regs(tp, (u32 *)_p);
++
++ tg3_full_unlock(tp);
++}
++
++static int tg3_get_eeprom_len(struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ return tp->nvram_size;
++}
++
++static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ int ret;
++ u8 *pd;
++ u32 i, offset, len, b_offset, b_count;
++ __be32 val;
++
++ if (tg3_flag(tp, NO_NVRAM))
++ return -EINVAL;
++
++ offset = eeprom->offset;
++ len = eeprom->len;
++ eeprom->len = 0;
++
++ eeprom->magic = TG3_EEPROM_MAGIC;
++
++ if (offset & 3) {
++ /* adjustments to start on required 4 byte boundary */
++ b_offset = offset & 3;
++ b_count = 4 - b_offset;
++ if (b_count > len) {
++ /* i.e. offset=1 len=2 */
++ b_count = len;
++ }
++ ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
++ if (ret)
++ return ret;
++ memcpy(data, ((char *)&val) + b_offset, b_count);
++ len -= b_count;
++ offset += b_count;
++ eeprom->len += b_count;
++ }
++
++ /* read bytes up to the last 4 byte boundary */
++ pd = &data[eeprom->len];
++ for (i = 0; i < (len - (len & 3)); i += 4) {
++ ret = tg3_nvram_read_be32(tp, offset + i, &val);
++ if (ret) {
++ eeprom->len += i;
++ return ret;
++ }
++ memcpy(pd + i, &val, 4);
++ }
++ eeprom->len += i;
++
++ if (len & 3) {
++ /* read last bytes not ending on 4 byte boundary */
++ pd = &data[eeprom->len];
++ b_count = len & 3;
++ b_offset = offset + len - b_count;
++ ret = tg3_nvram_read_be32(tp, b_offset, &val);
++ if (ret)
++ return ret;
++ memcpy(pd, &val, b_count);
++ eeprom->len += b_count;
++ }
++ return 0;
++}
++
++static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ int ret;
++ u32 offset, len, b_offset, odd_len;
++ u8 *buf;
++ __be32 start, end;
++
++ if (tg3_flag(tp, NO_NVRAM) ||
++ eeprom->magic != TG3_EEPROM_MAGIC)
++ return -EINVAL;
++
++ offset = eeprom->offset;
++ len = eeprom->len;
++
++ if ((b_offset = (offset & 3))) {
++ /* adjustments to start on required 4 byte boundary */
++ ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
++ if (ret)
++ return ret;
++ len += b_offset;
++ offset &= ~3;
++ if (len < 4)
++ len = 4;
++ }
++
++ odd_len = 0;
++ if (len & 3) {
++ /* adjustments to end on required 4 byte boundary */
++ odd_len = 1;
++ len = (len + 3) & ~3;
++ ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
++ if (ret)
++ return ret;
++ }
++
++ buf = data;
++ if (b_offset || odd_len) {
++ buf = kmalloc(len, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++ if (b_offset)
++ memcpy(buf, &start, 4);
++ if (odd_len)
++ memcpy(buf+len-4, &end, 4);
++ memcpy(buf + b_offset, data, eeprom->len);
++ }
++
++ ret = tg3_nvram_write_block(tp, offset, len, buf);
++
++ if (buf != data)
++ kfree(buf);
++
++ return ret;
++}
++
++static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (tg3_flag(tp, USE_PHYLIB)) {
++ struct phy_device *phydev;
++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
++ return -EAGAIN;
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++ return phy_ethtool_gset(phydev, cmd);
++ }
++
++ cmd->supported = (SUPPORTED_Autoneg);
++
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
++ cmd->supported |= (SUPPORTED_1000baseT_Half |
++ SUPPORTED_1000baseT_Full);
++
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
++ cmd->supported |= (SUPPORTED_100baseT_Half |
++ SUPPORTED_100baseT_Full |
++ SUPPORTED_10baseT_Half |
++ SUPPORTED_10baseT_Full |
++ SUPPORTED_TP);
++ cmd->port = PORT_TP;
++ } else {
++ cmd->supported |= SUPPORTED_FIBRE;
++ cmd->port = PORT_FIBRE;
++ }
++
++ cmd->advertising = tp->link_config.advertising;
++ if (tg3_flag(tp, PAUSE_AUTONEG)) {
++ if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
++ if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
++ cmd->advertising |= ADVERTISED_Pause;
++ } else {
++ cmd->advertising |= ADVERTISED_Pause |
++ ADVERTISED_Asym_Pause;
++ }
++ } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
++ cmd->advertising |= ADVERTISED_Asym_Pause;
++ }
++ }
++ if (netif_running(dev) && tp->link_up) {
++ ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
++ cmd->duplex = tp->link_config.active_duplex;
++ cmd->lp_advertising = tp->link_config.rmt_adv;
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
++ if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
++ cmd->eth_tp_mdix = ETH_TP_MDI_X;
++ else
++ cmd->eth_tp_mdix = ETH_TP_MDI;
++ }
++ } else {
++ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
++ cmd->duplex = DUPLEX_UNKNOWN;
++ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
++ }
++ cmd->phy_address = tp->phy_addr;
++ cmd->transceiver = XCVR_INTERNAL;
++ cmd->autoneg = tp->link_config.autoneg;
++ cmd->maxtxpkt = 0;
++ cmd->maxrxpkt = 0;
++ return 0;
++}
++
++static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ u32 speed = ethtool_cmd_speed(cmd);
++
++ if (tg3_flag(tp, USE_PHYLIB)) {
++ struct phy_device *phydev;
++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
++ return -EAGAIN;
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++ return phy_ethtool_sset(phydev, cmd);
++ }
++
++ if (cmd->autoneg != AUTONEG_ENABLE &&
++ cmd->autoneg != AUTONEG_DISABLE)
++ return -EINVAL;
++
++ if (cmd->autoneg == AUTONEG_DISABLE &&
++ cmd->duplex != DUPLEX_FULL &&
++ cmd->duplex != DUPLEX_HALF)
++ return -EINVAL;
++
++ if (cmd->autoneg == AUTONEG_ENABLE) {
++ u32 mask = ADVERTISED_Autoneg |
++ ADVERTISED_Pause |
++ ADVERTISED_Asym_Pause;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
++ mask |= ADVERTISED_1000baseT_Half |
++ ADVERTISED_1000baseT_Full;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
++ mask |= ADVERTISED_100baseT_Half |
++ ADVERTISED_100baseT_Full |
++ ADVERTISED_10baseT_Half |
++ ADVERTISED_10baseT_Full |
++ ADVERTISED_TP;
++ else
++ mask |= ADVERTISED_FIBRE;
++
++ if (cmd->advertising & ~mask)
++ return -EINVAL;
++
++ mask &= (ADVERTISED_1000baseT_Half |
++ ADVERTISED_1000baseT_Full |
++ ADVERTISED_100baseT_Half |
++ ADVERTISED_100baseT_Full |
++ ADVERTISED_10baseT_Half |
++ ADVERTISED_10baseT_Full);
++
++ cmd->advertising &= mask;
++ } else {
++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
++ if (speed != SPEED_1000)
++ return -EINVAL;
++
++ if (cmd->duplex != DUPLEX_FULL)
++ return -EINVAL;
++ } else {
++ if (speed != SPEED_100 &&
++ speed != SPEED_10)
++ return -EINVAL;
++ }
++ }
++
++ tg3_full_lock(tp, 0);
++
++ tp->link_config.autoneg = cmd->autoneg;
++ if (cmd->autoneg == AUTONEG_ENABLE) {
++ tp->link_config.advertising = (cmd->advertising |
++ ADVERTISED_Autoneg);
++ tp->link_config.speed = SPEED_UNKNOWN;
++ tp->link_config.duplex = DUPLEX_UNKNOWN;
++ } else {
++ tp->link_config.advertising = 0;
++ tp->link_config.speed = speed;
++ tp->link_config.duplex = cmd->duplex;
++ }
++
++ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
++
++ tg3_warn_mgmt_link_flap(tp);
++
++ if (netif_running(dev))
++ tg3_setup_phy(tp, true);
++
++ tg3_full_unlock(tp);
++
++ return 0;
++}
++
++static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
++ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
++ strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
++ strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
++}
++
++static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
++ wol->supported = WAKE_MAGIC;
++ else
++ wol->supported = 0;
++ wol->wolopts = 0;
++ if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
++ wol->wolopts = WAKE_MAGIC;
++ memset(&wol->sopass, 0, sizeof(wol->sopass));
++}
++
++static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ struct device *dp = &tp->pdev->dev;
++
++ if (wol->wolopts & ~WAKE_MAGIC)
++ return -EINVAL;
++ if ((wol->wolopts & WAKE_MAGIC) &&
++ !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
++ return -EINVAL;
++
++ device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
++
++ if (device_may_wakeup(dp))
++ tg3_flag_set(tp, WOL_ENABLE);
++ else
++ tg3_flag_clear(tp, WOL_ENABLE);
++
++ return 0;
++}
++
++static u32 tg3_get_msglevel(struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ return tp->msg_enable;
++}
++
++static void tg3_set_msglevel(struct net_device *dev, u32 value)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ tp->msg_enable = value;
++}
++
++static int tg3_nway_reset(struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ int r;
++
++ if (!netif_running(dev))
++ return -EAGAIN;
++
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
++ return -EINVAL;
++
++ tg3_warn_mgmt_link_flap(tp);
++
++ if (tg3_flag(tp, USE_PHYLIB)) {
++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
++ return -EAGAIN;
++ r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
++ } else {
++ u32 bmcr;
++
++ spin_lock_bh(&tp->lock);
++ r = -EINVAL;
++ tg3_readphy(tp, MII_BMCR, &bmcr);
++ if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
++ ((bmcr & BMCR_ANENABLE) ||
++ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
++ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
++ BMCR_ANENABLE);
++ r = 0;
++ }
++ spin_unlock_bh(&tp->lock);
++ }
++
++ return r;
++}
++
++static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ ering->rx_max_pending = tp->rx_std_ring_mask;
++ if (tg3_flag(tp, JUMBO_RING_ENABLE))
++ ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
++ else
++ ering->rx_jumbo_max_pending = 0;
++
++ ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
++
++ ering->rx_pending = tp->rx_pending;
++ if (tg3_flag(tp, JUMBO_RING_ENABLE))
++ ering->rx_jumbo_pending = tp->rx_jumbo_pending;
++ else
++ ering->rx_jumbo_pending = 0;
++
++ ering->tx_pending = tp->napi[0].tx_pending;
++}
++
++static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ int i, irq_sync = 0, err = 0;
++
++ if ((ering->rx_pending > tp->rx_std_ring_mask) ||
++ (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
++ (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
++ (ering->tx_pending <= MAX_SKB_FRAGS) ||
++ (tg3_flag(tp, TSO_BUG) &&
++ (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
++ return -EINVAL;
++
++ if (netif_running(dev)) {
++ tg3_phy_stop(tp);
++ tg3_netif_stop(tp);
++ irq_sync = 1;
++ }
++
++ tg3_full_lock(tp, irq_sync);
++
++ tp->rx_pending = ering->rx_pending;
++
++ if (tg3_flag(tp, MAX_RXPEND_64) &&
++ tp->rx_pending > 63)
++ tp->rx_pending = 63;
++
++ if (tg3_flag(tp, JUMBO_RING_ENABLE))
++ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
++
++ for (i = 0; i < tp->irq_max; i++)
++ tp->napi[i].tx_pending = ering->tx_pending;
++
++ if (netif_running(dev)) {
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ err = tg3_restart_hw(tp, false);
++ if (!err)
++ tg3_netif_start(tp);
++ }
++
++ tg3_full_unlock(tp);
++
++ if (irq_sync && !err)
++ tg3_phy_start(tp);
++
++ return err;
++}
++
++static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
++
++ if (tp->link_config.flowctrl & FLOW_CTRL_RX)
++ epause->rx_pause = 1;
++ else
++ epause->rx_pause = 0;
++
++ if (tp->link_config.flowctrl & FLOW_CTRL_TX)
++ epause->tx_pause = 1;
++ else
++ epause->tx_pause = 0;
++}
++
++static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ int err = 0;
++
++ if (tp->link_config.autoneg == AUTONEG_ENABLE)
++ tg3_warn_mgmt_link_flap(tp);
++
++ if (tg3_flag(tp, USE_PHYLIB)) {
++ u32 newadv;
++ struct phy_device *phydev;
++
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++
++ if (!(phydev->supported & SUPPORTED_Pause) ||
++ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
++ (epause->rx_pause != epause->tx_pause)))
++ return -EINVAL;
++
++ tp->link_config.flowctrl = 0;
++ if (epause->rx_pause) {
++ tp->link_config.flowctrl |= FLOW_CTRL_RX;
++
++ if (epause->tx_pause) {
++ tp->link_config.flowctrl |= FLOW_CTRL_TX;
++ newadv = ADVERTISED_Pause;
++ } else
++ newadv = ADVERTISED_Pause |
++ ADVERTISED_Asym_Pause;
++ } else if (epause->tx_pause) {
++ tp->link_config.flowctrl |= FLOW_CTRL_TX;
++ newadv = ADVERTISED_Asym_Pause;
++ } else
++ newadv = 0;
++
++ if (epause->autoneg)
++ tg3_flag_set(tp, PAUSE_AUTONEG);
++ else
++ tg3_flag_clear(tp, PAUSE_AUTONEG);
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
++ u32 oldadv = phydev->advertising &
++ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
++ if (oldadv != newadv) {
++ phydev->advertising &=
++ ~(ADVERTISED_Pause |
++ ADVERTISED_Asym_Pause);
++ phydev->advertising |= newadv;
++ if (phydev->autoneg) {
++ /*
++ * Always renegotiate the link to
++ * inform our link partner of our
++ * flow control settings, even if the
++ * flow control is forced. Let
++ * tg3_adjust_link() do the final
++ * flow control setup.
++ */
++ return phy_start_aneg(phydev);
++ }
++ }
++
++ if (!epause->autoneg)
++ tg3_setup_flow_control(tp, 0, 0);
++ } else {
++ tp->link_config.advertising &=
++ ~(ADVERTISED_Pause |
++ ADVERTISED_Asym_Pause);
++ tp->link_config.advertising |= newadv;
++ }
++ } else {
++ int irq_sync = 0;
++
++ if (netif_running(dev)) {
++ tg3_netif_stop(tp);
++ irq_sync = 1;
++ }
++
++ tg3_full_lock(tp, irq_sync);
++
++ if (epause->autoneg)
++ tg3_flag_set(tp, PAUSE_AUTONEG);
++ else
++ tg3_flag_clear(tp, PAUSE_AUTONEG);
++ if (epause->rx_pause)
++ tp->link_config.flowctrl |= FLOW_CTRL_RX;
++ else
++ tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
++ if (epause->tx_pause)
++ tp->link_config.flowctrl |= FLOW_CTRL_TX;
++ else
++ tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
++
++ if (netif_running(dev)) {
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ err = tg3_restart_hw(tp, false);
++ if (!err)
++ tg3_netif_start(tp);
++ }
++
++ tg3_full_unlock(tp);
++ }
++
++ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
++
++ return err;
++}
++
++static int tg3_get_sset_count(struct net_device *dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_TEST:
++ return TG3_NUM_TEST;
++ case ETH_SS_STATS:
++ return TG3_NUM_STATS;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
++ u32 *rules __always_unused)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (!tg3_flag(tp, SUPPORT_MSIX))
++ return -EOPNOTSUPP;
++
++ switch (info->cmd) {
++ case ETHTOOL_GRXRINGS:
++ if (netif_running(tp->dev))
++ info->data = tp->rxq_cnt;
++ else {
++ info->data = num_online_cpus();
++ if (info->data > TG3_RSS_MAX_NUM_QS)
++ info->data = TG3_RSS_MAX_NUM_QS;
++ }
++
++ /* The first interrupt vector only
++ * handles link interrupts.
++ */
++ info->data -= 1;
++ return 0;
++
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
++{
++ u32 size = 0;
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (tg3_flag(tp, SUPPORT_MSIX))
++ size = TG3_RSS_INDIR_TBL_SIZE;
++
++ return size;
++}
++
++static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ int i;
++
++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
++ indir[i] = tp->rss_ind_tbl[i];
++
++ return 0;
++}
++
++static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ size_t i;
++
++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
++ tp->rss_ind_tbl[i] = indir[i];
++
++ if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
++ return 0;
++
++ /* It is legal to write the indirection
++ * table while the device is running.
++ */
++ tg3_full_lock(tp, 0);
++ tg3_rss_write_indir_tbl(tp);
++ tg3_full_unlock(tp);
++
++ return 0;
++}
++
++static void tg3_get_channels(struct net_device *dev,
++ struct ethtool_channels *channel)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ u32 deflt_qs = netif_get_num_default_rss_queues();
++
++ channel->max_rx = tp->rxq_max;
++ channel->max_tx = tp->txq_max;
++
++ if (netif_running(dev)) {
++ channel->rx_count = tp->rxq_cnt;
++ channel->tx_count = tp->txq_cnt;
++ } else {
++ if (tp->rxq_req)
++ channel->rx_count = tp->rxq_req;
++ else
++ channel->rx_count = min(deflt_qs, tp->rxq_max);
++
++ if (tp->txq_req)
++ channel->tx_count = tp->txq_req;
++ else
++ channel->tx_count = min(deflt_qs, tp->txq_max);
++ }
++}
++
++static int tg3_set_channels(struct net_device *dev,
++ struct ethtool_channels *channel)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (!tg3_flag(tp, SUPPORT_MSIX))
++ return -EOPNOTSUPP;
++
++ if (channel->rx_count > tp->rxq_max ||
++ channel->tx_count > tp->txq_max)
++ return -EINVAL;
++
++ tp->rxq_req = channel->rx_count;
++ tp->txq_req = channel->tx_count;
++
++ if (!netif_running(dev))
++ return 0;
++
++ tg3_stop(tp);
++
++ tg3_carrier_off(tp);
++
++ tg3_start(tp, true, false, false);
++
++ return 0;
++}
++
++static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
++{
++ switch (stringset) {
++ case ETH_SS_STATS:
++ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
++ break;
++ case ETH_SS_TEST:
++ memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
++ break;
++ default:
++ WARN_ON(1); /* we need a WARN() */
++ break;
++ }
++}
++
++static int tg3_set_phys_id(struct net_device *dev,
++ enum ethtool_phys_id_state state)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (!netif_running(tp->dev))
++ return -EAGAIN;
++
++ switch (state) {
++ case ETHTOOL_ID_ACTIVE:
++ return 1; /* cycle on/off once per second */
++
++ case ETHTOOL_ID_ON:
++ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
++ LED_CTRL_1000MBPS_ON |
++ LED_CTRL_100MBPS_ON |
++ LED_CTRL_10MBPS_ON |
++ LED_CTRL_TRAFFIC_OVERRIDE |
++ LED_CTRL_TRAFFIC_BLINK |
++ LED_CTRL_TRAFFIC_LED);
++ break;
++
++ case ETHTOOL_ID_OFF:
++ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
++ LED_CTRL_TRAFFIC_OVERRIDE);
++ break;
++
++ case ETHTOOL_ID_INACTIVE:
++ tw32(MAC_LED_CTRL, tp->led_ctrl);
++ break;
++ }
++
++ return 0;
++}
++
++static void tg3_get_ethtool_stats(struct net_device *dev,
++ struct ethtool_stats *estats, u64 *tmp_stats)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (tp->hw_stats)
++ tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
++ else
++ memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
++}
++
++static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
++{
++ int i;
++ __be32 *buf;
++ u32 offset = 0, len = 0;
++ u32 magic, val;
++
++ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
++ return NULL;
++
++ if (magic == TG3_EEPROM_MAGIC) {
++ for (offset = TG3_NVM_DIR_START;
++ offset < TG3_NVM_DIR_END;
++ offset += TG3_NVM_DIRENT_SIZE) {
++ if (tg3_nvram_read(tp, offset, &val))
++ return NULL;
++
++ if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
++ TG3_NVM_DIRTYPE_EXTVPD)
++ break;
++ }
++
++ if (offset != TG3_NVM_DIR_END) {
++ len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
++ if (tg3_nvram_read(tp, offset + 4, &offset))
++ return NULL;
++
++ offset = tg3_nvram_logical_addr(tp, offset);
++ }
++ }
++
++ if (!offset || !len) {
++ offset = TG3_NVM_VPD_OFF;
++ len = TG3_NVM_VPD_LEN;
++ }
++
++ buf = kmalloc(len, GFP_KERNEL);
++ if (buf == NULL)
++ return NULL;
++
++ if (magic == TG3_EEPROM_MAGIC) {
++ for (i = 0; i < len; i += 4) {
++ /* The data is in little-endian format in NVRAM.
++ * Use the big-endian read routines to preserve
++ * the byte order as it exists in NVRAM.
++ */
++ if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
++ goto error;
++ }
++ } else {
++ u8 *ptr;
++ ssize_t cnt;
++ unsigned int pos = 0;
++
++ ptr = (u8 *)&buf[0];
++ for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
++ cnt = pci_read_vpd(tp->pdev, pos,
++ len - pos, ptr);
++ if (cnt == -ETIMEDOUT || cnt == -EINTR)
++ cnt = 0;
++ else if (cnt < 0)
++ goto error;
++ }
++ if (pos != len)
++ goto error;
++ }
++
++ *vpdlen = len;
++
++ return buf;
++
++error:
++ kfree(buf);
++ return NULL;
++}
++
++#define NVRAM_TEST_SIZE 0x100
++#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
++#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
++#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
++#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
++#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
++#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
++#define NVRAM_SELFBOOT_HW_SIZE 0x20
++#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
++
++static int tg3_test_nvram(struct tg3 *tp)
++{
++ u32 csum, magic, len;
++ __be32 *buf;
++ int i, j, k, err = 0, size;
++
++ if (tg3_flag(tp, NO_NVRAM))
++ return 0;
++
++ if (tg3_nvram_read(tp, 0, &magic) != 0)
++ return -EIO;
++
++ if (magic == TG3_EEPROM_MAGIC)
++ size = NVRAM_TEST_SIZE;
++ else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
++ if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
++ TG3_EEPROM_SB_FORMAT_1) {
++ switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
++ case TG3_EEPROM_SB_REVISION_0:
++ size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
++ break;
++ case TG3_EEPROM_SB_REVISION_2:
++ size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
++ break;
++ case TG3_EEPROM_SB_REVISION_3:
++ size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
++ break;
++ case TG3_EEPROM_SB_REVISION_4:
++ size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
++ break;
++ case TG3_EEPROM_SB_REVISION_5:
++ size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
++ break;
++ case TG3_EEPROM_SB_REVISION_6:
++ size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
++ break;
++ default:
++ return -EIO;
++ }
++ } else
++ return 0;
++ } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
++ size = NVRAM_SELFBOOT_HW_SIZE;
++ else
++ return -EIO;
++
++ buf = kmalloc(size, GFP_KERNEL);
++ if (buf == NULL)
++ return -ENOMEM;
++
++ err = -EIO;
++ for (i = 0, j = 0; i < size; i += 4, j++) {
++ err = tg3_nvram_read_be32(tp, i, &buf[j]);
++ if (err)
++ break;
++ }
++ if (i < size)
++ goto out;
++
++ /* Selfboot format */
++ magic = be32_to_cpu(buf[0]);
++ if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
++ TG3_EEPROM_MAGIC_FW) {
++ u8 *buf8 = (u8 *) buf, csum8 = 0;
++
++ if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
++ TG3_EEPROM_SB_REVISION_2) {
++ /* For rev 2, the csum doesn't include the MBA. */
++ for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
++ csum8 += buf8[i];
++ for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
++ csum8 += buf8[i];
++ } else {
++ for (i = 0; i < size; i++)
++ csum8 += buf8[i];
++ }
++
++ if (csum8 == 0) {
++ err = 0;
++ goto out;
++ }
++
++ err = -EIO;
++ goto out;
++ }
++
++ if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
++ TG3_EEPROM_MAGIC_HW) {
++ u8 data[NVRAM_SELFBOOT_DATA_SIZE];
++ u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
++ u8 *buf8 = (u8 *) buf;
++
++ /* Separate the parity bits and the data bytes. */
++ for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
++ if ((i == 0) || (i == 8)) {
++ int l;
++ u8 msk;
++
++ for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
++ parity[k++] = buf8[i] & msk;
++ i++;
++ } else if (i == 16) {
++ int l;
++ u8 msk;
++
++ for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
++ parity[k++] = buf8[i] & msk;
++ i++;
++
++ for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
++ parity[k++] = buf8[i] & msk;
++ i++;
++ }
++ data[j++] = buf8[i];
++ }
++
++ err = -EIO;
++ for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
++ u8 hw8 = hweight8(data[i]);
++
++ if ((hw8 & 0x1) && parity[i])
++ goto out;
++ else if (!(hw8 & 0x1) && !parity[i])
++ goto out;
++ }
++ err = 0;
++ goto out;
++ }
++
++ err = -EIO;
++
++ /* Bootstrap checksum at offset 0x10 */
++ csum = calc_crc((unsigned char *) buf, 0x10);
++ if (csum != le32_to_cpu(buf[0x10/4]))
++ goto out;
++
++ /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
++ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
++ if (csum != le32_to_cpu(buf[0xfc/4]))
++ goto out;
++
++ kfree(buf);
++
++ buf = tg3_vpd_readblock(tp, &len);
++ if (!buf)
++ return -ENOMEM;
++
++ i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
++ if (i > 0) {
++ j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
++ if (j < 0)
++ goto out;
++
++ if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
++ goto out;
++
++ i += PCI_VPD_LRDT_TAG_SIZE;
++ j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
++ PCI_VPD_RO_KEYWORD_CHKSUM);
++ if (j > 0) {
++ u8 csum8 = 0;
++
++ j += PCI_VPD_INFO_FLD_HDR_SIZE;
++
++ for (i = 0; i <= j; i++)
++ csum8 += ((u8 *)buf)[i];
++
++ if (csum8)
++ goto out;
++ }
++ }
++
++ err = 0;
++
++out:
++ kfree(buf);
++ return err;
++}
++
++#define TG3_SERDES_TIMEOUT_SEC 2
++#define TG3_COPPER_TIMEOUT_SEC 6
++
++static int tg3_test_link(struct tg3 *tp)
++{
++ int i, max;
++
++ if (!netif_running(tp->dev))
++ return -ENODEV;
++
++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
++ max = TG3_SERDES_TIMEOUT_SEC;
++ else
++ max = TG3_COPPER_TIMEOUT_SEC;
++
++ for (i = 0; i < max; i++) {
++ if (tp->link_up)
++ return 0;
++
++ if (msleep_interruptible(1000))
++ break;
++ }
++
++ return -EIO;
++}
++
++/* Only test the commonly used registers */
++static int tg3_test_registers(struct tg3 *tp)
++{
++ int i, is_5705, is_5750;
++ u32 offset, read_mask, write_mask, val, save_val, read_val;
++ static struct {
++ u16 offset;
++ u16 flags;
++#define TG3_FL_5705 0x1
++#define TG3_FL_NOT_5705 0x2
++#define TG3_FL_NOT_5788 0x4
++#define TG3_FL_NOT_5750 0x8
++ u32 read_mask;
++ u32 write_mask;
++ } reg_tbl[] = {
++ /* MAC Control Registers */
++ { MAC_MODE, TG3_FL_NOT_5705,
++ 0x00000000, 0x00ef6f8c },
++ { MAC_MODE, TG3_FL_5705,
++ 0x00000000, 0x01ef6b8c },
++ { MAC_STATUS, TG3_FL_NOT_5705,
++ 0x03800107, 0x00000000 },
++ { MAC_STATUS, TG3_FL_5705,
++ 0x03800100, 0x00000000 },
++ { MAC_ADDR_0_HIGH, 0x0000,
++ 0x00000000, 0x0000ffff },
++ { MAC_ADDR_0_LOW, 0x0000,
++ 0x00000000, 0xffffffff },
++ { MAC_RX_MTU_SIZE, 0x0000,
++ 0x00000000, 0x0000ffff },
++ { MAC_TX_MODE, 0x0000,
++ 0x00000000, 0x00000070 },
++ { MAC_TX_LENGTHS, 0x0000,
++ 0x00000000, 0x00003fff },
++ { MAC_RX_MODE, TG3_FL_NOT_5705,
++ 0x00000000, 0x000007fc },
++ { MAC_RX_MODE, TG3_FL_5705,
++ 0x00000000, 0x000007dc },
++ { MAC_HASH_REG_0, 0x0000,
++ 0x00000000, 0xffffffff },
++ { MAC_HASH_REG_1, 0x0000,
++ 0x00000000, 0xffffffff },
++ { MAC_HASH_REG_2, 0x0000,
++ 0x00000000, 0xffffffff },
++ { MAC_HASH_REG_3, 0x0000,
++ 0x00000000, 0xffffffff },
++
++ /* Receive Data and Receive BD Initiator Control Registers. */
++ { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
++ 0x00000000, 0x00000003 },
++ { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { RCVDBDI_STD_BD+0, 0x0000,
++ 0x00000000, 0xffffffff },
++ { RCVDBDI_STD_BD+4, 0x0000,
++ 0x00000000, 0xffffffff },
++ { RCVDBDI_STD_BD+8, 0x0000,
++ 0x00000000, 0xffff0002 },
++ { RCVDBDI_STD_BD+0xc, 0x0000,
++ 0x00000000, 0xffffffff },
++
++ /* Receive BD Initiator Control Registers. */
++ { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { RCVBDI_STD_THRESH, TG3_FL_5705,
++ 0x00000000, 0x000003ff },
++ { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++
++ /* Host Coalescing Control Registers. */
++ { HOSTCC_MODE, TG3_FL_NOT_5705,
++ 0x00000000, 0x00000004 },
++ { HOSTCC_MODE, TG3_FL_5705,
++ 0x00000000, 0x000000f6 },
++ { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
++ 0x00000000, 0x000003ff },
++ { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
++ 0x00000000, 0x000003ff },
++ { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
++ 0x00000000, 0x000000ff },
++ { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
++ 0x00000000, 0x000000ff },
++ { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
++ 0x00000000, 0x000000ff },
++ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
++ 0x00000000, 0x000000ff },
++ { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
++ 0x00000000, 0xffffffff },
++ { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
++ 0xffffffff, 0x00000000 },
++ { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
++ 0xffffffff, 0x00000000 },
++
++ /* Buffer Manager Control Registers. */
++ { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
++ 0x00000000, 0x007fff80 },
++ { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
++ 0x00000000, 0x007fffff },
++ { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
++ 0x00000000, 0x0000003f },
++ { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
++ 0x00000000, 0x000001ff },
++ { BUFMGR_MB_HIGH_WATER, 0x0000,
++ 0x00000000, 0x000001ff },
++ { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
++ 0xffffffff, 0x00000000 },
++ { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
++ 0xffffffff, 0x00000000 },
++
++ /* Mailbox Registers */
++ { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
++ 0x00000000, 0x000001ff },
++ { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
++ 0x00000000, 0x000001ff },
++ { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
++ 0x00000000, 0x000007ff },
++ { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
++ 0x00000000, 0x000001ff },
++
++ { 0xffff, 0x0000, 0x00000000, 0x00000000 },
++ };
++
++ is_5705 = is_5750 = 0;
++ if (tg3_flag(tp, 5705_PLUS)) {
++ is_5705 = 1;
++ if (tg3_flag(tp, 5750_PLUS))
++ is_5750 = 1;
++ }
++
++ for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
++ if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
++ continue;
++
++ if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
++ continue;
++
++ if (tg3_flag(tp, IS_5788) &&
++ (reg_tbl[i].flags & TG3_FL_NOT_5788))
++ continue;
++
++ if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
++ continue;
++
++ offset = (u32) reg_tbl[i].offset;
++ read_mask = reg_tbl[i].read_mask;
++ write_mask = reg_tbl[i].write_mask;
++
++ /* Save the original register content */
++ save_val = tr32(offset);
++
++ /* Determine the read-only value. */
++ read_val = save_val & read_mask;
++
++ /* Write zero to the register, then make sure the read-only bits
++ * are not changed and the read/write bits are all zeros.
++ */
++ tw32(offset, 0);
++
++ val = tr32(offset);
++
++ /* Test the read-only and read/write bits. */
++ if (((val & read_mask) != read_val) || (val & write_mask))
++ goto out;
++
++ /* Write ones to all the bits defined by RdMask and WrMask, then
++ * make sure the read-only bits are not changed and the
++ * read/write bits are all ones.
++ */
++ tw32(offset, read_mask | write_mask);
++
++ val = tr32(offset);
++
++ /* Test the read-only bits. */
++ if ((val & read_mask) != read_val)
++ goto out;
++
++ /* Test the read/write bits. */
++ if ((val & write_mask) != write_mask)
++ goto out;
++
++ tw32(offset, save_val);
++ }
++
++ return 0;
++
++out:
++ if (netif_msg_hw(tp))
++ netdev_err(tp->dev,
++ "Register test failed at offset %x\n", offset);
++ tw32(offset, save_val);
++ return -EIO;
++}
++
++static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
++{
++ static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
++ int i;
++ u32 j;
++
++ for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
++ for (j = 0; j < len; j += 4) {
++ u32 val;
++
++ tg3_write_mem(tp, offset + j, test_pattern[i]);
++ tg3_read_mem(tp, offset + j, &val);
++ if (val != test_pattern[i])
++ return -EIO;
++ }
++ }
++ return 0;
++}
++
++static int tg3_test_memory(struct tg3 *tp)
++{
++ static struct mem_entry {
++ u32 offset;
++ u32 len;
++ } mem_tbl_570x[] = {
++ { 0x00000000, 0x00b50},
++ { 0x00002000, 0x1c000},
++ { 0xffffffff, 0x00000}
++ }, mem_tbl_5705[] = {
++ { 0x00000100, 0x0000c},
++ { 0x00000200, 0x00008},
++ { 0x00004000, 0x00800},
++ { 0x00006000, 0x01000},
++ { 0x00008000, 0x02000},
++ { 0x00010000, 0x0e000},
++ { 0xffffffff, 0x00000}
++ }, mem_tbl_5755[] = {
++ { 0x00000200, 0x00008},
++ { 0x00004000, 0x00800},
++ { 0x00006000, 0x00800},
++ { 0x00008000, 0x02000},
++ { 0x00010000, 0x0c000},
++ { 0xffffffff, 0x00000}
++ }, mem_tbl_5906[] = {
++ { 0x00000200, 0x00008},
++ { 0x00004000, 0x00400},
++ { 0x00006000, 0x00400},
++ { 0x00008000, 0x01000},
++ { 0x00010000, 0x01000},
++ { 0xffffffff, 0x00000}
++ }, mem_tbl_5717[] = {
++ { 0x00000200, 0x00008},
++ { 0x00010000, 0x0a000},
++ { 0x00020000, 0x13c00},
++ { 0xffffffff, 0x00000}
++ }, mem_tbl_57765[] = {
++ { 0x00000200, 0x00008},
++ { 0x00004000, 0x00800},
++ { 0x00006000, 0x09800},
++ { 0x00010000, 0x0a000},
++ { 0xffffffff, 0x00000}
++ };
++ struct mem_entry *mem_tbl;
++ int err = 0;
++ int i;
++
++ if (tg3_flag(tp, 5717_PLUS))
++ mem_tbl = mem_tbl_5717;
++ else if (tg3_flag(tp, 57765_CLASS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ mem_tbl = mem_tbl_57765;
++ else if (tg3_flag(tp, 5755_PLUS))
++ mem_tbl = mem_tbl_5755;
++ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
++ mem_tbl = mem_tbl_5906;
++ else if (tg3_flag(tp, 5705_PLUS))
++ mem_tbl = mem_tbl_5705;
++ else
++ mem_tbl = mem_tbl_570x;
++
++ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
++ err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
++ if (err)
++ break;
++ }
++
++ return err;
++}
++
++#define TG3_TSO_MSS 500
++
++#define TG3_TSO_IP_HDR_LEN 20
++#define TG3_TSO_TCP_HDR_LEN 20
++#define TG3_TSO_TCP_OPT_LEN 12
++
++static const u8 tg3_tso_header[] = {
++0x08, 0x00,
++0x45, 0x00, 0x00, 0x00,
++0x00, 0x00, 0x40, 0x00,
++0x40, 0x06, 0x00, 0x00,
++0x0a, 0x00, 0x00, 0x01,
++0x0a, 0x00, 0x00, 0x02,
++0x0d, 0x00, 0xe0, 0x00,
++0x00, 0x00, 0x01, 0x00,
++0x00, 0x00, 0x02, 0x00,
++0x80, 0x10, 0x10, 0x00,
++0x14, 0x09, 0x00, 0x00,
++0x01, 0x01, 0x08, 0x0a,
++0x11, 0x11, 0x11, 0x11,
++0x11, 0x11, 0x11, 0x11,
++};
++
++static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
++{
++ u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
++ u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
++ u32 budget;
++ struct sk_buff *skb;
++ u8 *tx_data, *rx_data;
++ dma_addr_t map;
++ int num_pkts, tx_len, rx_len, i, err;
++ struct tg3_rx_buffer_desc *desc;
++ struct tg3_napi *tnapi, *rnapi;
++ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
++
++ tnapi = &tp->napi[0];
++ rnapi = &tp->napi[0];
++ if (tp->irq_cnt > 1) {
++ if (tg3_flag(tp, ENABLE_RSS))
++ rnapi = &tp->napi[1];
++ if (tg3_flag(tp, ENABLE_TSS))
++ tnapi = &tp->napi[1];
++ }
++ coal_now = tnapi->coal_now | rnapi->coal_now;
++
++ err = -EIO;
++
++ tx_len = pktsz;
++ skb = netdev_alloc_skb(tp->dev, tx_len);
++ if (!skb)
++ return -ENOMEM;
++
++ tx_data = skb_put(skb, tx_len);
++ memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
++ memset(tx_data + ETH_ALEN, 0x0, 8);
++
++ tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
++
++ if (tso_loopback) {
++ struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
++
++ u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
++ TG3_TSO_TCP_OPT_LEN;
++
++ memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
++ sizeof(tg3_tso_header));
++ mss = TG3_TSO_MSS;
++
++ val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
++ num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
++
++ /* Set the total length field in the IP header */
++ iph->tot_len = htons((u16)(mss + hdr_len));
++
++ base_flags = (TXD_FLAG_CPU_PRE_DMA |
++ TXD_FLAG_CPU_POST_DMA);
++
++ if (tg3_flag(tp, HW_TSO_1) ||
++ tg3_flag(tp, HW_TSO_2) ||
++ tg3_flag(tp, HW_TSO_3)) {
++ struct tcphdr *th;
++ val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
++ th = (struct tcphdr *)&tx_data[val];
++ th->check = 0;
++ } else
++ base_flags |= TXD_FLAG_TCPUDP_CSUM;
++
++ if (tg3_flag(tp, HW_TSO_3)) {
++ mss |= (hdr_len & 0xc) << 12;
++ if (hdr_len & 0x10)
++ base_flags |= 0x00000010;
++ base_flags |= (hdr_len & 0x3e0) << 5;
++ } else if (tg3_flag(tp, HW_TSO_2))
++ mss |= hdr_len << 9;
++ else if (tg3_flag(tp, HW_TSO_1) ||
++ tg3_asic_rev(tp) == ASIC_REV_5705) {
++ mss |= (TG3_TSO_TCP_OPT_LEN << 9);
++ } else {
++ base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
++ }
++
++ data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
++ } else {
++ num_pkts = 1;
++ data_off = ETH_HLEN;
++
++ if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
++ tx_len > VLAN_ETH_FRAME_LEN)
++ base_flags |= TXD_FLAG_JMB_PKT;
++ }
++
++ for (i = data_off; i < tx_len; i++)
++ tx_data[i] = (u8) (i & 0xff);
++
++ map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
++ if (pci_dma_mapping_error(tp->pdev, map)) {
++ dev_kfree_skb(skb);
++ return -EIO;
++ }
++
++ val = tnapi->tx_prod;
++ tnapi->tx_buffers[val].skb = skb;
++ dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
++
++ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
++ rnapi->coal_now);
++
++ udelay(10);
++
++ rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
++
++ budget = tg3_tx_avail(tnapi);
++ if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
++ base_flags | TXD_FLAG_END, mss, 0)) {
++ tnapi->tx_buffers[val].skb = NULL;
++ dev_kfree_skb(skb);
++ return -EIO;
++ }
++
++ tnapi->tx_prod++;
++
++ /* Sync BD data before updating mailbox */
++ wmb();
++
++ tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
++ tr32_mailbox(tnapi->prodmbox);
++
++ udelay(10);
++
++ /* 350 usec to allow enough time on some 10/100 Mbps devices. */
++ for (i = 0; i < 35; i++) {
++ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
++ coal_now);
++
++ udelay(10);
++
++ tx_idx = tnapi->hw_status->idx[0].tx_consumer;
++ rx_idx = rnapi->hw_status->idx[0].rx_producer;
++ if ((tx_idx == tnapi->tx_prod) &&
++ (rx_idx == (rx_start_idx + num_pkts)))
++ break;
++ }
++
++ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
++ dev_kfree_skb(skb);
++
++ if (tx_idx != tnapi->tx_prod)
++ goto out;
++
++ if (rx_idx != rx_start_idx + num_pkts)
++ goto out;
++
++ val = data_off;
++ while (rx_idx != rx_start_idx) {
++ desc = &rnapi->rx_rcb[rx_start_idx++];
++ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
++ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
++
++ if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
++ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
++ goto out;
++
++ rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
++ - ETH_FCS_LEN;
++
++ if (!tso_loopback) {
++ if (rx_len != tx_len)
++ goto out;
++
++ if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
++ if (opaque_key != RXD_OPAQUE_RING_STD)
++ goto out;
++ } else {
++ if (opaque_key != RXD_OPAQUE_RING_JUMBO)
++ goto out;
++ }
++ } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
++ (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
++ >> RXD_TCPCSUM_SHIFT != 0xffff) {
++ goto out;
++ }
++
++ if (opaque_key == RXD_OPAQUE_RING_STD) {
++ rx_data = tpr->rx_std_buffers[desc_idx].data;
++ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
++ mapping);
++ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
++ rx_data = tpr->rx_jmb_buffers[desc_idx].data;
++ map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
++ mapping);
++ } else
++ goto out;
++
++ pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
++ PCI_DMA_FROMDEVICE);
++
++ rx_data += TG3_RX_OFFSET(tp);
++ for (i = data_off; i < rx_len; i++, val++) {
++ if (*(rx_data + i) != (u8) (val & 0xff))
++ goto out;
++ }
++ }
++
++ err = 0;
++
++ /* tg3_free_rings will unmap and free the rx_data */
++out:
++ return err;
++}
++
++#define TG3_STD_LOOPBACK_FAILED 1
++#define TG3_JMB_LOOPBACK_FAILED 2
++#define TG3_TSO_LOOPBACK_FAILED 4
++#define TG3_LOOPBACK_FAILED \
++ (TG3_STD_LOOPBACK_FAILED | \
++ TG3_JMB_LOOPBACK_FAILED | \
++ TG3_TSO_LOOPBACK_FAILED)
++
++static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
++{
++ int err = -EIO;
++ u32 eee_cap;
++ u32 jmb_pkt_sz = 9000;
++
++ if (tp->dma_limit)
++ jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
++
++ eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
++ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
++
++ if (!netif_running(tp->dev)) {
++ data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
++ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
++ if (do_extlpbk)
++ data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
++ goto done;
++ }
++
++ err = tg3_reset_hw(tp, true);
++ if (err) {
++ data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
++ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
++ if (do_extlpbk)
++ data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
++ goto done;
++ }
++
++ if (tg3_flag(tp, ENABLE_RSS)) {
++ int i;
++
++ /* Reroute all rx packets to the 1st queue */
++ for (i = MAC_RSS_INDIR_TBL_0;
++ i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
++ tw32(i, 0x0);
++ }
++
++ /* HW errata - mac loopback fails in some cases on 5780.
++ * Normal traffic and PHY loopback are not affected by
++ * errata. Also, the MAC loopback test is deprecated for
++ * all newer ASIC revisions.
++ */
++ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
++ !tg3_flag(tp, CPMU_PRESENT)) {
++ tg3_mac_loopback(tp, true);
++
++ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
++ data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
++
++ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
++ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
++ data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
++
++ tg3_mac_loopback(tp, false);
++ }
++
++ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
++ !tg3_flag(tp, USE_PHYLIB)) {
++ int i;
++
++ tg3_phy_lpbk_set(tp, 0, false);
++
++ /* Wait for link */
++ for (i = 0; i < 100; i++) {
++ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
++ break;
++ mdelay(1);
++ }
++
++ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
++ data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
++ if (tg3_flag(tp, TSO_CAPABLE) &&
++ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
++ data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
++ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
++ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
++ data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
++
++ if (do_extlpbk) {
++ tg3_phy_lpbk_set(tp, 0, true);
++
++ /* All link indications report up, but the hardware
++ * isn't really ready for about 20 msec. Double it
++ * to be sure.
++ */
++ mdelay(40);
++
++ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
++ data[TG3_EXT_LOOPB_TEST] |=
++ TG3_STD_LOOPBACK_FAILED;
++ if (tg3_flag(tp, TSO_CAPABLE) &&
++ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
++ data[TG3_EXT_LOOPB_TEST] |=
++ TG3_TSO_LOOPBACK_FAILED;
++ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
++ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
++ data[TG3_EXT_LOOPB_TEST] |=
++ TG3_JMB_LOOPBACK_FAILED;
++ }
++
++ /* Re-enable gphy autopowerdown. */
++ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
++ tg3_phy_toggle_apd(tp, true);
++ }
++
++ err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
++ data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
++
++done:
++ tp->phy_flags |= eee_cap;
++
++ return err;
++}
++
++static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
++ u64 *data)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
++ if (tg3_power_up(tp)) {
++ etest->flags |= ETH_TEST_FL_FAILED;
++ memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
++ return;
++ }
++ tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
++ }
++
++ memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
++
++ if (tg3_test_nvram(tp) != 0) {
++ etest->flags |= ETH_TEST_FL_FAILED;
++ data[TG3_NVRAM_TEST] = 1;
++ }
++ if (!doextlpbk && tg3_test_link(tp)) {
++ etest->flags |= ETH_TEST_FL_FAILED;
++ data[TG3_LINK_TEST] = 1;
++ }
++ if (etest->flags & ETH_TEST_FL_OFFLINE) {
++ int err, err2 = 0, irq_sync = 0;
++
++ if (netif_running(dev)) {
++ tg3_phy_stop(tp);
++ tg3_netif_stop(tp);
++ irq_sync = 1;
++ }
++
++ tg3_full_lock(tp, irq_sync);
++ tg3_halt(tp, RESET_KIND_SUSPEND, 1);
++ err = tg3_nvram_lock(tp);
++ tg3_halt_cpu(tp, RX_CPU_BASE);
++ if (!tg3_flag(tp, 5705_PLUS))
++ tg3_halt_cpu(tp, TX_CPU_BASE);
++ if (!err)
++ tg3_nvram_unlock(tp);
++
++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
++ tg3_phy_reset(tp);
++
++ if (tg3_test_registers(tp) != 0) {
++ etest->flags |= ETH_TEST_FL_FAILED;
++ data[TG3_REGISTER_TEST] = 1;
++ }
++
++ if (tg3_test_memory(tp) != 0) {
++ etest->flags |= ETH_TEST_FL_FAILED;
++ data[TG3_MEMORY_TEST] = 1;
++ }
++
++ if (doextlpbk)
++ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
++
++ if (tg3_test_loopback(tp, data, doextlpbk))
++ etest->flags |= ETH_TEST_FL_FAILED;
++
++ tg3_full_unlock(tp);
++
++ if (tg3_test_interrupt(tp) != 0) {
++ etest->flags |= ETH_TEST_FL_FAILED;
++ data[TG3_INTERRUPT_TEST] = 1;
++ }
++
++ tg3_full_lock(tp, 0);
++
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ if (netif_running(dev)) {
++ tg3_flag_set(tp, INIT_COMPLETE);
++ err2 = tg3_restart_hw(tp, true);
++ if (!err2)
++ tg3_netif_start(tp);
++ }
++
++ tg3_full_unlock(tp);
++
++ if (irq_sync && !err2)
++ tg3_phy_start(tp);
++ }
++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
++ tg3_power_down_prepare(tp);
++
++}
++
++static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ struct hwtstamp_config stmpconf;
++
++ if (!tg3_flag(tp, PTP_CAPABLE))
++ return -EOPNOTSUPP;
++
++ if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
++ return -EFAULT;
++
++ if (stmpconf.flags)
++ return -EINVAL;
++
++ if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
++ stmpconf.tx_type != HWTSTAMP_TX_OFF)
++ return -ERANGE;
++
++ switch (stmpconf.rx_filter) {
++ case HWTSTAMP_FILTER_NONE:
++ tp->rxptpctl = 0;
++ break;
++ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
++ TG3_RX_PTP_CTL_ALL_V1_EVENTS;
++ break;
++ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
++ TG3_RX_PTP_CTL_SYNC_EVNT;
++ break;
++ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
++ TG3_RX_PTP_CTL_DELAY_REQ;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_EVENT:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
++ TG3_RX_PTP_CTL_ALL_V2_EVENTS;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
++ TG3_RX_PTP_CTL_ALL_V2_EVENTS;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
++ TG3_RX_PTP_CTL_ALL_V2_EVENTS;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_SYNC:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
++ TG3_RX_PTP_CTL_SYNC_EVNT;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
++ TG3_RX_PTP_CTL_SYNC_EVNT;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
++ TG3_RX_PTP_CTL_SYNC_EVNT;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
++ TG3_RX_PTP_CTL_DELAY_REQ;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
++ TG3_RX_PTP_CTL_DELAY_REQ;
++ break;
++ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
++ TG3_RX_PTP_CTL_DELAY_REQ;
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ if (netif_running(dev) && tp->rxptpctl)
++ tw32(TG3_RX_PTP_CTL,
++ tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
++
++ if (stmpconf.tx_type == HWTSTAMP_TX_ON)
++ tg3_flag_set(tp, TX_TSTAMP_EN);
++ else
++ tg3_flag_clear(tp, TX_TSTAMP_EN);
++
++ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
++ -EFAULT : 0;
++}
++
++static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ struct hwtstamp_config stmpconf;
++
++ if (!tg3_flag(tp, PTP_CAPABLE))
++ return -EOPNOTSUPP;
++
++ stmpconf.flags = 0;
++ stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
++ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
++
++ switch (tp->rxptpctl) {
++ case 0:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
++ break;
++ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ return -ERANGE;
++ }
++
++ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
++ -EFAULT : 0;
++}
++
++static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
++{
++ struct mii_ioctl_data *data = if_mii(ifr);
++ struct tg3 *tp = netdev_priv(dev);
++ int err;
++
++ if (tg3_flag(tp, USE_PHYLIB)) {
++ struct phy_device *phydev;
++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
++ return -EAGAIN;
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++ return phy_mii_ioctl(phydev, ifr, cmd);
++ }
++
++ switch (cmd) {
++ case SIOCGMIIPHY:
++ data->phy_id = tp->phy_addr;
++
++ /* fallthru */
++ case SIOCGMIIREG: {
++ u32 mii_regval;
++
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
++ break; /* We have no PHY */
++
++ if (!netif_running(dev))
++ return -EAGAIN;
++
++ spin_lock_bh(&tp->lock);
++ err = __tg3_readphy(tp, data->phy_id & 0x1f,
++ data->reg_num & 0x1f, &mii_regval);
++ spin_unlock_bh(&tp->lock);
++
++ data->val_out = mii_regval;
++
++ return err;
++ }
++
++ case SIOCSMIIREG:
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
++ break; /* We have no PHY */
++
++ if (!netif_running(dev))
++ return -EAGAIN;
++
++ spin_lock_bh(&tp->lock);
++ err = __tg3_writephy(tp, data->phy_id & 0x1f,
++ data->reg_num & 0x1f, data->val_in);
++ spin_unlock_bh(&tp->lock);
++
++ return err;
++
++ case SIOCSHWTSTAMP:
++ return tg3_hwtstamp_set(dev, ifr);
++
++ case SIOCGHWTSTAMP:
++ return tg3_hwtstamp_get(dev, ifr);
++
++ default:
++ /* do nothing */
++ break;
++ }
++ return -EOPNOTSUPP;
++}
++
++static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ memcpy(ec, &tp->coal, sizeof(*ec));
++ return 0;
++}
++
++static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
++ u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
++
++ if (!tg3_flag(tp, 5705_PLUS)) {
++ max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
++ max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
++ max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
++ min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
++ }
++
++ if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
++ (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
++ (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
++ (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
++ (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
++ (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
++ (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
++ (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
++ (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
++ (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
++ return -EINVAL;
++
++ /* No rx interrupts will be generated if both are zero */
++ if ((ec->rx_coalesce_usecs == 0) &&
++ (ec->rx_max_coalesced_frames == 0))
++ return -EINVAL;
++
++ /* No tx interrupts will be generated if both are zero */
++ if ((ec->tx_coalesce_usecs == 0) &&
++ (ec->tx_max_coalesced_frames == 0))
++ return -EINVAL;
++
++ /* Only copy relevant parameters, ignore all others. */
++ tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
++ tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
++ tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
++ tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
++ tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
++ tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
++ tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
++ tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
++ tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
++
++ if (netif_running(dev)) {
++ tg3_full_lock(tp, 0);
++ __tg3_set_coalesce(tp, &tp->coal);
++ tg3_full_unlock(tp);
++ }
++ return 0;
++}
++
++static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
++ netdev_warn(tp->dev, "Board does not support EEE!\n");
++ return -EOPNOTSUPP;
++ }
++
++ if (edata->advertised != tp->eee.advertised) {
++ netdev_warn(tp->dev,
++ "Direct manipulation of EEE advertisement is not supported\n");
++ return -EINVAL;
++ }
++
++ if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
++ netdev_warn(tp->dev,
++ "Maximal Tx Lpi timer supported is %#x(u)\n",
++ TG3_CPMU_DBTMR1_LNKIDLE_MAX);
++ return -EINVAL;
++ }
++
++ tp->eee = *edata;
++
++ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
++ tg3_warn_mgmt_link_flap(tp);
++
++ if (netif_running(tp->dev)) {
++ tg3_full_lock(tp, 0);
++ tg3_setup_eee(tp);
++ tg3_phy_reset(tp);
++ tg3_full_unlock(tp);
++ }
++
++ return 0;
++}
++
++static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
++ netdev_warn(tp->dev,
++ "Board does not support EEE!\n");
++ return -EOPNOTSUPP;
++ }
++
++ *edata = tp->eee;
++ return 0;
++}
++
++static const struct ethtool_ops tg3_ethtool_ops = {
++ .get_settings = tg3_get_settings,
++ .set_settings = tg3_set_settings,
++ .get_drvinfo = tg3_get_drvinfo,
++ .get_regs_len = tg3_get_regs_len,
++ .get_regs = tg3_get_regs,
++ .get_wol = tg3_get_wol,
++ .set_wol = tg3_set_wol,
++ .get_msglevel = tg3_get_msglevel,
++ .set_msglevel = tg3_set_msglevel,
++ .nway_reset = tg3_nway_reset,
++ .get_link = ethtool_op_get_link,
++ .get_eeprom_len = tg3_get_eeprom_len,
++ .get_eeprom = tg3_get_eeprom,
++ .set_eeprom = tg3_set_eeprom,
++ .get_ringparam = tg3_get_ringparam,
++ .set_ringparam = tg3_set_ringparam,
++ .get_pauseparam = tg3_get_pauseparam,
++ .set_pauseparam = tg3_set_pauseparam,
++ .self_test = tg3_self_test,
++ .get_strings = tg3_get_strings,
++ .set_phys_id = tg3_set_phys_id,
++ .get_ethtool_stats = tg3_get_ethtool_stats,
++ .get_coalesce = tg3_get_coalesce,
++ .set_coalesce = tg3_set_coalesce,
++ .get_sset_count = tg3_get_sset_count,
++ .get_rxnfc = tg3_get_rxnfc,
++ .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
++ .get_rxfh_indir = tg3_get_rxfh_indir,
++ .set_rxfh_indir = tg3_set_rxfh_indir,
++ .get_channels = tg3_get_channels,
++ .set_channels = tg3_set_channels,
++ .get_ts_info = tg3_get_ts_info,
++ .get_eee = tg3_get_eee,
++ .set_eee = tg3_set_eee,
++};
++
++static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ spin_lock_bh(&tp->lock);
++ if (!tp->hw_stats) {
++ spin_unlock_bh(&tp->lock);
++ return &tp->net_stats_prev;
++ }
++
++ tg3_get_nstats(tp, stats);
++ spin_unlock_bh(&tp->lock);
++
++ return stats;
++}
++
++static void tg3_set_rx_mode(struct net_device *dev)
++{
++ struct tg3 *tp = netdev_priv(dev);
++
++ if (!netif_running(dev))
++ return;
++
++ tg3_full_lock(tp, 0);
++ __tg3_set_rx_mode(dev);
++ tg3_full_unlock(tp);
++}
++
++static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
++ int new_mtu)
++{
++ dev->mtu = new_mtu;
++
++ if (new_mtu > ETH_DATA_LEN) {
++ if (tg3_flag(tp, 5780_CLASS)) {
++ netdev_update_features(dev);
++ tg3_flag_clear(tp, TSO_CAPABLE);
++ } else {
++ tg3_flag_set(tp, JUMBO_RING_ENABLE);
++ }
++ } else {
++ if (tg3_flag(tp, 5780_CLASS)) {
++ tg3_flag_set(tp, TSO_CAPABLE);
++ netdev_update_features(dev);
++ }
++ tg3_flag_clear(tp, JUMBO_RING_ENABLE);
++ }
++}
++
++static int tg3_change_mtu(struct net_device *dev, int new_mtu)
++{
++ struct tg3 *tp = netdev_priv(dev);
++ int err;
++ bool reset_phy = false;
++
++ if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
++ return -EINVAL;
++
++ if (!netif_running(dev)) {
++ /* We'll just catch it later when the
++ * device is up'd.
++ */
++ tg3_set_mtu(dev, tp, new_mtu);
++ return 0;
++ }
++
++ tg3_phy_stop(tp);
++
++ tg3_netif_stop(tp);
++
++ tg3_set_mtu(dev, tp, new_mtu);
++
++ tg3_full_lock(tp, 1);
++
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++
++ /* Reset PHY, otherwise the read DMA engine will be in a mode that
++ * breaks all requests to 256 bytes.
++ */
++ if (tg3_asic_rev(tp) == ASIC_REV_57766)
++ reset_phy = true;
++
++ err = tg3_restart_hw(tp, reset_phy);
++
++ if (!err)
++ tg3_netif_start(tp);
++
++ tg3_full_unlock(tp);
++
++ if (!err)
++ tg3_phy_start(tp);
++
++ return err;
++}
++
++static const struct net_device_ops tg3_netdev_ops = {
++ .ndo_open = tg3_open,
++ .ndo_stop = tg3_close,
++ .ndo_start_xmit = tg3_start_xmit,
++ .ndo_get_stats64 = tg3_get_stats64,
++ .ndo_validate_addr = eth_validate_addr,
++ .ndo_set_rx_mode = tg3_set_rx_mode,
++ .ndo_set_mac_address = tg3_set_mac_addr,
++ .ndo_do_ioctl = tg3_ioctl,
++ .ndo_tx_timeout = tg3_tx_timeout,
++ .ndo_change_mtu = tg3_change_mtu,
++ .ndo_fix_features = tg3_fix_features,
++ .ndo_set_features = tg3_set_features,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ .ndo_poll_controller = tg3_poll_controller,
++#endif
++};
++
++static void tg3_get_eeprom_size(struct tg3 *tp)
++{
++ u32 cursize, val, magic;
++
++ tp->nvram_size = EEPROM_CHIP_SIZE;
++
++ if (tg3_nvram_read(tp, 0, &magic) != 0)
++ return;
++
++ if ((magic != TG3_EEPROM_MAGIC) &&
++ ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
++ ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
++ return;
++
++ /*
++ * Size the chip by reading offsets at increasing powers of two.
++ * When we encounter our validation signature, we know the addressing
++ * has wrapped around, and thus have our chip size.
++ */
++ cursize = 0x10;
++
++ while (cursize < tp->nvram_size) {
++ if (tg3_nvram_read(tp, cursize, &val) != 0)
++ return;
++
++ if (val == magic)
++ break;
++
++ cursize <<= 1;
++ }
++
++ tp->nvram_size = cursize;
++}
++
++static void tg3_get_nvram_size(struct tg3 *tp)
++{
++ u32 val;
++
++ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
++ return;
++
++ /* Selfboot format */
++ if (val != TG3_EEPROM_MAGIC) {
++ tg3_get_eeprom_size(tp);
++ return;
++ }
++
++ if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
++ if (val != 0) {
++ /* This is confusing. We want to operate on the
++ * 16-bit value at offset 0xf2. The tg3_nvram_read()
++ * call will read from NVRAM and byteswap the data
++ * according to the byteswapping settings for all
++ * other register accesses. This ensures the data we
++ * want will always reside in the lower 16-bits.
++ * However, the data in NVRAM is in LE format, which
++ * means the data from the NVRAM read will always be
++ * opposite the endianness of the CPU. The 16-bit
++ * byteswap then brings the data to CPU endianness.
++ */
++ tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
++ return;
++ }
++ }
++ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
++}
++
++static void tg3_get_nvram_info(struct tg3 *tp)
++{
++ u32 nvcfg1;
++
++ nvcfg1 = tr32(NVRAM_CFG1);
++ if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
++ tg3_flag_set(tp, FLASH);
++ } else {
++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
++ tw32(NVRAM_CFG1, nvcfg1);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
++ tg3_flag(tp, 5780_CLASS)) {
++ switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
++ case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ break;
++ case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
++ break;
++ case FLASH_VENDOR_ATMEL_EEPROM:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ break;
++ case FLASH_VENDOR_ST:
++ tp->nvram_jedecnum = JEDEC_ST;
++ tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ break;
++ case FLASH_VENDOR_SAIFUN:
++ tp->nvram_jedecnum = JEDEC_SAIFUN;
++ tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
++ break;
++ case FLASH_VENDOR_SST_SMALL:
++ case FLASH_VENDOR_SST_LARGE:
++ tp->nvram_jedecnum = JEDEC_SST;
++ tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
++ break;
++ }
++ } else {
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ }
++}
++
++static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
++{
++ switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
++ case FLASH_5752PAGE_SIZE_256:
++ tp->nvram_pagesize = 256;
++ break;
++ case FLASH_5752PAGE_SIZE_512:
++ tp->nvram_pagesize = 512;
++ break;
++ case FLASH_5752PAGE_SIZE_1K:
++ tp->nvram_pagesize = 1024;
++ break;
++ case FLASH_5752PAGE_SIZE_2K:
++ tp->nvram_pagesize = 2048;
++ break;
++ case FLASH_5752PAGE_SIZE_4K:
++ tp->nvram_pagesize = 4096;
++ break;
++ case FLASH_5752PAGE_SIZE_264:
++ tp->nvram_pagesize = 264;
++ break;
++ case FLASH_5752PAGE_SIZE_528:
++ tp->nvram_pagesize = 528;
++ break;
++ }
++}
++
++static void tg3_get_5752_nvram_info(struct tg3 *tp)
++{
++ u32 nvcfg1;
++
++ nvcfg1 = tr32(NVRAM_CFG1);
++
++ /* NVRAM protection for TPM */
++ if (nvcfg1 & (1 << 27))
++ tg3_flag_set(tp, PROTECTED_NVRAM);
++
++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++ case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
++ case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ break;
++ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++ break;
++ case FLASH_5752VENDOR_ST_M45PE10:
++ case FLASH_5752VENDOR_ST_M45PE20:
++ case FLASH_5752VENDOR_ST_M45PE40:
++ tp->nvram_jedecnum = JEDEC_ST;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++ break;
++ }
++
++ if (tg3_flag(tp, FLASH)) {
++ tg3_nvram_get_pagesize(tp, nvcfg1);
++ } else {
++ /* For eeprom, set pagesize to maximum eeprom size */
++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++
++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
++ tw32(NVRAM_CFG1, nvcfg1);
++ }
++}
++
++static void tg3_get_5755_nvram_info(struct tg3 *tp)
++{
++ u32 nvcfg1, protect = 0;
++
++ nvcfg1 = tr32(NVRAM_CFG1);
++
++ /* NVRAM protection for TPM */
++ if (nvcfg1 & (1 << 27)) {
++ tg3_flag_set(tp, PROTECTED_NVRAM);
++ protect = 1;
++ }
++
++ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
++ switch (nvcfg1) {
++ case FLASH_5755VENDOR_ATMEL_FLASH_1:
++ case FLASH_5755VENDOR_ATMEL_FLASH_2:
++ case FLASH_5755VENDOR_ATMEL_FLASH_3:
++ case FLASH_5755VENDOR_ATMEL_FLASH_5:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++ tp->nvram_pagesize = 264;
++ if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
++ nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
++ tp->nvram_size = (protect ? 0x3e200 :
++ TG3_NVRAM_SIZE_512KB);
++ else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
++ tp->nvram_size = (protect ? 0x1f200 :
++ TG3_NVRAM_SIZE_256KB);
++ else
++ tp->nvram_size = (protect ? 0x1f200 :
++ TG3_NVRAM_SIZE_128KB);
++ break;
++ case FLASH_5752VENDOR_ST_M45PE10:
++ case FLASH_5752VENDOR_ST_M45PE20:
++ case FLASH_5752VENDOR_ST_M45PE40:
++ tp->nvram_jedecnum = JEDEC_ST;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++ tp->nvram_pagesize = 256;
++ if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
++ tp->nvram_size = (protect ?
++ TG3_NVRAM_SIZE_64KB :
++ TG3_NVRAM_SIZE_128KB);
++ else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
++ tp->nvram_size = (protect ?
++ TG3_NVRAM_SIZE_64KB :
++ TG3_NVRAM_SIZE_256KB);
++ else
++ tp->nvram_size = (protect ?
++ TG3_NVRAM_SIZE_128KB :
++ TG3_NVRAM_SIZE_512KB);
++ break;
++ }
++}
++
++static void tg3_get_5787_nvram_info(struct tg3 *tp)
++{
++ u32 nvcfg1;
++
++ nvcfg1 = tr32(NVRAM_CFG1);
++
++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++ case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
++ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
++ case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
++ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++
++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
++ tw32(NVRAM_CFG1, nvcfg1);
++ break;
++ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
++ case FLASH_5755VENDOR_ATMEL_FLASH_1:
++ case FLASH_5755VENDOR_ATMEL_FLASH_2:
++ case FLASH_5755VENDOR_ATMEL_FLASH_3:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++ tp->nvram_pagesize = 264;
++ break;
++ case FLASH_5752VENDOR_ST_M45PE10:
++ case FLASH_5752VENDOR_ST_M45PE20:
++ case FLASH_5752VENDOR_ST_M45PE40:
++ tp->nvram_jedecnum = JEDEC_ST;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++ tp->nvram_pagesize = 256;
++ break;
++ }
++}
++
++static void tg3_get_5761_nvram_info(struct tg3 *tp)
++{
++ u32 nvcfg1, protect = 0;
++
++ nvcfg1 = tr32(NVRAM_CFG1);
++
++ /* NVRAM protection for TPM */
++ if (nvcfg1 & (1 << 27)) {
++ tg3_flag_set(tp, PROTECTED_NVRAM);
++ protect = 1;
++ }
++
++ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
++ switch (nvcfg1) {
++ case FLASH_5761VENDOR_ATMEL_ADB021D:
++ case FLASH_5761VENDOR_ATMEL_ADB041D:
++ case FLASH_5761VENDOR_ATMEL_ADB081D:
++ case FLASH_5761VENDOR_ATMEL_ADB161D:
++ case FLASH_5761VENDOR_ATMEL_MDB021D:
++ case FLASH_5761VENDOR_ATMEL_MDB041D:
++ case FLASH_5761VENDOR_ATMEL_MDB081D:
++ case FLASH_5761VENDOR_ATMEL_MDB161D:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
++ tp->nvram_pagesize = 256;
++ break;
++ case FLASH_5761VENDOR_ST_A_M45PE20:
++ case FLASH_5761VENDOR_ST_A_M45PE40:
++ case FLASH_5761VENDOR_ST_A_M45PE80:
++ case FLASH_5761VENDOR_ST_A_M45PE16:
++ case FLASH_5761VENDOR_ST_M_M45PE20:
++ case FLASH_5761VENDOR_ST_M_M45PE40:
++ case FLASH_5761VENDOR_ST_M_M45PE80:
++ case FLASH_5761VENDOR_ST_M_M45PE16:
++ tp->nvram_jedecnum = JEDEC_ST;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++ tp->nvram_pagesize = 256;
++ break;
++ }
++
++ if (protect) {
++ tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
++ } else {
++ switch (nvcfg1) {
++ case FLASH_5761VENDOR_ATMEL_ADB161D:
++ case FLASH_5761VENDOR_ATMEL_MDB161D:
++ case FLASH_5761VENDOR_ST_A_M45PE16:
++ case FLASH_5761VENDOR_ST_M_M45PE16:
++ tp->nvram_size = TG3_NVRAM_SIZE_2MB;
++ break;
++ case FLASH_5761VENDOR_ATMEL_ADB081D:
++ case FLASH_5761VENDOR_ATMEL_MDB081D:
++ case FLASH_5761VENDOR_ST_A_M45PE80:
++ case FLASH_5761VENDOR_ST_M_M45PE80:
++ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
++ break;
++ case FLASH_5761VENDOR_ATMEL_ADB041D:
++ case FLASH_5761VENDOR_ATMEL_MDB041D:
++ case FLASH_5761VENDOR_ST_A_M45PE40:
++ case FLASH_5761VENDOR_ST_M_M45PE40:
++ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
++ break;
++ case FLASH_5761VENDOR_ATMEL_ADB021D:
++ case FLASH_5761VENDOR_ATMEL_MDB021D:
++ case FLASH_5761VENDOR_ST_A_M45PE20:
++ case FLASH_5761VENDOR_ST_M_M45PE20:
++ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
++ break;
++ }
++ }
++}
++
++static void tg3_get_5906_nvram_info(struct tg3 *tp)
++{
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++}
++
++static void tg3_get_57780_nvram_info(struct tg3 *tp)
++{
++ u32 nvcfg1;
++
++ nvcfg1 = tr32(NVRAM_CFG1);
++
++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
++ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++
++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
++ tw32(NVRAM_CFG1, nvcfg1);
++ return;
++ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
++ case FLASH_57780VENDOR_ATMEL_AT45DB011D:
++ case FLASH_57780VENDOR_ATMEL_AT45DB011B:
++ case FLASH_57780VENDOR_ATMEL_AT45DB021D:
++ case FLASH_57780VENDOR_ATMEL_AT45DB021B:
++ case FLASH_57780VENDOR_ATMEL_AT45DB041D:
++ case FLASH_57780VENDOR_ATMEL_AT45DB041B:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++
++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
++ case FLASH_57780VENDOR_ATMEL_AT45DB011D:
++ case FLASH_57780VENDOR_ATMEL_AT45DB011B:
++ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
++ break;
++ case FLASH_57780VENDOR_ATMEL_AT45DB021D:
++ case FLASH_57780VENDOR_ATMEL_AT45DB021B:
++ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
++ break;
++ case FLASH_57780VENDOR_ATMEL_AT45DB041D:
++ case FLASH_57780VENDOR_ATMEL_AT45DB041B:
++ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
++ break;
++ }
++ break;
++ case FLASH_5752VENDOR_ST_M45PE10:
++ case FLASH_5752VENDOR_ST_M45PE20:
++ case FLASH_5752VENDOR_ST_M45PE40:
++ tp->nvram_jedecnum = JEDEC_ST;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++
++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++ case FLASH_5752VENDOR_ST_M45PE10:
++ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
++ break;
++ case FLASH_5752VENDOR_ST_M45PE20:
++ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
++ break;
++ case FLASH_5752VENDOR_ST_M45PE40:
++ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
++ break;
++ }
++ break;
++ default:
++ tg3_flag_set(tp, NO_NVRAM);
++ return;
++ }
++
++ tg3_nvram_get_pagesize(tp, nvcfg1);
++ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
++}
++
++
++static void tg3_get_5717_nvram_info(struct tg3 *tp)
++{
++ u32 nvcfg1;
++
++ nvcfg1 = tr32(NVRAM_CFG1);
++
++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++ case FLASH_5717VENDOR_ATMEL_EEPROM:
++ case FLASH_5717VENDOR_MICRO_EEPROM:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++
++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
++ tw32(NVRAM_CFG1, nvcfg1);
++ return;
++ case FLASH_5717VENDOR_ATMEL_MDB011D:
++ case FLASH_5717VENDOR_ATMEL_ADB011B:
++ case FLASH_5717VENDOR_ATMEL_ADB011D:
++ case FLASH_5717VENDOR_ATMEL_MDB021D:
++ case FLASH_5717VENDOR_ATMEL_ADB021B:
++ case FLASH_5717VENDOR_ATMEL_ADB021D:
++ case FLASH_5717VENDOR_ATMEL_45USPT:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++
++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++ case FLASH_5717VENDOR_ATMEL_MDB021D:
++ /* Detect size with tg3_nvram_get_size() */
++ break;
++ case FLASH_5717VENDOR_ATMEL_ADB021B:
++ case FLASH_5717VENDOR_ATMEL_ADB021D:
++ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
++ break;
++ default:
++ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
++ break;
++ }
++ break;
++ case FLASH_5717VENDOR_ST_M_M25PE10:
++ case FLASH_5717VENDOR_ST_A_M25PE10:
++ case FLASH_5717VENDOR_ST_M_M45PE10:
++ case FLASH_5717VENDOR_ST_A_M45PE10:
++ case FLASH_5717VENDOR_ST_M_M25PE20:
++ case FLASH_5717VENDOR_ST_A_M25PE20:
++ case FLASH_5717VENDOR_ST_M_M45PE20:
++ case FLASH_5717VENDOR_ST_A_M45PE20:
++ case FLASH_5717VENDOR_ST_25USPT:
++ case FLASH_5717VENDOR_ST_45USPT:
++ tp->nvram_jedecnum = JEDEC_ST;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++
++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++ case FLASH_5717VENDOR_ST_M_M25PE20:
++ case FLASH_5717VENDOR_ST_M_M45PE20:
++ /* Detect size with tg3_nvram_get_size() */
++ break;
++ case FLASH_5717VENDOR_ST_A_M25PE20:
++ case FLASH_5717VENDOR_ST_A_M45PE20:
++ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
++ break;
++ default:
++ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
++ break;
++ }
++ break;
++ default:
++ tg3_flag_set(tp, NO_NVRAM);
++ return;
++ }
++
++ tg3_nvram_get_pagesize(tp, nvcfg1);
++ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
++}
++
++static void tg3_get_5720_nvram_info(struct tg3 *tp)
++{
++ u32 nvcfg1, nvmpinstrp;
++
++ nvcfg1 = tr32(NVRAM_CFG1);
++ nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
++ if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
++ tg3_flag_set(tp, NO_NVRAM);
++ return;
++ }
++
++ switch (nvmpinstrp) {
++ case FLASH_5762_EEPROM_HD:
++ nvmpinstrp = FLASH_5720_EEPROM_HD;
++ break;
++ case FLASH_5762_EEPROM_LD:
++ nvmpinstrp = FLASH_5720_EEPROM_LD;
++ break;
++ case FLASH_5720VENDOR_M_ST_M45PE20:
++ /* This pinstrap supports multiple sizes, so force it
++ * to read the actual size from location 0xf0.
++ */
++ nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
++ break;
++ }
++ }
++
++ switch (nvmpinstrp) {
++ case FLASH_5720_EEPROM_HD:
++ case FLASH_5720_EEPROM_LD:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++
++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
++ tw32(NVRAM_CFG1, nvcfg1);
++ if (nvmpinstrp == FLASH_5720_EEPROM_HD)
++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++ else
++ tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
++ return;
++ case FLASH_5720VENDOR_M_ATMEL_DB011D:
++ case FLASH_5720VENDOR_A_ATMEL_DB011B:
++ case FLASH_5720VENDOR_A_ATMEL_DB011D:
++ case FLASH_5720VENDOR_M_ATMEL_DB021D:
++ case FLASH_5720VENDOR_A_ATMEL_DB021B:
++ case FLASH_5720VENDOR_A_ATMEL_DB021D:
++ case FLASH_5720VENDOR_M_ATMEL_DB041D:
++ case FLASH_5720VENDOR_A_ATMEL_DB041B:
++ case FLASH_5720VENDOR_A_ATMEL_DB041D:
++ case FLASH_5720VENDOR_M_ATMEL_DB081D:
++ case FLASH_5720VENDOR_A_ATMEL_DB081D:
++ case FLASH_5720VENDOR_ATMEL_45USPT:
++ tp->nvram_jedecnum = JEDEC_ATMEL;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++
++ switch (nvmpinstrp) {
++ case FLASH_5720VENDOR_M_ATMEL_DB021D:
++ case FLASH_5720VENDOR_A_ATMEL_DB021B:
++ case FLASH_5720VENDOR_A_ATMEL_DB021D:
++ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
++ break;
++ case FLASH_5720VENDOR_M_ATMEL_DB041D:
++ case FLASH_5720VENDOR_A_ATMEL_DB041B:
++ case FLASH_5720VENDOR_A_ATMEL_DB041D:
++ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
++ break;
++ case FLASH_5720VENDOR_M_ATMEL_DB081D:
++ case FLASH_5720VENDOR_A_ATMEL_DB081D:
++ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
++ break;
++ default:
++ if (tg3_asic_rev(tp) != ASIC_REV_5762)
++ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
++ break;
++ }
++ break;
++ case FLASH_5720VENDOR_M_ST_M25PE10:
++ case FLASH_5720VENDOR_M_ST_M45PE10:
++ case FLASH_5720VENDOR_A_ST_M25PE10:
++ case FLASH_5720VENDOR_A_ST_M45PE10:
++ case FLASH_5720VENDOR_M_ST_M25PE20:
++ case FLASH_5720VENDOR_M_ST_M45PE20:
++ case FLASH_5720VENDOR_A_ST_M25PE20:
++ case FLASH_5720VENDOR_A_ST_M45PE20:
++ case FLASH_5720VENDOR_M_ST_M25PE40:
++ case FLASH_5720VENDOR_M_ST_M45PE40:
++ case FLASH_5720VENDOR_A_ST_M25PE40:
++ case FLASH_5720VENDOR_A_ST_M45PE40:
++ case FLASH_5720VENDOR_M_ST_M25PE80:
++ case FLASH_5720VENDOR_M_ST_M45PE80:
++ case FLASH_5720VENDOR_A_ST_M25PE80:
++ case FLASH_5720VENDOR_A_ST_M45PE80:
++ case FLASH_5720VENDOR_ST_25USPT:
++ case FLASH_5720VENDOR_ST_45USPT:
++ tp->nvram_jedecnum = JEDEC_ST;
++ tg3_flag_set(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, FLASH);
++
++ switch (nvmpinstrp) {
++ case FLASH_5720VENDOR_M_ST_M25PE20:
++ case FLASH_5720VENDOR_M_ST_M45PE20:
++ case FLASH_5720VENDOR_A_ST_M25PE20:
++ case FLASH_5720VENDOR_A_ST_M45PE20:
++ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
++ break;
++ case FLASH_5720VENDOR_M_ST_M25PE40:
++ case FLASH_5720VENDOR_M_ST_M45PE40:
++ case FLASH_5720VENDOR_A_ST_M25PE40:
++ case FLASH_5720VENDOR_A_ST_M45PE40:
++ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
++ break;
++ case FLASH_5720VENDOR_M_ST_M25PE80:
++ case FLASH_5720VENDOR_M_ST_M45PE80:
++ case FLASH_5720VENDOR_A_ST_M25PE80:
++ case FLASH_5720VENDOR_A_ST_M45PE80:
++ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
++ break;
++ default:
++ if (tg3_asic_rev(tp) != ASIC_REV_5762)
++ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
++ break;
++ }
++ break;
++ default:
++ tg3_flag_set(tp, NO_NVRAM);
++ return;
++ }
++
++ tg3_nvram_get_pagesize(tp, nvcfg1);
++ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
++ u32 val;
++
++ if (tg3_nvram_read(tp, 0, &val))
++ return;
++
++ if (val != TG3_EEPROM_MAGIC &&
++ (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
++ tg3_flag_set(tp, NO_NVRAM);
++ }
++}
++
++/* Chips other than 5700/5701 use the NVRAM for fetching info. */
++static void tg3_nvram_init(struct tg3 *tp)
++{
++ if (tg3_flag(tp, IS_SSB_CORE)) {
++ /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
++ tg3_flag_clear(tp, NVRAM);
++ tg3_flag_clear(tp, NVRAM_BUFFERED);
++ tg3_flag_set(tp, NO_NVRAM);
++ return;
++ }
++
++ tw32_f(GRC_EEPROM_ADDR,
++ (EEPROM_ADDR_FSM_RESET |
++ (EEPROM_DEFAULT_CLOCK_PERIOD <<
++ EEPROM_ADDR_CLKPERD_SHIFT)));
++
++ msleep(1);
++
++ /* Enable seeprom accesses. */
++ tw32_f(GRC_LOCAL_CTRL,
++ tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
++ udelay(100);
++
++ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
++ tg3_asic_rev(tp) != ASIC_REV_5701) {
++ tg3_flag_set(tp, NVRAM);
++
++ if (tg3_nvram_lock(tp)) {
++ netdev_warn(tp->dev,
++ "Cannot get nvram lock, %s failed\n",
++ __func__);
++ return;
++ }
++ tg3_enable_nvram_access(tp);
++
++ tp->nvram_size = 0;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5752)
++ tg3_get_5752_nvram_info(tp);
++ else if (tg3_asic_rev(tp) == ASIC_REV_5755)
++ tg3_get_5755_nvram_info(tp);
++ else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
++ tg3_asic_rev(tp) == ASIC_REV_5784 ||
++ tg3_asic_rev(tp) == ASIC_REV_5785)
++ tg3_get_5787_nvram_info(tp);
++ else if (tg3_asic_rev(tp) == ASIC_REV_5761)
++ tg3_get_5761_nvram_info(tp);
++ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
++ tg3_get_5906_nvram_info(tp);
++ else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
++ tg3_flag(tp, 57765_CLASS))
++ tg3_get_57780_nvram_info(tp);
++ else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719)
++ tg3_get_5717_nvram_info(tp);
++ else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ tg3_get_5720_nvram_info(tp);
++ else
++ tg3_get_nvram_info(tp);
++
++ if (tp->nvram_size == 0)
++ tg3_get_nvram_size(tp);
++
++ tg3_disable_nvram_access(tp);
++ tg3_nvram_unlock(tp);
++
++ } else {
++ tg3_flag_clear(tp, NVRAM);
++ tg3_flag_clear(tp, NVRAM_BUFFERED);
++
++ tg3_get_eeprom_size(tp);
++ }
++}
++
++struct subsys_tbl_ent {
++ u16 subsys_vendor, subsys_devid;
++ u32 phy_id;
++};
++
++static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
++ /* Broadcom boards. */
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
++ { TG3PCI_SUBVENDOR_ID_BROADCOM,
++ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
++
++ /* 3com boards. */
++ { TG3PCI_SUBVENDOR_ID_3COM,
++ TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
++ { TG3PCI_SUBVENDOR_ID_3COM,
++ TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_3COM,
++ TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
++ { TG3PCI_SUBVENDOR_ID_3COM,
++ TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_3COM,
++ TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
++
++ /* DELL boards. */
++ { TG3PCI_SUBVENDOR_ID_DELL,
++ TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
++ { TG3PCI_SUBVENDOR_ID_DELL,
++ TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
++ { TG3PCI_SUBVENDOR_ID_DELL,
++ TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
++ { TG3PCI_SUBVENDOR_ID_DELL,
++ TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
++
++ /* Compaq boards. */
++ { TG3PCI_SUBVENDOR_ID_COMPAQ,
++ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_COMPAQ,
++ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_COMPAQ,
++ TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
++ { TG3PCI_SUBVENDOR_ID_COMPAQ,
++ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
++ { TG3PCI_SUBVENDOR_ID_COMPAQ,
++ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
++
++ /* IBM boards. */
++ { TG3PCI_SUBVENDOR_ID_IBM,
++ TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
++};
++
++static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
++ if ((subsys_id_to_phy_id[i].subsys_vendor ==
++ tp->pdev->subsystem_vendor) &&
++ (subsys_id_to_phy_id[i].subsys_devid ==
++ tp->pdev->subsystem_device))
++ return &subsys_id_to_phy_id[i];
++ }
++ return NULL;
++}
++
++static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
++{
++ u32 val;
++
++ tp->phy_id = TG3_PHY_ID_INVALID;
++ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
++
++ /* Assume an onboard device and WOL capable by default. */
++ tg3_flag_set(tp, EEPROM_WRITE_PROT);
++ tg3_flag_set(tp, WOL_CAP);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
++ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
++ tg3_flag_set(tp, IS_NIC);
++ }
++ val = tr32(VCPU_CFGSHDW);
++ if (val & VCPU_CFGSHDW_ASPM_DBNC)
++ tg3_flag_set(tp, ASPM_WORKAROUND);
++ if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
++ (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
++ tg3_flag_set(tp, WOL_ENABLE);
++ device_set_wakeup_enable(&tp->pdev->dev, true);
++ }
++ goto done;
++ }
++
++ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
++ if (val == NIC_SRAM_DATA_SIG_MAGIC) {
++ u32 nic_cfg, led_cfg;
++ u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
++ u32 nic_phy_id, ver, eeprom_phy_id;
++ int eeprom_phy_serdes = 0;
++
++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
++ tp->nic_sram_data_cfg = nic_cfg;
++
++ tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
++ ver >>= NIC_SRAM_DATA_VER_SHIFT;
++ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
++ tg3_asic_rev(tp) != ASIC_REV_5701 &&
++ tg3_asic_rev(tp) != ASIC_REV_5703 &&
++ (ver > 0) && (ver < 0x100))
++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5785)
++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720)
++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
++
++ if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
++ NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
++ eeprom_phy_serdes = 1;
++
++ tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
++ if (nic_phy_id != 0) {
++ u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
++ u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
++
++ eeprom_phy_id = (id1 >> 16) << 10;
++ eeprom_phy_id |= (id2 & 0xfc00) << 16;
++ eeprom_phy_id |= (id2 & 0x03ff) << 0;
++ } else
++ eeprom_phy_id = 0;
++
++ tp->phy_id = eeprom_phy_id;
++ if (eeprom_phy_serdes) {
++ if (!tg3_flag(tp, 5705_PLUS))
++ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
++ else
++ tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
++ }
++
++ if (tg3_flag(tp, 5750_PLUS))
++ led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
++ SHASTA_EXT_LED_MODE_MASK);
++ else
++ led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
++
++ switch (led_cfg) {
++ default:
++ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
++ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
++ break;
++
++ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
++ tp->led_ctrl = LED_CTRL_MODE_PHY_2;
++ break;
++
++ case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
++ tp->led_ctrl = LED_CTRL_MODE_MAC;
++
++ /* Default to PHY_1_MODE if 0 (MAC_MODE) is
++ * read on some older 5700/5701 bootcode.
++ */
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701)
++ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
++
++ break;
++
++ case SHASTA_EXT_LED_SHARED:
++ tp->led_ctrl = LED_CTRL_MODE_SHARED;
++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
++ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
++ LED_CTRL_MODE_PHY_2);
++
++ if (tg3_flag(tp, 5717_PLUS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
++ LED_CTRL_BLINK_RATE_MASK;
++
++ break;
++
++ case SHASTA_EXT_LED_MAC:
++ tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
++ break;
++
++ case SHASTA_EXT_LED_COMBO:
++ tp->led_ctrl = LED_CTRL_MODE_COMBO;
++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
++ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
++ LED_CTRL_MODE_PHY_2);
++ break;
++
++ }
++
++ if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701) &&
++ tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
++ tp->led_ctrl = LED_CTRL_MODE_PHY_2;
++
++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
++ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
++
++ if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
++ tg3_flag_set(tp, EEPROM_WRITE_PROT);
++ if ((tp->pdev->subsystem_vendor ==
++ PCI_VENDOR_ID_ARIMA) &&
++ (tp->pdev->subsystem_device == 0x205a ||
++ tp->pdev->subsystem_device == 0x2063))
++ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
++ } else {
++ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
++ tg3_flag_set(tp, IS_NIC);
++ }
++
++ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
++ tg3_flag_set(tp, ENABLE_ASF);
++ if (tg3_flag(tp, 5750_PLUS))
++ tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
++ }
++
++ if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
++ tg3_flag(tp, 5750_PLUS))
++ tg3_flag_set(tp, ENABLE_APE);
++
++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
++ !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
++ tg3_flag_clear(tp, WOL_CAP);
++
++ if (tg3_flag(tp, WOL_CAP) &&
++ (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
++ tg3_flag_set(tp, WOL_ENABLE);
++ device_set_wakeup_enable(&tp->pdev->dev, true);
++ }
++
++ if (cfg2 & (1 << 17))
++ tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
++
++ /* serdes signal pre-emphasis in register 0x590 set by */
++ /* bootcode if bit 18 is set */
++ if (cfg2 & (1 << 18))
++ tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
++
++ if ((tg3_flag(tp, 57765_PLUS) ||
++ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
++ tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
++ (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
++ tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
++
++ if (tg3_flag(tp, PCI_EXPRESS)) {
++ u32 cfg3;
++
++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
++ if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
++ !tg3_flag(tp, 57765_PLUS) &&
++ (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
++ tg3_flag_set(tp, ASPM_WORKAROUND);
++ if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
++ tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
++ if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
++ tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
++ }
++
++ if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
++ tg3_flag_set(tp, RGMII_INBAND_DISABLE);
++ if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
++ tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
++ if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
++ tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
++
++ if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
++ tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
++ }
++done:
++ if (tg3_flag(tp, WOL_CAP))
++ device_set_wakeup_enable(&tp->pdev->dev,
++ tg3_flag(tp, WOL_ENABLE));
++ else
++ device_set_wakeup_capable(&tp->pdev->dev, false);
++}
++
++static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
++{
++ int i, err;
++ u32 val2, off = offset * 8;
++
++ err = tg3_nvram_lock(tp);
++ if (err)
++ return err;
++
++ tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
++ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
++ APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
++ tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
++ udelay(10);
++
++ for (i = 0; i < 100; i++) {
++ val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
++ if (val2 & APE_OTP_STATUS_CMD_DONE) {
++ *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
++ break;
++ }
++ udelay(10);
++ }
++
++ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
++
++ tg3_nvram_unlock(tp);
++ if (val2 & APE_OTP_STATUS_CMD_DONE)
++ return 0;
++
++ return -EBUSY;
++}
++
++static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
++{
++ int i;
++ u32 val;
++
++ tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
++ tw32(OTP_CTRL, cmd);
++
++ /* Wait for up to 1 ms for command to execute. */
++ for (i = 0; i < 100; i++) {
++ val = tr32(OTP_STATUS);
++ if (val & OTP_STATUS_CMD_DONE)
++ break;
++ udelay(10);
++ }
++
++ return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
++}
++
++/* Read the gphy configuration from the OTP region of the chip. The gphy
++ * configuration is a 32-bit value that straddles the alignment boundary.
++ * We do two 32-bit reads and then shift and merge the results.
++ */
++static u32 tg3_read_otp_phycfg(struct tg3 *tp)
++{
++ u32 bhalf_otp, thalf_otp;
++
++ tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
++
++ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
++ return 0;
++
++ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
++
++ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
++ return 0;
++
++ thalf_otp = tr32(OTP_READ_DATA);
++
++ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
++
++ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
++ return 0;
++
++ bhalf_otp = tr32(OTP_READ_DATA);
++
++ return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
++}
++
++static void tg3_phy_init_link_config(struct tg3 *tp)
++{
++ u32 adv = ADVERTISED_Autoneg;
++
++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
++ if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
++ adv |= ADVERTISED_1000baseT_Half;
++ adv |= ADVERTISED_1000baseT_Full;
++ }
++
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
++ adv |= ADVERTISED_100baseT_Half |
++ ADVERTISED_100baseT_Full |
++ ADVERTISED_10baseT_Half |
++ ADVERTISED_10baseT_Full |
++ ADVERTISED_TP;
++ else
++ adv |= ADVERTISED_FIBRE;
++
++ tp->link_config.advertising = adv;
++ tp->link_config.speed = SPEED_UNKNOWN;
++ tp->link_config.duplex = DUPLEX_UNKNOWN;
++ tp->link_config.autoneg = AUTONEG_ENABLE;
++ tp->link_config.active_speed = SPEED_UNKNOWN;
++ tp->link_config.active_duplex = DUPLEX_UNKNOWN;
++
++ tp->old_link = -1;
++}
++
++static int tg3_phy_probe(struct tg3 *tp)
++{
++ u32 hw_phy_id_1, hw_phy_id_2;
++ u32 hw_phy_id, hw_phy_id_masked;
++ int err;
++
++ /* flow control autonegotiation is default behavior */
++ tg3_flag_set(tp, PAUSE_AUTONEG);
++ tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
++
++ if (tg3_flag(tp, ENABLE_APE)) {
++ switch (tp->pci_fn) {
++ case 0:
++ tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
++ break;
++ case 1:
++ tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
++ break;
++ case 2:
++ tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
++ break;
++ case 3:
++ tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
++ break;
++ }
++ }
++
++ if (!tg3_flag(tp, ENABLE_ASF) &&
++ !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
++ !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
++ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
++ TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
++
++ if (tg3_flag(tp, USE_PHYLIB))
++ return tg3_phy_init(tp);
++
++ /* Reading the PHY ID register can conflict with ASF
++ * firmware access to the PHY hardware.
++ */
++ err = 0;
++ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
++ hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
++ } else {
++ /* Now read the physical PHY_ID from the chip and verify
++ * that it is sane. If it doesn't look good, we fall back
++ * to either the hard-coded table based PHY_ID and failing
++ * that the value found in the eeprom area.
++ */
++ err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
++ err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
++
++ hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
++ hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
++ hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
++
++ hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
++ }
++
++ if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
++ tp->phy_id = hw_phy_id;
++ if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
++ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
++ else
++ tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
++ } else {
++ if (tp->phy_id != TG3_PHY_ID_INVALID) {
++ /* Do nothing, phy ID already set up in
++ * tg3_get_eeprom_hw_cfg().
++ */
++ } else {
++ struct subsys_tbl_ent *p;
++
++ /* No eeprom signature? Try the hardcoded
++ * subsys device table.
++ */
++ p = tg3_lookup_by_subsys(tp);
++ if (p) {
++ tp->phy_id = p->phy_id;
++ } else if (!tg3_flag(tp, IS_SSB_CORE)) {
++ /* For now we saw the IDs 0xbc050cd0,
++ * 0xbc050f80 and 0xbc050c30 on devices
++ * connected to an BCM4785 and there are
++ * probably more. Just assume that the phy is
++ * supported when it is connected to a SSB core
++ * for now.
++ */
++ return -ENODEV;
++ }
++
++ if (!tp->phy_id ||
++ tp->phy_id == TG3_PHY_ID_BCM8002)
++ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
++ }
++ }
++
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
++ (tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_57766 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762 ||
++ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
++ (tg3_asic_rev(tp) == ASIC_REV_57765 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
++ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
++
++ tp->eee.supported = SUPPORTED_100baseT_Full |
++ SUPPORTED_1000baseT_Full;
++ tp->eee.advertised = ADVERTISED_100baseT_Full |
++ ADVERTISED_1000baseT_Full;
++ tp->eee.eee_enabled = 1;
++ tp->eee.tx_lpi_enabled = 1;
++ tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
++ }
++
++ tg3_phy_init_link_config(tp);
++
++ if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
++ !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
++ !tg3_flag(tp, ENABLE_APE) &&
++ !tg3_flag(tp, ENABLE_ASF)) {
++ u32 bmsr, dummy;
++
++ tg3_readphy(tp, MII_BMSR, &bmsr);
++ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
++ (bmsr & BMSR_LSTATUS))
++ goto skip_phy_reset;
++
++ err = tg3_phy_reset(tp);
++ if (err)
++ return err;
++
++ tg3_phy_set_wirespeed(tp);
++
++ if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
++ tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
++ tp->link_config.flowctrl);
++
++ tg3_writephy(tp, MII_BMCR,
++ BMCR_ANENABLE | BMCR_ANRESTART);
++ }
++ }
++
++skip_phy_reset:
++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
++ err = tg3_init_5401phy_dsp(tp);
++ if (err)
++ return err;
++
++ err = tg3_init_5401phy_dsp(tp);
++ }
++
++ return err;
++}
++
++static void tg3_read_vpd(struct tg3 *tp)
++{
++ u8 *vpd_data;
++ unsigned int block_end, rosize, len;
++ u32 vpdlen;
++ int j, i = 0;
++
++ vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
++ if (!vpd_data)
++ goto out_no_vpd;
++
++ i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
++ if (i < 0)
++ goto out_not_found;
++
++ rosize = pci_vpd_lrdt_size(&vpd_data[i]);
++ block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
++ i += PCI_VPD_LRDT_TAG_SIZE;
++
++ if (block_end > vpdlen)
++ goto out_not_found;
++
++ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
++ PCI_VPD_RO_KEYWORD_MFR_ID);
++ if (j > 0) {
++ len = pci_vpd_info_field_size(&vpd_data[j]);
++
++ j += PCI_VPD_INFO_FLD_HDR_SIZE;
++ if (j + len > block_end || len != 4 ||
++ memcmp(&vpd_data[j], "1028", 4))
++ goto partno;
++
++ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
++ PCI_VPD_RO_KEYWORD_VENDOR0);
++ if (j < 0)
++ goto partno;
++
++ len = pci_vpd_info_field_size(&vpd_data[j]);
++
++ j += PCI_VPD_INFO_FLD_HDR_SIZE;
++ if (j + len > block_end)
++ goto partno;
++
++ if (len >= sizeof(tp->fw_ver))
++ len = sizeof(tp->fw_ver) - 1;
++ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
++ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
++ &vpd_data[j]);
++ }
++
++partno:
++ i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
++ PCI_VPD_RO_KEYWORD_PARTNO);
++ if (i < 0)
++ goto out_not_found;
++
++ len = pci_vpd_info_field_size(&vpd_data[i]);
++
++ i += PCI_VPD_INFO_FLD_HDR_SIZE;
++ if (len > TG3_BPN_SIZE ||
++ (len + i) > vpdlen)
++ goto out_not_found;
++
++ memcpy(tp->board_part_number, &vpd_data[i], len);
++
++out_not_found:
++ kfree(vpd_data);
++ if (tp->board_part_number[0])
++ return;
++
++out_no_vpd:
++ if (tg3_asic_rev(tp) == ASIC_REV_5717) {
++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
++ strcpy(tp->board_part_number, "BCM5717");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
++ strcpy(tp->board_part_number, "BCM5718");
++ else
++ goto nomatch;
++ } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
++ strcpy(tp->board_part_number, "BCM57780");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
++ strcpy(tp->board_part_number, "BCM57760");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
++ strcpy(tp->board_part_number, "BCM57790");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
++ strcpy(tp->board_part_number, "BCM57788");
++ else
++ goto nomatch;
++ } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
++ strcpy(tp->board_part_number, "BCM57761");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
++ strcpy(tp->board_part_number, "BCM57765");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
++ strcpy(tp->board_part_number, "BCM57781");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
++ strcpy(tp->board_part_number, "BCM57785");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
++ strcpy(tp->board_part_number, "BCM57791");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
++ strcpy(tp->board_part_number, "BCM57795");
++ else
++ goto nomatch;
++ } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
++ strcpy(tp->board_part_number, "BCM57762");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
++ strcpy(tp->board_part_number, "BCM57766");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
++ strcpy(tp->board_part_number, "BCM57782");
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
++ strcpy(tp->board_part_number, "BCM57786");
++ else
++ goto nomatch;
++ } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ strcpy(tp->board_part_number, "BCM95906");
++ } else {
++nomatch:
++ strcpy(tp->board_part_number, "none");
++ }
++}
++
++static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
++{
++ u32 val;
++
++ if (tg3_nvram_read(tp, offset, &val) ||
++ (val & 0xfc000000) != 0x0c000000 ||
++ tg3_nvram_read(tp, offset + 4, &val) ||
++ val != 0)
++ return 0;
++
++ return 1;
++}
++
++static void tg3_read_bc_ver(struct tg3 *tp)
++{
++ u32 val, offset, start, ver_offset;
++ int i, dst_off;
++ bool newver = false;
++
++ if (tg3_nvram_read(tp, 0xc, &offset) ||
++ tg3_nvram_read(tp, 0x4, &start))
++ return;
++
++ offset = tg3_nvram_logical_addr(tp, offset);
++
++ if (tg3_nvram_read(tp, offset, &val))
++ return;
++
++ if ((val & 0xfc000000) == 0x0c000000) {
++ if (tg3_nvram_read(tp, offset + 4, &val))
++ return;
++
++ if (val == 0)
++ newver = true;
++ }
++
++ dst_off = strlen(tp->fw_ver);
++
++ if (newver) {
++ if (TG3_VER_SIZE - dst_off < 16 ||
++ tg3_nvram_read(tp, offset + 8, &ver_offset))
++ return;
++
++ offset = offset + ver_offset - start;
++ for (i = 0; i < 16; i += 4) {
++ __be32 v;
++ if (tg3_nvram_read_be32(tp, offset + i, &v))
++ return;
++
++ memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
++ }
++ } else {
++ u32 major, minor;
++
++ if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
++ return;
++
++ major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
++ TG3_NVM_BCVER_MAJSFT;
++ minor = ver_offset & TG3_NVM_BCVER_MINMSK;
++ snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
++ "v%d.%02d", major, minor);
++ }
++}
++
++static void tg3_read_hwsb_ver(struct tg3 *tp)
++{
++ u32 val, major, minor;
++
++ /* Use native endian representation */
++ if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
++ return;
++
++ major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
++ TG3_NVM_HWSB_CFG1_MAJSFT;
++ minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
++ TG3_NVM_HWSB_CFG1_MINSFT;
++
++ snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
++}
++
++static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
++{
++ u32 offset, major, minor, build;
++
++ strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
++
++ if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
++ return;
++
++ switch (val & TG3_EEPROM_SB_REVISION_MASK) {
++ case TG3_EEPROM_SB_REVISION_0:
++ offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
++ break;
++ case TG3_EEPROM_SB_REVISION_2:
++ offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
++ break;
++ case TG3_EEPROM_SB_REVISION_3:
++ offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
++ break;
++ case TG3_EEPROM_SB_REVISION_4:
++ offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
++ break;
++ case TG3_EEPROM_SB_REVISION_5:
++ offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
++ break;
++ case TG3_EEPROM_SB_REVISION_6:
++ offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
++ break;
++ default:
++ return;
++ }
++
++ if (tg3_nvram_read(tp, offset, &val))
++ return;
++
++ build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
++ TG3_EEPROM_SB_EDH_BLD_SHFT;
++ major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
++ TG3_EEPROM_SB_EDH_MAJ_SHFT;
++ minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
++
++ if (minor > 99 || build > 26)
++ return;
++
++ offset = strlen(tp->fw_ver);
++ snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
++ " v%d.%02d", major, minor);
++
++ if (build > 0) {
++ offset = strlen(tp->fw_ver);
++ if (offset < TG3_VER_SIZE - 1)
++ tp->fw_ver[offset] = 'a' + build - 1;
++ }
++}
++
++static void tg3_read_mgmtfw_ver(struct tg3 *tp)
++{
++ u32 val, offset, start;
++ int i, vlen;
++
++ for (offset = TG3_NVM_DIR_START;
++ offset < TG3_NVM_DIR_END;
++ offset += TG3_NVM_DIRENT_SIZE) {
++ if (tg3_nvram_read(tp, offset, &val))
++ return;
++
++ if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
++ break;
++ }
++
++ if (offset == TG3_NVM_DIR_END)
++ return;
++
++ if (!tg3_flag(tp, 5705_PLUS))
++ start = 0x08000000;
++ else if (tg3_nvram_read(tp, offset - 4, &start))
++ return;
++
++ if (tg3_nvram_read(tp, offset + 4, &offset) ||
++ !tg3_fw_img_is_valid(tp, offset) ||
++ tg3_nvram_read(tp, offset + 8, &val))
++ return;
++
++ offset += val - start;
++
++ vlen = strlen(tp->fw_ver);
++
++ tp->fw_ver[vlen++] = ',';
++ tp->fw_ver[vlen++] = ' ';
++
++ for (i = 0; i < 4; i++) {
++ __be32 v;
++ if (tg3_nvram_read_be32(tp, offset, &v))
++ return;
++
++ offset += sizeof(v);
++
++ if (vlen > TG3_VER_SIZE - sizeof(v)) {
++ memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
++ break;
++ }
++
++ memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
++ vlen += sizeof(v);
++ }
++}
++
++static void tg3_probe_ncsi(struct tg3 *tp)
++{
++ u32 apedata;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
++ if (apedata != APE_SEG_SIG_MAGIC)
++ return;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
++ if (!(apedata & APE_FW_STATUS_READY))
++ return;
++
++ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
++ tg3_flag_set(tp, APE_HAS_NCSI);
++}
++
++static void tg3_read_dash_ver(struct tg3 *tp)
++{
++ int vlen;
++ u32 apedata;
++ char *fwtype;
++
++ apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
++
++ if (tg3_flag(tp, APE_HAS_NCSI))
++ fwtype = "NCSI";
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
++ fwtype = "SMASH";
++ else
++ fwtype = "DASH";
++
++ vlen = strlen(tp->fw_ver);
++
++ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
++ fwtype,
++ (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
++ (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
++ (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
++ (apedata & APE_FW_VERSION_BLDMSK));
++}
++
++static void tg3_read_otp_ver(struct tg3 *tp)
++{
++ u32 val, val2;
++
++ if (tg3_asic_rev(tp) != ASIC_REV_5762)
++ return;
++
++ if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
++ !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
++ TG3_OTP_MAGIC0_VALID(val)) {
++ u64 val64 = (u64) val << 32 | val2;
++ u32 ver = 0;
++ int i, vlen;
++
++ for (i = 0; i < 7; i++) {
++ if ((val64 & 0xff) == 0)
++ break;
++ ver = val64 & 0xff;
++ val64 >>= 8;
++ }
++ vlen = strlen(tp->fw_ver);
++ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
++ }
++}
++
++static void tg3_read_fw_ver(struct tg3 *tp)
++{
++ u32 val;
++ bool vpd_vers = false;
++
++ if (tp->fw_ver[0] != 0)
++ vpd_vers = true;
++
++ if (tg3_flag(tp, NO_NVRAM)) {
++ strcat(tp->fw_ver, "sb");
++ tg3_read_otp_ver(tp);
++ return;
++ }
++
++ if (tg3_nvram_read(tp, 0, &val))
++ return;
++
++ if (val == TG3_EEPROM_MAGIC)
++ tg3_read_bc_ver(tp);
++ else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
++ tg3_read_sb_ver(tp, val);
++ else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
++ tg3_read_hwsb_ver(tp);
++
++ if (tg3_flag(tp, ENABLE_ASF)) {
++ if (tg3_flag(tp, ENABLE_APE)) {
++ tg3_probe_ncsi(tp);
++ if (!vpd_vers)
++ tg3_read_dash_ver(tp);
++ } else if (!vpd_vers) {
++ tg3_read_mgmtfw_ver(tp);
++ }
++ }
++
++ tp->fw_ver[TG3_VER_SIZE - 1] = 0;
++}
++
++static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
++{
++ if (tg3_flag(tp, LRG_PROD_RING_CAP))
++ return TG3_RX_RET_MAX_SIZE_5717;
++ else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
++ return TG3_RX_RET_MAX_SIZE_5700;
++ else
++ return TG3_RX_RET_MAX_SIZE_5705;
++}
++
++static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
++ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
++ { },
++};
++
++static struct pci_dev *tg3_find_peer(struct tg3 *tp)
++{
++ struct pci_dev *peer;
++ unsigned int func, devnr = tp->pdev->devfn & ~7;
++
++ for (func = 0; func < 8; func++) {
++ peer = pci_get_slot(tp->pdev->bus, devnr | func);
++ if (peer && peer != tp->pdev)
++ break;
++ pci_dev_put(peer);
++ }
++ /* 5704 can be configured in single-port mode, set peer to
++ * tp->pdev in that case.
++ */
++ if (!peer) {
++ peer = tp->pdev;
++ return peer;
++ }
++
++ /*
++ * We don't need to keep the refcount elevated; there's no way
++ * to remove one half of this device without removing the other
++ */
++ pci_dev_put(peer);
++
++ return peer;
++}
++
++static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
++{
++ tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
++ if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
++ u32 reg;
++
++ /* All devices that use the alternate
++ * ASIC REV location have a CPMU.
++ */
++ tg3_flag_set(tp, CPMU_PRESENT);
++
++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
++ reg = TG3PCI_GEN2_PRODID_ASICREV;
++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
++ reg = TG3PCI_GEN15_PRODID_ASICREV;
++ else
++ reg = TG3PCI_PRODID_ASICREV;
++
++ pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
++ }
++
++ /* Wrong chip ID in 5752 A0. This code can be removed later
++ * as A0 is not in production.
++ */
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
++ tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
++ tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720)
++ tg3_flag_set(tp, 5717_PLUS);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
++ tg3_asic_rev(tp) == ASIC_REV_57766)
++ tg3_flag_set(tp, 57765_CLASS);
++
++ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ tg3_flag_set(tp, 57765_PLUS);
++
++ /* Intentionally exclude ASIC_REV_5906 */
++ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
++ tg3_asic_rev(tp) == ASIC_REV_5787 ||
++ tg3_asic_rev(tp) == ASIC_REV_5784 ||
++ tg3_asic_rev(tp) == ASIC_REV_5761 ||
++ tg3_asic_rev(tp) == ASIC_REV_5785 ||
++ tg3_asic_rev(tp) == ASIC_REV_57780 ||
++ tg3_flag(tp, 57765_PLUS))
++ tg3_flag_set(tp, 5755_PLUS);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
++ tg3_asic_rev(tp) == ASIC_REV_5714)
++ tg3_flag_set(tp, 5780_CLASS);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
++ tg3_asic_rev(tp) == ASIC_REV_5752 ||
++ tg3_asic_rev(tp) == ASIC_REV_5906 ||
++ tg3_flag(tp, 5755_PLUS) ||
++ tg3_flag(tp, 5780_CLASS))
++ tg3_flag_set(tp, 5750_PLUS);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
++ tg3_flag(tp, 5750_PLUS))
++ tg3_flag_set(tp, 5705_PLUS);
++}
++
++static bool tg3_10_100_only_device(struct tg3 *tp,
++ const struct pci_device_id *ent)
++{
++ u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
++
++ if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
++ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
++ (tp->phy_flags & TG3_PHYFLG_IS_FET))
++ return true;
++
++ if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
++ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
++ if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
++ return true;
++ } else {
++ return true;
++ }
++ }
++
++ return false;
++}
++
++static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
++{
++ u32 misc_ctrl_reg;
++ u32 pci_state_reg, grc_misc_cfg;
++ u32 val;
++ u16 pci_cmd;
++ int err;
++
++ /* Force memory write invalidate off. If we leave it on,
++ * then on 5700_BX chips we have to enable a workaround.
++ * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
++ * to match the cacheline size. The Broadcom driver have this
++ * workaround but turns MWI off all the times so never uses
++ * it. This seems to suggest that the workaround is insufficient.
++ */
++ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
++ pci_cmd &= ~PCI_COMMAND_INVALIDATE;
++ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
++
++ /* Important! -- Make sure register accesses are byteswapped
++ * correctly. Also, for those chips that require it, make
++ * sure that indirect register accesses are enabled before
++ * the first operation.
++ */
++ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
++ &misc_ctrl_reg);
++ tp->misc_host_ctrl |= (misc_ctrl_reg &
++ MISC_HOST_CTRL_CHIPREV);
++ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
++ tp->misc_host_ctrl);
++
++ tg3_detect_asic_rev(tp, misc_ctrl_reg);
++
++ /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
++ * we need to disable memory and use config. cycles
++ * only to access all registers. The 5702/03 chips
++ * can mistakenly decode the special cycles from the
++ * ICH chipsets as memory write cycles, causing corruption
++ * of register and memory space. Only certain ICH bridges
++ * will drive special cycles with non-zero data during the
++ * address phase which can fall within the 5703's address
++ * range. This is not an ICH bug as the PCI spec allows
++ * non-zero address during special cycles. However, only
++ * these ICH bridges are known to drive non-zero addresses
++ * during special cycles.
++ *
++ * Since special cycles do not cross PCI bridges, we only
++ * enable this workaround if the 5703 is on the secondary
++ * bus of these ICH bridges.
++ */
++ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
++ (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
++ static struct tg3_dev_id {
++ u32 vendor;
++ u32 device;
++ u32 rev;
++ } ich_chipsets[] = {
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
++ PCI_ANY_ID },
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
++ PCI_ANY_ID },
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
++ 0xa },
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
++ PCI_ANY_ID },
++ { },
++ };
++ struct tg3_dev_id *pci_id = &ich_chipsets[0];
++ struct pci_dev *bridge = NULL;
++
++ while (pci_id->vendor != 0) {
++ bridge = pci_get_device(pci_id->vendor, pci_id->device,
++ bridge);
++ if (!bridge) {
++ pci_id++;
++ continue;
++ }
++ if (pci_id->rev != PCI_ANY_ID) {
++ if (bridge->revision > pci_id->rev)
++ continue;
++ }
++ if (bridge->subordinate &&
++ (bridge->subordinate->number ==
++ tp->pdev->bus->number)) {
++ tg3_flag_set(tp, ICH_WORKAROUND);
++ pci_dev_put(bridge);
++ break;
++ }
++ }
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5701) {
++ static struct tg3_dev_id {
++ u32 vendor;
++ u32 device;
++ } bridge_chipsets[] = {
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
++ { },
++ };
++ struct tg3_dev_id *pci_id = &bridge_chipsets[0];
++ struct pci_dev *bridge = NULL;
++
++ while (pci_id->vendor != 0) {
++ bridge = pci_get_device(pci_id->vendor,
++ pci_id->device,
++ bridge);
++ if (!bridge) {
++ pci_id++;
++ continue;
++ }
++ if (bridge->subordinate &&
++ (bridge->subordinate->number <=
++ tp->pdev->bus->number) &&
++ (bridge->subordinate->busn_res.end >=
++ tp->pdev->bus->number)) {
++ tg3_flag_set(tp, 5701_DMA_BUG);
++ pci_dev_put(bridge);
++ break;
++ }
++ }
++ }
++
++ /* The EPB bridge inside 5714, 5715, and 5780 cannot support
++ * DMA addresses > 40-bit. This bridge may have other additional
++ * 57xx devices behind it in some 4-port NIC designs for example.
++ * Any tg3 device found behind the bridge will also need the 40-bit
++ * DMA workaround.
++ */
++ if (tg3_flag(tp, 5780_CLASS)) {
++ tg3_flag_set(tp, 40BIT_DMA_BUG);
++ tp->msi_cap = tp->pdev->msi_cap;
++ } else {
++ struct pci_dev *bridge = NULL;
++
++ do {
++ bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
++ PCI_DEVICE_ID_SERVERWORKS_EPB,
++ bridge);
++ if (bridge && bridge->subordinate &&
++ (bridge->subordinate->number <=
++ tp->pdev->bus->number) &&
++ (bridge->subordinate->busn_res.end >=
++ tp->pdev->bus->number)) {
++ tg3_flag_set(tp, 40BIT_DMA_BUG);
++ pci_dev_put(bridge);
++ break;
++ }
++ } while (bridge);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
++ tg3_asic_rev(tp) == ASIC_REV_5714)
++ tp->pdev_peer = tg3_find_peer(tp);
++
++ /* Determine TSO capabilities */
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
++ ; /* Do nothing. HW bug. */
++ else if (tg3_flag(tp, 57765_PLUS))
++ tg3_flag_set(tp, HW_TSO_3);
++ else if (tg3_flag(tp, 5755_PLUS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5906)
++ tg3_flag_set(tp, HW_TSO_2);
++ else if (tg3_flag(tp, 5750_PLUS)) {
++ tg3_flag_set(tp, HW_TSO_1);
++ tg3_flag_set(tp, TSO_BUG);
++ if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
++ tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
++ tg3_flag_clear(tp, TSO_BUG);
++ } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
++ tg3_asic_rev(tp) != ASIC_REV_5701 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
++ tg3_flag_set(tp, FW_TSO);
++ tg3_flag_set(tp, TSO_BUG);
++ if (tg3_asic_rev(tp) == ASIC_REV_5705)
++ tp->fw_needed = FIRMWARE_TG3TSO5;
++ else
++ tp->fw_needed = FIRMWARE_TG3TSO;
++ }
++
++ /* Selectively allow TSO based on operating conditions */
++ if (tg3_flag(tp, HW_TSO_1) ||
++ tg3_flag(tp, HW_TSO_2) ||
++ tg3_flag(tp, HW_TSO_3) ||
++ tg3_flag(tp, FW_TSO)) {
++ /* For firmware TSO, assume ASF is disabled.
++ * We'll disable TSO later if we discover ASF
++ * is enabled in tg3_get_eeprom_hw_cfg().
++ */
++ tg3_flag_set(tp, TSO_CAPABLE);
++ } else {
++ tg3_flag_clear(tp, TSO_CAPABLE);
++ tg3_flag_clear(tp, TSO_BUG);
++ tp->fw_needed = NULL;
++ }
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
++ tp->fw_needed = FIRMWARE_TG3;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_57766)
++ tp->fw_needed = FIRMWARE_TG357766;
++
++ tp->irq_max = 1;
++
++ if (tg3_flag(tp, 5750_PLUS)) {
++ tg3_flag_set(tp, SUPPORT_MSI);
++ if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
++ tg3_chip_rev(tp) == CHIPREV_5750_BX ||
++ (tg3_asic_rev(tp) == ASIC_REV_5714 &&
++ tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
++ tp->pdev_peer == tp->pdev))
++ tg3_flag_clear(tp, SUPPORT_MSI);
++
++ if (tg3_flag(tp, 5755_PLUS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5906) {
++ tg3_flag_set(tp, 1SHOT_MSI);
++ }
++
++ if (tg3_flag(tp, 57765_PLUS)) {
++ tg3_flag_set(tp, SUPPORT_MSIX);
++ tp->irq_max = TG3_IRQ_MAX_VECS;
++ }
++ }
++
++ tp->txq_max = 1;
++ tp->rxq_max = 1;
++ if (tp->irq_max > 1) {
++ tp->rxq_max = TG3_RSS_MAX_NUM_QS;
++ tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720)
++ tp->txq_max = tp->irq_max - 1;
++ }
++
++ if (tg3_flag(tp, 5755_PLUS) ||
++ tg3_asic_rev(tp) == ASIC_REV_5906)
++ tg3_flag_set(tp, SHORT_DMA_BUG);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5719)
++ tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ tg3_flag_set(tp, LRG_PROD_RING_CAP);
++
++ if (tg3_flag(tp, 57765_PLUS) &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
++ tg3_flag_set(tp, USE_JUMBO_BDFLAG);
++
++ if (!tg3_flag(tp, 5705_PLUS) ||
++ tg3_flag(tp, 5780_CLASS) ||
++ tg3_flag(tp, USE_JUMBO_BDFLAG))
++ tg3_flag_set(tp, JUMBO_CAPABLE);
++
++ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
++ &pci_state_reg);
++
++ if (pci_is_pcie(tp->pdev)) {
++ u16 lnkctl;
++
++ tg3_flag_set(tp, PCI_EXPRESS);
++
++ pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
++ if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ tg3_flag_clear(tp, HW_TSO_2);
++ tg3_flag_clear(tp, TSO_CAPABLE);
++ }
++ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
++ tg3_asic_rev(tp) == ASIC_REV_5761 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
++ tg3_flag_set(tp, CLKREQ_BUG);
++ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
++ tg3_flag_set(tp, L1PLLPD_EN);
++ }
++ } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
++ /* BCM5785 devices are effectively PCIe devices, and should
++ * follow PCIe codepaths, but do not have a PCIe capabilities
++ * section.
++ */
++ tg3_flag_set(tp, PCI_EXPRESS);
++ } else if (!tg3_flag(tp, 5705_PLUS) ||
++ tg3_flag(tp, 5780_CLASS)) {
++ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
++ if (!tp->pcix_cap) {
++ dev_err(&tp->pdev->dev,
++ "Cannot find PCI-X capability, aborting\n");
++ return -EIO;
++ }
++
++ if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
++ tg3_flag_set(tp, PCIX_MODE);
++ }
++
++ /* If we have an AMD 762 or VIA K8T800 chipset, write
++ * reordering to the mailbox registers done by the host
++ * controller can cause major troubles. We read back from
++ * every mailbox register write to force the writes to be
++ * posted to the chip in order.
++ */
++ if (pci_dev_present(tg3_write_reorder_chipsets) &&
++ !tg3_flag(tp, PCI_EXPRESS))
++ tg3_flag_set(tp, MBOX_WRITE_REORDER);
++
++ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
++ &tp->pci_cacheline_sz);
++ pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
++ &tp->pci_lat_timer);
++ if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
++ tp->pci_lat_timer < 64) {
++ tp->pci_lat_timer = 64;
++ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
++ tp->pci_lat_timer);
++ }
++
++ /* Important! -- It is critical that the PCI-X hw workaround
++ * situation is decided before the first MMIO register access.
++ */
++ if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
++ /* 5700 BX chips need to have their TX producer index
++ * mailboxes written twice to workaround a bug.
++ */
++ tg3_flag_set(tp, TXD_MBOX_HWBUG);
++
++ /* If we are in PCI-X mode, enable register write workaround.
++ *
++ * The workaround is to use indirect register accesses
++ * for all chip writes not to mailbox registers.
++ */
++ if (tg3_flag(tp, PCIX_MODE)) {
++ u32 pm_reg;
++
++ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
++
++ /* The chip can have it's power management PCI config
++ * space registers clobbered due to this bug.
++ * So explicitly force the chip into D0 here.
++ */
++ pci_read_config_dword(tp->pdev,
++ tp->pdev->pm_cap + PCI_PM_CTRL,
++ &pm_reg);
++ pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
++ pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
++ pci_write_config_dword(tp->pdev,
++ tp->pdev->pm_cap + PCI_PM_CTRL,
++ pm_reg);
++
++ /* Also, force SERR#/PERR# in PCI command. */
++ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
++ pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
++ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
++ }
++ }
++
++ if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
++ tg3_flag_set(tp, PCI_HIGH_SPEED);
++ if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
++ tg3_flag_set(tp, PCI_32BIT);
++
++ /* Chip-specific fixup from Broadcom driver */
++ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
++ (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
++ pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
++ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
++ }
++
++ /* Default fast path register access methods */
++ tp->read32 = tg3_read32;
++ tp->write32 = tg3_write32;
++ tp->read32_mbox = tg3_read32;
++ tp->write32_mbox = tg3_write32;
++ tp->write32_tx_mbox = tg3_write32;
++ tp->write32_rx_mbox = tg3_write32;
++
++ /* Various workaround register access methods */
++ if (tg3_flag(tp, PCIX_TARGET_HWBUG))
++ tp->write32 = tg3_write_indirect_reg32;
++ else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
++ (tg3_flag(tp, PCI_EXPRESS) &&
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
++ /*
++ * Back to back register writes can cause problems on these
++ * chips, the workaround is to read back all reg writes
++ * except those to mailbox regs.
++ *
++ * See tg3_write_indirect_reg32().
++ */
++ tp->write32 = tg3_write_flush_reg32;
++ }
++
++ if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
++ tp->write32_tx_mbox = tg3_write32_tx_mbox;
++ if (tg3_flag(tp, MBOX_WRITE_REORDER))
++ tp->write32_rx_mbox = tg3_write_flush_reg32;
++ }
++
++ if (tg3_flag(tp, ICH_WORKAROUND)) {
++ tp->read32 = tg3_read_indirect_reg32;
++ tp->write32 = tg3_write_indirect_reg32;
++ tp->read32_mbox = tg3_read_indirect_mbox;
++ tp->write32_mbox = tg3_write_indirect_mbox;
++ tp->write32_tx_mbox = tg3_write_indirect_mbox;
++ tp->write32_rx_mbox = tg3_write_indirect_mbox;
++
++ iounmap(tp->regs);
++ tp->regs = NULL;
++
++ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
++ pci_cmd &= ~PCI_COMMAND_MEMORY;
++ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
++ }
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ tp->read32_mbox = tg3_read32_mbox_5906;
++ tp->write32_mbox = tg3_write32_mbox_5906;
++ tp->write32_tx_mbox = tg3_write32_mbox_5906;
++ tp->write32_rx_mbox = tg3_write32_mbox_5906;
++ }
++
++ if (tp->write32 == tg3_write_indirect_reg32 ||
++ (tg3_flag(tp, PCIX_MODE) &&
++ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701)))
++ tg3_flag_set(tp, SRAM_USE_CONFIG);
++
++ /* The memory arbiter has to be enabled in order for SRAM accesses
++ * to succeed. Normally on powerup the tg3 chip firmware will make
++ * sure it is enabled, but other entities such as system netboot
++ * code might disable it.
++ */
++ val = tr32(MEMARB_MODE);
++ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
++
++ tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
++ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
++ tg3_flag(tp, 5780_CLASS)) {
++ if (tg3_flag(tp, PCIX_MODE)) {
++ pci_read_config_dword(tp->pdev,
++ tp->pcix_cap + PCI_X_STATUS,
++ &val);
++ tp->pci_fn = val & 0x7;
++ }
++ } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720) {
++ tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
++ if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
++ val = tr32(TG3_CPMU_STATUS);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5717)
++ tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
++ else
++ tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
++ TG3_CPMU_STATUS_FSHFT_5719;
++ }
++
++ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
++ tp->write32_tx_mbox = tg3_write_flush_reg32;
++ tp->write32_rx_mbox = tg3_write_flush_reg32;
++ }
++
++ /* Get eeprom hw config before calling tg3_set_power_state().
++ * In particular, the TG3_FLAG_IS_NIC flag must be
++ * determined before calling tg3_set_power_state() so that
++ * we know whether or not to switch out of Vaux power.
++ * When the flag is set, it means that GPIO1 is used for eeprom
++ * write protect and also implies that it is a LOM where GPIOs
++ * are not used to switch power.
++ */
++ tg3_get_eeprom_hw_cfg(tp);
++
++ if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
++ tg3_flag_clear(tp, TSO_CAPABLE);
++ tg3_flag_clear(tp, TSO_BUG);
++ tp->fw_needed = NULL;
++ }
++
++ if (tg3_flag(tp, ENABLE_APE)) {
++ /* Allow reads and writes to the
++ * APE register and memory space.
++ */
++ pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
++ PCISTATE_ALLOW_APE_SHMEM_WR |
++ PCISTATE_ALLOW_APE_PSPACE_WR;
++ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
++ pci_state_reg);
++
++ tg3_ape_lock_init(tp);
++ }
++
++ /* Set up tp->grc_local_ctrl before calling
++ * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
++ * will bring 5700's external PHY out of reset.
++ * It is also used as eeprom write protect on LOMs.
++ */
++ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_flag(tp, EEPROM_WRITE_PROT))
++ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
++ GRC_LCLCTRL_GPIO_OUTPUT1);
++ /* Unused GPIO3 must be driven as output on 5752 because there
++ * are no pull-up resistors on unused GPIO pins.
++ */
++ else if (tg3_asic_rev(tp) == ASIC_REV_5752)
++ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
++ tg3_asic_rev(tp) == ASIC_REV_57780 ||
++ tg3_flag(tp, 57765_CLASS))
++ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
++
++ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
++ /* Turn off the debug UART. */
++ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
++ if (tg3_flag(tp, IS_NIC))
++ /* Keep VMain power. */
++ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
++ GRC_LCLCTRL_GPIO_OUTPUT0;
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5762)
++ tp->grc_local_ctrl |=
++ tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
++
++ /* Switch out of Vaux if it is a NIC */
++ tg3_pwrsrc_switch_to_vmain(tp);
++
++ /* Derive initial jumbo mode from MTU assigned in
++ * ether_setup() via the alloc_etherdev() call
++ */
++ if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
++ tg3_flag_set(tp, JUMBO_RING_ENABLE);
++
++ /* Determine WakeOnLan speed to use. */
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
++ tg3_flag_clear(tp, WOL_SPEED_100MB);
++ } else {
++ tg3_flag_set(tp, WOL_SPEED_100MB);
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5906)
++ tp->phy_flags |= TG3_PHYFLG_IS_FET;
++
++ /* A few boards don't want Ethernet@WireSpeed phy feature */
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ (tg3_asic_rev(tp) == ASIC_REV_5705 &&
++ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
++ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
++ (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
++ (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
++ tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
++
++ if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
++ tg3_chip_rev(tp) == CHIPREV_5704_AX)
++ tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
++ tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
++
++ if (tg3_flag(tp, 5705_PLUS) &&
++ !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
++ tg3_asic_rev(tp) != ASIC_REV_5785 &&
++ tg3_asic_rev(tp) != ASIC_REV_57780 &&
++ !tg3_flag(tp, 57765_PLUS)) {
++ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
++ tg3_asic_rev(tp) == ASIC_REV_5787 ||
++ tg3_asic_rev(tp) == ASIC_REV_5784 ||
++ tg3_asic_rev(tp) == ASIC_REV_5761) {
++ if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
++ tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
++ tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
++ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
++ tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
++ } else
++ tp->phy_flags |= TG3_PHYFLG_BER_BUG;
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
++ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
++ tp->phy_otp = tg3_read_otp_phycfg(tp);
++ if (tp->phy_otp == 0)
++ tp->phy_otp = TG3_OTP_DEFAULT;
++ }
++
++ if (tg3_flag(tp, CPMU_PRESENT))
++ tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
++ else
++ tp->mi_mode = MAC_MI_MODE_BASE;
++
++ tp->coalesce_mode = 0;
++ if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
++ tg3_chip_rev(tp) != CHIPREV_5700_BX)
++ tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
++
++ /* Set these bits to enable statistics workaround. */
++ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
++ tp->coalesce_mode |= HOSTCC_MODE_ATTN;
++ tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
++ }
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
++ tg3_asic_rev(tp) == ASIC_REV_57780)
++ tg3_flag_set(tp, USE_PHYLIB);
++
++ err = tg3_mdio_init(tp);
++ if (err)
++ return err;
++
++ /* Initialize data/descriptor byte/word swapping. */
++ val = tr32(GRC_MODE);
++ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
++ GRC_MODE_WORD_SWAP_B2HRX_DATA |
++ GRC_MODE_B2HRX_ENABLE |
++ GRC_MODE_HTX2B_ENABLE |
++ GRC_MODE_HOST_STACKUP);
++ else
++ val &= GRC_MODE_HOST_STACKUP;
++
++ tw32(GRC_MODE, val | tp->grc_mode);
++
++ tg3_switch_clocks(tp);
++
++ /* Clear this out for sanity. */
++ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
++
++ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
++ tw32(TG3PCI_REG_BASE_ADDR, 0);
++
++ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
++ &pci_state_reg);
++ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
++ !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
++ void __iomem *sram_base;
++
++ /* Write some dummy words into the SRAM status block
++ * area, see if it reads back correctly. If the return
++ * value is bad, force enable the PCIX workaround.
++ */
++ sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
++
++ writel(0x00000000, sram_base);
++ writel(0x00000000, sram_base + 4);
++ writel(0xffffffff, sram_base + 4);
++ if (readl(sram_base) != 0x00000000)
++ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
++ }
++ }
++
++ udelay(50);
++ tg3_nvram_init(tp);
++
++ /* If the device has an NVRAM, no need to load patch firmware */
++ if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
++ !tg3_flag(tp, NO_NVRAM))
++ tp->fw_needed = NULL;
++
++ grc_misc_cfg = tr32(GRC_MISC_CFG);
++ grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
++ (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
++ grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
++ tg3_flag_set(tp, IS_5788);
++
++ if (!tg3_flag(tp, IS_5788) &&
++ tg3_asic_rev(tp) != ASIC_REV_5700)
++ tg3_flag_set(tp, TAGGED_STATUS);
++ if (tg3_flag(tp, TAGGED_STATUS)) {
++ tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
++ HOSTCC_MODE_CLRTICK_TXBD);
++
++ tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
++ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
++ tp->misc_host_ctrl);
++ }
++
++ /* Preserve the APE MAC_MODE bits */
++ if (tg3_flag(tp, ENABLE_APE))
++ tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
++ else
++ tp->mac_mode = 0;
++
++ if (tg3_10_100_only_device(tp, ent))
++ tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
++
++ err = tg3_phy_probe(tp);
++ if (err) {
++ dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
++ /* ... but do not return immediately ... */
++ tg3_mdio_fini(tp);
++ }
++
++ tg3_read_vpd(tp);
++ tg3_read_fw_ver(tp);
++
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
++ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
++ } else {
++ if (tg3_asic_rev(tp) == ASIC_REV_5700)
++ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
++ else
++ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
++ }
++
++ /* 5700 {AX,BX} chips have a broken status block link
++ * change bit implementation, so we must use the
++ * status register in those cases.
++ */
++ if (tg3_asic_rev(tp) == ASIC_REV_5700)
++ tg3_flag_set(tp, USE_LINKCHG_REG);
++ else
++ tg3_flag_clear(tp, USE_LINKCHG_REG);
++
++ /* The led_ctrl is set during tg3_phy_probe, here we might
++ * have to force the link status polling mechanism based
++ * upon subsystem IDs.
++ */
++ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
++ tg3_asic_rev(tp) == ASIC_REV_5701 &&
++ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
++ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
++ tg3_flag_set(tp, USE_LINKCHG_REG);
++ }
++
++ /* For all SERDES we poll the MAC status register. */
++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
++ tg3_flag_set(tp, POLL_SERDES);
++ else
++ tg3_flag_clear(tp, POLL_SERDES);
++
++ if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
++ tg3_flag_set(tp, POLL_CPMU_LINK);
++
++ tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
++ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
++ if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
++ tg3_flag(tp, PCIX_MODE)) {
++ tp->rx_offset = NET_SKB_PAD;
++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
++ tp->rx_copy_thresh = ~(u16)0;
++#endif
++ }
++
++ tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
++ tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
++ tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
++
++ tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
++
++ /* Increment the rx prod index on the rx std ring by at most
++ * 8 for these chips to workaround hw errata.
++ */
++ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
++ tg3_asic_rev(tp) == ASIC_REV_5752 ||
++ tg3_asic_rev(tp) == ASIC_REV_5755)
++ tp->rx_std_max_post = 8;
++
++ if (tg3_flag(tp, ASPM_WORKAROUND))
++ tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
++ PCIE_PWR_MGMT_L1_THRESH_MSK;
++
++ return err;
++}
++
++#ifdef CONFIG_SPARC
++static int tg3_get_macaddr_sparc(struct tg3 *tp)
++{
++ struct net_device *dev = tp->dev;
++ struct pci_dev *pdev = tp->pdev;
++ struct device_node *dp = pci_device_to_OF_node(pdev);
++ const unsigned char *addr;
++ int len;
++
++ addr = of_get_property(dp, "local-mac-address", &len);
++ if (addr && len == ETH_ALEN) {
++ memcpy(dev->dev_addr, addr, ETH_ALEN);
++ return 0;
++ }
++ return -ENODEV;
++}
++
++static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
++{
++ struct net_device *dev = tp->dev;
++
++ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
++ return 0;
++}
++#endif
++
++static int tg3_get_device_address(struct tg3 *tp)
++{
++ struct net_device *dev = tp->dev;
++ u32 hi, lo, mac_offset;
++ int addr_ok = 0;
++ int err;
++
++#ifdef CONFIG_SPARC
++ if (!tg3_get_macaddr_sparc(tp))
++ return 0;
++#endif
++
++ if (tg3_flag(tp, IS_SSB_CORE)) {
++ err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
++ if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
++ return 0;
++ }
++
++ mac_offset = 0x7c;
++ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
++ tg3_flag(tp, 5780_CLASS)) {
++ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
++ mac_offset = 0xcc;
++ if (tg3_nvram_lock(tp))
++ tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
++ else
++ tg3_nvram_unlock(tp);
++ } else if (tg3_flag(tp, 5717_PLUS)) {
++ if (tp->pci_fn & 1)
++ mac_offset = 0xcc;
++ if (tp->pci_fn > 1)
++ mac_offset += 0x18c;
++ } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
++ mac_offset = 0x10;
++
++ /* First try to get it from MAC address mailbox. */
++ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
++ if ((hi >> 16) == 0x484b) {
++ dev->dev_addr[0] = (hi >> 8) & 0xff;
++ dev->dev_addr[1] = (hi >> 0) & 0xff;
++
++ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
++ dev->dev_addr[2] = (lo >> 24) & 0xff;
++ dev->dev_addr[3] = (lo >> 16) & 0xff;
++ dev->dev_addr[4] = (lo >> 8) & 0xff;
++ dev->dev_addr[5] = (lo >> 0) & 0xff;
++
++ /* Some old bootcode may report a 0 MAC address in SRAM */
++ addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
++ }
++ if (!addr_ok) {
++ /* Next, try NVRAM. */
++ if (!tg3_flag(tp, NO_NVRAM) &&
++ !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
++ !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
++ memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
++ memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
++ }
++ /* Finally just fetch it out of the MAC control regs. */
++ else {
++ hi = tr32(MAC_ADDR_0_HIGH);
++ lo = tr32(MAC_ADDR_0_LOW);
++
++ dev->dev_addr[5] = lo & 0xff;
++ dev->dev_addr[4] = (lo >> 8) & 0xff;
++ dev->dev_addr[3] = (lo >> 16) & 0xff;
++ dev->dev_addr[2] = (lo >> 24) & 0xff;
++ dev->dev_addr[1] = hi & 0xff;
++ dev->dev_addr[0] = (hi >> 8) & 0xff;
++ }
++ }
++
++ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
++#ifdef CONFIG_SPARC
++ if (!tg3_get_default_macaddr_sparc(tp))
++ return 0;
++#endif
++ return -EINVAL;
++ }
++ return 0;
++}
++
++#define BOUNDARY_SINGLE_CACHELINE 1
++#define BOUNDARY_MULTI_CACHELINE 2
++
++static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
++{
++ int cacheline_size;
++ u8 byte;
++ int goal;
++
++ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
++ if (byte == 0)
++ cacheline_size = 1024;
++ else
++ cacheline_size = (int) byte * 4;
++
++ /* On 5703 and later chips, the boundary bits have no
++ * effect.
++ */
++ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
++ tg3_asic_rev(tp) != ASIC_REV_5701 &&
++ !tg3_flag(tp, PCI_EXPRESS))
++ goto out;
++
++#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
++ goal = BOUNDARY_MULTI_CACHELINE;
++#else
++#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
++ goal = BOUNDARY_SINGLE_CACHELINE;
++#else
++ goal = 0;
++#endif
++#endif
++
++ if (tg3_flag(tp, 57765_PLUS)) {
++ val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
++ goto out;
++ }
++
++ if (!goal)
++ goto out;
++
++ /* PCI controllers on most RISC systems tend to disconnect
++ * when a device tries to burst across a cache-line boundary.
++ * Therefore, letting tg3 do so just wastes PCI bandwidth.
++ *
++ * Unfortunately, for PCI-E there are only limited
++ * write-side controls for this, and thus for reads
++ * we will still get the disconnects. We'll also waste
++ * these PCI cycles for both read and write for chips
++ * other than 5700 and 5701 which do not implement the
++ * boundary bits.
++ */
++ if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
++ switch (cacheline_size) {
++ case 16:
++ case 32:
++ case 64:
++ case 128:
++ if (goal == BOUNDARY_SINGLE_CACHELINE) {
++ val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
++ DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
++ } else {
++ val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
++ DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
++ }
++ break;
++
++ case 256:
++ val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
++ DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
++ break;
++
++ default:
++ val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
++ DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
++ break;
++ }
++ } else if (tg3_flag(tp, PCI_EXPRESS)) {
++ switch (cacheline_size) {
++ case 16:
++ case 32:
++ case 64:
++ if (goal == BOUNDARY_SINGLE_CACHELINE) {
++ val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
++ val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
++ break;
++ }
++ /* fallthrough */
++ case 128:
++ default:
++ val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
++ val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
++ break;
++ }
++ } else {
++ switch (cacheline_size) {
++ case 16:
++ if (goal == BOUNDARY_SINGLE_CACHELINE) {
++ val |= (DMA_RWCTRL_READ_BNDRY_16 |
++ DMA_RWCTRL_WRITE_BNDRY_16);
++ break;
++ }
++ /* fallthrough */
++ case 32:
++ if (goal == BOUNDARY_SINGLE_CACHELINE) {
++ val |= (DMA_RWCTRL_READ_BNDRY_32 |
++ DMA_RWCTRL_WRITE_BNDRY_32);
++ break;
++ }
++ /* fallthrough */
++ case 64:
++ if (goal == BOUNDARY_SINGLE_CACHELINE) {
++ val |= (DMA_RWCTRL_READ_BNDRY_64 |
++ DMA_RWCTRL_WRITE_BNDRY_64);
++ break;
++ }
++ /* fallthrough */
++ case 128:
++ if (goal == BOUNDARY_SINGLE_CACHELINE) {
++ val |= (DMA_RWCTRL_READ_BNDRY_128 |
++ DMA_RWCTRL_WRITE_BNDRY_128);
++ break;
++ }
++ /* fallthrough */
++ case 256:
++ val |= (DMA_RWCTRL_READ_BNDRY_256 |
++ DMA_RWCTRL_WRITE_BNDRY_256);
++ break;
++ case 512:
++ val |= (DMA_RWCTRL_READ_BNDRY_512 |
++ DMA_RWCTRL_WRITE_BNDRY_512);
++ break;
++ case 1024:
++ default:
++ val |= (DMA_RWCTRL_READ_BNDRY_1024 |
++ DMA_RWCTRL_WRITE_BNDRY_1024);
++ break;
++ }
++ }
++
++out:
++ return val;
++}
++
++static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
++ int size, bool to_device)
++{
++ struct tg3_internal_buffer_desc test_desc;
++ u32 sram_dma_descs;
++ int i, ret;
++
++ sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
++
++ tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
++ tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
++ tw32(RDMAC_STATUS, 0);
++ tw32(WDMAC_STATUS, 0);
++
++ tw32(BUFMGR_MODE, 0);
++ tw32(FTQ_RESET, 0);
++
++ test_desc.addr_hi = ((u64) buf_dma) >> 32;
++ test_desc.addr_lo = buf_dma & 0xffffffff;
++ test_desc.nic_mbuf = 0x00002100;
++ test_desc.len = size;
++
++ /*
++ * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
++ * the *second* time the tg3 driver was getting loaded after an
++ * initial scan.
++ *
++ * Broadcom tells me:
++ * ...the DMA engine is connected to the GRC block and a DMA
++ * reset may affect the GRC block in some unpredictable way...
++ * The behavior of resets to individual blocks has not been tested.
++ *
++ * Broadcom noted the GRC reset will also reset all sub-components.
++ */
++ if (to_device) {
++ test_desc.cqid_sqid = (13 << 8) | 2;
++
++ tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
++ udelay(40);
++ } else {
++ test_desc.cqid_sqid = (16 << 8) | 7;
++
++ tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
++ udelay(40);
++ }
++ test_desc.flags = 0x00000005;
++
++ for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
++ u32 val;
++
++ val = *(((u32 *)&test_desc) + i);
++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
++ sram_dma_descs + (i * sizeof(u32)));
++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
++ }
++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
++
++ if (to_device)
++ tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
++ else
++ tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
++
++ ret = -ENODEV;
++ for (i = 0; i < 40; i++) {
++ u32 val;
++
++ if (to_device)
++ val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
++ else
++ val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
++ if ((val & 0xffff) == sram_dma_descs) {
++ ret = 0;
++ break;
++ }
++
++ udelay(100);
++ }
++
++ return ret;
++}
++
++#define TEST_BUFFER_SIZE 0x2000
++
++static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
++ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
++ { },
++};
++
++static int tg3_test_dma(struct tg3 *tp)
++{
++ dma_addr_t buf_dma;
++ u32 *buf, saved_dma_rwctrl;
++ int ret = 0;
++
++ buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
++ &buf_dma, GFP_KERNEL);
++ if (!buf) {
++ ret = -ENOMEM;
++ goto out_nofree;
++ }
++
++ tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
++ (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
++
++ tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
++
++ if (tg3_flag(tp, 57765_PLUS))
++ goto out;
++
++ if (tg3_flag(tp, PCI_EXPRESS)) {
++ /* DMA read watermark not used on PCIE */
++ tp->dma_rwctrl |= 0x00180000;
++ } else if (!tg3_flag(tp, PCIX_MODE)) {
++ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
++ tg3_asic_rev(tp) == ASIC_REV_5750)
++ tp->dma_rwctrl |= 0x003f0000;
++ else
++ tp->dma_rwctrl |= 0x003f000f;
++ } else {
++ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
++ tg3_asic_rev(tp) == ASIC_REV_5704) {
++ u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
++ u32 read_water = 0x7;
++
++ /* If the 5704 is behind the EPB bridge, we can
++ * do the less restrictive ONE_DMA workaround for
++ * better performance.
++ */
++ if (tg3_flag(tp, 40BIT_DMA_BUG) &&
++ tg3_asic_rev(tp) == ASIC_REV_5704)
++ tp->dma_rwctrl |= 0x8000;
++ else if (ccval == 0x6 || ccval == 0x7)
++ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5703)
++ read_water = 4;
++ /* Set bit 23 to enable PCIX hw bug fix */
++ tp->dma_rwctrl |=
++ (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
++ (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
++ (1 << 23);
++ } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
++ /* 5780 always in PCIX mode */
++ tp->dma_rwctrl |= 0x00144000;
++ } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
++ /* 5714 always in PCIX mode */
++ tp->dma_rwctrl |= 0x00148000;
++ } else {
++ tp->dma_rwctrl |= 0x001b000f;
++ }
++ }
++ if (tg3_flag(tp, ONE_DMA_AT_ONCE))
++ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
++ tg3_asic_rev(tp) == ASIC_REV_5704)
++ tp->dma_rwctrl &= 0xfffffff0;
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
++ tg3_asic_rev(tp) == ASIC_REV_5701) {
++ /* Remove this if it causes problems for some boards. */
++ tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
++
++ /* On 5700/5701 chips, we need to set this bit.
++ * Otherwise the chip will issue cacheline transactions
++ * to streamable DMA memory with not all the byte
++ * enables turned on. This is an error on several
++ * RISC PCI controllers, in particular sparc64.
++ *
++ * On 5703/5704 chips, this bit has been reassigned
++ * a different meaning. In particular, it is used
++ * on those chips to enable a PCI-X workaround.
++ */
++ tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
++ }
++
++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
++
++
++ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
++ tg3_asic_rev(tp) != ASIC_REV_5701)
++ goto out;
++
++ /* It is best to perform DMA test with maximum write burst size
++ * to expose the 5700/5701 write DMA bug.
++ */
++ saved_dma_rwctrl = tp->dma_rwctrl;
++ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
++
++ while (1) {
++ u32 *p = buf, i;
++
++ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
++ p[i] = i;
++
++ /* Send the buffer to the chip. */
++ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
++ if (ret) {
++ dev_err(&tp->pdev->dev,
++ "%s: Buffer write failed. err = %d\n",
++ __func__, ret);
++ break;
++ }
++
++ /* Now read it back. */
++ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
++ if (ret) {
++ dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
++ "err = %d\n", __func__, ret);
++ break;
++ }
++
++ /* Verify it. */
++ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
++ if (p[i] == i)
++ continue;
++
++ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
++ DMA_RWCTRL_WRITE_BNDRY_16) {
++ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
++ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
++ break;
++ } else {
++ dev_err(&tp->pdev->dev,
++ "%s: Buffer corrupted on read back! "
++ "(%d != %d)\n", __func__, p[i], i);
++ ret = -ENODEV;
++ goto out;
++ }
++ }
++
++ if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
++ /* Success. */
++ ret = 0;
++ break;
++ }
++ }
++ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
++ DMA_RWCTRL_WRITE_BNDRY_16) {
++ /* DMA test passed without adjusting DMA boundary,
++ * now look for chipsets that are known to expose the
++ * DMA bug without failing the test.
++ */
++ if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
++ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
++ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
++ } else {
++ /* Safe to use the calculated DMA boundary. */
++ tp->dma_rwctrl = saved_dma_rwctrl;
++ }
++
++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
++ }
++
++out:
++ dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
++out_nofree:
++ return ret;
++}
++
++static void tg3_init_bufmgr_config(struct tg3 *tp)
++{
++ if (tg3_flag(tp, 57765_PLUS)) {
++ tp->bufmgr_config.mbuf_read_dma_low_water =
++ DEFAULT_MB_RDMA_LOW_WATER_5705;
++ tp->bufmgr_config.mbuf_mac_rx_low_water =
++ DEFAULT_MB_MACRX_LOW_WATER_57765;
++ tp->bufmgr_config.mbuf_high_water =
++ DEFAULT_MB_HIGH_WATER_57765;
++
++ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
++ DEFAULT_MB_RDMA_LOW_WATER_5705;
++ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
++ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
++ tp->bufmgr_config.mbuf_high_water_jumbo =
++ DEFAULT_MB_HIGH_WATER_JUMBO_57765;
++ } else if (tg3_flag(tp, 5705_PLUS)) {
++ tp->bufmgr_config.mbuf_read_dma_low_water =
++ DEFAULT_MB_RDMA_LOW_WATER_5705;
++ tp->bufmgr_config.mbuf_mac_rx_low_water =
++ DEFAULT_MB_MACRX_LOW_WATER_5705;
++ tp->bufmgr_config.mbuf_high_water =
++ DEFAULT_MB_HIGH_WATER_5705;
++ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
++ tp->bufmgr_config.mbuf_mac_rx_low_water =
++ DEFAULT_MB_MACRX_LOW_WATER_5906;
++ tp->bufmgr_config.mbuf_high_water =
++ DEFAULT_MB_HIGH_WATER_5906;
++ }
++
++ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
++ DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
++ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
++ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
++ tp->bufmgr_config.mbuf_high_water_jumbo =
++ DEFAULT_MB_HIGH_WATER_JUMBO_5780;
++ } else {
++ tp->bufmgr_config.mbuf_read_dma_low_water =
++ DEFAULT_MB_RDMA_LOW_WATER;
++ tp->bufmgr_config.mbuf_mac_rx_low_water =
++ DEFAULT_MB_MACRX_LOW_WATER;
++ tp->bufmgr_config.mbuf_high_water =
++ DEFAULT_MB_HIGH_WATER;
++
++ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
++ DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
++ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
++ DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
++ tp->bufmgr_config.mbuf_high_water_jumbo =
++ DEFAULT_MB_HIGH_WATER_JUMBO;
++ }
++
++ tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
++ tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
++}
++
++static char *tg3_phy_string(struct tg3 *tp)
++{
++ switch (tp->phy_id & TG3_PHY_ID_MASK) {
++ case TG3_PHY_ID_BCM5400: return "5400";
++ case TG3_PHY_ID_BCM5401: return "5401";
++ case TG3_PHY_ID_BCM5411: return "5411";
++ case TG3_PHY_ID_BCM5701: return "5701";
++ case TG3_PHY_ID_BCM5703: return "5703";
++ case TG3_PHY_ID_BCM5704: return "5704";
++ case TG3_PHY_ID_BCM5705: return "5705";
++ case TG3_PHY_ID_BCM5750: return "5750";
++ case TG3_PHY_ID_BCM5752: return "5752";
++ case TG3_PHY_ID_BCM5714: return "5714";
++ case TG3_PHY_ID_BCM5780: return "5780";
++ case TG3_PHY_ID_BCM5755: return "5755";
++ case TG3_PHY_ID_BCM5787: return "5787";
++ case TG3_PHY_ID_BCM5784: return "5784";
++ case TG3_PHY_ID_BCM5756: return "5722/5756";
++ case TG3_PHY_ID_BCM5906: return "5906";
++ case TG3_PHY_ID_BCM5761: return "5761";
++ case TG3_PHY_ID_BCM5718C: return "5718C";
++ case TG3_PHY_ID_BCM5718S: return "5718S";
++ case TG3_PHY_ID_BCM57765: return "57765";
++ case TG3_PHY_ID_BCM5719C: return "5719C";
++ case TG3_PHY_ID_BCM5720C: return "5720C";
++ case TG3_PHY_ID_BCM5762: return "5762C";
++ case TG3_PHY_ID_BCM8002: return "8002/serdes";
++ case 0: return "serdes";
++ default: return "unknown";
++ }
++}
++
++static char *tg3_bus_string(struct tg3 *tp, char *str)
++{
++ if (tg3_flag(tp, PCI_EXPRESS)) {
++ strcpy(str, "PCI Express");
++ return str;
++ } else if (tg3_flag(tp, PCIX_MODE)) {
++ u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
++
++ strcpy(str, "PCIX:");
++
++ if ((clock_ctrl == 7) ||
++ ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
++ GRC_MISC_CFG_BOARD_ID_5704CIOBE))
++ strcat(str, "133MHz");
++ else if (clock_ctrl == 0)
++ strcat(str, "33MHz");
++ else if (clock_ctrl == 2)
++ strcat(str, "50MHz");
++ else if (clock_ctrl == 4)
++ strcat(str, "66MHz");
++ else if (clock_ctrl == 6)
++ strcat(str, "100MHz");
++ } else {
++ strcpy(str, "PCI:");
++ if (tg3_flag(tp, PCI_HIGH_SPEED))
++ strcat(str, "66MHz");
++ else
++ strcat(str, "33MHz");
++ }
++ if (tg3_flag(tp, PCI_32BIT))
++ strcat(str, ":32-bit");
++ else
++ strcat(str, ":64-bit");
++ return str;
++}
++
++static void tg3_init_coal(struct tg3 *tp)
++{
++ struct ethtool_coalesce *ec = &tp->coal;
++
++ memset(ec, 0, sizeof(*ec));
++ ec->cmd = ETHTOOL_GCOALESCE;
++ ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
++ ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
++ ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
++ ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
++ ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
++ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
++ ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
++ ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
++ ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
++
++ if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
++ HOSTCC_MODE_CLRTICK_TXBD)) {
++ ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
++ ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
++ ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
++ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
++ }
++
++ if (tg3_flag(tp, 5705_PLUS)) {
++ ec->rx_coalesce_usecs_irq = 0;
++ ec->tx_coalesce_usecs_irq = 0;
++ ec->stats_block_coalesce_usecs = 0;
++ }
++}
++
++static int tg3_init_one(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ struct net_device *dev;
++ struct tg3 *tp;
++ int i, err;
++ u32 sndmbx, rcvmbx, intmbx;
++ char str[40];
++ u64 dma_mask, persist_dma_mask;
++ netdev_features_t features = 0;
++
++ printk_once(KERN_INFO "%s\n", version);
++
++ err = pci_enable_device(pdev);
++ if (err) {
++ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
++ return err;
++ }
++
++ err = pci_request_regions(pdev, DRV_MODULE_NAME);
++ if (err) {
++ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
++ goto err_out_disable_pdev;
++ }
++
++ pci_set_master(pdev);
++
++ dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
++ if (!dev) {
++ err = -ENOMEM;
++ goto err_out_free_res;
++ }
++
++ SET_NETDEV_DEV(dev, &pdev->dev);
++
++ tp = netdev_priv(dev);
++ tp->pdev = pdev;
++ tp->dev = dev;
++ tp->rx_mode = TG3_DEF_RX_MODE;
++ tp->tx_mode = TG3_DEF_TX_MODE;
++ tp->irq_sync = 1;
++
++ if (tg3_debug > 0)
++ tp->msg_enable = tg3_debug;
++ else
++ tp->msg_enable = TG3_DEF_MSG_ENABLE;
++
++ if (pdev_is_ssb_gige_core(pdev)) {
++ tg3_flag_set(tp, IS_SSB_CORE);
++ if (ssb_gige_must_flush_posted_writes(pdev))
++ tg3_flag_set(tp, FLUSH_POSTED_WRITES);
++ if (ssb_gige_one_dma_at_once(pdev))
++ tg3_flag_set(tp, ONE_DMA_AT_ONCE);
++ if (ssb_gige_have_roboswitch(pdev)) {
++ tg3_flag_set(tp, USE_PHYLIB);
++ tg3_flag_set(tp, ROBOSWITCH);
++ }
++ if (ssb_gige_is_rgmii(pdev))
++ tg3_flag_set(tp, RGMII_MODE);
++ }
++
++ /* The word/byte swap controls here control register access byte
++ * swapping. DMA data byte swapping is controlled in the GRC_MODE
++ * setting below.
++ */
++ tp->misc_host_ctrl =
++ MISC_HOST_CTRL_MASK_PCI_INT |
++ MISC_HOST_CTRL_WORD_SWAP |
++ MISC_HOST_CTRL_INDIR_ACCESS |
++ MISC_HOST_CTRL_PCISTATE_RW;
++
++ /* The NONFRM (non-frame) byte/word swap controls take effect
++ * on descriptor entries, anything which isn't packet data.
++ *
++ * The StrongARM chips on the board (one for tx, one for rx)
++ * are running in big-endian mode.
++ */
++ tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
++ GRC_MODE_WSWAP_NONFRM_DATA);
++#ifdef __BIG_ENDIAN
++ tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
++#endif
++ spin_lock_init(&tp->lock);
++ spin_lock_init(&tp->indirect_lock);
++ INIT_WORK(&tp->reset_task, tg3_reset_task);
++
++ tp->regs = pci_ioremap_bar(pdev, BAR_0);
++ if (!tp->regs) {
++ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
++ err = -ENOMEM;
++ goto err_out_free_dev;
++ }
++
++ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
++ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
++ tg3_flag_set(tp, ENABLE_APE);
++ tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
++ if (!tp->aperegs) {
++ dev_err(&pdev->dev,
++ "Cannot map APE registers, aborting\n");
++ err = -ENOMEM;
++ goto err_out_iounmap;
++ }
++ }
++
++ tp->rx_pending = TG3_DEF_RX_RING_PENDING;
++ tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
++
++ dev->ethtool_ops = &tg3_ethtool_ops;
++ dev->watchdog_timeo = TG3_TX_TIMEOUT;
++ dev->netdev_ops = &tg3_netdev_ops;
++ dev->irq = pdev->irq;
++
++ err = tg3_get_invariants(tp, ent);
++ if (err) {
++ dev_err(&pdev->dev,
++ "Problem fetching invariants of chip, aborting\n");
++ goto err_out_apeunmap;
++ }
++
++ /* The EPB bridge inside 5714, 5715, and 5780 and any
++ * device behind the EPB cannot support DMA addresses > 40-bit.
++ * On 64-bit systems with IOMMU, use 40-bit dma_mask.
++ * On 64-bit systems without IOMMU, use 64-bit dma_mask and
++ * do DMA address check in tg3_start_xmit().
++ */
++ if (tg3_flag(tp, IS_5788))
++ persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
++ else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
++ persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
++#ifdef CONFIG_HIGHMEM
++ dma_mask = DMA_BIT_MASK(64);
++#endif
++ } else
++ persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
++
++ /* Configure DMA attributes. */
++ if (dma_mask > DMA_BIT_MASK(32)) {
++ err = pci_set_dma_mask(pdev, dma_mask);
++ if (!err) {
++ features |= NETIF_F_HIGHDMA;
++ err = pci_set_consistent_dma_mask(pdev,
++ persist_dma_mask);
++ if (err < 0) {
++ dev_err(&pdev->dev, "Unable to obtain 64 bit "
++ "DMA for consistent allocations\n");
++ goto err_out_apeunmap;
++ }
++ }
++ }
++ if (err || dma_mask == DMA_BIT_MASK(32)) {
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err) {
++ dev_err(&pdev->dev,
++ "No usable DMA configuration, aborting\n");
++ goto err_out_apeunmap;
++ }
++ }
++
++ tg3_init_bufmgr_config(tp);
++
++ /* 5700 B0 chips do not support checksumming correctly due
++ * to hardware bugs.
++ */
++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
++ features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
++
++ if (tg3_flag(tp, 5755_PLUS))
++ features |= NETIF_F_IPV6_CSUM;
++ }
++
++ /* TSO is on by default on chips that support hardware TSO.
++ * Firmware TSO on older chips gives lower performance, so it
++ * is off by default, but can be enabled using ethtool.
++ */
++ if ((tg3_flag(tp, HW_TSO_1) ||
++ tg3_flag(tp, HW_TSO_2) ||
++ tg3_flag(tp, HW_TSO_3)) &&
++ (features & NETIF_F_IP_CSUM))
++ features |= NETIF_F_TSO;
++ if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
++ if (features & NETIF_F_IPV6_CSUM)
++ features |= NETIF_F_TSO6;
++ if (tg3_flag(tp, HW_TSO_3) ||
++ tg3_asic_rev(tp) == ASIC_REV_5761 ||
++ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
++ tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
++ tg3_asic_rev(tp) == ASIC_REV_5785 ||
++ tg3_asic_rev(tp) == ASIC_REV_57780)
++ features |= NETIF_F_TSO_ECN;
++ }
++
++ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_RX;
++ dev->vlan_features |= features;
++
++ /*
++ * Add loopback capability only for a subset of devices that support
++ * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
++ * loopback for the remaining devices.
++ */
++ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
++ !tg3_flag(tp, CPMU_PRESENT))
++ /* Add the loopback capability */
++ features |= NETIF_F_LOOPBACK;
++
++ dev->hw_features |= features;
++ dev->priv_flags |= IFF_UNICAST_FLT;
++
++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
++ !tg3_flag(tp, TSO_CAPABLE) &&
++ !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
++ tg3_flag_set(tp, MAX_RXPEND_64);
++ tp->rx_pending = 63;
++ }
++
++ err = tg3_get_device_address(tp);
++ if (err) {
++ dev_err(&pdev->dev,
++ "Could not obtain valid ethernet address, aborting\n");
++ goto err_out_apeunmap;
++ }
++
++ /*
++ * Reset chip in case UNDI or EFI driver did not shutdown
++ * DMA self test will enable WDMAC and we'll see (spurious)
++ * pending DMA on the PCI bus at that point.
++ */
++ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
++ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ }
++
++ err = tg3_test_dma(tp);
++ if (err) {
++ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
++ goto err_out_apeunmap;
++ }
++
++ intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
++ rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
++ sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
++ for (i = 0; i < tp->irq_max; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ tnapi->tp = tp;
++ tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
++
++ tnapi->int_mbox = intmbx;
++ if (i <= 4)
++ intmbx += 0x8;
++ else
++ intmbx += 0x4;
++
++ tnapi->consmbox = rcvmbx;
++ tnapi->prodmbox = sndmbx;
++
++ if (i)
++ tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
++ else
++ tnapi->coal_now = HOSTCC_MODE_NOW;
++
++ if (!tg3_flag(tp, SUPPORT_MSIX))
++ break;
++
++ /*
++ * If we support MSIX, we'll be using RSS. If we're using
++ * RSS, the first vector only handles link interrupts and the
++ * remaining vectors handle rx and tx interrupts. Reuse the
++ * mailbox values for the next iteration. The values we setup
++ * above are still useful for the single vectored mode.
++ */
++ if (!i)
++ continue;
++
++ rcvmbx += 0x8;
++
++ if (sndmbx & 0x4)
++ sndmbx -= 0x4;
++ else
++ sndmbx += 0xc;
++ }
++
++ tg3_init_coal(tp);
++
++ pci_set_drvdata(pdev, dev);
++
++ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762)
++ tg3_flag_set(tp, PTP_CAPABLE);
++
++ tg3_timer_init(tp);
++
++ tg3_carrier_off(tp);
++
++ err = register_netdev(dev);
++ if (err) {
++ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
++ goto err_out_apeunmap;
++ }
++
++ netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
++ tp->board_part_number,
++ tg3_chip_rev_id(tp),
++ tg3_bus_string(tp, str),
++ dev->dev_addr);
++
++ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
++ struct phy_device *phydev;
++ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++ netdev_info(dev,
++ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
++ phydev->drv->name, dev_name(&phydev->dev));
++ } else {
++ char *ethtype;
++
++ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
++ ethtype = "10/100Base-TX";
++ else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
++ ethtype = "1000Base-SX";
++ else
++ ethtype = "10/100/1000Base-T";
++
++ netdev_info(dev, "attached PHY is %s (%s Ethernet) "
++ "(WireSpeed[%d], EEE[%d])\n",
++ tg3_phy_string(tp), ethtype,
++ (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
++ (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
++ }
++
++ netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
++ (dev->features & NETIF_F_RXCSUM) != 0,
++ tg3_flag(tp, USE_LINKCHG_REG) != 0,
++ (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
++ tg3_flag(tp, ENABLE_ASF) != 0,
++ tg3_flag(tp, TSO_CAPABLE) != 0);
++ netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
++ tp->dma_rwctrl,
++ pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
++ ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
++
++ pci_save_state(pdev);
++
++ return 0;
++
++err_out_apeunmap:
++ if (tp->aperegs) {
++ iounmap(tp->aperegs);
++ tp->aperegs = NULL;
++ }
++
++err_out_iounmap:
++ if (tp->regs) {
++ iounmap(tp->regs);
++ tp->regs = NULL;
++ }
++
++err_out_free_dev:
++ free_netdev(dev);
++
++err_out_free_res:
++ pci_release_regions(pdev);
++
++err_out_disable_pdev:
++ if (pci_is_enabled(pdev))
++ pci_disable_device(pdev);
++ return err;
++}
++
++static void tg3_remove_one(struct pci_dev *pdev)
++{
++ struct net_device *dev = pci_get_drvdata(pdev);
++
++ if (dev) {
++ struct tg3 *tp = netdev_priv(dev);
++
++ release_firmware(tp->fw);
++
++ tg3_reset_task_cancel(tp);
++
++ if (tg3_flag(tp, USE_PHYLIB)) {
++ tg3_phy_fini(tp);
++ tg3_mdio_fini(tp);
++ }
++
++ unregister_netdev(dev);
++ if (tp->aperegs) {
++ iounmap(tp->aperegs);
++ tp->aperegs = NULL;
++ }
++ if (tp->regs) {
++ iounmap(tp->regs);
++ tp->regs = NULL;
++ }
++ free_netdev(dev);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ }
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int tg3_suspend(struct device *device)
++{
++ struct pci_dev *pdev = to_pci_dev(device);
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct tg3 *tp = netdev_priv(dev);
++ int err = 0;
++
++ rtnl_lock();
++
++ if (!netif_running(dev))
++ goto unlock;
++
++ tg3_reset_task_cancel(tp);
++ tg3_phy_stop(tp);
++ tg3_netif_stop(tp);
++
++ tg3_timer_stop(tp);
++
++ tg3_full_lock(tp, 1);
++ tg3_disable_ints(tp);
++ tg3_full_unlock(tp);
++
++ netif_device_detach(dev);
++
++ tg3_full_lock(tp, 0);
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ tg3_flag_clear(tp, INIT_COMPLETE);
++ tg3_full_unlock(tp);
++
++ err = tg3_power_down_prepare(tp);
++ if (err) {
++ int err2;
++
++ tg3_full_lock(tp, 0);
++
++ tg3_flag_set(tp, INIT_COMPLETE);
++ err2 = tg3_restart_hw(tp, true);
++ if (err2)
++ goto out;
++
++ tg3_timer_start(tp);
++
++ netif_device_attach(dev);
++ tg3_netif_start(tp);
++
++out:
++ tg3_full_unlock(tp);
++
++ if (!err2)
++ tg3_phy_start(tp);
++ }
++
++unlock:
++ rtnl_unlock();
++ return err;
++}
++
++static int tg3_resume(struct device *device)
++{
++ struct pci_dev *pdev = to_pci_dev(device);
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct tg3 *tp = netdev_priv(dev);
++ int err = 0;
++
++ rtnl_lock();
++
++ if (!netif_running(dev))
++ goto unlock;
++
++ netif_device_attach(dev);
++
++ tg3_full_lock(tp, 0);
++
++ tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
++
++ tg3_flag_set(tp, INIT_COMPLETE);
++ err = tg3_restart_hw(tp,
++ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
++ if (err)
++ goto out;
++
++ tg3_timer_start(tp);
++
++ tg3_netif_start(tp);
++
++out:
++ tg3_full_unlock(tp);
++
++ if (!err)
++ tg3_phy_start(tp);
++
++unlock:
++ rtnl_unlock();
++ return err;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
++
++static void tg3_shutdown(struct pci_dev *pdev)
++{
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct tg3 *tp = netdev_priv(dev);
++
++ rtnl_lock();
++ netif_device_detach(dev);
++
++ if (netif_running(dev))
++ dev_close(dev);
++
++ if (system_state == SYSTEM_POWER_OFF)
++ tg3_power_down(tp);
++
++ rtnl_unlock();
++}
++
++/**
++ * tg3_io_error_detected - called when PCI error is detected
++ * @pdev: Pointer to PCI device
++ * @state: The current pci connection state
++ *
++ * This function is called after a PCI bus error affecting
++ * this device has been detected.
++ */
++static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
++ pci_channel_state_t state)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct tg3 *tp = netdev_priv(netdev);
++ pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
++
++ netdev_info(netdev, "PCI I/O error detected\n");
++
++ rtnl_lock();
++
++ /* We probably don't have netdev yet */
++ if (!netdev || !netif_running(netdev))
++ goto done;
++
++ tg3_phy_stop(tp);
++
++ tg3_netif_stop(tp);
++
++ tg3_timer_stop(tp);
++
++ /* Want to make sure that the reset task doesn't run */
++ tg3_reset_task_cancel(tp);
++
++ netif_device_detach(netdev);
++
++ /* Clean up software state, even if MMIO is blocked */
++ tg3_full_lock(tp, 0);
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
++ tg3_full_unlock(tp);
++
++done:
++ if (state == pci_channel_io_perm_failure) {
++ if (netdev) {
++ tg3_napi_enable(tp);
++ dev_close(netdev);
++ }
++ err = PCI_ERS_RESULT_DISCONNECT;
++ } else {
++ pci_disable_device(pdev);
++ }
++
++ rtnl_unlock();
++
++ return err;
++}
++
++/**
++ * tg3_io_slot_reset - called after the pci bus has been reset.
++ * @pdev: Pointer to PCI device
++ *
++ * Restart the card from scratch, as if from a cold-boot.
++ * At this point, the card has exprienced a hard reset,
++ * followed by fixups by BIOS, and has its config space
++ * set up identically to what it was at cold boot.
++ */
++static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct tg3 *tp = netdev_priv(netdev);
++ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
++ int err;
++
++ rtnl_lock();
++
++ if (pci_enable_device(pdev)) {
++ dev_err(&pdev->dev,
++ "Cannot re-enable PCI device after reset.\n");
++ goto done;
++ }
++
++ pci_set_master(pdev);
++ pci_restore_state(pdev);
++ pci_save_state(pdev);
++
++ if (!netdev || !netif_running(netdev)) {
++ rc = PCI_ERS_RESULT_RECOVERED;
++ goto done;
++ }
++
++ err = tg3_power_up(tp);
++ if (err)
++ goto done;
++
++ rc = PCI_ERS_RESULT_RECOVERED;
++
++done:
++ if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
++ tg3_napi_enable(tp);
++ dev_close(netdev);
++ }
++ rtnl_unlock();
++
++ return rc;
++}
++
++/**
++ * tg3_io_resume - called when traffic can start flowing again.
++ * @pdev: Pointer to PCI device
++ *
++ * This callback is called when the error recovery driver tells
++ * us that its OK to resume normal operation.
++ */
++static void tg3_io_resume(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct tg3 *tp = netdev_priv(netdev);
++ int err;
++
++ rtnl_lock();
++
++ if (!netif_running(netdev))
++ goto done;
++
++ tg3_full_lock(tp, 0);
++ tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
++ tg3_flag_set(tp, INIT_COMPLETE);
++ err = tg3_restart_hw(tp, true);
++ if (err) {
++ tg3_full_unlock(tp);
++ netdev_err(netdev, "Cannot restart hardware after reset.\n");
++ goto done;
++ }
++
++ netif_device_attach(netdev);
++
++ tg3_timer_start(tp);
++
++ tg3_netif_start(tp);
++
++ tg3_full_unlock(tp);
++
++ tg3_phy_start(tp);
++
++done:
++ rtnl_unlock();
++}
++
++static const struct pci_error_handlers tg3_err_handler = {
++ .error_detected = tg3_io_error_detected,
++ .slot_reset = tg3_io_slot_reset,
++ .resume = tg3_io_resume
++};
++
++static struct pci_driver tg3_driver = {
++ .name = DRV_MODULE_NAME,
++ .id_table = tg3_pci_tbl,
++ .probe = tg3_init_one,
++ .remove = tg3_remove_one,
++ .err_handler = &tg3_err_handler,
++ .driver.pm = &tg3_pm_ops,
++ .shutdown = tg3_shutdown,
++};
++
++module_pci_driver(tg3_driver);
+diff -Nur linux-3.14.36/drivers/net/ethernet/cadence/macb.c linux-openelec/drivers/net/ethernet/cadence/macb.c
+--- linux-3.14.36/drivers/net/ethernet/cadence/macb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/cadence/macb.c 2015-07-24 18:03:28.592842002 -0500
+@@ -604,25 +604,16 @@
+ {
+ unsigned int entry;
+ struct sk_buff *skb;
+- struct macb_dma_desc *desc;
+ dma_addr_t paddr;
+
+ while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+- u32 addr, ctrl;
+-
+ entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+- desc = &bp->rx_ring[entry];
+
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+- addr = desc->addr;
+- ctrl = desc->ctrl;
+ bp->rx_prepared_head++;
+
+- if ((addr & MACB_BIT(RX_USED)))
+- continue;
+-
+ if (bp->rx_skbuff[entry] == NULL) {
+ /* allocate sk_buff for this free entry in ring */
+ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
+@@ -703,7 +694,6 @@
+ if (!(addr & MACB_BIT(RX_USED)))
+ break;
+
+- desc->addr &= ~MACB_BIT(RX_USED);
+ bp->rx_tail++;
+ count++;
+
+diff -Nur linux-3.14.36/drivers/net/ethernet/cadence/macb.c.orig linux-openelec/drivers/net/ethernet/cadence/macb.c.orig
+--- linux-3.14.36/drivers/net/ethernet/cadence/macb.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/ethernet/cadence/macb.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2064 @@
++/*
++ * Cadence MACB/GEM Ethernet Controller driver
++ *
++ * Copyright (C) 2004-2006 Atmel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++#include <linux/clk.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/circ_buf.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/gpio.h>
++#include <linux/interrupt.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/dma-mapping.h>
++#include <linux/platform_data/macb.h>
++#include <linux/platform_device.h>
++#include <linux/phy.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/of_mdio.h>
++#include <linux/of_net.h>
++#include <linux/pinctrl/consumer.h>
++
++#include "macb.h"
++
++#define MACB_RX_BUFFER_SIZE 128
++#define RX_BUFFER_MULTIPLE 64 /* bytes */
++#define RX_RING_SIZE 512 /* must be power of 2 */
++#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
++
++#define TX_RING_SIZE 128 /* must be power of 2 */
++#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
++
++/* level of occupied TX descriptors under which we wake up TX process */
++#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
++
++#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
++ | MACB_BIT(ISR_ROVR))
++#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
++ | MACB_BIT(ISR_RLE) \
++ | MACB_BIT(TXERR))
++#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
++
++/*
++ * Graceful stop timeouts in us. We should allow up to
++ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
++ */
++#define MACB_HALT_TIMEOUT 1230
++
++/* Ring buffer accessors */
++static unsigned int macb_tx_ring_wrap(unsigned int index)
++{
++ return index & (TX_RING_SIZE - 1);
++}
++
++static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
++{
++ return &bp->tx_ring[macb_tx_ring_wrap(index)];
++}
++
++static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
++{
++ return &bp->tx_skb[macb_tx_ring_wrap(index)];
++}
++
++static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
++{
++ dma_addr_t offset;
++
++ offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
++
++ return bp->tx_ring_dma + offset;
++}
++
++static unsigned int macb_rx_ring_wrap(unsigned int index)
++{
++ return index & (RX_RING_SIZE - 1);
++}
++
++static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
++{
++ return &bp->rx_ring[macb_rx_ring_wrap(index)];
++}
++
++static void *macb_rx_buffer(struct macb *bp, unsigned int index)
++{
++ return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
++}
++
++void macb_set_hwaddr(struct macb *bp)
++{
++ u32 bottom;
++ u16 top;
++
++ bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
++ macb_or_gem_writel(bp, SA1B, bottom);
++ top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
++ macb_or_gem_writel(bp, SA1T, top);
++
++ /* Clear unused address register sets */
++ macb_or_gem_writel(bp, SA2B, 0);
++ macb_or_gem_writel(bp, SA2T, 0);
++ macb_or_gem_writel(bp, SA3B, 0);
++ macb_or_gem_writel(bp, SA3T, 0);
++ macb_or_gem_writel(bp, SA4B, 0);
++ macb_or_gem_writel(bp, SA4T, 0);
++}
++EXPORT_SYMBOL_GPL(macb_set_hwaddr);
++
++void macb_get_hwaddr(struct macb *bp)
++{
++ struct macb_platform_data *pdata;
++ u32 bottom;
++ u16 top;
++ u8 addr[6];
++ int i;
++
++ pdata = dev_get_platdata(&bp->pdev->dev);
++
++ /* Check all 4 address register for vaild address */
++ for (i = 0; i < 4; i++) {
++ bottom = macb_or_gem_readl(bp, SA1B + i * 8);
++ top = macb_or_gem_readl(bp, SA1T + i * 8);
++
++ if (pdata && pdata->rev_eth_addr) {
++ addr[5] = bottom & 0xff;
++ addr[4] = (bottom >> 8) & 0xff;
++ addr[3] = (bottom >> 16) & 0xff;
++ addr[2] = (bottom >> 24) & 0xff;
++ addr[1] = top & 0xff;
++ addr[0] = (top & 0xff00) >> 8;
++ } else {
++ addr[0] = bottom & 0xff;
++ addr[1] = (bottom >> 8) & 0xff;
++ addr[2] = (bottom >> 16) & 0xff;
++ addr[3] = (bottom >> 24) & 0xff;
++ addr[4] = top & 0xff;
++ addr[5] = (top >> 8) & 0xff;
++ }
++
++ if (is_valid_ether_addr(addr)) {
++ memcpy(bp->dev->dev_addr, addr, sizeof(addr));
++ return;
++ }
++ }
++
++ netdev_info(bp->dev, "invalid hw address, using random\n");
++ eth_hw_addr_random(bp->dev);
++}
++EXPORT_SYMBOL_GPL(macb_get_hwaddr);
++
++static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
++{
++ struct macb *bp = bus->priv;
++ int value;
++
++ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
++ | MACB_BF(RW, MACB_MAN_READ)
++ | MACB_BF(PHYA, mii_id)
++ | MACB_BF(REGA, regnum)
++ | MACB_BF(CODE, MACB_MAN_CODE)));
++
++ /* wait for end of transfer */
++ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
++ cpu_relax();
++
++ value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
++
++ return value;
++}
++
++static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
++ u16 value)
++{
++ struct macb *bp = bus->priv;
++
++ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
++ | MACB_BF(RW, MACB_MAN_WRITE)
++ | MACB_BF(PHYA, mii_id)
++ | MACB_BF(REGA, regnum)
++ | MACB_BF(CODE, MACB_MAN_CODE)
++ | MACB_BF(DATA, value)));
++
++ /* wait for end of transfer */
++ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
++ cpu_relax();
++
++ return 0;
++}
++
++static int macb_mdio_reset(struct mii_bus *bus)
++{
++ return 0;
++}
++
++/**
++ * macb_set_tx_clk() - Set a clock to a new frequency
++ * @clk Pointer to the clock to change
++ * @rate New frequency in Hz
++ * @dev Pointer to the struct net_device
++ */
++static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
++{
++ long ferr, rate, rate_rounded;
++
++ switch (speed) {
++ case SPEED_10:
++ rate = 2500000;
++ break;
++ case SPEED_100:
++ rate = 25000000;
++ break;
++ case SPEED_1000:
++ rate = 125000000;
++ break;
++ default:
++ return;
++ }
++
++ rate_rounded = clk_round_rate(clk, rate);
++ if (rate_rounded < 0)
++ return;
++
++ /* RGMII allows 50 ppm frequency error. Test and warn if this limit
++ * is not satisfied.
++ */
++ ferr = abs(rate_rounded - rate);
++ ferr = DIV_ROUND_UP(ferr, rate / 100000);
++ if (ferr > 5)
++ netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
++ rate);
++
++ if (clk_set_rate(clk, rate_rounded))
++ netdev_err(dev, "adjusting tx_clk failed.\n");
++}
++
++static void macb_handle_link_change(struct net_device *dev)
++{
++ struct macb *bp = netdev_priv(dev);
++ struct phy_device *phydev = bp->phy_dev;
++ unsigned long flags;
++
++ int status_change = 0;
++
++ spin_lock_irqsave(&bp->lock, flags);
++
++ if (phydev->link) {
++ if ((bp->speed != phydev->speed) ||
++ (bp->duplex != phydev->duplex)) {
++ u32 reg;
++
++ reg = macb_readl(bp, NCFGR);
++ reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
++ if (macb_is_gem(bp))
++ reg &= ~GEM_BIT(GBE);
++
++ if (phydev->duplex)
++ reg |= MACB_BIT(FD);
++ if (phydev->speed == SPEED_100)
++ reg |= MACB_BIT(SPD);
++ if (phydev->speed == SPEED_1000)
++ reg |= GEM_BIT(GBE);
++
++ macb_or_gem_writel(bp, NCFGR, reg);
++
++ bp->speed = phydev->speed;
++ bp->duplex = phydev->duplex;
++ status_change = 1;
++ }
++ }
++
++ if (phydev->link != bp->link) {
++ if (!phydev->link) {
++ bp->speed = 0;
++ bp->duplex = -1;
++ }
++ bp->link = phydev->link;
++
++ status_change = 1;
++ }
++
++ spin_unlock_irqrestore(&bp->lock, flags);
++
++ if (!IS_ERR(bp->tx_clk))
++ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
++
++ if (status_change) {
++ if (phydev->link) {
++ netif_carrier_on(dev);
++ netdev_info(dev, "link up (%d/%s)\n",
++ phydev->speed,
++ phydev->duplex == DUPLEX_FULL ?
++ "Full" : "Half");
++ } else {
++ netif_carrier_off(dev);
++ netdev_info(dev, "link down\n");
++ }
++ }
++}
++
++/* based on au1000_eth. c*/
++static int macb_mii_probe(struct net_device *dev)
++{
++ struct macb *bp = netdev_priv(dev);
++ struct macb_platform_data *pdata;
++ struct phy_device *phydev;
++ int phy_irq;
++ int ret;
++
++ phydev = phy_find_first(bp->mii_bus);
++ if (!phydev) {
++ netdev_err(dev, "no PHY found\n");
++ return -ENXIO;
++ }
++
++ pdata = dev_get_platdata(&bp->pdev->dev);
++ if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
++ ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
++ if (!ret) {
++ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
++ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
++ }
++ }
++
++ /* attach the mac to the phy */
++ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
++ bp->phy_interface);
++ if (ret) {
++ netdev_err(dev, "Could not attach to PHY\n");
++ return ret;
++ }
++
++ /* mask with MAC supported features */
++ if (macb_is_gem(bp))
++ phydev->supported &= PHY_GBIT_FEATURES;
++ else
++ phydev->supported &= PHY_BASIC_FEATURES;
++
++ phydev->advertising = phydev->supported;
++
++ bp->link = 0;
++ bp->speed = 0;
++ bp->duplex = -1;
++ bp->phy_dev = phydev;
++
++ return 0;
++}
++
++int macb_mii_init(struct macb *bp)
++{
++ struct macb_platform_data *pdata;
++ struct device_node *np;
++ int err = -ENXIO, i;
++
++ /* Enable management port */
++ macb_writel(bp, NCR, MACB_BIT(MPE));
++
++ bp->mii_bus = mdiobus_alloc();
++ if (bp->mii_bus == NULL) {
++ err = -ENOMEM;
++ goto err_out;
++ }
++
++ bp->mii_bus->name = "MACB_mii_bus";
++ bp->mii_bus->read = &macb_mdio_read;
++ bp->mii_bus->write = &macb_mdio_write;
++ bp->mii_bus->reset = &macb_mdio_reset;
++ snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
++ bp->pdev->name, bp->pdev->id);
++ bp->mii_bus->priv = bp;
++ bp->mii_bus->parent = &bp->dev->dev;
++ pdata = dev_get_platdata(&bp->pdev->dev);
++
++ bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
++ if (!bp->mii_bus->irq) {
++ err = -ENOMEM;
++ goto err_out_free_mdiobus;
++ }
++
++ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
++
++ np = bp->pdev->dev.of_node;
++ if (np) {
++ /* try dt phy registration */
++ err = of_mdiobus_register(bp->mii_bus, np);
++
++ /* fallback to standard phy registration if no phy were
++ found during dt phy registration */
++ if (!err && !phy_find_first(bp->mii_bus)) {
++ for (i = 0; i < PHY_MAX_ADDR; i++) {
++ struct phy_device *phydev;
++
++ phydev = mdiobus_scan(bp->mii_bus, i);
++ if (IS_ERR(phydev)) {
++ err = PTR_ERR(phydev);
++ break;
++ }
++ }
++
++ if (err)
++ goto err_out_unregister_bus;
++ }
++ } else {
++ for (i = 0; i < PHY_MAX_ADDR; i++)
++ bp->mii_bus->irq[i] = PHY_POLL;
++
++ if (pdata)
++ bp->mii_bus->phy_mask = pdata->phy_mask;
++
++ err = mdiobus_register(bp->mii_bus);
++ }
++
++ if (err)
++ goto err_out_free_mdio_irq;
++
++ err = macb_mii_probe(bp->dev);
++ if (err)
++ goto err_out_unregister_bus;
++
++ return 0;
++
++err_out_unregister_bus:
++ mdiobus_unregister(bp->mii_bus);
++err_out_free_mdio_irq:
++ kfree(bp->mii_bus->irq);
++err_out_free_mdiobus:
++ mdiobus_free(bp->mii_bus);
++err_out:
++ return err;
++}
++EXPORT_SYMBOL_GPL(macb_mii_init);
++
++static void macb_update_stats(struct macb *bp)
++{
++ u32 __iomem *reg = bp->regs + MACB_PFR;
++ u32 *p = &bp->hw_stats.macb.rx_pause_frames;
++ u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
++
++ WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
++
++ for(; p < end; p++, reg++)
++ *p += __raw_readl(reg);
++}
++
++static int macb_halt_tx(struct macb *bp)
++{
++ unsigned long halt_time, timeout;
++ u32 status;
++
++ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
++
++ timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
++ do {
++ halt_time = jiffies;
++ status = macb_readl(bp, TSR);
++ if (!(status & MACB_BIT(TGO)))
++ return 0;
++
++ usleep_range(10, 250);
++ } while (time_before(halt_time, timeout));
++
++ return -ETIMEDOUT;
++}
++
++static void macb_tx_error_task(struct work_struct *work)
++{
++ struct macb *bp = container_of(work, struct macb, tx_error_task);
++ struct macb_tx_skb *tx_skb;
++ struct sk_buff *skb;
++ unsigned int tail;
++
++ netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
++ bp->tx_tail, bp->tx_head);
++
++ /* Make sure nobody is trying to queue up new packets */
++ netif_stop_queue(bp->dev);
++
++ /*
++ * Stop transmission now
++ * (in case we have just queued new packets)
++ */
++ if (macb_halt_tx(bp))
++ /* Just complain for now, reinitializing TX path can be good */
++ netdev_err(bp->dev, "BUG: halt tx timed out\n");
++
++ /* No need for the lock here as nobody will interrupt us anymore */
++
++ /*
++ * Treat frames in TX queue including the ones that caused the error.
++ * Free transmit buffers in upper layer.
++ */
++ for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
++ struct macb_dma_desc *desc;
++ u32 ctrl;
++
++ desc = macb_tx_desc(bp, tail);
++ ctrl = desc->ctrl;
++ tx_skb = macb_tx_skb(bp, tail);
++ skb = tx_skb->skb;
++
++ if (ctrl & MACB_BIT(TX_USED)) {
++ netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
++ macb_tx_ring_wrap(tail), skb->data);
++ bp->stats.tx_packets++;
++ bp->stats.tx_bytes += skb->len;
++ } else {
++ /*
++ * "Buffers exhausted mid-frame" errors may only happen
++ * if the driver is buggy, so complain loudly about those.
++ * Statistics are updated by hardware.
++ */
++ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
++ netdev_err(bp->dev,
++ "BUG: TX buffers exhausted mid-frame\n");
++
++ desc->ctrl = ctrl | MACB_BIT(TX_USED);
++ }
++
++ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
++ DMA_TO_DEVICE);
++ tx_skb->skb = NULL;
++ dev_kfree_skb(skb);
++ }
++
++ /* Make descriptor updates visible to hardware */
++ wmb();
++
++ /* Reinitialize the TX desc queue */
++ macb_writel(bp, TBQP, bp->tx_ring_dma);
++ /* Make TX ring reflect state of hardware */
++ bp->tx_head = bp->tx_tail = 0;
++
++ /* Now we are ready to start transmission again */
++ netif_wake_queue(bp->dev);
++
++ /* Housework before enabling TX IRQ */
++ macb_writel(bp, TSR, macb_readl(bp, TSR));
++ macb_writel(bp, IER, MACB_TX_INT_FLAGS);
++}
++
++static void macb_tx_interrupt(struct macb *bp)
++{
++ unsigned int tail;
++ unsigned int head;
++ u32 status;
++
++ status = macb_readl(bp, TSR);
++ macb_writel(bp, TSR, status);
++
++ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
++ macb_writel(bp, ISR, MACB_BIT(TCOMP));
++
++ netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
++ (unsigned long)status);
++
++ head = bp->tx_head;
++ for (tail = bp->tx_tail; tail != head; tail++) {
++ struct macb_tx_skb *tx_skb;
++ struct sk_buff *skb;
++ struct macb_dma_desc *desc;
++ u32 ctrl;
++
++ desc = macb_tx_desc(bp, tail);
++
++ /* Make hw descriptor updates visible to CPU */
++ rmb();
++
++ ctrl = desc->ctrl;
++
++ if (!(ctrl & MACB_BIT(TX_USED)))
++ break;
++
++ tx_skb = macb_tx_skb(bp, tail);
++ skb = tx_skb->skb;
++
++ netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
++ macb_tx_ring_wrap(tail), skb->data);
++ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
++ DMA_TO_DEVICE);
++ bp->stats.tx_packets++;
++ bp->stats.tx_bytes += skb->len;
++ tx_skb->skb = NULL;
++ dev_kfree_skb_irq(skb);
++ }
++
++ bp->tx_tail = tail;
++ if (netif_queue_stopped(bp->dev)
++ && CIRC_CNT(bp->tx_head, bp->tx_tail,
++ TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
++ netif_wake_queue(bp->dev);
++}
++
++static void gem_rx_refill(struct macb *bp)
++{
++ unsigned int entry;
++ struct sk_buff *skb;
++ dma_addr_t paddr;
++
++ while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
++ entry = macb_rx_ring_wrap(bp->rx_prepared_head);
++
++ /* Make hw descriptor updates visible to CPU */
++ rmb();
++
++ bp->rx_prepared_head++;
++
++ if (bp->rx_skbuff[entry] == NULL) {
++ /* allocate sk_buff for this free entry in ring */
++ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
++ if (unlikely(skb == NULL)) {
++ netdev_err(bp->dev,
++ "Unable to allocate sk_buff\n");
++ break;
++ }
++
++ /* now fill corresponding descriptor entry */
++ paddr = dma_map_single(&bp->pdev->dev, skb->data,
++ bp->rx_buffer_size, DMA_FROM_DEVICE);
++ if (dma_mapping_error(&bp->pdev->dev, paddr)) {
++ dev_kfree_skb(skb);
++ break;
++ }
++
++ bp->rx_skbuff[entry] = skb;
++
++ if (entry == RX_RING_SIZE - 1)
++ paddr |= MACB_BIT(RX_WRAP);
++ bp->rx_ring[entry].addr = paddr;
++ bp->rx_ring[entry].ctrl = 0;
++
++ /* properly align Ethernet header */
++ skb_reserve(skb, NET_IP_ALIGN);
++ }
++ }
++
++ /* Make descriptor updates visible to hardware */
++ wmb();
++
++ netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
++ bp->rx_prepared_head, bp->rx_tail);
++}
++
++/* Mark DMA descriptors from begin up to and not including end as unused */
++static void discard_partial_frame(struct macb *bp, unsigned int begin,
++ unsigned int end)
++{
++ unsigned int frag;
++
++ for (frag = begin; frag != end; frag++) {
++ struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
++ desc->addr &= ~MACB_BIT(RX_USED);
++ }
++
++ /* Make descriptor updates visible to hardware */
++ wmb();
++
++ /*
++ * When this happens, the hardware stats registers for
++ * whatever caused this is updated, so we don't have to record
++ * anything.
++ */
++}
++
++static int gem_rx(struct macb *bp, int budget)
++{
++ unsigned int len;
++ unsigned int entry;
++ struct sk_buff *skb;
++ struct macb_dma_desc *desc;
++ int count = 0;
++
++ while (count < budget) {
++ u32 addr, ctrl;
++
++ entry = macb_rx_ring_wrap(bp->rx_tail);
++ desc = &bp->rx_ring[entry];
++
++ /* Make hw descriptor updates visible to CPU */
++ rmb();
++
++ addr = desc->addr;
++ ctrl = desc->ctrl;
++
++ if (!(addr & MACB_BIT(RX_USED)))
++ break;
++
++ bp->rx_tail++;
++ count++;
++
++ if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
++ netdev_err(bp->dev,
++ "not whole frame pointed by descriptor\n");
++ bp->stats.rx_dropped++;
++ break;
++ }
++ skb = bp->rx_skbuff[entry];
++ if (unlikely(!skb)) {
++ netdev_err(bp->dev,
++ "inconsistent Rx descriptor chain\n");
++ bp->stats.rx_dropped++;
++ break;
++ }
++ /* now everything is ready for receiving packet */
++ bp->rx_skbuff[entry] = NULL;
++ len = MACB_BFEXT(RX_FRMLEN, ctrl);
++
++ netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
++
++ skb_put(skb, len);
++ addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
++ dma_unmap_single(&bp->pdev->dev, addr,
++ bp->rx_buffer_size, DMA_FROM_DEVICE);
++
++ skb->protocol = eth_type_trans(skb, bp->dev);
++ skb_checksum_none_assert(skb);
++
++ bp->stats.rx_packets++;
++ bp->stats.rx_bytes += skb->len;
++
++#if defined(DEBUG) && defined(VERBOSE_DEBUG)
++ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
++ skb->len, skb->csum);
++ print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
++ skb->mac_header, 16, true);
++ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
++ skb->data, 32, true);
++#endif
++
++ netif_receive_skb(skb);
++ }
++
++ gem_rx_refill(bp);
++
++ return count;
++}
++
++static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
++ unsigned int last_frag)
++{
++ unsigned int len;
++ unsigned int frag;
++ unsigned int offset;
++ struct sk_buff *skb;
++ struct macb_dma_desc *desc;
++
++ desc = macb_rx_desc(bp, last_frag);
++ len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
++
++ netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
++ macb_rx_ring_wrap(first_frag),
++ macb_rx_ring_wrap(last_frag), len);
++
++ /*
++ * The ethernet header starts NET_IP_ALIGN bytes into the
++ * first buffer. Since the header is 14 bytes, this makes the
++ * payload word-aligned.
++ *
++ * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
++ * the two padding bytes into the skb so that we avoid hitting
++ * the slowpath in memcpy(), and pull them off afterwards.
++ */
++ skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
++ if (!skb) {
++ bp->stats.rx_dropped++;
++ for (frag = first_frag; ; frag++) {
++ desc = macb_rx_desc(bp, frag);
++ desc->addr &= ~MACB_BIT(RX_USED);
++ if (frag == last_frag)
++ break;
++ }
++
++ /* Make descriptor updates visible to hardware */
++ wmb();
++
++ return 1;
++ }
++
++ offset = 0;
++ len += NET_IP_ALIGN;
++ skb_checksum_none_assert(skb);
++ skb_put(skb, len);
++
++ for (frag = first_frag; ; frag++) {
++ unsigned int frag_len = bp->rx_buffer_size;
++
++ if (offset + frag_len > len) {
++ BUG_ON(frag != last_frag);
++ frag_len = len - offset;
++ }
++ skb_copy_to_linear_data_offset(skb, offset,
++ macb_rx_buffer(bp, frag), frag_len);
++ offset += bp->rx_buffer_size;
++ desc = macb_rx_desc(bp, frag);
++ desc->addr &= ~MACB_BIT(RX_USED);
++
++ if (frag == last_frag)
++ break;
++ }
++
++ /* Make descriptor updates visible to hardware */
++ wmb();
++
++ __skb_pull(skb, NET_IP_ALIGN);
++ skb->protocol = eth_type_trans(skb, bp->dev);
++
++ bp->stats.rx_packets++;
++ bp->stats.rx_bytes += skb->len;
++ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
++ skb->len, skb->csum);
++ netif_receive_skb(skb);
++
++ return 0;
++}
++
++static int macb_rx(struct macb *bp, int budget)
++{
++ int received = 0;
++ unsigned int tail;
++ int first_frag = -1;
++
++ for (tail = bp->rx_tail; budget > 0; tail++) {
++ struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
++ u32 addr, ctrl;
++
++ /* Make hw descriptor updates visible to CPU */
++ rmb();
++
++ addr = desc->addr;
++ ctrl = desc->ctrl;
++
++ if (!(addr & MACB_BIT(RX_USED)))
++ break;
++
++ if (ctrl & MACB_BIT(RX_SOF)) {
++ if (first_frag != -1)
++ discard_partial_frame(bp, first_frag, tail);
++ first_frag = tail;
++ }
++
++ if (ctrl & MACB_BIT(RX_EOF)) {
++ int dropped;
++ BUG_ON(first_frag == -1);
++
++ dropped = macb_rx_frame(bp, first_frag, tail);
++ first_frag = -1;
++ if (!dropped) {
++ received++;
++ budget--;
++ }
++ }
++ }
++
++ if (first_frag != -1)
++ bp->rx_tail = first_frag;
++ else
++ bp->rx_tail = tail;
++
++ return received;
++}
++
++static int macb_poll(struct napi_struct *napi, int budget)
++{
++ struct macb *bp = container_of(napi, struct macb, napi);
++ int work_done;
++ u32 status;
++
++ status = macb_readl(bp, RSR);
++ macb_writel(bp, RSR, status);
++
++ work_done = 0;
++
++ netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
++ (unsigned long)status, budget);
++
++ work_done = bp->macbgem_ops.mog_rx(bp, budget);
++ if (work_done < budget) {
++ napi_complete(napi);
++
++ /*
++ * We've done what we can to clean the buffers. Make sure we
++ * get notified when new packets arrive.
++ */
++ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
++
++ /* Packets received while interrupts were disabled */
++ status = macb_readl(bp, RSR);
++ if (unlikely(status))
++ napi_reschedule(napi);
++ }
++
++ /* TODO: Handle errors */
++
++ return work_done;
++}
++
++static irqreturn_t macb_interrupt(int irq, void *dev_id)
++{
++ struct net_device *dev = dev_id;
++ struct macb *bp = netdev_priv(dev);
++ u32 status;
++
++ status = macb_readl(bp, ISR);
++
++ if (unlikely(!status))
++ return IRQ_NONE;
++
++ spin_lock(&bp->lock);
++
++ while (status) {
++ /* close possible race with dev_close */
++ if (unlikely(!netif_running(dev))) {
++ macb_writel(bp, IDR, -1);
++ break;
++ }
++
++ netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
++
++ if (status & MACB_RX_INT_FLAGS) {
++ /*
++ * There's no point taking any more interrupts
++ * until we have processed the buffers. The
++ * scheduling call may fail if the poll routine
++ * is already scheduled, so disable interrupts
++ * now.
++ */
++ macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
++ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
++ macb_writel(bp, ISR, MACB_BIT(RCOMP));
++
++ if (napi_schedule_prep(&bp->napi)) {
++ netdev_vdbg(bp->dev, "scheduling RX softirq\n");
++ __napi_schedule(&bp->napi);
++ }
++ }
++
++ if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
++ macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
++ schedule_work(&bp->tx_error_task);
++ break;
++ }
++
++ if (status & MACB_BIT(TCOMP))
++ macb_tx_interrupt(bp);
++
++ /*
++ * Link change detection isn't possible with RMII, so we'll
++ * add that if/when we get our hands on a full-blown MII PHY.
++ */
++
++ if (status & MACB_BIT(ISR_ROVR)) {
++ /* We missed at least one packet */
++ if (macb_is_gem(bp))
++ bp->hw_stats.gem.rx_overruns++;
++ else
++ bp->hw_stats.macb.rx_overruns++;
++ }
++
++ if (status & MACB_BIT(HRESP)) {
++ /*
++ * TODO: Reset the hardware, and maybe move the
++ * netdev_err to a lower-priority context as well
++ * (work queue?)
++ */
++ netdev_err(dev, "DMA bus error: HRESP not OK\n");
++ }
++
++ status = macb_readl(bp, ISR);
++ }
++
++ spin_unlock(&bp->lock);
++
++ return IRQ_HANDLED;
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++/*
++ * Polling receive - used by netconsole and other diagnostic tools
++ * to allow network i/o with interrupts disabled.
++ */
++static void macb_poll_controller(struct net_device *dev)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ macb_interrupt(dev->irq, dev);
++ local_irq_restore(flags);
++}
++#endif
++
++static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct macb *bp = netdev_priv(dev);
++ dma_addr_t mapping;
++ unsigned int len, entry;
++ struct macb_dma_desc *desc;
++ struct macb_tx_skb *tx_skb;
++ u32 ctrl;
++ unsigned long flags;
++
++#if defined(DEBUG) && defined(VERBOSE_DEBUG)
++ netdev_vdbg(bp->dev,
++ "start_xmit: len %u head %p data %p tail %p end %p\n",
++ skb->len, skb->head, skb->data,
++ skb_tail_pointer(skb), skb_end_pointer(skb));
++ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
++ skb->data, 16, true);
++#endif
++
++ len = skb->len;
++ spin_lock_irqsave(&bp->lock, flags);
++
++ /* This is a hard error, log it. */
++ if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
++ netif_stop_queue(dev);
++ spin_unlock_irqrestore(&bp->lock, flags);
++ netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
++ netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
++ bp->tx_head, bp->tx_tail);
++ return NETDEV_TX_BUSY;
++ }
++
++ entry = macb_tx_ring_wrap(bp->tx_head);
++ netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
++ mapping = dma_map_single(&bp->pdev->dev, skb->data,
++ len, DMA_TO_DEVICE);
++ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
++ kfree_skb(skb);
++ goto unlock;
++ }
++
++ bp->tx_head++;
++ tx_skb = &bp->tx_skb[entry];
++ tx_skb->skb = skb;
++ tx_skb->mapping = mapping;
++ netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
++ skb->data, (unsigned long)mapping);
++
++ ctrl = MACB_BF(TX_FRMLEN, len);
++ ctrl |= MACB_BIT(TX_LAST);
++ if (entry == (TX_RING_SIZE - 1))
++ ctrl |= MACB_BIT(TX_WRAP);
++
++ desc = &bp->tx_ring[entry];
++ desc->addr = mapping;
++ desc->ctrl = ctrl;
++
++ /* Make newly initialized descriptor visible to hardware */
++ wmb();
++
++ skb_tx_timestamp(skb);
++
++ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
++
++ if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
++ netif_stop_queue(dev);
++
++unlock:
++ spin_unlock_irqrestore(&bp->lock, flags);
++
++ return NETDEV_TX_OK;
++}
++
++static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
++{
++ if (!macb_is_gem(bp)) {
++ bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
++ } else {
++ bp->rx_buffer_size = size;
++
++ if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
++ netdev_dbg(bp->dev,
++ "RX buffer must be multiple of %d bytes, expanding\n",
++ RX_BUFFER_MULTIPLE);
++ bp->rx_buffer_size =
++ roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
++ }
++ }
++
++ netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
++ bp->dev->mtu, bp->rx_buffer_size);
++}
++
++static void gem_free_rx_buffers(struct macb *bp)
++{
++ struct sk_buff *skb;
++ struct macb_dma_desc *desc;
++ dma_addr_t addr;
++ int i;
++
++ if (!bp->rx_skbuff)
++ return;
++
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ skb = bp->rx_skbuff[i];
++
++ if (skb == NULL)
++ continue;
++
++ desc = &bp->rx_ring[i];
++ addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
++ dma_unmap_single(&bp->pdev->dev, addr, skb->len,
++ DMA_FROM_DEVICE);
++ dev_kfree_skb_any(skb);
++ skb = NULL;
++ }
++
++ kfree(bp->rx_skbuff);
++ bp->rx_skbuff = NULL;
++}
++
++static void macb_free_rx_buffers(struct macb *bp)
++{
++ if (bp->rx_buffers) {
++ dma_free_coherent(&bp->pdev->dev,
++ RX_RING_SIZE * bp->rx_buffer_size,
++ bp->rx_buffers, bp->rx_buffers_dma);
++ bp->rx_buffers = NULL;
++ }
++}
++
++static void macb_free_consistent(struct macb *bp)
++{
++ if (bp->tx_skb) {
++ kfree(bp->tx_skb);
++ bp->tx_skb = NULL;
++ }
++ bp->macbgem_ops.mog_free_rx_buffers(bp);
++ if (bp->rx_ring) {
++ dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
++ bp->rx_ring, bp->rx_ring_dma);
++ bp->rx_ring = NULL;
++ }
++ if (bp->tx_ring) {
++ dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
++ bp->tx_ring, bp->tx_ring_dma);
++ bp->tx_ring = NULL;
++ }
++}
++
++static int gem_alloc_rx_buffers(struct macb *bp)
++{
++ int size;
++
++ size = RX_RING_SIZE * sizeof(struct sk_buff *);
++ bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
++ if (!bp->rx_skbuff)
++ return -ENOMEM;
++ else
++ netdev_dbg(bp->dev,
++ "Allocated %d RX struct sk_buff entries at %p\n",
++ RX_RING_SIZE, bp->rx_skbuff);
++ return 0;
++}
++
++static int macb_alloc_rx_buffers(struct macb *bp)
++{
++ int size;
++
++ size = RX_RING_SIZE * bp->rx_buffer_size;
++ bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
++ &bp->rx_buffers_dma, GFP_KERNEL);
++ if (!bp->rx_buffers)
++ return -ENOMEM;
++ else
++ netdev_dbg(bp->dev,
++ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
++ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
++ return 0;
++}
++
++static int macb_alloc_consistent(struct macb *bp)
++{
++ int size;
++
++ size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
++ bp->tx_skb = kmalloc(size, GFP_KERNEL);
++ if (!bp->tx_skb)
++ goto out_err;
++
++ size = RX_RING_BYTES;
++ bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
++ &bp->rx_ring_dma, GFP_KERNEL);
++ if (!bp->rx_ring)
++ goto out_err;
++ netdev_dbg(bp->dev,
++ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
++ size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
++
++ size = TX_RING_BYTES;
++ bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
++ &bp->tx_ring_dma, GFP_KERNEL);
++ if (!bp->tx_ring)
++ goto out_err;
++ netdev_dbg(bp->dev,
++ "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
++ size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
++
++ if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
++ goto out_err;
++
++ return 0;
++
++out_err:
++ macb_free_consistent(bp);
++ return -ENOMEM;
++}
++
++static void gem_init_rings(struct macb *bp)
++{
++ int i;
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ bp->tx_ring[i].addr = 0;
++ bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
++ }
++ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
++
++ bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
++
++ gem_rx_refill(bp);
++}
++
++static void macb_init_rings(struct macb *bp)
++{
++ int i;
++ dma_addr_t addr;
++
++ addr = bp->rx_buffers_dma;
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ bp->rx_ring[i].addr = addr;
++ bp->rx_ring[i].ctrl = 0;
++ addr += bp->rx_buffer_size;
++ }
++ bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ bp->tx_ring[i].addr = 0;
++ bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
++ }
++ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
++
++ bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
++}
++
++static void macb_reset_hw(struct macb *bp)
++{
++ /*
++ * Disable RX and TX (XXX: Should we halt the transmission
++ * more gracefully?)
++ */
++ macb_writel(bp, NCR, 0);
++
++ /* Clear the stats registers (XXX: Update stats first?) */
++ macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
++
++ /* Clear all status flags */
++ macb_writel(bp, TSR, -1);
++ macb_writel(bp, RSR, -1);
++
++ /* Disable all interrupts */
++ macb_writel(bp, IDR, -1);
++ macb_readl(bp, ISR);
++}
++
++static u32 gem_mdc_clk_div(struct macb *bp)
++{
++ u32 config;
++ unsigned long pclk_hz = clk_get_rate(bp->pclk);
++
++ if (pclk_hz <= 20000000)
++ config = GEM_BF(CLK, GEM_CLK_DIV8);
++ else if (pclk_hz <= 40000000)
++ config = GEM_BF(CLK, GEM_CLK_DIV16);
++ else if (pclk_hz <= 80000000)
++ config = GEM_BF(CLK, GEM_CLK_DIV32);
++ else if (pclk_hz <= 120000000)
++ config = GEM_BF(CLK, GEM_CLK_DIV48);
++ else if (pclk_hz <= 160000000)
++ config = GEM_BF(CLK, GEM_CLK_DIV64);
++ else
++ config = GEM_BF(CLK, GEM_CLK_DIV96);
++
++ return config;
++}
++
++static u32 macb_mdc_clk_div(struct macb *bp)
++{
++ u32 config;
++ unsigned long pclk_hz;
++
++ if (macb_is_gem(bp))
++ return gem_mdc_clk_div(bp);
++
++ pclk_hz = clk_get_rate(bp->pclk);
++ if (pclk_hz <= 20000000)
++ config = MACB_BF(CLK, MACB_CLK_DIV8);
++ else if (pclk_hz <= 40000000)
++ config = MACB_BF(CLK, MACB_CLK_DIV16);
++ else if (pclk_hz <= 80000000)
++ config = MACB_BF(CLK, MACB_CLK_DIV32);
++ else
++ config = MACB_BF(CLK, MACB_CLK_DIV64);
++
++ return config;
++}
++
++/*
++ * Get the DMA bus width field of the network configuration register that we
++ * should program. We find the width from decoding the design configuration
++ * register to find the maximum supported data bus width.
++ */
++static u32 macb_dbw(struct macb *bp)
++{
++ if (!macb_is_gem(bp))
++ return 0;
++
++ switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
++ case 4:
++ return GEM_BF(DBW, GEM_DBW128);
++ case 2:
++ return GEM_BF(DBW, GEM_DBW64);
++ case 1:
++ default:
++ return GEM_BF(DBW, GEM_DBW32);
++ }
++}
++
++/*
++ * Configure the receive DMA engine
++ * - use the correct receive buffer size
++ * - set the possibility to use INCR16 bursts
++ * (if not supported by FIFO, it will fallback to default)
++ * - set both rx/tx packet buffers to full memory size
++ * These are configurable parameters for GEM.
++ */
++static void macb_configure_dma(struct macb *bp)
++{
++ u32 dmacfg;
++
++ if (macb_is_gem(bp)) {
++ dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
++ dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
++ dmacfg |= GEM_BF(FBLDO, 16);
++ dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
++ dmacfg &= ~GEM_BIT(ENDIA);
++ gem_writel(bp, DMACFG, dmacfg);
++ }
++}
++
++/*
++ * Configure peripheral capacities according to integration options used
++ */
++static void macb_configure_caps(struct macb *bp)
++{
++ if (macb_is_gem(bp)) {
++ if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
++ bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
++ }
++}
++
++static void macb_init_hw(struct macb *bp)
++{
++ u32 config;
++
++ macb_reset_hw(bp);
++ macb_set_hwaddr(bp);
++
++ config = macb_mdc_clk_div(bp);
++ config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
++ config |= MACB_BIT(PAE); /* PAuse Enable */
++ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
++ config |= MACB_BIT(BIG); /* Receive oversized frames */
++ if (bp->dev->flags & IFF_PROMISC)
++ config |= MACB_BIT(CAF); /* Copy All Frames */
++ if (!(bp->dev->flags & IFF_BROADCAST))
++ config |= MACB_BIT(NBC); /* No BroadCast */
++ config |= macb_dbw(bp);
++ macb_writel(bp, NCFGR, config);
++ bp->speed = SPEED_10;
++ bp->duplex = DUPLEX_HALF;
++
++ macb_configure_dma(bp);
++ macb_configure_caps(bp);
++
++ /* Initialize TX and RX buffers */
++ macb_writel(bp, RBQP, bp->rx_ring_dma);
++ macb_writel(bp, TBQP, bp->tx_ring_dma);
++
++ /* Enable TX and RX */
++ macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
++
++ /* Enable interrupts */
++ macb_writel(bp, IER, (MACB_RX_INT_FLAGS
++ | MACB_TX_INT_FLAGS
++ | MACB_BIT(HRESP)));
++
++}
++
++/*
++ * The hash address register is 64 bits long and takes up two
++ * locations in the memory map. The least significant bits are stored
++ * in EMAC_HSL and the most significant bits in EMAC_HSH.
++ *
++ * The unicast hash enable and the multicast hash enable bits in the
++ * network configuration register enable the reception of hash matched
++ * frames. The destination address is reduced to a 6 bit index into
++ * the 64 bit hash register using the following hash function. The
++ * hash function is an exclusive or of every sixth bit of the
++ * destination address.
++ *
++ * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
++ * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
++ * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
++ * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
++ * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
++ * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
++ *
++ * da[0] represents the least significant bit of the first byte
++ * received, that is, the multicast/unicast indicator, and da[47]
++ * represents the most significant bit of the last byte received. If
++ * the hash index, hi[n], points to a bit that is set in the hash
++ * register then the frame will be matched according to whether the
++ * frame is multicast or unicast. A multicast match will be signalled
++ * if the multicast hash enable bit is set, da[0] is 1 and the hash
++ * index points to a bit set in the hash register. A unicast match
++ * will be signalled if the unicast hash enable bit is set, da[0] is 0
++ * and the hash index points to a bit set in the hash register. To
++ * receive all multicast frames, the hash register should be set with
++ * all ones and the multicast hash enable bit should be set in the
++ * network configuration register.
++ */
++
++static inline int hash_bit_value(int bitnr, __u8 *addr)
++{
++ if (addr[bitnr / 8] & (1 << (bitnr % 8)))
++ return 1;
++ return 0;
++}
++
++/*
++ * Return the hash index value for the specified address.
++ */
++static int hash_get_index(__u8 *addr)
++{
++ int i, j, bitval;
++ int hash_index = 0;
++
++ for (j = 0; j < 6; j++) {
++ for (i = 0, bitval = 0; i < 8; i++)
++ bitval ^= hash_bit_value(i*6 + j, addr);
++
++ hash_index |= (bitval << j);
++ }
++
++ return hash_index;
++}
++
++/*
++ * Add multicast addresses to the internal multicast-hash table.
++ */
++static void macb_sethashtable(struct net_device *dev)
++{
++ struct netdev_hw_addr *ha;
++ unsigned long mc_filter[2];
++ unsigned int bitnr;
++ struct macb *bp = netdev_priv(dev);
++
++ mc_filter[0] = mc_filter[1] = 0;
++
++ netdev_for_each_mc_addr(ha, dev) {
++ bitnr = hash_get_index(ha->addr);
++ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
++ }
++
++ macb_or_gem_writel(bp, HRB, mc_filter[0]);
++ macb_or_gem_writel(bp, HRT, mc_filter[1]);
++}
++
++/*
++ * Enable/Disable promiscuous and multicast modes.
++ */
++void macb_set_rx_mode(struct net_device *dev)
++{
++ unsigned long cfg;
++ struct macb *bp = netdev_priv(dev);
++
++ cfg = macb_readl(bp, NCFGR);
++
++ if (dev->flags & IFF_PROMISC)
++ /* Enable promiscuous mode */
++ cfg |= MACB_BIT(CAF);
++ else if (dev->flags & (~IFF_PROMISC))
++ /* Disable promiscuous mode */
++ cfg &= ~MACB_BIT(CAF);
++
++ if (dev->flags & IFF_ALLMULTI) {
++ /* Enable all multicast mode */
++ macb_or_gem_writel(bp, HRB, -1);
++ macb_or_gem_writel(bp, HRT, -1);
++ cfg |= MACB_BIT(NCFGR_MTI);
++ } else if (!netdev_mc_empty(dev)) {
++ /* Enable specific multicasts */
++ macb_sethashtable(dev);
++ cfg |= MACB_BIT(NCFGR_MTI);
++ } else if (dev->flags & (~IFF_ALLMULTI)) {
++ /* Disable all multicast mode */
++ macb_or_gem_writel(bp, HRB, 0);
++ macb_or_gem_writel(bp, HRT, 0);
++ cfg &= ~MACB_BIT(NCFGR_MTI);
++ }
++
++ macb_writel(bp, NCFGR, cfg);
++}
++EXPORT_SYMBOL_GPL(macb_set_rx_mode);
++
++static int macb_open(struct net_device *dev)
++{
++ struct macb *bp = netdev_priv(dev);
++ size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
++ int err;
++
++ netdev_dbg(bp->dev, "open\n");
++
++ /* carrier starts down */
++ netif_carrier_off(dev);
++
++ /* if the phy is not yet register, retry later*/
++ if (!bp->phy_dev)
++ return -EAGAIN;
++
++ /* RX buffers initialization */
++ macb_init_rx_buffer_size(bp, bufsz);
++
++ err = macb_alloc_consistent(bp);
++ if (err) {
++ netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
++ err);
++ return err;
++ }
++
++ napi_enable(&bp->napi);
++
++ bp->macbgem_ops.mog_init_rings(bp);
++ macb_init_hw(bp);
++
++ /* schedule a link state check */
++ phy_start(bp->phy_dev);
++
++ netif_start_queue(dev);
++
++ return 0;
++}
++
++static int macb_close(struct net_device *dev)
++{
++ struct macb *bp = netdev_priv(dev);
++ unsigned long flags;
++
++ netif_stop_queue(dev);
++ napi_disable(&bp->napi);
++
++ if (bp->phy_dev)
++ phy_stop(bp->phy_dev);
++
++ spin_lock_irqsave(&bp->lock, flags);
++ macb_reset_hw(bp);
++ netif_carrier_off(dev);
++ spin_unlock_irqrestore(&bp->lock, flags);
++
++ macb_free_consistent(bp);
++
++ return 0;
++}
++
++static void gem_update_stats(struct macb *bp)
++{
++ u32 __iomem *reg = bp->regs + GEM_OTX;
++ u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
++ u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
++
++ for (; p < end; p++, reg++)
++ *p += __raw_readl(reg);
++}
++
++static struct net_device_stats *gem_get_stats(struct macb *bp)
++{
++ struct gem_stats *hwstat = &bp->hw_stats.gem;
++ struct net_device_stats *nstat = &bp->stats;
++
++ gem_update_stats(bp);
++
++ nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
++ hwstat->rx_alignment_errors +
++ hwstat->rx_resource_errors +
++ hwstat->rx_overruns +
++ hwstat->rx_oversize_frames +
++ hwstat->rx_jabbers +
++ hwstat->rx_undersized_frames +
++ hwstat->rx_length_field_frame_errors);
++ nstat->tx_errors = (hwstat->tx_late_collisions +
++ hwstat->tx_excessive_collisions +
++ hwstat->tx_underrun +
++ hwstat->tx_carrier_sense_errors);
++ nstat->multicast = hwstat->rx_multicast_frames;
++ nstat->collisions = (hwstat->tx_single_collision_frames +
++ hwstat->tx_multiple_collision_frames +
++ hwstat->tx_excessive_collisions);
++ nstat->rx_length_errors = (hwstat->rx_oversize_frames +
++ hwstat->rx_jabbers +
++ hwstat->rx_undersized_frames +
++ hwstat->rx_length_field_frame_errors);
++ nstat->rx_over_errors = hwstat->rx_resource_errors;
++ nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
++ nstat->rx_frame_errors = hwstat->rx_alignment_errors;
++ nstat->rx_fifo_errors = hwstat->rx_overruns;
++ nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
++ nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
++ nstat->tx_fifo_errors = hwstat->tx_underrun;
++
++ return nstat;
++}
++
++struct net_device_stats *macb_get_stats(struct net_device *dev)
++{
++ struct macb *bp = netdev_priv(dev);
++ struct net_device_stats *nstat = &bp->stats;
++ struct macb_stats *hwstat = &bp->hw_stats.macb;
++
++ if (macb_is_gem(bp))
++ return gem_get_stats(bp);
++
++ /* read stats from hardware */
++ macb_update_stats(bp);
++
++ /* Convert HW stats into netdevice stats */
++ nstat->rx_errors = (hwstat->rx_fcs_errors +
++ hwstat->rx_align_errors +
++ hwstat->rx_resource_errors +
++ hwstat->rx_overruns +
++ hwstat->rx_oversize_pkts +
++ hwstat->rx_jabbers +
++ hwstat->rx_undersize_pkts +
++ hwstat->sqe_test_errors +
++ hwstat->rx_length_mismatch);
++ nstat->tx_errors = (hwstat->tx_late_cols +
++ hwstat->tx_excessive_cols +
++ hwstat->tx_underruns +
++ hwstat->tx_carrier_errors);
++ nstat->collisions = (hwstat->tx_single_cols +
++ hwstat->tx_multiple_cols +
++ hwstat->tx_excessive_cols);
++ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
++ hwstat->rx_jabbers +
++ hwstat->rx_undersize_pkts +
++ hwstat->rx_length_mismatch);
++ nstat->rx_over_errors = hwstat->rx_resource_errors +
++ hwstat->rx_overruns;
++ nstat->rx_crc_errors = hwstat->rx_fcs_errors;
++ nstat->rx_frame_errors = hwstat->rx_align_errors;
++ nstat->rx_fifo_errors = hwstat->rx_overruns;
++ /* XXX: What does "missed" mean? */
++ nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
++ nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
++ nstat->tx_fifo_errors = hwstat->tx_underruns;
++ /* Don't know about heartbeat or window errors... */
++
++ return nstat;
++}
++EXPORT_SYMBOL_GPL(macb_get_stats);
++
++static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++ struct macb *bp = netdev_priv(dev);
++ struct phy_device *phydev = bp->phy_dev;
++
++ if (!phydev)
++ return -ENODEV;
++
++ return phy_ethtool_gset(phydev, cmd);
++}
++
++static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++ struct macb *bp = netdev_priv(dev);
++ struct phy_device *phydev = bp->phy_dev;
++
++ if (!phydev)
++ return -ENODEV;
++
++ return phy_ethtool_sset(phydev, cmd);
++}
++
++static int macb_get_regs_len(struct net_device *netdev)
++{
++ return MACB_GREGS_NBR * sizeof(u32);
++}
++
++static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
++ void *p)
++{
++ struct macb *bp = netdev_priv(dev);
++ unsigned int tail, head;
++ u32 *regs_buff = p;
++
++ regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
++ | MACB_GREGS_VERSION;
++
++ tail = macb_tx_ring_wrap(bp->tx_tail);
++ head = macb_tx_ring_wrap(bp->tx_head);
++
++ regs_buff[0] = macb_readl(bp, NCR);
++ regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
++ regs_buff[2] = macb_readl(bp, NSR);
++ regs_buff[3] = macb_readl(bp, TSR);
++ regs_buff[4] = macb_readl(bp, RBQP);
++ regs_buff[5] = macb_readl(bp, TBQP);
++ regs_buff[6] = macb_readl(bp, RSR);
++ regs_buff[7] = macb_readl(bp, IMR);
++
++ regs_buff[8] = tail;
++ regs_buff[9] = head;
++ regs_buff[10] = macb_tx_dma(bp, tail);
++ regs_buff[11] = macb_tx_dma(bp, head);
++
++ if (macb_is_gem(bp)) {
++ regs_buff[12] = gem_readl(bp, USRIO);
++ regs_buff[13] = gem_readl(bp, DMACFG);
++ }
++}
++
++const struct ethtool_ops macb_ethtool_ops = {
++ .get_settings = macb_get_settings,
++ .set_settings = macb_set_settings,
++ .get_regs_len = macb_get_regs_len,
++ .get_regs = macb_get_regs,
++ .get_link = ethtool_op_get_link,
++ .get_ts_info = ethtool_op_get_ts_info,
++};
++EXPORT_SYMBOL_GPL(macb_ethtool_ops);
++
++int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct macb *bp = netdev_priv(dev);
++ struct phy_device *phydev = bp->phy_dev;
++
++ if (!netif_running(dev))
++ return -EINVAL;
++
++ if (!phydev)
++ return -ENODEV;
++
++ return phy_mii_ioctl(phydev, rq, cmd);
++}
++EXPORT_SYMBOL_GPL(macb_ioctl);
++
++static const struct net_device_ops macb_netdev_ops = {
++ .ndo_open = macb_open,
++ .ndo_stop = macb_close,
++ .ndo_start_xmit = macb_start_xmit,
++ .ndo_set_rx_mode = macb_set_rx_mode,
++ .ndo_get_stats = macb_get_stats,
++ .ndo_do_ioctl = macb_ioctl,
++ .ndo_validate_addr = eth_validate_addr,
++ .ndo_change_mtu = eth_change_mtu,
++ .ndo_set_mac_address = eth_mac_addr,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ .ndo_poll_controller = macb_poll_controller,
++#endif
++};
++
++#if defined(CONFIG_OF)
++static const struct of_device_id macb_dt_ids[] = {
++ { .compatible = "cdns,at32ap7000-macb" },
++ { .compatible = "cdns,at91sam9260-macb" },
++ { .compatible = "cdns,macb" },
++ { .compatible = "cdns,pc302-gem" },
++ { .compatible = "cdns,gem" },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, macb_dt_ids);
++#endif
++
++static int __init macb_probe(struct platform_device *pdev)
++{
++ struct macb_platform_data *pdata;
++ struct resource *regs;
++ struct net_device *dev;
++ struct macb *bp;
++ struct phy_device *phydev;
++ u32 config;
++ int err = -ENXIO;
++ struct pinctrl *pinctrl;
++ const char *mac;
++
++ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!regs) {
++ dev_err(&pdev->dev, "no mmio resource defined\n");
++ goto err_out;
++ }
++
++ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
++ if (IS_ERR(pinctrl)) {
++ err = PTR_ERR(pinctrl);
++ if (err == -EPROBE_DEFER)
++ goto err_out;
++
++ dev_warn(&pdev->dev, "No pinctrl provided\n");
++ }
++
++ err = -ENOMEM;
++ dev = alloc_etherdev(sizeof(*bp));
++ if (!dev)
++ goto err_out;
++
++ SET_NETDEV_DEV(dev, &pdev->dev);
++
++ /* TODO: Actually, we have some interesting features... */
++ dev->features |= 0;
++
++ bp = netdev_priv(dev);
++ bp->pdev = pdev;
++ bp->dev = dev;
++
++ spin_lock_init(&bp->lock);
++ INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
++
++ bp->pclk = devm_clk_get(&pdev->dev, "pclk");
++ if (IS_ERR(bp->pclk)) {
++ err = PTR_ERR(bp->pclk);
++ dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
++ goto err_out_free_dev;
++ }
++
++ bp->hclk = devm_clk_get(&pdev->dev, "hclk");
++ if (IS_ERR(bp->hclk)) {
++ err = PTR_ERR(bp->hclk);
++ dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
++ goto err_out_free_dev;
++ }
++
++ bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
++
++ err = clk_prepare_enable(bp->pclk);
++ if (err) {
++ dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
++ goto err_out_free_dev;
++ }
++
++ err = clk_prepare_enable(bp->hclk);
++ if (err) {
++ dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
++ goto err_out_disable_pclk;
++ }
++
++ if (!IS_ERR(bp->tx_clk)) {
++ err = clk_prepare_enable(bp->tx_clk);
++ if (err) {
++ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
++ err);
++ goto err_out_disable_hclk;
++ }
++ }
++
++ bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
++ if (!bp->regs) {
++ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
++ err = -ENOMEM;
++ goto err_out_disable_clocks;
++ }
++
++ dev->irq = platform_get_irq(pdev, 0);
++ err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
++ dev->name, dev);
++ if (err) {
++ dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
++ dev->irq, err);
++ goto err_out_disable_clocks;
++ }
++
++ dev->netdev_ops = &macb_netdev_ops;
++ netif_napi_add(dev, &bp->napi, macb_poll, 64);
++ dev->ethtool_ops = &macb_ethtool_ops;
++
++ dev->base_addr = regs->start;
++
++ /* setup appropriated routines according to adapter type */
++ if (macb_is_gem(bp)) {
++ bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
++ bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
++ bp->macbgem_ops.mog_init_rings = gem_init_rings;
++ bp->macbgem_ops.mog_rx = gem_rx;
++ } else {
++ bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
++ bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
++ bp->macbgem_ops.mog_init_rings = macb_init_rings;
++ bp->macbgem_ops.mog_rx = macb_rx;
++ }
++
++ /* Set MII management clock divider */
++ config = macb_mdc_clk_div(bp);
++ config |= macb_dbw(bp);
++ macb_writel(bp, NCFGR, config);
++
++ mac = of_get_mac_address(pdev->dev.of_node);
++ if (mac)
++ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
++ else
++ macb_get_hwaddr(bp);
++
++ err = of_get_phy_mode(pdev->dev.of_node);
++ if (err < 0) {
++ pdata = dev_get_platdata(&pdev->dev);
++ if (pdata && pdata->is_rmii)
++ bp->phy_interface = PHY_INTERFACE_MODE_RMII;
++ else
++ bp->phy_interface = PHY_INTERFACE_MODE_MII;
++ } else {
++ bp->phy_interface = err;
++ }
++
++ if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
++ macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
++ else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
++#if defined(CONFIG_ARCH_AT91)
++ macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
++ MACB_BIT(CLKEN)));
++#else
++ macb_or_gem_writel(bp, USRIO, 0);
++#endif
++ else
++#if defined(CONFIG_ARCH_AT91)
++ macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
++#else
++ macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
++#endif
++
++ err = register_netdev(dev);
++ if (err) {
++ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
++ goto err_out_disable_clocks;
++ }
++
++ err = macb_mii_init(bp);
++ if (err)
++ goto err_out_unregister_netdev;
++
++ platform_set_drvdata(pdev, dev);
++
++ netif_carrier_off(dev);
++
++ netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
++ macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
++ dev->irq, dev->dev_addr);
++
++ phydev = bp->phy_dev;
++ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
++ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
++
++ return 0;
++
++err_out_unregister_netdev:
++ unregister_netdev(dev);
++err_out_disable_clocks:
++ if (!IS_ERR(bp->tx_clk))
++ clk_disable_unprepare(bp->tx_clk);
++err_out_disable_hclk:
++ clk_disable_unprepare(bp->hclk);
++err_out_disable_pclk:
++ clk_disable_unprepare(bp->pclk);
++err_out_free_dev:
++ free_netdev(dev);
++err_out:
++ return err;
++}
++
++static int __exit macb_remove(struct platform_device *pdev)
++{
++ struct net_device *dev;
++ struct macb *bp;
++
++ dev = platform_get_drvdata(pdev);
++
++ if (dev) {
++ bp = netdev_priv(dev);
++ if (bp->phy_dev)
++ phy_disconnect(bp->phy_dev);
++ mdiobus_unregister(bp->mii_bus);
++ kfree(bp->mii_bus->irq);
++ mdiobus_free(bp->mii_bus);
++ unregister_netdev(dev);
++ if (!IS_ERR(bp->tx_clk))
++ clk_disable_unprepare(bp->tx_clk);
++ clk_disable_unprepare(bp->hclk);
++ clk_disable_unprepare(bp->pclk);
++ free_netdev(dev);
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int macb_suspend(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct net_device *netdev = platform_get_drvdata(pdev);
++ struct macb *bp = netdev_priv(netdev);
++
++ netif_carrier_off(netdev);
++ netif_device_detach(netdev);
++
++ if (!IS_ERR(bp->tx_clk))
++ clk_disable_unprepare(bp->tx_clk);
++ clk_disable_unprepare(bp->hclk);
++ clk_disable_unprepare(bp->pclk);
++
++ return 0;
++}
++
++static int macb_resume(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct net_device *netdev = platform_get_drvdata(pdev);
++ struct macb *bp = netdev_priv(netdev);
++
++ clk_prepare_enable(bp->pclk);
++ clk_prepare_enable(bp->hclk);
++ if (!IS_ERR(bp->tx_clk))
++ clk_prepare_enable(bp->tx_clk);
++
++ netif_device_attach(netdev);
++
++ return 0;
++}
++#endif
++
++static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
++
++static struct platform_driver macb_driver = {
++ .remove = __exit_p(macb_remove),
++ .driver = {
++ .name = "macb",
++ .owner = THIS_MODULE,
++ .of_match_table = of_match_ptr(macb_dt_ids),
++ .pm = &macb_pm_ops,
++ },
++};
++
++module_platform_driver_probe(macb_driver, macb_probe);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
++MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
++MODULE_ALIAS("platform:macb");
+diff -Nur linux-3.14.36/drivers/net/ethernet/chelsio/cxgb4vf/sge.c linux-openelec/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+--- linux-3.14.36/drivers/net/ethernet/chelsio/cxgb4vf/sge.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/chelsio/cxgb4vf/sge.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1510,7 +1510,8 @@
+ {
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *pkt = (void *)rsp;
+- bool csum_ok = pkt->csum_calc && !pkt->err_vec;
++ bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
++ (rspq->netdev->features & NETIF_F_RXCSUM);
+ struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+
+ /*
+@@ -1538,8 +1539,8 @@
+ skb_record_rx_queue(skb, rspq->idx);
+ rxq->stats.pkts++;
+
+- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
+- !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
++ if (csum_ok && !pkt->err_vec &&
++ (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (!pkt->ip_frag)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else {
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/fec.h linux-openelec/drivers/net/ethernet/freescale/fec.h
+--- linux-3.14.36/drivers/net/ethernet/freescale/fec.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/fec.h 2015-05-06 12:05:42.000000000 -0500
+@@ -221,7 +221,7 @@
+ #define BD_ENET_TX_RCMASK ((ushort)0x003c)
+ #define BD_ENET_TX_UN ((ushort)0x0002)
+ #define BD_ENET_TX_CSL ((ushort)0x0001)
+-#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
++#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
+
+ /*enhanced buffer descriptor control/status used by Ethernet transmit*/
+ #define BD_ENET_TX_INT 0x40000000
+@@ -246,8 +246,8 @@
+ #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+ #define FEC_ENET_TX_FRSIZE 2048
+ #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+-#define TX_RING_SIZE 16 /* Must be power of two */
+-#define TX_RING_MOD_MASK 15 /* for this to work */
++#define TX_RING_SIZE 512 /* Must be power of two */
++#define TX_RING_MOD_MASK 511 /* for this to work */
+
+ #define BD_ENET_RX_INT 0x00800000
+ #define BD_ENET_RX_PTP ((ushort)0x0400)
+@@ -256,12 +256,6 @@
+ #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+ #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+
+-struct fec_enet_delayed_work {
+- struct delayed_work delay_work;
+- bool timeout;
+- bool trig_tx;
+-};
+-
+ /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+@@ -296,12 +290,18 @@
+ /* The ring entries to be free()ed */
+ struct bufdesc *dirty_tx;
+
++ unsigned short bufdesc_size;
+ unsigned short tx_ring_size;
+ unsigned short rx_ring_size;
++ unsigned short tx_stop_threshold;
++ unsigned short tx_wake_threshold;
++
++ /* Software TSO */
++ char *tso_hdrs;
++ dma_addr_t tso_hdrs_dma;
+
+ struct platform_device *pdev;
+
+- int opened;
+ int dev_id;
+
+ /* Phylib and MDIO interface */
+@@ -321,6 +321,8 @@
+ struct napi_struct napi;
+ int csum_flags;
+
++ struct work_struct tx_timeout_work;
++
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ unsigned long last_overflow_check;
+@@ -333,7 +335,6 @@
+ int hwts_rx_en;
+ int hwts_tx_en;
+ struct timer_list time_keep;
+- struct fec_enet_delayed_work delay_work;
+ struct regulator *reg_phy;
+ };
+
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/fec_main.c linux-openelec/drivers/net/ethernet/freescale/fec_main.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/fec_main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/fec_main.c 2015-05-06 12:05:42.000000000 -0500
+@@ -36,6 +36,7 @@
+ #include <linux/in.h>
+ #include <linux/ip.h>
+ #include <net/ip.h>
++#include <net/tso.h>
+ #include <linux/tcp.h>
+ #include <linux/udp.h>
+ #include <linux/icmp.h>
+@@ -54,6 +55,10 @@
+ #include <linux/of_net.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/if_vlan.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/busfreq-imx6.h>
++#include <linux/pm_runtime.h>
++#include <linux/pm_qos.h>
+
+ #include <asm/cacheflush.h>
+
+@@ -91,6 +96,8 @@
+ #define FEC_QUIRK_HAS_CSUM (1 << 5)
+ /* Controller has hardware vlan support */
+ #define FEC_QUIRK_HAS_VLAN (1 << 6)
++/* Controller is FEC-MAC */
++#define FEC_QUIRK_FEC_MAC (1 << 7)
+ /* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+@@ -100,7 +107,13 @@
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+-#define FEC_QUIRK_ERR006358 (1 << 7)
++#define FEC_QUIRK_ERR006358 (1 << 8)
++/*
++ * i.MX6Q/DL ENET cannot wake up system in wait mode because ENET tx & rx
++ * interrupt signal don't connect to GPC. So use pm qos to avoid cpu enter
++ * to wait mode.
++ */
++#define FEC_QUIRK_BUG_WAITMODE (1 << 9)
+
+ static struct platform_device_id fec_devtype[] = {
+ {
+@@ -109,7 +122,7 @@
+ .driver_data = 0,
+ }, {
+ .name = "imx25-fec",
+- .driver_data = FEC_QUIRK_USE_GASKET,
++ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_FEC_MAC,
+ }, {
+ .name = "imx27-fec",
+ .driver_data = 0,
+@@ -120,7 +133,8 @@
+ .name = "imx6q-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
++ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
++ FEC_QUIRK_BUG_WAITMODE,
+ }, {
+ .name = "mvf600-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC,
+@@ -172,10 +186,6 @@
+ #endif
+ #endif /* CONFIG_M5272 */
+
+-#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
+-#error "FEC: descriptor ring size constants too large"
+-#endif
+-
+ /* Interrupt events/masks. */
+ #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
+ #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
+@@ -231,6 +241,15 @@
+ #define FEC_PAUSE_FLAG_AUTONEG 0x1
+ #define FEC_PAUSE_FLAG_ENABLE 0x2
+
++#define TSO_HEADER_SIZE 128
++/* Max number of allowed TCP segments for software TSO */
++#define FEC_MAX_TSO_SEGS 100
++#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
++
++#define IS_TSO_HEADER(txq, addr) \
++ ((addr >= txq->tso_hdrs_dma) && \
++ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
++
+ static int mii_cnt;
+
+ static inline
+@@ -286,6 +305,22 @@
+ return (new_bd < base) ? (new_bd + ring_size) : new_bd;
+ }
+
++static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
++ struct fec_enet_private *fep)
++{
++ return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
++}
++
++static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
++{
++ int entries;
++
++ entries = ((const char *)fep->dirty_tx -
++ (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
++
++ return entries > 0 ? entries : entries + fep->tx_ring_size;
++}
++
+ static void *swap_buffer(void *bufaddr, int len)
+ {
+ int i;
+@@ -297,6 +332,32 @@
+ return bufaddr;
+ }
+
++static void fec_dump(struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ struct bufdesc *bdp = fep->tx_bd_base;
++ unsigned int index = 0;
++
++ netdev_info(ndev, "TX ring dump\n");
++ pr_info("Nr SC addr len SKB\n");
++
++ do {
++ pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
++ index,
++ bdp == fep->cur_tx ? 'S' : ' ',
++ bdp == fep->dirty_tx ? 'H' : ' ',
++ bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
++ fep->tx_skbuff[index]);
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ index++;
++ } while (bdp != fep->tx_bd_base);
++}
++
++static inline bool is_ipv4_pkt(struct sk_buff *skb)
++{
++ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
++}
++
+ static int
+ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
+ {
+@@ -307,137 +368,419 @@
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
++ if (is_ipv4_pkt(skb))
++ ip_hdr(skb)->check = 0;
+ *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+
+ return 0;
+ }
+
+-static netdev_tx_t
+-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static int
++fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+- struct bufdesc *bdp, *bdp_pre;
+- void *bufaddr;
+- unsigned short status;
++ struct bufdesc *bdp = fep->cur_tx;
++ struct bufdesc_ex *ebdp;
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ int frag, frag_len;
++ unsigned short status;
++ unsigned int estatus = 0;
++ skb_frag_t *this_frag;
+ unsigned int index;
++ void *bufaddr;
++ dma_addr_t addr;
++ int i;
+
+- /* Fill in a Tx ring entry */
++ for (frag = 0; frag < nr_frags; frag++) {
++ this_frag = &skb_shinfo(skb)->frags[frag];
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ ebdp = (struct bufdesc_ex *)bdp;
++
++ status = bdp->cbd_sc;
++ status &= ~BD_ENET_TX_STATS;
++ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
++ frag_len = skb_shinfo(skb)->frags[frag].size;
++
++ /* Handle the last BD specially */
++ if (frag == nr_frags - 1) {
++ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
++ if (fep->bufdesc_ex) {
++ estatus |= BD_ENET_TX_INT;
++ if (unlikely(skb_shinfo(skb)->tx_flags &
++ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
++ estatus |= BD_ENET_TX_TS;
++ }
++ }
++
++ if (fep->bufdesc_ex) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++ ebdp->cbd_bdu = 0;
++ ebdp->cbd_esc = estatus;
++ }
++
++ bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
++
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
++ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
++ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
++ memcpy(fep->tx_bounce[index], bufaddr, frag_len);
++ bufaddr = fep->tx_bounce[index];
++
++ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
++ swap_buffer(bufaddr, frag_len);
++ }
++
++ addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(&fep->pdev->dev, addr)) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "Tx DMA memory map failed\n");
++ goto dma_mapping_error;
++ }
++
++ bdp->cbd_bufaddr = addr;
++ bdp->cbd_datlen = frag_len;
++ bdp->cbd_sc = status;
++ }
++
++ fep->cur_tx = bdp;
++
++ return 0;
++
++dma_mapping_error:
+ bdp = fep->cur_tx;
++ for (i = 0; i < frag; i++) {
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
++ bdp->cbd_datlen, DMA_TO_DEVICE);
++ }
++ return NETDEV_TX_OK;
++}
+
+- status = bdp->cbd_sc;
++static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ const struct platform_device_id *id_entry =
++ platform_get_device_id(fep->pdev);
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ struct bufdesc *bdp, *last_bdp;
++ void *bufaddr;
++ dma_addr_t addr;
++ unsigned short status;
++ unsigned short buflen;
++ unsigned int estatus = 0;
++ unsigned int index;
++ int entries_free;
++ int ret;
+
+- if (status & BD_ENET_TX_READY) {
+- /* Ooops. All transmit buffers are full. Bail out.
+- * This should not happen, since ndev->tbusy should be set.
+- */
+- netdev_err(ndev, "tx queue full!\n");
+- return NETDEV_TX_BUSY;
++ entries_free = fec_enet_get_free_txdesc_num(fep);
++ if (entries_free < MAX_SKB_FRAGS + 1) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "NOT enough BD for SG!\n");
++ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+- /* Clear all of the status flags */
++ /* Fill in a Tx ring entry */
++ bdp = fep->cur_tx;
++ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+
+ /* Set buffer length and buffer pointer */
+ bufaddr = skb->data;
+- bdp->cbd_datlen = skb->len;
++ buflen = skb_headlen(skb);
+
+- /*
+- * On some FEC implementations data must be aligned on
+- * 4-byte boundaries. Use bounce buffers to copy data
+- * and get it aligned. Ugh.
+- */
+- if (fep->bufdesc_ex)
+- index = (struct bufdesc_ex *)bdp -
+- (struct bufdesc_ex *)fep->tx_bd_base;
+- else
+- index = bdp - fep->tx_bd_base;
+-
+- if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
+- memcpy(fep->tx_bounce[index], skb->data, skb->len);
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
++ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
++ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
++ memcpy(fep->tx_bounce[index], skb->data, buflen);
+ bufaddr = fep->tx_bounce[index];
+- }
+-
+- /*
+- * Some design made an incorrect assumption on endian mode of
+- * the system that it's running on. As the result, driver has to
+- * swap every frame going to and coming from the controller.
+- */
+- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+- swap_buffer(bufaddr, skb->len);
+
+- /* Save skb pointer */
+- fep->tx_skbuff[index] = skb;
++ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
++ swap_buffer(bufaddr, buflen);
++ }
+
+- /* Push the data cache so the CPM does not get stale memory
+- * data.
+- */
+- bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+- skb->len, DMA_TO_DEVICE);
+- if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+- bdp->cbd_bufaddr = 0;
+- fep->tx_skbuff[index] = NULL;
++ /* Push the data cache so the CPM does not get stale memory data. */
++ addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
++ if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_OK;
+ }
+
++ if (nr_frags) {
++ ret = fec_enet_txq_submit_frag_skb(skb, ndev);
++ if (ret)
++ return ret;
++ } else {
++ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
++ if (fep->bufdesc_ex) {
++ estatus = BD_ENET_TX_INT;
++ if (unlikely(skb_shinfo(skb)->tx_flags &
++ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
++ estatus |= BD_ENET_TX_TS;
++ }
++ }
++
+ if (fep->bufdesc_ex) {
+
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+- ebdp->cbd_bdu = 0;
++
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+- fep->hwts_tx_en)) {
+- ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
++ fep->hwts_tx_en))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+- } else {
+- ebdp->cbd_esc = BD_ENET_TX_INT;
+
+- /* Enable protocol checksum flags
+- * We do not bother with the IP Checksum bits as they
+- * are done by the kernel
+- */
+- if (skb->ip_summed == CHECKSUM_PARTIAL)
+- ebdp->cbd_esc |= BD_ENET_TX_PINS;
+- }
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++
++ ebdp->cbd_bdu = 0;
++ ebdp->cbd_esc = estatus;
+ }
+
++ last_bdp = fep->cur_tx;
++ index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
++ /* Save skb pointer */
++ fep->tx_skbuff[index] = skb;
++
++ bdp->cbd_datlen = buflen;
++ bdp->cbd_bufaddr = addr;
++
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
++ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
+
+- bdp_pre = fec_enet_get_prevdesc(bdp, fep);
+- if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+- !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+- fep->delay_work.trig_tx = true;
+- schedule_delayed_work(&(fep->delay_work.delay_work),
+- msecs_to_jiffies(1));
+- }
+-
+ /* If this was the last BD in the ring, start at the beginning again. */
+- bdp = fec_enet_get_nextdesc(bdp, fep);
++ bdp = fec_enet_get_nextdesc(last_bdp, fep);
+
+ skb_tx_timestamp(skb);
+
+ fep->cur_tx = bdp;
+
+- if (fep->cur_tx == fep->dirty_tx)
+- netif_stop_queue(ndev);
++ /* Trigger transmission start */
++ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
++
++ return 0;
++}
++
++static int
++fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
++ struct bufdesc *bdp, int index, char *data,
++ int size, bool last_tcp, bool is_last)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ const struct platform_device_id *id_entry =
++ platform_get_device_id(fep->pdev);
++ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
++ unsigned short status;
++ unsigned int estatus = 0;
++ dma_addr_t addr;
++
++ status = bdp->cbd_sc;
++ status &= ~BD_ENET_TX_STATS;
++
++ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
++
++ if (((unsigned long) data) & FEC_ALIGNMENT ||
++ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
++ memcpy(fep->tx_bounce[index], data, size);
++ data = fep->tx_bounce[index];
++
++ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
++ swap_buffer(data, size);
++ }
++
++ addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
++ if (dma_mapping_error(&fep->pdev->dev, addr)) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "Tx DMA memory map failed\n");
++ return NETDEV_TX_BUSY;
++ }
++
++ bdp->cbd_datlen = size;
++ bdp->cbd_bufaddr = addr;
++
++ if (fep->bufdesc_ex) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++ ebdp->cbd_bdu = 0;
++ ebdp->cbd_esc = estatus;
++ }
++
++ /* Handle the last BD specially */
++ if (last_tcp)
++ status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
++ if (is_last) {
++ status |= BD_ENET_TX_INTR;
++ if (fep->bufdesc_ex)
++ ebdp->cbd_esc |= BD_ENET_TX_INT;
++ }
++
++ bdp->cbd_sc = status;
++
++ return 0;
++}
++
++static int
++fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
++ struct bufdesc *bdp, int index)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ const struct platform_device_id *id_entry =
++ platform_get_device_id(fep->pdev);
++ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
++ void *bufaddr;
++ unsigned long dmabuf;
++ unsigned short status;
++ unsigned int estatus = 0;
++
++ status = bdp->cbd_sc;
++ status &= ~BD_ENET_TX_STATS;
++ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
++
++ bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
++ dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
++ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
++ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
++ memcpy(fep->tx_bounce[index], skb->data, hdr_len);
++ bufaddr = fep->tx_bounce[index];
++
++ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
++ swap_buffer(bufaddr, hdr_len);
++
++ dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
++ hdr_len, DMA_TO_DEVICE);
++ if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "Tx DMA memory map failed\n");
++ return NETDEV_TX_BUSY;
++ }
++ }
++
++ bdp->cbd_bufaddr = dmabuf;
++ bdp->cbd_datlen = hdr_len;
++
++ if (fep->bufdesc_ex) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++ ebdp->cbd_bdu = 0;
++ ebdp->cbd_esc = estatus;
++ }
++
++ bdp->cbd_sc = status;
++
++ return 0;
++}
++
++static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++ int total_len, data_left;
++ struct bufdesc *bdp = fep->cur_tx;
++ struct tso_t tso;
++ unsigned int index = 0;
++ int ret;
++
++ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "NOT enough BD for TSO!\n");
++ return NETDEV_TX_OK;
++ }
++
++ /* Protocol checksum off-load for TCP and UDP. */
++ if (fec_enet_clear_csum(skb, ndev)) {
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++ }
++
++ /* Initialize the TSO handler, and prepare the first payload */
++ tso_start(skb, &tso);
++
++ total_len = skb->len - hdr_len;
++ while (total_len > 0) {
++ char *hdr;
++
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
++ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
++ total_len -= data_left;
++
++ /* prepare packet headers: MAC + IP + TCP */
++ hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
++ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
++ ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
++ if (ret)
++ goto err_release;
++
++ while (data_left > 0) {
++ int size;
++
++ size = min_t(int, tso.size, data_left);
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
++ ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
++ size, size == data_left,
++ total_len == 0);
++ if (ret)
++ goto err_release;
++
++ data_left -= size;
++ tso_build_data(skb, &tso, size);
++ }
++
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ }
++
++ /* Save skb pointer */
++ fep->tx_skbuff[index] = skb;
++
++ skb_tx_timestamp(skb);
++ fep->cur_tx = bdp;
+
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
++ return 0;
++
++err_release:
++ /* TODO: Release all used data descriptors for TSO */
++ return ret;
++}
++
++static netdev_tx_t
++fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ int entries_free;
++ int ret;
++
++ if (skb_is_gso(skb))
++ ret = fec_enet_txq_submit_tso(skb, ndev);
++ else
++ ret = fec_enet_txq_submit_skb(skb, ndev);
++ if (ret)
++ return ret;
++
++ entries_free = fec_enet_get_free_txdesc_num(fep);
++ if (entries_free <= fep->tx_stop_threshold)
++ netif_stop_queue(ndev);
++
+ return NETDEV_TX_OK;
+ }
+
+@@ -474,7 +817,7 @@
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+- if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
++ if (fep->tx_skbuff[i]) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
+ }
+@@ -488,12 +831,13 @@
+ fep->dirty_tx = bdp;
+ }
+
+-/* This function is called to start or restart the FEC during a link
+- * change. This only happens when switching between half and full
+- * duplex.
++/*
++ * This function is called to start or restart the FEC during a link
++ * change, transmit timeout, or to reconfigure the FEC. The network
++ * packet processing for this device must be stopped before this call.
+ */
+ static void
+-fec_restart(struct net_device *ndev, int duplex)
++fec_restart(struct net_device *ndev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+@@ -504,13 +848,6 @@
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
+ u32 ecntl = 0x2; /* ETHEREN */
+
+- if (netif_running(ndev)) {
+- netif_device_detach(ndev);
+- napi_disable(&fep->napi);
+- netif_stop_queue(ndev);
+- netif_tx_lock_bh(ndev);
+- }
+-
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+@@ -519,7 +856,8 @@
+ * enet-mac reset will reset mac address registers too,
+ * so need to reconfigure it.
+ */
+- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
++ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC ||
++ id_entry->driver_data & FEC_QUIRK_FEC_MAC) {
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+@@ -551,7 +889,7 @@
+ }
+
+ /* Enable MII mode */
+- if (duplex) {
++ if (fep->full_duplex == DUPLEX_FULL) {
+ /* FD enable */
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
+ } else {
+@@ -560,8 +898,6 @@
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
+ }
+
+- fep->full_duplex = duplex;
+-
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+@@ -679,13 +1015,6 @@
+
+ /* Enable interrupts we wish to service */
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+-
+- if (netif_running(ndev)) {
+- netif_tx_unlock_bh(ndev);
+- netif_wake_queue(ndev);
+- napi_enable(&fep->napi);
+- netif_device_attach(ndev);
+- }
+ }
+
+ static void
+@@ -723,29 +1052,44 @@
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
++ fec_dump(ndev);
++
+ ndev->stats.tx_errors++;
+
+- fep->delay_work.timeout = true;
+- schedule_delayed_work(&(fep->delay_work.delay_work), 0);
++ schedule_work(&fep->tx_timeout_work);
+ }
+
+-static void fec_enet_work(struct work_struct *work)
++static void fec_enet_timeout_work(struct work_struct *work)
+ {
+ struct fec_enet_private *fep =
+- container_of(work,
+- struct fec_enet_private,
+- delay_work.delay_work.work);
+-
+- if (fep->delay_work.timeout) {
+- fep->delay_work.timeout = false;
+- fec_restart(fep->netdev, fep->full_duplex);
+- netif_wake_queue(fep->netdev);
+- }
++ container_of(work, struct fec_enet_private, tx_timeout_work);
++ struct net_device *ndev = fep->netdev;
+
+- if (fep->delay_work.trig_tx) {
+- fep->delay_work.trig_tx = false;
+- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
++ rtnl_lock();
++ if (netif_device_present(ndev) || netif_running(ndev)) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
++ fec_restart(ndev);
++ netif_wake_queue(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
+ }
++ rtnl_unlock();
++}
++
++static void
++fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
++ struct skb_shared_hwtstamps *hwtstamps)
++{
++ unsigned long flags;
++ u64 ns;
++
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
++ ns = timecounter_cyc2time(&fep->tc, ts);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++
++ memset(hwtstamps, 0, sizeof(*hwtstamps));
++ hwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ static void
+@@ -756,6 +1100,7 @@
+ unsigned short status;
+ struct sk_buff *skb;
+ int index = 0;
++ int entries_free;
+
+ fep = netdev_priv(ndev);
+ bdp = fep->dirty_tx;
+@@ -769,16 +1114,18 @@
+ if (bdp == fep->cur_tx)
+ break;
+
+- if (fep->bufdesc_ex)
+- index = (struct bufdesc_ex *)bdp -
+- (struct bufdesc_ex *)fep->tx_bd_base;
+- else
+- index = bdp - fep->tx_bd_base;
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+
+ skb = fep->tx_skbuff[index];
+- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
+- DMA_TO_DEVICE);
++ fep->tx_skbuff[index] = NULL;
++ if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
++ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
++ bdp->cbd_datlen, DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
++ if (!skb) {
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ continue;
++ }
+
+ /* Check for errors. */
+ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+@@ -797,26 +1144,18 @@
+ ndev->stats.tx_carrier_errors++;
+ } else {
+ ndev->stats.tx_packets++;
+- ndev->stats.tx_bytes += bdp->cbd_datlen;
++ ndev->stats.tx_bytes += skb->len;
+ }
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ fep->bufdesc_ex) {
+ struct skb_shared_hwtstamps shhwtstamps;
+- unsigned long flags;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+- spin_lock_irqsave(&fep->tmreg_lock, flags);
+- shhwtstamps.hwtstamp = ns_to_ktime(
+- timecounter_cyc2time(&fep->tc, ebdp->ts));
+- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++ fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+- if (status & BD_ENET_TX_READY)
+- netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
+-
+ /* Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+@@ -825,7 +1164,6 @@
+
+ /* Free the sk buffer associated with this last transmit */
+ dev_kfree_skb_any(skb);
+- fep->tx_skbuff[index] = NULL;
+
+ fep->dirty_tx = bdp;
+
+@@ -834,14 +1172,17 @@
+
+ /* Since we have freed up a buffer, the ring is no longer full
+ */
+- if (fep->dirty_tx != fep->cur_tx) {
+- if (netif_queue_stopped(ndev))
++ if (netif_queue_stopped(ndev)) {
++ entries_free = fec_enet_get_free_txdesc_num(fep);
++ if (entries_free >= fep->tx_wake_threshold)
+ netif_wake_queue(ndev);
+ }
+ }
+- return;
+-}
+
++ /* ERR006538: Keep the transmitter going */
++ if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
++ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
++}
+
+ /* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+@@ -876,8 +1217,11 @@
+
+ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+
+- if (pkt_received >= budget)
++ if (pkt_received >= budget) {
++ /* overwhelmed take a breath */
++ udelay(210);
+ break;
++ }
+ pkt_received++;
+
+ /* Since we have allocated space to hold a complete frame,
+@@ -886,8 +1230,7 @@
+ if ((status & BD_ENET_RX_LAST) == 0)
+ netdev_err(ndev, "rcv is not +last\n");
+
+- if (!fep->opened)
+- goto rx_processing_done;
++ writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+
+ /* Check for errors. */
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+@@ -920,11 +1263,7 @@
+ pkt_len = bdp->cbd_datlen;
+ ndev->stats.rx_bytes += pkt_len;
+
+- if (fep->bufdesc_ex)
+- index = (struct bufdesc_ex *)bdp -
+- (struct bufdesc_ex *)fep->rx_bd_base;
+- else
+- index = bdp - fep->rx_bd_base;
++ index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
+ data = fep->rx_skbuff[index]->data;
+ dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+@@ -975,18 +1314,9 @@
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* Get receive timestamp from the skb */
+- if (fep->hwts_rx_en && fep->bufdesc_ex) {
+- struct skb_shared_hwtstamps *shhwtstamps =
+- skb_hwtstamps(skb);
+- unsigned long flags;
+-
+- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+-
+- spin_lock_irqsave(&fep->tmreg_lock, flags);
+- shhwtstamps->hwtstamp = ns_to_ktime(
+- timecounter_cyc2time(&fep->tc, ebdp->ts));
+- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+- }
++ if (fep->hwts_rx_en && fep->bufdesc_ex)
++ fec_enet_hwtstamp(fep, ebdp->ts,
++ skb_hwtstamps(skb));
+
+ if (fep->bufdesc_ex &&
+ (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+@@ -1044,29 +1374,25 @@
+ {
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
++ const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
+ uint int_events;
+ irqreturn_t ret = IRQ_NONE;
+
+- do {
+- int_events = readl(fep->hwp + FEC_IEVENT);
+- writel(int_events, fep->hwp + FEC_IEVENT);
++ int_events = readl(fep->hwp + FEC_IEVENT);
++ writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
+
+- if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
+- ret = IRQ_HANDLED;
++ if (int_events & napi_mask) {
++ ret = IRQ_HANDLED;
+
+- /* Disable the RX interrupt */
+- if (napi_schedule_prep(&fep->napi)) {
+- writel(FEC_RX_DISABLED_IMASK,
+- fep->hwp + FEC_IMASK);
+- __napi_schedule(&fep->napi);
+- }
+- }
++ /* Disable the NAPI interrupts */
++ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
++ napi_schedule(&fep->napi);
++ }
+
+- if (int_events & FEC_ENET_MII) {
+- ret = IRQ_HANDLED;
+- complete(&fep->mdio_done);
+- }
+- } while (int_events);
++ if (int_events & FEC_ENET_MII) {
++ ret = IRQ_HANDLED;
++ complete(&fep->mdio_done);
++ }
+
+ return ret;
+ }
+@@ -1074,8 +1400,16 @@
+ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
+ {
+ struct net_device *ndev = napi->dev;
+- int pkts = fec_enet_rx(ndev, budget);
+ struct fec_enet_private *fep = netdev_priv(ndev);
++ int pkts;
++
++ /*
++ * Clear any pending transmit or receive interrupts before
++ * processing the rings to avoid racing with the hardware.
++ */
++ writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
++
++ pkts = fec_enet_rx(ndev, budget);
+
+ fec_enet_tx(ndev);
+
+@@ -1173,14 +1507,23 @@
+ return;
+ }
+
+- if (phy_dev->link) {
++ /*
++ * If the netdev is down, or is going down, we're not interested
++ * in link state events, so just mark our idea of the link as down
++ * and ignore the event.
++ */
++ if (!netif_running(ndev) || !netif_device_present(ndev)) {
++ fep->link = 0;
++ } else if (phy_dev->link) {
+ if (!fep->link) {
+ fep->link = phy_dev->link;
+ status_change = 1;
+ }
+
+- if (fep->full_duplex != phy_dev->duplex)
++ if (fep->full_duplex != phy_dev->duplex) {
++ fep->full_duplex = phy_dev->duplex;
+ status_change = 1;
++ }
+
+ if (phy_dev->speed != fep->speed) {
+ fep->speed = phy_dev->speed;
+@@ -1188,11 +1531,21 @@
+ }
+
+ /* if any of the above changed restart the FEC */
+- if (status_change)
+- fec_restart(ndev, phy_dev->duplex);
++ if (status_change) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
++ fec_restart(ndev);
++ netif_wake_queue(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
++ }
+ } else {
+ if (fep->link) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
+ fec_stop(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
+ fep->link = phy_dev->link;
+ status_change = 1;
+ }
+@@ -1255,9 +1608,51 @@
+ return 0;
+ }
+
+-static int fec_enet_mdio_reset(struct mii_bus *bus)
++static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ {
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ int ret;
++
++ if (enable) {
++ pm_runtime_enable(&fep->pdev->dev);
++
++ ret = clk_prepare_enable(fep->clk_ahb);
++ if (ret)
++ return ret;
++ ret = clk_prepare_enable(fep->clk_ipg);
++ if (ret)
++ goto failed_clk_ipg;
++ if (fep->clk_enet_out) {
++ ret = clk_prepare_enable(fep->clk_enet_out);
++ if (ret)
++ goto failed_clk_enet_out;
++ }
++ if (fep->clk_ptp) {
++ ret = clk_prepare_enable(fep->clk_ptp);
++ if (ret)
++ goto failed_clk_ptp;
++ }
++ } else {
++ clk_disable_unprepare(fep->clk_ahb);
++ clk_disable_unprepare(fep->clk_ipg);
++ if (fep->clk_enet_out)
++ clk_disable_unprepare(fep->clk_enet_out);
++ if (fep->clk_ptp)
++ clk_disable_unprepare(fep->clk_ptp);
++
++ pm_runtime_disable(&fep->pdev->dev);
++ }
++
+ return 0;
++failed_clk_ptp:
++ if (fep->clk_enet_out)
++ clk_disable_unprepare(fep->clk_enet_out);
++failed_clk_enet_out:
++ clk_disable_unprepare(fep->clk_ipg);
++failed_clk_ipg:
++ clk_disable_unprepare(fep->clk_ahb);
++
++ return ret;
+ }
+
+ static int fec_enet_mii_probe(struct net_device *ndev)
+@@ -1304,6 +1699,7 @@
+ /* mask with MAC supported features */
+ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
+ phy_dev->supported &= PHY_GBIT_FEATURES;
++ phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
+ #if !defined(CONFIG_M5272)
+ phy_dev->supported |= SUPPORTED_Pause;
+ #endif
+@@ -1369,7 +1765,7 @@
+ * Reference Manual has an error on this, and gets fixed on i.MX6Q
+ * document.
+ */
+- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
++ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ fep->phy_speed--;
+ fep->phy_speed <<= 1;
+@@ -1384,7 +1780,6 @@
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+- fep->mii_bus->reset = fec_enet_mdio_reset;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, fep->dev_id + 1);
+ fep->mii_bus->priv = fep;
+@@ -1508,6 +1903,9 @@
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
++ if (!fep->phy_dev)
++ return -ENODEV;
++
+ if (pause->tx_pause != pause->rx_pause) {
+ netdev_info(ndev,
+ "hardware only support enable/disable both tx and rx");
+@@ -1533,8 +1931,14 @@
+ fec_stop(ndev);
+ phy_start_aneg(fep->phy_dev);
+ }
+- if (netif_running(ndev))
+- fec_restart(ndev, 0);
++ if (netif_running(ndev)) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
++ fec_restart(ndev);
++ netif_wake_queue(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
++ }
+
+ return 0;
+ }
+@@ -1651,21 +2055,19 @@
+ }
+
+ static const struct ethtool_ops fec_enet_ethtool_ops = {
+-#if !defined(CONFIG_M5272)
+- .get_pauseparam = fec_enet_get_pauseparam,
+- .set_pauseparam = fec_enet_set_pauseparam,
+-#endif
+ .get_settings = fec_enet_get_settings,
+ .set_settings = fec_enet_set_settings,
+ .get_drvinfo = fec_enet_get_drvinfo,
+- .get_link = ethtool_op_get_link,
+- .get_ts_info = fec_enet_get_ts_info,
+ .nway_reset = fec_enet_nway_reset,
++ .get_link = ethtool_op_get_link,
+ #ifndef CONFIG_M5272
+- .get_ethtool_stats = fec_enet_get_ethtool_stats,
++ .get_pauseparam = fec_enet_get_pauseparam,
++ .set_pauseparam = fec_enet_set_pauseparam,
+ .get_strings = fec_enet_get_strings,
++ .get_ethtool_stats = fec_enet_get_ethtool_stats,
+ .get_sset_count = fec_enet_get_sset_count,
+ #endif
++ .get_ts_info = fec_enet_get_ts_info,
+ };
+
+ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+@@ -1699,18 +2101,23 @@
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < fep->rx_ring_size; i++) {
+ skb = fep->rx_skbuff[i];
+-
+- if (bdp->cbd_bufaddr)
++ fep->rx_skbuff[i] = NULL;
++ if (skb) {
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+- if (skb)
+ dev_kfree_skb(skb);
++ }
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ }
+
+ bdp = fep->tx_bd_base;
+- for (i = 0; i < fep->tx_ring_size; i++)
++ for (i = 0; i < fep->tx_ring_size; i++) {
+ kfree(fep->tx_bounce[i]);
++ fep->tx_bounce[i] = NULL;
++ skb = fep->tx_skbuff[i];
++ fep->tx_skbuff[i] = NULL;
++ dev_kfree_skb(skb);
++ }
+ }
+
+ static int fec_enet_alloc_buffers(struct net_device *ndev)
+@@ -1722,21 +2129,23 @@
+
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < fep->rx_ring_size; i++) {
++ dma_addr_t addr;
++
+ skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+- if (!skb) {
+- fec_enet_free_buffers(ndev);
+- return -ENOMEM;
+- }
+- fep->rx_skbuff[i] = skb;
++ if (!skb)
++ goto err_alloc;
+
+- bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
++ addr = dma_map_single(&fep->pdev->dev, skb->data,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+- if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+- fec_enet_free_buffers(ndev);
++ if (dma_mapping_error(&fep->pdev->dev, addr)) {
++ dev_kfree_skb(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Rx DMA memory map failed\n");
+- return -ENOMEM;
++ goto err_alloc;
+ }
++
++ fep->rx_skbuff[i] = skb;
++ bdp->cbd_bufaddr = addr;
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+
+ if (fep->bufdesc_ex) {
+@@ -1754,6 +2163,8 @@
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < fep->tx_ring_size; i++) {
+ fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
++ if (!fep->tx_bounce[i])
++ goto err_alloc;
+
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+@@ -1771,14 +2182,35 @@
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ return 0;
++
++ err_alloc:
++ fec_enet_free_buffers(ndev);
++ return -ENOMEM;
+ }
+
+ static int
+ fec_enet_open(struct net_device *ndev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
++ const struct platform_device_id *id_entry =
++ platform_get_device_id(fep->pdev);
+ int ret;
+
++ if (id_entry->driver_data & FEC_QUIRK_BUG_WAITMODE)
++ pm_qos_add_request(&ndev->pm_qos_req,
++ PM_QOS_CPU_DMA_LATENCY,
++ 0);
++ else
++ pm_qos_add_request(&ndev->pm_qos_req,
++ PM_QOS_CPU_DMA_LATENCY,
++ PM_QOS_DEFAULT_VALUE);
++
++
++ pinctrl_pm_select_default_state(&fep->pdev->dev);
++ ret = fec_enet_clk_enable(ndev, true);
++ if (ret)
++ return ret;
++
+ /* I should reset the ring buffers here, but I don't yet know
+ * a simple way to do that.
+ */
+@@ -1794,10 +2226,12 @@
+ return ret;
+ }
+
++ pm_runtime_get_sync(&fep->pdev->dev);
++
++ fec_restart(ndev);
+ napi_enable(&fep->napi);
+ phy_start(fep->phy_dev);
+ netif_start_queue(ndev);
+- fep->opened = 1;
+ return 0;
+ }
+
+@@ -1806,17 +2240,22 @@
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+- /* Don't know what to do yet. */
+- napi_disable(&fep->napi);
+- fep->opened = 0;
+- netif_stop_queue(ndev);
+- fec_stop(ndev);
++ phy_stop(fep->phy_dev);
+
+- if (fep->phy_dev) {
+- phy_stop(fep->phy_dev);
+- phy_disconnect(fep->phy_dev);
++ if (netif_device_present(ndev)) {
++ napi_disable(&fep->napi);
++ netif_tx_disable(ndev);
++ fec_stop(ndev);
+ }
+
++ phy_disconnect(fep->phy_dev);
++ fep->phy_dev = NULL;
++
++ fec_enet_clk_enable(ndev, false);
++ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
++ pm_qos_remove_request(&ndev->pm_qos_req);
++ pm_runtime_put_sync_suspend(&fep->pdev->dev);
++
+ fec_enet_free_buffers(ndev);
+
+ return 0;
+@@ -1904,10 +2343,11 @@
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+- if (!is_valid_ether_addr(addr->sa_data))
+- return -EADDRNOTAVAIL;
+-
+- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
++ if (addr) {
++ if (!is_valid_ether_addr(addr->sa_data))
++ return -EADDRNOTAVAIL;
++ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
++ }
+
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+@@ -1940,12 +2380,21 @@
+ }
+ #endif
+
++#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
++
+ static int fec_set_features(struct net_device *netdev,
+ netdev_features_t features)
+ {
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
++ /* Quiesce the device if necessary */
++ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(netdev);
++ fec_stop(netdev);
++ }
++
+ netdev->features = features;
+
+ /* Receive checksum has been changed */
+@@ -1954,14 +2403,14 @@
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ else
+ fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
++ }
+
+- if (netif_running(netdev)) {
+- fec_stop(netdev);
+- fec_restart(netdev, fep->phy_dev->duplex);
+- netif_wake_queue(netdev);
+- } else {
+- fec_restart(netdev, fep->phy_dev->duplex);
+- }
++ /* Resume the device after updates */
++ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
++ fec_restart(netdev);
++ netif_wake_queue(netdev);
++ netif_tx_unlock_bh(netdev);
++ napi_enable(&fep->napi);
+ }
+
+ return 0;
+@@ -1993,23 +2442,43 @@
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc *cbd_base;
++ int bd_size;
++
++ /* init the tx & rx ring size */
++ fep->tx_ring_size = TX_RING_SIZE;
++ fep->rx_ring_size = RX_RING_SIZE;
++
++ fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
++ fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
++
++ if (fep->bufdesc_ex)
++ fep->bufdesc_size = sizeof(struct bufdesc_ex);
++ else
++ fep->bufdesc_size = sizeof(struct bufdesc);
++ bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
++ fep->bufdesc_size;
+
+ /* Allocate memory for buffer descriptors. */
+- cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
++ cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
+ GFP_KERNEL);
+ if (!cbd_base)
+ return -ENOMEM;
+
++ fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
++ &fep->tso_hdrs_dma, GFP_KERNEL);
++ if (!fep->tso_hdrs) {
++ dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
++ return -ENOMEM;
++ }
++
+ memset(cbd_base, 0, PAGE_SIZE);
+
+ fep->netdev = ndev;
+
+ /* Get the Ethernet address */
+ fec_get_mac(ndev);
+-
+- /* init the tx & rx ring size */
+- fep->tx_ring_size = TX_RING_SIZE;
+- fep->rx_ring_size = RX_RING_SIZE;
++ /* make sure MAC we just acquired is programmed into the hw */
++ fec_set_mac_address(ndev, NULL);
+
+ /* Set receive and transmit descriptor base. */
+ fep->rx_bd_base = cbd_base;
+@@ -2027,22 +2496,22 @@
+ writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+
+- if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
++ if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
+ /* enable hw VLAN support */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+- ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+- }
+
+ if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
++ ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
++
+ /* enable hw accelerator */
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+- | NETIF_F_RXCSUM);
+- ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+- | NETIF_F_RXCSUM);
++ | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ }
+
+- fec_restart(ndev, 0);
++ ndev->hw_features = ndev->features;
++
++ fec_restart(ndev);
+
+ return 0;
+ }
+@@ -2117,6 +2586,9 @@
+ fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+ #endif
+
++ /* Select default pin state */
++ pinctrl_pm_select_default_state(&pdev->dev);
++
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(fep->hwp)) {
+@@ -2167,26 +2639,10 @@
+ fep->bufdesc_ex = 0;
+ }
+
+- ret = clk_prepare_enable(fep->clk_ahb);
++ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ goto failed_clk;
+
+- ret = clk_prepare_enable(fep->clk_ipg);
+- if (ret)
+- goto failed_clk_ipg;
+-
+- if (fep->clk_enet_out) {
+- ret = clk_prepare_enable(fep->clk_enet_out);
+- if (ret)
+- goto failed_clk_enet_out;
+- }
+-
+- if (fep->clk_ptp) {
+- ret = clk_prepare_enable(fep->clk_ptp);
+- if (ret)
+- goto failed_clk_ptp;
+- }
+-
+ fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+ if (!IS_ERR(fep->reg_phy)) {
+ ret = regulator_enable(fep->reg_phy);
+@@ -2228,6 +2684,8 @@
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(ndev);
++ fec_enet_clk_enable(ndev, false);
++ pinctrl_pm_select_sleep_state(&pdev->dev);
+
+ ret = register_netdev(ndev);
+ if (ret)
+@@ -2236,7 +2694,7 @@
+ if (fep->bufdesc_ex && fep->ptp_clock)
+ netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+
+- INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
++ INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+ return 0;
+
+ failed_register:
+@@ -2246,16 +2704,10 @@
+ failed_init:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
++ if (fep->ptp_clock)
++ ptp_clock_unregister(fep->ptp_clock);
+ failed_regulator:
+- if (fep->clk_ptp)
+- clk_disable_unprepare(fep->clk_ptp);
+-failed_clk_ptp:
+- if (fep->clk_enet_out)
+- clk_disable_unprepare(fep->clk_enet_out);
+-failed_clk_enet_out:
+- clk_disable_unprepare(fep->clk_ipg);
+-failed_clk_ipg:
+- clk_disable_unprepare(fep->clk_ahb);
++ fec_enet_clk_enable(ndev, false);
+ failed_clk:
+ failed_ioremap:
+ free_netdev(ndev);
+@@ -2269,42 +2721,40 @@
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+- cancel_delayed_work_sync(&(fep->delay_work.delay_work));
++ cancel_work_sync(&fep->tx_timeout_work);
+ unregister_netdev(ndev);
+ fec_enet_mii_remove(fep);
+ del_timer_sync(&fep->time_keep);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+- if (fep->clk_ptp)
+- clk_disable_unprepare(fep->clk_ptp);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+- if (fep->clk_enet_out)
+- clk_disable_unprepare(fep->clk_enet_out);
+- clk_disable_unprepare(fep->clk_ipg);
+- clk_disable_unprepare(fep->clk_ahb);
++ fec_enet_clk_enable(ndev, false);
+ free_netdev(ndev);
+
+ return 0;
+ }
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ static int
+ fec_suspend(struct device *dev)
+ {
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
++ rtnl_lock();
+ if (netif_running(ndev)) {
+- fec_stop(ndev);
++ phy_stop(fep->phy_dev);
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
+ netif_device_detach(ndev);
++ netif_tx_unlock_bh(ndev);
++ fec_stop(ndev);
+ }
+- if (fep->clk_ptp)
+- clk_disable_unprepare(fep->clk_ptp);
+- if (fep->clk_enet_out)
+- clk_disable_unprepare(fep->clk_enet_out);
+- clk_disable_unprepare(fep->clk_ipg);
+- clk_disable_unprepare(fep->clk_ahb);
++ rtnl_unlock();
++
++ fec_enet_clk_enable(ndev, false);
++ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+@@ -2325,48 +2775,49 @@
+ return ret;
+ }
+
+- ret = clk_prepare_enable(fep->clk_ahb);
+- if (ret)
+- goto failed_clk_ahb;
+-
+- ret = clk_prepare_enable(fep->clk_ipg);
++ pinctrl_pm_select_default_state(&fep->pdev->dev);
++ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+- goto failed_clk_ipg;
+-
+- if (fep->clk_enet_out) {
+- ret = clk_prepare_enable(fep->clk_enet_out);
+- if (ret)
+- goto failed_clk_enet_out;
+- }
+-
+- if (fep->clk_ptp) {
+- ret = clk_prepare_enable(fep->clk_ptp);
+- if (ret)
+- goto failed_clk_ptp;
+- }
++ goto failed_clk;
+
++ rtnl_lock();
+ if (netif_running(ndev)) {
+- fec_restart(ndev, fep->full_duplex);
++ fec_restart(ndev);
++ netif_tx_lock_bh(ndev);
+ netif_device_attach(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
++ phy_start(fep->phy_dev);
+ }
++ rtnl_unlock();
+
+ return 0;
+
+-failed_clk_ptp:
+- if (fep->clk_enet_out)
+- clk_disable_unprepare(fep->clk_enet_out);
+-failed_clk_enet_out:
+- clk_disable_unprepare(fep->clk_ipg);
+-failed_clk_ipg:
+- clk_disable_unprepare(fep->clk_ahb);
+-failed_clk_ahb:
++failed_clk:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ return ret;
+ }
++
++static int fec_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int fec_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static const struct dev_pm_ops fec_pm_ops = {
++ SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
++};
++
+ #endif /* CONFIG_PM_SLEEP */
+
+-static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
+
+ static struct platform_driver fec_driver = {
+ .driver = {
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/fec_ptp.c linux-openelec/drivers/net/ethernet/freescale/fec_ptp.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/fec_ptp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/fec_ptp.c 2015-05-06 12:05:42.000000000 -0500
+@@ -372,6 +372,7 @@
+ fep->ptp_caps.n_alarm = 0;
+ fep->ptp_caps.n_ext_ts = 0;
+ fep->ptp_caps.n_per_out = 0;
++ fep->ptp_caps.n_pins = 0;
+ fep->ptp_caps.pps = 0;
+ fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjtime = fec_ptp_adjtime;
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c linux-openelec/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 2015-05-06 12:05:42.000000000 -0500
+@@ -91,6 +91,9 @@
+ u16 pkt_len, sc;
+ int curidx;
+
++ if (budget <= 0)
++ return received;
++
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+@@ -789,10 +792,6 @@
+ phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
+ iface);
+ if (!phydev) {
+- phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
+- iface);
+- }
+- if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+ }
+@@ -1026,9 +1025,16 @@
+ fpi->use_napi = 1;
+ fpi->napi_weight = 17;
+ fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+- if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
+- NULL)))
+- goto out_free_fpi;
++ if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
++ err = of_phy_register_fixed_link(ofdev->dev.of_node);
++ if (err)
++ goto out_free_fpi;
++
++ /* In the case of a fixed PHY, the DT node associated
++ * to the PHY is the Ethernet MAC DT node.
++ */
++ fpi->phy_node = ofdev->dev.of_node;
++ }
+
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
+ phy_connection_type = of_get_property(ofdev->dev.of_node,
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/fs_enet/mii-fec.c linux-openelec/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/fs_enet/mii-fec.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/fs_enet/mii-fec.c 2015-05-06 12:05:42.000000000 -0500
+@@ -95,12 +95,6 @@
+
+ }
+
+-static int fs_enet_fec_mii_reset(struct mii_bus *bus)
+-{
+- /* nothing here - for now */
+- return 0;
+-}
+-
+ static struct of_device_id fs_enet_mdio_fec_match[];
+ static int fs_enet_mdio_probe(struct platform_device *ofdev)
+ {
+@@ -128,7 +122,6 @@
+ new_bus->name = "FEC MII Bus";
+ new_bus->read = &fs_enet_fec_mii_read;
+ new_bus->write = &fs_enet_fec_mii_write;
+- new_bus->reset = &fs_enet_fec_mii_reset;
+
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+ if (ret)
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/gianfar.c linux-openelec/drivers/net/ethernet/freescale/gianfar.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/gianfar.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/gianfar.c 2015-05-06 12:05:42.000000000 -0500
+@@ -9,7 +9,7 @@
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
++ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+@@ -121,7 +121,7 @@
+ static irqreturn_t gfar_transmit(int irq, void *dev_id);
+ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
+ static void adjust_link(struct net_device *dev);
+-static void init_registers(struct net_device *dev);
++static noinline void gfar_update_link_state(struct gfar_private *priv);
+ static int init_phy(struct net_device *dev);
+ static int gfar_probe(struct platform_device *ofdev);
+ static int gfar_remove(struct platform_device *ofdev);
+@@ -129,8 +129,10 @@
+ static void gfar_set_multi(struct net_device *dev);
+ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+ static void gfar_configure_serdes(struct net_device *dev);
+-static int gfar_poll(struct napi_struct *napi, int budget);
+-static int gfar_poll_sq(struct napi_struct *napi, int budget);
++static int gfar_poll_rx(struct napi_struct *napi, int budget);
++static int gfar_poll_tx(struct napi_struct *napi, int budget);
++static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
++static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ static void gfar_netpoll(struct net_device *dev);
+ #endif
+@@ -138,9 +140,7 @@
+ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
+ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi);
+-void gfar_halt(struct net_device *dev);
+-static void gfar_halt_nodisable(struct net_device *dev);
+-void gfar_start(struct net_device *dev);
++static void gfar_halt_nodisable(struct gfar_private *priv);
+ static void gfar_clear_exact_match(struct net_device *dev);
+ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr);
+@@ -332,72 +332,76 @@
+ }
+ }
+
+-static void gfar_init_mac(struct net_device *ndev)
++static void gfar_rx_buff_size_config(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(ndev);
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- u32 rctrl = 0;
+- u32 tctrl = 0;
+- u32 attrs = 0;
+-
+- /* write the tx/rx base registers */
+- gfar_init_tx_rx_base(priv);
+-
+- /* Configure the coalescing support */
+- gfar_configure_coalescing_all(priv);
++ int frame_size = priv->ndev->mtu + ETH_HLEN;
+
+ /* set this when rx hw offload (TOE) functions are being used */
+ priv->uses_rxfcb = 0;
+
++ if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
++ priv->uses_rxfcb = 1;
++
++ if (priv->hwts_rx_en)
++ priv->uses_rxfcb = 1;
++
++ if (priv->uses_rxfcb)
++ frame_size += GMAC_FCB_LEN;
++
++ frame_size += priv->padding;
++
++ frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
++ INCREMENTAL_BUFFER_SIZE;
++
++ priv->rx_buffer_size = frame_size;
++}
++
++static void gfar_mac_rx_config(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 rctrl = 0;
++
+ if (priv->rx_filer_enable) {
+ rctrl |= RCTRL_FILREN;
+ /* Program the RIR0 reg with the required distribution */
+- gfar_write(&regs->rir0, DEFAULT_RIR0);
++ if (priv->poll_mode == GFAR_SQ_POLLING)
++ gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
++ else /* GFAR_MQ_POLLING */
++ gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
+ }
+
+ /* Restore PROMISC mode */
+- if (ndev->flags & IFF_PROMISC)
++ if (priv->ndev->flags & IFF_PROMISC)
+ rctrl |= RCTRL_PROM;
+
+- if (ndev->features & NETIF_F_RXCSUM) {
++ if (priv->ndev->features & NETIF_F_RXCSUM)
+ rctrl |= RCTRL_CHECKSUMMING;
+- priv->uses_rxfcb = 1;
+- }
+-
+- if (priv->extended_hash) {
+- rctrl |= RCTRL_EXTHASH;
+
+- gfar_clear_exact_match(ndev);
+- rctrl |= RCTRL_EMEN;
+- }
++ if (priv->extended_hash)
++ rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
+
+ if (priv->padding) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PADDING(priv->padding);
+ }
+
+- /* Insert receive time stamps into padding alignment bytes */
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+- rctrl &= ~RCTRL_PAL_MASK;
+- rctrl |= RCTRL_PADDING(8);
+- priv->padding = 8;
+- }
+-
+ /* Enable HW time stamping if requested from user space */
+- if (priv->hwts_rx_en) {
++ if (priv->hwts_rx_en)
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+- priv->uses_rxfcb = 1;
+- }
+
+- if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
++ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+- priv->uses_rxfcb = 1;
+- }
+
+ /* Init rctrl based on our settings */
+ gfar_write(&regs->rctrl, rctrl);
++}
+
+- if (ndev->features & NETIF_F_IP_CSUM)
++static void gfar_mac_tx_config(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 tctrl = 0;
++
++ if (priv->ndev->features & NETIF_F_IP_CSUM)
+ tctrl |= TCTRL_INIT_CSUM;
+
+ if (priv->prio_sched_en)
+@@ -408,30 +412,51 @@
+ gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
+ }
+
+- gfar_write(&regs->tctrl, tctrl);
++ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
++ tctrl |= TCTRL_VLINS;
+
+- /* Set the extraction length and index */
+- attrs = ATTRELI_EL(priv->rx_stash_size) |
+- ATTRELI_EI(priv->rx_stash_index);
++ gfar_write(&regs->tctrl, tctrl);
++}
+
+- gfar_write(&regs->attreli, attrs);
++static void gfar_configure_coalescing(struct gfar_private *priv,
++ unsigned long tx_mask, unsigned long rx_mask)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 __iomem *baddr;
+
+- /* Start with defaults, and add stashing or locking
+- * depending on the approprate variables
+- */
+- attrs = ATTR_INIT_SETTINGS;
++ if (priv->mode == MQ_MG_MODE) {
++ int i = 0;
+
+- if (priv->bd_stash_en)
+- attrs |= ATTR_BDSTASH;
++ baddr = &regs->txic0;
++ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
++ gfar_write(baddr + i, 0);
++ if (likely(priv->tx_queue[i]->txcoalescing))
++ gfar_write(baddr + i, priv->tx_queue[i]->txic);
++ }
+
+- if (priv->rx_stash_size != 0)
+- attrs |= ATTR_BUFSTASH;
++ baddr = &regs->rxic0;
++ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
++ gfar_write(baddr + i, 0);
++ if (likely(priv->rx_queue[i]->rxcoalescing))
++ gfar_write(baddr + i, priv->rx_queue[i]->rxic);
++ }
++ } else {
++ /* Backward compatible case -- even if we enable
++ * multiple queues, there's only single reg to program
++ */
++ gfar_write(&regs->txic, 0);
++ if (likely(priv->tx_queue[0]->txcoalescing))
++ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+
+- gfar_write(&regs->attr, attrs);
++ gfar_write(&regs->rxic, 0);
++ if (unlikely(priv->rx_queue[0]->rxcoalescing))
++ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
++ }
++}
+
+- gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
+- gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
+- gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
++void gfar_configure_coalescing_all(struct gfar_private *priv)
++{
++ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ }
+
+ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+@@ -479,12 +504,27 @@
+ #endif
+ };
+
+-void lock_rx_qs(struct gfar_private *priv)
++static void gfar_ints_disable(struct gfar_private *priv)
+ {
+ int i;
++ for (i = 0; i < priv->num_grps; i++) {
++ struct gfar __iomem *regs = priv->gfargrp[i].regs;
++ /* Clear IEVENT */
++ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+- for (i = 0; i < priv->num_rx_queues; i++)
+- spin_lock(&priv->rx_queue[i]->rxlock);
++ /* Initialize IMASK */
++ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
++ }
++}
++
++static void gfar_ints_enable(struct gfar_private *priv)
++{
++ int i;
++ for (i = 0; i < priv->num_grps; i++) {
++ struct gfar __iomem *regs = priv->gfargrp[i].regs;
++ /* Unmask the interrupts we look for */
++ gfar_write(&regs->imask, IMASK_DEFAULT);
++ }
+ }
+
+ void lock_tx_qs(struct gfar_private *priv)
+@@ -495,23 +535,50 @@
+ spin_lock(&priv->tx_queue[i]->txlock);
+ }
+
+-void unlock_rx_qs(struct gfar_private *priv)
++void unlock_tx_qs(struct gfar_private *priv)
+ {
+ int i;
+
+- for (i = 0; i < priv->num_rx_queues; i++)
+- spin_unlock(&priv->rx_queue[i]->rxlock);
++ for (i = 0; i < priv->num_tx_queues; i++)
++ spin_unlock(&priv->tx_queue[i]->txlock);
+ }
+
+-void unlock_tx_qs(struct gfar_private *priv)
++static int gfar_alloc_tx_queues(struct gfar_private *priv)
+ {
+ int i;
+
+- for (i = 0; i < priv->num_tx_queues; i++)
+- spin_unlock(&priv->tx_queue[i]->txlock);
++ for (i = 0; i < priv->num_tx_queues; i++) {
++ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
++ GFP_KERNEL);
++ if (!priv->tx_queue[i])
++ return -ENOMEM;
++
++ priv->tx_queue[i]->tx_skbuff = NULL;
++ priv->tx_queue[i]->qindex = i;
++ priv->tx_queue[i]->dev = priv->ndev;
++ spin_lock_init(&(priv->tx_queue[i]->txlock));
++ }
++ return 0;
++}
++
++static int gfar_alloc_rx_queues(struct gfar_private *priv)
++{
++ int i;
++
++ for (i = 0; i < priv->num_rx_queues; i++) {
++ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
++ GFP_KERNEL);
++ if (!priv->rx_queue[i])
++ return -ENOMEM;
++
++ priv->rx_queue[i]->rx_skbuff = NULL;
++ priv->rx_queue[i]->qindex = i;
++ priv->rx_queue[i]->dev = priv->ndev;
++ }
++ return 0;
+ }
+
+-static void free_tx_pointers(struct gfar_private *priv)
++static void gfar_free_tx_queues(struct gfar_private *priv)
+ {
+ int i;
+
+@@ -519,7 +586,7 @@
+ kfree(priv->tx_queue[i]);
+ }
+
+-static void free_rx_pointers(struct gfar_private *priv)
++static void gfar_free_rx_queues(struct gfar_private *priv)
+ {
+ int i;
+
+@@ -553,23 +620,26 @@
+ {
+ int i;
+
+- for (i = 0; i < priv->num_grps; i++)
+- napi_disable(&priv->gfargrp[i].napi);
++ for (i = 0; i < priv->num_grps; i++) {
++ napi_disable(&priv->gfargrp[i].napi_rx);
++ napi_disable(&priv->gfargrp[i].napi_tx);
++ }
+ }
+
+ static void enable_napi(struct gfar_private *priv)
+ {
+ int i;
+
+- for (i = 0; i < priv->num_grps; i++)
+- napi_enable(&priv->gfargrp[i].napi);
++ for (i = 0; i < priv->num_grps; i++) {
++ napi_enable(&priv->gfargrp[i].napi_rx);
++ napi_enable(&priv->gfargrp[i].napi_tx);
++ }
+ }
+
+ static int gfar_parse_group(struct device_node *np,
+ struct gfar_private *priv, const char *model)
+ {
+ struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
+- u32 *queue_mask;
+ int i;
+
+ for (i = 0; i < GFAR_NUM_IRQS; i++) {
+@@ -598,16 +668,52 @@
+ grp->priv = priv;
+ spin_lock_init(&grp->grplock);
+ if (priv->mode == MQ_MG_MODE) {
+- queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+- grp->rx_bit_map = queue_mask ?
+- *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+- queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+- grp->tx_bit_map = queue_mask ?
+- *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
++ u32 *rxq_mask, *txq_mask;
++ rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
++ txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
++
++ if (priv->poll_mode == GFAR_SQ_POLLING) {
++ /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
++ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
++ grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
++ } else { /* GFAR_MQ_POLLING */
++ grp->rx_bit_map = rxq_mask ?
++ *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
++ grp->tx_bit_map = txq_mask ?
++ *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
++ }
+ } else {
+ grp->rx_bit_map = 0xFF;
+ grp->tx_bit_map = 0xFF;
+ }
++
++ /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
++ * right to left, so we need to revert the 8 bits to get the q index
++ */
++ grp->rx_bit_map = bitrev8(grp->rx_bit_map);
++ grp->tx_bit_map = bitrev8(grp->tx_bit_map);
++
++ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
++ * also assign queues to groups
++ */
++ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
++ if (!grp->rx_queue)
++ grp->rx_queue = priv->rx_queue[i];
++ grp->num_rx_queues++;
++ grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
++ priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
++ priv->rx_queue[i]->grp = grp;
++ }
++
++ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
++ if (!grp->tx_queue)
++ grp->tx_queue = priv->tx_queue[i];
++ grp->num_tx_queues++;
++ grp->tstat |= (TSTAT_CLEAR_THALT >> i);
++ priv->tqueue |= (TQUEUE_EN0 >> i);
++ priv->tx_queue[i]->grp = grp;
++ }
++
+ priv->num_grps++;
+
+ return 0;
+@@ -628,13 +734,45 @@
+ const u32 *stash_idx;
+ unsigned int num_tx_qs, num_rx_qs;
+ u32 *tx_queues, *rx_queues;
++ unsigned short mode, poll_mode;
+
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+- /* parse the num of tx and rx queues */
++ if (of_device_is_compatible(np, "fsl,etsec2")) {
++ mode = MQ_MG_MODE;
++ poll_mode = GFAR_SQ_POLLING;
++ } else {
++ mode = SQ_SG_MODE;
++ poll_mode = GFAR_SQ_POLLING;
++ }
++
++ /* parse the num of HW tx and rx queues */
+ tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
+- num_tx_qs = tx_queues ? *tx_queues : 1;
++ rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
++
++ if (mode == SQ_SG_MODE) {
++ num_tx_qs = 1;
++ num_rx_qs = 1;
++ } else { /* MQ_MG_MODE */
++ /* get the actual number of supported groups */
++ unsigned int num_grps = of_get_available_child_count(np);
++
++ if (num_grps == 0 || num_grps > MAXGROUPS) {
++ dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
++ num_grps);
++ pr_err("Cannot do alloc_etherdev, aborting\n");
++ return -EINVAL;
++ }
++
++ if (poll_mode == GFAR_SQ_POLLING) {
++ num_tx_qs = num_grps; /* one txq per int group */
++ num_rx_qs = num_grps; /* one rxq per int group */
++ } else { /* GFAR_MQ_POLLING */
++ num_tx_qs = tx_queues ? *tx_queues : 1;
++ num_rx_qs = rx_queues ? *rx_queues : 1;
++ }
++ }
+
+ if (num_tx_qs > MAX_TX_QS) {
+ pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+@@ -643,9 +781,6 @@
+ return -EINVAL;
+ }
+
+- rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+- num_rx_qs = rx_queues ? *rx_queues : 1;
+-
+ if (num_rx_qs > MAX_RX_QS) {
+ pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+ num_rx_qs, MAX_RX_QS);
+@@ -661,10 +796,20 @@
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+
++ priv->mode = mode;
++ priv->poll_mode = poll_mode;
++
+ priv->num_tx_queues = num_tx_qs;
+ netif_set_real_num_rx_queues(dev, num_rx_qs);
+ priv->num_rx_queues = num_rx_qs;
+- priv->num_grps = 0x0;
++
++ err = gfar_alloc_tx_queues(priv);
++ if (err)
++ goto tx_alloc_failed;
++
++ err = gfar_alloc_rx_queues(priv);
++ if (err)
++ goto rx_alloc_failed;
+
+ /* Init Rx queue filer rule set linked list */
+ INIT_LIST_HEAD(&priv->rx_list.list);
+@@ -677,52 +822,18 @@
+ priv->gfargrp[i].regs = NULL;
+
+ /* Parse and initialize group specific information */
+- if (of_device_is_compatible(np, "fsl,etsec2")) {
+- priv->mode = MQ_MG_MODE;
++ if (priv->mode == MQ_MG_MODE) {
+ for_each_child_of_node(np, child) {
+ err = gfar_parse_group(child, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+- } else {
+- priv->mode = SQ_SG_MODE;
++ } else { /* SQ_SG_MODE */
+ err = gfar_parse_group(np, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+
+- for (i = 0; i < priv->num_tx_queues; i++)
+- priv->tx_queue[i] = NULL;
+- for (i = 0; i < priv->num_rx_queues; i++)
+- priv->rx_queue[i] = NULL;
+-
+- for (i = 0; i < priv->num_tx_queues; i++) {
+- priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+- GFP_KERNEL);
+- if (!priv->tx_queue[i]) {
+- err = -ENOMEM;
+- goto tx_alloc_failed;
+- }
+- priv->tx_queue[i]->tx_skbuff = NULL;
+- priv->tx_queue[i]->qindex = i;
+- priv->tx_queue[i]->dev = dev;
+- spin_lock_init(&(priv->tx_queue[i]->txlock));
+- }
+-
+- for (i = 0; i < priv->num_rx_queues; i++) {
+- priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+- GFP_KERNEL);
+- if (!priv->rx_queue[i]) {
+- err = -ENOMEM;
+- goto rx_alloc_failed;
+- }
+- priv->rx_queue[i]->rx_skbuff = NULL;
+- priv->rx_queue[i]->qindex = i;
+- priv->rx_queue[i]->dev = dev;
+- spin_lock_init(&(priv->rx_queue[i]->rxlock));
+- }
+-
+-
+ stash = of_get_property(np, "bd-stash", NULL);
+
+ if (stash) {
+@@ -749,17 +860,16 @@
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ if (model && !strcasecmp(model, "TSEC"))
+- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
++ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
+ if (model && !strcasecmp(model, "eTSEC"))
+- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
++ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+- FSL_GIANFAR_DEV_HAS_PADDING |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+@@ -779,17 +889,28 @@
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
++ /* In the case of a fixed PHY, the DT node associated
++ * to the PHY is the Ethernet MAC DT node.
++ */
++ if (of_phy_is_fixed_link(np)) {
++ err = of_phy_register_fixed_link(np);
++ if (err)
++ goto err_grp_init;
++
++ priv->phy_node = np;
++ }
++
+ /* Find the TBI PHY. If it's not there, we don't support SGMII */
+ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+ return 0;
+
+-rx_alloc_failed:
+- free_rx_pointers(priv);
+-tx_alloc_failed:
+- free_tx_pointers(priv);
+ err_grp_init:
+ unmap_group_regs(priv);
++rx_alloc_failed:
++ gfar_free_rx_queues(priv);
++tx_alloc_failed:
++ gfar_free_tx_queues(priv);
+ free_gfar_dev(priv);
+ return err;
+ }
+@@ -822,18 +943,16 @@
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ if (priv->hwts_rx_en) {
+- stop_gfar(netdev);
+ priv->hwts_rx_en = 0;
+- startup_gfar(netdev);
++ reset_gfar(netdev);
+ }
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ if (!priv->hwts_rx_en) {
+- stop_gfar(netdev);
+ priv->hwts_rx_en = 1;
+- startup_gfar(netdev);
++ reset_gfar(netdev);
+ }
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+@@ -875,19 +994,6 @@
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+ }
+
+-static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
+-{
+- unsigned int new_bit_map = 0x0;
+- int mask = 0x1 << (max_qs - 1), i;
+-
+- for (i = 0; i < max_qs; i++) {
+- if (bit_map & mask)
+- new_bit_map = new_bit_map + (1 << i);
+- mask = mask >> 0x1;
+- }
+- return new_bit_map;
+-}
+-
+ static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+ u32 class)
+ {
+@@ -1005,100 +1111,141 @@
+ priv->errata);
+ }
+
+-/* Set up the ethernet device structure, private data,
+- * and anything else we need before we start
+- */
+-static int gfar_probe(struct platform_device *ofdev)
++void gfar_mac_reset(struct gfar_private *priv)
+ {
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+- struct net_device *dev = NULL;
+- struct gfar_private *priv = NULL;
+- struct gfar __iomem *regs = NULL;
+- int err = 0, i, grp_idx = 0;
+- u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
+- u32 isrg = 0;
+- u32 __iomem *baddr;
+-
+- err = gfar_of_init(ofdev, &dev);
+-
+- if (err)
+- return err;
+-
+- priv = netdev_priv(dev);
+- priv->ndev = dev;
+- priv->ofdev = ofdev;
+- priv->dev = &ofdev->dev;
+- SET_NETDEV_DEV(dev, &ofdev->dev);
+-
+- spin_lock_init(&priv->bflock);
+- INIT_WORK(&priv->reset_task, gfar_reset_task);
+-
+- platform_set_drvdata(ofdev, priv);
+- regs = priv->gfargrp[0].regs;
+-
+- gfar_detect_errata(priv);
+-
+- /* Stop the DMA engine now, in case it was running before
+- * (The firmware could have used it, and left it running).
+- */
+- gfar_halt(dev);
+
+ /* Reset MAC layer */
+ gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
+
+ /* We need to delay at least 3 TX clocks */
+- udelay(2);
++ udelay(3);
+
+- tempval = 0;
+- if (!priv->pause_aneg_en && priv->tx_pause_en)
+- tempval |= MACCFG1_TX_FLOW;
+- if (!priv->pause_aneg_en && priv->rx_pause_en)
+- tempval |= MACCFG1_RX_FLOW;
+ /* the soft reset bit is not self-resetting, so we need to
+ * clear it before resuming normal operation
+ */
+- gfar_write(&regs->maccfg1, tempval);
++ gfar_write(&regs->maccfg1, 0);
+
+- /* Initialize MACCFG2. */
+- tempval = MACCFG2_INIT_SETTINGS;
+- if (gfar_has_errata(priv, GFAR_ERRATA_74))
+- tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+- gfar_write(&regs->maccfg2, tempval);
++ udelay(3);
+
+- /* Initialize ECNTRL */
+- gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
++ /* Compute rx_buff_size based on config flags */
++ gfar_rx_buff_size_config(priv);
+
+- /* Set the dev->base_addr to the gfar reg region */
+- dev->base_addr = (unsigned long) regs;
++ /* Initialize the max receive frame/buffer lengths */
++ gfar_write(&regs->maxfrm, priv->rx_buffer_size);
++ gfar_write(&regs->mrblr, priv->rx_buffer_size);
+
+- /* Fill in the dev structure */
+- dev->watchdog_timeo = TX_TIMEOUT;
+- dev->mtu = 1500;
+- dev->netdev_ops = &gfar_netdev_ops;
+- dev->ethtool_ops = &gfar_ethtool_ops;
++ /* Initialize the Minimum Frame Length Register */
++ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
+
+- /* Register for napi ...We are registering NAPI for each grp */
+- if (priv->mode == SQ_SG_MODE)
+- netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
+- GFAR_DEV_WEIGHT);
+- else
+- for (i = 0; i < priv->num_grps; i++)
+- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+- GFAR_DEV_WEIGHT);
++ /* Initialize MACCFG2. */
++ tempval = MACCFG2_INIT_SETTINGS;
+
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+- dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+- NETIF_F_RXCSUM;
+- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+- }
++ /* If the mtu is larger than the max size for standard
++ * ethernet frames (ie, a jumbo frame), then set maccfg2
++ * to allow huge frames, and to check the length
++ */
++ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
++ gfar_has_errata(priv, GFAR_ERRATA_74))
++ tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+- NETIF_F_HW_VLAN_CTAG_RX;
+- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
++ gfar_write(&regs->maccfg2, tempval);
++
++ /* Clear mac addr hash registers */
++ gfar_write(&regs->igaddr0, 0);
++ gfar_write(&regs->igaddr1, 0);
++ gfar_write(&regs->igaddr2, 0);
++ gfar_write(&regs->igaddr3, 0);
++ gfar_write(&regs->igaddr4, 0);
++ gfar_write(&regs->igaddr5, 0);
++ gfar_write(&regs->igaddr6, 0);
++ gfar_write(&regs->igaddr7, 0);
++
++ gfar_write(&regs->gaddr0, 0);
++ gfar_write(&regs->gaddr1, 0);
++ gfar_write(&regs->gaddr2, 0);
++ gfar_write(&regs->gaddr3, 0);
++ gfar_write(&regs->gaddr4, 0);
++ gfar_write(&regs->gaddr5, 0);
++ gfar_write(&regs->gaddr6, 0);
++ gfar_write(&regs->gaddr7, 0);
++
++ if (priv->extended_hash)
++ gfar_clear_exact_match(priv->ndev);
++
++ gfar_mac_rx_config(priv);
++
++ gfar_mac_tx_config(priv);
++
++ gfar_set_mac_address(priv->ndev);
++
++ gfar_set_multi(priv->ndev);
++
++ /* clear ievent and imask before configuring coalescing */
++ gfar_ints_disable(priv);
++
++ /* Configure the coalescing support */
++ gfar_configure_coalescing_all(priv);
++}
++
++static void gfar_hw_init(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 attrs;
++
++ /* Stop the DMA engine now, in case it was running before
++ * (The firmware could have used it, and left it running).
++ */
++ gfar_halt(priv);
++
++ gfar_mac_reset(priv);
++
++ /* Zero out the rmon mib registers if it has them */
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
++ memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
++
++ /* Mask off the CAM interrupts */
++ gfar_write(&regs->rmon.cam1, 0xffffffff);
++ gfar_write(&regs->rmon.cam2, 0xffffffff);
+ }
+
++ /* Initialize ECNTRL */
++ gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
++
++ /* Set the extraction length and index */
++ attrs = ATTRELI_EL(priv->rx_stash_size) |
++ ATTRELI_EI(priv->rx_stash_index);
++
++ gfar_write(&regs->attreli, attrs);
++
++ /* Start with defaults, and add stashing
++ * depending on driver parameters
++ */
++ attrs = ATTR_INIT_SETTINGS;
++
++ if (priv->bd_stash_en)
++ attrs |= ATTR_BDSTASH;
++
++ if (priv->rx_stash_size != 0)
++ attrs |= ATTR_BUFSTASH;
++
++ gfar_write(&regs->attr, attrs);
++
++ /* FIFO configs */
++ gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
++ gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
++ gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
++
++ /* Program the interrupt steering regs, only for MG devices */
++ if (priv->num_grps > 1)
++ gfar_write_isrg(priv);
++}
++
++static void gfar_init_addr_hash_table(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+ priv->extended_hash = 1;
+ priv->hash_width = 9;
+@@ -1133,68 +1280,81 @@
+ priv->hash_regs[6] = &regs->gaddr6;
+ priv->hash_regs[7] = &regs->gaddr7;
+ }
++}
+
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
+- priv->padding = DEFAULT_PADDING;
+- else
+- priv->padding = 0;
++/* Set up the ethernet device structure, private data,
++ * and anything else we need before we start
++ */
++static int gfar_probe(struct platform_device *ofdev)
++{
++ struct net_device *dev = NULL;
++ struct gfar_private *priv = NULL;
++ int err = 0, i;
+
+- if (dev->features & NETIF_F_IP_CSUM ||
+- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+- dev->needed_headroom = GMAC_FCB_LEN;
++ err = gfar_of_init(ofdev, &dev);
+
+- /* Program the isrg regs only if number of grps > 1 */
+- if (priv->num_grps > 1) {
+- baddr = &regs->isrg0;
+- for (i = 0; i < priv->num_grps; i++) {
+- isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
+- isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
+- gfar_write(baddr, isrg);
+- baddr++;
+- isrg = 0x0;
++ if (err)
++ return err;
++
++ priv = netdev_priv(dev);
++ priv->ndev = dev;
++ priv->ofdev = ofdev;
++ priv->dev = &ofdev->dev;
++ SET_NETDEV_DEV(dev, &ofdev->dev);
++
++ spin_lock_init(&priv->bflock);
++ INIT_WORK(&priv->reset_task, gfar_reset_task);
++
++ platform_set_drvdata(ofdev, priv);
++
++ gfar_detect_errata(priv);
++
++ /* Set the dev->base_addr to the gfar reg region */
++ dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
++
++ /* Fill in the dev structure */
++ dev->watchdog_timeo = TX_TIMEOUT;
++ dev->mtu = 1500;
++ dev->netdev_ops = &gfar_netdev_ops;
++ dev->ethtool_ops = &gfar_ethtool_ops;
++
++ /* Register for napi ...We are registering NAPI for each grp */
++ for (i = 0; i < priv->num_grps; i++) {
++ if (priv->poll_mode == GFAR_SQ_POLLING) {
++ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
++ gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
++ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
++ gfar_poll_tx_sq, 2);
++ } else {
++ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
++ gfar_poll_rx, GFAR_DEV_WEIGHT);
++ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
++ gfar_poll_tx, 2);
+ }
+ }
+
+- /* Need to reverse the bit maps as bit_map's MSB is q0
+- * but, for_each_set_bit parses from right to left, which
+- * basically reverses the queue numbers
+- */
+- for (i = 0; i< priv->num_grps; i++) {
+- priv->gfargrp[i].tx_bit_map =
+- reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+- priv->gfargrp[i].rx_bit_map =
+- reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
++ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
++ NETIF_F_RXCSUM;
++ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
++ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ }
+
+- /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+- * also assign queues to groups
+- */
+- for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+- priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+-
+- for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+- priv->num_rx_queues) {
+- priv->gfargrp[grp_idx].num_rx_queues++;
+- priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
+- rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
+- rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+- }
+- priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+-
+- for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
+- priv->num_tx_queues) {
+- priv->gfargrp[grp_idx].num_tx_queues++;
+- priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
+- tstat = tstat | (TSTAT_CLEAR_THALT >> i);
+- tqueue = tqueue | (TQUEUE_EN0 >> i);
+- }
+- priv->gfargrp[grp_idx].rstat = rstat;
+- priv->gfargrp[grp_idx].tstat = tstat;
+- rstat = tstat =0;
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
++ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_RX;
++ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+- gfar_write(&regs->rqueue, rqueue);
+- gfar_write(&regs->tqueue, tqueue);
++ gfar_init_addr_hash_table(priv);
++
++ /* Insert receive time stamps into padding alignment bytes */
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
++ priv->padding = 8;
++
++ if (dev->features & NETIF_F_IP_CSUM ||
++ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
++ dev->needed_headroom = GMAC_FCB_LEN;
+
+ priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+
+@@ -1220,6 +1380,10 @@
+ if (priv->num_tx_queues == 1)
+ priv->prio_sched_en = 1;
+
++ set_bit(GFAR_DOWN, &priv->state);
++
++ gfar_hw_init(priv);
++
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
+@@ -1251,9 +1415,6 @@
+ /* Initialize the filer table */
+ gfar_init_filer_table(priv);
+
+- /* Create all the sysfs files */
+- gfar_init_sysfs(dev);
+-
+ /* Print out the device info */
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+@@ -1272,8 +1433,8 @@
+
+ register_fail:
+ unmap_group_regs(priv);
+- free_tx_pointers(priv);
+- free_rx_pointers(priv);
++ gfar_free_rx_queues(priv);
++ gfar_free_tx_queues(priv);
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ if (priv->tbi_node)
+@@ -1293,6 +1454,8 @@
+
+ unregister_netdev(priv->ndev);
+ unmap_group_regs(priv);
++ gfar_free_rx_queues(priv);
++ gfar_free_tx_queues(priv);
+ free_gfar_dev(priv);
+
+ return 0;
+@@ -1318,9 +1481,8 @@
+
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+- lock_rx_qs(priv);
+
+- gfar_halt_nodisable(ndev);
++ gfar_halt_nodisable(priv);
+
+ /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+ tempval = gfar_read(&regs->maccfg1);
+@@ -1332,7 +1494,6 @@
+
+ gfar_write(&regs->maccfg1, tempval);
+
+- unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+@@ -1378,15 +1539,13 @@
+ */
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+- lock_rx_qs(priv);
+
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+
+- gfar_start(ndev);
++ gfar_start(priv);
+
+- unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+@@ -1413,10 +1572,11 @@
+ return -ENOMEM;
+ }
+
+- init_registers(ndev);
+- gfar_set_mac_address(ndev);
+- gfar_init_mac(ndev);
+- gfar_start(ndev);
++ gfar_mac_reset(priv);
++
++ gfar_init_tx_rx_base(priv);
++
++ gfar_start(priv);
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+@@ -1511,9 +1671,6 @@
+
+ priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+ interface);
+- if (!priv->phydev)
+- priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
+- interface);
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return -ENODEV;
+@@ -1574,57 +1731,6 @@
+ BMCR_SPEED1000);
+ }
+
+-static void init_registers(struct net_device *dev)
+-{
+- struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = NULL;
+- int i;
+-
+- for (i = 0; i < priv->num_grps; i++) {
+- regs = priv->gfargrp[i].regs;
+- /* Clear IEVENT */
+- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+-
+- /* Initialize IMASK */
+- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+- }
+-
+- regs = priv->gfargrp[0].regs;
+- /* Init hash registers to zero */
+- gfar_write(&regs->igaddr0, 0);
+- gfar_write(&regs->igaddr1, 0);
+- gfar_write(&regs->igaddr2, 0);
+- gfar_write(&regs->igaddr3, 0);
+- gfar_write(&regs->igaddr4, 0);
+- gfar_write(&regs->igaddr5, 0);
+- gfar_write(&regs->igaddr6, 0);
+- gfar_write(&regs->igaddr7, 0);
+-
+- gfar_write(&regs->gaddr0, 0);
+- gfar_write(&regs->gaddr1, 0);
+- gfar_write(&regs->gaddr2, 0);
+- gfar_write(&regs->gaddr3, 0);
+- gfar_write(&regs->gaddr4, 0);
+- gfar_write(&regs->gaddr5, 0);
+- gfar_write(&regs->gaddr6, 0);
+- gfar_write(&regs->gaddr7, 0);
+-
+- /* Zero out the rmon mib registers if it has them */
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+- memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
+-
+- /* Mask off the CAM interrupts */
+- gfar_write(&regs->rmon.cam1, 0xffffffff);
+- gfar_write(&regs->rmon.cam2, 0xffffffff);
+- }
+-
+- /* Initialize the max receive buffer length */
+- gfar_write(&regs->mrblr, priv->rx_buffer_size);
+-
+- /* Initialize the Minimum Frame Length Register */
+- gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
+-}
+-
+ static int __gfar_is_rx_idle(struct gfar_private *priv)
+ {
+ u32 res;
+@@ -1648,23 +1754,13 @@
+ }
+
+ /* Halt the receive and transmit queues */
+-static void gfar_halt_nodisable(struct net_device *dev)
++static void gfar_halt_nodisable(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = NULL;
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+- int i;
+-
+- for (i = 0; i < priv->num_grps; i++) {
+- regs = priv->gfargrp[i].regs;
+- /* Mask all interrupts */
+- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+- /* Clear all interrupts */
+- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+- }
++ gfar_ints_disable(priv);
+
+- regs = priv->gfargrp[0].regs;
+ /* Stop the DMA, and wait for it to stop */
+ tempval = gfar_read(&regs->dmactrl);
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
+@@ -1685,56 +1781,41 @@
+ }
+
+ /* Halt the receive and transmit queues */
+-void gfar_halt(struct net_device *dev)
++void gfar_halt(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+- gfar_halt_nodisable(dev);
++ /* Dissable the Rx/Tx hw queues */
++ gfar_write(&regs->rqueue, 0);
++ gfar_write(&regs->tqueue, 0);
+
+- /* Disable Rx and Tx */
++ mdelay(10);
++
++ gfar_halt_nodisable(priv);
++
++ /* Disable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+ }
+
+-static void free_grp_irqs(struct gfar_priv_grp *grp)
+-{
+- free_irq(gfar_irq(grp, TX)->irq, grp);
+- free_irq(gfar_irq(grp, RX)->irq, grp);
+- free_irq(gfar_irq(grp, ER)->irq, grp);
+-}
+-
+ void stop_gfar(struct net_device *dev)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+- unsigned long flags;
+- int i;
+-
+- phy_stop(priv->phydev);
+
++ netif_tx_stop_all_queues(dev);
+
+- /* Lock it down */
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+- lock_rx_qs(priv);
++ smp_mb__before_clear_bit();
++ set_bit(GFAR_DOWN, &priv->state);
++ smp_mb__after_clear_bit();
+
+- gfar_halt(dev);
++ disable_napi(priv);
+
+- unlock_rx_qs(priv);
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ /* disable ints and gracefully shut down Rx/Tx DMA */
++ gfar_halt(priv);
+
+- /* Free the IRQs */
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+- for (i = 0; i < priv->num_grps; i++)
+- free_grp_irqs(&priv->gfargrp[i]);
+- } else {
+- for (i = 0; i < priv->num_grps; i++)
+- free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+- &priv->gfargrp[i]);
+- }
++ phy_stop(priv->phydev);
+
+ free_skb_resources(priv);
+ }
+@@ -1825,17 +1906,15 @@
+ priv->tx_queue[0]->tx_bd_dma_base);
+ }
+
+-void gfar_start(struct net_device *dev)
++void gfar_start(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ int i = 0;
+
+- /* Enable Rx and Tx in MACCFG1 */
+- tempval = gfar_read(&regs->maccfg1);
+- tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+- gfar_write(&regs->maccfg1, tempval);
++ /* Enable Rx/Tx hw queues */
++ gfar_write(&regs->rqueue, priv->rqueue);
++ gfar_write(&regs->tqueue, priv->tqueue);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+@@ -1852,52 +1931,23 @@
+ /* Clear THLT/RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+- /* Unmask the interrupts we look for */
+- gfar_write(&regs->imask, IMASK_DEFAULT);
+ }
+
+- dev->trans_start = jiffies; /* prevent tx timeout */
+-}
+-
+-static void gfar_configure_coalescing(struct gfar_private *priv,
+- unsigned long tx_mask, unsigned long rx_mask)
+-{
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- u32 __iomem *baddr;
++ /* Enable Rx/Tx DMA */
++ tempval = gfar_read(&regs->maccfg1);
++ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
++ gfar_write(&regs->maccfg1, tempval);
+
+- if (priv->mode == MQ_MG_MODE) {
+- int i = 0;
++ gfar_ints_enable(priv);
+
+- baddr = &regs->txic0;
+- for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+- gfar_write(baddr + i, 0);
+- if (likely(priv->tx_queue[i]->txcoalescing))
+- gfar_write(baddr + i, priv->tx_queue[i]->txic);
+- }
+-
+- baddr = &regs->rxic0;
+- for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+- gfar_write(baddr + i, 0);
+- if (likely(priv->rx_queue[i]->rxcoalescing))
+- gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+- }
+- } else {
+- /* Backward compatible case -- even if we enable
+- * multiple queues, there's only single reg to program
+- */
+- gfar_write(&regs->txic, 0);
+- if (likely(priv->tx_queue[0]->txcoalescing))
+- gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+-
+- gfar_write(&regs->rxic, 0);
+- if (unlikely(priv->rx_queue[0]->rxcoalescing))
+- gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+- }
++ priv->ndev->trans_start = jiffies; /* prevent tx timeout */
+ }
+
+-void gfar_configure_coalescing_all(struct gfar_private *priv)
++static void free_grp_irqs(struct gfar_priv_grp *grp)
+ {
+- gfar_configure_coalescing(priv, 0xFF, 0xFF);
++ free_irq(gfar_irq(grp, TX)->irq, grp);
++ free_irq(gfar_irq(grp, RX)->irq, grp);
++ free_irq(gfar_irq(grp, ER)->irq, grp);
+ }
+
+ static int register_grp_irqs(struct gfar_priv_grp *grp)
+@@ -1956,46 +2006,65 @@
+
+ }
+
+-/* Bring the controller up and running */
+-int startup_gfar(struct net_device *ndev)
++static void gfar_free_irq(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(ndev);
+- struct gfar __iomem *regs = NULL;
+- int err, i, j;
++ int i;
+
+- for (i = 0; i < priv->num_grps; i++) {
+- regs= priv->gfargrp[i].regs;
+- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
++ /* Free the IRQs */
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
++ for (i = 0; i < priv->num_grps; i++)
++ free_grp_irqs(&priv->gfargrp[i]);
++ } else {
++ for (i = 0; i < priv->num_grps; i++)
++ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
++ &priv->gfargrp[i]);
+ }
++}
+
+- regs= priv->gfargrp[0].regs;
+- err = gfar_alloc_skb_resources(ndev);
+- if (err)
+- return err;
+-
+- gfar_init_mac(ndev);
++static int gfar_request_irq(struct gfar_private *priv)
++{
++ int err, i, j;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ err = register_grp_irqs(&priv->gfargrp[i]);
+ if (err) {
+ for (j = 0; j < i; j++)
+ free_grp_irqs(&priv->gfargrp[j]);
+- goto irq_fail;
++ return err;
+ }
+ }
+
+- /* Start the controller */
+- gfar_start(ndev);
++ return 0;
++}
++
++/* Bring the controller up and running */
++int startup_gfar(struct net_device *ndev)
++{
++ struct gfar_private *priv = netdev_priv(ndev);
++ int err;
++
++ gfar_mac_reset(priv);
++
++ err = gfar_alloc_skb_resources(ndev);
++ if (err)
++ return err;
++
++ gfar_init_tx_rx_base(priv);
++
++ smp_mb__before_clear_bit();
++ clear_bit(GFAR_DOWN, &priv->state);
++ smp_mb__after_clear_bit();
++
++ /* Start Rx/Tx DMA and enable the interrupts */
++ gfar_start(priv);
+
+ phy_start(priv->phydev);
+
+- gfar_configure_coalescing_all(priv);
++ enable_napi(priv);
+
+- return 0;
++ netif_tx_wake_all_queues(ndev);
+
+-irq_fail:
+- free_skb_resources(priv);
+- return err;
++ return 0;
+ }
+
+ /* Called when something needs to use the ethernet device
+@@ -2006,27 +2075,17 @@
+ struct gfar_private *priv = netdev_priv(dev);
+ int err;
+
+- enable_napi(priv);
+-
+- /* Initialize a bunch of registers */
+- init_registers(dev);
+-
+- gfar_set_mac_address(dev);
+-
+ err = init_phy(dev);
++ if (err)
++ return err;
+
+- if (err) {
+- disable_napi(priv);
++ err = gfar_request_irq(priv);
++ if (err)
+ return err;
+- }
+
+ err = startup_gfar(dev);
+- if (err) {
+- disable_napi(priv);
++ if (err)
+ return err;
+- }
+-
+- netif_tx_start_all_queues(dev);
+
+ device_set_wakeup_enable(&dev->dev, priv->wol_en);
+
+@@ -2152,13 +2211,13 @@
+ skb_new = skb_realloc_headroom(skb, fcb_len);
+ if (!skb_new) {
+ dev->stats.tx_errors++;
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->sk)
+ skb_set_owner_w(skb_new, skb->sk);
+- consume_skb(skb);
++ dev_consume_skb_any(skb);
+ skb = skb_new;
+ }
+
+@@ -2351,8 +2410,6 @@
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+
+- disable_napi(priv);
+-
+ cancel_work_sync(&priv->reset_task);
+ stop_gfar(dev);
+
+@@ -2360,7 +2417,7 @@
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+
+- netif_tx_stop_all_queues(dev);
++ gfar_free_irq(priv);
+
+ return 0;
+ }
+@@ -2373,77 +2430,9 @@
+ return 0;
+ }
+
+-/* Check if rx parser should be activated */
+-void gfar_check_rx_parser_mode(struct gfar_private *priv)
+-{
+- struct gfar __iomem *regs;
+- u32 tempval;
+-
+- regs = priv->gfargrp[0].regs;
+-
+- tempval = gfar_read(&regs->rctrl);
+- /* If parse is no longer required, then disable parser */
+- if (tempval & RCTRL_REQ_PARSER) {
+- tempval |= RCTRL_PRSDEP_INIT;
+- priv->uses_rxfcb = 1;
+- } else {
+- tempval &= ~RCTRL_PRSDEP_INIT;
+- priv->uses_rxfcb = 0;
+- }
+- gfar_write(&regs->rctrl, tempval);
+-}
+-
+-/* Enables and disables VLAN insertion/extraction */
+-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
+-{
+- struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = NULL;
+- unsigned long flags;
+- u32 tempval;
+-
+- regs = priv->gfargrp[0].regs;
+- local_irq_save(flags);
+- lock_rx_qs(priv);
+-
+- if (features & NETIF_F_HW_VLAN_CTAG_TX) {
+- /* Enable VLAN tag insertion */
+- tempval = gfar_read(&regs->tctrl);
+- tempval |= TCTRL_VLINS;
+- gfar_write(&regs->tctrl, tempval);
+- } else {
+- /* Disable VLAN tag insertion */
+- tempval = gfar_read(&regs->tctrl);
+- tempval &= ~TCTRL_VLINS;
+- gfar_write(&regs->tctrl, tempval);
+- }
+-
+- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
+- /* Enable VLAN tag extraction */
+- tempval = gfar_read(&regs->rctrl);
+- tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
+- gfar_write(&regs->rctrl, tempval);
+- priv->uses_rxfcb = 1;
+- } else {
+- /* Disable VLAN tag extraction */
+- tempval = gfar_read(&regs->rctrl);
+- tempval &= ~RCTRL_VLEX;
+- gfar_write(&regs->rctrl, tempval);
+-
+- gfar_check_rx_parser_mode(priv);
+- }
+-
+- gfar_change_mtu(dev, dev->mtu);
+-
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
+-}
+-
+ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+ {
+- int tempsize, tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- int oldsize = priv->rx_buffer_size;
+ int frame_size = new_mtu + ETH_HLEN;
+
+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+@@ -2451,45 +2440,33 @@
+ return -EINVAL;
+ }
+
+- if (priv->uses_rxfcb)
+- frame_size += GMAC_FCB_LEN;
+-
+- frame_size += priv->padding;
+-
+- tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+- INCREMENTAL_BUFFER_SIZE;
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
+
+- /* Only stop and start the controller if it isn't already
+- * stopped, and we changed something
+- */
+- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
++ if (dev->flags & IFF_UP)
+ stop_gfar(dev);
+
+- priv->rx_buffer_size = tempsize;
+-
+ dev->mtu = new_mtu;
+
+- gfar_write(&regs->mrblr, priv->rx_buffer_size);
+- gfar_write(&regs->maxfrm, priv->rx_buffer_size);
++ if (dev->flags & IFF_UP)
++ startup_gfar(dev);
+
+- /* If the mtu is larger than the max size for standard
+- * ethernet frames (ie, a jumbo frame), then set maccfg2
+- * to allow huge frames, and to check the length
+- */
+- tempval = gfar_read(&regs->maccfg2);
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+- gfar_has_errata(priv, GFAR_ERRATA_74))
+- tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+- else
+- tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
++ return 0;
++}
+
+- gfar_write(&regs->maccfg2, tempval);
++void reset_gfar(struct net_device *ndev)
++{
++ struct gfar_private *priv = netdev_priv(ndev);
+
+- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+- startup_gfar(dev);
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
+
+- return 0;
++ stop_gfar(ndev);
++ startup_gfar(ndev);
++
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+ }
+
+ /* gfar_reset_task gets scheduled when a packet has not been
+@@ -2501,16 +2478,7 @@
+ {
+ struct gfar_private *priv = container_of(work, struct gfar_private,
+ reset_task);
+- struct net_device *dev = priv->ndev;
+-
+- if (dev->flags & IFF_UP) {
+- netif_tx_stop_all_queues(dev);
+- stop_gfar(dev);
+- startup_gfar(dev);
+- netif_tx_start_all_queues(dev);
+- }
+-
+- netif_tx_schedule_all(dev);
++ reset_gfar(priv->ndev);
+ }
+
+ static void gfar_timeout(struct net_device *dev)
+@@ -2623,8 +2591,10 @@
+ }
+
+ /* If we freed a buffer, we can restart transmission, if necessary */
+- if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
+- netif_wake_subqueue(dev, tqi);
++ if (tx_queue->num_txbdfree &&
++ netif_tx_queue_stopped(txq) &&
++ !(test_bit(GFAR_DOWN, &priv->state)))
++ netif_wake_subqueue(priv->ndev, tqi);
+
+ /* Update dirty indicators */
+ tx_queue->skb_dirtytx = skb_dirtytx;
+@@ -2633,31 +2603,6 @@
+ netdev_tx_completed_queue(txq, howmany, bytes_sent);
+ }
+
+-static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave(&gfargrp->grplock, flags);
+- if (napi_schedule_prep(&gfargrp->napi)) {
+- gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
+- __napi_schedule(&gfargrp->napi);
+- } else {
+- /* Clear IEVENT, so interrupts aren't called again
+- * because of the packets that have already arrived.
+- */
+- gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
+- }
+- spin_unlock_irqrestore(&gfargrp->grplock, flags);
+-
+-}
+-
+-/* Interrupt Handler for Transmit complete */
+-static irqreturn_t gfar_transmit(int irq, void *grp_id)
+-{
+- gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
+- return IRQ_HANDLED;
+-}
+-
+ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb)
+ {
+@@ -2728,7 +2673,48 @@
+
+ irqreturn_t gfar_receive(int irq, void *grp_id)
+ {
+- gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
++ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
++ unsigned long flags;
++ u32 imask;
++
++ if (likely(napi_schedule_prep(&grp->napi_rx))) {
++ spin_lock_irqsave(&grp->grplock, flags);
++ imask = gfar_read(&grp->regs->imask);
++ imask &= IMASK_RX_DISABLED;
++ gfar_write(&grp->regs->imask, imask);
++ spin_unlock_irqrestore(&grp->grplock, flags);
++ __napi_schedule(&grp->napi_rx);
++ } else {
++ /* Clear IEVENT, so interrupts aren't called again
++ * because of the packets that have already arrived.
++ */
++ gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
++ }
++
++ return IRQ_HANDLED;
++}
++
++/* Interrupt Handler for Transmit complete */
++static irqreturn_t gfar_transmit(int irq, void *grp_id)
++{
++ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
++ unsigned long flags;
++ u32 imask;
++
++ if (likely(napi_schedule_prep(&grp->napi_tx))) {
++ spin_lock_irqsave(&grp->grplock, flags);
++ imask = gfar_read(&grp->regs->imask);
++ imask &= IMASK_TX_DISABLED;
++ gfar_write(&grp->regs->imask, imask);
++ spin_unlock_irqrestore(&grp->grplock, flags);
++ __napi_schedule(&grp->napi_tx);
++ } else {
++ /* Clear IEVENT, so interrupts aren't called again
++ * because of the packets that have already arrived.
++ */
++ gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
++ }
++
+ return IRQ_HANDLED;
+ }
+
+@@ -2852,7 +2838,7 @@
+ rx_queue->stats.rx_bytes += pkt_len;
+ skb_record_rx_queue(skb, rx_queue->qindex);
+ gfar_process_frame(dev, skb, amount_pull,
+- &rx_queue->grp->napi);
++ &rx_queue->grp->napi_rx);
+
+ } else {
+ netif_warn(priv, rx_err, dev, "Missing skb!\n");
+@@ -2881,66 +2867,81 @@
+ return howmany;
+ }
+
+-static int gfar_poll_sq(struct napi_struct *napi, int budget)
++static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
+ {
+ struct gfar_priv_grp *gfargrp =
+- container_of(napi, struct gfar_priv_grp, napi);
++ container_of(napi, struct gfar_priv_grp, napi_rx);
+ struct gfar __iomem *regs = gfargrp->regs;
+- struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
+- struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
++ struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
+ int work_done = 0;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+- gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+-
+- /* run Tx cleanup to completion */
+- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+- gfar_clean_tx_ring(tx_queue);
++ gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+ work_done = gfar_clean_rx_ring(rx_queue, budget);
+
+ if (work_done < budget) {
++ u32 imask;
+ napi_complete(napi);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+- gfar_write(&regs->imask, IMASK_DEFAULT);
+-
+- /* If we are coalescing interrupts, update the timer
+- * Otherwise, clear it
+- */
+- gfar_write(&regs->txic, 0);
+- if (likely(tx_queue->txcoalescing))
+- gfar_write(&regs->txic, tx_queue->txic);
+-
+- gfar_write(&regs->rxic, 0);
+- if (unlikely(rx_queue->rxcoalescing))
+- gfar_write(&regs->rxic, rx_queue->rxic);
++ spin_lock_irq(&gfargrp->grplock);
++ imask = gfar_read(&regs->imask);
++ imask |= IMASK_RX_DEFAULT;
++ gfar_write(&regs->imask, imask);
++ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return work_done;
+ }
+
+-static int gfar_poll(struct napi_struct *napi, int budget)
++static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
++{
++ struct gfar_priv_grp *gfargrp =
++ container_of(napi, struct gfar_priv_grp, napi_tx);
++ struct gfar __iomem *regs = gfargrp->regs;
++ struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
++ u32 imask;
++
++ /* Clear IEVENT, so interrupts aren't called again
++ * because of the packets that have already arrived
++ */
++ gfar_write(&regs->ievent, IEVENT_TX_MASK);
++
++ /* run Tx cleanup to completion */
++ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
++ gfar_clean_tx_ring(tx_queue);
++
++ napi_complete(napi);
++
++ spin_lock_irq(&gfargrp->grplock);
++ imask = gfar_read(&regs->imask);
++ imask |= IMASK_TX_DEFAULT;
++ gfar_write(&regs->imask, imask);
++ spin_unlock_irq(&gfargrp->grplock);
++
++ return 0;
++}
++
++static int gfar_poll_rx(struct napi_struct *napi, int budget)
+ {
+ struct gfar_priv_grp *gfargrp =
+- container_of(napi, struct gfar_priv_grp, napi);
++ container_of(napi, struct gfar_priv_grp, napi_rx);
+ struct gfar_private *priv = gfargrp->priv;
+ struct gfar __iomem *regs = gfargrp->regs;
+- struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int work_done = 0, work_done_per_q = 0;
+ int i, budget_per_q = 0;
+- int has_tx_work = 0;
+ unsigned long rstat_rxf;
+ int num_act_queues;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+- gfar_write(&regs->ievent, IEVENT_RTX_MASK);
++ gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+ rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
+
+@@ -2948,15 +2949,6 @@
+ if (num_act_queues)
+ budget_per_q = budget/num_act_queues;
+
+- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+- tx_queue = priv->tx_queue[i];
+- /* run Tx cleanup to completion */
+- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+- gfar_clean_tx_ring(tx_queue);
+- has_tx_work = 1;
+- }
+- }
+-
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ /* skip queue if not active */
+ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+@@ -2979,25 +2971,62 @@
+ }
+ }
+
+- if (!num_act_queues && !has_tx_work) {
+-
++ if (!num_act_queues) {
++ u32 imask;
+ napi_complete(napi);
+
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+- gfar_write(&regs->imask, IMASK_DEFAULT);
+-
+- /* If we are coalescing interrupts, update the timer
+- * Otherwise, clear it
+- */
+- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+- gfargrp->tx_bit_map);
++ spin_lock_irq(&gfargrp->grplock);
++ imask = gfar_read(&regs->imask);
++ imask |= IMASK_RX_DEFAULT;
++ gfar_write(&regs->imask, imask);
++ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return work_done;
+ }
+
++static int gfar_poll_tx(struct napi_struct *napi, int budget)
++{
++ struct gfar_priv_grp *gfargrp =
++ container_of(napi, struct gfar_priv_grp, napi_tx);
++ struct gfar_private *priv = gfargrp->priv;
++ struct gfar __iomem *regs = gfargrp->regs;
++ struct gfar_priv_tx_q *tx_queue = NULL;
++ int has_tx_work = 0;
++ int i;
++
++ /* Clear IEVENT, so interrupts aren't called again
++ * because of the packets that have already arrived
++ */
++ gfar_write(&regs->ievent, IEVENT_TX_MASK);
++
++ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
++ tx_queue = priv->tx_queue[i];
++ /* run Tx cleanup to completion */
++ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
++ gfar_clean_tx_ring(tx_queue);
++ has_tx_work = 1;
++ }
++ }
++
++ if (!has_tx_work) {
++ u32 imask;
++ napi_complete(napi);
++
++ spin_lock_irq(&gfargrp->grplock);
++ imask = gfar_read(&regs->imask);
++ imask |= IMASK_TX_DEFAULT;
++ gfar_write(&regs->imask, imask);
++ spin_unlock_irq(&gfargrp->grplock);
++ }
++
++ return 0;
++}
++
++
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ /* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+@@ -3056,41 +3085,6 @@
+ return IRQ_HANDLED;
+ }
+
+-static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+-{
+- struct phy_device *phydev = priv->phydev;
+- u32 val = 0;
+-
+- if (!phydev->duplex)
+- return val;
+-
+- if (!priv->pause_aneg_en) {
+- if (priv->tx_pause_en)
+- val |= MACCFG1_TX_FLOW;
+- if (priv->rx_pause_en)
+- val |= MACCFG1_RX_FLOW;
+- } else {
+- u16 lcl_adv, rmt_adv;
+- u8 flowctrl;
+- /* get link partner capabilities */
+- rmt_adv = 0;
+- if (phydev->pause)
+- rmt_adv = LPA_PAUSE_CAP;
+- if (phydev->asym_pause)
+- rmt_adv |= LPA_PAUSE_ASYM;
+-
+- lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+-
+- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+- if (flowctrl & FLOW_CTRL_TX)
+- val |= MACCFG1_TX_FLOW;
+- if (flowctrl & FLOW_CTRL_RX)
+- val |= MACCFG1_RX_FLOW;
+- }
+-
+- return val;
+-}
+-
+ /* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the phydev structure, and this
+@@ -3100,86 +3094,12 @@
+ static void adjust_link(struct net_device *dev)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned long flags;
+ struct phy_device *phydev = priv->phydev;
+- int new_state = 0;
+-
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+-
+- if (phydev->link) {
+- u32 tempval1 = gfar_read(&regs->maccfg1);
+- u32 tempval = gfar_read(&regs->maccfg2);
+- u32 ecntrl = gfar_read(&regs->ecntrl);
+-
+- /* Now we make sure that we can be in full duplex mode.
+- * If not, we operate in half-duplex mode.
+- */
+- if (phydev->duplex != priv->oldduplex) {
+- new_state = 1;
+- if (!(phydev->duplex))
+- tempval &= ~(MACCFG2_FULL_DUPLEX);
+- else
+- tempval |= MACCFG2_FULL_DUPLEX;
+-
+- priv->oldduplex = phydev->duplex;
+- }
+-
+- if (phydev->speed != priv->oldspeed) {
+- new_state = 1;
+- switch (phydev->speed) {
+- case 1000:
+- tempval =
+- ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+-
+- ecntrl &= ~(ECNTRL_R100);
+- break;
+- case 100:
+- case 10:
+- tempval =
+- ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+-
+- /* Reduced mode distinguishes
+- * between 10 and 100
+- */
+- if (phydev->speed == SPEED_100)
+- ecntrl |= ECNTRL_R100;
+- else
+- ecntrl &= ~(ECNTRL_R100);
+- break;
+- default:
+- netif_warn(priv, link, dev,
+- "Ack! Speed (%d) is not 10/100/1000!\n",
+- phydev->speed);
+- break;
+- }
+-
+- priv->oldspeed = phydev->speed;
+- }
+-
+- tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+- tempval1 |= gfar_get_flowctrl_cfg(priv);
+-
+- gfar_write(&regs->maccfg1, tempval1);
+- gfar_write(&regs->maccfg2, tempval);
+- gfar_write(&regs->ecntrl, ecntrl);
+-
+- if (!priv->oldlink) {
+- new_state = 1;
+- priv->oldlink = 1;
+- }
+- } else if (priv->oldlink) {
+- new_state = 1;
+- priv->oldlink = 0;
+- priv->oldspeed = 0;
+- priv->oldduplex = -1;
+- }
+
+- if (new_state && netif_msg_link(priv))
+- phy_print_status(phydev);
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ if (unlikely(phydev->link != priv->oldlink ||
++ phydev->duplex != priv->oldduplex ||
++ phydev->speed != priv->oldspeed))
++ gfar_update_link_state(priv);
+ }
+
+ /* Update the hash table based on the current list of multicast
+@@ -3425,6 +3345,114 @@
+ return IRQ_HANDLED;
+ }
+
++static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
++{
++ struct phy_device *phydev = priv->phydev;
++ u32 val = 0;
++
++ if (!phydev->duplex)
++ return val;
++
++ if (!priv->pause_aneg_en) {
++ if (priv->tx_pause_en)
++ val |= MACCFG1_TX_FLOW;
++ if (priv->rx_pause_en)
++ val |= MACCFG1_RX_FLOW;
++ } else {
++ u16 lcl_adv, rmt_adv;
++ u8 flowctrl;
++ /* get link partner capabilities */
++ rmt_adv = 0;
++ if (phydev->pause)
++ rmt_adv = LPA_PAUSE_CAP;
++ if (phydev->asym_pause)
++ rmt_adv |= LPA_PAUSE_ASYM;
++
++ lcl_adv = mii_advertise_flowctrl(phydev->advertising);
++
++ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
++ if (flowctrl & FLOW_CTRL_TX)
++ val |= MACCFG1_TX_FLOW;
++ if (flowctrl & FLOW_CTRL_RX)
++ val |= MACCFG1_RX_FLOW;
++ }
++
++ return val;
++}
++
++static noinline void gfar_update_link_state(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ struct phy_device *phydev = priv->phydev;
++
++ if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
++ return;
++
++ if (phydev->link) {
++ u32 tempval1 = gfar_read(&regs->maccfg1);
++ u32 tempval = gfar_read(&regs->maccfg2);
++ u32 ecntrl = gfar_read(&regs->ecntrl);
++
++ if (phydev->duplex != priv->oldduplex) {
++ if (!(phydev->duplex))
++ tempval &= ~(MACCFG2_FULL_DUPLEX);
++ else
++ tempval |= MACCFG2_FULL_DUPLEX;
++
++ priv->oldduplex = phydev->duplex;
++ }
++
++ if (phydev->speed != priv->oldspeed) {
++ switch (phydev->speed) {
++ case 1000:
++ tempval =
++ ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
++
++ ecntrl &= ~(ECNTRL_R100);
++ break;
++ case 100:
++ case 10:
++ tempval =
++ ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
++
++ /* Reduced mode distinguishes
++ * between 10 and 100
++ */
++ if (phydev->speed == SPEED_100)
++ ecntrl |= ECNTRL_R100;
++ else
++ ecntrl &= ~(ECNTRL_R100);
++ break;
++ default:
++ netif_warn(priv, link, priv->ndev,
++ "Ack! Speed (%d) is not 10/100/1000!\n",
++ phydev->speed);
++ break;
++ }
++
++ priv->oldspeed = phydev->speed;
++ }
++
++ tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
++ tempval1 |= gfar_get_flowctrl_cfg(priv);
++
++ gfar_write(&regs->maccfg1, tempval1);
++ gfar_write(&regs->maccfg2, tempval);
++ gfar_write(&regs->ecntrl, ecntrl);
++
++ if (!priv->oldlink)
++ priv->oldlink = 1;
++
++ } else if (priv->oldlink) {
++ priv->oldlink = 0;
++ priv->oldspeed = 0;
++ priv->oldduplex = -1;
++ }
++
++ if (netif_msg_link(priv))
++ phy_print_status(phydev);
++}
++
+ static struct of_device_id gfar_match[] =
+ {
+ {
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/gianfar_ethtool.c linux-openelec/drivers/net/ethernet/freescale/gianfar_ethtool.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/gianfar_ethtool.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/gianfar_ethtool.c 2015-05-06 12:05:42.000000000 -0500
+@@ -44,10 +44,6 @@
+
+ #include "gianfar.h"
+
+-extern void gfar_start(struct net_device *dev);
+-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+- int rx_work_limit);
+-
+ #define GFAR_MAX_COAL_USECS 0xffff
+ #define GFAR_MAX_COAL_FRAMES 0xff
+ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+@@ -364,25 +360,11 @@
+ struct ethtool_coalesce *cvals)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+- int i = 0;
++ int i, err = 0;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+- /* Set up rx coalescing */
+- /* As of now, we will enable/disable coalescing for all
+- * queues together in case of eTSEC2, this will be modified
+- * along with the ethtool interface
+- */
+- if ((cvals->rx_coalesce_usecs == 0) ||
+- (cvals->rx_max_coalesced_frames == 0)) {
+- for (i = 0; i < priv->num_rx_queues; i++)
+- priv->rx_queue[i]->rxcoalescing = 0;
+- } else {
+- for (i = 0; i < priv->num_rx_queues; i++)
+- priv->rx_queue[i]->rxcoalescing = 1;
+- }
+-
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+@@ -399,6 +381,32 @@
+ return -EINVAL;
+ }
+
++ /* Check the bounds of the values */
++ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
++ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
++ GFAR_MAX_COAL_USECS);
++ return -EINVAL;
++ }
++
++ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
++ netdev_info(dev, "Coalescing is limited to %d frames\n",
++ GFAR_MAX_COAL_FRAMES);
++ return -EINVAL;
++ }
++
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
++
++ /* Set up rx coalescing */
++ if ((cvals->rx_coalesce_usecs == 0) ||
++ (cvals->rx_max_coalesced_frames == 0)) {
++ for (i = 0; i < priv->num_rx_queues; i++)
++ priv->rx_queue[i]->rxcoalescing = 0;
++ } else {
++ for (i = 0; i < priv->num_rx_queues; i++)
++ priv->rx_queue[i]->rxcoalescing = 1;
++ }
++
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rxic = mk_ic_value(
+ cvals->rx_max_coalesced_frames,
+@@ -415,28 +423,22 @@
+ priv->tx_queue[i]->txcoalescing = 1;
+ }
+
+- /* Check the bounds of the values */
+- if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+- netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+- GFAR_MAX_COAL_USECS);
+- return -EINVAL;
+- }
+-
+- if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+- netdev_info(dev, "Coalescing is limited to %d frames\n",
+- GFAR_MAX_COAL_FRAMES);
+- return -EINVAL;
+- }
+-
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->txic = mk_ic_value(
+ cvals->tx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+ }
+
+- gfar_configure_coalescing_all(priv);
++ if (dev->flags & IFF_UP) {
++ stop_gfar(dev);
++ err = startup_gfar(dev);
++ } else {
++ gfar_mac_reset(priv);
++ }
++
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+- return 0;
++ return err;
+ }
+
+ /* Fills in rvals with the current ring parameters. Currently,
+@@ -467,15 +469,13 @@
+ }
+
+ /* Change the current ring parameters, stopping the controller if
+- * necessary so that we don't mess things up while we're in
+- * motion. We wait for the ring to be clean before reallocating
+- * the rings.
++ * necessary so that we don't mess things up while we're in motion.
+ */
+ static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+- int err = 0, i = 0;
++ int err = 0, i;
+
+ if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+ return -EINVAL;
+@@ -493,44 +493,25 @@
+ return -EINVAL;
+ }
+
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
+
+- if (dev->flags & IFF_UP) {
+- unsigned long flags;
+-
+- /* Halt TX and RX, and process the frames which
+- * have already been received
+- */
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+- lock_rx_qs(priv);
+-
+- gfar_halt(dev);
+-
+- unlock_rx_qs(priv);
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
+-
+- for (i = 0; i < priv->num_rx_queues; i++)
+- gfar_clean_rx_ring(priv->rx_queue[i],
+- priv->rx_queue[i]->rx_ring_size);
+-
+- /* Now we take down the rings to rebuild them */
++ if (dev->flags & IFF_UP)
+ stop_gfar(dev);
+- }
+
+- /* Change the size */
+- for (i = 0; i < priv->num_rx_queues; i++) {
++ /* Change the sizes */
++ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
++
++ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+- priv->tx_queue[i]->num_txbdfree =
+- priv->tx_queue[i]->tx_ring_size;
+- }
+
+ /* Rebuild the rings with the new size */
+- if (dev->flags & IFF_UP) {
++ if (dev->flags & IFF_UP)
+ err = startup_gfar(dev);
+- netif_tx_wake_all_queues(dev);
+- }
++
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
++
+ return err;
+ }
+
+@@ -552,6 +533,9 @@
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 oldadv, newadv;
+
++ if (!phydev)
++ return -ENODEV;
++
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+@@ -608,43 +592,29 @@
+
+ int gfar_set_features(struct net_device *dev, netdev_features_t features)
+ {
+- struct gfar_private *priv = netdev_priv(dev);
+- unsigned long flags;
+- int err = 0, i = 0;
+ netdev_features_t changed = dev->features ^ features;
++ struct gfar_private *priv = netdev_priv(dev);
++ int err = 0;
+
+- if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
+- gfar_vlan_mode(dev, features);
+-
+- if (!(changed & NETIF_F_RXCSUM))
++ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
++ NETIF_F_RXCSUM)))
+ return 0;
+
+- if (dev->flags & IFF_UP) {
+- /* Halt TX and RX, and process the frames which
+- * have already been received
+- */
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+- lock_rx_qs(priv);
+-
+- gfar_halt(dev);
+-
+- unlock_tx_qs(priv);
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
+
+- for (i = 0; i < priv->num_rx_queues; i++)
+- gfar_clean_rx_ring(priv->rx_queue[i],
+- priv->rx_queue[i]->rx_ring_size);
++ dev->features = features;
+
++ if (dev->flags & IFF_UP) {
+ /* Now we take down the rings to rebuild them */
+ stop_gfar(dev);
+-
+- dev->features = features;
+-
+ err = startup_gfar(dev);
+- netif_tx_wake_all_queues(dev);
++ } else {
++ gfar_mac_reset(priv);
+ }
++
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
++
+ return err;
+ }
+
+@@ -1610,9 +1580,6 @@
+ if (tab->index > MAX_FILER_IDX - 1)
+ return -EBUSY;
+
+- /* Avoid inconsistent filer table to be processed */
+- lock_rx_qs(priv);
+-
+ /* Fill regular entries */
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+ i++)
+@@ -1625,8 +1592,6 @@
+ */
+ gfar_write_filer(priv, i, 0x20, 0x0);
+
+- unlock_rx_qs(priv);
+-
+ return 0;
+ }
+
+@@ -1831,6 +1796,9 @@
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
++ if (test_bit(GFAR_RESETTING, &priv->state))
++ return -EBUSY;
++
+ mutex_lock(&priv->rx_queue_access);
+
+ switch (cmd->cmd) {
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/gianfar.h linux-openelec/drivers/net/ethernet/freescale/gianfar.h
+--- linux-3.14.36/drivers/net/ethernet/freescale/gianfar.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/gianfar.h 2015-05-06 12:05:42.000000000 -0500
+@@ -9,7 +9,7 @@
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
++ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+@@ -377,8 +377,11 @@
+ IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
+ IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
+ | IMASK_PERR)
+-#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
+- & IMASK_DEFAULT)
++#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
++#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
++
++#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
++#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
+
+ /* Fifo management */
+ #define FIFO_TX_THR_MASK 0x01ff
+@@ -409,7 +412,9 @@
+
+ /* This default RIR value directly corresponds
+ * to the 3-bit hash value generated */
+-#define DEFAULT_RIR0 0x05397700
++#define DEFAULT_8RXQ_RIR0 0x05397700
++/* Map even hash values to Q0, and odd ones to Q1 */
++#define DEFAULT_2RXQ_RIR0 0x04104100
+
+ /* RQFCR register bits */
+ #define RQFCR_GPI 0x80000000
+@@ -880,7 +885,6 @@
+ #define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
+ #define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
+ #define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
+-#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
+ #define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
+ #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
+ #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+@@ -892,8 +896,8 @@
+ #define DEFAULT_MAPPING 0xFF
+ #endif
+
+-#define ISRG_SHIFT_TX 0x10
+-#define ISRG_SHIFT_RX 0x18
++#define ISRG_RR0 0x80000000
++#define ISRG_TR0 0x00800000
+
+ /* The same driver can operate in two modes */
+ /* SQ_SG_MODE: Single Queue Single Group Mode
+@@ -905,6 +909,22 @@
+ MQ_MG_MODE
+ };
+
++/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
++ * The driver supports a single pair of RX/Tx queues
++ * per interrupt group (Rx/Tx int line). MQ_MG mode
++ * devices have 2 interrupt groups, so the device will
++ * have a total of 2 Tx and 2 Rx queues in this case.
++ * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
++ * The driver supports all the 8 Rx and Tx HW queues
++ * each queue mapped by the Device Tree to one of
++ * the 2 interrupt groups. This mode implies significant
++ * processing overhead (CPU and controller level).
++ */
++enum gfar_poll_mode {
++ GFAR_SQ_POLLING = 0,
++ GFAR_MQ_POLLING
++};
++
+ /*
+ * Per TX queue stats
+ */
+@@ -966,7 +986,6 @@
+
+ /**
+ * struct gfar_priv_rx_q - per rx queue structure
+- * @rxlock: per queue rx spin lock
+ * @rx_skbuff: skb pointers
+ * @skb_currx: currently use skb pointer
+ * @rx_bd_base: First rx buffer descriptor
+@@ -979,8 +998,7 @@
+ */
+
+ struct gfar_priv_rx_q {
+- spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+- struct sk_buff ** rx_skbuff;
++ struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
+ dma_addr_t rx_bd_dma_base;
+ struct rxbd8 *rx_bd_base;
+ struct rxbd8 *cur_rx;
+@@ -1016,17 +1034,20 @@
+ */
+
+ struct gfar_priv_grp {
+- spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+- struct napi_struct napi;
+- struct gfar_private *priv;
++ spinlock_t grplock __aligned(SMP_CACHE_BYTES);
++ struct napi_struct napi_rx;
++ struct napi_struct napi_tx;
+ struct gfar __iomem *regs;
+- unsigned int rstat;
+- unsigned long num_rx_queues;
+- unsigned long rx_bit_map;
+- /* cacheline 3 */
++ struct gfar_priv_tx_q *tx_queue;
++ struct gfar_priv_rx_q *rx_queue;
+ unsigned int tstat;
++ unsigned int rstat;
++
++ struct gfar_private *priv;
+ unsigned long num_tx_queues;
+ unsigned long tx_bit_map;
++ unsigned long num_rx_queues;
++ unsigned long rx_bit_map;
+
+ struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
+ };
+@@ -1041,6 +1062,11 @@
+ GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
+ };
+
++enum gfar_dev_state {
++ GFAR_DOWN = 1,
++ GFAR_RESETTING
++};
++
+ /* Struct stolen almost completely (and shamelessly) from the FCC enet source
+ * (Ok, that's not so true anymore, but there is a family resemblance)
+ * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
+@@ -1051,8 +1077,6 @@
+ * the buffer descriptor determines the actual condition.
+ */
+ struct gfar_private {
+- unsigned int num_rx_queues;
+-
+ struct device *dev;
+ struct net_device *ndev;
+ enum gfar_errata errata;
+@@ -1060,6 +1084,7 @@
+
+ u16 uses_rxfcb;
+ u16 padding;
++ u32 device_flags;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+@@ -1069,10 +1094,12 @@
+ struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+- u32 device_flags;
++ unsigned long state;
+
+- unsigned int mode;
++ unsigned short mode;
++ unsigned short poll_mode;
+ unsigned int num_tx_queues;
++ unsigned int num_rx_queues;
+ unsigned int num_grps;
+
+ /* Network Statistics */
+@@ -1113,6 +1140,9 @@
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
+
++ u32 rqueue;
++ u32 tqueue;
++
+ /* RX per device parameters */
+ unsigned int rx_stash_size;
+ unsigned int rx_stash_index;
+@@ -1127,11 +1157,6 @@
+ u32 __iomem *hash_regs[16];
+ int hash_width;
+
+- /* global parameters */
+- unsigned int fifo_threshold;
+- unsigned int fifo_starve;
+- unsigned int fifo_starve_off;
+-
+ /*Filer table*/
+ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+ unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+@@ -1176,21 +1201,42 @@
+ *fpr = gfar_read(&regs->rqfpr);
+ }
+
+-void lock_rx_qs(struct gfar_private *priv);
+-void lock_tx_qs(struct gfar_private *priv);
+-void unlock_rx_qs(struct gfar_private *priv);
+-void unlock_tx_qs(struct gfar_private *priv);
++static inline void gfar_write_isrg(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 __iomem *baddr = &regs->isrg0;
++ u32 isrg = 0;
++ int grp_idx, i;
++
++ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
++ struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
++
++ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
++ isrg |= (ISRG_RR0 >> i);
++ }
++
++ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
++ isrg |= (ISRG_TR0 >> i);
++ }
++
++ gfar_write(baddr, isrg);
++
++ baddr++;
++ isrg = 0;
++ }
++}
++
+ irqreturn_t gfar_receive(int irq, void *dev_id);
+ int startup_gfar(struct net_device *dev);
+ void stop_gfar(struct net_device *dev);
+-void gfar_halt(struct net_device *dev);
++void reset_gfar(struct net_device *dev);
++void gfar_mac_reset(struct gfar_private *priv);
++void gfar_halt(struct gfar_private *priv);
++void gfar_start(struct gfar_private *priv);
+ void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+ u32 regnum, u32 read);
+ void gfar_configure_coalescing_all(struct gfar_private *priv);
+-void gfar_init_sysfs(struct net_device *dev);
+ int gfar_set_features(struct net_device *dev, netdev_features_t features);
+-void gfar_check_rx_parser_mode(struct gfar_private *priv);
+-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+
+ extern const struct ethtool_ops gfar_ethtool_ops;
+
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/gianfar_ptp.c linux-openelec/drivers/net/ethernet/freescale/gianfar_ptp.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/gianfar_ptp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/gianfar_ptp.c 2015-05-06 12:05:42.000000000 -0500
+@@ -414,6 +414,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = ptp_gianfar_adjfreq,
+ .adjtime = ptp_gianfar_adjtime,
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/gianfar_sysfs.c linux-openelec/drivers/net/ethernet/freescale/gianfar_sysfs.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/gianfar_sysfs.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/gianfar_sysfs.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,340 +0,0 @@
+-/*
+- * drivers/net/ethernet/freescale/gianfar_sysfs.c
+- *
+- * Gianfar Ethernet Driver
+- * This driver is designed for the non-CPM ethernet controllers
+- * on the 85xx and 83xx family of integrated processors
+- * Based on 8260_io/fcc_enet.c
+- *
+- * Author: Andy Fleming
+- * Maintainer: Kumar Gala (galak@kernel.crashing.org)
+- * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+- *
+- * Copyright 2002-2009 Freescale Semiconductor, Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the
+- * Free Software Foundation; either version 2 of the License, or (at your
+- * option) any later version.
+- *
+- * Sysfs file creation and management
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/unistd.h>
+-#include <linux/delay.h>
+-#include <linux/etherdevice.h>
+-#include <linux/spinlock.h>
+-#include <linux/mm.h>
+-#include <linux/device.h>
+-
+-#include <asm/uaccess.h>
+-#include <linux/module.h>
+-
+-#include "gianfar.h"
+-
+-static ssize_t gfar_show_bd_stash(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
+-}
+-
+-static ssize_t gfar_set_bd_stash(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- int new_setting = 0;
+- u32 temp;
+- unsigned long flags;
+-
+- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
+- return count;
+-
+-
+- /* Find out the new setting */
+- if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
+- new_setting = 1;
+- else if (!strncmp("off", buf, count - 1) ||
+- !strncmp("0", buf, count - 1))
+- new_setting = 0;
+- else
+- return count;
+-
+-
+- local_irq_save(flags);
+- lock_rx_qs(priv);
+-
+- /* Set the new stashing value */
+- priv->bd_stash_en = new_setting;
+-
+- temp = gfar_read(&regs->attr);
+-
+- if (new_setting)
+- temp |= ATTR_BDSTASH;
+- else
+- temp &= ~(ATTR_BDSTASH);
+-
+- gfar_write(&regs->attr, temp);
+-
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
+-
+-static ssize_t gfar_show_rx_stash_size(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->rx_stash_size);
+-}
+-
+-static ssize_t gfar_set_rx_stash_size(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned int length = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
+- return count;
+-
+- local_irq_save(flags);
+- lock_rx_qs(priv);
+-
+- if (length > priv->rx_buffer_size)
+- goto out;
+-
+- if (length == priv->rx_stash_size)
+- goto out;
+-
+- priv->rx_stash_size = length;
+-
+- temp = gfar_read(&regs->attreli);
+- temp &= ~ATTRELI_EL_MASK;
+- temp |= ATTRELI_EL(length);
+- gfar_write(&regs->attreli, temp);
+-
+- /* Turn stashing on/off as appropriate */
+- temp = gfar_read(&regs->attr);
+-
+- if (length)
+- temp |= ATTR_BUFSTASH;
+- else
+- temp &= ~(ATTR_BUFSTASH);
+-
+- gfar_write(&regs->attr, temp);
+-
+-out:
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
+- gfar_set_rx_stash_size);
+-
+-/* Stashing will only be enabled when rx_stash_size != 0 */
+-static ssize_t gfar_show_rx_stash_index(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->rx_stash_index);
+-}
+-
+-static ssize_t gfar_set_rx_stash_index(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned short index = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
+- return count;
+-
+- local_irq_save(flags);
+- lock_rx_qs(priv);
+-
+- if (index > priv->rx_stash_size)
+- goto out;
+-
+- if (index == priv->rx_stash_index)
+- goto out;
+-
+- priv->rx_stash_index = index;
+-
+- temp = gfar_read(&regs->attreli);
+- temp &= ~ATTRELI_EI_MASK;
+- temp |= ATTRELI_EI(index);
+- gfar_write(&regs->attreli, temp);
+-
+-out:
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
+- gfar_set_rx_stash_index);
+-
+-static ssize_t gfar_show_fifo_threshold(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->fifo_threshold);
+-}
+-
+-static ssize_t gfar_set_fifo_threshold(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned int length = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (length > GFAR_MAX_FIFO_THRESHOLD)
+- return count;
+-
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+-
+- priv->fifo_threshold = length;
+-
+- temp = gfar_read(&regs->fifo_tx_thr);
+- temp &= ~FIFO_TX_THR_MASK;
+- temp |= length;
+- gfar_write(&regs->fifo_tx_thr, temp);
+-
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
+- gfar_set_fifo_threshold);
+-
+-static ssize_t gfar_show_fifo_starve(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->fifo_starve);
+-}
+-
+-static ssize_t gfar_set_fifo_starve(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned int num = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (num > GFAR_MAX_FIFO_STARVE)
+- return count;
+-
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+-
+- priv->fifo_starve = num;
+-
+- temp = gfar_read(&regs->fifo_tx_starve);
+- temp &= ~FIFO_TX_STARVE_MASK;
+- temp |= num;
+- gfar_write(&regs->fifo_tx_starve, temp);
+-
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
+- gfar_set_fifo_starve);
+-
+-static ssize_t gfar_show_fifo_starve_off(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->fifo_starve_off);
+-}
+-
+-static ssize_t gfar_set_fifo_starve_off(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned int num = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (num > GFAR_MAX_FIFO_STARVE_OFF)
+- return count;
+-
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+-
+- priv->fifo_starve_off = num;
+-
+- temp = gfar_read(&regs->fifo_tx_starve_shutoff);
+- temp &= ~FIFO_TX_STARVE_OFF_MASK;
+- temp |= num;
+- gfar_write(&regs->fifo_tx_starve_shutoff, temp);
+-
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
+- gfar_set_fifo_starve_off);
+-
+-void gfar_init_sysfs(struct net_device *dev)
+-{
+- struct gfar_private *priv = netdev_priv(dev);
+- int rc;
+-
+- /* Initialize the default values */
+- priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
+- priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
+- priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
+-
+- /* Create our sysfs files */
+- rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
+- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
+- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
+- rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
+- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
+- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
+- if (rc)
+- dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
+-}
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/Kconfig linux-openelec/drivers/net/ethernet/freescale/Kconfig
+--- linux-3.14.36/drivers/net/ethernet/freescale/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -67,6 +67,7 @@
+ tristate "Freescale XGMAC MDIO"
+ depends on FSL_SOC
+ select PHYLIB
++ select OF_MDIO
+ ---help---
+ This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/Makefile linux-openelec/drivers/net/ethernet/freescale/Makefile
+--- linux-3.14.36/drivers/net/ethernet/freescale/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -14,7 +14,6 @@
+ obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+ obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
+ gianfar_driver-objs := gianfar.o \
+- gianfar_ethtool.o \
+- gianfar_sysfs.o
++ gianfar_ethtool.o
+ obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/ucc_geth.c linux-openelec/drivers/net/ethernet/freescale/ucc_geth.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/ucc_geth.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/ucc_geth.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1728,9 +1728,6 @@
+
+ phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
+ priv->phy_interface);
+- if (!phydev)
+- phydev = of_phy_connect_fixed_link(dev, &adjust_link,
+- priv->phy_interface);
+ if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+@@ -3261,7 +3258,7 @@
+
+ dev->stats.tx_packets++;
+
+- dev_kfree_skb(skb);
++ dev_consume_skb_any(skb);
+
+ ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+ ugeth->skb_dirtytx[txQ] =
+@@ -3790,6 +3787,17 @@
+ ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
++ if (!ug_info->phy_node) {
++ /* In the case of a fixed PHY, the DT node associated
++ * to the PHY is the Ethernet MAC DT node.
++ */
++ if (of_phy_is_fixed_link(np)) {
++ err = of_phy_register_fixed_link(np);
++ if (err)
++ return err;
++ }
++ ug_info->phy_node = np;
++ }
+
+ /* Find the TBI PHY node. If it's not there, we don't support SGMII */
+ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+diff -Nur linux-3.14.36/drivers/net/ethernet/freescale/xgmac_mdio.c linux-openelec/drivers/net/ethernet/freescale/xgmac_mdio.c
+--- linux-3.14.36/drivers/net/ethernet/freescale/xgmac_mdio.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/freescale/xgmac_mdio.c 2015-05-06 12:05:42.000000000 -0500
+@@ -162,7 +162,9 @@
+
+ /* Return all Fs if nothing was there */
+ if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+- dev_err(&bus->dev, "MDIO read error\n");
++ dev_err(&bus->dev,
++ "Error while reading PHY%d reg at %d.%d\n",
++ phy_id, dev_addr, regnum);
+ return 0xffff;
+ }
+
+diff -Nur linux-3.14.36/drivers/net/ethernet/intel/e1000e/ptp.c linux-openelec/drivers/net/ethernet/intel/e1000e/ptp.c
+--- linux-3.14.36/drivers/net/ethernet/intel/e1000e/ptp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/intel/e1000e/ptp.c 2015-05-06 12:05:42.000000000 -0500
+@@ -191,6 +191,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = e1000e_phc_adjfreq,
+ .adjtime = e1000e_phc_adjtime,
+diff -Nur linux-3.14.36/drivers/net/ethernet/mellanox/mlx4/en_clock.c linux-openelec/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+--- linux-3.14.36/drivers/net/ethernet/mellanox/mlx4/en_clock.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/mellanox/mlx4/en_clock.c 2015-05-06 12:05:42.000000000 -0500
+@@ -276,6 +276,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = mlx4_en_phc_adjfreq,
+ .adjtime = mlx4_en_phc_adjtime,
+diff -Nur linux-3.14.36/drivers/net/ethernet/sfc/ptp.c linux-openelec/drivers/net/ethernet/sfc/ptp.c
+--- linux-3.14.36/drivers/net/ethernet/sfc/ptp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/sfc/ptp.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1208,6 +1208,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+diff -Nur linux-3.14.36/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c linux-openelec/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+--- linux-3.14.36/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 2015-05-06 12:05:42.000000000 -0500
+@@ -164,6 +164,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+diff -Nur linux-3.14.36/drivers/net/ethernet/ti/cpts.c linux-openelec/drivers/net/ethernet/ti/cpts.c
+--- linux-3.14.36/drivers/net/ethernet/ti/cpts.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/ti/cpts.c 2015-05-06 12:05:42.000000000 -0500
+@@ -217,6 +217,7 @@
+ .name = "CTPS timer",
+ .max_adj = 1000000,
+ .n_ext_ts = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = cpts_ptp_adjfreq,
+ .adjtime = cpts_ptp_adjtime,
+diff -Nur linux-3.14.36/drivers/net/ethernet/tile/tilegx.c linux-openelec/drivers/net/ethernet/tile/tilegx.c
+--- linux-3.14.36/drivers/net/ethernet/tile/tilegx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ethernet/tile/tilegx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -870,6 +870,7 @@
+ .name = "mPIPE clock",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_mpipe_adjfreq,
+ .adjtime = ptp_mpipe_adjtime,
+diff -Nur linux-3.14.36/drivers/net/ieee802154/Kconfig linux-openelec/drivers/net/ieee802154/Kconfig
+--- linux-3.14.36/drivers/net/ieee802154/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/ieee802154/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -15,9 +15,9 @@
+ depends on IEEE802154_DRIVERS
+ ---help---
+ Say Y here to enable the fake driver that serves as an example
+- of HardMAC device driver.
++ of HardMAC device driver.
+
+- This driver can also be built as a module. To do so say M here.
++ This driver can also be built as a module. To do so say M here.
+ The module will be called 'fakehard'.
+
+ config IEEE802154_FAKELB
+@@ -31,17 +31,17 @@
+ The module will be called 'fakelb'.
+
+ config IEEE802154_AT86RF230
+- depends on IEEE802154_DRIVERS && MAC802154
+- tristate "AT86RF230/231 transceiver driver"
+- depends on SPI
++ depends on IEEE802154_DRIVERS && MAC802154
++ tristate "AT86RF230/231 transceiver driver"
++ depends on SPI
+
+ config IEEE802154_MRF24J40
+- tristate "Microchip MRF24J40 transceiver driver"
+- depends on IEEE802154_DRIVERS && MAC802154
+- depends on SPI
+- ---help---
+- Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
+- controller.
++ tristate "Microchip MRF24J40 transceiver driver"
++ depends on IEEE802154_DRIVERS && MAC802154
++ depends on SPI
++ ---help---
++ Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
++ controller.
+
+- This driver can also be built as a module. To do so, say M here.
+- the module will be called 'mrf24j40'.
++ This driver can also be built as a module. To do so, say M here.
++ the module will be called 'mrf24j40'.
+diff -Nur linux-3.14.36/drivers/net/phy/at803x.c linux-openelec/drivers/net/phy/at803x.c
+--- linux-3.14.36/drivers/net/phy/at803x.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/phy/at803x.c 2015-05-06 12:05:42.000000000 -0500
+@@ -27,6 +27,9 @@
+ #define AT803X_MMD_ACCESS_CONTROL 0x0D
+ #define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
+ #define AT803X_FUNC_DATA 0x4003
++#define AT803X_INER 0x0012
++#define AT803X_INER_INIT 0xec00
++#define AT803X_INSR 0x0013
+ #define AT803X_DEBUG_ADDR 0x1D
+ #define AT803X_DEBUG_DATA 0x1E
+ #define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
+@@ -141,41 +144,11 @@
+
+ static int at803x_config_init(struct phy_device *phydev)
+ {
+- int val;
+ int ret;
+- u32 features;
+
+- features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
+- SUPPORTED_FIBRE | SUPPORTED_BNC;
+-
+- val = phy_read(phydev, MII_BMSR);
+- if (val < 0)
+- return val;
+-
+- if (val & BMSR_ANEGCAPABLE)
+- features |= SUPPORTED_Autoneg;
+- if (val & BMSR_100FULL)
+- features |= SUPPORTED_100baseT_Full;
+- if (val & BMSR_100HALF)
+- features |= SUPPORTED_100baseT_Half;
+- if (val & BMSR_10FULL)
+- features |= SUPPORTED_10baseT_Full;
+- if (val & BMSR_10HALF)
+- features |= SUPPORTED_10baseT_Half;
+-
+- if (val & BMSR_ESTATEN) {
+- val = phy_read(phydev, MII_ESTATUS);
+- if (val < 0)
+- return val;
+-
+- if (val & ESTATUS_1000_TFULL)
+- features |= SUPPORTED_1000baseT_Full;
+- if (val & ESTATUS_1000_THALF)
+- features |= SUPPORTED_1000baseT_Half;
+- }
+-
+- phydev->supported = features;
+- phydev->advertising = features;
++ ret = genphy_config_init(phydev);
++ if (ret < 0)
++ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR,
+@@ -191,6 +164,31 @@
+ return 0;
+ }
+
++static int at803x_ack_interrupt(struct phy_device *phydev)
++{
++ int err;
++
++ err = phy_read(phydev, AT803X_INSR);
++
++ return (err < 0) ? err : 0;
++}
++
++static int at803x_config_intr(struct phy_device *phydev)
++{
++ int err;
++ int value;
++
++ value = phy_read(phydev, AT803X_INER);
++
++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
++ err = phy_write(phydev, AT803X_INER,
++ value | AT803X_INER_INIT);
++ else
++ err = phy_write(phydev, AT803X_INER, 0);
++
++ return err;
++}
++
+ static struct phy_driver at803x_driver[] = {
+ {
+ /* ATHEROS 8035 */
+@@ -240,6 +238,8 @@
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
++ .ack_interrupt = &at803x_ack_interrupt,
++ .config_intr = &at803x_config_intr,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+@@ -253,8 +253,7 @@
+
+ static void __exit atheros_exit(void)
+ {
+- return phy_drivers_unregister(at803x_driver,
+- ARRAY_SIZE(at803x_driver));
++ phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
+ }
+
+ module_init(atheros_init);
+diff -Nur linux-3.14.36/drivers/net/phy/phy_device.c linux-openelec/drivers/net/phy/phy_device.c
+--- linux-3.14.36/drivers/net/phy/phy_device.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/phy/phy_device.c 2015-07-24 18:03:28.316842002 -0500
+@@ -1029,7 +1029,7 @@
+ return 0;
+ }
+
+-static int genphy_config_init(struct phy_device *phydev)
++int genphy_config_init(struct phy_device *phydev)
+ {
+ int val;
+ u32 features;
+@@ -1075,6 +1075,8 @@
+ return 0;
+ }
+
++EXPORT_SYMBOL(genphy_config_init);
++
+ static int gen10g_config_init(struct phy_device *phydev)
+ {
+ /* Temporarily just say we support everything */
+diff -Nur linux-3.14.36/drivers/net/phy/smsc.c linux-openelec/drivers/net/phy/smsc.c
+--- linux-3.14.36/drivers/net/phy/smsc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/phy/smsc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -249,8 +249,7 @@
+
+ static void __exit smsc_exit(void)
+ {
+- return phy_drivers_unregister(smsc_phy_driver,
+- ARRAY_SIZE(smsc_phy_driver));
++ phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
+ }
+
+ MODULE_DESCRIPTION("SMSC PHY driver");
+diff -Nur linux-3.14.36/drivers/net/phy/vitesse.c linux-openelec/drivers/net/phy/vitesse.c
+--- linux-3.14.36/drivers/net/phy/vitesse.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/phy/vitesse.c 2015-05-06 12:05:42.000000000 -0500
+@@ -319,8 +319,7 @@
+
+ static void __exit vsc82xx_exit(void)
+ {
+- return phy_drivers_unregister(vsc82xx_driver,
+- ARRAY_SIZE(vsc82xx_driver));
++ phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
+ }
+
+ module_init(vsc82xx_init);
+diff -Nur linux-3.14.36/drivers/net/veth.c linux-openelec/drivers/net/veth.c
+--- linux-3.14.36/drivers/net/veth.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/veth.c 2015-05-06 12:05:42.000000000 -0500
+@@ -14,6 +14,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/u64_stats_sync.h>
+
++#include <net/rtnetlink.h>
+ #include <net/dst.h>
+ #include <net/xfrm.h>
+ #include <linux/veth.h>
+@@ -336,10 +337,9 @@
+
+ nla_peer = data[VETH_INFO_PEER];
+ ifmp = nla_data(nla_peer);
+- err = nla_parse(peer_tb, IFLA_MAX,
+- nla_data(nla_peer) + sizeof(struct ifinfomsg),
+- nla_len(nla_peer) - sizeof(struct ifinfomsg),
+- ifla_policy);
++ err = rtnl_nla_parse_ifla(peer_tb,
++ nla_data(nla_peer) + sizeof(struct ifinfomsg),
++ nla_len(nla_peer) - sizeof(struct ifinfomsg));
+ if (err < 0)
+ return err;
+
+diff -Nur linux-3.14.36/drivers/net/wireless/ath/ar5523/ar5523.c linux-openelec/drivers/net/wireless/ath/ar5523/ar5523.c
+--- linux-3.14.36/drivers/net/wireless/ath/ar5523/ar5523.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/ath/ar5523/ar5523.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1090,7 +1090,8 @@
+ return ret;
+ }
+
+-static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct ar5523 *ar = hw->priv;
+
+diff -Nur linux-3.14.36/drivers/net/wireless/ath/ath10k/mac.c linux-openelec/drivers/net/wireless/ath/ath10k/mac.c
+--- linux-3.14.36/drivers/net/wireless/ath/ath10k/mac.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/ath/ath10k/mac.c 2015-05-06 12:05:42.000000000 -0500
+@@ -3183,7 +3183,8 @@
+ return ret;
+ }
+
+-static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct ath10k *ar = hw->priv;
+ bool skip;
+diff -Nur linux-3.14.36/drivers/net/wireless/ath/ath6kl/cfg80211.c linux-openelec/drivers/net/wireless/ath/ath6kl/cfg80211.c
+--- linux-3.14.36/drivers/net/wireless/ath/ath6kl/cfg80211.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/ath/ath6kl/cfg80211.c 2015-05-06 12:05:42.000000000 -0500
+@@ -790,7 +790,7 @@
+ if (nw_type & ADHOC_NETWORK) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
+ nw_type & ADHOC_CREATOR ? "creator" : "joiner");
+- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
+ cfg80211_put_bss(ar->wiphy, bss);
+ return;
+ }
+@@ -861,13 +861,9 @@
+ }
+
+ if (vif->nw_type & ADHOC_NETWORK) {
+- if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
++ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+- return;
+- }
+- memset(bssid, 0, ETH_ALEN);
+- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
+ return;
+ }
+
+diff -Nur linux-3.14.36/drivers/net/wireless/ath/ath6kl/sdio.c linux-openelec/drivers/net/wireless/ath/ath6kl/sdio.c
+--- linux-3.14.36/drivers/net/wireless/ath/ath6kl/sdio.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/ath/ath6kl/sdio.c 2015-05-06 12:05:42.000000000 -0500
+@@ -222,6 +222,7 @@
+ struct mmc_data *data)
+ {
+ struct scatterlist *sg;
++ struct hif_scatter_item *scat_list;
+ int i;
+
+ data->blksz = HIF_MBOX_BLOCK_SIZE;
+@@ -240,14 +241,14 @@
+ sg = scat_req->sgentries;
+ sg_init_table(sg, scat_req->scat_entries);
+
++ scat_list = &scat_req->scat_list[0];
++
+ /* assemble SG list */
+- for (i = 0; i < scat_req->scat_entries; i++, sg++) {
++ for (i = 0; i < scat_req->scat_entries; i++, sg++, scat_list++) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
+- i, scat_req->scat_list[i].buf,
+- scat_req->scat_list[i].len);
++ i, scat_list->buf, scat_list->len);
+
+- sg_set_buf(sg, scat_req->scat_list[i].buf,
+- scat_req->scat_list[i].len);
++ sg_set_buf(sg, scat_list->buf, scat_list->len);
+ }
+
+ /* set scatter-gather table for request */
+diff -Nur linux-3.14.36/drivers/net/wireless/ath/ath9k/hif_usb.c linux-openelec/drivers/net/wireless/ath/ath9k/hif_usb.c
+--- linux-3.14.36/drivers/net/wireless/ath/ath9k/hif_usb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/ath/ath9k/hif_usb.c 2015-07-24 18:03:30.328842002 -0500
+@@ -37,9 +37,11 @@
+ { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
+ { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
+ { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
++ { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
+ { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
+ { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
+ { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
++ { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
+
+ { USB_DEVICE(0x0cf3, 0x7015),
+ .driver_info = AR9287_USB }, /* Atheros */
+diff -Nur linux-3.14.36/drivers/net/wireless/ath/ath9k/main.c linux-openelec/drivers/net/wireless/ath/ath9k/main.c
+--- linux-3.14.36/drivers/net/wireless/ath/ath9k/main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/ath/ath9k/main.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1883,7 +1883,8 @@
+ return !!npend;
+ }
+
+-static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+diff -Nur linux-3.14.36/drivers/net/wireless/ath/carl9170/main.c linux-openelec/drivers/net/wireless/ath/carl9170/main.c
+--- linux-3.14.36/drivers/net/wireless/ath/carl9170/main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/ath/carl9170/main.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1707,7 +1707,9 @@
+ return 0;
+ }
+
+-static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void carl9170_op_flush(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct ar9170 *ar = hw->priv;
+ unsigned int vid;
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c 2015-05-06 12:05:42.000000000 -0500
+@@ -43,7 +43,6 @@
+ #include "dhd_bus.h"
+ #include "dhd_dbg.h"
+ #include "sdio_host.h"
+-#include "sdio_chip.h"
+
+ #define SDIOH_API_ACCESS_RETRY_LIMIT 2
+
+@@ -54,6 +53,12 @@
+ /* Maximum milliseconds to wait for F2 to come up */
+ #define SDIO_WAIT_F2RDY 3000
+
++#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
++#define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
++
++static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
++module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
++MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
+
+ static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
+ {
+@@ -264,26 +269,17 @@
+ break;
+ }
+
+- if (ret) {
+- /*
+- * SleepCSR register access can fail when
+- * waking up the device so reduce this noise
+- * in the logs.
+- */
+- if (addr != SBSDIO_FUNC1_SLEEPCSR)
+- brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+- write ? "write" : "read", fn, addr, ret);
+- else
+- brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+- write ? "write" : "read", fn, addr, ret);
+- }
++ if (ret)
++ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
++ write ? "write" : "read", fn, addr, ret);
++
+ return ret;
+ }
+
+ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u8 regsz, void *data, bool write)
+ {
+- u8 func_num;
++ u8 func;
+ s32 retry = 0;
+ int ret;
+
+@@ -297,9 +293,9 @@
+ * The rest: function 1 silicon backplane core registers
+ */
+ if ((addr & ~REG_F0_REG_MASK) == 0)
+- func_num = SDIO_FUNC_0;
++ func = SDIO_FUNC_0;
+ else
+- func_num = SDIO_FUNC_1;
++ func = SDIO_FUNC_1;
+
+ do {
+ if (!write)
+@@ -307,16 +303,26 @@
+ /* for retry wait for 1 ms till bus get settled down */
+ if (retry)
+ usleep_range(1000, 2000);
+- ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
++ ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
+ data, write);
+ } while (ret != 0 && ret != -ENOMEDIUM &&
+ retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+
+ if (ret == -ENOMEDIUM)
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+- else if (ret != 0)
+- brcmf_err("failed with %d\n", ret);
+-
++ else if (ret != 0) {
++ /*
++ * SleepCSR register access can fail when
++ * waking up the device so reduce this noise
++ * in the logs.
++ */
++ if (addr != SBSDIO_FUNC1_SLEEPCSR)
++ brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
++ write ? "write" : "read", func, addr, ret);
++ else
++ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
++ write ? "write" : "read", func, addr, ret);
++ }
+ return ret;
+ }
+
+@@ -488,7 +494,6 @@
+ struct mmc_request mmc_req;
+ struct mmc_command mmc_cmd;
+ struct mmc_data mmc_dat;
+- struct sg_table st;
+ struct scatterlist *sgl;
+ int ret = 0;
+
+@@ -533,16 +538,11 @@
+ pkt_offset = 0;
+ pkt_next = target_list->next;
+
+- if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
+- ret = -ENOMEM;
+- goto exit;
+- }
+-
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+- mmc_dat.sg = st.sgl;
++ mmc_dat.sg = sdiodev->sgtable.sgl;
+ mmc_dat.blksz = func_blk_sz;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+@@ -558,7 +558,7 @@
+ while (seg_sz) {
+ req_sz = 0;
+ sg_cnt = 0;
+- sgl = st.sgl;
++ sgl = sdiodev->sgtable.sgl;
+ /* prep sg table */
+ while (pkt_next != (struct sk_buff *)target_list) {
+ pkt_data = pkt_next->data + pkt_offset;
+@@ -640,7 +640,7 @@
+ }
+
+ exit:
+- sg_free_table(&st);
++ sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
+ while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
+ brcmu_pkt_buf_free_skb(pkt_next);
+
+@@ -827,7 +827,7 @@
+ }
+ if (!write)
+ memcpy(data, pkt->data, dsize);
+- skb_trim(pkt, dsize);
++ skb_trim(pkt, 0);
+
+ /* Adjust for next transfer (if any) */
+ size -= dsize;
+@@ -864,6 +864,29 @@
+ return 0;
+ }
+
++static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
++{
++ uint nents;
++ int err;
++
++ if (!sdiodev->sg_support)
++ return;
++
++ nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
++ nents += (nents >> 4) + 1;
++
++ WARN_ON(nents > sdiodev->max_segment_count);
++
++ brcmf_dbg(TRACE, "nents=%d\n", nents);
++ err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
++ if (err < 0) {
++ brcmf_err("allocation failed: disable scatter-gather");
++ sdiodev->sg_support = false;
++ }
++
++ sdiodev->txglomsz = brcmf_sdiod_txglomsz;
++}
++
+ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
+ {
+ if (sdiodev->bus) {
+@@ -881,6 +904,7 @@
+ sdio_disable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+
++ sg_free_table(&sdiodev->sgtable);
+ sdiodev->sbwad = 0;
+
+ return 0;
+@@ -936,6 +960,11 @@
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
+
++ /* allocate scatter-gather table. sg support
++ * will be disabled upon allocation failure.
++ */
++ brcmf_sdiod_sgtable_alloc(sdiodev);
++
+ /* try to attach to the target device */
+ sdiodev->bus = brcmf_sdio_probe(sdiodev);
+ if (!sdiodev->bus) {
+@@ -960,6 +989,7 @@
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
++ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
+ { /* end: all zeroes */ },
+ };
+ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+@@ -1073,9 +1103,7 @@
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ int ret = 0;
+
+- brcmf_dbg(SDIO, "\n");
+-
+- atomic_set(&sdiodev->suspend, true);
++ brcmf_dbg(SDIO, "Enter\n");
+
+ sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+@@ -1083,9 +1111,12 @@
+ return -EINVAL;
+ }
+
++ atomic_set(&sdiodev->suspend, true);
++
+ ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
+ if (ret) {
+ brcmf_err("Failed to set pm_flags\n");
++ atomic_set(&sdiodev->suspend, false);
+ return ret;
+ }
+
+@@ -1099,6 +1130,7 @@
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
++ brcmf_dbg(SDIO, "Enter\n");
+ brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ atomic_set(&sdiodev->suspend, false);
+ return 0;
+@@ -1115,14 +1147,15 @@
+ .remove = brcmf_ops_sdio_remove,
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .id_table = brcmf_sdmmc_ids,
+-#ifdef CONFIG_PM_SLEEP
+ .drv = {
++ .owner = THIS_MODULE,
++#ifdef CONFIG_PM_SLEEP
+ .pm = &brcmf_sdio_pm_ops,
+- },
+ #endif /* CONFIG_PM_SLEEP */
++ },
+ };
+
+-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
++static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
+ {
+ brcmf_dbg(SDIO, "Enter\n");
+
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/chip.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/chip.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/chip.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1035 @@
++/*
++ * Copyright (c) 2014 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/list.h>
++#include <linux/ssb/ssb_regs.h>
++#include <linux/bcma/bcma.h>
++#include <linux/bcma/bcma_regs.h>
++
++#include <defs.h>
++#include <soc.h>
++#include <brcm_hw_ids.h>
++#include <brcmu_utils.h>
++#include <chipcommon.h>
++#include "dhd_dbg.h"
++#include "chip.h"
++
++/* SOC Interconnect types (aka chip types) */
++#define SOCI_SB 0
++#define SOCI_AI 1
++
++/* PL-368 DMP definitions */
++#define DMP_DESC_TYPE_MSK 0x0000000F
++#define DMP_DESC_EMPTY 0x00000000
++#define DMP_DESC_VALID 0x00000001
++#define DMP_DESC_COMPONENT 0x00000001
++#define DMP_DESC_MASTER_PORT 0x00000003
++#define DMP_DESC_ADDRESS 0x00000005
++#define DMP_DESC_ADDRSIZE_GT32 0x00000008
++#define DMP_DESC_EOT 0x0000000F
++
++#define DMP_COMP_DESIGNER 0xFFF00000
++#define DMP_COMP_DESIGNER_S 20
++#define DMP_COMP_PARTNUM 0x000FFF00
++#define DMP_COMP_PARTNUM_S 8
++#define DMP_COMP_CLASS 0x000000F0
++#define DMP_COMP_CLASS_S 4
++#define DMP_COMP_REVISION 0xFF000000
++#define DMP_COMP_REVISION_S 24
++#define DMP_COMP_NUM_SWRAP 0x00F80000
++#define DMP_COMP_NUM_SWRAP_S 19
++#define DMP_COMP_NUM_MWRAP 0x0007C000
++#define DMP_COMP_NUM_MWRAP_S 14
++#define DMP_COMP_NUM_SPORT 0x00003E00
++#define DMP_COMP_NUM_SPORT_S 9
++#define DMP_COMP_NUM_MPORT 0x000001F0
++#define DMP_COMP_NUM_MPORT_S 4
++
++#define DMP_MASTER_PORT_UID 0x0000FF00
++#define DMP_MASTER_PORT_UID_S 8
++#define DMP_MASTER_PORT_NUM 0x000000F0
++#define DMP_MASTER_PORT_NUM_S 4
++
++#define DMP_SLAVE_ADDR_BASE 0xFFFFF000
++#define DMP_SLAVE_ADDR_BASE_S 12
++#define DMP_SLAVE_PORT_NUM 0x00000F00
++#define DMP_SLAVE_PORT_NUM_S 8
++#define DMP_SLAVE_TYPE 0x000000C0
++#define DMP_SLAVE_TYPE_S 6
++#define DMP_SLAVE_TYPE_SLAVE 0
++#define DMP_SLAVE_TYPE_BRIDGE 1
++#define DMP_SLAVE_TYPE_SWRAP 2
++#define DMP_SLAVE_TYPE_MWRAP 3
++#define DMP_SLAVE_SIZE_TYPE 0x00000030
++#define DMP_SLAVE_SIZE_TYPE_S 4
++#define DMP_SLAVE_SIZE_4K 0
++#define DMP_SLAVE_SIZE_8K 1
++#define DMP_SLAVE_SIZE_16K 2
++#define DMP_SLAVE_SIZE_DESC 3
++
++/* EROM CompIdentB */
++#define CIB_REV_MASK 0xff000000
++#define CIB_REV_SHIFT 24
++
++/* ARM CR4 core specific control flag bits */
++#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
++
++/* D11 core specific control flag bits */
++#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
++#define D11_BCMA_IOCTL_PHYRESET 0x0008
++
++/* chip core base & ramsize */
++/* bcm4329 */
++/* SDIO device core, ID 0x829 */
++#define BCM4329_CORE_BUS_BASE 0x18011000
++/* internal memory core, ID 0x80e */
++#define BCM4329_CORE_SOCRAM_BASE 0x18003000
++/* ARM Cortex M3 core, ID 0x82a */
++#define BCM4329_CORE_ARM_BASE 0x18002000
++#define BCM4329_RAMSIZE 0x48000
++
++/* bcm43143 */
++/* SDIO device core */
++#define BCM43143_CORE_BUS_BASE 0x18002000
++/* internal memory core */
++#define BCM43143_CORE_SOCRAM_BASE 0x18004000
++/* ARM Cortex M3 core, ID 0x82a */
++#define BCM43143_CORE_ARM_BASE 0x18003000
++#define BCM43143_RAMSIZE 0x70000
++
++#define CORE_SB(base, field) \
++ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
++#define SBCOREREV(sbidh) \
++ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
++ ((sbidh) & SSB_IDHIGH_RCLO))
++
++struct sbconfig {
++ u32 PAD[2];
++ u32 sbipsflag; /* initiator port ocp slave flag */
++ u32 PAD[3];
++ u32 sbtpsflag; /* target port ocp slave flag */
++ u32 PAD[11];
++ u32 sbtmerrloga; /* (sonics >= 2.3) */
++ u32 PAD;
++ u32 sbtmerrlog; /* (sonics >= 2.3) */
++ u32 PAD[3];
++ u32 sbadmatch3; /* address match3 */
++ u32 PAD;
++ u32 sbadmatch2; /* address match2 */
++ u32 PAD;
++ u32 sbadmatch1; /* address match1 */
++ u32 PAD[7];
++ u32 sbimstate; /* initiator agent state */
++ u32 sbintvec; /* interrupt mask */
++ u32 sbtmstatelow; /* target state */
++ u32 sbtmstatehigh; /* target state */
++ u32 sbbwa0; /* bandwidth allocation table0 */
++ u32 PAD;
++ u32 sbimconfiglow; /* initiator configuration */
++ u32 sbimconfighigh; /* initiator configuration */
++ u32 sbadmatch0; /* address match0 */
++ u32 PAD;
++ u32 sbtmconfiglow; /* target configuration */
++ u32 sbtmconfighigh; /* target configuration */
++ u32 sbbconfig; /* broadcast configuration */
++ u32 PAD;
++ u32 sbbstate; /* broadcast state */
++ u32 PAD[3];
++ u32 sbactcnfg; /* activate configuration */
++ u32 PAD[3];
++ u32 sbflagst; /* current sbflags */
++ u32 PAD[3];
++ u32 sbidlow; /* identification */
++ u32 sbidhigh; /* identification */
++};
++
++struct brcmf_core_priv {
++ struct brcmf_core pub;
++ u32 wrapbase;
++ struct list_head list;
++ struct brcmf_chip_priv *chip;
++};
++
++/* ARM CR4 core specific control flag bits */
++#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
++
++/* D11 core specific control flag bits */
++#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
++#define D11_BCMA_IOCTL_PHYRESET 0x0008
++
++struct brcmf_chip_priv {
++ struct brcmf_chip pub;
++ const struct brcmf_buscore_ops *ops;
++ void *ctx;
++ /* assured first core is chipcommon, second core is buscore */
++ struct list_head cores;
++ u16 num_cores;
++
++ bool (*iscoreup)(struct brcmf_core_priv *core);
++ void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
++ u32 reset);
++ void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
++ u32 postreset);
++};
++
++static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
++ struct brcmf_core *core)
++{
++ u32 regdata;
++
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
++ core->rev = SBCOREREV(regdata);
++}
++
++static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
++{
++ struct brcmf_chip_priv *ci;
++ u32 regdata;
++ u32 address;
++
++ ci = core->chip;
++ address = CORE_SB(core->pub.base, sbtmstatelow);
++ regdata = ci->ops->read32(ci->ctx, address);
++ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
++ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
++ return SSB_TMSLOW_CLOCK == regdata;
++}
++
++static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
++{
++ struct brcmf_chip_priv *ci;
++ u32 regdata;
++ bool ret;
++
++ ci = core->chip;
++ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
++ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
++
++ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
++ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
++
++ return ret;
++}
++
++static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
++ u32 prereset, u32 reset)
++{
++ struct brcmf_chip_priv *ci;
++ u32 val, base;
++
++ ci = core->chip;
++ base = core->pub.base;
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ if (val & SSB_TMSLOW_RESET)
++ return;
++
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ if ((val & SSB_TMSLOW_CLOCK) != 0) {
++ /*
++ * set target reject and spin until busy is clear
++ * (preserve core-specific bits)
++ */
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ val | SSB_TMSLOW_REJECT);
++
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(1);
++ SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
++ & SSB_TMSHIGH_BUSY), 100000);
++
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
++ if (val & SSB_TMSHIGH_BUSY)
++ brcmf_err("core state still busy\n");
++
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
++ if (val & SSB_IDLOW_INITIATOR) {
++ val = ci->ops->read32(ci->ctx,
++ CORE_SB(base, sbimstate));
++ val |= SSB_IMSTATE_REJECT;
++ ci->ops->write32(ci->ctx,
++ CORE_SB(base, sbimstate), val);
++ val = ci->ops->read32(ci->ctx,
++ CORE_SB(base, sbimstate));
++ udelay(1);
++ SPINWAIT((ci->ops->read32(ci->ctx,
++ CORE_SB(base, sbimstate)) &
++ SSB_IMSTATE_BUSY), 100000);
++ }
++
++ /* set reset and reject while enabling the clocks */
++ val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
++ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(10);
++
++ /* clear the initiator reject bit */
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
++ if (val & SSB_IDLOW_INITIATOR) {
++ val = ci->ops->read32(ci->ctx,
++ CORE_SB(base, sbimstate));
++ val &= ~SSB_IMSTATE_REJECT;
++ ci->ops->write32(ci->ctx,
++ CORE_SB(base, sbimstate), val);
++ }
++ }
++
++ /* leave reset and reject asserted */
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
++ udelay(1);
++}
++
++static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
++ u32 prereset, u32 reset)
++{
++ struct brcmf_chip_priv *ci;
++ u32 regdata;
++
++ ci = core->chip;
++
++ /* if core is already in reset, skip reset */
++ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
++ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
++ goto in_reset_configure;
++
++ /* configure reset */
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
++ prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
++ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
++
++ /* put in reset */
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
++ BCMA_RESET_CTL_RESET);
++ usleep_range(10, 20);
++
++ /* wait till reset is 1 */
++ SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
++ BCMA_RESET_CTL_RESET, 300);
++
++in_reset_configure:
++ /* in-reset configure */
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
++ reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
++ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
++}
++
++static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
++ u32 reset, u32 postreset)
++{
++ struct brcmf_chip_priv *ci;
++ u32 regdata;
++ u32 base;
++
++ ci = core->chip;
++ base = core->pub.base;
++ /*
++ * Must do the disable sequence first to work for
++ * arbitrary current core state.
++ */
++ brcmf_chip_sb_coredisable(core, 0, 0);
++
++ /*
++ * Now do the initialization sequence.
++ * set reset while enabling the clock and
++ * forcing them on throughout the core
++ */
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
++ SSB_TMSLOW_RESET);
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(1);
++
++ /* clear any serror */
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
++ if (regdata & SSB_TMSHIGH_SERR)
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
++
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
++ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
++ regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
++ }
++
++ /* clear reset and allow it to propagate throughout the core */
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(1);
++
++ /* leave clock enabled */
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ SSB_TMSLOW_CLOCK);
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(1);
++}
++
++static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
++ u32 reset, u32 postreset)
++{
++ struct brcmf_chip_priv *ci;
++ int count;
++
++ ci = core->chip;
++
++ /* must disable first to work for arbitrary current core state */
++ brcmf_chip_ai_coredisable(core, prereset, reset);
++
++ count = 0;
++ while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
++ BCMA_RESET_CTL_RESET) {
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
++ count++;
++ if (count > 50)
++ break;
++ usleep_range(40, 60);
++ }
++
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
++ postreset | BCMA_IOCTL_CLK);
++ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
++}
++
++static char *brcmf_chip_name(uint chipid, char *buf, uint len)
++{
++ const char *fmt;
++
++ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
++ snprintf(buf, len, fmt, chipid);
++ return buf;
++}
++
++static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
++ u16 coreid, u32 base,
++ u32 wrapbase)
++{
++ struct brcmf_core_priv *core;
++
++ core = kzalloc(sizeof(*core), GFP_KERNEL);
++ if (!core)
++ return ERR_PTR(-ENOMEM);
++
++ core->pub.id = coreid;
++ core->pub.base = base;
++ core->chip = ci;
++ core->wrapbase = wrapbase;
++
++ list_add_tail(&core->list, &ci->cores);
++ return &core->pub;
++}
++
++#ifdef DEBUG
++/* safety check for chipinfo */
++static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
++{
++ struct brcmf_core_priv *core;
++ bool need_socram = false;
++ bool has_socram = false;
++ int idx = 1;
++
++ list_for_each_entry(core, &ci->cores, list) {
++ brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
++ idx++, core->pub.id, core->pub.rev, core->pub.base,
++ core->wrapbase);
++
++ switch (core->pub.id) {
++ case BCMA_CORE_ARM_CM3:
++ need_socram = true;
++ break;
++ case BCMA_CORE_INTERNAL_MEM:
++ has_socram = true;
++ break;
++ case BCMA_CORE_ARM_CR4:
++ if (ci->pub.rambase == 0) {
++ brcmf_err("RAM base not provided with ARM CR4 core\n");
++ return -ENOMEM;
++ }
++ break;
++ default:
++ break;
++ }
++ }
++
++ /* check RAM core presence for ARM CM3 core */
++ if (need_socram && !has_socram) {
++ brcmf_err("RAM core not provided with ARM CM3 core\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++#else /* DEBUG */
++static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
++{
++ return 0;
++}
++#endif
++
++static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
++{
++ switch (ci->pub.chip) {
++ case BCM4329_CHIP_ID:
++ ci->pub.ramsize = BCM4329_RAMSIZE;
++ break;
++ case BCM43143_CHIP_ID:
++ ci->pub.ramsize = BCM43143_RAMSIZE;
++ break;
++ case BCM43241_CHIP_ID:
++ ci->pub.ramsize = 0x90000;
++ break;
++ case BCM4330_CHIP_ID:
++ ci->pub.ramsize = 0x48000;
++ break;
++ case BCM4334_CHIP_ID:
++ ci->pub.ramsize = 0x80000;
++ break;
++ case BCM4335_CHIP_ID:
++ ci->pub.ramsize = 0xc0000;
++ ci->pub.rambase = 0x180000;
++ break;
++ case BCM43362_CHIP_ID:
++ ci->pub.ramsize = 0x3c000;
++ break;
++ case BCM4339_CHIP_ID:
++ case BCM4354_CHIP_ID:
++ ci->pub.ramsize = 0xc0000;
++ ci->pub.rambase = 0x180000;
++ break;
++ default:
++ brcmf_err("unknown chip: %s\n", ci->pub.name);
++ break;
++ }
++}
++
++static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
++ u8 *type)
++{
++ u32 val;
++
++ /* read next descriptor */
++ val = ci->ops->read32(ci->ctx, *eromaddr);
++ *eromaddr += 4;
++
++ if (!type)
++ return val;
++
++ /* determine descriptor type */
++ *type = (val & DMP_DESC_TYPE_MSK);
++ if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
++ *type = DMP_DESC_ADDRESS;
++
++ return val;
++}
++
++static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
++ u32 *regbase, u32 *wrapbase)
++{
++ u8 desc;
++ u32 val;
++ u8 mpnum = 0;
++ u8 stype, sztype, wraptype;
++
++ *regbase = 0;
++ *wrapbase = 0;
++
++ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
++ if (desc == DMP_DESC_MASTER_PORT) {
++ mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
++ wraptype = DMP_SLAVE_TYPE_MWRAP;
++ } else if (desc == DMP_DESC_ADDRESS) {
++ /* revert erom address */
++ *eromaddr -= 4;
++ wraptype = DMP_SLAVE_TYPE_SWRAP;
++ } else {
++ *eromaddr -= 4;
++ return -EILSEQ;
++ }
++
++ do {
++ /* locate address descriptor */
++ do {
++ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
++ /* unexpected table end */
++ if (desc == DMP_DESC_EOT) {
++ *eromaddr -= 4;
++ return -EFAULT;
++ }
++ } while (desc != DMP_DESC_ADDRESS);
++
++ /* skip upper 32-bit address descriptor */
++ if (val & DMP_DESC_ADDRSIZE_GT32)
++ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
++
++ sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
++
++ /* next size descriptor can be skipped */
++ if (sztype == DMP_SLAVE_SIZE_DESC) {
++ val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
++ /* skip upper size descriptor if present */
++ if (val & DMP_DESC_ADDRSIZE_GT32)
++ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
++ }
++
++ /* only look for 4K register regions */
++ if (sztype != DMP_SLAVE_SIZE_4K)
++ continue;
++
++ stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
++
++ /* only regular slave and wrapper */
++ if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
++ *regbase = val & DMP_SLAVE_ADDR_BASE;
++ if (*wrapbase == 0 && stype == wraptype)
++ *wrapbase = val & DMP_SLAVE_ADDR_BASE;
++ } while (*regbase == 0 || *wrapbase == 0);
++
++ return 0;
++}
++
++static
++int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
++{
++ struct brcmf_core *core;
++ u32 eromaddr;
++ u8 desc_type = 0;
++ u32 val;
++ u16 id;
++ u8 nmp, nsp, nmw, nsw, rev;
++ u32 base, wrap;
++ int err;
++
++ eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
++
++ while (desc_type != DMP_DESC_EOT) {
++ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
++ if (!(val & DMP_DESC_VALID))
++ continue;
++
++ if (desc_type == DMP_DESC_EMPTY)
++ continue;
++
++ /* need a component descriptor */
++ if (desc_type != DMP_DESC_COMPONENT)
++ continue;
++
++ id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
++
++ /* next descriptor must be component as well */
++ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
++ if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
++ return -EFAULT;
++
++ /* only look at cores with master port(s) */
++ nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
++ nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
++ nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
++ nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
++ rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
++
++ /* need core with ports */
++ if (nmw + nsw == 0)
++ continue;
++
++ /* try to obtain register address info */
++ err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
++ if (err)
++ continue;
++
++ /* finally a core to be added */
++ core = brcmf_chip_add_core(ci, id, base, wrap);
++ if (IS_ERR(core))
++ return PTR_ERR(core);
++
++ core->rev = rev;
++ }
++
++ return 0;
++}
++
++static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
++{
++ struct brcmf_core *core;
++ u32 regdata;
++ u32 socitype;
++
++ /* Get CC core rev
++ * Chipid is assume to be at offset 0 from SI_ENUM_BASE
++ * For different chiptypes or old sdio hosts w/o chipcommon,
++ * other ways of recognition should be added here.
++ */
++ regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
++ ci->pub.chip = regdata & CID_ID_MASK;
++ ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
++ socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
++
++ brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
++ brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
++ socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
++ ci->pub.chiprev);
++
++ if (socitype == SOCI_SB) {
++ if (ci->pub.chip != BCM4329_CHIP_ID) {
++ brcmf_err("SB chip is not supported\n");
++ return -ENODEV;
++ }
++ ci->iscoreup = brcmf_chip_sb_iscoreup;
++ ci->coredisable = brcmf_chip_sb_coredisable;
++ ci->resetcore = brcmf_chip_sb_resetcore;
++
++ core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
++ SI_ENUM_BASE, 0);
++ brcmf_chip_sb_corerev(ci, core);
++ core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
++ BCM4329_CORE_BUS_BASE, 0);
++ brcmf_chip_sb_corerev(ci, core);
++ core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
++ BCM4329_CORE_SOCRAM_BASE, 0);
++ brcmf_chip_sb_corerev(ci, core);
++ core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
++ BCM4329_CORE_ARM_BASE, 0);
++ brcmf_chip_sb_corerev(ci, core);
++
++ core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
++ brcmf_chip_sb_corerev(ci, core);
++ } else if (socitype == SOCI_AI) {
++ ci->iscoreup = brcmf_chip_ai_iscoreup;
++ ci->coredisable = brcmf_chip_ai_coredisable;
++ ci->resetcore = brcmf_chip_ai_resetcore;
++
++ brcmf_chip_dmp_erom_scan(ci);
++ } else {
++ brcmf_err("chip backplane type %u is not supported\n",
++ socitype);
++ return -ENODEV;
++ }
++
++ brcmf_chip_get_raminfo(ci);
++
++ return brcmf_chip_cores_check(ci);
++}
++
++static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
++{
++ struct brcmf_core *core;
++ struct brcmf_core_priv *cr4;
++ u32 val;
++
++
++ core = brcmf_chip_get_core(&chip->pub, id);
++ if (!core)
++ return;
++
++ switch (id) {
++ case BCMA_CORE_ARM_CM3:
++ brcmf_chip_coredisable(core, 0, 0);
++ break;
++ case BCMA_CORE_ARM_CR4:
++ cr4 = container_of(core, struct brcmf_core_priv, pub);
++
++ /* clear all IOCTL bits except HALT bit */
++ val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
++ val &= ARMCR4_BCMA_IOCTL_CPUHALT;
++ brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
++ ARMCR4_BCMA_IOCTL_CPUHALT);
++ break;
++ default:
++ brcmf_err("unknown id: %u\n", id);
++ break;
++ }
++}
++
++static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
++{
++ struct brcmf_chip *pub;
++ struct brcmf_core_priv *cc;
++ u32 base;
++ u32 val;
++ int ret = 0;
++
++ pub = &chip->pub;
++ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
++ base = cc->pub.base;
++
++ /* get chipcommon capabilites */
++ pub->cc_caps = chip->ops->read32(chip->ctx,
++ CORE_CC_REG(base, capabilities));
++
++ /* get pmu caps & rev */
++ if (pub->cc_caps & CC_CAP_PMU) {
++ val = chip->ops->read32(chip->ctx,
++ CORE_CC_REG(base, pmucapabilities));
++ pub->pmurev = val & PCAP_REV_MASK;
++ pub->pmucaps = val;
++ }
++
++ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
++ cc->pub.rev, pub->pmurev, pub->pmucaps);
++
++ /* execute bus core specific setup */
++ if (chip->ops->setup)
++ ret = chip->ops->setup(chip->ctx, pub);
++
++ /*
++ * Make sure any on-chip ARM is off (in case strapping is wrong),
++ * or downloaded code was already running.
++ */
++ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
++ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
++ return ret;
++}
++
++struct brcmf_chip *brcmf_chip_attach(void *ctx,
++ const struct brcmf_buscore_ops *ops)
++{
++ struct brcmf_chip_priv *chip;
++ int err = 0;
++
++ if (WARN_ON(!ops->read32))
++ err = -EINVAL;
++ if (WARN_ON(!ops->write32))
++ err = -EINVAL;
++ if (WARN_ON(!ops->prepare))
++ err = -EINVAL;
++ if (WARN_ON(!ops->exit_dl))
++ err = -EINVAL;
++ if (err < 0)
++ return ERR_PTR(-EINVAL);
++
++ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
++ if (!chip)
++ return ERR_PTR(-ENOMEM);
++
++ INIT_LIST_HEAD(&chip->cores);
++ chip->num_cores = 0;
++ chip->ops = ops;
++ chip->ctx = ctx;
++
++ err = ops->prepare(ctx);
++ if (err < 0)
++ goto fail;
++
++ err = brcmf_chip_recognition(chip);
++ if (err < 0)
++ goto fail;
++
++ err = brcmf_chip_setup(chip);
++ if (err < 0)
++ goto fail;
++
++ return &chip->pub;
++
++fail:
++ brcmf_chip_detach(&chip->pub);
++ return ERR_PTR(err);
++}
++
++void brcmf_chip_detach(struct brcmf_chip *pub)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core_priv *core;
++ struct brcmf_core_priv *tmp;
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ list_for_each_entry_safe(core, tmp, &chip->cores, list) {
++ list_del(&core->list);
++ kfree(core);
++ }
++ kfree(chip);
++}
++
++struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core_priv *core;
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ list_for_each_entry(core, &chip->cores, list)
++ if (core->pub.id == coreid)
++ return &core->pub;
++
++ return NULL;
++}
++
++struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core_priv *cc;
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
++ if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
++ return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
++ return &cc->pub;
++}
++
++bool brcmf_chip_iscoreup(struct brcmf_core *pub)
++{
++ struct brcmf_core_priv *core;
++
++ core = container_of(pub, struct brcmf_core_priv, pub);
++ return core->chip->iscoreup(core);
++}
++
++void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
++{
++ struct brcmf_core_priv *core;
++
++ core = container_of(pub, struct brcmf_core_priv, pub);
++ core->chip->coredisable(core, prereset, reset);
++}
++
++void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
++ u32 postreset)
++{
++ struct brcmf_core_priv *core;
++
++ core = container_of(pub, struct brcmf_core_priv, pub);
++ core->chip->resetcore(core, prereset, reset, postreset);
++}
++
++static void
++brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
++{
++ struct brcmf_core *core;
++
++ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
++ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
++ D11_BCMA_IOCTL_PHYCLOCKEN,
++ D11_BCMA_IOCTL_PHYCLOCKEN,
++ D11_BCMA_IOCTL_PHYCLOCKEN);
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
++ brcmf_chip_resetcore(core, 0, 0, 0);
++}
++
++static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
++{
++ struct brcmf_core *core;
++
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
++ if (!brcmf_chip_iscoreup(core)) {
++ brcmf_err("SOCRAM core is down after reset?\n");
++ return false;
++ }
++
++ chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
++
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
++ brcmf_chip_resetcore(core, 0, 0, 0);
++
++ return true;
++}
++
++static inline void
++brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
++{
++ struct brcmf_core *core;
++
++ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
++
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
++ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
++ D11_BCMA_IOCTL_PHYCLOCKEN,
++ D11_BCMA_IOCTL_PHYCLOCKEN,
++ D11_BCMA_IOCTL_PHYCLOCKEN);
++}
++
++static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
++{
++ struct brcmf_core *core;
++
++ chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
++
++ /* restore ARM */
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
++ brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
++
++ return true;
++}
++
++void brcmf_chip_enter_download(struct brcmf_chip *pub)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core *arm;
++
++ brcmf_dbg(TRACE, "Enter\n");
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
++ if (arm) {
++ brcmf_chip_cr4_enterdl(chip);
++ return;
++ }
++
++ brcmf_chip_cm3_enterdl(chip);
++}
++
++bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core *arm;
++
++ brcmf_dbg(TRACE, "Enter\n");
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
++ if (arm)
++ return brcmf_chip_cr4_exitdl(chip, rstvec);
++
++ return brcmf_chip_cm3_exitdl(chip);
++}
++
++bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
++{
++ u32 base, addr, reg, pmu_cc3_mask = ~0;
++ struct brcmf_chip_priv *chip;
++
++ brcmf_dbg(TRACE, "Enter\n");
++
++ /* old chips with PMU version less than 17 don't support save restore */
++ if (pub->pmurev < 17)
++ return false;
++
++ base = brcmf_chip_get_chipcommon(pub)->base;
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++
++ switch (pub->chip) {
++ case BCM4354_CHIP_ID:
++ /* explicitly check SR engine enable bit */
++ pmu_cc3_mask = BIT(2);
++ /* fall-through */
++ case BCM43241_CHIP_ID:
++ case BCM4335_CHIP_ID:
++ case BCM4339_CHIP_ID:
++ /* read PMU chipcontrol register 3 */
++ addr = CORE_CC_REG(base, chipcontrol_addr);
++ chip->ops->write32(chip->ctx, addr, 3);
++ addr = CORE_CC_REG(base, chipcontrol_data);
++ reg = chip->ops->read32(chip->ctx, addr);
++ return (reg & pmu_cc3_mask) != 0;
++ default:
++ addr = CORE_CC_REG(base, pmucapabilities_ext);
++ reg = chip->ops->read32(chip->ctx, addr);
++ if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
++ return false;
++
++ addr = CORE_CC_REG(base, retention_ctl);
++ reg = chip->ops->read32(chip->ctx, addr);
++ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
++ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
++ }
++}
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/chip.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/chip.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/chip.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/chip.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,91 @@
++/*
++ * Copyright (c) 2014 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#ifndef BRCMF_CHIP_H
++#define BRCMF_CHIP_H
++
++#include <linux/types.h>
++
++#define CORE_CC_REG(base, field) \
++ (base + offsetof(struct chipcregs, field))
++
++/**
++ * struct brcmf_chip - chip level information.
++ *
++ * @chip: chip identifier.
++ * @chiprev: chip revision.
++ * @cc_caps: chipcommon core capabilities.
++ * @pmucaps: PMU capabilities.
++ * @pmurev: PMU revision.
++ * @rambase: RAM base address (only applicable for ARM CR4 chips).
++ * @ramsize: amount of RAM on chip.
++ * @name: string representation of the chip identifier.
++ */
++struct brcmf_chip {
++ u32 chip;
++ u32 chiprev;
++ u32 cc_caps;
++ u32 pmucaps;
++ u32 pmurev;
++ u32 rambase;
++ u32 ramsize;
++ char name[8];
++};
++
++/**
++ * struct brcmf_core - core related information.
++ *
++ * @id: core identifier.
++ * @rev: core revision.
++ * @base: base address of core register space.
++ */
++struct brcmf_core {
++ u16 id;
++ u16 rev;
++ u32 base;
++};
++
++/**
++ * struct brcmf_buscore_ops - buscore specific callbacks.
++ *
++ * @read32: read 32-bit value over bus.
++ * @write32: write 32-bit value over bus.
++ * @prepare: prepare bus for core configuration.
++ * @setup: bus-specific core setup.
++ * @exit_dl: exit download state.
++ * The callback should use the provided @rstvec when non-zero.
++ */
++struct brcmf_buscore_ops {
++ u32 (*read32)(void *ctx, u32 addr);
++ void (*write32)(void *ctx, u32 addr, u32 value);
++ int (*prepare)(void *ctx);
++ int (*setup)(void *ctx, struct brcmf_chip *chip);
++ void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
++};
++
++struct brcmf_chip *brcmf_chip_attach(void *ctx,
++ const struct brcmf_buscore_ops *ops);
++void brcmf_chip_detach(struct brcmf_chip *chip);
++struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
++struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
++bool brcmf_chip_iscoreup(struct brcmf_core *core);
++void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
++void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
++ u32 postreset);
++void brcmf_chip_enter_download(struct brcmf_chip *ci);
++bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
++bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
++
++#endif /* BRCMF_AXIDMP_H */
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h 2015-05-06 12:05:42.000000000 -0500
+@@ -63,7 +63,6 @@
+ */
+ struct brcmf_bus_ops {
+ int (*preinit)(struct device *dev);
+- int (*init)(struct device *dev);
+ void (*stop)(struct device *dev);
+ int (*txdata)(struct device *dev, struct sk_buff *skb);
+ int (*txctl)(struct device *dev, unsigned char *msg, uint len);
+@@ -99,6 +98,7 @@
+ unsigned long tx_realloc;
+ u32 chip;
+ u32 chiprev;
++ bool always_use_fws_queue;
+
+ struct brcmf_bus_ops *ops;
+ };
+@@ -113,11 +113,6 @@
+ return bus->ops->preinit(bus->dev);
+ }
+
+-static inline int brcmf_bus_init(struct brcmf_bus *bus)
+-{
+- return bus->ops->init(bus->dev);
+-}
+-
+ static inline void brcmf_bus_stop(struct brcmf_bus *bus)
+ {
+ bus->ops->stop(bus->dev);
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c 2015-05-06 12:05:42.000000000 -0500
+@@ -32,6 +32,9 @@
+ #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
+ #define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
+
++/* boost value for RSSI_DELTA in preferred join selection */
++#define BRCMF_JOIN_PREF_RSSI_BOOST 8
++
+
+ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
+ struct sk_buff *pkt, int prec)
+@@ -246,6 +249,7 @@
+ {
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ u8 buf[BRCMF_DCMD_SMLEN];
++ struct brcmf_join_pref_params join_pref_params[2];
+ char *ptr;
+ s32 err;
+
+@@ -298,6 +302,20 @@
+ goto done;
+ }
+
++ /* Setup join_pref to select target by RSSI(with boost on 5GHz) */
++ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
++ join_pref_params[0].len = 2;
++ join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
++ join_pref_params[0].band = WLC_BAND_5G;
++ join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
++ join_pref_params[1].len = 2;
++ join_pref_params[1].rssi_gain = 0;
++ join_pref_params[1].band = 0;
++ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
++ sizeof(join_pref_params));
++ if (err)
++ brcmf_err("Set join_pref error (%d)\n", err);
++
+ /* Setup event_msgs, enable E_IF */
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd.h 2015-05-06 12:05:42.000000000 -0500
+@@ -186,7 +186,7 @@
+ void brcmf_txflowblock_if(struct brcmf_if *ifp,
+ enum brcmf_netif_stop_reason reason, bool state);
+ u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
++void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
+ bool success);
+
+ /* Sets dongle media info (drv_version, mac address). */
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c 2015-05-06 12:05:42.000000000 -0500
+@@ -190,7 +190,7 @@
+ int ret;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
+- struct ethhdr *eh;
++ struct ethhdr *eh = (struct ethhdr *)(skb->data);
+
+ brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
+
+@@ -236,6 +236,9 @@
+ goto done;
+ }
+
++ if (eh->h_proto == htons(ETH_P_PAE))
++ atomic_inc(&ifp->pend_8021x_cnt);
++
+ ret = brcmf_fws_process_skb(ifp, skb);
+
+ done:
+@@ -511,7 +514,7 @@
+
+ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
+ {
+- struct brcmf_if *ifp;
++ struct brcmf_if *ifp = NULL;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_skb_reorder_data *rd;
+@@ -522,7 +525,7 @@
+
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
+- ifp = drvr->iflist[ifidx];
++ if (!ret) ifp = drvr->iflist[ifidx];
+
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+@@ -538,31 +541,26 @@
+ brcmf_netif_rx(ifp, skb);
+ }
+
+-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
++void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
+ bool success)
+ {
+ struct brcmf_if *ifp;
+ struct ethhdr *eh;
+- u8 ifidx;
+ u16 type;
+- int res;
+-
+- res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
+
+ ifp = drvr->iflist[ifidx];
+ if (!ifp)
+ goto done;
+
+- if (res == 0) {
+- eh = (struct ethhdr *)(txp->data);
+- type = ntohs(eh->h_proto);
+-
+- if (type == ETH_P_PAE) {
+- atomic_dec(&ifp->pend_8021x_cnt);
+- if (waitqueue_active(&ifp->pend_8021x_wait))
+- wake_up(&ifp->pend_8021x_wait);
+- }
++ eh = (struct ethhdr *)(txp->data);
++ type = ntohs(eh->h_proto);
++
++ if (type == ETH_P_PAE) {
++ atomic_dec(&ifp->pend_8021x_cnt);
++ if (waitqueue_active(&ifp->pend_8021x_wait))
++ wake_up(&ifp->pend_8021x_wait);
+ }
++
+ if (!success)
+ ifp->stats.tx_errors++;
+ done:
+@@ -573,13 +571,17 @@
+ {
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
++ u8 ifidx;
+
+ /* await txstatus signal for firmware if active */
+ if (brcmf_fws_fc_active(drvr->fws)) {
+ if (!success)
+ brcmf_fws_bustxfail(drvr->fws, txp);
+ } else {
+- brcmf_txfinalize(drvr, txp, success);
++ if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
++ brcmu_pkt_buf_free_skb(txp);
++ else
++ brcmf_txfinalize(drvr, txp, ifidx, success);
+ }
+ }
+
+@@ -914,13 +916,6 @@
+
+ brcmf_dbg(TRACE, "\n");
+
+- /* Bring up the bus */
+- ret = brcmf_bus_init(bus_if);
+- if (ret != 0) {
+- brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
+- return ret;
+- }
+-
+ /* add primary networking interface */
+ ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
+ if (IS_ERR(ifp))
+@@ -1040,12 +1035,12 @@
+
+ brcmf_cfg80211_detach(drvr->config);
+
++ brcmf_fws_deinit(drvr);
++
+ brcmf_bus_detach(drvr);
+
+ brcmf_proto_detach(drvr);
+
+- brcmf_fws_deinit(drvr);
+-
+ brcmf_debugfs_detach(drvr);
+ bus_if->drvr = NULL;
+ kfree(drvr);
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c 2015-05-06 12:05:42.000000000 -0500
+@@ -23,6 +23,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/sched.h>
+ #include <linux/mmc/sdio.h>
++#include <linux/mmc/sdio_ids.h>
+ #include <linux/mmc/sdio_func.h>
+ #include <linux/mmc/card.h>
+ #include <linux/semaphore.h>
+@@ -40,8 +41,8 @@
+ #include <brcm_hw_ids.h>
+ #include <soc.h>
+ #include "sdio_host.h"
+-#include "sdio_chip.h"
+-#include "nvram.h"
++#include "chip.h"
++#include "firmware.h"
+
+ #define DCMD_RESP_TIMEOUT 2000 /* In milli second */
+
+@@ -112,8 +113,6 @@
+ #define BRCMF_TXBOUND 20 /* Default for max tx frames in
+ one scheduling */
+
+-#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
+-
+ #define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+ #define MEMBLOCK 2048 /* Block size used for downloading
+@@ -156,6 +155,34 @@
+ /* manfid tuple length, include tuple, link bytes */
+ #define SBSDIO_CIS_MANFID_TUPLE_LEN 6
+
++#define CORE_BUS_REG(base, field) \
++ (base + offsetof(struct sdpcmd_regs, field))
++
++/* SDIO function 1 register CHIPCLKCSR */
++/* Force ALP request to backplane */
++#define SBSDIO_FORCE_ALP 0x01
++/* Force HT request to backplane */
++#define SBSDIO_FORCE_HT 0x02
++/* Force ILP request to backplane */
++#define SBSDIO_FORCE_ILP 0x04
++/* Make ALP ready (power up xtal) */
++#define SBSDIO_ALP_AVAIL_REQ 0x08
++/* Make HT ready (power up PLL) */
++#define SBSDIO_HT_AVAIL_REQ 0x10
++/* Squelch clock requests from HW */
++#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
++/* Status: ALP is ready */
++#define SBSDIO_ALP_AVAIL 0x40
++/* Status: HT is ready */
++#define SBSDIO_HT_AVAIL 0x80
++#define SBSDIO_CSR_MASK 0x1F
++#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
++#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
++#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
++#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
++#define SBSDIO_CLKAV(regval, alponly) \
++ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
++
+ /* intstatus */
+ #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+ #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+@@ -276,7 +303,6 @@
+ /* Flags for SDH calls */
+ #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+-#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
+ #define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
+ * when idle
+ */
+@@ -433,10 +459,11 @@
+ bool alp_only; /* Don't use HT clock (ALP only) */
+
+ u8 *ctrl_frame_buf;
+- u32 ctrl_frame_len;
++ u16 ctrl_frame_len;
+ bool ctrl_frame_stat;
+
+- spinlock_t txqlock;
++ spinlock_t txq_lock; /* protect bus->txq */
++ struct semaphore tx_seq_lock; /* protect bus->tx_seq */
+ wait_queue_head_t ctrl_wait;
+ wait_queue_head_t dcmd_resp_wait;
+
+@@ -483,16 +510,58 @@
+
+ #define ALIGNMENT 4
+
+-static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+-module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
+-MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
+-
+ enum brcmf_sdio_frmtype {
+ BRCMF_SDIO_FT_NORMAL,
+ BRCMF_SDIO_FT_SUPER,
+ BRCMF_SDIO_FT_SUB,
+ };
+
++#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
++
++/* SDIO Pad drive strength to select value mappings */
++struct sdiod_drive_str {
++ u8 strength; /* Pad Drive Strength in mA */
++ u8 sel; /* Chip-specific select value */
++};
++
++/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
++static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
++ {32, 0x6},
++ {26, 0x7},
++ {22, 0x4},
++ {16, 0x5},
++ {12, 0x2},
++ {8, 0x3},
++ {4, 0x0},
++ {0, 0x1}
++};
++
++/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
++static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
++ {6, 0x7},
++ {5, 0x6},
++ {4, 0x5},
++ {3, 0x4},
++ {2, 0x2},
++ {1, 0x1},
++ {0, 0x0}
++};
++
++/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
++static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
++ {3, 0x3},
++ {2, 0x2},
++ {1, 0x1},
++ {0, 0x0} };
++
++/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
++static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
++ {16, 0x7},
++ {12, 0x5},
++ {8, 0x3},
++ {4, 0x1}
++};
++
+ #define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
+ #define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
+ #define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
+@@ -511,6 +580,8 @@
+ #define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
+ #define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
+ #define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
++#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
++#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
+
+ MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+ MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+@@ -530,6 +601,8 @@
+ MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
+ MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
+ MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
++MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
++MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
+
+ struct brcmf_firmware_names {
+ u32 chipid;
+@@ -555,46 +628,32 @@
+ { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+ { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
+ { BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
+- { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
++ { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
++ { BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
+ };
+
+-
+-static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
+- enum brcmf_firmware_type type)
++static const char *brcmf_sdio_get_fwname(struct brcmf_chip *ci,
++ enum brcmf_firmware_type type)
+ {
+- const struct firmware *fw;
+- const char *name;
+- int err, i;
++ int i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+- if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
+- brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
++ if (brcmf_fwname_data[i].chipid == ci->chip &&
++ brcmf_fwname_data[i].revmsk & BIT(ci->chiprev)) {
+ switch (type) {
+ case BRCMF_FIRMWARE_BIN:
+- name = brcmf_fwname_data[i].bin;
+- break;
++ return brcmf_fwname_data[i].bin;
+ case BRCMF_FIRMWARE_NVRAM:
+- name = brcmf_fwname_data[i].nv;
+- break;
++ return brcmf_fwname_data[i].nv;
+ default:
+ brcmf_err("invalid firmware type (%d)\n", type);
+ return NULL;
+ }
+- goto found;
+ }
+ }
+ brcmf_err("Unknown chipid %d [%d]\n",
+- bus->ci->chip, bus->ci->chiprev);
++ ci->chip, ci->chiprev);
+ return NULL;
+-
+-found:
+- err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
+- if ((err) || (!fw)) {
+- brcmf_err("fail to request firmware %s (%d)\n", name, err);
+- return NULL;
+- }
+-
+- return fw;
+ }
+
+ static void pkt_align(struct sk_buff *p, int len, int align)
+@@ -618,27 +677,24 @@
+ * Reads a register in the SDIO hardware block. This block occupies a series of
+ * adresses on the 32 bit backplane bus.
+ */
+-static int
+-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
++static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
+ {
+- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
++ struct brcmf_core *core;
+ int ret;
+
+- *regvar = brcmf_sdiod_regrl(bus->sdiodev,
+- bus->ci->c_inf[idx].base + offset, &ret);
++ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
++ *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
+
+ return ret;
+ }
+
+-static int
+-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
++static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
+ {
+- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
++ struct brcmf_core *core;
+ int ret;
+
+- brcmf_sdiod_regwl(bus->sdiodev,
+- bus->ci->c_inf[idx].base + reg_offset,
+- regval, &ret);
++ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
++ brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
+
+ return ret;
+ }
+@@ -650,16 +706,12 @@
+ int err = 0;
+ int try_cnt = 0;
+
+- brcmf_dbg(TRACE, "Enter\n");
++ brcmf_dbg(TRACE, "Enter: on=%d\n", on);
+
+ wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+ /* 1st KSO write goes to AOS wake up core if device is asleep */
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
+- if (err) {
+- brcmf_err("SDIO_AOS KSO write error: %d\n", err);
+- return err;
+- }
+
+ if (on) {
+ /* device WAKEUP through KSO:
+@@ -689,18 +741,22 @@
+ &err);
+ if (((rd_val & bmask) == cmp_val) && !err)
+ break;
+- brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
+- try_cnt, MAX_KSO_ATTEMPTS, err);
++
+ udelay(KSO_WAIT_US);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
+ } while (try_cnt++ < MAX_KSO_ATTEMPTS);
+
++ if (try_cnt > 2)
++ brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
++ rd_val, err);
++
++ if (try_cnt > MAX_KSO_ATTEMPTS)
++ brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
++
+ return err;
+ }
+
+-#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
+-
+ #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+ /* Turn backplane clock on or off */
+@@ -799,7 +855,6 @@
+ }
+ #endif /* defined (DEBUG) */
+
+- bus->activity = true;
+ } else {
+ clkreq = 0;
+
+@@ -899,8 +954,9 @@
+ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
+ {
+ int err = 0;
+- brcmf_dbg(TRACE, "Enter\n");
+- brcmf_dbg(SDIO, "request %s currently %s\n",
++ u8 clkcsr;
++
++ brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE"));
+
+@@ -917,8 +973,20 @@
+ atomic_read(&bus->ipend) > 0 ||
+ (!atomic_read(&bus->fcstate) &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+- data_ok(bus)))
+- return -EBUSY;
++ data_ok(bus))) {
++ err = -EBUSY;
++ goto done;
++ }
++
++ clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
++ SBSDIO_FUNC1_CHIPCLKCSR,
++ &err);
++ if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
++ brcmf_dbg(SDIO, "no clock, set ALP\n");
++ brcmf_sdiod_regwb(bus->sdiodev,
++ SBSDIO_FUNC1_CHIPCLKCSR,
++ SBSDIO_ALP_AVAIL_REQ, &err);
++ }
+ err = brcmf_sdio_kso_control(bus, false);
+ /* disable watchdog */
+ if (!err)
+@@ -935,7 +1003,7 @@
+ } else {
+ brcmf_err("error while changing bus sleep state %d\n",
+ err);
+- return err;
++ goto done;
+ }
+ }
+
+@@ -947,11 +1015,92 @@
+ } else {
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
+ }
+-
++done:
++ brcmf_dbg(SDIO, "Exit: err=%d\n", err);
+ return err;
+
+ }
+
++#ifdef DEBUG
++static inline bool brcmf_sdio_valid_shared_address(u32 addr)
++{
++ return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
++}
++
++static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
++ struct sdpcm_shared *sh)
++{
++ u32 addr;
++ int rv;
++ u32 shaddr = 0;
++ struct sdpcm_shared_le sh_le;
++ __le32 addr_le;
++
++ shaddr = bus->ci->rambase + bus->ramsize - 4;
++
++ /*
++ * Read last word in socram to determine
++ * address of sdpcm_shared structure
++ */
++ sdio_claim_host(bus->sdiodev->func[1]);
++ brcmf_sdio_bus_sleep(bus, false, false);
++ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
++ sdio_release_host(bus->sdiodev->func[1]);
++ if (rv < 0)
++ return rv;
++
++ addr = le32_to_cpu(addr_le);
++
++ brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
++
++ /*
++ * Check if addr is valid.
++ * NVRAM length at the end of memory should have been overwritten.
++ */
++ if (!brcmf_sdio_valid_shared_address(addr)) {
++ brcmf_err("invalid sdpcm_shared address 0x%08X\n",
++ addr);
++ return -EINVAL;
++ }
++
++ /* Read hndrte_shared structure */
++ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
++ sizeof(struct sdpcm_shared_le));
++ if (rv < 0)
++ return rv;
++
++ /* Endianness */
++ sh->flags = le32_to_cpu(sh_le.flags);
++ sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
++ sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
++ sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
++ sh->assert_line = le32_to_cpu(sh_le.assert_line);
++ sh->console_addr = le32_to_cpu(sh_le.console_addr);
++ sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
++
++ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
++ brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
++ SDPCM_SHARED_VERSION,
++ sh->flags & SDPCM_SHARED_VERSION_MASK);
++ return -EPROTO;
++ }
++
++ return 0;
++}
++
++static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
++{
++ struct sdpcm_shared sh;
++
++ if (brcmf_sdio_readshared(bus, &sh) == 0)
++ bus->console_addr = sh.console_addr;
++}
++#else
++static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
++{
++}
++#endif /* DEBUG */
++
+ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
+ {
+ u32 intstatus = 0;
+@@ -995,6 +1144,12 @@
+ else
+ brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
+ bus->sdpcm_ver);
++
++ /*
++ * Retrieve console state address now that firmware should have
++ * updated it.
++ */
++ brcmf_sdio_get_console_addr(bus);
+ }
+
+ /*
+@@ -1083,6 +1238,28 @@
+ bus->cur_read.len = 0;
+ }
+
++static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
++{
++ struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
++ u8 i, hi, lo;
++
++ /* On failure, abort the command and terminate the frame */
++ brcmf_err("sdio error, abort command and terminate frame\n");
++ bus->sdcnt.tx_sderrs++;
++
++ brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
++ bus->sdcnt.f1regdata++;
++
++ for (i = 0; i < 3; i++) {
++ hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
++ lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
++ bus->sdcnt.f1regdata += 2;
++ if ((hi == 0) && (lo == 0))
++ break;
++ }
++}
++
+ /* return total length of buffer chain */
+ static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
+ {
+@@ -1955,7 +2132,7 @@
+ memcpy(pkt_pad->data,
+ pkt->data + pkt->len - tail_chop,
+ tail_chop);
+- *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
++ *(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+ skb_trim(pkt, pkt->len - tail_chop);
+ skb_trim(pkt_pad, tail_pad + tail_chop);
+ __skb_queue_after(pktq, pkt, pkt_pad);
+@@ -2003,7 +2180,7 @@
+ * already properly aligned and does not
+ * need an sdpcm header.
+ */
+- if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
++ if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+ continue;
+
+ /* align packet data pointer */
+@@ -2037,10 +2214,10 @@
+ if (BRCMF_BYTES_ON() &&
+ ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+ (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+- brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
++ brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
+ "Tx Frame:\n");
+ else if (BRCMF_HDRS_ON())
+- brcmf_dbg_hex_dump(true, pkt_next,
++ brcmf_dbg_hex_dump(true, pkt_next->data,
+ head_pad + bus->tx_hdrlen,
+ "Tx Header:\n");
+ }
+@@ -2067,11 +2244,11 @@
+ u8 *hdr;
+ u32 dat_offset;
+ u16 tail_pad;
+- u32 dummy_flags, chop_len;
++ u16 dummy_flags, chop_len;
+ struct sk_buff *pkt_next, *tmp, *pkt_prev;
+
+ skb_queue_walk_safe(pktq, pkt_next, tmp) {
+- dummy_flags = *(u32 *)(pkt_next->cb);
++ dummy_flags = *(u16 *)(pkt_next->cb);
+ if (dummy_flags & ALIGN_SKB_FLAG) {
+ chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
+ if (chop_len) {
+@@ -2100,7 +2277,6 @@
+ uint chan)
+ {
+ int ret;
+- int i;
+ struct sk_buff *pkt_next, *tmp;
+
+ brcmf_dbg(TRACE, "Enter\n");
+@@ -2113,28 +2289,9 @@
+ ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
+ bus->sdcnt.f2txdata++;
+
+- if (ret < 0) {
+- /* On failure, abort the command and terminate the frame */
+- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+- ret);
+- bus->sdcnt.tx_sderrs++;
++ if (ret < 0)
++ brcmf_sdio_txfail(bus);
+
+- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+- SFC_WF_TERM, NULL);
+- bus->sdcnt.f1regdata++;
+-
+- for (i = 0; i < 3; i++) {
+- u8 hi, lo;
+- hi = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+- lo = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+- bus->sdcnt.f1regdata += 2;
+- if ((hi == 0) && (lo == 0))
+- break;
+- }
+- }
+ sdio_release_host(bus->sdiodev->func[1]);
+
+ done:
+@@ -2164,13 +2321,15 @@
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
+ pkt_num = 1;
+- __skb_queue_head_init(&pktq);
++ if (down_interruptible(&bus->tx_seq_lock))
++ return cnt;
+ if (bus->txglom)
+ pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
+- brcmf_sdio_txglomsz);
++ bus->sdiodev->txglomsz);
+ pkt_num = min_t(u32, pkt_num,
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
+- spin_lock_bh(&bus->txqlock);
++ __skb_queue_head_init(&pktq);
++ spin_lock_bh(&bus->txq_lock);
+ for (i = 0; i < pkt_num; i++) {
+ pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
+ &prec_out);
+@@ -2178,15 +2337,19 @@
+ break;
+ __skb_queue_tail(&pktq, pkt);
+ }
+- spin_unlock_bh(&bus->txqlock);
+- if (i == 0)
++ spin_unlock_bh(&bus->txq_lock);
++ if (i == 0) {
++ up(&bus->tx_seq_lock);
+ break;
++ }
+
+ ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
++ up(&bus->tx_seq_lock);
++
+ cnt += i;
+
+ /* In poll mode, need to check for other events */
+- if (!bus->intr && cnt) {
++ if (!bus->intr) {
+ /* Check device status, signal pending interrupt */
+ sdio_claim_host(bus->sdiodev->func[1]);
+ ret = r_sdreg32(bus, &intstatus,
+@@ -2211,6 +2374,68 @@
+ return cnt;
+ }
+
++static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
++{
++ u8 doff;
++ u16 pad;
++ uint retries = 0;
++ struct brcmf_sdio_hdrinfo hd_info = {0};
++ int ret;
++
++ brcmf_dbg(TRACE, "Enter\n");
++
++ /* Back the pointer to make room for bus header */
++ frame -= bus->tx_hdrlen;
++ len += bus->tx_hdrlen;
++
++ /* Add alignment padding (optional for ctl frames) */
++ doff = ((unsigned long)frame % bus->head_align);
++ if (doff) {
++ frame -= doff;
++ len += doff;
++ memset(frame + bus->tx_hdrlen, 0, doff);
++ }
++
++ /* Round send length to next SDIO block */
++ pad = 0;
++ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
++ pad = bus->blocksize - (len % bus->blocksize);
++ if ((pad > bus->roundup) || (pad >= bus->blocksize))
++ pad = 0;
++ } else if (len % bus->head_align) {
++ pad = bus->head_align - (len % bus->head_align);
++ }
++ len += pad;
++
++ hd_info.len = len - pad;
++ hd_info.channel = SDPCM_CONTROL_CHANNEL;
++ hd_info.dat_offset = doff + bus->tx_hdrlen;
++ hd_info.seq_num = bus->tx_seq;
++ hd_info.lastfrm = true;
++ hd_info.tail_pad = pad;
++ brcmf_sdio_hdpack(bus, frame, &hd_info);
++
++ if (bus->txglom)
++ brcmf_sdio_update_hwhdr(frame, len);
++
++ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
++ frame, len, "Tx Frame:\n");
++ brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
++ BRCMF_HDRS_ON(),
++ frame, min_t(u16, len, 16), "TxHdr:\n");
++
++ do {
++ ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
++
++ if (ret < 0)
++ brcmf_sdio_txfail(bus);
++ else
++ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
++ } while (ret < 0 && retries++ < TXRETRIES);
++
++ return ret;
++}
++
+ static void brcmf_sdio_bus_stop(struct device *dev)
+ {
+ u32 local_hostintmask;
+@@ -2292,21 +2517,29 @@
+ }
+ }
+
++static void atomic_orr(int val, atomic_t *v)
++{
++ int old_val;
++
++ old_val = atomic_read(v);
++ while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
++ old_val = atomic_read(v);
++}
++
+ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
+ {
+- u8 idx;
++ struct brcmf_core *buscore;
+ u32 addr;
+ unsigned long val;
+- int n, ret;
++ int ret;
+
+- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+- addr = bus->ci->c_inf[idx].base +
+- offsetof(struct sdpcmd_regs, intstatus);
++ buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
++ addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
+
+ val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
+ bus->sdcnt.f1regdata++;
+ if (ret != 0)
+- val = 0;
++ return ret;
+
+ val &= bus->hostintmask;
+ atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
+@@ -2315,13 +2548,7 @@
+ if (val) {
+ brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
+ bus->sdcnt.f1regdata++;
+- }
+-
+- if (ret) {
+- atomic_set(&bus->intstatus, 0);
+- } else if (val) {
+- for_each_set_bit(n, &val, 32)
+- set_bit(n, (unsigned long *)&bus->intstatus.counter);
++ atomic_orr(val, &bus->intstatus);
+ }
+
+ return ret;
+@@ -2331,10 +2558,9 @@
+ {
+ u32 newstatus = 0;
+ unsigned long intstatus;
+- uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
+ uint txlimit = bus->txbound; /* Tx frames to send before resched */
+- uint framecnt = 0; /* Temporary counter of tx/rx frames */
+- int err = 0, n;
++ uint framecnt; /* Temporary counter of tx/rx frames */
++ int err = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+@@ -2431,70 +2657,38 @@
+ intstatus &= ~I_HMB_FRAME_IND;
+
+ /* On frame indication, read available frames */
+- if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
+- framecnt = brcmf_sdio_readframes(bus, rxlimit);
++ if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
++ brcmf_sdio_readframes(bus, bus->rxbound);
+ if (!bus->rxpending)
+ intstatus &= ~I_HMB_FRAME_IND;
+- rxlimit -= min(framecnt, rxlimit);
+ }
+
+ /* Keep still-pending events for next scheduling */
+- if (intstatus) {
+- for_each_set_bit(n, &intstatus, 32)
+- set_bit(n, (unsigned long *)&bus->intstatus.counter);
+- }
++ if (intstatus)
++ atomic_orr(intstatus, &bus->intstatus);
+
+ brcmf_sdio_clrintr(bus);
+
+- if (data_ok(bus) && bus->ctrl_frame_stat &&
+- (bus->clkstate == CLK_AVAIL)) {
+- int i;
+-
+- sdio_claim_host(bus->sdiodev->func[1]);
+- err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
+- (u32)bus->ctrl_frame_len);
+-
+- if (err < 0) {
+- /* On failure, abort the command and
+- terminate the frame */
+- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+- err);
+- bus->sdcnt.tx_sderrs++;
+-
+- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+-
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+- SFC_WF_TERM, &err);
+- bus->sdcnt.f1regdata++;
+-
+- for (i = 0; i < 3; i++) {
+- u8 hi, lo;
+- hi = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCHI,
+- &err);
+- lo = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCLO,
+- &err);
+- bus->sdcnt.f1regdata += 2;
+- if ((hi == 0) && (lo == 0))
+- break;
+- }
++ if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
++ (down_interruptible(&bus->tx_seq_lock) == 0)) {
++ if (data_ok(bus)) {
++ sdio_claim_host(bus->sdiodev->func[1]);
++ err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
++ bus->ctrl_frame_len);
++ sdio_release_host(bus->sdiodev->func[1]);
+
+- } else {
+- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
++ bus->ctrl_frame_stat = false;
++ brcmf_sdio_wait_event_wakeup(bus);
+ }
+- sdio_release_host(bus->sdiodev->func[1]);
+- bus->ctrl_frame_stat = false;
+- brcmf_sdio_wait_event_wakeup(bus);
++ up(&bus->tx_seq_lock);
+ }
+ /* Send queued frames (limit 1 if rx may still be pending) */
+- else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
+- brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
+- && data_ok(bus)) {
++ if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
++ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
++ data_ok(bus)) {
+ framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
+ txlimit;
+- framecnt = brcmf_sdio_sendfromq(bus, framecnt);
+- txlimit -= framecnt;
++ brcmf_sdio_sendfromq(bus, framecnt);
+ }
+
+ if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
+@@ -2504,19 +2698,9 @@
+ atomic_read(&bus->ipend) > 0 ||
+ (!atomic_read(&bus->fcstate) &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+- data_ok(bus)) || PKT_AVAILABLE()) {
++ data_ok(bus))) {
+ atomic_inc(&bus->dpc_tskcnt);
+ }
+-
+- /* If we're done for now, turn off clock request. */
+- if ((bus->clkstate != CLK_PENDING)
+- && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
+- bus->activity = false;
+- brcmf_dbg(SDIO, "idle state\n");
+- sdio_claim_host(bus->sdiodev->func[1]);
+- brcmf_sdio_bus_sleep(bus, true, false);
+- sdio_release_host(bus->sdiodev->func[1]);
+- }
+ }
+
+ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
+@@ -2531,15 +2715,12 @@
+ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
+ {
+ int ret = -EBADE;
+- uint datalen, prec;
++ uint prec;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+- ulong flags;
+-
+- brcmf_dbg(TRACE, "Enter\n");
+
+- datalen = pkt->len;
++ brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
+
+ /* Add space for the header */
+ skb_push(pkt, bus->tx_hdrlen);
+@@ -2553,7 +2734,9 @@
+ bus->sdcnt.fcqueued++;
+
+ /* Priority based enq */
+- spin_lock_irqsave(&bus->txqlock, flags);
++ spin_lock_bh(&bus->txq_lock);
++ /* reset bus_flags in packet cb */
++ *(u16 *)(pkt->cb) = 0;
+ if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
+ skb_pull(pkt, bus->tx_hdrlen);
+ brcmf_err("out of bus->txq !!!\n");
+@@ -2566,7 +2749,7 @@
+ bus->txoff = true;
+ brcmf_txflowblock(bus->sdiodev->dev, true);
+ }
+- spin_unlock_irqrestore(&bus->txqlock, flags);
++ spin_unlock_bh(&bus->txq_lock);
+
+ #ifdef DEBUG
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+@@ -2661,110 +2844,27 @@
+ }
+ #endif /* DEBUG */
+
+-static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
+-{
+- int i;
+- int ret;
+-
+- bus->ctrl_frame_stat = false;
+- ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
+-
+- if (ret < 0) {
+- /* On failure, abort the command and terminate the frame */
+- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+- ret);
+- bus->sdcnt.tx_sderrs++;
+-
+- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+-
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+- SFC_WF_TERM, NULL);
+- bus->sdcnt.f1regdata++;
+-
+- for (i = 0; i < 3; i++) {
+- u8 hi, lo;
+- hi = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+- lo = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+- bus->sdcnt.f1regdata += 2;
+- if (hi == 0 && lo == 0)
+- break;
+- }
+- return ret;
+- }
+-
+- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+-
+- return ret;
+-}
+-
+ static int
+ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
+ {
+- u8 *frame;
+- u16 len, pad;
+- uint retries = 0;
+- u8 doff = 0;
+- int ret = -1;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+- struct brcmf_sdio_hdrinfo hd_info = {0};
++ int ret = -1;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+- /* Back the pointer to make a room for bus header */
+- frame = msg - bus->tx_hdrlen;
+- len = (msglen += bus->tx_hdrlen);
++ if (down_interruptible(&bus->tx_seq_lock))
++ return -EINTR;
+
+- /* Add alignment padding (optional for ctl frames) */
+- doff = ((unsigned long)frame % bus->head_align);
+- if (doff) {
+- frame -= doff;
+- len += doff;
+- msglen += doff;
+- memset(frame, 0, doff + bus->tx_hdrlen);
+- }
+- /* precondition: doff < bus->head_align */
+- doff += bus->tx_hdrlen;
+-
+- /* Round send length to next SDIO block */
+- pad = 0;
+- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+- pad = bus->blocksize - (len % bus->blocksize);
+- if ((pad > bus->roundup) || (pad >= bus->blocksize))
+- pad = 0;
+- } else if (len % bus->head_align) {
+- pad = bus->head_align - (len % bus->head_align);
+- }
+- len += pad;
+-
+- /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
+-
+- /* Make sure backplane clock is on */
+- sdio_claim_host(bus->sdiodev->func[1]);
+- brcmf_sdio_bus_sleep(bus, false, false);
+- sdio_release_host(bus->sdiodev->func[1]);
+-
+- hd_info.len = (u16)msglen;
+- hd_info.channel = SDPCM_CONTROL_CHANNEL;
+- hd_info.dat_offset = doff;
+- hd_info.seq_num = bus->tx_seq;
+- hd_info.lastfrm = true;
+- hd_info.tail_pad = pad;
+- brcmf_sdio_hdpack(bus, frame, &hd_info);
+-
+- if (bus->txglom)
+- brcmf_sdio_update_hwhdr(frame, len);
+-
+- if (!data_ok(bus)) {
+- brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+- bus->tx_max, bus->tx_seq);
+- bus->ctrl_frame_stat = true;
+- /* Send from dpc */
+- bus->ctrl_frame_buf = frame;
+- bus->ctrl_frame_len = len;
++ if (!data_ok(bus)) {
++ brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
++ bus->tx_max, bus->tx_seq);
++ up(&bus->tx_seq_lock);
++ /* Send from dpc */
++ bus->ctrl_frame_buf = msg;
++ bus->ctrl_frame_len = msglen;
++ bus->ctrl_frame_stat = true;
+
+ wait_event_interruptible_timeout(bus->ctrl_wait,
+ !bus->ctrl_frame_stat,
+@@ -2775,31 +2875,18 @@
+ ret = 0;
+ } else {
+ brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
++ bus->ctrl_frame_stat = false;
++ if (down_interruptible(&bus->tx_seq_lock))
++ return -EINTR;
+ ret = -1;
+ }
+ }
+-
+ if (ret == -1) {
+- brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+- frame, len, "Tx Frame:\n");
+- brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
+- BRCMF_HDRS_ON(),
+- frame, min_t(u16, len, 16), "TxHdr:\n");
+-
+- do {
+- sdio_claim_host(bus->sdiodev->func[1]);
+- ret = brcmf_sdio_tx_frame(bus, frame, len);
+- sdio_release_host(bus->sdiodev->func[1]);
+- } while (ret < 0 && retries++ < TXRETRIES);
+- }
+-
+- if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
+- atomic_read(&bus->dpc_tskcnt) == 0) {
+- bus->activity = false;
+ sdio_claim_host(bus->sdiodev->func[1]);
+- brcmf_dbg(INFO, "idle\n");
+- brcmf_sdio_clkctl(bus, CLK_NONE, true);
++ brcmf_sdio_bus_sleep(bus, false, false);
++ ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen);
+ sdio_release_host(bus->sdiodev->func[1]);
++ up(&bus->tx_seq_lock);
+ }
+
+ if (ret)
+@@ -2811,72 +2898,6 @@
+ }
+
+ #ifdef DEBUG
+-static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+-{
+- return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+-}
+-
+-static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+- struct sdpcm_shared *sh)
+-{
+- u32 addr;
+- int rv;
+- u32 shaddr = 0;
+- struct sdpcm_shared_le sh_le;
+- __le32 addr_le;
+-
+- shaddr = bus->ci->rambase + bus->ramsize - 4;
+-
+- /*
+- * Read last word in socram to determine
+- * address of sdpcm_shared structure
+- */
+- sdio_claim_host(bus->sdiodev->func[1]);
+- brcmf_sdio_bus_sleep(bus, false, false);
+- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+- sdio_release_host(bus->sdiodev->func[1]);
+- if (rv < 0)
+- return rv;
+-
+- addr = le32_to_cpu(addr_le);
+-
+- brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+-
+- /*
+- * Check if addr is valid.
+- * NVRAM length at the end of memory should have been overwritten.
+- */
+- if (!brcmf_sdio_valid_shared_address(addr)) {
+- brcmf_err("invalid sdpcm_shared address 0x%08X\n",
+- addr);
+- return -EINVAL;
+- }
+-
+- /* Read hndrte_shared structure */
+- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+- sizeof(struct sdpcm_shared_le));
+- if (rv < 0)
+- return rv;
+-
+- /* Endianness */
+- sh->flags = le32_to_cpu(sh_le.flags);
+- sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+- sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+- sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+- sh->assert_line = le32_to_cpu(sh_le.assert_line);
+- sh->console_addr = le32_to_cpu(sh_le.console_addr);
+- sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+-
+- if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
+- brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
+- SDPCM_SHARED_VERSION,
+- sh->flags & SDPCM_SHARED_VERSION_MASK);
+- return -EPROTO;
+- }
+-
+- return 0;
+-}
+-
+ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh, char __user *data,
+ size_t count)
+@@ -3106,6 +3127,8 @@
+ debugfs_create_file("forensics", S_IRUGO, dentry, bus,
+ &brcmf_sdio_forensic_ops);
+ brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
++ debugfs_create_u32("console_interval", 0644, dentry,
++ &bus->console_interval);
+ }
+ #else
+ static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
+@@ -3224,51 +3247,29 @@
+ const struct firmware *fw)
+ {
+ int err;
+- int offset;
+- int address;
+- int len;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+- err = 0;
+- offset = 0;
+- address = bus->ci->rambase;
+- while (offset < fw->size) {
+- len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
+- fw->size - offset;
+- err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
+- (u8 *)&fw->data[offset], len);
+- if (err) {
+- brcmf_err("error %d on writing %d membytes at 0x%08x\n",
+- err, len, address);
+- return err;
+- }
+- offset += len;
+- address += len;
+- }
+- if (!err)
+- if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+- (u8 *)fw->data, fw->size))
+- err = -EIO;
++ err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
++ (u8 *)fw->data, fw->size);
++ if (err)
++ brcmf_err("error %d on writing %d membytes at 0x%08x\n",
++ err, (int)fw->size, bus->ci->rambase);
++ else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
++ (u8 *)fw->data, fw->size))
++ err = -EIO;
+
+ return err;
+ }
+
+ static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
+- const struct firmware *nv)
++ void *vars, u32 varsz)
+ {
+- void *vars;
+- u32 varsz;
+ int address;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+- vars = brcmf_nvram_strip(nv, &varsz);
+-
+- if (vars == NULL)
+- return -EINVAL;
+-
+ address = bus->ci->ramsize - varsz + bus->ci->rambase;
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
+ if (err)
+@@ -3277,28 +3278,21 @@
+ else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
+ err = -EIO;
+
+- brcmf_nvram_free(vars);
+-
+ return err;
+ }
+
+-static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
++static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
++ const struct firmware *fw,
++ void *nvram, u32 nvlen)
+ {
+ int bcmerror = -EFAULT;
+- const struct firmware *fw;
+ u32 rstvec;
+
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+
+ /* Keep arm in reset */
+- brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+-
+- fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
+- if (fw == NULL) {
+- bcmerror = -ENOENT;
+- goto err;
+- }
++ brcmf_chip_enter_download(bus->ci);
+
+ rstvec = get_unaligned_le32(fw->data);
+ brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
+@@ -3307,24 +3301,19 @@
+ release_firmware(fw);
+ if (bcmerror) {
+ brcmf_err("dongle image file download failed\n");
++ brcmf_fw_nvram_free(nvram);
+ goto err;
+ }
+
+- fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+- if (fw == NULL) {
+- bcmerror = -ENOENT;
+- goto err;
+- }
+-
+- bcmerror = brcmf_sdio_download_nvram(bus, fw);
+- release_firmware(fw);
++ bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
++ brcmf_fw_nvram_free(nvram);
+ if (bcmerror) {
+ brcmf_err("dongle nvram file download failed\n");
+ goto err;
+ }
+
+ /* Take arm out of reset */
+- if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
++ if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
+ brcmf_err("error getting out of ARM core reset\n");
+ goto err;
+ }
+@@ -3339,40 +3328,6 @@
+ return bcmerror;
+ }
+
+-static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
+-{
+- u32 addr, reg, pmu_cc3_mask = ~0;
+- int err;
+-
+- brcmf_dbg(TRACE, "Enter\n");
+-
+- /* old chips with PMU version less than 17 don't support save restore */
+- if (bus->ci->pmurev < 17)
+- return false;
+-
+- switch (bus->ci->chip) {
+- case BCM43241_CHIP_ID:
+- case BCM4335_CHIP_ID:
+- case BCM4339_CHIP_ID:
+- /* read PMU chipcontrol register 3 */
+- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
+- brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
+- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
+- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+- return (reg & pmu_cc3_mask) != 0;
+- default:
+- addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
+- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
+- if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+- return false;
+-
+- addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
+- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+- return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+- PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+- }
+-}
+-
+ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
+ {
+ int err = 0;
+@@ -3424,7 +3379,7 @@
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* KSO bit added in SDIO core rev 12 */
+- if (bus->ci->c_inf[1].rev < 12)
++ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
+ return 0;
+
+ val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
+@@ -3455,15 +3410,13 @@
+ struct brcmf_sdio *bus = sdiodev->bus;
+ uint pad_size;
+ u32 value;
+- u8 idx;
+ int err;
+
+ /* the commands below use the terms tx and rx from
+ * a device perspective, ie. bus:txglom affects the
+ * bus transfers from device to host.
+ */
+- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+- if (bus->ci->c_inf[idx].rev < 12) {
++ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
+ /* for sdio core rev < 12, disable txgloming */
+ value = 0;
+ err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
+@@ -3503,97 +3456,6 @@
+ return err;
+ }
+
+-static int brcmf_sdio_bus_init(struct device *dev)
+-{
+- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+- struct brcmf_sdio *bus = sdiodev->bus;
+- int err, ret = 0;
+- u8 saveclk;
+-
+- brcmf_dbg(TRACE, "Enter\n");
+-
+- /* try to download image and nvram to the dongle */
+- if (bus_if->state == BRCMF_BUS_DOWN) {
+- bus->alp_only = true;
+- err = brcmf_sdio_download_firmware(bus);
+- if (err)
+- return err;
+- bus->alp_only = false;
+- }
+-
+- if (!bus->sdiodev->bus_if->drvr)
+- return 0;
+-
+- /* Start the watchdog timer */
+- bus->sdcnt.tickcnt = 0;
+- brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
+-
+- sdio_claim_host(bus->sdiodev->func[1]);
+-
+- /* Make sure backplane clock is on, needed to generate F2 interrupt */
+- brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+- if (bus->clkstate != CLK_AVAIL)
+- goto exit;
+-
+- /* Force clocks on backplane to be sure F2 interrupt propagates */
+- saveclk = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+- if (!err) {
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+- (saveclk | SBSDIO_FORCE_HT), &err);
+- }
+- if (err) {
+- brcmf_err("Failed to force clock for F2: err %d\n", err);
+- goto exit;
+- }
+-
+- /* Enable function 2 (frame transfers) */
+- w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+- offsetof(struct sdpcmd_regs, tosbmailboxdata));
+- err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+-
+-
+- brcmf_dbg(INFO, "enable F2: err=%d\n", err);
+-
+- /* If F2 successfully enabled, set core and enable interrupts */
+- if (!err) {
+- /* Set up the interrupt mask and enable interrupts */
+- bus->hostintmask = HOSTINTMASK;
+- w_sdreg32(bus, bus->hostintmask,
+- offsetof(struct sdpcmd_regs, hostintmask));
+-
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
+- } else {
+- /* Disable F2 again */
+- sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+- ret = -ENODEV;
+- }
+-
+- if (brcmf_sdio_sr_capable(bus)) {
+- brcmf_sdio_sr_init(bus);
+- } else {
+- /* Restore previous clock setting */
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+- saveclk, &err);
+- }
+-
+- if (ret == 0) {
+- ret = brcmf_sdiod_intr_register(bus->sdiodev);
+- if (ret != 0)
+- brcmf_err("intr register failed:%d\n", ret);
+- }
+-
+- /* If we didn't come up, turn off backplane clock */
+- if (ret != 0)
+- brcmf_sdio_clkctl(bus, CLK_NONE, false);
+-
+-exit:
+- sdio_release_host(bus->sdiodev->func[1]);
+-
+- return ret;
+-}
+-
+ void brcmf_sdio_isr(struct brcmf_sdio *bus)
+ {
+ brcmf_dbg(TRACE, "Enter\n");
+@@ -3714,11 +3576,175 @@
+ datawork);
+
+ while (atomic_read(&bus->dpc_tskcnt)) {
++ atomic_set(&bus->dpc_tskcnt, 0);
+ brcmf_sdio_dpc(bus);
+- atomic_dec(&bus->dpc_tskcnt);
+ }
+ }
+
++static void
++brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
++ struct brcmf_chip *ci, u32 drivestrength)
++{
++ const struct sdiod_drive_str *str_tab = NULL;
++ u32 str_mask;
++ u32 str_shift;
++ u32 base;
++ u32 i;
++ u32 drivestrength_sel = 0;
++ u32 cc_data_temp;
++ u32 addr;
++
++ if (!(ci->cc_caps & CC_CAP_PMU))
++ return;
++
++ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
++ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
++ str_tab = sdiod_drvstr_tab1_1v8;
++ str_mask = 0x00003800;
++ str_shift = 11;
++ break;
++ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
++ str_tab = sdiod_drvstr_tab6_1v8;
++ str_mask = 0x00001800;
++ str_shift = 11;
++ break;
++ case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
++ /* note: 43143 does not support tristate */
++ i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
++ if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
++ str_tab = sdiod_drvstr_tab2_3v3;
++ str_mask = 0x00000007;
++ str_shift = 0;
++ } else
++ brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
++ ci->name, drivestrength);
++ break;
++ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
++ str_tab = sdiod_drive_strength_tab5_1v8;
++ str_mask = 0x00003800;
++ str_shift = 11;
++ break;
++ default:
++ brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
++ ci->name, ci->chiprev, ci->pmurev);
++ break;
++ }
++
++ if (str_tab != NULL) {
++ for (i = 0; str_tab[i].strength != 0; i++) {
++ if (drivestrength >= str_tab[i].strength) {
++ drivestrength_sel = str_tab[i].sel;
++ break;
++ }
++ }
++ base = brcmf_chip_get_chipcommon(ci)->base;
++ addr = CORE_CC_REG(base, chipcontrol_addr);
++ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
++ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
++ cc_data_temp &= ~str_mask;
++ drivestrength_sel <<= str_shift;
++ cc_data_temp |= drivestrength_sel;
++ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
++
++ brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
++ str_tab[i].strength, drivestrength, cc_data_temp);
++ }
++}
++
++static int brcmf_sdio_buscoreprep(void *ctx)
++{
++ struct brcmf_sdio_dev *sdiodev = ctx;
++ int err = 0;
++ u8 clkval, clkset;
++
++ /* Try forcing SDIO core to do ALPAvail request only */
++ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
++ if (err) {
++ brcmf_err("error writing for HT off\n");
++ return err;
++ }
++
++ /* If register supported, wait for ALPAvail and then force ALP */
++ /* This may take up to 15 milliseconds */
++ clkval = brcmf_sdiod_regrb(sdiodev,
++ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
++
++ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
++ brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
++ clkset, clkval);
++ return -EACCES;
++ }
++
++ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
++ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
++ !SBSDIO_ALPAV(clkval)),
++ PMU_MAX_TRANSITION_DLY);
++ if (!SBSDIO_ALPAV(clkval)) {
++ brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
++ clkval);
++ return -EBUSY;
++ }
++
++ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
++ udelay(65);
++
++ /* Also, disable the extra SDIO pull-ups */
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
++
++ return 0;
++}
++
++static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
++ u32 rstvec)
++{
++ struct brcmf_sdio_dev *sdiodev = ctx;
++ struct brcmf_core *core;
++ u32 reg_addr;
++
++ /* clear all interrupts */
++ core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
++ reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
++ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
++
++ if (rstvec)
++ /* Write reset vector to address 0 */
++ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
++ sizeof(rstvec));
++}
++
++static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
++{
++ struct brcmf_sdio_dev *sdiodev = ctx;
++ u32 val, rev;
++
++ val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
++ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
++ addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
++ rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
++ if (rev >= 2) {
++ val &= ~CID_ID_MASK;
++ val |= BCM4339_CHIP_ID;
++ }
++ }
++ return val;
++}
++
++static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
++{
++ struct brcmf_sdio_dev *sdiodev = ctx;
++
++ brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
++}
++
++static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
++ .prepare = brcmf_sdio_buscoreprep,
++ .exit_dl = brcmf_sdio_buscore_exitdl,
++ .read32 = brcmf_sdio_buscore_read32,
++ .write32 = brcmf_sdio_buscore_write32,
++};
++
+ static bool
+ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
+ {
+@@ -3734,7 +3760,7 @@
+ brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
+
+ /*
+- * Force PLL off until brcmf_sdio_chip_attach()
++ * Force PLL off until brcmf_chip_attach()
+ * programs PLL control regs
+ */
+
+@@ -3755,8 +3781,10 @@
+ */
+ brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
+
+- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
+- brcmf_err("brcmf_sdio_chip_attach failed!\n");
++ bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
++ if (IS_ERR(bus->ci)) {
++ brcmf_err("brcmf_chip_attach failed!\n");
++ bus->ci = NULL;
+ goto fail;
+ }
+
+@@ -3769,7 +3797,7 @@
+ drivestrength = bus->sdiodev->pdata->drive_strength;
+ else
+ drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
+- brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
++ brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+
+ /* Get info on the SOCRAM cores... */
+ bus->ramsize = bus->ci->ramsize;
+@@ -3792,24 +3820,18 @@
+ goto fail;
+
+ /* set PMUControl so a backplane reset does PMU state reload */
+- reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
++ reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
+ pmucontrol);
+- reg_val = brcmf_sdiod_regrl(bus->sdiodev,
+- reg_addr,
+- &err);
++ reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
+ if (err)
+ goto fail;
+
+ reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
+
+- brcmf_sdiod_regwl(bus->sdiodev,
+- reg_addr,
+- reg_val,
+- &err);
++ brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
+ if (err)
+ goto fail;
+
+-
+ sdio_release_host(bus->sdiodev->func[1]);
+
+ brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
+@@ -3849,6 +3871,7 @@
+ brcmf_sdio_bus_watchdog(bus);
+ /* Count the tick for reference */
+ bus->sdcnt.tickcnt++;
++ reinit_completion(&bus->watchdog_wait);
+ } else
+ break;
+ }
+@@ -3872,13 +3895,114 @@
+ static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
+ .stop = brcmf_sdio_bus_stop,
+ .preinit = brcmf_sdio_bus_preinit,
+- .init = brcmf_sdio_bus_init,
+ .txdata = brcmf_sdio_bus_txdata,
+ .txctl = brcmf_sdio_bus_txctl,
+ .rxctl = brcmf_sdio_bus_rxctl,
+ .gettxq = brcmf_sdio_bus_gettxq,
+ };
+
++static void brcmf_sdio_firmware_callback(struct device *dev,
++ const struct firmware *code,
++ void *nvram, u32 nvram_len)
++{
++ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
++ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++ struct brcmf_sdio *bus = sdiodev->bus;
++ int err = 0;
++ u8 saveclk;
++
++ brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
++
++ /* try to download image and nvram to the dongle */
++ if (bus_if->state == BRCMF_BUS_DOWN) {
++ bus->alp_only = true;
++ err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
++ if (err)
++ goto fail;
++ bus->alp_only = false;
++ }
++
++ if (!bus_if->drvr)
++ return;
++
++ /* Start the watchdog timer */
++ bus->sdcnt.tickcnt = 0;
++ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
++
++ sdio_claim_host(sdiodev->func[1]);
++
++ /* Make sure backplane clock is on, needed to generate F2 interrupt */
++ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
++ if (bus->clkstate != CLK_AVAIL)
++ goto release;
++
++ /* Force clocks on backplane to be sure F2 interrupt propagates */
++ saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
++ if (!err) {
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
++ (saveclk | SBSDIO_FORCE_HT), &err);
++ }
++ if (err) {
++ brcmf_err("Failed to force clock for F2: err %d\n", err);
++ goto release;
++ }
++
++ /* Enable function 2 (frame transfers) */
++ w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
++ offsetof(struct sdpcmd_regs, tosbmailboxdata));
++ err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
++
++
++ brcmf_dbg(INFO, "enable F2: err=%d\n", err);
++
++ /* If F2 successfully enabled, set core and enable interrupts */
++ if (!err) {
++ /* Set up the interrupt mask and enable interrupts */
++ bus->hostintmask = HOSTINTMASK;
++ w_sdreg32(bus, bus->hostintmask,
++ offsetof(struct sdpcmd_regs, hostintmask));
++
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
++ } else {
++ /* Disable F2 again */
++ sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
++ goto release;
++ }
++
++ if (brcmf_chip_sr_capable(bus->ci)) {
++ brcmf_sdio_sr_init(bus);
++ } else {
++ /* Restore previous clock setting */
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
++ saveclk, &err);
++ }
++
++ if (err == 0) {
++ err = brcmf_sdiod_intr_register(sdiodev);
++ if (err != 0)
++ brcmf_err("intr register failed:%d\n", err);
++ }
++
++ /* If we didn't come up, turn off backplane clock */
++ if (err != 0)
++ brcmf_sdio_clkctl(bus, CLK_NONE, false);
++
++ sdio_release_host(sdiodev->func[1]);
++
++ err = brcmf_bus_start(dev);
++ if (err != 0) {
++ brcmf_err("dongle is not responding\n");
++ goto fail;
++ }
++ return;
++
++release:
++ sdio_release_host(sdiodev->func[1]);
++fail:
++ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
++ device_release_driver(dev);
++}
++
+ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+ {
+ int ret;
+@@ -3925,7 +4049,8 @@
+ }
+
+ spin_lock_init(&bus->rxctl_lock);
+- spin_lock_init(&bus->txqlock);
++ spin_lock_init(&bus->txq_lock);
++ sema_init(&bus->tx_seq_lock, 1);
+ init_waitqueue_head(&bus->ctrl_wait);
+ init_waitqueue_head(&bus->dcmd_resp_wait);
+
+@@ -3961,8 +4086,13 @@
+ goto fail;
+ }
+
++ /* Query the F2 block size, set roundup accordingly */
++ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
++ bus->roundup = min(max_roundup, bus->blocksize);
++
+ /* Allocate buffers */
+ if (bus->sdiodev->bus_if->maxctl) {
++ bus->sdiodev->bus_if->maxctl += bus->roundup;
+ bus->rxblen =
+ roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
+ ALIGNMENT) + bus->head_align;
+@@ -3990,10 +4120,6 @@
+ bus->idletime = BRCMF_IDLE_INTERVAL;
+ bus->idleclock = BRCMF_IDLE_ACTIVE;
+
+- /* Query the F2 block size, set roundup accordingly */
+- bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+- bus->roundup = min(max_roundup, bus->blocksize);
+-
+ /* SR state */
+ bus->sleeping = false;
+ bus->sr_enabled = false;
+@@ -4001,10 +4127,14 @@
+ brcmf_sdio_debugfs_create(bus);
+ brcmf_dbg(INFO, "completed!!\n");
+
+- /* if firmware path present try to download and bring up bus */
+- ret = brcmf_bus_start(bus->sdiodev->dev);
++ ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
++ brcmf_sdio_get_fwname(bus->ci,
++ BRCMF_FIRMWARE_BIN),
++ brcmf_sdio_get_fwname(bus->ci,
++ BRCMF_FIRMWARE_NVRAM),
++ brcmf_sdio_firmware_callback);
+ if (ret != 0) {
+- brcmf_err("dongle is not responding\n");
++ brcmf_err("async firmware request failed: %d\n", ret);
+ goto fail;
+ }
+
+@@ -4024,14 +4154,12 @@
+ /* De-register interrupt handler */
+ brcmf_sdiod_intr_unregister(bus->sdiodev);
+
++ brcmf_detach(bus->sdiodev->dev);
++
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+
+- if (bus->sdiodev->bus_if->drvr) {
+- brcmf_detach(bus->sdiodev->dev);
+- }
+-
+ if (bus->ci) {
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+@@ -4042,12 +4170,11 @@
+ * all necessary cores.
+ */
+ msleep(20);
+- brcmf_sdio_chip_enter_download(bus->sdiodev,
+- bus->ci);
++ brcmf_chip_enter_download(bus->ci);
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
+ sdio_release_host(bus->sdiodev->func[1]);
+ }
+- brcmf_sdio_chip_detach(&bus->ci);
++ brcmf_chip_detach(bus->ci);
+ }
+
+ kfree(bus->rxbuf);
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/firmware.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/firmware.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/firmware.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,332 @@
++/*
++ * Copyright (c) 2013 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/firmware.h>
++
++#include "dhd_dbg.h"
++#include "firmware.h"
++
++enum nvram_parser_state {
++ IDLE,
++ KEY,
++ VALUE,
++ COMMENT,
++ END
++};
++
++/**
++ * struct nvram_parser - internal info for parser.
++ *
++ * @state: current parser state.
++ * @fwnv: input buffer being parsed.
++ * @nvram: output buffer with parse result.
+ * @nvram_len: length of parse result.
++ * @line: current line.
++ * @column: current column in line.
++ * @pos: byte offset in input buffer.
++ * @entry: start position of key,value entry.
++ */
++struct nvram_parser {
++ enum nvram_parser_state state;
++ const struct firmware *fwnv;
++ u8 *nvram;
++ u32 nvram_len;
++ u32 line;
++ u32 column;
++ u32 pos;
++ u32 entry;
++};
++
++static bool is_nvram_char(char c)
++{
++ /* comment marker excluded */
++ if (c == '#')
++ return false;
++
++ /* key and value may have any other readable character */
++ return (c > 0x20 && c < 0x7f);
++}
++
++static bool is_whitespace(char c)
++{
++ return (c == ' ' || c == '\r' || c == '\n' || c == '\t');
++}
++
++static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
++{
++ char c;
++
++ c = nvp->fwnv->data[nvp->pos];
++ if (c == '\n')
++ return COMMENT;
++ if (is_whitespace(c))
++ goto proceed;
++ if (c == '#')
++ return COMMENT;
++ if (is_nvram_char(c)) {
++ nvp->entry = nvp->pos;
++ return KEY;
++ }
++ brcmf_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n",
++ nvp->line, nvp->column);
++proceed:
++ nvp->column++;
++ nvp->pos++;
++ return IDLE;
++}
++
++static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
++{
++ enum nvram_parser_state st = nvp->state;
++ char c;
++
++ c = nvp->fwnv->data[nvp->pos];
++ if (c == '=') {
++ st = VALUE;
++ } else if (!is_nvram_char(c)) {
++ brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
++ nvp->line, nvp->column);
++ return COMMENT;
++ }
++
++ nvp->column++;
++ nvp->pos++;
++ return st;
++}
++
++static enum nvram_parser_state
++brcmf_nvram_handle_value(struct nvram_parser *nvp)
++{
++ char c;
++ char *skv;
++ char *ekv;
++ u32 cplen;
++
++ c = nvp->fwnv->data[nvp->pos];
++ if (!is_nvram_char(c)) {
++ /* key,value pair complete */
++ ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
++ skv = (u8 *)&nvp->fwnv->data[nvp->entry];
++ cplen = ekv - skv;
++ /* copy to output buffer */
++ memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
++ nvp->nvram_len += cplen;
++ nvp->nvram[nvp->nvram_len] = '\0';
++ nvp->nvram_len++;
++ return IDLE;
++ }
++ nvp->pos++;
++ nvp->column++;
++ return VALUE;
++}
++
++static enum nvram_parser_state
++brcmf_nvram_handle_comment(struct nvram_parser *nvp)
++{
++ char *eol, *sol;
++
++ sol = (char *)&nvp->fwnv->data[nvp->pos];
++ eol = strchr(sol, '\n');
++ if (eol == NULL)
++ return END;
++
++ /* eat all moving to next line */
++ nvp->line++;
++ nvp->column = 1;
++ nvp->pos += (eol - sol) + 1;
++ return IDLE;
++}
++
++static enum nvram_parser_state brcmf_nvram_handle_end(struct nvram_parser *nvp)
++{
++ /* final state */
++ return END;
++}
++
++static enum nvram_parser_state
++(*nv_parser_states[])(struct nvram_parser *nvp) = {
++ brcmf_nvram_handle_idle,
++ brcmf_nvram_handle_key,
++ brcmf_nvram_handle_value,
++ brcmf_nvram_handle_comment,
++ brcmf_nvram_handle_end
++};
++
++static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
++ const struct firmware *nv)
++{
++ memset(nvp, 0, sizeof(*nvp));
++ nvp->fwnv = nv;
++ /* Alloc for extra 0 byte + roundup by 4 + length field */
++ nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
++ if (!nvp->nvram)
++ return -ENOMEM;
++
++ nvp->line = 1;
++ nvp->column = 1;
++ return 0;
++}
++
+/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
++ * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
++ * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
++ * End of buffer is completed with token identifying length of buffer.
++ */
++static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
++{
++ struct nvram_parser nvp;
++ u32 pad;
++ u32 token;
++ __le32 token_le;
++
++ if (brcmf_init_nvram_parser(&nvp, nv) < 0)
++ return NULL;
++
++ while (nvp.pos < nv->size) {
++ nvp.state = nv_parser_states[nvp.state](&nvp);
++ if (nvp.state == END)
++ break;
++ }
++ pad = nvp.nvram_len;
++ *new_length = roundup(nvp.nvram_len + 1, 4);
++ while (pad != *new_length) {
++ nvp.nvram[pad] = 0;
++ pad++;
++ }
++
++ token = *new_length / 4;
++ token = (~token << 16) | (token & 0x0000FFFF);
++ token_le = cpu_to_le32(token);
++
++ memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le));
++ *new_length += sizeof(token_le);
++
++ return nvp.nvram;
++}
++
++void brcmf_fw_nvram_free(void *nvram)
++{
++ kfree(nvram);
++}
++
++struct brcmf_fw {
++ struct device *dev;
++ u16 flags;
++ const struct firmware *code;
++ const char *nvram_name;
++ void (*done)(struct device *dev, const struct firmware *fw,
++ void *nvram_image, u32 nvram_len);
++};
++
++static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
++{
++ struct brcmf_fw *fwctx = ctx;
++ u32 nvram_length = 0;
++ void *nvram = NULL;
++
++ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
++ if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
++ goto fail;
++
++ if (fw) {
++ nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
++ release_firmware(fw);
++ if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
++ goto fail;
++ }
++
++ fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
++ kfree(fwctx);
++ return;
++
++fail:
++ brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
++ if (fwctx->code)
++ release_firmware(fwctx->code);
++ device_release_driver(fwctx->dev);
++ kfree(fwctx);
++}
++
++static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
++{
++ struct brcmf_fw *fwctx = ctx;
++ int ret;
++
++ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
++ if (!fw)
++ goto fail;
++
++ /* only requested code so done here */
++ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
++ fwctx->done(fwctx->dev, fw, NULL, 0);
++ kfree(fwctx);
++ return;
++ }
++ fwctx->code = fw;
++ ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
++ fwctx->dev, GFP_KERNEL, fwctx,
++ brcmf_fw_request_nvram_done);
++
++ if (!ret)
++ return;
++
++ /* when nvram is optional call .done() callback here */
++ if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
++ fwctx->done(fwctx->dev, fw, NULL, 0);
++ kfree(fwctx);
++ return;
++ }
++
++ /* failed nvram request */
++ release_firmware(fw);
++fail:
++ brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
++ device_release_driver(fwctx->dev);
++ kfree(fwctx);
++}
++
++int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
++ const char *code, const char *nvram,
++ void (*fw_cb)(struct device *dev,
++ const struct firmware *fw,
++ void *nvram_image, u32 nvram_len))
++{
++ struct brcmf_fw *fwctx;
++
++ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
++ if (!fw_cb || !code)
++ return -EINVAL;
++
++ if ((flags & BRCMF_FW_REQUEST_NVRAM) && !nvram)
++ return -EINVAL;
++
++ fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL);
++ if (!fwctx)
++ return -ENOMEM;
++
++ fwctx->dev = dev;
++ fwctx->flags = flags;
++ fwctx->done = fw_cb;
++ if (flags & BRCMF_FW_REQUEST_NVRAM)
++ fwctx->nvram_name = nvram;
++
++ return request_firmware_nowait(THIS_MODULE, true, code, dev,
++ GFP_KERNEL, fwctx,
++ brcmf_fw_request_code_done);
++}
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/firmware.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/firmware.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/firmware.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,36 @@
++/*
++ * Copyright (c) 2013 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#ifndef BRCMFMAC_FIRMWARE_H
++#define BRCMFMAC_FIRMWARE_H
++
++#define BRCMF_FW_REQUEST 0x000F
++#define BRCMF_FW_REQUEST_NVRAM 0x0001
++#define BRCMF_FW_REQ_FLAGS 0x00F0
++#define BRCMF_FW_REQ_NV_OPTIONAL 0x0010
++
++void brcmf_fw_nvram_free(void *nvram);
++/*
++ * Request firmware(s) asynchronously. When the asynchronous request
++ * fails it will not use the callback, but call device_release_driver()
++ * instead which will call the driver .remove() callback.
++ */
++int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
++ const char *code, const char *nvram,
++ void (*fw_cb)(struct device *dev,
++ const struct firmware *fw,
++ void *nvram_image, u32 nvram_len));
++
++#endif /* BRCMFMAC_FIRMWARE_H */
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/fwil.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/fwil.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/fwil.c 2015-05-06 12:05:42.000000000 -0500
+@@ -54,7 +54,7 @@
+ if (err >= 0)
+ err = 0;
+ else
+- brcmf_err("Failed err=%d\n", err);
++ brcmf_dbg(FIL, "Failed err=%d\n", err);
+
+ return err;
+ }
+@@ -124,7 +124,8 @@
+ }
+
+ static u32
+-brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
++brcmf_create_iovar(char *name, const char *data, u32 datalen,
++ char *buf, u32 buflen)
+ {
+ u32 len;
+
+@@ -144,7 +145,7 @@
+
+
+ s32
+-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
++brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+ u32 len)
+ {
+ struct brcmf_pub *drvr = ifp->drvr;
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/fwil.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/fwil.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/fwil.h 2015-05-06 12:05:42.000000000 -0500
+@@ -83,7 +83,7 @@
+ s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
+ s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+
+-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
++s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+ u32 len);
+ s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h 2015-05-06 12:05:42.000000000 -0500
+@@ -48,6 +48,19 @@
+
+ #define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
+
++/* OBSS Coex Auto/On/Off */
++#define BRCMF_OBSS_COEX_AUTO (-1)
++#define BRCMF_OBSS_COEX_OFF 0
++#define BRCMF_OBSS_COEX_ON 1
++
++/* join preference types for join_pref iovar */
++enum brcmf_join_pref_types {
++ BRCMF_JOIN_PREF_RSSI = 1,
++ BRCMF_JOIN_PREF_WPA,
++ BRCMF_JOIN_PREF_BAND,
++ BRCMF_JOIN_PREF_RSSI_DELTA,
++};
++
+ enum brcmf_fil_p2p_if_types {
+ BRCMF_FIL_P2P_IF_CLIENT,
+ BRCMF_FIL_P2P_IF_GO,
+@@ -87,6 +100,11 @@
+ __le32 enable;
+ };
+
++struct brcmf_fil_bwcap_le {
++ __le32 band;
++ __le32 bw_cap;
++};
++
+ /**
+ * struct tdls_iovar - common structure for tdls iovars.
+ *
+@@ -272,6 +290,22 @@
+ __le16 chanspec_list[1];
+ };
+
++/**
+ * struct brcmf_join_pref_params - parameters for preferred join selection.
++ *
++ * @type: preference type (see enum brcmf_join_pref_types).
++ * @len: length of bytes following (currently always 2).
++ * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA).
++ * @band: band to which selection preference applies.
++ * This is used if @type is BAND or RSSI_DELTA.
++ */
++struct brcmf_join_pref_params {
++ u8 type;
++ u8 len;
++ u8 rssi_gain;
++ u8 band;
++};
++
+ /* used for join with or without a specific bssid and channel list */
+ struct brcmf_join_params {
+ struct brcmf_ssid_le ssid_le;
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c 2015-05-06 12:05:42.000000000 -0500
+@@ -476,6 +476,7 @@
+ bool bus_flow_blocked;
+ bool creditmap_received;
+ u8 mode;
++ bool avoid_queueing;
+ };
+
+ /*
+@@ -1369,13 +1370,12 @@
+ }
+
+ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
+- struct sk_buff *skb, u32 genbit,
+- u16 seq)
++ struct sk_buff *skb, u8 ifidx,
++ u32 genbit, u16 seq)
+ {
+ struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+ u32 hslot;
+ int ret;
+- u8 ifidx;
+
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+
+@@ -1389,29 +1389,21 @@
+
+ entry->generation = genbit;
+
+- ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
+- if (ret == 0) {
+- brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+- brcmf_skbcb(skb)->htod_seq = seq;
+- if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+- brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+- brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+- } else {
+- brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
+- }
+- ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
+- skb);
++ brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
++ brcmf_skbcb(skb)->htod_seq = seq;
++ if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
++ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
++ brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
++ } else {
++ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
+ }
++ ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
+
+ if (ret != 0) {
+- /* suppress q is full or hdrpull failed, drop this packet */
+- brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
+- true);
++ /* suppress q is full drop this packet */
++ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
+ } else {
+- /*
+- * Mark suppressed to avoid a double free during
+- * wlfc cleanup
+- */
++ /* Mark suppressed to avoid a double free during wlfc cleanup */
+ brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
+ }
+
+@@ -1428,6 +1420,7 @@
+ struct sk_buff *skb;
+ struct brcmf_skbuff_cb *skcb;
+ struct brcmf_fws_mac_descriptor *entry = NULL;
++ u8 ifidx;
+
+ brcmf_dbg(DATA, "flags %d\n", flags);
+
+@@ -1476,12 +1469,15 @@
+ }
+ brcmf_fws_macdesc_return_req_credit(skb);
+
++ if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
++ brcmu_pkt_buf_free_skb(skb);
++ return -EINVAL;
++ }
+ if (!remove_from_hanger)
+- ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit,
+- seq);
+-
++ ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
++ genbit, seq);
+ if (remove_from_hanger || ret)
+- brcmf_txfinalize(fws->drvr, skb, true);
++ brcmf_txfinalize(fws->drvr, skb, ifidx, true);
+
+ return 0;
+ }
+@@ -1868,7 +1864,7 @@
+ struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ int fifo = BRCMF_FWS_FIFO_BCMC;
+ bool multicast = is_multicast_ether_addr(eh->h_dest);
+- bool pae = eh->h_proto == htons(ETH_P_PAE);
++ int rc = 0;
+
+ brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
+ /* determine the priority */
+@@ -1876,8 +1872,13 @@
+ skb->priority = cfg80211_classify8021d(skb, NULL);
+
+ drvr->tx_multicast += !!multicast;
+- if (pae)
+- atomic_inc(&ifp->pend_8021x_cnt);
++
++ if (fws->avoid_queueing) {
++ rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
++ if (rc < 0)
++ brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
++ return rc;
++ }
+
+ /* set control buffer information */
+ skcb->if_flags = 0;
+@@ -1899,15 +1900,12 @@
+ brcmf_fws_schedule_deq(fws);
+ } else {
+ brcmf_err("drop skb: no hanger slot\n");
+- if (pae) {
+- atomic_dec(&ifp->pend_8021x_cnt);
+- if (waitqueue_active(&ifp->pend_8021x_wait))
+- wake_up(&ifp->pend_8021x_wait);
+- }
+- brcmu_pkt_buf_free_skb(skb);
++ brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
++ rc = -ENOMEM;
+ }
+ brcmf_fws_unlock(fws);
+- return 0;
++
++ return rc;
+ }
+
+ void brcmf_fws_reset_interface(struct brcmf_if *ifp)
+@@ -1982,7 +1980,8 @@
+ ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
+ brcmf_fws_lock(fws);
+ if (ret < 0)
+- brcmf_txfinalize(drvr, skb, false);
++ brcmf_txfinalize(drvr, skb, ifidx,
++ false);
+ if (fws->bus_flow_blocked)
+ break;
+ }
+@@ -2039,6 +2038,13 @@
+ fws->drvr = drvr;
+ fws->fcmode = fcmode;
+
++ if ((drvr->bus_if->always_use_fws_queue == false) &&
++ (fcmode == BRCMF_FWS_FCMODE_NONE)) {
++ fws->avoid_queueing = true;
++ brcmf_dbg(INFO, "FWS queueing will be avoided\n");
++ return 0;
++ }
++
+ fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
+ if (fws->fws_wq == NULL) {
+ brcmf_err("workqueue creation failed\n");
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/Makefile linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -24,6 +24,7 @@
+ obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
+ brcmfmac-objs += \
+ wl_cfg80211.o \
++ chip.o \
+ fwil.o \
+ fweh.o \
+ fwsignal.o \
+@@ -32,12 +33,11 @@
+ bcdc.o \
+ dhd_common.o \
+ dhd_linux.o \
+- nvram.o \
++ firmware.o \
+ btcoex.o
+ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
+ dhd_sdio.o \
+- bcmsdh.o \
+- sdio_chip.o
++ bcmsdh.o
+ brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
+ usb.o
+ brcmfmac-$(CONFIG_BRCMDBG) += \
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/nvram.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/nvram.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/nvram.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,94 +0,0 @@
+-/*
+- * Copyright (c) 2013 Broadcom Corporation
+- *
+- * Permission to use, copy, modify, and/or distribute this software for any
+- * purpose with or without fee is hereby granted, provided that the above
+- * copyright notice and this permission notice appear in all copies.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/slab.h>
+-#include <linux/firmware.h>
+-
+-#include "nvram.h"
+-
+-/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a file
+- * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
+- * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
+- * End of buffer is completed with token identifying length of buffer.
+- */
+-void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
+-{
+- u8 *nvram;
+- u32 i;
+- u32 len;
+- u32 column;
+- u8 val;
+- bool comment;
+- u32 token;
+- __le32 token_le;
+-
+- /* Alloc for extra 0 byte + roundup by 4 + length field */
+- nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
+- if (!nvram)
+- return NULL;
+-
+- len = 0;
+- column = 0;
+- comment = false;
+- for (i = 0; i < nv->size; i++) {
+- val = nv->data[i];
+- if (val == 0)
+- break;
+- if (val == '\r')
+- continue;
+- if (comment && (val != '\n'))
+- continue;
+- comment = false;
+- if (val == '#') {
+- comment = true;
+- continue;
+- }
+- if (val == '\n') {
+- if (column == 0)
+- continue;
+- nvram[len] = 0;
+- len++;
+- column = 0;
+- continue;
+- }
+- nvram[len] = val;
+- len++;
+- column++;
+- }
+- column = len;
+- *new_length = roundup(len + 1, 4);
+- while (column != *new_length) {
+- nvram[column] = 0;
+- column++;
+- }
+-
+- token = *new_length / 4;
+- token = (~token << 16) | (token & 0x0000FFFF);
+- token_le = cpu_to_le32(token);
+-
+- memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
+- *new_length += sizeof(token_le);
+-
+- return nvram;
+-}
+-
+-void brcmf_nvram_free(void *nvram)
+-{
+- kfree(nvram);
+-}
+-
+-
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/nvram.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/nvram.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/nvram.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,24 +0,0 @@
+-/*
+- * Copyright (c) 2013 Broadcom Corporation
+- *
+- * Permission to use, copy, modify, and/or distribute this software for any
+- * purpose with or without fee is hereby granted, provided that the above
+- * copyright notice and this permission notice appear in all copies.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+- */
+-#ifndef BRCMFMAC_NVRAM_H
+-#define BRCMFMAC_NVRAM_H
+-
+-
+-void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length);
+-void brcmf_nvram_free(void *nvram);
+-
+-
+-#endif /* BRCMFMAC_NVRAM_H */
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/p2p.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/p2p.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/p2p.c 2015-05-06 12:05:42.000000000 -0500
+@@ -797,7 +797,8 @@
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
+- } else if (dev != NULL && vif->mode == WL_MODE_AP) {
++ } else if (dev != NULL &&
++ vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
+ /* If you are already a GO, then do SEARCH only */
+ brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
+ search_state = WL_P2P_DISC_ST_SEARCH;
+@@ -2256,7 +2257,6 @@
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_cfg80211_vif *vif;
+ enum brcmf_fil_p2p_if_types iftype;
+- enum wl_mode mode;
+ int err;
+
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+@@ -2267,11 +2267,9 @@
+ switch (type) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ iftype = BRCMF_FIL_P2P_IF_CLIENT;
+- mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ iftype = BRCMF_FIL_P2P_IF_GO;
+- mode = WL_MODE_AP;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,973 +0,0 @@
+-/*
+- * Copyright (c) 2011 Broadcom Corporation
+- *
+- * Permission to use, copy, modify, and/or distribute this software for any
+- * purpose with or without fee is hereby granted, provided that the above
+- * copyright notice and this permission notice appear in all copies.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+- */
+-/* ***** SDIO interface chip backplane handle functions ***** */
+-
+-#include <linux/types.h>
+-#include <linux/netdevice.h>
+-#include <linux/mmc/card.h>
+-#include <linux/mmc/sdio_func.h>
+-#include <linux/mmc/sdio_ids.h>
+-#include <linux/ssb/ssb_regs.h>
+-#include <linux/bcma/bcma.h>
+-
+-#include <chipcommon.h>
+-#include <brcm_hw_ids.h>
+-#include <brcmu_wifi.h>
+-#include <brcmu_utils.h>
+-#include <soc.h>
+-#include "dhd_dbg.h"
+-#include "sdio_host.h"
+-#include "sdio_chip.h"
+-
+-/* chip core base & ramsize */
+-/* bcm4329 */
+-/* SDIO device core, ID 0x829 */
+-#define BCM4329_CORE_BUS_BASE 0x18011000
+-/* internal memory core, ID 0x80e */
+-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+-/* ARM Cortex M3 core, ID 0x82a */
+-#define BCM4329_CORE_ARM_BASE 0x18002000
+-#define BCM4329_RAMSIZE 0x48000
+-
+-/* bcm43143 */
+-/* SDIO device core */
+-#define BCM43143_CORE_BUS_BASE 0x18002000
+-/* internal memory core */
+-#define BCM43143_CORE_SOCRAM_BASE 0x18004000
+-/* ARM Cortex M3 core, ID 0x82a */
+-#define BCM43143_CORE_ARM_BASE 0x18003000
+-#define BCM43143_RAMSIZE 0x70000
+-
+-/* All D11 cores, ID 0x812 */
+-#define BCM43xx_CORE_D11_BASE 0x18001000
+-
+-#define SBCOREREV(sbidh) \
+- ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+- ((sbidh) & SSB_IDHIGH_RCLO))
+-
+-/* SOC Interconnect types (aka chip types) */
+-#define SOCI_SB 0
+-#define SOCI_AI 1
+-
+-/* EROM CompIdentB */
+-#define CIB_REV_MASK 0xff000000
+-#define CIB_REV_SHIFT 24
+-
+-/* ARM CR4 core specific control flag bits */
+-#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+-
+-/* D11 core specific control flag bits */
+-#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+-#define D11_BCMA_IOCTL_PHYRESET 0x0008
+-
+-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+-/* SDIO Pad drive strength to select value mappings */
+-struct sdiod_drive_str {
+- u8 strength; /* Pad Drive Strength in mA */
+- u8 sel; /* Chip-specific select value */
+-};
+-/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+-static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+- {32, 0x6},
+- {26, 0x7},
+- {22, 0x4},
+- {16, 0x5},
+- {12, 0x2},
+- {8, 0x3},
+- {4, 0x0},
+- {0, 0x1}
+-};
+-
+-/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+-static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+- {6, 0x7},
+- {5, 0x6},
+- {4, 0x5},
+- {3, 0x4},
+- {2, 0x2},
+- {1, 0x1},
+- {0, 0x0}
+-};
+-
+-/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+-static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+- {3, 0x3},
+- {2, 0x2},
+- {1, 0x1},
+- {0, 0x0} };
+-
+-/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
+-static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
+- {16, 0x7},
+- {12, 0x5},
+- {8, 0x3},
+- {4, 0x1}
+-};
+-
+-u8
+-brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
+-{
+- u8 idx;
+-
+- for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
+- if (coreid == ci->c_inf[idx].id)
+- return idx;
+-
+- return BRCMF_MAX_CORENUM;
+-}
+-
+-static u32
+-brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid)
+-{
+- u32 regdata;
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbidhigh),
+- NULL);
+- return SBCOREREV(regdata);
+-}
+-
+-static u32
+-brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid)
+-{
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+-
+- return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+-}
+-
+-static bool
+-brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid)
+-{
+- u32 regdata;
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return false;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- NULL);
+- regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+- SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+- return (SSB_TMSLOW_CLOCK == regdata);
+-}
+-
+-static bool
+-brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid)
+-{
+- u32 regdata;
+- u8 idx;
+- bool ret;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return false;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+- NULL);
+- ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+- NULL);
+- ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+-
+- return ret;
+-}
+-
+-static void
+-brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits)
+-{
+- u32 regdata, base;
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- base = ci->c_inf[idx].base;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+- if (regdata & SSB_TMSLOW_RESET)
+- return;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+- if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
+- /*
+- * set target reject and spin until busy is clear
+- * (preserve core-specific bits)
+- */
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatelow), NULL);
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+- regdata | SSB_TMSLOW_REJECT, NULL);
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatelow), NULL);
+- udelay(1);
+- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatehigh),
+- NULL) &
+- SSB_TMSHIGH_BUSY), 100000);
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatehigh),
+- NULL);
+- if (regdata & SSB_TMSHIGH_BUSY)
+- brcmf_err("core state still busy\n");
+-
+- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+- NULL);
+- if (regdata & SSB_IDLOW_INITIATOR) {
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbimstate),
+- NULL);
+- regdata |= SSB_IMSTATE_REJECT;
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+- regdata, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbimstate),
+- NULL);
+- udelay(1);
+- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbimstate),
+- NULL) &
+- SSB_IMSTATE_BUSY), 100000);
+- }
+-
+- /* set reset and reject while enabling the clocks */
+- regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+- SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+- regdata, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatelow), NULL);
+- udelay(10);
+-
+- /* clear the initiator reject bit */
+- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+- NULL);
+- if (regdata & SSB_IDLOW_INITIATOR) {
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbimstate),
+- NULL);
+- regdata &= ~SSB_IMSTATE_REJECT;
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+- regdata, NULL);
+- }
+- }
+-
+- /* leave reset and reject asserted */
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
+- udelay(1);
+-}
+-
+-static void
+-brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits)
+-{
+- u8 idx;
+- u32 regdata;
+- u32 wrapbase;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return;
+-
+- wrapbase = ci->c_inf[idx].wrapbase;
+-
+- /* if core is already in reset, skip reset */
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
+- if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+- goto post_reset_config;
+-
+- /* configure reset */
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+-
+- /* put in reset */
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
+- BCMA_RESET_CTL_RESET, NULL);
+- usleep_range(10, 20);
+-
+- /* wait till reset is 1 */
+- SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
+- BCMA_RESET_CTL_RESET, 300);
+-
+-post_reset_config:
+- /* post reset configure */
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+-}
+-
+-static void
+-brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits, u32 post_resetbits)
+-{
+- u32 regdata;
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return;
+-
+- /*
+- * Must do the disable sequence first to work for
+- * arbitrary current core state.
+- */
+- brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
+- in_resetbits);
+-
+- /*
+- * Now do the initialization sequence.
+- * set reset while enabling the clock and
+- * forcing them on throughout the core
+- */
+- brcmf_sdiod_regwl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
+- NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- NULL);
+- udelay(1);
+-
+- /* clear any serror */
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+- NULL);
+- if (regdata & SSB_TMSHIGH_SERR)
+- brcmf_sdiod_regwl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+- 0, NULL);
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbimstate),
+- NULL);
+- if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
+- brcmf_sdiod_regwl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbimstate),
+- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
+- NULL);
+-
+- /* clear reset and allow it to propagate throughout the core */
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- NULL);
+- udelay(1);
+-
+- /* leave clock enabled */
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- SSB_TMSLOW_CLOCK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- NULL);
+- udelay(1);
+-}
+-
+-static void
+-brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits, u32 post_resetbits)
+-{
+- u8 idx;
+- u32 regdata;
+- u32 wrapbase;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return;
+-
+- wrapbase = ci->c_inf[idx].wrapbase;
+-
+- /* must disable first to work for arbitrary current core state */
+- brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
+- in_resetbits);
+-
+- while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
+- BCMA_RESET_CTL_RESET) {
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
+- usleep_range(40, 60);
+- }
+-
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
+- BCMA_IOCTL_CLK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+-}
+-
+-#ifdef DEBUG
+-/* safety check for chipinfo */
+-static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
+-{
+- u8 core_idx;
+-
+- /* check RAM core presence for ARM CM3 core */
+- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
+- if (BRCMF_MAX_CORENUM != core_idx) {
+- core_idx = brcmf_sdio_chip_getinfidx(ci,
+- BCMA_CORE_INTERNAL_MEM);
+- if (BRCMF_MAX_CORENUM == core_idx) {
+- brcmf_err("RAM core not provided with ARM CM3 core\n");
+- return -ENODEV;
+- }
+- }
+-
+- /* check RAM base for ARM CR4 core */
+- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
+- if (BRCMF_MAX_CORENUM != core_idx) {
+- if (ci->rambase == 0) {
+- brcmf_err("RAM base not provided with ARM CR4 core\n");
+- return -ENOMEM;
+- }
+- }
+-
+- return 0;
+-}
+-#else /* DEBUG */
+-static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
+-{
+- return 0;
+-}
+-#endif
+-
+-static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u32 regdata;
+- u32 socitype;
+-
+- /* Get CC core rev
+- * Chipid is assume to be at offset 0 from SI_ENUM_BASE
+- * For different chiptypes or old sdio hosts w/o chipcommon,
+- * other ways of recognition should be added here.
+- */
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_CC_REG(SI_ENUM_BASE, chipid),
+- NULL);
+- ci->chip = regdata & CID_ID_MASK;
+- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+- if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+- ci->chiprev >= 2)
+- ci->chip = BCM4339_CHIP_ID;
+- socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+-
+- brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
+- socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
+-
+- if (socitype == SOCI_SB) {
+- if (ci->chip != BCM4329_CHIP_ID) {
+- brcmf_err("SB chip is not supported\n");
+- return -ENODEV;
+- }
+- ci->iscoreup = brcmf_sdio_sb_iscoreup;
+- ci->corerev = brcmf_sdio_sb_corerev;
+- ci->coredisable = brcmf_sdio_sb_coredisable;
+- ci->resetcore = brcmf_sdio_sb_resetcore;
+-
+- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+- ci->c_inf[0].base = SI_ENUM_BASE;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->ramsize = BCM4329_RAMSIZE;
+- } else if (socitype == SOCI_AI) {
+- ci->iscoreup = brcmf_sdio_ai_iscoreup;
+- ci->corerev = brcmf_sdio_ai_corerev;
+- ci->coredisable = brcmf_sdio_ai_coredisable;
+- ci->resetcore = brcmf_sdio_ai_resetcore;
+-
+- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+- ci->c_inf[0].base = SI_ENUM_BASE;
+-
+- /* Address of cores for new chips should be added here */
+- switch (ci->chip) {
+- case BCM43143_CHIP_ID:
+- ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
+- ci->c_inf[0].cib = 0x2b000000;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
+- ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
+- ci->c_inf[1].cib = 0x18000000;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
+- ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
+- ci->c_inf[2].cib = 0x14000000;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
+- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+- ci->c_inf[3].cib = 0x07000000;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = BCM43143_RAMSIZE;
+- break;
+- case BCM43241_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x2a084411;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18002000;
+- ci->c_inf[1].wrapbase = 0x18102000;
+- ci->c_inf[1].cib = 0x0e004211;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = 0x18004000;
+- ci->c_inf[2].wrapbase = 0x18104000;
+- ci->c_inf[2].cib = 0x14080401;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = 0x18003000;
+- ci->c_inf[3].wrapbase = 0x18103000;
+- ci->c_inf[3].cib = 0x07004211;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = 0x90000;
+- break;
+- case BCM4330_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x27004211;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18002000;
+- ci->c_inf[1].wrapbase = 0x18102000;
+- ci->c_inf[1].cib = 0x07004211;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = 0x18004000;
+- ci->c_inf[2].wrapbase = 0x18104000;
+- ci->c_inf[2].cib = 0x0d080401;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = 0x18003000;
+- ci->c_inf[3].wrapbase = 0x18103000;
+- ci->c_inf[3].cib = 0x03004211;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = 0x48000;
+- break;
+- case BCM4334_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x29004211;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18002000;
+- ci->c_inf[1].wrapbase = 0x18102000;
+- ci->c_inf[1].cib = 0x0d004211;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = 0x18004000;
+- ci->c_inf[2].wrapbase = 0x18104000;
+- ci->c_inf[2].cib = 0x13080401;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = 0x18003000;
+- ci->c_inf[3].wrapbase = 0x18103000;
+- ci->c_inf[3].cib = 0x07004211;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = 0x80000;
+- break;
+- case BCM4335_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x2b084411;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18005000;
+- ci->c_inf[1].wrapbase = 0x18105000;
+- ci->c_inf[1].cib = 0x0f004211;
+- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+- ci->c_inf[2].base = 0x18002000;
+- ci->c_inf[2].wrapbase = 0x18102000;
+- ci->c_inf[2].cib = 0x01084411;
+- ci->c_inf[3].id = BCMA_CORE_80211;
+- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+- ci->ramsize = 0xc0000;
+- ci->rambase = 0x180000;
+- break;
+- case BCM43362_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x27004211;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18002000;
+- ci->c_inf[1].wrapbase = 0x18102000;
+- ci->c_inf[1].cib = 0x0a004211;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = 0x18004000;
+- ci->c_inf[2].wrapbase = 0x18104000;
+- ci->c_inf[2].cib = 0x08080401;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = 0x18003000;
+- ci->c_inf[3].wrapbase = 0x18103000;
+- ci->c_inf[3].cib = 0x03004211;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = 0x3C000;
+- break;
+- case BCM4339_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x2e084411;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18005000;
+- ci->c_inf[1].wrapbase = 0x18105000;
+- ci->c_inf[1].cib = 0x15004211;
+- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+- ci->c_inf[2].base = 0x18002000;
+- ci->c_inf[2].wrapbase = 0x18102000;
+- ci->c_inf[2].cib = 0x04084411;
+- ci->c_inf[3].id = BCMA_CORE_80211;
+- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+- ci->ramsize = 0xc0000;
+- ci->rambase = 0x180000;
+- break;
+- default:
+- brcmf_err("AXI chip is not supported\n");
+- return -ENODEV;
+- }
+- } else {
+- brcmf_err("chip backplane type %u is not supported\n",
+- socitype);
+- return -ENODEV;
+- }
+-
+- return brcmf_sdio_chip_cichk(ci);
+-}
+-
+-static int
+-brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
+-{
+- int err = 0;
+- u8 clkval, clkset;
+-
+- /* Try forcing SDIO core to do ALPAvail request only */
+- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+- if (err) {
+- brcmf_err("error writing for HT off\n");
+- return err;
+- }
+-
+- /* If register supported, wait for ALPAvail and then force ALP */
+- /* This may take up to 15 milliseconds */
+- clkval = brcmf_sdiod_regrb(sdiodev,
+- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+-
+- if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+- brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+- clkset, clkval);
+- return -EACCES;
+- }
+-
+- SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+- !SBSDIO_ALPAV(clkval)),
+- PMU_MAX_TRANSITION_DLY);
+- if (!SBSDIO_ALPAV(clkval)) {
+- brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
+- clkval);
+- return -EBUSY;
+- }
+-
+- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+- udelay(65);
+-
+- /* Also, disable the extra SDIO pull-ups */
+- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+-
+- return 0;
+-}
+-
+-static void
+-brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u32 base = ci->c_inf[0].base;
+-
+- /* get chipcommon rev */
+- ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
+-
+- /* get chipcommon capabilites */
+- ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
+- CORE_CC_REG(base, capabilities),
+- NULL);
+-
+- /* get pmu caps & rev */
+- if (ci->c_inf[0].caps & CC_CAP_PMU) {
+- ci->pmucaps =
+- brcmf_sdiod_regrl(sdiodev,
+- CORE_CC_REG(base, pmucapabilities),
+- NULL);
+- ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
+- }
+-
+- ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
+-
+- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+- ci->c_inf[0].rev, ci->pmurev,
+- ci->c_inf[1].rev, ci->c_inf[1].id);
+-
+- /*
+- * Make sure any on-chip ARM is off (in case strapping is wrong),
+- * or downloaded code was already running.
+- */
+- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
+-}
+-
+-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip **ci_ptr)
+-{
+- int ret;
+- struct brcmf_chip *ci;
+-
+- brcmf_dbg(TRACE, "Enter\n");
+-
+- ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
+- if (!ci)
+- return -ENOMEM;
+-
+- ret = brcmf_sdio_chip_buscoreprep(sdiodev);
+- if (ret != 0)
+- goto err;
+-
+- ret = brcmf_sdio_chip_recognition(sdiodev, ci);
+- if (ret != 0)
+- goto err;
+-
+- brcmf_sdio_chip_buscoresetup(sdiodev, ci);
+-
+- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
+- 0, NULL);
+- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
+- 0, NULL);
+-
+- *ci_ptr = ci;
+- return 0;
+-
+-err:
+- kfree(ci);
+- return ret;
+-}
+-
+-void
+-brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
+-{
+- brcmf_dbg(TRACE, "Enter\n");
+-
+- kfree(*ci_ptr);
+- *ci_ptr = NULL;
+-}
+-
+-static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
+-{
+- const char *fmt;
+-
+- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+- snprintf(buf, len, fmt, chipid);
+- return buf;
+-}
+-
+-void
+-brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u32 drivestrength)
+-{
+- const struct sdiod_drive_str *str_tab = NULL;
+- u32 str_mask;
+- u32 str_shift;
+- char chn[8];
+- u32 base = ci->c_inf[0].base;
+- u32 i;
+- u32 drivestrength_sel = 0;
+- u32 cc_data_temp;
+- u32 addr;
+-
+- if (!(ci->c_inf[0].caps & CC_CAP_PMU))
+- return;
+-
+- switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+- case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+- str_tab = sdiod_drvstr_tab1_1v8;
+- str_mask = 0x00003800;
+- str_shift = 11;
+- break;
+- case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+- str_tab = sdiod_drvstr_tab6_1v8;
+- str_mask = 0x00001800;
+- str_shift = 11;
+- break;
+- case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+- /* note: 43143 does not support tristate */
+- i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
+- if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
+- str_tab = sdiod_drvstr_tab2_3v3;
+- str_mask = 0x00000007;
+- str_shift = 0;
+- } else
+- brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
+- brcmf_sdio_chip_name(ci->chip, chn, 8),
+- drivestrength);
+- break;
+- case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+- str_tab = sdiod_drive_strength_tab5_1v8;
+- str_mask = 0x00003800;
+- str_shift = 11;
+- break;
+- default:
+- brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+- brcmf_sdio_chip_name(ci->chip, chn, 8),
+- ci->chiprev, ci->pmurev);
+- break;
+- }
+-
+- if (str_tab != NULL) {
+- for (i = 0; str_tab[i].strength != 0; i++) {
+- if (drivestrength >= str_tab[i].strength) {
+- drivestrength_sel = str_tab[i].sel;
+- break;
+- }
+- }
+- addr = CORE_CC_REG(base, chipcontrol_addr);
+- brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+- cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+- cc_data_temp &= ~str_mask;
+- drivestrength_sel <<= str_shift;
+- cc_data_temp |= drivestrength_sel;
+- brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+-
+- brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
+- str_tab[i].strength, drivestrength, cc_data_temp);
+- }
+-}
+-
+-static void
+-brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
+- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
+- ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
+-}
+-
+-static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u8 core_idx;
+- u32 reg_addr;
+-
+- if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
+- brcmf_err("SOCRAM core is down after reset?\n");
+- return false;
+- }
+-
+- /* clear all interrupts */
+- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
+- reg_addr = ci->c_inf[core_idx].base;
+- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
+- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+-
+- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
+-
+- return true;
+-}
+-
+-static inline void
+-brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u8 idx;
+- u32 regdata;
+- u32 wrapbase;
+- idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
+-
+- if (idx == BRCMF_MAX_CORENUM)
+- return;
+-
+- wrapbase = ci->c_inf[idx].wrapbase;
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+- regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
+- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
+- ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
+- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
+-}
+-
+-static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u32 rstvec)
+-{
+- u8 core_idx;
+- u32 reg_addr;
+-
+- /* clear all interrupts */
+- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
+- reg_addr = ci->c_inf[core_idx].base;
+- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
+- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+-
+- /* Write reset vector to address 0 */
+- brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+- sizeof(rstvec));
+-
+- /* restore ARM */
+- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
+- 0, 0);
+-
+- return true;
+-}
+-
+-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u8 arm_core_idx;
+-
+- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
+- if (BRCMF_MAX_CORENUM != arm_core_idx) {
+- brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
+- return;
+- }
+-
+- brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
+-}
+-
+-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u32 rstvec)
+-{
+- u8 arm_core_idx;
+-
+- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
+- if (BRCMF_MAX_CORENUM != arm_core_idx)
+- return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
+-
+- return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
+-}
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,231 +0,0 @@
+-/*
+- * Copyright (c) 2011 Broadcom Corporation
+- *
+- * Permission to use, copy, modify, and/or distribute this software for any
+- * purpose with or without fee is hereby granted, provided that the above
+- * copyright notice and this permission notice appear in all copies.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+- */
+-
+-#ifndef _BRCMFMAC_SDIO_CHIP_H_
+-#define _BRCMFMAC_SDIO_CHIP_H_
+-
+-/*
+- * Core reg address translation.
+- * Both macro's returns a 32 bits byte address on the backplane bus.
+- */
+-#define CORE_CC_REG(base, field) \
+- (base + offsetof(struct chipcregs, field))
+-#define CORE_BUS_REG(base, field) \
+- (base + offsetof(struct sdpcmd_regs, field))
+-#define CORE_SB(base, field) \
+- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+-
+-/* SDIO function 1 register CHIPCLKCSR */
+-/* Force ALP request to backplane */
+-#define SBSDIO_FORCE_ALP 0x01
+-/* Force HT request to backplane */
+-#define SBSDIO_FORCE_HT 0x02
+-/* Force ILP request to backplane */
+-#define SBSDIO_FORCE_ILP 0x04
+-/* Make ALP ready (power up xtal) */
+-#define SBSDIO_ALP_AVAIL_REQ 0x08
+-/* Make HT ready (power up PLL) */
+-#define SBSDIO_HT_AVAIL_REQ 0x10
+-/* Squelch clock requests from HW */
+-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+-/* Status: ALP is ready */
+-#define SBSDIO_ALP_AVAIL 0x40
+-/* Status: HT is ready */
+-#define SBSDIO_HT_AVAIL 0x80
+-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+-#define SBSDIO_CLKAV(regval, alponly) \
+- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+-
+-#define BRCMF_MAX_CORENUM 6
+-
+-struct brcmf_core {
+- u16 id;
+- u16 rev;
+- u32 base;
+- u32 wrapbase;
+- u32 caps;
+- u32 cib;
+-};
+-
+-struct brcmf_chip {
+- u32 chip;
+- u32 chiprev;
+- /* core info */
+- /* always put chipcommon core at 0, bus core at 1 */
+- struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
+- u32 pmurev;
+- u32 pmucaps;
+- u32 ramsize;
+- u32 rambase;
+- u32 rst_vec; /* reset vertor for ARM CR4 core */
+-
+- bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
+- u16 coreid);
+- u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
+- u16 coreid);
+- void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits);
+- void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits, u32 post_resetbits);
+-};
+-
+-struct sbconfig {
+- u32 PAD[2];
+- u32 sbipsflag; /* initiator port ocp slave flag */
+- u32 PAD[3];
+- u32 sbtpsflag; /* target port ocp slave flag */
+- u32 PAD[11];
+- u32 sbtmerrloga; /* (sonics >= 2.3) */
+- u32 PAD;
+- u32 sbtmerrlog; /* (sonics >= 2.3) */
+- u32 PAD[3];
+- u32 sbadmatch3; /* address match3 */
+- u32 PAD;
+- u32 sbadmatch2; /* address match2 */
+- u32 PAD;
+- u32 sbadmatch1; /* address match1 */
+- u32 PAD[7];
+- u32 sbimstate; /* initiator agent state */
+- u32 sbintvec; /* interrupt mask */
+- u32 sbtmstatelow; /* target state */
+- u32 sbtmstatehigh; /* target state */
+- u32 sbbwa0; /* bandwidth allocation table0 */
+- u32 PAD;
+- u32 sbimconfiglow; /* initiator configuration */
+- u32 sbimconfighigh; /* initiator configuration */
+- u32 sbadmatch0; /* address match0 */
+- u32 PAD;
+- u32 sbtmconfiglow; /* target configuration */
+- u32 sbtmconfighigh; /* target configuration */
+- u32 sbbconfig; /* broadcast configuration */
+- u32 PAD;
+- u32 sbbstate; /* broadcast state */
+- u32 PAD[3];
+- u32 sbactcnfg; /* activate configuration */
+- u32 PAD[3];
+- u32 sbflagst; /* current sbflags */
+- u32 PAD[3];
+- u32 sbidlow; /* identification */
+- u32 sbidhigh; /* identification */
+-};
+-
+-/* sdio core registers */
+-struct sdpcmd_regs {
+- u32 corecontrol; /* 0x00, rev8 */
+- u32 corestatus; /* rev8 */
+- u32 PAD[1];
+- u32 biststatus; /* rev8 */
+-
+- /* PCMCIA access */
+- u16 pcmciamesportaladdr; /* 0x010, rev8 */
+- u16 PAD[1];
+- u16 pcmciamesportalmask; /* rev8 */
+- u16 PAD[1];
+- u16 pcmciawrframebc; /* rev8 */
+- u16 PAD[1];
+- u16 pcmciaunderflowtimer; /* rev8 */
+- u16 PAD[1];
+-
+- /* interrupt */
+- u32 intstatus; /* 0x020, rev8 */
+- u32 hostintmask; /* rev8 */
+- u32 intmask; /* rev8 */
+- u32 sbintstatus; /* rev8 */
+- u32 sbintmask; /* rev8 */
+- u32 funcintmask; /* rev4 */
+- u32 PAD[2];
+- u32 tosbmailbox; /* 0x040, rev8 */
+- u32 tohostmailbox; /* rev8 */
+- u32 tosbmailboxdata; /* rev8 */
+- u32 tohostmailboxdata; /* rev8 */
+-
+- /* synchronized access to registers in SDIO clock domain */
+- u32 sdioaccess; /* 0x050, rev8 */
+- u32 PAD[3];
+-
+- /* PCMCIA frame control */
+- u8 pcmciaframectrl; /* 0x060, rev8 */
+- u8 PAD[3];
+- u8 pcmciawatermark; /* rev8 */
+- u8 PAD[155];
+-
+- /* interrupt batching control */
+- u32 intrcvlazy; /* 0x100, rev8 */
+- u32 PAD[3];
+-
+- /* counters */
+- u32 cmd52rd; /* 0x110, rev8 */
+- u32 cmd52wr; /* rev8 */
+- u32 cmd53rd; /* rev8 */
+- u32 cmd53wr; /* rev8 */
+- u32 abort; /* rev8 */
+- u32 datacrcerror; /* rev8 */
+- u32 rdoutofsync; /* rev8 */
+- u32 wroutofsync; /* rev8 */
+- u32 writebusy; /* rev8 */
+- u32 readwait; /* rev8 */
+- u32 readterm; /* rev8 */
+- u32 writeterm; /* rev8 */
+- u32 PAD[40];
+- u32 clockctlstatus; /* rev8 */
+- u32 PAD[7];
+-
+- u32 PAD[128]; /* DMA engines */
+-
+- /* SDIO/PCMCIA CIS region */
+- char cis[512]; /* 0x400-0x5ff, rev6 */
+-
+- /* PCMCIA function control registers */
+- char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+- u16 PAD[55];
+-
+- /* PCMCIA backplane access */
+- u16 backplanecsr; /* 0x76E, rev6 */
+- u16 backplaneaddr0; /* rev6 */
+- u16 backplaneaddr1; /* rev6 */
+- u16 backplaneaddr2; /* rev6 */
+- u16 backplaneaddr3; /* rev6 */
+- u16 backplanedata0; /* rev6 */
+- u16 backplanedata1; /* rev6 */
+- u16 backplanedata2; /* rev6 */
+- u16 backplanedata3; /* rev6 */
+- u16 PAD[31];
+-
+- /* sprom "size" & "blank" info */
+- u16 spromstatus; /* 0x7BE, rev2 */
+- u32 PAD[464];
+-
+- u16 PAD[0x80];
+-};
+-
+-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip **ci_ptr);
+-void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
+-void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci,
+- u32 drivestrength);
+-u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
+-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci);
+-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u32 rstvec);
+-
+-#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h 2015-05-06 12:05:42.000000000 -0500
+@@ -180,6 +180,97 @@
+ uint max_request_size;
+ ushort max_segment_count;
+ uint max_segment_size;
++ uint txglomsz;
++ struct sg_table sgtable;
++};
++
++/* sdio core registers */
++struct sdpcmd_regs {
++ u32 corecontrol; /* 0x00, rev8 */
++ u32 corestatus; /* rev8 */
++ u32 PAD[1];
++ u32 biststatus; /* rev8 */
++
++ /* PCMCIA access */
++ u16 pcmciamesportaladdr; /* 0x010, rev8 */
++ u16 PAD[1];
++ u16 pcmciamesportalmask; /* rev8 */
++ u16 PAD[1];
++ u16 pcmciawrframebc; /* rev8 */
++ u16 PAD[1];
++ u16 pcmciaunderflowtimer; /* rev8 */
++ u16 PAD[1];
++
++ /* interrupt */
++ u32 intstatus; /* 0x020, rev8 */
++ u32 hostintmask; /* rev8 */
++ u32 intmask; /* rev8 */
++ u32 sbintstatus; /* rev8 */
++ u32 sbintmask; /* rev8 */
++ u32 funcintmask; /* rev4 */
++ u32 PAD[2];
++ u32 tosbmailbox; /* 0x040, rev8 */
++ u32 tohostmailbox; /* rev8 */
++ u32 tosbmailboxdata; /* rev8 */
++ u32 tohostmailboxdata; /* rev8 */
++
++ /* synchronized access to registers in SDIO clock domain */
++ u32 sdioaccess; /* 0x050, rev8 */
++ u32 PAD[3];
++
++ /* PCMCIA frame control */
++ u8 pcmciaframectrl; /* 0x060, rev8 */
++ u8 PAD[3];
++ u8 pcmciawatermark; /* rev8 */
++ u8 PAD[155];
++
++ /* interrupt batching control */
++ u32 intrcvlazy; /* 0x100, rev8 */
++ u32 PAD[3];
++
++ /* counters */
++ u32 cmd52rd; /* 0x110, rev8 */
++ u32 cmd52wr; /* rev8 */
++ u32 cmd53rd; /* rev8 */
++ u32 cmd53wr; /* rev8 */
++ u32 abort; /* rev8 */
++ u32 datacrcerror; /* rev8 */
++ u32 rdoutofsync; /* rev8 */
++ u32 wroutofsync; /* rev8 */
++ u32 writebusy; /* rev8 */
++ u32 readwait; /* rev8 */
++ u32 readterm; /* rev8 */
++ u32 writeterm; /* rev8 */
++ u32 PAD[40];
++ u32 clockctlstatus; /* rev8 */
++ u32 PAD[7];
++
++ u32 PAD[128]; /* DMA engines */
++
++ /* SDIO/PCMCIA CIS region */
++ char cis[512]; /* 0x400-0x5ff, rev6 */
++
++ /* PCMCIA function control registers */
++ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
++ u16 PAD[55];
++
++ /* PCMCIA backplane access */
++ u16 backplanecsr; /* 0x76E, rev6 */
++ u16 backplaneaddr0; /* rev6 */
++ u16 backplaneaddr1; /* rev6 */
++ u16 backplaneaddr2; /* rev6 */
++ u16 backplaneaddr3; /* rev6 */
++ u16 backplanedata0; /* rev6 */
++ u16 backplanedata1; /* rev6 */
++ u16 backplanedata2; /* rev6 */
++ u16 backplanedata3; /* rev6 */
++ u16 PAD[31];
++
++ /* sprom "size" & "blank" info */
++ u16 spromstatus; /* 0x7BE, rev2 */
++ u32 PAD[464];
++
++ u16 PAD[0x80];
+ };
+
+ /* Register/deregister interrupt handler. */
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/usb.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/usb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/usb.c 2015-07-24 18:03:30.324842002 -0500
+@@ -25,6 +25,7 @@
+ #include <dhd_bus.h>
+ #include <dhd_dbg.h>
+
++#include "firmware.h"
+ #include "usb_rdl.h"
+ #include "usb.h"
+
+@@ -61,12 +62,6 @@
+ u8 *image;
+ int image_len;
+ };
+-static struct list_head fw_image_list;
+-
+-struct intr_transfer_buf {
+- u32 notification;
+- u32 reserved;
+-};
+
+ struct brcmf_usbdev_info {
+ struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
+@@ -75,7 +70,7 @@
+ struct list_head rx_postq;
+ struct list_head tx_freeq;
+ struct list_head tx_postq;
+- uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
++ uint rx_pipe, tx_pipe, rx_pipe2;
+
+ int rx_low_watermark;
+ int tx_low_watermark;
+@@ -87,7 +82,7 @@
+ struct brcmf_usbreq *tx_reqs;
+ struct brcmf_usbreq *rx_reqs;
+
+- u8 *image; /* buffer for combine fw and nvram */
++ const u8 *image; /* buffer for combine fw and nvram */
+ int image_len;
+
+ struct usb_device *usbdev;
+@@ -104,10 +99,6 @@
+ ulong ctl_op;
+
+ struct urb *bulk_urb; /* used for FW download */
+- struct urb *intr_urb; /* URB for interrupt endpoint */
+- int intr_size; /* Size of interrupt message */
+- int interval; /* Interrupt polling interval */
+- struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
+ };
+
+ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
+@@ -531,39 +522,6 @@
+ }
+ }
+
+-static void
+-brcmf_usb_intr_complete(struct urb *urb)
+-{
+- struct brcmf_usbdev_info *devinfo =
+- (struct brcmf_usbdev_info *)urb->context;
+- int err;
+-
+- brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
+-
+- if (devinfo == NULL)
+- return;
+-
+- if (unlikely(urb->status)) {
+- if (urb->status == -ENOENT ||
+- urb->status == -ESHUTDOWN ||
+- urb->status == -ENODEV) {
+- brcmf_usb_state_change(devinfo,
+- BRCMFMAC_USB_STATE_DOWN);
+- }
+- }
+-
+- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
+- brcmf_err("intr cb when DBUS down, ignoring\n");
+- return;
+- }
+-
+- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+- err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+- if (err)
+- brcmf_err("usb_submit_urb, err=%d\n", err);
+- }
+-}
+-
+ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
+ {
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+@@ -619,7 +577,6 @@
+ {
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ u16 ifnum;
+- int ret;
+
+ brcmf_dbg(USB, "Enter\n");
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
+@@ -628,23 +585,6 @@
+ /* Success, indicate devinfo is fully up */
+ brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
+
+- if (devinfo->intr_urb) {
+- usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
+- devinfo->intr_pipe,
+- &devinfo->intr,
+- devinfo->intr_size,
+- (usb_complete_t)brcmf_usb_intr_complete,
+- devinfo,
+- devinfo->interval);
+-
+- ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+- if (ret) {
+- brcmf_err("USB_SUBMIT_URB failed with status %d\n",
+- ret);
+- return -EINVAL;
+- }
+- }
+-
+ if (devinfo->ctl_urb) {
+ devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
+ devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
+@@ -681,8 +621,6 @@
+ return;
+
+ brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
+- if (devinfo->intr_urb)
+- usb_kill_urb(devinfo->intr_urb);
+
+ if (devinfo->ctl_urb)
+ usb_kill_urb(devinfo->ctl_urb);
+@@ -1021,7 +959,7 @@
+ }
+
+ err = brcmf_usb_dlstart(devinfo,
+- devinfo->image, devinfo->image_len);
++ (u8 *)devinfo->image, devinfo->image_len);
+ if (err == 0)
+ err = brcmf_usb_dlrun(devinfo);
+ return err;
+@@ -1036,7 +974,6 @@
+ brcmf_usb_free_q(&devinfo->rx_freeq, false);
+ brcmf_usb_free_q(&devinfo->tx_freeq, false);
+
+- usb_free_urb(devinfo->intr_urb);
+ usb_free_urb(devinfo->ctl_urb);
+ usb_free_urb(devinfo->bulk_urb);
+
+@@ -1080,68 +1017,20 @@
+ return -1;
+ }
+
+-static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
++static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
+ {
+- s8 *fwname;
+- const struct firmware *fw;
+- struct brcmf_usb_image *fw_image;
+- int err;
+-
+- brcmf_dbg(USB, "Enter\n");
+ switch (devinfo->bus_pub.devid) {
+ case 43143:
+- fwname = BRCMF_USB_43143_FW_NAME;
+- break;
++ return BRCMF_USB_43143_FW_NAME;
+ case 43235:
+ case 43236:
+ case 43238:
+- fwname = BRCMF_USB_43236_FW_NAME;
+- break;
++ return BRCMF_USB_43236_FW_NAME;
+ case 43242:
+- fwname = BRCMF_USB_43242_FW_NAME;
+- break;
++ return BRCMF_USB_43242_FW_NAME;
+ default:
+- return -EINVAL;
+- break;
+- }
+- brcmf_dbg(USB, "Loading FW %s\n", fwname);
+- list_for_each_entry(fw_image, &fw_image_list, list) {
+- if (fw_image->fwname == fwname) {
+- devinfo->image = fw_image->image;
+- devinfo->image_len = fw_image->image_len;
+- return 0;
+- }
+- }
+- /* fw image not yet loaded. Load it now and add to list */
+- err = request_firmware(&fw, fwname, devinfo->dev);
+- if (!fw) {
+- brcmf_err("fail to request firmware %s\n", fwname);
+- return err;
+- }
+- if (check_file(fw->data) < 0) {
+- brcmf_err("invalid firmware %s\n", fwname);
+- return -EINVAL;
++ return NULL;
+ }
+-
+- fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
+- if (!fw_image)
+- return -ENOMEM;
+- INIT_LIST_HEAD(&fw_image->list);
+- list_add_tail(&fw_image->list, &fw_image_list);
+- fw_image->fwname = fwname;
+- fw_image->image = vmalloc(fw->size);
+- if (!fw_image->image)
+- return -ENOMEM;
+-
+- memcpy(fw_image->image, fw->data, fw->size);
+- fw_image->image_len = fw->size;
+-
+- release_firmware(fw);
+-
+- devinfo->image = fw_image->image;
+- devinfo->image_len = fw_image->image_len;
+-
+- return 0;
+ }
+
+
+@@ -1186,11 +1075,6 @@
+ goto error;
+ devinfo->tx_freecount = ntxq;
+
+- devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
+- if (!devinfo->intr_urb) {
+- brcmf_err("usb_alloc_urb (intr) failed\n");
+- goto error;
+- }
+ devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!devinfo->ctl_urb) {
+ brcmf_err("usb_alloc_urb (ctl) failed\n");
+@@ -1202,16 +1086,6 @@
+ goto error;
+ }
+
+- if (!brcmf_usb_dlneeded(devinfo))
+- return &devinfo->bus_pub;
+-
+- brcmf_dbg(USB, "Start fw downloading\n");
+- if (brcmf_usb_get_fw(devinfo))
+- goto error;
+-
+- if (brcmf_usb_fw_download(devinfo))
+- goto error;
+-
+ return &devinfo->bus_pub;
+
+ error:
+@@ -1222,18 +1096,77 @@
+
+ static struct brcmf_bus_ops brcmf_usb_bus_ops = {
+ .txdata = brcmf_usb_tx,
+- .init = brcmf_usb_up,
+ .stop = brcmf_usb_down,
+ .txctl = brcmf_usb_tx_ctlpkt,
+ .rxctl = brcmf_usb_rx_ctlpkt,
+ };
+
++static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
++{
++ int ret;
++
++ /* Attach to the common driver interface */
++ ret = brcmf_attach(devinfo->dev);
++ if (ret) {
++ brcmf_err("brcmf_attach failed\n");
++ return ret;
++ }
++
++ ret = brcmf_usb_up(devinfo->dev);
++ if (ret)
++ goto fail;
++
++ ret = brcmf_bus_start(devinfo->dev);
++ if (ret)
++ goto fail;
++
++ return 0;
++fail:
++ brcmf_detach(devinfo->dev);
++ return ret;
++}
++
++static void brcmf_usb_probe_phase2(struct device *dev,
++ const struct firmware *fw,
++ void *nvram, u32 nvlen)
++{
++ struct brcmf_bus *bus = dev_get_drvdata(dev);
++ struct brcmf_usbdev_info *devinfo;
++ int ret;
++
++ brcmf_dbg(USB, "Start fw downloading\n");
++ ret = check_file(fw->data);
++ if (ret < 0) {
++ brcmf_err("invalid firmware\n");
++ release_firmware(fw);
++ goto error;
++ }
++
++ devinfo = bus->bus_priv.usb->devinfo;
++ devinfo->image = fw->data;
++ devinfo->image_len = fw->size;
++
++ ret = brcmf_usb_fw_download(devinfo);
++ release_firmware(fw);
++ if (ret)
++ goto error;
++
++ ret = brcmf_usb_bus_setup(devinfo);
++ if (ret)
++ goto error;
++
++ return;
++error:
++ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
++ device_release_driver(dev);
++}
++
+ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
+ {
+ struct brcmf_bus *bus = NULL;
+ struct brcmf_usbdev *bus_pub = NULL;
+- int ret;
+ struct device *dev = devinfo->dev;
++ int ret;
+
+ brcmf_dbg(USB, "Enter\n");
+ bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
+@@ -1254,22 +1187,18 @@
+ bus->chip = bus_pub->devid;
+ bus->chiprev = bus_pub->chiprev;
+ bus->proto_type = BRCMF_PROTO_BCDC;
++ bus->always_use_fws_queue = true;
+
+- /* Attach to the common driver interface */
+- ret = brcmf_attach(dev);
+- if (ret) {
+- brcmf_err("brcmf_attach failed\n");
+- goto fail;
+- }
+-
+- ret = brcmf_bus_start(dev);
+- if (ret) {
+- brcmf_err("dongle is not responding\n");
+- brcmf_detach(dev);
+- goto fail;
++ if (!brcmf_usb_dlneeded(devinfo)) {
++ ret = brcmf_usb_bus_setup(devinfo);
++ if (ret)
++ goto fail;
+ }
+-
++ /* request firmware here */
++ brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
++ brcmf_usb_probe_phase2);
+ return 0;
++
+ fail:
+ /* Release resources in reverse order */
+ kfree(bus);
+@@ -1357,9 +1286,6 @@
+ goto fail;
+ }
+
+- endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+- devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
+-
+ devinfo->rx_pipe = 0;
+ devinfo->rx_pipe2 = 0;
+ devinfo->tx_pipe = 0;
+@@ -1391,16 +1317,9 @@
+ }
+ }
+
+- /* Allocate interrupt URB and data buffer */
+- /* RNDIS says 8-byte intr, our old drivers used 4-byte */
+- if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
+- devinfo->intr_size = 8;
+- else
+- devinfo->intr_size = 4;
+-
+- devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
+-
+- if (usb->speed == USB_SPEED_HIGH)
++ if (usb->speed == USB_SPEED_SUPER)
++ brcmf_dbg(USB, "Broadcom super speed USB wireless device detected\n");
++ else if (usb->speed == USB_SPEED_HIGH)
+ brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
+ else
+ brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
+@@ -1455,35 +1374,33 @@
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+ brcmf_dbg(USB, "Enter\n");
+- if (!brcmf_attach(devinfo->dev))
+- return brcmf_bus_start(&usb->dev);
+-
+- return 0;
++ return brcmf_usb_bus_setup(devinfo);
+ }
+
+ static int brcmf_usb_reset_resume(struct usb_interface *intf)
+ {
+ struct usb_device *usb = interface_to_usbdev(intf);
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+-
+ brcmf_dbg(USB, "Enter\n");
+
+- if (!brcmf_usb_fw_download(devinfo))
+- return brcmf_usb_resume(intf);
+-
+- return -EIO;
++ return brcmf_fw_get_firmwares(&usb->dev, 0,
++ brcmf_usb_get_fwname(devinfo), NULL,
++ brcmf_usb_probe_phase2);
+ }
+
+ #define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
++#define BRCMF_USB_VENDOR_ID_LINKSYS 0x13b1
+ #define BRCMF_USB_DEVICE_ID_43143 0xbd1e
+ #define BRCMF_USB_DEVICE_ID_43236 0xbd17
+ #define BRCMF_USB_DEVICE_ID_43242 0xbd1f
++#define BRCMF_USB_DEVICE_ID_AE2500 0x003a
+ #define BRCMF_USB_DEVICE_ID_BCMFW 0x0bdc
+
+ static struct usb_device_id brcmf_usb_devid_table[] = {
+ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
+ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
+ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
++ { USB_DEVICE(BRCMF_USB_VENDOR_ID_LINKSYS, BRCMF_USB_DEVICE_ID_AE2500) },
+ /* special entry for device with firmware loaded and running */
+ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
+ { }
+@@ -1506,16 +1423,6 @@
+ .disable_hub_initiated_lpm = 1,
+ };
+
+-static void brcmf_release_fw(struct list_head *q)
+-{
+- struct brcmf_usb_image *fw_image, *next;
+-
+- list_for_each_entry_safe(fw_image, next, q, list) {
+- vfree(fw_image->image);
+- list_del_init(&fw_image->list);
+- }
+-}
+-
+ static int brcmf_usb_reset_device(struct device *dev, void *notused)
+ {
+ /* device past is the usb interface so we
+@@ -1534,12 +1441,10 @@
+ ret = driver_for_each_device(drv, NULL, NULL,
+ brcmf_usb_reset_device);
+ usb_deregister(&brcmf_usbdrvr);
+- brcmf_release_fw(&fw_image_list);
+ }
+
+ void brcmf_usb_register(void)
+ {
+ brcmf_dbg(USB, "Enter\n");
+- INIT_LIST_HEAD(&fw_image_list);
+ usb_register(&brcmf_usbdrvr);
+ }
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/usb.c.orig linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/usb.c.orig
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/usb.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/usb.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1447 @@
++/*
++ * Copyright (c) 2011 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/firmware.h>
++#include <linux/usb.h>
++#include <linux/vmalloc.h>
++
++#include <brcmu_utils.h>
++#include <brcmu_wifi.h>
++#include <dhd_bus.h>
++#include <dhd_dbg.h>
++
++#include "firmware.h"
++#include "usb_rdl.h"
++#include "usb.h"
++
++#define IOCTL_RESP_TIMEOUT 2000
++
++#define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */
++#define BRCMF_USB_RESET_GETVER_LOOP_CNT 10
++
++#define BRCMF_POSTBOOT_ID 0xA123 /* ID to detect if dongle
++ has boot up */
++#define BRCMF_USB_NRXQ 50
++#define BRCMF_USB_NTXQ 50
++
++#define CONFIGDESC(usb) (&((usb)->actconfig)->desc)
++#define IFPTR(usb, idx) ((usb)->actconfig->interface[(idx)])
++#define IFALTS(usb, idx) (IFPTR((usb), (idx))->altsetting[0])
++#define IFDESC(usb, idx) IFALTS((usb), (idx)).desc
++#define IFEPDESC(usb, idx, ep) (IFALTS((usb), (idx)).endpoint[(ep)]).desc
++
++#define CONTROL_IF 0
++#define BULK_IF 0
++
++#define BRCMF_USB_CBCTL_WRITE 0
++#define BRCMF_USB_CBCTL_READ 1
++#define BRCMF_USB_MAX_PKT_SIZE 1600
++
++#define BRCMF_USB_43143_FW_NAME "brcm/brcmfmac43143.bin"
++#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin"
++#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
++
++struct brcmf_usb_image {
++ struct list_head list;
++ s8 *fwname;
++ u8 *image;
++ int image_len;
++};
++
++struct brcmf_usbdev_info {
++ struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
++ spinlock_t qlock;
++ struct list_head rx_freeq;
++ struct list_head rx_postq;
++ struct list_head tx_freeq;
++ struct list_head tx_postq;
++ uint rx_pipe, tx_pipe, rx_pipe2;
++
++ int rx_low_watermark;
++ int tx_low_watermark;
++ int tx_high_watermark;
++ int tx_freecount;
++ bool tx_flowblock;
++ spinlock_t tx_flowblock_lock;
++
++ struct brcmf_usbreq *tx_reqs;
++ struct brcmf_usbreq *rx_reqs;
++
++ const u8 *image; /* buffer for combine fw and nvram */
++ int image_len;
++
++ struct usb_device *usbdev;
++ struct device *dev;
++
++ int ctl_in_pipe, ctl_out_pipe;
++ struct urb *ctl_urb; /* URB for control endpoint */
++ struct usb_ctrlrequest ctl_write;
++ struct usb_ctrlrequest ctl_read;
++ u32 ctl_urb_actual_length;
++ int ctl_urb_status;
++ int ctl_completed;
++ wait_queue_head_t ioctl_resp_wait;
++ ulong ctl_op;
++
++ struct urb *bulk_urb; /* used for FW download */
++};
++
++static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
++ struct brcmf_usbreq *req);
++
++static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev)
++{
++ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
++ return bus_if->bus_priv.usb;
++}
++
++static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
++{
++ return brcmf_usb_get_buspub(dev)->devinfo;
++}
++
++static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo)
++{
++ return wait_event_timeout(devinfo->ioctl_resp_wait,
++ devinfo->ctl_completed,
++ msecs_to_jiffies(IOCTL_RESP_TIMEOUT));
++}
++
++static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
++{
++ if (waitqueue_active(&devinfo->ioctl_resp_wait))
++ wake_up(&devinfo->ioctl_resp_wait);
++}
++
++static void
++brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status)
++{
++ brcmf_dbg(USB, "Enter, status=%d\n", status);
++
++ if (unlikely(devinfo == NULL))
++ return;
++
++ if (type == BRCMF_USB_CBCTL_READ) {
++ if (status == 0)
++ devinfo->bus_pub.stats.rx_ctlpkts++;
++ else
++ devinfo->bus_pub.stats.rx_ctlerrs++;
++ } else if (type == BRCMF_USB_CBCTL_WRITE) {
++ if (status == 0)
++ devinfo->bus_pub.stats.tx_ctlpkts++;
++ else
++ devinfo->bus_pub.stats.tx_ctlerrs++;
++ }
++
++ devinfo->ctl_urb_status = status;
++ devinfo->ctl_completed = true;
++ brcmf_usb_ioctl_resp_wake(devinfo);
++}
++
++static void
++brcmf_usb_ctlread_complete(struct urb *urb)
++{
++ struct brcmf_usbdev_info *devinfo =
++ (struct brcmf_usbdev_info *)urb->context;
++
++ brcmf_dbg(USB, "Enter\n");
++ devinfo->ctl_urb_actual_length = urb->actual_length;
++ brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ,
++ urb->status);
++}
++
++static void
++brcmf_usb_ctlwrite_complete(struct urb *urb)
++{
++ struct brcmf_usbdev_info *devinfo =
++ (struct brcmf_usbdev_info *)urb->context;
++
++ brcmf_dbg(USB, "Enter\n");
++ brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE,
++ urb->status);
++}
++
++static int
++brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
++{
++ int ret;
++ u16 size;
++
++ brcmf_dbg(USB, "Enter\n");
++ if (devinfo == NULL || buf == NULL ||
++ len == 0 || devinfo->ctl_urb == NULL)
++ return -EINVAL;
++
++ size = len;
++ devinfo->ctl_write.wLength = cpu_to_le16p(&size);
++ devinfo->ctl_urb->transfer_buffer_length = size;
++ devinfo->ctl_urb_status = 0;
++ devinfo->ctl_urb_actual_length = 0;
++
++ usb_fill_control_urb(devinfo->ctl_urb,
++ devinfo->usbdev,
++ devinfo->ctl_out_pipe,
++ (unsigned char *) &devinfo->ctl_write,
++ buf, size,
++ (usb_complete_t)brcmf_usb_ctlwrite_complete,
++ devinfo);
++
++ ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
++ if (ret < 0)
++ brcmf_err("usb_submit_urb failed %d\n", ret);
++
++ return ret;
++}
++
++static int
++brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
++{
++ int ret;
++ u16 size;
++
++ brcmf_dbg(USB, "Enter\n");
++ if ((devinfo == NULL) || (buf == NULL) || (len == 0)
++ || (devinfo->ctl_urb == NULL))
++ return -EINVAL;
++
++ size = len;
++ devinfo->ctl_read.wLength = cpu_to_le16p(&size);
++ devinfo->ctl_urb->transfer_buffer_length = size;
++
++ devinfo->ctl_read.bRequestType = USB_DIR_IN
++ | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
++ devinfo->ctl_read.bRequest = 1;
++
++ usb_fill_control_urb(devinfo->ctl_urb,
++ devinfo->usbdev,
++ devinfo->ctl_in_pipe,
++ (unsigned char *) &devinfo->ctl_read,
++ buf, size,
++ (usb_complete_t)brcmf_usb_ctlread_complete,
++ devinfo);
++
++ ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
++ if (ret < 0)
++ brcmf_err("usb_submit_urb failed %d\n", ret);
++
++ return ret;
++}
++
++static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
++{
++ int err = 0;
++ int timeout = 0;
++ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
++
++ brcmf_dbg(USB, "Enter\n");
++ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
++ return -EIO;
++
++ if (test_and_set_bit(0, &devinfo->ctl_op))
++ return -EIO;
++
++ devinfo->ctl_completed = false;
++ err = brcmf_usb_send_ctl(devinfo, buf, len);
++ if (err) {
++ brcmf_err("fail %d bytes: %d\n", err, len);
++ clear_bit(0, &devinfo->ctl_op);
++ return err;
++ }
++ timeout = brcmf_usb_ioctl_resp_wait(devinfo);
++ clear_bit(0, &devinfo->ctl_op);
++ if (!timeout) {
++ brcmf_err("Txctl wait timed out\n");
++ err = -EIO;
++ }
++ return err;
++}
++
++static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
++{
++ int err = 0;
++ int timeout = 0;
++ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
++
++ brcmf_dbg(USB, "Enter\n");
++ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
++ return -EIO;
++
++ if (test_and_set_bit(0, &devinfo->ctl_op))
++ return -EIO;
++
++ devinfo->ctl_completed = false;
++ err = brcmf_usb_recv_ctl(devinfo, buf, len);
++ if (err) {
++ brcmf_err("fail %d bytes: %d\n", err, len);
++ clear_bit(0, &devinfo->ctl_op);
++ return err;
++ }
++ timeout = brcmf_usb_ioctl_resp_wait(devinfo);
++ err = devinfo->ctl_urb_status;
++ clear_bit(0, &devinfo->ctl_op);
++ if (!timeout) {
++ brcmf_err("rxctl wait timed out\n");
++ err = -EIO;
++ }
++ if (!err)
++ return devinfo->ctl_urb_actual_length;
++ else
++ return err;
++}
++
++static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
++ struct list_head *q, int *counter)
++{
++ unsigned long flags;
++ struct brcmf_usbreq *req;
++ spin_lock_irqsave(&devinfo->qlock, flags);
++ if (list_empty(q)) {
++ spin_unlock_irqrestore(&devinfo->qlock, flags);
++ return NULL;
++ }
++ req = list_entry(q->next, struct brcmf_usbreq, list);
++ list_del_init(q->next);
++ if (counter)
++ (*counter)--;
++ spin_unlock_irqrestore(&devinfo->qlock, flags);
++ return req;
++
++}
++
++static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
++ struct list_head *q, struct brcmf_usbreq *req,
++ int *counter)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&devinfo->qlock, flags);
++ list_add_tail(&req->list, q);
++ if (counter)
++ (*counter)++;
++ spin_unlock_irqrestore(&devinfo->qlock, flags);
++}
++
++static struct brcmf_usbreq *
++brcmf_usbdev_qinit(struct list_head *q, int qsize)
++{
++ int i;
++ struct brcmf_usbreq *req, *reqs;
++
++ reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);
++ if (reqs == NULL)
++ return NULL;
++
++ req = reqs;
++
++ for (i = 0; i < qsize; i++) {
++ req->urb = usb_alloc_urb(0, GFP_ATOMIC);
++ if (!req->urb)
++ goto fail;
++
++ INIT_LIST_HEAD(&req->list);
++ list_add_tail(&req->list, q);
++ req++;
++ }
++ return reqs;
++fail:
++ brcmf_err("fail!\n");
++ while (!list_empty(q)) {
++ req = list_entry(q->next, struct brcmf_usbreq, list);
++ if (req && req->urb)
++ usb_free_urb(req->urb);
++ list_del(q->next);
++ }
++ return NULL;
++
++}
++
++static void brcmf_usb_free_q(struct list_head *q, bool pending)
++{
++ struct brcmf_usbreq *req, *next;
++ int i = 0;
++ list_for_each_entry_safe(req, next, q, list) {
++ if (!req->urb) {
++ brcmf_err("bad req\n");
++ break;
++ }
++ i++;
++ if (pending) {
++ usb_kill_urb(req->urb);
++ } else {
++ usb_free_urb(req->urb);
++ list_del_init(&req->list);
++ }
++ }
++}
++
++static void brcmf_usb_del_fromq(struct brcmf_usbdev_info *devinfo,
++ struct brcmf_usbreq *req)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&devinfo->qlock, flags);
++ list_del_init(&req->list);
++ spin_unlock_irqrestore(&devinfo->qlock, flags);
++}
++
++
++static void brcmf_usb_tx_complete(struct urb *urb)
++{
++ struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
++ struct brcmf_usbdev_info *devinfo = req->devinfo;
++ unsigned long flags;
++
++ brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
++ req->skb);
++ brcmf_usb_del_fromq(devinfo, req);
++
++ brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
++ req->skb = NULL;
++ brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
++ spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
++ if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
++ devinfo->tx_flowblock) {
++ brcmf_txflowblock(devinfo->dev, false);
++ devinfo->tx_flowblock = false;
++ }
++ spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
++}
++
++static void brcmf_usb_rx_complete(struct urb *urb)
++{
++ struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
++ struct brcmf_usbdev_info *devinfo = req->devinfo;
++ struct sk_buff *skb;
++
++ brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
++ brcmf_usb_del_fromq(devinfo, req);
++ skb = req->skb;
++ req->skb = NULL;
++
++ /* zero lenght packets indicate usb "failure". Do not refill */
++ if (urb->status != 0 || !urb->actual_length) {
++ brcmu_pkt_buf_free_skb(skb);
++ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
++ return;
++ }
++
++ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
++ skb_put(skb, urb->actual_length);
++ brcmf_rx_frame(devinfo->dev, skb);
++ brcmf_usb_rx_refill(devinfo, req);
++ } else {
++ brcmu_pkt_buf_free_skb(skb);
++ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
++ }
++ return;
++
++}
++
++static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
++ struct brcmf_usbreq *req)
++{
++ struct sk_buff *skb;
++ int ret;
++
++ if (!req || !devinfo)
++ return;
++
++ skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
++ if (!skb) {
++ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
++ return;
++ }
++ req->skb = skb;
++
++ usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->rx_pipe,
++ skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
++ req);
++ req->devinfo = devinfo;
++ brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
++
++ ret = usb_submit_urb(req->urb, GFP_ATOMIC);
++ if (ret) {
++ brcmf_usb_del_fromq(devinfo, req);
++ brcmu_pkt_buf_free_skb(req->skb);
++ req->skb = NULL;
++ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
++ }
++ return;
++}
++
++static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
++{
++ struct brcmf_usbreq *req;
++
++ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
++ brcmf_err("bus is not up=%d\n", devinfo->bus_pub.state);
++ return;
++ }
++ while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
++ brcmf_usb_rx_refill(devinfo, req);
++}
++
++static void
++brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
++{
++ struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus;
++ int old_state;
++
++ brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n",
++ devinfo->bus_pub.state, state);
++
++ if (devinfo->bus_pub.state == state)
++ return;
++
++ old_state = devinfo->bus_pub.state;
++ devinfo->bus_pub.state = state;
++
++ /* update state of upper layer */
++ if (state == BRCMFMAC_USB_STATE_DOWN) {
++ brcmf_dbg(USB, "DBUS is down\n");
++ brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN);
++ } else if (state == BRCMFMAC_USB_STATE_UP) {
++ brcmf_dbg(USB, "DBUS is up\n");
++ brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DATA);
++ } else {
++ brcmf_dbg(USB, "DBUS current state=%d\n", state);
++ }
++}
++
++static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
++{
++ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
++ struct brcmf_usbreq *req;
++ int ret;
++ unsigned long flags;
++
++ brcmf_dbg(USB, "Enter, skb=%p\n", skb);
++ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
++ ret = -EIO;
++ goto fail;
++ }
++
++ req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
++ &devinfo->tx_freecount);
++ if (!req) {
++ brcmf_err("no req to send\n");
++ ret = -ENOMEM;
++ goto fail;
++ }
++
++ req->skb = skb;
++ req->devinfo = devinfo;
++ usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
++ skb->data, skb->len, brcmf_usb_tx_complete, req);
++ req->urb->transfer_flags |= URB_ZERO_PACKET;
++ brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
++ ret = usb_submit_urb(req->urb, GFP_ATOMIC);
++ if (ret) {
++ brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n");
++ brcmf_usb_del_fromq(devinfo, req);
++ req->skb = NULL;
++ brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
++ &devinfo->tx_freecount);
++ goto fail;
++ }
++
++ spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
++ if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
++ !devinfo->tx_flowblock) {
++ brcmf_txflowblock(dev, true);
++ devinfo->tx_flowblock = true;
++ }
++ spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
++ return 0;
++
++fail:
++ return ret;
++}
++
++
++static int brcmf_usb_up(struct device *dev)
++{
++ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
++ u16 ifnum;
++
++ brcmf_dbg(USB, "Enter\n");
++ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
++ return 0;
++
++ /* Success, indicate devinfo is fully up */
++ brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
++
++ if (devinfo->ctl_urb) {
++ devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
++ devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
++
++ ifnum = IFDESC(devinfo->usbdev, CONTROL_IF).bInterfaceNumber;
++
++ /* CTL Write */
++ devinfo->ctl_write.bRequestType =
++ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
++ devinfo->ctl_write.bRequest = 0;
++ devinfo->ctl_write.wValue = cpu_to_le16(0);
++ devinfo->ctl_write.wIndex = cpu_to_le16p(&ifnum);
++
++ /* CTL Read */
++ devinfo->ctl_read.bRequestType =
++ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
++ devinfo->ctl_read.bRequest = 1;
++ devinfo->ctl_read.wValue = cpu_to_le16(0);
++ devinfo->ctl_read.wIndex = cpu_to_le16p(&ifnum);
++ }
++ brcmf_usb_rx_fill_all(devinfo);
++ return 0;
++}
++
++static void brcmf_usb_down(struct device *dev)
++{
++ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
++
++ brcmf_dbg(USB, "Enter\n");
++ if (devinfo == NULL)
++ return;
++
++ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN)
++ return;
++
++ brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
++
++ if (devinfo->ctl_urb)
++ usb_kill_urb(devinfo->ctl_urb);
++
++ if (devinfo->bulk_urb)
++ usb_kill_urb(devinfo->bulk_urb);
++ brcmf_usb_free_q(&devinfo->tx_postq, true);
++
++ brcmf_usb_free_q(&devinfo->rx_postq, true);
++}
++
++static void
++brcmf_usb_sync_complete(struct urb *urb)
++{
++ struct brcmf_usbdev_info *devinfo =
++ (struct brcmf_usbdev_info *)urb->context;
++
++ devinfo->ctl_completed = true;
++ brcmf_usb_ioctl_resp_wake(devinfo);
++}
++
++static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
++ void *buffer, int buflen)
++{
++ int ret = 0;
++ char *tmpbuf;
++ u16 size;
++
++ if ((!devinfo) || (devinfo->ctl_urb == NULL))
++ return false;
++
++ tmpbuf = kmalloc(buflen, GFP_ATOMIC);
++ if (!tmpbuf)
++ return false;
++
++ size = buflen;
++ devinfo->ctl_urb->transfer_buffer_length = size;
++
++ devinfo->ctl_read.wLength = cpu_to_le16p(&size);
++ devinfo->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR |
++ USB_RECIP_INTERFACE;
++ devinfo->ctl_read.bRequest = cmd;
++
++ usb_fill_control_urb(devinfo->ctl_urb,
++ devinfo->usbdev,
++ usb_rcvctrlpipe(devinfo->usbdev, 0),
++ (unsigned char *) &devinfo->ctl_read,
++ (void *) tmpbuf, size,
++ (usb_complete_t)brcmf_usb_sync_complete, devinfo);
++
++ devinfo->ctl_completed = false;
++ ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
++ if (ret < 0) {
++ brcmf_err("usb_submit_urb failed %d\n", ret);
++ kfree(tmpbuf);
++ return false;
++ }
++
++ ret = brcmf_usb_ioctl_resp_wait(devinfo);
++ memcpy(buffer, tmpbuf, buflen);
++ kfree(tmpbuf);
++
++ return ret;
++}
++
++static bool
++brcmf_usb_dlneeded(struct brcmf_usbdev_info *devinfo)
++{
++ struct bootrom_id_le id;
++ u32 chipid, chiprev;
++
++ brcmf_dbg(USB, "Enter\n");
++
++ if (devinfo == NULL)
++ return false;
++
++ /* Check if firmware downloaded already by querying runtime ID */
++ id.chip = cpu_to_le32(0xDEAD);
++ brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
++
++ chipid = le32_to_cpu(id.chip);
++ chiprev = le32_to_cpu(id.chiprev);
++
++ if ((chipid & 0x4300) == 0x4300)
++ brcmf_dbg(USB, "chip %x rev 0x%x\n", chipid, chiprev);
++ else
++ brcmf_dbg(USB, "chip %d rev 0x%x\n", chipid, chiprev);
++ if (chipid == BRCMF_POSTBOOT_ID) {
++ brcmf_dbg(USB, "firmware already downloaded\n");
++ brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
++ return false;
++ } else {
++ devinfo->bus_pub.devid = chipid;
++ devinfo->bus_pub.chiprev = chiprev;
++ }
++ return true;
++}
++
++static int
++brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
++{
++ struct bootrom_id_le id;
++ u32 loop_cnt;
++
++ brcmf_dbg(USB, "Enter\n");
++
++ loop_cnt = 0;
++ do {
++ mdelay(BRCMF_USB_RESET_GETVER_SPINWAIT);
++ loop_cnt++;
++ id.chip = cpu_to_le32(0xDEAD); /* Get the ID */
++ brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
++ if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID))
++ break;
++ } while (loop_cnt < BRCMF_USB_RESET_GETVER_LOOP_CNT);
++
++ if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) {
++ brcmf_dbg(USB, "postboot chip 0x%x/rev 0x%x\n",
++ le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
++
++ brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
++ return 0;
++ } else {
++ brcmf_err("Cannot talk to Dongle. Firmware is not UP, %d ms\n",
++ BRCMF_USB_RESET_GETVER_SPINWAIT * loop_cnt);
++ return -EINVAL;
++ }
++}
++
++
++static int
++brcmf_usb_dl_send_bulk(struct brcmf_usbdev_info *devinfo, void *buffer, int len)
++{
++ int ret;
++
++ if ((devinfo == NULL) || (devinfo->bulk_urb == NULL))
++ return -EINVAL;
++
++ /* Prepare the URB */
++ usb_fill_bulk_urb(devinfo->bulk_urb, devinfo->usbdev,
++ devinfo->tx_pipe, buffer, len,
++ (usb_complete_t)brcmf_usb_sync_complete, devinfo);
++
++ devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET;
++
++ devinfo->ctl_completed = false;
++ ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC);
++ if (ret) {
++ brcmf_err("usb_submit_urb failed %d\n", ret);
++ return ret;
++ }
++ ret = brcmf_usb_ioctl_resp_wait(devinfo);
++ return (ret == 0);
++}
++
++static int
++brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
++{
++ unsigned int sendlen, sent, dllen;
++ char *bulkchunk = NULL, *dlpos;
++ struct rdl_state_le state;
++ u32 rdlstate, rdlbytes;
++ int err = 0;
++
++ brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen);
++
++ bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC);
++ if (bulkchunk == NULL) {
++ err = -ENOMEM;
++ goto fail;
++ }
++
++ /* 1) Prepare USB boot loader for runtime image */
++ brcmf_usb_dl_cmd(devinfo, DL_START, &state,
++ sizeof(struct rdl_state_le));
++
++ rdlstate = le32_to_cpu(state.state);
++ rdlbytes = le32_to_cpu(state.bytes);
++
++ /* 2) Check we are in the Waiting state */
++ if (rdlstate != DL_WAITING) {
++ brcmf_err("Failed to DL_START\n");
++ err = -EINVAL;
++ goto fail;
++ }
++ sent = 0;
++ dlpos = fw;
++ dllen = fwlen;
++
++ /* Get chip id and rev */
++ while (rdlbytes != dllen) {
++ /* Wait until the usb device reports it received all
++ * the bytes we sent */
++ if ((rdlbytes == sent) && (rdlbytes != dllen)) {
++ if ((dllen-sent) < RDL_CHUNK)
++ sendlen = dllen-sent;
++ else
++ sendlen = RDL_CHUNK;
++
++ /* simply avoid having to send a ZLP by ensuring we
++ * never have an even
++ * multiple of 64
++ */
++ if (!(sendlen % 64))
++ sendlen -= 4;
++
++ /* send data */
++ memcpy(bulkchunk, dlpos, sendlen);
++ if (brcmf_usb_dl_send_bulk(devinfo, bulkchunk,
++ sendlen)) {
++ brcmf_err("send_bulk failed\n");
++ err = -EINVAL;
++ goto fail;
++ }
++
++ dlpos += sendlen;
++ sent += sendlen;
++ }
++ if (!brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
++ sizeof(struct rdl_state_le))) {
++ brcmf_err("DL_GETSTATE Failed xxxx\n");
++ err = -EINVAL;
++ goto fail;
++ }
++
++ rdlstate = le32_to_cpu(state.state);
++ rdlbytes = le32_to_cpu(state.bytes);
++
++ /* restart if an error is reported */
++ if (rdlstate == DL_BAD_HDR || rdlstate == DL_BAD_CRC) {
++ brcmf_err("Bad Hdr or Bad CRC state %d\n",
++ rdlstate);
++ err = -EINVAL;
++ goto fail;
++ }
++ }
++
++fail:
++ kfree(bulkchunk);
++ brcmf_dbg(USB, "Exit, err=%d\n", err);
++ return err;
++}
++
++static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len)
++{
++ int err;
++
++ brcmf_dbg(USB, "Enter\n");
++
++ if (devinfo == NULL)
++ return -EINVAL;
++
++ if (devinfo->bus_pub.devid == 0xDEAD)
++ return -EINVAL;
++
++ err = brcmf_usb_dl_writeimage(devinfo, fw, len);
++ if (err == 0)
++ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_DONE;
++ else
++ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_FAIL;
++ brcmf_dbg(USB, "Exit, err=%d\n", err);
++
++ return err;
++}
++
++static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
++{
++ struct rdl_state_le state;
++
++ brcmf_dbg(USB, "Enter\n");
++ if (!devinfo)
++ return -EINVAL;
++
++ if (devinfo->bus_pub.devid == 0xDEAD)
++ return -EINVAL;
++
++ /* Check we are runnable */
++ brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
++ sizeof(struct rdl_state_le));
++
++ /* Start the image */
++ if (state.state == cpu_to_le32(DL_RUNNABLE)) {
++ if (!brcmf_usb_dl_cmd(devinfo, DL_GO, &state,
++ sizeof(struct rdl_state_le)))
++ return -ENODEV;
++ if (brcmf_usb_resetcfg(devinfo))
++ return -ENODEV;
++ /* The Dongle may go for re-enumeration. */
++ } else {
++ brcmf_err("Dongle not runnable\n");
++ return -EINVAL;
++ }
++ brcmf_dbg(USB, "Exit\n");
++ return 0;
++}
++
++static bool brcmf_usb_chip_support(int chipid, int chiprev)
++{
++ switch(chipid) {
++ case 43143:
++ return true;
++ case 43235:
++ case 43236:
++ case 43238:
++ return (chiprev == 3);
++ case 43242:
++ return true;
++ default:
++ break;
++ }
++ return false;
++}
++
++static int
++brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
++{
++ int devid, chiprev;
++ int err;
++
++ brcmf_dbg(USB, "Enter\n");
++ if (devinfo == NULL)
++ return -ENODEV;
++
++ devid = devinfo->bus_pub.devid;
++ chiprev = devinfo->bus_pub.chiprev;
++
++ if (!brcmf_usb_chip_support(devid, chiprev)) {
++ brcmf_err("unsupported chip %d rev %d\n",
++ devid, chiprev);
++ return -EINVAL;
++ }
++
++ if (!devinfo->image) {
++ brcmf_err("No firmware!\n");
++ return -ENOENT;
++ }
++
++ err = brcmf_usb_dlstart(devinfo,
++ (u8 *)devinfo->image, devinfo->image_len);
++ if (err == 0)
++ err = brcmf_usb_dlrun(devinfo);
++ return err;
++}
++
++
++static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
++{
++ brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
++
++ /* free the URBS */
++ brcmf_usb_free_q(&devinfo->rx_freeq, false);
++ brcmf_usb_free_q(&devinfo->tx_freeq, false);
++
++ usb_free_urb(devinfo->ctl_urb);
++ usb_free_urb(devinfo->bulk_urb);
++
++ kfree(devinfo->tx_reqs);
++ kfree(devinfo->rx_reqs);
++}
++
++#define TRX_MAGIC 0x30524448 /* "HDR0" */
++#define TRX_VERSION 1 /* Version 1 */
++#define TRX_MAX_LEN 0x3B0000 /* Max length */
++#define TRX_NO_HEADER 1 /* Do not write TRX header */
++#define TRX_MAX_OFFSET 3 /* Max number of individual files */
++#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed image */
++
++struct trx_header_le {
++ __le32 magic; /* "HDR0" */
++ __le32 len; /* Length of file including header */
++ __le32 crc32; /* CRC from flag_version to end of file */
++ __le32 flag_version; /* 0:15 flags, 16:31 version */
++ __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of
++ * header */
++};
++
++static int check_file(const u8 *headers)
++{
++ struct trx_header_le *trx;
++ int actual_len = -1;
++
++ brcmf_dbg(USB, "Enter\n");
++ /* Extract trx header */
++ trx = (struct trx_header_le *) headers;
++ if (trx->magic != cpu_to_le32(TRX_MAGIC))
++ return -1;
++
++ headers += sizeof(struct trx_header_le);
++
++ if (le32_to_cpu(trx->flag_version) & TRX_UNCOMP_IMAGE) {
++ actual_len = le32_to_cpu(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]);
++ return actual_len + sizeof(struct trx_header_le);
++ }
++ return -1;
++}
++
++static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
++{
++ switch (devinfo->bus_pub.devid) {
++ case 43143:
++ return BRCMF_USB_43143_FW_NAME;
++ case 43235:
++ case 43236:
++ case 43238:
++ return BRCMF_USB_43236_FW_NAME;
++ case 43242:
++ return BRCMF_USB_43242_FW_NAME;
++ default:
++ return NULL;
++ }
++}
++
++
++static
++struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
++ int nrxq, int ntxq)
++{
++ brcmf_dbg(USB, "Enter\n");
++
++ devinfo->bus_pub.nrxq = nrxq;
++ devinfo->rx_low_watermark = nrxq / 2;
++ devinfo->bus_pub.devinfo = devinfo;
++ devinfo->bus_pub.ntxq = ntxq;
++ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DOWN;
++
++ /* flow control when too many tx urbs posted */
++ devinfo->tx_low_watermark = ntxq / 4;
++ devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
++ devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
++
++ /* Initialize other structure content */
++ init_waitqueue_head(&devinfo->ioctl_resp_wait);
++
++ /* Initialize the spinlocks */
++ spin_lock_init(&devinfo->qlock);
++ spin_lock_init(&devinfo->tx_flowblock_lock);
++
++ INIT_LIST_HEAD(&devinfo->rx_freeq);
++ INIT_LIST_HEAD(&devinfo->rx_postq);
++
++ INIT_LIST_HEAD(&devinfo->tx_freeq);
++ INIT_LIST_HEAD(&devinfo->tx_postq);
++
++ devinfo->tx_flowblock = false;
++
++ devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
++ if (!devinfo->rx_reqs)
++ goto error;
++
++ devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
++ if (!devinfo->tx_reqs)
++ goto error;
++ devinfo->tx_freecount = ntxq;
++
++ devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
++ if (!devinfo->ctl_urb) {
++ brcmf_err("usb_alloc_urb (ctl) failed\n");
++ goto error;
++ }
++ devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
++ if (!devinfo->bulk_urb) {
++ brcmf_err("usb_alloc_urb (bulk) failed\n");
++ goto error;
++ }
++
++ return &devinfo->bus_pub;
++
++error:
++ brcmf_err("failed!\n");
++ brcmf_usb_detach(devinfo);
++ return NULL;
++}
++
++static struct brcmf_bus_ops brcmf_usb_bus_ops = {
++ .txdata = brcmf_usb_tx,
++ .stop = brcmf_usb_down,
++ .txctl = brcmf_usb_tx_ctlpkt,
++ .rxctl = brcmf_usb_rx_ctlpkt,
++};
++
++static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
++{
++ int ret;
++
++ /* Attach to the common driver interface */
++ ret = brcmf_attach(devinfo->dev);
++ if (ret) {
++ brcmf_err("brcmf_attach failed\n");
++ return ret;
++ }
++
++ ret = brcmf_usb_up(devinfo->dev);
++ if (ret)
++ goto fail;
++
++ ret = brcmf_bus_start(devinfo->dev);
++ if (ret)
++ goto fail;
++
++ return 0;
++fail:
++ brcmf_detach(devinfo->dev);
++ return ret;
++}
++
++static void brcmf_usb_probe_phase2(struct device *dev,
++ const struct firmware *fw,
++ void *nvram, u32 nvlen)
++{
++ struct brcmf_bus *bus = dev_get_drvdata(dev);
++ struct brcmf_usbdev_info *devinfo;
++ int ret;
++
++ brcmf_dbg(USB, "Start fw downloading\n");
++ ret = check_file(fw->data);
++ if (ret < 0) {
++ brcmf_err("invalid firmware\n");
++ release_firmware(fw);
++ goto error;
++ }
++
++ devinfo = bus->bus_priv.usb->devinfo;
++ devinfo->image = fw->data;
++ devinfo->image_len = fw->size;
++
++ ret = brcmf_usb_fw_download(devinfo);
++ release_firmware(fw);
++ if (ret)
++ goto error;
++
++ ret = brcmf_usb_bus_setup(devinfo);
++ if (ret)
++ goto error;
++
++ return;
++error:
++ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
++ device_release_driver(dev);
++}
++
++static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
++{
++ struct brcmf_bus *bus = NULL;
++ struct brcmf_usbdev *bus_pub = NULL;
++ struct device *dev = devinfo->dev;
++ int ret;
++
++ brcmf_dbg(USB, "Enter\n");
++ bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
++ if (!bus_pub)
++ return -ENODEV;
++
++ bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
++ if (!bus) {
++ ret = -ENOMEM;
++ goto fail;
++ }
++
++ bus->dev = dev;
++ bus_pub->bus = bus;
++ bus->bus_priv.usb = bus_pub;
++ dev_set_drvdata(dev, bus);
++ bus->ops = &brcmf_usb_bus_ops;
++ bus->chip = bus_pub->devid;
++ bus->chiprev = bus_pub->chiprev;
++ bus->proto_type = BRCMF_PROTO_BCDC;
++ bus->always_use_fws_queue = true;
++
++ if (!brcmf_usb_dlneeded(devinfo)) {
++ ret = brcmf_usb_bus_setup(devinfo);
++ if (ret)
++ goto fail;
++ }
++ /* request firmware here */
++ brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
++ brcmf_usb_probe_phase2);
++ return 0;
++
++fail:
++ /* Release resources in reverse order */
++ kfree(bus);
++ brcmf_usb_detach(devinfo);
++ return ret;
++}
++
++static void
++brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
++{
++ if (!devinfo)
++ return;
++ brcmf_dbg(USB, "Enter, bus_pub %p\n", devinfo);
++
++ brcmf_detach(devinfo->dev);
++ kfree(devinfo->bus_pub.bus);
++ brcmf_usb_detach(devinfo);
++}
++
++static int
++brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
++{
++ int ep;
++ struct usb_endpoint_descriptor *endpoint;
++ int ret = 0;
++ struct usb_device *usb = interface_to_usbdev(intf);
++ int num_of_eps;
++ u8 endpoint_num;
++ struct brcmf_usbdev_info *devinfo;
++
++ brcmf_dbg(USB, "Enter\n");
++
++ devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
++ if (devinfo == NULL)
++ return -ENOMEM;
++
++ devinfo->usbdev = usb;
++ devinfo->dev = &usb->dev;
++
++ usb_set_intfdata(intf, devinfo);
++
++ /* Check that the device supports only one configuration */
++ if (usb->descriptor.bNumConfigurations != 1) {
++ ret = -1;
++ goto fail;
++ }
++
++ if (usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) {
++ ret = -1;
++ goto fail;
++ }
++
++ /*
++ * Only the BDC interface configuration is supported:
++ * Device class: USB_CLASS_VENDOR_SPEC
++ * if0 class: USB_CLASS_VENDOR_SPEC
++ * if0/ep0: control
++ * if0/ep1: bulk in
++ * if0/ep2: bulk out (ok if swapped with bulk in)
++ */
++ if (CONFIGDESC(usb)->bNumInterfaces != 1) {
++ ret = -1;
++ goto fail;
++ }
++
++ /* Check interface */
++ if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
++ IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 ||
++ IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff) {
++ brcmf_err("invalid control interface: class %d, subclass %d, proto %d\n",
++ IFDESC(usb, CONTROL_IF).bInterfaceClass,
++ IFDESC(usb, CONTROL_IF).bInterfaceSubClass,
++ IFDESC(usb, CONTROL_IF).bInterfaceProtocol);
++ ret = -1;
++ goto fail;
++ }
++
++ /* Check control endpoint */
++ endpoint = &IFEPDESC(usb, CONTROL_IF, 0);
++ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
++ != USB_ENDPOINT_XFER_INT) {
++ brcmf_err("invalid control endpoint %d\n",
++ endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
++ ret = -1;
++ goto fail;
++ }
++
++ devinfo->rx_pipe = 0;
++ devinfo->rx_pipe2 = 0;
++ devinfo->tx_pipe = 0;
++ num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
++
++ /* Check data endpoints and get pipes */
++ for (ep = 1; ep <= num_of_eps; ep++) {
++ endpoint = &IFEPDESC(usb, BULK_IF, ep);
++ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
++ USB_ENDPOINT_XFER_BULK) {
++ brcmf_err("invalid data endpoint %d\n", ep);
++ ret = -1;
++ goto fail;
++ }
++
++ endpoint_num = endpoint->bEndpointAddress &
++ USB_ENDPOINT_NUMBER_MASK;
++ if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
++ == USB_DIR_IN) {
++ if (!devinfo->rx_pipe) {
++ devinfo->rx_pipe =
++ usb_rcvbulkpipe(usb, endpoint_num);
++ } else {
++ devinfo->rx_pipe2 =
++ usb_rcvbulkpipe(usb, endpoint_num);
++ }
++ } else {
++ devinfo->tx_pipe = usb_sndbulkpipe(usb, endpoint_num);
++ }
++ }
++
++ if (usb->speed == USB_SPEED_SUPER)
++ brcmf_dbg(USB, "Broadcom super speed USB wireless device detected\n");
++ else if (usb->speed == USB_SPEED_HIGH)
++ brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
++ else
++ brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
++
++ ret = brcmf_usb_probe_cb(devinfo);
++ if (ret)
++ goto fail;
++
++ /* Success */
++ return 0;
++
++fail:
++ brcmf_err("failed with errno %d\n", ret);
++ kfree(devinfo);
++ usb_set_intfdata(intf, NULL);
++ return ret;
++
++}
++
++static void
++brcmf_usb_disconnect(struct usb_interface *intf)
++{
++ struct brcmf_usbdev_info *devinfo;
++
++ brcmf_dbg(USB, "Enter\n");
++ devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
++ brcmf_usb_disconnect_cb(devinfo);
++ kfree(devinfo);
++ brcmf_dbg(USB, "Exit\n");
++}
++
++/*
++ * only need to signal the bus being down and update the state.
++ */
++static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
++{
++ struct usb_device *usb = interface_to_usbdev(intf);
++ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
++
++ brcmf_dbg(USB, "Enter\n");
++ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
++ brcmf_detach(&usb->dev);
++ return 0;
++}
++
++/*
++ * (re-) start the bus.
++ */
++static int brcmf_usb_resume(struct usb_interface *intf)
++{
++ struct usb_device *usb = interface_to_usbdev(intf);
++ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
++
++ brcmf_dbg(USB, "Enter\n");
++ return brcmf_usb_bus_setup(devinfo);
++}
++
++static int brcmf_usb_reset_resume(struct usb_interface *intf)
++{
++ struct usb_device *usb = interface_to_usbdev(intf);
++ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
++ brcmf_dbg(USB, "Enter\n");
++
++ return brcmf_fw_get_firmwares(&usb->dev, 0,
++ brcmf_usb_get_fwname(devinfo), NULL,
++ brcmf_usb_probe_phase2);
++}
++
++#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
++#define BRCMF_USB_DEVICE_ID_43143 0xbd1e
++#define BRCMF_USB_DEVICE_ID_43236 0xbd17
++#define BRCMF_USB_DEVICE_ID_43242 0xbd1f
++#define BRCMF_USB_DEVICE_ID_BCMFW 0x0bdc
++
++static struct usb_device_id brcmf_usb_devid_table[] = {
++ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
++ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
++ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
++ /* special entry for device with firmware loaded and running */
++ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
++ { }
++};
++
++MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
++MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
++MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
++MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
++
++static struct usb_driver brcmf_usbdrvr = {
++ .name = KBUILD_MODNAME,
++ .probe = brcmf_usb_probe,
++ .disconnect = brcmf_usb_disconnect,
++ .id_table = brcmf_usb_devid_table,
++ .suspend = brcmf_usb_suspend,
++ .resume = brcmf_usb_resume,
++ .reset_resume = brcmf_usb_reset_resume,
++ .supports_autosuspend = 1,
++ .disable_hub_initiated_lpm = 1,
++};
++
++static int brcmf_usb_reset_device(struct device *dev, void *notused)
++{
++ /* device past is the usb interface so we
++ * need to use parent here.
++ */
++ brcmf_dev_reset(dev->parent);
++ return 0;
++}
++
++void brcmf_usb_exit(void)
++{
++ struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver;
++ int ret;
++
++ brcmf_dbg(USB, "Enter\n");
++ ret = driver_for_each_device(drv, NULL, NULL,
++ brcmf_usb_reset_device);
++ usb_deregister(&brcmf_usbdrvr);
++}
++
++void brcmf_usb_register(void)
++{
++ brcmf_dbg(USB, "Enter\n");
++ usb_register(&brcmf_usbdrvr);
++}
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c 2015-05-06 12:05:42.000000000 -0500
+@@ -18,6 +18,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/etherdevice.h>
++#include <linux/module.h>
+ #include <net/cfg80211.h>
+ #include <net/netlink.h>
+
+@@ -190,6 +191,7 @@
+ .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+ .bitrates = wl_g_rates,
+ .n_bitrates = wl_g_rates_size,
++ .ht_cap = {IEEE80211_HT_CAP_SUP_WIDTH_20_40, true},
+ };
+
+ static struct ieee80211_supported_band __wl_band_5ghz_a = {
+@@ -219,9 +221,9 @@
+ */
+ REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+ /* IEEE 802.11a, channel 36..64 */
+- REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
++ REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
+ /* IEEE 802.11a, channel 100..165 */
+- REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
++ REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
+ };
+
+ static const u32 __wl_cipher_suites[] = {
+@@ -251,6 +253,10 @@
+ struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
+ };
+
++static int brcmf_roamoff;
++module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
++MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
++
+ /* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+@@ -335,6 +341,61 @@
+ return qdbm;
+ }
+
++static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
++ struct cfg80211_chan_def *ch)
++{
++ struct brcmu_chan ch_inf;
++ s32 primary_offset;
++
++ brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
++ ch->chan->center_freq, ch->center_freq1, ch->width);
++ ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
++ primary_offset = ch->center_freq1 - ch->chan->center_freq;
++ switch (ch->width) {
++ case NL80211_CHAN_WIDTH_20_NOHT:
++ case NL80211_CHAN_WIDTH_20:
++ ch_inf.bw = BRCMU_CHAN_BW_20;
++ WARN_ON(primary_offset != 0);
++ break;
++ case NL80211_CHAN_WIDTH_40:
++ ch_inf.bw = BRCMU_CHAN_BW_40;
++ if (primary_offset < 0)
++ ch_inf.sb = BRCMU_CHAN_SB_U;
++ else
++ ch_inf.sb = BRCMU_CHAN_SB_L;
++ break;
++ case NL80211_CHAN_WIDTH_80:
++ ch_inf.bw = BRCMU_CHAN_BW_80;
++ if (primary_offset < 0) {
++ if (primary_offset < -CH_10MHZ_APART)
++ ch_inf.sb = BRCMU_CHAN_SB_UU;
++ else
++ ch_inf.sb = BRCMU_CHAN_SB_UL;
++ } else {
++ if (primary_offset > CH_10MHZ_APART)
++ ch_inf.sb = BRCMU_CHAN_SB_LL;
++ else
++ ch_inf.sb = BRCMU_CHAN_SB_LU;
++ }
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ }
++ switch (ch->chan->band) {
++ case IEEE80211_BAND_2GHZ:
++ ch_inf.band = BRCMU_CHAN_BAND_2G;
++ break;
++ case IEEE80211_BAND_5GHZ:
++ ch_inf.band = BRCMU_CHAN_BAND_5G;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ }
++ d11inf->encchspec(&ch_inf);
++
++ return ch_inf.chspec;
++}
++
+ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
+ struct ieee80211_channel *ch)
+ {
+@@ -351,13 +412,11 @@
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
++const struct brcmf_tlv *
++brcmf_parse_tlvs(const void *buf, int buflen, uint key)
+ {
+- struct brcmf_tlv *elt;
+- int totlen;
+-
+- elt = (struct brcmf_tlv *)buf;
+- totlen = buflen;
++ const struct brcmf_tlv *elt = buf;
++ int totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+@@ -378,8 +437,8 @@
+ * not update the tlvs buffer pointer/length.
+ */
+ static bool
+-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+- u8 *oui, u32 oui_len, u8 type)
++brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
++ const u8 *oui, u32 oui_len, u8 type)
+ {
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+@@ -401,12 +460,12 @@
+ }
+
+ static struct brcmf_vs_tlv *
+-brcmf_find_wpaie(u8 *parse, u32 len)
++brcmf_find_wpaie(const u8 *parse, u32 len)
+ {
+- struct brcmf_tlv *ie;
++ const struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
++ if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+@@ -414,9 +473,9 @@
+ }
+
+ static struct brcmf_vs_tlv *
+-brcmf_find_wpsie(u8 *parse, u32 len)
++brcmf_find_wpsie(const u8 *parse, u32 len)
+ {
+- struct brcmf_tlv *ie;
++ const struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+@@ -491,6 +550,19 @@
+ return err;
+ }
+
++static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
++{
++ enum nl80211_iftype iftype;
++
++ iftype = vif->wdev.iftype;
++ return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO;
++}
++
++static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
++{
++ return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
++}
++
+ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
+ const char *name,
+ enum nl80211_iftype type,
+@@ -569,6 +641,9 @@
+ if (err)
+ brcmf_err("Scan abort failed\n");
+ }
++
++ brcmf_set_mpc(ifp, 1);
++
+ /*
+ * e-scan can be initiated by scheduled scan
+ * which takes precedence.
+@@ -578,12 +653,10 @@
+ cfg->sched_escan = false;
+ if (!aborted)
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+- brcmf_set_mpc(ifp, 1);
+ } else if (scan_request) {
+ brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
+ aborted ? "Aborted" : "Done");
+ cfg80211_scan_done(scan_request, aborted);
+- brcmf_set_mpc(ifp, 1);
+ }
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+ brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
+@@ -651,7 +724,6 @@
+ type);
+ return -EOPNOTSUPP;
+ case NL80211_IFTYPE_ADHOC:
+- vif->mode = WL_MODE_IBSS;
+ infra = 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+@@ -667,12 +739,10 @@
+ */
+ return 0;
+ }
+- vif->mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+- vif->mode = WL_MODE_AP;
+ ap = 1;
+ break;
+ default:
+@@ -696,7 +766,7 @@
+ err = -EAGAIN;
+ goto done;
+ }
+- brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ?
++ brcmf_dbg(INFO, "IF Type = %s\n", brcmf_is_ibssmode(vif) ?
+ "Adhoc" : "Infra");
+ }
+ ndev->ieee80211_ptr->iftype = type;
+@@ -1222,8 +1292,8 @@
+ params->chandef.chan->center_freq);
+ if (params->channel_fixed) {
+ /* adding chanspec */
+- chanspec = channel_to_chanspec(&cfg->d11inf,
+- params->chandef.chan);
++ chanspec = chandef_to_chanspec(&cfg->d11inf,
++ &params->chandef);
+ join_params.params_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
+ join_params.params_le.chanspec_num = cpu_to_le32(1);
+@@ -1340,13 +1410,14 @@
+ }
+
+ static s32
+-brcmf_set_set_cipher(struct net_device *ndev,
+- struct cfg80211_connect_params *sme)
++brcmf_set_wsec_mode(struct net_device *ndev,
++ struct cfg80211_connect_params *sme, bool mfp)
+ {
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+ struct brcmf_cfg80211_security *sec;
+ s32 pval = 0;
+ s32 gval = 0;
++ s32 wsec;
+ s32 err = 0;
+
+ if (sme->crypto.n_ciphers_pairwise) {
+@@ -1398,7 +1469,12 @@
+ if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
+ sme->privacy)
+ pval = AES_ENABLED;
+- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
++
++ if (mfp)
++ wsec = pval | gval | MFP_CAPABLE;
++ else
++ wsec = pval | gval;
++ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
+ if (err) {
+ brcmf_err("error (%d)\n", err);
+ return err;
+@@ -1562,13 +1638,12 @@
+ struct ieee80211_channel *chan = sme->channel;
+ struct brcmf_join_params join_params;
+ size_t join_params_size;
+- struct brcmf_tlv *rsn_ie;
+- struct brcmf_vs_tlv *wpa_ie;
+- void *ie;
++ const struct brcmf_tlv *rsn_ie;
++ const struct brcmf_vs_tlv *wpa_ie;
++ const void *ie;
+ u32 ie_len;
+ struct brcmf_ext_join_params_le *ext_join_params;
+ u16 chanspec;
+-
+ s32 err = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+@@ -1591,7 +1666,8 @@
+ ie_len = wpa_ie->len + TLV_HDR_LEN;
+ } else {
+ /* find the RSN_IE */
+- rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
++ rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
++ sme->ie_len,
+ WLAN_EID_RSN);
+ if (rsn_ie) {
+ ie = rsn_ie;
+@@ -1636,7 +1712,7 @@
+ goto done;
+ }
+
+- err = brcmf_set_set_cipher(ndev, sme);
++ err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED);
+ if (err) {
+ brcmf_err("wl_set_set_cipher failed (%d)\n", err);
+ goto done;
+@@ -1678,22 +1754,9 @@
+ ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+ memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
+ profile->ssid.SSID_len);
+- /*increase dwell time to receive probe response or detect Beacon
+- * from target AP at a noisy air only during connect command
+- */
+- ext_join_params->scan_le.active_time =
+- cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+- ext_join_params->scan_le.passive_time =
+- cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
++
+ /* Set up join scan parameters */
+ ext_join_params->scan_le.scan_type = -1;
+- /* to sync with presence period of VSDB GO.
+- * Send probe request more frequently. Probe request will be stopped
+- * when it gets probe response from target AP/GO.
+- */
+- ext_join_params->scan_le.nprobes =
+- cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+- BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+ ext_join_params->scan_le.home_time = cpu_to_le32(-1);
+
+ if (sme->bssid)
+@@ -1706,6 +1769,25 @@
+
+ ext_join_params->assoc_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
++ /* Increase dwell time to receive probe response or detect
++ * beacon from target AP at a noisy air only during connect
++ * command.
++ */
++ ext_join_params->scan_le.active_time =
++ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
++ ext_join_params->scan_le.passive_time =
++ cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
++ /* To sync with presence period of VSDB GO send probe request
++ * more frequently. Probe request will be stopped when it gets
++ * probe response from target AP/GO.
++ */
++ ext_join_params->scan_le.nprobes =
++ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
++ BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
++ } else {
++ ext_join_params->scan_le.active_time = cpu_to_le32(-1);
++ ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
++ ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
+ }
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
+@@ -1913,7 +1995,7 @@
+ brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
+ memcpy(key.data, params->key, key.len);
+
+- if ((ifp->vif->mode != WL_MODE_AP) &&
++ if (!brcmf_is_apmode(ifp->vif) &&
+ (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+ brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+@@ -1981,7 +2063,9 @@
+ if (!check_vif_up(ifp->vif))
+ return -EIO;
+
+- if (mac_addr) {
++ if (mac_addr &&
++ (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
++ (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
+ brcmf_dbg(TRACE, "Exit");
+ return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
+ }
+@@ -2010,7 +2094,7 @@
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+- if (ifp->vif->mode != WL_MODE_AP) {
++ if (!brcmf_is_apmode(ifp->vif)) {
+ brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+@@ -2164,12 +2248,14 @@
+ s32 err = 0;
+ u8 *bssid = profile->bssid;
+ struct brcmf_sta_info_le sta_info_le;
++ u32 beacon_period;
++ u32 dtim_period;
+
+ brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
+ if (!check_vif_up(ifp->vif))
+ return -EIO;
+
+- if (ifp->vif->mode == WL_MODE_AP) {
++ if (brcmf_is_apmode(ifp->vif)) {
+ memcpy(&sta_info_le, mac, ETH_ALEN);
+ err = brcmf_fil_iovar_data_get(ifp, "sta_info",
+ &sta_info_le,
+@@ -2186,7 +2272,7 @@
+ }
+ brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
+ sinfo->inactive_time, sinfo->connected_time);
+- } else if (ifp->vif->mode == WL_MODE_BSS) {
++ } else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) {
+ if (memcmp(mac, bssid, ETH_ALEN)) {
+ brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
+ mac, bssid);
+@@ -2218,6 +2304,30 @@
+ sinfo->signal = rssi;
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+ }
++ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
++ &beacon_period);
++ if (err) {
++ brcmf_err("Could not get beacon period (%d)\n",
++ err);
++ goto done;
++ } else {
++ sinfo->bss_param.beacon_interval =
++ beacon_period;
++ brcmf_dbg(CONN, "Beacon peroid %d\n",
++ beacon_period);
++ }
++ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
++ &dtim_period);
++ if (err) {
++ brcmf_err("Could not get DTIM period (%d)\n",
++ err);
++ goto done;
++ } else {
++ sinfo->bss_param.dtim_period = dtim_period;
++ brcmf_dbg(CONN, "DTIM peroid %d\n",
++ dtim_period);
++ }
++ sinfo->filled |= STATION_INFO_BSS_PARAM;
+ }
+ } else
+ err = -EPERM;
+@@ -2444,18 +2554,13 @@
+ return err;
+ }
+
+-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
+-{
+- return vif->mode == WL_MODE_IBSS;
+-}
+-
+ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp)
+ {
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
+ struct brcmf_bss_info_le *bi;
+ struct brcmf_ssid *ssid;
+- struct brcmf_tlv *tim;
++ const struct brcmf_tlv *tim;
+ u16 beacon_interval;
+ u8 dtim_period;
+ size_t ie_len;
+@@ -3075,7 +3180,7 @@
+ }
+
+ if (!request->n_ssids || !request->n_match_sets) {
+- brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
++ brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
+ request->n_ssids);
+ return -EINVAL;
+ }
+@@ -3220,8 +3325,9 @@
+ }
+
+ static s32
+-brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
+- bool is_rsn_ie)
++brcmf_configure_wpaie(struct net_device *ndev,
++ const struct brcmf_vs_tlv *wpa_ie,
++ bool is_rsn_ie)
+ {
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ u32 auth = 0; /* d11 open authentication */
+@@ -3684,42 +3790,26 @@
+ }
+
+ static s32
+-brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
+- struct brcmf_if *ifp,
+- struct ieee80211_channel *channel)
+-{
+- u16 chanspec;
+- s32 err;
+-
+- brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
+- channel->center_freq);
+-
+- chanspec = channel_to_chanspec(&cfg->d11inf, channel);
+- err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+-
+- return err;
+-}
+-
+-static s32
+ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_ap_settings *settings)
+ {
+ s32 ie_offset;
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+- struct brcmf_tlv *ssid_ie;
++ const struct brcmf_tlv *ssid_ie;
+ struct brcmf_ssid_le ssid_le;
+ s32 err = -EPERM;
+- struct brcmf_tlv *rsn_ie;
+- struct brcmf_vs_tlv *wpa_ie;
++ const struct brcmf_tlv *rsn_ie;
++ const struct brcmf_vs_tlv *wpa_ie;
+ struct brcmf_join_params join_params;
+ enum nl80211_iftype dev_role;
+ struct brcmf_fil_bss_enable_le bss_enable;
++ u16 chanspec;
+
+- brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
+- cfg80211_get_chandef_type(&settings->chandef),
+- settings->beacon_interval,
+- settings->dtim_period);
++ brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
++ settings->chandef.chan->hw_value,
++ settings->chandef.center_freq1, settings->chandef.width,
++ settings->beacon_interval, settings->dtim_period);
+ brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
+ settings->ssid, settings->ssid_len, settings->auth_type,
+ settings->inactivity_timeout);
+@@ -3776,9 +3866,10 @@
+
+ brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
+
+- err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan);
++ chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
++ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+ if (err < 0) {
+- brcmf_err("Set Channel failed, %d\n", err);
++ brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
+ goto exit;
+ }
+
+@@ -4220,32 +4311,6 @@
+ CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
+ };
+
+-static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
+-{
+- switch (type) {
+- case NL80211_IFTYPE_AP_VLAN:
+- case NL80211_IFTYPE_WDS:
+- case NL80211_IFTYPE_MONITOR:
+- case NL80211_IFTYPE_MESH_POINT:
+- return -ENOTSUPP;
+- case NL80211_IFTYPE_ADHOC:
+- return WL_MODE_IBSS;
+- case NL80211_IFTYPE_STATION:
+- case NL80211_IFTYPE_P2P_CLIENT:
+- return WL_MODE_BSS;
+- case NL80211_IFTYPE_AP:
+- case NL80211_IFTYPE_P2P_GO:
+- return WL_MODE_AP;
+- case NL80211_IFTYPE_P2P_DEVICE:
+- return WL_MODE_P2P;
+- case NL80211_IFTYPE_UNSPECIFIED:
+- default:
+- break;
+- }
+-
+- return -EINVAL;
+-}
+-
+ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
+ {
+ /* scheduled scan settings */
+@@ -4340,6 +4405,8 @@
+ WIPHY_FLAG_OFFCHAN_TX |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_SUPPORTS_TDLS;
++ if (!brcmf_roamoff)
++ wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+ wiphy->mgmt_stypes = brcmf_txrx_stypes;
+ wiphy->max_remain_on_channel_duration = 5000;
+ brcmf_wiphy_pno_params(wiphy);
+@@ -4370,7 +4437,6 @@
+ vif->wdev.wiphy = cfg->wiphy;
+ vif->wdev.iftype = type;
+
+- vif->mode = brcmf_nl80211_iftype_to_mode(type);
+ vif->pm_block = pm_block;
+ vif->roam_off = -1;
+
+@@ -4416,7 +4482,9 @@
+ u32 event = e->event_code;
+ u16 flags = e->flags;
+
+- if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
++ if ((event == BRCMF_E_DEAUTH) || (event == BRCMF_E_DEAUTH_IND) ||
++ (event == BRCMF_E_DISASSOC_IND) ||
++ ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
+ brcmf_dbg(CONN, "Processing link down\n");
+ return true;
+ }
+@@ -4658,16 +4726,18 @@
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct net_device *ndev = ifp->ndev;
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
++ struct ieee80211_channel *chan;
+ s32 err = 0;
+
+- if (ifp->vif->mode == WL_MODE_AP) {
++ if (brcmf_is_apmode(ifp->vif)) {
+ err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
+ } else if (brcmf_is_linkup(e)) {
+ brcmf_dbg(CONN, "Linkup\n");
+ if (brcmf_is_ibssmode(ifp->vif)) {
++ chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
+ memcpy(profile->bssid, e->addr, ETH_ALEN);
+ wl_inform_ibss(cfg, ndev, e->addr);
+- cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
++ cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+@@ -4678,10 +4748,6 @@
+ brcmf_dbg(CONN, "Linkdown\n");
+ if (!brcmf_is_ibssmode(ifp->vif)) {
+ brcmf_bss_connect_done(cfg, ndev, e, false);
+- if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
+- &ifp->vif->sme_state))
+- cfg80211_disconnected(ndev, 0, NULL, 0,
+- GFP_KERNEL);
+ }
+ brcmf_link_down(ifp->vif);
+ brcmf_init_prof(ndev_to_prof(ndev));
+@@ -4875,11 +4941,8 @@
+
+ cfg->scan_request = NULL;
+ cfg->pwr_save = true;
+- cfg->roam_on = true; /* roam on & off switch.
+- we enable roam per default */
+- cfg->active_scan = true; /* we do active scan for
+- specific scan per default */
+- cfg->dongle_up = false; /* dongle is not up yet */
++ cfg->active_scan = true; /* we do active scan per default */
++ cfg->dongle_up = false; /* dongle is not up yet */
+ err = brcmf_init_priv_mem(cfg);
+ if (err)
+ return err;
+@@ -4904,6 +4967,30 @@
+ mutex_init(&event->vif_event_lock);
+ }
+
++static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
++{
++ struct brcmf_fil_bwcap_le band_bwcap;
++ u32 val;
++ int err;
++
++ /* verify support for bw_cap command */
++ val = WLC_BAND_5G;
++ err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
++
++ if (!err) {
++ /* only set 2G bandwidth using bw_cap command */
++ band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
++ band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
++ err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
++ sizeof(band_bwcap));
++ } else {
++ brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
++ val = WLC_N_BW_40ALL;
++ err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
++ }
++ return err;
++}
++
+ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+ struct device *busdev)
+ {
+@@ -4961,6 +5048,17 @@
+ goto cfg80211_p2p_attach_out;
+ }
+
++ /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
++ * setup 40MHz in 2GHz band and enable OBSS scanning.
++ */
++ if (wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap &
++ IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
++ err = brcmf_enable_bw40_2g(ifp);
++ if (!err)
++ err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
++ BRCMF_OBSS_COEX_AUTO);
++ }
++
+ err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
+ if (err) {
+ brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
+@@ -4999,7 +5097,7 @@
+ }
+
+ static s32
+-brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
++brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout)
+ {
+ s32 err = 0;
+ __le32 roamtrigger[2];
+@@ -5009,7 +5107,7 @@
+ * Setup timeout if Beacons are lost and roam is
+ * off to report link down
+ */
+- if (roamvar) {
++ if (brcmf_roamoff) {
+ err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
+ if (err) {
+ brcmf_err("bcn_timeout error (%d)\n", err);
+@@ -5021,8 +5119,9 @@
+ * Enable/Disable built-in roaming to allow supplicant
+ * to take care of roaming
+ */
+- brcmf_dbg(INFO, "Internal Roaming = %s\n", roamvar ? "Off" : "On");
+- err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
++ brcmf_dbg(INFO, "Internal Roaming = %s\n",
++ brcmf_roamoff ? "Off" : "On");
++ err = brcmf_fil_iovar_int_set(ifp, "roam_off", !!(brcmf_roamoff));
+ if (err) {
+ brcmf_err("roam_off error (%d)\n", err);
+ goto dongle_rom_out;
+@@ -5148,6 +5247,9 @@
+ if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
+ ch.bw == BRCMU_CHAN_BW_40)
+ continue;
++ if (!(bw_cap[band] & WLC_BW_80MHZ_BIT) &&
++ ch.bw == BRCMU_CHAN_BW_80)
++ continue;
+ update = false;
+ for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
+ if (band_chan_arr[j].hw_value == ch.chnum) {
+@@ -5164,13 +5266,13 @@
+ ieee80211_channel_to_frequency(ch.chnum, band);
+ band_chan_arr[index].hw_value = ch.chnum;
+
+- brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
+- ch.chnum, band_chan_arr[index].center_freq,
+- ch.bw, ch.sb);
+- if (ch.bw == BRCMU_CHAN_BW_40) {
+- /* assuming the order is HT20, HT40 Upper,
+- * HT40 lower from chanspecs
+- */
++ /* assuming the chanspecs order is HT20,
++ * HT40 upper, HT40 lower, and VHT80.
++ */
++ if (ch.bw == BRCMU_CHAN_BW_80) {
++ band_chan_arr[index].flags &=
++ ~IEEE80211_CHAN_NO_80MHZ;
++ } else if (ch.bw == BRCMU_CHAN_BW_40) {
+ ht40_flag = band_chan_arr[index].flags &
+ IEEE80211_CHAN_NO_HT40;
+ if (ch.sb == BRCMU_CHAN_SB_U) {
+@@ -5191,8 +5293,13 @@
+ IEEE80211_CHAN_NO_HT40MINUS;
+ }
+ } else {
++ /* disable other bandwidths for now as mentioned
++ * order assure they are enabled for subsequent
++ * chanspecs.
++ */
+ band_chan_arr[index].flags =
+- IEEE80211_CHAN_NO_HT40;
++ IEEE80211_CHAN_NO_HT40 |
++ IEEE80211_CHAN_NO_80MHZ;
+ ch.bw = BRCMU_CHAN_BW_20;
+ cfg->d11inf.encchspec(&ch);
+ channel = ch.chspec;
+@@ -5259,14 +5366,66 @@
+ }
+ }
+
++static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
++ u32 bw_cap[2], u32 nchain)
++{
++ band->ht_cap.ht_supported = true;
++ if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
++ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
++ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
++ }
++ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
++ band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
++ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
++ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
++ memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
++ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
++}
++
++static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
++{
++ u16 mcs_map;
++ int i;
++
++ for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
++ mcs_map = (mcs_map << 2) | supp;
++
++ return cpu_to_le16(mcs_map);
++}
++
++static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
++ u32 bw_cap[2], u32 nchain)
++{
++ __le16 mcs_map;
++
++ /* not allowed in 2.4G band */
++ if (band->band == IEEE80211_BAND_2GHZ)
++ return;
++
++ band->vht_cap.vht_supported = true;
++ /* 80MHz is mandatory */
++ band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
++ if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
++ band->vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
++ band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
++ }
++ /* all support 256-QAM */
++ mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
++ band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
++ band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
++}
++
+ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
+ {
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct wiphy *wiphy;
+ s32 phy_list;
+ u32 band_list[3];
+- u32 nmode;
++ u32 nmode = 0;
++ u32 vhtmode = 0;
+ u32 bw_cap[2] = { 0, 0 };
++ u32 rxchain;
++ u32 nchain;
+ s8 phy;
+ s32 err;
+ u32 nband;
+@@ -5294,14 +5453,26 @@
+ brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
+ band_list[0], band_list[1], band_list[2]);
+
++ (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
+ err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
+ if (err) {
+ brcmf_err("nmode error (%d)\n", err);
+ } else {
+ brcmf_get_bwcap(ifp, bw_cap);
+ }
+- brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
+- bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
++ brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
++ nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
++ bw_cap[IEEE80211_BAND_5GHZ]);
++
++ err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
++ if (err) {
++ brcmf_err("rxchain error (%d)\n", err);
++ nchain = 1;
++ } else {
++ for (nchain = 0; rxchain; nchain++)
++ rxchain = rxchain & (rxchain - 1);
++ }
++ brcmf_dbg(INFO, "nchain=%d\n", nchain);
+
+ err = brcmf_construct_reginfo(cfg, bw_cap);
+ if (err) {
+@@ -5322,20 +5493,10 @@
+ else
+ continue;
+
+- if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+- band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+- band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+- }
+- band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+- band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+- band->ht_cap.ht_supported = true;
+- band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+- band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+- /* An HT shall support all EQM rates for one spatial
+- * stream
+- */
+- band->ht_cap.mcs.rx_mask[0] = 0xff;
+- band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
++ if (nmode)
++ brcmf_update_ht_cap(band, bw_cap, nchain);
++ if (vhtmode)
++ brcmf_update_vht_cap(band, bw_cap, nchain);
+ bands[band->band] = band;
+ }
+
+@@ -5381,7 +5542,7 @@
+ brcmf_dbg(INFO, "power save set to %s\n",
+ (power_mode ? "enabled" : "disabled"));
+
+- err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT);
++ err = brcmf_dongle_roam(ifp, WL_BEACON_TIMEOUT);
+ if (err)
+ goto default_conf_out;
+ err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h 2015-05-06 12:05:42.000000000 -0500
+@@ -89,21 +89,6 @@
+ BRCMF_SCAN_STATUS_SUPPRESS,
+ };
+
+-/**
+- * enum wl_mode - driver mode of virtual interface.
+- *
+- * @WL_MODE_BSS: connects to BSS.
+- * @WL_MODE_IBSS: operate as ad-hoc.
+- * @WL_MODE_AP: operate as access-point.
+- * @WL_MODE_P2P: provide P2P discovery.
+- */
+-enum wl_mode {
+- WL_MODE_BSS,
+- WL_MODE_IBSS,
+- WL_MODE_AP,
+- WL_MODE_P2P
+-};
+-
+ /* dongle configuration */
+ struct brcmf_cfg80211_conf {
+ u32 frag_threshold;
+@@ -193,7 +178,6 @@
+ * @ifp: lower layer interface pointer
+ * @wdev: wireless device.
+ * @profile: profile information.
+- * @mode: operating mode.
+ * @roam_off: roaming state.
+ * @sme_state: SME state using enum brcmf_vif_status bits.
+ * @pm_block: power-management blocked.
+@@ -204,7 +188,6 @@
+ struct brcmf_if *ifp;
+ struct wireless_dev wdev;
+ struct brcmf_cfg80211_profile profile;
+- s32 mode;
+ s32 roam_off;
+ unsigned long sme_state;
+ bool pm_block;
+@@ -402,7 +385,6 @@
+ bool ibss_starter;
+ bool pwr_save;
+ bool dongle_up;
+- bool roam_on;
+ bool scan_tried;
+ u8 *dcmd_buf;
+ u8 *extra_buf;
+@@ -491,7 +473,8 @@
+ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+ const u8 *vndr_ie_buf, u32 vndr_ie_len);
+ s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
+-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
++const struct brcmf_tlv *
++brcmf_parse_tlvs(const void *buf, int buflen, uint key);
+ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
+ struct ieee80211_channel *ch);
+ u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c linux-openelec/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c 2015-05-06 12:05:42.000000000 -0500
+@@ -897,7 +897,8 @@
+ return result;
+ }
+
+-static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct brcms_info *wl = hw->priv;
+ int ret;
+@@ -1092,12 +1093,6 @@
+ * Attach to the WL device identified by vendor and device parameters.
+ * regs is a host accessible memory address pointing to WL device registers.
+ *
+- * brcms_attach is not defined as static because in the case where no bus
+- * is defined, wl_attach will never be called, and thus, gcc will issue
+- * a warning that this function is defined but not used if we declare
+- * it as static.
+- *
+- *
+ * is called in brcms_bcma_probe() context, therefore no locking required.
+ */
+ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmsmac/main.c linux-openelec/drivers/net/wireless/brcm80211/brcmsmac/main.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmsmac/main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmsmac/main.c 2015-05-06 12:05:42.000000000 -0500
+@@ -4870,14 +4870,11 @@
+ /*
+ * low level detach
+ */
+-static int brcms_b_detach(struct brcms_c_info *wlc)
++static void brcms_b_detach(struct brcms_c_info *wlc)
+ {
+ uint i;
+ struct brcms_hw_band *band;
+ struct brcms_hardware *wlc_hw = wlc->hw;
+- int callbacks;
+-
+- callbacks = 0;
+
+ brcms_b_detach_dmapio(wlc_hw);
+
+@@ -4900,9 +4897,6 @@
+ ai_detach(wlc_hw->sih);
+ wlc_hw->sih = NULL;
+ }
+-
+- return callbacks;
+-
+ }
+
+ /*
+@@ -4917,14 +4911,15 @@
+ */
+ uint brcms_c_detach(struct brcms_c_info *wlc)
+ {
+- uint callbacks = 0;
++ uint callbacks;
+
+ if (wlc == NULL)
+ return 0;
+
+- callbacks += brcms_b_detach(wlc);
++ brcms_b_detach(wlc);
+
+ /* delete software timers */
++ callbacks = 0;
+ if (!brcms_c_radio_monitor_stop(wlc))
+ callbacks++;
+
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/brcmutil/d11.c linux-openelec/drivers/net/wireless/brcm80211/brcmutil/d11.c
+--- linux-3.14.36/drivers/net/wireless/brcm80211/brcmutil/d11.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/brcmutil/d11.c 2015-05-06 12:05:42.000000000 -0500
+@@ -21,19 +21,46 @@
+ #include <brcmu_wifi.h>
+ #include <brcmu_d11.h>
+
+-static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
++static u16 d11n_sb(enum brcmu_chan_sb sb)
+ {
+- ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
++ switch (sb) {
++ case BRCMU_CHAN_SB_NONE:
++ return BRCMU_CHSPEC_D11N_SB_N;
++ case BRCMU_CHAN_SB_L:
++ return BRCMU_CHSPEC_D11N_SB_L;
++ case BRCMU_CHAN_SB_U:
++ return BRCMU_CHSPEC_D11N_SB_U;
++ default:
++ WARN_ON(1);
++ }
++ return 0;
++}
+
+- switch (ch->bw) {
++static u16 d11n_bw(enum brcmu_chan_bw bw)
++{
++ switch (bw) {
+ case BRCMU_CHAN_BW_20:
+- ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N;
+- break;
++ return BRCMU_CHSPEC_D11N_BW_20;
+ case BRCMU_CHAN_BW_40:
++ return BRCMU_CHSPEC_D11N_BW_40;
+ default:
+- WARN_ON_ONCE(1);
+- break;
++ WARN_ON(1);
+ }
++ return 0;
++}
++
++static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
++{
++ if (ch->bw == BRCMU_CHAN_BW_20)
++ ch->sb = BRCMU_CHAN_SB_NONE;
++
++ ch->chspec = 0;
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
++ BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_SB_MASK,
++ 0, d11n_sb(ch->sb));
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_BW_MASK,
++ 0, d11n_bw(ch->bw));
+
+ if (ch->chnum <= CH_MAX_2G_CHANNEL)
+ ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G;
+@@ -41,23 +68,34 @@
+ ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G;
+ }
+
+-static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
++static u16 d11ac_bw(enum brcmu_chan_bw bw)
+ {
+- ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
+-
+- switch (ch->bw) {
++ switch (bw) {
+ case BRCMU_CHAN_BW_20:
+- ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20;
+- break;
++ return BRCMU_CHSPEC_D11AC_BW_20;
+ case BRCMU_CHAN_BW_40:
++ return BRCMU_CHSPEC_D11AC_BW_40;
+ case BRCMU_CHAN_BW_80:
+- case BRCMU_CHAN_BW_80P80:
+- case BRCMU_CHAN_BW_160:
++ return BRCMU_CHSPEC_D11AC_BW_80;
+ default:
+- WARN_ON_ONCE(1);
+- break;
++ WARN_ON(1);
+ }
++ return 0;
++}
+
++static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
++{
++ if (ch->bw == BRCMU_CHAN_BW_20 || ch->sb == BRCMU_CHAN_SB_NONE)
++ ch->sb = BRCMU_CHAN_SB_L;
++
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
++ BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
++ BRCMU_CHSPEC_D11AC_SB_SHIFT, ch->sb);
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_BW_MASK,
++ 0, d11ac_bw(ch->bw));
++
++ ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK;
+ if (ch->chnum <= CH_MAX_2G_CHANNEL)
+ ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
+ else
+@@ -73,6 +111,7 @@
+ switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
+ case BRCMU_CHSPEC_D11N_BW_20:
+ ch->bw = BRCMU_CHAN_BW_20;
++ ch->sb = BRCMU_CHAN_SB_NONE;
+ break;
+ case BRCMU_CHSPEC_D11N_BW_40:
+ ch->bw = BRCMU_CHAN_BW_40;
+@@ -112,6 +151,7 @@
+ switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
+ case BRCMU_CHSPEC_D11AC_BW_20:
+ ch->bw = BRCMU_CHAN_BW_20;
++ ch->sb = BRCMU_CHAN_SB_NONE;
+ break;
+ case BRCMU_CHSPEC_D11AC_BW_40:
+ ch->bw = BRCMU_CHAN_BW_40;
+@@ -128,6 +168,25 @@
+ break;
+ case BRCMU_CHSPEC_D11AC_BW_80:
+ ch->bw = BRCMU_CHAN_BW_80;
++ ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
++ BRCMU_CHSPEC_D11AC_SB_SHIFT);
++ switch (ch->sb) {
++ case BRCMU_CHAN_SB_LL:
++ ch->chnum -= CH_30MHZ_APART;
++ break;
++ case BRCMU_CHAN_SB_LU:
++ ch->chnum -= CH_10MHZ_APART;
++ break;
++ case BRCMU_CHAN_SB_UL:
++ ch->chnum += CH_10MHZ_APART;
++ break;
++ case BRCMU_CHAN_SB_UU:
++ ch->chnum += CH_30MHZ_APART;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ break;
++ }
+ break;
+ case BRCMU_CHSPEC_D11AC_BW_8080:
+ case BRCMU_CHSPEC_D11AC_BW_160:
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h linux-openelec/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h 2015-05-06 12:05:42.000000000 -0500
+@@ -43,5 +43,6 @@
+ #define BCM4335_CHIP_ID 0x4335
+ #define BCM43362_CHIP_ID 43362
+ #define BCM4339_CHIP_ID 0x4339
++#define BCM4354_CHIP_ID 0x4354
+
+ #endif /* _BRCM_HW_IDS_H_ */
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/include/brcmu_d11.h linux-openelec/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/include/brcmu_d11.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/include/brcmu_d11.h 2015-05-06 12:05:42.000000000 -0500
+@@ -108,13 +108,7 @@
+ };
+
+ enum brcmu_chan_sb {
+- BRCMU_CHAN_SB_NONE = 0,
+- BRCMU_CHAN_SB_L,
+- BRCMU_CHAN_SB_U,
+- BRCMU_CHAN_SB_LL,
+- BRCMU_CHAN_SB_LU,
+- BRCMU_CHAN_SB_UL,
+- BRCMU_CHAN_SB_UU,
++ BRCMU_CHAN_SB_NONE = -1,
+ BRCMU_CHAN_SB_LLL,
+ BRCMU_CHAN_SB_LLU,
+ BRCMU_CHAN_SB_LUL,
+@@ -123,6 +117,12 @@
+ BRCMU_CHAN_SB_ULU,
+ BRCMU_CHAN_SB_UUL,
+ BRCMU_CHAN_SB_UUU,
++ BRCMU_CHAN_SB_L = BRCMU_CHAN_SB_LLL,
++ BRCMU_CHAN_SB_U = BRCMU_CHAN_SB_LLU,
++ BRCMU_CHAN_SB_LL = BRCMU_CHAN_SB_LLL,
++ BRCMU_CHAN_SB_LU = BRCMU_CHAN_SB_LLU,
++ BRCMU_CHAN_SB_UL = BRCMU_CHAN_SB_LUL,
++ BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
+ };
+
+ struct brcmu_chan {
+diff -Nur linux-3.14.36/drivers/net/wireless/brcm80211/include/brcmu_wifi.h linux-openelec/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+--- linux-3.14.36/drivers/net/wireless/brcm80211/include/brcmu_wifi.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/brcm80211/include/brcmu_wifi.h 2015-05-06 12:05:42.000000000 -0500
+@@ -29,6 +29,7 @@
+ #define CH_UPPER_SB 0x01
+ #define CH_LOWER_SB 0x02
+ #define CH_EWA_VALID 0x04
++#define CH_30MHZ_APART 6
+ #define CH_20MHZ_APART 4
+ #define CH_10MHZ_APART 2
+ #define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
+@@ -217,6 +218,9 @@
+ #define WSEC_SWFLAG 0x0008
+ /* to go into transition mode without setting wep */
+ #define SES_OW_ENABLED 0x0040
++/* MFP */
++#define MFP_CAPABLE 0x0200
++#define MFP_REQUIRED 0x0400
+
+ /* WPA authentication mode bitvec */
+ #define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+diff -Nur linux-3.14.36/drivers/net/wireless/cw1200/sta.c linux-openelec/drivers/net/wireless/cw1200/sta.c
+--- linux-3.14.36/drivers/net/wireless/cw1200/sta.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/cw1200/sta.c 2015-05-06 12:05:42.000000000 -0500
+@@ -936,7 +936,8 @@
+ return ret;
+ }
+
+-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct cw1200_common *priv = hw->priv;
+
+diff -Nur linux-3.14.36/drivers/net/wireless/cw1200/sta.h linux-openelec/drivers/net/wireless/cw1200/sta.h
+--- linux-3.14.36/drivers/net/wireless/cw1200/sta.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/cw1200/sta.h 2015-05-06 12:05:42.000000000 -0500
+@@ -40,7 +40,8 @@
+
+ int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+
+-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
++void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop);
+
+ u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+diff -Nur linux-3.14.36/drivers/net/wireless/iwlegacy/common.c linux-openelec/drivers/net/wireless/iwlegacy/common.c
+--- linux-3.14.36/drivers/net/wireless/iwlegacy/common.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/iwlegacy/common.c 2015-05-06 12:05:42.000000000 -0500
+@@ -4701,7 +4701,8 @@
+ }
+ EXPORT_SYMBOL(il_mac_change_interface);
+
+-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct il_priv *il = hw->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+diff -Nur linux-3.14.36/drivers/net/wireless/iwlegacy/common.h linux-openelec/drivers/net/wireless/iwlegacy/common.h
+--- linux-3.14.36/drivers/net/wireless/iwlegacy/common.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/iwlegacy/common.h 2015-05-06 12:05:42.000000000 -0500
+@@ -1722,7 +1722,8 @@
+ struct ieee80211_vif *vif);
+ int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
+-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
++void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop);
+ int il_alloc_txq_mem(struct il_priv *il);
+ void il_free_txq_mem(struct il_priv *il);
+
+diff -Nur linux-3.14.36/drivers/net/wireless/iwlwifi/dvm/mac80211.c linux-openelec/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+--- linux-3.14.36/drivers/net/wireless/iwlwifi/dvm/mac80211.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/iwlwifi/dvm/mac80211.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1091,7 +1091,8 @@
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+ }
+
+-static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+diff -Nur linux-3.14.36/drivers/net/wireless/libertas/cfg.c linux-openelec/drivers/net/wireless/libertas/cfg.c
+--- linux-3.14.36/drivers/net/wireless/libertas/cfg.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/libertas/cfg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1766,7 +1766,8 @@
+ memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
+ priv->wdev->ssid_len = params->ssid_len;
+
+- cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
++ GFP_KERNEL);
+
+ /* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
+ priv->connect_status = LBS_CONNECTED;
+diff -Nur linux-3.14.36/drivers/net/wireless/mac80211_hwsim.c linux-openelec/drivers/net/wireless/mac80211_hwsim.c
+--- linux-3.14.36/drivers/net/wireless/mac80211_hwsim.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/mac80211_hwsim.c 2015-07-24 18:03:29.040842002 -0500
+@@ -1671,7 +1671,9 @@
+ return 0;
+ }
+
+-static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ /* Not implemented, queues only on kernel side */
+ }
+diff -Nur linux-3.14.36/drivers/net/wireless/mac80211_hwsim.c.orig linux-openelec/drivers/net/wireless/mac80211_hwsim.c.orig
+--- linux-3.14.36/drivers/net/wireless/mac80211_hwsim.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/wireless/mac80211_hwsim.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2696 @@
++/*
++ * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
++ * Copyright (c) 2008, Jouni Malinen <j@w1.fi>
++ * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * TODO:
++ * - Add TSF sync and fix IBSS beacon transmission by adding
++ * competition for "air time" at TBTT
++ * - RX filtering based on filter configuration (data->rx_filter)
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <net/dst.h>
++#include <net/xfrm.h>
++#include <net/mac80211.h>
++#include <net/ieee80211_radiotap.h>
++#include <linux/if_arp.h>
++#include <linux/rtnetlink.h>
++#include <linux/etherdevice.h>
++#include <linux/platform_device.h>
++#include <linux/debugfs.h>
++#include <linux/module.h>
++#include <linux/ktime.h>
++#include <net/genetlink.h>
++#include "mac80211_hwsim.h"
++
++#define WARN_QUEUE 100
++#define MAX_QUEUE 200
++
++MODULE_AUTHOR("Jouni Malinen");
++MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
++MODULE_LICENSE("GPL");
++
++static u32 wmediumd_portid;
++
++static int radios = 2;
++module_param(radios, int, 0444);
++MODULE_PARM_DESC(radios, "Number of simulated radios");
++
++static int channels = 1;
++module_param(channels, int, 0444);
++MODULE_PARM_DESC(channels, "Number of concurrent channels");
++
++static bool paged_rx = false;
++module_param(paged_rx, bool, 0644);
++MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones");
++
++static bool rctbl = false;
++module_param(rctbl, bool, 0444);
++MODULE_PARM_DESC(rctbl, "Handle rate control table");
++
++/**
++ * enum hwsim_regtest - the type of regulatory tests we offer
++ *
++ * These are the different values you can use for the regtest
++ * module parameter. This is useful to help test world roaming
++ * and the driver regulatory_hint() call and combinations of these.
++ * If you want to do specific alpha2 regulatory domain tests simply
++ * use the userspace regulatory request as that will be respected as
++ * well without the need of this module parameter. This is designed
++ * only for testing the driver regulatory request, world roaming
++ * and all possible combinations.
++ *
++ * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed,
++ * this is the default value.
++ * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory
++ * hint, only one driver regulatory hint will be sent as such the
++ * secondary radios are expected to follow.
++ * @HWSIM_REGTEST_DRIVER_REG_ALL: Used for testing the driver regulatory
++ * request with all radios reporting the same regulatory domain.
++ * @HWSIM_REGTEST_DIFF_COUNTRY: Used for testing the drivers calling
++ * different regulatory domains requests. Expected behaviour is for
++ * an intersection to occur but each device will still use their
++ * respective regulatory requested domains. Subsequent radios will
++ * use the resulting intersection.
++ * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We accomplish
++ * this by using a custom beacon-capable regulatory domain for the first
++ * radio. All other device world roam.
++ * @HWSIM_REGTEST_CUSTOM_WORLD: Used for testing the custom world regulatory
++ * domain requests. All radios will adhere to this custom world regulatory
++ * domain.
++ * @HWSIM_REGTEST_CUSTOM_WORLD_2: Used for testing 2 custom world regulatory
++ * domain requests. The first radio will adhere to the first custom world
++ * regulatory domain, the second one to the second custom world regulatory
++ * domain. All other devices will world roam.
++ * @HWSIM_REGTEST_STRICT_FOLLOW_: Used for testing strict regulatory domain
++ * settings, only the first radio will send a regulatory domain request
++ * and use strict settings. The rest of the radios are expected to follow.
++ * @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain
++ * settings. All radios will adhere to this.
++ * @HWSIM_REGTEST_STRICT_AND_DRIVER_REG: Used for testing strict regulatory
++ * domain settings, combined with secondary driver regulatory domain
++ * settings. The first radio will get a strict regulatory domain setting
++ * using the first driver regulatory request and the second radio will use
++ * non-strict settings using the second driver regulatory request. All
++ * other devices should follow the intersection created between the
++ * first two.
++ * @HWSIM_REGTEST_ALL: Used for testing every possible mix. You will need
++ * at least 6 radios for a complete test. We will test in this order:
++ * 1 - driver custom world regulatory domain
++ * 2 - second custom world regulatory domain
++ * 3 - first driver regulatory domain request
++ * 4 - second driver regulatory domain request
++ * 5 - strict regulatory domain settings using the third driver regulatory
++ * domain request
++ * 6 and on - should follow the intersection of the 3rd, 4rth and 5th radio
++ * regulatory requests.
++ */
++enum hwsim_regtest {
++ HWSIM_REGTEST_DISABLED = 0,
++ HWSIM_REGTEST_DRIVER_REG_FOLLOW = 1,
++ HWSIM_REGTEST_DRIVER_REG_ALL = 2,
++ HWSIM_REGTEST_DIFF_COUNTRY = 3,
++ HWSIM_REGTEST_WORLD_ROAM = 4,
++ HWSIM_REGTEST_CUSTOM_WORLD = 5,
++ HWSIM_REGTEST_CUSTOM_WORLD_2 = 6,
++ HWSIM_REGTEST_STRICT_FOLLOW = 7,
++ HWSIM_REGTEST_STRICT_ALL = 8,
++ HWSIM_REGTEST_STRICT_AND_DRIVER_REG = 9,
++ HWSIM_REGTEST_ALL = 10,
++};
++
++/* Set to one of the HWSIM_REGTEST_* values above */
++static int regtest = HWSIM_REGTEST_DISABLED;
++module_param(regtest, int, 0444);
++MODULE_PARM_DESC(regtest, "The type of regulatory test we want to run");
++
++static const char *hwsim_alpha2s[] = {
++ "FI",
++ "AL",
++ "US",
++ "DE",
++ "JP",
++ "AL",
++};
++
++static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = {
++ .n_reg_rules = 4,
++ .alpha2 = "99",
++ .reg_rules = {
++ REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
++ REG_RULE(2484-10, 2484+10, 40, 0, 20, 0),
++ REG_RULE(5150-10, 5240+10, 40, 0, 30, 0),
++ REG_RULE(5745-10, 5825+10, 40, 0, 30, 0),
++ }
++};
++
++static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
++ .n_reg_rules = 2,
++ .alpha2 = "99",
++ .reg_rules = {
++ REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
++ REG_RULE(5725-10, 5850+10, 40, 0, 30,
++ NL80211_RRF_NO_IR),
++ }
++};
++
++static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
++ &hwsim_world_regdom_custom_01,
++ &hwsim_world_regdom_custom_02,
++};
++
++struct hwsim_vif_priv {
++ u32 magic;
++ u8 bssid[ETH_ALEN];
++ bool assoc;
++ bool bcn_en;
++ u16 aid;
++};
++
++#define HWSIM_VIF_MAGIC 0x69537748
++
++static inline void hwsim_check_magic(struct ieee80211_vif *vif)
++{
++ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
++ WARN(vp->magic != HWSIM_VIF_MAGIC,
++ "Invalid VIF (%p) magic %#x, %pM, %d/%d\n",
++ vif, vp->magic, vif->addr, vif->type, vif->p2p);
++}
++
++static inline void hwsim_set_magic(struct ieee80211_vif *vif)
++{
++ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
++ vp->magic = HWSIM_VIF_MAGIC;
++}
++
++static inline void hwsim_clear_magic(struct ieee80211_vif *vif)
++{
++ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
++ vp->magic = 0;
++}
++
++struct hwsim_sta_priv {
++ u32 magic;
++};
++
++#define HWSIM_STA_MAGIC 0x6d537749
++
++static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta)
++{
++ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
++ WARN_ON(sp->magic != HWSIM_STA_MAGIC);
++}
++
++static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta)
++{
++ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
++ sp->magic = HWSIM_STA_MAGIC;
++}
++
++static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta)
++{
++ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
++ sp->magic = 0;
++}
++
++struct hwsim_chanctx_priv {
++ u32 magic;
++};
++
++#define HWSIM_CHANCTX_MAGIC 0x6d53774a
++
++static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c)
++{
++ struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
++ WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC);
++}
++
++static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c)
++{
++ struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
++ cp->magic = HWSIM_CHANCTX_MAGIC;
++}
++
++static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
++{
++ struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
++ cp->magic = 0;
++}
++
++static struct class *hwsim_class;
++
++static struct net_device *hwsim_mon; /* global monitor netdev */
++
++#define CHAN2G(_freq) { \
++ .band = IEEE80211_BAND_2GHZ, \
++ .center_freq = (_freq), \
++ .hw_value = (_freq), \
++ .max_power = 20, \
++}
++
++#define CHAN5G(_freq) { \
++ .band = IEEE80211_BAND_5GHZ, \
++ .center_freq = (_freq), \
++ .hw_value = (_freq), \
++ .max_power = 20, \
++}
++
++static const struct ieee80211_channel hwsim_channels_2ghz[] = {
++ CHAN2G(2412), /* Channel 1 */
++ CHAN2G(2417), /* Channel 2 */
++ CHAN2G(2422), /* Channel 3 */
++ CHAN2G(2427), /* Channel 4 */
++ CHAN2G(2432), /* Channel 5 */
++ CHAN2G(2437), /* Channel 6 */
++ CHAN2G(2442), /* Channel 7 */
++ CHAN2G(2447), /* Channel 8 */
++ CHAN2G(2452), /* Channel 9 */
++ CHAN2G(2457), /* Channel 10 */
++ CHAN2G(2462), /* Channel 11 */
++ CHAN2G(2467), /* Channel 12 */
++ CHAN2G(2472), /* Channel 13 */
++ CHAN2G(2484), /* Channel 14 */
++};
++
++static const struct ieee80211_channel hwsim_channels_5ghz[] = {
++ CHAN5G(5180), /* Channel 36 */
++ CHAN5G(5200), /* Channel 40 */
++ CHAN5G(5220), /* Channel 44 */
++ CHAN5G(5240), /* Channel 48 */
++
++ CHAN5G(5260), /* Channel 52 */
++ CHAN5G(5280), /* Channel 56 */
++ CHAN5G(5300), /* Channel 60 */
++ CHAN5G(5320), /* Channel 64 */
++
++ CHAN5G(5500), /* Channel 100 */
++ CHAN5G(5520), /* Channel 104 */
++ CHAN5G(5540), /* Channel 108 */
++ CHAN5G(5560), /* Channel 112 */
++ CHAN5G(5580), /* Channel 116 */
++ CHAN5G(5600), /* Channel 120 */
++ CHAN5G(5620), /* Channel 124 */
++ CHAN5G(5640), /* Channel 128 */
++ CHAN5G(5660), /* Channel 132 */
++ CHAN5G(5680), /* Channel 136 */
++ CHAN5G(5700), /* Channel 140 */
++
++ CHAN5G(5745), /* Channel 149 */
++ CHAN5G(5765), /* Channel 153 */
++ CHAN5G(5785), /* Channel 157 */
++ CHAN5G(5805), /* Channel 161 */
++ CHAN5G(5825), /* Channel 165 */
++};
++
++static const struct ieee80211_rate hwsim_rates[] = {
++ { .bitrate = 10 },
++ { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
++ { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
++ { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
++ { .bitrate = 60 },
++ { .bitrate = 90 },
++ { .bitrate = 120 },
++ { .bitrate = 180 },
++ { .bitrate = 240 },
++ { .bitrate = 360 },
++ { .bitrate = 480 },
++ { .bitrate = 540 }
++};
++
++static const struct ieee80211_iface_limit hwsim_if_limits[] = {
++ { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
++ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
++ BIT(NL80211_IFTYPE_P2P_CLIENT) |
++#ifdef CONFIG_MAC80211_MESH
++ BIT(NL80211_IFTYPE_MESH_POINT) |
++#endif
++ BIT(NL80211_IFTYPE_AP) |
++ BIT(NL80211_IFTYPE_P2P_GO) },
++ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
++};
++
++static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
++ { .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
++};
++
++static const struct ieee80211_iface_combination hwsim_if_comb[] = {
++ {
++ .limits = hwsim_if_limits,
++ .n_limits = ARRAY_SIZE(hwsim_if_limits),
++ .max_interfaces = 2048,
++ .num_different_channels = 1,
++ },
++ {
++ .limits = hwsim_if_dfs_limits,
++ .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
++ .max_interfaces = 8,
++ .num_different_channels = 1,
++ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
++ BIT(NL80211_CHAN_WIDTH_20) |
++ BIT(NL80211_CHAN_WIDTH_40) |
++ BIT(NL80211_CHAN_WIDTH_80) |
++ BIT(NL80211_CHAN_WIDTH_160),
++ }
++};
++
++static spinlock_t hwsim_radio_lock;
++static struct list_head hwsim_radios;
++static int hwsim_radio_idx;
++
++static struct platform_driver mac80211_hwsim_driver = {
++ .driver = {
++ .name = "mac80211_hwsim",
++ .owner = THIS_MODULE,
++ },
++};
++
++struct mac80211_hwsim_data {
++ struct list_head list;
++ struct ieee80211_hw *hw;
++ struct device *dev;
++ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
++ struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
++ struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
++ struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
++ struct ieee80211_iface_combination if_combination;
++
++ struct mac_address addresses[2];
++ int channels, idx;
++
++ struct ieee80211_channel *tmp_chan;
++ struct delayed_work roc_done;
++ struct delayed_work hw_scan;
++ struct cfg80211_scan_request *hw_scan_request;
++ struct ieee80211_vif *hw_scan_vif;
++ int scan_chan_idx;
++
++ struct ieee80211_channel *channel;
++ u64 beacon_int /* beacon interval in us */;
++ unsigned int rx_filter;
++ bool started, idle, scanning;
++ struct mutex mutex;
++ struct tasklet_hrtimer beacon_timer;
++ enum ps_mode {
++ PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
++ } ps;
++ bool ps_poll_pending;
++ struct dentry *debugfs;
++
++ struct sk_buff_head pending; /* packets pending */
++ /*
++ * Only radios in the same group can communicate together (the
++ * channel has to match too). Each bit represents a group. A
++ * radio can be in more then one group.
++ */
++ u64 group;
++
++ int power_level;
++
++ /* difference between this hw's clock and the real clock, in usecs */
++ s64 tsf_offset;
++ s64 bcn_delta;
++ /* absolute beacon transmission time. Used to cover up "tx" delay. */
++ u64 abs_bcn_ts;
++};
++
++
++struct hwsim_radiotap_hdr {
++ struct ieee80211_radiotap_header hdr;
++ __le64 rt_tsft;
++ u8 rt_flags;
++ u8 rt_rate;
++ __le16 rt_channel;
++ __le16 rt_chbitmask;
++} __packed;
++
++struct hwsim_radiotap_ack_hdr {
++ struct ieee80211_radiotap_header hdr;
++ u8 rt_flags;
++ u8 pad;
++ __le16 rt_channel;
++ __le16 rt_chbitmask;
++} __packed;
++
++/* MAC80211_HWSIM netlinf family */
++static struct genl_family hwsim_genl_family = {
++ .id = GENL_ID_GENERATE,
++ .hdrsize = 0,
++ .name = "MAC80211_HWSIM",
++ .version = 1,
++ .maxattr = HWSIM_ATTR_MAX,
++};
++
++/* MAC80211_HWSIM netlink policy */
++
++static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
++ [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
++ [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
++ [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
++ .len = IEEE80211_MAX_DATA_LEN },
++ [HWSIM_ATTR_FLAGS] = { .type = NLA_U32 },
++ [HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 },
++ [HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 },
++ [HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC,
++ .len = IEEE80211_TX_MAX_RATES *
++ sizeof(struct hwsim_tx_rate)},
++ [HWSIM_ATTR_COOKIE] = { .type = NLA_U64 },
++ [HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 },
++ [HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 },
++ [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
++ [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
++ [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
++};
++
++static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
++ struct sk_buff *skb,
++ struct ieee80211_channel *chan);
++
++/* sysfs attributes */
++static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
++{
++ struct mac80211_hwsim_data *data = dat;
++ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
++ struct sk_buff *skb;
++ struct ieee80211_pspoll *pspoll;
++
++ if (!vp->assoc)
++ return;
++
++ wiphy_debug(data->hw->wiphy,
++ "%s: send PS-Poll to %pM for aid %d\n",
++ __func__, vp->bssid, vp->aid);
++
++ skb = dev_alloc_skb(sizeof(*pspoll));
++ if (!skb)
++ return;
++ pspoll = (void *) skb_put(skb, sizeof(*pspoll));
++ pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
++ IEEE80211_STYPE_PSPOLL |
++ IEEE80211_FCTL_PM);
++ pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
++ memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
++ memcpy(pspoll->ta, mac, ETH_ALEN);
++
++ rcu_read_lock();
++ mac80211_hwsim_tx_frame(data->hw, skb,
++ rcu_dereference(vif->chanctx_conf)->def.chan);
++ rcu_read_unlock();
++}
++
++static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
++ struct ieee80211_vif *vif, int ps)
++{
++ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
++ struct sk_buff *skb;
++ struct ieee80211_hdr *hdr;
++
++ if (!vp->assoc)
++ return;
++
++ wiphy_debug(data->hw->wiphy,
++ "%s: send data::nullfunc to %pM ps=%d\n",
++ __func__, vp->bssid, ps);
++
++ skb = dev_alloc_skb(sizeof(*hdr));
++ if (!skb)
++ return;
++ hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
++ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
++ IEEE80211_STYPE_NULLFUNC |
++ (ps ? IEEE80211_FCTL_PM : 0));
++ hdr->duration_id = cpu_to_le16(0);
++ memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
++ memcpy(hdr->addr2, mac, ETH_ALEN);
++ memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
++
++ rcu_read_lock();
++ mac80211_hwsim_tx_frame(data->hw, skb,
++ rcu_dereference(vif->chanctx_conf)->def.chan);
++ rcu_read_unlock();
++}
++
++
++static void hwsim_send_nullfunc_ps(void *dat, u8 *mac,
++ struct ieee80211_vif *vif)
++{
++ struct mac80211_hwsim_data *data = dat;
++ hwsim_send_nullfunc(data, mac, vif, 1);
++}
++
++static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac,
++ struct ieee80211_vif *vif)
++{
++ struct mac80211_hwsim_data *data = dat;
++ hwsim_send_nullfunc(data, mac, vif, 0);
++}
++
++static int hwsim_fops_ps_read(void *dat, u64 *val)
++{
++ struct mac80211_hwsim_data *data = dat;
++ *val = data->ps;
++ return 0;
++}
++
++static int hwsim_fops_ps_write(void *dat, u64 val)
++{
++ struct mac80211_hwsim_data *data = dat;
++ enum ps_mode old_ps;
++
++ if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL &&
++ val != PS_MANUAL_POLL)
++ return -EINVAL;
++
++ old_ps = data->ps;
++ data->ps = val;
++
++ if (val == PS_MANUAL_POLL) {
++ ieee80211_iterate_active_interfaces(data->hw,
++ IEEE80211_IFACE_ITER_NORMAL,
++ hwsim_send_ps_poll, data);
++ data->ps_poll_pending = true;
++ } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
++ ieee80211_iterate_active_interfaces(data->hw,
++ IEEE80211_IFACE_ITER_NORMAL,
++ hwsim_send_nullfunc_ps,
++ data);
++ } else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
++ ieee80211_iterate_active_interfaces(data->hw,
++ IEEE80211_IFACE_ITER_NORMAL,
++ hwsim_send_nullfunc_no_ps,
++ data);
++ }
++
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
++ "%llu\n");
++
++static int hwsim_write_simulate_radar(void *dat, u64 val)
++{
++ struct mac80211_hwsim_data *data = dat;
++
++ ieee80211_radar_detected(data->hw);
++
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL,
++ hwsim_write_simulate_radar, "%llu\n");
++
++static int hwsim_fops_group_read(void *dat, u64 *val)
++{
++ struct mac80211_hwsim_data *data = dat;
++ *val = data->group;
++ return 0;
++}
++
++static int hwsim_fops_group_write(void *dat, u64 val)
++{
++ struct mac80211_hwsim_data *data = dat;
++ data->group = val;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
++ hwsim_fops_group_read, hwsim_fops_group_write,
++ "%llx\n");
++
++static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ /* TODO: allow packet injection */
++ dev_kfree_skb(skb);
++ return NETDEV_TX_OK;
++}
++
++static inline u64 mac80211_hwsim_get_tsf_raw(void)
++{
++ return ktime_to_us(ktime_get_real());
++}
++
++static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data)
++{
++ u64 now = mac80211_hwsim_get_tsf_raw();
++ return cpu_to_le64(now + data->tsf_offset);
++}
++
++static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif)
++{
++ struct mac80211_hwsim_data *data = hw->priv;
++ return le64_to_cpu(__mac80211_hwsim_get_tsf(data));
++}
++
++static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif, u64 tsf)
++{
++ struct mac80211_hwsim_data *data = hw->priv;
++ u64 now = mac80211_hwsim_get_tsf(hw, vif);
++ u32 bcn_int = data->beacon_int;
++ s64 delta = tsf - now;
++
++ data->tsf_offset += delta;
++ /* adjust after beaconing with new timestamp at old TBTT */
++ data->bcn_delta = do_div(delta, bcn_int);
++}
++
++static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
++ struct sk_buff *tx_skb,
++ struct ieee80211_channel *chan)
++{
++ struct mac80211_hwsim_data *data = hw->priv;
++ struct sk_buff *skb;
++ struct hwsim_radiotap_hdr *hdr;
++ u16 flags;
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb);
++ struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
++
++ if (!netif_running(hwsim_mon))
++ return;
++
++ skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC);
++ if (skb == NULL)
++ return;
++
++ hdr = (struct hwsim_radiotap_hdr *) skb_push(skb, sizeof(*hdr));
++ hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
++ hdr->hdr.it_pad = 0;
++ hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
++ hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
++ (1 << IEEE80211_RADIOTAP_RATE) |
++ (1 << IEEE80211_RADIOTAP_TSFT) |
++ (1 << IEEE80211_RADIOTAP_CHANNEL));
++ hdr->rt_tsft = __mac80211_hwsim_get_tsf(data);
++ hdr->rt_flags = 0;
++ hdr->rt_rate = txrate->bitrate / 5;
++ hdr->rt_channel = cpu_to_le16(chan->center_freq);
++ flags = IEEE80211_CHAN_2GHZ;
++ if (txrate->flags & IEEE80211_RATE_ERP_G)
++ flags |= IEEE80211_CHAN_OFDM;
++ else
++ flags |= IEEE80211_CHAN_CCK;
++ hdr->rt_chbitmask = cpu_to_le16(flags);
++
++ skb->dev = hwsim_mon;
++ skb_set_mac_header(skb, 0);
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->pkt_type = PACKET_OTHERHOST;
++ skb->protocol = htons(ETH_P_802_2);
++ memset(skb->cb, 0, sizeof(skb->cb));
++ netif_rx(skb);
++}
++
++
++static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
++ const u8 *addr)
++{
++ struct sk_buff *skb;
++ struct hwsim_radiotap_ack_hdr *hdr;
++ u16 flags;
++ struct ieee80211_hdr *hdr11;
++
++ if (!netif_running(hwsim_mon))
++ return;
++
++ skb = dev_alloc_skb(100);
++ if (skb == NULL)
++ return;
++
++ hdr = (struct hwsim_radiotap_ack_hdr *) skb_put(skb, sizeof(*hdr));
++ hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
++ hdr->hdr.it_pad = 0;
++ hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
++ hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
++ (1 << IEEE80211_RADIOTAP_CHANNEL));
++ hdr->rt_flags = 0;
++ hdr->pad = 0;
++ hdr->rt_channel = cpu_to_le16(chan->center_freq);
++ flags = IEEE80211_CHAN_2GHZ;
++ hdr->rt_chbitmask = cpu_to_le16(flags);
++
++ hdr11 = (struct ieee80211_hdr *) skb_put(skb, 10);
++ hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
++ IEEE80211_STYPE_ACK);
++ hdr11->duration_id = cpu_to_le16(0);
++ memcpy(hdr11->addr1, addr, ETH_ALEN);
++
++ skb->dev = hwsim_mon;
++ skb_set_mac_header(skb, 0);
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->pkt_type = PACKET_OTHERHOST;
++ skb->protocol = htons(ETH_P_802_2);
++ memset(skb->cb, 0, sizeof(skb->cb));
++ netif_rx(skb);
++}
++
++
++static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
++ struct sk_buff *skb)
++{
++ switch (data->ps) {
++ case PS_DISABLED:
++ return true;
++ case PS_ENABLED:
++ return false;
++ case PS_AUTO_POLL:
++ /* TODO: accept (some) Beacons by default and other frames only
++ * if pending PS-Poll has been sent */
++ return true;
++ case PS_MANUAL_POLL:
++ /* Allow unicast frames to own address if there is a pending
++ * PS-Poll */
++ if (data->ps_poll_pending &&
++ memcmp(data->hw->wiphy->perm_addr, skb->data + 4,
++ ETH_ALEN) == 0) {
++ data->ps_poll_pending = false;
++ return true;
++ }
++ return false;
++ }
++
++ return true;
++}
++
++
++struct mac80211_hwsim_addr_match_data {
++ bool ret;
++ const u8 *addr;
++};
++
++static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
++ struct ieee80211_vif *vif)
++{
++ struct mac80211_hwsim_addr_match_data *md = data;
++ if (memcmp(mac, md->addr, ETH_ALEN) == 0)
++ md->ret = true;
++}
++
++
++static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
++ const u8 *addr)
++{
++ struct mac80211_hwsim_addr_match_data md;
++
++ if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
++ return true;
++
++ md.ret = false;
++ md.addr = addr;
++ ieee80211_iterate_active_interfaces_atomic(data->hw,
++ IEEE80211_IFACE_ITER_NORMAL,
++ mac80211_hwsim_addr_iter,
++ &md);
++
++ return md.ret;
++}
++
++static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
++ struct sk_buff *my_skb,
++ int dst_portid)
++{
++ struct sk_buff *skb;
++ struct mac80211_hwsim_data *data = hw->priv;
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) my_skb->data;
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(my_skb);
++ void *msg_head;
++ unsigned int hwsim_flags = 0;
++ int i;
++ struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
++
++ if (data->ps != PS_DISABLED)
++ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
++ /* If the queue contains MAX_QUEUE skb's drop some */
++ if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
++ /* Droping until WARN_QUEUE level */
++ while (skb_queue_len(&data->pending) >= WARN_QUEUE)
++ skb_dequeue(&data->pending);
++ }
++
++ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
++ if (skb == NULL)
++ goto nla_put_failure;
++
++ msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
++ HWSIM_CMD_FRAME);
++ if (msg_head == NULL) {
++ printk(KERN_DEBUG "mac80211_hwsim: problem with msg_head\n");
++ goto nla_put_failure;
++ }
++
++ if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
++ ETH_ALEN, data->addresses[1].addr))
++ goto nla_put_failure;
++
++ /* We get the skb->data */
++ if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
++ goto nla_put_failure;
++
++ /* We get the flags for this transmission, and we translate them to
++ wmediumd flags */
++
++ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
++ hwsim_flags |= HWSIM_TX_CTL_REQ_TX_STATUS;
++
++ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
++ hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
++
++ if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
++ goto nla_put_failure;
++
++ /* We get the tx control (rate and retries) info*/
++
++ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
++ tx_attempts[i].idx = info->status.rates[i].idx;
++ tx_attempts[i].count = info->status.rates[i].count;
++ }
++
++ if (nla_put(skb, HWSIM_ATTR_TX_INFO,
++ sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
++ tx_attempts))
++ goto nla_put_failure;
++
++ /* We create a cookie to identify this skb */
++ if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb))
++ goto nla_put_failure;
++
++ genlmsg_end(skb, msg_head);
++ genlmsg_unicast(&init_net, skb, dst_portid);
++
++ /* Enqueue the packet */
++ skb_queue_tail(&data->pending, my_skb);
++ return;
++
++nla_put_failure:
++ printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
++}
++
++static bool hwsim_chans_compat(struct ieee80211_channel *c1,
++ struct ieee80211_channel *c2)
++{
++ if (!c1 || !c2)
++ return false;
++
++ return c1->center_freq == c2->center_freq;
++}
++
++struct tx_iter_data {
++ struct ieee80211_channel *channel;
++ bool receive;
++};
++
++static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
++ struct ieee80211_vif *vif)
++{
++ struct tx_iter_data *data = _data;
++
++ if (!vif->chanctx_conf)
++ return;
++
++ if (!hwsim_chans_compat(data->channel,
++ rcu_dereference(vif->chanctx_conf)->def.chan))
++ return;
++
++ data->receive = true;
++}
++
++static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
++ struct sk_buff *skb,
++ struct ieee80211_channel *chan)
++{
++ struct mac80211_hwsim_data *data = hw->priv, *data2;
++ bool ack = false;
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
++ struct ieee80211_rx_status rx_status;
++ u64 now;
++
++ memset(&rx_status, 0, sizeof(rx_status));
++ rx_status.flag |= RX_FLAG_MACTIME_START;
++ rx_status.freq = chan->center_freq;
++ rx_status.band = chan->band;
++ if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
++ rx_status.rate_idx =
++ ieee80211_rate_get_vht_mcs(&info->control.rates[0]);
++ rx_status.vht_nss =
++ ieee80211_rate_get_vht_nss(&info->control.rates[0]);
++ rx_status.flag |= RX_FLAG_VHT;
++ } else {
++ rx_status.rate_idx = info->control.rates[0].idx;
++ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
++ rx_status.flag |= RX_FLAG_HT;
++ }
++ if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
++ rx_status.flag |= RX_FLAG_40MHZ;
++ if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
++ rx_status.flag |= RX_FLAG_SHORT_GI;
++ /* TODO: simulate real signal strength (and optional packet loss) */
++ rx_status.signal = data->power_level - 50;
++
++ if (data->ps != PS_DISABLED)
++ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
++
++ /* release the skb's source info */
++ skb_orphan(skb);
++ skb_dst_drop(skb);
++ skb->mark = 0;
++ secpath_reset(skb);
++ nf_reset(skb);
++
++ /*
++ * Get absolute mactime here so all HWs RX at the "same time", and
++ * absolute TX time for beacon mactime so the timestamp matches.
++ * Giving beacons a different mactime than non-beacons looks messy, but
++ * it helps the Toffset be exact and a ~10us mactime discrepancy
++ * probably doesn't really matter.
++ */
++ if (ieee80211_is_beacon(hdr->frame_control) ||
++ ieee80211_is_probe_resp(hdr->frame_control))
++ now = data->abs_bcn_ts;
++ else
++ now = mac80211_hwsim_get_tsf_raw();
++
++ /* Copy skb to all enabled radios that are on the current frequency */
++ spin_lock(&hwsim_radio_lock);
++ list_for_each_entry(data2, &hwsim_radios, list) {
++ struct sk_buff *nskb;
++ struct tx_iter_data tx_iter_data = {
++ .receive = false,
++ .channel = chan,
++ };
++
++ if (data == data2)
++ continue;
++
++ if (!data2->started || (data2->idle && !data2->tmp_chan) ||
++ !hwsim_ps_rx_ok(data2, skb))
++ continue;
++
++ if (!(data->group & data2->group))
++ continue;
++
++ if (!hwsim_chans_compat(chan, data2->tmp_chan) &&
++ !hwsim_chans_compat(chan, data2->channel)) {
++ ieee80211_iterate_active_interfaces_atomic(
++ data2->hw, IEEE80211_IFACE_ITER_NORMAL,
++ mac80211_hwsim_tx_iter, &tx_iter_data);
++ if (!tx_iter_data.receive)
++ continue;
++ }
++
++ /*
++ * reserve some space for our vendor and the normal
++ * radiotap header, since we're copying anyway
++ */
++ if (skb->len < PAGE_SIZE && paged_rx) {
++ struct page *page = alloc_page(GFP_ATOMIC);
++
++ if (!page)
++ continue;
++
++ nskb = dev_alloc_skb(128);
++ if (!nskb) {
++ __free_page(page);
++ continue;
++ }
++
++ memcpy(page_address(page), skb->data, skb->len);
++ skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len);
++ } else {
++ nskb = skb_copy(skb, GFP_ATOMIC);
++ if (!nskb)
++ continue;
++ }
++
++ if (mac80211_hwsim_addr_match(data2, hdr->addr1))
++ ack = true;
++
++ rx_status.mactime = now + data2->tsf_offset;
++#if 0
++ /*
++ * Don't enable this code by default as the OUI 00:00:00
++ * is registered to Xerox so we shouldn't use it here, it
++ * might find its way into pcap files.
++ * Note that this code requires the headroom in the SKB
++ * that was allocated earlier.
++ */
++ rx_status.vendor_radiotap_oui[0] = 0x00;
++ rx_status.vendor_radiotap_oui[1] = 0x00;
++ rx_status.vendor_radiotap_oui[2] = 0x00;
++ rx_status.vendor_radiotap_subns = 127;
++ /*
++ * Radiotap vendor namespaces can (and should) also be
++ * split into fields by using the standard radiotap
++ * presence bitmap mechanism. Use just BIT(0) here for
++ * the presence bitmap.
++ */
++ rx_status.vendor_radiotap_bitmap = BIT(0);
++ /* We have 8 bytes of (dummy) data */
++ rx_status.vendor_radiotap_len = 8;
++ /* For testing, also require it to be aligned */
++ rx_status.vendor_radiotap_align = 8;
++ /* push the data */
++ memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
++#endif
++
++ memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
++ ieee80211_rx_irqsafe(data2->hw, nskb);
++ }
++ spin_unlock(&hwsim_radio_lock);
++
++ return ack;
++}
++
++static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
++ struct ieee80211_tx_control *control,
++ struct sk_buff *skb)
++{
++ struct mac80211_hwsim_data *data = hw->priv;
++ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
++ struct ieee80211_chanctx_conf *chanctx_conf;
++ struct ieee80211_channel *channel;
++ bool ack;
++ u32 _portid;
++
++ if (WARN_ON(skb->len < 10)) {
++ /* Should not happen; just a sanity check for addr1 use */
++ ieee80211_free_txskb(hw, skb);
++ return;
++ }
++
++ if (data->channels == 1) {
++ channel = data->channel;
++ } else if (txi->hw_queue == 4) {
++ channel = data->tmp_chan;
++ } else {
++ chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
++ if (chanctx_conf)
++ channel = chanctx_conf->def.chan;
++ else
++ channel = NULL;
++ }
++
++ if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
++ ieee80211_free_txskb(hw, skb);
++ return;
++ }
++
++ if (data->idle && !data->tmp_chan) {
++ wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
++ ieee80211_free_txskb(hw, skb);
++ return;
++ }
++
++ if (txi->control.vif)
++ hwsim_check_magic(txi->control.vif);
++ if (control->sta)
++ hwsim_check_sta_magic(control->sta);
++
++ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
++ ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
++ txi->control.rates,
++ ARRAY_SIZE(txi->control.rates));
++
++ txi->rate_driver_data[0] = channel;
++ mac80211_hwsim_monitor_rx(hw, skb, channel);
++
++ /* wmediumd mode check */
++ _portid = ACCESS_ONCE(wmediumd_portid);
++
++ if (_portid)
++ return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
++
++ /* NO wmediumd detected, perfect medium simulation */
++ ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
++
++ if (ack && skb->len >= 16) {
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
++ mac80211_hwsim_monitor_ack(channel, hdr->addr2);
++ }
++
++ ieee80211_tx_info_clear_status(txi);
++
++ /* frame was transmitted at most favorable rate at first attempt */
++ txi->control.rates[0].count = 1;
++ txi->control.rates[1].idx = -1;
++
++ if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
++ txi->flags |= IEEE80211_TX_STAT_ACK;
++ ieee80211_tx_status_irqsafe(hw, skb);
++}
++
++
++static int mac80211_hwsim_start(struct ieee80211_hw *hw)
++{
++ struct mac80211_hwsim_data *data = hw->priv;
++ wiphy_debug(hw->wiphy, "%s\n", __func__);
++ data->started = true;
++ return 0;
++}
++
++
++static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
++{
++ struct mac80211_hwsim_data *data = hw->priv;
++ data->started = false;
++ tasklet_hrtimer_cancel(&data->beacon_timer);
++ wiphy_debug(hw->wiphy, "%s\n", __func__);
++}
++
++
++static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif)
++{
++ wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
++ __func__, ieee80211_vif_type_p2p(vif),
++ vif->addr);
++ hwsim_set_magic(vif);
++
++ vif->cab_queue = 0;
++ vif->hw_queue[IEEE80211_AC_VO] = 0;
++ vif->hw_queue[IEEE80211_AC_VI] = 1;
++ vif->hw_queue[IEEE80211_AC_BE] = 2;
++ vif->hw_queue[IEEE80211_AC_BK] = 3;
++
++ return 0;
++}
++
++
++static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ enum nl80211_iftype newtype,
++ bool newp2p)
++{
++ newtype = ieee80211_iftype_p2p(newtype, newp2p);
++ wiphy_debug(hw->wiphy,
++ "%s (old type=%d, new type=%d, mac_addr=%pM)\n",
++ __func__, ieee80211_vif_type_p2p(vif),
++ newtype, vif->addr);
++ hwsim_check_magic(vif);
++
++ /*
++ * interface may change from non-AP to AP in
++ * which case this needs to be set up again
++ */
++ vif->cab_queue = 0;
++
++ return 0;
++}
++
++static void mac80211_hwsim_remove_interface(
++ struct ieee80211_hw *hw, struct ieee80211_vif *vif)
++{
++ wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
++ __func__, ieee80211_vif_type_p2p(vif),
++ vif->addr);
++ hwsim_check_magic(vif);
++ hwsim_clear_magic(vif);
++}
++
++static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
++ struct sk_buff *skb,
++ struct ieee80211_channel *chan)
++{
++ u32 _pid = ACCESS_ONCE(wmediumd_portid);
++
++ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE) {
++ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
++ ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
++ txi->control.rates,
++ ARRAY_SIZE(txi->control.rates));
++ }
++
++ mac80211_hwsim_monitor_rx(hw, skb, chan);
++
++ if (_pid)
++ return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
++
++ mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
++ dev_kfree_skb(skb);
++}
++
++static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
++ struct ieee80211_vif *vif)
++{
++ struct mac80211_hwsim_data *data = arg;
++ struct ieee80211_hw *hw = data->hw;
++ struct ieee80211_tx_info *info;
++ struct ieee80211_rate *txrate;
++ struct ieee80211_mgmt *mgmt;
++ struct sk_buff *skb;
++
++ hwsim_check_magic(vif);
++
++ if (vif->type != NL80211_IFTYPE_AP &&
++ vif->type != NL80211_IFTYPE_MESH_POINT &&
++ vif->type != NL80211_IFTYPE_ADHOC)
++ return;
++
++ skb = ieee80211_beacon_get(hw, vif);
++ if (skb == NULL)
++ return;
++ info = IEEE80211_SKB_CB(skb);
++ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
++ ieee80211_get_tx_rates(vif, NULL, skb,
++ info->control.rates,
++ ARRAY_SIZE(info->control.rates));
++
++ txrate = ieee80211_get_tx_rate(hw, info);
++
++ mgmt = (struct ieee80211_mgmt *) skb->data;
++ /* fake header transmission time */
++ data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw();
++ mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts +
++ data->tsf_offset +
++ 24 * 8 * 10 / txrate->bitrate);
++
++ mac80211_hwsim_tx_frame(hw, skb,
++ rcu_dereference(vif->chanctx_conf)->def.chan);
++}
++
++static enum hrtimer_restart
++mac80211_hwsim_beacon(struct hrtimer *timer)
++{
++ struct mac80211_hwsim_data *data =
++ container_of(timer, struct mac80211_hwsim_data,
++ beacon_timer.timer);
++ struct ieee80211_hw *hw = data->hw;
++ u64 bcn_int = data->beacon_int;
++ ktime_t next_bcn;
++
++ if (!data->started)
++ goto out;
++
++ ieee80211_iterate_active_interfaces_atomic(
++ hw, IEEE80211_IFACE_ITER_NORMAL,
++ mac80211_hwsim_beacon_tx, data);
++
++ /* beacon at new TBTT + beacon interval */
++ if (data->bcn_delta) {
++ bcn_int -= data->bcn_delta;
++ data->bcn_delta = 0;
++ }
++
++ next_bcn = ktime_add(hrtimer_get_expires(timer),
++ ns_to_ktime(bcn_int * 1000));
++ tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS);
++out:
++ return HRTIMER_NORESTART;
++}
++
++static const char * const hwsim_chanwidths[] = {
++ [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
++ [NL80211_CHAN_WIDTH_20] = "ht20",
++ [NL80211_CHAN_WIDTH_40] = "ht40",
++ [NL80211_CHAN_WIDTH_80] = "vht80",
++ [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
++ [NL80211_CHAN_WIDTH_160] = "vht160",
++};
++
++static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
++{
++ struct mac80211_hwsim_data *data = hw->priv;
++ struct ieee80211_conf *conf = &hw->conf;
++ static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
++ [IEEE80211_SMPS_AUTOMATIC] = "auto",
++ [IEEE80211_SMPS_OFF] = "off",
++ [IEEE80211_SMPS_STATIC] = "static",
++ [IEEE80211_SMPS_DYNAMIC] = "dynamic",
++ };
++
++ if (conf->chandef.chan)
++ wiphy_debug(hw->wiphy,
++ "%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n",
++ __func__,
++ conf->chandef.chan->center_freq,
++ conf->chandef.center_freq1,
++ conf->chandef.center_freq2,
++ hwsim_chanwidths[conf->chandef.width],
++ !!(conf->flags & IEEE80211_CONF_IDLE),
++ !!(conf->flags & IEEE80211_CONF_PS),
++ smps_modes[conf->smps_mode]);
++ else
++ wiphy_debug(hw->wiphy,
++ "%s (freq=0 idle=%d ps=%d smps=%s)\n",
++ __func__,
++ !!(conf->flags & IEEE80211_CONF_IDLE),
++ !!(conf->flags & IEEE80211_CONF_PS),
++ smps_modes[conf->smps_mode]);
++
++ data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
++
++ data->channel = conf->chandef.chan;
++
++ WARN_ON(data->channel && data->channels > 1);
++
++ data->power_level = conf->power_level;
++ if (!data->started || !data->beacon_int)
++ tasklet_hrtimer_cancel(&data->beacon_timer);
++ else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
++ u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
++ u32 bcn_int = data->beacon_int;
++ u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
++
++ tasklet_hrtimer_start(&data->beacon_timer,
++ ns_to_ktime(until_tbtt * 1000),
++ HRTIMER_MODE_REL);
++ }
++
++ return 0;
++}
++
++
++static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
++ unsigned int changed_flags,
++ unsigned int *total_flags,u64 multicast)
++{
++ struct mac80211_hwsim_data *data = hw->priv;
++
++ wiphy_debug(hw->wiphy, "%s\n", __func__);
++
++ data->rx_filter = 0;
++ if (*total_flags & FIF_PROMISC_IN_BSS)
++ data->rx_filter |= FIF_PROMISC_IN_BSS;
++ if (*total_flags & FIF_ALLMULTI)
++ data->rx_filter |= FIF_ALLMULTI;
++
++ *total_flags = data->rx_filter;
++}
++
++static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac,
++ struct ieee80211_vif *vif)
++{
++ unsigned int *count = data;
++ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
++
++ if (vp->bcn_en)
++ (*count)++;
++}
++
++static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ struct ieee80211_bss_conf *info,
++ u32 changed)
++{
++ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
++ struct mac80211_hwsim_data *data = hw->priv;
++
++ hwsim_check_magic(vif);
++
++ wiphy_debug(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n",
++ __func__, changed, vif->addr);
++
++ if (changed & BSS_CHANGED_BSSID) {
++ wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n",
++ __func__, info->bssid);
++ memcpy(vp->bssid, info->bssid, ETH_ALEN);
++ }
++
++ if (changed & BSS_CHANGED_ASSOC) {
++ wiphy_debug(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
++ info->assoc, info->aid);
++ vp->assoc = info->assoc;
++ vp->aid = info->aid;
++ }
++
++ if (changed & BSS_CHANGED_BEACON_INT) {
++ wiphy_debug(hw->wiphy, " BCNINT: %d\n", info->beacon_int);
++ data->beacon_int = info->beacon_int * 1024;
++ }
++
++ if (changed & BSS_CHANGED_BEACON_ENABLED) {
++ wiphy_debug(hw->wiphy, " BCN EN: %d\n", info->enable_beacon);
++ vp->bcn_en = info->enable_beacon;
++ if (data->started &&
++ !hrtimer_is_queued(&data->beacon_timer.timer) &&
++ info->enable_beacon) {
++ u64 tsf, until_tbtt;
++ u32 bcn_int;
++ if (WARN_ON(!data->beacon_int))
++ data->beacon_int = 1000 * 1024;
++ tsf = mac80211_hwsim_get_tsf(hw, vif);
++ bcn_int = data->beacon_int;
++ until_tbtt = bcn_int - do_div(tsf, bcn_int);
++ tasklet_hrtimer_start(&data->beacon_timer,
++ ns_to_ktime(until_tbtt * 1000),
++ HRTIMER_MODE_REL);
++ } else if (!info->enable_beacon) {
++ unsigned int count = 0;
++ ieee80211_iterate_active_interfaces_atomic(
++ data->hw, IEEE80211_IFACE_ITER_NORMAL,
++ mac80211_hwsim_bcn_en_iter, &count);
++ wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
++ count);
++ if (count == 0)
++ tasklet_hrtimer_cancel(&data->beacon_timer);
++ }
++ }
++
++ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
++ wiphy_debug(hw->wiphy, " ERP_CTS_PROT: %d\n",
++ info->use_cts_prot);
++ }
++
++ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
++ wiphy_debug(hw->wiphy, " ERP_PREAMBLE: %d\n",
++ info->use_short_preamble);
++ }
++
++ if (changed & BSS_CHANGED_ERP_SLOT) {
++ wiphy_debug(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot);
++ }
++
++ if (changed & BSS_CHANGED_HT) {
++ wiphy_debug(hw->wiphy, " HT: op_mode=0x%x\n",
++ info->ht_operation_mode);
++ }
++
++ if (changed & BSS_CHANGED_BASIC_RATES) {
++ wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n",
++ (unsigned long long) info->basic_rates);
++ }
++
++ if (changed & BSS_CHANGED_TXPOWER)
++ wiphy_debug(hw->wiphy, " TX Power: %d dBm\n", info->txpower);
++}
++
++static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta)
++{
++ hwsim_check_magic(vif);
++ hwsim_set_sta_magic(sta);
++
++ return 0;
++}
++
++static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta)
++{
++ hwsim_check_magic(vif);
++ hwsim_clear_sta_magic(sta);
++
++ return 0;
++}
++
++static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ enum sta_notify_cmd cmd,
++ struct ieee80211_sta *sta)
++{
++ hwsim_check_magic(vif);
++
++ switch (cmd) {
++ case STA_NOTIFY_SLEEP:
++ case STA_NOTIFY_AWAKE:
++ /* TODO: make good use of these flags */
++ break;
++ default:
++ WARN(1, "Invalid sta notify: %d\n", cmd);
++ break;
++ }
++}
++
++static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw,
++ struct ieee80211_sta *sta,
++ bool set)
++{
++ hwsim_check_sta_magic(sta);
++ return 0;
++}
++
++static int mac80211_hwsim_conf_tx(
++ struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif, u16 queue,
++ const struct ieee80211_tx_queue_params *params)
++{
++ wiphy_debug(hw->wiphy,
++ "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n",
++ __func__, queue,
++ params->txop, params->cw_min,
++ params->cw_max, params->aifs);
++ return 0;
++}
++
++static int mac80211_hwsim_get_survey(
++ struct ieee80211_hw *hw, int idx,
++ struct survey_info *survey)
++{
++ struct ieee80211_conf *conf = &hw->conf;
++
++ wiphy_debug(hw->wiphy, "%s (idx=%d)\n", __func__, idx);
++
++ if (idx != 0)
++ return -ENOENT;
++
++ /* Current channel */
++ survey->channel = conf->chandef.chan;
++
++ /*
++ * Magically conjured noise level --- this is only ok for simulated hardware.
++ *
++ * A real driver which cannot determine the real channel noise MUST NOT
++ * report any noise, especially not a magically conjured one :-)
++ */
++ survey->filled = SURVEY_INFO_NOISE_DBM;
++ survey->noise = -92;
++
++ return 0;
++}
++
++#ifdef CONFIG_NL80211_TESTMODE
++/*
++ * This section contains example code for using netlink
++ * attributes with the testmode command in nl80211.
++ */
++
++/* These enums need to be kept in sync with userspace */
++enum hwsim_testmode_attr {
++ __HWSIM_TM_ATTR_INVALID = 0,
++ HWSIM_TM_ATTR_CMD = 1,
++ HWSIM_TM_ATTR_PS = 2,
++
++ /* keep last */
++ __HWSIM_TM_ATTR_AFTER_LAST,
++ HWSIM_TM_ATTR_MAX = __HWSIM_TM_ATTR_AFTER_LAST - 1
++};
++
++enum hwsim_testmode_cmd {
++ HWSIM_TM_CMD_SET_PS = 0,
++ HWSIM_TM_CMD_GET_PS = 1,
++ HWSIM_TM_CMD_STOP_QUEUES = 2,
++ HWSIM_TM_CMD_WAKE_QUEUES = 3,
++};
++
++static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
++ [HWSIM_TM_ATTR_CMD] = { .type = NLA_U32 },
++ [HWSIM_TM_ATTR_PS] = { .type = NLA_U32 },
++};
++
++static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ void *data, int len)
++{
++ struct mac80211_hwsim_data *hwsim = hw->priv;
++ struct nlattr *tb[HWSIM_TM_ATTR_MAX + 1];
++ struct sk_buff *skb;
++ int err, ps;
++
++ err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len,
++ hwsim_testmode_policy);
++ if (err)
++ return err;
++
++ if (!tb[HWSIM_TM_ATTR_CMD])
++ return -EINVAL;
++
++ switch (nla_get_u32(tb[HWSIM_TM_ATTR_CMD])) {
++ case HWSIM_TM_CMD_SET_PS:
++ if (!tb[HWSIM_TM_ATTR_PS])
++ return -EINVAL;
++ ps = nla_get_u32(tb[HWSIM_TM_ATTR_PS]);
++ return hwsim_fops_ps_write(hwsim, ps);
++ case HWSIM_TM_CMD_GET_PS:
++ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
++ nla_total_size(sizeof(u32)));
++ if (!skb)
++ return -ENOMEM;
++ if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
++ goto nla_put_failure;
++ return cfg80211_testmode_reply(skb);
++ case HWSIM_TM_CMD_STOP_QUEUES:
++ ieee80211_stop_queues(hw);
++ return 0;
++ case HWSIM_TM_CMD_WAKE_QUEUES:
++ ieee80211_wake_queues(hw);
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ nla_put_failure:
++ kfree_skb(skb);
++ return -ENOBUFS;
++}
++#endif
++
++static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ enum ieee80211_ampdu_mlme_action action,
++ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
++ u8 buf_size)
++{
++ switch (action) {
++ case IEEE80211_AMPDU_TX_START:
++ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
++ break;
++ case IEEE80211_AMPDU_TX_STOP_CONT:
++ case IEEE80211_AMPDU_TX_STOP_FLUSH:
++ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
++ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
++ break;
++ case IEEE80211_AMPDU_TX_OPERATIONAL:
++ break;
++ case IEEE80211_AMPDU_RX_START:
++ case IEEE80211_AMPDU_RX_STOP:
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ u32 queues, bool drop)
++{
++ /* Not implemented, queues only on kernel side */
++}
++
++static void hw_scan_work(struct work_struct *work)
++{
++ struct mac80211_hwsim_data *hwsim =
++ container_of(work, struct mac80211_hwsim_data, hw_scan.work);
++ struct cfg80211_scan_request *req = hwsim->hw_scan_request;
++ int dwell, i;
++
++ mutex_lock(&hwsim->mutex);
++ if (hwsim->scan_chan_idx >= req->n_channels) {
++ wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n");
++ ieee80211_scan_completed(hwsim->hw, false);
++ hwsim->hw_scan_request = NULL;
++ hwsim->hw_scan_vif = NULL;
++ hwsim->tmp_chan = NULL;
++ mutex_unlock(&hwsim->mutex);
++ return;
++ }
++
++ wiphy_debug(hwsim->hw->wiphy, "hw scan %d MHz\n",
++ req->channels[hwsim->scan_chan_idx]->center_freq);
++
++ hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
++ if (hwsim->tmp_chan->flags & IEEE80211_CHAN_NO_IR ||
++ !req->n_ssids) {
++ dwell = 120;
++ } else {
++ dwell = 30;
++ /* send probes */
++ for (i = 0; i < req->n_ssids; i++) {
++ struct sk_buff *probe;
++
++ probe = ieee80211_probereq_get(hwsim->hw,
++ hwsim->hw_scan_vif,
++ req->ssids[i].ssid,
++ req->ssids[i].ssid_len,
++ req->ie_len);
++ if (!probe)
++ continue;
++
++ if (req->ie_len)
++ memcpy(skb_put(probe, req->ie_len), req->ie,
++ req->ie_len);
++
++ local_bh_disable();
++ mac80211_hwsim_tx_frame(hwsim->hw, probe,
++ hwsim->tmp_chan);
++ local_bh_enable();
++ }
++ }
++ ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan,
++ msecs_to_jiffies(dwell));
++ hwsim->scan_chan_idx++;
++ mutex_unlock(&hwsim->mutex);
++}
++
++static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ struct cfg80211_scan_request *req)
++{
++ struct mac80211_hwsim_data *hwsim = hw->priv;
++
++ mutex_lock(&hwsim->mutex);
++ if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
++ mutex_unlock(&hwsim->mutex);
++ return -EBUSY;
++ }
++ hwsim->hw_scan_request = req;
++ hwsim->hw_scan_vif = vif;
++ hwsim->scan_chan_idx = 0;
++ mutex_unlock(&hwsim->mutex);
++
++ wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
++
++ ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0);
++
++ return 0;
++}
++
++static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif)
++{
++ struct mac80211_hwsim_data *hwsim = hw->priv;
++
++ wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n");
++
++ cancel_delayed_work_sync(&hwsim->hw_scan);
++
++ mutex_lock(&hwsim->mutex);
++ ieee80211_scan_completed(hwsim->hw, true);
++ hwsim->tmp_chan = NULL;
++ hwsim->hw_scan_request = NULL;
++ hwsim->hw_scan_vif = NULL;
++ mutex_unlock(&hwsim->mutex);
++}
++
++static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
++{
++ struct mac80211_hwsim_data *hwsim = hw->priv;
++
++ mutex_lock(&hwsim->mutex);
++
++ if (hwsim->scanning) {
++ printk(KERN_DEBUG "two hwsim sw_scans detected!\n");
++ goto out;
++ }
++
++ printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
++ hwsim->scanning = true;
++
++out:
++ mutex_unlock(&hwsim->mutex);
++}
++
++static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
++{
++ struct mac80211_hwsim_data *hwsim = hw->priv;
++
++ mutex_lock(&hwsim->mutex);
++
++ printk(KERN_DEBUG "hwsim sw_scan_complete\n");
++ hwsim->scanning = false;
++
++ mutex_unlock(&hwsim->mutex);
++}
++
++static void hw_roc_done(struct work_struct *work)
++{
++ struct mac80211_hwsim_data *hwsim =
++ container_of(work, struct mac80211_hwsim_data, roc_done.work);
++
++ mutex_lock(&hwsim->mutex);
++ ieee80211_remain_on_channel_expired(hwsim->hw);
++ hwsim->tmp_chan = NULL;
++ mutex_unlock(&hwsim->mutex);
++
++ wiphy_debug(hwsim->hw->wiphy, "hwsim ROC expired\n");
++}
++
++static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ struct ieee80211_channel *chan,
++ int duration,
++ enum ieee80211_roc_type type)
++{
++ struct mac80211_hwsim_data *hwsim = hw->priv;
++
++ mutex_lock(&hwsim->mutex);
++ if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
++ mutex_unlock(&hwsim->mutex);
++ return -EBUSY;
++ }
++
++ hwsim->tmp_chan = chan;
++ mutex_unlock(&hwsim->mutex);
++
++ wiphy_debug(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n",
++ chan->center_freq, duration);
++
++ ieee80211_ready_on_channel(hw);
++
++ ieee80211_queue_delayed_work(hw, &hwsim->roc_done,
++ msecs_to_jiffies(duration));
++ return 0;
++}
++
++static int mac80211_hwsim_croc(struct ieee80211_hw *hw)
++{
++ struct mac80211_hwsim_data *hwsim = hw->priv;
++
++ cancel_delayed_work_sync(&hwsim->roc_done);
++
++ mutex_lock(&hwsim->mutex);
++ hwsim->tmp_chan = NULL;
++ mutex_unlock(&hwsim->mutex);
++
++ wiphy_debug(hw->wiphy, "hwsim ROC canceled\n");
++
++ return 0;
++}
++
++static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
++ struct ieee80211_chanctx_conf *ctx)
++{
++ hwsim_set_chanctx_magic(ctx);
++ wiphy_debug(hw->wiphy,
++ "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
++ ctx->def.chan->center_freq, ctx->def.width,
++ ctx->def.center_freq1, ctx->def.center_freq2);
++ return 0;
++}
++
++static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
++ struct ieee80211_chanctx_conf *ctx)
++{
++ wiphy_debug(hw->wiphy,
++ "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
++ ctx->def.chan->center_freq, ctx->def.width,
++ ctx->def.center_freq1, ctx->def.center_freq2);
++ hwsim_check_chanctx_magic(ctx);
++ hwsim_clear_chanctx_magic(ctx);
++}
++
++static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
++ struct ieee80211_chanctx_conf *ctx,
++ u32 changed)
++{
++ hwsim_check_chanctx_magic(ctx);
++ wiphy_debug(hw->wiphy,
++ "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
++ ctx->def.chan->center_freq, ctx->def.width,
++ ctx->def.center_freq1, ctx->def.center_freq2);
++}
++
++static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ struct ieee80211_chanctx_conf *ctx)
++{
++ hwsim_check_magic(vif);
++ hwsim_check_chanctx_magic(ctx);
++
++ return 0;
++}
++
++static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ struct ieee80211_chanctx_conf *ctx)
++{
++ hwsim_check_magic(vif);
++ hwsim_check_chanctx_magic(ctx);
++}
++
++static const struct ieee80211_ops mac80211_hwsim_ops = {
++ .tx = mac80211_hwsim_tx,
++ .start = mac80211_hwsim_start,
++ .stop = mac80211_hwsim_stop,
++ .add_interface = mac80211_hwsim_add_interface,
++ .change_interface = mac80211_hwsim_change_interface,
++ .remove_interface = mac80211_hwsim_remove_interface,
++ .config = mac80211_hwsim_config,
++ .configure_filter = mac80211_hwsim_configure_filter,
++ .bss_info_changed = mac80211_hwsim_bss_info_changed,
++ .sta_add = mac80211_hwsim_sta_add,
++ .sta_remove = mac80211_hwsim_sta_remove,
++ .sta_notify = mac80211_hwsim_sta_notify,
++ .set_tim = mac80211_hwsim_set_tim,
++ .conf_tx = mac80211_hwsim_conf_tx,
++ .get_survey = mac80211_hwsim_get_survey,
++ CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
++ .ampdu_action = mac80211_hwsim_ampdu_action,
++ .sw_scan_start = mac80211_hwsim_sw_scan,
++ .sw_scan_complete = mac80211_hwsim_sw_scan_complete,
++ .flush = mac80211_hwsim_flush,
++ .get_tsf = mac80211_hwsim_get_tsf,
++ .set_tsf = mac80211_hwsim_set_tsf,
++};
++
++static struct ieee80211_ops mac80211_hwsim_mchan_ops;
++
++static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
++ const struct ieee80211_regdomain *regd,
++ bool reg_strict)
++{
++ int err;
++ u8 addr[ETH_ALEN];
++ struct mac80211_hwsim_data *data;
++ struct ieee80211_hw *hw;
++ enum ieee80211_band band;
++ const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
++ int idx;
++
++ spin_lock_bh(&hwsim_radio_lock);
++ idx = hwsim_radio_idx++;
++ spin_unlock_bh(&hwsim_radio_lock);
++
++ if (channels > 1)
++ ops = &mac80211_hwsim_mchan_ops;
++ hw = ieee80211_alloc_hw(sizeof(*data), ops);
++ if (!hw) {
++ printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n");
++ err = -ENOMEM;
++ goto failed;
++ }
++ data = hw->priv;
++ data->hw = hw;
++
++ data->dev = device_create(hwsim_class, NULL, 0, hw, "hwsim%d", idx);
++ if (IS_ERR(data->dev)) {
++ printk(KERN_DEBUG
++ "mac80211_hwsim: device_create failed (%ld)\n",
++ PTR_ERR(data->dev));
++ err = -ENOMEM;
++ goto failed_drvdata;
++ }
++ data->dev->driver = &mac80211_hwsim_driver.driver;
++ err = device_bind_driver(data->dev);
++ if (err != 0) {
++ printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
++ err);
++ goto failed_hw;
++ }
++
++ skb_queue_head_init(&data->pending);
++
++ SET_IEEE80211_DEV(hw, data->dev);
++ memset(addr, 0, ETH_ALEN);
++ addr[0] = 0x02;
++ addr[3] = idx >> 8;
++ addr[4] = idx;
++ memcpy(data->addresses[0].addr, addr, ETH_ALEN);
++ memcpy(data->addresses[1].addr, addr, ETH_ALEN);
++ data->addresses[1].addr[0] |= 0x40;
++ hw->wiphy->n_addresses = 2;
++ hw->wiphy->addresses = data->addresses;
++
++ data->channels = channels;
++ data->idx = idx;
++
++ if (data->channels > 1) {
++ hw->wiphy->max_scan_ssids = 255;
++ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
++ hw->wiphy->max_remain_on_channel_duration = 1000;
++ /* For channels > 1 DFS is not allowed */
++ hw->wiphy->n_iface_combinations = 1;
++ hw->wiphy->iface_combinations = &data->if_combination;
++ data->if_combination = hwsim_if_comb[0];
++ data->if_combination.num_different_channels = data->channels;
++ } else {
++ hw->wiphy->iface_combinations = hwsim_if_comb;
++ hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
++ }
++
++ INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
++ INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
++
++ hw->queues = 5;
++ hw->offchannel_tx_hw_queue = 4;
++ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
++ BIT(NL80211_IFTYPE_AP) |
++ BIT(NL80211_IFTYPE_P2P_CLIENT) |
++ BIT(NL80211_IFTYPE_P2P_GO) |
++ BIT(NL80211_IFTYPE_ADHOC) |
++ BIT(NL80211_IFTYPE_MESH_POINT) |
++ BIT(NL80211_IFTYPE_P2P_DEVICE);
++
++ hw->flags = IEEE80211_HW_MFP_CAPABLE |
++ IEEE80211_HW_SIGNAL_DBM |
++ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
++ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
++ IEEE80211_HW_AMPDU_AGGREGATION |
++ IEEE80211_HW_WANT_MONITOR_VIF |
++ IEEE80211_HW_QUEUE_CONTROL |
++ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
++ if (rctbl)
++ hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
++
++ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
++ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
++ WIPHY_FLAG_AP_UAPSD;
++ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
++
++ /* ask mac80211 to reserve space for magic */
++ hw->vif_data_size = sizeof(struct hwsim_vif_priv);
++ hw->sta_data_size = sizeof(struct hwsim_sta_priv);
++ hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
++
++ memcpy(data->channels_2ghz, hwsim_channels_2ghz,
++ sizeof(hwsim_channels_2ghz));
++ memcpy(data->channels_5ghz, hwsim_channels_5ghz,
++ sizeof(hwsim_channels_5ghz));
++ memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
++
++ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
++ struct ieee80211_supported_band *sband = &data->bands[band];
++ switch (band) {
++ case IEEE80211_BAND_2GHZ:
++ sband->channels = data->channels_2ghz;
++ sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
++ sband->bitrates = data->rates;
++ sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
++ break;
++ case IEEE80211_BAND_5GHZ:
++ sband->channels = data->channels_5ghz;
++ sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
++ sband->bitrates = data->rates + 4;
++ sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
++ break;
++ default:
++ continue;
++ }
++
++ sband->ht_cap.ht_supported = true;
++ sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
++ IEEE80211_HT_CAP_GRN_FLD |
++ IEEE80211_HT_CAP_SGI_40 |
++ IEEE80211_HT_CAP_DSSSCCK40;
++ sband->ht_cap.ampdu_factor = 0x3;
++ sband->ht_cap.ampdu_density = 0x6;
++ memset(&sband->ht_cap.mcs, 0,
++ sizeof(sband->ht_cap.mcs));
++ sband->ht_cap.mcs.rx_mask[0] = 0xff;
++ sband->ht_cap.mcs.rx_mask[1] = 0xff;
++ sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
++
++ hw->wiphy->bands[band] = sband;
++
++ sband->vht_cap.vht_supported = true;
++ sband->vht_cap.cap =
++ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
++ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
++ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
++ IEEE80211_VHT_CAP_RXLDPC |
++ IEEE80211_VHT_CAP_SHORT_GI_80 |
++ IEEE80211_VHT_CAP_SHORT_GI_160 |
++ IEEE80211_VHT_CAP_TXSTBC |
++ IEEE80211_VHT_CAP_RXSTBC_1 |
++ IEEE80211_VHT_CAP_RXSTBC_2 |
++ IEEE80211_VHT_CAP_RXSTBC_3 |
++ IEEE80211_VHT_CAP_RXSTBC_4 |
++ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
++ sband->vht_cap.vht_mcs.rx_mcs_map =
++ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
++ IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
++ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
++ IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
++ IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
++ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
++ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
++ IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
++ sband->vht_cap.vht_mcs.tx_mcs_map =
++ sband->vht_cap.vht_mcs.rx_mcs_map;
++ }
++
++ /* By default all radios belong to the first group */
++ data->group = 1;
++ mutex_init(&data->mutex);
++
++ /* Enable frame retransmissions for lossy channels */
++ hw->max_rates = 4;
++ hw->max_rate_tries = 11;
++
++ if (reg_strict)
++ hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
++ if (regd) {
++ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
++ wiphy_apply_custom_regulatory(hw->wiphy, regd);
++ /* give the regulatory workqueue a chance to run */
++ schedule_timeout_interruptible(1);
++ }
++
++ err = ieee80211_register_hw(hw);
++ if (err < 0) {
++ printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
++ err);
++ goto failed_hw;
++ }
++
++ wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr);
++
++ if (reg_alpha2)
++ regulatory_hint(hw->wiphy, reg_alpha2);
++
++ data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir);
++ debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
++ debugfs_create_file("group", 0666, data->debugfs, data,
++ &hwsim_fops_group);
++ if (data->channels == 1)
++ debugfs_create_file("dfs_simulate_radar", 0222,
++ data->debugfs,
++ data, &hwsim_simulate_radar);
++
++ tasklet_hrtimer_init(&data->beacon_timer,
++ mac80211_hwsim_beacon,
++ CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
++
++ spin_lock_bh(&hwsim_radio_lock);
++ list_add_tail(&data->list, &hwsim_radios);
++ spin_unlock_bh(&hwsim_radio_lock);
++
++ return idx;
++
++failed_hw:
++ device_unregister(data->dev);
++failed_drvdata:
++ ieee80211_free_hw(hw);
++failed:
++ return err;
++}
++
++static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data)
++{
++ debugfs_remove_recursive(data->debugfs);
++ ieee80211_unregister_hw(data->hw);
++ device_release_driver(data->dev);
++ device_unregister(data->dev);
++ ieee80211_free_hw(data->hw);
++}
++
++static void mac80211_hwsim_free(void)
++{
++ struct mac80211_hwsim_data *data;
++
++ spin_lock_bh(&hwsim_radio_lock);
++ while ((data = list_first_entry_or_null(&hwsim_radios,
++ struct mac80211_hwsim_data,
++ list))) {
++ list_del(&data->list);
++ spin_unlock_bh(&hwsim_radio_lock);
++ mac80211_hwsim_destroy_radio(data);
++ spin_lock_bh(&hwsim_radio_lock);
++ }
++ spin_unlock_bh(&hwsim_radio_lock);
++ class_destroy(hwsim_class);
++}
++
++static const struct net_device_ops hwsim_netdev_ops = {
++ .ndo_start_xmit = hwsim_mon_xmit,
++ .ndo_change_mtu = eth_change_mtu,
++ .ndo_set_mac_address = eth_mac_addr,
++ .ndo_validate_addr = eth_validate_addr,
++};
++
++static void hwsim_mon_setup(struct net_device *dev)
++{
++ dev->netdev_ops = &hwsim_netdev_ops;
++ dev->destructor = free_netdev;
++ ether_setup(dev);
++ dev->tx_queue_len = 0;
++ dev->type = ARPHRD_IEEE80211_RADIOTAP;
++ memset(dev->dev_addr, 0, ETH_ALEN);
++ dev->dev_addr[0] = 0x12;
++}
++
++static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
++{
++ struct mac80211_hwsim_data *data;
++ bool _found = false;
++
++ spin_lock_bh(&hwsim_radio_lock);
++ list_for_each_entry(data, &hwsim_radios, list) {
++ if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
++ _found = true;
++ break;
++ }
++ }
++ spin_unlock_bh(&hwsim_radio_lock);
++
++ if (!_found)
++ return NULL;
++
++ return data;
++}
++
++static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
++ struct genl_info *info)
++{
++
++ struct ieee80211_hdr *hdr;
++ struct mac80211_hwsim_data *data2;
++ struct ieee80211_tx_info *txi;
++ struct hwsim_tx_rate *tx_attempts;
++ unsigned long ret_skb_ptr;
++ struct sk_buff *skb, *tmp;
++ const u8 *src;
++ unsigned int hwsim_flags;
++ int i;
++ bool found = false;
++
++ if (info->snd_portid != wmediumd_portid)
++ return -EINVAL;
++
++ if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
++ !info->attrs[HWSIM_ATTR_FLAGS] ||
++ !info->attrs[HWSIM_ATTR_COOKIE] ||
++ !info->attrs[HWSIM_ATTR_TX_INFO])
++ goto out;
++
++ src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
++ hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
++ ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
++
++ data2 = get_hwsim_data_ref_from_addr(src);
++ if (!data2)
++ goto out;
++
++ /* look for the skb matching the cookie passed back from user */
++ skb_queue_walk_safe(&data2->pending, skb, tmp) {
++ if ((unsigned long)skb == ret_skb_ptr) {
++ skb_unlink(skb, &data2->pending);
++ found = true;
++ break;
++ }
++ }
++
++ /* not found */
++ if (!found)
++ goto out;
++
++ /* Tx info received because the frame was broadcasted on user space,
++ so we get all the necessary info: tx attempts and skb control buff */
++
++ tx_attempts = (struct hwsim_tx_rate *)nla_data(
++ info->attrs[HWSIM_ATTR_TX_INFO]);
++
++ /* now send back TX status */
++ txi = IEEE80211_SKB_CB(skb);
++
++ ieee80211_tx_info_clear_status(txi);
++
++ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
++ txi->status.rates[i].idx = tx_attempts[i].idx;
++ txi->status.rates[i].count = tx_attempts[i].count;
++ /*txi->status.rates[i].flags = 0;*/
++ }
++
++ txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
++
++ if (!(hwsim_flags & HWSIM_TX_CTL_NO_ACK) &&
++ (hwsim_flags & HWSIM_TX_STAT_ACK)) {
++ if (skb->len >= 16) {
++ hdr = (struct ieee80211_hdr *) skb->data;
++ mac80211_hwsim_monitor_ack(data2->channel,
++ hdr->addr2);
++ }
++ txi->flags |= IEEE80211_TX_STAT_ACK;
++ }
++ ieee80211_tx_status_irqsafe(data2->hw, skb);
++ return 0;
++out:
++ return -EINVAL;
++
++}
++
++static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
++ struct genl_info *info)
++{
++
++ struct mac80211_hwsim_data *data2;
++ struct ieee80211_rx_status rx_status;
++ const u8 *dst;
++ int frame_data_len;
++ void *frame_data;
++ struct sk_buff *skb = NULL;
++
++ if (info->snd_portid != wmediumd_portid)
++ return -EINVAL;
++
++ if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
++ !info->attrs[HWSIM_ATTR_FRAME] ||
++ !info->attrs[HWSIM_ATTR_RX_RATE] ||
++ !info->attrs[HWSIM_ATTR_SIGNAL])
++ goto out;
++
++ dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
++ frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
++ frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
++
++ /* Allocate new skb here */
++ skb = alloc_skb(frame_data_len, GFP_KERNEL);
++ if (skb == NULL)
++ goto err;
++
++ if (frame_data_len > IEEE80211_MAX_DATA_LEN)
++ goto err;
++
++ /* Copy the data */
++ memcpy(skb_put(skb, frame_data_len), frame_data, frame_data_len);
++
++ data2 = get_hwsim_data_ref_from_addr(dst);
++ if (!data2)
++ goto out;
++
++ /* check if radio is configured properly */
++
++ if (data2->idle || !data2->started)
++ goto out;
++
++ /* A frame is received from user space */
++ memset(&rx_status, 0, sizeof(rx_status));
++ rx_status.freq = data2->channel->center_freq;
++ rx_status.band = data2->channel->band;
++ rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
++ rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
++
++ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
++ ieee80211_rx_irqsafe(data2->hw, skb);
++
++ return 0;
++err:
++ printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
++ goto out;
++out:
++ dev_kfree_skb(skb);
++ return -EINVAL;
++}
++
++static int hwsim_register_received_nl(struct sk_buff *skb_2,
++ struct genl_info *info)
++{
++ struct mac80211_hwsim_data *data;
++ int chans = 1;
++
++ spin_lock_bh(&hwsim_radio_lock);
++ list_for_each_entry(data, &hwsim_radios, list)
++ chans = max(chans, data->channels);
++ spin_unlock_bh(&hwsim_radio_lock);
++
++ /* In the future we should revise the userspace API and allow it
++ * to set a flag that it does support multi-channel, then we can
++ * let this pass conditionally on the flag.
++ * For current userspace, prohibit it since it won't work right.
++ */
++ if (chans > 1)
++ return -EOPNOTSUPP;
++
++ if (wmediumd_portid)
++ return -EBUSY;
++
++ wmediumd_portid = info->snd_portid;
++
++ printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
++ "switching to wmediumd mode with pid %d\n", info->snd_portid);
++
++ return 0;
++}
++
++static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
++{
++ unsigned int chans = channels;
++ const char *alpha2 = NULL;
++ const struct ieee80211_regdomain *regd = NULL;
++ bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
++
++ if (info->attrs[HWSIM_ATTR_CHANNELS])
++ chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
++
++ if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
++ alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
++
++ if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
++ u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
++
++ if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
++ return -EINVAL;
++ regd = hwsim_world_regdom_custom[idx];
++ }
++
++ return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
++}
++
++static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
++{
++ struct mac80211_hwsim_data *data;
++ int idx;
++
++ if (!info->attrs[HWSIM_ATTR_RADIO_ID])
++ return -EINVAL;
++ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
++
++ spin_lock_bh(&hwsim_radio_lock);
++ list_for_each_entry(data, &hwsim_radios, list) {
++ if (data->idx != idx)
++ continue;
++ list_del(&data->list);
++ spin_unlock_bh(&hwsim_radio_lock);
++ mac80211_hwsim_destroy_radio(data);
++ return 0;
++ }
++ spin_unlock_bh(&hwsim_radio_lock);
++
++ return -ENODEV;
++}
++
++/* Generic Netlink operations array */
++static const struct genl_ops hwsim_ops[] = {
++ {
++ .cmd = HWSIM_CMD_REGISTER,
++ .policy = hwsim_genl_policy,
++ .doit = hwsim_register_received_nl,
++ .flags = GENL_ADMIN_PERM,
++ },
++ {
++ .cmd = HWSIM_CMD_FRAME,
++ .policy = hwsim_genl_policy,
++ .doit = hwsim_cloned_frame_received_nl,
++ },
++ {
++ .cmd = HWSIM_CMD_TX_INFO_FRAME,
++ .policy = hwsim_genl_policy,
++ .doit = hwsim_tx_info_frame_received_nl,
++ },
++ {
++ .cmd = HWSIM_CMD_CREATE_RADIO,
++ .policy = hwsim_genl_policy,
++ .doit = hwsim_create_radio_nl,
++ .flags = GENL_ADMIN_PERM,
++ },
++ {
++ .cmd = HWSIM_CMD_DESTROY_RADIO,
++ .policy = hwsim_genl_policy,
++ .doit = hwsim_destroy_radio_nl,
++ .flags = GENL_ADMIN_PERM,
++ },
++};
++
++static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
++ unsigned long state,
++ void *_notify)
++{
++ struct netlink_notify *notify = _notify;
++
++ if (state != NETLINK_URELEASE)
++ return NOTIFY_DONE;
++
++ if (notify->portid == wmediumd_portid) {
++ printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
++ " socket, switching to perfect channel medium\n");
++ wmediumd_portid = 0;
++ }
++ return NOTIFY_DONE;
++
++}
++
++static struct notifier_block hwsim_netlink_notifier = {
++ .notifier_call = mac80211_hwsim_netlink_notify,
++};
++
++static int hwsim_init_netlink(void)
++{
++ int rc;
++
++ printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
++
++ rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops);
++ if (rc)
++ goto failure;
++
++ rc = netlink_register_notifier(&hwsim_netlink_notifier);
++ if (rc)
++ goto failure;
++
++ return 0;
++
++failure:
++ printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
++ return -EINVAL;
++}
++
++static void hwsim_exit_netlink(void)
++{
++ /* unregister the notifier */
++ netlink_unregister_notifier(&hwsim_netlink_notifier);
++ /* unregister the family */
++ genl_unregister_family(&hwsim_genl_family);
++}
++
++static int __init init_mac80211_hwsim(void)
++{
++ int i, err;
++
++ if (radios < 0 || radios > 100)
++ return -EINVAL;
++
++ if (channels < 1)
++ return -EINVAL;
++
++ mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
++ mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
++ mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
++ mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
++ mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
++ mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
++ mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
++ mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
++ mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
++ mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
++ mac80211_hwsim_mchan_ops.assign_vif_chanctx =
++ mac80211_hwsim_assign_vif_chanctx;
++ mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
++ mac80211_hwsim_unassign_vif_chanctx;
++
++ spin_lock_init(&hwsim_radio_lock);
++ INIT_LIST_HEAD(&hwsim_radios);
++
++ err = platform_driver_register(&mac80211_hwsim_driver);
++ if (err)
++ return err;
++
++ hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
++ if (IS_ERR(hwsim_class)) {
++ err = PTR_ERR(hwsim_class);
++ goto out_unregister_driver;
++ }
++
++ for (i = 0; i < radios; i++) {
++ const char *reg_alpha2 = NULL;
++ const struct ieee80211_regdomain *regd = NULL;
++ bool reg_strict = false;
++
++ switch (regtest) {
++ case HWSIM_REGTEST_DIFF_COUNTRY:
++ if (i < ARRAY_SIZE(hwsim_alpha2s))
++ reg_alpha2 = hwsim_alpha2s[i];
++ break;
++ case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
++ if (!i)
++ reg_alpha2 = hwsim_alpha2s[0];
++ break;
++ case HWSIM_REGTEST_STRICT_ALL:
++ reg_strict = true;
++ case HWSIM_REGTEST_DRIVER_REG_ALL:
++ reg_alpha2 = hwsim_alpha2s[0];
++ break;
++ case HWSIM_REGTEST_WORLD_ROAM:
++ if (i == 0)
++ regd = &hwsim_world_regdom_custom_01;
++ break;
++ case HWSIM_REGTEST_CUSTOM_WORLD:
++ regd = &hwsim_world_regdom_custom_01;
++ break;
++ case HWSIM_REGTEST_CUSTOM_WORLD_2:
++ if (i == 0)
++ regd = &hwsim_world_regdom_custom_01;
++ else if (i == 1)
++ regd = &hwsim_world_regdom_custom_02;
++ break;
++ case HWSIM_REGTEST_STRICT_FOLLOW:
++ if (i == 0) {
++ reg_strict = true;
++ reg_alpha2 = hwsim_alpha2s[0];
++ }
++ break;
++ case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
++ if (i == 0) {
++ reg_strict = true;
++ reg_alpha2 = hwsim_alpha2s[0];
++ } else if (i == 1) {
++ reg_alpha2 = hwsim_alpha2s[1];
++ }
++ break;
++ case HWSIM_REGTEST_ALL:
++ switch (i) {
++ case 0:
++ regd = &hwsim_world_regdom_custom_01;
++ break;
++ case 1:
++ regd = &hwsim_world_regdom_custom_02;
++ break;
++ case 2:
++ reg_alpha2 = hwsim_alpha2s[0];
++ break;
++ case 3:
++ reg_alpha2 = hwsim_alpha2s[1];
++ break;
++ case 4:
++ reg_strict = true;
++ reg_alpha2 = hwsim_alpha2s[2];
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++
++ err = mac80211_hwsim_create_radio(channels, reg_alpha2,
++ regd, reg_strict);
++ if (err < 0)
++ goto out_free_radios;
++ }
++
++ hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
++ if (hwsim_mon == NULL) {
++ err = -ENOMEM;
++ goto out_free_radios;
++ }
++
++ rtnl_lock();
++ err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
++ if (err < 0) {
++ rtnl_unlock();
++ goto out_free_radios;
++ }
++
++ err = register_netdevice(hwsim_mon);
++ if (err < 0) {
++ rtnl_unlock();
++ goto out_free_mon;
++ }
++ rtnl_unlock();
++
++ err = hwsim_init_netlink();
++ if (err < 0)
++ goto out_free_mon;
++
++ return 0;
++
++out_free_mon:
++ free_netdev(hwsim_mon);
++out_free_radios:
++ mac80211_hwsim_free();
++out_unregister_driver:
++ platform_driver_unregister(&mac80211_hwsim_driver);
++ return err;
++}
++module_init(init_mac80211_hwsim);
++
++static void __exit exit_mac80211_hwsim(void)
++{
++ printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n");
++
++ hwsim_exit_netlink();
++
++ mac80211_hwsim_free();
++ unregister_netdev(hwsim_mon);
++ platform_driver_unregister(&mac80211_hwsim_driver);
++}
++module_exit(exit_mac80211_hwsim);
+diff -Nur linux-3.14.36/drivers/net/wireless/mwifiex/cfg80211.c linux-openelec/drivers/net/wireless/mwifiex/cfg80211.c
+--- linux-3.14.36/drivers/net/wireless/mwifiex/cfg80211.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/mwifiex/cfg80211.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1881,7 +1881,8 @@
+ params->privacy);
+ done:
+ if (!ret) {
+- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
++ params->chandef.chan, GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: joined/created adhoc network with bssid"
+ " %pM successfully\n", priv->cfg_bssid);
+diff -Nur linux-3.14.36/drivers/net/wireless/mwifiex/main.h linux-openelec/drivers/net/wireless/mwifiex/main.h
+--- linux-3.14.36/drivers/net/wireless/mwifiex/main.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/mwifiex/main.h 2015-05-06 12:05:42.000000000 -0500
+@@ -1078,7 +1078,7 @@
+ const u8 *key, int key_len, u8 key_index,
+ const u8 *mac_addr, int disable);
+
+-int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
++int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
+
+ int mwifiex_get_ver_ext(struct mwifiex_private *priv);
+
+diff -Nur linux-3.14.36/drivers/net/wireless/mwifiex/sta_ioctl.c linux-openelec/drivers/net/wireless/mwifiex/sta_ioctl.c
+--- linux-3.14.36/drivers/net/wireless/mwifiex/sta_ioctl.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/mwifiex/sta_ioctl.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1391,7 +1391,7 @@
+ * with requisite parameters and calls the IOCTL handler.
+ */
+ int
+-mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
++mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
+ {
+ struct mwifiex_ds_misc_gen_ie gen_ie;
+
+diff -Nur linux-3.14.36/drivers/net/wireless/p54/main.c linux-openelec/drivers/net/wireless/p54/main.c
+--- linux-3.14.36/drivers/net/wireless/p54/main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/p54/main.c 2015-05-06 12:05:42.000000000 -0500
+@@ -669,7 +669,8 @@
+ return total;
+ }
+
+-static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
++static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct p54_common *priv = dev->priv;
+ unsigned int total, i;
+diff -Nur linux-3.14.36/drivers/net/wireless/rndis_wlan.c linux-openelec/drivers/net/wireless/rndis_wlan.c
+--- linux-3.14.36/drivers/net/wireless/rndis_wlan.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/rndis_wlan.c 2015-05-06 12:05:42.000000000 -0500
+@@ -2835,7 +2835,9 @@
+ bssid, req_ie, req_ie_len,
+ resp_ie, resp_ie_len, GFP_KERNEL);
+ } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
+- cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(usbdev->net, bssid,
++ get_current_channel(usbdev, NULL),
++ GFP_KERNEL);
+
+ kfree(info);
+
+diff -Nur linux-3.14.36/drivers/net/wireless/rt2x00/rt2800usb.c linux-openelec/drivers/net/wireless/rt2x00/rt2800usb.c
+--- linux-3.14.36/drivers/net/wireless/rt2x00/rt2800usb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/rt2x00/rt2800usb.c 2015-07-24 18:03:30.416842002 -0500
+@@ -240,6 +240,7 @@
+ int status;
+ u32 offset;
+ u32 length;
++ __le32 fwMode;
+
+ /*
+ * Check which section of the firmware we need.
+@@ -257,8 +258,17 @@
+ /*
+ * Write firmware to device.
+ */
+- rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
+- data + offset, length);
++ rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
++ USB_VENDOR_REQUEST_IN, 0, 0x11, &fwMode,
++ sizeof(fwMode), REGISTER_TIMEOUT_FIRMWARE);
++
++ if ((fwMode & 0x00000003) == 2) {
++ rt2x00_info(rt2x00dev,
++ "Firmware loading not required - NIC in AutoRun mode\n");
++ } else {
++ rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
++ data + offset, length);
++ }
+
+ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
+ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
+@@ -735,11 +745,25 @@
+ /*
+ * Device probe functions.
+ */
++static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
++{
++ __le32 fwMode;
++
++ rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
++ USB_VENDOR_REQUEST_IN, 0, 0x11, &fwMode,
++ sizeof(fwMode), REGISTER_TIMEOUT_FIRMWARE);
++
++ if ((fwMode & 0x00000003) == 2)
++ return 1;
++
++ return rt2800_efuse_detect(rt2x00dev);
++}
++
+ static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
+
+- if (rt2800_efuse_detect(rt2x00dev))
++ if (rt2800usb_efuse_detect(rt2x00dev))
+ retval = rt2800_read_eeprom_efuse(rt2x00dev);
+ else
+ retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
+@@ -971,6 +995,7 @@
+ { USB_DEVICE(0x0411, 0x015d) },
+ { USB_DEVICE(0x0411, 0x016f) },
+ { USB_DEVICE(0x0411, 0x01a2) },
++ { USB_DEVICE(0x0411, 0x01a8) },
+ { USB_DEVICE(0x0411, 0x01ee) },
+ { USB_DEVICE(0x0411, 0x01a8) },
+ /* Corega */
+diff -Nur linux-3.14.36/drivers/net/wireless/rt2x00/rt2800usb.c.orig linux-openelec/drivers/net/wireless/rt2x00/rt2800usb.c.orig
+--- linux-3.14.36/drivers/net/wireless/rt2x00/rt2800usb.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/net/wireless/rt2x00/rt2800usb.c.orig 2015-07-24 18:03:28.856842002 -0500
+@@ -0,0 +1,1407 @@
++/*
++ Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
++ Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
++ Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
++ Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
++ Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
++ Copyright (C) 2009 Axel Kollhofer <rain_maker@root-forum.org>
++ <http://rt2x00.serialmonkey.com>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++/*
++ Module: rt2800usb
++ Abstract: rt2800usb device specific routines.
++ Supported chipsets: RT2800U.
++ */
++
++#include <linux/delay.h>
++#include <linux/etherdevice.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/usb.h>
++
++#include "rt2x00.h"
++#include "rt2x00usb.h"
++#include "rt2800lib.h"
++#include "rt2800.h"
++#include "rt2800usb.h"
++
++/*
++ * Allow hardware encryption to be disabled.
++ */
++static bool modparam_nohwcrypt;
++module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
++MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
++
++static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
++{
++ return modparam_nohwcrypt;
++}
++
++/*
++ * Queue handlers.
++ */
++static void rt2800usb_start_queue(struct data_queue *queue)
++{
++ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
++ u32 reg;
++
++ switch (queue->qid) {
++ case QID_RX:
++ rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
++ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
++ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
++ break;
++ case QID_BEACON:
++ rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
++ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
++ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
++ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
++ rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
++ break;
++ default:
++ break;
++ }
++}
++
++static void rt2800usb_stop_queue(struct data_queue *queue)
++{
++ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
++ u32 reg;
++
++ switch (queue->qid) {
++ case QID_RX:
++ rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
++ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
++ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
++ break;
++ case QID_BEACON:
++ rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
++ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
++ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
++ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
++ rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
++ break;
++ default:
++ break;
++ }
++}
++
++/*
++ * test if there is an entry in any TX queue for which DMA is done
++ * but the TX status has not been returned yet
++ */
++static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev)
++{
++ struct data_queue *queue;
++
++ tx_queue_for_each(rt2x00dev, queue) {
++ if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) !=
++ rt2x00queue_get_entry(queue, Q_INDEX_DONE))
++ return true;
++ }
++ return false;
++}
++
++static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
++{
++ bool tout;
++
++ if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
++ return false;
++
++ tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
++ if (unlikely(tout))
++ rt2x00_warn(entry->queue->rt2x00dev,
++ "TX status timeout for entry %d in queue %d\n",
++ entry->entry_idx, entry->queue->qid);
++ return tout;
++
++}
++
++static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
++{
++ struct data_queue *queue;
++ struct queue_entry *entry;
++
++ tx_queue_for_each(rt2x00dev, queue) {
++ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
++ if (rt2800usb_entry_txstatus_timeout(entry))
++ return true;
++ }
++ return false;
++}
++
++#define TXSTATUS_READ_INTERVAL 1000000
++
++static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
++ int urb_status, u32 tx_status)
++{
++ bool valid;
++
++ if (urb_status) {
++ rt2x00_warn(rt2x00dev, "TX status read failed %d\n",
++ urb_status);
++
++ goto stop_reading;
++ }
++
++ valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID);
++ if (valid) {
++ if (!kfifo_put(&rt2x00dev->txstatus_fifo, tx_status))
++ rt2x00_warn(rt2x00dev, "TX status FIFO overrun\n");
++
++ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
++
++ /* Reschedule urb to read TX status again instantly */
++ return true;
++ }
++
++ /* Check if there is any entry that timedout waiting on TX status */
++ if (rt2800usb_txstatus_timeout(rt2x00dev))
++ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
++
++ if (rt2800usb_txstatus_pending(rt2x00dev)) {
++ /* Read register after 1 ms */
++ hrtimer_start(&rt2x00dev->txstatus_timer,
++ ktime_set(0, TXSTATUS_READ_INTERVAL),
++ HRTIMER_MODE_REL);
++ return false;
++ }
++
++stop_reading:
++ clear_bit(TX_STATUS_READING, &rt2x00dev->flags);
++ /*
++ * There is small race window above, between txstatus pending check and
++ * clear_bit someone could do rt2x00usb_interrupt_txdone, so recheck
++ * here again if status reading is needed.
++ */
++ if (rt2800usb_txstatus_pending(rt2x00dev) &&
++ !test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
++ return true;
++ else
++ return false;
++}
++
++static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
++{
++
++ if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
++ return;
++
++ /* Read TX_STA_FIFO register after 2 ms */
++ hrtimer_start(&rt2x00dev->txstatus_timer,
++ ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
++ HRTIMER_MODE_REL);
++}
++
++static void rt2800usb_tx_dma_done(struct queue_entry *entry)
++{
++ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
++
++ rt2800usb_async_read_tx_status(rt2x00dev);
++}
++
++static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
++{
++ struct rt2x00_dev *rt2x00dev =
++ container_of(timer, struct rt2x00_dev, txstatus_timer);
++
++ rt2x00usb_register_read_async(rt2x00dev, TX_STA_FIFO,
++ rt2800usb_tx_sta_fifo_read_completed);
++
++ return HRTIMER_NORESTART;
++}
++
++/*
++ * Firmware functions
++ */
++static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
++{
++ return FIRMWARE_RT2870;
++}
++
++static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
++ const u8 *data, const size_t len)
++{
++ int status;
++ u32 offset;
++ u32 length;
++
++ /*
++ * Check which section of the firmware we need.
++ */
++ if (rt2x00_rt(rt2x00dev, RT2860) ||
++ rt2x00_rt(rt2x00dev, RT2872) ||
++ rt2x00_rt(rt2x00dev, RT3070)) {
++ offset = 0;
++ length = 4096;
++ } else {
++ offset = 4096;
++ length = 4096;
++ }
++
++ /*
++ * Write firmware to device.
++ */
++ rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
++ data + offset, length);
++
++ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
++ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
++
++ /*
++ * Send firmware request to device to load firmware,
++ * we need to specify a long timeout time.
++ */
++ status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE,
++ 0, USB_MODE_FIRMWARE,
++ REGISTER_TIMEOUT_FIRMWARE);
++ if (status < 0) {
++ rt2x00_err(rt2x00dev, "Failed to write Firmware to device\n");
++ return status;
++ }
++
++ msleep(10);
++ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
++
++ return 0;
++}
++
++/*
++ * Device state switch handlers.
++ */
++static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
++{
++ u32 reg;
++
++ /*
++ * Wait until BBP and RF are ready.
++ */
++ if (rt2800_wait_csr_ready(rt2x00dev))
++ return -EBUSY;
++
++ rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
++ rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
++
++ reg = 0;
++ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
++ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
++ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
++
++ rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
++
++ rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
++ USB_MODE_RESET, REGISTER_TIMEOUT);
++
++ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
++
++ return 0;
++}
++
++static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
++{
++ u32 reg;
++
++ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev)))
++ return -EIO;
++
++ rt2x00usb_register_read(rt2x00dev, USB_DMA_CFG, &reg);
++ rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
++ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
++ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
++ /*
++ * Total room for RX frames in kilobytes, PBF might still exceed
++ * this limit so reduce the number to prevent errors.
++ */
++ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
++ ((rt2x00dev->rx->limit * DATA_FRAME_SIZE)
++ / 1024) - 3);
++ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
++ rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
++ rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, reg);
++
++ return rt2800_enable_radio(rt2x00dev);
++}
++
++static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
++{
++ rt2800_disable_radio(rt2x00dev);
++ rt2x00usb_disable_radio(rt2x00dev);
++}
++
++static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
++ enum dev_state state)
++{
++ if (state == STATE_AWAKE)
++ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 2);
++ else
++ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
++
++ return 0;
++}
++
++static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
++ enum dev_state state)
++{
++ int retval = 0;
++
++ switch (state) {
++ case STATE_RADIO_ON:
++ /*
++ * Before the radio can be enabled, the device first has
++ * to be woken up. After that it needs a bit of time
++ * to be fully awake and then the radio can be enabled.
++ */
++ rt2800usb_set_state(rt2x00dev, STATE_AWAKE);
++ msleep(1);
++ retval = rt2800usb_enable_radio(rt2x00dev);
++ break;
++ case STATE_RADIO_OFF:
++ /*
++ * After the radio has been disabled, the device should
++ * be put to sleep for powersaving.
++ */
++ rt2800usb_disable_radio(rt2x00dev);
++ rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
++ break;
++ case STATE_RADIO_IRQ_ON:
++ case STATE_RADIO_IRQ_OFF:
++ /* No support, but no error either */
++ break;
++ case STATE_DEEP_SLEEP:
++ case STATE_SLEEP:
++ case STATE_STANDBY:
++ case STATE_AWAKE:
++ retval = rt2800usb_set_state(rt2x00dev, state);
++ break;
++ default:
++ retval = -ENOTSUPP;
++ break;
++ }
++
++ if (unlikely(retval))
++ rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
++ state, retval);
++
++ return retval;
++}
++
++/*
++ * Watchdog handlers
++ */
++static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
++{
++ unsigned int i;
++ u32 reg;
++
++ rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
++ if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
++ rt2x00_warn(rt2x00dev, "TX HW queue 0 timed out, invoke forced kick\n");
++
++ rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40012);
++
++ for (i = 0; i < 10; i++) {
++ udelay(10);
++ if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q))
++ break;
++ }
++
++ rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
++ }
++
++ rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
++ if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
++ rt2x00_warn(rt2x00dev, "TX HW queue 1 timed out, invoke forced kick\n");
++
++ rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
++
++ for (i = 0; i < 10; i++) {
++ udelay(10);
++ if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q))
++ break;
++ }
++
++ rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
++ }
++
++ rt2x00usb_watchdog(rt2x00dev);
++}
++
++/*
++ * TX descriptor initialization
++ */
++static __le32 *rt2800usb_get_txwi(struct queue_entry *entry)
++{
++ if (entry->queue->qid == QID_BEACON)
++ return (__le32 *) (entry->skb->data);
++ else
++ return (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE);
++}
++
++static void rt2800usb_write_tx_desc(struct queue_entry *entry,
++ struct txentry_desc *txdesc)
++{
++ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
++ __le32 *txi = (__le32 *) entry->skb->data;
++ u32 word;
++
++ /*
++ * Initialize TXINFO descriptor
++ */
++ rt2x00_desc_read(txi, 0, &word);
++
++ /*
++ * The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
++ * TXWI + 802.11 header + L2 pad + payload + pad,
++ * so need to decrease size of TXINFO.
++ */
++ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
++ roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE);
++ rt2x00_set_field32(&word, TXINFO_W0_WIV,
++ !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
++ rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
++ rt2x00_set_field32(&word, TXINFO_W0_SW_USE_LAST_ROUND, 0);
++ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_NEXT_VALID, 0);
++ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_BURST,
++ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
++ rt2x00_desc_write(txi, 0, word);
++
++ /*
++ * Register descriptor details in skb frame descriptor.
++ */
++ skbdesc->flags |= SKBDESC_DESC_IN_SKB;
++ skbdesc->desc = txi;
++ skbdesc->desc_len = TXINFO_DESC_SIZE + entry->queue->winfo_size;
++}
++
++/*
++ * TX data initialization
++ */
++static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
++{
++ /*
++ * pad(1~3 bytes) is needed after each 802.11 payload.
++ * USB end pad(4 bytes) is needed at each USB bulk out packet end.
++ * TX frame format is :
++ * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
++ * |<------------- tx_pkt_len ------------->|
++ */
++
++ return roundup(entry->skb->len, 4) + 4;
++}
++
++/*
++ * TX control handlers
++ */
++static enum txdone_entry_desc_flags
++rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
++{
++ __le32 *txwi;
++ u32 word;
++ int wcid, ack, pid;
++ int tx_wcid, tx_ack, tx_pid, is_agg;
++
++ /*
++ * This frames has returned with an IO error,
++ * so the status report is not intended for this
++ * frame.
++ */
++ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
++ return TXDONE_FAILURE;
++
++ wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
++ ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
++ pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
++ is_agg = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE);
++
++ /*
++ * Validate if this TX status report is intended for
++ * this entry by comparing the WCID/ACK/PID fields.
++ */
++ txwi = rt2800usb_get_txwi(entry);
++
++ rt2x00_desc_read(txwi, 1, &word);
++ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
++ tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
++ tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
++
++ if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
++ rt2x00_dbg(entry->queue->rt2x00dev,
++ "TX status report missed for queue %d entry %d\n",
++ entry->queue->qid, entry->entry_idx);
++ return TXDONE_UNKNOWN;
++ }
++
++ return TXDONE_SUCCESS;
++}
++
++static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
++{
++ struct data_queue *queue;
++ struct queue_entry *entry;
++ u32 reg;
++ u8 qid;
++ enum txdone_entry_desc_flags done_status;
++
++ while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
++ /*
++ * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is
++ * guaranteed to be one of the TX QIDs .
++ */
++ qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
++ queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
++
++ if (unlikely(rt2x00queue_empty(queue))) {
++ rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
++ qid);
++ break;
++ }
++
++ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
++
++ if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
++ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) {
++ rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n",
++ entry->entry_idx, qid);
++ break;
++ }
++
++ done_status = rt2800usb_txdone_entry_check(entry, reg);
++ if (likely(done_status == TXDONE_SUCCESS))
++ rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry));
++ else
++ rt2x00lib_txdone_noinfo(entry, done_status);
++ }
++}
++
++static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
++{
++ struct data_queue *queue;
++ struct queue_entry *entry;
++
++ /*
++ * Process any trailing TX status reports for IO failures,
++ * we loop until we find the first non-IO error entry. This
++ * can either be a frame which is free, is being uploaded,
++ * or has completed the upload but didn't have an entry
++ * in the TX_STAT_FIFO register yet.
++ */
++ tx_queue_for_each(rt2x00dev, queue) {
++ while (!rt2x00queue_empty(queue)) {
++ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
++
++ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
++ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
++ break;
++
++ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
++ rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
++ else if (rt2800usb_entry_txstatus_timeout(entry))
++ rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
++ else
++ break;
++ }
++ }
++}
++
++static void rt2800usb_work_txdone(struct work_struct *work)
++{
++ struct rt2x00_dev *rt2x00dev =
++ container_of(work, struct rt2x00_dev, txdone_work);
++
++ while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
++ rt2800usb_txstatus_timeout(rt2x00dev)) {
++
++ rt2800usb_txdone(rt2x00dev);
++
++ rt2800usb_txdone_nostatus(rt2x00dev);
++
++ /*
++ * The hw may delay sending the packet after DMA complete
++ * if the medium is busy, thus the TX_STA_FIFO entry is
++ * also delayed -> use a timer to retrieve it.
++ */
++ if (rt2800usb_txstatus_pending(rt2x00dev))
++ rt2800usb_async_read_tx_status(rt2x00dev);
++ }
++}
++
++/*
++ * RX control handlers
++ */
++static void rt2800usb_fill_rxdone(struct queue_entry *entry,
++ struct rxdone_entry_desc *rxdesc)
++{
++ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
++ __le32 *rxi = (__le32 *)entry->skb->data;
++ __le32 *rxd;
++ u32 word;
++ int rx_pkt_len;
++
++ /*
++ * Copy descriptor to the skbdesc->desc buffer, making it safe from
++ * moving of frame data in rt2x00usb.
++ */
++ memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
++
++ /*
++ * RX frame format is :
++ * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
++ * |<------------ rx_pkt_len -------------->|
++ */
++ rt2x00_desc_read(rxi, 0, &word);
++ rx_pkt_len = rt2x00_get_field32(word, RXINFO_W0_USB_DMA_RX_PKT_LEN);
++
++ /*
++ * Remove the RXINFO structure from the sbk.
++ */
++ skb_pull(entry->skb, RXINFO_DESC_SIZE);
++
++ /*
++ * Check for rx_pkt_len validity. Return if invalid, leaving
++ * rxdesc->size zeroed out by the upper level.
++ */
++ if (unlikely(rx_pkt_len == 0 ||
++ rx_pkt_len > entry->queue->data_size)) {
++ rt2x00_err(entry->queue->rt2x00dev,
++ "Bad frame size %d, forcing to 0\n", rx_pkt_len);
++ return;
++ }
++
++ rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
++
++ /*
++ * It is now safe to read the descriptor on all architectures.
++ */
++ rt2x00_desc_read(rxd, 0, &word);
++
++ if (rt2x00_get_field32(word, RXD_W0_CRC_ERROR))
++ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
++
++ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W0_CIPHER_ERROR);
++
++ if (rt2x00_get_field32(word, RXD_W0_DECRYPTED)) {
++ /*
++ * Hardware has stripped IV/EIV data from 802.11 frame during
++ * decryption. Unfortunately the descriptor doesn't contain
++ * any fields with the EIV/IV data either, so they can't
++ * be restored by rt2x00lib.
++ */
++ rxdesc->flags |= RX_FLAG_IV_STRIPPED;
++
++ /*
++ * The hardware has already checked the Michael Mic and has
++ * stripped it from the frame. Signal this to mac80211.
++ */
++ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
++
++ if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
++ rxdesc->flags |= RX_FLAG_DECRYPTED;
++ else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
++ rxdesc->flags |= RX_FLAG_MMIC_ERROR;
++ }
++
++ if (rt2x00_get_field32(word, RXD_W0_MY_BSS))
++ rxdesc->dev_flags |= RXDONE_MY_BSS;
++
++ if (rt2x00_get_field32(word, RXD_W0_L2PAD))
++ rxdesc->dev_flags |= RXDONE_L2PAD;
++
++ /*
++ * Remove RXD descriptor from end of buffer.
++ */
++ skb_trim(entry->skb, rx_pkt_len);
++
++ /*
++ * Process the RXWI structure.
++ */
++ rt2800_process_rxwi(entry, rxdesc);
++}
++
++/*
++ * Device probe functions.
++ */
++static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
++{
++ int retval;
++
++ if (rt2800_efuse_detect(rt2x00dev))
++ retval = rt2800_read_eeprom_efuse(rt2x00dev);
++ else
++ retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
++ EEPROM_SIZE);
++
++ return retval;
++}
++
++static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
++{
++ int retval;
++
++ retval = rt2800_probe_hw(rt2x00dev);
++ if (retval)
++ return retval;
++
++ /*
++ * Set txstatus timer function.
++ */
++ rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
++
++ /*
++ * Overwrite TX done handler
++ */
++ PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
++
++ return 0;
++}
++
++static const struct ieee80211_ops rt2800usb_mac80211_ops = {
++ .tx = rt2x00mac_tx,
++ .start = rt2x00mac_start,
++ .stop = rt2x00mac_stop,
++ .add_interface = rt2x00mac_add_interface,
++ .remove_interface = rt2x00mac_remove_interface,
++ .config = rt2x00mac_config,
++ .configure_filter = rt2x00mac_configure_filter,
++ .set_tim = rt2x00mac_set_tim,
++ .set_key = rt2x00mac_set_key,
++ .sw_scan_start = rt2x00mac_sw_scan_start,
++ .sw_scan_complete = rt2x00mac_sw_scan_complete,
++ .get_stats = rt2x00mac_get_stats,
++ .get_tkip_seq = rt2800_get_tkip_seq,
++ .set_rts_threshold = rt2800_set_rts_threshold,
++ .sta_add = rt2x00mac_sta_add,
++ .sta_remove = rt2x00mac_sta_remove,
++ .bss_info_changed = rt2x00mac_bss_info_changed,
++ .conf_tx = rt2800_conf_tx,
++ .get_tsf = rt2800_get_tsf,
++ .rfkill_poll = rt2x00mac_rfkill_poll,
++ .ampdu_action = rt2800_ampdu_action,
++ .flush = rt2x00mac_flush,
++ .get_survey = rt2800_get_survey,
++ .get_ringparam = rt2x00mac_get_ringparam,
++ .tx_frames_pending = rt2x00mac_tx_frames_pending,
++};
++
++static const struct rt2800_ops rt2800usb_rt2800_ops = {
++ .register_read = rt2x00usb_register_read,
++ .register_read_lock = rt2x00usb_register_read_lock,
++ .register_write = rt2x00usb_register_write,
++ .register_write_lock = rt2x00usb_register_write_lock,
++ .register_multiread = rt2x00usb_register_multiread,
++ .register_multiwrite = rt2x00usb_register_multiwrite,
++ .regbusy_read = rt2x00usb_regbusy_read,
++ .read_eeprom = rt2800usb_read_eeprom,
++ .hwcrypt_disabled = rt2800usb_hwcrypt_disabled,
++ .drv_write_firmware = rt2800usb_write_firmware,
++ .drv_init_registers = rt2800usb_init_registers,
++ .drv_get_txwi = rt2800usb_get_txwi,
++};
++
++static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
++ .probe_hw = rt2800usb_probe_hw,
++ .get_firmware_name = rt2800usb_get_firmware_name,
++ .check_firmware = rt2800_check_firmware,
++ .load_firmware = rt2800_load_firmware,
++ .initialize = rt2x00usb_initialize,
++ .uninitialize = rt2x00usb_uninitialize,
++ .clear_entry = rt2x00usb_clear_entry,
++ .set_device_state = rt2800usb_set_device_state,
++ .rfkill_poll = rt2800_rfkill_poll,
++ .link_stats = rt2800_link_stats,
++ .reset_tuner = rt2800_reset_tuner,
++ .link_tuner = rt2800_link_tuner,
++ .gain_calibration = rt2800_gain_calibration,
++ .vco_calibration = rt2800_vco_calibration,
++ .watchdog = rt2800usb_watchdog,
++ .start_queue = rt2800usb_start_queue,
++ .kick_queue = rt2x00usb_kick_queue,
++ .stop_queue = rt2800usb_stop_queue,
++ .flush_queue = rt2x00usb_flush_queue,
++ .tx_dma_done = rt2800usb_tx_dma_done,
++ .write_tx_desc = rt2800usb_write_tx_desc,
++ .write_tx_data = rt2800_write_tx_data,
++ .write_beacon = rt2800_write_beacon,
++ .clear_beacon = rt2800_clear_beacon,
++ .get_tx_data_len = rt2800usb_get_tx_data_len,
++ .fill_rxdone = rt2800usb_fill_rxdone,
++ .config_shared_key = rt2800_config_shared_key,
++ .config_pairwise_key = rt2800_config_pairwise_key,
++ .config_filter = rt2800_config_filter,
++ .config_intf = rt2800_config_intf,
++ .config_erp = rt2800_config_erp,
++ .config_ant = rt2800_config_ant,
++ .config = rt2800_config,
++ .sta_add = rt2800_sta_add,
++ .sta_remove = rt2800_sta_remove,
++};
++
++static void rt2800usb_queue_init(struct data_queue *queue)
++{
++ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
++ unsigned short txwi_size, rxwi_size;
++
++ rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
++
++ switch (queue->qid) {
++ case QID_RX:
++ queue->limit = 128;
++ queue->data_size = AGGREGATION_SIZE;
++ queue->desc_size = RXINFO_DESC_SIZE;
++ queue->winfo_size = rxwi_size;
++ queue->priv_size = sizeof(struct queue_entry_priv_usb);
++ break;
++
++ case QID_AC_VO:
++ case QID_AC_VI:
++ case QID_AC_BE:
++ case QID_AC_BK:
++ queue->limit = 16;
++ queue->data_size = AGGREGATION_SIZE;
++ queue->desc_size = TXINFO_DESC_SIZE;
++ queue->winfo_size = txwi_size;
++ queue->priv_size = sizeof(struct queue_entry_priv_usb);
++ break;
++
++ case QID_BEACON:
++ queue->limit = 8;
++ queue->data_size = MGMT_FRAME_SIZE;
++ queue->desc_size = TXINFO_DESC_SIZE;
++ queue->winfo_size = txwi_size;
++ queue->priv_size = sizeof(struct queue_entry_priv_usb);
++ break;
++
++ case QID_ATIM:
++ /* fallthrough */
++ default:
++ BUG();
++ break;
++ }
++}
++
++static const struct rt2x00_ops rt2800usb_ops = {
++ .name = KBUILD_MODNAME,
++ .drv_data_size = sizeof(struct rt2800_drv_data),
++ .max_ap_intf = 8,
++ .eeprom_size = EEPROM_SIZE,
++ .rf_size = RF_SIZE,
++ .tx_queues = NUM_TX_QUEUES,
++ .queue_init = rt2800usb_queue_init,
++ .lib = &rt2800usb_rt2x00_ops,
++ .drv = &rt2800usb_rt2800_ops,
++ .hw = &rt2800usb_mac80211_ops,
++#ifdef CONFIG_RT2X00_LIB_DEBUGFS
++ .debugfs = &rt2800_rt2x00debug,
++#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
++};
++
++/*
++ * rt2800usb module information.
++ */
++static struct usb_device_id rt2800usb_device_table[] = {
++ /* Abocom */
++ { USB_DEVICE(0x07b8, 0x2870) },
++ { USB_DEVICE(0x07b8, 0x2770) },
++ { USB_DEVICE(0x07b8, 0x3070) },
++ { USB_DEVICE(0x07b8, 0x3071) },
++ { USB_DEVICE(0x07b8, 0x3072) },
++ { USB_DEVICE(0x1482, 0x3c09) },
++ /* AirTies */
++ { USB_DEVICE(0x1eda, 0x2012) },
++ { USB_DEVICE(0x1eda, 0x2210) },
++ { USB_DEVICE(0x1eda, 0x2310) },
++ /* Allwin */
++ { USB_DEVICE(0x8516, 0x2070) },
++ { USB_DEVICE(0x8516, 0x2770) },
++ { USB_DEVICE(0x8516, 0x2870) },
++ { USB_DEVICE(0x8516, 0x3070) },
++ { USB_DEVICE(0x8516, 0x3071) },
++ { USB_DEVICE(0x8516, 0x3072) },
++ /* Alpha Networks */
++ { USB_DEVICE(0x14b2, 0x3c06) },
++ { USB_DEVICE(0x14b2, 0x3c07) },
++ { USB_DEVICE(0x14b2, 0x3c09) },
++ { USB_DEVICE(0x14b2, 0x3c12) },
++ { USB_DEVICE(0x14b2, 0x3c23) },
++ { USB_DEVICE(0x14b2, 0x3c25) },
++ { USB_DEVICE(0x14b2, 0x3c27) },
++ { USB_DEVICE(0x14b2, 0x3c28) },
++ { USB_DEVICE(0x14b2, 0x3c2c) },
++ /* Amit */
++ { USB_DEVICE(0x15c5, 0x0008) },
++ /* Askey */
++ { USB_DEVICE(0x1690, 0x0740) },
++ /* ASUS */
++ { USB_DEVICE(0x0b05, 0x1731) },
++ { USB_DEVICE(0x0b05, 0x1732) },
++ { USB_DEVICE(0x0b05, 0x1742) },
++ { USB_DEVICE(0x0b05, 0x1784) },
++ { USB_DEVICE(0x1761, 0x0b05) },
++ /* AzureWave */
++ { USB_DEVICE(0x13d3, 0x3247) },
++ { USB_DEVICE(0x13d3, 0x3273) },
++ { USB_DEVICE(0x13d3, 0x3305) },
++ { USB_DEVICE(0x13d3, 0x3307) },
++ { USB_DEVICE(0x13d3, 0x3321) },
++ /* Belkin */
++ { USB_DEVICE(0x050d, 0x8053) },
++ { USB_DEVICE(0x050d, 0x805c) },
++ { USB_DEVICE(0x050d, 0x815c) },
++ { USB_DEVICE(0x050d, 0x825a) },
++ { USB_DEVICE(0x050d, 0x825b) },
++ { USB_DEVICE(0x050d, 0x935a) },
++ { USB_DEVICE(0x050d, 0x935b) },
++ /* Buffalo */
++ { USB_DEVICE(0x0411, 0x00e8) },
++ { USB_DEVICE(0x0411, 0x0158) },
++ { USB_DEVICE(0x0411, 0x015d) },
++ { USB_DEVICE(0x0411, 0x016f) },
++ { USB_DEVICE(0x0411, 0x01a2) },
++ { USB_DEVICE(0x0411, 0x01ee) },
++ { USB_DEVICE(0x0411, 0x01a8) },
++ /* Corega */
++ { USB_DEVICE(0x07aa, 0x002f) },
++ { USB_DEVICE(0x07aa, 0x003c) },
++ { USB_DEVICE(0x07aa, 0x003f) },
++ { USB_DEVICE(0x18c5, 0x0012) },
++ /* D-Link */
++ { USB_DEVICE(0x07d1, 0x3c09) },
++ { USB_DEVICE(0x07d1, 0x3c0a) },
++ { USB_DEVICE(0x07d1, 0x3c0d) },
++ { USB_DEVICE(0x07d1, 0x3c0e) },
++ { USB_DEVICE(0x07d1, 0x3c0f) },
++ { USB_DEVICE(0x07d1, 0x3c11) },
++ { USB_DEVICE(0x07d1, 0x3c13) },
++ { USB_DEVICE(0x07d1, 0x3c15) },
++ { USB_DEVICE(0x07d1, 0x3c16) },
++ { USB_DEVICE(0x07d1, 0x3c17) },
++ { USB_DEVICE(0x2001, 0x3317) },
++ { USB_DEVICE(0x2001, 0x3c1b) },
++ /* Draytek */
++ { USB_DEVICE(0x07fa, 0x7712) },
++ /* DVICO */
++ { USB_DEVICE(0x0fe9, 0xb307) },
++ /* Edimax */
++ { USB_DEVICE(0x7392, 0x4085) },
++ { USB_DEVICE(0x7392, 0x7711) },
++ { USB_DEVICE(0x7392, 0x7717) },
++ { USB_DEVICE(0x7392, 0x7718) },
++ { USB_DEVICE(0x7392, 0x7722) },
++ /* Encore */
++ { USB_DEVICE(0x203d, 0x1480) },
++ { USB_DEVICE(0x203d, 0x14a9) },
++ /* EnGenius */
++ { USB_DEVICE(0x1740, 0x9701) },
++ { USB_DEVICE(0x1740, 0x9702) },
++ { USB_DEVICE(0x1740, 0x9703) },
++ { USB_DEVICE(0x1740, 0x9705) },
++ { USB_DEVICE(0x1740, 0x9706) },
++ { USB_DEVICE(0x1740, 0x9707) },
++ { USB_DEVICE(0x1740, 0x9708) },
++ { USB_DEVICE(0x1740, 0x9709) },
++ /* Gemtek */
++ { USB_DEVICE(0x15a9, 0x0012) },
++ /* Gigabyte */
++ { USB_DEVICE(0x1044, 0x800b) },
++ { USB_DEVICE(0x1044, 0x800d) },
++ /* Hawking */
++ { USB_DEVICE(0x0e66, 0x0001) },
++ { USB_DEVICE(0x0e66, 0x0003) },
++ { USB_DEVICE(0x0e66, 0x0009) },
++ { USB_DEVICE(0x0e66, 0x000b) },
++ { USB_DEVICE(0x0e66, 0x0013) },
++ { USB_DEVICE(0x0e66, 0x0017) },
++ { USB_DEVICE(0x0e66, 0x0018) },
++ /* I-O DATA */
++ { USB_DEVICE(0x04bb, 0x0945) },
++ { USB_DEVICE(0x04bb, 0x0947) },
++ { USB_DEVICE(0x04bb, 0x0948) },
++ /* Linksys */
++ { USB_DEVICE(0x13b1, 0x0031) },
++ { USB_DEVICE(0x1737, 0x0070) },
++ { USB_DEVICE(0x1737, 0x0071) },
++ { USB_DEVICE(0x1737, 0x0077) },
++ { USB_DEVICE(0x1737, 0x0078) },
++ /* Logitec */
++ { USB_DEVICE(0x0789, 0x0162) },
++ { USB_DEVICE(0x0789, 0x0163) },
++ { USB_DEVICE(0x0789, 0x0164) },
++ { USB_DEVICE(0x0789, 0x0166) },
++ /* Motorola */
++ { USB_DEVICE(0x100d, 0x9031) },
++ /* MSI */
++ { USB_DEVICE(0x0db0, 0x3820) },
++ { USB_DEVICE(0x0db0, 0x3821) },
++ { USB_DEVICE(0x0db0, 0x3822) },
++ { USB_DEVICE(0x0db0, 0x3870) },
++ { USB_DEVICE(0x0db0, 0x3871) },
++ { USB_DEVICE(0x0db0, 0x6899) },
++ { USB_DEVICE(0x0db0, 0x821a) },
++ { USB_DEVICE(0x0db0, 0x822a) },
++ { USB_DEVICE(0x0db0, 0x822b) },
++ { USB_DEVICE(0x0db0, 0x822c) },
++ { USB_DEVICE(0x0db0, 0x870a) },
++ { USB_DEVICE(0x0db0, 0x871a) },
++ { USB_DEVICE(0x0db0, 0x871b) },
++ { USB_DEVICE(0x0db0, 0x871c) },
++ { USB_DEVICE(0x0db0, 0x899a) },
++ /* Ovislink */
++ { USB_DEVICE(0x1b75, 0x3071) },
++ { USB_DEVICE(0x1b75, 0x3072) },
++ { USB_DEVICE(0x1b75, 0xa200) },
++ /* Para */
++ { USB_DEVICE(0x20b8, 0x8888) },
++ /* Pegatron */
++ { USB_DEVICE(0x1d4d, 0x0002) },
++ { USB_DEVICE(0x1d4d, 0x000c) },
++ { USB_DEVICE(0x1d4d, 0x000e) },
++ { USB_DEVICE(0x1d4d, 0x0011) },
++ /* Philips */
++ { USB_DEVICE(0x0471, 0x200f) },
++ /* Planex */
++ { USB_DEVICE(0x2019, 0x5201) },
++ { USB_DEVICE(0x2019, 0xab25) },
++ { USB_DEVICE(0x2019, 0xed06) },
++ /* Quanta */
++ { USB_DEVICE(0x1a32, 0x0304) },
++ /* Ralink */
++ { USB_DEVICE(0x148f, 0x2070) },
++ { USB_DEVICE(0x148f, 0x2770) },
++ { USB_DEVICE(0x148f, 0x2870) },
++ { USB_DEVICE(0x148f, 0x3070) },
++ { USB_DEVICE(0x148f, 0x3071) },
++ { USB_DEVICE(0x148f, 0x3072) },
++ /* Samsung */
++ { USB_DEVICE(0x04e8, 0x2018) },
++ /* Siemens */
++ { USB_DEVICE(0x129b, 0x1828) },
++ /* Sitecom */
++ { USB_DEVICE(0x0df6, 0x0017) },
++ { USB_DEVICE(0x0df6, 0x002b) },
++ { USB_DEVICE(0x0df6, 0x002c) },
++ { USB_DEVICE(0x0df6, 0x002d) },
++ { USB_DEVICE(0x0df6, 0x0039) },
++ { USB_DEVICE(0x0df6, 0x003b) },
++ { USB_DEVICE(0x0df6, 0x003d) },
++ { USB_DEVICE(0x0df6, 0x003e) },
++ { USB_DEVICE(0x0df6, 0x003f) },
++ { USB_DEVICE(0x0df6, 0x0040) },
++ { USB_DEVICE(0x0df6, 0x0042) },
++ { USB_DEVICE(0x0df6, 0x0047) },
++ { USB_DEVICE(0x0df6, 0x0048) },
++ { USB_DEVICE(0x0df6, 0x0051) },
++ { USB_DEVICE(0x0df6, 0x005f) },
++ { USB_DEVICE(0x0df6, 0x0060) },
++ /* SMC */
++ { USB_DEVICE(0x083a, 0x6618) },
++ { USB_DEVICE(0x083a, 0x7511) },
++ { USB_DEVICE(0x083a, 0x7512) },
++ { USB_DEVICE(0x083a, 0x7522) },
++ { USB_DEVICE(0x083a, 0x8522) },
++ { USB_DEVICE(0x083a, 0xa618) },
++ { USB_DEVICE(0x083a, 0xa701) },
++ { USB_DEVICE(0x083a, 0xa702) },
++ { USB_DEVICE(0x083a, 0xa703) },
++ { USB_DEVICE(0x083a, 0xb522) },
++ /* Sparklan */
++ { USB_DEVICE(0x15a9, 0x0006) },
++ /* Sweex */
++ { USB_DEVICE(0x177f, 0x0153) },
++ { USB_DEVICE(0x177f, 0x0164) },
++ { USB_DEVICE(0x177f, 0x0302) },
++ { USB_DEVICE(0x177f, 0x0313) },
++ { USB_DEVICE(0x177f, 0x0323) },
++ { USB_DEVICE(0x177f, 0x0324) },
++ /* U-Media */
++ { USB_DEVICE(0x157e, 0x300e) },
++ { USB_DEVICE(0x157e, 0x3013) },
++ /* ZCOM */
++ { USB_DEVICE(0x0cde, 0x0022) },
++ { USB_DEVICE(0x0cde, 0x0025) },
++ /* Zinwell */
++ { USB_DEVICE(0x5a57, 0x0280) },
++ { USB_DEVICE(0x5a57, 0x0282) },
++ { USB_DEVICE(0x5a57, 0x0283) },
++ { USB_DEVICE(0x5a57, 0x5257) },
++ /* Zyxel */
++ { USB_DEVICE(0x0586, 0x3416) },
++ { USB_DEVICE(0x0586, 0x3418) },
++ { USB_DEVICE(0x0586, 0x341a) },
++ { USB_DEVICE(0x0586, 0x341e) },
++ { USB_DEVICE(0x0586, 0x343e) },
++#ifdef CONFIG_RT2800USB_RT33XX
++ /* Belkin */
++ { USB_DEVICE(0x050d, 0x945b) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c17) },
++ /* Panasonic */
++ { USB_DEVICE(0x083a, 0xb511) },
++ /* Philips */
++ { USB_DEVICE(0x0471, 0x20dd) },
++ /* Ralink */
++ { USB_DEVICE(0x148f, 0x3370) },
++ { USB_DEVICE(0x148f, 0x8070) },
++ /* Sitecom */
++ { USB_DEVICE(0x0df6, 0x0050) },
++ /* Sweex */
++ { USB_DEVICE(0x177f, 0x0163) },
++ { USB_DEVICE(0x177f, 0x0165) },
++#endif
++#ifdef CONFIG_RT2800USB_RT35XX
++ /* Allwin */
++ { USB_DEVICE(0x8516, 0x3572) },
++ /* Askey */
++ { USB_DEVICE(0x1690, 0x0744) },
++ { USB_DEVICE(0x1690, 0x0761) },
++ { USB_DEVICE(0x1690, 0x0764) },
++ /* ASUS */
++ { USB_DEVICE(0x0b05, 0x179d) },
++ /* Cisco */
++ { USB_DEVICE(0x167b, 0x4001) },
++ /* EnGenius */
++ { USB_DEVICE(0x1740, 0x9801) },
++ /* I-O DATA */
++ { USB_DEVICE(0x04bb, 0x0944) },
++ /* Linksys */
++ { USB_DEVICE(0x13b1, 0x002f) },
++ { USB_DEVICE(0x1737, 0x0079) },
++ /* Logitec */
++ { USB_DEVICE(0x0789, 0x0170) },
++ /* Ralink */
++ { USB_DEVICE(0x148f, 0x3572) },
++ /* Sitecom */
++ { USB_DEVICE(0x0df6, 0x0041) },
++ { USB_DEVICE(0x0df6, 0x0062) },
++ { USB_DEVICE(0x0df6, 0x0065) },
++ { USB_DEVICE(0x0df6, 0x0066) },
++ { USB_DEVICE(0x0df6, 0x0068) },
++ /* Toshiba */
++ { USB_DEVICE(0x0930, 0x0a07) },
++ /* Zinwell */
++ { USB_DEVICE(0x5a57, 0x0284) },
++#endif
++#ifdef CONFIG_RT2800USB_RT3573
++ /* AirLive */
++ { USB_DEVICE(0x1b75, 0x7733) },
++ /* ASUS */
++ { USB_DEVICE(0x0b05, 0x17bc) },
++ { USB_DEVICE(0x0b05, 0x17ad) },
++ /* Belkin */
++ { USB_DEVICE(0x050d, 0x1103) },
++ /* Cameo */
++ { USB_DEVICE(0x148f, 0xf301) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c1f) },
++ /* Edimax */
++ { USB_DEVICE(0x7392, 0x7733) },
++ /* Hawking */
++ { USB_DEVICE(0x0e66, 0x0020) },
++ { USB_DEVICE(0x0e66, 0x0021) },
++ /* I-O DATA */
++ { USB_DEVICE(0x04bb, 0x094e) },
++ /* Linksys */
++ { USB_DEVICE(0x13b1, 0x003b) },
++ /* Logitec */
++ { USB_DEVICE(0x0789, 0x016b) },
++ /* NETGEAR */
++ { USB_DEVICE(0x0846, 0x9012) },
++ { USB_DEVICE(0x0846, 0x9013) },
++ { USB_DEVICE(0x0846, 0x9019) },
++ /* Planex */
++ { USB_DEVICE(0x2019, 0xed19) },
++ /* Ralink */
++ { USB_DEVICE(0x148f, 0x3573) },
++ /* Sitecom */
++ { USB_DEVICE(0x0df6, 0x0067) },
++ { USB_DEVICE(0x0df6, 0x006a) },
++ { USB_DEVICE(0x0df6, 0x006e) },
++ /* ZyXEL */
++ { USB_DEVICE(0x0586, 0x3421) },
++#endif
++#ifdef CONFIG_RT2800USB_RT53XX
++ /* Arcadyan */
++ { USB_DEVICE(0x043e, 0x7a12) },
++ { USB_DEVICE(0x043e, 0x7a32) },
++ /* ASUS */
++ { USB_DEVICE(0x0b05, 0x17e8) },
++ /* Azurewave */
++ { USB_DEVICE(0x13d3, 0x3329) },
++ { USB_DEVICE(0x13d3, 0x3365) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c15) },
++ { USB_DEVICE(0x2001, 0x3c19) },
++ { USB_DEVICE(0x2001, 0x3c1c) },
++ { USB_DEVICE(0x2001, 0x3c1d) },
++ { USB_DEVICE(0x2001, 0x3c1e) },
++ { USB_DEVICE(0x2001, 0x3c20) },
++ { USB_DEVICE(0x2001, 0x3c22) },
++ { USB_DEVICE(0x2001, 0x3c23) },
++ /* LG innotek */
++ { USB_DEVICE(0x043e, 0x7a22) },
++ { USB_DEVICE(0x043e, 0x7a42) },
++ /* Panasonic */
++ { USB_DEVICE(0x04da, 0x1801) },
++ { USB_DEVICE(0x04da, 0x1800) },
++ { USB_DEVICE(0x04da, 0x23f6) },
++ /* Philips */
++ { USB_DEVICE(0x0471, 0x2104) },
++ { USB_DEVICE(0x0471, 0x2126) },
++ { USB_DEVICE(0x0471, 0x2180) },
++ { USB_DEVICE(0x0471, 0x2181) },
++ { USB_DEVICE(0x0471, 0x2182) },
++ /* Ralink */
++ { USB_DEVICE(0x148f, 0x5370) },
++ { USB_DEVICE(0x148f, 0x5372) },
++#endif
++#ifdef CONFIG_RT2800USB_RT55XX
++ /* Arcadyan */
++ { USB_DEVICE(0x043e, 0x7a32) },
++ /* AVM GmbH */
++ { USB_DEVICE(0x057c, 0x8501) },
++ /* Buffalo */
++ { USB_DEVICE(0x0411, 0x0241) },
++ { USB_DEVICE(0x0411, 0x0253) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c1a) },
++ { USB_DEVICE(0x2001, 0x3c21) },
++ /* Proware */
++ { USB_DEVICE(0x043e, 0x7a13) },
++ /* Ralink */
++ { USB_DEVICE(0x148f, 0x5572) },
++ /* TRENDnet */
++ { USB_DEVICE(0x20f4, 0x724a) },
++#endif
++#ifdef CONFIG_RT2800USB_UNKNOWN
++ /*
++ * Unclear what kind of devices these are (they aren't supported by the
++ * vendor linux driver).
++ */
++ /* Abocom */
++ { USB_DEVICE(0x07b8, 0x3073) },
++ { USB_DEVICE(0x07b8, 0x3074) },
++ /* Alpha Networks */
++ { USB_DEVICE(0x14b2, 0x3c08) },
++ { USB_DEVICE(0x14b2, 0x3c11) },
++ /* Amigo */
++ { USB_DEVICE(0x0e0b, 0x9031) },
++ { USB_DEVICE(0x0e0b, 0x9041) },
++ /* ASUS */
++ { USB_DEVICE(0x0b05, 0x166a) },
++ { USB_DEVICE(0x0b05, 0x1760) },
++ { USB_DEVICE(0x0b05, 0x1761) },
++ { USB_DEVICE(0x0b05, 0x1790) },
++ { USB_DEVICE(0x0b05, 0x17a7) },
++ /* AzureWave */
++ { USB_DEVICE(0x13d3, 0x3262) },
++ { USB_DEVICE(0x13d3, 0x3284) },
++ { USB_DEVICE(0x13d3, 0x3322) },
++ { USB_DEVICE(0x13d3, 0x3340) },
++ { USB_DEVICE(0x13d3, 0x3399) },
++ { USB_DEVICE(0x13d3, 0x3400) },
++ { USB_DEVICE(0x13d3, 0x3401) },
++ /* Belkin */
++ { USB_DEVICE(0x050d, 0x1003) },
++ /* Buffalo */
++ { USB_DEVICE(0x0411, 0x012e) },
++ { USB_DEVICE(0x0411, 0x0148) },
++ { USB_DEVICE(0x0411, 0x0150) },
++ /* Corega */
++ { USB_DEVICE(0x07aa, 0x0041) },
++ { USB_DEVICE(0x07aa, 0x0042) },
++ { USB_DEVICE(0x18c5, 0x0008) },
++ /* D-Link */
++ { USB_DEVICE(0x07d1, 0x3c0b) },
++ /* Encore */
++ { USB_DEVICE(0x203d, 0x14a1) },
++ /* EnGenius */
++ { USB_DEVICE(0x1740, 0x0600) },
++ { USB_DEVICE(0x1740, 0x0602) },
++ /* Gemtek */
++ { USB_DEVICE(0x15a9, 0x0010) },
++ /* Gigabyte */
++ { USB_DEVICE(0x1044, 0x800c) },
++ /* Hercules */
++ { USB_DEVICE(0x06f8, 0xe036) },
++ /* Huawei */
++ { USB_DEVICE(0x148f, 0xf101) },
++ /* I-O DATA */
++ { USB_DEVICE(0x04bb, 0x094b) },
++ /* LevelOne */
++ { USB_DEVICE(0x1740, 0x0605) },
++ { USB_DEVICE(0x1740, 0x0615) },
++ /* Logitec */
++ { USB_DEVICE(0x0789, 0x0168) },
++ { USB_DEVICE(0x0789, 0x0169) },
++ /* Motorola */
++ { USB_DEVICE(0x100d, 0x9032) },
++ /* Pegatron */
++ { USB_DEVICE(0x05a6, 0x0101) },
++ { USB_DEVICE(0x1d4d, 0x0010) },
++ /* Planex */
++ { USB_DEVICE(0x2019, 0xab24) },
++ { USB_DEVICE(0x2019, 0xab29) },
++ /* Qcom */
++ { USB_DEVICE(0x18e8, 0x6259) },
++ /* RadioShack */
++ { USB_DEVICE(0x08b9, 0x1197) },
++ /* Sitecom */
++ { USB_DEVICE(0x0df6, 0x003c) },
++ { USB_DEVICE(0x0df6, 0x004a) },
++ { USB_DEVICE(0x0df6, 0x004d) },
++ { USB_DEVICE(0x0df6, 0x0053) },
++ { USB_DEVICE(0x0df6, 0x0069) },
++ { USB_DEVICE(0x0df6, 0x006f) },
++ { USB_DEVICE(0x0df6, 0x0078) },
++ /* SMC */
++ { USB_DEVICE(0x083a, 0xa512) },
++ { USB_DEVICE(0x083a, 0xc522) },
++ { USB_DEVICE(0x083a, 0xd522) },
++ { USB_DEVICE(0x083a, 0xf511) },
++ /* Sweex */
++ { USB_DEVICE(0x177f, 0x0254) },
++ /* TP-LINK */
++ { USB_DEVICE(0xf201, 0x5370) },
++#endif
++ { 0, }
++};
++
++MODULE_AUTHOR(DRV_PROJECT);
++MODULE_VERSION(DRV_VERSION);
++MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver.");
++MODULE_SUPPORTED_DEVICE("Ralink RT2870 USB chipset based cards");
++MODULE_DEVICE_TABLE(usb, rt2800usb_device_table);
++MODULE_FIRMWARE(FIRMWARE_RT2870);
++MODULE_LICENSE("GPL");
++
++static int rt2800usb_probe(struct usb_interface *usb_intf,
++ const struct usb_device_id *id)
++{
++ return rt2x00usb_probe(usb_intf, &rt2800usb_ops);
++}
++
++static struct usb_driver rt2800usb_driver = {
++ .name = KBUILD_MODNAME,
++ .id_table = rt2800usb_device_table,
++ .probe = rt2800usb_probe,
++ .disconnect = rt2x00usb_disconnect,
++ .suspend = rt2x00usb_suspend,
++ .resume = rt2x00usb_resume,
++ .reset_resume = rt2x00usb_resume,
++ .disable_hub_initiated_lpm = 1,
++};
++
++module_usb_driver(rt2800usb_driver);
+diff -Nur linux-3.14.36/drivers/net/wireless/rt2x00/rt2x00.h linux-openelec/drivers/net/wireless/rt2x00/rt2x00.h
+--- linux-3.14.36/drivers/net/wireless/rt2x00/rt2x00.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/rt2x00/rt2x00.h 2015-05-06 12:05:42.000000000 -0500
+@@ -1449,7 +1449,8 @@
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
+ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
+-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
++void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop);
+ int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
+ int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+ void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
+diff -Nur linux-3.14.36/drivers/net/wireless/rt2x00/rt2x00mac.c linux-openelec/drivers/net/wireless/rt2x00/rt2x00mac.c
+--- linux-3.14.36/drivers/net/wireless/rt2x00/rt2x00mac.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/rt2x00/rt2x00mac.c 2015-05-06 12:05:42.000000000 -0500
+@@ -751,7 +751,8 @@
+ }
+ EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
+
+-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct data_queue *queue;
+diff -Nur linux-3.14.36/drivers/net/wireless/rtl818x/rtl8187/dev.c linux-openelec/drivers/net/wireless/rtl818x/rtl8187/dev.c
+--- linux-3.14.36/drivers/net/wireless/rtl818x/rtl8187/dev.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/rtl818x/rtl8187/dev.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1636,10 +1636,10 @@
+
+ err_free_dmabuf:
+ kfree(priv->io_dmabuf);
+- err_free_dev:
+- ieee80211_free_hw(dev);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
++ err_free_dev:
++ ieee80211_free_hw(dev);
+ return err;
+ }
+
+diff -Nur linux-3.14.36/drivers/net/wireless/rtlwifi/core.c linux-openelec/drivers/net/wireless/rtlwifi/core.c
+--- linux-3.14.36/drivers/net/wireless/rtlwifi/core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/rtlwifi/core.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1309,7 +1309,8 @@
+ * before switch channel or power save, or tx buffer packet
+ * maybe send after offchannel or rf sleep, this may cause
+ * dis-association by AP */
+-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+diff -Nur linux-3.14.36/drivers/net/wireless/ti/wlcore/main.c linux-openelec/drivers/net/wireless/ti/wlcore/main.c
+--- linux-3.14.36/drivers/net/wireless/ti/wlcore/main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/net/wireless/ti/wlcore/main.c 2015-05-06 12:05:42.000000000 -0500
+@@ -5156,7 +5156,8 @@
+ mutex_unlock(&wl->mutex);
+ }
+
+-static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct wl1271 *wl = hw->priv;
+
+diff -Nur linux-3.14.36/drivers/of/fdt.c linux-openelec/drivers/of/fdt.c
+--- linux-3.14.36/drivers/of/fdt.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/of/fdt.c 2015-07-24 18:03:29.952842002 -0500
+@@ -804,6 +804,7 @@
+ {
+ unsigned long l;
+ char *p;
++ char tmp_command_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+
+ pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+
+@@ -822,12 +823,23 @@
+ * CONFIG_CMDLINE is meant to be a default in case nothing else
+ * managed to set the command line, unless CONFIG_CMDLINE_FORCE
+ * is set in which case we override whatever was found earlier.
++ *
++ * But we do prepend CONFIG_CMDLINE to bootloader arguments anyway.
+ */
+ #ifdef CONFIG_CMDLINE
+ #ifndef CONFIG_CMDLINE_FORCE
+ if (!((char *)data)[0])
+-#endif
+ strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
++ else {
++ /* append bootloader arguments to CONFIG_CMDLINE */
++ strlcat(tmp_command_line, " ", COMMAND_LINE_SIZE);
++ strlcat(tmp_command_line, data, COMMAND_LINE_SIZE);
++ /* copy everything back */
++ strlcpy(data, tmp_command_line, COMMAND_LINE_SIZE);
++ }
++#else
++ strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
++#endif /* CONFIG_CMDLINE_FORCE */
+ #endif /* CONFIG_CMDLINE */
+
+ pr_debug("Command line is: %s\n", (char*)data);
+diff -Nur linux-3.14.36/drivers/pci/host/Kconfig linux-openelec/drivers/pci/host/Kconfig
+--- linux-3.14.36/drivers/pci/host/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pci/host/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -21,6 +21,23 @@
+ select PCIEPORTBUS
+ select PCIE_DW
+
++config EP_MODE_IN_EP_RC_SYS
++ bool "PCI Express EP mode in the IMX6 RC/EP interconnection system"
++ depends on PCI_IMX6
++
++config EP_SELF_IO_TEST
++ bool "PCI Express EP_SELF_IO_TEST in EP mode"
++ depends on EP_MODE_IN_EP_RC_SYS
++
++config RC_MODE_IN_EP_RC_SYS
++ bool "PCI Express RC mode in the IMX6 RC/EP interconnection system"
++ depends on PCI_IMX6
++
++config PCI_IMX_EP_DRV
++ bool "i.MX6 PCI Express EP skeleton driver"
++ depends on RC_MODE_IN_EP_RC_SYS
++ default y
++
+ config PCI_TEGRA
+ bool "NVIDIA Tegra PCIe controller"
+ depends on ARCH_TEGRA
+diff -Nur linux-3.14.36/drivers/pci/host/Makefile linux-openelec/drivers/pci/host/Makefile
+--- linux-3.14.36/drivers/pci/host/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pci/host/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -1,6 +1,7 @@
+ obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+ obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
++obj-$(CONFIG_PCI_IMX_EP_DRV) += pci-imx6-ep-driver.o
+ obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+ obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
+diff -Nur linux-3.14.36/drivers/pci/host/pcie-designware.c linux-openelec/drivers/pci/host/pcie-designware.c
+--- linux-3.14.36/drivers/pci/host/pcie-designware.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pci/host/pcie-designware.c 2015-05-06 12:05:42.000000000 -0500
+@@ -23,48 +23,6 @@
+
+ #include "pcie-designware.h"
+
+-/* Synopsis specific PCIE configuration registers */
+-#define PCIE_PORT_LINK_CONTROL 0x710
+-#define PORT_LINK_MODE_MASK (0x3f << 16)
+-#define PORT_LINK_MODE_1_LANES (0x1 << 16)
+-#define PORT_LINK_MODE_2_LANES (0x3 << 16)
+-#define PORT_LINK_MODE_4_LANES (0x7 << 16)
+-
+-#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+-#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
+-#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
+-#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
+-#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
+-#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
+-
+-#define PCIE_MSI_ADDR_LO 0x820
+-#define PCIE_MSI_ADDR_HI 0x824
+-#define PCIE_MSI_INTR0_ENABLE 0x828
+-#define PCIE_MSI_INTR0_MASK 0x82C
+-#define PCIE_MSI_INTR0_STATUS 0x830
+-
+-#define PCIE_ATU_VIEWPORT 0x900
+-#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
+-#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
+-#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
+-#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
+-#define PCIE_ATU_CR1 0x904
+-#define PCIE_ATU_TYPE_MEM (0x0 << 0)
+-#define PCIE_ATU_TYPE_IO (0x2 << 0)
+-#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
+-#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
+-#define PCIE_ATU_CR2 0x908
+-#define PCIE_ATU_ENABLE (0x1 << 31)
+-#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
+-#define PCIE_ATU_LOWER_BASE 0x90C
+-#define PCIE_ATU_UPPER_BASE 0x910
+-#define PCIE_ATU_LIMIT 0x914
+-#define PCIE_ATU_LOWER_TARGET 0x918
+-#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
+-#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
+-#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
+-#define PCIE_ATU_UPPER_TARGET 0x91C
+-
+ static struct hw_pci dw_pci;
+
+ static unsigned long global_io_offset;
+@@ -332,23 +290,28 @@
+ return -EINVAL;
+ }
+
+- pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
+- &msg_ctr);
+- msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
+- if (msgvec == 0)
+- msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+- if (msgvec > 5)
+- msgvec = 0;
+-
+- irq = assign_irq((1 << msgvec), desc, &pos);
+- if (irq < 0)
+- return irq;
+-
+- /*
+- * write_msi_msg() will update PCI_MSI_FLAGS so there is
+- * no need to explicitly call pci_write_config_word().
+- */
+- desc->msi_attrib.multiple = msgvec;
++ if (pp->quirks & DW_PCIE_QUIRK_NO_MSI_VEC) {
++ irq = assign_irq(1, desc, &pos);
++ set_irq_flags(irq, IRQF_VALID);
++ } else {
++ pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
++ &msg_ctr);
++ msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
++ if (msgvec == 0)
++ msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
++ if (msgvec > 5)
++ msgvec = 0;
++
++ irq = assign_irq((1 << msgvec), desc, &pos);
++ if (irq < 0)
++ return irq;
++
++ msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
++ msg_ctr |= msgvec << 4;
++ pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
++ msg_ctr);
++ desc->msi_attrib.multiple = msgvec;
++ }
+
+ msg.address_lo = virt_to_phys((void *)pp->msi_data);
+ msg.address_hi = 0x0;
+@@ -363,9 +326,30 @@
+ clear_irq(irq);
+ }
+
++static int dw_msi_check_device(struct msi_chip *chip, struct pci_dev *pdev,
++ int nvec, int type)
++{
++ struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
++ u32 val;
++
++ if (pp->quirks & DW_PCIE_QUIRK_MSI_SELF_EN) {
++ if ((type == PCI_CAP_ID_MSI) || (type == PCI_CAP_ID_MSIX)) {
++ /* Set MSI enable of RC here */
++ val = readl(pp->dbi_base + 0x50);
++ if ((val & (PCI_MSI_FLAGS_ENABLE << 16)) == 0) {
++ val |= PCI_MSI_FLAGS_ENABLE << 16;
++ writel(val, pp->dbi_base + 0x50);
++ }
++ }
++ }
++
++ return 0;
++}
++
+ static struct msi_chip dw_pcie_msi_chip = {
+ .setup_irq = dw_msi_setup_irq,
+ .teardown_irq = dw_msi_teardown_irq,
++ .check_device = dw_msi_check_device,
+ };
+
+ int dw_pcie_link_up(struct pcie_port *pp)
+@@ -531,38 +515,6 @@
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ }
+
+-static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+-{
+- /* Program viewport 0 : OUTBOUND : MEM */
+- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+- PCIE_ATU_VIEWPORT);
+- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
+- dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
+- dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
+- dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+- PCIE_ATU_LIMIT);
+- dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+- dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+- PCIE_ATU_UPPER_TARGET);
+- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+-}
+-
+-static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+-{
+- /* Program viewport 1 : OUTBOUND : IO */
+- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+- PCIE_ATU_VIEWPORT);
+- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
+- dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
+- dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
+- dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+- PCIE_ATU_LIMIT);
+- dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
+- dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+- PCIE_ATU_UPPER_TARGET);
+- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+-}
+-
+ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 *val)
+ {
+@@ -577,12 +529,10 @@
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
+ ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
+ val);
+- dw_pcie_prog_viewport_mem_outbound(pp);
+ } else {
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
+ ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
+ val);
+- dw_pcie_prog_viewport_io_outbound(pp);
+ }
+
+ return ret;
+@@ -602,12 +552,10 @@
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
+ ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
+ val);
+- dw_pcie_prog_viewport_mem_outbound(pp);
+ } else {
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
+ ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
+ val);
+- dw_pcie_prog_viewport_io_outbound(pp);
+ }
+
+ return ret;
+@@ -739,7 +687,13 @@
+ {
+ struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
+
+- return pp->irq;
++ switch (pin) {
++ case 1: return pp->irq;
++ case 2: return pp->irq - 1;
++ case 3: return pp->irq - 2;
++ case 4: return pp->irq - 3;
++ default: return -1;
++ }
+ }
+
+ static void dw_pcie_add_bus(struct pci_bus *bus)
+diff -Nur linux-3.14.36/drivers/pci/host/pcie-designware.h linux-openelec/drivers/pci/host/pcie-designware.h
+--- linux-3.14.36/drivers/pci/host/pcie-designware.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pci/host/pcie-designware.h 2015-05-06 12:05:42.000000000 -0500
+@@ -14,6 +14,48 @@
+ #ifndef _PCIE_DESIGNWARE_H
+ #define _PCIE_DESIGNWARE_H
+
++/* Synopsis specific PCIE configuration registers */
++#define PCIE_PORT_LINK_CONTROL 0x710
++#define PORT_LINK_MODE_MASK (0x3f << 16)
++#define PORT_LINK_MODE_1_LANES (0x1 << 16)
++#define PORT_LINK_MODE_2_LANES (0x3 << 16)
++#define PORT_LINK_MODE_4_LANES (0x7 << 16)
++
++#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
++#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
++#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
++#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
++#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
++#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
++
++#define PCIE_MSI_ADDR_LO 0x820
++#define PCIE_MSI_ADDR_HI 0x824
++#define PCIE_MSI_INTR0_ENABLE 0x828
++#define PCIE_MSI_INTR0_MASK 0x82C
++#define PCIE_MSI_INTR0_STATUS 0x830
++
++#define PCIE_ATU_VIEWPORT 0x900
++#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
++#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
++#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
++#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
++#define PCIE_ATU_CR1 0x904
++#define PCIE_ATU_TYPE_MEM (0x0 << 0)
++#define PCIE_ATU_TYPE_IO (0x2 << 0)
++#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
++#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
++#define PCIE_ATU_CR2 0x908
++#define PCIE_ATU_ENABLE (0x1 << 31)
++#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
++#define PCIE_ATU_LOWER_BASE 0x90C
++#define PCIE_ATU_UPPER_BASE 0x910
++#define PCIE_ATU_LIMIT 0x914
++#define PCIE_ATU_LOWER_TARGET 0x918
++#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
++#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
++#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
++#define PCIE_ATU_UPPER_TARGET 0x91C
++
+ struct pcie_port_info {
+ u32 cfg0_size;
+ u32 cfg1_size;
+@@ -49,6 +91,11 @@
+ int irq;
+ u32 lanes;
+ struct pcie_host_ops *ops;
++ u32 quirks; /* Deviations from spec. */
++/* Controller doesn't support MSI VEC */
++#define DW_PCIE_QUIRK_NO_MSI_VEC (1<<0)
++/* MSI EN of Controller should be configured when MSI is enabled */
++#define DW_PCIE_QUIRK_MSI_SELF_EN (1<<1)
+ int msi_irq;
+ struct irq_domain *irq_domain;
+ unsigned long msi_data;
+diff -Nur linux-3.14.36/drivers/pci/host/pci-imx6.c linux-openelec/drivers/pci/host/pci-imx6.c
+--- linux-3.14.36/drivers/pci/host/pci-imx6.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pci/host/pci-imx6.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1,6 +1,7 @@
+ /*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2013 Kosagi
+ * http://www.kosagi.com
+ *
+@@ -14,6 +15,7 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/gpio.h>
++#include <linux/interrupt.h>
+ #include <linux/kernel.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+@@ -25,11 +27,22 @@
+ #include <linux/resource.h>
+ #include <linux/signal.h>
+ #include <linux/types.h>
++#include <linux/busfreq-imx6.h>
+
++#include "../pci.h"
+ #include "pcie-designware.h"
+
+ #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+
++/*
++ * The default value of the reserved ddr memory
++ * used to verify EP/RC memory space access operations.
++ * BTW, here is the layout of the 1G ddr on SD boards
++ * 0x1000_0000 ~ 0x4FFF_FFFF
++ */
++static u32 ddr_test_region = 0x40000000;
++static u32 test_region_size = SZ_2M;
++
+ struct imx6_pcie {
+ int reset_gpio;
+ int power_on_gpio;
+@@ -52,6 +65,9 @@
+
+ /* PCIe Port Logic registers (memory-mapped) */
+ #define PL_OFFSET 0x700
++#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
++#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
++#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
+ #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+ #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+ #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
+@@ -216,14 +232,14 @@
+
+ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+ {
+- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
++ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+- return 0;
++ return 0;
+ }
+
+ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+@@ -234,10 +250,7 @@
+ if (gpio_is_valid(imx6_pcie->power_on_gpio))
+ gpio_set_value(imx6_pcie->power_on_gpio, 1);
+
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
++ request_bus_freq(BUS_FREQ_HIGH);
+
+ ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
+ if (ret) {
+@@ -251,10 +264,13 @@
+ goto err_pcie_ref;
+ }
+
+- ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+- if (ret) {
+- dev_err(pp->dev, "unable to enable lvds_gate\n");
+- goto err_lvds_gate;
++ if (!IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)
++ && !IS_ENABLED(CONFIG_RC_MODE_IN_EP_RC_SYS)) {
++ ret = clk_prepare_enable(imx6_pcie->lvds_gate);
++ if (ret) {
++ dev_err(pp->dev, "unable to enable lvds_gate\n");
++ goto err_lvds_gate;
++ }
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_axi);
+@@ -266,6 +282,12 @@
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+
++ /* power up core phy and enable ref clock */
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
++
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value(imx6_pcie->reset_gpio, 0);
+@@ -281,6 +303,7 @@
+ err_pcie_ref:
+ clk_disable_unprepare(imx6_pcie->sata_ref_100m);
+ err_sata_ref:
++ release_bus_freq(BUS_FREQ_HIGH);
+ return ret;
+
+ }
+@@ -288,13 +311,44 @@
+ static void imx6_pcie_init_phy(struct pcie_port *pp)
+ {
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
++ u32 val, gpr1, gpr12;
++
++ /*
++ * If the bootloader already enabled the link we need some special
++ * handling to get the core back into a state where it is safe to
++ * touch it for configuration. As there is no dedicated reset signal
++ * wired up for MX6QDL, we need to manually force LTSSM into "detect"
++ * state before completely disabling LTSSM, which is a prerequisite
++ * for core configuration.
++ * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
++ * indication that the bootloader activated the link.
++ */
++ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
++ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
++
++ if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
++ (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
++ val = readl(pp->dbi_base + PCIE_PL_PFLR);
++ val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
++ val |= PCIE_PL_PFLR_FORCE_LINK;
++ writel(val, pp->dbi_base + PCIE_PL_PFLR);
++
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
++ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
++ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+- IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
++ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS))
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
++ IMX6Q_GPR12_DEVICE_TYPE,
++ PCI_EXP_TYPE_ENDPOINT << 12);
++ else
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
++ IMX6Q_GPR12_DEVICE_TYPE,
++ PCI_EXP_TYPE_ROOT_PORT << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+@@ -326,6 +380,12 @@
+ return -EINVAL;
+ }
+
++ if (IS_ENABLED(CONFIG_PCI_MSI)) {
++ pp->quirks |= DW_PCIE_QUIRK_NO_MSI_VEC;
++ pp->quirks |= DW_PCIE_QUIRK_MSI_SELF_EN;
++ dw_pcie_msi_init(pp);
++ }
++
+ return 0;
+ }
+
+@@ -392,6 +452,15 @@
+ return ret;
+ }
+
++static irqreturn_t imx_pcie_msi_irq_handler(int irq, void *arg)
++{
++ struct pcie_port *pp = arg;
++
++ dw_handle_msi_irq(pp);
++
++ return IRQ_HANDLED;
++}
++
+ static void imx6_pcie_host_init(struct pcie_port *pp)
+ {
+ imx6_pcie_assert_core_reset(pp);
+@@ -498,6 +567,22 @@
+ return -ENODEV;
+ }
+
++ if (IS_ENABLED(CONFIG_PCI_MSI)) {
++ pp->msi_irq = pp->irq - 3;
++ if (!pp->msi_irq) {
++ dev_err(&pdev->dev, "failed to get msi irq\n");
++ return -ENODEV;
++ }
++
++ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
++ imx_pcie_msi_irq_handler,
++ IRQF_SHARED, "imx6q-pcie", pp);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request msi irq\n");
++ return ret;
++ }
++ }
++
+ pp->root_bus_nr = -1;
+ pp->ops = &imx6_pcie_host_ops;
+
+@@ -511,29 +596,188 @@
+ return 0;
+ }
+
++static ssize_t imx_pcie_bar0_addr_info(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
++ struct pcie_port *pp = &imx6_pcie->pp;
++
++ return sprintf(buf, "imx-pcie-bar0-addr-info start 0x%08x\n",
++ readl(pp->dbi_base + PCI_BASE_ADDRESS_0));
++}
++
++static ssize_t imx_pcie_bar0_addr_start(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ u32 bar_start;
++ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
++ struct pcie_port *pp = &imx6_pcie->pp;
++
++ sscanf(buf, "%x\n", &bar_start);
++ writel(bar_start, pp->dbi_base + PCI_BASE_ADDRESS_0);
++
++ return count;
++}
++
++static void imx_pcie_regions_setup(struct device *dev)
++{
++ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
++ struct pcie_port *pp = &imx6_pcie->pp;
++
++ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
++ /*
++ * region2 outbound used to access rc mem
++ * in imx6 pcie ep/rc validation system
++ */
++ writel(0, pp->dbi_base + PCIE_ATU_VIEWPORT);
++ writel(0x01000000, pp->dbi_base + PCIE_ATU_LOWER_BASE);
++ writel(0, pp->dbi_base + PCIE_ATU_UPPER_BASE);
++ writel(0x01000000 + test_region_size,
++ pp->dbi_base + PCIE_ATU_LIMIT);
++
++ writel(ddr_test_region,
++ pp->dbi_base + PCIE_ATU_LOWER_TARGET);
++ writel(0, pp->dbi_base + PCIE_ATU_UPPER_TARGET);
++ writel(PCIE_ATU_TYPE_MEM, pp->dbi_base + PCIE_ATU_CR1);
++ writel(PCIE_ATU_ENABLE, pp->dbi_base + PCIE_ATU_CR2);
++ }
++
++ if (IS_ENABLED(CONFIG_RC_MODE_IN_EP_RC_SYS)) {
++ /*
++ * region2 outbound used to access ep mem
++ * in imx6 pcie ep/rc validation system
++ */
++ writel(2, pp->dbi_base + PCIE_ATU_VIEWPORT);
++ writel(0x01000000, pp->dbi_base + PCIE_ATU_LOWER_BASE);
++ writel(0, pp->dbi_base + PCIE_ATU_UPPER_BASE);
++ writel(0x01000000 + test_region_size,
++ pp->dbi_base + PCIE_ATU_LIMIT);
++
++ writel(ddr_test_region,
++ pp->dbi_base + PCIE_ATU_LOWER_TARGET);
++ writel(0, pp->dbi_base + PCIE_ATU_UPPER_TARGET);
++ writel(PCIE_ATU_TYPE_MEM, pp->dbi_base + PCIE_ATU_CR1);
++ writel(PCIE_ATU_ENABLE, pp->dbi_base + PCIE_ATU_CR2);
++ }
++}
++
++static ssize_t imx_pcie_memw_info(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ return sprintf(buf, "imx-pcie-rc-memw-info start 0x%08x, size 0x%08x\n",
++ ddr_test_region, test_region_size);
++}
++
++static ssize_t
++imx_pcie_memw_start(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ u32 memw_start;
++
++ sscanf(buf, "%x\n", &memw_start);
++
++ if (memw_start < 0x10000000) {
++ dev_err(dev, "Invalid memory start address.\n");
++ dev_info(dev, "For example: echo 0x41000000 > /sys/...");
++ return -1;
++ }
++
++ if (ddr_test_region != memw_start) {
++ ddr_test_region = memw_start;
++ /* Re-setup the iATU */
++ imx_pcie_regions_setup(dev);
++ }
++
++ return count;
++}
++
++static ssize_t
++imx_pcie_memw_size(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ u32 memw_size;
++
++ sscanf(buf, "%x\n", &memw_size);
++
++ if ((memw_size > (SZ_16M - SZ_1M)) || (memw_size < SZ_64K)) {
++ dev_err(dev, "Invalid, should be [SZ_64K,SZ_16M - SZ_1MB].\n");
++ dev_info(dev, "For example: echo 0x800000 > /sys/...");
++ return -1;
++ }
++
++ if (test_region_size != memw_size) {
++ test_region_size = memw_size;
++ /* Re-setup the iATU */
++ imx_pcie_regions_setup(dev);
++ }
++
++ return count;
++}
++
++static DEVICE_ATTR(memw_info, S_IRUGO, imx_pcie_memw_info, NULL);
++static DEVICE_ATTR(memw_start_set, S_IWUGO, NULL, imx_pcie_memw_start);
++static DEVICE_ATTR(memw_size_set, S_IWUGO, NULL, imx_pcie_memw_size);
++static DEVICE_ATTR(ep_bar0_addr, S_IRWXUGO, imx_pcie_bar0_addr_info,
++ imx_pcie_bar0_addr_start);
++
++static struct attribute *imx_pcie_attrs[] = {
++ /*
++ * The start address, and the limitation (64KB ~ (16MB - 1MB))
++ * of the ddr mem window reserved by RC, and used for EP to access.
++ * BTW, these attrs are only configured at EP side.
++ */
++ &dev_attr_memw_info.attr,
++ &dev_attr_memw_start_set.attr,
++ &dev_attr_memw_size_set.attr,
++ &dev_attr_ep_bar0_addr.attr,
++ NULL
++};
++
++static struct attribute_group imx_pcie_attrgroup = {
++ .attrs = imx_pcie_attrs,
++};
++
+ static int __init imx6_pcie_probe(struct platform_device *pdev)
+ {
+ struct imx6_pcie *imx6_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+- int ret;
++ int ret = 0;
++ int i;
++ void *test_reg1, *test_reg2;
++ void __iomem *pcie_arb_base_addr;
++ struct timeval tv1, tv2, tv3;
++ u32 tv_count1, tv_count2;
+
+ imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+- if (!imx6_pcie)
+- return -ENOMEM;
++ if (!imx6_pcie) {
++ ret = -ENOMEM;
++ goto err;
++ }
+
+ pp = &imx6_pcie->pp;
+ pp->dev = &pdev->dev;
+
++ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
++ /* add attributes for device */
++ ret = sysfs_create_group(&pdev->dev.kobj, &imx_pcie_attrgroup);
++ if (ret) {
++ ret = -EINVAL;
++ goto err;
++ }
++ }
++
+ /* Added for PCI abort handling */
+ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+- if (IS_ERR(pp->dbi_base))
+- return PTR_ERR(pp->dbi_base);
++ if (IS_ERR(pp->dbi_base)) {
++ ret = PTR_ERR(pp->dbi_base);
++ goto err;
++ }
+
+ /* Fetch GPIOs */
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+@@ -542,7 +786,7 @@
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+- return ret;
++ goto err;
+ }
+ }
+
+@@ -554,7 +798,7 @@
+ "PCIe power enable");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get power-on gpio\n");
+- return ret;
++ goto err;
+ }
+ }
+
+@@ -566,7 +810,7 @@
+ "PCIe wake up");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get wake-up gpio\n");
+- return ret;
++ goto err;
+ }
+ }
+
+@@ -578,7 +822,7 @@
+ "PCIe disable endpoint");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
+- return ret;
++ goto err;
+ }
+ }
+
+@@ -587,28 +831,32 @@
+ if (IS_ERR(imx6_pcie->lvds_gate)) {
+ dev_err(&pdev->dev,
+ "lvds_gate clock select missing or invalid\n");
+- return PTR_ERR(imx6_pcie->lvds_gate);
++ ret = PTR_ERR(imx6_pcie->lvds_gate);
++ goto err;
+ }
+
+ imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
+ if (IS_ERR(imx6_pcie->sata_ref_100m)) {
+ dev_err(&pdev->dev,
+ "sata_ref_100m clock source missing or invalid\n");
+- return PTR_ERR(imx6_pcie->sata_ref_100m);
++ ret = PTR_ERR(imx6_pcie->sata_ref_100m);
++ goto err;
+ }
+
+ imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
+ if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
+ dev_err(&pdev->dev,
+ "pcie_ref_125m clock source missing or invalid\n");
+- return PTR_ERR(imx6_pcie->pcie_ref_125m);
++ ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
++ goto err;
+ }
+
+ imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
+ if (IS_ERR(imx6_pcie->pcie_axi)) {
+ dev_err(&pdev->dev,
+ "pcie_axi clock source missing or invalid\n");
+- return PTR_ERR(imx6_pcie->pcie_axi);
++ ret = PTR_ERR(imx6_pcie->pcie_axi);
++ goto err;
+ }
+
+ /* Grab GPR config register range */
+@@ -616,15 +864,178 @@
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+- return PTR_ERR(imx6_pcie->iomuxc_gpr);
++ ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
++ goto err;
+ }
+
+- ret = imx6_add_pcie_port(pp, pdev);
+- if (ret < 0)
+- return ret;
++ if (of_find_property(np, "no-msi", NULL))
++ pci_no_msi();
+
+- platform_set_drvdata(pdev, imx6_pcie);
+- return 0;
++ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
++ if (IS_ENABLED(CONFIG_EP_SELF_IO_TEST)) {
++ /* Prepare the test regions and data */
++ test_reg1 = devm_kzalloc(&pdev->dev,
++ test_region_size, GFP_KERNEL);
++ if (!test_reg1) {
++ pr_err("pcie ep: can't alloc the test reg1.\n");
++ ret = PTR_ERR(test_reg1);
++ goto err;
++ }
++
++ test_reg2 = devm_kzalloc(&pdev->dev,
++ test_region_size, GFP_KERNEL);
++ if (!test_reg2) {
++ pr_err("pcie ep: can't alloc the test reg2.\n");
++ ret = PTR_ERR(test_reg1);
++ goto err;
++ }
++
++ pcie_arb_base_addr = ioremap_cache(0x01000000,
++ test_region_size);
++
++ if (!pcie_arb_base_addr) {
++ pr_err("error with ioremap in ep selftest\n");
++ ret = PTR_ERR(pcie_arb_base_addr);
++ goto err;
++ }
++
++ for (i = 0; i < test_region_size; i = i + 4) {
++ writel(0xE6600D00 + i, test_reg1 + i);
++ writel(0xDEADBEAF, test_reg2 + i);
++ }
++ }
++
++ imx6_pcie_init_phy(pp);
++
++ imx6_pcie_deassert_core_reset(pp);
++
++ /* assert LTSSM enable */
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
++ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
++
++
++ dev_info(&pdev->dev, "PCIe EP: waiting for link up...\n");
++
++ platform_set_drvdata(pdev, imx6_pcie);
++ /* link is indicated by the bit4 of DB_R1 register */
++ do {
++ usleep_range(10, 20);
++ } while ((readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & 0x10) == 0);
++
++ /* CMD reg:I/O space, MEM space, and Bus Master Enable */
++ writel(readl(pp->dbi_base + PCI_COMMAND)
++ | PCI_COMMAND_IO
++ | PCI_COMMAND_MEMORY
++ | PCI_COMMAND_MASTER,
++ pp->dbi_base + PCI_COMMAND);
++
++ /*
++ * configure the class_rev(emaluate one memory ram ep device),
++ * bar0 and bar1 of ep
++ */
++ writel(0xdeadbeaf, pp->dbi_base + PCI_VENDOR_ID);
++ writel(readl(pp->dbi_base + PCI_CLASS_REVISION)
++ | (PCI_CLASS_MEMORY_RAM << 16),
++ pp->dbi_base + PCI_CLASS_REVISION);
++ writel(0xdeadbeaf, pp->dbi_base
++ + PCI_SUBSYSTEM_VENDOR_ID);
++
++ /* 32bit none-prefetchable 8M bytes memory on bar0 */
++ writel(0x0, pp->dbi_base + PCI_BASE_ADDRESS_0);
++ writel(SZ_8M - 1, pp->dbi_base + (1 << 12)
++ + PCI_BASE_ADDRESS_0);
++
++ /* None used bar1 */
++ writel(0x0, pp->dbi_base + PCI_BASE_ADDRESS_1);
++ writel(0, pp->dbi_base + (1 << 12) + PCI_BASE_ADDRESS_1);
++
++ /* 4K bytes IO on bar2 */
++ writel(0x1, pp->dbi_base + PCI_BASE_ADDRESS_2);
++ writel(SZ_4K - 1, pp->dbi_base + (1 << 12) +
++ PCI_BASE_ADDRESS_2);
++
++ /*
++ * 32bit prefetchable 1M bytes memory on bar3
++ * FIXME BAR MASK3 is not changable, the size
++ * is fixed to 256 bytes.
++ */
++ writel(0x8, pp->dbi_base + PCI_BASE_ADDRESS_3);
++ writel(SZ_1M - 1, pp->dbi_base + (1 << 12)
++ + PCI_BASE_ADDRESS_3);
++
++ /*
++ * 64bit prefetchable 1M bytes memory on bar4-5.
++ * FIXME BAR4,5 are not enabled yet
++ */
++ writel(0xc, pp->dbi_base + PCI_BASE_ADDRESS_4);
++ writel(SZ_1M - 1, pp->dbi_base + (1 << 12)
++ + PCI_BASE_ADDRESS_4);
++ writel(0, pp->dbi_base + (1 << 12) + PCI_BASE_ADDRESS_5);
++
++ /* Re-setup the iATU */
++ imx_pcie_regions_setup(&pdev->dev);
++
++ if (IS_ENABLED(CONFIG_EP_SELF_IO_TEST)) {
++ /* PCIe EP start the data transfer after link up */
++ pr_info("pcie ep: Starting data transfer...\n");
++ do_gettimeofday(&tv1);
++
++ memcpy((unsigned long *)pcie_arb_base_addr,
++ (unsigned long *)test_reg1,
++ test_region_size);
++
++ do_gettimeofday(&tv2);
++
++ memcpy((unsigned long *)test_reg2,
++ (unsigned long *)pcie_arb_base_addr,
++ test_region_size);
++
++ do_gettimeofday(&tv3);
++
++ if (memcmp(test_reg2, test_reg1, test_region_size) == 0) {
++ tv_count1 = (tv2.tv_sec - tv1.tv_sec)
++ * USEC_PER_SEC
++ + tv2.tv_usec - tv1.tv_usec;
++ tv_count2 = (tv3.tv_sec - tv2.tv_sec)
++ * USEC_PER_SEC
++ + tv3.tv_usec - tv2.tv_usec;
++
++ pr_info("pcie ep: Data transfer is successful."
++ " tv_count1 %dus,"
++ " tv_count2 %dus.\n",
++ tv_count1, tv_count2);
++ pr_info("pcie ep: Data write speed:%ldMB/s.\n",
++ ((test_region_size/1024)
++ * MSEC_PER_SEC)
++ /(tv_count1));
++ pr_info("pcie ep: Data read speed:%ldMB/s.\n",
++ ((test_region_size/1024)
++ * MSEC_PER_SEC)
++ /(tv_count2));
++ } else {
++ pr_info("pcie ep: Data transfer is failed.\n");
++ }
++ }
++ } else {
++ ret = imx6_add_pcie_port(pp, pdev);
++ if (ret < 0)
++ goto err;
++ platform_set_drvdata(pdev, imx6_pcie);
++
++ /* Re-setup the iATU */
++ imx_pcie_regions_setup(&pdev->dev);
++ }
++
++err:
++ return ret;
++}
++
++static void imx6_pcie_shutdown(struct platform_device *pdev)
++{
++ struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
++
++ /* bring down link, so bootloader gets clean state in case of reboot */
++ imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+ }
+
+ static const struct of_device_id imx6_pcie_of_match[] = {
+@@ -639,6 +1050,7 @@
+ .owner = THIS_MODULE,
+ .of_match_table = imx6_pcie_of_match,
+ },
++ .shutdown = imx6_pcie_shutdown,
+ };
+
+ /* Freescale PCIe driver does not allow module unload */
+diff -Nur linux-3.14.36/drivers/pci/host/pci-imx6-ep-driver.c linux-openelec/drivers/pci/host/pci-imx6-ep-driver.c
+--- linux-3.14.36/drivers/pci/host/pci-imx6-ep-driver.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/pci/host/pci-imx6-ep-driver.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,159 @@
++/*
++ * PCIe endpoint skeleton driver for IMX6 SOCs
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/pci-aspm.h>
++#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++
++#define DRV_DESCRIPTION "i.MX PCIE endpoint device driver"
++#define DRV_VERSION "version 0.1"
++#define DRV_NAME "imx_pcie_ep"
++
++struct imx_pcie_ep_priv {
++ struct pci_dev *pci_dev;
++ void __iomem *hw_base;
++};
++
++/**
++ * imx_pcie_ep_probe - Device Initialization Routine
++ * @pdev: PCI device information struct
++ * @id: entry in id_tbl
++ *
++ * Returns 0 on success, negative on failure
++ **/
++static int imx_pcie_ep_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ int ret = 0;
++ struct device *dev = &pdev->dev;
++ struct imx_pcie_ep_priv *priv;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ dev_err(dev, "can't alloc imx pcie priv\n");
++ return -ENOMEM;
++ }
++
++ priv->pci_dev = pdev;
++
++ if (pci_enable_device(pdev)) {
++ ret = -ENODEV;
++ goto out;
++ }
++ pci_set_master(pdev);
++
++ pci_set_drvdata(pdev, priv);
++
++ priv->hw_base = pci_iomap(pdev, 0, 0);
++ if (!priv->hw_base) {
++ ret = -ENODEV;
++ goto out;
++ }
++
++ pr_info("pci_resource_len = 0x%08llx\n",
++ (unsigned long long) pci_resource_len(pdev, 0));
++ pr_info("pci_resource_base = %p\n", priv->hw_base);
++
++ ret = pci_enable_msi(priv->pci_dev);
++ if (ret < 0) {
++ dev_err(dev, "can't enable msi\n");
++ return ret;
++ }
++
++ /*
++ * Force to use 0x01FF8000 as the MSI address,
++ * to do the MSI demo
++ */
++ pci_bus_write_config_dword(pdev->bus, 0, 0x54, 0x01FF8000);
++ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x820, 0x01FF8000);
++
++ /* configure rc's msi cap */
++ pci_bus_read_config_dword(pdev->bus->parent, 0, 0x50, &ret);
++ ret |= (PCI_MSI_FLAGS_ENABLE << 16);
++ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x50, ret);
++ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x828, 0x1);
++ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x82C, 0xFFFFFFFE);
++
++ return 0;
++
++out:
++ return ret;
++}
++
++static void imx_pcie_ep_remove(struct pci_dev *pdev)
++{
++ struct imx_pcie_ep_priv *priv = pci_get_drvdata(pdev);
++
++ if (!priv)
++ return;
++ pr_info("***imx pcie ep driver unload***\n");
++}
++
++static struct pci_device_id imx_pcie_ep_ids[] = {
++ {
++ .class = PCI_CLASS_MEMORY_RAM << 8,
++ .class_mask = ~0,
++ .vendor = 0xbeaf,
++ .device = 0xdead,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ },
++ { } /* terminate list */
++};
++MODULE_DEVICE_TABLE(pci, imx_pcie_ep_ids);
++
++static struct pci_driver imx_pcie_ep_driver = {
++ .name = DRV_NAME,
++ .id_table = imx_pcie_ep_ids,
++ .probe = imx_pcie_ep_probe,
++ .remove = imx_pcie_ep_remove,
++};
++
++static int __init imx_pcie_ep_init(void)
++{
++ int ret;
++ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
++
++ ret = pci_register_driver(&imx_pcie_ep_driver);
++ if (ret)
++ pr_err("Unable to initialize PCI module\n");
++
++ return ret;
++}
++
++static void __exit imx_pcie_ep_exit(void)
++{
++ pci_unregister_driver(&imx_pcie_ep_driver);
++}
++
++module_exit(imx_pcie_ep_exit);
++module_init(imx_pcie_ep_init);
++
++MODULE_DESCRIPTION(DRV_DESCRIPTION);
++MODULE_VERSION(DRV_VERSION);
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("imx_pcie_ep");
+diff -Nur linux-3.14.36/drivers/pinctrl/devicetree.c linux-openelec/drivers/pinctrl/devicetree.c
+--- linux-3.14.36/drivers/pinctrl/devicetree.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pinctrl/devicetree.c 2015-05-06 12:05:42.000000000 -0500
+@@ -18,6 +18,7 @@
+
+ #include <linux/device.h>
+ #include <linux/of.h>
++#include <linux/of_gpio.h>
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/slab.h>
+
+@@ -172,6 +173,43 @@
+ return dt_remember_or_free_map(p, statename, NULL, map, 1);
+ }
+
++static int dt_gpio_assert_pinctrl(struct pinctrl *p)
++{
++ struct device_node *np = p->dev->of_node;
++ enum of_gpio_flags flags;
++ int gpio;
++ int index = 0;
++ int ret;
++
++ if (!of_find_property(np, "pinctrl-assert-gpios", NULL))
++ return 0; /* Missing the property, so nothing to be done */
++
++ for (;; index++) {
++ gpio = of_get_named_gpio_flags(np, "pinctrl-assert-gpios",
++ index, &flags);
++ if (gpio < 0)
++ break; /* End of the phandle list */
++
++ if (!gpio_is_valid(gpio))
++ return -EINVAL;
++
++ ret = devm_gpio_request_one(p->dev, gpio, GPIOF_OUT_INIT_LOW,
++ NULL);
++ if (ret < 0)
++ return ret;
++
++ if (flags & OF_GPIO_ACTIVE_LOW)
++ continue;
++
++ if (gpio_cansleep(gpio))
++ gpio_set_value_cansleep(gpio, 1);
++ else
++ gpio_set_value(gpio, 1);
++ }
++
++ return 0;
++}
++
+ int pinctrl_dt_to_map(struct pinctrl *p)
+ {
+ struct device_node *np = p->dev->of_node;
+@@ -190,6 +228,12 @@
+ return 0;
+ }
+
++ ret = dt_gpio_assert_pinctrl(p);
++ if (ret) {
++ dev_dbg(p->dev, "failed to assert pinctrl setting: %d\n", ret);
++ return ret;
++ }
++
+ /* We may store pointers to property names within the node */
+ of_node_get(np);
+
+diff -Nur linux-3.14.36/drivers/pinctrl/pinctrl-imx6sl.c linux-openelec/drivers/pinctrl/pinctrl-imx6sl.c
+--- linux-3.14.36/drivers/pinctrl/pinctrl-imx6sl.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pinctrl/pinctrl-imx6sl.c 2015-05-06 12:05:42.000000000 -0500
+@@ -384,6 +384,10 @@
+ },
+ .probe = imx6sl_pinctrl_probe,
+ .remove = imx_pinctrl_remove,
++#ifdef CONFIG_PM
++ .suspend = imx_pinctrl_suspend,
++ .resume = imx_pinctrl_resume,
++#endif
+ };
+
+ static int __init imx6sl_pinctrl_init(void)
+diff -Nur linux-3.14.36/drivers/pinctrl/pinctrl-imx.c linux-openelec/drivers/pinctrl/pinctrl-imx.c
+--- linux-3.14.36/drivers/pinctrl/pinctrl-imx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pinctrl/pinctrl-imx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1,7 +1,7 @@
+ /*
+ * Core driver for the imx pin controller
+ *
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+@@ -628,3 +628,25 @@
+
+ return 0;
+ }
++
++#ifdef CONFIG_PM
++int imx_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct imx_pinctrl *ipctl = platform_get_drvdata(pdev);
++
++ if (!ipctl)
++ return -EINVAL;
++
++ return pinctrl_force_sleep(ipctl->pctl);
++}
++
++int imx_pinctrl_resume(struct platform_device *pdev)
++{
++ struct imx_pinctrl *ipctl = platform_get_drvdata(pdev);
++
++ if (!ipctl)
++ return -EINVAL;
++
++ return pinctrl_force_default(ipctl->pctl);
++}
++#endif
+diff -Nur linux-3.14.36/drivers/pinctrl/pinctrl-imx.h linux-openelec/drivers/pinctrl/pinctrl-imx.h
+--- linux-3.14.36/drivers/pinctrl/pinctrl-imx.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pinctrl/pinctrl-imx.h 2015-05-06 12:05:42.000000000 -0500
+@@ -1,7 +1,7 @@
+ /*
+ * IMX pinmux core definitions
+ *
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+@@ -98,4 +98,8 @@
+ int imx_pinctrl_probe(struct platform_device *pdev,
+ struct imx_pinctrl_soc_info *info);
+ int imx_pinctrl_remove(struct platform_device *pdev);
++#ifdef CONFIG_PM
++int imx_pinctrl_suspend(struct platform_device *pdev, pm_message_t state);
++int imx_pinctrl_resume(struct platform_device *pdev);
++#endif
+ #endif /* __DRIVERS_PINCTRL_IMX_H */
+diff -Nur linux-3.14.36/drivers/power/imx6_usb_charger.c linux-openelec/drivers/power/imx6_usb_charger.c
+--- linux-3.14.36/drivers/power/imx6_usb_charger.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/power/imx6_usb_charger.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,294 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/power/imx6_usb_charger.h>
++#include <linux/regmap.h>
++
++#define HW_ANADIG_REG_3P0_SET (0x00000124)
++#define HW_ANADIG_REG_3P0_CLR (0x00000128)
++#define BM_ANADIG_REG_3P0_ENABLE_ILIMIT 0x00000004
++#define BM_ANADIG_REG_3P0_ENABLE_LINREG 0x00000001
++
++#define HW_ANADIG_USB1_CHRG_DETECT_SET (0x000001b4)
++#define HW_ANADIG_USB1_CHRG_DETECT_CLR (0x000001b8)
++
++#define BM_ANADIG_USB1_CHRG_DETECT_EN_B 0x00100000
++#define BM_ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B 0x00080000
++#define BM_ANADIG_USB1_CHRG_DETECT_CHK_CONTACT 0x00040000
++
++#define HW_ANADIG_USB1_VBUS_DET_STAT (0x000001c0)
++
++#define BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID 0x00000008
++
++#define HW_ANADIG_USB1_CHRG_DET_STAT (0x000001d0)
++
++#define BM_ANADIG_USB1_CHRG_DET_STAT_DM_STATE 0x00000004
++#define BM_ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED 0x00000002
++#define BM_ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT 0x00000001
++
++static char *imx6_usb_charger_supplied_to[] = {
++ "imx6_usb_charger",
++};
++
++static enum power_supply_property imx6_usb_charger_power_props[] = {
++ POWER_SUPPLY_PROP_PRESENT, /* Charger detected */
++ POWER_SUPPLY_PROP_ONLINE, /* VBUS online */
++ POWER_SUPPLY_PROP_CURRENT_MAX, /* Maximum current in mA */
++};
++
++static int imx6_usb_charger_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct usb_charger *charger =
++ container_of(psy, struct usb_charger, psy);
++
++ switch (psp) {
++ case POWER_SUPPLY_PROP_PRESENT:
++ val->intval = charger->present;
++ break;
++ case POWER_SUPPLY_PROP_ONLINE:
++ val->intval = charger->online;
++ break;
++ case POWER_SUPPLY_PROP_CURRENT_MAX:
++ val->intval = charger->max_current;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void disable_charger_detector(struct regmap *regmap)
++{
++ regmap_write(regmap, HW_ANADIG_USB1_CHRG_DETECT_SET,
++ BM_ANADIG_USB1_CHRG_DETECT_EN_B |
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
++}
++
++static void disable_current_limiter(struct regmap *regmap)
++{
++ /* Disable the vdd3p0 current limiter */
++ regmap_write(regmap, HW_ANADIG_REG_3P0_CLR,
++ BM_ANADIG_REG_3P0_ENABLE_ILIMIT);
++}
++
++/* Return value if the charger is present */
++static int imx6_usb_charger_detect(struct usb_charger *charger)
++{
++ struct regmap *regmap = charger->anatop;
++ u32 val;
++ int i, data_pin_contact_count = 0;
++
++ /* Enable the vdd3p0 curret limiter */
++ regmap_write(regmap, HW_ANADIG_REG_3P0_SET,
++ BM_ANADIG_REG_3P0_ENABLE_LINREG |
++ BM_ANADIG_REG_3P0_ENABLE_ILIMIT);
++
++ /* check if vbus is valid */
++ regmap_read(regmap, HW_ANADIG_USB1_VBUS_DET_STAT, &val);
++ if (!(val & BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)) {
++ dev_err(charger->dev, "vbus is error\n");
++ disable_current_limiter(regmap);
++ return -EINVAL;
++ }
++
++ /* Enable charger detector */
++ regmap_write(regmap, HW_ANADIG_USB1_CHRG_DETECT_CLR,
++ BM_ANADIG_USB1_CHRG_DETECT_EN_B);
++ /*
++ * - Do not check whether a charger is connected to the USB port
++ * - Check whether the USB plug has been in contact with each other
++ */
++ regmap_write(regmap, HW_ANADIG_USB1_CHRG_DETECT_SET,
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
++
++ /* Check if plug is connected */
++ for (i = 0; i < 100; i = i + 1) {
++ regmap_read(regmap, HW_ANADIG_USB1_CHRG_DET_STAT, &val);
++ if (val & BM_ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT) {
++ if (data_pin_contact_count++ > 5)
++ /* Data pin makes contact */
++ break;
++ } else {
++ msleep(20);
++ }
++ }
++
++ if (i == 100) {
++ dev_err(charger->dev,
++ "VBUS is coming from a dedicated power supply.\n");
++ disable_current_limiter(regmap);
++ disable_charger_detector(regmap);
++ return -ENXIO;
++ }
++
++ /*
++ * - Do check whether a charger is connected to the USB port
++ * - Do not Check whether the USB plug has been in contact with
++ * each other
++ */
++ regmap_write(regmap, HW_ANADIG_USB1_CHRG_DETECT_CLR,
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
++ msleep(45);
++
++ /* Check if it is a charger */
++ regmap_read(regmap, HW_ANADIG_USB1_CHRG_DET_STAT, &val);
++ if (!(val & BM_ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED)) {
++ dev_dbg(charger->dev, "It is a stardard downstream port\n");
++ charger->psy.type = POWER_SUPPLY_TYPE_USB;
++ charger->max_current = 500;
++ disable_charger_detector(regmap);
++ } else {
++ /* It is a charger */
++ disable_charger_detector(regmap);
++ msleep(45);
++ }
++
++ disable_current_limiter(regmap);
++
++ return 0;
++}
++
++/*
++ * imx6_usb_vbus_connect - inform about VBUS connection
++ * @charger: the usb charger
++ *
++ * Inform the charger VBUS is connected, vbus detect supplier should call it.
++ * Besides, the USB device controller is expected to keep the dataline
++ * pullups disabled.
++ */
++int imx6_usb_vbus_connect(struct usb_charger *charger)
++{
++ int ret;
++
++ charger->online = 1;
++
++ mutex_lock(&charger->lock);
++
++ /* Start the 1st period charger detection. */
++ ret = imx6_usb_charger_detect(charger);
++ if (ret)
++ dev_err(charger->dev,
++ "Error occurs during detection: %d\n",
++ ret);
++ else
++ charger->present = 1;
++
++ mutex_unlock(&charger->lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(imx6_usb_vbus_connect);
++
++/*
++ * It must be called after dp is pulled up (from USB controller driver),
++ * That is used to differentiate DCP and CDP
++ */
++int imx6_usb_charger_detect_post(struct usb_charger *charger)
++{
++ struct regmap *regmap = charger->anatop;
++ int val;
++
++ mutex_lock(&charger->lock);
++
++ msleep(40);
++
++ regmap_read(regmap, HW_ANADIG_USB1_CHRG_DET_STAT, &val);
++ if (val & BM_ANADIG_USB1_CHRG_DET_STAT_DM_STATE) {
++ dev_dbg(charger->dev, "It is a dedicate charging port\n");
++ charger->psy.type = POWER_SUPPLY_TYPE_USB_DCP;
++ charger->max_current = 1500;
++ } else {
++ dev_dbg(charger->dev, "It is a charging downstream port\n");
++ charger->psy.type = POWER_SUPPLY_TYPE_USB_CDP;
++ charger->max_current = 900;
++ }
++
++ power_supply_changed(&charger->psy);
++
++ mutex_unlock(&charger->lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(imx6_usb_charger_detect_post);
++
++/*
++ * imx6_usb_vbus_disconnect - inform about VBUS disconnection
++ * @charger: the usb charger
++ *
++ * Inform the charger that VBUS is disconnected. The charging will be
++ * stopped and the charger properties cleared.
++ */
++int imx6_usb_vbus_disconnect(struct usb_charger *charger)
++{
++ charger->online = 0;
++ charger->present = 0;
++ charger->max_current = 0;
++ charger->psy.type = POWER_SUPPLY_TYPE_MAINS;
++
++ power_supply_changed(&charger->psy);
++
++ return 0;
++}
++EXPORT_SYMBOL(imx6_usb_vbus_disconnect);
++
++/*
++ * imx6_usb_create_charger - create a USB charger
++ * @charger: the charger to be initialized
++ * @name: name for the power supply
++
++ * Registers a power supply for the charger. The USB Controller
++ * driver will call this after filling struct usb_charger.
++ */
++int imx6_usb_create_charger(struct usb_charger *charger,
++ const char *name)
++{
++ struct power_supply *psy = &charger->psy;
++
++ if (!charger->dev)
++ return -EINVAL;
++
++ if (name)
++ psy->name = name;
++ else
++ psy->name = "imx6_usb_charger";
++
++ charger->bc = BATTERY_CHARGING_SPEC_1_2;
++ mutex_init(&charger->lock);
++
++ psy->type = POWER_SUPPLY_TYPE_MAINS;
++ psy->properties = imx6_usb_charger_power_props;
++ psy->num_properties = ARRAY_SIZE(imx6_usb_charger_power_props);
++ psy->get_property = imx6_usb_charger_get_property;
++ psy->supplied_to = imx6_usb_charger_supplied_to;
++ psy->num_supplicants = sizeof(imx6_usb_charger_supplied_to)
++ / sizeof(char *);
++
++ return power_supply_register(charger->dev, psy);
++}
++EXPORT_SYMBOL(imx6_usb_create_charger);
++
++/*
++ * imx6_usb_remove_charger - remove a USB charger
++ * @charger: the charger to be removed
++ *
++ * Unregister the chargers power supply.
++ */
++void imx6_usb_remove_charger(struct usb_charger *charger)
++{
++ power_supply_unregister(&charger->psy);
++}
++EXPORT_SYMBOL(imx6_usb_remove_charger);
+diff -Nur linux-3.14.36/drivers/power/Kconfig linux-openelec/drivers/power/Kconfig
+--- linux-3.14.36/drivers/power/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/power/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -389,6 +389,12 @@
+ Say Y to enable support for the battery and AC power in the
+ Goldfish emulator.
+
++config IMX6_USB_CHARGER
++ bool "Freescale imx6 USB Charger"
++ depends on SOC_IMX6Q || SOC_IMX6SL
++ help
++ Say Y to enable Freescale imx6 USB Charger Detect.
++
+ source "drivers/power/reset/Kconfig"
+
+ endif # POWER_SUPPLY
+diff -Nur linux-3.14.36/drivers/power/Makefile linux-openelec/drivers/power/Makefile
+--- linux-3.14.36/drivers/power/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/power/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -58,3 +58,4 @@
+ obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
+ obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
+ obj-$(CONFIG_POWER_RESET) += reset/
++obj-$(CONFIG_IMX6_USB_CHARGER) += imx6_usb_charger.o
+diff -Nur linux-3.14.36/drivers/power/reset/Kconfig linux-openelec/drivers/power/reset/Kconfig
+--- linux-3.14.36/drivers/power/reset/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/power/reset/Kconfig 2015-07-24 18:03:30.276842002 -0500
+@@ -57,3 +57,11 @@
+ depends on POWER_RESET
+ help
+ Reboot support for the APM SoC X-Gene Eval boards.
++
++config POWER_RESET_UDOO
++ bool "UDOO power-off driver"
++ depends on POWER_RESET
++ help
++ This driver supports powering down the UDOO.
++ Say Y if you have a UDOO.
++
+diff -Nur linux-3.14.36/drivers/power/reset/Makefile linux-openelec/drivers/power/reset/Makefile
+--- linux-3.14.36/drivers/power/reset/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/power/reset/Makefile 2015-07-24 18:03:30.276842002 -0500
+@@ -5,3 +5,4 @@
+ obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
+ obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
+ obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
++obj-$(CONFIG_POWER_RESET_UDOO) += udoo-poweroff.o
+diff -Nur linux-3.14.36/drivers/power/reset/udoo-poweroff.c linux-openelec/drivers/power/reset/udoo-poweroff.c
+--- linux-3.14.36/drivers/power/reset/udoo-poweroff.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/power/reset/udoo-poweroff.c 2015-07-24 18:03:30.276842002 -0500
+@@ -0,0 +1,159 @@
++/*
++ * UDOO board power off
++ *
++ * Copyright (C) 2014 Jasbir Matharu
++ * Copyright (C) 2015 Peter Vicman
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#include <linux/platform_device.h>
++#include <linux/module.h>
++#include <linux/io.h>
++#include <linux/gpio.h>
++#include <linux/delay.h>
++#include <linux/of_address.h>
++#include <linux/of_platform.h>
++#include <linux/of_gpio.h>
++
++#define ARDUINO_MODE_STOPPED 1 /* does arduino starts at boot */
++#define ARDUINO_MODE_LEAVE_POWER 2 /* leave 5V power on after shutdown (to keep arduino reset) */
++
++static void (*pm_power_off_orig)(void) = NULL;
++static int sam3x_rst_gpio = -EINVAL;
++static int pwr_5v_gpio = -EINVAL;
++static u32 arduino_mode = -EINVAL;
++static int lcd_touch_reset_gpio = -EINVAL;
++static int lcd_panel_on_gpio = -EINVAL;
++static int lcd_backlight_gpio = -EINVAL;
++
++static void udoo_set_gpio(unsigned gpio, int value) {
++ int ret;
++
++ if (! gpio_is_valid(gpio))
++ return;
++
++ ret = gpio_direction_output(gpio, value);
++ if (ret)
++ pr_err("%s: gpio %u/%d failed\n", __func__, gpio, value);
++}
++
++static void udoo_request_gpio(struct device *dev, unsigned gpio, unsigned long flags, const char *label) {
++ int ret;
++
++ if (! gpio_is_valid(gpio))
++ return;
++
++ ret = devm_gpio_request_one(dev, gpio, flags, label);
++ if (ret)
++ dev_err(dev, "request of gpio %s %u failed with %d\n", label, gpio, ret);
++}
++
++static void udoo_power_off(void) {
++ pr_emerg("%s: powering off\n", __func__);
++
++ if (pm_power_off_orig != NULL)
++ pm_power_off_orig();
++
++ udoo_set_gpio(lcd_touch_reset_gpio, 1);
++ udoo_set_gpio(lcd_panel_on_gpio, 0);
++ udoo_set_gpio(lcd_backlight_gpio, 0);
++
++ udoo_set_gpio(sam3x_rst_gpio, 0);
++ msleep(50); /* stop sam3x safely */
++
++ if (gpio_is_valid(pwr_5v_gpio) && (arduino_mode & ARDUINO_MODE_LEAVE_POWER) == 0) {
++ pr_emerg("%s: 5V power down\n", __func__);
++ udoo_set_gpio(pwr_5v_gpio, 1);
++ } else
++ pr_emerg("%s: 5V power still on, sam3x reset\n", __func__);
++}
++
++static int udoo_power_off_probe(struct platform_device *pdev)
++{
++ struct device_node *pwr_off_np;
++ int ret;
++
++ dev_err(&pdev->dev, "%s: power-off probe\n", __func__);
++
++ pwr_off_np = of_find_compatible_node(NULL, NULL, "sitronix,st1232");
++ if (pwr_off_np) {
++ lcd_touch_reset_gpio = of_get_named_gpio(pwr_off_np, "gpios", 0);
++ lcd_panel_on_gpio = of_get_named_gpio(pwr_off_np, "lcd_panel_on_gpio", 0);
++ lcd_backlight_gpio = of_get_named_gpio(pwr_off_np, "lcd_backlight_gpio", 0);
++ of_node_put(pwr_off_np);
++
++ udoo_request_gpio(&pdev->dev, lcd_panel_on_gpio, GPIOF_OUT_INIT_HIGH, "lcd_panel_on_gpio");
++ udoo_request_gpio(&pdev->dev, lcd_backlight_gpio, GPIOF_OUT_INIT_HIGH, "lcd_backlight_gpio");
++
++ ret = gpio_export(lcd_backlight_gpio, false);
++ }
++
++ pwr_off_np = of_find_compatible_node(NULL, NULL, "udoo,poweroff");
++ if (pwr_off_np) {
++ ret = of_property_read_u32(pwr_off_np, "arduino_mode", &arduino_mode);
++ if (ret != 0) {
++ dev_err(&pdev->dev, "%s: arduino mode not found in dtb\n", __func__);
++ arduino_mode = 0;
++ }
++
++ sam3x_rst_gpio = of_get_named_gpio(pwr_off_np, "sam3x_rst_gpio", 0);
++ pwr_5v_gpio = of_get_named_gpio(pwr_off_np, "pwr_5v_gpio", 0);
++ of_node_put(pwr_off_np);
++
++ udoo_request_gpio(&pdev->dev, pwr_5v_gpio, GPIOF_OUT_INIT_LOW, "pwr_5v_gpio");
++
++ if (gpio_is_valid(sam3x_rst_gpio)) {
++ ret = gpio_export(sam3x_rst_gpio, false);
++
++ if (arduino_mode & ARDUINO_MODE_STOPPED) {
++ dev_err(&pdev->dev, "%s: arduino stopped\n", __func__);
++ udoo_set_gpio(sam3x_rst_gpio, 0);
++ } else {
++ dev_err(&pdev->dev, "%s: arduino running\n", __func__);
++ udoo_set_gpio(sam3x_rst_gpio, 1);
++ }
++ }
++
++ pm_power_off_orig = pm_power_off;
++ pm_power_off = udoo_power_off;
++ return 0;
++ }
++
++ /* If a pm_power_off function has already been added, leave it alone */
++ if (pm_power_off != NULL) {
++ dev_err(&pdev->dev, "%s: pm_power_off function already registered\n", __func__);
++ return -EBUSY;
++ }
++
++ return -ENODEV;
++}
++
++static int udoo_power_off_remove(struct platform_device *pdev)
++{
++ return 0;
++}
++
++static const struct of_device_id power_off_dt_ids[] = {
++ { .compatible = "udoo,poweroff", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, power_off_dt_ids);
++
++static struct platform_driver udoo_power_off_driver = {
++ .driver = {
++ .name = "udoo_power_off",
++ .owner = THIS_MODULE,
++ .of_match_table = of_match_ptr(power_off_dt_ids),
++ },
++ .probe = udoo_power_off_probe,
++ .remove = udoo_power_off_remove,
++};
++module_platform_driver(udoo_power_off_driver);
++
++MODULE_AUTHOR("Jasbir Matharu, Peter Vicman");
++MODULE_DESCRIPTION("UDOO Power off driver v3");
++MODULE_LICENSE("GPL v2");
+diff -Nur linux-3.14.36/drivers/ptp/ptp_chardev.c linux-openelec/drivers/ptp/ptp_chardev.c
+--- linux-3.14.36/drivers/ptp/ptp_chardev.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ptp/ptp_chardev.c 2015-05-06 12:05:42.000000000 -0500
+@@ -25,6 +25,96 @@
+
+ #include "ptp_private.h"
+
++static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
++ enum ptp_pin_function func, unsigned int chan)
++{
++ struct ptp_clock_request rq;
++ int err = 0;
++
++ memset(&rq, 0, sizeof(rq));
++
++ switch (func) {
++ case PTP_PF_NONE:
++ break;
++ case PTP_PF_EXTTS:
++ rq.type = PTP_CLK_REQ_EXTTS;
++ rq.extts.index = chan;
++ err = ops->enable(ops, &rq, 0);
++ break;
++ case PTP_PF_PEROUT:
++ rq.type = PTP_CLK_REQ_PEROUT;
++ rq.perout.index = chan;
++ err = ops->enable(ops, &rq, 0);
++ break;
++ case PTP_PF_PHYSYNC:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return err;
++}
++
++int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
++ enum ptp_pin_function func, unsigned int chan)
++{
++ struct ptp_clock_info *info = ptp->info;
++ struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
++ unsigned int i;
++
++ /* Check to see if any other pin previously had this function. */
++ for (i = 0; i < info->n_pins; i++) {
++ if (info->pin_config[i].func == func &&
++ info->pin_config[i].chan == chan) {
++ pin1 = &info->pin_config[i];
++ break;
++ }
++ }
++ if (pin1 && i == pin)
++ return 0;
++
++ /* Check the desired function and channel. */
++ switch (func) {
++ case PTP_PF_NONE:
++ break;
++ case PTP_PF_EXTTS:
++ if (chan >= info->n_ext_ts)
++ return -EINVAL;
++ break;
++ case PTP_PF_PEROUT:
++ if (chan >= info->n_per_out)
++ return -EINVAL;
++ break;
++ case PTP_PF_PHYSYNC:
++ pr_err("sorry, cannot reassign the calibration pin\n");
++ return -EINVAL;
++ default:
++ return -EINVAL;
++ }
++
++ if (pin2->func == PTP_PF_PHYSYNC) {
++ pr_err("sorry, cannot reprogram the calibration pin\n");
++ return -EINVAL;
++ }
++
++ if (info->verify(info, pin, func, chan)) {
++ pr_err("driver cannot use function %u on pin %u\n", func, chan);
++ return -EOPNOTSUPP;
++ }
++
++ /* Disable whatever function was previously assigned. */
++ if (pin1) {
++ ptp_disable_pinfunc(info, func, chan);
++ pin1->func = PTP_PF_NONE;
++ pin1->chan = 0;
++ }
++ ptp_disable_pinfunc(info, pin2->func, pin2->chan);
++ pin2->func = func;
++ pin2->chan = chan;
++
++ return 0;
++}
++
+ int ptp_open(struct posix_clock *pc, fmode_t fmode)
+ {
+ return 0;
+@@ -35,12 +125,13 @@
+ struct ptp_clock_caps caps;
+ struct ptp_clock_request req;
+ struct ptp_sys_offset *sysoff = NULL;
++ struct ptp_pin_desc pd;
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_clock_info *ops = ptp->info;
+ struct ptp_clock_time *pct;
+ struct timespec ts;
+ int enable, err = 0;
+- unsigned int i;
++ unsigned int i, pin_index;
+
+ switch (cmd) {
+
+@@ -51,6 +142,7 @@
+ caps.n_ext_ts = ptp->info->n_ext_ts;
+ caps.n_per_out = ptp->info->n_per_out;
+ caps.pps = ptp->info->pps;
++ caps.n_pins = ptp->info->n_pins;
+ if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
+ err = -EFAULT;
+ break;
+@@ -126,6 +218,40 @@
+ err = -EFAULT;
+ break;
+
++ case PTP_PIN_GETFUNC:
++ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
++ err = -EFAULT;
++ break;
++ }
++ pin_index = pd.index;
++ if (pin_index >= ops->n_pins) {
++ err = -EINVAL;
++ break;
++ }
++ if (mutex_lock_interruptible(&ptp->pincfg_mux))
++ return -ERESTARTSYS;
++ pd = ops->pin_config[pin_index];
++ mutex_unlock(&ptp->pincfg_mux);
++ if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
++ err = -EFAULT;
++ break;
++
++ case PTP_PIN_SETFUNC:
++ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
++ err = -EFAULT;
++ break;
++ }
++ pin_index = pd.index;
++ if (pin_index >= ops->n_pins) {
++ err = -EINVAL;
++ break;
++ }
++ if (mutex_lock_interruptible(&ptp->pincfg_mux))
++ return -ERESTARTSYS;
++ err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
++ mutex_unlock(&ptp->pincfg_mux);
++ break;
++
+ default:
+ err = -ENOTTY;
+ break;
+diff -Nur linux-3.14.36/drivers/ptp/ptp_clock.c linux-openelec/drivers/ptp/ptp_clock.c
+--- linux-3.14.36/drivers/ptp/ptp_clock.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ptp/ptp_clock.c 2015-05-06 12:05:42.000000000 -0500
+@@ -169,6 +169,7 @@
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+
+ mutex_destroy(&ptp->tsevq_mux);
++ mutex_destroy(&ptp->pincfg_mux);
+ ida_simple_remove(&ptp_clocks_map, ptp->index);
+ kfree(ptp);
+ }
+@@ -203,6 +204,7 @@
+ ptp->index = index;
+ spin_lock_init(&ptp->tsevq.lock);
+ mutex_init(&ptp->tsevq_mux);
++ mutex_init(&ptp->pincfg_mux);
+ init_waitqueue_head(&ptp->tsev_wq);
+
+ /* Create a new device in our class. */
+@@ -249,6 +251,7 @@
+ device_destroy(ptp_class, ptp->devid);
+ no_device:
+ mutex_destroy(&ptp->tsevq_mux);
++ mutex_destroy(&ptp->pincfg_mux);
+ no_slot:
+ kfree(ptp);
+ no_memory:
+@@ -305,6 +308,26 @@
+ }
+ EXPORT_SYMBOL(ptp_clock_index);
+
++int ptp_find_pin(struct ptp_clock *ptp,
++ enum ptp_pin_function func, unsigned int chan)
++{
++ struct ptp_pin_desc *pin = NULL;
++ int i;
++
++ mutex_lock(&ptp->pincfg_mux);
++ for (i = 0; i < ptp->info->n_pins; i++) {
++ if (ptp->info->pin_config[i].func == func &&
++ ptp->info->pin_config[i].chan == chan) {
++ pin = &ptp->info->pin_config[i];
++ break;
++ }
++ }
++ mutex_unlock(&ptp->pincfg_mux);
++
++ return pin ? i : -1;
++}
++EXPORT_SYMBOL(ptp_find_pin);
++
+ /* module operations */
+
+ static void __exit ptp_exit(void)
+diff -Nur linux-3.14.36/drivers/ptp/ptp_ixp46x.c linux-openelec/drivers/ptp/ptp_ixp46x.c
+--- linux-3.14.36/drivers/ptp/ptp_ixp46x.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ptp/ptp_ixp46x.c 2015-05-06 12:05:42.000000000 -0500
+@@ -244,6 +244,7 @@
+ .name = "IXP46X timer",
+ .max_adj = 66666655,
+ .n_ext_ts = N_EXT_TS,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_ixp_adjfreq,
+ .adjtime = ptp_ixp_adjtime,
+diff -Nur linux-3.14.36/drivers/ptp/ptp_pch.c linux-openelec/drivers/ptp/ptp_pch.c
+--- linux-3.14.36/drivers/ptp/ptp_pch.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ptp/ptp_pch.c 2015-05-06 12:05:42.000000000 -0500
+@@ -514,6 +514,7 @@
+ .name = "PCH timer",
+ .max_adj = 50000000,
+ .n_ext_ts = N_EXT_TS,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_pch_adjfreq,
+ .adjtime = ptp_pch_adjtime,
+diff -Nur linux-3.14.36/drivers/ptp/ptp_private.h linux-openelec/drivers/ptp/ptp_private.h
+--- linux-3.14.36/drivers/ptp/ptp_private.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/ptp/ptp_private.h 2015-05-06 12:05:42.000000000 -0500
+@@ -48,6 +48,7 @@
+ long dialed_frequency; /* remembers the frequency adjustment */
+ struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
+ struct mutex tsevq_mux; /* one process at a time reading the fifo */
++ struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
+ wait_queue_head_t tsev_wq;
+ int defunct; /* tells readers to go away when clock is being removed */
+ };
+@@ -69,6 +70,10 @@
+ * see ptp_chardev.c
+ */
+
++/* caller must hold pincfg_mux */
++int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
++ enum ptp_pin_function func, unsigned int chan);
++
+ long ptp_ioctl(struct posix_clock *pc,
+ unsigned int cmd, unsigned long arg);
+
+diff -Nur linux-3.14.36/drivers/pwm/pwm-imx.c linux-openelec/drivers/pwm/pwm-imx.c
+--- linux-3.14.36/drivers/pwm/pwm-imx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/pwm/pwm-imx.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1,4 +1,5 @@
+ /*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * simple driver for PWM (Pulse Width Modulator) controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -293,11 +294,34 @@
+ return pwmchip_remove(&imx->chip);
+ }
+
++#ifdef CONFIG_PM
++static int imx_pwm_suspend(struct device *dev)
++{
++ pinctrl_pm_select_sleep_state(dev);
++
++ return 0;
++}
++
++static int imx_pwm_resume(struct device *dev)
++{
++ pinctrl_pm_select_default_state(dev);
++
++ return 0;
++}
++
++static const struct dev_pm_ops imx_pwm_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(imx_pwm_suspend, imx_pwm_resume)
++};
++#endif
++
+ static struct platform_driver imx_pwm_driver = {
+ .driver = {
+ .name = "imx-pwm",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_pwm_dt_ids,
++#ifdef CONFIG_PM
++ .pm = &imx_pwm_pm_ops,
++#endif
+ },
+ .probe = imx_pwm_probe,
+ .remove = imx_pwm_remove,
+diff -Nur linux-3.14.36/drivers/regulator/anatop-regulator.c linux-openelec/drivers/regulator/anatop-regulator.c
+--- linux-3.14.36/drivers/regulator/anatop-regulator.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/regulator/anatop-regulator.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright (C) 2011, 2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+ /*
+@@ -34,6 +34,22 @@
+ #define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
+ #define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */
+
++#define REG_SET 0x4
++#define REG_CLR 0x8
++#define SOC_PU_FIELD_OFFSET 0x9
++
++/*
++ * for CORE, SOC and PU regulator, the register field
++ * has following definition: 00001 -- Target core voltage
++ * = 0.725V, which means the lowest setting in this
++ * field is 0.725V once the regulator is enabled. So
++ * when these regulators are turned on from off status,
++ * we need to count the voltage step of 0V to 0.7V, it will
++ * need additional delay, so the additional step number is
++ * 700mV / 25mV = 28.
++ */
++#define CORE_REG_ENABLE_STEP_ADD 28
++
+ struct anatop_regulator {
+ const char *name;
+ u32 control_reg;
+@@ -97,12 +113,86 @@
+ return regulator_get_voltage_sel_regmap(reg);
+ }
+
++/*
++ * currently on anatop regulators, only PU regulator supports
++ * enable/disable function, and its voltage must be equal
++ * to SOC voltage, so we need to get SOC voltage then set
++ * into PU regulator. Other regulators are always on due
++ * to hardware design, so enable/disable/is_enabled/enable_time
++ * functions are only used by PU regulator.
++ */
++static int anatop_regmap_enable(struct regulator_dev *reg)
++{
++ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
++ u32 val;
++
++ if (!anatop_reg->control_reg)
++ return -ENOTSUPP;
++
++ regmap_read(anatop_reg->anatop, anatop_reg->control_reg, &val);
++ val &= ((1 << anatop_reg->vol_bit_width) - 1) <<
++ (anatop_reg->vol_bit_shift + SOC_PU_FIELD_OFFSET);
++ regmap_write(anatop_reg->anatop, anatop_reg->control_reg +
++ REG_SET, val >> SOC_PU_FIELD_OFFSET);
++
++ return 0;
++}
++
++static int anatop_regmap_disable(struct regulator_dev *reg)
++{
++ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
++
++ if (!anatop_reg->control_reg)
++ return -ENOTSUPP;
++
++ regmap_write(anatop_reg->anatop, anatop_reg->control_reg +
++ REG_CLR, ((1 << anatop_reg->vol_bit_width) - 1) <<
++ anatop_reg->vol_bit_shift);
++
++ return 0;
++}
++
++static int anatop_regmap_is_enabled(struct regulator_dev *reg)
++{
++ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
++ u32 val;
++
++ if (!anatop_reg->control_reg)
++ return -ENOTSUPP;
++
++ regmap_read(anatop_reg->anatop, anatop_reg->control_reg, &val);
++
++ return (val >> anatop_reg->vol_bit_shift) &
++ ((1 << anatop_reg->vol_bit_width) - 1) ? 1 : 0;
++}
++
++static int anatop_regmap_enable_time(struct regulator_dev *reg)
++{
++ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
++ u32 val, soc_val;
++
++ if (!anatop_reg->control_reg)
++ return -ENOTSUPP;
++
++ regmap_read(anatop_reg->anatop, anatop_reg->control_reg, &val);
++ soc_val = (val >> (anatop_reg->vol_bit_shift +
++ SOC_PU_FIELD_OFFSET)) &
++ ((1 << anatop_reg->vol_bit_width) - 1);
++
++ return anatop_regmap_set_voltage_time_sel(reg, 0,
++ soc_val + CORE_REG_ENABLE_STEP_ADD);
++}
++
+ static struct regulator_ops anatop_rops = {
+ .set_voltage_sel = anatop_regmap_set_voltage_sel,
+ .set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
+ .get_voltage_sel = anatop_regmap_get_voltage_sel,
++ .enable = anatop_regmap_enable,
++ .disable = anatop_regmap_disable,
++ .is_enabled = anatop_regmap_is_enabled,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
++ .enable_time = anatop_regmap_enable_time,
+ };
+
+ static int anatop_regulator_probe(struct platform_device *pdev)
+@@ -196,6 +286,7 @@
+ config.driver_data = sreg;
+ config.of_node = pdev->dev.of_node;
+ config.regmap = sreg->anatop;
++ config.ena_gpio = -EINVAL;
+
+ /* register regulator */
+ rdev = devm_regulator_register(dev, rdesc, &config);
+diff -Nur linux-3.14.36/drivers/regulator/core.c linux-openelec/drivers/regulator/core.c
+--- linux-3.14.36/drivers/regulator/core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/regulator/core.c 2015-07-24 18:03:29.600842002 -0500
+@@ -3,6 +3,7 @@
+ *
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC.
+ * Copyright 2008 SlimLogic Ltd.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+@@ -24,6 +25,7 @@
+ #include <linux/suspend.h>
+ #include <linux/delay.h>
+ #include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/of.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/of_regulator.h>
+@@ -77,7 +79,7 @@
+ */
+ struct regulator_enable_gpio {
+ struct list_head list;
+- int gpio;
++ struct gpio_desc *gpiod;
+ u32 enable_count; /* a number of enabled shared GPIO */
+ u32 request_count; /* a number of requested shared GPIO */
+ unsigned int ena_gpio_invert:1;
+@@ -1655,10 +1657,13 @@
+ const struct regulator_config *config)
+ {
+ struct regulator_enable_gpio *pin;
++ struct gpio_desc *gpiod;
+ int ret;
+
++ gpiod = gpio_to_desc(config->ena_gpio);
++
+ list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
+- if (pin->gpio == config->ena_gpio) {
++ if (pin->gpiod == gpiod) {
+ rdev_dbg(rdev, "GPIO %d is already used\n",
+ config->ena_gpio);
+ goto update_ena_gpio_to_rdev;
+@@ -1677,7 +1682,7 @@
+ return -ENOMEM;
+ }
+
+- pin->gpio = config->ena_gpio;
++ pin->gpiod = gpiod;
+ pin->ena_gpio_invert = config->ena_gpio_invert;
+ list_add(&pin->list, &regulator_ena_gpio_list);
+
+@@ -1696,10 +1701,10 @@
+
+ /* Free the GPIO only in case of no use */
+ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
+- if (pin->gpio == rdev->ena_pin->gpio) {
++ if (pin->gpiod == rdev->ena_pin->gpiod) {
+ if (pin->request_count <= 1) {
+ pin->request_count = 0;
+- gpio_free(pin->gpio);
++ gpiod_put(pin->gpiod);
+ list_del(&pin->list);
+ kfree(pin);
+ } else {
+@@ -1727,8 +1732,8 @@
+ if (enable) {
+ /* Enable GPIO at initial use */
+ if (pin->enable_count == 0)
+- gpio_set_value_cansleep(pin->gpio,
+- !pin->ena_gpio_invert);
++ gpiod_set_value_cansleep(pin->gpiod,
++ !pin->ena_gpio_invert);
+
+ pin->enable_count++;
+ } else {
+@@ -1739,8 +1744,8 @@
+
+ /* Disable GPIO if not used */
+ if (pin->enable_count <= 1) {
+- gpio_set_value_cansleep(pin->gpio,
+- pin->ena_gpio_invert);
++ gpiod_set_value_cansleep(pin->gpiod,
++ pin->ena_gpio_invert);
+ pin->enable_count = 0;
+ }
+ }
+@@ -1817,6 +1822,7 @@
+ }
+
+ trace_regulator_enable_complete(rdev_get_name(rdev));
++ _notifier_call_chain(rdev, REGULATOR_EVENT_ENABLE, NULL);
+
+ return 0;
+ }
+@@ -1894,6 +1900,7 @@
+ {
+ int ret;
+
++ _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_DISABLE, NULL);
+ trace_regulator_disable(rdev_get_name(rdev));
+
+ if (rdev->ena_pin) {
+@@ -2140,7 +2147,7 @@
+ * @regulator: regulator source
+ *
+ * Returns positive if the regulator driver backing the source/client
+- * can change its voltage, false otherwise. Usefull for detecting fixed
++ * can change its voltage, false otherwise. Useful for detecting fixed
+ * or dummy regulators and disabling voltage change logic in the client
+ * driver.
+ */
+@@ -3447,7 +3454,7 @@
+
+ dev_set_drvdata(&rdev->dev, rdev);
+
+- if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
++ if (gpio_is_valid(config->ena_gpio)) {
+ ret = regulator_ena_gpio_request(rdev, config);
+ if (ret != 0) {
+ rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
+diff -Nur linux-3.14.36/drivers/regulator/core.c.orig linux-openelec/drivers/regulator/core.c.orig
+--- linux-3.14.36/drivers/regulator/core.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/regulator/core.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3863 @@
++/*
++ * core.c -- Voltage/Current Regulator framework.
++ *
++ * Copyright 2007, 2008 Wolfson Microelectronics PLC.
++ * Copyright 2008 SlimLogic Ltd.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/debugfs.h>
++#include <linux/device.h>
++#include <linux/slab.h>
++#include <linux/async.h>
++#include <linux/err.h>
++#include <linux/mutex.h>
++#include <linux/suspend.h>
++#include <linux/delay.h>
++#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
++#include <linux/of.h>
++#include <linux/regmap.h>
++#include <linux/regulator/of_regulator.h>
++#include <linux/regulator/consumer.h>
++#include <linux/regulator/driver.h>
++#include <linux/regulator/machine.h>
++#include <linux/module.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/regulator.h>
++
++#include "dummy.h"
++#include "internal.h"
++
++#define rdev_crit(rdev, fmt, ...) \
++ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
++#define rdev_err(rdev, fmt, ...) \
++ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
++#define rdev_warn(rdev, fmt, ...) \
++ pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
++#define rdev_info(rdev, fmt, ...) \
++ pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
++#define rdev_dbg(rdev, fmt, ...) \
++ pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
++
++static DEFINE_MUTEX(regulator_list_mutex);
++static LIST_HEAD(regulator_list);
++static LIST_HEAD(regulator_map_list);
++static LIST_HEAD(regulator_ena_gpio_list);
++static LIST_HEAD(regulator_supply_alias_list);
++static bool has_full_constraints;
++
++static struct dentry *debugfs_root;
++
++/*
++ * struct regulator_map
++ *
++ * Used to provide symbolic supply names to devices.
++ */
++struct regulator_map {
++ struct list_head list;
++ const char *dev_name; /* The dev_name() for the consumer */
++ const char *supply;
++ struct regulator_dev *regulator;
++};
++
++/*
++ * struct regulator_enable_gpio
++ *
++ * Management for shared enable GPIO pin
++ */
++struct regulator_enable_gpio {
++ struct list_head list;
++ struct gpio_desc *gpiod;
++ u32 enable_count; /* a number of enabled shared GPIO */
++ u32 request_count; /* a number of requested shared GPIO */
++ unsigned int ena_gpio_invert:1;
++};
++
++/*
++ * struct regulator_supply_alias
++ *
++ * Used to map lookups for a supply onto an alternative device.
++ */
++struct regulator_supply_alias {
++ struct list_head list;
++ struct device *src_dev;
++ const char *src_supply;
++ struct device *alias_dev;
++ const char *alias_supply;
++};
++
++static int _regulator_is_enabled(struct regulator_dev *rdev);
++static int _regulator_disable(struct regulator_dev *rdev);
++static int _regulator_get_voltage(struct regulator_dev *rdev);
++static int _regulator_get_current_limit(struct regulator_dev *rdev);
++static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
++static void _notifier_call_chain(struct regulator_dev *rdev,
++ unsigned long event, void *data);
++static int _regulator_do_set_voltage(struct regulator_dev *rdev,
++ int min_uV, int max_uV);
++static struct regulator *create_regulator(struct regulator_dev *rdev,
++ struct device *dev,
++ const char *supply_name);
++
++static const char *rdev_get_name(struct regulator_dev *rdev)
++{
++ if (rdev->constraints && rdev->constraints->name)
++ return rdev->constraints->name;
++ else if (rdev->desc->name)
++ return rdev->desc->name;
++ else
++ return "";
++}
++
++static bool have_full_constraints(void)
++{
++ return has_full_constraints || of_have_populated_dt();
++}
++
++/**
++ * of_get_regulator - get a regulator device node based on supply name
++ * @dev: Device pointer for the consumer (of regulator) device
++ * @supply: regulator supply name
++ *
++ * Extract the regulator device node corresponding to the supply name.
++ * returns the device node corresponding to the regulator if found, else
++ * returns NULL.
++ */
++static struct device_node *of_get_regulator(struct device *dev, const char *supply)
++{
++ struct device_node *regnode = NULL;
++ char prop_name[32]; /* 32 is max size of property name */
++
++ dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);
++
++ snprintf(prop_name, 32, "%s-supply", supply);
++ regnode = of_parse_phandle(dev->of_node, prop_name, 0);
++
++ if (!regnode) {
++ dev_dbg(dev, "Looking up %s property in node %s failed",
++ prop_name, dev->of_node->full_name);
++ return NULL;
++ }
++ return regnode;
++}
++
++static int _regulator_can_change_status(struct regulator_dev *rdev)
++{
++ if (!rdev->constraints)
++ return 0;
++
++ if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
++ return 1;
++ else
++ return 0;
++}
++
++/* Platform voltage constraint check */
++static int regulator_check_voltage(struct regulator_dev *rdev,
++ int *min_uV, int *max_uV)
++{
++ BUG_ON(*min_uV > *max_uV);
++
++ if (!rdev->constraints) {
++ rdev_err(rdev, "no constraints\n");
++ return -ENODEV;
++ }
++ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
++ rdev_err(rdev, "operation not allowed\n");
++ return -EPERM;
++ }
++
++ if (*max_uV > rdev->constraints->max_uV)
++ *max_uV = rdev->constraints->max_uV;
++ if (*min_uV < rdev->constraints->min_uV)
++ *min_uV = rdev->constraints->min_uV;
++
++ if (*min_uV > *max_uV) {
++ rdev_err(rdev, "unsupportable voltage range: %d-%duV\n",
++ *min_uV, *max_uV);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/* Make sure we select a voltage that suits the needs of all
++ * regulator consumers
++ */
++static int regulator_check_consumers(struct regulator_dev *rdev,
++ int *min_uV, int *max_uV)
++{
++ struct regulator *regulator;
++
++ list_for_each_entry(regulator, &rdev->consumer_list, list) {
++ /*
++ * Assume consumers that didn't say anything are OK
++ * with anything in the constraint range.
++ */
++ if (!regulator->min_uV && !regulator->max_uV)
++ continue;
++
++ if (*max_uV > regulator->max_uV)
++ *max_uV = regulator->max_uV;
++ if (*min_uV < regulator->min_uV)
++ *min_uV = regulator->min_uV;
++ }
++
++ if (*min_uV > *max_uV) {
++ rdev_err(rdev, "Restricting voltage, %u-%uuV\n",
++ *min_uV, *max_uV);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/* current constraint check */
++static int regulator_check_current_limit(struct regulator_dev *rdev,
++ int *min_uA, int *max_uA)
++{
++ BUG_ON(*min_uA > *max_uA);
++
++ if (!rdev->constraints) {
++ rdev_err(rdev, "no constraints\n");
++ return -ENODEV;
++ }
++ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
++ rdev_err(rdev, "operation not allowed\n");
++ return -EPERM;
++ }
++
++ if (*max_uA > rdev->constraints->max_uA)
++ *max_uA = rdev->constraints->max_uA;
++ if (*min_uA < rdev->constraints->min_uA)
++ *min_uA = rdev->constraints->min_uA;
++
++ if (*min_uA > *max_uA) {
++ rdev_err(rdev, "unsupportable current range: %d-%duA\n",
++ *min_uA, *max_uA);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/* operating mode constraint check */
++static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
++{
++ switch (*mode) {
++ case REGULATOR_MODE_FAST:
++ case REGULATOR_MODE_NORMAL:
++ case REGULATOR_MODE_IDLE:
++ case REGULATOR_MODE_STANDBY:
++ break;
++ default:
++ rdev_err(rdev, "invalid mode %x specified\n", *mode);
++ return -EINVAL;
++ }
++
++ if (!rdev->constraints) {
++ rdev_err(rdev, "no constraints\n");
++ return -ENODEV;
++ }
++ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
++ rdev_err(rdev, "operation not allowed\n");
++ return -EPERM;
++ }
++
++ /* The modes are bitmasks, the most power hungry modes having
++ * the lowest values. If the requested mode isn't supported
++ * try higher modes. */
++ while (*mode) {
++ if (rdev->constraints->valid_modes_mask & *mode)
++ return 0;
++ *mode /= 2;
++ }
++
++ return -EINVAL;
++}
++
++/* dynamic regulator mode switching constraint check */
++static int regulator_check_drms(struct regulator_dev *rdev)
++{
++ if (!rdev->constraints) {
++ rdev_err(rdev, "no constraints\n");
++ return -ENODEV;
++ }
++ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
++ rdev_err(rdev, "operation not allowed\n");
++ return -EPERM;
++ }
++ return 0;
++}
++
++static ssize_t regulator_uV_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++ ssize_t ret;
++
++ mutex_lock(&rdev->mutex);
++ ret = sprintf(buf, "%d\n", _regulator_get_voltage(rdev));
++ mutex_unlock(&rdev->mutex);
++
++ return ret;
++}
++static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL);
++
++static ssize_t regulator_uA_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev));
++}
++static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL);
++
++static ssize_t name_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%s\n", rdev_get_name(rdev));
++}
++static DEVICE_ATTR_RO(name);
++
++static ssize_t regulator_print_opmode(char *buf, int mode)
++{
++ switch (mode) {
++ case REGULATOR_MODE_FAST:
++ return sprintf(buf, "fast\n");
++ case REGULATOR_MODE_NORMAL:
++ return sprintf(buf, "normal\n");
++ case REGULATOR_MODE_IDLE:
++ return sprintf(buf, "idle\n");
++ case REGULATOR_MODE_STANDBY:
++ return sprintf(buf, "standby\n");
++ }
++ return sprintf(buf, "unknown\n");
++}
++
++static ssize_t regulator_opmode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return regulator_print_opmode(buf, _regulator_get_mode(rdev));
++}
++static DEVICE_ATTR(opmode, 0444, regulator_opmode_show, NULL);
++
++static ssize_t regulator_print_state(char *buf, int state)
++{
++ if (state > 0)
++ return sprintf(buf, "enabled\n");
++ else if (state == 0)
++ return sprintf(buf, "disabled\n");
++ else
++ return sprintf(buf, "unknown\n");
++}
++
++static ssize_t regulator_state_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++ ssize_t ret;
++
++ mutex_lock(&rdev->mutex);
++ ret = regulator_print_state(buf, _regulator_is_enabled(rdev));
++ mutex_unlock(&rdev->mutex);
++
++ return ret;
++}
++static DEVICE_ATTR(state, 0444, regulator_state_show, NULL);
++
++static ssize_t regulator_status_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++ int status;
++ char *label;
++
++ status = rdev->desc->ops->get_status(rdev);
++ if (status < 0)
++ return status;
++
++ switch (status) {
++ case REGULATOR_STATUS_OFF:
++ label = "off";
++ break;
++ case REGULATOR_STATUS_ON:
++ label = "on";
++ break;
++ case REGULATOR_STATUS_ERROR:
++ label = "error";
++ break;
++ case REGULATOR_STATUS_FAST:
++ label = "fast";
++ break;
++ case REGULATOR_STATUS_NORMAL:
++ label = "normal";
++ break;
++ case REGULATOR_STATUS_IDLE:
++ label = "idle";
++ break;
++ case REGULATOR_STATUS_STANDBY:
++ label = "standby";
++ break;
++ case REGULATOR_STATUS_BYPASS:
++ label = "bypass";
++ break;
++ case REGULATOR_STATUS_UNDEFINED:
++ label = "undefined";
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ return sprintf(buf, "%s\n", label);
++}
++static DEVICE_ATTR(status, 0444, regulator_status_show, NULL);
++
++static ssize_t regulator_min_uA_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ if (!rdev->constraints)
++ return sprintf(buf, "constraint not defined\n");
++
++ return sprintf(buf, "%d\n", rdev->constraints->min_uA);
++}
++static DEVICE_ATTR(min_microamps, 0444, regulator_min_uA_show, NULL);
++
++static ssize_t regulator_max_uA_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ if (!rdev->constraints)
++ return sprintf(buf, "constraint not defined\n");
++
++ return sprintf(buf, "%d\n", rdev->constraints->max_uA);
++}
++static DEVICE_ATTR(max_microamps, 0444, regulator_max_uA_show, NULL);
++
++static ssize_t regulator_min_uV_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ if (!rdev->constraints)
++ return sprintf(buf, "constraint not defined\n");
++
++ return sprintf(buf, "%d\n", rdev->constraints->min_uV);
++}
++static DEVICE_ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL);
++
++static ssize_t regulator_max_uV_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ if (!rdev->constraints)
++ return sprintf(buf, "constraint not defined\n");
++
++ return sprintf(buf, "%d\n", rdev->constraints->max_uV);
++}
++static DEVICE_ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL);
++
++static ssize_t regulator_total_uA_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++ struct regulator *regulator;
++ int uA = 0;
++
++ mutex_lock(&rdev->mutex);
++ list_for_each_entry(regulator, &rdev->consumer_list, list)
++ uA += regulator->uA_load;
++ mutex_unlock(&rdev->mutex);
++ return sprintf(buf, "%d\n", uA);
++}
++static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);
++
++static ssize_t num_users_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++ return sprintf(buf, "%d\n", rdev->use_count);
++}
++static DEVICE_ATTR_RO(num_users);
++
++static ssize_t type_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ switch (rdev->desc->type) {
++ case REGULATOR_VOLTAGE:
++ return sprintf(buf, "voltage\n");
++ case REGULATOR_CURRENT:
++ return sprintf(buf, "current\n");
++ }
++ return sprintf(buf, "unknown\n");
++}
++static DEVICE_ATTR_RO(type);
++
++static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV);
++}
++static DEVICE_ATTR(suspend_mem_microvolts, 0444,
++ regulator_suspend_mem_uV_show, NULL);
++
++static ssize_t regulator_suspend_disk_uV_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV);
++}
++static DEVICE_ATTR(suspend_disk_microvolts, 0444,
++ regulator_suspend_disk_uV_show, NULL);
++
++static ssize_t regulator_suspend_standby_uV_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV);
++}
++static DEVICE_ATTR(suspend_standby_microvolts, 0444,
++ regulator_suspend_standby_uV_show, NULL);
++
++static ssize_t regulator_suspend_mem_mode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return regulator_print_opmode(buf,
++ rdev->constraints->state_mem.mode);
++}
++static DEVICE_ATTR(suspend_mem_mode, 0444,
++ regulator_suspend_mem_mode_show, NULL);
++
++static ssize_t regulator_suspend_disk_mode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return regulator_print_opmode(buf,
++ rdev->constraints->state_disk.mode);
++}
++static DEVICE_ATTR(suspend_disk_mode, 0444,
++ regulator_suspend_disk_mode_show, NULL);
++
++static ssize_t regulator_suspend_standby_mode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return regulator_print_opmode(buf,
++ rdev->constraints->state_standby.mode);
++}
++static DEVICE_ATTR(suspend_standby_mode, 0444,
++ regulator_suspend_standby_mode_show, NULL);
++
++static ssize_t regulator_suspend_mem_state_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return regulator_print_state(buf,
++ rdev->constraints->state_mem.enabled);
++}
++static DEVICE_ATTR(suspend_mem_state, 0444,
++ regulator_suspend_mem_state_show, NULL);
++
++static ssize_t regulator_suspend_disk_state_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return regulator_print_state(buf,
++ rdev->constraints->state_disk.enabled);
++}
++static DEVICE_ATTR(suspend_disk_state, 0444,
++ regulator_suspend_disk_state_show, NULL);
++
++static ssize_t regulator_suspend_standby_state_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++
++ return regulator_print_state(buf,
++ rdev->constraints->state_standby.enabled);
++}
++static DEVICE_ATTR(suspend_standby_state, 0444,
++ regulator_suspend_standby_state_show, NULL);
++
++static ssize_t regulator_bypass_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++ const char *report;
++ bool bypass;
++ int ret;
++
++ ret = rdev->desc->ops->get_bypass(rdev, &bypass);
++
++ if (ret != 0)
++ report = "unknown";
++ else if (bypass)
++ report = "enabled";
++ else
++ report = "disabled";
++
++ return sprintf(buf, "%s\n", report);
++}
++static DEVICE_ATTR(bypass, 0444,
++ regulator_bypass_show, NULL);
++
++/*
++ * These are the only attributes are present for all regulators.
++ * Other attributes are a function of regulator functionality.
++ */
++static struct attribute *regulator_dev_attrs[] = {
++ &dev_attr_name.attr,
++ &dev_attr_num_users.attr,
++ &dev_attr_type.attr,
++ NULL,
++};
++ATTRIBUTE_GROUPS(regulator_dev);
++
++static void regulator_dev_release(struct device *dev)
++{
++ struct regulator_dev *rdev = dev_get_drvdata(dev);
++ kfree(rdev);
++}
++
++static struct class regulator_class = {
++ .name = "regulator",
++ .dev_release = regulator_dev_release,
++ .dev_groups = regulator_dev_groups,
++};
++
++/* Calculate the new optimum regulator operating mode based on the new total
++ * consumer load. All locks held by caller */
++static void drms_uA_update(struct regulator_dev *rdev)
++{
++ struct regulator *sibling;
++ int current_uA = 0, output_uV, input_uV, err;
++ unsigned int mode;
++
++ err = regulator_check_drms(rdev);
++ if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
++ (!rdev->desc->ops->get_voltage &&
++ !rdev->desc->ops->get_voltage_sel) ||
++ !rdev->desc->ops->set_mode)
++ return;
++
++ /* get output voltage */
++ output_uV = _regulator_get_voltage(rdev);
++ if (output_uV <= 0)
++ return;
++
++ /* get input voltage */
++ input_uV = 0;
++ if (rdev->supply)
++ input_uV = regulator_get_voltage(rdev->supply);
++ if (input_uV <= 0)
++ input_uV = rdev->constraints->input_uV;
++ if (input_uV <= 0)
++ return;
++
++ /* calc total requested load */
++ list_for_each_entry(sibling, &rdev->consumer_list, list)
++ current_uA += sibling->uA_load;
++
++ /* now get the optimum mode for our new total regulator load */
++ mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
++ output_uV, current_uA);
++
++ /* check the new mode is allowed */
++ err = regulator_mode_constrain(rdev, &mode);
++ if (err == 0)
++ rdev->desc->ops->set_mode(rdev, mode);
++}
++
++static int suspend_set_state(struct regulator_dev *rdev,
++ struct regulator_state *rstate)
++{
++ int ret = 0;
++
++ /* If we have no suspend mode configuration don't set anything;
++ * only warn if the driver implements set_suspend_voltage or
++ * set_suspend_mode callback.
++ */
++ if (!rstate->enabled && !rstate->disabled) {
++ if (rdev->desc->ops->set_suspend_voltage ||
++ rdev->desc->ops->set_suspend_mode)
++ rdev_warn(rdev, "No configuration\n");
++ return 0;
++ }
++
++ if (rstate->enabled && rstate->disabled) {
++ rdev_err(rdev, "invalid configuration\n");
++ return -EINVAL;
++ }
++
++ if (rstate->enabled && rdev->desc->ops->set_suspend_enable)
++ ret = rdev->desc->ops->set_suspend_enable(rdev);
++ else if (rstate->disabled && rdev->desc->ops->set_suspend_disable)
++ ret = rdev->desc->ops->set_suspend_disable(rdev);
++ else /* OK if set_suspend_enable or set_suspend_disable is NULL */
++ ret = 0;
++
++ if (ret < 0) {
++ rdev_err(rdev, "failed to enabled/disable\n");
++ return ret;
++ }
++
++ if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) {
++ ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to set voltage\n");
++ return ret;
++ }
++ }
++
++ if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) {
++ ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to set mode\n");
++ return ret;
++ }
++ }
++ return ret;
++}
++
++/* locks held by caller */
++static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
++{
++ if (!rdev->constraints)
++ return -EINVAL;
++
++ switch (state) {
++ case PM_SUSPEND_STANDBY:
++ return suspend_set_state(rdev,
++ &rdev->constraints->state_standby);
++ case PM_SUSPEND_MEM:
++ return suspend_set_state(rdev,
++ &rdev->constraints->state_mem);
++ case PM_SUSPEND_MAX:
++ return suspend_set_state(rdev,
++ &rdev->constraints->state_disk);
++ default:
++ return -EINVAL;
++ }
++}
++
++static void print_constraints(struct regulator_dev *rdev)
++{
++ struct regulation_constraints *constraints = rdev->constraints;
++ char buf[80] = "";
++ int count = 0;
++ int ret;
++
++ if (constraints->min_uV && constraints->max_uV) {
++ if (constraints->min_uV == constraints->max_uV)
++ count += sprintf(buf + count, "%d mV ",
++ constraints->min_uV / 1000);
++ else
++ count += sprintf(buf + count, "%d <--> %d mV ",
++ constraints->min_uV / 1000,
++ constraints->max_uV / 1000);
++ }
++
++ if (!constraints->min_uV ||
++ constraints->min_uV != constraints->max_uV) {
++ ret = _regulator_get_voltage(rdev);
++ if (ret > 0)
++ count += sprintf(buf + count, "at %d mV ", ret / 1000);
++ }
++
++ if (constraints->uV_offset)
++ count += sprintf(buf, "%dmV offset ",
++ constraints->uV_offset / 1000);
++
++ if (constraints->min_uA && constraints->max_uA) {
++ if (constraints->min_uA == constraints->max_uA)
++ count += sprintf(buf + count, "%d mA ",
++ constraints->min_uA / 1000);
++ else
++ count += sprintf(buf + count, "%d <--> %d mA ",
++ constraints->min_uA / 1000,
++ constraints->max_uA / 1000);
++ }
++
++ if (!constraints->min_uA ||
++ constraints->min_uA != constraints->max_uA) {
++ ret = _regulator_get_current_limit(rdev);
++ if (ret > 0)
++ count += sprintf(buf + count, "at %d mA ", ret / 1000);
++ }
++
++ if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
++ count += sprintf(buf + count, "fast ");
++ if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL)
++ count += sprintf(buf + count, "normal ");
++ if (constraints->valid_modes_mask & REGULATOR_MODE_IDLE)
++ count += sprintf(buf + count, "idle ");
++ if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
++ count += sprintf(buf + count, "standby");
++
++ if (!count)
++ sprintf(buf, "no parameters");
++
++ rdev_info(rdev, "%s\n", buf);
++
++ if ((constraints->min_uV != constraints->max_uV) &&
++ !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE))
++ rdev_warn(rdev,
++ "Voltage range but no REGULATOR_CHANGE_VOLTAGE\n");
++}
++
++static int machine_constraints_voltage(struct regulator_dev *rdev,
++ struct regulation_constraints *constraints)
++{
++ struct regulator_ops *ops = rdev->desc->ops;
++ int ret;
++
++ /* do we need to apply the constraint voltage */
++ if (rdev->constraints->apply_uV &&
++ rdev->constraints->min_uV == rdev->constraints->max_uV) {
++ ret = _regulator_do_set_voltage(rdev,
++ rdev->constraints->min_uV,
++ rdev->constraints->max_uV);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to apply %duV constraint\n",
++ rdev->constraints->min_uV);
++ return ret;
++ }
++ }
++
++ /* constrain machine-level voltage specs to fit
++ * the actual range supported by this regulator.
++ */
++ if (ops->list_voltage && rdev->desc->n_voltages) {
++ int count = rdev->desc->n_voltages;
++ int i;
++ int min_uV = INT_MAX;
++ int max_uV = INT_MIN;
++ int cmin = constraints->min_uV;
++ int cmax = constraints->max_uV;
++
++ /* it's safe to autoconfigure fixed-voltage supplies
++ and the constraints are used by list_voltage. */
++ if (count == 1 && !cmin) {
++ cmin = 1;
++ cmax = INT_MAX;
++ constraints->min_uV = cmin;
++ constraints->max_uV = cmax;
++ }
++
++ /* voltage constraints are optional */
++ if ((cmin == 0) && (cmax == 0))
++ return 0;
++
++ /* else require explicit machine-level constraints */
++ if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
++ rdev_err(rdev, "invalid voltage constraints\n");
++ return -EINVAL;
++ }
++
++ /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
++ for (i = 0; i < count; i++) {
++ int value;
++
++ value = ops->list_voltage(rdev, i);
++ if (value <= 0)
++ continue;
++
++ /* maybe adjust [min_uV..max_uV] */
++ if (value >= cmin && value < min_uV)
++ min_uV = value;
++ if (value <= cmax && value > max_uV)
++ max_uV = value;
++ }
++
++ /* final: [min_uV..max_uV] valid iff constraints valid */
++ if (max_uV < min_uV) {
++ rdev_err(rdev,
++ "unsupportable voltage constraints %u-%uuV\n",
++ min_uV, max_uV);
++ return -EINVAL;
++ }
++
++ /* use regulator's subset of machine constraints */
++ if (constraints->min_uV < min_uV) {
++ rdev_dbg(rdev, "override min_uV, %d -> %d\n",
++ constraints->min_uV, min_uV);
++ constraints->min_uV = min_uV;
++ }
++ if (constraints->max_uV > max_uV) {
++ rdev_dbg(rdev, "override max_uV, %d -> %d\n",
++ constraints->max_uV, max_uV);
++ constraints->max_uV = max_uV;
++ }
++ }
++
++ return 0;
++}
++
++static int machine_constraints_current(struct regulator_dev *rdev,
++ struct regulation_constraints *constraints)
++{
++ struct regulator_ops *ops = rdev->desc->ops;
++ int ret;
++
++ if (!constraints->min_uA && !constraints->max_uA)
++ return 0;
++
++ if (constraints->min_uA > constraints->max_uA) {
++ rdev_err(rdev, "Invalid current constraints\n");
++ return -EINVAL;
++ }
++
++ if (!ops->set_current_limit || !ops->get_current_limit) {
++ rdev_warn(rdev, "Operation of current configuration missing\n");
++ return 0;
++ }
++
++ /* Set regulator current in constraints range */
++ ret = ops->set_current_limit(rdev, constraints->min_uA,
++ constraints->max_uA);
++ if (ret < 0) {
++ rdev_err(rdev, "Failed to set current constraint, %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int _regulator_do_enable(struct regulator_dev *rdev);
++
++/**
++ * set_machine_constraints - sets regulator constraints
++ * @rdev: regulator source
++ * @constraints: constraints to apply
++ *
++ * Allows platform initialisation code to define and constrain
++ * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
++ * Constraints *must* be set by platform code in order for some
++ * regulator operations to proceed i.e. set_voltage, set_current_limit,
++ * set_mode.
++ */
++static int set_machine_constraints(struct regulator_dev *rdev,
++ const struct regulation_constraints *constraints)
++{
++ int ret = 0;
++ struct regulator_ops *ops = rdev->desc->ops;
++
++ if (constraints)
++ rdev->constraints = kmemdup(constraints, sizeof(*constraints),
++ GFP_KERNEL);
++ else
++ rdev->constraints = kzalloc(sizeof(*constraints),
++ GFP_KERNEL);
++ if (!rdev->constraints)
++ return -ENOMEM;
++
++ ret = machine_constraints_voltage(rdev, rdev->constraints);
++ if (ret != 0)
++ goto out;
++
++ ret = machine_constraints_current(rdev, rdev->constraints);
++ if (ret != 0)
++ goto out;
++
++ /* do we need to setup our suspend state */
++ if (rdev->constraints->initial_state) {
++ ret = suspend_prepare(rdev, rdev->constraints->initial_state);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to set suspend state\n");
++ goto out;
++ }
++ }
++
++ if (rdev->constraints->initial_mode) {
++ if (!ops->set_mode) {
++ rdev_err(rdev, "no set_mode operation\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to set initial mode: %d\n", ret);
++ goto out;
++ }
++ }
++
++ /* If the constraints say the regulator should be on at this point
++ * and we have control then make sure it is enabled.
++ */
++ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
++ ret = _regulator_do_enable(rdev);
++ if (ret < 0 && ret != -EINVAL) {
++ rdev_err(rdev, "failed to enable\n");
++ goto out;
++ }
++ }
++
++ if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
++ && ops->set_ramp_delay) {
++ ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to set ramp_delay\n");
++ goto out;
++ }
++ }
++
++ print_constraints(rdev);
++ return 0;
++out:
++ kfree(rdev->constraints);
++ rdev->constraints = NULL;
++ return ret;
++}
++
++/**
++ * set_supply - set regulator supply regulator
++ * @rdev: regulator name
++ * @supply_rdev: supply regulator name
++ *
++ * Called by platform initialisation code to set the supply regulator for this
++ * regulator. This ensures that a regulator's supply will also be enabled by the
++ * core if its child is enabled.
++ */
++static int set_supply(struct regulator_dev *rdev,
++ struct regulator_dev *supply_rdev)
++{
++ int err;
++
++ rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
++
++ rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
++ if (rdev->supply == NULL) {
++ err = -ENOMEM;
++ return err;
++ }
++ supply_rdev->open_count++;
++
++ return 0;
++}
++
++/**
++ * set_consumer_device_supply - Bind a regulator to a symbolic supply
++ * @rdev: regulator source
++ * @consumer_dev_name: dev_name() string for device supply applies to
++ * @supply: symbolic name for supply
++ *
++ * Allows platform initialisation code to map physical regulator
++ * sources to symbolic names for supplies for use by devices. Devices
++ * should use these symbolic names to request regulators, avoiding the
++ * need to provide board-specific regulator names as platform data.
++ */
++static int set_consumer_device_supply(struct regulator_dev *rdev,
++ const char *consumer_dev_name,
++ const char *supply)
++{
++ struct regulator_map *node;
++ int has_dev;
++
++ if (supply == NULL)
++ return -EINVAL;
++
++ if (consumer_dev_name != NULL)
++ has_dev = 1;
++ else
++ has_dev = 0;
++
++ list_for_each_entry(node, &regulator_map_list, list) {
++ if (node->dev_name && consumer_dev_name) {
++ if (strcmp(node->dev_name, consumer_dev_name) != 0)
++ continue;
++ } else if (node->dev_name || consumer_dev_name) {
++ continue;
++ }
++
++ if (strcmp(node->supply, supply) != 0)
++ continue;
++
++ pr_debug("%s: %s/%s is '%s' supply; fail %s/%s\n",
++ consumer_dev_name,
++ dev_name(&node->regulator->dev),
++ node->regulator->desc->name,
++ supply,
++ dev_name(&rdev->dev), rdev_get_name(rdev));
++ return -EBUSY;
++ }
++
++ node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
++ if (node == NULL)
++ return -ENOMEM;
++
++ node->regulator = rdev;
++ node->supply = supply;
++
++ if (has_dev) {
++ node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
++ if (node->dev_name == NULL) {
++ kfree(node);
++ return -ENOMEM;
++ }
++ }
++
++ list_add(&node->list, &regulator_map_list);
++ return 0;
++}
++
++static void unset_regulator_supplies(struct regulator_dev *rdev)
++{
++ struct regulator_map *node, *n;
++
++ list_for_each_entry_safe(node, n, &regulator_map_list, list) {
++ if (rdev == node->regulator) {
++ list_del(&node->list);
++ kfree(node->dev_name);
++ kfree(node);
++ }
++ }
++}
++
++#define REG_STR_SIZE 64
++
++static struct regulator *create_regulator(struct regulator_dev *rdev,
++ struct device *dev,
++ const char *supply_name)
++{
++ struct regulator *regulator;
++ char buf[REG_STR_SIZE];
++ int err, size;
++
++ regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
++ if (regulator == NULL)
++ return NULL;
++
++ mutex_lock(&rdev->mutex);
++ regulator->rdev = rdev;
++ list_add(&regulator->list, &rdev->consumer_list);
++
++ if (dev) {
++ regulator->dev = dev;
++
++ /* Add a link to the device sysfs entry */
++ size = scnprintf(buf, REG_STR_SIZE, "%s-%s",
++ dev->kobj.name, supply_name);
++ if (size >= REG_STR_SIZE)
++ goto overflow_err;
++
++ regulator->supply_name = kstrdup(buf, GFP_KERNEL);
++ if (regulator->supply_name == NULL)
++ goto overflow_err;
++
++ err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
++ buf);
++ if (err) {
++ rdev_warn(rdev, "could not add device link %s err %d\n",
++ dev->kobj.name, err);
++ /* non-fatal */
++ }
++ } else {
++ regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
++ if (regulator->supply_name == NULL)
++ goto overflow_err;
++ }
++
++ regulator->debugfs = debugfs_create_dir(regulator->supply_name,
++ rdev->debugfs);
++ if (!regulator->debugfs) {
++ rdev_warn(rdev, "Failed to create debugfs directory\n");
++ } else {
++ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
++ &regulator->uA_load);
++ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
++ &regulator->min_uV);
++ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
++ &regulator->max_uV);
++ }
++
++ /*
++ * Check now if the regulator is an always on regulator - if
++ * it is then we don't need to do nearly so much work for
++ * enable/disable calls.
++ */
++ if (!_regulator_can_change_status(rdev) &&
++ _regulator_is_enabled(rdev))
++ regulator->always_on = true;
++
++ mutex_unlock(&rdev->mutex);
++ return regulator;
++overflow_err:
++ list_del(&regulator->list);
++ kfree(regulator);
++ mutex_unlock(&rdev->mutex);
++ return NULL;
++}
++
++static int _regulator_get_enable_time(struct regulator_dev *rdev)
++{
++ if (rdev->constraints && rdev->constraints->enable_time)
++ return rdev->constraints->enable_time;
++ if (!rdev->desc->ops->enable_time)
++ return rdev->desc->enable_time;
++ return rdev->desc->ops->enable_time(rdev);
++}
++
++static struct regulator_supply_alias *regulator_find_supply_alias(
++ struct device *dev, const char *supply)
++{
++ struct regulator_supply_alias *map;
++
++ list_for_each_entry(map, &regulator_supply_alias_list, list)
++ if (map->src_dev == dev && strcmp(map->src_supply, supply) == 0)
++ return map;
++
++ return NULL;
++}
++
++static void regulator_supply_alias(struct device **dev, const char **supply)
++{
++ struct regulator_supply_alias *map;
++
++ map = regulator_find_supply_alias(*dev, *supply);
++ if (map) {
++ dev_dbg(*dev, "Mapping supply %s to %s,%s\n",
++ *supply, map->alias_supply,
++ dev_name(map->alias_dev));
++ *dev = map->alias_dev;
++ *supply = map->alias_supply;
++ }
++}
++
++static struct regulator_dev *regulator_dev_lookup(struct device *dev,
++ const char *supply,
++ int *ret)
++{
++ struct regulator_dev *r;
++ struct device_node *node;
++ struct regulator_map *map;
++ const char *devname = NULL;
++
++ regulator_supply_alias(&dev, &supply);
++
++ /* first do a dt based lookup */
++ if (dev && dev->of_node) {
++ node = of_get_regulator(dev, supply);
++ if (node) {
++ list_for_each_entry(r, &regulator_list, list)
++ if (r->dev.parent &&
++ node == r->dev.of_node)
++ return r;
++ *ret = -EPROBE_DEFER;
++ return NULL;
++ } else {
++ /*
++ * If we couldn't even get the node then it's
++ * not just that the device didn't register
++ * yet, there's no node and we'll never
++ * succeed.
++ */
++ *ret = -ENODEV;
++ }
++ }
++
++ /* if not found, try doing it non-dt way */
++ if (dev)
++ devname = dev_name(dev);
++
++ list_for_each_entry(r, &regulator_list, list)
++ if (strcmp(rdev_get_name(r), supply) == 0)
++ return r;
++
++ list_for_each_entry(map, &regulator_map_list, list) {
++ /* If the mapping has a device set up it must match */
++ if (map->dev_name &&
++ (!devname || strcmp(map->dev_name, devname)))
++ continue;
++
++ if (strcmp(map->supply, supply) == 0)
++ return map->regulator;
++ }
++
++
++ return NULL;
++}
++
++/* Internal regulator request function */
++static struct regulator *_regulator_get(struct device *dev, const char *id,
++ bool exclusive, bool allow_dummy)
++{
++ struct regulator_dev *rdev;
++ struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
++ const char *devname = NULL;
++ int ret;
++
++ if (id == NULL) {
++ pr_err("get() with no identifier\n");
++ return ERR_PTR(-EINVAL);
++ }
++
++ if (dev)
++ devname = dev_name(dev);
++
++ if (have_full_constraints())
++ ret = -ENODEV;
++ else
++ ret = -EPROBE_DEFER;
++
++ mutex_lock(&regulator_list_mutex);
++
++ rdev = regulator_dev_lookup(dev, id, &ret);
++ if (rdev)
++ goto found;
++
++ regulator = ERR_PTR(ret);
++
++ /*
++ * If we have return value from dev_lookup fail, we do not expect to
++ * succeed, so, quit with appropriate error value
++ */
++ if (ret && ret != -ENODEV)
++ goto out;
++
++ if (!devname)
++ devname = "deviceless";
++
++ /*
++ * Assume that a regulator is physically present and enabled
++ * even if it isn't hooked up and just provide a dummy.
++ */
++ if (have_full_constraints() && allow_dummy) {
++ pr_warn("%s supply %s not found, using dummy regulator\n",
++ devname, id);
++
++ rdev = dummy_regulator_rdev;
++ goto found;
++ /* Don't log an error when called from regulator_get_optional() */
++ } else if (!have_full_constraints() || exclusive) {
++ dev_warn(dev, "dummy supplies not allowed\n");
++ }
++
++ mutex_unlock(&regulator_list_mutex);
++ return regulator;
++
++found:
++ if (rdev->exclusive) {
++ regulator = ERR_PTR(-EPERM);
++ goto out;
++ }
++
++ if (exclusive && rdev->open_count) {
++ regulator = ERR_PTR(-EBUSY);
++ goto out;
++ }
++
++ if (!try_module_get(rdev->owner))
++ goto out;
++
++ regulator = create_regulator(rdev, dev, id);
++ if (regulator == NULL) {
++ regulator = ERR_PTR(-ENOMEM);
++ module_put(rdev->owner);
++ goto out;
++ }
++
++ rdev->open_count++;
++ if (exclusive) {
++ rdev->exclusive = 1;
++
++ ret = _regulator_is_enabled(rdev);
++ if (ret > 0)
++ rdev->use_count = 1;
++ else
++ rdev->use_count = 0;
++ }
++
++out:
++ mutex_unlock(&regulator_list_mutex);
++
++ return regulator;
++}
++
++/**
++ * regulator_get - lookup and obtain a reference to a regulator.
++ * @dev: device for regulator "consumer"
++ * @id: Supply name or regulator ID.
++ *
++ * Returns a struct regulator corresponding to the regulator producer,
++ * or IS_ERR() condition containing errno.
++ *
++ * Use of supply names configured via regulator_set_device_supply() is
++ * strongly encouraged. It is recommended that the supply name used
++ * should match the name used for the supply and/or the relevant
++ * device pins in the datasheet.
++ */
++struct regulator *regulator_get(struct device *dev, const char *id)
++{
++ return _regulator_get(dev, id, false, true);
++}
++EXPORT_SYMBOL_GPL(regulator_get);
++
++/**
++ * regulator_get_exclusive - obtain exclusive access to a regulator.
++ * @dev: device for regulator "consumer"
++ * @id: Supply name or regulator ID.
++ *
++ * Returns a struct regulator corresponding to the regulator producer,
++ * or IS_ERR() condition containing errno. Other consumers will be
++ * unable to obtain this regulator while this reference is held and
++ * the use count for the regulator will be initialised to reflect the
++ * current state of the regulator.
++ *
++ * This is intended for use by consumers which cannot tolerate shared
++ * use of the regulator such as those which need to force the
++ * regulator off for correct operation of the hardware they are
++ * controlling.
++ *
++ * Use of supply names configured via regulator_set_device_supply() is
++ * strongly encouraged. It is recommended that the supply name used
++ * should match the name used for the supply and/or the relevant
++ * device pins in the datasheet.
++ */
++struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
++{
++ return _regulator_get(dev, id, true, false);
++}
++EXPORT_SYMBOL_GPL(regulator_get_exclusive);
++
++/**
++ * regulator_get_optional - obtain optional access to a regulator.
++ * @dev: device for regulator "consumer"
++ * @id: Supply name or regulator ID.
++ *
++ * Returns a struct regulator corresponding to the regulator producer,
++ * or IS_ERR() condition containing errno. Other consumers will be
++ * unable to obtain this regulator while this reference is held and
++ * the use count for the regulator will be initialised to reflect the
++ * current state of the regulator.
++ *
++ * This is intended for use by consumers for devices which can have
++ * some supplies unconnected in normal use, such as some MMC devices.
++ * It can allow the regulator core to provide stub supplies for other
++ * supplies requested using normal regulator_get() calls without
++ * disrupting the operation of drivers that can handle absent
++ * supplies.
++ *
++ * Use of supply names configured via regulator_set_device_supply() is
++ * strongly encouraged. It is recommended that the supply name used
++ * should match the name used for the supply and/or the relevant
++ * device pins in the datasheet.
++ */
++struct regulator *regulator_get_optional(struct device *dev, const char *id)
++{
++ return _regulator_get(dev, id, false, false);
++}
++EXPORT_SYMBOL_GPL(regulator_get_optional);
++
++/* Locks held by regulator_put() */
++static void _regulator_put(struct regulator *regulator)
++{
++ struct regulator_dev *rdev;
++
++ if (regulator == NULL || IS_ERR(regulator))
++ return;
++
++ rdev = regulator->rdev;
++
++ debugfs_remove_recursive(regulator->debugfs);
++
++ /* remove any sysfs entries */
++ if (regulator->dev)
++ sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
++ kfree(regulator->supply_name);
++ list_del(&regulator->list);
++ kfree(regulator);
++
++ rdev->open_count--;
++ rdev->exclusive = 0;
++
++ module_put(rdev->owner);
++}
++
++/**
++ * regulator_put - "free" the regulator source
++ * @regulator: regulator source
++ *
++ * Note: drivers must ensure that all regulator_enable calls made on this
++ * regulator source are balanced by regulator_disable calls prior to calling
++ * this function.
++ */
++void regulator_put(struct regulator *regulator)
++{
++ mutex_lock(&regulator_list_mutex);
++ _regulator_put(regulator);
++ mutex_unlock(&regulator_list_mutex);
++}
++EXPORT_SYMBOL_GPL(regulator_put);
++
++/**
++ * regulator_register_supply_alias - Provide device alias for supply lookup
++ *
++ * @dev: device that will be given as the regulator "consumer"
++ * @id: Supply name or regulator ID
++ * @alias_dev: device that should be used to lookup the supply
++ * @alias_id: Supply name or regulator ID that should be used to lookup the
++ * supply
++ *
++ * All lookups for id on dev will instead be conducted for alias_id on
++ * alias_dev.
++ */
++int regulator_register_supply_alias(struct device *dev, const char *id,
++ struct device *alias_dev,
++ const char *alias_id)
++{
++ struct regulator_supply_alias *map;
++
++ map = regulator_find_supply_alias(dev, id);
++ if (map)
++ return -EEXIST;
++
++ map = kzalloc(sizeof(struct regulator_supply_alias), GFP_KERNEL);
++ if (!map)
++ return -ENOMEM;
++
++ map->src_dev = dev;
++ map->src_supply = id;
++ map->alias_dev = alias_dev;
++ map->alias_supply = alias_id;
++
++ list_add(&map->list, &regulator_supply_alias_list);
++
++ pr_info("Adding alias for supply %s,%s -> %s,%s\n",
++ id, dev_name(dev), alias_id, dev_name(alias_dev));
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(regulator_register_supply_alias);
++
++/**
++ * regulator_unregister_supply_alias - Remove device alias
++ *
++ * @dev: device that will be given as the regulator "consumer"
++ * @id: Supply name or regulator ID
++ *
++ * Remove a lookup alias if one exists for id on dev.
++ */
++void regulator_unregister_supply_alias(struct device *dev, const char *id)
++{
++ struct regulator_supply_alias *map;
++
++ map = regulator_find_supply_alias(dev, id);
++ if (map) {
++ list_del(&map->list);
++ kfree(map);
++ }
++}
++EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias);
++
++/**
++ * regulator_bulk_register_supply_alias - register multiple aliases
++ *
++ * @dev: device that will be given as the regulator "consumer"
++ * @id: List of supply names or regulator IDs
++ * @alias_dev: device that should be used to lookup the supply
++ * @alias_id: List of supply names or regulator IDs that should be used to
++ * lookup the supply
++ * @num_id: Number of aliases to register
++ *
++ * @return 0 on success, an errno on failure.
++ *
++ * This helper function allows drivers to register several supply
++ * aliases in one operation. If any of the aliases cannot be
++ * registered any aliases that were registered will be removed
++ * before returning to the caller.
++ */
++int regulator_bulk_register_supply_alias(struct device *dev, const char **id,
++ struct device *alias_dev,
++ const char **alias_id,
++ int num_id)
++{
++ int i;
++ int ret;
++
++ for (i = 0; i < num_id; ++i) {
++ ret = regulator_register_supply_alias(dev, id[i], alias_dev,
++ alias_id[i]);
++ if (ret < 0)
++ goto err;
++ }
++
++ return 0;
++
++err:
++ dev_err(dev,
++ "Failed to create supply alias %s,%s -> %s,%s\n",
++ id[i], dev_name(dev), alias_id[i], dev_name(alias_dev));
++
++ while (--i >= 0)
++ regulator_unregister_supply_alias(dev, id[i]);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_bulk_register_supply_alias);
++
++/**
++ * regulator_bulk_unregister_supply_alias - unregister multiple aliases
++ *
++ * @dev: device that will be given as the regulator "consumer"
++ * @id: List of supply names or regulator IDs
++ * @num_id: Number of aliases to unregister
++ *
++ * This helper function allows drivers to unregister several supply
++ * aliases in one operation.
++ */
++void regulator_bulk_unregister_supply_alias(struct device *dev,
++ const char **id,
++ int num_id)
++{
++ int i;
++
++ for (i = 0; i < num_id; ++i)
++ regulator_unregister_supply_alias(dev, id[i]);
++}
++EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias);
++
++
++/* Manage enable GPIO list. Same GPIO pin can be shared among regulators */
++static int regulator_ena_gpio_request(struct regulator_dev *rdev,
++ const struct regulator_config *config)
++{
++ struct regulator_enable_gpio *pin;
++ struct gpio_desc *gpiod;
++ int ret;
++
++ gpiod = gpio_to_desc(config->ena_gpio);
++
++ list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
++ if (pin->gpiod == gpiod) {
++ rdev_dbg(rdev, "GPIO %d is already used\n",
++ config->ena_gpio);
++ goto update_ena_gpio_to_rdev;
++ }
++ }
++
++ ret = gpio_request_one(config->ena_gpio,
++ GPIOF_DIR_OUT | config->ena_gpio_flags,
++ rdev_get_name(rdev));
++ if (ret)
++ return ret;
++
++ pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL);
++ if (pin == NULL) {
++ gpio_free(config->ena_gpio);
++ return -ENOMEM;
++ }
++
++ pin->gpiod = gpiod;
++ pin->ena_gpio_invert = config->ena_gpio_invert;
++ list_add(&pin->list, &regulator_ena_gpio_list);
++
++update_ena_gpio_to_rdev:
++ pin->request_count++;
++ rdev->ena_pin = pin;
++ return 0;
++}
++
++static void regulator_ena_gpio_free(struct regulator_dev *rdev)
++{
++ struct regulator_enable_gpio *pin, *n;
++
++ if (!rdev->ena_pin)
++ return;
++
++ /* Free the GPIO only in case of no use */
++ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
++ if (pin->gpiod == rdev->ena_pin->gpiod) {
++ if (pin->request_count <= 1) {
++ pin->request_count = 0;
++ gpiod_put(pin->gpiod);
++ list_del(&pin->list);
++ kfree(pin);
++ } else {
++ pin->request_count--;
++ }
++ }
++ }
++}
++
++/**
++ * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control
++ * @rdev: regulator_dev structure
++ * @enable: enable GPIO at initial use?
++ *
++ * GPIO is enabled in case of initial use. (enable_count is 0)
++ * GPIO is disabled when it is not shared any more. (enable_count <= 1)
++ */
++static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
++{
++ struct regulator_enable_gpio *pin = rdev->ena_pin;
++
++ if (!pin)
++ return -EINVAL;
++
++ if (enable) {
++ /* Enable GPIO at initial use */
++ if (pin->enable_count == 0)
++ gpiod_set_value_cansleep(pin->gpiod,
++ !pin->ena_gpio_invert);
++
++ pin->enable_count++;
++ } else {
++ if (pin->enable_count > 1) {
++ pin->enable_count--;
++ return 0;
++ }
++
++ /* Disable GPIO if not used */
++ if (pin->enable_count <= 1) {
++ gpiod_set_value_cansleep(pin->gpiod,
++ pin->ena_gpio_invert);
++ pin->enable_count = 0;
++ }
++ }
++
++ return 0;
++}
++
++static int _regulator_do_enable(struct regulator_dev *rdev)
++{
++ int ret, delay;
++
++ /* Query before enabling in case configuration dependent. */
++ ret = _regulator_get_enable_time(rdev);
++ if (ret >= 0) {
++ delay = ret;
++ } else {
++ rdev_warn(rdev, "enable_time() failed: %d\n", ret);
++ delay = 0;
++ }
++
++ trace_regulator_enable(rdev_get_name(rdev));
++
++ if (rdev->ena_pin) {
++ ret = regulator_ena_gpio_ctrl(rdev, true);
++ if (ret < 0)
++ return ret;
++ rdev->ena_gpio_state = 1;
++ } else if (rdev->desc->ops->enable) {
++ ret = rdev->desc->ops->enable(rdev);
++ if (ret < 0)
++ return ret;
++ } else {
++ return -EINVAL;
++ }
++
++ /* Allow the regulator to ramp; it would be useful to extend
++ * this for bulk operations so that the regulators can ramp
++ * together. */
++ trace_regulator_enable_delay(rdev_get_name(rdev));
++
++ /*
++ * Delay for the requested amount of time as per the guidelines in:
++ *
++ * Documentation/timers/timers-howto.txt
++ *
++ * The assumption here is that regulators will never be enabled in
++ * atomic context and therefore sleeping functions can be used.
++ */
++ if (delay) {
++ unsigned int ms = delay / 1000;
++ unsigned int us = delay % 1000;
++
++ if (ms > 0) {
++ /*
++ * For small enough values, handle super-millisecond
++ * delays in the usleep_range() call below.
++ */
++ if (ms < 20)
++ us += ms * 1000;
++ else
++ msleep(ms);
++ }
++
++ /*
++ * Give the scheduler some room to coalesce with any other
++ * wakeup sources. For delays shorter than 10 us, don't even
++ * bother setting up high-resolution timers and just busy-
++ * loop.
++ */
++ if (us >= 10)
++ usleep_range(us, us + 100);
++ else
++ udelay(us);
++ }
++
++ trace_regulator_enable_complete(rdev_get_name(rdev));
++ _notifier_call_chain(rdev, REGULATOR_EVENT_ENABLE, NULL);
++
++ return 0;
++}
++
++/* locks held by regulator_enable() */
++static int _regulator_enable(struct regulator_dev *rdev)
++{
++ int ret;
++
++ /* check voltage and requested load before enabling */
++ if (rdev->constraints &&
++ (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
++ drms_uA_update(rdev);
++
++ if (rdev->use_count == 0) {
++ /* The regulator may be on if it's not switchable or left on */
++ ret = _regulator_is_enabled(rdev);
++ if (ret == -EINVAL || ret == 0) {
++ if (!_regulator_can_change_status(rdev))
++ return -EPERM;
++
++ ret = _regulator_do_enable(rdev);
++ if (ret < 0)
++ return ret;
++
++ } else if (ret < 0) {
++ rdev_err(rdev, "is_enabled() failed: %d\n", ret);
++ return ret;
++ }
++ /* Fallthrough on positive return values - already enabled */
++ }
++
++ rdev->use_count++;
++
++ return 0;
++}
++
++/**
++ * regulator_enable - enable regulator output
++ * @regulator: regulator source
++ *
++ * Request that the regulator be enabled with the regulator output at
++ * the predefined voltage or current value. Calls to regulator_enable()
++ * must be balanced with calls to regulator_disable().
++ *
++ * NOTE: the output value can be set by other drivers, boot loader or may be
++ * hardwired in the regulator.
++ */
++int regulator_enable(struct regulator *regulator)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret = 0;
++
++ if (regulator->always_on)
++ return 0;
++
++ if (rdev->supply) {
++ ret = regulator_enable(rdev->supply);
++ if (ret != 0)
++ return ret;
++ }
++
++ mutex_lock(&rdev->mutex);
++ ret = _regulator_enable(rdev);
++ mutex_unlock(&rdev->mutex);
++
++ if (ret != 0 && rdev->supply)
++ regulator_disable(rdev->supply);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_enable);
++
++static int _regulator_do_disable(struct regulator_dev *rdev)
++{
++ int ret;
++
++ _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_DISABLE, NULL);
++ trace_regulator_disable(rdev_get_name(rdev));
++
++ if (rdev->ena_pin) {
++ ret = regulator_ena_gpio_ctrl(rdev, false);
++ if (ret < 0)
++ return ret;
++ rdev->ena_gpio_state = 0;
++
++ } else if (rdev->desc->ops->disable) {
++ ret = rdev->desc->ops->disable(rdev);
++ if (ret != 0)
++ return ret;
++ }
++
++ trace_regulator_disable_complete(rdev_get_name(rdev));
++
++ return 0;
++}
++
++/* locks held by regulator_disable() */
++static int _regulator_disable(struct regulator_dev *rdev)
++{
++ int ret = 0;
++
++ if (WARN(rdev->use_count <= 0,
++ "unbalanced disables for %s\n", rdev_get_name(rdev)))
++ return -EIO;
++
++ /* are we the last user and permitted to disable ? */
++ if (rdev->use_count == 1 &&
++ (rdev->constraints && !rdev->constraints->always_on)) {
++
++ /* we are last user */
++ if (_regulator_can_change_status(rdev)) {
++ ret = _regulator_do_disable(rdev);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to disable\n");
++ return ret;
++ }
++ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
++ NULL);
++ }
++
++ rdev->use_count = 0;
++ } else if (rdev->use_count > 1) {
++
++ if (rdev->constraints &&
++ (rdev->constraints->valid_ops_mask &
++ REGULATOR_CHANGE_DRMS))
++ drms_uA_update(rdev);
++
++ rdev->use_count--;
++ }
++
++ return ret;
++}
++
++/**
++ * regulator_disable - disable regulator output
++ * @regulator: regulator source
++ *
++ * Disable the regulator output voltage or current. Calls to
++ * regulator_enable() must be balanced with calls to
++ * regulator_disable().
++ *
++ * NOTE: this will only disable the regulator output if no other consumer
++ * devices have it enabled, the regulator device supports disabling and
++ * machine constraints permit this operation.
++ */
++int regulator_disable(struct regulator *regulator)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret = 0;
++
++ if (regulator->always_on)
++ return 0;
++
++ mutex_lock(&rdev->mutex);
++ ret = _regulator_disable(rdev);
++ mutex_unlock(&rdev->mutex);
++
++ if (ret == 0 && rdev->supply)
++ regulator_disable(rdev->supply);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_disable);
++
++/* locks held by regulator_force_disable() */
++static int _regulator_force_disable(struct regulator_dev *rdev)
++{
++ int ret = 0;
++
++ ret = _regulator_do_disable(rdev);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to force disable\n");
++ return ret;
++ }
++
++ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
++ REGULATOR_EVENT_DISABLE, NULL);
++
++ return 0;
++}
++
++/**
++ * regulator_force_disable - force disable regulator output
++ * @regulator: regulator source
++ *
++ * Forcibly disable the regulator output voltage or current.
++ * NOTE: this *will* disable the regulator output even if other consumer
++ * devices have it enabled. This should be used for situations when device
++ * damage will likely occur if the regulator is not disabled (e.g. over temp).
++ */
++int regulator_force_disable(struct regulator *regulator)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret;
++
++ mutex_lock(&rdev->mutex);
++ regulator->uA_load = 0;
++ ret = _regulator_force_disable(regulator->rdev);
++ mutex_unlock(&rdev->mutex);
++
++ if (rdev->supply)
++ while (rdev->open_count--)
++ regulator_disable(rdev->supply);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_force_disable);
++
++static void regulator_disable_work(struct work_struct *work)
++{
++ struct regulator_dev *rdev = container_of(work, struct regulator_dev,
++ disable_work.work);
++ int count, i, ret;
++
++ mutex_lock(&rdev->mutex);
++
++ BUG_ON(!rdev->deferred_disables);
++
++ count = rdev->deferred_disables;
++ rdev->deferred_disables = 0;
++
++ for (i = 0; i < count; i++) {
++ ret = _regulator_disable(rdev);
++ if (ret != 0)
++ rdev_err(rdev, "Deferred disable failed: %d\n", ret);
++ }
++
++ mutex_unlock(&rdev->mutex);
++
++ if (rdev->supply) {
++ for (i = 0; i < count; i++) {
++ ret = regulator_disable(rdev->supply);
++ if (ret != 0) {
++ rdev_err(rdev,
++ "Supply disable failed: %d\n", ret);
++ }
++ }
++ }
++}
++
++/**
++ * regulator_disable_deferred - disable regulator output with delay
++ * @regulator: regulator source
++ * @ms: milliseconds until the regulator is disabled
++ *
++ * Execute regulator_disable() on the regulator after a delay. This
++ * is intended for use with devices that require some time to quiesce.
++ *
++ * NOTE: this will only disable the regulator output if no other consumer
++ * devices have it enabled, the regulator device supports disabling and
++ * machine constraints permit this operation.
++ */
++int regulator_disable_deferred(struct regulator *regulator, int ms)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret;
++
++ if (regulator->always_on)
++ return 0;
++
++ if (!ms)
++ return regulator_disable(regulator);
++
++ mutex_lock(&rdev->mutex);
++ rdev->deferred_disables++;
++ mutex_unlock(&rdev->mutex);
++
++ ret = queue_delayed_work(system_power_efficient_wq,
++ &rdev->disable_work,
++ msecs_to_jiffies(ms));
++ if (ret < 0)
++ return ret;
++ else
++ return 0;
++}
++EXPORT_SYMBOL_GPL(regulator_disable_deferred);
++
++static int _regulator_is_enabled(struct regulator_dev *rdev)
++{
++ /* A GPIO control always takes precedence */
++ if (rdev->ena_pin)
++ return rdev->ena_gpio_state;
++
++ /* If we don't know then assume that the regulator is always on */
++ if (!rdev->desc->ops->is_enabled)
++ return 1;
++
++ return rdev->desc->ops->is_enabled(rdev);
++}
++
++/**
++ * regulator_is_enabled - is the regulator output enabled
++ * @regulator: regulator source
++ *
++ * Returns positive if the regulator driver backing the source/client
++ * has requested that the device be enabled, zero if it hasn't, else a
++ * negative errno code.
++ *
++ * Note that the device backing this regulator handle can have multiple
++ * users, so it might be enabled even if regulator_enable() was never
++ * called for this particular source.
++ */
++int regulator_is_enabled(struct regulator *regulator)
++{
++ int ret;
++
++ if (regulator->always_on)
++ return 1;
++
++ mutex_lock(&regulator->rdev->mutex);
++ ret = _regulator_is_enabled(regulator->rdev);
++ mutex_unlock(&regulator->rdev->mutex);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_is_enabled);
++
++/**
++ * regulator_can_change_voltage - check if regulator can change voltage
++ * @regulator: regulator source
++ *
++ * Returns positive if the regulator driver backing the source/client
++ * can change its voltage, false otherwise. Useful for detecting fixed
++ * or dummy regulators and disabling voltage change logic in the client
++ * driver.
++ */
++int regulator_can_change_voltage(struct regulator *regulator)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++
++ if (rdev->constraints &&
++ (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
++ if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
++ return 1;
++
++ if (rdev->desc->continuous_voltage_range &&
++ rdev->constraints->min_uV && rdev->constraints->max_uV &&
++ rdev->constraints->min_uV != rdev->constraints->max_uV)
++ return 1;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(regulator_can_change_voltage);
++
++/**
++ * regulator_count_voltages - count regulator_list_voltage() selectors
++ * @regulator: regulator source
++ *
++ * Returns number of selectors, or negative errno. Selectors are
++ * numbered starting at zero, and typically correspond to bitfields
++ * in hardware registers.
++ */
++int regulator_count_voltages(struct regulator *regulator)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++
++ return rdev->desc->n_voltages ? : -EINVAL;
++}
++EXPORT_SYMBOL_GPL(regulator_count_voltages);
++
++/**
++ * regulator_list_voltage - enumerate supported voltages
++ * @regulator: regulator source
++ * @selector: identify voltage to list
++ * Context: can sleep
++ *
++ * Returns a voltage that can be passed to @regulator_set_voltage(),
++ * zero if this selector code can't be used on this system, or a
++ * negative errno.
++ */
++int regulator_list_voltage(struct regulator *regulator, unsigned selector)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ struct regulator_ops *ops = rdev->desc->ops;
++ int ret;
++
++ if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
++ return rdev->desc->fixed_uV;
++
++ if (!ops->list_voltage || selector >= rdev->desc->n_voltages)
++ return -EINVAL;
++
++ mutex_lock(&rdev->mutex);
++ ret = ops->list_voltage(rdev, selector);
++ mutex_unlock(&rdev->mutex);
++
++ if (ret > 0) {
++ if (ret < rdev->constraints->min_uV)
++ ret = 0;
++ else if (ret > rdev->constraints->max_uV)
++ ret = 0;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_list_voltage);
++
++/**
++ * regulator_get_linear_step - return the voltage step size between VSEL values
++ * @regulator: regulator source
++ *
++ * Returns the voltage step size between VSEL values for linear
++ * regulators, or return 0 if the regulator isn't a linear regulator.
++ */
++unsigned int regulator_get_linear_step(struct regulator *regulator)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++
++ return rdev->desc->uV_step;
++}
++EXPORT_SYMBOL_GPL(regulator_get_linear_step);
++
++/**
++ * regulator_is_supported_voltage - check if a voltage range can be supported
++ *
++ * @regulator: Regulator to check.
++ * @min_uV: Minimum required voltage in uV.
++ * @max_uV: Maximum required voltage in uV.
++ *
++ * Returns a boolean or a negative error code.
++ */
++int regulator_is_supported_voltage(struct regulator *regulator,
++ int min_uV, int max_uV)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int i, voltages, ret;
++
++ /* If we can't change voltage check the current voltage */
++ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
++ ret = regulator_get_voltage(regulator);
++ if (ret >= 0)
++ return min_uV <= ret && ret <= max_uV;
++ else
++ return ret;
++ }
++
++ /* Any voltage within constraints range is fine? */
++ if (rdev->desc->continuous_voltage_range)
++ return min_uV >= rdev->constraints->min_uV &&
++ max_uV <= rdev->constraints->max_uV;
++
++ ret = regulator_count_voltages(regulator);
++ if (ret < 0)
++ return ret;
++ voltages = ret;
++
++ for (i = 0; i < voltages; i++) {
++ ret = regulator_list_voltage(regulator, i);
++
++ if (ret >= min_uV && ret <= max_uV)
++ return 1;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
++
++static int _regulator_do_set_voltage(struct regulator_dev *rdev,
++ int min_uV, int max_uV)
++{
++ int ret;
++ int delay = 0;
++ int best_val = 0;
++ unsigned int selector;
++ int old_selector = -1;
++
++ trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
++
++ min_uV += rdev->constraints->uV_offset;
++ max_uV += rdev->constraints->uV_offset;
++
++ /*
++ * If we can't obtain the old selector there is not enough
++ * info to call set_voltage_time_sel().
++ */
++ if (_regulator_is_enabled(rdev) &&
++ rdev->desc->ops->set_voltage_time_sel &&
++ rdev->desc->ops->get_voltage_sel) {
++ old_selector = rdev->desc->ops->get_voltage_sel(rdev);
++ if (old_selector < 0)
++ return old_selector;
++ }
++
++ if (rdev->desc->ops->set_voltage) {
++ ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
++ &selector);
++
++ if (ret >= 0) {
++ if (rdev->desc->ops->list_voltage)
++ best_val = rdev->desc->ops->list_voltage(rdev,
++ selector);
++ else
++ best_val = _regulator_get_voltage(rdev);
++ }
++
++ } else if (rdev->desc->ops->set_voltage_sel) {
++ if (rdev->desc->ops->map_voltage) {
++ ret = rdev->desc->ops->map_voltage(rdev, min_uV,
++ max_uV);
++ } else {
++ if (rdev->desc->ops->list_voltage ==
++ regulator_list_voltage_linear)
++ ret = regulator_map_voltage_linear(rdev,
++ min_uV, max_uV);
++ else
++ ret = regulator_map_voltage_iterate(rdev,
++ min_uV, max_uV);
++ }
++
++ if (ret >= 0) {
++ best_val = rdev->desc->ops->list_voltage(rdev, ret);
++ if (min_uV <= best_val && max_uV >= best_val) {
++ selector = ret;
++ if (old_selector == selector)
++ ret = 0;
++ else
++ ret = rdev->desc->ops->set_voltage_sel(
++ rdev, ret);
++ } else {
++ ret = -EINVAL;
++ }
++ }
++ } else {
++ ret = -EINVAL;
++ }
++
++ /* Call set_voltage_time_sel if successfully obtained old_selector */
++ if (ret == 0 && !rdev->constraints->ramp_disable && old_selector >= 0
++ && old_selector != selector) {
++
++ delay = rdev->desc->ops->set_voltage_time_sel(rdev,
++ old_selector, selector);
++ if (delay < 0) {
++ rdev_warn(rdev, "set_voltage_time_sel() failed: %d\n",
++ delay);
++ delay = 0;
++ }
++
++ /* Insert any necessary delays */
++ if (delay >= 1000) {
++ mdelay(delay / 1000);
++ udelay(delay % 1000);
++ } else if (delay) {
++ udelay(delay);
++ }
++ }
++
++ if (ret == 0 && best_val >= 0) {
++ unsigned long data = best_val;
++
++ _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
++ (void *)data);
++ }
++
++ trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val);
++
++ return ret;
++}
++
++/**
++ * regulator_set_voltage - set regulator output voltage
++ * @regulator: regulator source
++ * @min_uV: Minimum required voltage in uV
++ * @max_uV: Maximum acceptable voltage in uV
++ *
++ * Sets a voltage regulator to the desired output voltage. This can be set
++ * during any regulator state. IOW, regulator can be disabled or enabled.
++ *
++ * If the regulator is enabled then the voltage will change to the new value
++ * immediately otherwise if the regulator is disabled the regulator will
++ * output at the new voltage when enabled.
++ *
++ * NOTE: If the regulator is shared between several devices then the lowest
++ * request voltage that meets the system constraints will be used.
++ * Regulator system constraints must be set for this regulator before
++ * calling this function otherwise this call will fail.
++ */
++int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret = 0;
++ int old_min_uV, old_max_uV;
++
++ mutex_lock(&rdev->mutex);
++
++ /* If we're setting the same range as last time the change
++ * should be a noop (some cpufreq implementations use the same
++ * voltage for multiple frequencies, for example).
++ */
++ if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
++ goto out;
++
++ /* sanity check */
++ if (!rdev->desc->ops->set_voltage &&
++ !rdev->desc->ops->set_voltage_sel) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* constraints check */
++ ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
++ if (ret < 0)
++ goto out;
++
++ /* restore original values in case of error */
++ old_min_uV = regulator->min_uV;
++ old_max_uV = regulator->max_uV;
++ regulator->min_uV = min_uV;
++ regulator->max_uV = max_uV;
++
++ ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
++ if (ret < 0)
++ goto out2;
++
++ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
++ if (ret < 0)
++ goto out2;
++
++out:
++ mutex_unlock(&rdev->mutex);
++ return ret;
++out2:
++ regulator->min_uV = old_min_uV;
++ regulator->max_uV = old_max_uV;
++ mutex_unlock(&rdev->mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_set_voltage);
++
++/**
++ * regulator_set_voltage_time - get raise/fall time
++ * @regulator: regulator source
++ * @old_uV: starting voltage in microvolts
++ * @new_uV: target voltage in microvolts
++ *
++ * Provided with the starting and ending voltage, this function attempts to
++ * calculate the time in microseconds required to rise or fall to this new
++ * voltage.
++ */
++int regulator_set_voltage_time(struct regulator *regulator,
++ int old_uV, int new_uV)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ struct regulator_ops *ops = rdev->desc->ops;
++ int old_sel = -1;
++ int new_sel = -1;
++ int voltage;
++ int i;
++
++ /* Currently requires operations to do this */
++ if (!ops->list_voltage || !ops->set_voltage_time_sel
++ || !rdev->desc->n_voltages)
++ return -EINVAL;
++
++ for (i = 0; i < rdev->desc->n_voltages; i++) {
++ /* We only look for exact voltage matches here */
++ voltage = regulator_list_voltage(regulator, i);
++ if (voltage < 0)
++ return -EINVAL;
++ if (voltage == 0)
++ continue;
++ if (voltage == old_uV)
++ old_sel = i;
++ if (voltage == new_uV)
++ new_sel = i;
++ }
++
++ if (old_sel < 0 || new_sel < 0)
++ return -EINVAL;
++
++ return ops->set_voltage_time_sel(rdev, old_sel, new_sel);
++}
++EXPORT_SYMBOL_GPL(regulator_set_voltage_time);
++
++/**
++ * regulator_set_voltage_time_sel - get raise/fall time
++ * @rdev: regulator source device
++ * @old_selector: selector for starting voltage
++ * @new_selector: selector for target voltage
++ *
++ * Provided with the starting and target voltage selectors, this function
++ * returns time in microseconds required to rise or fall to this new voltage
++ *
++ * Drivers providing ramp_delay in regulation_constraints can use this as their
++ * set_voltage_time_sel() operation.
++ */
++int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
++ unsigned int old_selector,
++ unsigned int new_selector)
++{
++ unsigned int ramp_delay = 0;
++ int old_volt, new_volt;
++
++ if (rdev->constraints->ramp_delay)
++ ramp_delay = rdev->constraints->ramp_delay;
++ else if (rdev->desc->ramp_delay)
++ ramp_delay = rdev->desc->ramp_delay;
++
++ if (ramp_delay == 0) {
++ rdev_warn(rdev, "ramp_delay not set\n");
++ return 0;
++ }
++
++ /* sanity check */
++ if (!rdev->desc->ops->list_voltage)
++ return -EINVAL;
++
++ old_volt = rdev->desc->ops->list_voltage(rdev, old_selector);
++ new_volt = rdev->desc->ops->list_voltage(rdev, new_selector);
++
++ return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
++}
++EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
++
++/**
++ * regulator_sync_voltage - re-apply last regulator output voltage
++ * @regulator: regulator source
++ *
++ * Re-apply the last configured voltage. This is intended to be used
++ * where some external control source the consumer is cooperating with
++ * has caused the configured voltage to change.
++ */
++int regulator_sync_voltage(struct regulator *regulator)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret, min_uV, max_uV;
++
++ mutex_lock(&rdev->mutex);
++
++ if (!rdev->desc->ops->set_voltage &&
++ !rdev->desc->ops->set_voltage_sel) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* This is only going to work if we've had a voltage configured. */
++ if (!regulator->min_uV && !regulator->max_uV) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ min_uV = regulator->min_uV;
++ max_uV = regulator->max_uV;
++
++ /* This should be a paranoia check... */
++ ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
++ if (ret < 0)
++ goto out;
++
++ ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
++ if (ret < 0)
++ goto out;
++
++ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
++
++out:
++ mutex_unlock(&rdev->mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_sync_voltage);
++
++static int _regulator_get_voltage(struct regulator_dev *rdev)
++{
++ int sel, ret;
++
++ if (rdev->desc->ops->get_voltage_sel) {
++ sel = rdev->desc->ops->get_voltage_sel(rdev);
++ if (sel < 0)
++ return sel;
++ ret = rdev->desc->ops->list_voltage(rdev, sel);
++ } else if (rdev->desc->ops->get_voltage) {
++ ret = rdev->desc->ops->get_voltage(rdev);
++ } else if (rdev->desc->ops->list_voltage) {
++ ret = rdev->desc->ops->list_voltage(rdev, 0);
++ } else if (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1)) {
++ ret = rdev->desc->fixed_uV;
++ } else {
++ return -EINVAL;
++ }
++
++ if (ret < 0)
++ return ret;
++ return ret - rdev->constraints->uV_offset;
++}
++
++/**
++ * regulator_get_voltage - get regulator output voltage
++ * @regulator: regulator source
++ *
++ * This returns the current regulator voltage in uV.
++ *
++ * NOTE: If the regulator is disabled it will return the voltage value. This
++ * function should not be used to determine regulator state.
++ */
++int regulator_get_voltage(struct regulator *regulator)
++{
++ int ret;
++
++ mutex_lock(&regulator->rdev->mutex);
++
++ ret = _regulator_get_voltage(regulator->rdev);
++
++ mutex_unlock(&regulator->rdev->mutex);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_get_voltage);
++
++/**
++ * regulator_set_current_limit - set regulator output current limit
++ * @regulator: regulator source
++ * @min_uA: Minimum supported current in uA
++ * @max_uA: Maximum supported current in uA
++ *
++ * Sets current sink to the desired output current. This can be set during
++ * any regulator state. IOW, regulator can be disabled or enabled.
++ *
++ * If the regulator is enabled then the current will change to the new value
++ * immediately otherwise if the regulator is disabled the regulator will
++ * output at the new current when enabled.
++ *
++ * NOTE: Regulator system constraints must be set for this regulator before
++ * calling this function otherwise this call will fail.
++ */
++int regulator_set_current_limit(struct regulator *regulator,
++ int min_uA, int max_uA)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret;
++
++ mutex_lock(&rdev->mutex);
++
++ /* sanity check */
++ if (!rdev->desc->ops->set_current_limit) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* constraints check */
++ ret = regulator_check_current_limit(rdev, &min_uA, &max_uA);
++ if (ret < 0)
++ goto out;
++
++ ret = rdev->desc->ops->set_current_limit(rdev, min_uA, max_uA);
++out:
++ mutex_unlock(&rdev->mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_set_current_limit);
++
++static int _regulator_get_current_limit(struct regulator_dev *rdev)
++{
++ int ret;
++
++ mutex_lock(&rdev->mutex);
++
++ /* sanity check */
++ if (!rdev->desc->ops->get_current_limit) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ ret = rdev->desc->ops->get_current_limit(rdev);
++out:
++ mutex_unlock(&rdev->mutex);
++ return ret;
++}
++
++/**
++ * regulator_get_current_limit - get regulator output current
++ * @regulator: regulator source
++ *
++ * This returns the current supplied by the specified current sink in uA.
++ *
++ * NOTE: If the regulator is disabled it will return the current value. This
++ * function should not be used to determine regulator state.
++ */
++int regulator_get_current_limit(struct regulator *regulator)
++{
++ return _regulator_get_current_limit(regulator->rdev);
++}
++EXPORT_SYMBOL_GPL(regulator_get_current_limit);
++
++/**
++ * regulator_set_mode - set regulator operating mode
++ * @regulator: regulator source
++ * @mode: operating mode - one of the REGULATOR_MODE constants
++ *
++ * Set regulator operating mode to increase regulator efficiency or improve
++ * regulation performance.
++ *
++ * NOTE: Regulator system constraints must be set for this regulator before
++ * calling this function otherwise this call will fail.
++ */
++int regulator_set_mode(struct regulator *regulator, unsigned int mode)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret;
++ int regulator_curr_mode;
++
++ mutex_lock(&rdev->mutex);
++
++ /* sanity check */
++ if (!rdev->desc->ops->set_mode) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* return if the same mode is requested */
++ if (rdev->desc->ops->get_mode) {
++ regulator_curr_mode = rdev->desc->ops->get_mode(rdev);
++ if (regulator_curr_mode == mode) {
++ ret = 0;
++ goto out;
++ }
++ }
++
++ /* constraints check */
++ ret = regulator_mode_constrain(rdev, &mode);
++ if (ret < 0)
++ goto out;
++
++ ret = rdev->desc->ops->set_mode(rdev, mode);
++out:
++ mutex_unlock(&rdev->mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_set_mode);
++
++static unsigned int _regulator_get_mode(struct regulator_dev *rdev)
++{
++ int ret;
++
++ mutex_lock(&rdev->mutex);
++
++ /* sanity check */
++ if (!rdev->desc->ops->get_mode) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ ret = rdev->desc->ops->get_mode(rdev);
++out:
++ mutex_unlock(&rdev->mutex);
++ return ret;
++}
++
++/**
++ * regulator_get_mode - get regulator operating mode
++ * @regulator: regulator source
++ *
++ * Get the current regulator operating mode.
++ */
++unsigned int regulator_get_mode(struct regulator *regulator)
++{
++ return _regulator_get_mode(regulator->rdev);
++}
++EXPORT_SYMBOL_GPL(regulator_get_mode);
++
++/**
++ * regulator_set_optimum_mode - set regulator optimum operating mode
++ * @regulator: regulator source
++ * @uA_load: load current
++ *
++ * Notifies the regulator core of a new device load. This is then used by
++ * DRMS (if enabled by constraints) to set the most efficient regulator
++ * operating mode for the new regulator loading.
++ *
++ * Consumer devices notify their supply regulator of the maximum power
++ * they will require (can be taken from device datasheet in the power
++ * consumption tables) when they change operational status and hence power
++ * state. Examples of operational state changes that can affect power
++ * consumption are :-
++ *
++ * o Device is opened / closed.
++ * o Device I/O is about to begin or has just finished.
++ * o Device is idling in between work.
++ *
++ * This information is also exported via sysfs to userspace.
++ *
++ * DRMS will sum the total requested load on the regulator and change
++ * to the most efficient operating mode if platform constraints allow.
++ *
++ * Returns the new regulator mode or error.
++ */
++int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ struct regulator *consumer;
++ int ret, output_uV, input_uV = 0, total_uA_load = 0;
++ unsigned int mode;
++
++ if (rdev->supply)
++ input_uV = regulator_get_voltage(rdev->supply);
++
++ mutex_lock(&rdev->mutex);
++
++ /*
++ * first check to see if we can set modes at all, otherwise just
++ * tell the consumer everything is OK.
++ */
++ regulator->uA_load = uA_load;
++ ret = regulator_check_drms(rdev);
++ if (ret < 0) {
++ ret = 0;
++ goto out;
++ }
++
++ if (!rdev->desc->ops->get_optimum_mode)
++ goto out;
++
++ /*
++ * we can actually do this so any errors are indicators of
++ * potential real failure.
++ */
++ ret = -EINVAL;
++
++ if (!rdev->desc->ops->set_mode)
++ goto out;
++
++ /* get output voltage */
++ output_uV = _regulator_get_voltage(rdev);
++ if (output_uV <= 0) {
++ rdev_err(rdev, "invalid output voltage found\n");
++ goto out;
++ }
++
++ /* No supply? Use constraint voltage */
++ if (input_uV <= 0)
++ input_uV = rdev->constraints->input_uV;
++ if (input_uV <= 0) {
++ rdev_err(rdev, "invalid input voltage found\n");
++ goto out;
++ }
++
++ /* calc total requested load for this regulator */
++ list_for_each_entry(consumer, &rdev->consumer_list, list)
++ total_uA_load += consumer->uA_load;
++
++ mode = rdev->desc->ops->get_optimum_mode(rdev,
++ input_uV, output_uV,
++ total_uA_load);
++ ret = regulator_mode_constrain(rdev, &mode);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
++ total_uA_load, input_uV, output_uV);
++ goto out;
++ }
++
++ ret = rdev->desc->ops->set_mode(rdev, mode);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to set optimum mode %x\n", mode);
++ goto out;
++ }
++ ret = mode;
++out:
++ mutex_unlock(&rdev->mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
++
++/**
++ * regulator_allow_bypass - allow the regulator to go into bypass mode
++ *
++ * @regulator: Regulator to configure
++ * @enable: enable or disable bypass mode
++ *
++ * Allow the regulator to go into bypass mode if all other consumers
++ * for the regulator also enable bypass mode and the machine
++ * constraints allow this. Bypass mode means that the regulator is
++ * simply passing the input directly to the output with no regulation.
++ */
++int regulator_allow_bypass(struct regulator *regulator, bool enable)
++{
++ struct regulator_dev *rdev = regulator->rdev;
++ int ret = 0;
++
++ if (!rdev->desc->ops->set_bypass)
++ return 0;
++
++ if (rdev->constraints &&
++ !(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_BYPASS))
++ return 0;
++
++ mutex_lock(&rdev->mutex);
++
++ if (enable && !regulator->bypass) {
++ rdev->bypass_count++;
++
++ if (rdev->bypass_count == rdev->open_count) {
++ ret = rdev->desc->ops->set_bypass(rdev, enable);
++ if (ret != 0)
++ rdev->bypass_count--;
++ }
++
++ } else if (!enable && regulator->bypass) {
++ rdev->bypass_count--;
++
++ if (rdev->bypass_count != rdev->open_count) {
++ ret = rdev->desc->ops->set_bypass(rdev, enable);
++ if (ret != 0)
++ rdev->bypass_count++;
++ }
++ }
++
++ if (ret == 0)
++ regulator->bypass = enable;
++
++ mutex_unlock(&rdev->mutex);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_allow_bypass);
++
++/**
++ * regulator_register_notifier - register regulator event notifier
++ * @regulator: regulator source
++ * @nb: notifier block
++ *
++ * Register notifier block to receive regulator events.
++ */
++int regulator_register_notifier(struct regulator *regulator,
++ struct notifier_block *nb)
++{
++ return blocking_notifier_chain_register(&regulator->rdev->notifier,
++ nb);
++}
++EXPORT_SYMBOL_GPL(regulator_register_notifier);
++
++/**
++ * regulator_unregister_notifier - unregister regulator event notifier
++ * @regulator: regulator source
++ * @nb: notifier block
++ *
++ * Unregister regulator event notifier block.
++ */
++int regulator_unregister_notifier(struct regulator *regulator,
++ struct notifier_block *nb)
++{
++ return blocking_notifier_chain_unregister(&regulator->rdev->notifier,
++ nb);
++}
++EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
++
++/* notify regulator consumers and downstream regulator consumers.
++ * Note mutex must be held by caller.
++ */
++static void _notifier_call_chain(struct regulator_dev *rdev,
++ unsigned long event, void *data)
++{
++ /* call rdev chain first */
++ blocking_notifier_call_chain(&rdev->notifier, event, data);
++}
++
++/**
++ * regulator_bulk_get - get multiple regulator consumers
++ *
++ * @dev: Device to supply
++ * @num_consumers: Number of consumers to register
++ * @consumers: Configuration of consumers; clients are stored here.
++ *
++ * @return 0 on success, an errno on failure.
++ *
++ * This helper function allows drivers to get several regulator
++ * consumers in one operation. If any of the regulators cannot be
++ * acquired then any regulators that were allocated will be freed
++ * before returning to the caller.
++ */
++int regulator_bulk_get(struct device *dev, int num_consumers,
++ struct regulator_bulk_data *consumers)
++{
++ int i;
++ int ret;
++
++ for (i = 0; i < num_consumers; i++)
++ consumers[i].consumer = NULL;
++
++ for (i = 0; i < num_consumers; i++) {
++ consumers[i].consumer = regulator_get(dev,
++ consumers[i].supply);
++ if (IS_ERR(consumers[i].consumer)) {
++ ret = PTR_ERR(consumers[i].consumer);
++ dev_err(dev, "Failed to get supply '%s': %d\n",
++ consumers[i].supply, ret);
++ consumers[i].consumer = NULL;
++ goto err;
++ }
++ }
++
++ return 0;
++
++err:
++ while (--i >= 0)
++ regulator_put(consumers[i].consumer);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_bulk_get);
++
++static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
++{
++ struct regulator_bulk_data *bulk = data;
++
++ bulk->ret = regulator_enable(bulk->consumer);
++}
++
++/**
++ * regulator_bulk_enable - enable multiple regulator consumers
++ *
++ * @num_consumers: Number of consumers
++ * @consumers: Consumer data; clients are stored here.
++ * @return 0 on success, an errno on failure
++ *
++ * This convenience API allows consumers to enable multiple regulator
++ * clients in a single API call. If any consumers cannot be enabled
++ * then any others that were enabled will be disabled again prior to
++ * return.
++ */
++int regulator_bulk_enable(int num_consumers,
++ struct regulator_bulk_data *consumers)
++{
++ ASYNC_DOMAIN_EXCLUSIVE(async_domain);
++ int i;
++ int ret = 0;
++
++ for (i = 0; i < num_consumers; i++) {
++ if (consumers[i].consumer->always_on)
++ consumers[i].ret = 0;
++ else
++ async_schedule_domain(regulator_bulk_enable_async,
++ &consumers[i], &async_domain);
++ }
++
++ async_synchronize_full_domain(&async_domain);
++
++ /* If any consumer failed we need to unwind any that succeeded */
++ for (i = 0; i < num_consumers; i++) {
++ if (consumers[i].ret != 0) {
++ ret = consumers[i].ret;
++ goto err;
++ }
++ }
++
++ return 0;
++
++err:
++ for (i = 0; i < num_consumers; i++) {
++ if (consumers[i].ret < 0)
++ pr_err("Failed to enable %s: %d\n", consumers[i].supply,
++ consumers[i].ret);
++ else
++ regulator_disable(consumers[i].consumer);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_bulk_enable);
++
++/**
++ * regulator_bulk_disable - disable multiple regulator consumers
++ *
++ * @num_consumers: Number of consumers
++ * @consumers: Consumer data; clients are stored here.
++ * @return 0 on success, an errno on failure
++ *
++ * This convenience API allows consumers to disable multiple regulator
++ * clients in a single API call. If any consumers cannot be disabled
++ * then any others that were disabled will be enabled again prior to
++ * return.
++ */
++int regulator_bulk_disable(int num_consumers,
++ struct regulator_bulk_data *consumers)
++{
++ int i;
++ int ret, r;
++
++ for (i = num_consumers - 1; i >= 0; --i) {
++ ret = regulator_disable(consumers[i].consumer);
++ if (ret != 0)
++ goto err;
++ }
++
++ return 0;
++
++err:
++ pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
++ for (++i; i < num_consumers; ++i) {
++ r = regulator_enable(consumers[i].consumer);
++ if (r != 0)
++ pr_err("Failed to reename %s: %d\n",
++ consumers[i].supply, r);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_bulk_disable);
++
++/**
++ * regulator_bulk_force_disable - force disable multiple regulator consumers
++ *
++ * @num_consumers: Number of consumers
++ * @consumers: Consumer data; clients are stored here.
++ * @return 0 on success, an errno on failure
++ *
++ * This convenience API allows consumers to forcibly disable multiple regulator
++ * clients in a single API call.
++ * NOTE: This should be used for situations when device damage will
++ * likely occur if the regulators are not disabled (e.g. over temp).
++ * Although regulator_force_disable function call for some consumers can
++ * return error numbers, the function is called for all consumers.
++ */
++int regulator_bulk_force_disable(int num_consumers,
++ struct regulator_bulk_data *consumers)
++{
++ int i;
++ int ret;
++
++ for (i = 0; i < num_consumers; i++)
++ consumers[i].ret =
++ regulator_force_disable(consumers[i].consumer);
++
++ for (i = 0; i < num_consumers; i++) {
++ if (consumers[i].ret != 0) {
++ ret = consumers[i].ret;
++ goto out;
++ }
++ }
++
++ return 0;
++out:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_bulk_force_disable);
++
++/**
++ * regulator_bulk_free - free multiple regulator consumers
++ *
++ * @num_consumers: Number of consumers
++ * @consumers: Consumer data; clients are stored here.
++ *
++ * This convenience API allows consumers to free multiple regulator
++ * clients in a single API call.
++ */
++void regulator_bulk_free(int num_consumers,
++ struct regulator_bulk_data *consumers)
++{
++ int i;
++
++ for (i = 0; i < num_consumers; i++) {
++ regulator_put(consumers[i].consumer);
++ consumers[i].consumer = NULL;
++ }
++}
++EXPORT_SYMBOL_GPL(regulator_bulk_free);
++
++/**
++ * regulator_notifier_call_chain - call regulator event notifier
++ * @rdev: regulator source
++ * @event: notifier block
++ * @data: callback-specific data.
++ *
++ * Called by regulator drivers to notify clients a regulator event has
++ * occurred. We also notify regulator clients downstream.
++ * Note lock must be held by caller.
++ */
++int regulator_notifier_call_chain(struct regulator_dev *rdev,
++ unsigned long event, void *data)
++{
++ _notifier_call_chain(rdev, event, data);
++ return NOTIFY_DONE;
++
++}
++EXPORT_SYMBOL_GPL(regulator_notifier_call_chain);
++
++/**
++ * regulator_mode_to_status - convert a regulator mode into a status
++ *
++ * @mode: Mode to convert
++ *
++ * Convert a regulator mode into a status.
++ */
++int regulator_mode_to_status(unsigned int mode)
++{
++ switch (mode) {
++ case REGULATOR_MODE_FAST:
++ return REGULATOR_STATUS_FAST;
++ case REGULATOR_MODE_NORMAL:
++ return REGULATOR_STATUS_NORMAL;
++ case REGULATOR_MODE_IDLE:
++ return REGULATOR_STATUS_IDLE;
++ case REGULATOR_MODE_STANDBY:
++ return REGULATOR_STATUS_STANDBY;
++ default:
++ return REGULATOR_STATUS_UNDEFINED;
++ }
++}
++EXPORT_SYMBOL_GPL(regulator_mode_to_status);
++
++/*
++ * To avoid cluttering sysfs (and memory) with useless state, only
++ * create attributes that can be meaningfully displayed.
++ */
++static int add_regulator_attributes(struct regulator_dev *rdev)
++{
++ struct device *dev = &rdev->dev;
++ struct regulator_ops *ops = rdev->desc->ops;
++ int status = 0;
++
++ /* some attributes need specific methods to be displayed */
++ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
++ (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
++ (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
++ (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) {
++ status = device_create_file(dev, &dev_attr_microvolts);
++ if (status < 0)
++ return status;
++ }
++ if (ops->get_current_limit) {
++ status = device_create_file(dev, &dev_attr_microamps);
++ if (status < 0)
++ return status;
++ }
++ if (ops->get_mode) {
++ status = device_create_file(dev, &dev_attr_opmode);
++ if (status < 0)
++ return status;
++ }
++ if (rdev->ena_pin || ops->is_enabled) {
++ status = device_create_file(dev, &dev_attr_state);
++ if (status < 0)
++ return status;
++ }
++ if (ops->get_status) {
++ status = device_create_file(dev, &dev_attr_status);
++ if (status < 0)
++ return status;
++ }
++ if (ops->get_bypass) {
++ status = device_create_file(dev, &dev_attr_bypass);
++ if (status < 0)
++ return status;
++ }
++
++ /* some attributes are type-specific */
++ if (rdev->desc->type == REGULATOR_CURRENT) {
++ status = device_create_file(dev, &dev_attr_requested_microamps);
++ if (status < 0)
++ return status;
++ }
++
++ /* all the other attributes exist to support constraints;
++ * don't show them if there are no constraints, or if the
++ * relevant supporting methods are missing.
++ */
++ if (!rdev->constraints)
++ return status;
++
++ /* constraints need specific supporting methods */
++ if (ops->set_voltage || ops->set_voltage_sel) {
++ status = device_create_file(dev, &dev_attr_min_microvolts);
++ if (status < 0)
++ return status;
++ status = device_create_file(dev, &dev_attr_max_microvolts);
++ if (status < 0)
++ return status;
++ }
++ if (ops->set_current_limit) {
++ status = device_create_file(dev, &dev_attr_min_microamps);
++ if (status < 0)
++ return status;
++ status = device_create_file(dev, &dev_attr_max_microamps);
++ if (status < 0)
++ return status;
++ }
++
++ status = device_create_file(dev, &dev_attr_suspend_standby_state);
++ if (status < 0)
++ return status;
++ status = device_create_file(dev, &dev_attr_suspend_mem_state);
++ if (status < 0)
++ return status;
++ status = device_create_file(dev, &dev_attr_suspend_disk_state);
++ if (status < 0)
++ return status;
++
++ if (ops->set_suspend_voltage) {
++ status = device_create_file(dev,
++ &dev_attr_suspend_standby_microvolts);
++ if (status < 0)
++ return status;
++ status = device_create_file(dev,
++ &dev_attr_suspend_mem_microvolts);
++ if (status < 0)
++ return status;
++ status = device_create_file(dev,
++ &dev_attr_suspend_disk_microvolts);
++ if (status < 0)
++ return status;
++ }
++
++ if (ops->set_suspend_mode) {
++ status = device_create_file(dev,
++ &dev_attr_suspend_standby_mode);
++ if (status < 0)
++ return status;
++ status = device_create_file(dev,
++ &dev_attr_suspend_mem_mode);
++ if (status < 0)
++ return status;
++ status = device_create_file(dev,
++ &dev_attr_suspend_disk_mode);
++ if (status < 0)
++ return status;
++ }
++
++ return status;
++}
++
++static void rdev_init_debugfs(struct regulator_dev *rdev)
++{
++ rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
++ if (!rdev->debugfs) {
++ rdev_warn(rdev, "Failed to create debugfs directory\n");
++ return;
++ }
++
++ debugfs_create_u32("use_count", 0444, rdev->debugfs,
++ &rdev->use_count);
++ debugfs_create_u32("open_count", 0444, rdev->debugfs,
++ &rdev->open_count);
++ debugfs_create_u32("bypass_count", 0444, rdev->debugfs,
++ &rdev->bypass_count);
++}
++
++/**
++ * regulator_register - register regulator
++ * @regulator_desc: regulator to register
++ * @config: runtime configuration for regulator
++ *
++ * Called by regulator drivers to register a regulator.
++ * Returns a valid pointer to struct regulator_dev on success
++ * or an ERR_PTR() on error.
++ */
++struct regulator_dev *
++regulator_register(const struct regulator_desc *regulator_desc,
++ const struct regulator_config *config)
++{
++ const struct regulation_constraints *constraints = NULL;
++ const struct regulator_init_data *init_data;
++ static atomic_t regulator_no = ATOMIC_INIT(0);
++ struct regulator_dev *rdev;
++ struct device *dev;
++ int ret, i;
++ const char *supply = NULL;
++
++ if (regulator_desc == NULL || config == NULL)
++ return ERR_PTR(-EINVAL);
++
++ dev = config->dev;
++ WARN_ON(!dev);
++
++ if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
++ return ERR_PTR(-EINVAL);
++
++ if (regulator_desc->type != REGULATOR_VOLTAGE &&
++ regulator_desc->type != REGULATOR_CURRENT)
++ return ERR_PTR(-EINVAL);
++
++ /* Only one of each should be implemented */
++ WARN_ON(regulator_desc->ops->get_voltage &&
++ regulator_desc->ops->get_voltage_sel);
++ WARN_ON(regulator_desc->ops->set_voltage &&
++ regulator_desc->ops->set_voltage_sel);
++
++ /* If we're using selectors we must implement list_voltage. */
++ if (regulator_desc->ops->get_voltage_sel &&
++ !regulator_desc->ops->list_voltage) {
++ return ERR_PTR(-EINVAL);
++ }
++ if (regulator_desc->ops->set_voltage_sel &&
++ !regulator_desc->ops->list_voltage) {
++ return ERR_PTR(-EINVAL);
++ }
++
++ init_data = config->init_data;
++
++ rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
++ if (rdev == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ mutex_lock(&regulator_list_mutex);
++
++ mutex_init(&rdev->mutex);
++ rdev->reg_data = config->driver_data;
++ rdev->owner = regulator_desc->owner;
++ rdev->desc = regulator_desc;
++ if (config->regmap)
++ rdev->regmap = config->regmap;
++ else if (dev_get_regmap(dev, NULL))
++ rdev->regmap = dev_get_regmap(dev, NULL);
++ else if (dev->parent)
++ rdev->regmap = dev_get_regmap(dev->parent, NULL);
++ INIT_LIST_HEAD(&rdev->consumer_list);
++ INIT_LIST_HEAD(&rdev->list);
++ BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
++ INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);
++
++ /* perform any regulator specific init */
++ if (init_data && init_data->regulator_init) {
++ ret = init_data->regulator_init(rdev->reg_data);
++ if (ret < 0)
++ goto clean;
++ }
++
++ /* register with sysfs */
++ rdev->dev.class = &regulator_class;
++ rdev->dev.of_node = config->of_node;
++ rdev->dev.parent = dev;
++ dev_set_name(&rdev->dev, "regulator.%d",
++ atomic_inc_return(&regulator_no) - 1);
++ ret = device_register(&rdev->dev);
++ if (ret != 0) {
++ put_device(&rdev->dev);
++ goto clean;
++ }
++
++ dev_set_drvdata(&rdev->dev, rdev);
++
++ if (gpio_is_valid(config->ena_gpio)) {
++ ret = regulator_ena_gpio_request(rdev, config);
++ if (ret != 0) {
++ rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
++ config->ena_gpio, ret);
++ goto wash;
++ }
++
++ if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
++ rdev->ena_gpio_state = 1;
++
++ if (config->ena_gpio_invert)
++ rdev->ena_gpio_state = !rdev->ena_gpio_state;
++ }
++
++ /* set regulator constraints */
++ if (init_data)
++ constraints = &init_data->constraints;
++
++ ret = set_machine_constraints(rdev, constraints);
++ if (ret < 0)
++ goto scrub;
++
++ /* add attributes supported by this regulator */
++ ret = add_regulator_attributes(rdev);
++ if (ret < 0)
++ goto scrub;
++
++ if (init_data && init_data->supply_regulator)
++ supply = init_data->supply_regulator;
++ else if (regulator_desc->supply_name)
++ supply = regulator_desc->supply_name;
++
++ if (supply) {
++ struct regulator_dev *r;
++
++ r = regulator_dev_lookup(dev, supply, &ret);
++
++ if (ret == -ENODEV) {
++ /*
++ * No supply was specified for this regulator and
++ * there will never be one.
++ */
++ ret = 0;
++ goto add_dev;
++ } else if (!r) {
++ dev_err(dev, "Failed to find supply %s\n", supply);
++ ret = -EPROBE_DEFER;
++ goto scrub;
++ }
++
++ ret = set_supply(rdev, r);
++ if (ret < 0)
++ goto scrub;
++
++ /* Enable supply if rail is enabled */
++ if (_regulator_is_enabled(rdev)) {
++ ret = regulator_enable(rdev->supply);
++ if (ret < 0)
++ goto scrub;
++ }
++ }
++
++add_dev:
++ /* add consumers devices */
++ if (init_data) {
++ for (i = 0; i < init_data->num_consumer_supplies; i++) {
++ ret = set_consumer_device_supply(rdev,
++ init_data->consumer_supplies[i].dev_name,
++ init_data->consumer_supplies[i].supply);
++ if (ret < 0) {
++ dev_err(dev, "Failed to set supply %s\n",
++ init_data->consumer_supplies[i].supply);
++ goto unset_supplies;
++ }
++ }
++ }
++
++ list_add(&rdev->list, &regulator_list);
++
++ rdev_init_debugfs(rdev);
++out:
++ mutex_unlock(&regulator_list_mutex);
++ return rdev;
++
++unset_supplies:
++ unset_regulator_supplies(rdev);
++
++scrub:
++ if (rdev->supply)
++ _regulator_put(rdev->supply);
++ regulator_ena_gpio_free(rdev);
++ kfree(rdev->constraints);
++wash:
++ device_unregister(&rdev->dev);
++ /* device core frees rdev */
++ rdev = ERR_PTR(ret);
++ goto out;
++
++clean:
++ kfree(rdev);
++ rdev = ERR_PTR(ret);
++ goto out;
++}
++EXPORT_SYMBOL_GPL(regulator_register);
++
++/**
++ * regulator_unregister - unregister regulator
++ * @rdev: regulator to unregister
++ *
++ * Called by regulator drivers to unregister a regulator.
++ */
++void regulator_unregister(struct regulator_dev *rdev)
++{
++ if (rdev == NULL)
++ return;
++
++ if (rdev->supply) {
++ while (rdev->use_count--)
++ regulator_disable(rdev->supply);
++ regulator_put(rdev->supply);
++ }
++ mutex_lock(&regulator_list_mutex);
++ debugfs_remove_recursive(rdev->debugfs);
++ flush_work(&rdev->disable_work.work);
++ WARN_ON(rdev->open_count);
++ unset_regulator_supplies(rdev);
++ list_del(&rdev->list);
++ kfree(rdev->constraints);
++ regulator_ena_gpio_free(rdev);
++ device_unregister(&rdev->dev);
++ mutex_unlock(&regulator_list_mutex);
++}
++EXPORT_SYMBOL_GPL(regulator_unregister);
++
++/**
++ * regulator_suspend_prepare - prepare regulators for system wide suspend
++ * @state: system suspend state
++ *
++ * Configure each regulator with its suspend operating parameters for state.
++ * This will usually be called by machine suspend code prior to suspending.
++ */
++int regulator_suspend_prepare(suspend_state_t state)
++{
++ struct regulator_dev *rdev;
++ int ret = 0;
++
++ /* ON is handled by regulator active state */
++ if (state == PM_SUSPEND_ON)
++ return -EINVAL;
++
++ mutex_lock(&regulator_list_mutex);
++ list_for_each_entry(rdev, &regulator_list, list) {
++
++ mutex_lock(&rdev->mutex);
++ ret = suspend_prepare(rdev, state);
++ mutex_unlock(&rdev->mutex);
++
++ if (ret < 0) {
++ rdev_err(rdev, "failed to prepare\n");
++ goto out;
++ }
++ }
++out:
++ mutex_unlock(&regulator_list_mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
++
++/**
++ * regulator_suspend_finish - resume regulators from system wide suspend
++ *
++ * Turn on regulators that might be turned off by regulator_suspend_prepare
++ * and that should be turned on according to the regulators properties.
++ */
++int regulator_suspend_finish(void)
++{
++ struct regulator_dev *rdev;
++ int ret = 0, error;
++
++ mutex_lock(&regulator_list_mutex);
++ list_for_each_entry(rdev, &regulator_list, list) {
++ mutex_lock(&rdev->mutex);
++ if (rdev->use_count > 0 || rdev->constraints->always_on) {
++ error = _regulator_do_enable(rdev);
++ if (error)
++ ret = error;
++ } else {
++ if (!have_full_constraints())
++ goto unlock;
++ if (!_regulator_is_enabled(rdev))
++ goto unlock;
++
++ error = _regulator_do_disable(rdev);
++ if (error)
++ ret = error;
++ }
++unlock:
++ mutex_unlock(&rdev->mutex);
++ }
++ mutex_unlock(&regulator_list_mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regulator_suspend_finish);
++
++/**
++ * regulator_has_full_constraints - the system has fully specified constraints
++ *
++ * Calling this function will cause the regulator API to disable all
++ * regulators which have a zero use count and don't have an always_on
++ * constraint in a late_initcall.
++ *
++ * The intention is that this will become the default behaviour in a
++ * future kernel release so users are encouraged to use this facility
++ * now.
++ */
++void regulator_has_full_constraints(void)
++{
++ has_full_constraints = 1;
++}
++EXPORT_SYMBOL_GPL(regulator_has_full_constraints);
++
++/**
++ * rdev_get_drvdata - get rdev regulator driver data
++ * @rdev: regulator
++ *
++ * Get rdev regulator driver private data. This call can be used in the
++ * regulator driver context.
++ */
++void *rdev_get_drvdata(struct regulator_dev *rdev)
++{
++ return rdev->reg_data;
++}
++EXPORT_SYMBOL_GPL(rdev_get_drvdata);
++
++/**
++ * regulator_get_drvdata - get regulator driver data
++ * @regulator: regulator
++ *
++ * Get regulator driver private data. This call can be used in the consumer
++ * driver context when non API regulator specific functions need to be called.
++ */
++void *regulator_get_drvdata(struct regulator *regulator)
++{
++ return regulator->rdev->reg_data;
++}
++EXPORT_SYMBOL_GPL(regulator_get_drvdata);
++
++/**
++ * regulator_set_drvdata - set regulator driver data
++ * @regulator: regulator
++ * @data: data
++ */
++void regulator_set_drvdata(struct regulator *regulator, void *data)
++{
++ regulator->rdev->reg_data = data;
++}
++EXPORT_SYMBOL_GPL(regulator_set_drvdata);
++
++/**
++ * regulator_get_id - get regulator ID
++ * @rdev: regulator
++ */
++int rdev_get_id(struct regulator_dev *rdev)
++{
++ return rdev->desc->id;
++}
++EXPORT_SYMBOL_GPL(rdev_get_id);
++
++struct device *rdev_get_dev(struct regulator_dev *rdev)
++{
++ return &rdev->dev;
++}
++EXPORT_SYMBOL_GPL(rdev_get_dev);
++
++void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data)
++{
++ return reg_init_data->driver_data;
++}
++EXPORT_SYMBOL_GPL(regulator_get_init_drvdata);
++
++#ifdef CONFIG_DEBUG_FS
++static ssize_t supply_map_read_file(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ ssize_t len, ret = 0;
++ struct regulator_map *map;
++
++ if (!buf)
++ return -ENOMEM;
++
++ list_for_each_entry(map, &regulator_map_list, list) {
++ len = snprintf(buf + ret, PAGE_SIZE - ret,
++ "%s -> %s.%s\n",
++ rdev_get_name(map->regulator), map->dev_name,
++ map->supply);
++ if (len >= 0)
++ ret += len;
++ if (ret > PAGE_SIZE) {
++ ret = PAGE_SIZE;
++ break;
++ }
++ }
++
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ kfree(buf);
++
++ return ret;
++}
++#endif
++
++static const struct file_operations supply_map_fops = {
++#ifdef CONFIG_DEBUG_FS
++ .read = supply_map_read_file,
++ .llseek = default_llseek,
++#endif
++};
++
++static int __init regulator_init(void)
++{
++ int ret;
++
++ ret = class_register(&regulator_class);
++
++ debugfs_root = debugfs_create_dir("regulator", NULL);
++ if (!debugfs_root)
++ pr_warn("regulator: Failed to create debugfs directory\n");
++
++ debugfs_create_file("supply_map", 0444, debugfs_root, NULL,
++ &supply_map_fops);
++
++ regulator_dummy_init();
++
++ return ret;
++}
++
++/* init early to allow our consumers to complete system booting */
++core_initcall(regulator_init);
++
++static int __init regulator_init_complete(void)
++{
++ struct regulator_dev *rdev;
++ struct regulator_ops *ops;
++ struct regulation_constraints *c;
++ int enabled, ret;
++
++ /*
++ * Since DT doesn't provide an idiomatic mechanism for
++ * enabling full constraints and since it's much more natural
++ * with DT to provide them just assume that a DT enabled
++ * system has full constraints.
++ */
++ if (of_have_populated_dt())
++ has_full_constraints = true;
++
++ mutex_lock(&regulator_list_mutex);
++
++ /* If we have a full configuration then disable any regulators
++ * which are not in use or always_on. This will become the
++ * default behaviour in the future.
++ */
++ list_for_each_entry(rdev, &regulator_list, list) {
++ ops = rdev->desc->ops;
++ c = rdev->constraints;
++
++ if (c && c->always_on)
++ continue;
++
++ mutex_lock(&rdev->mutex);
++
++ if (rdev->use_count)
++ goto unlock;
++
++ /* If we can't read the status assume it's on. */
++ if (ops->is_enabled)
++ enabled = ops->is_enabled(rdev);
++ else
++ enabled = 1;
++
++ if (!enabled)
++ goto unlock;
++
++ if (have_full_constraints()) {
++ /* We log since this may kill the system if it
++ * goes wrong. */
++ rdev_info(rdev, "disabling\n");
++ ret = _regulator_do_disable(rdev);
++ if (ret != 0)
++ rdev_err(rdev, "couldn't disable: %d\n", ret);
++ } else {
++ /* The intention is that in future we will
++ * assume that full constraints are provided
++ * so warn even if we aren't going to do
++ * anything here.
++ */
++ rdev_warn(rdev, "incomplete constraints, leaving on\n");
++ }
++
++unlock:
++ mutex_unlock(&rdev->mutex);
++ }
++
++ mutex_unlock(&regulator_list_mutex);
++
++ return 0;
++}
++late_initcall(regulator_init_complete);
+diff -Nur linux-3.14.36/drivers/regulator/dummy.c linux-openelec/drivers/regulator/dummy.c
+--- linux-3.14.36/drivers/regulator/dummy.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/regulator/dummy.c 2015-05-06 12:05:42.000000000 -0500
+@@ -44,6 +44,7 @@
+
+ config.dev = &pdev->dev;
+ config.init_data = &dummy_initdata;
++ config.ena_gpio = -EINVAL;
+
+ dummy_regulator_rdev = regulator_register(&dummy_desc, &config);
+ if (IS_ERR(dummy_regulator_rdev)) {
+diff -Nur linux-3.14.36/drivers/regulator/fixed.c linux-openelec/drivers/regulator/fixed.c
+--- linux-3.14.36/drivers/regulator/fixed.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/regulator/fixed.c 2015-05-06 12:05:42.000000000 -0500
+@@ -163,9 +163,7 @@
+ drvdata->desc.n_voltages = 1;
+
+ drvdata->desc.fixed_uV = config->microvolts;
+-
+- if (config->gpio >= 0)
+- cfg.ena_gpio = config->gpio;
++ cfg.ena_gpio = config->gpio;
+ cfg.ena_gpio_invert = !config->enable_high;
+ if (config->enabled_at_boot) {
+ if (config->enable_high)
+diff -Nur linux-3.14.36/drivers/reset/gpio-reset.c linux-openelec/drivers/reset/gpio-reset.c
+--- linux-3.14.36/drivers/reset/gpio-reset.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/reset/gpio-reset.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,187 @@
++/*
++ * GPIO Reset Controller driver
++ *
++ * Copyright 2013 Philipp Zabel, Pengutronix
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/gpio.h>
++#include <linux/module.h>
++#include <linux/of_gpio.h>
++#include <linux/platform_device.h>
++#include <linux/reset-controller.h>
++
++struct gpio_reset_data {
++ struct reset_controller_dev rcdev;
++ unsigned int gpio;
++ bool active_low;
++ s32 delay_us;
++};
++
++static void gpio_reset_set(struct reset_controller_dev *rcdev, int asserted)
++{
++ struct gpio_reset_data *drvdata = container_of(rcdev,
++ struct gpio_reset_data, rcdev);
++ int value = asserted;
++
++ if (drvdata->active_low)
++ value = !value;
++
++ if (gpio_cansleep(drvdata->gpio))
++ gpio_set_value_cansleep(drvdata->gpio, value);
++ else
++ gpio_set_value(drvdata->gpio, value);
++}
++
++static int gpio_reset(struct reset_controller_dev *rcdev, unsigned long id)
++{
++ struct gpio_reset_data *drvdata = container_of(rcdev,
++ struct gpio_reset_data, rcdev);
++
++ if (drvdata->delay_us < 0)
++ return -ENOSYS;
++
++ gpio_reset_set(rcdev, 1);
++ udelay(drvdata->delay_us);
++ gpio_reset_set(rcdev, 0);
++
++ return 0;
++}
++
++static int gpio_reset_assert(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ gpio_reset_set(rcdev, 1);
++
++ return 0;
++}
++
++static int gpio_reset_deassert(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ gpio_reset_set(rcdev, 0);
++
++ return 0;
++}
++
++static struct reset_control_ops gpio_reset_ops = {
++ .reset = gpio_reset,
++ .assert = gpio_reset_assert,
++ .deassert = gpio_reset_deassert,
++};
++
++static int of_gpio_reset_xlate(struct reset_controller_dev *rcdev,
++ const struct of_phandle_args *reset_spec)
++{
++ if (WARN_ON(reset_spec->args_count != 0))
++ return -EINVAL;
++
++ return 0;
++}
++
++static int gpio_reset_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct gpio_reset_data *drvdata;
++ enum of_gpio_flags flags;
++ unsigned long gpio_flags;
++ bool initially_in_reset;
++ int ret;
++
++ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
++ if (drvdata == NULL)
++ return -ENOMEM;
++
++ if (of_gpio_named_count(np, "reset-gpios") != 1) {
++ dev_err(&pdev->dev,
++ "reset-gpios property missing, or not a single gpio\n");
++ return -EINVAL;
++ }
++
++ drvdata->gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
++ if (drvdata->gpio == -EPROBE_DEFER) {
++ return drvdata->gpio;
++ } else if (!gpio_is_valid(drvdata->gpio)) {
++ dev_err(&pdev->dev, "invalid reset gpio: %d\n", drvdata->gpio);
++ return drvdata->gpio;
++ }
++
++ drvdata->active_low = flags & OF_GPIO_ACTIVE_LOW;
++
++ ret = of_property_read_u32(np, "reset-delay-us", &drvdata->delay_us);
++ if (ret < 0)
++ drvdata->delay_us = -1;
++ else if (drvdata->delay_us < 0)
++ dev_warn(&pdev->dev, "reset delay too high\n");
++
++ initially_in_reset = of_property_read_bool(np, "initially-in-reset");
++ if (drvdata->active_low ^ initially_in_reset)
++ gpio_flags = GPIOF_OUT_INIT_HIGH;
++ else
++ gpio_flags = GPIOF_OUT_INIT_LOW;
++
++ ret = devm_gpio_request_one(&pdev->dev, drvdata->gpio, gpio_flags, NULL);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to request gpio %d: %d\n",
++ drvdata->gpio, ret);
++ return ret;
++ }
++
++ platform_set_drvdata(pdev, drvdata);
++
++ drvdata->rcdev.of_node = np;
++ drvdata->rcdev.owner = THIS_MODULE;
++ drvdata->rcdev.nr_resets = 1;
++ drvdata->rcdev.ops = &gpio_reset_ops;
++ drvdata->rcdev.of_xlate = of_gpio_reset_xlate;
++ reset_controller_register(&drvdata->rcdev);
++
++ return 0;
++}
++
++static int gpio_reset_remove(struct platform_device *pdev)
++{
++ struct gpio_reset_data *drvdata = platform_get_drvdata(pdev);
++
++ reset_controller_unregister(&drvdata->rcdev);
++
++ return 0;
++}
++
++static struct of_device_id gpio_reset_dt_ids[] = {
++ { .compatible = "gpio-reset" },
++ { }
++};
++
++static struct platform_driver gpio_reset_driver = {
++ .probe = gpio_reset_probe,
++ .remove = gpio_reset_remove,
++ .driver = {
++ .name = "gpio-reset",
++ .owner = THIS_MODULE,
++ .of_match_table = of_match_ptr(gpio_reset_dt_ids),
++ },
++};
++
++static int __init gpio_reset_init(void)
++{
++ return platform_driver_register(&gpio_reset_driver);
++}
++arch_initcall(gpio_reset_init);
++
++static void __exit gpio_reset_exit(void)
++{
++ platform_driver_unregister(&gpio_reset_driver);
++}
++module_exit(gpio_reset_exit);
++
++MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
++MODULE_DESCRIPTION("gpio reset controller");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:gpio-reset");
++MODULE_DEVICE_TABLE(of, gpio_reset_dt_ids);
+diff -Nur linux-3.14.36/drivers/reset/Kconfig linux-openelec/drivers/reset/Kconfig
+--- linux-3.14.36/drivers/reset/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/reset/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -11,3 +11,15 @@
+ via GPIOs or SoC-internal reset controller modules.
+
+ If unsure, say no.
++
++if RESET_CONTROLLER
++
++config RESET_GPIO
++ tristate "GPIO reset controller support"
++ default y
++ depends on GPIOLIB && OF
++ help
++ This driver provides support for reset lines that are controlled
++ directly by GPIOs.
++
++endif
+diff -Nur linux-3.14.36/drivers/reset/Makefile linux-openelec/drivers/reset/Makefile
+--- linux-3.14.36/drivers/reset/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/reset/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -1,2 +1,3 @@
+ obj-$(CONFIG_RESET_CONTROLLER) += core.o
++obj-$(CONFIG_RESET_GPIO) += gpio-reset.o
+ obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
+diff -Nur linux-3.14.36/drivers/rtc/rtc-pcf8523.c linux-openelec/drivers/rtc/rtc-pcf8523.c
+--- linux-3.14.36/drivers/rtc/rtc-pcf8523.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/rtc/rtc-pcf8523.c 2015-05-06 12:05:42.000000000 -0500
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/bcd.h>
++#include <linux/delay.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+ #include <linux/rtc.h>
+@@ -82,24 +83,85 @@
+ return 0;
+ }
+
+-static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
++static int pcf8523_rtc_check_oscillator(struct i2c_client *client)
+ {
+ u8 value;
+ int err;
+
+- err = pcf8523_read(client, REG_CONTROL1, &value);
++ err = pcf8523_read(client, REG_SECONDS, &value);
+ if (err < 0)
+ return err;
+
+- if (!high)
+- value &= ~REG_CONTROL1_CAP_SEL;
+- else
+- value |= REG_CONTROL1_CAP_SEL;
++ if (value & REG_SECONDS_OS) {
++ /*
++ * If the oscillator was stopped, try to clear the flag. Upon
++ * power-up the flag is always set, but if we cannot clear it
++ * the oscillator isn't running properly for some reason. The
++ * sensible thing therefore is to return an error, signalling
++ * that the clock cannot be assumed to be correct.
++ */
++
++ value &= ~REG_SECONDS_OS;
++
++ err = pcf8523_write(client, REG_SECONDS, value);
++ if (err < 0)
++ return err;
++
++ err = pcf8523_read(client, REG_SECONDS, &value);
++ if (err < 0)
++ return err;
++
++ if (value & REG_SECONDS_OS)
++ return -EAGAIN;
++ }
++
++ return 0;
++}
++
++static int pcf8523_switch_capacitance(struct i2c_client *client)
++{
++ u8 value;
++ int err;
++
++ err = pcf8523_read(client, REG_CONTROL1, &value);
++ if (err < 0)
++ goto out;
++
++ value ^= REG_CONTROL1_CAP_SEL;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
++
++out:
++ return err;
++}
++
++static int pcf8523_enable_oscillator(struct i2c_client *client)
++{
++ int err, loop;
++
++ loop = 0;
++ while (loop < 200) {
++ err = pcf8523_rtc_check_oscillator(client);
++ if (!err)
++ return 0;
++ loop++;
++ msleep(10);
++ }
++
++ err = pcf8523_switch_capacitance(client);
+ if (err < 0)
+- return err;
++ goto out;
++
++ loop = 0;
++ while (loop < 200) {
++ err = pcf8523_rtc_check_oscillator(client);
++ if (!err)
++ return 0;
++ loop++;
++ msleep(10);
++ }
+
++out:
+ return err;
+ }
+
+@@ -290,6 +352,7 @@
+ const struct i2c_device_id *id)
+ {
+ struct pcf8523 *pcf;
++ u8 value;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+@@ -299,10 +362,20 @@
+ if (!pcf)
+ return -ENOMEM;
+
+- err = pcf8523_select_capacitance(client, true);
++ /* Check whether the RTC reports battery low */
++ err = pcf8523_read(client, REG_CONTROL3, &value);
+ if (err < 0)
+ return err;
+
++ if (value & REG_CONTROL3_BLF)
++ dev_warn(&client->dev, "RTC reports battery is low\n");
++
++ err = pcf8523_enable_oscillator(client);
++ if (err < 0) {
++ dev_warn(&client->dev, "RTC reports oscillator is not running\n");
++ return err;
++ }
++
+ err = pcf8523_set_pm(client, 0);
+ if (err < 0)
+ return err;
+diff -Nur linux-3.14.36/drivers/rtc/rtc-snvs.c linux-openelec/drivers/rtc/rtc-snvs.c
+--- linux-3.14.36/drivers/rtc/rtc-snvs.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/rtc/rtc-snvs.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2011-2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+@@ -41,6 +41,8 @@
+ spinlock_t lock;
+ };
+
++static void __iomem *snvs_base;
++
+ static u32 rtc_read_lp_counter(void __iomem *ioaddr)
+ {
+ u64 read1, read2;
+@@ -241,6 +243,15 @@
+ return events ? IRQ_HANDLED : IRQ_NONE;
+ }
+
++static void snvs_poweroff(void)
++{
++ u32 value;
++
++ value = readl(snvs_base + SNVS_LPCR);
++ /* set TOP and DP_EN bit */
++ writel(value | 0x60, snvs_base + SNVS_LPCR);
++}
++
+ static int snvs_rtc_probe(struct platform_device *pdev)
+ {
+ struct snvs_rtc_data *data;
+@@ -270,13 +281,15 @@
+ /* Clear interrupt status */
+ writel(0xffffffff, data->ioaddr + SNVS_LPSR);
+
++ snvs_base = data->ioaddr;
+ /* Enable RTC */
+ snvs_rtc_enable(data, true);
+
+ device_init_wakeup(&pdev->dev, true);
+
+ ret = devm_request_irq(&pdev->dev, data->irq, snvs_rtc_irq_handler,
+- IRQF_SHARED, "rtc alarm", &pdev->dev);
++ IRQF_SHARED | IRQF_NO_SUSPEND,
++ "rtc alarm", &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq %d: %d\n",
+ data->irq, ret);
+@@ -290,6 +303,12 @@
+ dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
+ return ret;
+ }
++ /*
++ * if no specific power off function in board file, power off system by
++ * SNVS
++ */
++ if (!pm_power_off)
++ pm_power_off = snvs_poweroff;
+
+ return 0;
+ }
+diff -Nur linux-3.14.36/drivers/scsi/scsi_transport_iscsi.c linux-openelec/drivers/scsi/scsi_transport_iscsi.c
+--- linux-3.14.36/drivers/scsi/scsi_transport_iscsi.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/scsi/scsi_transport_iscsi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1225,7 +1225,7 @@
+ * Adds a sysfs entry for the flashnode session attributes
+ *
+ * Returns:
+- * pointer to allocated flashnode sess on sucess
++ * pointer to allocated flashnode sess on success
+ * %NULL on failure
+ */
+ struct iscsi_bus_flash_session *
+@@ -1423,7 +1423,7 @@
+ }
+
+ /**
+- * iscsi_destroy_flashnode_sess - destory flashnode session entry
++ * iscsi_destroy_flashnode_sess - destroy flashnode session entry
+ * @fnode_sess: pointer to flashnode session entry to be destroyed
+ *
+ * Deletes the flashnode session entry and all children flashnode connection
+@@ -1453,7 +1453,7 @@
+ }
+
+ /**
+- * iscsi_destroy_all_flashnode - destory all flashnode session entries
++ * iscsi_destroy_all_flashnode - destroy all flashnode session entries
+ * @shost: pointer to host data
+ *
+ * Destroys all the flashnode session entries and all corresponding children
+diff -Nur linux-3.14.36/drivers/staging/bcm/Typedefs.h linux-openelec/drivers/staging/bcm/Typedefs.h
+--- linux-3.14.36/drivers/staging/bcm/Typedefs.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/staging/bcm/Typedefs.h 2015-05-06 12:05:42.000000000 -0500
+@@ -25,16 +25,16 @@
+ typedef unsigned long ULONG;
+ typedef unsigned long DWORD;
+
+-typedef char* PCHAR;
+-typedef short* PSHORT;
+-typedef int* PINT;
+-typedef long* PLONG;
+-typedef void* PVOID;
++typedef char *PCHAR;
++typedef short *PSHORT;
++typedef int *PINT;
++typedef long *PLONG;
++typedef void *PVOID;
+
+-typedef unsigned char* PUCHAR;
+-typedef unsigned short* PUSHORT;
+-typedef unsigned int* PUINT;
+-typedef unsigned long* PULONG;
++typedef unsigned char *PUCHAR;
++typedef unsigned short *PUSHORT;
++typedef unsigned int *PUINT;
++typedef unsigned long *PULONG;
+ typedef unsigned long long ULONG64;
+ typedef unsigned long long LARGE_INTEGER;
+ typedef unsigned int UINT32;
+diff -Nur linux-3.14.36/drivers/staging/media/lirc/Kconfig linux-openelec/drivers/staging/media/lirc/Kconfig
+--- linux-3.14.36/drivers/staging/media/lirc/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/staging/media/lirc/Kconfig 2015-07-24 18:03:29.968842002 -0500
+@@ -38,6 +38,12 @@
+ help
+ Driver for Homebrew Parallel Port Receivers
+
++config LIRC_GPIO
++ tristate "Homebrew GPIO Port Receiver/Transmitter"
++ depends on LIRC
++ help
++ Driver for Homebrew GPIO Port Receiver/Transmitter
++
+ config LIRC_SASEM
+ tristate "Sasem USB IR Remote"
+ depends on LIRC && USB
+@@ -63,10 +69,17 @@
+ help
+ Driver for the SIR IrDA port
+
++config LIRC_XBOX
++ tristate "XBOX USB IR Remote"
++ depends on LIRC && USB
++ help
++ Driver for the Microsoft XBOX USB IR Remote
++
+ config LIRC_ZILOG
+ tristate "Zilog/Hauppauge IR Transmitter"
+ depends on LIRC && I2C
+ help
+ Driver for the Zilog/Hauppauge IR Transmitter, found on
+ PVR-150/500, HVR-1200/1250/1700/1800, HD-PVR and other cards
++
+ endif
+diff -Nur linux-3.14.36/drivers/staging/media/lirc/Kconfig.orig linux-openelec/drivers/staging/media/lirc/Kconfig.orig
+--- linux-3.14.36/drivers/staging/media/lirc/Kconfig.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/staging/media/lirc/Kconfig.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,78 @@
++#
++# LIRC driver(s) configuration
++#
++menuconfig LIRC_STAGING
++ bool "Linux Infrared Remote Control IR receiver/transmitter drivers"
++ depends on LIRC
++ help
++ Say Y here, and all supported Linux Infrared Remote Control IR and
++ RF receiver and transmitter drivers will be displayed. When paired
++ with a remote control and the lirc daemon, the receiver drivers
++ allow control of your Linux system via remote control.
++
++if LIRC_STAGING
++
++config LIRC_BT829
++ tristate "BT829 based hardware"
++ depends on LIRC && PCI
++ help
++ Driver for the IR interface on BT829-based hardware
++
++config LIRC_IGORPLUGUSB
++ tristate "Igor Cesko's USB IR Receiver"
++ depends on LIRC && USB
++ help
++ Driver for Igor Cesko's USB IR Receiver
++
++config LIRC_IMON
++ tristate "Legacy SoundGraph iMON Receiver and Display"
++ depends on LIRC && USB
++ help
++ Driver for the original SoundGraph iMON IR Receiver and Display
++
++ Current generation iMON devices use the input layer imon driver.
++
++config LIRC_PARALLEL
++ tristate "Homebrew Parallel Port Receiver"
++ depends on LIRC && PARPORT
++ help
++ Driver for Homebrew Parallel Port Receivers
++
++config LIRC_GPIO
++ tristate "Homebrew GPIO Port Receiver/Transmitter"
++ depends on LIRC
++ help
++ Driver for Homebrew GPIO Port Receiver/Transmitter
++
++config LIRC_SASEM
++ tristate "Sasem USB IR Remote"
++ depends on LIRC && USB
++ help
++ Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module
++
++config LIRC_SERIAL
++ tristate "Homebrew Serial Port Receiver"
++ depends on LIRC
++ help
++ Driver for Homebrew Serial Port Receivers
++
++config LIRC_SERIAL_TRANSMITTER
++ bool "Serial Port Transmitter"
++ default y
++ depends on LIRC_SERIAL
++ help
++ Serial Port Transmitter support
++
++config LIRC_SIR
++ tristate "Built-in SIR IrDA port"
++ depends on LIRC
++ help
++ Driver for the SIR IrDA port
++
++config LIRC_ZILOG
++ tristate "Zilog/Hauppauge IR Transmitter"
++ depends on LIRC && I2C
++ help
++ Driver for the Zilog/Hauppauge IR Transmitter, found on
++ PVR-150/500, HVR-1200/1250/1700/1800, HD-PVR and other cards
++endif
+diff -Nur linux-3.14.36/drivers/staging/media/lirc/lirc_gpio.c linux-openelec/drivers/staging/media/lirc/lirc_gpio.c
+--- linux-3.14.36/drivers/staging/media/lirc/lirc_gpio.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/staging/media/lirc/lirc_gpio.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,782 @@
++/*
++ * lirc_gpio.c
++ *
++ * lirc_gpio - Device driver that records pulse- and pause-lengths
++ * (space-lengths) (just like the lirc_serial driver does)
++ * between GPIO interrupt events on GPIO capable devices.
++ * Lots of code has been taken from the lirc_serial and the
++ * lirc_rpi modules so I would like say thanks to the authors.
++ *
++ * Copyright (C) 2014 CurlyMo <curlymoo1@gmail.com>
++ * Aron Robert Szabo <aron@reon.hu>,
++ * Michael Bishop <cleverca22@gmail.com>
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++/*
++ lirc_gpio {
++ compatible = "lirc_gpio";
++ gpios = <&gpio3 6 1 &gpio3 7 2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_gpio3_6>;
++ pinctrl-1 = <&pinctrl_hummingboard_gpio3_7>;
++ linux,sense = <-1>;
++ linux,softcarrier = <1>;
++ linux,validgpios = <1 73 72 71 70 194 195 67>;
++ };
++ */
++
++
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/time.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/platform_device.h>
++#include <linux/irq.h>
++#include <linux/spinlock.h>
++#include <media/lirc.h>
++#include <media/lirc_dev.h>
++#include <linux/gpio.h>
++#include <linux/of.h>
++#include <linux/of_gpio.h>
++
++#define LIRC_DRIVER_NAME "lirc_gpio"
++#define RBUF_LEN 256
++#define LIRC_TRANSMITTER_LATENCY 256
++
++#ifndef MAX_UDELAY_MS
++#define MAX_UDELAY_US 5000
++#else
++#define MAX_UDELAY_US (MAX_UDELAY_MS*1000)
++#endif
++
++static ssize_t lirc_write(struct file *file, const char *buf, size_t n, loff_t *ppos);
++static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
++static int set_use_inc(void *data);
++static void set_use_dec(void *data);
++static int lirc_gpio_probe(struct platform_device *pdev);
++static int lirc_gpio_remove(struct platform_device *pdev);
++
++struct lirc_gpio_platform_data {
++ int gpio_rx_nr;
++ int gpio_tx_nr;
++ bool active_rx_low;
++ bool active_tx_low;
++ u64 allowed_rx_protos;
++ u64 allowed_tx_protos;
++ int sense;
++ int softcarrier;
++ int validgpios[255];
++};
++
++struct lirc_gpio_dev {
++ int gpio_rx_nr;
++ int gpio_tx_nr;
++ int sense;
++ int softcarrier;
++ int validgpios[255];
++};
++
++struct lirc_gpio_dev *gpio_dev;
++
++static const struct file_operations lirc_fops = {
++ .owner = THIS_MODULE,
++ .write = lirc_write,
++ .unlocked_ioctl = lirc_ioctl,
++ .read = lirc_dev_fop_read,
++ .poll = lirc_dev_fop_poll,
++ .open = lirc_dev_fop_open,
++ .release = lirc_dev_fop_close,
++ .llseek = no_llseek,
++};
++
++struct irq_chip *irqchip;
++struct irq_data *irqdata;
++
++static struct timeval lasttv = { 0, 0 };
++static struct lirc_buffer rbuf;
++static spinlock_t lock;
++
++/* set the default GPIO input pin */
++static int gpio_in_pin = -1;
++/* set the default GPIO output pin */
++static int gpio_out_pin = -1;
++/* -1 = auto, 0 = active high, 1 = active low */
++static int sense = -2;
++/* use softcarrier by default */
++static int softcarrier = -1;
++
++/* initialized/set in init_timing_params() */
++static unsigned int freq = 38000;
++static unsigned int duty_cycle = 50;
++static unsigned long period;
++static unsigned long pulse_width;
++static unsigned long space_width;
++
++static struct lirc_driver driver = {
++ .name = LIRC_DRIVER_NAME,
++ .minor = -1,
++ .code_length = 1,
++ .sample_rate = 0,
++ .data = NULL,
++ .add_to_buf = NULL,
++ .rbuf = &rbuf,
++ .set_use_inc = set_use_inc,
++ .set_use_dec = set_use_dec,
++ .fops = &lirc_fops,
++ .dev = NULL,
++ .owner = THIS_MODULE,
++};
++
++static struct of_device_id lirc_gpio_of_match[] = {
++ { .compatible = "lirc_gpio", },
++ {}
++};
++
++static struct platform_driver lirc_gpio_driver = {
++ .probe = lirc_gpio_probe,
++ .remove = lirc_gpio_remove,
++ .driver = {
++ .name = LIRC_DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = lirc_gpio_of_match,
++ },
++};
++
++static void safe_udelay(unsigned long usecs) {
++ while (usecs > MAX_UDELAY_US) {
++ udelay(MAX_UDELAY_US);
++ usecs -= MAX_UDELAY_US;
++ }
++ udelay(usecs);
++}
++
++static int init_timing_params(unsigned int new_duty_cycle, unsigned int new_freq) {
++ /*
++ * period, pulse/space width are kept with 8 binary places -
++ * IE multiplied by 256.
++ */
++ if(256 * 1000000L / new_freq * new_duty_cycle / 100 <=
++ LIRC_TRANSMITTER_LATENCY)
++ return -EINVAL;
++ if(256 * 1000000L / new_freq * (100 - new_duty_cycle) / 100 <=
++ LIRC_TRANSMITTER_LATENCY)
++ return -EINVAL;
++ duty_cycle = new_duty_cycle;
++ freq = new_freq;
++ period = 256 * 1000000L / freq;
++ pulse_width = period * duty_cycle / 100;
++ space_width = period - pulse_width;
++ return 0;
++}
++
++
++static long send_pulse_softcarrier(unsigned long length) {
++ int flag;
++ unsigned long actual, target, d;
++
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ length <<= 8;
++
++ actual = 0; target = 0; flag = 0;
++ while(actual < length) {
++ if(flag) {
++ gpio_set_value(gpio_dev->gpio_tx_nr, 0);
++ target += space_width;
++ } else {
++ gpio_set_value(gpio_dev->gpio_tx_nr, 1);
++ target += pulse_width;
++ }
++ d = (target - actual - LIRC_TRANSMITTER_LATENCY + 128) >> 8;
++ /*
++ * Note - we've checked in ioctl that the pulse/space
++ * widths are big enough so that d is > 0
++ */
++ udelay(d);
++ actual += (d << 8) + LIRC_TRANSMITTER_LATENCY;
++ flag = !flag;
++ }
++ return (actual-length) >> 8;
++ }
++ return 0;
++}
++
++static long send_pulse(unsigned long length) {
++ if(length <= 0)
++ return 0;
++
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ if(gpio_dev->softcarrier) {
++ return send_pulse_softcarrier(length);
++ } else {
++ gpio_set_value(gpio_dev->gpio_tx_nr, 1);
++ safe_udelay(length);
++ return 0;
++ }
++ }
++ return 0;
++}
++
++static void send_space(long length) {
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ gpio_set_value(gpio_dev->gpio_tx_nr, 0);
++ if(length <= 0)
++ return;
++ safe_udelay(length);
++ }
++}
++
++static void rbwrite(int l) {
++ if (lirc_buffer_full(&rbuf)) {
++ /* no new signals will be accepted */
++ return;
++ }
++ lirc_buffer_write(&rbuf, (void *)&l);
++}
++
++static void frbwrite(int l) {
++ /* simple noise filter */
++ static int pulse, space;
++ static unsigned int ptr;
++
++ if(ptr > 0 && (l & PULSE_BIT)) {
++ pulse += l & PULSE_MASK;
++ if(pulse > 250) {
++ rbwrite(space);
++ rbwrite(pulse | PULSE_BIT);
++ ptr = 0;
++ pulse = 0;
++ }
++ return;
++ }
++ if(!(l & PULSE_BIT)) {
++ if(ptr == 0) {
++ if (l > 20000) {
++ space = l;
++ ptr++;
++ return;
++ }
++ } else {
++ if(l > 20000) {
++ space += pulse;
++ if (space > PULSE_MASK)
++ space = PULSE_MASK;
++ space += l;
++ if (space > PULSE_MASK)
++ space = PULSE_MASK;
++ pulse = 0;
++ return;
++ }
++ rbwrite(space);
++ rbwrite(pulse | PULSE_BIT);
++ ptr = 0;
++ pulse = 0;
++ }
++ }
++ rbwrite(l);
++}
++
++static irqreturn_t irq_handler(int i, void *blah, struct pt_regs *regs) {
++ struct timeval tv;
++ long deltv;
++ int data;
++ int signal;
++
++ /* use the GPIO signal level */
++ signal = gpio_get_value(gpio_dev->gpio_rx_nr);
++
++ /* unmask the irq */
++ irqchip->irq_unmask(irqdata);
++
++ if(gpio_dev->sense != -1) {
++ /* The HB GPIO input acts like it is an analogue input.
++ Therefor a high signal is 256 and a low signal is 1.
++ For Lirc to properly interpret the spaces and pulses,
++ we need to transform these to ones and zeros. To be
++ on the safe side, every signal higher then 128 will
++ be interpreted as a high and vice versa. */
++ if (signal > 128) {
++ signal = 1;
++ } else {
++ signal = 0;
++ }
++ /* get current time */
++ do_gettimeofday(&tv);
++
++ /* calc time since last interrupt in microseconds */
++ deltv = tv.tv_sec-lasttv.tv_sec;
++ if(tv.tv_sec < lasttv.tv_sec ||
++ (tv.tv_sec == lasttv.tv_sec &&
++ tv.tv_usec < lasttv.tv_usec)) {
++ printk(KERN_WARNING LIRC_DRIVER_NAME
++ ": AIEEEE: your clock just jumped backwards\n");
++ printk(KERN_WARNING LIRC_DRIVER_NAME
++ ": %d %d %lx %lx %lx %lx\n", signal, gpio_dev->sense,
++ tv.tv_sec, lasttv.tv_sec,
++ tv.tv_usec, lasttv.tv_usec);
++ data = PULSE_MASK;
++ } else if (deltv > 15) {
++ data = PULSE_MASK; /* really long time */
++ if(!(signal^gpio_dev->sense)) {
++ /* sanity check */
++ printk(KERN_WARNING LIRC_DRIVER_NAME
++ ": AIEEEE: %d %d %lx %lx %lx %lx\n",
++ signal, gpio_dev->sense, tv.tv_sec, lasttv.tv_sec,
++ tv.tv_usec, lasttv.tv_usec);
++ /*
++ * detecting pulse while this
++ * MUST be a space!
++ */
++ gpio_dev->sense = gpio_dev->sense ? 0 : 1;
++ }
++ } else {
++ data = (int) (deltv*1000000 +
++ (tv.tv_usec - lasttv.tv_usec));
++ }
++ frbwrite(signal^gpio_dev->sense ? data : (data|PULSE_BIT));
++ lasttv = tv;
++ wake_up_interruptible(&rbuf.wait_poll);
++ }
++
++ return IRQ_HANDLED;
++}
++
++// called when the character device is opened
++static int set_use_inc(void *data) {
++ int result;
++ unsigned long flags;
++
++ /* initialize timestamp */
++ do_gettimeofday(&lasttv);
++
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ result = request_irq(gpio_to_irq(gpio_dev->gpio_rx_nr),
++ (irq_handler_t) irq_handler, 0,
++ LIRC_DRIVER_NAME, (void*) 0);
++
++ switch (result) {
++ case -EBUSY:
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": IRQ %d is busy\n",
++ gpio_to_irq(gpio_dev->gpio_rx_nr));
++ return -EBUSY;
++ case -EINVAL:
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": Bad irq number or handler\n");
++ return -EINVAL;
++ default:
++ break;
++ };
++
++ /* initialize pulse/space widths */
++ init_timing_params(duty_cycle, freq);
++
++ spin_lock_irqsave(&lock, flags);
++
++ /* GPIO Pin Falling/Rising Edge Detect Enable */
++ irqchip->irq_set_type(irqdata,
++ IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING);
++
++ /* unmask the irq */
++ irqchip->irq_unmask(irqdata);
++
++ spin_unlock_irqrestore(&lock, flags);
++ }
++
++ return 0;
++}
++
++static void set_use_dec(void *data) {
++ unsigned long flags;
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ spin_lock_irqsave(&lock, flags);
++
++ /* GPIO Pin Falling/Rising Edge Detect Disable */
++ irqchip->irq_set_type(irqdata, 0);
++ irqchip->irq_mask(irqdata);
++
++ spin_unlock_irqrestore(&lock, flags);
++
++ free_irq(gpio_to_irq(gpio_dev->gpio_rx_nr), (void *) 0);
++ }
++}
++
++static ssize_t lirc_write(struct file *file, const char *buf, size_t n, loff_t *ppos) {
++ int i, count;
++ unsigned long flags;
++ long delta = 0;
++ int *wbuf;
++
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ count = n / sizeof(int);
++ if(n % sizeof(int) || count % 2 == 0)
++ return -EINVAL;
++ wbuf = memdup_user(buf, n);
++ if(IS_ERR(wbuf))
++ return PTR_ERR(wbuf);
++ spin_lock_irqsave(&lock, flags);
++
++ for(i = 0; i < count; i++) {
++ if(i%2)
++ send_space(wbuf[i] - delta);
++ else
++ delta = send_pulse(wbuf[i]);
++ }
++ gpio_set_value(gpio_dev->gpio_tx_nr, 0);
++
++ spin_unlock_irqrestore(&lock, flags);
++ kfree(wbuf);
++ return n;
++ }
++ return 0;
++}
++
++
++static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) {
++ int result;
++ __u32 value;
++
++ switch(cmd) {
++ case LIRC_GET_SEND_MODE:
++ return -ENOIOCTLCMD;
++ break;
++
++ case LIRC_SET_SEND_MODE:
++ result = get_user(value, (__u32 *) arg);
++ if(result)
++ return result;
++ /* only LIRC_MODE_PULSE supported */
++ if(value != LIRC_MODE_PULSE)
++ return -ENOSYS;
++ break;
++
++ case LIRC_GET_LENGTH:
++ return -ENOSYS;
++ break;
++
++ case LIRC_SET_SEND_DUTY_CYCLE:
++ result = get_user(value, (__u32 *) arg);
++ if (result)
++ return result;
++ if (value <= 0 || value > 100)
++ return -EINVAL;
++ return init_timing_params(value, freq);
++ break;
++
++ case LIRC_SET_SEND_CARRIER:
++ result = get_user(value, (__u32 *) arg);
++ if(result)
++ return result;
++ if(value > 500000 || value < 20000)
++ return -EINVAL;
++ return init_timing_params(duty_cycle, value);
++ break;
++
++ default:
++ return lirc_dev_fop_ioctl(filep, cmd, arg);
++ }
++ return 0;
++}
++
++static int lirc_gpio_get_devtree_pdata(struct device *dev, struct lirc_gpio_platform_data *pdata) {
++ struct device_node *np = dev->of_node;
++ enum of_gpio_flags flags;
++ struct property *prop;
++ const __be32 *cur;
++ int gpio = -1;
++ int ret = 0;
++ int i = 0;
++
++ if(np) {
++ gpio = of_get_gpio_flags(np, 0, &flags);
++ if(gpio < 0) {
++ if(gpio != -EPROBE_DEFER)
++ dev_err(dev, "RX gpio not defined (%d)\n", gpio);
++
++ pdata->gpio_rx_nr = -1;
++ pdata->active_rx_low = 0;
++ pdata->allowed_rx_protos = 0;
++ } else {
++ pdata->gpio_rx_nr = gpio;
++ pdata->active_rx_low = (flags & OF_GPIO_ACTIVE_LOW);
++ pdata->allowed_rx_protos = 0;
++ }
++
++ gpio = of_get_gpio_flags(np, 1, &flags);
++ if(gpio < 0) {
++ if(gpio != -EPROBE_DEFER)
++ dev_err(dev, "TX gpio not defined (%d)\n", gpio);
++
++ pdata->gpio_tx_nr = -1;
++ pdata->active_tx_low = 0;
++ pdata->allowed_tx_protos = 0;
++ } else {
++ pdata->gpio_tx_nr = gpio;
++ pdata->active_tx_low = (flags & OF_GPIO_ACTIVE_LOW);
++ pdata->allowed_tx_protos = 0;
++ }
++ ret = of_property_read_u32(np, "linux,sense", &pdata->sense);
++ if(ret) {
++ pdata->sense = -1;
++ }
++ ret = of_property_read_u32(np, "linux,softcarrier", &pdata->softcarrier);
++ if(ret) {
++ pdata->softcarrier = 1;
++ }
++ i = 0;
++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": valid gpios");
++ of_property_for_each_u32(np, "linux,validgpios", prop, cur, gpio) {
++ printk(" %d", gpio);
++ pdata->validgpios[i++] = gpio;
++ }
++ printk("\n");
++ pdata->validgpios[i] = -1;
++ }
++
++ return 0;
++}
++
++static int init_port(void) {
++ int i, nlow, nhigh, ret, irq;
++
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ if(gpio_request(gpio_dev->gpio_tx_nr, LIRC_DRIVER_NAME " ir/out")) {
++ printk(KERN_ALERT LIRC_DRIVER_NAME ": cant claim gpio pin %d\n", gpio_dev->gpio_tx_nr);
++ ret = -ENODEV;
++ goto exit_init_port;
++ }
++ }
++
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ if(gpio_request(gpio_dev->gpio_rx_nr, LIRC_DRIVER_NAME " ir/in")) {
++ printk(KERN_ALERT LIRC_DRIVER_NAME ": cant claim gpio pin %d\n", gpio_dev->gpio_rx_nr);
++ ret = -ENODEV;
++ goto exit_gpio_free_out_pin;
++ }
++ }
++
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ gpio_direction_input(gpio_dev->gpio_rx_nr);
++ }
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ gpio_direction_output(gpio_dev->gpio_tx_nr, 1);
++ gpio_set_value(gpio_dev->gpio_tx_nr, 0);
++ }
++
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ irq = gpio_to_irq(gpio_dev->gpio_rx_nr);
++ irqdata = irq_get_irq_data(irq);
++
++ if(irqdata && irqdata->chip) {
++ irqchip = irqdata->chip;
++ } else {
++ ret = -ENODEV;
++ goto exit_gpio_free_in_pin;
++ }
++
++ /* if pin is high, then this must be an active low receiver. */
++ if(gpio_dev->sense == -1) {
++ /* wait 1/2 sec for the power supply */
++ msleep(500);
++
++ /*
++ * probe 9 times every 0.04s, collect "votes" for
++ * active high/low
++ */
++ nlow = 0;
++ nhigh = 0;
++ for(i = 0; i < 9; i++) {
++ if(gpio_get_value(gpio_dev->gpio_rx_nr))
++ nlow++;
++ else
++ nhigh++;
++ msleep(40);
++ }
++ gpio_dev->sense = (nlow >= nhigh ? 1 : 0);
++ printk(KERN_INFO LIRC_DRIVER_NAME ": auto-detected active %s receiver on GPIO pin %d\n",
++ gpio_dev->sense ? "low" : "high", gpio_dev->gpio_rx_nr);
++ } else {
++ printk(KERN_INFO LIRC_DRIVER_NAME ": manually using active %s receiver on GPIO pin %d\n",
++ gpio_dev->sense ? "low" : "high", gpio_dev->gpio_rx_nr);
++ }
++ }
++
++ return 0;
++
++exit_gpio_free_in_pin:
++ gpio_free(gpio_dev->gpio_rx_nr);
++
++exit_gpio_free_out_pin:
++ gpio_free(gpio_dev->gpio_tx_nr);
++
++exit_init_port:
++ return ret;
++}
++
++static void lirc_gpio_exit(void) {
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ gpio_free(gpio_dev->gpio_tx_nr);
++ }
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ gpio_free(gpio_dev->gpio_rx_nr);
++ }
++
++ lirc_unregister_driver(driver.minor);
++ lirc_buffer_free(&rbuf);
++}
++
++static int lirc_gpio_probe(struct platform_device *pdev) {
++ const struct lirc_gpio_platform_data *pdata =
++ pdev->dev.platform_data;
++ int rc;
++ int result = 0;
++ int match = 0;
++ int i = 0;
++
++ if(pdev->dev.of_node) {
++ struct lirc_gpio_platform_data *dtpdata = devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
++ if(!dtpdata)
++ return -ENOMEM;
++ rc = lirc_gpio_get_devtree_pdata(&pdev->dev, dtpdata);
++ if(rc)
++ return rc;
++ pdata = dtpdata;
++ }
++
++ if(!pdata)
++ return -EINVAL;
++
++ gpio_dev = kzalloc(sizeof(struct lirc_gpio_dev), GFP_KERNEL);
++ if(!gpio_dev)
++ return -ENOMEM;
++
++ gpio_dev->gpio_rx_nr = pdata->gpio_rx_nr;
++ gpio_dev->gpio_tx_nr = pdata->gpio_tx_nr;
++ gpio_dev->sense = pdata->sense;
++ gpio_dev->softcarrier = pdata->softcarrier;
++ memcpy(gpio_dev->validgpios, pdata->validgpios, 255);
++
++ if(gpio_in_pin != gpio_out_pin) {
++ match = 0;
++ for(i = 0; (i < ARRAY_SIZE(gpio_dev->validgpios)) && (!match) && (gpio_dev->validgpios[i] != -1); i++) {
++ if(gpio_in_pin == gpio_dev->validgpios[i]) {
++ match = 1;
++ break;
++ }
++ }
++ if(gpio_in_pin > -1) {
++ if(!match) {
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": invalid RX GPIO pin specified!\n");
++ return -EINVAL;
++ } else {
++ gpio_dev->gpio_rx_nr = gpio_in_pin;
++ }
++ }
++ match = 0;
++ for(i = 0; (i < ARRAY_SIZE(gpio_dev->validgpios)) && (!match) && (gpio_dev->validgpios[i] != -1); i++) {
++ if(gpio_out_pin == gpio_dev->validgpios[i]) {
++ match = 1;
++ break;
++ }
++ }
++ if(gpio_out_pin > -1) {
++ if(!match) {
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": invalid TX GPIO pin specified!\n");
++ return -EINVAL;
++ } else {
++ gpio_dev->gpio_tx_nr = gpio_out_pin;
++ }
++ }
++ }
++ if(sense > -2) {
++ gpio_dev->sense = sense;
++ }
++ if(softcarrier >= 0) {
++ gpio_dev->softcarrier = softcarrier;
++ }
++
++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": rx %d, tx %d, sense %d, softcarrier %d\n",
++ gpio_dev->gpio_rx_nr, gpio_dev->gpio_tx_nr, gpio_dev->sense, gpio_dev->softcarrier);
++
++ platform_set_drvdata(pdev, gpio_dev);
++
++ result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN);
++ if(result < 0)
++ return -ENOMEM;
++
++ driver.features = LIRC_CAN_SET_SEND_DUTY_CYCLE |
++ LIRC_CAN_SET_SEND_CARRIER |
++ LIRC_CAN_SEND_PULSE |
++ LIRC_CAN_REC_MODE2;
++
++ driver.dev = &pdev->dev;
++ driver.minor = lirc_register_driver(&driver);
++
++ if(driver.minor < 0) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": device registration failed with %d\n", result);
++ result = -EIO;
++ goto exit_gpio;
++ }
++
++ result = init_port();
++ if(result < 0)
++ goto exit_gpio;
++
++ return 0;
++
++exit_gpio:
++ lirc_gpio_exit();
++
++ return result;
++}
++
++static int lirc_gpio_remove(struct platform_device *pdev) {
++ struct lirc_gpio_dev *gpio_dev = platform_get_drvdata(pdev);
++
++ lirc_gpio_exit();
++
++ kfree(gpio_dev);
++
++ return 0;
++}
++
++MODULE_DEVICE_TABLE(of, lirc_gpio_of_match);
++module_platform_driver(lirc_gpio_driver);
++
++MODULE_DESCRIPTION("Infra-red GPIO receiver and blaster driver.");
++MODULE_AUTHOR("CurlyMo <development@xbian.org>");
++MODULE_AUTHOR("Aron Robert Szabo <aron@reon.hu>");
++MODULE_AUTHOR("Michael Bishop <cleverca22@gmail.com>");
++MODULE_LICENSE("GPL");
++
++module_param(gpio_out_pin, int, S_IRUGO);
++MODULE_PARM_DESC(gpio_out_pin, "GPIO output/transmitter pin number");
++
++module_param(gpio_in_pin, int, S_IRUGO);
++MODULE_PARM_DESC(gpio_in_pin, "GPIO input/receiver pin number.");
++
++module_param(sense, int, S_IRUGO);
++MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit"
++ " (0 = active high, 1 = active low )");
++
++module_param(softcarrier, int, S_IRUGO);
++MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
++
+diff -Nur linux-3.14.36/drivers/staging/media/lirc/lirc_xbox.c linux-openelec/drivers/staging/media/lirc/lirc_xbox.c
+--- linux-3.14.36/drivers/staging/media/lirc/lirc_xbox.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/staging/media/lirc/lirc_xbox.c 2015-07-24 18:03:29.972842002 -0500
+@@ -0,0 +1,995 @@
++/*
++ * lirc_xbox - USB remote support for LIRC
++ * (supports Microsoft XBOX DVD Dongle)
++ *
++ * Copyright (C) 2003-2004 Paul Miller <pmiller9@users.sourceforge.net>
++ *
++ * This driver was derived from:
++ * Vladimir Dergachev <volodya@minspring.com>'s 2002
++ * "USB ATI Remote support" (input device)
++ * Adrian Dewhurst <sailor-lk@sailorfrag.net>'s 2002
++ * "USB StreamZap remote driver" (LIRC)
++ * Artur Lipowski <alipowski@kki.net.pl>'s 2002
++ * "lirc_dev" and "lirc_gpio" LIRC modules
++ * Michael Wojciechowski
++ * initial xbox support
++ * Vassilis Virvilis <vasvir@iit.demokritos.gr> 2006
++ * reworked the patch for lirc submission
++ * Paul Miller's <pmiller9@users.sourceforge.net> 2004
++ * lirc_atiusb - removed all ati remote support
++ * $Id: lirc_xbox.c,v 1.88 2011/06/03 11:11:11 jmartin Exp $
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/version.h>
++
++//#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
++//#include <linux/autoconf.h>
++//#endif
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/kmod.h>
++#include <linux/completion.h>
++#include <linux/uaccess.h>
++#include <linux/usb.h>
++#include <linux/poll.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++
++//#include "drivers/kcompat.h"
++//#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
++#include <media/lirc.h>
++#include <media/lirc_dev.h>
++//#else
++//#include "drivers/lirc.h"
++//#include "drivers/lirc_dev/lirc_dev.h"
++//#endif
++
++#define DRIVER_VERSION "$Revision: 0.01 $"
++#define DRIVER_AUTHOR "Jason Martin <austinspartan@users.sourceforge.net>"
++#define DRIVER_DESC "XBOX DVD Dongle USB remote driver for LIRC"
++#define DRIVER_NAME "lirc_xbox"
++
++#define CODE_LENGTH 6
++#define CODE_MIN_LENGTH 6
++#define DECODE_LENGTH 1
++
++#ifndef URB_ASYNC_UNLINK
++#define URB_ASYNC_UNLINK 0
++#endif
++
++/* module parameters */
++#ifdef CONFIG_USB_DEBUG
++static int debug = 1;
++#else
++static int debug;
++#endif
++
++#define dprintk(fmt, args...) \
++ do { \
++ if (debug) \
++ printk(KERN_DEBUG fmt, ## args); \
++ } while (0)
++
++/*
++ * USB_BUFF_LEN must be the maximum value of the code_length array.
++ * It is used for static arrays.
++ */
++#define USB_BUFF_LEN 6
++
++static int mask = 0xFFFF; /* channel acceptance bit mask */
++static int unique; /* enable channel-specific codes */
++static int repeat = 10; /* repeat time in 1/100 sec */
++static unsigned long repeat_jiffies; /* repeat timeout */
++
++/* get hi and low bytes of a 16-bits int */
++#define HI(a) ((unsigned char)((a) >> 8))
++#define LO(a) ((unsigned char)((a) & 0xff))
++
++/* general constants */
++#define SEND_FLAG_IN_PROGRESS 1
++#define SEND_FLAG_COMPLETE 2
++#define FREE_ALL 0xFF
++
++/* endpoints */
++#define EP_KEYS 0
++#define EP_MOUSE 1
++#define EP_MOUSE_ADDR 0x81
++#define EP_KEYS_ADDR 0x82
++
++/* USB vendor ids for XBOX DVD Dongles */
++#define VENDOR_MS1 0x040b
++#define VENDOR_MS2 0x045e
++#define VENDOR_MS3 0xFFFF
++
++static struct usb_device_id usb_remote_table[] = {
++ /* Gamester Xbox DVD Movie Playback Kit IR */
++ { USB_DEVICE(VENDOR_MS1, 0x6521) },
++
++ /* Microsoft Xbox DVD Movie Playback Kit IR */
++ { USB_DEVICE(VENDOR_MS2, 0x0284) },
++
++ /*
++ * Some Chinese manufacturer -- conflicts with the joystick from the
++ * same manufacturer
++ */
++ { USB_DEVICE(VENDOR_MS3, 0xFFFF) },
++
++ /* Terminating entry */
++ { }
++};
++
++/* init strings */
++#define USB_OUTLEN 7
++
++static char init1[] = {0x01, 0x00, 0x20, 0x14};
++static char init2[] = {0x01, 0x00, 0x20, 0x14, 0x20, 0x20, 0x20};
++
++struct in_endpt {
++ /* inner link in list of endpoints for the remote specified by ir */
++ struct list_head iep_list_link;
++ struct xbox_dev *ir;
++ struct urb *urb;
++ struct usb_endpoint_descriptor *ep;
++
++ /* buffers and dma */
++ unsigned char *buf;
++ unsigned int len;
++ dma_addr_t dma;
++
++ /* handle repeats */
++ unsigned char old[USB_BUFF_LEN];
++ unsigned long old_jiffies;
++};
++
++struct out_endpt {
++ struct xbox_dev *ir;
++ struct urb *urb;
++ struct usb_endpoint_descriptor *ep;
++
++ /* buffers and dma */
++ unsigned char *buf;
++ dma_addr_t dma;
++
++ /* handle sending (init strings) */
++ int send_flags;
++ wait_queue_head_t wait;
++};
++
++
++/* data structure for each usb remote */
++struct xbox_dev {
++ /* inner link in list of all remotes managed by this module */
++ struct list_head remote_list_link;
++ /* Number of usb interfaces associated with this device */
++ int dev_refcount;
++
++ /* usb */
++ struct usb_device *usbdev;
++ /* Head link to list of all inbound endpoints in this remote */
++ struct list_head iep_listhead;
++ struct out_endpt *out_init;
++ int devnum;
++
++ /* lirc */
++ struct lirc_driver *d;
++ int connected;
++
++ /* locking */
++ struct mutex lock;
++};
++
++/* list of all registered devices via the remote_list_link in xbox_dev */
++static struct list_head remote_list;
++
++/*
++ * Convenience macros to retrieve a pointer to the surrounding struct from
++ * the given list_head reference within, pointed at by link.
++ */
++#define get_iep_from_link(link) \
++ list_entry((link), struct in_endpt, iep_list_link);
++#define get_irctl_from_link(link) \
++ list_entry((link), struct xbox_dev, remote_list_link);
++
++/* send packet - used to initialize remote */
++static void send_packet(struct out_endpt *oep, u16 cmd, unsigned char *data)
++{
++ struct xbox_dev *ir = oep->ir;
++ DECLARE_WAITQUEUE(wait, current);
++ int timeout = HZ; /* 1 second */
++ unsigned char buf[USB_OUTLEN];
++
++ dprintk(DRIVER_NAME "[%d]: send called (%#x)\n", ir->devnum, cmd);
++
++ mutex_lock(&ir->lock);
++ oep->urb->transfer_buffer_length = LO(cmd) + 1;
++ oep->urb->dev = oep->ir->usbdev;
++ oep->send_flags = SEND_FLAG_IN_PROGRESS;
++
++ memcpy(buf+1, data, LO(cmd));
++ buf[0] = HI(cmd);
++ memcpy(oep->buf, buf, LO(cmd)+1);
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ add_wait_queue(&oep->wait, &wait);
++
++ if (usb_submit_urb(oep->urb, GFP_ATOMIC)) {
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&oep->wait, &wait);
++ mutex_unlock(&ir->lock);
++ return;
++ }
++ mutex_unlock(&ir->lock);
++
++ while (timeout && (oep->urb->status == -EINPROGRESS)
++ && !(oep->send_flags & SEND_FLAG_COMPLETE)) {
++ timeout = schedule_timeout(timeout);
++ rmb();
++ }
++
++ dprintk(DRIVER_NAME "[%d]: send complete (%#x)\n", ir->devnum, cmd);
++
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&oep->wait, &wait);
++ oep->urb->transfer_flags |= URB_ASYNC_UNLINK;
++ usb_unlink_urb(oep->urb);
++}
++
++static int unregister_from_lirc(struct xbox_dev *ir)
++{
++ struct lirc_driver *d = ir->d;
++ int devnum;
++
++ devnum = ir->devnum;
++ dprintk(DRIVER_NAME "[%d]: unregister from lirc called\n", devnum);
++
++ lirc_unregister_driver(d->minor);
++
++ printk(DRIVER_NAME "[%d]: usb remote disconnected\n", devnum);
++ return 0;
++}
++
++static int set_use_inc(void *data)
++{
++ struct xbox_dev *ir = data;
++ struct list_head *pos, *n;
++ struct in_endpt *iep;
++ int rtn;
++
++ if (!ir) {
++ printk(DRIVER_NAME "[?]: set_use_inc called with no context\n");
++ return -EIO;
++ }
++ dprintk(DRIVER_NAME "[%d]: set use inc\n", ir->devnum);
++
++ mutex_lock(&ir->lock);
++ if (!ir->connected) {
++ if (!ir->usbdev) {
++ mutex_unlock(&ir->lock);
++ dprintk(DRIVER_NAME "[%d]: !ir->usbdev\n", ir->devnum);
++ return -ENOENT;
++ }
++
++ /* Iterate through the inbound endpoints */
++ list_for_each_safe(pos, n, &ir->iep_listhead) {
++ /* extract the current in_endpt */
++ iep = get_iep_from_link(pos);
++ iep->urb->dev = ir->usbdev;
++ dprintk(DRIVER_NAME "[%d]: linking iep 0x%02x (%p)\n",
++ ir->devnum, iep->ep->bEndpointAddress, iep);
++ rtn = usb_submit_urb(iep->urb, GFP_ATOMIC);
++ if (rtn) {
++ printk(DRIVER_NAME "[%d]: open result = %d "
++ "error submitting urb\n",
++ ir->devnum, rtn);
++ mutex_unlock(&ir->lock);
++ return -EIO;
++ }
++ }
++ ir->connected = 1;
++ }
++ mutex_unlock(&ir->lock);
++
++ return 0;
++}
++
++static void set_use_dec(void *data)
++{
++ struct xbox_dev *ir = data;
++ struct list_head *pos, *n;
++ struct in_endpt *iep;
++
++ if (!ir) {
++ printk(DRIVER_NAME "[?]: set_use_dec called with no context\n");
++ return;
++ }
++ dprintk(DRIVER_NAME "[%d]: set use dec\n", ir->devnum);
++
++ mutex_lock(&ir->lock);
++ if (ir->connected) {
++ /* Free inbound usb urbs */
++ list_for_each_safe(pos, n, &ir->iep_listhead) {
++ iep = get_iep_from_link(pos);
++ dprintk(DRIVER_NAME "[%d]: unlinking iep 0x%02x (%p)\n",
++ ir->devnum, iep->ep->bEndpointAddress, iep);
++ usb_kill_urb(iep->urb);
++ }
++ ir->connected = 0;
++ }
++ mutex_unlock(&ir->lock);
++}
++
++static void print_data(struct in_endpt *iep, char *buf, int len)
++{
++ const int clen = CODE_LENGTH;
++ char codes[clen * 3 + 1];
++ int i;
++
++ if (len <= 0)
++ return;
++
++ for (i = 0; i < len && i < clen; i++)
++ snprintf(codes+i*3, 4, "%02x ", buf[i] & 0xFF);
++ printk(DRIVER_NAME "[%d]: data received %s (ep=0x%x length=%d)\n",
++ iep->ir->devnum, codes, iep->ep->bEndpointAddress, len);
++}
++
++static int code_check_xbox(struct in_endpt *iep, int len)
++{
++ // struct xbox_dev *ir = iep->ir;
++ const int clen = CODE_LENGTH;
++
++ if (len != clen) {
++ dprintk(DRIVER_NAME ": We got %d instead of %d bytes from xbox "
++ "ir.. ?\n", len, clen);
++ return -1;
++ }
++
++ /* check for repeats */
++ if (memcmp(iep->old, iep->buf, len) == 0) {
++ if (iep->old_jiffies + repeat_jiffies > jiffies)
++ return -1;
++ } else {
++ /*
++ * the third byte of xbox ir packet seems to contain key info
++ * the last two bytes are.. some kind of clock?
++ */
++ iep->buf[0] = iep->buf[2];
++ memset(iep->buf + 1, 0, len - 1);
++ memcpy(iep->old, iep->buf, len);
++ }
++ iep->old_jiffies = jiffies;
++
++ return 0;
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
++static void usb_remote_recv(struct urb *urb, struct pt_regs *regs)
++#else
++static void usb_remote_recv(struct urb *urb)
++#endif
++{
++ struct in_endpt *iep;
++ int len, result = -1;
++
++ if (!urb)
++ return;
++ iep = urb->context;
++ if (!iep) {
++ urb->transfer_flags |= URB_ASYNC_UNLINK;
++ usb_unlink_urb(urb);
++ return;
++ }
++ if (!iep->ir->usbdev)
++ return;
++
++ len = urb->actual_length;
++ if (debug)
++ print_data(iep, urb->transfer_buffer, len);
++
++ switch (urb->status) {
++
++ case 0:
++ result = code_check_xbox(iep, len);
++
++ if (result < 0)
++ break;
++
++ lirc_buffer_write(iep->ir->d->rbuf, iep->buf);
++ wake_up(&iep->ir->d->rbuf->wait_poll);
++ break;
++
++ case -ECONNRESET:
++ case -ENOENT:
++ case -ESHUTDOWN:
++ urb->transfer_flags |= URB_ASYNC_UNLINK;
++ usb_unlink_urb(urb);
++ return;
++
++ case -EPIPE:
++ default:
++ break;
++ }
++
++ usb_submit_urb(urb, GFP_ATOMIC);
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
++static void usb_remote_send(struct urb *urb, struct pt_regs *regs)
++#else
++static void usb_remote_send(struct urb *urb)
++#endif
++{
++ struct out_endpt *oep;
++
++ if (!urb)
++ return;
++ oep = urb->context;
++ if (!oep) {
++ urb->transfer_flags |= URB_ASYNC_UNLINK;
++ usb_unlink_urb(urb);
++ return;
++ }
++ if (!oep->ir->usbdev)
++ return;
++
++ dprintk(DRIVER_NAME "[%d]: usb out called\n", oep->ir->devnum);
++
++ if (urb->status)
++ return;
++
++ oep->send_flags |= SEND_FLAG_COMPLETE;
++ wmb();
++ if (waitqueue_active(&oep->wait))
++ wake_up(&oep->wait);
++}
++
++
++/*
++ * Initialization and removal
++ */
++
++/*
++ * Free iep according to mem_failure which specifies a checkpoint into the
++ * initialization sequence for rollback recovery.
++ */
++static void free_in_endpt(struct in_endpt *iep, int mem_failure)
++{
++ struct xbox_dev *ir;
++ dprintk(DRIVER_NAME ": free_in_endpt(%p, %d)\n", iep, mem_failure);
++ if (!iep)
++ return;
++
++ ir = iep->ir;
++ if (!ir) {
++ dprintk(DRIVER_NAME ": free_in_endpt: WARNING! null ir\n");
++ return;
++ }
++ mutex_lock(&ir->lock);
++ switch (mem_failure) {
++ case FREE_ALL:
++ case 5:
++ list_del(&iep->iep_list_link);
++ dprintk(DRIVER_NAME "[%d]: free_in_endpt removing ep=0x%0x "
++ "from list\n", ir->devnum, iep->ep->bEndpointAddress);
++ case 4:
++ if (iep->urb) {
++ iep->urb->transfer_flags |= URB_ASYNC_UNLINK;
++ usb_unlink_urb(iep->urb);
++ usb_free_urb(iep->urb);
++ iep->urb = 0;
++ } else
++ dprintk(DRIVER_NAME "[%d]: free_in_endpt null urb!\n",
++ ir->devnum);
++ case 3:
++ usb_free_coherent(iep->ir->usbdev, iep->len, iep->buf, iep->dma);
++ iep->buf = 0;
++ case 2:
++ kfree(iep);
++ }
++ mutex_unlock(&ir->lock);
++}
++
++/*
++ * Construct a new inbound endpoint for this remote, and add it to the list of
++ * in_epts in ir.
++ */
++static struct in_endpt *new_in_endpt(struct xbox_dev *ir,
++ struct usb_endpoint_descriptor *ep)
++{
++ struct usb_device *dev = ir->usbdev;
++ struct in_endpt *iep;
++ int pipe, maxp, len, addr;
++ int mem_failure;
++
++ addr = ep->bEndpointAddress;
++ pipe = usb_rcvintpipe(dev, addr);
++ maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
++
++/* len = (maxp > USB_BUFLEN) ? USB_BUFLEN : maxp;
++ * len -= (len % CODE_LENGTH); */
++ len = CODE_LENGTH;
++
++ dprintk(DRIVER_NAME "[%d]: acceptable inbound endpoint (0x%x) found "
++ "(maxp=%d len=%d)\n", ir->devnum, addr, maxp, len);
++
++ mem_failure = 0;
++ iep = kzalloc(sizeof(*iep), GFP_KERNEL);
++ if (!iep) {
++ mem_failure = 1;
++ goto new_in_endpt_failure_check;
++ }
++ iep->ir = ir;
++ iep->ep = ep;
++ iep->len = len;
++
++ iep->buf = usb_alloc_coherent(dev, len, GFP_ATOMIC, &iep->dma);
++ if (!iep->buf) {
++ mem_failure = 2;
++ goto new_in_endpt_failure_check;
++ }
++
++ iep->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!iep->urb)
++ mem_failure = 3;
++
++new_in_endpt_failure_check:
++
++ if (mem_failure) {
++ free_in_endpt(iep, mem_failure);
++ printk(DRIVER_NAME "[%d]: ep=0x%x out of memory (code=%d)\n",
++ ir->devnum, addr, mem_failure);
++ return NULL;
++ }
++ list_add_tail(&iep->iep_list_link, &ir->iep_listhead);
++ dprintk(DRIVER_NAME "[%d]: adding ep=0x%0x to list\n",
++ ir->devnum, iep->ep->bEndpointAddress);
++ return iep;
++}
++
++static void free_out_endpt(struct out_endpt *oep, int mem_failure)
++{
++ struct xbox_dev *ir;
++ dprintk(DRIVER_NAME ": free_out_endpt(%p, %d)\n", oep, mem_failure);
++ if (!oep)
++ return;
++
++ wake_up_all(&oep->wait);
++
++ ir = oep->ir;
++ if (!ir) {
++ dprintk(DRIVER_NAME ": free_out_endpt: WARNING! null ir\n");
++ return;
++ }
++ mutex_lock(&ir->lock);
++ switch (mem_failure) {
++ case FREE_ALL:
++ case 4:
++ if (oep->urb) {
++ oep->urb->transfer_flags |= URB_ASYNC_UNLINK;
++ usb_unlink_urb(oep->urb);
++ usb_free_urb(oep->urb);
++ oep->urb = 0;
++ } else {
++ dprintk(DRIVER_NAME "[%d]: free_out_endpt: null urb!\n",
++ ir->devnum);
++ }
++ case 3:
++ usb_free_coherent(oep->ir->usbdev, USB_OUTLEN,
++ oep->buf, oep->dma);
++ oep->buf = 0;
++ case 2:
++ kfree(oep);
++ }
++ mutex_unlock(&ir->lock);
++}
++
++static struct out_endpt *new_out_endpt(struct xbox_dev *ir,
++ struct usb_endpoint_descriptor *ep)
++{
++ struct usb_device *dev = ir->usbdev;
++ struct out_endpt *oep;
++ int mem_failure;
++
++ dprintk(DRIVER_NAME "[%d]: acceptable outbound endpoint (0x%x) found\n",
++ ir->devnum, ep->bEndpointAddress);
++
++ mem_failure = 0;
++ oep = kzalloc(sizeof(*oep), GFP_KERNEL);
++ if (!oep)
++ mem_failure = 1;
++ else {
++ oep->ir = ir;
++ oep->ep = ep;
++ init_waitqueue_head(&oep->wait);
++
++ oep->buf = usb_alloc_coherent(dev, USB_OUTLEN,
++ GFP_ATOMIC, &oep->dma);
++ if (!oep->buf)
++ mem_failure = 2;
++ else {
++ oep->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!oep->urb)
++ mem_failure = 3;
++ }
++ }
++ if (mem_failure) {
++ free_out_endpt(oep, mem_failure);
++ printk(DRIVER_NAME "[%d]: ep=0x%x out of memory (code=%d)\n",
++ ir->devnum, ep->bEndpointAddress, mem_failure);
++ return NULL;
++ }
++ return oep;
++}
++
++static void free_irctl(struct xbox_dev *ir, int mem_failure)
++{
++ struct list_head *pos, *n;
++ struct in_endpt *in;
++ dprintk(DRIVER_NAME ": free_irctl(%p, %d)\n", ir, mem_failure);
++
++ if (!ir)
++ return;
++
++ list_for_each_safe(pos, n, &ir->iep_listhead) {
++ in = get_iep_from_link(pos);
++ free_in_endpt(in, FREE_ALL);
++ }
++ if (ir->out_init) {
++ free_out_endpt(ir->out_init, FREE_ALL);
++ ir->out_init = NULL;
++ }
++
++ mutex_lock(&ir->lock);
++ switch (mem_failure) {
++ case FREE_ALL:
++ case 6:
++ if (!--ir->dev_refcount) {
++ list_del(&ir->remote_list_link);
++ dprintk(DRIVER_NAME "[%d]: free_irctl: removing "
++ "remote from list\n", ir->devnum);
++ } else {
++ dprintk(DRIVER_NAME "[%d]: free_irctl: refcount at %d,"
++ "aborting free_irctl\n",
++ ir->devnum, ir->dev_refcount);
++ mutex_unlock(&ir->lock);
++ return;
++ }
++ case 5:
++ case 4:
++ case 3:
++ if (ir->d) {
++ switch (mem_failure) {
++ case 5:
++ lirc_buffer_free(ir->d->rbuf);
++ case 4:
++ kfree(ir->d->rbuf);
++ case 3:
++ kfree(ir->d);
++ }
++ } else
++ printk(DRIVER_NAME "[%d]: ir->d is a null pointer!\n",
++ ir->devnum);
++ case 2:
++ mutex_unlock(&ir->lock);
++ kfree(ir);
++ return;
++ }
++ mutex_unlock(&ir->lock);
++}
++
++static struct xbox_dev *new_irctl(struct usb_interface *intf)
++{
++ struct usb_device *dev = interface_to_usbdev(intf);
++ struct xbox_dev *ir;
++ struct lirc_driver *driver;
++ int devnum, dclen;
++ int mem_failure;
++
++ devnum = dev->devnum;
++
++ dprintk(DRIVER_NAME "[%d]: remote type = XBOX DVD Dongle\n", devnum);
++
++ mem_failure = 0;
++ ir = kzalloc(sizeof(*ir), GFP_KERNEL);
++ if (!ir) {
++ mem_failure = 1;
++ goto new_irctl_failure_check;
++ }
++
++ dclen = DECODE_LENGTH;
++
++ /*
++ * add this infrared remote struct to remote_list, keeping track
++ * of the number of drivers registered.
++ */
++ dprintk(DRIVER_NAME "[%d]: adding remote to list\n", devnum);
++ list_add_tail(&ir->remote_list_link, &remote_list);
++ ir->dev_refcount = 1;
++
++ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
++ if (!driver) {
++ mem_failure = 2;
++ goto new_irctl_failure_check;
++ }
++
++ ir->d = driver;
++ driver->rbuf = kmalloc(sizeof(*(driver->rbuf)), GFP_KERNEL);
++ if (!driver->rbuf) {
++ mem_failure = 3;
++ goto new_irctl_failure_check;
++ }
++
++ if (lirc_buffer_init(driver->rbuf, dclen, 2)) {
++ mem_failure = 4;
++ goto new_irctl_failure_check;
++ }
++
++ strcpy(driver->name, DRIVER_NAME " ");
++ driver->minor = -1;
++ driver->code_length = dclen * 8;
++ driver->features = LIRC_CAN_REC_LIRCCODE;
++ driver->data = ir;
++ driver->set_use_inc = &set_use_inc;
++ driver->set_use_dec = &set_use_dec;
++ driver->dev = &intf->dev;
++ driver->owner = THIS_MODULE;
++ ir->usbdev = dev;
++ ir->devnum = devnum;
++
++ mutex_init(&ir->lock);
++ INIT_LIST_HEAD(&ir->iep_listhead);
++
++new_irctl_failure_check:
++
++ if (mem_failure) {
++ free_irctl(ir, mem_failure);
++ printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n",
++ devnum, mem_failure);
++ return NULL;
++ }
++ return ir;
++}
++
++/*
++ * Scan the global list of remotes to see if the device listed is one of them.
++ * If it is, the corresponding xbox_dev is returned, with its dev_refcount
++ * incremented. Otherwise, returns null.
++ */
++static struct xbox_dev *get_prior_reg_ir(struct usb_device *dev)
++{
++ struct list_head *pos;
++ struct xbox_dev *ir = NULL;
++
++ dprintk(DRIVER_NAME "[%d]: scanning remote_list...\n", dev->devnum);
++ list_for_each(pos, &remote_list) {
++ ir = get_irctl_from_link(pos);
++ if (ir->usbdev != dev) {
++ dprintk(DRIVER_NAME "[%d]: device %d isn't it...",
++ dev->devnum, ir->devnum);
++ ir = NULL;
++ } else {
++ dprintk(DRIVER_NAME "[%d]: prior instance found.\n",
++ dev->devnum);
++ ir->dev_refcount++;
++ break;
++ }
++ }
++ return ir;
++}
++
++/*
++ * If the USB interface has an out endpoint for control.
++ */
++static void send_outbound_init(struct xbox_dev *ir)
++{
++ if (ir->out_init) {
++ struct out_endpt *oep = ir->out_init;
++ dprintk(DRIVER_NAME "[%d]: usb_remote_probe: initializing "
++ "outbound ep\n", ir->devnum);
++ usb_fill_int_urb(oep->urb, ir->usbdev,
++ usb_sndintpipe(ir->usbdev, oep->ep->bEndpointAddress),
++ oep->buf, USB_OUTLEN, usb_remote_send,
++ oep, oep->ep->bInterval);
++ oep->urb->transfer_dma = oep->dma;
++ oep->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++
++ send_packet(oep, 0x8004, init1);
++ send_packet(oep, 0x8007, init2);
++ }
++}
++
++/* Log driver and usb info */
++static void log_usb_dev_info(struct usb_device *dev)
++{
++ char buf[63], name[128] = "";
++
++ if (dev->descriptor.iManufacturer
++ && usb_string(dev, dev->descriptor.iManufacturer,
++ buf, sizeof(buf)) > 0)
++ strlcpy(name, buf, sizeof(name));
++ if (dev->descriptor.iProduct
++ && usb_string(dev, dev->descriptor.iProduct, buf, sizeof(buf)) > 0)
++ snprintf(name + strlen(name), sizeof(name) - strlen(name),
++ " %s", buf);
++ printk(DRIVER_NAME "[%d]: %s on usb%d:%d\n", dev->devnum, name,
++ dev->bus->busnum, dev->devnum);
++}
++
++
++static int usb_remote_probe(struct usb_interface *intf,
++ const struct usb_device_id *id)
++{
++ struct usb_device *dev = interface_to_usbdev(intf);
++ struct usb_host_interface *idesc;
++ struct usb_endpoint_descriptor *ep;
++ struct in_endpt *iep;
++ struct xbox_dev *ir;
++ int i;
++
++ dprintk(DRIVER_NAME "[%d]: usb_remote_probe: dev:%p, intf:%p, id:%p)\n",
++ dev->devnum, dev, intf, id);
++
++ idesc = intf->cur_altsetting;
++
++ /* Check if a usb remote has already been registered for this device */
++ ir = get_prior_reg_ir(dev);
++
++ if (!ir) {
++ ir = new_irctl(intf);
++ if (!ir)
++ return -ENOMEM;
++ }
++
++ /*
++ * step through the endpoints to find first in and first out endpoint
++ * of type interrupt transfer
++ */
++ for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
++ ep = &idesc->endpoint[i].desc;
++ dprintk(DRIVER_NAME "[%d]: processing endpoint %d\n",
++ dev->devnum, i);
++ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
++ USB_DIR_IN) &&
++ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
++ USB_ENDPOINT_XFER_INT)) {
++
++ iep = new_in_endpt(ir, ep);
++ if (iep)
++ {
++ usb_fill_int_urb(iep->urb, dev,
++ usb_rcvintpipe(dev,
++ iep->ep->bEndpointAddress),
++ iep->buf, iep->len, usb_remote_recv,
++ iep, iep->ep->bInterval);
++ iep->urb->transfer_dma = iep->dma;
++ iep->urb->transfer_flags |=
++ URB_NO_TRANSFER_DMA_MAP;
++ }
++ }
++
++ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
++ USB_DIR_OUT) &&
++ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
++ USB_ENDPOINT_XFER_INT) &&
++ (ir->out_init == NULL))
++ ir->out_init = new_out_endpt(ir, ep);
++ }
++ if (list_empty(&ir->iep_listhead)) {
++ printk(DRIVER_NAME "[%d]: inbound endpoint not found\n",
++ ir->devnum);
++ free_irctl(ir, FREE_ALL);
++ return -ENODEV;
++ }
++ if (ir->dev_refcount == 1) {
++ ir->d->minor = lirc_register_driver(ir->d);
++ if (ir->d->minor < 0) {
++ free_irctl(ir, FREE_ALL);
++ return -ENODEV;
++ }
++
++ /* Note new driver registration in kernel logs */
++ log_usb_dev_info(dev);
++
++ /* outbound data (initialization) */
++ send_outbound_init(ir);
++ }
++
++ usb_set_intfdata(intf, ir);
++ return 0;
++}
++
++static void usb_remote_disconnect(struct usb_interface *intf)
++{
++ /* struct usb_device *dev = interface_to_usbdev(intf); */
++ struct xbox_dev *ir = usb_get_intfdata(intf);
++ usb_set_intfdata(intf, NULL);
++
++ dprintk(DRIVER_NAME ": disconnecting remote %d:\n",
++ (ir ? ir->devnum : -1));
++ if (!ir || !ir->d)
++ return;
++
++ if (ir->usbdev) {
++ /* Only unregister once */
++ ir->usbdev = NULL;
++ unregister_from_lirc(ir);
++ }
++
++ /* This also removes the current remote from remote_list */
++ free_irctl(ir, FREE_ALL);
++}
++
++static struct usb_driver usb_remote_driver = {
++ .name = DRIVER_NAME,
++ .probe = usb_remote_probe,
++ .disconnect = usb_remote_disconnect,
++ .id_table = usb_remote_table
++};
++
++static int __init usb_remote_init(void)
++{
++ int i;
++
++ INIT_LIST_HEAD(&remote_list);
++
++ printk(KERN_INFO "\n" DRIVER_NAME ": " DRIVER_DESC " "
++ DRIVER_VERSION "\n");
++ printk(DRIVER_NAME ": " DRIVER_AUTHOR "\n");
++ dprintk(DRIVER_NAME ": debug mode enabled: "
++ "$Id: lirc_xbox.c,v 1.88 2011/06/05 11:11:11 jmartin Exp $\n");
++
++ repeat_jiffies = repeat*HZ/100;
++
++ i = usb_register(&usb_remote_driver);
++ if (i) {
++ printk(DRIVER_NAME ": usb register failed, result = %d\n", i);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++static void __exit usb_remote_exit(void)
++{
++ usb_deregister(&usb_remote_driver);
++}
++
++module_init(usb_remote_init);
++module_exit(usb_remote_exit);
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(usb, usb_remote_table);
++
++module_param(debug, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(debug, "Debug enabled or not (default: 0)");
++
++module_param(mask, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(mask, "Set channel acceptance bit mask (default: 0xFFFF)");
++
++module_param(unique, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(unique, "Enable channel-specific codes (default: 0)");
++
++module_param(repeat, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(repeat, "Repeat timeout (1/100 sec) (default: 10)");
+diff -Nur linux-3.14.36/drivers/staging/media/lirc/Makefile linux-openelec/drivers/staging/media/lirc/Makefile
+--- linux-3.14.36/drivers/staging/media/lirc/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/staging/media/lirc/Makefile 2015-07-24 18:03:29.972842002 -0500
+@@ -10,4 +10,5 @@
+ obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
+ obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o
+ obj-$(CONFIG_LIRC_SIR) += lirc_sir.o
++obj-$(CONFIG_LIRC_XBOX) += lirc_xbox.o
+ obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o
+diff -Nur linux-3.14.36/drivers/staging/octeon/ethernet-rgmii.c linux-openelec/drivers/staging/octeon/ethernet-rgmii.c
+--- linux-3.14.36/drivers/staging/octeon/ethernet-rgmii.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/staging/octeon/ethernet-rgmii.c 2015-05-06 12:05:42.000000000 -0500
+@@ -166,9 +166,8 @@
+
+ if (use_global_register_lock)
+ spin_unlock_irqrestore(&global_register_lock, flags);
+- else {
++ else
+ mutex_unlock(&priv->phydev->bus->mdio_lock);
+- }
+
+ if (priv->phydev == NULL) {
+ /* Tell core. */
+diff -Nur linux-3.14.36/drivers/staging/rtl8712/usb_intf.c linux-openelec/drivers/staging/rtl8712/usb_intf.c
+--- linux-3.14.36/drivers/staging/rtl8712/usb_intf.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/staging/rtl8712/usb_intf.c 2015-07-24 18:03:30.336842002 -0500
+@@ -92,6 +92,7 @@
+ {USB_DEVICE(0x0DF6, 0x005B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
+ {USB_DEVICE(0x0DF6, 0x0063)},
++ {USB_DEVICE(0x0DF6, 0x006C)},
+ /* Sweex */
+ {USB_DEVICE(0x177F, 0x0154)},
+ /* Thinkware */
+diff -Nur linux-3.14.36/drivers/staging/rtl8821ae/core.c linux-openelec/drivers/staging/rtl8821ae/core.c
+--- linux-3.14.36/drivers/staging/rtl8821ae/core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/staging/rtl8821ae/core.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1414,23 +1414,15 @@
+ * before switch channle or power save, or tx buffer packet
+ * maybe send after offchannel or rf sleep, this may cause
+ * dis-association by AP */
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void rtl_op_flush(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->intf_ops->flush)
+ rtlpriv->intf_ops->flush(hw, queues, drop);
+ }
+-#else
+-static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
+-{
+- struct rtl_priv *rtlpriv = rtl_priv(hw);
+-
+- if (rtlpriv->intf_ops->flush)
+- rtlpriv->intf_ops->flush(hw, drop);
+-}
+-#endif
+
+ const struct ieee80211_ops rtl_ops = {
+ .start = rtl_op_start,
+diff -Nur linux-3.14.36/drivers/thermal/device_cooling.c linux-openelec/drivers/thermal/device_cooling.c
+--- linux-3.14.36/drivers/thermal/device_cooling.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/thermal/device_cooling.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,157 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/thermal.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++
++struct devfreq_cooling_device {
++ int id;
++ struct thermal_cooling_device *cool_dev;
++ unsigned int devfreq_state;
++ unsigned int max_state;
++};
++
++static DEFINE_IDR(devfreq_idr);
++static DEFINE_MUTEX(devfreq_cooling_lock);
++
++static BLOCKING_NOTIFIER_HEAD(devfreq_cooling_chain_head);
++
++int register_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return blocking_notifier_chain_register(
++ &devfreq_cooling_chain_head, nb);
++}
++EXPORT_SYMBOL_GPL(register_devfreq_cooling_notifier);
++
++int unregister_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return blocking_notifier_chain_unregister(
++ &devfreq_cooling_chain_head, nb);
++}
++EXPORT_SYMBOL_GPL(unregister_devfreq_cooling_notifier);
++
++static int devfreq_cooling_notifier_call_chain(unsigned long val)
++{
++ return (blocking_notifier_call_chain(
++ &devfreq_cooling_chain_head, val, NULL)
++ == NOTIFY_BAD) ? -EINVAL : 0;
++}
++
++static int devfreq_set_cur_state(struct thermal_cooling_device *cdev,
++ unsigned long state)
++{
++ struct devfreq_cooling_device *devfreq_device = cdev->devdata;
++ int ret;
++ unsigned long notify_state;
++
++ if (state >= devfreq_device->max_state)
++ notify_state = 5;
++ else
++ notify_state = state;
++ ret = devfreq_cooling_notifier_call_chain(notify_state);
++ if (ret)
++ return -EINVAL;
++ devfreq_device->devfreq_state = state;
++
++ return 0;
++}
++
++static int devfreq_get_max_state(struct thermal_cooling_device *cdev,
++ unsigned long *state)
++{
++ struct devfreq_cooling_device *devfreq_device = cdev->devdata;
++ *state = devfreq_device->max_state;
++
++ return 0;
++}
++
++static int devfreq_get_cur_state(struct thermal_cooling_device *cdev,
++ unsigned long *state)
++{
++ struct devfreq_cooling_device *devfreq_device = cdev->devdata;
++
++ *state = devfreq_device->devfreq_state;
++
++ return 0;
++}
++
++static struct thermal_cooling_device_ops const devfreq_cooling_ops = {
++ .get_max_state = devfreq_get_max_state,
++ .get_cur_state = devfreq_get_cur_state,
++ .set_cur_state = devfreq_set_cur_state,
++};
++
++static int get_idr(struct idr *idr, int *id)
++{
++ int ret;
++
++ mutex_lock(&devfreq_cooling_lock);
++ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
++ mutex_unlock(&devfreq_cooling_lock);
++ if (unlikely(ret < 0))
++ return ret;
++ *id = ret;
++
++ return 0;
++}
++
++static void release_idr(struct idr *idr, int id)
++{
++ mutex_lock(&devfreq_cooling_lock);
++ idr_remove(idr, id);
++ mutex_unlock(&devfreq_cooling_lock);
++}
++
++struct thermal_cooling_device *devfreq_cooling_register(unsigned long max_state)
++{
++ struct thermal_cooling_device *cool_dev;
++ struct devfreq_cooling_device *devfreq_dev = NULL;
++ char dev_name[THERMAL_NAME_LENGTH];
++ int ret = 0;
++
++ devfreq_dev = kzalloc(sizeof(struct devfreq_cooling_device),
++ GFP_KERNEL);
++ if (!devfreq_dev)
++ return ERR_PTR(-ENOMEM);
++
++ ret = get_idr(&devfreq_idr, &devfreq_dev->id);
++ if (ret) {
++ kfree(devfreq_dev);
++ return ERR_PTR(-EINVAL);
++ }
++
++ snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d",
++ devfreq_dev->id);
++
++ cool_dev = thermal_cooling_device_register(dev_name, devfreq_dev,
++ &devfreq_cooling_ops);
++ if (!cool_dev) {
++ release_idr(&devfreq_idr, devfreq_dev->id);
++ kfree(devfreq_dev);
++ return ERR_PTR(-EINVAL);
++ }
++ devfreq_dev->cool_dev = cool_dev;
++ devfreq_dev->devfreq_state = 0;
++ devfreq_dev->max_state = max_state;
++
++ return cool_dev;
++}
++EXPORT_SYMBOL_GPL(devfreq_cooling_register);
++
++void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
++{
++ struct devfreq_cooling_device *devfreq_dev = cdev->devdata;
++
++ thermal_cooling_device_unregister(devfreq_dev->cool_dev);
++ release_idr(&devfreq_idr, devfreq_dev->id);
++ kfree(devfreq_dev);
++}
++EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
+diff -Nur linux-3.14.36/drivers/thermal/fair_share.c linux-openelec/drivers/thermal/fair_share.c
+--- linux-3.14.36/drivers/thermal/fair_share.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/thermal/fair_share.c 2015-05-06 12:05:42.000000000 -0500
+@@ -23,6 +23,7 @@
+ */
+
+ #include <linux/thermal.h>
++#include <trace/events/thermal.h>
+
+ #include "thermal_core.h"
+
+@@ -34,6 +35,7 @@
+ {
+ int count = 0;
+ unsigned long trip_temp;
++ enum thermal_trip_type trip_type;
+
+ if (tz->trips == 0 || !tz->ops->get_trip_temp)
+ return 0;
+@@ -43,6 +45,16 @@
+ if (tz->temperature < trip_temp)
+ break;
+ }
++
++ /*
++ * count > 0 only if temperature is greater than first trip
++ * point, in which case, trip_point = count - 1
++ */
++ if (count > 0) {
++ tz->ops->get_trip_type(tz, count - 1, &trip_type);
++ trace_thermal_zone_trip(tz, count - 1, trip_type);
++ }
++
+ return count;
+ }
+
+diff -Nur linux-3.14.36/drivers/thermal/imx_thermal.c linux-openelec/drivers/thermal/imx_thermal.c
+--- linux-3.14.36/drivers/thermal/imx_thermal.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/thermal/imx_thermal.c 2015-05-06 12:05:42.000000000 -0500
+@@ -12,6 +12,8 @@
+ #include <linux/cpufreq.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
++#include <linux/device_cooling.h>
++#include <linux/fsl_otp.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+@@ -46,30 +48,39 @@
+
+ #define OCOTP_ANA1 0x04e0
+
+-/* The driver supports 1 passive trip point and 1 critical trip point */
+-enum imx_thermal_trip {
+- IMX_TRIP_PASSIVE,
+- IMX_TRIP_CRITICAL,
+- IMX_TRIP_NUM,
+-};
++#define OCOTP_TEMP_GRADE 0x480
++#define OCOTP_TEMP_GRADE_SHIFT 5
++#define OCOTP_TEMP_GRADE_AUT 0x3
++#define OCOTP_TEMP_GRADE_IND 0x2
++#define OCOTP_TEMP_GRADE_EXT 0x1
++#define OCOTP_TEMP_GRADE_COM 0x0
+
+ /*
+ * It defines the temperature in millicelsius for passive trip point
+ * that will trigger cooling action when crossed.
+ */
+-#define IMX_TEMP_PASSIVE 85000
++#define IMX_TEMP_MAX_PASSIVE 85000
++#define IMX_TEMP_MIN_TRIP_DELTA 6000
++
++#define IMX_POLLING_DELAY 3000 /* millisecond */
++#define IMX_PASSIVE_DELAY 2000
++
++#define FACTOR0 10000000
++#define FACTOR1 15976
++#define FACTOR2 4297157
+
+-#define IMX_POLLING_DELAY 2000 /* millisecond */
+-#define IMX_PASSIVE_DELAY 1000
++#define IMX_TRIP_PASSIVE 0
+
+ struct imx_thermal_data {
+ struct thermal_zone_device *tz;
+- struct thermal_cooling_device *cdev;
++ struct thermal_cooling_device *cdev[2];
+ enum thermal_device_mode mode;
+ struct regmap *tempmon;
+- int c1, c2; /* See formula in imx_get_sensor_data() */
++ u32 c1, c2; /* See formula in imx_get_sensor_data() */
+ unsigned long temp_passive;
+ unsigned long temp_critical;
++ unsigned long num_passive_trips;
++ unsigned long temp_zone_delta;
+ unsigned long alarm_temp;
+ unsigned long last_temp;
+ bool irq_enabled;
+@@ -84,7 +95,7 @@
+ int alarm_value;
+
+ data->alarm_temp = alarm_temp;
+- alarm_value = (alarm_temp - data->c2) / data->c1;
++ alarm_value = (data->c2 - alarm_temp) / data->c1;
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_ALARM_VALUE_MASK);
+ regmap_write(map, TEMPSENSE0 + REG_SET, alarm_value <<
+ TEMPSENSE0_ALARM_VALUE_SHIFT);
+@@ -95,6 +106,7 @@
+ struct imx_thermal_data *data = tz->devdata;
+ struct regmap *map = data->tempmon;
+ unsigned int n_meas;
++ unsigned long cur_state;
+ bool wait;
+ u32 val;
+
+@@ -136,12 +148,23 @@
+ n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
+
+ /* See imx_get_sensor_data() for formula derivation */
+- *temp = data->c2 + data->c1 * n_meas;
++ *temp = data->c2 - n_meas * data->c1;
++
++ data->cdev[0]->ops->get_cur_state(data->cdev[0], &cur_state);
+
+ /* Update alarm value to next higher trip point */
+- if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive)
++ if ((data->temp_passive < data->alarm_temp) &&
++ (data->alarm_temp < data->temp_critical) &&
++ (cur_state < data->num_passive_trips)) {
++ imx_set_alarm_temp(data, data->temp_passive + ((cur_state + 1) * data->temp_zone_delta));
++ dev_dbg(&tz->device, "thermal alarm on: T < %lu\n",
++ data->alarm_temp / 1000);
++ }
++
++ if (data->alarm_temp < data->temp_critical && *temp >= data->temp_passive + (data->num_passive_trips * data->temp_zone_delta))
+ imx_set_alarm_temp(data, data->temp_critical);
+- if (data->alarm_temp == data->temp_critical && *temp < data->temp_passive) {
++
++ if (data->alarm_temp > data->temp_passive && *temp < data->temp_passive) {
+ imx_set_alarm_temp(data, data->temp_passive);
+ dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
+ data->alarm_temp / 1000);
+@@ -210,7 +233,8 @@
+ static int imx_get_trip_type(struct thermal_zone_device *tz, int trip,
+ enum thermal_trip_type *type)
+ {
+- *type = (trip == IMX_TRIP_PASSIVE) ? THERMAL_TRIP_PASSIVE :
++ struct imx_thermal_data *data = tz->devdata;
++ *type = (trip < data->num_passive_trips) ? THERMAL_TRIP_PASSIVE :
+ THERMAL_TRIP_CRITICAL;
+ return 0;
+ }
+@@ -229,8 +253,9 @@
+ {
+ struct imx_thermal_data *data = tz->devdata;
+
+- *temp = (trip == IMX_TRIP_PASSIVE) ? data->temp_passive :
+- data->temp_critical;
++ *temp = (trip < data->num_passive_trips) ?
++ data->temp_passive + (trip * data->temp_zone_delta) :
++ data->temp_critical;
+ return 0;
+ }
+
+@@ -239,13 +264,14 @@
+ {
+ struct imx_thermal_data *data = tz->devdata;
+
+- if (trip == IMX_TRIP_CRITICAL)
++ if (trip > IMX_TRIP_PASSIVE)
+ return -EPERM;
+
+- if (temp > IMX_TEMP_PASSIVE)
++ if (trip == IMX_TRIP_PASSIVE && temp > IMX_TEMP_MAX_PASSIVE)
+ return -EINVAL;
+
+ data->temp_passive = temp;
++ data->temp_zone_delta = (data->temp_critical - data->temp_passive) / data->num_passive_trips;
+
+ imx_set_alarm_temp(data, temp);
+
+@@ -286,6 +312,37 @@
+ return 0;
+ }
+
++ int imx_get_trend(struct thermal_zone_device *tz,
++ int trip, enum thermal_trend *trend)
++{
++ struct imx_thermal_data *data = tz->devdata;
++ int ret;
++ unsigned long trip_temp, cur_state;
++
++ ret = imx_get_trip_temp(tz, trip, &trip_temp);
++ if (ret < 0)
++ return ret;
++
++ data->cdev[0]->ops->get_cur_state(data->cdev[0], &cur_state);
++
++ if (tz->temperature > tz->last_temperature &&
++ tz->temperature > (data->temp_passive + (cur_state * data->temp_zone_delta))) {
++ *trend = THERMAL_TREND_RAISING;
++ } else if (tz->temperature < tz->last_temperature && cur_state) {
++ if (tz->temperature <= (data->temp_passive - data->temp_zone_delta))
++ *trend = THERMAL_TREND_DROP_FULL;
++ else if (tz->temperature <= (data->temp_passive +
++ ((cur_state - 1) * data->temp_zone_delta)))
++ *trend = THERMAL_TREND_DROPPING;
++ else
++ *trend = THERMAL_TREND_STABLE;
++ } else {
++ *trend = THERMAL_TREND_STABLE;
++ }
++
++ return 0;
++}
++
+ static struct thermal_zone_device_ops imx_tz_ops = {
+ .bind = imx_bind,
+ .unbind = imx_unbind,
+@@ -295,6 +352,7 @@
+ .get_trip_type = imx_get_trip_type,
+ .get_trip_temp = imx_get_trip_temp,
+ .get_crit_temp = imx_get_crit_temp,
++ .get_trend = imx_get_trend,
+ .set_trip_temp = imx_set_trip_temp,
+ };
+
+@@ -302,9 +360,10 @@
+ {
+ struct imx_thermal_data *data = platform_get_drvdata(pdev);
+ struct regmap *map;
+- int t1, t2, n1, n2;
++ int t1, n1;
+ int ret;
+ u32 val;
++ u64 temp64;
+
+ map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "fsl,tempmon-data");
+@@ -328,43 +387,83 @@
+ /*
+ * Sensor data layout:
+ * [31:20] - sensor value @ 25C
+- * [19:8] - sensor value of hot
+- * [7:0] - hot temperature value
++ * Use universal formula now and only need sensor value @ 25C
++ * slope = 0.4297157 - (0.0015976 * 25C fuse)
+ */
+ n1 = val >> 20;
+- n2 = (val & 0xfff00) >> 8;
+- t2 = val & 0xff;
+ t1 = 25; /* t1 always 25C */
+
+ /*
+- * Derived from linear interpolation,
+- * Tmeas = T2 + (Nmeas - N2) * (T1 - T2) / (N1 - N2)
++ * Derived from linear interpolation:
++ * slope = 0.4297157 - (0.0015976 * 25C fuse)
++ * slope = (FACTOR2 - FACTOR1 * n1) / FACTOR0
++ * (Nmeas - n1) / (Tmeas - t1) = slope
+ * We want to reduce this down to the minimum computation necessary
+ * for each temperature read. Also, we want Tmeas in millicelsius
+ * and we don't want to lose precision from integer division. So...
+- * milli_Tmeas = 1000 * T2 + 1000 * (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+- * Let constant c1 = 1000 * (T1 - T2) / (N1 - N2)
+- * milli_Tmeas = (1000 * T2) + c1 * (Nmeas - N2)
+- * milli_Tmeas = (1000 * T2) + (c1 * Nmeas) - (c1 * N2)
+- * Let constant c2 = (1000 * T2) - (c1 * N2)
+- * milli_Tmeas = c2 + (c1 * Nmeas)
++ * Tmeas = (Nmeas - n1) / slope + t1
++ * milli_Tmeas = 1000 * (Nmeas - n1) / slope + 1000 * t1
++ * milli_Tmeas = -1000 * (n1 - Nmeas) / slope + 1000 * t1
++ * Let constant c1 = (-1000 / slope)
++ * milli_Tmeas = (n1 - Nmeas) * c1 + 1000 * t1
++ * Let constant c2 = n1 *c1 + 1000 * t1
++ * milli_Tmeas = c2 - Nmeas * c1
+ */
+- data->c1 = 1000 * (t1 - t2) / (n1 - n2);
+- data->c2 = 1000 * t2 - data->c1 * n2;
++ temp64 = FACTOR0;
++ temp64 *= 1000;
++ do_div(temp64, FACTOR1 * n1 - FACTOR2);
++ data->c1 = temp64;
++ data->c2 = n1 * data->c1 + 1000 * t1;
+
+- /*
+- * Set the default passive cooling trip point to 20 °C below the
+- * maximum die temperature. Can be changed from userspace.
+- */
+- data->temp_passive = 1000 * (t2 - 20);
++ return 0;
++}
+
+- /*
+- * The maximum die temperature is t2, let's give 5 °C cushion
+- * for noise and possible temperature rise between measurements.
+- */
+- data->temp_critical = 1000 * (t2 - 5);
++static void imx_set_thermal_defaults(struct imx_thermal_data *data)
++{
++ int ret;
++ u32 val;
+
+- return 0;
++ ret = fsl_otp_readl(OCOTP_TEMP_GRADE, &val);
++
++ if (ret) {
++ /*
++ * Set the default passive cooling trip point,
++ * can be changed from userspace.
++ */
++ data->temp_passive = IMX_TEMP_MAX_PASSIVE;
++
++ /*
++ * The maximum die temperature set to 20 C higher than
++ * IMX_TEMP_MAX_PASSIVE.
++ */
++ data->temp_critical = 1000 * 20 + data->temp_passive;
++ data->temp_zone_delta = (data->temp_critical - data->temp_passive) / data->num_passive_trips;
++ } else {
++ val >>= OCOTP_TEMP_GRADE_SHIFT;
++ val &= 0x3;
++
++ switch (val) {
++ case OCOTP_TEMP_GRADE_AUT:
++ data->temp_critical = 125000;
++ break;
++ case OCOTP_TEMP_GRADE_IND:
++ case OCOTP_TEMP_GRADE_EXT:
++ data->temp_critical = 105000;
++ break;
++ case OCOTP_TEMP_GRADE_COM:
++ default:
++ data->temp_critical = 95000;
++ break;
++ }
++ data->temp_passive = data->temp_critical - (IMX_TEMP_MIN_TRIP_DELTA * data->num_passive_trips);
++ data->temp_zone_delta = IMX_TEMP_MIN_TRIP_DELTA;
++ }
++
++ pr_debug("THERMAL DEFAULTS: passive: %lu \
++ critical %lu trip_points: %lu \
++ zone_delta: %lu\n",
++ data->temp_passive, data->temp_critical,
++ data->num_passive_trips, data->temp_zone_delta);
+ }
+
+ static irqreturn_t imx_thermal_alarm_irq(int irq, void *dev)
+@@ -397,6 +496,10 @@
+ int measure_freq;
+ int ret;
+
++ if (!cpufreq_get_current_driver()) {
++ dev_dbg(&pdev->dev, "no cpufreq driver!");
++ return -EPROBE_DEFER;
++ }
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+@@ -421,6 +524,24 @@
+ return ret;
+ }
+
++ data->irq_enabled = true;
++
++ data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(data->thermal_clk)) {
++ dev_warn(&pdev->dev, "failed to get thermal clk!\n");
++ } else {
++ /*
++ * Thermal sensor needs clk on to get correct value, normally
++ * we should enable its clk before taking measurement and disable
++ * clk after measurement is done, but if alarm function is enabled,
++ * hardware will auto measure the temperature periodically, so we
++ * need to keep the clk always on for alarm function.
++ */
++ ret = clk_prepare_enable(data->thermal_clk);
++ if (ret)
++ dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
++ }
++
+ platform_set_drvdata(pdev, data);
+
+ ret = imx_get_sensor_data(pdev);
+@@ -437,16 +558,28 @@
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+
+ cpumask_set_cpu(0, &clip_cpus);
+- data->cdev = cpufreq_cooling_register(&clip_cpus);
+- if (IS_ERR(data->cdev)) {
+- ret = PTR_ERR(data->cdev);
++ data->cdev[0] = cpufreq_cooling_register(&clip_cpus);
++ if (IS_ERR(data->cdev[0])) {
++ ret = PTR_ERR(data->cdev[0]);
+ dev_err(&pdev->dev,
+ "failed to register cpufreq cooling device: %d\n", ret);
+ return ret;
+ }
+
++ data->cdev[0]->ops->get_max_state(data->cdev[0], &data->num_passive_trips);
++
++ data->cdev[1] = devfreq_cooling_register(data->num_passive_trips + 1);
++ if (IS_ERR(data->cdev[1])) {
++ ret = PTR_ERR(data->cdev[1]);
++ dev_err(&pdev->dev,
++ "failed to register devfreq cooling device: %d\n", ret);
++ return ret;
++ }
++
++ imx_set_thermal_defaults(data);
++
+ data->tz = thermal_zone_device_register("imx_thermal_zone",
+- IMX_TRIP_NUM,
++ data->num_passive_trips + 1,
+ BIT(IMX_TRIP_PASSIVE), data,
+ &imx_tz_ops, NULL,
+ IMX_PASSIVE_DELAY,
+@@ -455,26 +588,11 @@
+ ret = PTR_ERR(data->tz);
+ dev_err(&pdev->dev,
+ "failed to register thermal zone device %d\n", ret);
+- cpufreq_cooling_unregister(data->cdev);
++ cpufreq_cooling_unregister(data->cdev[0]);
++ devfreq_cooling_unregister(data->cdev[1]);
+ return ret;
+ }
+
+- data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(data->thermal_clk)) {
+- dev_warn(&pdev->dev, "failed to get thermal clk!\n");
+- } else {
+- /*
+- * Thermal sensor needs clk on to get correct value, normally
+- * we should enable its clk before taking measurement and disable
+- * clk after measurement is done, but if alarm function is enabled,
+- * hardware will auto measure the temperature periodically, so we
+- * need to keep the clk always on for alarm function.
+- */
+- ret = clk_prepare_enable(data->thermal_clk);
+- if (ret)
+- dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+- }
+-
+ /* Enable measurements at ~ 10 Hz */
+ regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
+ measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
+@@ -483,7 +601,6 @@
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+
+- data->irq_enabled = true;
+ data->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+@@ -500,7 +617,8 @@
+ clk_disable_unprepare(data->thermal_clk);
+
+ thermal_zone_device_unregister(data->tz);
+- cpufreq_cooling_unregister(data->cdev);
++ cpufreq_cooling_unregister(data->cdev[0]);
++ devfreq_cooling_unregister(data->cdev[1]);
+
+ return 0;
+ }
+diff -Nur linux-3.14.36/drivers/thermal/Kconfig linux-openelec/drivers/thermal/Kconfig
+--- linux-3.14.36/drivers/thermal/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/thermal/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -125,6 +125,13 @@
+ cpufreq is used as the cooling device to throttle CPUs when the
+ passive trip is crossed.
+
++config DEVICE_THERMAL
++ tristate "generic device cooling support"
++ help
++ Support for device cooling.
++ It supports notification of crossing passive trip for devices,
++ devices need to do their own actions to cool down the SOC.
++
+ config SPEAR_THERMAL
+ bool "SPEAr thermal sensor driver"
+ depends on PLAT_SPEAR
+diff -Nur linux-3.14.36/drivers/thermal/Makefile linux-openelec/drivers/thermal/Makefile
+--- linux-3.14.36/drivers/thermal/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/thermal/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -26,6 +26,7 @@
+ obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
+ obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
+ obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
++obj-$(CONFIG_DEVICE_THERMAL) += device_cooling.o
+ obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
+ obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
+ obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
+diff -Nur linux-3.14.36/drivers/thermal/of-thermal.c linux-openelec/drivers/thermal/of-thermal.c
+--- linux-3.14.36/drivers/thermal/of-thermal.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/thermal/of-thermal.c 2015-05-06 12:05:42.000000000 -0500
+@@ -156,8 +156,8 @@
+
+ ret = thermal_zone_bind_cooling_device(thermal,
+ tbp->trip_id, cdev,
+- tbp->min,
+- tbp->max);
++ tbp->max,
++ tbp->min);
+ if (ret)
+ return ret;
+ }
+@@ -712,11 +712,12 @@
+ }
+
+ i = 0;
+- for_each_child_of_node(child, gchild)
++ for_each_child_of_node(child, gchild) {
+ ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
+ tz->trips, tz->ntrips);
+ if (ret)
+ goto free_tbps;
++ }
+
+ finish:
+ of_node_put(child);
+diff -Nur linux-3.14.36/drivers/thermal/step_wise.c linux-openelec/drivers/thermal/step_wise.c
+--- linux-3.14.36/drivers/thermal/step_wise.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/thermal/step_wise.c 2015-05-06 12:05:42.000000000 -0500
+@@ -23,6 +23,7 @@
+ */
+
+ #include <linux/thermal.h>
++#include <trace/events/thermal.h>
+
+ #include "thermal_core.h"
+
+@@ -70,10 +71,12 @@
+ if (next_target < instance->lower)
+ next_target = instance->lower;
+ }
++ dev_dbg(&cdev->device, "THERMAL_TREND_RAISING: next_target=%ld\n", next_target);
+ break;
+ case THERMAL_TREND_RAISE_FULL:
+ if (throttle)
+ next_target = instance->upper;
++ dev_dbg(&cdev->device, "THERMAL_TREND_RAISE_FULL: next_target=%ld\n", next_target);
+ break;
+ case THERMAL_TREND_DROPPING:
+ if (cur_state == instance->lower) {
+@@ -84,6 +87,7 @@
+ if (next_target > instance->upper)
+ next_target = instance->upper;
+ }
++ dev_dbg(&cdev->device, "THERMAL_TREND_DROPPING: next_target=%ld\n", next_target);
+ break;
+ case THERMAL_TREND_DROP_FULL:
+ if (cur_state == instance->lower) {
+@@ -91,6 +95,7 @@
+ next_target = THERMAL_NO_TARGET;
+ } else
+ next_target = instance->lower;
++ dev_dbg(&cdev->device, "THERMAL_TREND_DROP_FULL: next_target=%ld\n", next_target);
+ break;
+ default:
+ break;
+@@ -117,7 +122,7 @@
+ enum thermal_trend trend;
+ struct thermal_instance *instance;
+ bool throttle = false;
+- int old_target;
++ unsigned long old_target;
+
+ if (trip == THERMAL_TRIPS_NONE) {
+ trip_temp = tz->forced_passive;
+@@ -129,8 +134,10 @@
+
+ trend = get_tz_trend(tz, trip);
+
+- if (tz->temperature >= trip_temp)
++ if (tz->temperature >= trip_temp) {
+ throttle = true;
++ trace_thermal_zone_trip(tz, trip, trip_type);
++ }
+
+ dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n",
+ trip, trip_type, trip_temp, trend, throttle);
+@@ -143,8 +150,8 @@
+
+ old_target = instance->target;
+ instance->target = get_target_state(instance, trend, throttle);
+- dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+- old_target, (int)instance->target);
++ dev_dbg(&instance->cdev->device, "old_target=%ld, target=%ld\n",
++ old_target, instance->target);
+
+ if (old_target == instance->target)
+ continue;
+diff -Nur linux-3.14.36/drivers/thermal/thermal_core.c linux-openelec/drivers/thermal/thermal_core.c
+--- linux-3.14.36/drivers/thermal/thermal_core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/thermal/thermal_core.c 2015-07-24 18:03:29.268842002 -0500
+@@ -38,6 +38,9 @@
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
+
++#define CREATE_TRACE_POINTS
++#include <trace/events/thermal.h>
++
+ #include "thermal_core.h"
+ #include "thermal_hwmon.h"
+
+@@ -368,6 +371,8 @@
+ if (tz->temperature < trip_temp)
+ return;
+
++ trace_thermal_zone_trip(tz, trip, trip_type);
++
+ if (tz->ops->notify)
+ tz->ops->notify(tz, trip, trip_type);
+
+@@ -463,6 +468,7 @@
+ tz->temperature = temp;
+ mutex_unlock(&tz->lock);
+
++ trace_thermal_temperature(tz);
+ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+ tz->last_temperature, tz->temperature);
+ }
+@@ -1287,6 +1293,7 @@
+ mutex_unlock(&cdev->lock);
+ cdev->ops->set_cur_state(cdev, target);
+ cdev->updated = true;
++ trace_cdev_update(cdev, target);
+ dev_dbg(&cdev->device, "set to state %lu\n", target);
+ }
+ EXPORT_SYMBOL(thermal_cdev_update);
+@@ -1568,8 +1575,7 @@
+
+ thermal_zone_device_update(tz);
+
+- if (!result)
+- return tz;
++ return tz;
+
+ unregister:
+ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
+diff -Nur linux-3.14.36/drivers/thermal/thermal_core.c.orig linux-openelec/drivers/thermal/thermal_core.c.orig
+--- linux-3.14.36/drivers/thermal/thermal_core.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/thermal/thermal_core.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1860 @@
++/*
++ * thermal.c - Generic Thermal Management Sysfs support.
++ *
++ * Copyright (C) 2008 Intel Corp
++ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
++ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/kdev_t.h>
++#include <linux/idr.h>
++#include <linux/thermal.h>
++#include <linux/reboot.h>
++#include <linux/string.h>
++#include <linux/of.h>
++#include <net/netlink.h>
++#include <net/genetlink.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/thermal.h>
++
++#include "thermal_core.h"
++#include "thermal_hwmon.h"
++
++MODULE_AUTHOR("Zhang Rui");
++MODULE_DESCRIPTION("Generic thermal management sysfs support");
++MODULE_LICENSE("GPL v2");
++
++static DEFINE_IDR(thermal_tz_idr);
++static DEFINE_IDR(thermal_cdev_idr);
++static DEFINE_MUTEX(thermal_idr_lock);
++
++static LIST_HEAD(thermal_tz_list);
++static LIST_HEAD(thermal_cdev_list);
++static LIST_HEAD(thermal_governor_list);
++
++static DEFINE_MUTEX(thermal_list_lock);
++static DEFINE_MUTEX(thermal_governor_lock);
++
++static struct thermal_governor *def_governor;
++
++static struct thermal_governor *__find_governor(const char *name)
++{
++ struct thermal_governor *pos;
++
++ if (!name || !name[0])
++ return def_governor;
++
++ list_for_each_entry(pos, &thermal_governor_list, governor_list)
++ if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
++ return pos;
++
++ return NULL;
++}
++
++int thermal_register_governor(struct thermal_governor *governor)
++{
++ int err;
++ const char *name;
++ struct thermal_zone_device *pos;
++
++ if (!governor)
++ return -EINVAL;
++
++ mutex_lock(&thermal_governor_lock);
++
++ err = -EBUSY;
++ if (__find_governor(governor->name) == NULL) {
++ err = 0;
++ list_add(&governor->governor_list, &thermal_governor_list);
++ if (!def_governor && !strncmp(governor->name,
++ DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
++ def_governor = governor;
++ }
++
++ mutex_lock(&thermal_list_lock);
++
++ list_for_each_entry(pos, &thermal_tz_list, node) {
++ /*
++ * only thermal zones with specified tz->tzp->governor_name
++ * may run with tz->govenor unset
++ */
++ if (pos->governor)
++ continue;
++
++ name = pos->tzp->governor_name;
++
++ if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
++ pos->governor = governor;
++ }
++
++ mutex_unlock(&thermal_list_lock);
++ mutex_unlock(&thermal_governor_lock);
++
++ return err;
++}
++
++void thermal_unregister_governor(struct thermal_governor *governor)
++{
++ struct thermal_zone_device *pos;
++
++ if (!governor)
++ return;
++
++ mutex_lock(&thermal_governor_lock);
++
++ if (__find_governor(governor->name) == NULL)
++ goto exit;
++
++ mutex_lock(&thermal_list_lock);
++
++ list_for_each_entry(pos, &thermal_tz_list, node) {
++ if (!strnicmp(pos->governor->name, governor->name,
++ THERMAL_NAME_LENGTH))
++ pos->governor = NULL;
++ }
++
++ mutex_unlock(&thermal_list_lock);
++ list_del(&governor->governor_list);
++exit:
++ mutex_unlock(&thermal_governor_lock);
++ return;
++}
++
++static int get_idr(struct idr *idr, struct mutex *lock, int *id)
++{
++ int ret;
++
++ if (lock)
++ mutex_lock(lock);
++ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
++ if (lock)
++ mutex_unlock(lock);
++ if (unlikely(ret < 0))
++ return ret;
++ *id = ret;
++ return 0;
++}
++
++static void release_idr(struct idr *idr, struct mutex *lock, int id)
++{
++ if (lock)
++ mutex_lock(lock);
++ idr_remove(idr, id);
++ if (lock)
++ mutex_unlock(lock);
++}
++
++int get_tz_trend(struct thermal_zone_device *tz, int trip)
++{
++ enum thermal_trend trend;
++
++ if (tz->emul_temperature || !tz->ops->get_trend ||
++ tz->ops->get_trend(tz, trip, &trend)) {
++ if (tz->temperature > tz->last_temperature)
++ trend = THERMAL_TREND_RAISING;
++ else if (tz->temperature < tz->last_temperature)
++ trend = THERMAL_TREND_DROPPING;
++ else
++ trend = THERMAL_TREND_STABLE;
++ }
++
++ return trend;
++}
++EXPORT_SYMBOL(get_tz_trend);
++
++struct thermal_instance *get_thermal_instance(struct thermal_zone_device *tz,
++ struct thermal_cooling_device *cdev, int trip)
++{
++ struct thermal_instance *pos = NULL;
++ struct thermal_instance *target_instance = NULL;
++
++ mutex_lock(&tz->lock);
++ mutex_lock(&cdev->lock);
++
++ list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
++ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
++ target_instance = pos;
++ break;
++ }
++ }
++
++ mutex_unlock(&cdev->lock);
++ mutex_unlock(&tz->lock);
++
++ return target_instance;
++}
++EXPORT_SYMBOL(get_thermal_instance);
++
++static void print_bind_err_msg(struct thermal_zone_device *tz,
++ struct thermal_cooling_device *cdev, int ret)
++{
++ dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n",
++ tz->type, cdev->type, ret);
++}
++
++static void __bind(struct thermal_zone_device *tz, int mask,
++ struct thermal_cooling_device *cdev,
++ unsigned long *limits)
++{
++ int i, ret;
++
++ for (i = 0; i < tz->trips; i++) {
++ if (mask & (1 << i)) {
++ unsigned long upper, lower;
++
++ upper = THERMAL_NO_LIMIT;
++ lower = THERMAL_NO_LIMIT;
++ if (limits) {
++ lower = limits[i * 2];
++ upper = limits[i * 2 + 1];
++ }
++ ret = thermal_zone_bind_cooling_device(tz, i, cdev,
++ upper, lower);
++ if (ret)
++ print_bind_err_msg(tz, cdev, ret);
++ }
++ }
++}
++
++static void __unbind(struct thermal_zone_device *tz, int mask,
++ struct thermal_cooling_device *cdev)
++{
++ int i;
++
++ for (i = 0; i < tz->trips; i++)
++ if (mask & (1 << i))
++ thermal_zone_unbind_cooling_device(tz, i, cdev);
++}
++
++static void bind_cdev(struct thermal_cooling_device *cdev)
++{
++ int i, ret;
++ const struct thermal_zone_params *tzp;
++ struct thermal_zone_device *pos = NULL;
++
++ mutex_lock(&thermal_list_lock);
++
++ list_for_each_entry(pos, &thermal_tz_list, node) {
++ if (!pos->tzp && !pos->ops->bind)
++ continue;
++
++ if (pos->ops->bind) {
++ ret = pos->ops->bind(pos, cdev);
++ if (ret)
++ print_bind_err_msg(pos, cdev, ret);
++ continue;
++ }
++
++ tzp = pos->tzp;
++ if (!tzp || !tzp->tbp)
++ continue;
++
++ for (i = 0; i < tzp->num_tbps; i++) {
++ if (tzp->tbp[i].cdev || !tzp->tbp[i].match)
++ continue;
++ if (tzp->tbp[i].match(pos, cdev))
++ continue;
++ tzp->tbp[i].cdev = cdev;
++ __bind(pos, tzp->tbp[i].trip_mask, cdev,
++ tzp->tbp[i].binding_limits);
++ }
++ }
++
++ mutex_unlock(&thermal_list_lock);
++}
++
++static void bind_tz(struct thermal_zone_device *tz)
++{
++ int i, ret;
++ struct thermal_cooling_device *pos = NULL;
++ const struct thermal_zone_params *tzp = tz->tzp;
++
++ if (!tzp && !tz->ops->bind)
++ return;
++
++ mutex_lock(&thermal_list_lock);
++
++ /* If there is ops->bind, try to use ops->bind */
++ if (tz->ops->bind) {
++ list_for_each_entry(pos, &thermal_cdev_list, node) {
++ ret = tz->ops->bind(tz, pos);
++ if (ret)
++ print_bind_err_msg(tz, pos, ret);
++ }
++ goto exit;
++ }
++
++ if (!tzp || !tzp->tbp)
++ goto exit;
++
++ list_for_each_entry(pos, &thermal_cdev_list, node) {
++ for (i = 0; i < tzp->num_tbps; i++) {
++ if (tzp->tbp[i].cdev || !tzp->tbp[i].match)
++ continue;
++ if (tzp->tbp[i].match(tz, pos))
++ continue;
++ tzp->tbp[i].cdev = pos;
++ __bind(tz, tzp->tbp[i].trip_mask, pos,
++ tzp->tbp[i].binding_limits);
++ }
++ }
++exit:
++ mutex_unlock(&thermal_list_lock);
++}
++
++static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
++ int delay)
++{
++ if (delay > 1000)
++ mod_delayed_work(system_freezable_wq, &tz->poll_queue,
++ round_jiffies(msecs_to_jiffies(delay)));
++ else if (delay)
++ mod_delayed_work(system_freezable_wq, &tz->poll_queue,
++ msecs_to_jiffies(delay));
++ else
++ cancel_delayed_work(&tz->poll_queue);
++}
++
++static void monitor_thermal_zone(struct thermal_zone_device *tz)
++{
++ mutex_lock(&tz->lock);
++
++ if (tz->passive)
++ thermal_zone_device_set_polling(tz, tz->passive_delay);
++ else if (tz->polling_delay)
++ thermal_zone_device_set_polling(tz, tz->polling_delay);
++ else
++ thermal_zone_device_set_polling(tz, 0);
++
++ mutex_unlock(&tz->lock);
++}
++
++static void handle_non_critical_trips(struct thermal_zone_device *tz,
++ int trip, enum thermal_trip_type trip_type)
++{
++ tz->governor ? tz->governor->throttle(tz, trip) :
++ def_governor->throttle(tz, trip);
++}
++
++static void handle_critical_trips(struct thermal_zone_device *tz,
++ int trip, enum thermal_trip_type trip_type)
++{
++ long trip_temp;
++
++ tz->ops->get_trip_temp(tz, trip, &trip_temp);
++
++ /* If we have not crossed the trip_temp, we do not care. */
++ if (tz->temperature < trip_temp)
++ return;
++
++ trace_thermal_zone_trip(tz, trip, trip_type);
++
++ if (tz->ops->notify)
++ tz->ops->notify(tz, trip, trip_type);
++
++ if (trip_type == THERMAL_TRIP_CRITICAL) {
++ dev_emerg(&tz->device,
++ "critical temperature reached(%d C),shutting down\n",
++ tz->temperature / 1000);
++ orderly_poweroff(true);
++ }
++}
++
++static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
++{
++ enum thermal_trip_type type;
++
++ tz->ops->get_trip_type(tz, trip, &type);
++
++ if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
++ handle_critical_trips(tz, trip, type);
++ else
++ handle_non_critical_trips(tz, trip, type);
++ /*
++ * Alright, we handled this trip successfully.
++ * So, start monitoring again.
++ */
++ monitor_thermal_zone(tz);
++}
++
++/**
++ * thermal_zone_get_temp() - returns its the temperature of thermal zone
++ * @tz: a valid pointer to a struct thermal_zone_device
++ * @temp: a valid pointer to where to store the resulting temperature.
++ *
++ * When a valid thermal zone reference is passed, it will fetch its
++ * temperature and fill @temp.
++ *
++ * Return: On success returns 0, an error code otherwise
++ */
++int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
++{
++ int ret = -EINVAL;
++#ifdef CONFIG_THERMAL_EMULATION
++ int count;
++ unsigned long crit_temp = -1UL;
++ enum thermal_trip_type type;
++#endif
++
++ if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
++ goto exit;
++
++ mutex_lock(&tz->lock);
++
++ ret = tz->ops->get_temp(tz, temp);
++#ifdef CONFIG_THERMAL_EMULATION
++ if (!tz->emul_temperature)
++ goto skip_emul;
++
++ for (count = 0; count < tz->trips; count++) {
++ ret = tz->ops->get_trip_type(tz, count, &type);
++ if (!ret && type == THERMAL_TRIP_CRITICAL) {
++ ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
++ break;
++ }
++ }
++
++ if (ret)
++ goto skip_emul;
++
++ if (*temp < crit_temp)
++ *temp = tz->emul_temperature;
++skip_emul:
++#endif
++ mutex_unlock(&tz->lock);
++exit:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
++
++static void update_temperature(struct thermal_zone_device *tz)
++{
++ long temp;
++ int ret;
++
++ ret = thermal_zone_get_temp(tz, &temp);
++ if (ret) {
++ dev_warn(&tz->device, "failed to read out thermal zone %d\n",
++ tz->id);
++ return;
++ }
++
++ mutex_lock(&tz->lock);
++ tz->last_temperature = tz->temperature;
++ tz->temperature = temp;
++ mutex_unlock(&tz->lock);
++
++ trace_thermal_temperature(tz);
++ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
++ tz->last_temperature, tz->temperature);
++}
++
++void thermal_zone_device_update(struct thermal_zone_device *tz)
++{
++ int count;
++
++ if (!tz->ops->get_temp)
++ return;
++
++ update_temperature(tz);
++
++ for (count = 0; count < tz->trips; count++)
++ handle_thermal_trip(tz, count);
++}
++EXPORT_SYMBOL_GPL(thermal_zone_device_update);
++
++static void thermal_zone_device_check(struct work_struct *work)
++{
++ struct thermal_zone_device *tz = container_of(work, struct
++ thermal_zone_device,
++ poll_queue.work);
++ thermal_zone_device_update(tz);
++}
++
++/* sys I/F for thermal zone */
++
++#define to_thermal_zone(_dev) \
++ container_of(_dev, struct thermal_zone_device, device)
++
++static ssize_t
++type_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++
++ return sprintf(buf, "%s\n", tz->type);
++}
++
++static ssize_t
++temp_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ long temperature;
++ int ret;
++
++ ret = thermal_zone_get_temp(tz, &temperature);
++
++ if (ret)
++ return ret;
++
++ return sprintf(buf, "%ld\n", temperature);
++}
++
++static ssize_t
++mode_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ enum thermal_device_mode mode;
++ int result;
++
++ if (!tz->ops->get_mode)
++ return -EPERM;
++
++ result = tz->ops->get_mode(tz, &mode);
++ if (result)
++ return result;
++
++ return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? "enabled"
++ : "disabled");
++}
++
++static ssize_t
++mode_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ int result;
++
++ if (!tz->ops->set_mode)
++ return -EPERM;
++
++ if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
++ result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED);
++ else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
++ result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED);
++ else
++ result = -EINVAL;
++
++ if (result)
++ return result;
++
++ return count;
++}
++
++static ssize_t
++trip_point_type_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ enum thermal_trip_type type;
++ int trip, result;
++
++ if (!tz->ops->get_trip_type)
++ return -EPERM;
++
++ if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip))
++ return -EINVAL;
++
++ result = tz->ops->get_trip_type(tz, trip, &type);
++ if (result)
++ return result;
++
++ switch (type) {
++ case THERMAL_TRIP_CRITICAL:
++ return sprintf(buf, "critical\n");
++ case THERMAL_TRIP_HOT:
++ return sprintf(buf, "hot\n");
++ case THERMAL_TRIP_PASSIVE:
++ return sprintf(buf, "passive\n");
++ case THERMAL_TRIP_ACTIVE:
++ return sprintf(buf, "active\n");
++ default:
++ return sprintf(buf, "unknown\n");
++ }
++}
++
++static ssize_t
++trip_point_temp_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ int trip, ret;
++ unsigned long temperature;
++
++ if (!tz->ops->set_trip_temp)
++ return -EPERM;
++
++ if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
++ return -EINVAL;
++
++ if (kstrtoul(buf, 10, &temperature))
++ return -EINVAL;
++
++ ret = tz->ops->set_trip_temp(tz, trip, temperature);
++
++ return ret ? ret : count;
++}
++
++static ssize_t
++trip_point_temp_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ int trip, ret;
++ long temperature;
++
++ if (!tz->ops->get_trip_temp)
++ return -EPERM;
++
++ if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
++ return -EINVAL;
++
++ ret = tz->ops->get_trip_temp(tz, trip, &temperature);
++
++ if (ret)
++ return ret;
++
++ return sprintf(buf, "%ld\n", temperature);
++}
++
++static ssize_t
++trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ int trip, ret;
++ unsigned long temperature;
++
++ if (!tz->ops->set_trip_hyst)
++ return -EPERM;
++
++ if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
++ return -EINVAL;
++
++ if (kstrtoul(buf, 10, &temperature))
++ return -EINVAL;
++
++ /*
++ * We are not doing any check on the 'temperature' value
++ * here. The driver implementing 'set_trip_hyst' has to
++ * take care of this.
++ */
++ ret = tz->ops->set_trip_hyst(tz, trip, temperature);
++
++ return ret ? ret : count;
++}
++
++static ssize_t
++trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ int trip, ret;
++ unsigned long temperature;
++
++ if (!tz->ops->get_trip_hyst)
++ return -EPERM;
++
++ if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
++ return -EINVAL;
++
++ ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
++
++ return ret ? ret : sprintf(buf, "%ld\n", temperature);
++}
++
++static ssize_t
++passive_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ struct thermal_cooling_device *cdev = NULL;
++ int state;
++
++ if (!sscanf(buf, "%d\n", &state))
++ return -EINVAL;
++
++ /* sanity check: values below 1000 millicelcius don't make sense
++ * and can cause the system to go into a thermal heart attack
++ */
++ if (state && state < 1000)
++ return -EINVAL;
++
++ if (state && !tz->forced_passive) {
++ mutex_lock(&thermal_list_lock);
++ list_for_each_entry(cdev, &thermal_cdev_list, node) {
++ if (!strncmp("Processor", cdev->type,
++ sizeof("Processor")))
++ thermal_zone_bind_cooling_device(tz,
++ THERMAL_TRIPS_NONE, cdev,
++ THERMAL_NO_LIMIT,
++ THERMAL_NO_LIMIT);
++ }
++ mutex_unlock(&thermal_list_lock);
++ if (!tz->passive_delay)
++ tz->passive_delay = 1000;
++ } else if (!state && tz->forced_passive) {
++ mutex_lock(&thermal_list_lock);
++ list_for_each_entry(cdev, &thermal_cdev_list, node) {
++ if (!strncmp("Processor", cdev->type,
++ sizeof("Processor")))
++ thermal_zone_unbind_cooling_device(tz,
++ THERMAL_TRIPS_NONE,
++ cdev);
++ }
++ mutex_unlock(&thermal_list_lock);
++ tz->passive_delay = 0;
++ }
++
++ tz->forced_passive = state;
++
++ thermal_zone_device_update(tz);
++
++ return count;
++}
++
++static ssize_t
++passive_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++
++ return sprintf(buf, "%d\n", tz->forced_passive);
++}
++
++static ssize_t
++policy_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int ret = -EINVAL;
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ struct thermal_governor *gov;
++ char name[THERMAL_NAME_LENGTH];
++
++ snprintf(name, sizeof(name), "%s", buf);
++
++ mutex_lock(&thermal_governor_lock);
++
++ gov = __find_governor(strim(name));
++ if (!gov)
++ goto exit;
++
++ tz->governor = gov;
++ ret = count;
++
++exit:
++ mutex_unlock(&thermal_governor_lock);
++ return ret;
++}
++
++static ssize_t
++policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++
++ return sprintf(buf, "%s\n", tz->governor->name);
++}
++
++#ifdef CONFIG_THERMAL_EMULATION
++static ssize_t
++emul_temp_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct thermal_zone_device *tz = to_thermal_zone(dev);
++ int ret = 0;
++ unsigned long temperature;
++
++ if (kstrtoul(buf, 10, &temperature))
++ return -EINVAL;
++
++ if (!tz->ops->set_emul_temp) {
++ mutex_lock(&tz->lock);
++ tz->emul_temperature = temperature;
++ mutex_unlock(&tz->lock);
++ } else {
++ ret = tz->ops->set_emul_temp(tz, temperature);
++ }
++
++ if (!ret)
++ thermal_zone_device_update(tz);
++
++ return ret ? ret : count;
++}
++static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
++#endif/*CONFIG_THERMAL_EMULATION*/
++
++static DEVICE_ATTR(type, 0444, type_show, NULL);
++static DEVICE_ATTR(temp, 0444, temp_show, NULL);
++static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
++static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
++static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
++
++/* sys I/F for cooling device */
++#define to_cooling_device(_dev) \
++ container_of(_dev, struct thermal_cooling_device, device)
++
++static ssize_t
++thermal_cooling_device_type_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct thermal_cooling_device *cdev = to_cooling_device(dev);
++
++ return sprintf(buf, "%s\n", cdev->type);
++}
++
++static ssize_t
++thermal_cooling_device_max_state_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct thermal_cooling_device *cdev = to_cooling_device(dev);
++ unsigned long state;
++ int ret;
++
++ ret = cdev->ops->get_max_state(cdev, &state);
++ if (ret)
++ return ret;
++ return sprintf(buf, "%ld\n", state);
++}
++
++static ssize_t
++thermal_cooling_device_cur_state_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct thermal_cooling_device *cdev = to_cooling_device(dev);
++ unsigned long state;
++ int ret;
++
++ ret = cdev->ops->get_cur_state(cdev, &state);
++ if (ret)
++ return ret;
++ return sprintf(buf, "%ld\n", state);
++}
++
++static ssize_t
++thermal_cooling_device_cur_state_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct thermal_cooling_device *cdev = to_cooling_device(dev);
++ unsigned long state;
++ int result;
++
++ if (!sscanf(buf, "%ld\n", &state))
++ return -EINVAL;
++
++ if ((long)state < 0)
++ return -EINVAL;
++
++ result = cdev->ops->set_cur_state(cdev, state);
++ if (result)
++ return result;
++ return count;
++}
++
++static struct device_attribute dev_attr_cdev_type =
++__ATTR(type, 0444, thermal_cooling_device_type_show, NULL);
++static DEVICE_ATTR(max_state, 0444,
++ thermal_cooling_device_max_state_show, NULL);
++static DEVICE_ATTR(cur_state, 0644,
++ thermal_cooling_device_cur_state_show,
++ thermal_cooling_device_cur_state_store);
++
++static ssize_t
++thermal_cooling_device_trip_point_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct thermal_instance *instance;
++
++ instance =
++ container_of(attr, struct thermal_instance, attr);
++
++ if (instance->trip == THERMAL_TRIPS_NONE)
++ return sprintf(buf, "-1\n");
++ else
++ return sprintf(buf, "%d\n", instance->trip);
++}
++
++/* Device management */
++
++/**
++ * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
++ * @tz: pointer to struct thermal_zone_device
++ * @trip: indicates which trip point the cooling devices is
++ * associated with in this thermal zone.
++ * @cdev: pointer to struct thermal_cooling_device
++ * @upper: the Maximum cooling state for this trip point.
++ * THERMAL_NO_LIMIT means no upper limit,
++ * and the cooling device can be in max_state.
++ * @lower: the Minimum cooling state can be used for this trip point.
++ * THERMAL_NO_LIMIT means no lower limit,
++ * and the cooling device can be in cooling state 0.
++ *
++ * This interface function bind a thermal cooling device to the certain trip
++ * point of a thermal zone device.
++ * This function is usually called in the thermal zone device .bind callback.
++ *
++ * Return: 0 on success, the proper error value otherwise.
++ */
++int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
++ int trip,
++ struct thermal_cooling_device *cdev,
++ unsigned long upper, unsigned long lower)
++{
++ struct thermal_instance *dev;
++ struct thermal_instance *pos;
++ struct thermal_zone_device *pos1;
++ struct thermal_cooling_device *pos2;
++ unsigned long max_state;
++ int result;
++
++ if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
++ return -EINVAL;
++
++ list_for_each_entry(pos1, &thermal_tz_list, node) {
++ if (pos1 == tz)
++ break;
++ }
++ list_for_each_entry(pos2, &thermal_cdev_list, node) {
++ if (pos2 == cdev)
++ break;
++ }
++
++ if (tz != pos1 || cdev != pos2)
++ return -EINVAL;
++
++ cdev->ops->get_max_state(cdev, &max_state);
++
++ /* lower default 0, upper default max_state */
++ lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
++ upper = upper == THERMAL_NO_LIMIT ? max_state : upper;
++
++ if (lower > upper || upper > max_state)
++ return -EINVAL;
++
++ dev =
++ kzalloc(sizeof(struct thermal_instance), GFP_KERNEL);
++ if (!dev)
++ return -ENOMEM;
++ dev->tz = tz;
++ dev->cdev = cdev;
++ dev->trip = trip;
++ dev->upper = upper;
++ dev->lower = lower;
++ dev->target = THERMAL_NO_TARGET;
++
++ result = get_idr(&tz->idr, &tz->lock, &dev->id);
++ if (result)
++ goto free_mem;
++
++ sprintf(dev->name, "cdev%d", dev->id);
++ result =
++ sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name);
++ if (result)
++ goto release_idr;
++
++ sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
++ sysfs_attr_init(&dev->attr.attr);
++ dev->attr.attr.name = dev->attr_name;
++ dev->attr.attr.mode = 0444;
++ dev->attr.show = thermal_cooling_device_trip_point_show;
++ result = device_create_file(&tz->device, &dev->attr);
++ if (result)
++ goto remove_symbol_link;
++
++ mutex_lock(&tz->lock);
++ mutex_lock(&cdev->lock);
++ list_for_each_entry(pos, &tz->thermal_instances, tz_node)
++ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
++ result = -EEXIST;
++ break;
++ }
++ if (!result) {
++ list_add_tail(&dev->tz_node, &tz->thermal_instances);
++ list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
++ }
++ mutex_unlock(&cdev->lock);
++ mutex_unlock(&tz->lock);
++
++ if (!result)
++ return 0;
++
++ device_remove_file(&tz->device, &dev->attr);
++remove_symbol_link:
++ sysfs_remove_link(&tz->device.kobj, dev->name);
++release_idr:
++ release_idr(&tz->idr, &tz->lock, dev->id);
++free_mem:
++ kfree(dev);
++ return result;
++}
++EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
++
++/**
++ * thermal_zone_unbind_cooling_device() - unbind a cooling device from a
++ * thermal zone.
++ * @tz: pointer to a struct thermal_zone_device.
++ * @trip: indicates which trip point the cooling devices is
++ * associated with in this thermal zone.
++ * @cdev: pointer to a struct thermal_cooling_device.
++ *
++ * This interface function unbind a thermal cooling device from the certain
++ * trip point of a thermal zone device.
++ * This function is usually called in the thermal zone device .unbind callback.
++ *
++ * Return: 0 on success, the proper error value otherwise.
++ */
++int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
++ int trip,
++ struct thermal_cooling_device *cdev)
++{
++ struct thermal_instance *pos, *next;
++
++ mutex_lock(&tz->lock);
++ mutex_lock(&cdev->lock);
++ list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
++ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
++ list_del(&pos->tz_node);
++ list_del(&pos->cdev_node);
++ mutex_unlock(&cdev->lock);
++ mutex_unlock(&tz->lock);
++ goto unbind;
++ }
++ }
++ mutex_unlock(&cdev->lock);
++ mutex_unlock(&tz->lock);
++
++ return -ENODEV;
++
++unbind:
++ device_remove_file(&tz->device, &pos->attr);
++ sysfs_remove_link(&tz->device.kobj, pos->name);
++ release_idr(&tz->idr, &tz->lock, pos->id);
++ kfree(pos);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device);
++
++static void thermal_release(struct device *dev)
++{
++ struct thermal_zone_device *tz;
++ struct thermal_cooling_device *cdev;
++
++ if (!strncmp(dev_name(dev), "thermal_zone",
++ sizeof("thermal_zone") - 1)) {
++ tz = to_thermal_zone(dev);
++ kfree(tz);
++ } else if(!strncmp(dev_name(dev), "cooling_device",
++ sizeof("cooling_device") - 1)){
++ cdev = to_cooling_device(dev);
++ kfree(cdev);
++ }
++}
++
++static struct class thermal_class = {
++ .name = "thermal",
++ .dev_release = thermal_release,
++};
++
++/**
++ * __thermal_cooling_device_register() - register a new thermal cooling device
++ * @np: a pointer to a device tree node.
++ * @type: the thermal cooling device type.
++ * @devdata: device private data.
++ * @ops: standard thermal cooling devices callbacks.
++ *
++ * This interface function adds a new thermal cooling device (fan/processor/...)
++ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
++ * to all the thermal zone devices registered at the same time.
++ * It also gives the opportunity to link the cooling device to a device tree
++ * node, so that it can be bound to a thermal zone created out of device tree.
++ *
++ * Return: a pointer to the created struct thermal_cooling_device or an
++ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
++ */
++static struct thermal_cooling_device *
++__thermal_cooling_device_register(struct device_node *np,
++ char *type, void *devdata,
++ const struct thermal_cooling_device_ops *ops)
++{
++ struct thermal_cooling_device *cdev;
++ int result;
++
++ if (type && strlen(type) >= THERMAL_NAME_LENGTH)
++ return ERR_PTR(-EINVAL);
++
++ if (!ops || !ops->get_max_state || !ops->get_cur_state ||
++ !ops->set_cur_state)
++ return ERR_PTR(-EINVAL);
++
++ cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL);
++ if (!cdev)
++ return ERR_PTR(-ENOMEM);
++
++ result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id);
++ if (result) {
++ kfree(cdev);
++ return ERR_PTR(result);
++ }
++
++ strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
++ mutex_init(&cdev->lock);
++ INIT_LIST_HEAD(&cdev->thermal_instances);
++ cdev->np = np;
++ cdev->ops = ops;
++ cdev->updated = false;
++ cdev->device.class = &thermal_class;
++ cdev->devdata = devdata;
++ dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
++ result = device_register(&cdev->device);
++ if (result) {
++ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
++ kfree(cdev);
++ return ERR_PTR(result);
++ }
++
++ /* sys I/F */
++ if (type) {
++ result = device_create_file(&cdev->device, &dev_attr_cdev_type);
++ if (result)
++ goto unregister;
++ }
++
++ result = device_create_file(&cdev->device, &dev_attr_max_state);
++ if (result)
++ goto unregister;
++
++ result = device_create_file(&cdev->device, &dev_attr_cur_state);
++ if (result)
++ goto unregister;
++
++ /* Add 'this' new cdev to the global cdev list */
++ mutex_lock(&thermal_list_lock);
++ list_add(&cdev->node, &thermal_cdev_list);
++ mutex_unlock(&thermal_list_lock);
++
++ /* Update binding information for 'this' new cdev */
++ bind_cdev(cdev);
++
++ return cdev;
++
++unregister:
++ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
++ device_unregister(&cdev->device);
++ return ERR_PTR(result);
++}
++
++/**
++ * thermal_cooling_device_register() - register a new thermal cooling device
++ * @type: the thermal cooling device type.
++ * @devdata: device private data.
++ * @ops: standard thermal cooling devices callbacks.
++ *
++ * This interface function adds a new thermal cooling device (fan/processor/...)
++ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
++ * to all the thermal zone devices registered at the same time.
++ *
++ * Return: a pointer to the created struct thermal_cooling_device or an
++ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
++ */
++struct thermal_cooling_device *
++thermal_cooling_device_register(char *type, void *devdata,
++ const struct thermal_cooling_device_ops *ops)
++{
++ return __thermal_cooling_device_register(NULL, type, devdata, ops);
++}
++EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
++
++/**
++ * thermal_of_cooling_device_register() - register an OF thermal cooling device
++ * @np: a pointer to a device tree node.
++ * @type: the thermal cooling device type.
++ * @devdata: device private data.
++ * @ops: standard thermal cooling devices callbacks.
++ *
++ * This function will register a cooling device with device tree node reference.
++ * This interface function adds a new thermal cooling device (fan/processor/...)
++ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
++ * to all the thermal zone devices registered at the same time.
++ *
++ * Return: a pointer to the created struct thermal_cooling_device or an
++ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
++ */
++struct thermal_cooling_device *
++thermal_of_cooling_device_register(struct device_node *np,
++ char *type, void *devdata,
++ const struct thermal_cooling_device_ops *ops)
++{
++ return __thermal_cooling_device_register(np, type, devdata, ops);
++}
++EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
++
++/**
++ * thermal_cooling_device_unregister - removes the registered thermal cooling device
++ * @cdev: the thermal cooling device to remove.
++ *
++ * thermal_cooling_device_unregister() must be called when the device is no
++ * longer needed.
++ */
++void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
++{
++ int i;
++ const struct thermal_zone_params *tzp;
++ struct thermal_zone_device *tz;
++ struct thermal_cooling_device *pos = NULL;
++
++ if (!cdev)
++ return;
++
++ mutex_lock(&thermal_list_lock);
++ list_for_each_entry(pos, &thermal_cdev_list, node)
++ if (pos == cdev)
++ break;
++ if (pos != cdev) {
++ /* thermal cooling device not found */
++ mutex_unlock(&thermal_list_lock);
++ return;
++ }
++ list_del(&cdev->node);
++
++ /* Unbind all thermal zones associated with 'this' cdev */
++ list_for_each_entry(tz, &thermal_tz_list, node) {
++ if (tz->ops->unbind) {
++ tz->ops->unbind(tz, cdev);
++ continue;
++ }
++
++ if (!tz->tzp || !tz->tzp->tbp)
++ continue;
++
++ tzp = tz->tzp;
++ for (i = 0; i < tzp->num_tbps; i++) {
++ if (tzp->tbp[i].cdev == cdev) {
++ __unbind(tz, tzp->tbp[i].trip_mask, cdev);
++ tzp->tbp[i].cdev = NULL;
++ }
++ }
++ }
++
++ mutex_unlock(&thermal_list_lock);
++
++ if (cdev->type[0])
++ device_remove_file(&cdev->device, &dev_attr_cdev_type);
++ device_remove_file(&cdev->device, &dev_attr_max_state);
++ device_remove_file(&cdev->device, &dev_attr_cur_state);
++
++ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
++ device_unregister(&cdev->device);
++ return;
++}
++EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
++
++void thermal_cdev_update(struct thermal_cooling_device *cdev)
++{
++ struct thermal_instance *instance;
++ unsigned long target = 0;
++
++ /* cooling device is updated*/
++ if (cdev->updated)
++ return;
++
++ mutex_lock(&cdev->lock);
++ /* Make sure cdev enters the deepest cooling state */
++ list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
++ dev_dbg(&cdev->device, "zone%d->target=%lu\n",
++ instance->tz->id, instance->target);
++ if (instance->target == THERMAL_NO_TARGET)
++ continue;
++ if (instance->target > target)
++ target = instance->target;
++ }
++ mutex_unlock(&cdev->lock);
++ cdev->ops->set_cur_state(cdev, target);
++ cdev->updated = true;
++ trace_cdev_update(cdev, target);
++ dev_dbg(&cdev->device, "set to state %lu\n", target);
++}
++EXPORT_SYMBOL(thermal_cdev_update);
++
++/**
++ * thermal_notify_framework - Sensor drivers use this API to notify framework
++ * @tz: thermal zone device
++ * @trip: indicates which trip point has been crossed
++ *
++ * This function handles the trip events from sensor drivers. It starts
++ * throttling the cooling devices according to the policy configured.
++ * For CRITICAL and HOT trip points, this notifies the respective drivers,
++ * and does actual throttling for other trip points i.e ACTIVE and PASSIVE.
++ * The throttling policy is based on the configured platform data; if no
++ * platform data is provided, this uses the step_wise throttling policy.
++ */
++void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
++{
++ handle_thermal_trip(tz, trip);
++}
++EXPORT_SYMBOL_GPL(thermal_notify_framework);
++
++/**
++ * create_trip_attrs() - create attributes for trip points
++ * @tz: the thermal zone device
++ * @mask: Writeable trip point bitmap.
++ *
++ * helper function to instantiate sysfs entries for every trip
++ * point and its properties of a struct thermal_zone_device.
++ *
++ * Return: 0 on success, the proper error value otherwise.
++ */
++static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
++{
++ int indx;
++ int size = sizeof(struct thermal_attr) * tz->trips;
++
++ tz->trip_type_attrs = kzalloc(size, GFP_KERNEL);
++ if (!tz->trip_type_attrs)
++ return -ENOMEM;
++
++ tz->trip_temp_attrs = kzalloc(size, GFP_KERNEL);
++ if (!tz->trip_temp_attrs) {
++ kfree(tz->trip_type_attrs);
++ return -ENOMEM;
++ }
++
++ if (tz->ops->get_trip_hyst) {
++ tz->trip_hyst_attrs = kzalloc(size, GFP_KERNEL);
++ if (!tz->trip_hyst_attrs) {
++ kfree(tz->trip_type_attrs);
++ kfree(tz->trip_temp_attrs);
++ return -ENOMEM;
++ }
++ }
++
++
++ for (indx = 0; indx < tz->trips; indx++) {
++ /* create trip type attribute */
++ snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
++ "trip_point_%d_type", indx);
++
++ sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
++ tz->trip_type_attrs[indx].attr.attr.name =
++ tz->trip_type_attrs[indx].name;
++ tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
++ tz->trip_type_attrs[indx].attr.show = trip_point_type_show;
++
++ device_create_file(&tz->device,
++ &tz->trip_type_attrs[indx].attr);
++
++ /* create trip temp attribute */
++ snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH,
++ "trip_point_%d_temp", indx);
++
++ sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr);
++ tz->trip_temp_attrs[indx].attr.attr.name =
++ tz->trip_temp_attrs[indx].name;
++ tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
++ tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
++ if (mask & (1 << indx)) {
++ tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
++ tz->trip_temp_attrs[indx].attr.store =
++ trip_point_temp_store;
++ }
++
++ device_create_file(&tz->device,
++ &tz->trip_temp_attrs[indx].attr);
++
++ /* create Optional trip hyst attribute */
++ if (!tz->ops->get_trip_hyst)
++ continue;
++ snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
++ "trip_point_%d_hyst", indx);
++
++ sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr);
++ tz->trip_hyst_attrs[indx].attr.attr.name =
++ tz->trip_hyst_attrs[indx].name;
++ tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
++ tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
++ if (tz->ops->set_trip_hyst) {
++ tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
++ tz->trip_hyst_attrs[indx].attr.store =
++ trip_point_hyst_store;
++ }
++
++ device_create_file(&tz->device,
++ &tz->trip_hyst_attrs[indx].attr);
++ }
++ return 0;
++}
++
++static void remove_trip_attrs(struct thermal_zone_device *tz)
++{
++ int indx;
++
++ for (indx = 0; indx < tz->trips; indx++) {
++ device_remove_file(&tz->device,
++ &tz->trip_type_attrs[indx].attr);
++ device_remove_file(&tz->device,
++ &tz->trip_temp_attrs[indx].attr);
++ if (tz->ops->get_trip_hyst)
++ device_remove_file(&tz->device,
++ &tz->trip_hyst_attrs[indx].attr);
++ }
++ kfree(tz->trip_type_attrs);
++ kfree(tz->trip_temp_attrs);
++ kfree(tz->trip_hyst_attrs);
++}
++
++/**
++ * thermal_zone_device_register() - register a new thermal zone device
++ * @type: the thermal zone device type
++ * @trips: the number of trip points the thermal zone support
++ * @mask: a bit string indicating the writeablility of trip points
++ * @devdata: private device data
++ * @ops: standard thermal zone device callbacks
++ * @tzp: thermal zone platform parameters
++ * @passive_delay: number of milliseconds to wait between polls when
++ * performing passive cooling
++ * @polling_delay: number of milliseconds to wait between polls when checking
++ * whether trip points have been crossed (0 for interrupt
++ * driven systems)
++ *
++ * This interface function adds a new thermal zone device (sensor) to
++ * /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
++ * thermal cooling devices registered at the same time.
++ * thermal_zone_device_unregister() must be called when the device is no
++ * longer needed. The passive cooling depends on the .get_trend() return value.
++ *
++ * Return: a pointer to the created struct thermal_zone_device or an
++ * in case of error, an ERR_PTR. Caller must check return value with
++ * IS_ERR*() helpers.
++ */
++struct thermal_zone_device *thermal_zone_device_register(const char *type,
++ int trips, int mask, void *devdata,
++ struct thermal_zone_device_ops *ops,
++ const struct thermal_zone_params *tzp,
++ int passive_delay, int polling_delay)
++{
++ struct thermal_zone_device *tz;
++ enum thermal_trip_type trip_type;
++ int result;
++ int count;
++ int passive = 0;
++
++ if (type && strlen(type) >= THERMAL_NAME_LENGTH)
++ return ERR_PTR(-EINVAL);
++
++ if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
++ return ERR_PTR(-EINVAL);
++
++ if (!ops)
++ return ERR_PTR(-EINVAL);
++
++ if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
++ return ERR_PTR(-EINVAL);
++
++ tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
++ if (!tz)
++ return ERR_PTR(-ENOMEM);
++
++ INIT_LIST_HEAD(&tz->thermal_instances);
++ idr_init(&tz->idr);
++ mutex_init(&tz->lock);
++ result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id);
++ if (result) {
++ kfree(tz);
++ return ERR_PTR(result);
++ }
++
++ strlcpy(tz->type, type ? : "", sizeof(tz->type));
++ tz->ops = ops;
++ tz->tzp = tzp;
++ tz->device.class = &thermal_class;
++ tz->devdata = devdata;
++ tz->trips = trips;
++ tz->passive_delay = passive_delay;
++ tz->polling_delay = polling_delay;
++
++ dev_set_name(&tz->device, "thermal_zone%d", tz->id);
++ result = device_register(&tz->device);
++ if (result) {
++ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
++ kfree(tz);
++ return ERR_PTR(result);
++ }
++
++ /* sys I/F */
++ if (type) {
++ result = device_create_file(&tz->device, &dev_attr_type);
++ if (result)
++ goto unregister;
++ }
++
++ result = device_create_file(&tz->device, &dev_attr_temp);
++ if (result)
++ goto unregister;
++
++ if (ops->get_mode) {
++ result = device_create_file(&tz->device, &dev_attr_mode);
++ if (result)
++ goto unregister;
++ }
++
++ result = create_trip_attrs(tz, mask);
++ if (result)
++ goto unregister;
++
++ for (count = 0; count < trips; count++) {
++ tz->ops->get_trip_type(tz, count, &trip_type);
++ if (trip_type == THERMAL_TRIP_PASSIVE)
++ passive = 1;
++ }
++
++ if (!passive) {
++ result = device_create_file(&tz->device, &dev_attr_passive);
++ if (result)
++ goto unregister;
++ }
++
++#ifdef CONFIG_THERMAL_EMULATION
++ result = device_create_file(&tz->device, &dev_attr_emul_temp);
++ if (result)
++ goto unregister;
++#endif
++ /* Create policy attribute */
++ result = device_create_file(&tz->device, &dev_attr_policy);
++ if (result)
++ goto unregister;
++
++ /* Update 'this' zone's governor information */
++ mutex_lock(&thermal_governor_lock);
++
++ if (tz->tzp)
++ tz->governor = __find_governor(tz->tzp->governor_name);
++ else
++ tz->governor = def_governor;
++
++ mutex_unlock(&thermal_governor_lock);
++
++ if (!tz->tzp || !tz->tzp->no_hwmon) {
++ result = thermal_add_hwmon_sysfs(tz);
++ if (result)
++ goto unregister;
++ }
++
++ mutex_lock(&thermal_list_lock);
++ list_add_tail(&tz->node, &thermal_tz_list);
++ mutex_unlock(&thermal_list_lock);
++
++ /* Bind cooling devices for this zone */
++ bind_tz(tz);
++
++ INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
++
++ if (!tz->ops->get_temp)
++ thermal_zone_device_set_polling(tz, 0);
++
++ thermal_zone_device_update(tz);
++
++ return tz;
++
++unregister:
++ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
++ device_unregister(&tz->device);
++ return ERR_PTR(result);
++}
++EXPORT_SYMBOL_GPL(thermal_zone_device_register);
++
++/**
++ * thermal_device_unregister - removes the registered thermal zone device
++ * @tz: the thermal zone device to remove
++ */
++void thermal_zone_device_unregister(struct thermal_zone_device *tz)
++{
++ int i;
++ const struct thermal_zone_params *tzp;
++ struct thermal_cooling_device *cdev;
++ struct thermal_zone_device *pos = NULL;
++
++ if (!tz)
++ return;
++
++ tzp = tz->tzp;
++
++ mutex_lock(&thermal_list_lock);
++ list_for_each_entry(pos, &thermal_tz_list, node)
++ if (pos == tz)
++ break;
++ if (pos != tz) {
++ /* thermal zone device not found */
++ mutex_unlock(&thermal_list_lock);
++ return;
++ }
++ list_del(&tz->node);
++
++ /* Unbind all cdevs associated with 'this' thermal zone */
++ list_for_each_entry(cdev, &thermal_cdev_list, node) {
++ if (tz->ops->unbind) {
++ tz->ops->unbind(tz, cdev);
++ continue;
++ }
++
++ if (!tzp || !tzp->tbp)
++ break;
++
++ for (i = 0; i < tzp->num_tbps; i++) {
++ if (tzp->tbp[i].cdev == cdev) {
++ __unbind(tz, tzp->tbp[i].trip_mask, cdev);
++ tzp->tbp[i].cdev = NULL;
++ }
++ }
++ }
++
++ mutex_unlock(&thermal_list_lock);
++
++ thermal_zone_device_set_polling(tz, 0);
++
++ if (tz->type[0])
++ device_remove_file(&tz->device, &dev_attr_type);
++ device_remove_file(&tz->device, &dev_attr_temp);
++ if (tz->ops->get_mode)
++ device_remove_file(&tz->device, &dev_attr_mode);
++ device_remove_file(&tz->device, &dev_attr_policy);
++ remove_trip_attrs(tz);
++ tz->governor = NULL;
++
++ thermal_remove_hwmon_sysfs(tz);
++ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
++ idr_destroy(&tz->idr);
++ mutex_destroy(&tz->lock);
++ device_unregister(&tz->device);
++ return;
++}
++EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
++
++/**
++ * thermal_zone_get_zone_by_name() - search for a zone and returns its ref
++ * @name: thermal zone name to fetch the temperature
++ *
++ * When only one zone is found with the passed name, returns a reference to it.
++ *
++ * Return: On success returns a reference to an unique thermal zone with
++ * matching name equals to @name, an ERR_PTR otherwise (-EINVAL for invalid
++ * paramenters, -ENODEV for not found and -EEXIST for multiple matches).
++ */
++struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name)
++{
++ struct thermal_zone_device *pos = NULL, *ref = ERR_PTR(-EINVAL);
++ unsigned int found = 0;
++
++ if (!name)
++ goto exit;
++
++ mutex_lock(&thermal_list_lock);
++ list_for_each_entry(pos, &thermal_tz_list, node)
++ if (!strnicmp(name, pos->type, THERMAL_NAME_LENGTH)) {
++ found++;
++ ref = pos;
++ }
++ mutex_unlock(&thermal_list_lock);
++
++ /* nothing has been found, thus an error code for it */
++ if (found == 0)
++ ref = ERR_PTR(-ENODEV);
++ else if (found > 1)
++ /* Success only when an unique zone is found */
++ ref = ERR_PTR(-EEXIST);
++
++exit:
++ return ref;
++}
++EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
++
++#ifdef CONFIG_NET
++static const struct genl_multicast_group thermal_event_mcgrps[] = {
++ { .name = THERMAL_GENL_MCAST_GROUP_NAME, },
++};
++
++static struct genl_family thermal_event_genl_family = {
++ .id = GENL_ID_GENERATE,
++ .name = THERMAL_GENL_FAMILY_NAME,
++ .version = THERMAL_GENL_VERSION,
++ .maxattr = THERMAL_GENL_ATTR_MAX,
++ .mcgrps = thermal_event_mcgrps,
++ .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps),
++};
++
++int thermal_generate_netlink_event(struct thermal_zone_device *tz,
++ enum events event)
++{
++ struct sk_buff *skb;
++ struct nlattr *attr;
++ struct thermal_genl_event *thermal_event;
++ void *msg_header;
++ int size;
++ int result;
++ static unsigned int thermal_event_seqnum;
++
++ if (!tz)
++ return -EINVAL;
++
++ /* allocate memory */
++ size = nla_total_size(sizeof(struct thermal_genl_event)) +
++ nla_total_size(0);
++
++ skb = genlmsg_new(size, GFP_ATOMIC);
++ if (!skb)
++ return -ENOMEM;
++
++ /* add the genetlink message header */
++ msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++,
++ &thermal_event_genl_family, 0,
++ THERMAL_GENL_CMD_EVENT);
++ if (!msg_header) {
++ nlmsg_free(skb);
++ return -ENOMEM;
++ }
++
++ /* fill the data */
++ attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT,
++ sizeof(struct thermal_genl_event));
++
++ if (!attr) {
++ nlmsg_free(skb);
++ return -EINVAL;
++ }
++
++ thermal_event = nla_data(attr);
++ if (!thermal_event) {
++ nlmsg_free(skb);
++ return -EINVAL;
++ }
++
++ memset(thermal_event, 0, sizeof(struct thermal_genl_event));
++
++ thermal_event->orig = tz->id;
++ thermal_event->event = event;
++
++ /* send multicast genetlink message */
++ result = genlmsg_end(skb, msg_header);
++ if (result < 0) {
++ nlmsg_free(skb);
++ return result;
++ }
++
++ result = genlmsg_multicast(&thermal_event_genl_family, skb, 0,
++ 0, GFP_ATOMIC);
++ if (result)
++ dev_err(&tz->device, "Failed to send netlink event:%d", result);
++
++ return result;
++}
++EXPORT_SYMBOL_GPL(thermal_generate_netlink_event);
++
++static int genetlink_init(void)
++{
++ return genl_register_family(&thermal_event_genl_family);
++}
++
++static void genetlink_exit(void)
++{
++ genl_unregister_family(&thermal_event_genl_family);
++}
++#else /* !CONFIG_NET */
++static inline int genetlink_init(void) { return 0; }
++static inline void genetlink_exit(void) {}
++#endif /* !CONFIG_NET */
++
++static int __init thermal_register_governors(void)
++{
++ int result;
++
++ result = thermal_gov_step_wise_register();
++ if (result)
++ return result;
++
++ result = thermal_gov_fair_share_register();
++ if (result)
++ return result;
++
++ return thermal_gov_user_space_register();
++}
++
++static void thermal_unregister_governors(void)
++{
++ thermal_gov_step_wise_unregister();
++ thermal_gov_fair_share_unregister();
++ thermal_gov_user_space_unregister();
++}
++
++static int __init thermal_init(void)
++{
++ int result;
++
++ result = thermal_register_governors();
++ if (result)
++ goto error;
++
++ result = class_register(&thermal_class);
++ if (result)
++ goto unregister_governors;
++
++ result = genetlink_init();
++ if (result)
++ goto unregister_class;
++
++ result = of_parse_thermal_zones();
++ if (result)
++ goto exit_netlink;
++
++ return 0;
++
++exit_netlink:
++ genetlink_exit();
++unregister_governors:
++ thermal_unregister_governors();
++unregister_class:
++ class_unregister(&thermal_class);
++error:
++ idr_destroy(&thermal_tz_idr);
++ idr_destroy(&thermal_cdev_idr);
++ mutex_destroy(&thermal_idr_lock);
++ mutex_destroy(&thermal_list_lock);
++ mutex_destroy(&thermal_governor_lock);
++ return result;
++}
++
++static void __exit thermal_exit(void)
++{
++ of_thermal_destroy_zones();
++ genetlink_exit();
++ class_unregister(&thermal_class);
++ thermal_unregister_governors();
++ idr_destroy(&thermal_tz_idr);
++ idr_destroy(&thermal_cdev_idr);
++ mutex_destroy(&thermal_idr_lock);
++ mutex_destroy(&thermal_list_lock);
++ mutex_destroy(&thermal_governor_lock);
++}
++
++fs_initcall(thermal_init);
++module_exit(thermal_exit);
+diff -Nur linux-3.14.36/drivers/tty/serial/earlycon.c linux-openelec/drivers/tty/serial/earlycon.c
+--- linux-3.14.36/drivers/tty/serial/earlycon.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/tty/serial/earlycon.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,152 @@
++/*
++ * Copyright (C) 2014 Linaro Ltd.
++ * Author: Rob Herring <robh@kernel.org>
++ *
++ * Based on 8250 earlycon:
++ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
++ * Bjorn Helgaas <bjorn.helgaas@hp.com>
++ *
++ * This program is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/serial_core.h>
++
++#ifdef CONFIG_FIX_EARLYCON_MEM
++#include <asm/fixmap.h>
++#endif
++
++#include <asm/serial.h>
++
++static struct console early_con = {
++ .name = "earlycon",
++ .flags = CON_PRINTBUFFER | CON_BOOT,
++ .index = -1,
++};
++
++static struct earlycon_device early_console_dev = {
++ .con = &early_con,
++};
++
++static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
++{
++ void __iomem *base;
++#ifdef CONFIG_FIX_EARLYCON_MEM
++ set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK);
++ base = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
++ base += paddr & ~PAGE_MASK;
++#else
++ base = ioremap(paddr, size);
++#endif
++ if (!base)
++ pr_err("%s: Couldn't map 0x%llx\n", __func__,
++ (unsigned long long)paddr);
++
++ return base;
++}
++
++static int __init parse_options(struct earlycon_device *device,
++ char *options)
++{
++ struct uart_port *port = &device->port;
++ int mmio, mmio32, length, ret;
++ unsigned long addr;
++
++ if (!options)
++ return -ENODEV;
++
++ mmio = !strncmp(options, "mmio,", 5);
++ mmio32 = !strncmp(options, "mmio32,", 7);
++ if (mmio || mmio32) {
++ port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32);
++ options += mmio ? 5 : 7;
++ ret = kstrtoul(options, 0, &addr);
++ if (ret)
++ return ret;
++ port->mapbase = addr;
++ if (mmio32)
++ port->regshift = 2;
++ } else if (!strncmp(options, "io,", 3)) {
++ port->iotype = UPIO_PORT;
++ options += 3;
++ ret = kstrtoul(options, 0, &addr);
++ if (ret)
++ return ret;
++ port->iobase = addr;
++ mmio = 0;
++ } else if (!strncmp(options, "0x", 2)) {
++ port->iotype = UPIO_MEM;
++ ret = kstrtoul(options, 0, &addr);
++ if (ret)
++ return ret;
++ port->mapbase = addr;
++ } else {
++ return -EINVAL;
++ }
++
++ port->uartclk = BASE_BAUD * 16;
++
++ options = strchr(options, ',');
++ if (options) {
++ options++;
++ ret = kstrtouint(options, 0, &device->baud);
++ if (ret)
++ return ret;
++ length = min(strcspn(options, " ") + 1,
++ (size_t)(sizeof(device->options)));
++ strlcpy(device->options, options, length);
++ }
++
++ if (mmio || mmio32)
++ pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n",
++ mmio32 ? "32" : "",
++ (unsigned long long)port->mapbase,
++ device->options);
++ else
++ pr_info("Early serial console at I/O port 0x%lx (options '%s')\n",
++ port->iobase,
++ device->options);
++
++ return 0;
++}
++
++int __init setup_earlycon(char *buf, const char *match,
++ int (*setup)(struct earlycon_device *, const char *))
++{
++ int err;
++ size_t len;
++ struct uart_port *port = &early_console_dev.port;
++
++ if (!buf || !match || !setup)
++ return 0;
++
++ len = strlen(match);
++ if (strncmp(buf, match, len))
++ return 0;
++ if (buf[len] && (buf[len] != ','))
++ return 0;
++
++ buf += len + 1;
++
++ err = parse_options(&early_console_dev, buf);
++ /* On parsing error, pass the options buf to the setup function */
++ if (!err)
++ buf = NULL;
++
++ if (port->mapbase)
++ port->membase = earlycon_map(port->mapbase, 64);
++
++ early_console_dev.con->data = &early_console_dev;
++ err = setup(&early_console_dev, buf);
++ if (err < 0)
++ return err;
++ if (!early_console_dev.con->write)
++ return -ENODEV;
++
++ register_console(early_console_dev.con);
++ return 0;
++}
+diff -Nur linux-3.14.36/drivers/tty/serial/Kconfig linux-openelec/drivers/tty/serial/Kconfig
+--- linux-3.14.36/drivers/tty/serial/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/tty/serial/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -7,6 +7,13 @@
+ menu "Serial drivers"
+ depends on HAS_IOMEM
+
++config SERIAL_EARLYCON
++ bool
++ help
++ Support for early consoles with the earlycon parameter. This enables
++ the console before standard serial driver is probed. The console is
++ enabled when early_param is processed.
++
+ source "drivers/tty/serial/8250/Kconfig"
+
+ comment "Non-8250 serial port support"
+diff -Nur linux-3.14.36/drivers/tty/serial/Makefile linux-openelec/drivers/tty/serial/Makefile
+--- linux-3.14.36/drivers/tty/serial/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/tty/serial/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -5,6 +5,8 @@
+ obj-$(CONFIG_SERIAL_CORE) += serial_core.o
+ obj-$(CONFIG_SERIAL_21285) += 21285.o
+
++obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
++
+ # These Sparc drivers have to appear before others such as 8250
+ # which share ttySx minor node space. Otherwise console device
+ # names change and other unplesantries.
+diff -Nur linux-3.14.36/drivers/usb/chipidea/ci.h linux-openelec/drivers/usb/chipidea/ci.h
+--- linux-3.14.36/drivers/usb/chipidea/ci.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/ci.h 2015-05-06 12:05:42.000000000 -0500
+@@ -139,8 +139,8 @@
+ * @roles: array of supported roles for this controller
+ * @role: current role
+ * @is_otg: if the device is otg-capable
+- * @work: work for role changing
+- * @wq: workqueue thread
++ * @otg_task: the thread for handling otg task
++ * @otg_wait: the otg event waitqueue head
+ * @qh_pool: allocation pool for queue heads
+ * @td_pool: allocation pool for transfer descriptors
+ * @gadget: device side representation for peripheral controller
+@@ -165,6 +165,10 @@
+ * @b_sess_valid_event: indicates there is a vbus event, and handled
+ * at ci_otg_work
+ * @imx28_write_fix: Freescale imx28 needs swp instruction for writing
++ * @supports_runtime_pm: if runtime pm is supported
++ * @in_lpm: if the core in low power mode
++ * @wakeup_int: if wakeup interrupt occur
++ * @timer: timer to delay clock closing
+ */
+ struct ci_hdrc {
+ struct device *dev;
+@@ -174,8 +178,8 @@
+ struct ci_role_driver *roles[CI_ROLE_END];
+ enum ci_role role;
+ bool is_otg;
+- struct work_struct work;
+- struct workqueue_struct *wq;
++ struct task_struct *otg_task;
++ wait_queue_head_t otg_wait;
+
+ struct dma_pool *qh_pool;
+ struct dma_pool *td_pool;
+@@ -204,6 +208,10 @@
+ bool id_event;
+ bool b_sess_valid_event;
+ bool imx28_write_fix;
++ bool supports_runtime_pm;
++ bool in_lpm;
++ bool wakeup_int;
++ struct timer_list timer;
+ };
+
+ static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
+diff -Nur linux-3.14.36/drivers/usb/chipidea/ci_hdrc_imx.c linux-openelec/drivers/usb/chipidea/ci_hdrc_imx.c
+--- linux-3.14.36/drivers/usb/chipidea/ci_hdrc_imx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/ci_hdrc_imx.c 2015-07-24 18:03:30.212842002 -0500
+@@ -19,11 +19,14 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/usb/chipidea.h>
+ #include <linux/clk.h>
++#include <linux/busfreq-imx6.h>
+
+ #include "ci.h"
+ #include "ci_hdrc_imx.h"
+
+-#define CI_HDRC_IMX_IMX28_WRITE_FIX BIT(0)
++#define CI_HDRC_IMX_IMX28_WRITE_FIX BIT(0)
++#define CI_HDRC_IMX_SUPPORT_RUNTIME_PM BIT(1)
++#define CI_HDRC_IMX_HOST_QUIRK BIT(2)
+
+ struct ci_hdrc_imx_platform_flag {
+ unsigned int flags;
+@@ -32,12 +35,30 @@
+ static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
+ };
+
++static const struct ci_hdrc_imx_platform_flag imx23_usb_data = {
++ .flags = CI_HDRC_IMX_HOST_QUIRK,
++};
++
+ static const struct ci_hdrc_imx_platform_flag imx28_usb_data = {
+- .flags = CI_HDRC_IMX_IMX28_WRITE_FIX,
++ .flags = CI_HDRC_IMX_IMX28_WRITE_FIX |
++ CI_HDRC_IMX_HOST_QUIRK,
++};
++
++static const struct ci_hdrc_imx_platform_flag imx6q_usb_data = {
++ .flags = CI_HDRC_IMX_SUPPORT_RUNTIME_PM |
++ CI_HDRC_IMX_HOST_QUIRK,
++};
++
++static const struct ci_hdrc_imx_platform_flag imx6sl_usb_data = {
++ .flags = CI_HDRC_IMX_SUPPORT_RUNTIME_PM |
++ CI_HDRC_IMX_HOST_QUIRK,
+ };
+
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
++ { .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
++ { .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
+ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
++ { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+ { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
+ { /* sentinel */ }
+ };
+@@ -47,7 +68,10 @@
+ struct usb_phy *phy;
+ struct platform_device *ci_pdev;
+ struct clk *clk;
++ struct clk *clk_phy;
+ struct imx_usbmisc_data *usbmisc_data;
++ bool supports_runtime_pm;
++ bool in_lpm;
+ };
+
+ /* Common functions shared by usbmisc drivers */
+@@ -123,17 +147,31 @@
+ return PTR_ERR(data->clk);
+ }
+
++ request_bus_freq(BUS_FREQ_HIGH);
+ ret = clk_prepare_enable(data->clk);
+ if (ret) {
++ release_bus_freq(BUS_FREQ_HIGH);
+ dev_err(&pdev->dev,
+ "Failed to prepare or enable clock, err=%d\n", ret);
+ return ret;
+ }
+
++ data->clk_phy = devm_clk_get(&pdev->dev, "phy");
++ if (IS_ERR(data->clk_phy)) {
++ data->clk_phy = NULL;
++ } else {
++ ret = clk_prepare_enable(data->clk_phy);
++ if (ret) {
++ dev_err(&pdev->dev,
++ "Failed to enable clk_phy: %d\n", ret);
++ goto err_clk;
++ }
++ }
++
+ data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
+ if (IS_ERR(data->phy)) {
+ ret = PTR_ERR(data->phy);
+- goto err_clk;
++ goto err_clk_phy;
+ }
+
+ pdata.phy = data->phy;
+@@ -145,6 +183,14 @@
+ if (ret)
+ goto err_clk;
+
++ if (imx_platform_flag->flags & CI_HDRC_IMX_SUPPORT_RUNTIME_PM) {
++ pdata.flags |= CI_HDRC_SUPPORTS_RUNTIME_PM;
++ data->supports_runtime_pm = true;
++ }
++
++ if (imx_platform_flag->flags & CI_HDRC_IMX_HOST_QUIRK)
++ pdata.flags |= CI_HDRC_IMX_EHCI_QUIRK;
++
+ if (data->usbmisc_data) {
+ ret = imx_usbmisc_init(data->usbmisc_data);
+ if (ret) {
+@@ -165,6 +211,11 @@
+ goto err_clk;
+ }
+
++ /* usbmisc needs to know dr mode to choose wakeup setting */
++ if (data->usbmisc_data)
++ data->usbmisc_data->available_role =
++ ci_hdrc_query_available_role(data->ci_pdev);
++
+ if (data->usbmisc_data) {
+ ret = imx_usbmisc_init_post(data->usbmisc_data);
+ if (ret) {
+@@ -174,17 +225,34 @@
+ }
+ }
+
++ if (data->usbmisc_data) {
++ ret = imx_usbmisc_set_wakeup(data->usbmisc_data, false);
++ if (ret) {
++ dev_err(&pdev->dev, "usbmisc set_wakeup failed, ret=%d\n",
++ ret);
++ goto disable_device;
++ }
++ }
++
+ platform_set_drvdata(pdev, data);
+
+- pm_runtime_no_callbacks(&pdev->dev);
+- pm_runtime_enable(&pdev->dev);
++ device_set_wakeup_capable(&pdev->dev, true);
++
++ if (data->supports_runtime_pm) {
++ pm_runtime_set_active(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++ }
+
+ return 0;
+
+ disable_device:
+ ci_hdrc_remove_device(data->ci_pdev);
++err_clk_phy:
++ if (data->clk_phy)
++ clk_disable_unprepare(data->clk_phy);
+ err_clk:
+ clk_disable_unprepare(data->clk);
++ release_bus_freq(BUS_FREQ_HIGH);
+ return ret;
+ }
+
+@@ -194,11 +262,122 @@
+
+ pm_runtime_disable(&pdev->dev);
+ ci_hdrc_remove_device(data->ci_pdev);
++ if (data->clk_phy)
++ clk_disable_unprepare(data->clk_phy);
++ clk_disable_unprepare(data->clk);
++ release_bus_freq(BUS_FREQ_HIGH);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int imx_controller_suspend(struct device *dev)
++{
++ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++ int ret;
++
++ dev_dbg(dev, "at %s\n", __func__);
++
++ if (data->in_lpm)
++ return 0;
++
++ if (data->usbmisc_data) {
++ ret = imx_usbmisc_set_wakeup(data->usbmisc_data, true);
++ if (ret) {
++ dev_err(dev,
++ "usbmisc set_wakeup failed, ret=%d\n",
++ ret);
++ return ret;
++ }
++ }
++
+ clk_disable_unprepare(data->clk);
++ release_bus_freq(BUS_FREQ_HIGH);
++ data->in_lpm = true;
+
+ return 0;
+ }
+
++static int imx_controller_resume(struct device *dev)
++{
++ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++ int ret = 0;
++
++ dev_dbg(dev, "at %s\n", __func__);
++
++ if (!data->in_lpm)
++ return 0;
++
++ request_bus_freq(BUS_FREQ_HIGH);
++ ret = clk_prepare_enable(data->clk);
++ if (ret) {
++ release_bus_freq(BUS_FREQ_HIGH);
++ return ret;
++ }
++
++ data->in_lpm = false;
++
++ if (data->usbmisc_data) {
++ ret = imx_usbmisc_set_wakeup(data->usbmisc_data, false);
++ if (ret) {
++ dev_err(dev,
++ "usbmisc set_wakeup failed, ret=%d\n",
++ ret);
++ ret = -EINVAL;
++ goto clk_disable;
++ }
++ }
++
++ return 0;
++
++clk_disable:
++ clk_disable_unprepare(data->clk);
++ release_bus_freq(BUS_FREQ_HIGH);
++
++ return ret;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int ci_hdrc_imx_suspend(struct device *dev)
++{
++ return imx_controller_suspend(dev);
++}
++
++static int ci_hdrc_imx_resume(struct device *dev)
++{
++ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++ int ret;
++
++ ret = imx_controller_resume(dev);
++ if (!ret && data->supports_runtime_pm) {
++ pm_runtime_disable(dev);
++ pm_runtime_set_active(dev);
++ pm_runtime_enable(dev);
++ }
++
++ return ret;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++#ifdef CONFIG_PM_RUNTIME
++static int ci_hdrc_imx_runtime_suspend(struct device *dev)
++{
++ return imx_controller_suspend(dev);
++}
++
++static int ci_hdrc_imx_runtime_resume(struct device *dev)
++{
++ return imx_controller_resume(dev);
++}
++#endif /* CONFIG_PM_RUNTIME */
++
++#endif /* CONFIG_PM */
++static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(ci_hdrc_imx_suspend, ci_hdrc_imx_resume)
++ SET_RUNTIME_PM_OPS(ci_hdrc_imx_runtime_suspend,
++ ci_hdrc_imx_runtime_resume, NULL)
++};
++
+ static struct platform_driver ci_hdrc_imx_driver = {
+ .probe = ci_hdrc_imx_probe,
+ .remove = ci_hdrc_imx_remove,
+@@ -206,6 +385,7 @@
+ .name = "imx_usb",
+ .owner = THIS_MODULE,
+ .of_match_table = ci_hdrc_imx_dt_ids,
++ .pm = &ci_hdrc_imx_pm_ops,
+ },
+ };
+
+diff -Nur linux-3.14.36/drivers/usb/chipidea/ci_hdrc_imx.h linux-openelec/drivers/usb/chipidea/ci_hdrc_imx.h
+--- linux-3.14.36/drivers/usb/chipidea/ci_hdrc_imx.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/ci_hdrc_imx.h 2015-05-06 12:05:42.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+@@ -12,14 +12,18 @@
+ #ifndef __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H
+ #define __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H
+
++#include <linux/usb/otg.h>
++
+ struct imx_usbmisc_data {
+ int index;
+
+ unsigned int disable_oc:1; /* over current detect disabled */
+ unsigned int evdo:1; /* set external vbus divider option */
++ enum usb_dr_mode available_role;
+ };
+
+ int imx_usbmisc_init(struct imx_usbmisc_data *);
+ int imx_usbmisc_init_post(struct imx_usbmisc_data *);
++int imx_usbmisc_set_wakeup(struct imx_usbmisc_data *, bool);
+
+ #endif /* __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H */
+diff -Nur linux-3.14.36/drivers/usb/chipidea/ci_hdrc_msm.c linux-openelec/drivers/usb/chipidea/ci_hdrc_msm.c
+--- linux-3.14.36/drivers/usb/chipidea/ci_hdrc_msm.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/ci_hdrc_msm.c 2015-07-24 18:03:28.488842002 -0500
+@@ -17,7 +17,7 @@
+
+ #define MSM_USB_BASE (ci->hw_bank.abs)
+
+-static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
++static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
+ {
+ struct device *dev = ci->gadget.dev.parent;
+
+@@ -40,6 +40,8 @@
+ dev_dbg(dev, "unknown ci_hdrc event\n");
+ break;
+ }
++
++ return 0;
+ }
+
+ static struct ci_hdrc_platform_data ci_hdrc_msm_platdata = {
+diff -Nur linux-3.14.36/drivers/usb/chipidea/ci_hdrc_msm.c.orig linux-openelec/drivers/usb/chipidea/ci_hdrc_msm.c.orig
+--- linux-3.14.36/drivers/usb/chipidea/ci_hdrc_msm.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/usb/chipidea/ci_hdrc_msm.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,101 @@
++/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
++#include <linux/usb/msm_hsusb_hw.h>
++#include <linux/usb/ulpi.h>
++#include <linux/usb/gadget.h>
++#include <linux/usb/chipidea.h>
++
++#include "ci.h"
++
++#define MSM_USB_BASE (ci->hw_bank.abs)
++
++static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
++{
++ struct device *dev = ci->gadget.dev.parent;
++ int val;
++
++ switch (event) {
++ case CI_HDRC_CONTROLLER_RESET_EVENT:
++ dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
++ writel(0, USB_AHBBURST);
++ writel(0, USB_AHBMODE);
++ break;
++ case CI_HDRC_CONTROLLER_STOPPED_EVENT:
++ dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
++ /*
++ * Put the transceiver in non-driving mode. Otherwise host
++ * may not detect soft-disconnection.
++ */
++ val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
++ val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
++ val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
++ usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
++ break;
++ default:
++ dev_dbg(dev, "unknown ci_hdrc event\n");
++ break;
++ }
++
++ return 0;
++}
++
++static struct ci_hdrc_platform_data ci_hdrc_msm_platdata = {
++ .name = "ci_hdrc_msm",
++ .flags = CI_HDRC_REGS_SHARED |
++ CI_HDRC_REQUIRE_TRANSCEIVER |
++ CI_HDRC_DISABLE_STREAMING,
++
++ .notify_event = ci_hdrc_msm_notify_event,
++};
++
++static int ci_hdrc_msm_probe(struct platform_device *pdev)
++{
++ struct platform_device *plat_ci;
++
++ dev_dbg(&pdev->dev, "ci_hdrc_msm_probe\n");
++
++ plat_ci = ci_hdrc_add_device(&pdev->dev,
++ pdev->resource, pdev->num_resources,
++ &ci_hdrc_msm_platdata);
++ if (IS_ERR(plat_ci)) {
++ dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
++ return PTR_ERR(plat_ci);
++ }
++
++ platform_set_drvdata(pdev, plat_ci);
++
++ pm_runtime_no_callbacks(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++
++ return 0;
++}
++
++static int ci_hdrc_msm_remove(struct platform_device *pdev)
++{
++ struct platform_device *plat_ci = platform_get_drvdata(pdev);
++
++ pm_runtime_disable(&pdev->dev);
++ ci_hdrc_remove_device(plat_ci);
++
++ return 0;
++}
++
++static struct platform_driver ci_hdrc_msm_driver = {
++ .probe = ci_hdrc_msm_probe,
++ .remove = ci_hdrc_msm_remove,
++ .driver = { .name = "msm_hsusb", },
++};
++
++module_platform_driver(ci_hdrc_msm_driver);
++
++MODULE_ALIAS("platform:msm_hsusb");
++MODULE_ALIAS("platform:ci13xxx_msm");
++MODULE_LICENSE("GPL v2");
+diff -Nur linux-3.14.36/drivers/usb/chipidea/core.c linux-openelec/drivers/usb/chipidea/core.c
+--- linux-3.14.36/drivers/usb/chipidea/core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/core.c 2015-05-06 12:05:42.000000000 -0500
+@@ -165,25 +165,30 @@
+ return hw_read(ci, OP_PORTSC, PORTSC_PTC) >> __ffs(PORTSC_PTC);
+ }
+
++static void hw_wait_phy_stable(void)
++{
++ /* The controller needs at least 1ms to reflect PHY's status */
++ usleep_range(2000, 2500);
++}
++
++static void delay_runtime_pm_put_timer(unsigned long arg)
++{
++ struct ci_hdrc *ci = (struct ci_hdrc *)arg;
++
++ pm_runtime_put(ci->dev);
++}
++
+ /* The PHY enters/leaves low power mode */
+ static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
+ {
+ enum ci_hw_regs reg = ci->hw_bank.lpm ? OP_DEVLC : OP_PORTSC;
+ bool lpm = !!(hw_read(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm)));
+
+- if (enable && !lpm) {
++ if (enable && !lpm)
+ hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
+ PORTSC_PHCD(ci->hw_bank.lpm));
+- } else if (!enable && lpm) {
+- hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
+- 0);
+- /*
+- * The controller needs at least 1ms to reflect
+- * PHY's status, the PHY also needs some time (less
+- * than 1ms) to leave low power mode.
+- */
+- usleep_range(1500, 2000);
+- }
++ else if (!enable && lpm)
++ hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm), 0);
+ }
+
+ static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
+@@ -351,6 +356,13 @@
+ irqreturn_t ret = IRQ_NONE;
+ u32 otgsc = 0;
+
++ if (ci->in_lpm) {
++ disable_irq_nosync(irq);
++ ci->wakeup_int = true;
++ pm_runtime_get(ci->dev);
++ return IRQ_HANDLED;
++ }
++
+ if (ci->is_otg)
+ otgsc = hw_read(ci, OP_OTGSC, ~0);
+
+@@ -362,7 +374,7 @@
+ ci->id_event = true;
+ ci_clear_otg_interrupt(ci, OTGSC_IDIS);
+ disable_irq_nosync(ci->irq);
+- queue_work(ci->wq, &ci->work);
++ wake_up(&ci->otg_wait);
+ return IRQ_HANDLED;
+ }
+
+@@ -374,7 +386,7 @@
+ ci->b_sess_valid_event = true;
+ ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
+ disable_irq_nosync(ci->irq);
+- queue_work(ci->wq, &ci->work);
++ wake_up(&ci->otg_wait);
+ return IRQ_HANDLED;
+ }
+
+@@ -473,6 +485,33 @@
+ }
+ EXPORT_SYMBOL_GPL(ci_hdrc_remove_device);
+
++/**
++ * ci_hdrc_query_available_role: get runtime available operation mode
++ *
++ * The glue layer can get current operation mode (host/peripheral/otg)
++ * This function should be called after ci core device has created.
++ *
++ * @pdev: the platform device of ci core.
++ *
++ * Return USB_DR_MODE_XXX.
++ */
++enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev)
++{
++ struct ci_hdrc *ci = platform_get_drvdata(pdev);
++
++ if (!ci)
++ return USB_DR_MODE_UNKNOWN;
++ if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET])
++ return USB_DR_MODE_OTG;
++ else if (ci->roles[CI_ROLE_HOST])
++ return USB_DR_MODE_HOST;
++ else if (ci->roles[CI_ROLE_GADGET])
++ return USB_DR_MODE_PERIPHERAL;
++ else
++ return USB_DR_MODE_UNKNOWN;
++}
++EXPORT_SYMBOL_GPL(ci_hdrc_query_available_role);
++
+ static inline void ci_role_destroy(struct ci_hdrc *ci)
+ {
+ ci_hdrc_gadget_destroy(ci);
+@@ -498,9 +537,14 @@
+
+ static int ci_usb_phy_init(struct ci_hdrc *ci)
+ {
++ int ret;
++
+ if (ci->platdata->phy) {
+ ci->transceiver = ci->platdata->phy;
+- return usb_phy_init(ci->transceiver);
++ ret = usb_phy_init(ci->transceiver);
++ if (!ret)
++ hw_wait_phy_stable();
++ return ret;
+ } else {
+ ci->global_phy = true;
+ ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+@@ -559,8 +603,6 @@
+ return -ENODEV;
+ }
+
+- hw_phymode_configure(ci);
+-
+ ret = ci_usb_phy_init(ci);
+ if (ret) {
+ dev_err(dev, "unable to init phy: %d\n", ret);
+@@ -578,7 +620,13 @@
+
+ ci_get_otg_capable(ci);
+
++ hw_phymode_configure(ci);
++
+ dr_mode = ci->platdata->dr_mode;
++
++ ci->supports_runtime_pm = !!(ci->platdata->flags &
++ CI_HDRC_SUPPORTS_RUNTIME_PM);
++
+ /* initialize role(s) before the interrupt is requested */
+ if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
+ ret = ci_hdrc_host_init(ci);
+@@ -619,11 +667,6 @@
+
+ if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) {
+ if (ci->is_otg) {
+- /*
+- * ID pin needs 1ms debouce time,
+- * we delay 2ms for safe.
+- */
+- mdelay(2);
+ ci->role = ci_otg_role(ci);
+ ci_enable_otg_interrupt(ci, OTGSC_IDIE);
+ } else {
+@@ -656,6 +699,15 @@
+ if (ret)
+ goto stop;
+
++ device_set_wakeup_capable(&pdev->dev, true);
++
++ if (ci->supports_runtime_pm) {
++ pm_runtime_set_active(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++ }
++
++ setup_timer(&ci->timer, delay_runtime_pm_put_timer,
++ (unsigned long)ci);
+ ret = dbg_create_files(ci);
+ if (!ret)
+ return 0;
+@@ -673,6 +725,11 @@
+ {
+ struct ci_hdrc *ci = platform_get_drvdata(pdev);
+
++ if (ci->supports_runtime_pm) {
++ pm_runtime_get_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ pm_runtime_put_noidle(&pdev->dev);
++ }
+ dbg_remove_files(ci);
+ free_irq(ci->irq, ci);
+ ci_role_destroy(ci);
+@@ -682,11 +739,120 @@
+ return 0;
+ }
+
++#ifdef CONFIG_PM
++static int ci_controller_suspend(struct device *dev)
++{
++ struct ci_hdrc *ci = dev_get_drvdata(dev);
++
++ dev_dbg(dev, "at %s\n", __func__);
++
++ if (ci->in_lpm)
++ return 0;
++
++ disable_irq(ci->irq);
++
++ if (ci->transceiver)
++ usb_phy_set_wakeup(ci->transceiver, true);
++
++ ci_hdrc_enter_lpm(ci, true);
++
++ if (ci->transceiver)
++ usb_phy_set_suspend(ci->transceiver, 1);
++
++ ci->in_lpm = true;
++
++ enable_irq(ci->irq);
++
++ return 0;
++}
++
++static int ci_controller_resume(struct device *dev)
++{
++ struct ci_hdrc *ci = dev_get_drvdata(dev);
++
++ dev_dbg(dev, "at %s\n", __func__);
++
++ if (!ci->in_lpm)
++ return 0;
++
++ ci_hdrc_enter_lpm(ci, false);
++
++ if (ci->transceiver) {
++ usb_phy_set_suspend(ci->transceiver, 0);
++ usb_phy_set_wakeup(ci->transceiver, false);
++ hw_wait_phy_stable();
++ }
++
++ ci->in_lpm = false;
++
++ if (ci->wakeup_int) {
++ ci->wakeup_int = false;
++ enable_irq(ci->irq);
++ mod_timer(&ci->timer, jiffies + msecs_to_jiffies(2000));
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int ci_suspend(struct device *dev)
++{
++ struct ci_hdrc *ci = dev_get_drvdata(dev);
++ int ret;
++
++ ret = ci_controller_suspend(dev);
++ if (ret)
++ return ret;
++
++ if (device_may_wakeup(dev))
++ enable_irq_wake(ci->irq);
++
++ return ret;
++}
++
++static int ci_resume(struct device *dev)
++{
++ struct ci_hdrc *ci = dev_get_drvdata(dev);
++ int ret;
++
++ if (device_may_wakeup(dev))
++ disable_irq_wake(ci->irq);
++
++ ret = ci_controller_resume(dev);
++ if (!ret && ci->supports_runtime_pm) {
++ pm_runtime_disable(dev);
++ pm_runtime_set_active(dev);
++ pm_runtime_enable(dev);
++ }
++
++ return ret;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++#ifdef CONFIG_PM_RUNTIME
++static int ci_runtime_suspend(struct device *dev)
++{
++ return ci_controller_suspend(dev);
++}
++
++static int ci_runtime_resume(struct device *dev)
++{
++ return ci_controller_resume(dev);
++}
++#endif /* CONFIG_PM_RUNTIME */
++
++#endif /* CONFIG_PM */
++static const struct dev_pm_ops ci_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(ci_suspend, ci_resume)
++ SET_RUNTIME_PM_OPS(ci_runtime_suspend, ci_runtime_resume, NULL)
++};
++
+ static struct platform_driver ci_hdrc_driver = {
+ .probe = ci_hdrc_probe,
+ .remove = ci_hdrc_remove,
+ .driver = {
+ .name = "ci_hdrc",
++ .pm = &ci_pm_ops,
+ },
+ };
+
+diff -Nur linux-3.14.36/drivers/usb/chipidea/host.c linux-openelec/drivers/usb/chipidea/host.c
+--- linux-3.14.36/drivers/usb/chipidea/host.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/host.c 2015-05-06 12:05:42.000000000 -0500
+@@ -33,6 +33,176 @@
+ #include "host.h"
+
+ static struct hc_driver __read_mostly ci_ehci_hc_driver;
++static int (*orig_bus_suspend)(struct usb_hcd *hcd);
++static int (*orig_bus_resume)(struct usb_hcd *hcd);
++static int (*orig_hub_control)(struct usb_hcd *hcd,
++ u16 typeReq, u16 wValue, u16 wIndex,
++ char *buf, u16 wLength);
++
++static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ int port;
++ u32 tmp;
++
++ int ret = orig_bus_suspend(hcd);
++
++ if (ret)
++ return ret;
++
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *reg = &ehci->regs->port_status[port];
++ u32 portsc = ehci_readl(ehci, reg);
++
++ if (portsc & PORT_CONNECT) {
++ /*
++ * For chipidea, the resume signal will be ended
++ * automatically, so for remote wakeup case, the
++ * usbcmd.rs may not be set before the resume has
++ * ended if other resume path consumes too much
++ * time (~23ms-24ms), in that case, the SOF will not
++ * send out within 3ms after resume ends, then the
++ * device will enter suspend again.
++ */
++ if (hcd->self.root_hub->do_remote_wakeup) {
++ ehci_dbg(ehci,
++ "Remote wakeup is enabled, "
++ "and device is on the port\n");
++
++ tmp = ehci_readl(ehci, &ehci->regs->command);
++ tmp |= CMD_RUN;
++ ehci_writel(ehci, tmp, &ehci->regs->command);
++ /*
++ * It needs a short delay between set RUNSTOP
++ * and set PHCD.
++ */
++ udelay(125);
++ }
++ if (hcd->phy && test_bit(port, &ehci->bus_suspended)
++ && (ehci_port_speed(ehci, portsc) ==
++ USB_PORT_STAT_HIGH_SPEED))
++ /*
++ * notify the USB PHY, it is for global
++ * suspend case.
++ */
++ usb_phy_notify_suspend(hcd->phy,
++ USB_SPEED_HIGH);
++ }
++ }
++
++ return 0;
++}
++
++static int ci_imx_ehci_bus_resume(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ int port;
++
++ int ret = orig_bus_resume(hcd);
++
++ if (ret)
++ return ret;
++
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *reg = &ehci->regs->port_status[port];
++ u32 portsc = ehci_readl(ehci, reg);
++ /*
++ * Notify PHY after resume signal has finished, it is
++ * for global suspend case.
++ */
++ if (hcd->phy
++ && test_bit(port, &ehci->bus_suspended)
++ && (portsc & PORT_CONNECT)
++ && (ehci_port_speed(ehci, portsc) ==
++ USB_PORT_STAT_HIGH_SPEED))
++ /* notify the USB PHY */
++ usb_phy_notify_resume(hcd->phy, USB_SPEED_HIGH);
++ }
++
++ return 0;
++}
++
++/* The below code is based on tegra ehci driver */
++static int ci_imx_ehci_hub_control(
++ struct usb_hcd *hcd,
++ u16 typeReq,
++ u16 wValue,
++ u16 wIndex,
++ char *buf,
++ u16 wLength
++)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ u32 __iomem *status_reg;
++ u32 temp;
++ unsigned long flags;
++ int retval = 0;
++
++ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
++
++ spin_lock_irqsave(&ehci->lock, flags);
++
++ if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
++ temp = ehci_readl(ehci, status_reg);
++ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
++ retval = -EPIPE;
++ goto done;
++ }
++
++ temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
++ temp |= PORT_WKDISC_E | PORT_WKOC_E;
++ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
++
++ /*
++ * If a transaction is in progress, there may be a delay in
++ * suspending the port. Poll until the port is suspended.
++ */
++ if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
++ PORT_SUSPEND, 5000))
++ ehci_err(ehci, "timeout waiting for SUSPEND\n");
++
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ if (ehci_port_speed(ehci, temp) ==
++ USB_PORT_STAT_HIGH_SPEED && hcd->phy) {
++ /* notify the USB PHY */
++ usb_phy_notify_suspend(hcd->phy, USB_SPEED_HIGH);
++ }
++ spin_lock_irqsave(&ehci->lock, flags);
++
++ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
++ goto done;
++ }
++
++ /*
++ * After resume has finished, it needs do some post resume
++ * operation for some SoCs.
++ */
++ else if (typeReq == ClearPortFeature &&
++ wValue == USB_PORT_FEAT_C_SUSPEND) {
++
++ /* Make sure the resume has finished, it should be finished */
++ if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 25000))
++ ehci_err(ehci, "timeout waiting for resume\n");
++
++ temp = ehci_readl(ehci, status_reg);
++
++ if (ehci_port_speed(ehci, temp) ==
++ USB_PORT_STAT_HIGH_SPEED && hcd->phy) {
++ /* notify the USB PHY */
++ usb_phy_notify_resume(hcd->phy, USB_SPEED_HIGH);
++ }
++ }
++
++ spin_unlock_irqrestore(&ehci->lock, flags);
++
++ /* Handle the hub control events here */
++ return orig_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
++done:
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ return retval;
++}
+
+ static irqreturn_t host_irq(struct ci_hdrc *ci)
+ {
+@@ -64,7 +234,6 @@
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = ci->hw_bank.cap;
+ ehci->has_hostpc = ci->hw_bank.lpm;
+- ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
+ ehci->imx28_write_fix = ci->imx28_write_fix;
+
+ if (ci->platdata->reg_vbus) {
+@@ -136,5 +305,15 @@
+
+ ehci_init_driver(&ci_ehci_hc_driver, NULL);
+
++ orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
++ orig_bus_resume = ci_ehci_hc_driver.bus_resume;
++ orig_hub_control = ci_ehci_hc_driver.hub_control;
++
++ ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
++ if (ci->platdata->flags & CI_HDRC_IMX_EHCI_QUIRK) {
++ ci_ehci_hc_driver.bus_resume = ci_imx_ehci_bus_resume;
++ ci_ehci_hc_driver.hub_control = ci_imx_ehci_hub_control;
++ }
++
+ return 0;
+ }
+diff -Nur linux-3.14.36/drivers/usb/chipidea/otg.c linux-openelec/drivers/usb/chipidea/otg.c
+--- linux-3.14.36/drivers/usb/chipidea/otg.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/otg.c 2015-05-06 12:05:42.000000000 -0500
+@@ -18,6 +18,8 @@
+ #include <linux/usb/otg.h>
+ #include <linux/usb/gadget.h>
+ #include <linux/usb/chipidea.h>
++#include <linux/kthread.h>
++#include <linux/freezer.h>
+
+ #include "ci.h"
+ #include "bits.h"
+@@ -68,26 +70,53 @@
+ ci_role_start(ci, role);
+ }
+ }
++
++/* If there is pending otg event */
++static inline bool ci_otg_event_is_pending(struct ci_hdrc *ci)
++{
++ return ci->id_event || ci->b_sess_valid_event;
++}
++
+ /**
+- * ci_otg_work - perform otg (vbus/id) event handle
+- * @work: work struct
++ * ci_otg_event - perform otg (vbus/id) event handle
++ * @ci: ci_hdrc struct
+ */
+-static void ci_otg_work(struct work_struct *work)
++static void ci_otg_event(struct ci_hdrc *ci)
+ {
+- struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work);
+-
+ if (ci->id_event) {
+ ci->id_event = false;
++ /* Keep controller active during id switch */
++ pm_runtime_get_sync(ci->dev);
+ ci_handle_id_switch(ci);
++ pm_runtime_put_sync(ci->dev);
+ } else if (ci->b_sess_valid_event) {
+ ci->b_sess_valid_event = false;
++ pm_runtime_get_sync(ci->dev);
+ ci_handle_vbus_change(ci);
++ pm_runtime_put_sync(ci->dev);
+ } else
+- dev_err(ci->dev, "unexpected event occurs at %s\n", __func__);
++ dev_dbg(ci->dev, "it should be quit event\n");
+
+ enable_irq(ci->irq);
+ }
+
++static int ci_otg_thread(void *ptr)
++{
++ struct ci_hdrc *ci = ptr;
++
++ set_freezable();
++
++ do {
++ wait_event_freezable(ci->otg_wait,
++ ci_otg_event_is_pending(ci) ||
++ kthread_should_stop());
++ ci_otg_event(ci);
++ } while (!kthread_should_stop());
++
++ dev_warn(ci->dev, "ci_otg_thread quits\n");
++
++ return 0;
++}
+
+ /**
+ * ci_hdrc_otg_init - initialize otg struct
+@@ -95,11 +124,11 @@
+ */
+ int ci_hdrc_otg_init(struct ci_hdrc *ci)
+ {
+- INIT_WORK(&ci->work, ci_otg_work);
+- ci->wq = create_singlethread_workqueue("ci_otg");
+- if (!ci->wq) {
+- dev_err(ci->dev, "can't create workqueue\n");
+- return -ENODEV;
++ init_waitqueue_head(&ci->otg_wait);
++ ci->otg_task = kthread_run(ci_otg_thread, ci, "ci otg thread");
++ if (IS_ERR(ci->otg_task)) {
++ dev_err(ci->dev, "error to create otg thread\n");
++ return PTR_ERR(ci->otg_task);
+ }
+
+ return 0;
+@@ -111,10 +140,7 @@
+ */
+ void ci_hdrc_otg_destroy(struct ci_hdrc *ci)
+ {
+- if (ci->wq) {
+- flush_workqueue(ci->wq);
+- destroy_workqueue(ci->wq);
+- }
++ kthread_stop(ci->otg_task);
+ ci_disable_otg_interrupt(ci, OTGSC_INT_EN_BITS);
+ ci_clear_otg_interrupt(ci, OTGSC_INT_STATUS_BITS);
+ }
+diff -Nur linux-3.14.36/drivers/usb/chipidea/udc.c linux-openelec/drivers/usb/chipidea/udc.c
+--- linux-3.14.36/drivers/usb/chipidea/udc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/udc.c 2015-05-06 12:05:42.000000000 -0500
+@@ -681,12 +681,6 @@
+ struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
+ unsigned long flags;
+
+- spin_lock_irqsave(&ci->lock, flags);
+- ci->gadget.speed = USB_SPEED_UNKNOWN;
+- ci->remote_wakeup = 0;
+- ci->suspended = 0;
+- spin_unlock_irqrestore(&ci->lock, flags);
+-
+ /* flush all endpoints */
+ gadget_for_each_ep(ep, gadget) {
+ usb_ep_fifo_flush(ep);
+@@ -704,6 +698,12 @@
+ ci->status = NULL;
+ }
+
++ spin_lock_irqsave(&ci->lock, flags);
++ ci->gadget.speed = USB_SPEED_UNKNOWN;
++ ci->remote_wakeup = 0;
++ ci->suspended = 0;
++ spin_unlock_irqrestore(&ci->lock, flags);
++
+ return 0;
+ }
+
+@@ -1222,6 +1222,10 @@
+ return -EBUSY;
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return 0;
++ }
+
+ /* only internal SW should disable ctrl endpts */
+
+@@ -1311,6 +1315,10 @@
+ return -EINVAL;
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return 0;
++ }
+ retval = _ep_queue(ep, req, gfp_flags);
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return retval;
+@@ -1334,8 +1342,8 @@
+ return -EINVAL;
+
+ spin_lock_irqsave(hwep->lock, flags);
+-
+- hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
++ if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
++ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+
+ list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+ dma_pool_free(hwep->td_pool, node->ptr, node->dma);
+@@ -1379,6 +1387,10 @@
+
+ spin_lock_irqsave(hwep->lock, flags);
+
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return 0;
++ }
+ #ifndef STALL_IN
+ /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+ if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
+@@ -1440,6 +1452,10 @@
+ }
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return;
++ }
+
+ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+
+@@ -1506,6 +1522,10 @@
+ int ret = 0;
+
+ spin_lock_irqsave(&ci->lock, flags);
++ if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(&ci->lock, flags);
++ return 0;
++ }
+ if (!ci->remote_wakeup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+diff -Nur linux-3.14.36/drivers/usb/chipidea/usbmisc_imx.c linux-openelec/drivers/usb/chipidea/usbmisc_imx.c
+--- linux-3.14.36/drivers/usb/chipidea/usbmisc_imx.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/chipidea/usbmisc_imx.c 2015-07-24 18:03:30.212842002 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+@@ -15,6 +15,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
++#include <linux/regulator/consumer.h>
+
+ #include "ci_hdrc_imx.h"
+
+@@ -33,12 +34,18 @@
+ #define MX53_BM_OVER_CUR_DIS_UHx BIT(30)
+
+ #define MX6_BM_OVER_CUR_DIS BIT(7)
++#define MX6_BM_WAKEUP_ENABLE BIT(10)
++#define MX6_BM_ID_WAKEUP BIT(16)
++#define MX6_BM_VBUS_WAKEUP BIT(17)
++#define MX6_BM_WAKEUP_INTR BIT(31)
+
+ struct usbmisc_ops {
+ /* It's called once when probe a usb device */
+ int (*init)(struct imx_usbmisc_data *data);
+ /* It's called once after adding a usb device */
+ int (*post)(struct imx_usbmisc_data *data);
++ /* It's called when we need to enable usb wakeup */
++ int (*set_wakeup)(struct imx_usbmisc_data *data, bool enabled);
+ };
+
+ struct imx_usbmisc {
+@@ -49,6 +56,7 @@
+ };
+
+ static struct imx_usbmisc *usbmisc;
++static struct regulator *vbus_wakeup_reg;
+
+ static int usbmisc_imx25_post(struct imx_usbmisc_data *data)
+ {
+@@ -158,6 +166,47 @@
+ return 0;
+ }
+
++static u32 imx6q_finalize_wakeup_setting(struct imx_usbmisc_data *data)
++{
++ if (data->available_role == USB_DR_MODE_PERIPHERAL)
++ return MX6_BM_VBUS_WAKEUP;
++ else if (data->available_role == USB_DR_MODE_OTG)
++ return MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP;
++
++ return 0;
++}
++
++static int usbmisc_imx6q_set_wakeup
++ (struct imx_usbmisc_data *data, bool enabled)
++{
++ unsigned long flags;
++ u32 reg, val = MX6_BM_WAKEUP_ENABLE;
++ int ret = 0;
++
++ if (data->index > 3)
++ return -EINVAL;
++
++ spin_lock_irqsave(&usbmisc->lock, flags);
++ reg = readl(usbmisc->base + data->index * 4);
++ if (enabled) {
++ val |= imx6q_finalize_wakeup_setting(data);
++ writel(reg | val, usbmisc->base + data->index * 4);
++ if (vbus_wakeup_reg)
++ ret = regulator_enable(vbus_wakeup_reg);
++ } else {
++ if (reg & MX6_BM_WAKEUP_INTR)
++ pr_debug("wakeup int at ci_hdrc.%d\n", data->index);
++ val = MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP
++ | MX6_BM_ID_WAKEUP;
++ writel(reg & ~val, usbmisc->base + data->index * 4);
++ if (vbus_wakeup_reg && regulator_is_enabled(vbus_wakeup_reg))
++ regulator_disable(vbus_wakeup_reg);
++ }
++ spin_unlock_irqrestore(&usbmisc->lock, flags);
++
++ return ret;
++}
++
+ static const struct usbmisc_ops imx25_usbmisc_ops = {
+ .post = usbmisc_imx25_post,
+ };
+@@ -172,6 +221,7 @@
+
+ static const struct usbmisc_ops imx6q_usbmisc_ops = {
+ .init = usbmisc_imx6q_init,
++ .set_wakeup = usbmisc_imx6q_set_wakeup,
+ };
+
+ int imx_usbmisc_init(struct imx_usbmisc_data *data)
+@@ -194,6 +244,16 @@
+ }
+ EXPORT_SYMBOL_GPL(imx_usbmisc_init_post);
+
++int imx_usbmisc_set_wakeup(struct imx_usbmisc_data *data, bool enabled)
++{
++ if (!usbmisc)
++ return -ENODEV;
++ if (!usbmisc->ops->set_wakeup)
++ return 0;
++ return usbmisc->ops->set_wakeup(data, enabled);
++}
++EXPORT_SYMBOL_GPL(imx_usbmisc_set_wakeup);
++
+ static const struct of_device_id usbmisc_imx_dt_ids[] = {
+ {
+ .compatible = "fsl,imx25-usbmisc",
+@@ -259,6 +319,18 @@
+ data->ops = (const struct usbmisc_ops *)tmp_dev->data;
+ usbmisc = data;
+
++ vbus_wakeup_reg = devm_regulator_get(&pdev->dev, "vbus-wakeup");
++ if (PTR_ERR(vbus_wakeup_reg) == -EPROBE_DEFER)
++ return -EPROBE_DEFER;
++ else if (PTR_ERR(vbus_wakeup_reg) == -ENODEV)
++ /* no vbus regualator is needed */
++ vbus_wakeup_reg = NULL;
++ else if (IS_ERR(vbus_wakeup_reg)) {
++ dev_err(&pdev->dev, "Getting regulator error: %ld\n",
++ PTR_ERR(vbus_wakeup_reg));
++ return PTR_ERR(vbus_wakeup_reg);
++ }
++
+ return 0;
+ }
+
+diff -Nur linux-3.14.36/drivers/usb/core/hub.c linux-openelec/drivers/usb/core/hub.c
+--- linux-3.14.36/drivers/usb/core/hub.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/core/hub.c 2015-07-24 18:03:28.872842002 -0500
+@@ -3916,6 +3916,12 @@
+ void usb_enable_ltm(struct usb_device *udev) { }
+ EXPORT_SYMBOL_GPL(usb_enable_ltm);
+
++static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
++ u16 portstatus, u16 portchange)
++{
++ return 0;
++}
++
+ #endif /* CONFIG_PM */
+
+
+@@ -4512,8 +4518,7 @@
+
+ /* Disconnect any existing devices under this port */
+ if (udev) {
+- if (hcd->phy && !hdev->parent &&
+- !(portstatus & USB_PORT_STAT_CONNECTION))
++ if (hcd->phy && !hdev->parent)
+ usb_phy_notify_disconnect(hcd->phy, udev->speed);
+ usb_disconnect(&hub->ports[port1 - 1]->child);
+ }
+diff -Nur linux-3.14.36/drivers/usb/core/hub.c.orig linux-openelec/drivers/usb/core/hub.c.orig
+--- linux-3.14.36/drivers/usb/core/hub.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/usb/core/hub.c.orig 2015-07-24 18:03:28.616842002 -0500
+@@ -0,0 +1,5603 @@
++/*
++ * USB hub driver.
++ *
++ * (C) Copyright 1999 Linus Torvalds
++ * (C) Copyright 1999 Johannes Erdfelt
++ * (C) Copyright 1999 Gregory P. Smith
++ * (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au)
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/completion.h>
++#include <linux/sched.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/ioctl.h>
++#include <linux/usb.h>
++#include <linux/usbdevice_fs.h>
++#include <linux/usb/hcd.h>
++#include <linux/usb/otg.h>
++#include <linux/usb/quirks.h>
++#include <linux/kthread.h>
++#include <linux/mutex.h>
++#include <linux/freezer.h>
++#include <linux/random.h>
++#include <linux/pm_qos.h>
++
++#include <asm/uaccess.h>
++#include <asm/byteorder.h>
++
++#include "hub.h"
++
++#define USB_VENDOR_GENESYS_LOGIC 0x05e3
++#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
++
++static inline int hub_is_superspeed(struct usb_device *hdev)
++{
++ return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
++}
++
++/* Protect struct usb_device->state and ->children members
++ * Note: Both are also protected by ->dev.sem, except that ->state can
++ * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
++static DEFINE_SPINLOCK(device_state_lock);
++
++/* khubd's worklist and its lock */
++static DEFINE_SPINLOCK(hub_event_lock);
++static LIST_HEAD(hub_event_list); /* List of hubs needing servicing */
++
++/* Wakes up khubd */
++static DECLARE_WAIT_QUEUE_HEAD(khubd_wait);
++
++static struct task_struct *khubd_task;
++
++/* cycle leds on hubs that aren't blinking for attention */
++static bool blinkenlights = 0;
++module_param (blinkenlights, bool, S_IRUGO);
++MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs");
++
++/*
++ * Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about
++ * 10 seconds to send reply for the initial 64-byte descriptor request.
++ */
++/* define initial 64-byte descriptor request timeout in milliseconds */
++static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT;
++module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR);
++MODULE_PARM_DESC(initial_descriptor_timeout,
++ "initial 64-byte descriptor request timeout in milliseconds "
++ "(default 5000 - 5.0 seconds)");
++
++/*
++ * As of 2.6.10 we introduce a new USB device initialization scheme which
++ * closely resembles the way Windows works. Hopefully it will be compatible
++ * with a wider range of devices than the old scheme. However some previously
++ * working devices may start giving rise to "device not accepting address"
++ * errors; if that happens the user can try the old scheme by adjusting the
++ * following module parameters.
++ *
++ * For maximum flexibility there are two boolean parameters to control the
++ * hub driver's behavior. On the first initialization attempt, if the
++ * "old_scheme_first" parameter is set then the old scheme will be used,
++ * otherwise the new scheme is used. If that fails and "use_both_schemes"
++ * is set, then the driver will make another attempt, using the other scheme.
++ */
++static bool old_scheme_first = 0;
++module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(old_scheme_first,
++ "start with the old device initialization scheme");
++
++static bool use_both_schemes = 1;
++module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(use_both_schemes,
++ "try the other device initialization scheme if the "
++ "first one fails");
++
++/* Mutual exclusion for EHCI CF initialization. This interferes with
++ * port reset on some companion controllers.
++ */
++DECLARE_RWSEM(ehci_cf_port_reset_rwsem);
++EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
++
++#define HUB_DEBOUNCE_TIMEOUT 2000
++#define HUB_DEBOUNCE_STEP 25
++#define HUB_DEBOUNCE_STABLE 100
++
++static int usb_reset_and_verify_device(struct usb_device *udev);
++
++static inline char *portspeed(struct usb_hub *hub, int portstatus)
++{
++ if (hub_is_superspeed(hub->hdev))
++ return "5.0 Gb/s";
++ if (portstatus & USB_PORT_STAT_HIGH_SPEED)
++ return "480 Mb/s";
++ else if (portstatus & USB_PORT_STAT_LOW_SPEED)
++ return "1.5 Mb/s";
++ else
++ return "12 Mb/s";
++}
++
++/* Note that hdev or one of its children must be locked! */
++struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
++{
++ if (!hdev || !hdev->actconfig || !hdev->maxchild)
++ return NULL;
++ return usb_get_intfdata(hdev->actconfig->interface[0]);
++}
++
++static int usb_device_supports_lpm(struct usb_device *udev)
++{
++ /* USB 2.1 (and greater) devices indicate LPM support through
++ * their USB 2.0 Extended Capabilities BOS descriptor.
++ */
++ if (udev->speed == USB_SPEED_HIGH) {
++ if (udev->bos->ext_cap &&
++ (USB_LPM_SUPPORT &
++ le32_to_cpu(udev->bos->ext_cap->bmAttributes)))
++ return 1;
++ return 0;
++ }
++
++ /* All USB 3.0 must support LPM, but we need their max exit latency
++ * information from the SuperSpeed Extended Capabilities BOS descriptor.
++ */
++ if (!udev->bos->ss_cap) {
++ dev_warn(&udev->dev, "No LPM exit latency info found. "
++ "Power management will be impacted.\n");
++ return 0;
++ }
++ if (udev->parent->lpm_capable)
++ return 1;
++
++ dev_warn(&udev->dev, "Parent hub missing LPM exit latency info. "
++ "Power management will be impacted.\n");
++ return 0;
++}
++
++/*
++ * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from
++ * either U1 or U2.
++ */
++static void usb_set_lpm_mel(struct usb_device *udev,
++ struct usb3_lpm_parameters *udev_lpm_params,
++ unsigned int udev_exit_latency,
++ struct usb_hub *hub,
++ struct usb3_lpm_parameters *hub_lpm_params,
++ unsigned int hub_exit_latency)
++{
++ unsigned int total_mel;
++ unsigned int device_mel;
++ unsigned int hub_mel;
++
++ /*
++ * Calculate the time it takes to transition all links from the roothub
++ * to the parent hub into U0. The parent hub must then decode the
++ * packet (hub header decode latency) to figure out which port it was
++ * bound for.
++ *
++ * The Hub Header decode latency is expressed in 0.1us intervals (0x1
++ * means 0.1us). Multiply that by 100 to get nanoseconds.
++ */
++ total_mel = hub_lpm_params->mel +
++ (hub->descriptor->u.ss.bHubHdrDecLat * 100);
++
++ /*
++ * How long will it take to transition the downstream hub's port into
++ * U0? The greater of either the hub exit latency or the device exit
++ * latency.
++ *
++ * The BOS U1/U2 exit latencies are expressed in 1us intervals.
++ * Multiply that by 1000 to get nanoseconds.
++ */
++ device_mel = udev_exit_latency * 1000;
++ hub_mel = hub_exit_latency * 1000;
++ if (device_mel > hub_mel)
++ total_mel += device_mel;
++ else
++ total_mel += hub_mel;
++
++ udev_lpm_params->mel = total_mel;
++}
++
++/*
++ * Set the maximum Device to Host Exit Latency (PEL) for the device to initiate
++ * a transition from either U1 or U2.
++ */
++static void usb_set_lpm_pel(struct usb_device *udev,
++ struct usb3_lpm_parameters *udev_lpm_params,
++ unsigned int udev_exit_latency,
++ struct usb_hub *hub,
++ struct usb3_lpm_parameters *hub_lpm_params,
++ unsigned int hub_exit_latency,
++ unsigned int port_to_port_exit_latency)
++{
++ unsigned int first_link_pel;
++ unsigned int hub_pel;
++
++ /*
++ * First, the device sends an LFPS to transition the link between the
++ * device and the parent hub into U0. The exit latency is the bigger of
++ * the device exit latency or the hub exit latency.
++ */
++ if (udev_exit_latency > hub_exit_latency)
++ first_link_pel = udev_exit_latency * 1000;
++ else
++ first_link_pel = hub_exit_latency * 1000;
++
++ /*
++ * When the hub starts to receive the LFPS, there is a slight delay for
++ * it to figure out that one of the ports is sending an LFPS. Then it
++ * will forward the LFPS to its upstream link. The exit latency is the
++ * delay, plus the PEL that we calculated for this hub.
++ */
++ hub_pel = port_to_port_exit_latency * 1000 + hub_lpm_params->pel;
++
++ /*
++ * According to figure C-7 in the USB 3.0 spec, the PEL for this device
++ * is the greater of the two exit latencies.
++ */
++ if (first_link_pel > hub_pel)
++ udev_lpm_params->pel = first_link_pel;
++ else
++ udev_lpm_params->pel = hub_pel;
++}
++
++/*
++ * Set the System Exit Latency (SEL) to indicate the total worst-case time from
++ * when a device initiates a transition to U0, until when it will receive the
++ * first packet from the host controller.
++ *
++ * Section C.1.5.1 describes the four components to this:
++ * - t1: device PEL
++ * - t2: time for the ERDY to make it from the device to the host.
++ * - t3: a host-specific delay to process the ERDY.
++ * - t4: time for the packet to make it from the host to the device.
++ *
++ * t3 is specific to both the xHCI host and the platform the host is integrated
++ * into. The Intel HW folks have said it's negligible, FIXME if a different
++ * vendor says otherwise.
++ */
++static void usb_set_lpm_sel(struct usb_device *udev,
++ struct usb3_lpm_parameters *udev_lpm_params)
++{
++ struct usb_device *parent;
++ unsigned int num_hubs;
++ unsigned int total_sel;
++
++ /* t1 = device PEL */
++ total_sel = udev_lpm_params->pel;
++ /* How many external hubs are in between the device & the root port. */
++ for (parent = udev->parent, num_hubs = 0; parent->parent;
++ parent = parent->parent)
++ num_hubs++;
++ /* t2 = 2.1us + 250ns * (num_hubs - 1) */
++ if (num_hubs > 0)
++ total_sel += 2100 + 250 * (num_hubs - 1);
++
++ /* t4 = 250ns * num_hubs */
++ total_sel += 250 * num_hubs;
++
++ udev_lpm_params->sel = total_sel;
++}
++
++static void usb_set_lpm_parameters(struct usb_device *udev)
++{
++ struct usb_hub *hub;
++ unsigned int port_to_port_delay;
++ unsigned int udev_u1_del;
++ unsigned int udev_u2_del;
++ unsigned int hub_u1_del;
++ unsigned int hub_u2_del;
++
++ if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
++ return;
++
++ hub = usb_hub_to_struct_hub(udev->parent);
++ /* It doesn't take time to transition the roothub into U0, since it
++ * doesn't have an upstream link.
++ */
++ if (!hub)
++ return;
++
++ udev_u1_del = udev->bos->ss_cap->bU1devExitLat;
++ udev_u2_del = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat);
++ hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat;
++ hub_u2_del = le16_to_cpu(udev->parent->bos->ss_cap->bU2DevExitLat);
++
++ usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del,
++ hub, &udev->parent->u1_params, hub_u1_del);
++
++ usb_set_lpm_mel(udev, &udev->u2_params, udev_u2_del,
++ hub, &udev->parent->u2_params, hub_u2_del);
++
++ /*
++ * Appendix C, section C.2.2.2, says that there is a slight delay from
++ * when the parent hub notices the downstream port is trying to
++ * transition to U0 to when the hub initiates a U0 transition on its
++ * upstream port. The section says the delays are tPort2PortU1EL and
++ * tPort2PortU2EL, but it doesn't define what they are.
++ *
++ * The hub chapter, sections 10.4.2.4 and 10.4.2.5 seem to be talking
++ * about the same delays. Use the maximum delay calculations from those
++ * sections. For U1, it's tHubPort2PortExitLat, which is 1us max. For
++ * U2, it's tHubPort2PortExitLat + U2DevExitLat - U1DevExitLat. I
++ * assume the device exit latencies they are talking about are the hub
++ * exit latencies.
++ *
++ * What do we do if the U2 exit latency is less than the U1 exit
++ * latency? It's possible, although not likely...
++ */
++ port_to_port_delay = 1;
++
++ usb_set_lpm_pel(udev, &udev->u1_params, udev_u1_del,
++ hub, &udev->parent->u1_params, hub_u1_del,
++ port_to_port_delay);
++
++ if (hub_u2_del > hub_u1_del)
++ port_to_port_delay = 1 + hub_u2_del - hub_u1_del;
++ else
++ port_to_port_delay = 1 + hub_u1_del;
++
++ usb_set_lpm_pel(udev, &udev->u2_params, udev_u2_del,
++ hub, &udev->parent->u2_params, hub_u2_del,
++ port_to_port_delay);
++
++ /* Now that we've got PEL, calculate SEL. */
++ usb_set_lpm_sel(udev, &udev->u1_params);
++ usb_set_lpm_sel(udev, &udev->u2_params);
++}
++
++/* USB 2.0 spec Section 11.24.4.5 */
++static int get_hub_descriptor(struct usb_device *hdev, void *data)
++{
++ int i, ret, size;
++ unsigned dtype;
++
++ if (hub_is_superspeed(hdev)) {
++ dtype = USB_DT_SS_HUB;
++ size = USB_DT_SS_HUB_SIZE;
++ } else {
++ dtype = USB_DT_HUB;
++ size = sizeof(struct usb_hub_descriptor);
++ }
++
++ for (i = 0; i < 3; i++) {
++ ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
++ USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
++ dtype << 8, 0, data, size,
++ USB_CTRL_GET_TIMEOUT);
++ if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
++ return ret;
++ }
++ return -EINVAL;
++}
++
++/*
++ * USB 2.0 spec Section 11.24.2.1
++ */
++static int clear_hub_feature(struct usb_device *hdev, int feature)
++{
++ return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ USB_REQ_CLEAR_FEATURE, USB_RT_HUB, feature, 0, NULL, 0, 1000);
++}
++
++/*
++ * USB 2.0 spec Section 11.24.2.2
++ */
++int usb_clear_port_feature(struct usb_device *hdev, int port1, int feature)
++{
++ return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1,
++ NULL, 0, 1000);
++}
++
++/*
++ * USB 2.0 spec Section 11.24.2.13
++ */
++static int set_port_feature(struct usb_device *hdev, int port1, int feature)
++{
++ return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
++ NULL, 0, 1000);
++}
++
++/*
++ * USB 2.0 spec Section 11.24.2.7.1.10 and table 11-7
++ * for info about using port indicators
++ */
++static void set_port_led(
++ struct usb_hub *hub,
++ int port1,
++ int selector
++)
++{
++ int status = set_port_feature(hub->hdev, (selector << 8) | port1,
++ USB_PORT_FEAT_INDICATOR);
++ if (status < 0)
++ dev_dbg (hub->intfdev,
++ "port %d indicator %s status %d\n",
++ port1,
++ ({ char *s; switch (selector) {
++ case HUB_LED_AMBER: s = "amber"; break;
++ case HUB_LED_GREEN: s = "green"; break;
++ case HUB_LED_OFF: s = "off"; break;
++ case HUB_LED_AUTO: s = "auto"; break;
++ default: s = "??"; break;
++ } s; }),
++ status);
++}
++
++#define LED_CYCLE_PERIOD ((2*HZ)/3)
++
++static void led_work (struct work_struct *work)
++{
++ struct usb_hub *hub =
++ container_of(work, struct usb_hub, leds.work);
++ struct usb_device *hdev = hub->hdev;
++ unsigned i;
++ unsigned changed = 0;
++ int cursor = -1;
++
++ if (hdev->state != USB_STATE_CONFIGURED || hub->quiescing)
++ return;
++
++ for (i = 0; i < hdev->maxchild; i++) {
++ unsigned selector, mode;
++
++ /* 30%-50% duty cycle */
++
++ switch (hub->indicator[i]) {
++ /* cycle marker */
++ case INDICATOR_CYCLE:
++ cursor = i;
++ selector = HUB_LED_AUTO;
++ mode = INDICATOR_AUTO;
++ break;
++ /* blinking green = sw attention */
++ case INDICATOR_GREEN_BLINK:
++ selector = HUB_LED_GREEN;
++ mode = INDICATOR_GREEN_BLINK_OFF;
++ break;
++ case INDICATOR_GREEN_BLINK_OFF:
++ selector = HUB_LED_OFF;
++ mode = INDICATOR_GREEN_BLINK;
++ break;
++ /* blinking amber = hw attention */
++ case INDICATOR_AMBER_BLINK:
++ selector = HUB_LED_AMBER;
++ mode = INDICATOR_AMBER_BLINK_OFF;
++ break;
++ case INDICATOR_AMBER_BLINK_OFF:
++ selector = HUB_LED_OFF;
++ mode = INDICATOR_AMBER_BLINK;
++ break;
++ /* blink green/amber = reserved */
++ case INDICATOR_ALT_BLINK:
++ selector = HUB_LED_GREEN;
++ mode = INDICATOR_ALT_BLINK_OFF;
++ break;
++ case INDICATOR_ALT_BLINK_OFF:
++ selector = HUB_LED_AMBER;
++ mode = INDICATOR_ALT_BLINK;
++ break;
++ default:
++ continue;
++ }
++ if (selector != HUB_LED_AUTO)
++ changed = 1;
++ set_port_led(hub, i + 1, selector);
++ hub->indicator[i] = mode;
++ }
++ if (!changed && blinkenlights) {
++ cursor++;
++ cursor %= hdev->maxchild;
++ set_port_led(hub, cursor + 1, HUB_LED_GREEN);
++ hub->indicator[cursor] = INDICATOR_CYCLE;
++ changed++;
++ }
++ if (changed)
++ schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
++}
++
++/* use a short timeout for hub/port status fetches */
++#define USB_STS_TIMEOUT 1000
++#define USB_STS_RETRIES 5
++
++/*
++ * USB 2.0 spec Section 11.24.2.6
++ */
++static int get_hub_status(struct usb_device *hdev,
++ struct usb_hub_status *data)
++{
++ int i, status = -ETIMEDOUT;
++
++ for (i = 0; i < USB_STS_RETRIES &&
++ (status == -ETIMEDOUT || status == -EPIPE); i++) {
++ status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
++ USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0,
++ data, sizeof(*data), USB_STS_TIMEOUT);
++ }
++ return status;
++}
++
++/*
++ * USB 2.0 spec Section 11.24.2.7
++ */
++static int get_port_status(struct usb_device *hdev, int port1,
++ struct usb_port_status *data)
++{
++ int i, status = -ETIMEDOUT;
++
++ for (i = 0; i < USB_STS_RETRIES &&
++ (status == -ETIMEDOUT || status == -EPIPE); i++) {
++ status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
++ USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
++ data, sizeof(*data), USB_STS_TIMEOUT);
++ }
++ return status;
++}
++
++static int hub_port_status(struct usb_hub *hub, int port1,
++ u16 *status, u16 *change)
++{
++ int ret;
++
++ mutex_lock(&hub->status_mutex);
++ ret = get_port_status(hub->hdev, port1, &hub->status->port);
++ if (ret < 4) {
++ if (ret != -ENODEV)
++ dev_err(hub->intfdev,
++ "%s failed (err = %d)\n", __func__, ret);
++ if (ret >= 0)
++ ret = -EIO;
++ } else {
++ *status = le16_to_cpu(hub->status->port.wPortStatus);
++ *change = le16_to_cpu(hub->status->port.wPortChange);
++
++ ret = 0;
++ }
++ mutex_unlock(&hub->status_mutex);
++ return ret;
++}
++
++static void kick_khubd(struct usb_hub *hub)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&hub_event_lock, flags);
++ if (!hub->disconnected && list_empty(&hub->event_list)) {
++ list_add_tail(&hub->event_list, &hub_event_list);
++
++ /* Suppress autosuspend until khubd runs */
++ usb_autopm_get_interface_no_resume(
++ to_usb_interface(hub->intfdev));
++ wake_up(&khubd_wait);
++ }
++ spin_unlock_irqrestore(&hub_event_lock, flags);
++}
++
++void usb_kick_khubd(struct usb_device *hdev)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
++
++ if (hub)
++ kick_khubd(hub);
++}
++
++/*
++ * Let the USB core know that a USB 3.0 device has sent a Function Wake Device
++ * Notification, which indicates it had initiated remote wakeup.
++ *
++ * USB 3.0 hubs do not report the port link state change from U3 to U0 when the
++ * device initiates resume, so the USB core will not receive notice of the
++ * resume through the normal hub interrupt URB.
++ */
++void usb_wakeup_notification(struct usb_device *hdev,
++ unsigned int portnum)
++{
++ struct usb_hub *hub;
++
++ if (!hdev)
++ return;
++
++ hub = usb_hub_to_struct_hub(hdev);
++ if (hub) {
++ set_bit(portnum, hub->wakeup_bits);
++ kick_khubd(hub);
++ }
++}
++EXPORT_SYMBOL_GPL(usb_wakeup_notification);
++
++/* completion function, fires on port status changes and various faults */
++static void hub_irq(struct urb *urb)
++{
++ struct usb_hub *hub = urb->context;
++ int status = urb->status;
++ unsigned i;
++ unsigned long bits;
++
++ switch (status) {
++ case -ENOENT: /* synchronous unlink */
++ case -ECONNRESET: /* async unlink */
++ case -ESHUTDOWN: /* hardware going away */
++ return;
++
++ default: /* presumably an error */
++ /* Cause a hub reset after 10 consecutive errors */
++ dev_dbg (hub->intfdev, "transfer --> %d\n", status);
++ if ((++hub->nerrors < 10) || hub->error)
++ goto resubmit;
++ hub->error = status;
++ /* FALL THROUGH */
++
++ /* let khubd handle things */
++ case 0: /* we got data: port status changed */
++ bits = 0;
++ for (i = 0; i < urb->actual_length; ++i)
++ bits |= ((unsigned long) ((*hub->buffer)[i]))
++ << (i*8);
++ hub->event_bits[0] = bits;
++ break;
++ }
++
++ hub->nerrors = 0;
++
++ /* Something happened, let khubd figure it out */
++ kick_khubd(hub);
++
++resubmit:
++ if (hub->quiescing)
++ return;
++
++ if ((status = usb_submit_urb (hub->urb, GFP_ATOMIC)) != 0
++ && status != -ENODEV && status != -EPERM)
++ dev_err (hub->intfdev, "resubmit --> %d\n", status);
++}
++
++/* USB 2.0 spec Section 11.24.2.3 */
++static inline int
++hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
++{
++ /* Need to clear both directions for control ep */
++ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
++ USB_ENDPOINT_XFER_CONTROL) {
++ int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
++ devinfo ^ 0x8000, tt, NULL, 0, 1000);
++ if (status)
++ return status;
++ }
++ return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
++ tt, NULL, 0, 1000);
++}
++
++/*
++ * enumeration blocks khubd for a long time. we use keventd instead, since
++ * long blocking there is the exception, not the rule. accordingly, HCDs
++ * talking to TTs must queue control transfers (not just bulk and iso), so
++ * both can talk to the same hub concurrently.
++ */
++static void hub_tt_work(struct work_struct *work)
++{
++ struct usb_hub *hub =
++ container_of(work, struct usb_hub, tt.clear_work);
++ unsigned long flags;
++
++ spin_lock_irqsave (&hub->tt.lock, flags);
++ while (!list_empty(&hub->tt.clear_list)) {
++ struct list_head *next;
++ struct usb_tt_clear *clear;
++ struct usb_device *hdev = hub->hdev;
++ const struct hc_driver *drv;
++ int status;
++
++ next = hub->tt.clear_list.next;
++ clear = list_entry (next, struct usb_tt_clear, clear_list);
++ list_del (&clear->clear_list);
++
++ /* drop lock so HCD can concurrently report other TT errors */
++ spin_unlock_irqrestore (&hub->tt.lock, flags);
++ status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt);
++ if (status && status != -ENODEV)
++ dev_err (&hdev->dev,
++ "clear tt %d (%04x) error %d\n",
++ clear->tt, clear->devinfo, status);
++
++ /* Tell the HCD, even if the operation failed */
++ drv = clear->hcd->driver;
++ if (drv->clear_tt_buffer_complete)
++ (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep);
++
++ kfree(clear);
++ spin_lock_irqsave(&hub->tt.lock, flags);
++ }
++ spin_unlock_irqrestore (&hub->tt.lock, flags);
++}
++
++/**
++ * usb_hub_set_port_power - control hub port's power state
++ * @hdev: USB device belonging to the usb hub
++ * @hub: target hub
++ * @port1: port index
++ * @set: expected status
++ *
++ * call this function to control port's power via setting or
++ * clearing the port's PORT_POWER feature.
++ *
++ * Return: 0 if successful. A negative error code otherwise.
++ */
++int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
++ int port1, bool set)
++{
++ int ret;
++ struct usb_port *port_dev = hub->ports[port1 - 1];
++
++ if (set)
++ ret = set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
++ else
++ ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
++
++ if (!ret)
++ port_dev->power_is_on = set;
++ return ret;
++}
++
++/**
++ * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub
++ * @urb: an URB associated with the failed or incomplete split transaction
++ *
++ * High speed HCDs use this to tell the hub driver that some split control or
++ * bulk transaction failed in a way that requires clearing internal state of
++ * a transaction translator. This is normally detected (and reported) from
++ * interrupt context.
++ *
++ * It may not be possible for that hub to handle additional full (or low)
++ * speed transactions until that state is fully cleared out.
++ *
++ * Return: 0 if successful. A negative error code otherwise.
++ */
++int usb_hub_clear_tt_buffer(struct urb *urb)
++{
++ struct usb_device *udev = urb->dev;
++ int pipe = urb->pipe;
++ struct usb_tt *tt = udev->tt;
++ unsigned long flags;
++ struct usb_tt_clear *clear;
++
++ /* we've got to cope with an arbitrary number of pending TT clears,
++ * since each TT has "at least two" buffers that can need it (and
++ * there can be many TTs per hub). even if they're uncommon.
++ */
++ if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
++ dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
++ /* FIXME recover somehow ... RESET_TT? */
++ return -ENOMEM;
++ }
++
++ /* info that CLEAR_TT_BUFFER needs */
++ clear->tt = tt->multi ? udev->ttport : 1;
++ clear->devinfo = usb_pipeendpoint (pipe);
++ clear->devinfo |= udev->devnum << 4;
++ clear->devinfo |= usb_pipecontrol (pipe)
++ ? (USB_ENDPOINT_XFER_CONTROL << 11)
++ : (USB_ENDPOINT_XFER_BULK << 11);
++ if (usb_pipein (pipe))
++ clear->devinfo |= 1 << 15;
++
++ /* info for completion callback */
++ clear->hcd = bus_to_hcd(udev->bus);
++ clear->ep = urb->ep;
++
++ /* tell keventd to clear state for this TT */
++ spin_lock_irqsave (&tt->lock, flags);
++ list_add_tail (&clear->clear_list, &tt->clear_list);
++ schedule_work(&tt->clear_work);
++ spin_unlock_irqrestore (&tt->lock, flags);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
++
++/* If do_delay is false, return the number of milliseconds the caller
++ * needs to delay.
++ */
++static unsigned hub_power_on(struct usb_hub *hub, bool do_delay)
++{
++ int port1;
++ unsigned pgood_delay = hub->descriptor->bPwrOn2PwrGood * 2;
++ unsigned delay;
++ u16 wHubCharacteristics =
++ le16_to_cpu(hub->descriptor->wHubCharacteristics);
++
++ /* Enable power on each port. Some hubs have reserved values
++ * of LPSM (> 2) in their descriptors, even though they are
++ * USB 2.0 hubs. Some hubs do not implement port-power switching
++ * but only emulate it. In all cases, the ports won't work
++ * unless we send these messages to the hub.
++ */
++ if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2)
++ dev_dbg(hub->intfdev, "enabling power on all ports\n");
++ else
++ dev_dbg(hub->intfdev, "trying to enable port power on "
++ "non-switchable hub\n");
++ for (port1 = 1; port1 <= hub->hdev->maxchild; port1++)
++ if (hub->ports[port1 - 1]->power_is_on)
++ set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER);
++ else
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_POWER);
++
++ /* Wait at least 100 msec for power to become stable */
++ delay = max(pgood_delay, (unsigned) 100);
++ if (do_delay)
++ msleep(delay);
++ return delay;
++}
++
++static int hub_hub_status(struct usb_hub *hub,
++ u16 *status, u16 *change)
++{
++ int ret;
++
++ mutex_lock(&hub->status_mutex);
++ ret = get_hub_status(hub->hdev, &hub->status->hub);
++ if (ret < 0) {
++ if (ret != -ENODEV)
++ dev_err(hub->intfdev,
++ "%s failed (err = %d)\n", __func__, ret);
++ } else {
++ *status = le16_to_cpu(hub->status->hub.wHubStatus);
++ *change = le16_to_cpu(hub->status->hub.wHubChange);
++ ret = 0;
++ }
++ mutex_unlock(&hub->status_mutex);
++ return ret;
++}
++
++static int hub_set_port_link_state(struct usb_hub *hub, int port1,
++ unsigned int link_status)
++{
++ return set_port_feature(hub->hdev,
++ port1 | (link_status << 3),
++ USB_PORT_FEAT_LINK_STATE);
++}
++
++/*
++ * If USB 3.0 ports are placed into the Disabled state, they will no longer
++ * detect any device connects or disconnects. This is generally not what the
++ * USB core wants, since it expects a disabled port to produce a port status
++ * change event when a new device connects.
++ *
++ * Instead, set the link state to Disabled, wait for the link to settle into
++ * that state, clear any change bits, and then put the port into the RxDetect
++ * state.
++ */
++static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
++{
++ int ret;
++ int total_time;
++ u16 portchange, portstatus;
++
++ if (!hub_is_superspeed(hub->hdev))
++ return -EINVAL;
++
++ ret = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (ret < 0)
++ return ret;
++
++ /*
++ * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
++ * Controller [1022:7814] will have spurious result making the following
++ * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
++ * as high-speed device if we set the usb 3.0 port link state to
++ * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
++ * check the state here to avoid the bug.
++ */
++ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_RX_DETECT) {
++ dev_dbg(&hub->ports[port1 - 1]->dev,
++ "Not disabling port; link state is RxDetect\n");
++ return ret;
++ }
++
++ ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
++ if (ret)
++ return ret;
++
++ /* Wait for the link to enter the disabled state. */
++ for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
++ ret = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (ret < 0)
++ return ret;
++
++ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_SS_DISABLED)
++ break;
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ break;
++ msleep(HUB_DEBOUNCE_STEP);
++ }
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
++ port1, total_time);
++
++ return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
++}
++
++static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
++{
++ struct usb_device *hdev = hub->hdev;
++ int ret = 0;
++
++ if (hub->ports[port1 - 1]->child && set_state)
++ usb_set_device_state(hub->ports[port1 - 1]->child,
++ USB_STATE_NOTATTACHED);
++ if (!hub->error) {
++ if (hub_is_superspeed(hub->hdev))
++ ret = hub_usb3_port_disable(hub, port1);
++ else
++ ret = usb_clear_port_feature(hdev, port1,
++ USB_PORT_FEAT_ENABLE);
++ }
++ if (ret && ret != -ENODEV)
++ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
++ port1, ret);
++ return ret;
++}
++
++/*
++ * Disable a port and mark a logical connect-change event, so that some
++ * time later khubd will disconnect() any existing usb_device on the port
++ * and will re-enumerate if there actually is a device attached.
++ */
++static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
++{
++ dev_dbg(hub->intfdev, "logical disconnect on port %d\n", port1);
++ hub_port_disable(hub, port1, 1);
++
++ /* FIXME let caller ask to power down the port:
++ * - some devices won't enumerate without a VBUS power cycle
++ * - SRP saves power that way
++ * - ... new call, TBD ...
++ * That's easy if this hub can switch power per-port, and
++ * khubd reactivates the port later (timer, SRP, etc).
++ * Powerdown must be optional, because of reset/DFU.
++ */
++
++ set_bit(port1, hub->change_bits);
++ kick_khubd(hub);
++}
++
++/**
++ * usb_remove_device - disable a device's port on its parent hub
++ * @udev: device to be disabled and removed
++ * Context: @udev locked, must be able to sleep.
++ *
++ * After @udev's port has been disabled, khubd is notified and it will
++ * see that the device has been disconnected. When the device is
++ * physically unplugged and something is plugged in, the events will
++ * be received and processed normally.
++ *
++ * Return: 0 if successful. A negative error code otherwise.
++ */
++int usb_remove_device(struct usb_device *udev)
++{
++ struct usb_hub *hub;
++ struct usb_interface *intf;
++
++ if (!udev->parent) /* Can't remove a root hub */
++ return -EINVAL;
++ hub = usb_hub_to_struct_hub(udev->parent);
++ intf = to_usb_interface(hub->intfdev);
++
++ usb_autopm_get_interface(intf);
++ set_bit(udev->portnum, hub->removed_bits);
++ hub_port_logical_disconnect(hub, udev->portnum);
++ usb_autopm_put_interface(intf);
++ return 0;
++}
++
++enum hub_activation_type {
++ HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */
++ HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME,
++};
++
++static void hub_init_func2(struct work_struct *ws);
++static void hub_init_func3(struct work_struct *ws);
++
++static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
++{
++ struct usb_device *hdev = hub->hdev;
++ struct usb_hcd *hcd;
++ int ret;
++ int port1;
++ int status;
++ bool need_debounce_delay = false;
++ unsigned delay;
++
++ /* Continue a partial initialization */
++ if (type == HUB_INIT2)
++ goto init2;
++ if (type == HUB_INIT3)
++ goto init3;
++
++ /* The superspeed hub except for root hub has to use Hub Depth
++ * value as an offset into the route string to locate the bits
++ * it uses to determine the downstream port number. So hub driver
++ * should send a set hub depth request to superspeed hub after
++ * the superspeed hub is set configuration in initialization or
++ * reset procedure.
++ *
++ * After a resume, port power should still be on.
++ * For any other type of activation, turn it on.
++ */
++ if (type != HUB_RESUME) {
++ if (hdev->parent && hub_is_superspeed(hdev)) {
++ ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ HUB_SET_DEPTH, USB_RT_HUB,
++ hdev->level - 1, 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ if (ret < 0)
++ dev_err(hub->intfdev,
++ "set hub depth failed\n");
++ }
++
++ /* Speed up system boot by using a delayed_work for the
++ * hub's initial power-up delays. This is pretty awkward
++ * and the implementation looks like a home-brewed sort of
++ * setjmp/longjmp, but it saves at least 100 ms for each
++ * root hub (assuming usbcore is compiled into the kernel
++ * rather than as a module). It adds up.
++ *
++ * This can't be done for HUB_RESUME or HUB_RESET_RESUME
++ * because for those activation types the ports have to be
++ * operational when we return. In theory this could be done
++ * for HUB_POST_RESET, but it's easier not to.
++ */
++ if (type == HUB_INIT) {
++ delay = hub_power_on(hub, false);
++ PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
++ schedule_delayed_work(&hub->init_work,
++ msecs_to_jiffies(delay));
++
++ /* Suppress autosuspend until init is done */
++ usb_autopm_get_interface_no_resume(
++ to_usb_interface(hub->intfdev));
++ return; /* Continues at init2: below */
++ } else if (type == HUB_RESET_RESUME) {
++ /* The internal host controller state for the hub device
++ * may be gone after a host power loss on system resume.
++ * Update the device's info so the HW knows it's a hub.
++ */
++ hcd = bus_to_hcd(hdev->bus);
++ if (hcd->driver->update_hub_device) {
++ ret = hcd->driver->update_hub_device(hcd, hdev,
++ &hub->tt, GFP_NOIO);
++ if (ret < 0) {
++ dev_err(hub->intfdev, "Host not "
++ "accepting hub info "
++ "update.\n");
++ dev_err(hub->intfdev, "LS/FS devices "
++ "and hubs may not work "
++ "under this hub\n.");
++ }
++ }
++ hub_power_on(hub, true);
++ } else {
++ hub_power_on(hub, true);
++ }
++ }
++ init2:
++
++ /* Check each port and set hub->change_bits to let khubd know
++ * which ports need attention.
++ */
++ for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
++ struct usb_device *udev = hub->ports[port1 - 1]->child;
++ u16 portstatus, portchange;
++
++ portstatus = portchange = 0;
++ status = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
++ dev_dbg(hub->intfdev,
++ "port %d: status %04x change %04x\n",
++ port1, portstatus, portchange);
++
++ /* After anything other than HUB_RESUME (i.e., initialization
++ * or any sort of reset), every port should be disabled.
++ * Unconnected ports should likewise be disabled (paranoia),
++ * and so should ports for which we have no usb_device.
++ */
++ if ((portstatus & USB_PORT_STAT_ENABLE) && (
++ type != HUB_RESUME ||
++ !(portstatus & USB_PORT_STAT_CONNECTION) ||
++ !udev ||
++ udev->state == USB_STATE_NOTATTACHED)) {
++ /*
++ * USB3 protocol ports will automatically transition
++ * to Enabled state when detect an USB3.0 device attach.
++ * Do not disable USB3 protocol ports, just pretend
++ * power was lost
++ */
++ portstatus &= ~USB_PORT_STAT_ENABLE;
++ if (!hub_is_superspeed(hdev))
++ usb_clear_port_feature(hdev, port1,
++ USB_PORT_FEAT_ENABLE);
++ }
++
++ /* Clear status-change flags; we'll debounce later */
++ if (portchange & USB_PORT_STAT_C_CONNECTION) {
++ need_debounce_delay = true;
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_CONNECTION);
++ }
++ if (portchange & USB_PORT_STAT_C_ENABLE) {
++ need_debounce_delay = true;
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_ENABLE);
++ }
++ if (portchange & USB_PORT_STAT_C_RESET) {
++ need_debounce_delay = true;
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_RESET);
++ }
++ if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
++ hub_is_superspeed(hub->hdev)) {
++ need_debounce_delay = true;
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_BH_PORT_RESET);
++ }
++ /* We can forget about a "removed" device when there's a
++ * physical disconnect or the connect status changes.
++ */
++ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
++ (portchange & USB_PORT_STAT_C_CONNECTION))
++ clear_bit(port1, hub->removed_bits);
++
++ if (!udev || udev->state == USB_STATE_NOTATTACHED) {
++ /* Tell khubd to disconnect the device or
++ * check for a new connection
++ */
++ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
++ (portstatus & USB_PORT_STAT_OVERCURRENT))
++ set_bit(port1, hub->change_bits);
++
++ } else if (portstatus & USB_PORT_STAT_ENABLE) {
++ bool port_resumed = (portstatus &
++ USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_U0;
++ /* The power session apparently survived the resume.
++ * If there was an overcurrent or suspend change
++ * (i.e., remote wakeup request), have khubd
++ * take care of it. Look at the port link state
++ * for USB 3.0 hubs, since they don't have a suspend
++ * change bit, and they don't set the port link change
++ * bit on device-initiated resume.
++ */
++ if (portchange || (hub_is_superspeed(hub->hdev) &&
++ port_resumed))
++ set_bit(port1, hub->change_bits);
++
++ } else if (udev->persist_enabled) {
++ struct usb_port *port_dev = hub->ports[port1 - 1];
++
++#ifdef CONFIG_PM
++ udev->reset_resume = 1;
++#endif
++ /* Don't set the change_bits when the device
++ * was powered off.
++ */
++ if (port_dev->power_is_on)
++ set_bit(port1, hub->change_bits);
++
++ } else {
++ /* The power session is gone; tell khubd */
++ usb_set_device_state(udev, USB_STATE_NOTATTACHED);
++ set_bit(port1, hub->change_bits);
++ }
++ }
++
++ /* If no port-status-change flags were set, we don't need any
++ * debouncing. If flags were set we can try to debounce the
++ * ports all at once right now, instead of letting khubd do them
++ * one at a time later on.
++ *
++ * If any port-status changes do occur during this delay, khubd
++ * will see them later and handle them normally.
++ */
++ if (need_debounce_delay) {
++ delay = HUB_DEBOUNCE_STABLE;
++
++ /* Don't do a long sleep inside a workqueue routine */
++ if (type == HUB_INIT2) {
++ PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
++ schedule_delayed_work(&hub->init_work,
++ msecs_to_jiffies(delay));
++ return; /* Continues at init3: below */
++ } else {
++ msleep(delay);
++ }
++ }
++ init3:
++ hub->quiescing = 0;
++
++ status = usb_submit_urb(hub->urb, GFP_NOIO);
++ if (status < 0)
++ dev_err(hub->intfdev, "activate --> %d\n", status);
++ if (hub->has_indicators && blinkenlights)
++ schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
++
++ /* Scan all ports that need attention */
++ kick_khubd(hub);
++
++ /* Allow autosuspend if it was suppressed */
++ if (type <= HUB_INIT3)
++ usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
++}
++
++/* Implement the continuations for the delays above */
++static void hub_init_func2(struct work_struct *ws)
++{
++ struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
++
++ hub_activate(hub, HUB_INIT2);
++}
++
++static void hub_init_func3(struct work_struct *ws)
++{
++ struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
++
++ hub_activate(hub, HUB_INIT3);
++}
++
++enum hub_quiescing_type {
++ HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND
++};
++
++static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
++{
++ struct usb_device *hdev = hub->hdev;
++ int i;
++
++ cancel_delayed_work_sync(&hub->init_work);
++
++ /* khubd and related activity won't re-trigger */
++ hub->quiescing = 1;
++
++ if (type != HUB_SUSPEND) {
++ /* Disconnect all the children */
++ for (i = 0; i < hdev->maxchild; ++i) {
++ if (hub->ports[i]->child)
++ usb_disconnect(&hub->ports[i]->child);
++ }
++ }
++
++ /* Stop khubd and related activity */
++ usb_kill_urb(hub->urb);
++ if (hub->has_indicators)
++ cancel_delayed_work_sync(&hub->leds);
++ if (hub->tt.hub)
++ flush_work(&hub->tt.clear_work);
++}
++
++/* caller has locked the hub device */
++static int hub_pre_reset(struct usb_interface *intf)
++{
++ struct usb_hub *hub = usb_get_intfdata(intf);
++
++ hub_quiesce(hub, HUB_PRE_RESET);
++ return 0;
++}
++
++/* caller has locked the hub device */
++static int hub_post_reset(struct usb_interface *intf)
++{
++ struct usb_hub *hub = usb_get_intfdata(intf);
++
++ hub_activate(hub, HUB_POST_RESET);
++ return 0;
++}
++
++static int hub_configure(struct usb_hub *hub,
++ struct usb_endpoint_descriptor *endpoint)
++{
++ struct usb_hcd *hcd;
++ struct usb_device *hdev = hub->hdev;
++ struct device *hub_dev = hub->intfdev;
++ u16 hubstatus, hubchange;
++ u16 wHubCharacteristics;
++ unsigned int pipe;
++ int maxp, ret, i;
++ char *message = "out of memory";
++ unsigned unit_load;
++ unsigned full_load;
++
++ hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL);
++ if (!hub->buffer) {
++ ret = -ENOMEM;
++ goto fail;
++ }
++
++ hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL);
++ if (!hub->status) {
++ ret = -ENOMEM;
++ goto fail;
++ }
++ mutex_init(&hub->status_mutex);
++
++ hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
++ if (!hub->descriptor) {
++ ret = -ENOMEM;
++ goto fail;
++ }
++
++ /* Request the entire hub descriptor.
++ * hub->descriptor can handle USB_MAXCHILDREN ports,
++ * but the hub can/will return fewer bytes here.
++ */
++ ret = get_hub_descriptor(hdev, hub->descriptor);
++ if (ret < 0) {
++ message = "can't read hub descriptor";
++ goto fail;
++ } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
++ message = "hub has too many ports!";
++ ret = -ENODEV;
++ goto fail;
++ } else if (hub->descriptor->bNbrPorts == 0) {
++ message = "hub doesn't have any ports!";
++ ret = -ENODEV;
++ goto fail;
++ }
++
++ hdev->maxchild = hub->descriptor->bNbrPorts;
++ dev_info (hub_dev, "%d port%s detected\n", hdev->maxchild,
++ (hdev->maxchild == 1) ? "" : "s");
++
++ hub->ports = kzalloc(hdev->maxchild * sizeof(struct usb_port *),
++ GFP_KERNEL);
++ if (!hub->ports) {
++ ret = -ENOMEM;
++ goto fail;
++ }
++
++ wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
++ if (hub_is_superspeed(hdev)) {
++ unit_load = 150;
++ full_load = 900;
++ } else {
++ unit_load = 100;
++ full_load = 500;
++ }
++
++ /* FIXME for USB 3.0, skip for now */
++ if ((wHubCharacteristics & HUB_CHAR_COMPOUND) &&
++ !(hub_is_superspeed(hdev))) {
++ int i;
++ char portstr[USB_MAXCHILDREN + 1];
++
++ for (i = 0; i < hdev->maxchild; i++)
++ portstr[i] = hub->descriptor->u.hs.DeviceRemovable
++ [((i + 1) / 8)] & (1 << ((i + 1) % 8))
++ ? 'F' : 'R';
++ portstr[hdev->maxchild] = 0;
++ dev_dbg(hub_dev, "compound device; port removable status: %s\n", portstr);
++ } else
++ dev_dbg(hub_dev, "standalone hub\n");
++
++ switch (wHubCharacteristics & HUB_CHAR_LPSM) {
++ case HUB_CHAR_COMMON_LPSM:
++ dev_dbg(hub_dev, "ganged power switching\n");
++ break;
++ case HUB_CHAR_INDV_PORT_LPSM:
++ dev_dbg(hub_dev, "individual port power switching\n");
++ break;
++ case HUB_CHAR_NO_LPSM:
++ case HUB_CHAR_LPSM:
++ dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
++ break;
++ }
++
++ switch (wHubCharacteristics & HUB_CHAR_OCPM) {
++ case HUB_CHAR_COMMON_OCPM:
++ dev_dbg(hub_dev, "global over-current protection\n");
++ break;
++ case HUB_CHAR_INDV_PORT_OCPM:
++ dev_dbg(hub_dev, "individual port over-current protection\n");
++ break;
++ case HUB_CHAR_NO_OCPM:
++ case HUB_CHAR_OCPM:
++ dev_dbg(hub_dev, "no over-current protection\n");
++ break;
++ }
++
++ spin_lock_init (&hub->tt.lock);
++ INIT_LIST_HEAD (&hub->tt.clear_list);
++ INIT_WORK(&hub->tt.clear_work, hub_tt_work);
++ switch (hdev->descriptor.bDeviceProtocol) {
++ case USB_HUB_PR_FS:
++ break;
++ case USB_HUB_PR_HS_SINGLE_TT:
++ dev_dbg(hub_dev, "Single TT\n");
++ hub->tt.hub = hdev;
++ break;
++ case USB_HUB_PR_HS_MULTI_TT:
++ ret = usb_set_interface(hdev, 0, 1);
++ if (ret == 0) {
++ dev_dbg(hub_dev, "TT per port\n");
++ hub->tt.multi = 1;
++ } else
++ dev_err(hub_dev, "Using single TT (err %d)\n",
++ ret);
++ hub->tt.hub = hdev;
++ break;
++ case USB_HUB_PR_SS:
++ /* USB 3.0 hubs don't have a TT */
++ break;
++ default:
++ dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
++ hdev->descriptor.bDeviceProtocol);
++ break;
++ }
++
++ /* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */
++ switch (wHubCharacteristics & HUB_CHAR_TTTT) {
++ case HUB_TTTT_8_BITS:
++ if (hdev->descriptor.bDeviceProtocol != 0) {
++ hub->tt.think_time = 666;
++ dev_dbg(hub_dev, "TT requires at most %d "
++ "FS bit times (%d ns)\n",
++ 8, hub->tt.think_time);
++ }
++ break;
++ case HUB_TTTT_16_BITS:
++ hub->tt.think_time = 666 * 2;
++ dev_dbg(hub_dev, "TT requires at most %d "
++ "FS bit times (%d ns)\n",
++ 16, hub->tt.think_time);
++ break;
++ case HUB_TTTT_24_BITS:
++ hub->tt.think_time = 666 * 3;
++ dev_dbg(hub_dev, "TT requires at most %d "
++ "FS bit times (%d ns)\n",
++ 24, hub->tt.think_time);
++ break;
++ case HUB_TTTT_32_BITS:
++ hub->tt.think_time = 666 * 4;
++ dev_dbg(hub_dev, "TT requires at most %d "
++ "FS bit times (%d ns)\n",
++ 32, hub->tt.think_time);
++ break;
++ }
++
++ /* probe() zeroes hub->indicator[] */
++ if (wHubCharacteristics & HUB_CHAR_PORTIND) {
++ hub->has_indicators = 1;
++ dev_dbg(hub_dev, "Port indicators are supported\n");
++ }
++
++ dev_dbg(hub_dev, "power on to power good time: %dms\n",
++ hub->descriptor->bPwrOn2PwrGood * 2);
++
++ /* power budgeting mostly matters with bus-powered hubs,
++ * and battery-powered root hubs (may provide just 8 mA).
++ */
++ ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
++ if (ret) {
++ message = "can't get hub status";
++ goto fail;
++ }
++ hcd = bus_to_hcd(hdev->bus);
++ if (hdev == hdev->bus->root_hub) {
++ if (hcd->power_budget > 0)
++ hdev->bus_mA = hcd->power_budget;
++ else
++ hdev->bus_mA = full_load * hdev->maxchild;
++ if (hdev->bus_mA >= full_load)
++ hub->mA_per_port = full_load;
++ else {
++ hub->mA_per_port = hdev->bus_mA;
++ hub->limited_power = 1;
++ }
++ } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
++ int remaining = hdev->bus_mA -
++ hub->descriptor->bHubContrCurrent;
++
++ dev_dbg(hub_dev, "hub controller current requirement: %dmA\n",
++ hub->descriptor->bHubContrCurrent);
++ hub->limited_power = 1;
++
++ if (remaining < hdev->maxchild * unit_load)
++ dev_warn(hub_dev,
++ "insufficient power available "
++ "to use all downstream ports\n");
++ hub->mA_per_port = unit_load; /* 7.2.1 */
++
++ } else { /* Self-powered external hub */
++ /* FIXME: What about battery-powered external hubs that
++ * provide less current per port? */
++ hub->mA_per_port = full_load;
++ }
++ if (hub->mA_per_port < full_load)
++ dev_dbg(hub_dev, "%umA bus power budget for each child\n",
++ hub->mA_per_port);
++
++ /* Update the HCD's internal representation of this hub before khubd
++ * starts getting port status changes for devices under the hub.
++ */
++ if (hcd->driver->update_hub_device) {
++ ret = hcd->driver->update_hub_device(hcd, hdev,
++ &hub->tt, GFP_KERNEL);
++ if (ret < 0) {
++ message = "can't update HCD hub info";
++ goto fail;
++ }
++ }
++
++ ret = hub_hub_status(hub, &hubstatus, &hubchange);
++ if (ret < 0) {
++ message = "can't get hub status";
++ goto fail;
++ }
++
++ /* local power status reports aren't always correct */
++ if (hdev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER)
++ dev_dbg(hub_dev, "local power source is %s\n",
++ (hubstatus & HUB_STATUS_LOCAL_POWER)
++ ? "lost (inactive)" : "good");
++
++ if ((wHubCharacteristics & HUB_CHAR_OCPM) == 0)
++ dev_dbg(hub_dev, "%sover-current condition exists\n",
++ (hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no ");
++
++ /* set up the interrupt endpoint
++ * We use the EP's maxpacket size instead of (PORTS+1+7)/8
++ * bytes as USB2.0[11.12.3] says because some hubs are known
++ * to send more data (and thus cause overflow). For root hubs,
++ * maxpktsize is defined in hcd.c's fake endpoint descriptors
++ * to be big enough for at least USB_MAXCHILDREN ports. */
++ pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
++ maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
++
++ if (maxp > sizeof(*hub->buffer))
++ maxp = sizeof(*hub->buffer);
++
++ hub->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!hub->urb) {
++ ret = -ENOMEM;
++ goto fail;
++ }
++
++ usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq,
++ hub, endpoint->bInterval);
++
++ /* maybe cycle the hub leds */
++ if (hub->has_indicators && blinkenlights)
++ hub->indicator[0] = INDICATOR_CYCLE;
++
++ for (i = 0; i < hdev->maxchild; i++) {
++ ret = usb_hub_create_port_device(hub, i + 1);
++ if (ret < 0) {
++ dev_err(hub->intfdev,
++ "couldn't create port%d device.\n", i + 1);
++ hdev->maxchild = i;
++ goto fail_keep_maxchild;
++ }
++ }
++
++ usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
++
++ hub_activate(hub, HUB_INIT);
++ return 0;
++
++fail:
++ hdev->maxchild = 0;
++fail_keep_maxchild:
++ dev_err (hub_dev, "config failed, %s (err %d)\n",
++ message, ret);
++ /* hub_disconnect() frees urb and descriptor */
++ return ret;
++}
++
++static void hub_release(struct kref *kref)
++{
++ struct usb_hub *hub = container_of(kref, struct usb_hub, kref);
++
++ usb_put_intf(to_usb_interface(hub->intfdev));
++ kfree(hub);
++}
++
++static unsigned highspeed_hubs;
++
++static void hub_disconnect(struct usb_interface *intf)
++{
++ struct usb_hub *hub = usb_get_intfdata(intf);
++ struct usb_device *hdev = interface_to_usbdev(intf);
++ int port1;
++
++ /* Take the hub off the event list and don't let it be added again */
++ spin_lock_irq(&hub_event_lock);
++ if (!list_empty(&hub->event_list)) {
++ list_del_init(&hub->event_list);
++ usb_autopm_put_interface_no_suspend(intf);
++ }
++ hub->disconnected = 1;
++ spin_unlock_irq(&hub_event_lock);
++
++ /* Disconnect all children and quiesce the hub */
++ hub->error = 0;
++ hub_quiesce(hub, HUB_DISCONNECT);
++
++ /* Avoid races with recursively_mark_NOTATTACHED() */
++ spin_lock_irq(&device_state_lock);
++ port1 = hdev->maxchild;
++ hdev->maxchild = 0;
++ usb_set_intfdata(intf, NULL);
++ spin_unlock_irq(&device_state_lock);
++
++ for (; port1 > 0; --port1)
++ usb_hub_remove_port_device(hub, port1);
++
++ if (hub->hdev->speed == USB_SPEED_HIGH)
++ highspeed_hubs--;
++
++ usb_free_urb(hub->urb);
++ kfree(hub->ports);
++ kfree(hub->descriptor);
++ kfree(hub->status);
++ kfree(hub->buffer);
++
++ pm_suspend_ignore_children(&intf->dev, false);
++ kref_put(&hub->kref, hub_release);
++}
++
++static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
++{
++ struct usb_host_interface *desc;
++ struct usb_endpoint_descriptor *endpoint;
++ struct usb_device *hdev;
++ struct usb_hub *hub;
++
++ desc = intf->cur_altsetting;
++ hdev = interface_to_usbdev(intf);
++
++ /*
++ * Set default autosuspend delay as 0 to speedup bus suspend,
++ * based on the below considerations:
++ *
++ * - Unlike other drivers, the hub driver does not rely on the
++ * autosuspend delay to provide enough time to handle a wakeup
++ * event, and the submitted status URB is just to check future
++ * change on hub downstream ports, so it is safe to do it.
++ *
++ * - The patch might cause one or more auto supend/resume for
++ * below very rare devices when they are plugged into hub
++ * first time:
++ *
++ * devices having trouble initializing, and disconnect
++ * themselves from the bus and then reconnect a second
++ * or so later
++ *
++ * devices just for downloading firmware, and disconnects
++ * themselves after completing it
++ *
++ * For these quite rare devices, their drivers may change the
++ * autosuspend delay of their parent hub in the probe() to one
++ * appropriate value to avoid the subtle problem if someone
++ * does care it.
++ *
++ * - The patch may cause one or more auto suspend/resume on
++ * hub during running 'lsusb', but it is probably too
++ * infrequent to worry about.
++ *
++ * - Change autosuspend delay of hub can avoid unnecessary auto
++ * suspend timer for hub, also may decrease power consumption
++ * of USB bus.
++ *
++ * - If user has indicated to prevent autosuspend by passing
++ * usbcore.autosuspend = -1 then keep autosuspend disabled.
++ */
++#ifdef CONFIG_PM_RUNTIME
++ if (hdev->dev.power.autosuspend_delay >= 0)
++ pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
++#endif
++
++ /*
++ * Hubs have proper suspend/resume support, except for root hubs
++ * where the controller driver doesn't have bus_suspend and
++ * bus_resume methods.
++ */
++ if (hdev->parent) { /* normal device */
++ usb_enable_autosuspend(hdev);
++ } else { /* root hub */
++ const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
++
++ if (drv->bus_suspend && drv->bus_resume)
++ usb_enable_autosuspend(hdev);
++ }
++
++ if (hdev->level == MAX_TOPO_LEVEL) {
++ dev_err(&intf->dev,
++ "Unsupported bus topology: hub nested too deep\n");
++ return -E2BIG;
++ }
++
++#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
++ if (hdev->parent) {
++ dev_warn(&intf->dev, "ignoring external hub\n");
++ return -ENODEV;
++ }
++#endif
++
++ /* Some hubs have a subclass of 1, which AFAICT according to the */
++ /* specs is not defined, but it works */
++ if ((desc->desc.bInterfaceSubClass != 0) &&
++ (desc->desc.bInterfaceSubClass != 1)) {
++descriptor_error:
++ dev_err (&intf->dev, "bad descriptor, ignoring hub\n");
++ return -EIO;
++ }
++
++ /* Multiple endpoints? What kind of mutant ninja-hub is this? */
++ if (desc->desc.bNumEndpoints != 1)
++ goto descriptor_error;
++
++ endpoint = &desc->endpoint[0].desc;
++
++ /* If it's not an interrupt in endpoint, we'd better punt! */
++ if (!usb_endpoint_is_int_in(endpoint))
++ goto descriptor_error;
++
++ /* We found a hub */
++ dev_info (&intf->dev, "USB hub found\n");
++
++ hub = kzalloc(sizeof(*hub), GFP_KERNEL);
++ if (!hub) {
++ dev_dbg (&intf->dev, "couldn't kmalloc hub struct\n");
++ return -ENOMEM;
++ }
++
++ kref_init(&hub->kref);
++ INIT_LIST_HEAD(&hub->event_list);
++ hub->intfdev = &intf->dev;
++ hub->hdev = hdev;
++ INIT_DELAYED_WORK(&hub->leds, led_work);
++ INIT_DELAYED_WORK(&hub->init_work, NULL);
++ usb_get_intf(intf);
++
++ usb_set_intfdata (intf, hub);
++ intf->needs_remote_wakeup = 1;
++ pm_suspend_ignore_children(&intf->dev, true);
++
++ if (hdev->speed == USB_SPEED_HIGH)
++ highspeed_hubs++;
++
++ if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
++ hub->quirk_check_port_auto_suspend = 1;
++
++ if (hub_configure(hub, endpoint) >= 0)
++ return 0;
++
++ hub_disconnect (intf);
++ return -ENODEV;
++}
++
++static int
++hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
++{
++ struct usb_device *hdev = interface_to_usbdev (intf);
++ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
++
++ /* assert ifno == 0 (part of hub spec) */
++ switch (code) {
++ case USBDEVFS_HUB_PORTINFO: {
++ struct usbdevfs_hub_portinfo *info = user_data;
++ int i;
++
++ spin_lock_irq(&device_state_lock);
++ if (hdev->devnum <= 0)
++ info->nports = 0;
++ else {
++ info->nports = hdev->maxchild;
++ for (i = 0; i < info->nports; i++) {
++ if (hub->ports[i]->child == NULL)
++ info->port[i] = 0;
++ else
++ info->port[i] =
++ hub->ports[i]->child->devnum;
++ }
++ }
++ spin_unlock_irq(&device_state_lock);
++
++ return info->nports + 1;
++ }
++
++ default:
++ return -ENOSYS;
++ }
++}
++
++/*
++ * Allow user programs to claim ports on a hub. When a device is attached
++ * to one of these "claimed" ports, the program will "own" the device.
++ */
++static int find_port_owner(struct usb_device *hdev, unsigned port1,
++ struct dev_state ***ppowner)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
++
++ if (hdev->state == USB_STATE_NOTATTACHED)
++ return -ENODEV;
++ if (port1 == 0 || port1 > hdev->maxchild)
++ return -EINVAL;
++
++ /* Devices not managed by the hub driver
++ * will always have maxchild equal to 0.
++ */
++ *ppowner = &(hub->ports[port1 - 1]->port_owner);
++ return 0;
++}
++
++/* In the following three functions, the caller must hold hdev's lock */
++int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
++ struct dev_state *owner)
++{
++ int rc;
++ struct dev_state **powner;
++
++ rc = find_port_owner(hdev, port1, &powner);
++ if (rc)
++ return rc;
++ if (*powner)
++ return -EBUSY;
++ *powner = owner;
++ return rc;
++}
++
++int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
++ struct dev_state *owner)
++{
++ int rc;
++ struct dev_state **powner;
++
++ rc = find_port_owner(hdev, port1, &powner);
++ if (rc)
++ return rc;
++ if (*powner != owner)
++ return -ENOENT;
++ *powner = NULL;
++ return rc;
++}
++
++void usb_hub_release_all_ports(struct usb_device *hdev, struct dev_state *owner)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
++ int n;
++
++ for (n = 0; n < hdev->maxchild; n++) {
++ if (hub->ports[n]->port_owner == owner)
++ hub->ports[n]->port_owner = NULL;
++ }
++
++}
++
++/* The caller must hold udev's lock */
++bool usb_device_is_owned(struct usb_device *udev)
++{
++ struct usb_hub *hub;
++
++ if (udev->state == USB_STATE_NOTATTACHED || !udev->parent)
++ return false;
++ hub = usb_hub_to_struct_hub(udev->parent);
++ return !!hub->ports[udev->portnum - 1]->port_owner;
++}
++
++static void recursively_mark_NOTATTACHED(struct usb_device *udev)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
++ int i;
++
++ for (i = 0; i < udev->maxchild; ++i) {
++ if (hub->ports[i]->child)
++ recursively_mark_NOTATTACHED(hub->ports[i]->child);
++ }
++ if (udev->state == USB_STATE_SUSPENDED)
++ udev->active_duration -= jiffies;
++ udev->state = USB_STATE_NOTATTACHED;
++}
++
++/**
++ * usb_set_device_state - change a device's current state (usbcore, hcds)
++ * @udev: pointer to device whose state should be changed
++ * @new_state: new state value to be stored
++ *
++ * udev->state is _not_ fully protected by the device lock. Although
++ * most transitions are made only while holding the lock, the state can
++ * can change to USB_STATE_NOTATTACHED at almost any time. This
++ * is so that devices can be marked as disconnected as soon as possible,
++ * without having to wait for any semaphores to be released. As a result,
++ * all changes to any device's state must be protected by the
++ * device_state_lock spinlock.
++ *
++ * Once a device has been added to the device tree, all changes to its state
++ * should be made using this routine. The state should _not_ be set directly.
++ *
++ * If udev->state is already USB_STATE_NOTATTACHED then no change is made.
++ * Otherwise udev->state is set to new_state, and if new_state is
++ * USB_STATE_NOTATTACHED then all of udev's descendants' states are also set
++ * to USB_STATE_NOTATTACHED.
++ */
++void usb_set_device_state(struct usb_device *udev,
++ enum usb_device_state new_state)
++{
++ unsigned long flags;
++ int wakeup = -1;
++
++ spin_lock_irqsave(&device_state_lock, flags);
++ if (udev->state == USB_STATE_NOTATTACHED)
++ ; /* do nothing */
++ else if (new_state != USB_STATE_NOTATTACHED) {
++
++ /* root hub wakeup capabilities are managed out-of-band
++ * and may involve silicon errata ... ignore them here.
++ */
++ if (udev->parent) {
++ if (udev->state == USB_STATE_SUSPENDED
++ || new_state == USB_STATE_SUSPENDED)
++ ; /* No change to wakeup settings */
++ else if (new_state == USB_STATE_CONFIGURED)
++ wakeup = (udev->quirks &
++ USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
++ udev->actconfig->desc.bmAttributes &
++ USB_CONFIG_ATT_WAKEUP;
++ else
++ wakeup = 0;
++ }
++ if (udev->state == USB_STATE_SUSPENDED &&
++ new_state != USB_STATE_SUSPENDED)
++ udev->active_duration -= jiffies;
++ else if (new_state == USB_STATE_SUSPENDED &&
++ udev->state != USB_STATE_SUSPENDED)
++ udev->active_duration += jiffies;
++ udev->state = new_state;
++ } else
++ recursively_mark_NOTATTACHED(udev);
++ spin_unlock_irqrestore(&device_state_lock, flags);
++ if (wakeup >= 0)
++ device_set_wakeup_capable(&udev->dev, wakeup);
++}
++EXPORT_SYMBOL_GPL(usb_set_device_state);
++
++/*
++ * Choose a device number.
++ *
++ * Device numbers are used as filenames in usbfs. On USB-1.1 and
++ * USB-2.0 buses they are also used as device addresses, however on
++ * USB-3.0 buses the address is assigned by the controller hardware
++ * and it usually is not the same as the device number.
++ *
++ * WUSB devices are simple: they have no hubs behind, so the mapping
++ * device <-> virtual port number becomes 1:1. Why? to simplify the
++ * life of the device connection logic in
++ * drivers/usb/wusbcore/devconnect.c. When we do the initial secret
++ * handshake we need to assign a temporary address in the unauthorized
++ * space. For simplicity we use the first virtual port number found to
++ * be free [drivers/usb/wusbcore/devconnect.c:wusbhc_devconnect_ack()]
++ * and that becomes it's address [X < 128] or its unauthorized address
++ * [X | 0x80].
++ *
++ * We add 1 as an offset to the one-based USB-stack port number
++ * (zero-based wusb virtual port index) for two reasons: (a) dev addr
++ * 0 is reserved by USB for default address; (b) Linux's USB stack
++ * uses always #1 for the root hub of the controller. So USB stack's
++ * port #1, which is wusb virtual-port #0 has address #2.
++ *
++ * Devices connected under xHCI are not as simple. The host controller
++ * supports virtualization, so the hardware assigns device addresses and
++ * the HCD must setup data structures before issuing a set address
++ * command to the hardware.
++ */
++static void choose_devnum(struct usb_device *udev)
++{
++ int devnum;
++ struct usb_bus *bus = udev->bus;
++
++ /* If khubd ever becomes multithreaded, this will need a lock */
++ if (udev->wusb) {
++ devnum = udev->portnum + 1;
++ BUG_ON(test_bit(devnum, bus->devmap.devicemap));
++ } else {
++ /* Try to allocate the next devnum beginning at
++ * bus->devnum_next. */
++ devnum = find_next_zero_bit(bus->devmap.devicemap, 128,
++ bus->devnum_next);
++ if (devnum >= 128)
++ devnum = find_next_zero_bit(bus->devmap.devicemap,
++ 128, 1);
++ bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1);
++ }
++ if (devnum < 128) {
++ set_bit(devnum, bus->devmap.devicemap);
++ udev->devnum = devnum;
++ }
++}
++
++static void release_devnum(struct usb_device *udev)
++{
++ if (udev->devnum > 0) {
++ clear_bit(udev->devnum, udev->bus->devmap.devicemap);
++ udev->devnum = -1;
++ }
++}
++
++static void update_devnum(struct usb_device *udev, int devnum)
++{
++ /* The address for a WUSB device is managed by wusbcore. */
++ if (!udev->wusb)
++ udev->devnum = devnum;
++}
++
++static void hub_free_dev(struct usb_device *udev)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ /* Root hubs aren't real devices, so don't free HCD resources */
++ if (hcd->driver->free_dev && udev->parent)
++ hcd->driver->free_dev(hcd, udev);
++}
++
++/**
++ * usb_disconnect - disconnect a device (usbcore-internal)
++ * @pdev: pointer to device being disconnected
++ * Context: !in_interrupt ()
++ *
++ * Something got disconnected. Get rid of it and all of its children.
++ *
++ * If *pdev is a normal device then the parent hub must already be locked.
++ * If *pdev is a root hub then the caller must hold the usb_bus_list_lock,
++ * which protects the set of root hubs as well as the list of buses.
++ *
++ * Only hub drivers (including virtual root hub drivers for host
++ * controllers) should ever call this.
++ *
++ * This call is synchronous, and may not be used in an interrupt context.
++ */
++void usb_disconnect(struct usb_device **pdev)
++{
++ struct usb_device *udev = *pdev;
++ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
++ int i;
++
++ /* mark the device as inactive, so any further urb submissions for
++ * this device (and any of its children) will fail immediately.
++ * this quiesces everything except pending urbs.
++ */
++ usb_set_device_state(udev, USB_STATE_NOTATTACHED);
++ dev_info(&udev->dev, "USB disconnect, device number %d\n",
++ udev->devnum);
++
++ usb_lock_device(udev);
++
++ /* Free up all the children before we remove this device */
++ for (i = 0; i < udev->maxchild; i++) {
++ if (hub->ports[i]->child)
++ usb_disconnect(&hub->ports[i]->child);
++ }
++
++ /* deallocate hcd/hardware state ... nuking all pending urbs and
++ * cleaning up all state associated with the current configuration
++ * so that the hardware is now fully quiesced.
++ */
++ dev_dbg (&udev->dev, "unregistering device\n");
++ usb_disable_device(udev, 0);
++ usb_hcd_synchronize_unlinks(udev);
++
++ if (udev->parent) {
++ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
++ struct usb_port *port_dev = hub->ports[udev->portnum - 1];
++
++ sysfs_remove_link(&udev->dev.kobj, "port");
++ sysfs_remove_link(&port_dev->dev.kobj, "device");
++
++ if (!port_dev->did_runtime_put)
++ pm_runtime_put(&port_dev->dev);
++ else
++ port_dev->did_runtime_put = false;
++ }
++
++ usb_remove_ep_devs(&udev->ep0);
++ usb_unlock_device(udev);
++
++ /* Unregister the device. The device driver is responsible
++ * for de-configuring the device and invoking the remove-device
++ * notifier chain (used by usbfs and possibly others).
++ */
++ device_del(&udev->dev);
++
++ /* Free the device number and delete the parent's children[]
++ * (or root_hub) pointer.
++ */
++ release_devnum(udev);
++
++ /* Avoid races with recursively_mark_NOTATTACHED() */
++ spin_lock_irq(&device_state_lock);
++ *pdev = NULL;
++ spin_unlock_irq(&device_state_lock);
++
++ hub_free_dev(udev);
++
++ put_device(&udev->dev);
++}
++
++#ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
++static void show_string(struct usb_device *udev, char *id, char *string)
++{
++ if (!string)
++ return;
++ dev_info(&udev->dev, "%s: %s\n", id, string);
++}
++
++static void announce_device(struct usb_device *udev)
++{
++ dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x\n",
++ le16_to_cpu(udev->descriptor.idVendor),
++ le16_to_cpu(udev->descriptor.idProduct));
++ dev_info(&udev->dev,
++ "New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n",
++ udev->descriptor.iManufacturer,
++ udev->descriptor.iProduct,
++ udev->descriptor.iSerialNumber);
++ show_string(udev, "Product", udev->product);
++ show_string(udev, "Manufacturer", udev->manufacturer);
++ show_string(udev, "SerialNumber", udev->serial);
++}
++#else
++static inline void announce_device(struct usb_device *udev) { }
++#endif
++
++#ifdef CONFIG_USB_OTG
++#include "otg_whitelist.h"
++#endif
++
++/**
++ * usb_enumerate_device_otg - FIXME (usbcore-internal)
++ * @udev: newly addressed device (in ADDRESS state)
++ *
++ * Finish enumeration for On-The-Go devices
++ *
++ * Return: 0 if successful. A negative error code otherwise.
++ */
++static int usb_enumerate_device_otg(struct usb_device *udev)
++{
++ int err = 0;
++
++#ifdef CONFIG_USB_OTG
++ /*
++ * OTG-aware devices on OTG-capable root hubs may be able to use SRP,
++ * to wake us after we've powered off VBUS; and HNP, switching roles
++ * "host" to "peripheral". The OTG descriptor helps figure this out.
++ */
++ if (!udev->bus->is_b_host
++ && udev->config
++ && udev->parent == udev->bus->root_hub) {
++ struct usb_otg_descriptor *desc = NULL;
++ struct usb_bus *bus = udev->bus;
++
++ /* descriptor may appear anywhere in config */
++ if (__usb_get_extra_descriptor (udev->rawdescriptors[0],
++ le16_to_cpu(udev->config[0].desc.wTotalLength),
++ USB_DT_OTG, (void **) &desc) == 0) {
++ if (desc->bmAttributes & USB_OTG_HNP) {
++ unsigned port1 = udev->portnum;
++
++ dev_info(&udev->dev,
++ "Dual-Role OTG device on %sHNP port\n",
++ (port1 == bus->otg_port)
++ ? "" : "non-");
++
++ /* enable HNP before suspend, it's simpler */
++ if (port1 == bus->otg_port)
++ bus->b_hnp_enable = 1;
++ err = usb_control_msg(udev,
++ usb_sndctrlpipe(udev, 0),
++ USB_REQ_SET_FEATURE, 0,
++ bus->b_hnp_enable
++ ? USB_DEVICE_B_HNP_ENABLE
++ : USB_DEVICE_A_ALT_HNP_SUPPORT,
++ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
++ if (err < 0) {
++ /* OTG MESSAGE: report errors here,
++ * customize to match your product.
++ */
++ dev_info(&udev->dev,
++ "can't set HNP mode: %d\n",
++ err);
++ bus->b_hnp_enable = 0;
++ }
++ }
++ }
++ }
++
++ if (!is_targeted(udev)) {
++
++ /* Maybe it can talk to us, though we can't talk to it.
++ * (Includes HNP test device.)
++ */
++ if (udev->bus->b_hnp_enable || udev->bus->is_b_host) {
++ err = usb_port_suspend(udev, PMSG_SUSPEND);
++ if (err < 0)
++ dev_dbg(&udev->dev, "HNP fail, %d\n", err);
++ }
++ err = -ENOTSUPP;
++ goto fail;
++ }
++fail:
++#endif
++ return err;
++}
++
++
++/**
++ * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
++ * @udev: newly addressed device (in ADDRESS state)
++ *
++ * This is only called by usb_new_device() and usb_authorize_device()
++ * and FIXME -- all comments that apply to them apply here wrt to
++ * environment.
++ *
++ * If the device is WUSB and not authorized, we don't attempt to read
++ * the string descriptors, as they will be errored out by the device
++ * until it has been authorized.
++ *
++ * Return: 0 if successful. A negative error code otherwise.
++ */
++static int usb_enumerate_device(struct usb_device *udev)
++{
++ int err;
++
++ if (udev->config == NULL) {
++ err = usb_get_configuration(udev);
++ if (err < 0) {
++ if (err != -ENODEV)
++ dev_err(&udev->dev, "can't read configurations, error %d\n",
++ err);
++ return err;
++ }
++ }
++
++ /* read the standard strings and cache them if present */
++ udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
++ udev->manufacturer = usb_cache_string(udev,
++ udev->descriptor.iManufacturer);
++ udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
++
++ err = usb_enumerate_device_otg(udev);
++ if (err < 0)
++ return err;
++
++ usb_detect_interface_quirks(udev);
++
++ return 0;
++}
++
++static void set_usb_port_removable(struct usb_device *udev)
++{
++ struct usb_device *hdev = udev->parent;
++ struct usb_hub *hub;
++ u8 port = udev->portnum;
++ u16 wHubCharacteristics;
++ bool removable = true;
++
++ if (!hdev)
++ return;
++
++ hub = usb_hub_to_struct_hub(udev->parent);
++
++ wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
++
++ if (!(wHubCharacteristics & HUB_CHAR_COMPOUND))
++ return;
++
++ if (hub_is_superspeed(hdev)) {
++ if (le16_to_cpu(hub->descriptor->u.ss.DeviceRemovable)
++ & (1 << port))
++ removable = false;
++ } else {
++ if (hub->descriptor->u.hs.DeviceRemovable[port / 8] & (1 << (port % 8)))
++ removable = false;
++ }
++
++ if (removable)
++ udev->removable = USB_DEVICE_REMOVABLE;
++ else
++ udev->removable = USB_DEVICE_FIXED;
++}
++
++/**
++ * usb_new_device - perform initial device setup (usbcore-internal)
++ * @udev: newly addressed device (in ADDRESS state)
++ *
++ * This is called with devices which have been detected but not fully
++ * enumerated. The device descriptor is available, but not descriptors
++ * for any device configuration. The caller must have locked either
++ * the parent hub (if udev is a normal device) or else the
++ * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
++ * udev has already been installed, but udev is not yet visible through
++ * sysfs or other filesystem code.
++ *
++ * This call is synchronous, and may not be used in an interrupt context.
++ *
++ * Only the hub driver or root-hub registrar should ever call this.
++ *
++ * Return: Whether the device is configured properly or not. Zero if the
++ * interface was registered with the driver core; else a negative errno
++ * value.
++ *
++ */
++int usb_new_device(struct usb_device *udev)
++{
++ int err;
++
++ if (udev->parent) {
++ /* Initialize non-root-hub device wakeup to disabled;
++ * device (un)configuration controls wakeup capable
++ * sysfs power/wakeup controls wakeup enabled/disabled
++ */
++ device_init_wakeup(&udev->dev, 0);
++ }
++
++ /* Tell the runtime-PM framework the device is active */
++ pm_runtime_set_active(&udev->dev);
++ pm_runtime_get_noresume(&udev->dev);
++ pm_runtime_use_autosuspend(&udev->dev);
++ pm_runtime_enable(&udev->dev);
++
++ /* By default, forbid autosuspend for all devices. It will be
++ * allowed for hubs during binding.
++ */
++ usb_disable_autosuspend(udev);
++
++ err = usb_enumerate_device(udev); /* Read descriptors */
++ if (err < 0)
++ goto fail;
++ dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
++ udev->devnum, udev->bus->busnum,
++ (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
++ /* export the usbdev device-node for libusb */
++ udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
++ (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
++
++ /* Tell the world! */
++ announce_device(udev);
++
++ if (udev->serial)
++ add_device_randomness(udev->serial, strlen(udev->serial));
++ if (udev->product)
++ add_device_randomness(udev->product, strlen(udev->product));
++ if (udev->manufacturer)
++ add_device_randomness(udev->manufacturer,
++ strlen(udev->manufacturer));
++
++ device_enable_async_suspend(&udev->dev);
++
++ /*
++ * check whether the hub marks this port as non-removable. Do it
++ * now so that platform-specific data can override it in
++ * device_add()
++ */
++ if (udev->parent)
++ set_usb_port_removable(udev);
++
++ /* Register the device. The device driver is responsible
++ * for configuring the device and invoking the add-device
++ * notifier chain (used by usbfs and possibly others).
++ */
++ err = device_add(&udev->dev);
++ if (err) {
++ dev_err(&udev->dev, "can't device_add, error %d\n", err);
++ goto fail;
++ }
++
++ /* Create link files between child device and usb port device. */
++ if (udev->parent) {
++ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
++ struct usb_port *port_dev = hub->ports[udev->portnum - 1];
++
++ err = sysfs_create_link(&udev->dev.kobj,
++ &port_dev->dev.kobj, "port");
++ if (err)
++ goto fail;
++
++ err = sysfs_create_link(&port_dev->dev.kobj,
++ &udev->dev.kobj, "device");
++ if (err) {
++ sysfs_remove_link(&udev->dev.kobj, "port");
++ goto fail;
++ }
++
++ pm_runtime_get_sync(&port_dev->dev);
++ }
++
++ (void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
++ usb_mark_last_busy(udev);
++ pm_runtime_put_sync_autosuspend(&udev->dev);
++ return err;
++
++fail:
++ usb_set_device_state(udev, USB_STATE_NOTATTACHED);
++ pm_runtime_disable(&udev->dev);
++ pm_runtime_set_suspended(&udev->dev);
++ return err;
++}
++
++
++/**
++ * usb_deauthorize_device - deauthorize a device (usbcore-internal)
++ * @usb_dev: USB device
++ *
++ * Move the USB device to a very basic state where interfaces are disabled
++ * and the device is in fact unconfigured and unusable.
++ *
++ * We share a lock (that we have) with device_del(), so we need to
++ * defer its call.
++ *
++ * Return: 0.
++ */
++int usb_deauthorize_device(struct usb_device *usb_dev)
++{
++ usb_lock_device(usb_dev);
++ if (usb_dev->authorized == 0)
++ goto out_unauthorized;
++
++ usb_dev->authorized = 0;
++ usb_set_configuration(usb_dev, -1);
++
++out_unauthorized:
++ usb_unlock_device(usb_dev);
++ return 0;
++}
++
++
++int usb_authorize_device(struct usb_device *usb_dev)
++{
++ int result = 0, c;
++
++ usb_lock_device(usb_dev);
++ if (usb_dev->authorized == 1)
++ goto out_authorized;
++
++ result = usb_autoresume_device(usb_dev);
++ if (result < 0) {
++ dev_err(&usb_dev->dev,
++ "can't autoresume for authorization: %d\n", result);
++ goto error_autoresume;
++ }
++ result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
++ if (result < 0) {
++ dev_err(&usb_dev->dev, "can't re-read device descriptor for "
++ "authorization: %d\n", result);
++ goto error_device_descriptor;
++ }
++
++ usb_dev->authorized = 1;
++ /* Choose and set the configuration. This registers the interfaces
++ * with the driver core and lets interface drivers bind to them.
++ */
++ c = usb_choose_configuration(usb_dev);
++ if (c >= 0) {
++ result = usb_set_configuration(usb_dev, c);
++ if (result) {
++ dev_err(&usb_dev->dev,
++ "can't set config #%d, error %d\n", c, result);
++ /* This need not be fatal. The user can try to
++ * set other configurations. */
++ }
++ }
++ dev_info(&usb_dev->dev, "authorized to connect\n");
++
++error_device_descriptor:
++ usb_autosuspend_device(usb_dev);
++error_autoresume:
++out_authorized:
++ usb_unlock_device(usb_dev); /* complements locktree */
++ return result;
++}
++
++
++/* Returns 1 if @hub is a WUSB root hub, 0 otherwise */
++static unsigned hub_is_wusb(struct usb_hub *hub)
++{
++ struct usb_hcd *hcd;
++ if (hub->hdev->parent != NULL) /* not a root hub? */
++ return 0;
++ hcd = container_of(hub->hdev->bus, struct usb_hcd, self);
++ return hcd->wireless;
++}
++
++
++#define PORT_RESET_TRIES 5
++#define SET_ADDRESS_TRIES 2
++#define GET_DESCRIPTOR_TRIES 2
++#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
++#define USE_NEW_SCHEME(i) ((i) / 2 == (int)old_scheme_first)
++
++#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
++#define HUB_SHORT_RESET_TIME 10
++#define HUB_BH_RESET_TIME 50
++#define HUB_LONG_RESET_TIME 200
++#define HUB_RESET_TIMEOUT 800
++
++/*
++ * "New scheme" enumeration causes an extra state transition to be
++ * exposed to an xhci host and causes USB3 devices to receive control
++ * commands in the default state. This has been seen to cause
++ * enumeration failures, so disable this enumeration scheme for USB3
++ * devices.
++ */
++static bool use_new_scheme(struct usb_device *udev, int retry)
++{
++ if (udev->speed == USB_SPEED_SUPER)
++ return false;
++
++ return USE_NEW_SCHEME(retry);
++}
++
++static int hub_port_reset(struct usb_hub *hub, int port1,
++ struct usb_device *udev, unsigned int delay, bool warm);
++
++/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
++ * Port worm reset is required to recover
++ */
++static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
++{
++ return hub_is_superspeed(hub->hdev) &&
++ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_SS_INACTIVE) ||
++ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_COMP_MOD)) ;
++}
++
++static int hub_port_wait_reset(struct usb_hub *hub, int port1,
++ struct usb_device *udev, unsigned int delay, bool warm)
++{
++ int delay_time, ret;
++ u16 portstatus;
++ u16 portchange;
++
++ for (delay_time = 0;
++ delay_time < HUB_RESET_TIMEOUT;
++ delay_time += delay) {
++ /* wait to give the device a chance to reset */
++ msleep(delay);
++
++ /* read and decode port status */
++ ret = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (ret < 0)
++ return ret;
++
++ /* The port state is unknown until the reset completes. */
++ if (!(portstatus & USB_PORT_STAT_RESET))
++ break;
++
++ /* switch to the long delay after two short delay failures */
++ if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
++ delay = HUB_LONG_RESET_TIME;
++
++ dev_dbg (hub->intfdev,
++ "port %d not %sreset yet, waiting %dms\n",
++ port1, warm ? "warm " : "", delay);
++ }
++
++ if ((portstatus & USB_PORT_STAT_RESET))
++ return -EBUSY;
++
++ if (hub_port_warm_reset_required(hub, portstatus))
++ return -ENOTCONN;
++
++ /* Device went away? */
++ if (!(portstatus & USB_PORT_STAT_CONNECTION))
++ return -ENOTCONN;
++
++ /* bomb out completely if the connection bounced. A USB 3.0
++ * connection may bounce if multiple warm resets were issued,
++ * but the device may have successfully re-connected. Ignore it.
++ */
++ if (!hub_is_superspeed(hub->hdev) &&
++ (portchange & USB_PORT_STAT_C_CONNECTION))
++ return -ENOTCONN;
++
++ if (!(portstatus & USB_PORT_STAT_ENABLE))
++ return -EBUSY;
++
++ if (!udev)
++ return 0;
++
++ if (hub_is_wusb(hub))
++ udev->speed = USB_SPEED_WIRELESS;
++ else if (hub_is_superspeed(hub->hdev))
++ udev->speed = USB_SPEED_SUPER;
++ else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
++ udev->speed = USB_SPEED_HIGH;
++ else if (portstatus & USB_PORT_STAT_LOW_SPEED)
++ udev->speed = USB_SPEED_LOW;
++ else
++ udev->speed = USB_SPEED_FULL;
++ return 0;
++}
++
++static void hub_port_finish_reset(struct usb_hub *hub, int port1,
++ struct usb_device *udev, int *status)
++{
++ switch (*status) {
++ case 0:
++ /* TRSTRCY = 10 ms; plus some extra */
++ msleep(10 + 40);
++ if (udev) {
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ update_devnum(udev, 0);
++ /* The xHC may think the device is already reset,
++ * so ignore the status.
++ */
++ if (hcd->driver->reset_device)
++ hcd->driver->reset_device(hcd, udev);
++ }
++ /* FALL THROUGH */
++ case -ENOTCONN:
++ case -ENODEV:
++ usb_clear_port_feature(hub->hdev,
++ port1, USB_PORT_FEAT_C_RESET);
++ if (hub_is_superspeed(hub->hdev)) {
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_BH_PORT_RESET);
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_PORT_LINK_STATE);
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_CONNECTION);
++ }
++ if (udev)
++ usb_set_device_state(udev, *status
++ ? USB_STATE_NOTATTACHED
++ : USB_STATE_DEFAULT);
++ break;
++ }
++}
++
++/* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
++static int hub_port_reset(struct usb_hub *hub, int port1,
++ struct usb_device *udev, unsigned int delay, bool warm)
++{
++ int i, status;
++ u16 portchange, portstatus;
++
++ if (!hub_is_superspeed(hub->hdev)) {
++ if (warm) {
++ dev_err(hub->intfdev, "only USB3 hub support "
++ "warm reset\n");
++ return -EINVAL;
++ }
++ /* Block EHCI CF initialization during the port reset.
++ * Some companion controllers don't like it when they mix.
++ */
++ down_read(&ehci_cf_port_reset_rwsem);
++ } else if (!warm) {
++ /*
++ * If the caller hasn't explicitly requested a warm reset,
++ * double check and see if one is needed.
++ */
++ status = hub_port_status(hub, port1,
++ &portstatus, &portchange);
++ if (status < 0)
++ goto done;
++
++ if (hub_port_warm_reset_required(hub, portstatus))
++ warm = true;
++ }
++
++ /* Reset the port */
++ for (i = 0; i < PORT_RESET_TRIES; i++) {
++ status = set_port_feature(hub->hdev, port1, (warm ?
++ USB_PORT_FEAT_BH_PORT_RESET :
++ USB_PORT_FEAT_RESET));
++ if (status == -ENODEV) {
++ ; /* The hub is gone */
++ } else if (status) {
++ dev_err(hub->intfdev,
++ "cannot %sreset port %d (err = %d)\n",
++ warm ? "warm " : "", port1, status);
++ } else {
++ status = hub_port_wait_reset(hub, port1, udev, delay,
++ warm);
++ if (status && status != -ENOTCONN && status != -ENODEV)
++ dev_dbg(hub->intfdev,
++ "port_wait_reset: err = %d\n",
++ status);
++ }
++
++ /* Check for disconnect or reset */
++ if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
++ hub_port_finish_reset(hub, port1, udev, &status);
++
++ if (!hub_is_superspeed(hub->hdev))
++ goto done;
++
++ /*
++ * If a USB 3.0 device migrates from reset to an error
++ * state, re-issue the warm reset.
++ */
++ if (hub_port_status(hub, port1,
++ &portstatus, &portchange) < 0)
++ goto done;
++
++ if (!hub_port_warm_reset_required(hub, portstatus))
++ goto done;
++
++ /*
++ * If the port is in SS.Inactive or Compliance Mode, the
++ * hot or warm reset failed. Try another warm reset.
++ */
++ if (!warm) {
++ dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
++ port1);
++ warm = true;
++ }
++ }
++
++ dev_dbg (hub->intfdev,
++ "port %d not enabled, trying %sreset again...\n",
++ port1, warm ? "warm " : "");
++ delay = HUB_LONG_RESET_TIME;
++ }
++
++ dev_err (hub->intfdev,
++ "Cannot enable port %i. Maybe the USB cable is bad?\n",
++ port1);
++
++done:
++ if (!hub_is_superspeed(hub->hdev))
++ up_read(&ehci_cf_port_reset_rwsem);
++
++ return status;
++}
++
++/* Check if a port is power on */
++static int port_is_power_on(struct usb_hub *hub, unsigned portstatus)
++{
++ int ret = 0;
++
++ if (hub_is_superspeed(hub->hdev)) {
++ if (portstatus & USB_SS_PORT_STAT_POWER)
++ ret = 1;
++ } else {
++ if (portstatus & USB_PORT_STAT_POWER)
++ ret = 1;
++ }
++
++ return ret;
++}
++
++#ifdef CONFIG_PM
++
++/* Check if a port is suspended(USB2.0 port) or in U3 state(USB3.0 port) */
++static int port_is_suspended(struct usb_hub *hub, unsigned portstatus)
++{
++ int ret = 0;
++
++ if (hub_is_superspeed(hub->hdev)) {
++ if ((portstatus & USB_PORT_STAT_LINK_STATE)
++ == USB_SS_PORT_LS_U3)
++ ret = 1;
++ } else {
++ if (portstatus & USB_PORT_STAT_SUSPEND)
++ ret = 1;
++ }
++
++ return ret;
++}
++
++/* Determine whether the device on a port is ready for a normal resume,
++ * is ready for a reset-resume, or should be disconnected.
++ */
++static int check_port_resume_type(struct usb_device *udev,
++ struct usb_hub *hub, int port1,
++ int status, unsigned portchange, unsigned portstatus)
++{
++ /* Is the device still present? */
++ if (status || port_is_suspended(hub, portstatus) ||
++ !port_is_power_on(hub, portstatus) ||
++ !(portstatus & USB_PORT_STAT_CONNECTION)) {
++ if (status >= 0)
++ status = -ENODEV;
++ }
++
++ /* Can't do a normal resume if the port isn't enabled,
++ * so try a reset-resume instead.
++ */
++ else if (!(portstatus & USB_PORT_STAT_ENABLE) && !udev->reset_resume) {
++ if (udev->persist_enabled)
++ udev->reset_resume = 1;
++ else
++ status = -ENODEV;
++ }
++
++ if (status) {
++ dev_dbg(hub->intfdev,
++ "port %d status %04x.%04x after resume, %d\n",
++ port1, portchange, portstatus, status);
++ } else if (udev->reset_resume) {
++
++ /* Late port handoff can set status-change bits */
++ if (portchange & USB_PORT_STAT_C_CONNECTION)
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_CONNECTION);
++ if (portchange & USB_PORT_STAT_C_ENABLE)
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_ENABLE);
++ }
++
++ return status;
++}
++
++int usb_disable_ltm(struct usb_device *udev)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ /* Check if the roothub and device supports LTM. */
++ if (!usb_device_supports_ltm(hcd->self.root_hub) ||
++ !usb_device_supports_ltm(udev))
++ return 0;
++
++ /* Clear Feature LTM Enable can only be sent if the device is
++ * configured.
++ */
++ if (!udev->actconfig)
++ return 0;
++
++ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
++ USB_DEVICE_LTM_ENABLE, 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++}
++EXPORT_SYMBOL_GPL(usb_disable_ltm);
++
++void usb_enable_ltm(struct usb_device *udev)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ /* Check if the roothub and device supports LTM. */
++ if (!usb_device_supports_ltm(hcd->self.root_hub) ||
++ !usb_device_supports_ltm(udev))
++ return;
++
++ /* Set Feature LTM Enable can only be sent if the device is
++ * configured.
++ */
++ if (!udev->actconfig)
++ return;
++
++ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
++ USB_DEVICE_LTM_ENABLE, 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++}
++EXPORT_SYMBOL_GPL(usb_enable_ltm);
++
++/*
++ * usb_enable_remote_wakeup - enable remote wakeup for a device
++ * @udev: target device
++ *
++ * For USB-2 devices: Set the device's remote wakeup feature.
++ *
++ * For USB-3 devices: Assume there's only one function on the device and
++ * enable remote wake for the first interface. FIXME if the interface
++ * association descriptor shows there's more than one function.
++ */
++static int usb_enable_remote_wakeup(struct usb_device *udev)
++{
++ if (udev->speed < USB_SPEED_SUPER)
++ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
++ USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ else
++ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
++ USB_INTRF_FUNC_SUSPEND,
++ USB_INTRF_FUNC_SUSPEND_RW |
++ USB_INTRF_FUNC_SUSPEND_LP,
++ NULL, 0, USB_CTRL_SET_TIMEOUT);
++}
++
++/*
++ * usb_disable_remote_wakeup - disable remote wakeup for a device
++ * @udev: target device
++ *
++ * For USB-2 devices: Clear the device's remote wakeup feature.
++ *
++ * For USB-3 devices: Assume there's only one function on the device and
++ * disable remote wake for the first interface. FIXME if the interface
++ * association descriptor shows there's more than one function.
++ */
++static int usb_disable_remote_wakeup(struct usb_device *udev)
++{
++ if (udev->speed < USB_SPEED_SUPER)
++ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
++ USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ else
++ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
++ USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++}
++
++/* Count of wakeup-enabled devices at or below udev */
++static unsigned wakeup_enabled_descendants(struct usb_device *udev)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
++
++ return udev->do_remote_wakeup +
++ (hub ? hub->wakeup_enabled_descendants : 0);
++}
++
++/*
++ * usb_port_suspend - suspend a usb device's upstream port
++ * @udev: device that's no longer in active use, not a root hub
++ * Context: must be able to sleep; device not locked; pm locks held
++ *
++ * Suspends a USB device that isn't in active use, conserving power.
++ * Devices may wake out of a suspend, if anything important happens,
++ * using the remote wakeup mechanism. They may also be taken out of
++ * suspend by the host, using usb_port_resume(). It's also routine
++ * to disconnect devices while they are suspended.
++ *
++ * This only affects the USB hardware for a device; its interfaces
++ * (and, for hubs, child devices) must already have been suspended.
++ *
++ * Selective port suspend reduces power; most suspended devices draw
++ * less than 500 uA. It's also used in OTG, along with remote wakeup.
++ * All devices below the suspended port are also suspended.
++ *
++ * Devices leave suspend state when the host wakes them up. Some devices
++ * also support "remote wakeup", where the device can activate the USB
++ * tree above them to deliver data, such as a keypress or packet. In
++ * some cases, this wakes the USB host.
++ *
++ * Suspending OTG devices may trigger HNP, if that's been enabled
++ * between a pair of dual-role devices. That will change roles, such
++ * as from A-Host to A-Peripheral or from B-Host back to B-Peripheral.
++ *
++ * Devices on USB hub ports have only one "suspend" state, corresponding
++ * to ACPI D2, "may cause the device to lose some context".
++ * State transitions include:
++ *
++ * - suspend, resume ... when the VBUS power link stays live
++ * - suspend, disconnect ... VBUS lost
++ *
++ * Once VBUS drop breaks the circuit, the port it's using has to go through
++ * normal re-enumeration procedures, starting with enabling VBUS power.
++ * Other than re-initializing the hub (plug/unplug, except for root hubs),
++ * Linux (2.6) currently has NO mechanisms to initiate that: no khubd
++ * timer, no SRP, no requests through sysfs.
++ *
++ * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
++ * suspended until their bus goes into global suspend (i.e., the root
++ * hub is suspended). Nevertheless, we change @udev->state to
++ * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
++ * upstream port setting is stored in @udev->port_is_suspended.
++ *
++ * Returns 0 on success, else negative errno.
++ */
++int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
++ struct usb_port *port_dev = hub->ports[udev->portnum - 1];
++ int port1 = udev->portnum;
++ int status;
++ bool really_suspend = true;
++
++ /* enable remote wakeup when appropriate; this lets the device
++ * wake up the upstream hub (including maybe the root hub).
++ *
++ * NOTE: OTG devices may issue remote wakeup (or SRP) even when
++ * we don't explicitly enable it here.
++ */
++ if (udev->do_remote_wakeup) {
++ status = usb_enable_remote_wakeup(udev);
++ if (status) {
++ dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
++ status);
++ /* bail if autosuspend is requested */
++ if (PMSG_IS_AUTO(msg))
++ goto err_wakeup;
++ }
++ }
++
++ /* disable USB2 hardware LPM */
++ if (udev->usb2_hw_lpm_enabled == 1)
++ usb_set_usb2_hardware_lpm(udev, 0);
++
++ if (usb_disable_ltm(udev)) {
++ dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
++ status = -ENOMEM;
++ if (PMSG_IS_AUTO(msg))
++ goto err_ltm;
++ }
++ if (usb_unlocked_disable_lpm(udev)) {
++ dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
++ status = -ENOMEM;
++ if (PMSG_IS_AUTO(msg))
++ goto err_lpm3;
++ }
++
++ /* see 7.1.7.6 */
++ if (hub_is_superspeed(hub->hdev))
++ status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
++
++ /*
++ * For system suspend, we do not need to enable the suspend feature
++ * on individual USB-2 ports. The devices will automatically go
++ * into suspend a few ms after the root hub stops sending packets.
++ * The USB 2.0 spec calls this "global suspend".
++ *
++ * However, many USB hubs have a bug: They don't relay wakeup requests
++ * from a downstream port if the port's suspend feature isn't on.
++ * Therefore we will turn on the suspend feature if udev or any of its
++ * descendants is enabled for remote wakeup.
++ */
++ else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
++ status = set_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_SUSPEND);
++ else {
++ really_suspend = false;
++ status = 0;
++ }
++ if (status) {
++ dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
++ port1, status);
++
++ /* Try to enable USB3 LPM and LTM again */
++ usb_unlocked_enable_lpm(udev);
++ err_lpm3:
++ usb_enable_ltm(udev);
++ err_ltm:
++ /* Try to enable USB2 hardware LPM again */
++ if (udev->usb2_hw_lpm_capable == 1)
++ usb_set_usb2_hardware_lpm(udev, 1);
++
++ if (udev->do_remote_wakeup)
++ (void) usb_disable_remote_wakeup(udev);
++ err_wakeup:
++
++ /* System sleep transitions should never fail */
++ if (!PMSG_IS_AUTO(msg))
++ status = 0;
++ } else {
++ dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
++ (PMSG_IS_AUTO(msg) ? "auto-" : ""),
++ udev->do_remote_wakeup);
++ if (really_suspend) {
++ udev->port_is_suspended = 1;
++
++ /* device has up to 10 msec to fully suspend */
++ msleep(10);
++ }
++ usb_set_device_state(udev, USB_STATE_SUSPENDED);
++ }
++
++ if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) {
++ pm_runtime_put_sync(&port_dev->dev);
++ port_dev->did_runtime_put = true;
++ }
++
++ usb_mark_last_busy(hub->hdev);
++ return status;
++}
++
++/*
++ * If the USB "suspend" state is in use (rather than "global suspend"),
++ * many devices will be individually taken out of suspend state using
++ * special "resume" signaling. This routine kicks in shortly after
++ * hardware resume signaling is finished, either because of selective
++ * resume (by host) or remote wakeup (by device) ... now see what changed
++ * in the tree that's rooted at this device.
++ *
++ * If @udev->reset_resume is set then the device is reset before the
++ * status check is done.
++ */
++static int finish_port_resume(struct usb_device *udev)
++{
++ int status = 0;
++ u16 devstatus = 0;
++
++ /* caller owns the udev device lock */
++ dev_dbg(&udev->dev, "%s\n",
++ udev->reset_resume ? "finish reset-resume" : "finish resume");
++
++ /* usb ch9 identifies four variants of SUSPENDED, based on what
++ * state the device resumes to. Linux currently won't see the
++ * first two on the host side; they'd be inside hub_port_init()
++ * during many timeouts, but khubd can't suspend until later.
++ */
++ usb_set_device_state(udev, udev->actconfig
++ ? USB_STATE_CONFIGURED
++ : USB_STATE_ADDRESS);
++
++ /* 10.5.4.5 says not to reset a suspended port if the attached
++ * device is enabled for remote wakeup. Hence the reset
++ * operation is carried out here, after the port has been
++ * resumed.
++ */
++ if (udev->reset_resume)
++ retry_reset_resume:
++ status = usb_reset_and_verify_device(udev);
++
++ /* 10.5.4.5 says be sure devices in the tree are still there.
++ * For now let's assume the device didn't go crazy on resume,
++ * and device drivers will know about any resume quirks.
++ */
++ if (status == 0) {
++ devstatus = 0;
++ status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
++
++ /* If a normal resume failed, try doing a reset-resume */
++ if (status && !udev->reset_resume && udev->persist_enabled) {
++ dev_dbg(&udev->dev, "retry with reset-resume\n");
++ udev->reset_resume = 1;
++ goto retry_reset_resume;
++ }
++ }
++
++ if (status) {
++ dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
++ status);
++ /*
++ * There are a few quirky devices which violate the standard
++ * by claiming to have remote wakeup enabled after a reset,
++ * which crash if the feature is cleared, hence check for
++ * udev->reset_resume
++ */
++ } else if (udev->actconfig && !udev->reset_resume) {
++ if (udev->speed < USB_SPEED_SUPER) {
++ if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
++ status = usb_disable_remote_wakeup(udev);
++ } else {
++ status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
++ &devstatus);
++ if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
++ | USB_INTRF_STAT_FUNC_RW))
++ status = usb_disable_remote_wakeup(udev);
++ }
++
++ if (status)
++ dev_dbg(&udev->dev,
++ "disable remote wakeup, status %d\n",
++ status);
++ status = 0;
++ }
++ return status;
++}
++
++/*
++ * There are some SS USB devices which take longer time for link training.
++ * XHCI specs 4.19.4 says that when Link training is successful, port
++ * sets CSC bit to 1. So if SW reads port status before successful link
++ * training, then it will not find device to be present.
++ * USB Analyzer log with such buggy devices show that in some cases
++ * device switch on the RX termination after long delay of host enabling
++ * the VBUS. In few other cases it has been seen that device fails to
++ * negotiate link training in first attempt. It has been
++ * reported till now that few devices take as long as 2000 ms to train
++ * the link after host enabling its VBUS and termination. Following
++ * routine implements a 2000 ms timeout for link training. If in a case
++ * link trains before timeout, loop will exit earlier.
++ *
++ * FIXME: If a device was connected before suspend, but was removed
++ * while system was asleep, then the loop in the following routine will
++ * only exit at timeout.
++ *
++ * This routine should only be called when persist is enabled for a SS
++ * device.
++ */
++static int wait_for_ss_port_enable(struct usb_device *udev,
++ struct usb_hub *hub, int *port1,
++ u16 *portchange, u16 *portstatus)
++{
++ int status = 0, delay_ms = 0;
++
++ while (delay_ms < 2000) {
++ if (status || *portstatus & USB_PORT_STAT_CONNECTION)
++ break;
++ msleep(20);
++ delay_ms += 20;
++ status = hub_port_status(hub, *port1, portstatus, portchange);
++ }
++ return status;
++}
++
++/*
++ * usb_port_resume - re-activate a suspended usb device's upstream port
++ * @udev: device to re-activate, not a root hub
++ * Context: must be able to sleep; device not locked; pm locks held
++ *
++ * This will re-activate the suspended device, increasing power usage
++ * while letting drivers communicate again with its endpoints.
++ * USB resume explicitly guarantees that the power session between
++ * the host and the device is the same as it was when the device
++ * suspended.
++ *
++ * If @udev->reset_resume is set then this routine won't check that the
++ * port is still enabled. Furthermore, finish_port_resume() above will
++ * reset @udev. The end result is that a broken power session can be
++ * recovered and @udev will appear to persist across a loss of VBUS power.
++ *
++ * For example, if a host controller doesn't maintain VBUS suspend current
++ * during a system sleep or is reset when the system wakes up, all the USB
++ * power sessions below it will be broken. This is especially troublesome
++ * for mass-storage devices containing mounted filesystems, since the
++ * device will appear to have disconnected and all the memory mappings
++ * to it will be lost. Using the USB_PERSIST facility, the device can be
++ * made to appear as if it had not disconnected.
++ *
++ * This facility can be dangerous. Although usb_reset_and_verify_device() makes
++ * every effort to insure that the same device is present after the
++ * reset as before, it cannot provide a 100% guarantee. Furthermore it's
++ * quite possible for a device to remain unaltered but its media to be
++ * changed. If the user replaces a flash memory card while the system is
++ * asleep, he will have only himself to blame when the filesystem on the
++ * new card is corrupted and the system crashes.
++ *
++ * Returns 0 on success, else negative errno.
++ */
++int usb_port_resume(struct usb_device *udev, pm_message_t msg)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
++ struct usb_port *port_dev = hub->ports[udev->portnum - 1];
++ int port1 = udev->portnum;
++ int status;
++ u16 portchange, portstatus;
++
++ if (port_dev->did_runtime_put) {
++ status = pm_runtime_get_sync(&port_dev->dev);
++ port_dev->did_runtime_put = false;
++ if (status < 0) {
++ dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
++ status);
++ return status;
++ }
++ }
++
++ /* Skip the initial Clear-Suspend step for a remote wakeup */
++ status = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (status == 0 && !port_is_suspended(hub, portstatus))
++ goto SuspendCleared;
++
++ /* dev_dbg(hub->intfdev, "resume port %d\n", port1); */
++
++ set_bit(port1, hub->busy_bits);
++
++ /* see 7.1.7.7; affects power usage, but not budgeting */
++ if (hub_is_superspeed(hub->hdev))
++ status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U0);
++ else
++ status = usb_clear_port_feature(hub->hdev,
++ port1, USB_PORT_FEAT_SUSPEND);
++ if (status) {
++ dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
++ port1, status);
++ } else {
++ /* drive resume for at least 20 msec */
++ dev_dbg(&udev->dev, "usb %sresume\n",
++ (PMSG_IS_AUTO(msg) ? "auto-" : ""));
++ msleep(25);
++
++ /* Virtual root hubs can trigger on GET_PORT_STATUS to
++ * stop resume signaling. Then finish the resume
++ * sequence.
++ */
++ status = hub_port_status(hub, port1, &portstatus, &portchange);
++
++ /* TRSMRCY = 10 msec */
++ msleep(10);
++ }
++
++ SuspendCleared:
++ if (status == 0) {
++ udev->port_is_suspended = 0;
++ if (hub_is_superspeed(hub->hdev)) {
++ if (portchange & USB_PORT_STAT_C_LINK_STATE)
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_PORT_LINK_STATE);
++ } else {
++ if (portchange & USB_PORT_STAT_C_SUSPEND)
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_SUSPEND);
++ }
++ }
++
++ clear_bit(port1, hub->busy_bits);
++
++ if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
++ status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
++ &portstatus);
++
++ status = check_port_resume_type(udev,
++ hub, port1, status, portchange, portstatus);
++ if (status == 0)
++ status = finish_port_resume(udev);
++ if (status < 0) {
++ dev_dbg(&udev->dev, "can't resume, status %d\n", status);
++ hub_port_logical_disconnect(hub, port1);
++ } else {
++ /* Try to enable USB2 hardware LPM */
++ if (udev->usb2_hw_lpm_capable == 1)
++ usb_set_usb2_hardware_lpm(udev, 1);
++
++ /* Try to enable USB3 LTM and LPM */
++ usb_enable_ltm(udev);
++ usb_unlocked_enable_lpm(udev);
++ }
++
++ return status;
++}
++
++#ifdef CONFIG_PM_RUNTIME
++
++/* caller has locked udev */
++int usb_remote_wakeup(struct usb_device *udev)
++{
++ int status = 0;
++
++ if (udev->state == USB_STATE_SUSPENDED) {
++ dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
++ status = usb_autoresume_device(udev);
++ if (status == 0) {
++ /* Let the drivers do their thing, then... */
++ usb_autosuspend_device(udev);
++ }
++ }
++ return status;
++}
++
++#endif
++
++static int check_ports_changed(struct usb_hub *hub)
++{
++ int port1;
++
++ for (port1 = 1; port1 <= hub->hdev->maxchild; ++port1) {
++ u16 portstatus, portchange;
++ int status;
++
++ status = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (!status && portchange)
++ return 1;
++ }
++ return 0;
++}
++
++static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
++{
++ struct usb_hub *hub = usb_get_intfdata (intf);
++ struct usb_device *hdev = hub->hdev;
++ unsigned port1;
++ int status;
++
++ /*
++ * Warn if children aren't already suspended.
++ * Also, add up the number of wakeup-enabled descendants.
++ */
++ hub->wakeup_enabled_descendants = 0;
++ for (port1 = 1; port1 <= hdev->maxchild; port1++) {
++ struct usb_device *udev;
++
++ udev = hub->ports[port1 - 1]->child;
++ if (udev && udev->can_submit) {
++ dev_warn(&intf->dev, "port %d not suspended yet\n",
++ port1);
++ if (PMSG_IS_AUTO(msg))
++ return -EBUSY;
++ }
++ if (udev)
++ hub->wakeup_enabled_descendants +=
++ wakeup_enabled_descendants(udev);
++ }
++
++ if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
++ /* check if there are changes pending on hub ports */
++ if (check_ports_changed(hub)) {
++ if (PMSG_IS_AUTO(msg))
++ return -EBUSY;
++ pm_wakeup_event(&hdev->dev, 2000);
++ }
++ }
++
++ if (hub_is_superspeed(hdev) && hdev->do_remote_wakeup) {
++ /* Enable hub to send remote wakeup for all ports. */
++ for (port1 = 1; port1 <= hdev->maxchild; port1++) {
++ status = set_port_feature(hdev,
++ port1 |
++ USB_PORT_FEAT_REMOTE_WAKE_CONNECT |
++ USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT |
++ USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT,
++ USB_PORT_FEAT_REMOTE_WAKE_MASK);
++ }
++ }
++
++ dev_dbg(&intf->dev, "%s\n", __func__);
++
++ /* stop khubd and related activity */
++ hub_quiesce(hub, HUB_SUSPEND);
++ return 0;
++}
++
++static int hub_resume(struct usb_interface *intf)
++{
++ struct usb_hub *hub = usb_get_intfdata(intf);
++
++ dev_dbg(&intf->dev, "%s\n", __func__);
++ hub_activate(hub, HUB_RESUME);
++ return 0;
++}
++
++static int hub_reset_resume(struct usb_interface *intf)
++{
++ struct usb_hub *hub = usb_get_intfdata(intf);
++
++ dev_dbg(&intf->dev, "%s\n", __func__);
++ hub_activate(hub, HUB_RESET_RESUME);
++ return 0;
++}
++
++/**
++ * usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power
++ * @rhdev: struct usb_device for the root hub
++ *
++ * The USB host controller driver calls this function when its root hub
++ * is resumed and Vbus power has been interrupted or the controller
++ * has been reset. The routine marks @rhdev as having lost power.
++ * When the hub driver is resumed it will take notice and carry out
++ * power-session recovery for all the "USB-PERSIST"-enabled child devices;
++ * the others will be disconnected.
++ */
++void usb_root_hub_lost_power(struct usb_device *rhdev)
++{
++ dev_warn(&rhdev->dev, "root hub lost power or was reset\n");
++ rhdev->reset_resume = 1;
++}
++EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
++
++static const char * const usb3_lpm_names[] = {
++ "U0",
++ "U1",
++ "U2",
++ "U3",
++};
++
++/*
++ * Send a Set SEL control transfer to the device, prior to enabling
++ * device-initiated U1 or U2. This lets the device know the exit latencies from
++ * the time the device initiates a U1 or U2 exit, to the time it will receive a
++ * packet from the host.
++ *
++ * This function will fail if the SEL or PEL values for udev are greater than
++ * the maximum allowed values for the link state to be enabled.
++ */
++static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
++{
++ struct usb_set_sel_req *sel_values;
++ unsigned long long u1_sel;
++ unsigned long long u1_pel;
++ unsigned long long u2_sel;
++ unsigned long long u2_pel;
++ int ret;
++
++ if (udev->state != USB_STATE_CONFIGURED)
++ return 0;
++
++ /* Convert SEL and PEL stored in ns to us */
++ u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
++ u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
++ u2_sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
++ u2_pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
++
++ /*
++ * Make sure that the calculated SEL and PEL values for the link
++ * state we're enabling aren't bigger than the max SEL/PEL
++ * value that will fit in the SET SEL control transfer.
++ * Otherwise the device would get an incorrect idea of the exit
++ * latency for the link state, and could start a device-initiated
++ * U1/U2 when the exit latencies are too high.
++ */
++ if ((state == USB3_LPM_U1 &&
++ (u1_sel > USB3_LPM_MAX_U1_SEL_PEL ||
++ u1_pel > USB3_LPM_MAX_U1_SEL_PEL)) ||
++ (state == USB3_LPM_U2 &&
++ (u2_sel > USB3_LPM_MAX_U2_SEL_PEL ||
++ u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) {
++ dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us or PEL %llu us\n",
++ usb3_lpm_names[state], u1_sel, u1_pel);
++ return -EINVAL;
++ }
++
++ /*
++ * If we're enabling device-initiated LPM for one link state,
++ * but the other link state has a too high SEL or PEL value,
++ * just set those values to the max in the Set SEL request.
++ */
++ if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL)
++ u1_sel = USB3_LPM_MAX_U1_SEL_PEL;
++
++ if (u1_pel > USB3_LPM_MAX_U1_SEL_PEL)
++ u1_pel = USB3_LPM_MAX_U1_SEL_PEL;
++
++ if (u2_sel > USB3_LPM_MAX_U2_SEL_PEL)
++ u2_sel = USB3_LPM_MAX_U2_SEL_PEL;
++
++ if (u2_pel > USB3_LPM_MAX_U2_SEL_PEL)
++ u2_pel = USB3_LPM_MAX_U2_SEL_PEL;
++
++ /*
++ * usb_enable_lpm() can be called as part of a failed device reset,
++ * which may be initiated by an error path of a mass storage driver.
++ * Therefore, use GFP_NOIO.
++ */
++ sel_values = kmalloc(sizeof *(sel_values), GFP_NOIO);
++ if (!sel_values)
++ return -ENOMEM;
++
++ sel_values->u1_sel = u1_sel;
++ sel_values->u1_pel = u1_pel;
++ sel_values->u2_sel = cpu_to_le16(u2_sel);
++ sel_values->u2_pel = cpu_to_le16(u2_pel);
++
++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_SET_SEL,
++ USB_RECIP_DEVICE,
++ 0, 0,
++ sel_values, sizeof *(sel_values),
++ USB_CTRL_SET_TIMEOUT);
++ kfree(sel_values);
++ return ret;
++}
++
++/*
++ * Enable or disable device-initiated U1 or U2 transitions.
++ */
++static int usb_set_device_initiated_lpm(struct usb_device *udev,
++ enum usb3_link_state state, bool enable)
++{
++ int ret;
++ int feature;
++
++ switch (state) {
++ case USB3_LPM_U1:
++ feature = USB_DEVICE_U1_ENABLE;
++ break;
++ case USB3_LPM_U2:
++ feature = USB_DEVICE_U2_ENABLE;
++ break;
++ default:
++ dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n",
++ __func__, enable ? "enable" : "disable");
++ return -EINVAL;
++ }
++
++ if (udev->state != USB_STATE_CONFIGURED) {
++ dev_dbg(&udev->dev, "%s: Can't %s %s state "
++ "for unconfigured device.\n",
++ __func__, enable ? "enable" : "disable",
++ usb3_lpm_names[state]);
++ return 0;
++ }
++
++ if (enable) {
++ /*
++ * Now send the control transfer to enable device-initiated LPM
++ * for either U1 or U2.
++ */
++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_SET_FEATURE,
++ USB_RECIP_DEVICE,
++ feature,
++ 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ } else {
++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_CLEAR_FEATURE,
++ USB_RECIP_DEVICE,
++ feature,
++ 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ }
++ if (ret < 0) {
++ dev_warn(&udev->dev, "%s of device-initiated %s failed.\n",
++ enable ? "Enable" : "Disable",
++ usb3_lpm_names[state]);
++ return -EBUSY;
++ }
++ return 0;
++}
++
++static int usb_set_lpm_timeout(struct usb_device *udev,
++ enum usb3_link_state state, int timeout)
++{
++ int ret;
++ int feature;
++
++ switch (state) {
++ case USB3_LPM_U1:
++ feature = USB_PORT_FEAT_U1_TIMEOUT;
++ break;
++ case USB3_LPM_U2:
++ feature = USB_PORT_FEAT_U2_TIMEOUT;
++ break;
++ default:
++ dev_warn(&udev->dev, "%s: Can't set timeout for non-U1 or U2 state.\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ if (state == USB3_LPM_U1 && timeout > USB3_LPM_U1_MAX_TIMEOUT &&
++ timeout != USB3_LPM_DEVICE_INITIATED) {
++ dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, "
++ "which is a reserved value.\n",
++ usb3_lpm_names[state], timeout);
++ return -EINVAL;
++ }
++
++ ret = set_port_feature(udev->parent,
++ USB_PORT_LPM_TIMEOUT(timeout) | udev->portnum,
++ feature);
++ if (ret < 0) {
++ dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x,"
++ "error code %i\n", usb3_lpm_names[state],
++ timeout, ret);
++ return -EBUSY;
++ }
++ if (state == USB3_LPM_U1)
++ udev->u1_params.timeout = timeout;
++ else
++ udev->u2_params.timeout = timeout;
++ return 0;
++}
++
++/*
++ * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
++ * U1/U2 entry.
++ *
++ * We will attempt to enable U1 or U2, but there are no guarantees that the
++ * control transfers to set the hub timeout or enable device-initiated U1/U2
++ * will be successful.
++ *
++ * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
++ * driver know about it. If that call fails, it should be harmless, and just
++ * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency.
++ */
++static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
++ enum usb3_link_state state)
++{
++ int timeout, ret;
++ __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
++ __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
++
++ /* If the device says it doesn't have *any* exit latency to come out of
++ * U1 or U2, it's probably lying. Assume it doesn't implement that link
++ * state.
++ */
++ if ((state == USB3_LPM_U1 && u1_mel == 0) ||
++ (state == USB3_LPM_U2 && u2_mel == 0))
++ return;
++
++ /*
++ * First, let the device know about the exit latencies
++ * associated with the link state we're about to enable.
++ */
++ ret = usb_req_set_sel(udev, state);
++ if (ret < 0) {
++ dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n",
++ usb3_lpm_names[state]);
++ return;
++ }
++
++ /* We allow the host controller to set the U1/U2 timeout internally
++ * first, so that it can change its schedule to account for the
++ * additional latency to send data to a device in a lower power
++ * link state.
++ */
++ timeout = hcd->driver->enable_usb3_lpm_timeout(hcd, udev, state);
++
++ /* xHCI host controller doesn't want to enable this LPM state. */
++ if (timeout == 0)
++ return;
++
++ if (timeout < 0) {
++ dev_warn(&udev->dev, "Could not enable %s link state, "
++ "xHCI error %i.\n", usb3_lpm_names[state],
++ timeout);
++ return;
++ }
++
++ if (usb_set_lpm_timeout(udev, state, timeout))
++ /* If we can't set the parent hub U1/U2 timeout,
++ * device-initiated LPM won't be allowed either, so let the xHCI
++ * host know that this link state won't be enabled.
++ */
++ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
++
++ /* Only a configured device will accept the Set Feature U1/U2_ENABLE */
++ else if (udev->actconfig)
++ usb_set_device_initiated_lpm(udev, state, true);
++
++}
++
++/*
++ * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
++ * U1/U2 entry.
++ *
++ * If this function returns -EBUSY, the parent hub will still allow U1/U2 entry.
++ * If zero is returned, the parent will not allow the link to go into U1/U2.
++ *
++ * If zero is returned, device-initiated U1/U2 entry may still be enabled, but
++ * it won't have an effect on the bus link state because the parent hub will
++ * still disallow device-initiated U1/U2 entry.
++ *
++ * If zero is returned, the xHCI host controller may still think U1/U2 entry is
++ * possible. The result will be slightly more bus bandwidth will be taken up
++ * (to account for U1/U2 exit latency), but it should be harmless.
++ */
++static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
++ enum usb3_link_state state)
++{
++ int feature;
++
++ switch (state) {
++ case USB3_LPM_U1:
++ feature = USB_PORT_FEAT_U1_TIMEOUT;
++ break;
++ case USB3_LPM_U2:
++ feature = USB_PORT_FEAT_U2_TIMEOUT;
++ break;
++ default:
++ dev_warn(&udev->dev, "%s: Can't disable non-U1 or U2 state.\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ if (usb_set_lpm_timeout(udev, state, 0))
++ return -EBUSY;
++
++ usb_set_device_initiated_lpm(udev, state, false);
++
++ if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state))
++ dev_warn(&udev->dev, "Could not disable xHCI %s timeout, "
++ "bus schedule bandwidth may be impacted.\n",
++ usb3_lpm_names[state]);
++ return 0;
++}
++
++/*
++ * Disable hub-initiated and device-initiated U1 and U2 entry.
++ * Caller must own the bandwidth_mutex.
++ *
++ * This will call usb_enable_lpm() on failure, which will decrement
++ * lpm_disable_count, and will re-enable LPM if lpm_disable_count reaches zero.
++ */
++int usb_disable_lpm(struct usb_device *udev)
++{
++ struct usb_hcd *hcd;
++
++ if (!udev || !udev->parent ||
++ udev->speed != USB_SPEED_SUPER ||
++ !udev->lpm_capable)
++ return 0;
++
++ hcd = bus_to_hcd(udev->bus);
++ if (!hcd || !hcd->driver->disable_usb3_lpm_timeout)
++ return 0;
++
++ udev->lpm_disable_count++;
++ if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
++ return 0;
++
++ /* If LPM is enabled, attempt to disable it. */
++ if (usb_disable_link_state(hcd, udev, USB3_LPM_U1))
++ goto enable_lpm;
++ if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
++ goto enable_lpm;
++
++ return 0;
++
++enable_lpm:
++ usb_enable_lpm(udev);
++ return -EBUSY;
++}
++EXPORT_SYMBOL_GPL(usb_disable_lpm);
++
++/* Grab the bandwidth_mutex before calling usb_disable_lpm() */
++int usb_unlocked_disable_lpm(struct usb_device *udev)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++ int ret;
++
++ if (!hcd)
++ return -EINVAL;
++
++ mutex_lock(hcd->bandwidth_mutex);
++ ret = usb_disable_lpm(udev);
++ mutex_unlock(hcd->bandwidth_mutex);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm);
++
++/*
++ * Attempt to enable device-initiated and hub-initiated U1 and U2 entry. The
++ * xHCI host policy may prevent U1 or U2 from being enabled.
++ *
++ * Other callers may have disabled link PM, so U1 and U2 entry will be disabled
++ * until the lpm_disable_count drops to zero. Caller must own the
++ * bandwidth_mutex.
++ */
++void usb_enable_lpm(struct usb_device *udev)
++{
++ struct usb_hcd *hcd;
++
++ if (!udev || !udev->parent ||
++ udev->speed != USB_SPEED_SUPER ||
++ !udev->lpm_capable)
++ return;
++
++ udev->lpm_disable_count--;
++ hcd = bus_to_hcd(udev->bus);
++ /* Double check that we can both enable and disable LPM.
++ * Device must be configured to accept set feature U1/U2 timeout.
++ */
++ if (!hcd || !hcd->driver->enable_usb3_lpm_timeout ||
++ !hcd->driver->disable_usb3_lpm_timeout)
++ return;
++
++ if (udev->lpm_disable_count > 0)
++ return;
++
++ usb_enable_link_state(hcd, udev, USB3_LPM_U1);
++ usb_enable_link_state(hcd, udev, USB3_LPM_U2);
++}
++EXPORT_SYMBOL_GPL(usb_enable_lpm);
++
++/* Grab the bandwidth_mutex before calling usb_enable_lpm() */
++void usb_unlocked_enable_lpm(struct usb_device *udev)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ if (!hcd)
++ return;
++
++ mutex_lock(hcd->bandwidth_mutex);
++ usb_enable_lpm(udev);
++ mutex_unlock(hcd->bandwidth_mutex);
++}
++EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
++
++
++#else /* CONFIG_PM */
++
++#define hub_suspend NULL
++#define hub_resume NULL
++#define hub_reset_resume NULL
++
++int usb_disable_lpm(struct usb_device *udev)
++{
++ return 0;
++}
++EXPORT_SYMBOL_GPL(usb_disable_lpm);
++
++void usb_enable_lpm(struct usb_device *udev) { }
++EXPORT_SYMBOL_GPL(usb_enable_lpm);
++
++int usb_unlocked_disable_lpm(struct usb_device *udev)
++{
++ return 0;
++}
++EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm);
++
++void usb_unlocked_enable_lpm(struct usb_device *udev) { }
++EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
++
++int usb_disable_ltm(struct usb_device *udev)
++{
++ return 0;
++}
++EXPORT_SYMBOL_GPL(usb_disable_ltm);
++
++void usb_enable_ltm(struct usb_device *udev) { }
++EXPORT_SYMBOL_GPL(usb_enable_ltm);
++
++static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
++ u16 portstatus, u16 portchange)
++{
++ return 0;
++}
++
++#endif /* CONFIG_PM */
++
++
++/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
++ *
++ * Between connect detection and reset signaling there must be a delay
++ * of 100ms at least for debounce and power-settling. The corresponding
++ * timer shall restart whenever the downstream port detects a disconnect.
++ *
++ * Apparently there are some bluetooth and irda-dongles and a number of
++ * low-speed devices for which this debounce period may last over a second.
++ * Not covered by the spec - but easy to deal with.
++ *
++ * This implementation uses a 1500ms total debounce timeout; if the
++ * connection isn't stable by then it returns -ETIMEDOUT. It checks
++ * every 25ms for transient disconnects. When the port status has been
++ * unchanged for 100ms it returns the port status.
++ */
++int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected)
++{
++ int ret;
++ int total_time, stable_time = 0;
++ u16 portchange, portstatus;
++ unsigned connection = 0xffff;
++
++ for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
++ ret = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (ret < 0)
++ return ret;
++
++ if (!(portchange & USB_PORT_STAT_C_CONNECTION) &&
++ (portstatus & USB_PORT_STAT_CONNECTION) == connection) {
++ if (!must_be_connected ||
++ (connection == USB_PORT_STAT_CONNECTION))
++ stable_time += HUB_DEBOUNCE_STEP;
++ if (stable_time >= HUB_DEBOUNCE_STABLE)
++ break;
++ } else {
++ stable_time = 0;
++ connection = portstatus & USB_PORT_STAT_CONNECTION;
++ }
++
++ if (portchange & USB_PORT_STAT_C_CONNECTION) {
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_CONNECTION);
++ }
++
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ break;
++ msleep(HUB_DEBOUNCE_STEP);
++ }
++
++ dev_dbg (hub->intfdev,
++ "debounce: port %d: total %dms stable %dms status 0x%x\n",
++ port1, total_time, stable_time, portstatus);
++
++ if (stable_time < HUB_DEBOUNCE_STABLE)
++ return -ETIMEDOUT;
++ return portstatus;
++}
++
++void usb_ep0_reinit(struct usb_device *udev)
++{
++ usb_disable_endpoint(udev, 0 + USB_DIR_IN, true);
++ usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true);
++ usb_enable_endpoint(udev, &udev->ep0, true);
++}
++EXPORT_SYMBOL_GPL(usb_ep0_reinit);
++
++#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
++#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
++
++static int hub_set_address(struct usb_device *udev, int devnum)
++{
++ int retval;
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ /*
++ * The host controller will choose the device address,
++ * instead of the core having chosen it earlier
++ */
++ if (!hcd->driver->address_device && devnum <= 1)
++ return -EINVAL;
++ if (udev->state == USB_STATE_ADDRESS)
++ return 0;
++ if (udev->state != USB_STATE_DEFAULT)
++ return -EINVAL;
++ if (hcd->driver->address_device)
++ retval = hcd->driver->address_device(hcd, udev);
++ else
++ retval = usb_control_msg(udev, usb_sndaddr0pipe(),
++ USB_REQ_SET_ADDRESS, 0, devnum, 0,
++ NULL, 0, USB_CTRL_SET_TIMEOUT);
++ if (retval == 0) {
++ update_devnum(udev, devnum);
++ /* Device now using proper address. */
++ usb_set_device_state(udev, USB_STATE_ADDRESS);
++ usb_ep0_reinit(udev);
++ }
++ return retval;
++}
++
++/*
++ * There are reports of USB 3.0 devices that say they support USB 2.0 Link PM
++ * when they're plugged into a USB 2.0 port, but they don't work when LPM is
++ * enabled.
++ *
++ * Only enable USB 2.0 Link PM if the port is internal (hardwired), or the
++ * device says it supports the new USB 2.0 Link PM errata by setting the BESL
++ * support bit in the BOS descriptor.
++ */
++static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
++{
++ int connect_type;
++
++ if (!udev->usb2_hw_lpm_capable)
++ return;
++
++ connect_type = usb_get_hub_port_connect_type(udev->parent,
++ udev->portnum);
++
++ if ((udev->bos->ext_cap->bmAttributes & USB_BESL_SUPPORT) ||
++ connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
++ udev->usb2_hw_lpm_allowed = 1;
++ usb_set_usb2_hardware_lpm(udev, 1);
++ }
++}
++
++static int hub_enable_device(struct usb_device *udev)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ if (!hcd->driver->enable_device)
++ return 0;
++ if (udev->state == USB_STATE_ADDRESS)
++ return 0;
++ if (udev->state != USB_STATE_DEFAULT)
++ return -EINVAL;
++
++ return hcd->driver->enable_device(hcd, udev);
++}
++
++/* Reset device, (re)assign address, get device descriptor.
++ * Device connection must be stable, no more debouncing needed.
++ * Returns device in USB_STATE_ADDRESS, except on error.
++ *
++ * If this is called for an already-existing device (as part of
++ * usb_reset_and_verify_device), the caller must own the device lock. For a
++ * newly detected device that is not accessible through any global
++ * pointers, it's not necessary to lock the device.
++ */
++static int
++hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
++ int retry_counter)
++{
++ static DEFINE_MUTEX(usb_address0_mutex);
++
++ struct usb_device *hdev = hub->hdev;
++ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
++ int i, j, retval;
++ unsigned delay = HUB_SHORT_RESET_TIME;
++ enum usb_device_speed oldspeed = udev->speed;
++ const char *speed;
++ int devnum = udev->devnum;
++
++ /* root hub ports have a slightly longer reset period
++ * (from USB 2.0 spec, section 7.1.7.5)
++ */
++ if (!hdev->parent) {
++ delay = HUB_ROOT_RESET_TIME;
++ if (port1 == hdev->bus->otg_port)
++ hdev->bus->b_hnp_enable = 0;
++ }
++
++ /* Some low speed devices have problems with the quick delay, so */
++ /* be a bit pessimistic with those devices. RHbug #23670 */
++ if (oldspeed == USB_SPEED_LOW)
++ delay = HUB_LONG_RESET_TIME;
++
++ mutex_lock(&usb_address0_mutex);
++
++ /* Reset the device; full speed may morph to high speed */
++ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
++ retval = hub_port_reset(hub, port1, udev, delay, false);
++ if (retval < 0) /* error or disconnect */
++ goto fail;
++ /* success, speed is known */
++
++ retval = -ENODEV;
++
++ if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
++ dev_dbg(&udev->dev, "device reset changed speed!\n");
++ goto fail;
++ }
++ oldspeed = udev->speed;
++
++ /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
++ * it's fixed size except for full speed devices.
++ * For Wireless USB devices, ep0 max packet is always 512 (tho
++ * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
++ */
++ switch (udev->speed) {
++ case USB_SPEED_SUPER:
++ case USB_SPEED_WIRELESS: /* fixed at 512 */
++ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
++ break;
++ case USB_SPEED_HIGH: /* fixed at 64 */
++ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
++ break;
++ case USB_SPEED_FULL: /* 8, 16, 32, or 64 */
++ /* to determine the ep0 maxpacket size, try to read
++ * the device descriptor to get bMaxPacketSize0 and
++ * then correct our initial guess.
++ */
++ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
++ break;
++ case USB_SPEED_LOW: /* fixed at 8 */
++ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
++ break;
++ default:
++ goto fail;
++ }
++
++ if (udev->speed == USB_SPEED_WIRELESS)
++ speed = "variable speed Wireless";
++ else
++ speed = usb_speed_string(udev->speed);
++
++ if (udev->speed != USB_SPEED_SUPER)
++ dev_info(&udev->dev,
++ "%s %s USB device number %d using %s\n",
++ (udev->config) ? "reset" : "new", speed,
++ devnum, udev->bus->controller->driver->name);
++
++ /* Set up TT records, if needed */
++ if (hdev->tt) {
++ udev->tt = hdev->tt;
++ udev->ttport = hdev->ttport;
++ } else if (udev->speed != USB_SPEED_HIGH
++ && hdev->speed == USB_SPEED_HIGH) {
++ if (!hub->tt.hub) {
++ dev_err(&udev->dev, "parent hub has no TT\n");
++ retval = -EINVAL;
++ goto fail;
++ }
++ udev->tt = &hub->tt;
++ udev->ttport = port1;
++ }
++
++ /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
++ * Because device hardware and firmware is sometimes buggy in
++ * this area, and this is how Linux has done it for ages.
++ * Change it cautiously.
++ *
++ * NOTE: If use_new_scheme() is true we will start by issuing
++ * a 64-byte GET_DESCRIPTOR request. This is what Windows does,
++ * so it may help with some non-standards-compliant devices.
++ * Otherwise we start with SET_ADDRESS and then try to read the
++ * first 8 bytes of the device descriptor to get the ep0 maxpacket
++ * value.
++ */
++ for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
++ bool did_new_scheme = false;
++
++ if (use_new_scheme(udev, retry_counter)) {
++ struct usb_device_descriptor *buf;
++ int r = 0;
++
++ did_new_scheme = true;
++ retval = hub_enable_device(udev);
++ if (retval < 0)
++ goto fail;
++
++#define GET_DESCRIPTOR_BUFSIZE 64
++ buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
++ if (!buf) {
++ retval = -ENOMEM;
++ continue;
++ }
++
++ /* Retry on all errors; some devices are flakey.
++ * 255 is for WUSB devices, we actually need to use
++ * 512 (WUSB1.0[4.8.1]).
++ */
++ for (j = 0; j < 3; ++j) {
++ buf->bMaxPacketSize0 = 0;
++ r = usb_control_msg(udev, usb_rcvaddr0pipe(),
++ USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
++ USB_DT_DEVICE << 8, 0,
++ buf, GET_DESCRIPTOR_BUFSIZE,
++ initial_descriptor_timeout);
++ switch (buf->bMaxPacketSize0) {
++ case 8: case 16: case 32: case 64: case 255:
++ if (buf->bDescriptorType ==
++ USB_DT_DEVICE) {
++ r = 0;
++ break;
++ }
++ /* FALL THROUGH */
++ default:
++ if (r == 0)
++ r = -EPROTO;
++ break;
++ }
++ if (r == 0)
++ break;
++ }
++ udev->descriptor.bMaxPacketSize0 =
++ buf->bMaxPacketSize0;
++ kfree(buf);
++
++ retval = hub_port_reset(hub, port1, udev, delay, false);
++ if (retval < 0) /* error or disconnect */
++ goto fail;
++ if (oldspeed != udev->speed) {
++ dev_dbg(&udev->dev,
++ "device reset changed speed!\n");
++ retval = -ENODEV;
++ goto fail;
++ }
++ if (r) {
++ if (r != -ENODEV)
++ dev_err(&udev->dev, "device descriptor read/64, error %d\n",
++ r);
++ retval = -EMSGSIZE;
++ continue;
++ }
++#undef GET_DESCRIPTOR_BUFSIZE
++ }
++
++ /*
++ * If device is WUSB, we already assigned an
++ * unauthorized address in the Connect Ack sequence;
++ * authorization will assign the final address.
++ */
++ if (udev->wusb == 0) {
++ for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
++ retval = hub_set_address(udev, devnum);
++ if (retval >= 0)
++ break;
++ msleep(200);
++ }
++ if (retval < 0) {
++ if (retval != -ENODEV)
++ dev_err(&udev->dev, "device not accepting address %d, error %d\n",
++ devnum, retval);
++ goto fail;
++ }
++ if (udev->speed == USB_SPEED_SUPER) {
++ devnum = udev->devnum;
++ dev_info(&udev->dev,
++ "%s SuperSpeed USB device number %d using %s\n",
++ (udev->config) ? "reset" : "new",
++ devnum, udev->bus->controller->driver->name);
++ }
++
++ /* cope with hardware quirkiness:
++ * - let SET_ADDRESS settle, some device hardware wants it
++ * - read ep0 maxpacket even for high and low speed,
++ */
++ msleep(10);
++ /* use_new_scheme() checks the speed which may have
++ * changed since the initial look so we cache the result
++ * in did_new_scheme
++ */
++ if (did_new_scheme)
++ break;
++ }
++
++ retval = usb_get_device_descriptor(udev, 8);
++ if (retval < 8) {
++ if (retval != -ENODEV)
++ dev_err(&udev->dev,
++ "device descriptor read/8, error %d\n",
++ retval);
++ if (retval >= 0)
++ retval = -EMSGSIZE;
++ } else {
++ retval = 0;
++ break;
++ }
++ }
++ if (retval)
++ goto fail;
++
++ if (hcd->phy && !hdev->parent)
++ usb_phy_notify_connect(hcd->phy, udev->speed);
++
++ /*
++ * Some superspeed devices have finished the link training process
++ * and attached to a superspeed hub port, but the device descriptor
++ * got from those devices show they aren't superspeed devices. Warm
++ * reset the port attached by the devices can fix them.
++ */
++ if ((udev->speed == USB_SPEED_SUPER) &&
++ (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
++ dev_err(&udev->dev, "got a wrong device descriptor, "
++ "warm reset device\n");
++ hub_port_reset(hub, port1, udev,
++ HUB_BH_RESET_TIME, true);
++ retval = -EINVAL;
++ goto fail;
++ }
++
++ if (udev->descriptor.bMaxPacketSize0 == 0xff ||
++ udev->speed == USB_SPEED_SUPER)
++ i = 512;
++ else
++ i = udev->descriptor.bMaxPacketSize0;
++ if (usb_endpoint_maxp(&udev->ep0.desc) != i) {
++ if (udev->speed == USB_SPEED_LOW ||
++ !(i == 8 || i == 16 || i == 32 || i == 64)) {
++ dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
++ retval = -EMSGSIZE;
++ goto fail;
++ }
++ if (udev->speed == USB_SPEED_FULL)
++ dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
++ else
++ dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
++ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
++ usb_ep0_reinit(udev);
++ }
++
++ retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
++ if (retval < (signed)sizeof(udev->descriptor)) {
++ if (retval != -ENODEV)
++ dev_err(&udev->dev, "device descriptor read/all, error %d\n",
++ retval);
++ if (retval >= 0)
++ retval = -ENOMSG;
++ goto fail;
++ }
++
++ if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
++ retval = usb_get_bos_descriptor(udev);
++ if (!retval) {
++ udev->lpm_capable = usb_device_supports_lpm(udev);
++ usb_set_lpm_parameters(udev);
++ }
++ }
++
++ retval = 0;
++ /* notify HCD that we have a device connected and addressed */
++ if (hcd->driver->update_device)
++ hcd->driver->update_device(hcd, udev);
++ hub_set_initial_usb2_lpm_policy(udev);
++fail:
++ if (retval) {
++ hub_port_disable(hub, port1, 0);
++ update_devnum(udev, devnum); /* for disconnect processing */
++ }
++ mutex_unlock(&usb_address0_mutex);
++ return retval;
++}
++
++static void
++check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
++{
++ struct usb_qualifier_descriptor *qual;
++ int status;
++
++ qual = kmalloc (sizeof *qual, GFP_KERNEL);
++ if (qual == NULL)
++ return;
++
++ status = usb_get_descriptor (udev, USB_DT_DEVICE_QUALIFIER, 0,
++ qual, sizeof *qual);
++ if (status == sizeof *qual) {
++ dev_info(&udev->dev, "not running at top speed; "
++ "connect to a high speed hub\n");
++ /* hub LEDs are probably harder to miss than syslog */
++ if (hub->has_indicators) {
++ hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
++ schedule_delayed_work (&hub->leds, 0);
++ }
++ }
++ kfree(qual);
++}
++
++static unsigned
++hub_power_remaining (struct usb_hub *hub)
++{
++ struct usb_device *hdev = hub->hdev;
++ int remaining;
++ int port1;
++
++ if (!hub->limited_power)
++ return 0;
++
++ remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent;
++ for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
++ struct usb_device *udev = hub->ports[port1 - 1]->child;
++ int delta;
++ unsigned unit_load;
++
++ if (!udev)
++ continue;
++ if (hub_is_superspeed(udev))
++ unit_load = 150;
++ else
++ unit_load = 100;
++
++ /*
++ * Unconfigured devices may not use more than one unit load,
++ * or 8mA for OTG ports
++ */
++ if (udev->actconfig)
++ delta = usb_get_max_power(udev, udev->actconfig);
++ else if (port1 != udev->bus->otg_port || hdev->parent)
++ delta = unit_load;
++ else
++ delta = 8;
++ if (delta > hub->mA_per_port)
++ dev_warn(&udev->dev,
++ "%dmA is over %umA budget for port %d!\n",
++ delta, hub->mA_per_port, port1);
++ remaining -= delta;
++ }
++ if (remaining < 0) {
++ dev_warn(hub->intfdev, "%dmA over power budget!\n",
++ -remaining);
++ remaining = 0;
++ }
++ return remaining;
++}
++
++/* Handle physical or logical connection change events.
++ * This routine is called when:
++ * a port connection-change occurs;
++ * a port enable-change occurs (often caused by EMI);
++ * usb_reset_and_verify_device() encounters changed descriptors (as from
++ * a firmware download)
++ * caller already locked the hub
++ */
++static void hub_port_connect_change(struct usb_hub *hub, int port1,
++ u16 portstatus, u16 portchange)
++{
++ struct usb_device *hdev = hub->hdev;
++ struct device *hub_dev = hub->intfdev;
++ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
++ unsigned wHubCharacteristics =
++ le16_to_cpu(hub->descriptor->wHubCharacteristics);
++ struct usb_device *udev;
++ int status, i;
++ unsigned unit_load;
++
++ dev_dbg (hub_dev,
++ "port %d, status %04x, change %04x, %s\n",
++ port1, portstatus, portchange, portspeed(hub, portstatus));
++
++ if (hub->has_indicators) {
++ set_port_led(hub, port1, HUB_LED_AUTO);
++ hub->indicator[port1-1] = INDICATOR_AUTO;
++ }
++
++#ifdef CONFIG_USB_OTG
++ /* during HNP, don't repeat the debounce */
++ if (hdev->bus->is_b_host)
++ portchange &= ~(USB_PORT_STAT_C_CONNECTION |
++ USB_PORT_STAT_C_ENABLE);
++#endif
++
++ /* Try to resuscitate an existing device */
++ udev = hub->ports[port1 - 1]->child;
++ if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
++ udev->state != USB_STATE_NOTATTACHED) {
++ usb_lock_device(udev);
++ if (portstatus & USB_PORT_STAT_ENABLE) {
++ status = 0; /* Nothing to do */
++
++#ifdef CONFIG_PM_RUNTIME
++ } else if (udev->state == USB_STATE_SUSPENDED &&
++ udev->persist_enabled) {
++ /* For a suspended device, treat this as a
++ * remote wakeup event.
++ */
++ status = usb_remote_wakeup(udev);
++#endif
++
++ } else {
++ status = -ENODEV; /* Don't resuscitate */
++ }
++ usb_unlock_device(udev);
++
++ if (status == 0) {
++ clear_bit(port1, hub->change_bits);
++ return;
++ }
++ }
++
++ /* Disconnect any existing devices under this port */
++ if (udev) {
++ if (hcd->phy && !hdev->parent)
++ usb_phy_notify_disconnect(hcd->phy, udev->speed);
++ usb_disconnect(&hub->ports[port1 - 1]->child);
++ }
++ clear_bit(port1, hub->change_bits);
++
++ /* We can forget about a "removed" device when there's a physical
++ * disconnect or the connect status changes.
++ */
++ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
++ (portchange & USB_PORT_STAT_C_CONNECTION))
++ clear_bit(port1, hub->removed_bits);
++
++ if (portchange & (USB_PORT_STAT_C_CONNECTION |
++ USB_PORT_STAT_C_ENABLE)) {
++ status = hub_port_debounce_be_stable(hub, port1);
++ if (status < 0) {
++ if (status != -ENODEV && printk_ratelimit())
++ dev_err(hub_dev, "connect-debounce failed, "
++ "port %d disabled\n", port1);
++ portstatus &= ~USB_PORT_STAT_CONNECTION;
++ } else {
++ portstatus = status;
++ }
++ }
++
++ /* Return now if debouncing failed or nothing is connected or
++ * the device was "removed".
++ */
++ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
++ test_bit(port1, hub->removed_bits)) {
++
++ /* maybe switch power back on (e.g. root hub was reset) */
++ if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
++ && !port_is_power_on(hub, portstatus))
++ set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
++
++ if (portstatus & USB_PORT_STAT_ENABLE)
++ goto done;
++ return;
++ }
++ if (hub_is_superspeed(hub->hdev))
++ unit_load = 150;
++ else
++ unit_load = 100;
++
++ status = 0;
++ for (i = 0; i < SET_CONFIG_TRIES; i++) {
++
++ /* reallocate for each attempt, since references
++ * to the previous one can escape in various ways
++ */
++ udev = usb_alloc_dev(hdev, hdev->bus, port1);
++ if (!udev) {
++ dev_err (hub_dev,
++ "couldn't allocate port %d usb_device\n",
++ port1);
++ goto done;
++ }
++
++ usb_set_device_state(udev, USB_STATE_POWERED);
++ udev->bus_mA = hub->mA_per_port;
++ udev->level = hdev->level + 1;
++ udev->wusb = hub_is_wusb(hub);
++
++ /* Only USB 3.0 devices are connected to SuperSpeed hubs. */
++ if (hub_is_superspeed(hub->hdev))
++ udev->speed = USB_SPEED_SUPER;
++ else
++ udev->speed = USB_SPEED_UNKNOWN;
++
++ choose_devnum(udev);
++ if (udev->devnum <= 0) {
++ status = -ENOTCONN; /* Don't retry */
++ goto loop;
++ }
++
++ /* reset (non-USB 3.0 devices) and get descriptor */
++ status = hub_port_init(hub, udev, port1, i);
++ if (status < 0)
++ goto loop;
++
++ usb_detect_quirks(udev);
++ if (udev->quirks & USB_QUIRK_DELAY_INIT)
++ msleep(1000);
++
++ /* consecutive bus-powered hubs aren't reliable; they can
++ * violate the voltage drop budget. if the new child has
++ * a "powered" LED, users should notice we didn't enable it
++ * (without reading syslog), even without per-port LEDs
++ * on the parent.
++ */
++ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB
++ && udev->bus_mA <= unit_load) {
++ u16 devstat;
++
++ status = usb_get_status(udev, USB_RECIP_DEVICE, 0,
++ &devstat);
++ if (status) {
++ dev_dbg(&udev->dev, "get status %d ?\n", status);
++ goto loop_disable;
++ }
++ if ((devstat & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
++ dev_err(&udev->dev,
++ "can't connect bus-powered hub "
++ "to this port\n");
++ if (hub->has_indicators) {
++ hub->indicator[port1-1] =
++ INDICATOR_AMBER_BLINK;
++ schedule_delayed_work (&hub->leds, 0);
++ }
++ status = -ENOTCONN; /* Don't retry */
++ goto loop_disable;
++ }
++ }
++
++ /* check for devices running slower than they could */
++ if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
++ && udev->speed == USB_SPEED_FULL
++ && highspeed_hubs != 0)
++ check_highspeed (hub, udev, port1);
++
++ /* Store the parent's children[] pointer. At this point
++ * udev becomes globally accessible, although presumably
++ * no one will look at it until hdev is unlocked.
++ */
++ status = 0;
++
++ /* We mustn't add new devices if the parent hub has
++ * been disconnected; we would race with the
++ * recursively_mark_NOTATTACHED() routine.
++ */
++ spin_lock_irq(&device_state_lock);
++ if (hdev->state == USB_STATE_NOTATTACHED)
++ status = -ENOTCONN;
++ else
++ hub->ports[port1 - 1]->child = udev;
++ spin_unlock_irq(&device_state_lock);
++
++ /* Run it through the hoops (find a driver, etc) */
++ if (!status) {
++ status = usb_new_device(udev);
++ if (status) {
++ spin_lock_irq(&device_state_lock);
++ hub->ports[port1 - 1]->child = NULL;
++ spin_unlock_irq(&device_state_lock);
++ }
++ }
++
++ if (status)
++ goto loop_disable;
++
++ status = hub_power_remaining(hub);
++ if (status)
++ dev_dbg(hub_dev, "%dmA power budget left\n", status);
++
++ return;
++
++loop_disable:
++ hub_port_disable(hub, port1, 1);
++loop:
++ usb_ep0_reinit(udev);
++ release_devnum(udev);
++ hub_free_dev(udev);
++ usb_put_dev(udev);
++ if ((status == -ENOTCONN) || (status == -ENOTSUPP))
++ break;
++ }
++ if (hub->hdev->parent ||
++ !hcd->driver->port_handed_over ||
++ !(hcd->driver->port_handed_over)(hcd, port1)) {
++ if (status != -ENOTCONN && status != -ENODEV)
++ dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
++ port1);
++ }
++
++done:
++ hub_port_disable(hub, port1, 1);
++ if (hcd->driver->relinquish_port && !hub->hdev->parent)
++ hcd->driver->relinquish_port(hcd, port1);
++}
++
++/* Returns 1 if there was a remote wakeup and a connect status change. */
++static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
++ u16 portstatus, u16 portchange)
++{
++ struct usb_device *hdev;
++ struct usb_device *udev;
++ int connect_change = 0;
++ int ret;
++
++ hdev = hub->hdev;
++ udev = hub->ports[port - 1]->child;
++ if (!hub_is_superspeed(hdev)) {
++ if (!(portchange & USB_PORT_STAT_C_SUSPEND))
++ return 0;
++ usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
++ } else {
++ if (!udev || udev->state != USB_STATE_SUSPENDED ||
++ (portstatus & USB_PORT_STAT_LINK_STATE) !=
++ USB_SS_PORT_LS_U0)
++ return 0;
++ }
++
++ if (udev) {
++ /* TRSMRCY = 10 msec */
++ msleep(10);
++
++ usb_lock_device(udev);
++ ret = usb_remote_wakeup(udev);
++ usb_unlock_device(udev);
++ if (ret < 0)
++ connect_change = 1;
++ } else {
++ ret = -ENODEV;
++ hub_port_disable(hub, port, 1);
++ }
++ dev_dbg(hub->intfdev, "resume on port %d, status %d\n",
++ port, ret);
++ return connect_change;
++}
++
++static void hub_events(void)
++{
++ struct list_head *tmp;
++ struct usb_device *hdev;
++ struct usb_interface *intf;
++ struct usb_hub *hub;
++ struct device *hub_dev;
++ u16 hubstatus;
++ u16 hubchange;
++ u16 portstatus;
++ u16 portchange;
++ int i, ret;
++ int connect_change, wakeup_change;
++
++ /*
++ * We restart the list every time to avoid a deadlock with
++ * deleting hubs downstream from this one. This should be
++ * safe since we delete the hub from the event list.
++ * Not the most efficient, but avoids deadlocks.
++ */
++ while (1) {
++
++ /* Grab the first entry at the beginning of the list */
++ spin_lock_irq(&hub_event_lock);
++ if (list_empty(&hub_event_list)) {
++ spin_unlock_irq(&hub_event_lock);
++ break;
++ }
++
++ tmp = hub_event_list.next;
++ list_del_init(tmp);
++
++ hub = list_entry(tmp, struct usb_hub, event_list);
++ kref_get(&hub->kref);
++ hdev = hub->hdev;
++ usb_get_dev(hdev);
++ spin_unlock_irq(&hub_event_lock);
++
++ hub_dev = hub->intfdev;
++ intf = to_usb_interface(hub_dev);
++ dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
++ hdev->state, hdev->maxchild,
++ /* NOTE: expects max 15 ports... */
++ (u16) hub->change_bits[0],
++ (u16) hub->event_bits[0]);
++
++ /* Lock the device, then check to see if we were
++ * disconnected while waiting for the lock to succeed. */
++ usb_lock_device(hdev);
++ if (unlikely(hub->disconnected))
++ goto loop_disconnected;
++
++ /* If the hub has died, clean up after it */
++ if (hdev->state == USB_STATE_NOTATTACHED) {
++ hub->error = -ENODEV;
++ hub_quiesce(hub, HUB_DISCONNECT);
++ goto loop;
++ }
++
++ /* Autoresume */
++ ret = usb_autopm_get_interface(intf);
++ if (ret) {
++ dev_dbg(hub_dev, "Can't autoresume: %d\n", ret);
++ goto loop;
++ }
++
++ /* If this is an inactive hub, do nothing */
++ if (hub->quiescing)
++ goto loop_autopm;
++
++ if (hub->error) {
++ dev_dbg (hub_dev, "resetting for error %d\n",
++ hub->error);
++
++ ret = usb_reset_device(hdev);
++ if (ret) {
++ dev_dbg (hub_dev,
++ "error resetting hub: %d\n", ret);
++ goto loop_autopm;
++ }
++
++ hub->nerrors = 0;
++ hub->error = 0;
++ }
++
++ /* deal with port status changes */
++ for (i = 1; i <= hdev->maxchild; i++) {
++ if (test_bit(i, hub->busy_bits))
++ continue;
++ connect_change = test_bit(i, hub->change_bits);
++ wakeup_change = test_and_clear_bit(i, hub->wakeup_bits);
++ if (!test_and_clear_bit(i, hub->event_bits) &&
++ !connect_change && !wakeup_change)
++ continue;
++
++ ret = hub_port_status(hub, i,
++ &portstatus, &portchange);
++ if (ret < 0)
++ continue;
++
++ if (portchange & USB_PORT_STAT_C_CONNECTION) {
++ usb_clear_port_feature(hdev, i,
++ USB_PORT_FEAT_C_CONNECTION);
++ connect_change = 1;
++ }
++
++ if (portchange & USB_PORT_STAT_C_ENABLE) {
++ if (!connect_change)
++ dev_dbg (hub_dev,
++ "port %d enable change, "
++ "status %08x\n",
++ i, portstatus);
++ usb_clear_port_feature(hdev, i,
++ USB_PORT_FEAT_C_ENABLE);
++
++ /*
++ * EM interference sometimes causes badly
++ * shielded USB devices to be shutdown by
++ * the hub, this hack enables them again.
++ * Works at least with mouse driver.
++ */
++ if (!(portstatus & USB_PORT_STAT_ENABLE)
++ && !connect_change
++ && hub->ports[i - 1]->child) {
++ dev_err (hub_dev,
++ "port %i "
++ "disabled by hub (EMI?), "
++ "re-enabling...\n",
++ i);
++ connect_change = 1;
++ }
++ }
++
++ if (hub_handle_remote_wakeup(hub, i,
++ portstatus, portchange))
++ connect_change = 1;
++
++ if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
++ u16 status = 0;
++ u16 unused;
++
++ dev_dbg(hub_dev, "over-current change on port "
++ "%d\n", i);
++ usb_clear_port_feature(hdev, i,
++ USB_PORT_FEAT_C_OVER_CURRENT);
++ msleep(100); /* Cool down */
++ hub_power_on(hub, true);
++ hub_port_status(hub, i, &status, &unused);
++ if (status & USB_PORT_STAT_OVERCURRENT)
++ dev_err(hub_dev, "over-current "
++ "condition on port %d\n", i);
++ }
++
++ if (portchange & USB_PORT_STAT_C_RESET) {
++ dev_dbg (hub_dev,
++ "reset change on port %d\n",
++ i);
++ usb_clear_port_feature(hdev, i,
++ USB_PORT_FEAT_C_RESET);
++ }
++ if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
++ hub_is_superspeed(hub->hdev)) {
++ dev_dbg(hub_dev,
++ "warm reset change on port %d\n",
++ i);
++ usb_clear_port_feature(hdev, i,
++ USB_PORT_FEAT_C_BH_PORT_RESET);
++ }
++ if (portchange & USB_PORT_STAT_C_LINK_STATE) {
++ usb_clear_port_feature(hub->hdev, i,
++ USB_PORT_FEAT_C_PORT_LINK_STATE);
++ }
++ if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) {
++ dev_warn(hub_dev,
++ "config error on port %d\n",
++ i);
++ usb_clear_port_feature(hub->hdev, i,
++ USB_PORT_FEAT_C_PORT_CONFIG_ERROR);
++ }
++
++ /* Warm reset a USB3 protocol port if it's in
++ * SS.Inactive state.
++ */
++ if (hub_port_warm_reset_required(hub, portstatus)) {
++ int status;
++ struct usb_device *udev =
++ hub->ports[i - 1]->child;
++
++ dev_dbg(hub_dev, "warm reset port %d\n", i);
++ if (!udev ||
++ !(portstatus & USB_PORT_STAT_CONNECTION) ||
++ udev->state == USB_STATE_NOTATTACHED) {
++ status = hub_port_reset(hub, i,
++ NULL, HUB_BH_RESET_TIME,
++ true);
++ if (status < 0)
++ hub_port_disable(hub, i, 1);
++ } else {
++ usb_lock_device(udev);
++ status = usb_reset_device(udev);
++ usb_unlock_device(udev);
++ connect_change = 0;
++ }
++ }
++
++ if (connect_change)
++ hub_port_connect_change(hub, i,
++ portstatus, portchange);
++ } /* end for i */
++
++ /* deal with hub status changes */
++ if (test_and_clear_bit(0, hub->event_bits) == 0)
++ ; /* do nothing */
++ else if (hub_hub_status(hub, &hubstatus, &hubchange) < 0)
++ dev_err (hub_dev, "get_hub_status failed\n");
++ else {
++ if (hubchange & HUB_CHANGE_LOCAL_POWER) {
++ dev_dbg (hub_dev, "power change\n");
++ clear_hub_feature(hdev, C_HUB_LOCAL_POWER);
++ if (hubstatus & HUB_STATUS_LOCAL_POWER)
++ /* FIXME: Is this always true? */
++ hub->limited_power = 1;
++ else
++ hub->limited_power = 0;
++ }
++ if (hubchange & HUB_CHANGE_OVERCURRENT) {
++ u16 status = 0;
++ u16 unused;
++
++ dev_dbg(hub_dev, "over-current change\n");
++ clear_hub_feature(hdev, C_HUB_OVER_CURRENT);
++ msleep(500); /* Cool down */
++ hub_power_on(hub, true);
++ hub_hub_status(hub, &status, &unused);
++ if (status & HUB_STATUS_OVERCURRENT)
++ dev_err(hub_dev, "over-current "
++ "condition\n");
++ }
++ }
++
++ loop_autopm:
++ /* Balance the usb_autopm_get_interface() above */
++ usb_autopm_put_interface_no_suspend(intf);
++ loop:
++ /* Balance the usb_autopm_get_interface_no_resume() in
++ * kick_khubd() and allow autosuspend.
++ */
++ usb_autopm_put_interface(intf);
++ loop_disconnected:
++ usb_unlock_device(hdev);
++ usb_put_dev(hdev);
++ kref_put(&hub->kref, hub_release);
++
++ } /* end while (1) */
++}
++
++static int hub_thread(void *__unused)
++{
++ /* khubd needs to be freezable to avoid interfering with USB-PERSIST
++ * port handover. Otherwise it might see that a full-speed device
++ * was gone before the EHCI controller had handed its port over to
++ * the companion full-speed controller.
++ */
++ set_freezable();
++
++ do {
++ hub_events();
++ wait_event_freezable(khubd_wait,
++ !list_empty(&hub_event_list) ||
++ kthread_should_stop());
++ } while (!kthread_should_stop() || !list_empty(&hub_event_list));
++
++ pr_debug("%s: khubd exiting\n", usbcore_name);
++ return 0;
++}
++
++static const struct usb_device_id hub_id_table[] = {
++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++ | USB_DEVICE_ID_MATCH_INT_CLASS,
++ .idVendor = USB_VENDOR_GENESYS_LOGIC,
++ .bInterfaceClass = USB_CLASS_HUB,
++ .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
++ { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
++ .bDeviceClass = USB_CLASS_HUB},
++ { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
++ .bInterfaceClass = USB_CLASS_HUB},
++ { } /* Terminating entry */
++};
++
++MODULE_DEVICE_TABLE (usb, hub_id_table);
++
++static struct usb_driver hub_driver = {
++ .name = "hub",
++ .probe = hub_probe,
++ .disconnect = hub_disconnect,
++ .suspend = hub_suspend,
++ .resume = hub_resume,
++ .reset_resume = hub_reset_resume,
++ .pre_reset = hub_pre_reset,
++ .post_reset = hub_post_reset,
++ .unlocked_ioctl = hub_ioctl,
++ .id_table = hub_id_table,
++ .supports_autosuspend = 1,
++};
++
++int usb_hub_init(void)
++{
++ if (usb_register(&hub_driver) < 0) {
++ printk(KERN_ERR "%s: can't register hub driver\n",
++ usbcore_name);
++ return -1;
++ }
++
++ khubd_task = kthread_run(hub_thread, NULL, "khubd");
++ if (!IS_ERR(khubd_task))
++ return 0;
++
++ /* Fall through if kernel_thread failed */
++ usb_deregister(&hub_driver);
++ printk(KERN_ERR "%s: can't start khubd\n", usbcore_name);
++
++ return -1;
++}
++
++void usb_hub_cleanup(void)
++{
++ kthread_stop(khubd_task);
++
++ /*
++ * Hub resources are freed for us by usb_deregister. It calls
++ * usb_driver_purge on every device which in turn calls that
++ * devices disconnect function if it is using this driver.
++ * The hub_disconnect function takes care of releasing the
++ * individual hub resources. -greg
++ */
++ usb_deregister(&hub_driver);
++} /* usb_hub_cleanup() */
++
++static int descriptors_changed(struct usb_device *udev,
++ struct usb_device_descriptor *old_device_descriptor,
++ struct usb_host_bos *old_bos)
++{
++ int changed = 0;
++ unsigned index;
++ unsigned serial_len = 0;
++ unsigned len;
++ unsigned old_length;
++ int length;
++ char *buf;
++
++ if (memcmp(&udev->descriptor, old_device_descriptor,
++ sizeof(*old_device_descriptor)) != 0)
++ return 1;
++
++ if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
++ return 1;
++ if (udev->bos) {
++ len = le16_to_cpu(udev->bos->desc->wTotalLength);
++ if (len != le16_to_cpu(old_bos->desc->wTotalLength))
++ return 1;
++ if (memcmp(udev->bos->desc, old_bos->desc, len))
++ return 1;
++ }
++
++ /* Since the idVendor, idProduct, and bcdDevice values in the
++ * device descriptor haven't changed, we will assume the
++ * Manufacturer and Product strings haven't changed either.
++ * But the SerialNumber string could be different (e.g., a
++ * different flash card of the same brand).
++ */
++ if (udev->serial)
++ serial_len = strlen(udev->serial) + 1;
++
++ len = serial_len;
++ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
++ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
++ len = max(len, old_length);
++ }
++
++ buf = kmalloc(len, GFP_NOIO);
++ if (buf == NULL) {
++ dev_err(&udev->dev, "no mem to re-read configs after reset\n");
++ /* assume the worst */
++ return 1;
++ }
++ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
++ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
++ length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
++ old_length);
++ if (length != old_length) {
++ dev_dbg(&udev->dev, "config index %d, error %d\n",
++ index, length);
++ changed = 1;
++ break;
++ }
++ if (memcmp (buf, udev->rawdescriptors[index], old_length)
++ != 0) {
++ dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
++ index,
++ ((struct usb_config_descriptor *) buf)->
++ bConfigurationValue);
++ changed = 1;
++ break;
++ }
++ }
++
++ if (!changed && serial_len) {
++ length = usb_string(udev, udev->descriptor.iSerialNumber,
++ buf, serial_len);
++ if (length + 1 != serial_len) {
++ dev_dbg(&udev->dev, "serial string error %d\n",
++ length);
++ changed = 1;
++ } else if (memcmp(buf, udev->serial, length) != 0) {
++ dev_dbg(&udev->dev, "serial string changed\n");
++ changed = 1;
++ }
++ }
++
++ kfree(buf);
++ return changed;
++}
++
++/**
++ * usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
++ * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
++ *
++ * WARNING - don't use this routine to reset a composite device
++ * (one with multiple interfaces owned by separate drivers)!
++ * Use usb_reset_device() instead.
++ *
++ * Do a port reset, reassign the device's address, and establish its
++ * former operating configuration. If the reset fails, or the device's
++ * descriptors change from their values before the reset, or the original
++ * configuration and altsettings cannot be restored, a flag will be set
++ * telling khubd to pretend the device has been disconnected and then
++ * re-connected. All drivers will be unbound, and the device will be
++ * re-enumerated and probed all over again.
++ *
++ * Return: 0 if the reset succeeded, -ENODEV if the device has been
++ * flagged for logical disconnection, or some other negative error code
++ * if the reset wasn't even attempted.
++ *
++ * Note:
++ * The caller must own the device lock. For example, it's safe to use
++ * this from a driver probe() routine after downloading new firmware.
++ * For calls that might not occur during probe(), drivers should lock
++ * the device using usb_lock_device_for_reset().
++ *
++ * Locking exception: This routine may also be called from within an
++ * autoresume handler. Such usage won't conflict with other tasks
++ * holding the device lock because these tasks should always call
++ * usb_autopm_resume_device(), thereby preventing any unwanted autoresume.
++ */
++static int usb_reset_and_verify_device(struct usb_device *udev)
++{
++ struct usb_device *parent_hdev = udev->parent;
++ struct usb_hub *parent_hub;
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++ struct usb_device_descriptor descriptor = udev->descriptor;
++ struct usb_host_bos *bos;
++ int i, ret = 0;
++ int port1 = udev->portnum;
++
++ if (udev->state == USB_STATE_NOTATTACHED ||
++ udev->state == USB_STATE_SUSPENDED) {
++ dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
++ udev->state);
++ return -EINVAL;
++ }
++
++ if (!parent_hdev) {
++ /* this requires hcd-specific logic; see ohci_restart() */
++ dev_dbg(&udev->dev, "%s for root hub!\n", __func__);
++ return -EISDIR;
++ }
++ parent_hub = usb_hub_to_struct_hub(parent_hdev);
++
++ /* Disable USB2 hardware LPM.
++ * It will be re-enabled by the enumeration process.
++ */
++ if (udev->usb2_hw_lpm_enabled == 1)
++ usb_set_usb2_hardware_lpm(udev, 0);
++
++ bos = udev->bos;
++ udev->bos = NULL;
++
++ /* Disable LPM and LTM while we reset the device and reinstall the alt
++ * settings. Device-initiated LPM settings, and system exit latency
++ * settings are cleared when the device is reset, so we have to set
++ * them up again.
++ */
++ ret = usb_unlocked_disable_lpm(udev);
++ if (ret) {
++ dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
++ goto re_enumerate;
++ }
++ ret = usb_disable_ltm(udev);
++ if (ret) {
++ dev_err(&udev->dev, "%s Failed to disable LTM\n.",
++ __func__);
++ goto re_enumerate;
++ }
++
++ set_bit(port1, parent_hub->busy_bits);
++ for (i = 0; i < SET_CONFIG_TRIES; ++i) {
++
++ /* ep0 maxpacket size may change; let the HCD know about it.
++ * Other endpoints will be handled by re-enumeration. */
++ usb_ep0_reinit(udev);
++ ret = hub_port_init(parent_hub, udev, port1, i);
++ if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
++ break;
++ }
++ clear_bit(port1, parent_hub->busy_bits);
++
++ if (ret < 0)
++ goto re_enumerate;
++
++ /* Device might have changed firmware (DFU or similar) */
++ if (descriptors_changed(udev, &descriptor, bos)) {
++ dev_info(&udev->dev, "device firmware changed\n");
++ udev->descriptor = descriptor; /* for disconnect() calls */
++ goto re_enumerate;
++ }
++
++ /* Restore the device's previous configuration */
++ if (!udev->actconfig)
++ goto done;
++
++ mutex_lock(hcd->bandwidth_mutex);
++ ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
++ if (ret < 0) {
++ dev_warn(&udev->dev,
++ "Busted HC? Not enough HCD resources for "
++ "old configuration.\n");
++ mutex_unlock(hcd->bandwidth_mutex);
++ goto re_enumerate;
++ }
++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_SET_CONFIGURATION, 0,
++ udev->actconfig->desc.bConfigurationValue, 0,
++ NULL, 0, USB_CTRL_SET_TIMEOUT);
++ if (ret < 0) {
++ dev_err(&udev->dev,
++ "can't restore configuration #%d (error=%d)\n",
++ udev->actconfig->desc.bConfigurationValue, ret);
++ mutex_unlock(hcd->bandwidth_mutex);
++ goto re_enumerate;
++ }
++ mutex_unlock(hcd->bandwidth_mutex);
++ usb_set_device_state(udev, USB_STATE_CONFIGURED);
++
++ /* Put interfaces back into the same altsettings as before.
++ * Don't bother to send the Set-Interface request for interfaces
++ * that were already in altsetting 0; besides being unnecessary,
++ * many devices can't handle it. Instead just reset the host-side
++ * endpoint state.
++ */
++ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
++ struct usb_host_config *config = udev->actconfig;
++ struct usb_interface *intf = config->interface[i];
++ struct usb_interface_descriptor *desc;
++
++ desc = &intf->cur_altsetting->desc;
++ if (desc->bAlternateSetting == 0) {
++ usb_disable_interface(udev, intf, true);
++ usb_enable_interface(udev, intf, true);
++ ret = 0;
++ } else {
++ /* Let the bandwidth allocation function know that this
++ * device has been reset, and it will have to use
++ * alternate setting 0 as the current alternate setting.
++ */
++ intf->resetting_device = 1;
++ ret = usb_set_interface(udev, desc->bInterfaceNumber,
++ desc->bAlternateSetting);
++ intf->resetting_device = 0;
++ }
++ if (ret < 0) {
++ dev_err(&udev->dev, "failed to restore interface %d "
++ "altsetting %d (error=%d)\n",
++ desc->bInterfaceNumber,
++ desc->bAlternateSetting,
++ ret);
++ goto re_enumerate;
++ }
++ }
++
++done:
++ /* Now that the alt settings are re-installed, enable LTM and LPM. */
++ usb_set_usb2_hardware_lpm(udev, 1);
++ usb_unlocked_enable_lpm(udev);
++ usb_enable_ltm(udev);
++ usb_release_bos_descriptor(udev);
++ udev->bos = bos;
++ return 0;
++
++re_enumerate:
++ /* LPM state doesn't matter when we're about to destroy the device. */
++ hub_port_logical_disconnect(parent_hub, port1);
++ usb_release_bos_descriptor(udev);
++ udev->bos = bos;
++ return -ENODEV;
++}
++
++/**
++ * usb_reset_device - warn interface drivers and perform a USB port reset
++ * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
++ *
++ * Warns all drivers bound to registered interfaces (using their pre_reset
++ * method), performs the port reset, and then lets the drivers know that
++ * the reset is over (using their post_reset method).
++ *
++ * Return: The same as for usb_reset_and_verify_device().
++ *
++ * Note:
++ * The caller must own the device lock. For example, it's safe to use
++ * this from a driver probe() routine after downloading new firmware.
++ * For calls that might not occur during probe(), drivers should lock
++ * the device using usb_lock_device_for_reset().
++ *
++ * If an interface is currently being probed or disconnected, we assume
++ * its driver knows how to handle resets. For all other interfaces,
++ * if the driver doesn't have pre_reset and post_reset methods then
++ * we attempt to unbind it and rebind afterward.
++ */
++int usb_reset_device(struct usb_device *udev)
++{
++ int ret;
++ int i;
++ unsigned int noio_flag;
++ struct usb_host_config *config = udev->actconfig;
++
++ if (udev->state == USB_STATE_NOTATTACHED ||
++ udev->state == USB_STATE_SUSPENDED) {
++ dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
++ udev->state);
++ return -EINVAL;
++ }
++
++ /*
++ * Don't allocate memory with GFP_KERNEL in current
++ * context to avoid possible deadlock if usb mass
++ * storage interface or usbnet interface(iSCSI case)
++ * is included in current configuration. The easist
++ * approach is to do it for every device reset,
++ * because the device 'memalloc_noio' flag may have
++ * not been set before reseting the usb device.
++ */
++ noio_flag = memalloc_noio_save();
++
++ /* Prevent autosuspend during the reset */
++ usb_autoresume_device(udev);
++
++ if (config) {
++ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
++ struct usb_interface *cintf = config->interface[i];
++ struct usb_driver *drv;
++ int unbind = 0;
++
++ if (cintf->dev.driver) {
++ drv = to_usb_driver(cintf->dev.driver);
++ if (drv->pre_reset && drv->post_reset)
++ unbind = (drv->pre_reset)(cintf);
++ else if (cintf->condition ==
++ USB_INTERFACE_BOUND)
++ unbind = 1;
++ if (unbind)
++ usb_forced_unbind_intf(cintf);
++ }
++ }
++ }
++
++ ret = usb_reset_and_verify_device(udev);
++
++ if (config) {
++ for (i = config->desc.bNumInterfaces - 1; i >= 0; --i) {
++ struct usb_interface *cintf = config->interface[i];
++ struct usb_driver *drv;
++ int rebind = cintf->needs_binding;
++
++ if (!rebind && cintf->dev.driver) {
++ drv = to_usb_driver(cintf->dev.driver);
++ if (drv->post_reset)
++ rebind = (drv->post_reset)(cintf);
++ else if (cintf->condition ==
++ USB_INTERFACE_BOUND)
++ rebind = 1;
++ if (rebind)
++ cintf->needs_binding = 1;
++ }
++ }
++ usb_unbind_and_rebind_marked_interfaces(udev);
++ }
++
++ usb_autosuspend_device(udev);
++ memalloc_noio_restore(noio_flag);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(usb_reset_device);
++
++
++/**
++ * usb_queue_reset_device - Reset a USB device from an atomic context
++ * @iface: USB interface belonging to the device to reset
++ *
++ * This function can be used to reset a USB device from an atomic
++ * context, where usb_reset_device() won't work (as it blocks).
++ *
++ * Doing a reset via this method is functionally equivalent to calling
++ * usb_reset_device(), except for the fact that it is delayed to a
++ * workqueue. This means that any drivers bound to other interfaces
++ * might be unbound, as well as users from usbfs in user space.
++ *
++ * Corner cases:
++ *
++ * - Scheduling two resets at the same time from two different drivers
++ * attached to two different interfaces of the same device is
++ * possible; depending on how the driver attached to each interface
++ * handles ->pre_reset(), the second reset might happen or not.
++ *
++ * - If a driver is unbound and it had a pending reset, the reset will
++ * be cancelled.
++ *
++ * - This function can be called during .probe() or .disconnect()
++ * times. On return from .disconnect(), any pending resets will be
++ * cancelled.
++ *
++ * There is no no need to lock/unlock the @reset_ws as schedule_work()
++ * does its own.
++ *
++ * NOTE: We don't do any reference count tracking because it is not
++ * needed. The lifecycle of the work_struct is tied to the
++ * usb_interface. Before destroying the interface we cancel the
++ * work_struct, so the fact that work_struct is queued and or
++ * running means the interface (and thus, the device) exist and
++ * are referenced.
++ */
++void usb_queue_reset_device(struct usb_interface *iface)
++{
++ schedule_work(&iface->reset_ws);
++}
++EXPORT_SYMBOL_GPL(usb_queue_reset_device);
++
++/**
++ * usb_hub_find_child - Get the pointer of child device
++ * attached to the port which is specified by @port1.
++ * @hdev: USB device belonging to the usb hub
++ * @port1: port num to indicate which port the child device
++ * is attached to.
++ *
++ * USB drivers call this function to get hub's child device
++ * pointer.
++ *
++ * Return: %NULL if input param is invalid and
++ * child's usb_device pointer if non-NULL.
++ */
++struct usb_device *usb_hub_find_child(struct usb_device *hdev,
++ int port1)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
++
++ if (port1 < 1 || port1 > hdev->maxchild)
++ return NULL;
++ return hub->ports[port1 - 1]->child;
++}
++EXPORT_SYMBOL_GPL(usb_hub_find_child);
++
++/**
++ * usb_set_hub_port_connect_type - set hub port connect type.
++ * @hdev: USB device belonging to the usb hub
++ * @port1: port num of the port
++ * @type: connect type of the port
++ */
++void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1,
++ enum usb_port_connect_type type)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
++
++ if (hub)
++ hub->ports[port1 - 1]->connect_type = type;
++}
++
++/**
++ * usb_get_hub_port_connect_type - Get the port's connect type
++ * @hdev: USB device belonging to the usb hub
++ * @port1: port num of the port
++ *
++ * Return: The connect type of the port if successful. Or
++ * USB_PORT_CONNECT_TYPE_UNKNOWN if input params are invalid.
++ */
++enum usb_port_connect_type
++usb_get_hub_port_connect_type(struct usb_device *hdev, int port1)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
++
++ if (!hub)
++ return USB_PORT_CONNECT_TYPE_UNKNOWN;
++
++ return hub->ports[port1 - 1]->connect_type;
++}
++
++void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
++ struct usb_hub_descriptor *desc)
++{
++ enum usb_port_connect_type connect_type;
++ int i;
++
++ if (!hub_is_superspeed(hdev)) {
++ for (i = 1; i <= hdev->maxchild; i++) {
++ connect_type = usb_get_hub_port_connect_type(hdev, i);
++
++ if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
++ u8 mask = 1 << (i%8);
++
++ if (!(desc->u.hs.DeviceRemovable[i/8] & mask)) {
++ dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n",
++ i);
++ desc->u.hs.DeviceRemovable[i/8] |= mask;
++ }
++ }
++ }
++ } else {
++ u16 port_removable = le16_to_cpu(desc->u.ss.DeviceRemovable);
++
++ for (i = 1; i <= hdev->maxchild; i++) {
++ connect_type = usb_get_hub_port_connect_type(hdev, i);
++
++ if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
++ u16 mask = 1 << i;
++
++ if (!(port_removable & mask)) {
++ dev_dbg(&hdev->dev, "usb port%d's DeviceRemovable is changed to 1 according to platform information.\n",
++ i);
++ port_removable |= mask;
++ }
++ }
++ }
++
++ desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
++ }
++}
++
++#ifdef CONFIG_ACPI
++/**
++ * usb_get_hub_port_acpi_handle - Get the usb port's acpi handle
++ * @hdev: USB device belonging to the usb hub
++ * @port1: port num of the port
++ *
++ * Return: Port's acpi handle if successful, %NULL if params are
++ * invalid.
++ */
++acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
++ int port1)
++{
++ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
++
++ if (!hub)
++ return NULL;
++
++ return ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
++}
++#endif
+diff -Nur linux-3.14.36/drivers/usb/core/message.c linux-openelec/drivers/usb/core/message.c
+--- linux-3.14.36/drivers/usb/core/message.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/core/message.c 2015-05-06 12:05:42.000000000 -0500
+@@ -178,7 +178,7 @@
+ *
+ * Return:
+ * If successful, 0. Otherwise a negative error number. The number of actual
+- * bytes transferred will be stored in the @actual_length paramater.
++ * bytes transferred will be stored in the @actual_length parameter.
+ */
+ int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout)
+diff -Nur linux-3.14.36/drivers/usb/core/urb.c linux-openelec/drivers/usb/core/urb.c
+--- linux-3.14.36/drivers/usb/core/urb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/core/urb.c 2015-05-06 12:05:42.000000000 -0500
+@@ -831,7 +831,7 @@
+ *
+ * this allows all outstanding URBs to be unlinked starting
+ * from the back of the queue. This function is asynchronous.
+- * The unlinking is just tiggered. It may happen after this
++ * The unlinking is just triggered. It may happen after this
+ * function has returned.
+ *
+ * This routine should not be called by a driver after its disconnect
+diff -Nur linux-3.14.36/drivers/usb/gadget/f_mass_storage.c linux-openelec/drivers/usb/gadget/f_mass_storage.c
+--- linux-3.14.36/drivers/usb/gadget/f_mass_storage.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/gadget/f_mass_storage.c 2015-05-06 12:05:41.000000000 -0500
+@@ -336,8 +336,15 @@
+
+ struct usb_ep *bulk_in;
+ struct usb_ep *bulk_out;
++#ifdef CONFIG_FSL_UTP
++ void *utp;
++#endif
+ };
+
++#ifdef CONFIG_FSL_UTP
++#include "fsl_updater.h"
++#endif
++
+ static inline int __fsg_is_set(struct fsg_common *common,
+ const char *func, unsigned line)
+ {
+@@ -1131,6 +1138,13 @@
+ }
+ #endif
+
++#ifdef CONFIG_FSL_UTP
++ if (utp_get_sense(common->fsg) == 0) { /* got the sense from the UTP */
++ sd = UTP_CTX(common->fsg)->sd;
++ sdinfo = UTP_CTX(common->fsg)->sdinfo;
++ valid = 0;
++ } else
++#endif
+ if (!curlun) { /* Unsupported LUNs are okay */
+ common->bad_lun_okay = 1;
+ sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+@@ -1152,6 +1166,9 @@
+ buf[7] = 18 - 8; /* Additional sense length */
+ buf[12] = ASC(sd);
+ buf[13] = ASCQ(sd);
++#ifdef CONFIG_FSL_UTP
++ put_unaligned_be32(UTP_CTX(common->fsg)->sdinfo_h, &buf[8]);
++#endif
+ return 18;
+ }
+
+@@ -1645,7 +1662,18 @@
+ sd = SS_INVALID_COMMAND;
+ } else if (sd != SS_NO_SENSE) {
+ DBG(common, "sending command-failure status\n");
++#ifdef CONFIG_FSL_UTP
++/*
++ * mfgtool host frequently reset bus during transfer
++ * - the response in csw to request sense will be 1 due to UTP change
++ * some storage information
++ * - host will reset the bus if response to request sense is 1
++ * - change the response to 0 if CONFIG_FSL_UTP is defined
++ */
++ status = US_BULK_STAT_OK;
++#else
+ status = US_BULK_STAT_FAIL;
++#endif
+ VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+ " info x%x\n",
+ SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+@@ -1836,6 +1864,13 @@
+ common->phase_error = 0;
+ common->short_packet_received = 0;
+
++#ifdef CONFIG_FSL_UTP
++ reply = utp_handle_message(common->fsg, common->cmnd, reply);
++
++ if (reply != -EINVAL)
++ return reply;
++#endif
++
+ down_read(&common->filesem); /* We're using the backing file */
+ switch (common->cmnd[0]) {
+
+@@ -2502,12 +2537,14 @@
+ /* Allow the thread to be frozen */
+ set_freezable();
+
++#ifndef CONFIG_FSL_UTP
+ /*
+ * Arrange for userspace references to be interpreted as kernel
+ * pointers. That way we can pass a kernel pointer to a routine
+ * that expects a __user pointer and it will work okay.
+ */
+ set_fs(get_ds());
++#endif
+
+ /* The main loop */
+ while (common->state != FSG_STATE_TERMINATED) {
+@@ -3096,6 +3133,10 @@
+
+ /*-------------------------------------------------------------------------*/
+
++#ifdef CONFIG_FSL_UTP
++#include "fsl_updater.c"
++#endif
++
+ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+ struct fsg_dev *fsg = fsg_from_func(f);
+@@ -3127,6 +3168,10 @@
+ fsg_intf_desc.bInterfaceNumber = i;
+ fsg->interface_number = i;
+
++#ifdef CONFIG_FSL_UTP
++ utp_init(fsg);
++#endif
++
+ /* Find all the endpoints we will use */
+ ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
+ if (!ep)
+@@ -3185,6 +3230,10 @@
+ }
+
+ usb_free_all_descriptors(&fsg->function);
++
++#ifdef CONFIG_FSL_UTP
++ utp_exit(fsg);
++#endif
+ }
+
+ static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
+diff -Nur linux-3.14.36/drivers/usb/gadget/fsl_updater.c linux-openelec/drivers/usb/gadget/fsl_updater.c
+--- linux-3.14.36/drivers/usb/gadget/fsl_updater.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/usb/gadget/fsl_updater.c 2015-05-06 12:05:41.000000000 -0500
+@@ -0,0 +1,594 @@
++/*
++ * Freescale UUT driver
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc.
++ * Copyright 2008-2009 Embedded Alley Solutions, Inc All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++static u64 get_be64(u8 *buf)
++{
++ return ((u64)get_unaligned_be32(buf) << 32) |
++ get_unaligned_be32(buf + 4);
++}
++
++static int utp_init(struct fsg_dev *fsg)
++{
++ init_waitqueue_head(&utp_context.wq);
++ init_waitqueue_head(&utp_context.list_full_wq);
++
++ INIT_LIST_HEAD(&utp_context.read);
++ INIT_LIST_HEAD(&utp_context.write);
++ mutex_init(&utp_context.lock);
++
++ /* the max message is 64KB */
++ utp_context.buffer = vmalloc(0x10000);
++ if (!utp_context.buffer)
++ return -EIO;
++ utp_context.utp_version = 0x1ull;
++ fsg->utp = &utp_context;
++ return misc_register(&utp_dev);
++}
++
++static void utp_exit(struct fsg_dev *fsg)
++{
++ vfree(utp_context.buffer);
++ misc_deregister(&utp_dev);
++}
++
++static struct utp_user_data *utp_user_data_alloc(size_t size)
++{
++ struct utp_user_data *uud;
++
++ uud = vmalloc(size + sizeof(*uud));
++ if (!uud)
++ return uud;
++ memset(uud, 0, size + sizeof(*uud));
++ uud->data.size = size + sizeof(uud->data);
++ INIT_LIST_HEAD(&uud->link);
++ return uud;
++}
++
++static void utp_user_data_free(struct utp_user_data *uud)
++{
++ mutex_lock(&utp_context.lock);
++ list_del(&uud->link);
++ mutex_unlock(&utp_context.lock);
++ vfree(uud);
++}
++
++/* Get the number of element for list */
++static u32 count_list(struct list_head *l)
++{
++ u32 count = 0;
++ struct list_head *tmp;
++
++ mutex_lock(&utp_context.lock);
++ list_for_each(tmp, l) {
++ count++;
++ }
++ mutex_unlock(&utp_context.lock);
++
++ return count;
++}
++/* The routine will not go on if utp_context.queue is empty */
++#define WAIT_ACTIVITY(queue) \
++ wait_event_interruptible(utp_context.wq, !list_empty(&utp_context.queue))
++
++/* Called by userspace program (uuc) */
++static ssize_t utp_file_read(struct file *file,
++ char __user *buf,
++ size_t size,
++ loff_t *off)
++{
++ struct utp_user_data *uud;
++ size_t size_to_put;
++ int free = 0;
++
++ WAIT_ACTIVITY(read);
++
++ mutex_lock(&utp_context.lock);
++ uud = list_first_entry(&utp_context.read, struct utp_user_data, link);
++ mutex_unlock(&utp_context.lock);
++ size_to_put = uud->data.size;
++
++ if (size >= size_to_put)
++ free = !0;
++ if (copy_to_user(buf, &uud->data, size_to_put)) {
++ printk(KERN_INFO "[ %s ] copy error\n", __func__);
++ return -EACCES;
++ }
++ if (free)
++ utp_user_data_free(uud);
++ else {
++ pr_info("sizeof = %d, size = %d\n",
++ sizeof(uud->data),
++ uud->data.size);
++
++ pr_err("Will not free utp_user_data, because buffer size = %d,"
++ "need to put %d\n", size, size_to_put);
++ }
++
++ /*
++ * The user program has already finished data process,
++ * go on getting data from the host
++ */
++ wake_up(&utp_context.list_full_wq);
++
++ return size_to_put;
++}
++
++static ssize_t utp_file_write(struct file *file, const char __user *buf,
++ size_t size, loff_t *off)
++{
++ struct utp_user_data *uud;
++
++ if (size < sizeof(uud->data))
++ return -EINVAL;
++ uud = utp_user_data_alloc(size);
++ if (uud == NULL)
++ return -ENOMEM;
++ if (copy_from_user(&uud->data, buf, size)) {
++ printk(KERN_INFO "[ %s ] copy error!\n", __func__);
++ vfree(uud);
++ return -EACCES;
++ }
++ mutex_lock(&utp_context.lock);
++ list_add_tail(&uud->link, &utp_context.write);
++ /* Go on EXEC routine process */
++ wake_up(&utp_context.wq);
++ mutex_unlock(&utp_context.lock);
++ return size;
++}
++
++/*
++ * uuc should change to use soc bus infrastructure to soc information
++ * /sys/devices/soc0/soc_id
++ * this function can be removed.
++ */
++static long
++utp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ int cpu_id = 0;
++
++ switch (cmd) {
++ case UTP_GET_CPU_ID:
++ return put_user(cpu_id, (int __user *)arg);
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++/* Will be called when the host wants to get the sense data */
++static int utp_get_sense(struct fsg_dev *fsg)
++{
++ if (UTP_CTX(fsg)->processed == 0)
++ return -1;
++
++ UTP_CTX(fsg)->processed = 0;
++ return 0;
++}
++
++static int utp_do_read(struct fsg_dev *fsg, void *data, size_t size)
++{
++ struct fsg_buffhd *bh;
++ int rc;
++ u32 amount_left;
++ unsigned int amount;
++
++ /* Get the starting Logical Block Address and check that it's
++ * not too big */
++
++ amount_left = size;
++ if (unlikely(amount_left == 0))
++ return -EIO; /* No default reply*/
++
++ pr_debug("%s: sending %d\n", __func__, size);
++ for (;;) {
++ /* Figure out how much we need to read:
++ * Try to read the remaining amount.
++ * But don't read more than the buffer size.
++ * And don't try to read past the end of the file.
++ * Finally, if we're not at a page boundary, don't read past
++ * the next page.
++ * If this means reading 0 then we were asked to read past
++ * the end of file. */
++ amount = min((unsigned int) amount_left, FSG_BUFLEN);
++
++ /* Wait for the next buffer to become available */
++ bh = fsg->common->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(fsg->common);
++ if (rc)
++ return rc;
++ }
++
++ /* If we were asked to read past the end of file,
++ * end with an empty buffer. */
++ if (amount == 0) {
++ bh->inreq->length = 0;
++ bh->state = BUF_STATE_FULL;
++ break;
++ }
++
++ /* Perform the read */
++ pr_info("Copied to %p, %d bytes started from %d\n",
++ bh->buf, amount, size - amount_left);
++ /* from upt buffer to file_storeage buffer */
++ memcpy(bh->buf, data + size - amount_left, amount);
++ amount_left -= amount;
++ fsg->common->residue -= amount;
++
++ bh->inreq->length = amount;
++ bh->state = BUF_STATE_FULL;
++
++ /* Send this buffer and go read some more */
++ bh->inreq->zero = 0;
++
++ /* USB Physical transfer: Data from device to host */
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++
++ fsg->common->next_buffhd_to_fill = bh->next;
++
++ if (amount_left <= 0)
++ break;
++ }
++
++ return size - amount_left;
++}
++
++static int utp_do_write(struct fsg_dev *fsg, void *data, size_t size)
++{
++ struct fsg_buffhd *bh;
++ int get_some_more;
++ u32 amount_left_to_req, amount_left_to_write;
++ unsigned int amount;
++ int rc;
++ loff_t offset;
++
++ /* Carry out the file writes */
++ get_some_more = 1;
++ amount_left_to_req = amount_left_to_write = size;
++
++ if (unlikely(amount_left_to_write == 0))
++ return -EIO;
++
++ offset = 0;
++ while (amount_left_to_write > 0) {
++
++ /* Queue a request for more data from the host */
++ bh = fsg->common->next_buffhd_to_fill;
++ if (bh->state == BUF_STATE_EMPTY && get_some_more) {
++
++ /* Figure out how much we want to get:
++ * Try to get the remaining amount.
++ * But don't get more than the buffer size.
++ * And don't try to go past the end of the file.
++ * If we're not at a page boundary,
++ * don't go past the next page.
++ * If this means getting 0, then we were asked
++ * to write past the end of file.
++ * Finally, round down to a block boundary. */
++ amount = min(amount_left_to_req, FSG_BUFLEN);
++
++ if (amount == 0) {
++ get_some_more = 0;
++ /* cry now */
++ continue;
++ }
++
++ /* Get the next buffer */
++ amount_left_to_req -= amount;
++ if (amount_left_to_req == 0)
++ get_some_more = 0;
++
++ /* amount is always divisible by 512, hence by
++ * the bulk-out maxpacket size */
++ bh->outreq->length = bh->bulk_out_intended_length =
++ amount;
++ bh->outreq->short_not_ok = 1;
++ start_transfer(fsg, fsg->bulk_out, bh->outreq,
++ &bh->outreq_busy, &bh->state);
++ fsg->common->next_buffhd_to_fill = bh->next;
++ continue;
++ }
++
++ /* Write the received data to the backing file */
++ bh = fsg->common->next_buffhd_to_drain;
++ if (bh->state == BUF_STATE_EMPTY && !get_some_more)
++ break; /* We stopped early */
++ if (bh->state == BUF_STATE_FULL) {
++ smp_rmb();
++ fsg->common->next_buffhd_to_drain = bh->next;
++ bh->state = BUF_STATE_EMPTY;
++
++ /* Did something go wrong with the transfer? */
++ if (bh->outreq->status != 0)
++ /* cry again, COMMUNICATION_FAILURE */
++ break;
++
++ amount = bh->outreq->actual;
++
++ /* Perform the write */
++ memcpy(data + offset, bh->buf, amount);
++
++ offset += amount;
++ if (signal_pending(current))
++ return -EINTR; /* Interrupted!*/
++ amount_left_to_write -= amount;
++ fsg->common->residue -= amount;
++
++ /* Did the host decide to stop early? */
++ if (bh->outreq->actual != bh->outreq->length) {
++ fsg->common->short_packet_received = 1;
++ break;
++ }
++ continue;
++ }
++
++ /* Wait for something to happen */
++ rc = sleep_thread(fsg->common);
++ if (rc)
++ return rc;
++ }
++
++ return -EIO;
++}
++
++static inline void utp_set_sense(struct fsg_dev *fsg, u16 code, u64 reply)
++{
++ UTP_CTX(fsg)->processed = true;
++ UTP_CTX(fsg)->sdinfo = reply & 0xFFFFFFFF;
++ UTP_CTX(fsg)->sdinfo_h = (reply >> 32) & 0xFFFFFFFF;
++ UTP_CTX(fsg)->sd = (UTP_SENSE_KEY << 16) | code;
++}
++
++static void utp_poll(struct fsg_dev *fsg)
++{
++ struct utp_context *ctx = UTP_CTX(fsg);
++ struct utp_user_data *uud = NULL;
++
++ mutex_lock(&ctx->lock);
++ if (!list_empty(&ctx->write))
++ uud = list_first_entry(&ctx->write, struct utp_user_data, link);
++ mutex_unlock(&ctx->lock);
++
++ if (uud) {
++ if (uud->data.flags & UTP_FLAG_STATUS) {
++ printk(KERN_WARNING "%s: exit with status %d\n",
++ __func__, uud->data.status);
++ UTP_SS_EXIT(fsg, uud->data.status);
++ } else if (uud->data.flags & UTP_FLAG_REPORT_BUSY) {
++ UTP_SS_BUSY(fsg, --ctx->counter);
++ } else {
++ printk("%s: pass returned.\n", __func__);
++ UTP_SS_PASS(fsg);
++ }
++ utp_user_data_free(uud);
++ } else {
++ if (utp_context.cur_state & UTP_FLAG_DATA) {
++ if (count_list(&ctx->read) < 7) {
++ pr_debug("%s: pass returned in POLL stage. \n", __func__);
++ UTP_SS_PASS(fsg);
++ utp_context.cur_state = 0;
++ return;
++ }
++ }
++ UTP_SS_BUSY(fsg, --ctx->counter);
++ }
++}
++
++static int utp_exec(struct fsg_dev *fsg,
++ char *command,
++ int cmdsize,
++ unsigned long long payload)
++{
++ struct utp_user_data *uud = NULL, *uud2r;
++ struct utp_context *ctx = UTP_CTX(fsg);
++
++ ctx->counter = 0xFFFF;
++ uud2r = utp_user_data_alloc(cmdsize + 1);
++ if (!uud2r)
++ return -ENOMEM;
++ uud2r->data.flags = UTP_FLAG_COMMAND;
++ uud2r->data.payload = payload;
++ strncpy(uud2r->data.command, command, cmdsize);
++
++ mutex_lock(&ctx->lock);
++ list_add_tail(&uud2r->link, &ctx->read);
++ mutex_unlock(&ctx->lock);
++ /* wake up the read routine */
++ wake_up(&ctx->wq);
++
++ if (command[0] == '!') /* there will be no response */
++ return 0;
++
++ /*
++ * the user program (uuc) will return utp_message
++ * and add list to write list
++ */
++ WAIT_ACTIVITY(write);
++
++ mutex_lock(&ctx->lock);
++ if (!list_empty(&ctx->write)) {
++ uud = list_first_entry(&ctx->write, struct utp_user_data, link);
++#ifdef DEBUG
++ pr_info("UUD:\n\tFlags = %02X\n", uud->data.flags);
++ if (uud->data.flags & UTP_FLAG_DATA) {
++ pr_info("\tbufsize = %d\n", uud->data.bufsize);
++ print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_NONE,
++ 16, 2, uud->data.data, uud->data.bufsize, true);
++ }
++ if (uud->data.flags & UTP_FLAG_REPORT_BUSY)
++ pr_info("\tBUSY\n");
++#endif
++ }
++ mutex_unlock(&ctx->lock);
++
++ if (uud->data.flags & UTP_FLAG_DATA) {
++ memcpy(ctx->buffer, uud->data.data, uud->data.bufsize);
++ UTP_SS_SIZE(fsg, uud->data.bufsize);
++ } else if (uud->data.flags & UTP_FLAG_REPORT_BUSY) {
++ UTP_SS_BUSY(fsg, ctx->counter);
++ } else if (uud->data.flags & UTP_FLAG_STATUS) {
++ printk(KERN_WARNING "%s: exit with status %d\n", __func__,
++ uud->data.status);
++ UTP_SS_EXIT(fsg, uud->data.status);
++ } else {
++ pr_debug("%s: pass returned in EXEC stage. \n", __func__);
++ UTP_SS_PASS(fsg);
++ }
++ utp_user_data_free(uud);
++ return 0;
++}
++
++static int utp_send_status(struct fsg_dev *fsg)
++{
++ struct fsg_buffhd *bh;
++ u8 status = US_BULK_STAT_OK;
++ struct bulk_cs_wrap *csw;
++ int rc;
++
++ /* Wait for the next buffer to become available */
++ bh = fsg->common->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(fsg->common);
++ if (rc)
++ return rc;
++ }
++
++ if (fsg->common->phase_error) {
++ DBG(fsg, "sending phase-error status\n");
++ status = US_BULK_STAT_PHASE;
++
++ } else if ((UTP_CTX(fsg)->sd & 0xFFFF) != UTP_REPLY_PASS) {
++ status = US_BULK_STAT_FAIL;
++ }
++
++ csw = bh->buf;
++
++ /* Store and send the Bulk-only CSW */
++ csw->Signature = __constant_cpu_to_le32(US_BULK_CS_SIGN);
++ csw->Tag = fsg->common->tag;
++ csw->Residue = cpu_to_le32(fsg->common->residue);
++ csw->Status = status;
++
++ bh->inreq->length = US_BULK_CS_WRAP_LEN;
++ bh->inreq->zero = 0;
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ fsg->common->next_buffhd_to_fill = bh->next;
++ return 0;
++}
++
++static int utp_handle_message(struct fsg_dev *fsg,
++ char *cdb_data,
++ int default_reply)
++{
++ struct utp_msg *m = (struct utp_msg *)cdb_data;
++ void *data = NULL;
++ int r;
++ struct utp_user_data *uud2r;
++ unsigned long long param;
++ unsigned long tag;
++
++ if (m->f0 != 0xF0)
++ return default_reply;
++
++ tag = get_unaligned_be32((void *)&m->utp_msg_tag);
++ param = get_be64((void *)&m->param);
++ pr_debug("Type 0x%x, tag 0x%08lx, param %llx\n",
++ m->utp_msg_type, tag, param);
++
++ switch ((enum utp_msg_type)m->utp_msg_type) {
++
++ case UTP_POLL:
++ if (get_be64((void *)&m->param) == 1) {
++ pr_debug("%s: version request\n", __func__);
++ UTP_SS_EXIT(fsg, UTP_CTX(fsg)->utp_version);
++ break;
++ }
++ utp_poll(fsg);
++ break;
++ case UTP_EXEC:
++ pr_debug("%s: EXEC\n", __func__);
++ data = vmalloc(fsg->common->data_size);
++ memset(data, 0, fsg->common->data_size);
++ /* copy data from usb buffer to utp buffer */
++ utp_do_write(fsg, data, fsg->common->data_size);
++ utp_exec(fsg, data, fsg->common->data_size, param);
++ vfree(data);
++ break;
++ case UTP_GET: /* data from device to host */
++ pr_debug("%s: GET, %d bytes\n", __func__,
++ fsg->common->data_size);
++ r = utp_do_read(fsg, UTP_CTX(fsg)->buffer,
++ fsg->common->data_size);
++ UTP_SS_PASS(fsg);
++ break;
++ case UTP_PUT:
++ utp_context.cur_state = UTP_FLAG_DATA;
++ pr_debug("%s: PUT, Received %d bytes\n", __func__, fsg->common->data_size);/* data from host to device */
++ uud2r = utp_user_data_alloc(fsg->common->data_size);
++ if (!uud2r)
++ return -ENOMEM;
++ uud2r->data.bufsize = fsg->common->data_size;
++ uud2r->data.flags = UTP_FLAG_DATA;
++ utp_do_write(fsg, uud2r->data.data, fsg->common->data_size);
++ /* don't know what will be written */
++ mutex_lock(&UTP_CTX(fsg)->lock);
++ list_add_tail(&uud2r->link, &UTP_CTX(fsg)->read);
++ mutex_unlock(&UTP_CTX(fsg)->lock);
++ wake_up(&UTP_CTX(fsg)->wq);
++ /*
++ * Return PASS or FAIL according to uuc's status
++ * Please open it if need to check uuc's status
++ * and use another version uuc
++ */
++#if 0
++ struct utp_user_data *uud = NULL;
++ struct utp_context *ctx;
++ WAIT_ACTIVITY(write);
++ ctx = UTP_CTX(fsg);
++ mutex_lock(&ctx->lock);
++
++ if (!list_empty(&ctx->write))
++ uud = list_first_entry(&ctx->write,
++ struct utp_user_data, link);
++
++ mutex_unlock(&ctx->lock);
++ if (uud) {
++ if (uud->data.flags & UTP_FLAG_STATUS) {
++ printk(KERN_WARNING "%s: exit with status %d\n",
++ __func__, uud->data.status);
++ UTP_SS_EXIT(fsg, uud->data.status);
++ } else {
++ pr_debug("%s: pass\n", __func__);
++ UTP_SS_PASS(fsg);
++ }
++ utp_user_data_free(uud);
++ } else{
++ UTP_SS_PASS(fsg);
++ }
++#endif
++ if (count_list(&UTP_CTX(fsg)->read) < 7) {
++ utp_context.cur_state = 0;
++ UTP_SS_PASS(fsg);
++ } else
++ UTP_SS_BUSY(fsg, UTP_CTX(fsg)->counter);
++
++ break;
++ }
++
++ utp_send_status(fsg);
++ return -1;
++}
+diff -Nur linux-3.14.36/drivers/usb/gadget/fsl_updater.h linux-openelec/drivers/usb/gadget/fsl_updater.h
+--- linux-3.14.36/drivers/usb/gadget/fsl_updater.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/usb/gadget/fsl_updater.h 2015-05-06 12:05:41.000000000 -0500
+@@ -0,0 +1,150 @@
++/*
++ * Freescale UUT driver
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc.
++ * Copyright 2008-2009 Embedded Alley Solutions, Inc All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef __FSL_UPDATER_H
++#define __FSL_UPDATER_H
++
++#include <linux/miscdevice.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <linux/ioctl.h>
++/* #include <mach/hardware.h> */
++
++static int utp_init(struct fsg_dev *fsg);
++static void utp_exit(struct fsg_dev *fsg);
++static ssize_t utp_file_read(struct file *file,
++ char __user *buf,
++ size_t size,
++ loff_t *off);
++
++static ssize_t utp_file_write(struct file *file,
++ const char __user *buf,
++ size_t size,
++ loff_t *off);
++
++static long utp_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg);
++static struct utp_user_data *utp_user_data_alloc(size_t size);
++static void utp_user_data_free(struct utp_user_data *uud);
++static int utp_get_sense(struct fsg_dev *fsg);
++static int utp_do_read(struct fsg_dev *fsg, void *data, size_t size);
++static int utp_do_write(struct fsg_dev *fsg, void *data, size_t size);
++static inline void utp_set_sense(struct fsg_dev *fsg, u16 code, u64 reply);
++static int utp_handle_message(struct fsg_dev *fsg,
++ char *cdb_data,
++ int default_reply);
++
++#define UTP_REPLY_PASS 0
++#define UTP_REPLY_EXIT 0x8001
++#define UTP_REPLY_BUSY 0x8002
++#define UTP_REPLY_SIZE 0x8003
++#define UTP_SENSE_KEY 9
++
++#define UTP_MINOR 222
++/* MISC_DYNAMIC_MINOR would be better, but... */
++
++#define UTP_COMMAND_SIZE 80
++
++#define UTP_SS_EXIT(fsg, r) utp_set_sense(fsg, UTP_REPLY_EXIT, (u64)r)
++#define UTP_SS_PASS(fsg) utp_set_sense(fsg, UTP_REPLY_PASS, 0)
++#define UTP_SS_BUSY(fsg, r) utp_set_sense(fsg, UTP_REPLY_BUSY, (u64)r)
++#define UTP_SS_SIZE(fsg, r) utp_set_sense(fsg, UTP_REPLY_SIZE, (u64)r)
++
++#define UTP_IOCTL_BASE 'U'
++#define UTP_GET_CPU_ID _IOR(UTP_IOCTL_BASE, 0, int)
++/* the structure of utp message which is mapped to 16-byte SCSI CBW's CDB */
++#pragma pack(1)
++struct utp_msg {
++ u8 f0;
++ u8 utp_msg_type;
++ u32 utp_msg_tag;
++ union {
++ struct {
++ u32 param_lsb;
++ u32 param_msb;
++ };
++ u64 param;
++ };
++};
++
++enum utp_msg_type {
++ UTP_POLL = 0,
++ UTP_EXEC,
++ UTP_GET,
++ UTP_PUT,
++};
++
++static struct utp_context {
++ wait_queue_head_t wq;
++ wait_queue_head_t list_full_wq;
++ struct mutex lock;
++ struct list_head read;
++ struct list_head write;
++ u32 sd, sdinfo, sdinfo_h; /* sense data */
++ int processed;
++ u8 *buffer;
++ u32 counter;
++ u64 utp_version;
++ u32 cur_state;
++} utp_context;
++
++static const struct file_operations utp_fops = {
++ .open = nonseekable_open,
++ .read = utp_file_read,
++ .write = utp_file_write,
++ /* .ioctl = utp_ioctl, */
++ .unlocked_ioctl = utp_ioctl,
++};
++
++static struct miscdevice utp_dev = {
++ .minor = UTP_MINOR,
++ .name = "utp",
++ .fops = &utp_fops,
++};
++
++#define UTP_FLAG_COMMAND 0x00000001
++#define UTP_FLAG_DATA 0x00000002
++#define UTP_FLAG_STATUS 0x00000004
++#define UTP_FLAG_REPORT_BUSY 0x10000000
++struct utp_message {
++ u32 flags;
++ size_t size;
++ union {
++ struct {
++ u64 payload;
++ char command[1];
++ };
++ struct {
++ size_t bufsize;
++ u8 data[1];
++ };
++ u32 status;
++ };
++};
++
++struct utp_user_data {
++ struct list_head link;
++ struct utp_message data;
++};
++#pragma pack()
++
++static inline struct utp_context *UTP_CTX(struct fsg_dev *fsg)
++{
++ return (struct utp_context *)fsg->utp;
++}
++
++#endif /* __FSL_UPDATER_H */
++
+diff -Nur linux-3.14.36/drivers/usb/gadget/Kconfig linux-openelec/drivers/usb/gadget/Kconfig
+--- linux-3.14.36/drivers/usb/gadget/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/gadget/Kconfig 2015-07-24 18:03:28.752842002 -0500
+@@ -953,6 +953,12 @@
+ Say "y" to link the driver statically, or "m" to build
+ a dynamically linked module called "g_mass_storage".
+
++config FSL_UTP
++ bool "UTP over Storage Gadget"
++ depends on USB_MASS_STORAGE
++ help
++ Freescale's extension to MSC protocol
++
+ config USB_GADGET_TARGET
+ tristate "USB Gadget Target Fabric Module"
+ depends on TARGET_CORE
+diff -Nur linux-3.14.36/drivers/usb/gadget/mass_storage.c linux-openelec/drivers/usb/gadget/mass_storage.c
+--- linux-3.14.36/drivers/usb/gadget/mass_storage.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/gadget/mass_storage.c 2015-05-06 12:05:41.000000000 -0500
+@@ -266,7 +266,7 @@
+ {
+ return usb_composite_probe(&msg_driver);
+ }
+-module_init(msg_init);
++late_initcall(msg_init);
+
+ static void msg_cleanup(void)
+ {
+diff -Nur linux-3.14.36/drivers/usb/host/ehci-h20ahb.c linux-openelec/drivers/usb/host/ehci-h20ahb.c
+--- linux-3.14.36/drivers/usb/host/ehci-h20ahb.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/usb/host/ehci-h20ahb.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,341 @@
++/*
++ * Copyright (C) 2007-2013 Texas Instruments, Inc.
++ * Author: Vikram Pandita <vikram.pandita@ti.com>
++ * Author: Anand Gadiyar <gadiyar@ti.com>
++ * Author: Keshava Munegowda <keshava_mgowda@ti.com>
++ * Author: Roger Quadros <rogerq@ti.com>
++ *
++ * Copyright (C) 2009 Nokia Corporation
++ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
++ *
++ * Based on ehci-omap.c - driver for USBHOST on OMAP3/4 processors
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/usb/ulpi.h>
++#include <linux/pm_runtime.h>
++#include <linux/gpio.h>
++#include <linux/clk.h>
++#include <linux/usb.h>
++#include <linux/usb/hcd.h>
++#include <linux/of.h>
++#include <linux/dma-mapping.h>
++
++#include "ehci.h"
++
++#define H20AHB_HS_USB_PORTS 1
++
++/* EHCI Synopsys-specific Register Set */
++#define EHCI_INSNREG04 (0xA0)
++#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
++#define EHCI_INSNREG05_ULPI (0xA4)
++#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
++#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
++#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22
++#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16
++#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
++#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
++
++#define DRIVER_DESC "H20AHB-EHCI Host Controller driver"
++
++static const char hcd_name[] = "ehci-h20ahb";
++
++/*-------------------------------------------------------------------------*/
++
++struct h20ahb_hcd {
++ struct usb_phy *phy[H20AHB_HS_USB_PORTS]; /* one PHY for each port */
++ int nports;
++};
++
++static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
++{
++ __raw_writel(val, base + reg);
++}
++
++static inline u32 ehci_read(void __iomem *base, u32 reg)
++{
++ return __raw_readl(base + reg);
++}
++
++/* configure so an HC device and id are always provided */
++/* always called with process context; sleeping is OK */
++
++static struct hc_driver __read_mostly ehci_h20ahb_hc_driver;
++
++static const struct ehci_driver_overrides ehci_h20ahb_overrides __initdata = {
++ .extra_priv_size = sizeof(struct h20ahb_hcd),
++};
++
++static int ehci_h20ahb_phy_read(struct usb_phy *x, u32 reg)
++{
++ u32 val = (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT) |
++ (1 << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT) |
++ (3 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT) |
++ (reg << EHCI_INSNREG05_ULPI_REGADD_SHIFT);
++ ehci_write(x->io_priv, 0, val);
++ while ((val = ehci_read(x->io_priv, 0)) &
++ (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT));
++ return val & 0xff;
++}
++
++static int ehci_h20ahb_phy_write(struct usb_phy *x, u32 val, u32 reg)
++{
++ u32 v = (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT) |
++ (1 << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT) |
++ (2 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT) |
++ (reg << EHCI_INSNREG05_ULPI_REGADD_SHIFT) |
++ (val & 0xff);
++ ehci_write(x->io_priv, 0, v);
++ while ((v = ehci_read(x->io_priv, 0)) &
++ (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT));
++ return 0;
++}
++
++static struct usb_phy_io_ops ehci_h20ahb_phy_io_ops = {
++ .read = ehci_h20ahb_phy_read,
++ .write = ehci_h20ahb_phy_write,
++};
++
++
++/**
++ * ehci_hcd_h20ahb_probe - initialize Synopsis-based HCDs
++ *
++ * Allocates basic resources for this USB host controller, and
++ * then invokes the start() method for the HCD associated with it
++ * through the hotplug entry's driver_data.
++ */
++static int ehci_hcd_h20ahb_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct resource *res;
++ struct usb_hcd *hcd;
++ void __iomem *regs;
++ int ret;
++ int irq;
++ int i;
++ struct h20ahb_hcd *h20ahb;
++
++ if (usb_disabled())
++ return -ENODEV;
++
++ /* if (!dev->parent) {
++ dev_err(dev, "Missing parent device\n");
++ return -ENODEV;
++ }*/
++
++ /* For DT boot, get platform data from parent. i.e. usbhshost */
++ /*if (dev->of_node) {
++ pdata = dev_get_platdata(dev->parent);
++ dev->platform_data = pdata;
++ }
++
++ if (!pdata) {
++ dev_err(dev, "Missing platform data\n");
++ return -ENODEV;
++ }*/
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ dev_err(dev, "EHCI irq failed\n");
++ return -ENODEV;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(regs))
++ return PTR_ERR(regs);
++
++ /*
++ * Right now device-tree probed devices don't get dma_mask set.
++ * Since shared usb code relies on it, set it here for now.
++ * Once we have dma capability bindings this can go away.
++ */
++ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
++ if (ret)
++ return ret;
++
++ ret = -ENODEV;
++ hcd = usb_create_hcd(&ehci_h20ahb_hc_driver, dev,
++ dev_name(dev));
++ if (!hcd) {
++ dev_err(dev, "Failed to create HCD\n");
++ return -ENOMEM;
++ }
++
++ hcd->rsrc_start = res->start;
++ hcd->rsrc_len = resource_size(res);
++ hcd->regs = regs;
++ hcd_to_ehci(hcd)->caps = regs;
++
++ h20ahb = (struct h20ahb_hcd *)hcd_to_ehci(hcd)->priv;
++ h20ahb->nports = 1;
++
++ platform_set_drvdata(pdev, hcd);
++
++ /* get the PHY devices if needed */
++ for (i = 0 ; i < h20ahb->nports ; i++) {
++ struct usb_phy *phy;
++
++ /* get the PHY device */
++#if 0
++ if (dev->of_node)
++ phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
++ else
++ phy = devm_usb_get_phy_dev(dev, i);
++#endif
++ phy = otg_ulpi_create(&ehci_h20ahb_phy_io_ops, 0);
++ if (IS_ERR(phy)) {
++ ret = PTR_ERR(phy);
++ dev_err(dev, "Can't get PHY device for port %d: %d\n",
++ i, ret);
++ goto err_phy;
++ }
++ phy->dev = dev;
++ usb_add_phy_dev(phy);
++
++ h20ahb->phy[i] = phy;
++ phy->io_priv = hcd->regs + EHCI_INSNREG05_ULPI;
++
++#if 0
++ usb_phy_init(h20ahb->phy[i]);
++ /* bring PHY out of suspend */
++ usb_phy_set_suspend(h20ahb->phy[i], 0);
++#endif
++ }
++
++ /* make the first port's phy the one used by hcd as well */
++ hcd->phy = h20ahb->phy[0];
++
++ pm_runtime_enable(dev);
++ pm_runtime_get_sync(dev);
++
++ /*
++ * An undocumented "feature" in the H20AHB EHCI controller,
++ * causes suspended ports to be taken out of suspend when
++ * the USBCMD.Run/Stop bit is cleared (for example when
++ * we do ehci_bus_suspend).
++ * This breaks suspend-resume if the root-hub is allowed
++ * to suspend. Writing 1 to this undocumented register bit
++ * disables this feature and restores normal behavior.
++ */
++ ehci_write(regs, EHCI_INSNREG04,
++ EHCI_INSNREG04_DISABLE_UNSUSPEND);
++
++ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
++ if (ret) {
++ dev_err(dev, "failed to add hcd with err %d\n", ret);
++ goto err_pm_runtime;
++ }
++ device_wakeup_enable(hcd->self.controller);
++
++ /*
++ * Bring PHYs out of reset for non PHY modes.
++ * Even though HSIC mode is a PHY-less mode, the reset
++ * line exists between the chips and can be modelled
++ * as a PHY device for reset control.
++ */
++ for (i = 0; i < h20ahb->nports; i++) {
++ usb_phy_init(h20ahb->phy[i]);
++ /* bring PHY out of suspend */
++ usb_phy_set_suspend(h20ahb->phy[i], 0);
++ }
++
++ return 0;
++
++err_pm_runtime:
++ pm_runtime_put_sync(dev);
++
++err_phy:
++ for (i = 0; i < h20ahb->nports; i++) {
++ if (h20ahb->phy[i])
++ usb_phy_shutdown(h20ahb->phy[i]);
++ }
++
++ usb_put_hcd(hcd);
++
++ return ret;
++}
++
++
++/**
++ * ehci_hcd_h20ahb_remove - shutdown processing for EHCI HCDs
++ * @pdev: USB Host Controller being removed
++ *
++ * Reverses the effect of usb_ehci_hcd_h20ahb_probe(), first invoking
++ * the HCD's stop() method. It is always called from a thread
++ * context, normally "rmmod", "apmd", or something similar.
++ */
++static int ehci_hcd_h20ahb_remove(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct usb_hcd *hcd = dev_get_drvdata(dev);
++ struct h20ahb_hcd *h20ahb = (struct h20ahb_hcd *)hcd_to_ehci(hcd)->priv;
++ int i;
++
++ usb_remove_hcd(hcd);
++
++ for (i = 0; i < h20ahb->nports; i++) {
++ if (h20ahb->phy[i])
++ usb_phy_shutdown(h20ahb->phy[i]);
++ }
++
++ usb_put_hcd(hcd);
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++
++ return 0;
++}
++
++static const struct of_device_id h20ahb_ehci_dt_ids[] = {
++ { .compatible = "snps,ehci-h20ahb" },
++ { }
++};
++
++MODULE_DEVICE_TABLE(of, h20ahb_ehci_dt_ids);
++
++static struct platform_driver ehci_hcd_h20ahb_driver = {
++ .probe = ehci_hcd_h20ahb_probe,
++ .remove = ehci_hcd_h20ahb_remove,
++ .shutdown = usb_hcd_platform_shutdown,
++ /*.suspend = ehci_hcd_h20ahb_suspend, */
++ /*.resume = ehci_hcd_h20ahb_resume, */
++ .driver = {
++ .name = hcd_name,
++ .of_match_table = h20ahb_ehci_dt_ids,
++ }
++};
++
++/*-------------------------------------------------------------------------*/
++
++static int __init ehci_h20ahb_init(void)
++{
++ if (usb_disabled())
++ return -ENODEV;
++
++ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
++
++ ehci_init_driver(&ehci_h20ahb_hc_driver, &ehci_h20ahb_overrides);
++ return platform_driver_register(&ehci_hcd_h20ahb_driver);
++}
++module_init(ehci_h20ahb_init);
++
++static void __exit ehci_h20ahb_cleanup(void)
++{
++ platform_driver_unregister(&ehci_hcd_h20ahb_driver);
++}
++module_exit(ehci_h20ahb_cleanup);
++
++MODULE_ALIAS("platform:ehci-h20ahb");
++MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/usb/host/ehci-hcd.c linux-openelec/drivers/usb/host/ehci-hcd.c
+--- linux-3.14.36/drivers/usb/host/ehci-hcd.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/host/ehci-hcd.c 2015-07-24 18:03:28.488842002 -0500
+@@ -590,11 +590,16 @@
+ */
+ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+ if (HCC_64BIT_ADDR(hcc_params)) {
+- ehci_writel(ehci, 0, &ehci->regs->segment);
+-#if 0
+-// this is deeply broken on almost all architectures
++#ifdef CONFIG_ARM64
++ ehci_writel(ehci, ehci->periodic_dma >> 32, &ehci->regs->segment);
++ /*
++ * this is deeply broken on almost all architectures
++ * but arm64 can use it so enable it
++ */
+ if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
+ ehci_info(ehci, "enabled 64bit DMA\n");
++#else
++ ehci_writel(ehci, 0, &ehci->regs->segment);
+ #endif
+ }
+
+diff -Nur linux-3.14.36/drivers/usb/host/ehci-hcd.c.orig linux-openelec/drivers/usb/host/ehci-hcd.c.orig
+--- linux-3.14.36/drivers/usb/host/ehci-hcd.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/usb/host/ehci-hcd.c.orig 2015-05-06 12:05:41.000000000 -0500
+@@ -0,0 +1,1405 @@
++/*
++ * Enhanced Host Controller Interface (EHCI) driver for USB.
++ *
++ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
++ *
++ * Copyright (c) 2000-2004 by David Brownell
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software Foundation,
++ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/dmapool.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/sched.h>
++#include <linux/vmalloc.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/hrtimer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/usb.h>
++#include <linux/usb/hcd.h>
++#include <linux/moduleparam.h>
++#include <linux/dma-mapping.h>
++#include <linux/debugfs.h>
++#include <linux/slab.h>
++
++#include <asm/byteorder.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/unaligned.h>
++
++#if defined(CONFIG_PPC_PS3)
++#include <asm/firmware.h>
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * EHCI hc_driver implementation ... experimental, incomplete.
++ * Based on the final 1.0 register interface specification.
++ *
++ * USB 2.0 shows up in upcoming www.pcmcia.org technology.
++ * First was PCMCIA, like ISA; then CardBus, which is PCI.
++ * Next comes "CardBay", using USB 2.0 signals.
++ *
++ * Contains additional contributions by Brad Hards, Rory Bolt, and others.
++ * Special thanks to Intel and VIA for providing host controllers to
++ * test this driver on, and Cypress (including In-System Design) for
++ * providing early devices for those host controllers to talk to!
++ */
++
++#define DRIVER_AUTHOR "David Brownell"
++#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
++
++static const char hcd_name [] = "ehci_hcd";
++
++
++#undef EHCI_URB_TRACE
++
++/* magic numbers that can affect system performance */
++#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
++#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
++#define EHCI_TUNE_RL_TT 0
++#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
++#define EHCI_TUNE_MULT_TT 1
++/*
++ * Some drivers think it's safe to schedule isochronous transfers more than
++ * 256 ms into the future (partly as a result of an old bug in the scheduling
++ * code). In an attempt to avoid trouble, we will use a minimum scheduling
++ * length of 512 frames instead of 256.
++ */
++#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
++
++/* Initial IRQ latency: faster than hw default */
++static int log2_irq_thresh = 0; // 0 to 6
++module_param (log2_irq_thresh, int, S_IRUGO);
++MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
++
++/* initial park setting: slower than hw default */
++static unsigned park = 0;
++module_param (park, uint, S_IRUGO);
++MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
++
++/* for flakey hardware, ignore overcurrent indicators */
++static bool ignore_oc = 0;
++module_param (ignore_oc, bool, S_IRUGO);
++MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
++
++#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
++
++/*-------------------------------------------------------------------------*/
++
++#include "ehci.h"
++#include "pci-quirks.h"
++
++static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
++ struct ehci_tt *tt);
++
++/*
++ * The MosChip MCS9990 controller updates its microframe counter
++ * a little before the frame counter, and occasionally we will read
++ * the invalid intermediate value. Avoid problems by checking the
++ * microframe number (the low-order 3 bits); if they are 0 then
++ * re-read the register to get the correct value.
++ */
++static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci)
++{
++ unsigned uf;
++
++ uf = ehci_readl(ehci, &ehci->regs->frame_index);
++ if (unlikely((uf & 7) == 0))
++ uf = ehci_readl(ehci, &ehci->regs->frame_index);
++ return uf;
++}
++
++static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
++{
++ if (ehci->frame_index_bug)
++ return ehci_moschip_read_frame_index(ehci);
++ return ehci_readl(ehci, &ehci->regs->frame_index);
++}
++
++#include "ehci-dbg.c"
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * ehci_handshake - spin reading hc until handshake completes or fails
++ * @ptr: address of hc register to be read
++ * @mask: bits to look at in result of read
++ * @done: value of those bits when handshake succeeds
++ * @usec: timeout in microseconds
++ *
++ * Returns negative errno, or zero on success
++ *
++ * Success happens when the "mask" bits have the specified value (hardware
++ * handshake done). There are two failure modes: "usec" have passed (major
++ * hardware flakeout), or the register reads as all-ones (hardware removed).
++ *
++ * That last failure should_only happen in cases like physical cardbus eject
++ * before driver shutdown. But it also seems to be caused by bugs in cardbus
++ * bridge shutdown: shutting down the bridge before the devices using it.
++ */
++int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
++ u32 mask, u32 done, int usec)
++{
++ u32 result;
++
++ do {
++ result = ehci_readl(ehci, ptr);
++ if (result == ~(u32)0) /* card removed */
++ return -ENODEV;
++ result &= mask;
++ if (result == done)
++ return 0;
++ udelay (1);
++ usec--;
++ } while (usec > 0);
++ return -ETIMEDOUT;
++}
++EXPORT_SYMBOL_GPL(ehci_handshake);
++
++/* check TDI/ARC silicon is in host mode */
++static int tdi_in_host_mode (struct ehci_hcd *ehci)
++{
++ u32 tmp;
++
++ tmp = ehci_readl(ehci, &ehci->regs->usbmode);
++ return (tmp & 3) == USBMODE_CM_HC;
++}
++
++/*
++ * Force HC to halt state from unknown (EHCI spec section 2.3).
++ * Must be called with interrupts enabled and the lock not held.
++ */
++static int ehci_halt (struct ehci_hcd *ehci)
++{
++ u32 temp;
++
++ spin_lock_irq(&ehci->lock);
++
++ /* disable any irqs left enabled by previous code */
++ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
++
++ if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) {
++ spin_unlock_irq(&ehci->lock);
++ return 0;
++ }
++
++ /*
++ * This routine gets called during probe before ehci->command
++ * has been initialized, so we can't rely on its value.
++ */
++ ehci->command &= ~CMD_RUN;
++ temp = ehci_readl(ehci, &ehci->regs->command);
++ temp &= ~(CMD_RUN | CMD_IAAD);
++ ehci_writel(ehci, temp, &ehci->regs->command);
++
++ spin_unlock_irq(&ehci->lock);
++ synchronize_irq(ehci_to_hcd(ehci)->irq);
++
++ return ehci_handshake(ehci, &ehci->regs->status,
++ STS_HALT, STS_HALT, 16 * 125);
++}
++
++/* put TDI/ARC silicon into EHCI mode */
++static void tdi_reset (struct ehci_hcd *ehci)
++{
++ u32 tmp;
++
++ tmp = ehci_readl(ehci, &ehci->regs->usbmode);
++ tmp |= USBMODE_CM_HC;
++ /* The default byte access to MMR space is LE after
++ * controller reset. Set the required endian mode
++ * for transfer buffers to match the host microprocessor
++ */
++ if (ehci_big_endian_mmio(ehci))
++ tmp |= USBMODE_BE;
++ ehci_writel(ehci, tmp, &ehci->regs->usbmode);
++}
++
++/*
++ * Reset a non-running (STS_HALT == 1) controller.
++ * Must be called with interrupts enabled and the lock not held.
++ */
++static int ehci_reset (struct ehci_hcd *ehci)
++{
++ int retval;
++ u32 command = ehci_readl(ehci, &ehci->regs->command);
++
++ /* If the EHCI debug controller is active, special care must be
++ * taken before and after a host controller reset */
++ if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci)))
++ ehci->debug = NULL;
++
++ command |= CMD_RESET;
++ dbg_cmd (ehci, "reset", command);
++ ehci_writel(ehci, command, &ehci->regs->command);
++ ehci->rh_state = EHCI_RH_HALTED;
++ ehci->next_statechange = jiffies;
++ retval = ehci_handshake(ehci, &ehci->regs->command,
++ CMD_RESET, 0, 250 * 1000);
++
++ if (ehci->has_hostpc) {
++ ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
++ &ehci->regs->usbmode_ex);
++ ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning);
++ }
++ if (retval)
++ return retval;
++
++ if (ehci_is_TDI(ehci))
++ tdi_reset (ehci);
++
++ if (ehci->debug)
++ dbgp_external_startup(ehci_to_hcd(ehci));
++
++ ehci->port_c_suspend = ehci->suspended_ports =
++ ehci->resuming_ports = 0;
++ return retval;
++}
++
++/*
++ * Idle the controller (turn off the schedules).
++ * Must be called with interrupts enabled and the lock not held.
++ */
++static void ehci_quiesce (struct ehci_hcd *ehci)
++{
++ u32 temp;
++
++ if (ehci->rh_state != EHCI_RH_RUNNING)
++ return;
++
++ /* wait for any schedule enables/disables to take effect */
++ temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
++ ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp,
++ 16 * 125);
++
++ /* then disable anything that's still active */
++ spin_lock_irq(&ehci->lock);
++ ehci->command &= ~(CMD_ASE | CMD_PSE);
++ ehci_writel(ehci, ehci->command, &ehci->regs->command);
++ spin_unlock_irq(&ehci->lock);
++
++ /* hardware can take 16 microframes to turn off ... */
++ ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0,
++ 16 * 125);
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void end_unlink_async(struct ehci_hcd *ehci);
++static void unlink_empty_async(struct ehci_hcd *ehci);
++static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
++static void ehci_work(struct ehci_hcd *ehci);
++static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
++static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
++
++#include "ehci-timer.c"
++#include "ehci-hub.c"
++#include "ehci-mem.c"
++#include "ehci-q.c"
++#include "ehci-sched.c"
++#include "ehci-sysfs.c"
++
++/*-------------------------------------------------------------------------*/
++
++/* On some systems, leaving remote wakeup enabled prevents system shutdown.
++ * The firmware seems to think that powering off is a wakeup event!
++ * This routine turns off remote wakeup and everything else, on all ports.
++ */
++static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
++{
++ int port = HCS_N_PORTS(ehci->hcs_params);
++
++ while (port--)
++ ehci_writel(ehci, PORT_RWC_BITS,
++ &ehci->regs->port_status[port]);
++}
++
++/*
++ * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
++ * Must be called with interrupts enabled and the lock not held.
++ */
++static void ehci_silence_controller(struct ehci_hcd *ehci)
++{
++ ehci_halt(ehci);
++
++ spin_lock_irq(&ehci->lock);
++ ehci->rh_state = EHCI_RH_HALTED;
++ ehci_turn_off_all_ports(ehci);
++
++ /* make BIOS/etc use companion controller during reboot */
++ ehci_writel(ehci, 0, &ehci->regs->configured_flag);
++
++ /* unblock posted writes */
++ ehci_readl(ehci, &ehci->regs->configured_flag);
++ spin_unlock_irq(&ehci->lock);
++}
++
++/* ehci_shutdown kick in for silicon on any bus (not just pci, etc).
++ * This forcibly disables dma and IRQs, helping kexec and other cases
++ * where the next system software may expect clean state.
++ */
++static void ehci_shutdown(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++ spin_lock_irq(&ehci->lock);
++ ehci->shutdown = true;
++ ehci->rh_state = EHCI_RH_STOPPING;
++ ehci->enabled_hrtimer_events = 0;
++ spin_unlock_irq(&ehci->lock);
++
++ ehci_silence_controller(ehci);
++
++ hrtimer_cancel(&ehci->hrtimer);
++}
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * ehci_work is called from some interrupts, timers, and so on.
++ * it calls driver completion functions, after dropping ehci->lock.
++ */
++static void ehci_work (struct ehci_hcd *ehci)
++{
++ /* another CPU may drop ehci->lock during a schedule scan while
++ * it reports urb completions. this flag guards against bogus
++ * attempts at re-entrant schedule scanning.
++ */
++ if (ehci->scanning) {
++ ehci->need_rescan = true;
++ return;
++ }
++ ehci->scanning = true;
++
++ rescan:
++ ehci->need_rescan = false;
++ if (ehci->async_count)
++ scan_async(ehci);
++ if (ehci->intr_count > 0)
++ scan_intr(ehci);
++ if (ehci->isoc_count > 0)
++ scan_isoc(ehci);
++ if (ehci->need_rescan)
++ goto rescan;
++ ehci->scanning = false;
++
++ /* the IO watchdog guards against hardware or driver bugs that
++ * misplace IRQs, and should let us run completely without IRQs.
++ * such lossage has been observed on both VT6202 and VT8235.
++ */
++ turn_on_io_watchdog(ehci);
++}
++
++/*
++ * Called when the ehci_hcd module is removed.
++ */
++static void ehci_stop (struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++
++ ehci_dbg (ehci, "stop\n");
++
++ /* no more interrupts ... */
++
++ spin_lock_irq(&ehci->lock);
++ ehci->enabled_hrtimer_events = 0;
++ spin_unlock_irq(&ehci->lock);
++
++ ehci_quiesce(ehci);
++ ehci_silence_controller(ehci);
++ ehci_reset (ehci);
++
++ hrtimer_cancel(&ehci->hrtimer);
++ remove_sysfs_files(ehci);
++ remove_debug_files (ehci);
++
++ /* root hub is shut down separately (first, when possible) */
++ spin_lock_irq (&ehci->lock);
++ end_free_itds(ehci);
++ spin_unlock_irq (&ehci->lock);
++ ehci_mem_cleanup (ehci);
++
++ if (ehci->amd_pll_fix == 1)
++ usb_amd_dev_put();
++
++ dbg_status (ehci, "ehci_stop completed",
++ ehci_readl(ehci, &ehci->regs->status));
++}
++
++/* one-time init, only for memory state */
++static int ehci_init(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ u32 temp;
++ int retval;
++ u32 hcc_params;
++ struct ehci_qh_hw *hw;
++
++ spin_lock_init(&ehci->lock);
++
++ /*
++ * keep io watchdog by default, those good HCDs could turn off it later
++ */
++ ehci->need_io_watchdog = 1;
++
++ hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ ehci->hrtimer.function = ehci_hrtimer_func;
++ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
++
++ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
++
++ /*
++ * by default set standard 80% (== 100 usec/uframe) max periodic
++ * bandwidth as required by USB 2.0
++ */
++ ehci->uframe_periodic_max = 100;
++
++ /*
++ * hw default: 1K periodic list heads, one per frame.
++ * periodic_size can shrink by USBCMD update if hcc_params allows.
++ */
++ ehci->periodic_size = DEFAULT_I_TDPS;
++ INIT_LIST_HEAD(&ehci->async_unlink);
++ INIT_LIST_HEAD(&ehci->async_idle);
++ INIT_LIST_HEAD(&ehci->intr_unlink_wait);
++ INIT_LIST_HEAD(&ehci->intr_unlink);
++ INIT_LIST_HEAD(&ehci->intr_qh_list);
++ INIT_LIST_HEAD(&ehci->cached_itd_list);
++ INIT_LIST_HEAD(&ehci->cached_sitd_list);
++ INIT_LIST_HEAD(&ehci->tt_list);
++
++ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
++ /* periodic schedule size can be smaller than default */
++ switch (EHCI_TUNE_FLS) {
++ case 0: ehci->periodic_size = 1024; break;
++ case 1: ehci->periodic_size = 512; break;
++ case 2: ehci->periodic_size = 256; break;
++ default: BUG();
++ }
++ }
++ if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
++ return retval;
++
++ /* controllers may cache some of the periodic schedule ... */
++ if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
++ ehci->i_thresh = 0;
++ else // N microframes cached
++ ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
++
++ /*
++ * dedicate a qh for the async ring head, since we couldn't unlink
++ * a 'real' qh without stopping the async schedule [4.8]. use it
++ * as the 'reclamation list head' too.
++ * its dummy is used in hw_alt_next of many tds, to prevent the qh
++ * from automatically advancing to the next td after short reads.
++ */
++ ehci->async->qh_next.qh = NULL;
++ hw = ehci->async->hw;
++ hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
++ hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
++#if defined(CONFIG_PPC_PS3)
++ hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE);
++#endif
++ hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
++ hw->hw_qtd_next = EHCI_LIST_END(ehci);
++ ehci->async->qh_state = QH_STATE_LINKED;
++ hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
++
++ /* clear interrupt enables, set irq latency */
++ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
++ log2_irq_thresh = 0;
++ temp = 1 << (16 + log2_irq_thresh);
++ if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
++ ehci->has_ppcd = 1;
++ ehci_dbg(ehci, "enable per-port change event\n");
++ temp |= CMD_PPCEE;
++ }
++ if (HCC_CANPARK(hcc_params)) {
++ /* HW default park == 3, on hardware that supports it (like
++ * NVidia and ALI silicon), maximizes throughput on the async
++ * schedule by avoiding QH fetches between transfers.
++ *
++ * With fast usb storage devices and NForce2, "park" seems to
++ * make problems: throughput reduction (!), data errors...
++ */
++ if (park) {
++ park = min(park, (unsigned) 3);
++ temp |= CMD_PARK;
++ temp |= park << 8;
++ }
++ ehci_dbg(ehci, "park %d\n", park);
++ }
++ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
++ /* periodic schedule size can be smaller than default */
++ temp &= ~(3 << 2);
++ temp |= (EHCI_TUNE_FLS << 2);
++ }
++ ehci->command = temp;
++
++ /* Accept arbitrarily long scatter-gather lists */
++ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
++ hcd->self.sg_tablesize = ~0;
++ return 0;
++}
++
++/* start HC running; it's halted, ehci_init() has been run (once) */
++static int ehci_run (struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ u32 temp;
++ u32 hcc_params;
++
++ hcd->uses_new_polling = 1;
++
++ /* EHCI spec section 4.1 */
++
++ ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
++ ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
++
++ /*
++ * hcc_params controls whether ehci->regs->segment must (!!!)
++ * be used; it constrains QH/ITD/SITD and QTD locations.
++ * pci_pool consistent memory always uses segment zero.
++ * streaming mappings for I/O buffers, like pci_map_single(),
++ * can return segments above 4GB, if the device allows.
++ *
++ * NOTE: the dma mask is visible through dma_supported(), so
++ * drivers can pass this info along ... like NETIF_F_HIGHDMA,
++ * Scsi_Host.highmem_io, and so forth. It's readonly to all
++ * host side drivers though.
++ */
++ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
++ if (HCC_64BIT_ADDR(hcc_params)) {
++#ifdef CONFIG_ARM64
++ ehci_writel(ehci, ehci->periodic_dma >> 32, &ehci->regs->segment);
++ /*
++ * this is deeply broken on almost all architectures
++ * but arm64 can use it so enable it
++ */
++ if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
++ ehci_info(ehci, "enabled 64bit DMA\n");
++#else
++ ehci_writel(ehci, 0, &ehci->regs->segment);
++#endif
++ }
++
++
++ // Philips, Intel, and maybe others need CMD_RUN before the
++ // root hub will detect new devices (why?); NEC doesn't
++ ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
++ ehci->command |= CMD_RUN;
++ ehci_writel(ehci, ehci->command, &ehci->regs->command);
++ dbg_cmd (ehci, "init", ehci->command);
++
++ /*
++ * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
++ * are explicitly handed to companion controller(s), so no TT is
++ * involved with the root hub. (Except where one is integrated,
++ * and there's no companion controller unless maybe for USB OTG.)
++ *
++ * Turning on the CF flag will transfer ownership of all ports
++ * from the companions to the EHCI controller. If any of the
++ * companions are in the middle of a port reset at the time, it
++ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
++ * guarantees that no resets are in progress. After we set CF,
++ * a short delay lets the hardware catch up; new resets shouldn't
++ * be started before the port switching actions could complete.
++ */
++ down_write(&ehci_cf_port_reset_rwsem);
++ ehci->rh_state = EHCI_RH_RUNNING;
++ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
++ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
++ msleep(5);
++ up_write(&ehci_cf_port_reset_rwsem);
++ ehci->last_periodic_enable = ktime_get_real();
++
++ temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
++ ehci_info (ehci,
++ "USB %x.%x started, EHCI %x.%02x%s\n",
++ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
++ temp >> 8, temp & 0xff,
++ ignore_oc ? ", overcurrent ignored" : "");
++
++ ehci_writel(ehci, INTR_MASK,
++ &ehci->regs->intr_enable); /* Turn On Interrupts */
++
++ /* GRR this is run-once init(), being done every time the HC starts.
++ * So long as they're part of class devices, we can't do it init()
++ * since the class device isn't created that early.
++ */
++ create_debug_files(ehci);
++ create_sysfs_files(ehci);
++
++ return 0;
++}
++
++int ehci_setup(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ int retval;
++
++ ehci->regs = (void __iomem *)ehci->caps +
++ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
++ dbg_hcs_params(ehci, "reset");
++ dbg_hcc_params(ehci, "reset");
++
++ /* cache this readonly data; minimize chip reads */
++ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
++
++ ehci->sbrn = HCD_USB2;
++
++ /* data structure init */
++ retval = ehci_init(hcd);
++ if (retval)
++ return retval;
++
++ retval = ehci_halt(ehci);
++ if (retval)
++ return retval;
++
++ ehci_reset(ehci);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(ehci_setup);
++
++/*-------------------------------------------------------------------------*/
++
++static irqreturn_t ehci_irq (struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ u32 status, masked_status, pcd_status = 0, cmd;
++ int bh;
++ unsigned long flags;
++
++ /*
++ * For threadirqs option we use spin_lock_irqsave() variant to prevent
++ * deadlock with ehci hrtimer callback, because hrtimer callbacks run
++ * in interrupt context even when threadirqs is specified. We can go
++ * back to spin_lock() variant when hrtimer callbacks become threaded.
++ */
++ spin_lock_irqsave(&ehci->lock, flags);
++
++ status = ehci_readl(ehci, &ehci->regs->status);
++
++ /* e.g. cardbus physical eject */
++ if (status == ~(u32) 0) {
++ ehci_dbg (ehci, "device removed\n");
++ goto dead;
++ }
++
++ /*
++ * We don't use STS_FLR, but some controllers don't like it to
++ * remain on, so mask it out along with the other status bits.
++ */
++ masked_status = status & (INTR_MASK | STS_FLR);
++
++ /* Shared IRQ? */
++ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ return IRQ_NONE;
++ }
++
++ /* clear (just) interrupts */
++ ehci_writel(ehci, masked_status, &ehci->regs->status);
++ cmd = ehci_readl(ehci, &ehci->regs->command);
++ bh = 0;
++
++ /* normal [4.15.1.2] or error [4.15.1.1] completion */
++ if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
++ if (likely ((status & STS_ERR) == 0))
++ COUNT (ehci->stats.normal);
++ else
++ COUNT (ehci->stats.error);
++ bh = 1;
++ }
++
++ /* complete the unlinking of some qh [4.15.2.3] */
++ if (status & STS_IAA) {
++
++ /* Turn off the IAA watchdog */
++ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);
++
++ /*
++ * Mild optimization: Allow another IAAD to reset the
++ * hrtimer, if one occurs before the next expiration.
++ * In theory we could always cancel the hrtimer, but
++ * tests show that about half the time it will be reset
++ * for some other event anyway.
++ */
++ if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG)
++ ++ehci->next_hrtimer_event;
++
++ /* guard against (alleged) silicon errata */
++ if (cmd & CMD_IAAD)
++ ehci_dbg(ehci, "IAA with IAAD still set?\n");
++ if (ehci->iaa_in_progress)
++ COUNT(ehci->stats.iaa);
++ end_unlink_async(ehci);
++ }
++
++ /* remote wakeup [4.3.1] */
++ if (status & STS_PCD) {
++ unsigned i = HCS_N_PORTS (ehci->hcs_params);
++ u32 ppcd = ~0;
++
++ /* kick root hub later */
++ pcd_status = status;
++
++ /* resume root hub? */
++ if (ehci->rh_state == EHCI_RH_SUSPENDED)
++ usb_hcd_resume_root_hub(hcd);
++
++ /* get per-port change detect bits */
++ if (ehci->has_ppcd)
++ ppcd = status >> 16;
++
++ while (i--) {
++ int pstatus;
++
++ /* leverage per-port change bits feature */
++ if (!(ppcd & (1 << i)))
++ continue;
++ pstatus = ehci_readl(ehci,
++ &ehci->regs->port_status[i]);
++
++ if (pstatus & PORT_OWNER)
++ continue;
++ if (!(test_bit(i, &ehci->suspended_ports) &&
++ ((pstatus & PORT_RESUME) ||
++ !(pstatus & PORT_SUSPEND)) &&
++ (pstatus & PORT_PE) &&
++ ehci->reset_done[i] == 0))
++ continue;
++
++ /* start 20 msec resume signaling from this port,
++ * and make khubd collect PORT_STAT_C_SUSPEND to
++ * stop that signaling. Use 5 ms extra for safety,
++ * like usb_port_resume() does.
++ */
++ ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
++ set_bit(i, &ehci->resuming_ports);
++ ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
++ usb_hcd_start_port_resume(&hcd->self, i);
++ mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
++ }
++ }
++
++ /* PCI errors [4.15.2.4] */
++ if (unlikely ((status & STS_FATAL) != 0)) {
++ ehci_err(ehci, "fatal error\n");
++ dbg_cmd(ehci, "fatal", cmd);
++ dbg_status(ehci, "fatal", status);
++dead:
++ usb_hc_died(hcd);
++
++ /* Don't let the controller do anything more */
++ ehci->shutdown = true;
++ ehci->rh_state = EHCI_RH_STOPPING;
++ ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
++ ehci_writel(ehci, ehci->command, &ehci->regs->command);
++ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
++ ehci_handle_controller_death(ehci);
++
++ /* Handle completions when the controller stops */
++ bh = 0;
++ }
++
++ if (bh)
++ ehci_work (ehci);
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ if (pcd_status)
++ usb_hcd_poll_rh_status(hcd);
++ return IRQ_HANDLED;
++}
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * non-error returns are a promise to giveback() the urb later
++ * we drop ownership so next owner (or urb unlink) can get it
++ *
++ * urb + dev is in hcd.self.controller.urb_list
++ * we're queueing TDs onto software and hardware lists
++ *
++ * hcd-specific init for hcpriv hasn't been done yet
++ *
++ * NOTE: control, bulk, and interrupt share the same code to append TDs
++ * to a (possibly active) QH, and the same QH scanning code.
++ */
++static int ehci_urb_enqueue (
++ struct usb_hcd *hcd,
++ struct urb *urb,
++ gfp_t mem_flags
++) {
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ struct list_head qtd_list;
++
++ INIT_LIST_HEAD (&qtd_list);
++
++ switch (usb_pipetype (urb->pipe)) {
++ case PIPE_CONTROL:
++ /* qh_completions() code doesn't handle all the fault cases
++ * in multi-TD control transfers. Even 1KB is rare anyway.
++ */
++ if (urb->transfer_buffer_length > (16 * 1024))
++ return -EMSGSIZE;
++ /* FALLTHROUGH */
++ /* case PIPE_BULK: */
++ default:
++ if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
++ return -ENOMEM;
++ return submit_async(ehci, urb, &qtd_list, mem_flags);
++
++ case PIPE_INTERRUPT:
++ if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
++ return -ENOMEM;
++ return intr_submit(ehci, urb, &qtd_list, mem_flags);
++
++ case PIPE_ISOCHRONOUS:
++ if (urb->dev->speed == USB_SPEED_HIGH)
++ return itd_submit (ehci, urb, mem_flags);
++ else
++ return sitd_submit (ehci, urb, mem_flags);
++ }
++}
++
++/* remove from hardware lists
++ * completions normally happen asynchronously
++ */
++
++static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ struct ehci_qh *qh;
++ unsigned long flags;
++ int rc;
++
++ spin_lock_irqsave (&ehci->lock, flags);
++ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
++ if (rc)
++ goto done;
++
++ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
++ /*
++ * We don't expedite dequeue for isochronous URBs.
++ * Just wait until they complete normally or their
++ * time slot expires.
++ */
++ } else {
++ qh = (struct ehci_qh *) urb->hcpriv;
++ qh->exception = 1;
++ switch (qh->qh_state) {
++ case QH_STATE_LINKED:
++ if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
++ start_unlink_intr(ehci, qh);
++ else
++ start_unlink_async(ehci, qh);
++ break;
++ case QH_STATE_COMPLETING:
++ qh->dequeue_during_giveback = 1;
++ break;
++ case QH_STATE_UNLINK:
++ case QH_STATE_UNLINK_WAIT:
++ /* already started */
++ break;
++ case QH_STATE_IDLE:
++ /* QH might be waiting for a Clear-TT-Buffer */
++ qh_completions(ehci, qh);
++ break;
++ }
++ }
++done:
++ spin_unlock_irqrestore (&ehci->lock, flags);
++ return rc;
++}
++
++/*-------------------------------------------------------------------------*/
++
++// bulk qh holds the data toggle
++
++static void
++ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ unsigned long flags;
++ struct ehci_qh *qh;
++
++ /* ASSERT: any requests/urbs are being unlinked */
++ /* ASSERT: nobody can be submitting urbs for this any more */
++
++rescan:
++ spin_lock_irqsave (&ehci->lock, flags);
++ qh = ep->hcpriv;
++ if (!qh)
++ goto done;
++
++ /* endpoints can be iso streams. for now, we don't
++ * accelerate iso completions ... so spin a while.
++ */
++ if (qh->hw == NULL) {
++ struct ehci_iso_stream *stream = ep->hcpriv;
++
++ if (!list_empty(&stream->td_list))
++ goto idle_timeout;
++
++ /* BUG_ON(!list_empty(&stream->free_list)); */
++ reserve_release_iso_bandwidth(ehci, stream, -1);
++ kfree(stream);
++ goto done;
++ }
++
++ qh->exception = 1;
++ if (ehci->rh_state < EHCI_RH_RUNNING)
++ qh->qh_state = QH_STATE_IDLE;
++ switch (qh->qh_state) {
++ case QH_STATE_LINKED:
++ WARN_ON(!list_empty(&qh->qtd_list));
++ if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
++ start_unlink_async(ehci, qh);
++ else
++ start_unlink_intr(ehci, qh);
++ /* FALL THROUGH */
++ case QH_STATE_COMPLETING: /* already in unlinking */
++ case QH_STATE_UNLINK: /* wait for hw to finish? */
++ case QH_STATE_UNLINK_WAIT:
++idle_timeout:
++ spin_unlock_irqrestore (&ehci->lock, flags);
++ schedule_timeout_uninterruptible(1);
++ goto rescan;
++ case QH_STATE_IDLE: /* fully unlinked */
++ if (qh->clearing_tt)
++ goto idle_timeout;
++ if (list_empty (&qh->qtd_list)) {
++ if (qh->ps.bw_uperiod)
++ reserve_release_intr_bandwidth(ehci, qh, -1);
++ qh_destroy(ehci, qh);
++ break;
++ }
++ /* else FALL THROUGH */
++ default:
++ /* caller was supposed to have unlinked any requests;
++ * that's not our job. just leak this memory.
++ */
++ ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
++ qh, ep->desc.bEndpointAddress, qh->qh_state,
++ list_empty (&qh->qtd_list) ? "" : "(has tds)");
++ break;
++ }
++ done:
++ ep->hcpriv = NULL;
++ spin_unlock_irqrestore (&ehci->lock, flags);
++}
++
++static void
++ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ struct ehci_qh *qh;
++ int eptype = usb_endpoint_type(&ep->desc);
++ int epnum = usb_endpoint_num(&ep->desc);
++ int is_out = usb_endpoint_dir_out(&ep->desc);
++ unsigned long flags;
++
++ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
++ return;
++
++ spin_lock_irqsave(&ehci->lock, flags);
++ qh = ep->hcpriv;
++
++ /* For Bulk and Interrupt endpoints we maintain the toggle state
++ * in the hardware; the toggle bits in udev aren't used at all.
++ * When an endpoint is reset by usb_clear_halt() we must reset
++ * the toggle bit in the QH.
++ */
++ if (qh) {
++ if (!list_empty(&qh->qtd_list)) {
++ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
++ } else {
++ /* The toggle value in the QH can't be updated
++ * while the QH is active. Unlink it now;
++ * re-linking will call qh_refresh().
++ */
++ usb_settoggle(qh->ps.udev, epnum, is_out, 0);
++ qh->exception = 1;
++ if (eptype == USB_ENDPOINT_XFER_BULK)
++ start_unlink_async(ehci, qh);
++ else
++ start_unlink_intr(ehci, qh);
++ }
++ }
++ spin_unlock_irqrestore(&ehci->lock, flags);
++}
++
++static int ehci_get_frame (struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* Device addition and removal */
++
++static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++ spin_lock_irq(&ehci->lock);
++ drop_tt(udev);
++ spin_unlock_irq(&ehci->lock);
++}
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef CONFIG_PM
++
++/* suspend/resume, section 4.3 */
++
++/* These routines handle the generic parts of controller suspend/resume */
++
++int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++ if (time_before(jiffies, ehci->next_statechange))
++ msleep(10);
++
++ /*
++ * Root hub was already suspended. Disable IRQ emission and
++ * mark HW unaccessible. The PM and USB cores make sure that
++ * the root hub is either suspended or stopped.
++ */
++ ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
++
++ spin_lock_irq(&ehci->lock);
++ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
++ (void) ehci_readl(ehci, &ehci->regs->intr_enable);
++
++ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
++ spin_unlock_irq(&ehci->lock);
++
++ synchronize_irq(hcd->irq);
++
++ /* Check for race with a wakeup request */
++ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
++ ehci_resume(hcd, false);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(ehci_suspend);
++
++/* Returns 0 if power was preserved, 1 if power was lost */
++int ehci_resume(struct usb_hcd *hcd, bool hibernated)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++ if (time_before(jiffies, ehci->next_statechange))
++ msleep(100);
++
++ /* Mark hardware accessible again as we are back to full power by now */
++ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
++
++ if (ehci->shutdown)
++ return 0; /* Controller is dead */
++
++ /*
++ * If CF is still set and we aren't resuming from hibernation
++ * then we maintained suspend power.
++ * Just undo the effect of ehci_suspend().
++ */
++ if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
++ !hibernated) {
++ int mask = INTR_MASK;
++
++ ehci_prepare_ports_for_controller_resume(ehci);
++
++ spin_lock_irq(&ehci->lock);
++ if (ehci->shutdown)
++ goto skip;
++
++ if (!hcd->self.root_hub->do_remote_wakeup)
++ mask &= ~STS_PCD;
++ ehci_writel(ehci, mask, &ehci->regs->intr_enable);
++ ehci_readl(ehci, &ehci->regs->intr_enable);
++ skip:
++ spin_unlock_irq(&ehci->lock);
++ return 0;
++ }
++
++ /*
++ * Else reset, to cope with power loss or resume from hibernation
++ * having let the firmware kick in during reboot.
++ */
++ usb_root_hub_lost_power(hcd->self.root_hub);
++ (void) ehci_halt(ehci);
++ (void) ehci_reset(ehci);
++
++ spin_lock_irq(&ehci->lock);
++ if (ehci->shutdown)
++ goto skip;
++
++ ehci_writel(ehci, ehci->command, &ehci->regs->command);
++ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
++ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
++
++ ehci->rh_state = EHCI_RH_SUSPENDED;
++ spin_unlock_irq(&ehci->lock);
++
++ return 1;
++}
++EXPORT_SYMBOL_GPL(ehci_resume);
++
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * Generic structure: This gets copied for platform drivers so that
++ * individual entries can be overridden as needed.
++ */
++
++static const struct hc_driver ehci_hc_driver = {
++ .description = hcd_name,
++ .product_desc = "EHCI Host Controller",
++ .hcd_priv_size = sizeof(struct ehci_hcd),
++
++ /*
++ * generic hardware linkage
++ */
++ .irq = ehci_irq,
++ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
++
++ /*
++ * basic lifecycle operations
++ */
++ .reset = ehci_setup,
++ .start = ehci_run,
++ .stop = ehci_stop,
++ .shutdown = ehci_shutdown,
++
++ /*
++ * managing i/o requests and associated device resources
++ */
++ .urb_enqueue = ehci_urb_enqueue,
++ .urb_dequeue = ehci_urb_dequeue,
++ .endpoint_disable = ehci_endpoint_disable,
++ .endpoint_reset = ehci_endpoint_reset,
++ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
++
++ /*
++ * scheduling support
++ */
++ .get_frame_number = ehci_get_frame,
++
++ /*
++ * root hub support
++ */
++ .hub_status_data = ehci_hub_status_data,
++ .hub_control = ehci_hub_control,
++ .bus_suspend = ehci_bus_suspend,
++ .bus_resume = ehci_bus_resume,
++ .relinquish_port = ehci_relinquish_port,
++ .port_handed_over = ehci_port_handed_over,
++
++ /*
++ * device support
++ */
++ .free_dev = ehci_remove_device,
++};
++
++void ehci_init_driver(struct hc_driver *drv,
++ const struct ehci_driver_overrides *over)
++{
++ /* Copy the generic table to drv and then apply the overrides */
++ *drv = ehci_hc_driver;
++
++ if (over) {
++ drv->hcd_priv_size += over->extra_priv_size;
++ if (over->reset)
++ drv->reset = over->reset;
++ }
++}
++EXPORT_SYMBOL_GPL(ehci_init_driver);
++
++/*-------------------------------------------------------------------------*/
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR (DRIVER_AUTHOR);
++MODULE_LICENSE ("GPL");
++
++#ifdef CONFIG_USB_EHCI_FSL
++#include "ehci-fsl.c"
++#define PLATFORM_DRIVER ehci_fsl_driver
++#endif
++
++#ifdef CONFIG_USB_EHCI_SH
++#include "ehci-sh.c"
++#define PLATFORM_DRIVER ehci_hcd_sh_driver
++#endif
++
++#ifdef CONFIG_PPC_PS3
++#include "ehci-ps3.c"
++#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
++#endif
++
++#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
++#include "ehci-ppc-of.c"
++#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
++#endif
++
++#ifdef CONFIG_XPS_USB_HCD_XILINX
++#include "ehci-xilinx-of.c"
++#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
++#endif
++
++#ifdef CONFIG_USB_OCTEON_EHCI
++#include "ehci-octeon.c"
++#define PLATFORM_DRIVER ehci_octeon_driver
++#endif
++
++#ifdef CONFIG_TILE_USB
++#include "ehci-tilegx.c"
++#define PLATFORM_DRIVER ehci_hcd_tilegx_driver
++#endif
++
++#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
++#include "ehci-pmcmsp.c"
++#define PLATFORM_DRIVER ehci_hcd_msp_driver
++#endif
++
++#ifdef CONFIG_SPARC_LEON
++#include "ehci-grlib.c"
++#define PLATFORM_DRIVER ehci_grlib_driver
++#endif
++
++#ifdef CONFIG_USB_EHCI_MV
++#include "ehci-mv.c"
++#define PLATFORM_DRIVER ehci_mv_driver
++#endif
++
++#ifdef CONFIG_MIPS_SEAD3
++#include "ehci-sead3.c"
++#define PLATFORM_DRIVER ehci_hcd_sead3_driver
++#endif
++
++static int __init ehci_hcd_init(void)
++{
++ int retval = 0;
++
++ if (usb_disabled())
++ return -ENODEV;
++
++ printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
++ set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
++ if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
++ test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
++ printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
++ " before uhci_hcd and ohci_hcd, not after\n");
++
++ pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
++ hcd_name,
++ sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
++ sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
++
++#ifdef CONFIG_DYNAMIC_DEBUG
++ ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
++ if (!ehci_debug_root) {
++ retval = -ENOENT;
++ goto err_debug;
++ }
++#endif
++
++#ifdef PLATFORM_DRIVER
++ retval = platform_driver_register(&PLATFORM_DRIVER);
++ if (retval < 0)
++ goto clean0;
++#endif
++
++#ifdef PS3_SYSTEM_BUS_DRIVER
++ retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
++ if (retval < 0)
++ goto clean2;
++#endif
++
++#ifdef OF_PLATFORM_DRIVER
++ retval = platform_driver_register(&OF_PLATFORM_DRIVER);
++ if (retval < 0)
++ goto clean3;
++#endif
++
++#ifdef XILINX_OF_PLATFORM_DRIVER
++ retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
++ if (retval < 0)
++ goto clean4;
++#endif
++ return retval;
++
++#ifdef XILINX_OF_PLATFORM_DRIVER
++ /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
++clean4:
++#endif
++#ifdef OF_PLATFORM_DRIVER
++ platform_driver_unregister(&OF_PLATFORM_DRIVER);
++clean3:
++#endif
++#ifdef PS3_SYSTEM_BUS_DRIVER
++ ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
++clean2:
++#endif
++#ifdef PLATFORM_DRIVER
++ platform_driver_unregister(&PLATFORM_DRIVER);
++clean0:
++#endif
++#ifdef CONFIG_DYNAMIC_DEBUG
++ debugfs_remove(ehci_debug_root);
++ ehci_debug_root = NULL;
++err_debug:
++#endif
++ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
++ return retval;
++}
++module_init(ehci_hcd_init);
++
++static void __exit ehci_hcd_cleanup(void)
++{
++#ifdef XILINX_OF_PLATFORM_DRIVER
++ platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
++#endif
++#ifdef OF_PLATFORM_DRIVER
++ platform_driver_unregister(&OF_PLATFORM_DRIVER);
++#endif
++#ifdef PLATFORM_DRIVER
++ platform_driver_unregister(&PLATFORM_DRIVER);
++#endif
++#ifdef PS3_SYSTEM_BUS_DRIVER
++ ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
++#endif
++#ifdef CONFIG_DYNAMIC_DEBUG
++ debugfs_remove(ehci_debug_root);
++#endif
++ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
++}
++module_exit(ehci_hcd_cleanup);
+diff -Nur linux-3.14.36/drivers/usb/host/ehci-hub.c linux-openelec/drivers/usb/host/ehci-hub.c
+--- linux-3.14.36/drivers/usb/host/ehci-hub.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/host/ehci-hub.c 2015-07-24 18:03:28.344842002 -0500
+@@ -313,6 +313,15 @@
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
+ ehci_writel(ehci, t2, reg);
++ if ((t2 & PORT_WKDISC_E)
++ && (ehci_port_speed(ehci, t2) ==
++ USB_PORT_STAT_HIGH_SPEED))
++ /*
++ * If the high-speed device has not switched
++ * to full-speed idle before WKDISC_E has
++ * effected, there will be a WKDISC event.
++ */
++ mdelay(4);
+ changed = 1;
+ }
+ }
+diff -Nur linux-3.14.36/drivers/usb/host/ehci-hub.c.orig linux-openelec/drivers/usb/host/ehci-hub.c.orig
+--- linux-3.14.36/drivers/usb/host/ehci-hub.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/usb/host/ehci-hub.c.orig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,1316 @@
++/*
++ * Copyright (C) 2001-2004 by David Brownell
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software Foundation,
++ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* this file is part of ehci-hcd.c */
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * EHCI Root Hub ... the nonsharable stuff
++ *
++ * Registers don't need cpu_to_le32, that happens transparently
++ */
++
++/*-------------------------------------------------------------------------*/
++#include <linux/usb/otg.h>
++
++#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
++
++#ifdef CONFIG_PM
++
++static int ehci_hub_control(
++ struct usb_hcd *hcd,
++ u16 typeReq,
++ u16 wValue,
++ u16 wIndex,
++ char *buf,
++ u16 wLength
++);
++
++static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
++{
++ return !udev->maxchild && udev->persist_enabled &&
++ udev->bus->root_hub->speed < USB_SPEED_HIGH;
++}
++
++/* After a power loss, ports that were owned by the companion must be
++ * reset so that the companion can still own them.
++ */
++static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
++{
++ u32 __iomem *reg;
++ u32 status;
++ int port;
++ __le32 buf;
++ struct usb_hcd *hcd = ehci_to_hcd(ehci);
++
++ if (!ehci->owned_ports)
++ return;
++
++ /*
++ * USB 1.1 devices are mostly HIDs, which don't need to persist across
++ * suspends. If we ensure that none of our companion's devices have
++ * persist_enabled (by looking through all USB 1.1 buses in the system),
++ * we can skip this and avoid slowing resume down. Devices without
++ * persist will just get reenumerated shortly after resume anyway.
++ */
++ if (!usb_for_each_dev(NULL, persist_enabled_on_companion))
++ return;
++
++ /* Make sure the ports are powered */
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ if (test_bit(port, &ehci->owned_ports)) {
++ reg = &ehci->regs->port_status[port];
++ status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
++ if (!(status & PORT_POWER)) {
++ status |= PORT_POWER;
++ ehci_writel(ehci, status, reg);
++ }
++ }
++ }
++
++ /* Give the connections some time to appear */
++ msleep(20);
++
++ spin_lock_irq(&ehci->lock);
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ if (test_bit(port, &ehci->owned_ports)) {
++ reg = &ehci->regs->port_status[port];
++ status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
++
++ /* Port already owned by companion? */
++ if (status & PORT_OWNER)
++ clear_bit(port, &ehci->owned_ports);
++ else if (test_bit(port, &ehci->companion_ports))
++ ehci_writel(ehci, status & ~PORT_PE, reg);
++ else {
++ spin_unlock_irq(&ehci->lock);
++ ehci_hub_control(hcd, SetPortFeature,
++ USB_PORT_FEAT_RESET, port + 1,
++ NULL, 0);
++ spin_lock_irq(&ehci->lock);
++ }
++ }
++ }
++ spin_unlock_irq(&ehci->lock);
++
++ if (!ehci->owned_ports)
++ return;
++ msleep(90); /* Wait for resets to complete */
++
++ spin_lock_irq(&ehci->lock);
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ if (test_bit(port, &ehci->owned_ports)) {
++ spin_unlock_irq(&ehci->lock);
++ ehci_hub_control(hcd, GetPortStatus,
++ 0, port + 1,
++ (char *) &buf, sizeof(buf));
++ spin_lock_irq(&ehci->lock);
++
++ /* The companion should now own the port,
++ * but if something went wrong the port must not
++ * remain enabled.
++ */
++ reg = &ehci->regs->port_status[port];
++ status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
++ if (status & PORT_OWNER)
++ ehci_writel(ehci, status | PORT_CSC, reg);
++ else {
++ ehci_dbg(ehci, "failed handover port %d: %x\n",
++ port + 1, status);
++ ehci_writel(ehci, status & ~PORT_PE, reg);
++ }
++ }
++ }
++
++ ehci->owned_ports = 0;
++ spin_unlock_irq(&ehci->lock);
++}
++
++static int ehci_port_change(struct ehci_hcd *ehci)
++{
++ int i = HCS_N_PORTS(ehci->hcs_params);
++
++ /* First check if the controller indicates a change event */
++
++ if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)
++ return 1;
++
++ /*
++ * Not all controllers appear to update this while going from D3 to D0,
++ * so check the individual port status registers as well
++ */
++
++ while (i--)
++ if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC)
++ return 1;
++
++ return 0;
++}
++
++static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
++ bool suspending, bool do_wakeup)
++{
++ int port;
++ u32 temp;
++
++ /* If remote wakeup is enabled for the root hub but disabled
++ * for the controller, we must adjust all the port wakeup flags
++ * when the controller is suspended or resumed. In all other
++ * cases they don't need to be changed.
++ */
++ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
++ return;
++
++ spin_lock_irq(&ehci->lock);
++
++ /* clear phy low-power mode before changing wakeup flags */
++ if (ehci->has_tdi_phy_lpm) {
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
++
++ temp = ehci_readl(ehci, hostpc_reg);
++ ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
++ }
++ spin_unlock_irq(&ehci->lock);
++ msleep(5);
++ spin_lock_irq(&ehci->lock);
++ }
++
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *reg = &ehci->regs->port_status[port];
++ u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
++ u32 t2 = t1 & ~PORT_WAKE_BITS;
++
++ /* If we are suspending the controller, clear the flags.
++ * If we are resuming the controller, set the wakeup flags.
++ */
++ if (!suspending) {
++ if (t1 & PORT_CONNECT)
++ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
++ else
++ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
++ }
++ ehci_writel(ehci, t2, reg);
++ }
++
++ /* enter phy low-power mode again */
++ if (ehci->has_tdi_phy_lpm) {
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
++
++ temp = ehci_readl(ehci, hostpc_reg);
++ ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
++ }
++ }
++
++ /* Does the root hub have a port wakeup pending? */
++ if (!suspending && ehci_port_change(ehci))
++ usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
++
++ spin_unlock_irq(&ehci->lock);
++}
++
++static int ehci_bus_suspend (struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ int port;
++ int mask;
++ int changed;
++ bool fs_idle_delay;
++
++ ehci_dbg(ehci, "suspend root hub\n");
++
++ if (time_before (jiffies, ehci->next_statechange))
++ msleep(5);
++
++ /* stop the schedules */
++ ehci_quiesce(ehci);
++
++ spin_lock_irq (&ehci->lock);
++ if (ehci->rh_state < EHCI_RH_RUNNING)
++ goto done;
++
++ /* Once the controller is stopped, port resumes that are already
++ * in progress won't complete. Hence if remote wakeup is enabled
++ * for the root hub and any ports are in the middle of a resume or
++ * remote wakeup, we must fail the suspend.
++ */
++ if (hcd->self.root_hub->do_remote_wakeup) {
++ if (ehci->resuming_ports) {
++ spin_unlock_irq(&ehci->lock);
++ ehci_dbg(ehci, "suspend failed because a port is resuming\n");
++ return -EBUSY;
++ }
++ }
++
++ /* Unlike other USB host controller types, EHCI doesn't have
++ * any notion of "global" or bus-wide suspend. The driver has
++ * to manually suspend all the active unsuspended ports, and
++ * then manually resume them in the bus_resume() routine.
++ */
++ ehci->bus_suspended = 0;
++ ehci->owned_ports = 0;
++ changed = 0;
++ fs_idle_delay = false;
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *reg = &ehci->regs->port_status [port];
++ u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
++ u32 t2 = t1 & ~PORT_WAKE_BITS;
++
++ /* keep track of which ports we suspend */
++ if (t1 & PORT_OWNER)
++ set_bit(port, &ehci->owned_ports);
++ else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) {
++ t2 |= PORT_SUSPEND;
++ set_bit(port, &ehci->bus_suspended);
++ }
++
++ /* enable remote wakeup on all ports, if told to do so */
++ if (hcd->self.root_hub->do_remote_wakeup) {
++ /* only enable appropriate wake bits, otherwise the
++ * hardware can not go phy low power mode. If a race
++ * condition happens here(connection change during bits
++ * set), the port change detection will finally fix it.
++ */
++ if (t1 & PORT_CONNECT)
++ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
++ else
++ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
++ }
++
++ if (t1 != t2) {
++ /*
++ * On some controllers, Wake-On-Disconnect will
++ * generate false wakeup signals until the bus
++ * switches over to full-speed idle. For their
++ * sake, add a delay if we need one.
++ */
++ if ((t2 & PORT_WKDISC_E) &&
++ ehci_port_speed(ehci, t2) ==
++ USB_PORT_STAT_HIGH_SPEED)
++ fs_idle_delay = true;
++ ehci_writel(ehci, t2, reg);
++ if ((t2 & PORT_WKDISC_E)
++ && (ehci_port_speed(ehci, t2) ==
++ USB_PORT_STAT_HIGH_SPEED))
++ /*
++ * If the high-speed device has not switched
++ * to full-speed idle before WKDISC_E has
++ * effected, there will be a WKDISC event.
++ */
++ mdelay(4);
++ changed = 1;
++ }
++ }
++ spin_unlock_irq(&ehci->lock);
++
++ if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
++ /*
++ * Wait for HCD to enter low-power mode or for the bus
++ * to switch to full-speed idle.
++ */
++ usleep_range(5000, 5500);
++ }
++
++ if (changed && ehci->has_tdi_phy_lpm) {
++ spin_lock_irq(&ehci->lock);
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
++ u32 t3;
++
++ t3 = ehci_readl(ehci, hostpc_reg);
++ ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
++ t3 = ehci_readl(ehci, hostpc_reg);
++ ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
++ port, (t3 & HOSTPC_PHCD) ?
++ "succeeded" : "failed");
++ }
++ spin_unlock_irq(&ehci->lock);
++ }
++
++ /* Apparently some devices need a >= 1-uframe delay here */
++ if (ehci->bus_suspended)
++ udelay(150);
++
++ /* turn off now-idle HC */
++ ehci_halt (ehci);
++
++ spin_lock_irq(&ehci->lock);
++ if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD))
++ ehci_handle_controller_death(ehci);
++ if (ehci->rh_state != EHCI_RH_RUNNING)
++ goto done;
++ ehci->rh_state = EHCI_RH_SUSPENDED;
++
++ end_unlink_async(ehci);
++ unlink_empty_async_suspended(ehci);
++ ehci_handle_start_intr_unlinks(ehci);
++ ehci_handle_intr_unlinks(ehci);
++ end_free_itds(ehci);
++
++ /* allow remote wakeup */
++ mask = INTR_MASK;
++ if (!hcd->self.root_hub->do_remote_wakeup)
++ mask &= ~STS_PCD;
++ ehci_writel(ehci, mask, &ehci->regs->intr_enable);
++ ehci_readl(ehci, &ehci->regs->intr_enable);
++
++ done:
++ ehci->next_statechange = jiffies + msecs_to_jiffies(10);
++ ehci->enabled_hrtimer_events = 0;
++ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
++ spin_unlock_irq (&ehci->lock);
++
++ hrtimer_cancel(&ehci->hrtimer);
++ return 0;
++}
++
++
++/* caller has locked the root hub, and should reset/reinit on error */
++static int ehci_bus_resume (struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ u32 temp;
++ u32 power_okay;
++ int i;
++ unsigned long resume_needed = 0;
++
++ if (time_before (jiffies, ehci->next_statechange))
++ msleep(5);
++ spin_lock_irq (&ehci->lock);
++ if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown)
++ goto shutdown;
++
++ if (unlikely(ehci->debug)) {
++ if (!dbgp_reset_prep(hcd))
++ ehci->debug = NULL;
++ else
++ dbgp_external_startup(hcd);
++ }
++
++ /* Ideally and we've got a real resume here, and no port's power
++ * was lost. (For PCI, that means Vaux was maintained.) But we
++ * could instead be restoring a swsusp snapshot -- so that BIOS was
++ * the last user of the controller, not reset/pm hardware keeping
++ * state we gave to it.
++ */
++ power_okay = ehci_readl(ehci, &ehci->regs->intr_enable);
++ ehci_dbg(ehci, "resume root hub%s\n",
++ power_okay ? "" : " after power loss");
++
++ /* at least some APM implementations will try to deliver
++ * IRQs right away, so delay them until we're ready.
++ */
++ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
++
++ /* re-init operational registers */
++ ehci_writel(ehci, 0, &ehci->regs->segment);
++ ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
++ ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
++
++ /* restore CMD_RUN, framelist size, and irq threshold */
++ ehci->command |= CMD_RUN;
++ ehci_writel(ehci, ehci->command, &ehci->regs->command);
++ ehci->rh_state = EHCI_RH_RUNNING;
++
++ /*
++ * According to Bugzilla #8190, the port status for some controllers
++ * will be wrong without a delay. At their wrong status, the port
++ * is enabled, but not suspended neither resumed.
++ */
++ i = HCS_N_PORTS(ehci->hcs_params);
++ while (i--) {
++ temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
++ if ((temp & PORT_PE) &&
++ !(temp & (PORT_SUSPEND | PORT_RESUME))) {
++ ehci_dbg(ehci, "Port status(0x%x) is wrong\n", temp);
++ spin_unlock_irq(&ehci->lock);
++ msleep(8);
++ spin_lock_irq(&ehci->lock);
++ break;
++ }
++ }
++
++ if (ehci->shutdown)
++ goto shutdown;
++
++ /* clear phy low-power mode before resume */
++ if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) {
++ i = HCS_N_PORTS(ehci->hcs_params);
++ while (i--) {
++ if (test_bit(i, &ehci->bus_suspended)) {
++ u32 __iomem *hostpc_reg =
++ &ehci->regs->hostpc[i];
++
++ temp = ehci_readl(ehci, hostpc_reg);
++ ehci_writel(ehci, temp & ~HOSTPC_PHCD,
++ hostpc_reg);
++ }
++ }
++ spin_unlock_irq(&ehci->lock);
++ msleep(5);
++ spin_lock_irq(&ehci->lock);
++ if (ehci->shutdown)
++ goto shutdown;
++ }
++
++ /* manually resume the ports we suspended during bus_suspend() */
++ i = HCS_N_PORTS (ehci->hcs_params);
++ while (i--) {
++ temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
++ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
++ if (test_bit(i, &ehci->bus_suspended) &&
++ (temp & PORT_SUSPEND)) {
++ temp |= PORT_RESUME;
++ set_bit(i, &resume_needed);
++ }
++ ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
++ }
++
++ /* msleep for 20ms only if code is trying to resume port */
++ if (resume_needed) {
++ spin_unlock_irq(&ehci->lock);
++ msleep(20);
++ spin_lock_irq(&ehci->lock);
++ if (ehci->shutdown)
++ goto shutdown;
++ }
++
++ i = HCS_N_PORTS (ehci->hcs_params);
++ while (i--) {
++ temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
++ if (test_bit(i, &resume_needed)) {
++ temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
++ ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
++ }
++ }
++
++ ehci->next_statechange = jiffies + msecs_to_jiffies(5);
++ spin_unlock_irq(&ehci->lock);
++
++ ehci_handover_companion_ports(ehci);
++
++ /* Now we can safely re-enable irqs */
++ spin_lock_irq(&ehci->lock);
++ if (ehci->shutdown)
++ goto shutdown;
++ ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
++ (void) ehci_readl(ehci, &ehci->regs->intr_enable);
++ spin_unlock_irq(&ehci->lock);
++
++ return 0;
++
++ shutdown:
++ spin_unlock_irq(&ehci->lock);
++ return -ESHUTDOWN;
++}
++
++#else
++
++#define ehci_bus_suspend NULL
++#define ehci_bus_resume NULL
++
++#endif /* CONFIG_PM */
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * Sets the owner of a port
++ */
++static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner)
++{
++ u32 __iomem *status_reg;
++ u32 port_status;
++ int try;
++
++ status_reg = &ehci->regs->port_status[portnum];
++
++ /*
++ * The controller won't set the OWNER bit if the port is
++ * enabled, so this loop will sometimes require at least two
++ * iterations: one to disable the port and one to set OWNER.
++ */
++ for (try = 4; try > 0; --try) {
++ spin_lock_irq(&ehci->lock);
++ port_status = ehci_readl(ehci, status_reg);
++ if ((port_status & PORT_OWNER) == new_owner
++ || (port_status & (PORT_OWNER | PORT_CONNECT))
++ == 0)
++ try = 0;
++ else {
++ port_status ^= PORT_OWNER;
++ port_status &= ~(PORT_PE | PORT_RWC_BITS);
++ ehci_writel(ehci, port_status, status_reg);
++ }
++ spin_unlock_irq(&ehci->lock);
++ if (try > 1)
++ msleep(5);
++ }
++}
++
++/*-------------------------------------------------------------------------*/
++
++static int check_reset_complete (
++ struct ehci_hcd *ehci,
++ int index,
++ u32 __iomem *status_reg,
++ int port_status
++) {
++ if (!(port_status & PORT_CONNECT))
++ return port_status;
++
++ /* if reset finished and it's still not enabled -- handoff */
++ if (!(port_status & PORT_PE)) {
++
++ /* with integrated TT, there's nobody to hand it to! */
++ if (ehci_is_TDI(ehci)) {
++ ehci_dbg (ehci,
++ "Failed to enable port %d on root hub TT\n",
++ index+1);
++ return port_status;
++ }
++
++ ehci_dbg (ehci, "port %d full speed --> companion\n",
++ index + 1);
++
++ // what happens if HCS_N_CC(params) == 0 ?
++ port_status |= PORT_OWNER;
++ port_status &= ~PORT_RWC_BITS;
++ ehci_writel(ehci, port_status, status_reg);
++
++ /* ensure 440EPX ohci controller state is operational */
++ if (ehci->has_amcc_usb23)
++ set_ohci_hcfs(ehci, 1);
++ } else {
++ ehci_dbg(ehci, "port %d reset complete, port enabled\n",
++ index + 1);
++ /* ensure 440EPx ohci controller state is suspended */
++ if (ehci->has_amcc_usb23)
++ set_ohci_hcfs(ehci, 0);
++ }
++
++ return port_status;
++}
++
++/*-------------------------------------------------------------------------*/
++
++
++/* build "status change" packet (one or two bytes) from HC registers */
++
++static int
++ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ u32 temp, status;
++ u32 mask;
++ int ports, i, retval = 1;
++ unsigned long flags;
++ u32 ppcd = ~0;
++
++ /* init status to no-changes */
++ buf [0] = 0;
++ ports = HCS_N_PORTS (ehci->hcs_params);
++ if (ports > 7) {
++ buf [1] = 0;
++ retval++;
++ }
++
++ /* Inform the core about resumes-in-progress by returning
++ * a non-zero value even if there are no status changes.
++ */
++ status = ehci->resuming_ports;
++
++ /* Some boards (mostly VIA?) report bogus overcurrent indications,
++ * causing massive log spam unless we completely ignore them. It
++ * may be relevant that VIA VT8235 controllers, where PORT_POWER is
++ * always set, seem to clear PORT_OCC and PORT_CSC when writing to
++ * PORT_POWER; that's surprising, but maybe within-spec.
++ */
++ if (!ignore_oc)
++ mask = PORT_CSC | PORT_PEC | PORT_OCC;
++ else
++ mask = PORT_CSC | PORT_PEC;
++ // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
++
++ /* no hub change reports (bit 0) for now (power, ...) */
++
++ /* port N changes (bit N)? */
++ spin_lock_irqsave (&ehci->lock, flags);
++
++ /* get per-port change detect bits */
++ if (ehci->has_ppcd)
++ ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16;
++
++ for (i = 0; i < ports; i++) {
++ /* leverage per-port change bits feature */
++ if (ppcd & (1 << i))
++ temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
++ else
++ temp = 0;
++
++ /*
++ * Return status information even for ports with OWNER set.
++ * Otherwise khubd wouldn't see the disconnect event when a
++ * high-speed device is switched over to the companion
++ * controller by the user.
++ */
++
++ if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
++ || (ehci->reset_done[i] && time_after_eq(
++ jiffies, ehci->reset_done[i]))) {
++ if (i < 7)
++ buf [0] |= 1 << (i + 1);
++ else
++ buf [1] |= 1 << (i - 7);
++ status = STS_PCD;
++ }
++ }
++
++ /* If a resume is in progress, make sure it can finish */
++ if (ehci->resuming_ports)
++ mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
++
++ spin_unlock_irqrestore (&ehci->lock, flags);
++ return status ? retval : 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void
++ehci_hub_descriptor (
++ struct ehci_hcd *ehci,
++ struct usb_hub_descriptor *desc
++) {
++ int ports = HCS_N_PORTS (ehci->hcs_params);
++ u16 temp;
++
++ desc->bDescriptorType = 0x29;
++ desc->bPwrOn2PwrGood = 10; /* ehci 1.0, 2.3.9 says 20ms max */
++ desc->bHubContrCurrent = 0;
++
++ desc->bNbrPorts = ports;
++ temp = 1 + (ports / 8);
++ desc->bDescLength = 7 + 2 * temp;
++
++ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
++ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
++ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
++
++ temp = 0x0008; /* per-port overcurrent reporting */
++ if (HCS_PPC (ehci->hcs_params))
++ temp |= 0x0001; /* per-port power control */
++ else
++ temp |= 0x0002; /* no power switching */
++#if 0
++// re-enable when we support USB_PORT_FEAT_INDICATOR below.
++ if (HCS_INDICATOR (ehci->hcs_params))
++ temp |= 0x0080; /* per-port indicators (LEDs) */
++#endif
++ desc->wHubCharacteristics = cpu_to_le16(temp);
++}
++
++/*-------------------------------------------------------------------------*/
++#ifdef CONFIG_USB_HCD_TEST_MODE
++
++#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
++
++static void usb_ehset_completion(struct urb *urb)
++{
++ struct completion *done = urb->context;
++
++ complete(done);
++}
++static int submit_single_step_set_feature(
++ struct usb_hcd *hcd,
++ struct urb *urb,
++ int is_setup
++);
++
++/*
++ * Allocate and initialize a control URB. This request will be used by the
++ * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages
++ * of the GetDescriptor request are sent 15 seconds after the SETUP stage.
++ * Return NULL if failed.
++ */
++static struct urb *request_single_step_set_feature_urb(
++ struct usb_device *udev,
++ void *dr,
++ void *buf,
++ struct completion *done
++) {
++ struct urb *urb;
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++ struct usb_host_endpoint *ep;
++
++ urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!urb)
++ return NULL;
++
++ urb->pipe = usb_rcvctrlpipe(udev, 0);
++ ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
++ [usb_pipeendpoint(urb->pipe)];
++ if (!ep) {
++ usb_free_urb(urb);
++ return NULL;
++ }
++
++ urb->ep = ep;
++ urb->dev = udev;
++ urb->setup_packet = (void *)dr;
++ urb->transfer_buffer = buf;
++ urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
++ urb->complete = usb_ehset_completion;
++ urb->status = -EINPROGRESS;
++ urb->actual_length = 0;
++ urb->transfer_flags = URB_DIR_IN;
++ usb_get_urb(urb);
++ atomic_inc(&urb->use_count);
++ atomic_inc(&urb->dev->urbnum);
++ urb->setup_dma = dma_map_single(
++ hcd->self.controller,
++ urb->setup_packet,
++ sizeof(struct usb_ctrlrequest),
++ DMA_TO_DEVICE);
++ urb->transfer_dma = dma_map_single(
++ hcd->self.controller,
++ urb->transfer_buffer,
++ urb->transfer_buffer_length,
++ DMA_FROM_DEVICE);
++ urb->context = done;
++ return urb;
++}
++
++static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
++{
++ int retval = -ENOMEM;
++ struct usb_ctrlrequest *dr;
++ struct urb *urb;
++ struct usb_device *udev;
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ struct usb_device_descriptor *buf;
++ DECLARE_COMPLETION_ONSTACK(done);
++
++ /* Obtain udev of the rhub's child port */
++ udev = usb_hub_find_child(hcd->self.root_hub, port);
++ if (!udev) {
++ ehci_err(ehci, "No device attached to the RootHub\n");
++ return -ENODEV;
++ }
++ buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
++ if (!dr) {
++ kfree(buf);
++ return -ENOMEM;
++ }
++
++ /* Fill Setup packet for GetDescriptor */
++ dr->bRequestType = USB_DIR_IN;
++ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
++ dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
++ dr->wIndex = 0;
++ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
++ urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
++ if (!urb)
++ goto cleanup;
++
++ /* Submit just the SETUP stage */
++ retval = submit_single_step_set_feature(hcd, urb, 1);
++ if (retval)
++ goto out1;
++ if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
++ usb_kill_urb(urb);
++ retval = -ETIMEDOUT;
++ ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__);
++ goto out1;
++ }
++ msleep(15 * 1000);
++
++ /* Complete remaining DATA and STATUS stages using the same URB */
++ urb->status = -EINPROGRESS;
++ usb_get_urb(urb);
++ atomic_inc(&urb->use_count);
++ atomic_inc(&urb->dev->urbnum);
++ retval = submit_single_step_set_feature(hcd, urb, 0);
++ if (!retval && !wait_for_completion_timeout(&done,
++ msecs_to_jiffies(2000))) {
++ usb_kill_urb(urb);
++ retval = -ETIMEDOUT;
++ ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__);
++ }
++out1:
++ usb_free_urb(urb);
++cleanup:
++ kfree(dr);
++ kfree(buf);
++ return retval;
++}
++#endif /* CONFIG_USB_HCD_TEST_MODE */
++/*-------------------------------------------------------------------------*/
++
++static int ehci_hub_control (
++ struct usb_hcd *hcd,
++ u16 typeReq,
++ u16 wValue,
++ u16 wIndex,
++ char *buf,
++ u16 wLength
++) {
++ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
++ int ports = HCS_N_PORTS (ehci->hcs_params);
++ u32 __iomem *status_reg = &ehci->regs->port_status[
++ (wIndex & 0xff) - 1];
++ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
++ u32 temp, temp1, status;
++ unsigned long flags;
++ int retval = 0;
++ unsigned selector;
++
++ /*
++ * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
++ * HCS_INDICATOR may say we can change LEDs to off/amber/green.
++ * (track current state ourselves) ... blink for diagnostics,
++ * power, "this is the one", etc. EHCI spec supports this.
++ */
++
++ spin_lock_irqsave (&ehci->lock, flags);
++ switch (typeReq) {
++ case ClearHubFeature:
++ switch (wValue) {
++ case C_HUB_LOCAL_POWER:
++ case C_HUB_OVER_CURRENT:
++ /* no hub-wide feature/status flags */
++ break;
++ default:
++ goto error;
++ }
++ break;
++ case ClearPortFeature:
++ if (!wIndex || wIndex > ports)
++ goto error;
++ wIndex--;
++ temp = ehci_readl(ehci, status_reg);
++ temp &= ~PORT_RWC_BITS;
++
++ /*
++ * Even if OWNER is set, so the port is owned by the
++ * companion controller, khubd needs to be able to clear
++ * the port-change status bits (especially
++ * USB_PORT_STAT_C_CONNECTION).
++ */
++
++ switch (wValue) {
++ case USB_PORT_FEAT_ENABLE:
++ ehci_writel(ehci, temp & ~PORT_PE, status_reg);
++ break;
++ case USB_PORT_FEAT_C_ENABLE:
++ ehci_writel(ehci, temp | PORT_PEC, status_reg);
++ break;
++ case USB_PORT_FEAT_SUSPEND:
++ if (temp & PORT_RESET)
++ goto error;
++ if (ehci->no_selective_suspend)
++ break;
++#ifdef CONFIG_USB_OTG
++ if ((hcd->self.otg_port == (wIndex + 1))
++ && hcd->self.b_hnp_enable) {
++ otg_start_hnp(hcd->phy->otg);
++ break;
++ }
++#endif
++ if (!(temp & PORT_SUSPEND))
++ break;
++ if ((temp & PORT_PE) == 0)
++ goto error;
++
++ /* clear phy low-power mode before resume */
++ if (ehci->has_tdi_phy_lpm) {
++ temp1 = ehci_readl(ehci, hostpc_reg);
++ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
++ hostpc_reg);
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ msleep(5);/* wait to leave low-power mode */
++ spin_lock_irqsave(&ehci->lock, flags);
++ }
++ /* resume signaling for 20 msec */
++ temp &= ~PORT_WAKE_BITS;
++ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
++ ehci->reset_done[wIndex] = jiffies
++ + msecs_to_jiffies(20);
++ set_bit(wIndex, &ehci->resuming_ports);
++ usb_hcd_start_port_resume(&hcd->self, wIndex);
++ break;
++ case USB_PORT_FEAT_C_SUSPEND:
++ clear_bit(wIndex, &ehci->port_c_suspend);
++ break;
++ case USB_PORT_FEAT_POWER:
++ if (HCS_PPC (ehci->hcs_params))
++ ehci_writel(ehci, temp & ~PORT_POWER,
++ status_reg);
++ break;
++ case USB_PORT_FEAT_C_CONNECTION:
++ ehci_writel(ehci, temp | PORT_CSC, status_reg);
++ break;
++ case USB_PORT_FEAT_C_OVER_CURRENT:
++ ehci_writel(ehci, temp | PORT_OCC, status_reg);
++ break;
++ case USB_PORT_FEAT_C_RESET:
++ /* GetPortStatus clears reset */
++ break;
++ default:
++ goto error;
++ }
++ ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */
++ break;
++ case GetHubDescriptor:
++ ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *)
++ buf);
++ break;
++ case GetHubStatus:
++ /* no hub-wide feature/status flags */
++ memset (buf, 0, 4);
++ //cpu_to_le32s ((u32 *) buf);
++ break;
++ case GetPortStatus:
++ if (!wIndex || wIndex > ports)
++ goto error;
++ wIndex--;
++ status = 0;
++ temp = ehci_readl(ehci, status_reg);
++
++ // wPortChange bits
++ if (temp & PORT_CSC)
++ status |= USB_PORT_STAT_C_CONNECTION << 16;
++ if (temp & PORT_PEC)
++ status |= USB_PORT_STAT_C_ENABLE << 16;
++
++ if ((temp & PORT_OCC) && !ignore_oc){
++ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
++
++ /*
++ * Hubs should disable port power on over-current.
++ * However, not all EHCI implementations do this
++ * automatically, even if they _do_ support per-port
++ * power switching; they're allowed to just limit the
++ * current. khubd will turn the power back on.
++ */
++ if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle))
++ && HCS_PPC(ehci->hcs_params)) {
++ ehci_writel(ehci,
++ temp & ~(PORT_RWC_BITS | PORT_POWER),
++ status_reg);
++ temp = ehci_readl(ehci, status_reg);
++ }
++ }
++
++ /* no reset or resume pending */
++ if (!ehci->reset_done[wIndex]) {
++
++ /* Remote Wakeup received? */
++ if (temp & PORT_RESUME) {
++ /* resume signaling for 20 msec */
++ ehci->reset_done[wIndex] = jiffies
++ + msecs_to_jiffies(20);
++ usb_hcd_start_port_resume(&hcd->self, wIndex);
++ set_bit(wIndex, &ehci->resuming_ports);
++ /* check the port again */
++ mod_timer(&ehci_to_hcd(ehci)->rh_timer,
++ ehci->reset_done[wIndex]);
++ }
++
++ /* reset or resume not yet complete */
++ } else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) {
++ ; /* wait until it is complete */
++
++ /* resume completed */
++ } else if (test_bit(wIndex, &ehci->resuming_ports)) {
++ clear_bit(wIndex, &ehci->suspended_ports);
++ set_bit(wIndex, &ehci->port_c_suspend);
++ ehci->reset_done[wIndex] = 0;
++ usb_hcd_end_port_resume(&hcd->self, wIndex);
++
++ /* stop resume signaling */
++ temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
++ ehci_writel(ehci, temp, status_reg);
++ clear_bit(wIndex, &ehci->resuming_ports);
++ retval = ehci_handshake(ehci, status_reg,
++ PORT_RESUME, 0, 2000 /* 2msec */);
++ if (retval != 0) {
++ ehci_err(ehci, "port %d resume error %d\n",
++ wIndex + 1, retval);
++ goto error;
++ }
++ temp = ehci_readl(ehci, status_reg);
++
++ /* whoever resets must GetPortStatus to complete it!! */
++ } else {
++ status |= USB_PORT_STAT_C_RESET << 16;
++ ehci->reset_done [wIndex] = 0;
++
++ /* force reset to complete */
++ ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
++ status_reg);
++ /* REVISIT: some hardware needs 550+ usec to clear
++ * this bit; seems too long to spin routinely...
++ */
++ retval = ehci_handshake(ehci, status_reg,
++ PORT_RESET, 0, 1000);
++ if (retval != 0) {
++ ehci_err (ehci, "port %d reset error %d\n",
++ wIndex + 1, retval);
++ goto error;
++ }
++
++ /* see what we found out */
++ temp = check_reset_complete (ehci, wIndex, status_reg,
++ ehci_readl(ehci, status_reg));
++ }
++
++ /* transfer dedicated ports to the companion hc */
++ if ((temp & PORT_CONNECT) &&
++ test_bit(wIndex, &ehci->companion_ports)) {
++ temp &= ~PORT_RWC_BITS;
++ temp |= PORT_OWNER;
++ ehci_writel(ehci, temp, status_reg);
++ ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1);
++ temp = ehci_readl(ehci, status_reg);
++ }
++
++ /*
++ * Even if OWNER is set, there's no harm letting khubd
++ * see the wPortStatus values (they should all be 0 except
++ * for PORT_POWER anyway).
++ */
++
++ if (temp & PORT_CONNECT) {
++ status |= USB_PORT_STAT_CONNECTION;
++ // status may be from integrated TT
++ if (ehci->has_hostpc) {
++ temp1 = ehci_readl(ehci, hostpc_reg);
++ status |= ehci_port_speed(ehci, temp1);
++ } else
++ status |= ehci_port_speed(ehci, temp);
++ }
++ if (temp & PORT_PE)
++ status |= USB_PORT_STAT_ENABLE;
++
++ /* maybe the port was unsuspended without our knowledge */
++ if (temp & (PORT_SUSPEND|PORT_RESUME)) {
++ status |= USB_PORT_STAT_SUSPEND;
++ } else if (test_bit(wIndex, &ehci->suspended_ports)) {
++ clear_bit(wIndex, &ehci->suspended_ports);
++ clear_bit(wIndex, &ehci->resuming_ports);
++ ehci->reset_done[wIndex] = 0;
++ if (temp & PORT_PE)
++ set_bit(wIndex, &ehci->port_c_suspend);
++ usb_hcd_end_port_resume(&hcd->self, wIndex);
++ }
++
++ if (temp & PORT_OC)
++ status |= USB_PORT_STAT_OVERCURRENT;
++ if (temp & PORT_RESET)
++ status |= USB_PORT_STAT_RESET;
++ if (temp & PORT_POWER)
++ status |= USB_PORT_STAT_POWER;
++ if (test_bit(wIndex, &ehci->port_c_suspend))
++ status |= USB_PORT_STAT_C_SUSPEND << 16;
++
++ if (status & ~0xffff) /* only if wPortChange is interesting */
++ dbg_port(ehci, "GetStatus", wIndex + 1, temp);
++ put_unaligned_le32(status, buf);
++ break;
++ case SetHubFeature:
++ switch (wValue) {
++ case C_HUB_LOCAL_POWER:
++ case C_HUB_OVER_CURRENT:
++ /* no hub-wide feature/status flags */
++ break;
++ default:
++ goto error;
++ }
++ break;
++ case SetPortFeature:
++ selector = wIndex >> 8;
++ wIndex &= 0xff;
++ if (unlikely(ehci->debug)) {
++ /* If the debug port is active any port
++ * feature requests should get denied */
++ if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) &&
++ (readl(&ehci->debug->control) & DBGP_ENABLED)) {
++ retval = -ENODEV;
++ goto error_exit;
++ }
++ }
++ if (!wIndex || wIndex > ports)
++ goto error;
++ wIndex--;
++ temp = ehci_readl(ehci, status_reg);
++ if (temp & PORT_OWNER)
++ break;
++
++ temp &= ~PORT_RWC_BITS;
++ switch (wValue) {
++ case USB_PORT_FEAT_SUSPEND:
++ if (ehci->no_selective_suspend)
++ break;
++ if ((temp & PORT_PE) == 0
++ || (temp & PORT_RESET) != 0)
++ goto error;
++
++ /* After above check the port must be connected.
++ * Set appropriate bit thus could put phy into low power
++ * mode if we have tdi_phy_lpm feature
++ */
++ temp &= ~PORT_WKCONN_E;
++ temp |= PORT_WKDISC_E | PORT_WKOC_E;
++ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
++ if (ehci->has_tdi_phy_lpm) {
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ msleep(5);/* 5ms for HCD enter low pwr mode */
++ spin_lock_irqsave(&ehci->lock, flags);
++ temp1 = ehci_readl(ehci, hostpc_reg);
++ ehci_writel(ehci, temp1 | HOSTPC_PHCD,
++ hostpc_reg);
++ temp1 = ehci_readl(ehci, hostpc_reg);
++ ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
++ wIndex, (temp1 & HOSTPC_PHCD) ?
++ "succeeded" : "failed");
++ }
++ set_bit(wIndex, &ehci->suspended_ports);
++ break;
++ case USB_PORT_FEAT_POWER:
++ if (HCS_PPC (ehci->hcs_params))
++ ehci_writel(ehci, temp | PORT_POWER,
++ status_reg);
++ break;
++ case USB_PORT_FEAT_RESET:
++ if (temp & (PORT_SUSPEND|PORT_RESUME))
++ goto error;
++ /* line status bits may report this as low speed,
++ * which can be fine if this root hub has a
++ * transaction translator built in.
++ */
++ if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
++ && !ehci_is_TDI(ehci)
++ && PORT_USB11 (temp)) {
++ ehci_dbg (ehci,
++ "port %d low speed --> companion\n",
++ wIndex + 1);
++ temp |= PORT_OWNER;
++ } else {
++ temp |= PORT_RESET;
++ temp &= ~PORT_PE;
++
++ /*
++ * caller must wait, then call GetPortStatus
++ * usb 2.0 spec says 50 ms resets on root
++ */
++ ehci->reset_done [wIndex] = jiffies
++ + msecs_to_jiffies (50);
++ }
++ ehci_writel(ehci, temp, status_reg);
++ break;
++
++ /* For downstream facing ports (these): one hub port is put
++ * into test mode according to USB2 11.24.2.13, then the hub
++ * must be reset (which for root hub now means rmmod+modprobe,
++ * or else system reboot). See EHCI 2.3.9 and 4.14 for info
++ * about the EHCI-specific stuff.
++ */
++ case USB_PORT_FEAT_TEST:
++#ifdef CONFIG_USB_HCD_TEST_MODE
++ if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ retval = ehset_single_step_set_feature(hcd,
++ wIndex);
++ spin_lock_irqsave(&ehci->lock, flags);
++ break;
++ }
++#endif
++ if (!selector || selector > 5)
++ goto error;
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ ehci_quiesce(ehci);
++ spin_lock_irqsave(&ehci->lock, flags);
++
++ /* Put all enabled ports into suspend */
++ while (ports--) {
++ u32 __iomem *sreg =
++ &ehci->regs->port_status[ports];
++
++ temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
++ if (temp & PORT_PE)
++ ehci_writel(ehci, temp | PORT_SUSPEND,
++ sreg);
++ }
++
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ ehci_halt(ehci);
++ spin_lock_irqsave(&ehci->lock, flags);
++
++ temp = ehci_readl(ehci, status_reg);
++ temp |= selector << 16;
++ ehci_writel(ehci, temp, status_reg);
++ break;
++
++ default:
++ goto error;
++ }
++ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
++ break;
++
++ default:
++error:
++ /* "stall" on error */
++ retval = -EPIPE;
++ }
++error_exit:
++ spin_unlock_irqrestore (&ehci->lock, flags);
++ return retval;
++}
++
++static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++
++ if (ehci_is_TDI(ehci))
++ return;
++ set_owner(ehci, --portnum, PORT_OWNER);
++}
++
++static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ u32 __iomem *reg;
++
++ if (ehci_is_TDI(ehci))
++ return 0;
++ reg = &ehci->regs->port_status[portnum - 1];
++ return ehci_readl(ehci, reg) & PORT_OWNER;
++}
+diff -Nur linux-3.14.36/drivers/usb/host/Kconfig linux-openelec/drivers/usb/host/Kconfig
+--- linux-3.14.36/drivers/usb/host/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/host/Kconfig 2015-05-06 12:05:41.000000000 -0500
+@@ -158,6 +158,13 @@
+ Enables support for the on-chip EHCI controller on
+ ST SPEAr chips.
+
++config USB_EHCI_HCD_SYNOPSYS
++ tristate "Support for Synopsys Host-AHB USB 2.0 controller"
++ depends on USB_EHCI_HCD && USB_PHY
++ ---help---
++ Enable support for onchip USB controllers based on DesignWare USB 2.0
++ Host-AHB Controller IP from Synopsys.
++
+ config USB_EHCI_HCD_AT91
+ tristate "Support for Atmel on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_AT91
+diff -Nur linux-3.14.36/drivers/usb/host/Makefile linux-openelec/drivers/usb/host/Makefile
+--- linux-3.14.36/drivers/usb/host/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/host/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -33,6 +33,8 @@
+ obj-$(CONFIG_USB_EHCI_HCD_ORION) += ehci-orion.o
+ obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
+ obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
++obj-$(CONFIG_USB_EHCI_S5P) += ehci-s5p.o
++obj-$(CONFIG_USB_EHCI_HCD_SYNOPSYS) += ehci-h20ahb.o
+ obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
+ obj-$(CONFIG_USB_EHCI_MSM) += ehci-msm.o
+ obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
+diff -Nur linux-3.14.36/drivers/usb/phy/Kconfig linux-openelec/drivers/usb/phy/Kconfig
+--- linux-3.14.36/drivers/usb/phy/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/phy/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -253,7 +253,7 @@
+
+ config USB_ULPI
+ bool "Generic ULPI Transceiver Driver"
+- depends on ARM
++ depends on ARM || ARM64
+ help
+ Enable this to support ULPI connected USB OTG transceivers which
+ are likely found on embedded boards.
+diff -Nur linux-3.14.36/drivers/usb/phy/phy-mxs-usb.c linux-openelec/drivers/usb/phy/phy-mxs-usb.c
+--- linux-3.14.36/drivers/usb/phy/phy-mxs-usb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/phy/phy-mxs-usb.c 2015-05-06 12:05:42.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Marek Vasut <marex@denx.de>
+ * on behalf of DENX Software Engineering GmbH
+ *
+@@ -20,6 +20,9 @@
+ #include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
++#include <linux/of_device.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
+
+ #define DRIVER_NAME "mxs_phy"
+
+@@ -28,18 +31,137 @@
+ #define HW_USBPHY_CTRL_SET 0x34
+ #define HW_USBPHY_CTRL_CLR 0x38
+
++#define HW_USBPHY_DEBUG_SET 0x54
++#define HW_USBPHY_DEBUG_CLR 0x58
++
++#define HW_USBPHY_IP 0x90
++#define HW_USBPHY_IP_SET 0x94
++#define HW_USBPHY_IP_CLR 0x98
++
+ #define BM_USBPHY_CTRL_SFTRST BIT(31)
+ #define BM_USBPHY_CTRL_CLKGATE BIT(30)
++#define BM_USBPHY_CTRL_ENAUTOSET_USBCLKS BIT(26)
++#define BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE BIT(25)
++#define BM_USBPHY_CTRL_ENVBUSCHG_WKUP BIT(23)
++#define BM_USBPHY_CTRL_ENIDCHG_WKUP BIT(22)
++#define BM_USBPHY_CTRL_ENDPDMCHG_WKUP BIT(21)
++#define BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD BIT(20)
++#define BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE BIT(19)
++#define BM_USBPHY_CTRL_ENAUTO_PWRON_PLL BIT(18)
+ #define BM_USBPHY_CTRL_ENUTMILEVEL3 BIT(15)
+ #define BM_USBPHY_CTRL_ENUTMILEVEL2 BIT(14)
+ #define BM_USBPHY_CTRL_ENHOSTDISCONDETECT BIT(1)
+
++#define BM_USBPHY_IP_FIX (BIT(17) | BIT(18))
++
++#define BM_USBPHY_DEBUG_CLKGATE BIT(30)
++
++/* Anatop Registers */
++#define ANADIG_ANA_MISC0 0x150
++#define ANADIG_ANA_MISC0_SET 0x154
++#define ANADIG_ANA_MISC0_CLR 0x158
++
++#define ANADIG_USB1_VBUS_DET_STAT 0x1c0
++#define ANADIG_USB2_VBUS_DET_STAT 0x220
++
++#define ANADIG_USB1_LOOPBACK_SET 0x1e4
++#define ANADIG_USB1_LOOPBACK_CLR 0x1e8
++#define ANADIG_USB2_LOOPBACK_SET 0x244
++#define ANADIG_USB2_LOOPBACK_CLR 0x248
++
++#define ANADIG_USB1_MISC 0x1f0
++#define ANADIG_USB2_MISC 0x250
++
++#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG BIT(12)
++#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL BIT(11)
++
++#define BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3)
++#define BM_ANADIG_USB2_VBUS_DET_STAT_VBUS_VALID BIT(3)
++
++#define BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 BIT(2)
++#define BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN BIT(5)
++#define BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 BIT(2)
++#define BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN BIT(5)
++
++#define BM_ANADIG_USB1_MISC_RX_VPIN_FS BIT(29)
++#define BM_ANADIG_USB1_MISC_RX_VMIN_FS BIT(28)
++#define BM_ANADIG_USB2_MISC_RX_VPIN_FS BIT(29)
++#define BM_ANADIG_USB2_MISC_RX_VMIN_FS BIT(28)
++
++#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
++
++/* Do disconnection between PHY and controller without vbus */
++#define MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS BIT(0)
++
++/*
++ * The PHY will be in messy if there is an wakeup after putting
++ * bus to suspend (set portsc.suspendM) but before setting PHY to low
++ * power mode (set portsc.phcd).
++ */
++#define MXS_PHY_ABNORAML_IN_SUSPEND BIT(1)
++
++/*
++ * The SOF sends too fast after resuming, it will cause disconnection
++ * between host and high speed device.
++ */
++#define MXS_PHY_SENDING_SOF_TOO_FAST BIT(2)
++
++/* The SoCs who have anatop module */
++#define MXS_PHY_HAS_ANATOP BIT(3)
++
++struct mxs_phy_data {
++ unsigned int flags;
++};
++
++static const struct mxs_phy_data imx23_phy_data = {
++ .flags = MXS_PHY_ABNORAML_IN_SUSPEND | MXS_PHY_SENDING_SOF_TOO_FAST,
++};
++
++static const struct mxs_phy_data imx6q_phy_data = {
++ .flags = MXS_PHY_SENDING_SOF_TOO_FAST |
++ MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
++ MXS_PHY_HAS_ANATOP,
++};
++
++static const struct mxs_phy_data imx6sl_phy_data = {
++ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
++ MXS_PHY_HAS_ANATOP,
++};
++
++static const struct of_device_id mxs_phy_dt_ids[] = {
++ { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
++ { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
++ { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
++
+ struct mxs_phy {
+ struct usb_phy phy;
+ struct clk *clk;
++ const struct mxs_phy_data *data;
++ struct regmap *regmap_anatop;
++ int port_id;
+ };
+
+-#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
++static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy)
++{
++ return mxs_phy->data == &imx6q_phy_data;
++}
++
++static inline bool is_imx6sl_phy(struct mxs_phy *mxs_phy)
++{
++ return mxs_phy->data == &imx6sl_phy_data;
++}
++
++/*
++ * PHY needs some 32K cycles to switch from 32K clock to
++ * bus (such as AHB/AXI, etc) clock.
++ */
++static void mxs_phy_clock_switch(void)
++{
++ usleep_range(300, 400);
++}
+
+ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
+ {
+@@ -53,19 +175,122 @@
+ /* Power up the PHY */
+ writel(0, base + HW_USBPHY_PWD);
+
+- /* enable FS/LS device */
+- writel(BM_USBPHY_CTRL_ENUTMILEVEL2 |
+- BM_USBPHY_CTRL_ENUTMILEVEL3,
++ /*
++ * USB PHY Ctrl Setting
++ * - Auto clock/power on
++ * - Enable full/low speed support
++ */
++ writel(BM_USBPHY_CTRL_ENAUTOSET_USBCLKS |
++ BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE |
++ BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD |
++ BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE |
++ BM_USBPHY_CTRL_ENAUTO_PWRON_PLL |
++ BM_USBPHY_CTRL_ENUTMILEVEL2 |
++ BM_USBPHY_CTRL_ENUTMILEVEL3,
+ base + HW_USBPHY_CTRL_SET);
+
++ /* Enable IC solution */
++ if (is_imx6q_phy(mxs_phy) || is_imx6sl_phy(mxs_phy))
++ writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET);
++
+ return 0;
+ }
+
++/* Return true if the vbus is there */
++static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
++{
++ unsigned int vbus_value;
++
++ if (mxs_phy->port_id == 0)
++ regmap_read(mxs_phy->regmap_anatop,
++ ANADIG_USB1_VBUS_DET_STAT,
++ &vbus_value);
++ else if (mxs_phy->port_id == 1)
++ regmap_read(mxs_phy->regmap_anatop,
++ ANADIG_USB2_VBUS_DET_STAT,
++ &vbus_value);
++
++ if (vbus_value & BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)
++ return true;
++ else
++ return false;
++}
++
++static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
++{
++ void __iomem *base = mxs_phy->phy.io_priv;
++ u32 reg;
++
++ if (disconnect)
++ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
++ base + HW_USBPHY_DEBUG_CLR);
++
++ if (mxs_phy->port_id == 0) {
++ reg = disconnect ? ANADIG_USB1_LOOPBACK_SET
++ : ANADIG_USB1_LOOPBACK_CLR;
++ regmap_write(mxs_phy->regmap_anatop, reg,
++ BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 |
++ BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN);
++ } else if (mxs_phy->port_id == 1) {
++ reg = disconnect ? ANADIG_USB2_LOOPBACK_SET
++ : ANADIG_USB2_LOOPBACK_CLR;
++ regmap_write(mxs_phy->regmap_anatop, reg,
++ BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 |
++ BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN);
++ }
++
++ if (!disconnect)
++ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
++ base + HW_USBPHY_DEBUG_SET);
++
++ /* Delay some time, and let Linestate be SE0 for controller */
++ if (disconnect)
++ usleep_range(500, 1000);
++}
++
++static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
++{
++ bool vbus_is_on = false;
++
++ /* If the SoCs don't need to disconnect line without vbus, quit */
++ if (!(mxs_phy->data->flags & MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS))
++ return;
++
++ /* If the SoCs don't have anatop, quit */
++ if (!mxs_phy->regmap_anatop)
++ return;
++
++ vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
++
++ if (on && !vbus_is_on)
++ __mxs_phy_disconnect_line(mxs_phy, true);
++ else
++ __mxs_phy_disconnect_line(mxs_phy, false);
++
++}
++
++static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on)
++{
++ unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
++
++ /* If the SoCs don't have anatop, quit */
++ if (!mxs_phy->regmap_anatop)
++ return;
++
++ if (is_imx6q_phy(mxs_phy))
++ regmap_write(mxs_phy->regmap_anatop, reg,
++ BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG);
++ else if (is_imx6sl_phy(mxs_phy))
++ regmap_write(mxs_phy->regmap_anatop,
++ reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL);
++}
++
+ static int mxs_phy_init(struct usb_phy *phy)
+ {
+ int ret;
+ struct mxs_phy *mxs_phy = to_mxs_phy(phy);
+
++ mxs_phy_clock_switch();
+ ret = clk_prepare_enable(mxs_phy->clk);
+ if (ret)
+ return ret;
+@@ -83,17 +308,62 @@
+ clk_disable_unprepare(mxs_phy->clk);
+ }
+
++static bool mxs_phy_is_low_speed_connection(struct mxs_phy *mxs_phy)
++{
++ unsigned int line_state;
++ /* bit definition is the same for all controllers */
++ unsigned int dp_bit = BM_ANADIG_USB1_MISC_RX_VPIN_FS,
++ dm_bit = BM_ANADIG_USB1_MISC_RX_VMIN_FS;
++ unsigned int reg = ANADIG_USB1_MISC;
++
++ /* If the SoCs don't have anatop, quit */
++ if (!mxs_phy->regmap_anatop)
++ return false;
++
++ if (mxs_phy->port_id == 0)
++ reg = ANADIG_USB1_MISC;
++ else if (mxs_phy->port_id == 1)
++ reg = ANADIG_USB2_MISC;
++
++ regmap_read(mxs_phy->regmap_anatop, reg, &line_state);
++
++ if ((line_state & (dp_bit | dm_bit)) == dm_bit)
++ return true;
++ else
++ return false;
++}
++
+ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
+ {
+ int ret;
+ struct mxs_phy *mxs_phy = to_mxs_phy(x);
++ bool low_speed_connection, vbus_is_on;
++
++ low_speed_connection = mxs_phy_is_low_speed_connection(mxs_phy);
++ vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
+
+ if (suspend) {
+ writel(0xffffffff, x->io_priv + HW_USBPHY_PWD);
++ /*
++ * FIXME: Do not power down RXPWD1PT1 bit for low speed
++ * connect. The low speed connection will have problem at
++ * very rare cases during usb suspend and resume process.
++ */
++ if (low_speed_connection & vbus_is_on) {
++ /*
++ * If value to be set as pwd value is not 0xffffffff,
++ * several 32Khz cycles are needed.
++ */
++ mxs_phy_clock_switch();
++ writel(0xffbfffff, x->io_priv + HW_USBPHY_PWD);
++ } else {
++ writel(0xffffffff, x->io_priv + HW_USBPHY_PWD);
++ }
+ writel(BM_USBPHY_CTRL_CLKGATE,
+ x->io_priv + HW_USBPHY_CTRL_SET);
+ clk_disable_unprepare(mxs_phy->clk);
+ } else {
++ mxs_phy_clock_switch();
+ ret = clk_prepare_enable(mxs_phy->clk);
+ if (ret)
+ return ret;
+@@ -105,11 +375,28 @@
+ return 0;
+ }
+
++static int mxs_phy_set_wakeup(struct usb_phy *x, bool enabled)
++{
++ struct mxs_phy *mxs_phy = to_mxs_phy(x);
++ u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP |
++ BM_USBPHY_CTRL_ENDPDMCHG_WKUP |
++ BM_USBPHY_CTRL_ENIDCHG_WKUP;
++ if (enabled) {
++ mxs_phy_disconnect_line(mxs_phy, true);
++ writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_SET);
++ } else {
++ writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_CLR);
++ mxs_phy_disconnect_line(mxs_phy, false);
++ }
++
++ return 0;
++}
++
+ static int mxs_phy_on_connect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+ {
+- dev_dbg(phy->dev, "%s speed device has connected\n",
+- (speed == USB_SPEED_HIGH) ? "high" : "non-high");
++ dev_dbg(phy->dev, "%s device has connected\n",
++ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
+
+ if (speed == USB_SPEED_HIGH)
+ writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
+@@ -121,8 +408,8 @@
+ static int mxs_phy_on_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+ {
+- dev_dbg(phy->dev, "%s speed device has disconnected\n",
+- (speed == USB_SPEED_HIGH) ? "high" : "non-high");
++ dev_dbg(phy->dev, "%s device has disconnected\n",
++ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
+
+ if (speed == USB_SPEED_HIGH)
+ writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
+@@ -131,6 +418,48 @@
+ return 0;
+ }
+
++static int mxs_phy_on_suspend(struct usb_phy *phy,
++ enum usb_device_speed speed)
++{
++ struct mxs_phy *mxs_phy = to_mxs_phy(phy);
++
++ dev_dbg(phy->dev, "%s device has suspended\n",
++ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
++
++ /* delay 4ms to wait bus entering idle */
++ usleep_range(4000, 5000);
++
++ if (mxs_phy->data->flags & MXS_PHY_ABNORAML_IN_SUSPEND) {
++ writel_relaxed(0xffffffff, phy->io_priv + HW_USBPHY_PWD);
++ writel_relaxed(0, phy->io_priv + HW_USBPHY_PWD);
++ }
++
++ if (speed == USB_SPEED_HIGH)
++ writel_relaxed(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
++ phy->io_priv + HW_USBPHY_CTRL_CLR);
++
++ return 0;
++}
++
++/*
++ * The resume signal must be finished here.
++ */
++static int mxs_phy_on_resume(struct usb_phy *phy,
++ enum usb_device_speed speed)
++{
++ dev_dbg(phy->dev, "%s device has resumed\n",
++ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
++
++ if (speed == USB_SPEED_HIGH) {
++ /* Make sure the device has switched to High-Speed mode */
++ udelay(500);
++ writel_relaxed(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
++ phy->io_priv + HW_USBPHY_CTRL_SET);
++ }
++
++ return 0;
++}
++
+ static int mxs_phy_probe(struct platform_device *pdev)
+ {
+ struct resource *res;
+@@ -138,6 +467,9 @@
+ struct clk *clk;
+ struct mxs_phy *mxs_phy;
+ int ret;
++ const struct of_device_id *of_id =
++ of_match_device(mxs_phy_dt_ids, &pdev->dev);
++ struct device_node *np = pdev->dev.of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+@@ -157,6 +489,13 @@
+ return -ENOMEM;
+ }
+
++ ret = of_alias_get_id(np, "usbphy");
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
++ return ret;
++ }
++ mxs_phy->port_id = ret;
++
+ mxs_phy->phy.io_priv = base;
+ mxs_phy->phy.dev = &pdev->dev;
+ mxs_phy->phy.label = DRIVER_NAME;
+@@ -166,11 +505,30 @@
+ mxs_phy->phy.notify_connect = mxs_phy_on_connect;
+ mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
+ mxs_phy->phy.type = USB_PHY_TYPE_USB2;
++ mxs_phy->phy.set_wakeup = mxs_phy_set_wakeup;
+
+ mxs_phy->clk = clk;
++ mxs_phy->data = of_id->data;
++
++ if (mxs_phy->data->flags & MXS_PHY_SENDING_SOF_TOO_FAST) {
++ mxs_phy->phy.notify_suspend = mxs_phy_on_suspend;
++ mxs_phy->phy.notify_resume = mxs_phy_on_resume;
++ }
+
+ platform_set_drvdata(pdev, mxs_phy);
+
++ if (mxs_phy->data->flags & MXS_PHY_HAS_ANATOP) {
++ mxs_phy->regmap_anatop = syscon_regmap_lookup_by_phandle
++ (np, "fsl,anatop");
++ if (IS_ERR(mxs_phy->regmap_anatop)) {
++ dev_dbg(&pdev->dev,
++ "failed to find regmap for anatop\n");
++ return PTR_ERR(mxs_phy->regmap_anatop);
++ }
++ }
++
++ device_set_wakeup_capable(&pdev->dev, true);
++
+ ret = usb_add_phy_dev(&mxs_phy->phy);
+ if (ret)
+ return ret;
+@@ -187,11 +545,27 @@
+ return 0;
+ }
+
+-static const struct of_device_id mxs_phy_dt_ids[] = {
+- { .compatible = "fsl,imx23-usbphy", },
+- { /* sentinel */ }
+-};
+-MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
++static int mxs_phy_system_suspend(struct device *dev)
++{
++ struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
++
++ if (device_may_wakeup(dev))
++ mxs_phy_enable_ldo_in_suspend(mxs_phy, true);
++
++ return 0;
++}
++
++static int mxs_phy_system_resume(struct device *dev)
++{
++ struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
++
++ if (device_may_wakeup(dev))
++ mxs_phy_enable_ldo_in_suspend(mxs_phy, false);
++
++ return 0;
++}
++
++SIMPLE_DEV_PM_OPS(mxs_phy_pm, mxs_phy_system_suspend, mxs_phy_system_resume);
+
+ static struct platform_driver mxs_phy_driver = {
+ .probe = mxs_phy_probe,
+@@ -200,6 +574,7 @@
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mxs_phy_dt_ids,
++ .pm = &mxs_phy_pm,
+ },
+ };
+
+diff -Nur linux-3.14.36/drivers/usb/phy/phy-ulpi.c linux-openelec/drivers/usb/phy/phy-ulpi.c
+--- linux-3.14.36/drivers/usb/phy/phy-ulpi.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/usb/phy/phy-ulpi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -48,6 +48,7 @@
+ ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
+ ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
+ ULPI_INFO(ULPI_ID(0x0424, 0x0007), "SMSC USB3320"),
++ ULPI_INFO(ULPI_ID(0x0424, 0x0009), "SMSC USB334x"),
+ ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
+ };
+
+diff -Nur linux-3.14.36/drivers/video/amba-clcd.c linux-openelec/drivers/video/amba-clcd.c
+--- linux-3.14.36/drivers/video/amba-clcd.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/video/amba-clcd.c 2015-05-06 12:05:42.000000000 -0500
+@@ -17,7 +17,10 @@
+ #include <linux/string.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
++#include <linux/of.h>
+ #include <linux/fb.h>
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+@@ -31,8 +34,20 @@
+
+ #define to_clcd(info) container_of(info, struct clcd_fb, fb)
+
++#ifdef CONFIG_ARM
++#define clcdfb_dma_alloc dma_alloc_writecombine
++#define clcdfb_dma_free dma_free_writecombine
++#define clcdfb_dma_mmap dma_mmap_writecombine
++#else
++#define clcdfb_dma_alloc dma_alloc_coherent
++#define clcdfb_dma_free dma_free_coherent
++#define clcdfb_dma_mmap dma_mmap_coherent
++#endif
++
+ /* This is limited to 16 characters when displayed by X startup */
+ static const char *clcd_name = "CLCD FB";
++static char *def_mode;
++module_param_named(mode, def_mode, charp, 0);
+
+ /*
+ * Unfortunately, the enable/disable functions may be called either from
+@@ -234,6 +249,17 @@
+ bgr = caps & CLCD_CAP_BGR && var->blue.offset == 0;
+ rgb = caps & CLCD_CAP_RGB && var->red.offset == 0;
+
++ /*
++ * Seems that for 32-bit mode there is confusion about RGB
++ * ordering somewhere between user-side, kernel and hardware.
++ * The following hack seems get things working, at least on
++ * vexpress hardware and models...
++ */
++ if (var->bits_per_pixel == 32) {
++ bgr = false;
++ rgb = true;
++ }
++
+ if (!bgr && !rgb)
+ /*
+ * The requested format was not possible, try just
+@@ -393,6 +419,44 @@
+ return 0;
+ }
+
++int clcdfb_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
++{
++ return clcdfb_dma_mmap(&fb->dev->dev, vma,
++ fb->fb.screen_base,
++ fb->fb.fix.smem_start,
++ fb->fb.fix.smem_len);
++}
++
++int clcdfb_mmap_io(struct clcd_fb *fb, struct vm_area_struct *vma)
++{
++ unsigned long user_count, count, pfn, off;
++
++ user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ count = PAGE_ALIGN(fb->fb.fix.smem_len) >> PAGE_SHIFT;
++ pfn = fb->fb.fix.smem_start >> PAGE_SHIFT;
++ off = vma->vm_pgoff;
++
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ if (off < count && user_count <= (count - off))
++ return remap_pfn_range(vma, vma->vm_start, pfn + off,
++ user_count << PAGE_SHIFT,
++ vma->vm_page_prot);
++
++ return -ENXIO;
++}
++
++void clcdfb_remove_dma(struct clcd_fb *fb)
++{
++ clcdfb_dma_free(&fb->dev->dev, fb->fb.fix.smem_len,
++ fb->fb.screen_base, fb->fb.fix.smem_start);
++}
++
++void clcdfb_remove_io(struct clcd_fb *fb)
++{
++ iounmap(fb->fb.screen_base);
++}
++
+ static int clcdfb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+ {
+@@ -543,14 +607,247 @@
+ return ret;
+ }
+
++struct string_lookup {
++ const char *string;
++ const u32 val;
++};
++
++static struct string_lookup vmode_lookups[] = {
++ { "FB_VMODE_NONINTERLACED", FB_VMODE_NONINTERLACED},
++ { "FB_VMODE_INTERLACED", FB_VMODE_INTERLACED},
++ { "FB_VMODE_DOUBLE", FB_VMODE_DOUBLE},
++ { "FB_VMODE_ODD_FLD_FIRST", FB_VMODE_ODD_FLD_FIRST},
++ { NULL, 0 },
++};
++
++static struct string_lookup tim2_lookups[] = {
++ { "TIM2_CLKSEL", TIM2_CLKSEL},
++ { "TIM2_IVS", TIM2_IVS},
++ { "TIM2_IHS", TIM2_IHS},
++ { "TIM2_IPC", TIM2_IPC},
++ { "TIM2_IOE", TIM2_IOE},
++ { "TIM2_BCD", TIM2_BCD},
++ { NULL, 0},
++};
++static struct string_lookup cntl_lookups[] = {
++ {"CNTL_LCDEN", CNTL_LCDEN},
++ {"CNTL_LCDBPP1", CNTL_LCDBPP1},
++ {"CNTL_LCDBPP2", CNTL_LCDBPP2},
++ {"CNTL_LCDBPP4", CNTL_LCDBPP4},
++ {"CNTL_LCDBPP8", CNTL_LCDBPP8},
++ {"CNTL_LCDBPP16", CNTL_LCDBPP16},
++ {"CNTL_LCDBPP16_565", CNTL_LCDBPP16_565},
++ {"CNTL_LCDBPP16_444", CNTL_LCDBPP16_444},
++ {"CNTL_LCDBPP24", CNTL_LCDBPP24},
++ {"CNTL_LCDBW", CNTL_LCDBW},
++ {"CNTL_LCDTFT", CNTL_LCDTFT},
++ {"CNTL_LCDMONO8", CNTL_LCDMONO8},
++ {"CNTL_LCDDUAL", CNTL_LCDDUAL},
++ {"CNTL_BGR", CNTL_BGR},
++ {"CNTL_BEBO", CNTL_BEBO},
++ {"CNTL_BEPO", CNTL_BEPO},
++ {"CNTL_LCDPWR", CNTL_LCDPWR},
++ {"CNTL_LCDVCOMP(1)", CNTL_LCDVCOMP(1)},
++ {"CNTL_LCDVCOMP(2)", CNTL_LCDVCOMP(2)},
++ {"CNTL_LCDVCOMP(3)", CNTL_LCDVCOMP(3)},
++ {"CNTL_LCDVCOMP(4)", CNTL_LCDVCOMP(4)},
++ {"CNTL_LCDVCOMP(5)", CNTL_LCDVCOMP(5)},
++ {"CNTL_LCDVCOMP(6)", CNTL_LCDVCOMP(6)},
++ {"CNTL_LCDVCOMP(7)", CNTL_LCDVCOMP(7)},
++ {"CNTL_LDMAFIFOTIME", CNTL_LDMAFIFOTIME},
++ {"CNTL_WATERMARK", CNTL_WATERMARK},
++ { NULL, 0},
++};
++static struct string_lookup caps_lookups[] = {
++ {"CLCD_CAP_RGB444", CLCD_CAP_RGB444},
++ {"CLCD_CAP_RGB5551", CLCD_CAP_RGB5551},
++ {"CLCD_CAP_RGB565", CLCD_CAP_RGB565},
++ {"CLCD_CAP_RGB888", CLCD_CAP_RGB888},
++ {"CLCD_CAP_BGR444", CLCD_CAP_BGR444},
++ {"CLCD_CAP_BGR5551", CLCD_CAP_BGR5551},
++ {"CLCD_CAP_BGR565", CLCD_CAP_BGR565},
++ {"CLCD_CAP_BGR888", CLCD_CAP_BGR888},
++ {"CLCD_CAP_444", CLCD_CAP_444},
++ {"CLCD_CAP_5551", CLCD_CAP_5551},
++ {"CLCD_CAP_565", CLCD_CAP_565},
++ {"CLCD_CAP_888", CLCD_CAP_888},
++ {"CLCD_CAP_RGB", CLCD_CAP_RGB},
++ {"CLCD_CAP_BGR", CLCD_CAP_BGR},
++ {"CLCD_CAP_ALL", CLCD_CAP_ALL},
++ { NULL, 0},
++};
++
++u32 parse_setting(struct string_lookup *lookup, const char *name)
++{
++ int i = 0;
++ while (lookup[i].string != NULL) {
++ if (strcmp(lookup[i].string, name) == 0)
++ return lookup[i].val;
++ ++i;
++ }
++ return -EINVAL;
++}
++
++u32 get_string_lookup(struct device_node *node, const char *name,
++ struct string_lookup *lookup)
++{
++ const char *string;
++ int count, i, ret = 0;
++
++ count = of_property_count_strings(node, name);
++ if (count >= 0)
++ for (i = 0; i < count; i++)
++ if (of_property_read_string_index(node, name, i,
++ &string) == 0)
++ ret |= parse_setting(lookup, string);
++ return ret;
++}
++
++int get_val(struct device_node *node, const char *string)
++{
++ u32 ret = 0;
++
++ if (of_property_read_u32(node, string, &ret))
++ ret = -1;
++ return ret;
++}
++
++struct clcd_panel *getPanel(struct device_node *node)
++{
++ static struct clcd_panel panel;
++
++ panel.mode.refresh = get_val(node, "refresh");
++ panel.mode.xres = get_val(node, "xres");
++ panel.mode.yres = get_val(node, "yres");
++ panel.mode.pixclock = get_val(node, "pixclock");
++ panel.mode.left_margin = get_val(node, "left_margin");
++ panel.mode.right_margin = get_val(node, "right_margin");
++ panel.mode.upper_margin = get_val(node, "upper_margin");
++ panel.mode.lower_margin = get_val(node, "lower_margin");
++ panel.mode.hsync_len = get_val(node, "hsync_len");
++ panel.mode.vsync_len = get_val(node, "vsync_len");
++ panel.mode.sync = get_val(node, "sync");
++ panel.bpp = get_val(node, "bpp");
++ panel.width = (signed short) get_val(node, "width");
++ panel.height = (signed short) get_val(node, "height");
++
++ panel.mode.vmode = get_string_lookup(node, "vmode", vmode_lookups);
++ panel.tim2 = get_string_lookup(node, "tim2", tim2_lookups);
++ panel.cntl = get_string_lookup(node, "cntl", cntl_lookups);
++ panel.caps = get_string_lookup(node, "caps", caps_lookups);
++
++ return &panel;
++}
++
++struct clcd_panel *clcdfb_get_panel(const char *name)
++{
++ struct device_node *node = NULL;
++ const char *mode;
++ struct clcd_panel *panel = NULL;
++
++ do {
++ node = of_find_compatible_node(node, NULL, "panel");
++ if (node)
++ if (of_property_read_string(node, "mode", &mode) == 0)
++ if (strcmp(mode, name) == 0) {
++ panel = getPanel(node);
++ panel->mode.name = name;
++ }
++ } while (node != NULL);
++
++ return panel;
++}
++
++#ifdef CONFIG_OF
++static int clcdfb_dt_init(struct clcd_fb *fb)
++{
++ int err = 0;
++ struct device_node *node;
++ const char *mode;
++ dma_addr_t dma;
++ u32 use_dma;
++ const __be32 *prop;
++ int len, na, ns;
++ phys_addr_t fb_base, fb_size;
++
++ node = fb->dev->dev.of_node;
++ if (!node)
++ return -ENODEV;
++
++ na = of_n_addr_cells(node);
++ ns = of_n_size_cells(node);
++
++ if (def_mode && strlen(def_mode) > 0) {
++ fb->panel = clcdfb_get_panel(def_mode);
++ if (!fb->panel)
++ printk(KERN_ERR "CLCD: invalid mode specified on the command line (%s)\n", def_mode);
++ }
++
++ if (!fb->panel) {
++ if (WARN_ON(of_property_read_string(node, "mode", &mode)))
++ return -ENODEV;
++ fb->panel = clcdfb_get_panel(mode);
++ }
++
++ if (!fb->panel)
++ return -EINVAL;
++ fb->fb.fix.smem_len = fb->panel->mode.xres * fb->panel->mode.yres * 4;
++
++ fb->board->name = "Device Tree CLCD PL111";
++ fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888;
++ fb->board->check = clcdfb_check;
++ fb->board->decode = clcdfb_decode;
++
++ if (of_property_read_u32(node, "use_dma", &use_dma))
++ use_dma = 0;
++
++ if (use_dma) {
++ fb->fb.screen_base = clcdfb_dma_alloc(&fb->dev->dev,
++ fb->fb.fix.smem_len,
++ &dma, GFP_KERNEL);
++ if (!fb->fb.screen_base) {
++ pr_err("CLCD: unable to map framebuffer\n");
++ return -ENOMEM;
++ }
++
++ fb->fb.fix.smem_start = dma;
++ fb->board->mmap = clcdfb_mmap_dma;
++ fb->board->remove = clcdfb_remove_dma;
++ } else {
++ prop = of_get_property(node, "framebuffer", &len);
++ if (WARN_ON(!prop || len < (na + ns) * sizeof(*prop)))
++ return -EINVAL;
++
++ fb_base = of_read_number(prop, na);
++ fb_size = of_read_number(prop + na, ns);
++
++ fb->fb.fix.smem_start = fb_base;
++ fb->fb.screen_base = ioremap_wc(fb_base, fb_size);
++ fb->board->mmap = clcdfb_mmap_io;
++ fb->board->remove = clcdfb_remove_io;
++ }
++
++ return err;
++}
++#endif /* CONFIG_OF */
++
+ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
+ {
+ struct clcd_board *board = dev_get_platdata(&dev->dev);
+ struct clcd_fb *fb;
+ int ret;
+
+- if (!board)
+- return -EINVAL;
++ if (!board) {
++#ifdef CONFIG_OF
++ if (dev->dev.of_node) {
++ board = kzalloc(sizeof(struct clcd_board), GFP_KERNEL);
++ if (!board)
++ return -ENOMEM;
++ board->setup = clcdfb_dt_init;
++ } else
++#endif
++ return -EINVAL;
++ }
+
+ ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+diff -Nur linux-3.14.36/drivers/video/arm-hdlcd.c linux-openelec/drivers/video/arm-hdlcd.c
+--- linux-3.14.36/drivers/video/arm-hdlcd.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/arm-hdlcd.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,844 @@
++/*
++ * drivers/video/arm-hdlcd.c
++ *
++ * Copyright (C) 2011 ARM Limited
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
++ * ARM HDLCD Controller
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/of.h>
++#include <linux/fb.h>
++#include <linux/clk.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/ioport.h>
++#include <linux/dma-mapping.h>
++#include <linux/platform_device.h>
++#include <linux/memblock.h>
++#include <linux/arm-hdlcd.h>
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++#endif
++
++#include "edid.h"
++
++#ifdef CONFIG_SERIAL_AMBA_PCU_UART
++int get_edid(u8 *msgbuf);
++#else
++#endif
++
++#define to_hdlcd_device(info) container_of(info, struct hdlcd_device, fb)
++
++static struct of_device_id hdlcd_of_matches[] = {
++ { .compatible = "arm,hdlcd" },
++ {},
++};
++
++/* Framebuffer size. */
++static unsigned long framebuffer_size;
++
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++static unsigned long buffer_underrun_events;
++static DEFINE_SPINLOCK(hdlcd_underrun_lock);
++
++static void hdlcd_underrun_set(unsigned long val)
++{
++ spin_lock(&hdlcd_underrun_lock);
++ buffer_underrun_events = val;
++ spin_unlock(&hdlcd_underrun_lock);
++}
++
++static unsigned long hdlcd_underrun_get(void)
++{
++ unsigned long val;
++ spin_lock(&hdlcd_underrun_lock);
++ val = buffer_underrun_events;
++ spin_unlock(&hdlcd_underrun_lock);
++ return val;
++}
++
++#ifdef CONFIG_PROC_FS
++static int hdlcd_underrun_show(struct seq_file *m, void *v)
++{
++ unsigned char underrun_string[32];
++ snprintf(underrun_string, 32, "%lu\n", hdlcd_underrun_get());
++ seq_puts(m, underrun_string);
++ return 0;
++}
++
++static int proc_hdlcd_underrun_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, hdlcd_underrun_show, NULL);
++}
++
++static const struct file_operations proc_hdlcd_underrun_operations = {
++ .open = proc_hdlcd_underrun_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static int hdlcd_underrun_init(void)
++{
++ hdlcd_underrun_set(0);
++ proc_create("hdlcd_underrun", 0, NULL, &proc_hdlcd_underrun_operations);
++ return 0;
++}
++static void hdlcd_underrun_close(void)
++{
++ remove_proc_entry("hdlcd_underrun", NULL);
++}
++#else
++static int hdlcd_underrun_init(void) { return 0; }
++static void hdlcd_underrun_close(void) { }
++#endif
++#endif
++
++static char *fb_mode = "1680x1050-32@60\0\0\0\0\0";
++
++static struct fb_var_screeninfo cached_var_screeninfo;
++
++static struct fb_videomode hdlcd_default_mode = {
++ .refresh = 60,
++ .xres = 1680,
++ .yres = 1050,
++ .pixclock = 8403,
++ .left_margin = 80,
++ .right_margin = 48,
++ .upper_margin = 21,
++ .lower_margin = 3,
++ .hsync_len = 32,
++ .vsync_len = 6,
++ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ .vmode = FB_VMODE_NONINTERLACED
++};
++
++static inline void hdlcd_enable(struct hdlcd_device *hdlcd)
++{
++ dev_dbg(hdlcd->dev, "HDLCD: output enabled\n");
++ writel(1, hdlcd->base + HDLCD_REG_COMMAND);
++}
++
++static inline void hdlcd_disable(struct hdlcd_device *hdlcd)
++{
++ dev_dbg(hdlcd->dev, "HDLCD: output disabled\n");
++ writel(0, hdlcd->base + HDLCD_REG_COMMAND);
++}
++
++static int hdlcd_set_bitfields(struct hdlcd_device *hdlcd,
++ struct fb_var_screeninfo *var)
++{
++ int ret = 0;
++
++ memset(&var->transp, 0, sizeof(var->transp));
++ var->red.msb_right = 0;
++ var->green.msb_right = 0;
++ var->blue.msb_right = 0;
++ var->blue.offset = 0;
++
++ switch (var->bits_per_pixel) {
++ case 8:
++ /* pseudocolor */
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ break;
++ case 16:
++ /* 565 format */
++ var->red.length = 5;
++ var->green.length = 6;
++ var->blue.length = 5;
++ break;
++ case 32:
++ var->transp.length = 8;
++ case 24:
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ if (!ret) {
++ if(var->bits_per_pixel != 32)
++ {
++ var->green.offset = var->blue.length;
++ var->red.offset = var->green.offset + var->green.length;
++ }
++ else
++ {
++ /* Previously, the byte ordering for 32-bit color was
++ * (msb)<alpha><red><green><blue>(lsb)
++ * but this does not match what android expects and
++ * the colors are odd. Instead, use
++ * <alpha><blue><green><red>
++ * Since we tell fb what we are doing, console
++ * , X and directfb access should work fine.
++ */
++ var->green.offset = var->red.length;
++ var->blue.offset = var->green.offset + var->green.length;
++ var->transp.offset = var->blue.offset + var->blue.length;
++ }
++ }
++
++ return ret;
++}
++
++static int hdlcd_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++ int bytes_per_pixel = var->bits_per_pixel / 8;
++
++#ifdef HDLCD_NO_VIRTUAL_SCREEN
++ var->yres_virtual = var->yres;
++#else
++ var->yres_virtual = 2 * var->yres;
++#endif
++
++ if ((var->xres_virtual * bytes_per_pixel * var->yres_virtual) > hdlcd->fb.fix.smem_len)
++ return -ENOMEM;
++
++ if (var->xres > HDLCD_MAX_XRES || var->yres > HDLCD_MAX_YRES)
++ return -EINVAL;
++
++ /* make sure the bitfields are set appropriately */
++ return hdlcd_set_bitfields(hdlcd, var);
++}
++
++/* prototype */
++static int hdlcd_pan_display(struct fb_var_screeninfo *var,
++ struct fb_info *info);
++
++#define WRITE_HDLCD_REG(reg, value) writel((value), hdlcd->base + (reg))
++#define READ_HDLCD_REG(reg) readl(hdlcd->base + (reg))
++
++static int hdlcd_set_par(struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++ int bytes_per_pixel = hdlcd->fb.var.bits_per_pixel / 8;
++ int polarities;
++ int old_yoffset;
++
++ /* check for shortcuts */
++ old_yoffset = cached_var_screeninfo.yoffset;
++ cached_var_screeninfo.yoffset = info->var.yoffset;
++ if (!memcmp(&info->var, &cached_var_screeninfo,
++ sizeof(struct fb_var_screeninfo))) {
++ if(old_yoffset != info->var.yoffset) {
++ /* we only changed yoffset, and we already
++ * already recorded it a couple lines up
++ */
++ hdlcd_pan_display(&info->var, info);
++ }
++ /* or no change */
++ return 0;
++ }
++
++ hdlcd->fb.fix.line_length = hdlcd->fb.var.xres * bytes_per_pixel;
++
++ if (hdlcd->fb.var.bits_per_pixel >= 16)
++ hdlcd->fb.fix.visual = FB_VISUAL_TRUECOLOR;
++ else
++ hdlcd->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
++
++ memcpy(&cached_var_screeninfo, &info->var, sizeof(struct fb_var_screeninfo));
++
++ polarities = HDLCD_POLARITY_DATAEN |
++#ifndef CONFIG_ARCH_TUSCAN
++ HDLCD_POLARITY_PIXELCLK |
++#endif
++ HDLCD_POLARITY_DATA;
++ polarities |= (hdlcd->fb.var.sync & FB_SYNC_HOR_HIGH_ACT) ? HDLCD_POLARITY_HSYNC : 0;
++ polarities |= (hdlcd->fb.var.sync & FB_SYNC_VERT_HIGH_ACT) ? HDLCD_POLARITY_VSYNC : 0;
++
++ hdlcd_disable(hdlcd);
++
++ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_LENGTH, hdlcd->fb.var.xres * bytes_per_pixel);
++ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_PITCH, hdlcd->fb.var.xres * bytes_per_pixel);
++ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_COUNT, hdlcd->fb.var.yres - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_V_SYNC, hdlcd->fb.var.vsync_len - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_V_BACK_PORCH, hdlcd->fb.var.upper_margin - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_V_DATA, hdlcd->fb.var.yres - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_V_FRONT_PORCH, hdlcd->fb.var.lower_margin - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_H_SYNC, hdlcd->fb.var.hsync_len - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_H_BACK_PORCH, hdlcd->fb.var.left_margin - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_H_DATA, hdlcd->fb.var.xres - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_H_FRONT_PORCH, hdlcd->fb.var.right_margin - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_POLARITIES, polarities);
++ WRITE_HDLCD_REG(HDLCD_REG_PIXEL_FORMAT, (bytes_per_pixel - 1) << 3);
++#ifdef HDLCD_RED_DEFAULT_COLOUR
++ WRITE_HDLCD_REG(HDLCD_REG_RED_SELECT, (0x00ff0000 | (hdlcd->fb.var.red.length & 0xf) << 8) \
++ | hdlcd->fb.var.red.offset);
++#else
++ WRITE_HDLCD_REG(HDLCD_REG_RED_SELECT, ((hdlcd->fb.var.red.length & 0xf) << 8) | hdlcd->fb.var.red.offset);
++#endif
++ WRITE_HDLCD_REG(HDLCD_REG_GREEN_SELECT, ((hdlcd->fb.var.green.length & 0xf) << 8) | hdlcd->fb.var.green.offset);
++ WRITE_HDLCD_REG(HDLCD_REG_BLUE_SELECT, ((hdlcd->fb.var.blue.length & 0xf) << 8) | hdlcd->fb.var.blue.offset);
++
++ clk_set_rate(hdlcd->clk, (1000000000 / hdlcd->fb.var.pixclock) * 1000);
++ clk_enable(hdlcd->clk);
++
++ hdlcd_enable(hdlcd);
++
++ return 0;
++}
++
++static int hdlcd_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
++ unsigned int blue, unsigned int transp, struct fb_info *info)
++{
++ if (regno < 16) {
++ u32 *pal = info->pseudo_palette;
++
++ pal[regno] = ((red >> 8) << info->var.red.offset) |
++ ((green >> 8) << info->var.green.offset) |
++ ((blue >> 8) << info->var.blue.offset);
++ }
++
++ return 0;
++}
++
++static irqreturn_t hdlcd_irq(int irq, void *data)
++{
++ struct hdlcd_device *hdlcd = data;
++ unsigned long irq_mask, irq_status;
++
++ irq_mask = READ_HDLCD_REG(HDLCD_REG_INT_MASK);
++ irq_status = READ_HDLCD_REG(HDLCD_REG_INT_STATUS);
++
++ /* acknowledge interrupt(s) */
++ WRITE_HDLCD_REG(HDLCD_REG_INT_CLEAR, irq_status);
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++ if (irq_status & HDLCD_INTERRUPT_UNDERRUN) {
++ /* increment the count */
++ hdlcd_underrun_set(hdlcd_underrun_get() + 1);
++ }
++#endif
++ if (irq_status & HDLCD_INTERRUPT_VSYNC) {
++ /* disable future VSYNC interrupts */
++ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, irq_mask & ~HDLCD_INTERRUPT_VSYNC);
++
++ complete(&hdlcd->vsync_completion);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int hdlcd_wait_for_vsync(struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++ unsigned long irq_mask;
++ int err;
++
++ /* enable VSYNC interrupt */
++ irq_mask = READ_HDLCD_REG(HDLCD_REG_INT_MASK);
++ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, irq_mask | HDLCD_INTERRUPT_VSYNC);
++
++ err = wait_for_completion_interruptible_timeout(&hdlcd->vsync_completion,
++ msecs_to_jiffies(100));
++
++ if (!err)
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
++static int hdlcd_blank(int blank_mode, struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++
++ switch (blank_mode) {
++ case FB_BLANK_POWERDOWN:
++ clk_disable(hdlcd->clk);
++ case FB_BLANK_NORMAL:
++ hdlcd_disable(hdlcd);
++ break;
++ case FB_BLANK_UNBLANK:
++ clk_enable(hdlcd->clk);
++ hdlcd_enable(hdlcd);
++ break;
++ case FB_BLANK_VSYNC_SUSPEND:
++ case FB_BLANK_HSYNC_SUSPEND:
++ default:
++ return 1;
++ }
++
++ return 0;
++}
++
++static void hdlcd_mmap_open(struct vm_area_struct *vma)
++{
++}
++
++static void hdlcd_mmap_close(struct vm_area_struct *vma)
++{
++}
++
++static struct vm_operations_struct hdlcd_mmap_ops = {
++ .open = hdlcd_mmap_open,
++ .close = hdlcd_mmap_close,
++};
++
++static int hdlcd_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++ unsigned long off;
++ unsigned long start;
++ unsigned long len = hdlcd->fb.fix.smem_len;
++
++ if (vma->vm_end - vma->vm_start == 0)
++ return 0;
++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++ return -EINVAL;
++
++ off = vma->vm_pgoff << PAGE_SHIFT;
++ if ((off >= len) || (vma->vm_end - vma->vm_start + off) > len)
++ return -EINVAL;
++
++ start = hdlcd->fb.fix.smem_start;
++ off += start;
++
++ vma->vm_pgoff = off >> PAGE_SHIFT;
++ vma->vm_flags |= VM_IO;
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_ops = &hdlcd_mmap_ops;
++ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static int hdlcd_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++
++ hdlcd->fb.var.yoffset = var->yoffset;
++ WRITE_HDLCD_REG(HDLCD_REG_FB_BASE, hdlcd->fb.fix.smem_start +
++ (var->yoffset * hdlcd->fb.fix.line_length));
++
++ hdlcd_wait_for_vsync(info);
++
++ return 0;
++}
++
++static int hdlcd_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
++{
++ int err;
++
++ switch (cmd) {
++ case FBIO_WAITFORVSYNC:
++ err = hdlcd_wait_for_vsync(info);
++ break;
++ default:
++ err = -ENOIOCTLCMD;
++ break;
++ }
++
++ return err;
++}
++
++static struct fb_ops hdlcd_ops = {
++ .owner = THIS_MODULE,
++ .fb_check_var = hdlcd_check_var,
++ .fb_set_par = hdlcd_set_par,
++ .fb_setcolreg = hdlcd_setcolreg,
++ .fb_blank = hdlcd_blank,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_mmap = hdlcd_mmap,
++ .fb_pan_display = hdlcd_pan_display,
++ .fb_ioctl = hdlcd_ioctl,
++ .fb_compat_ioctl = hdlcd_ioctl
++};
++
++static int hdlcd_setup(struct hdlcd_device *hdlcd)
++{
++ u32 version;
++ int err = -EFAULT;
++
++ hdlcd->fb.device = hdlcd->dev;
++
++ hdlcd->clk = clk_get(hdlcd->dev, NULL);
++ if (IS_ERR(hdlcd->clk)) {
++ dev_err(hdlcd->dev, "HDLCD: unable to find clock data\n");
++ return PTR_ERR(hdlcd->clk);
++ }
++
++ err = clk_prepare(hdlcd->clk);
++ if (err)
++ goto clk_prepare_err;
++
++ hdlcd->base = ioremap_nocache(hdlcd->fb.fix.mmio_start, hdlcd->fb.fix.mmio_len);
++ if (!hdlcd->base) {
++ dev_err(hdlcd->dev, "HDLCD: unable to map registers\n");
++ goto remap_err;
++ }
++
++ hdlcd->fb.pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
++ if (!hdlcd->fb.pseudo_palette) {
++ dev_err(hdlcd->dev, "HDLCD: unable to allocate pseudo_palette memory\n");
++ err = -ENOMEM;
++ goto kmalloc_err;
++ }
++
++ version = readl(hdlcd->base + HDLCD_REG_VERSION);
++ if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
++ dev_err(hdlcd->dev, "HDLCD: unknown product id: 0x%x\n", version);
++ err = -EINVAL;
++ goto kmalloc_err;
++ }
++ dev_info(hdlcd->dev, "HDLCD: found ARM HDLCD version r%dp%d\n",
++ (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
++ version & HDLCD_VERSION_MINOR_MASK);
++
++ strcpy(hdlcd->fb.fix.id, "hdlcd");
++ hdlcd->fb.fbops = &hdlcd_ops;
++ hdlcd->fb.flags = FBINFO_FLAG_DEFAULT/* | FBINFO_VIRTFB*/;
++
++ hdlcd->fb.fix.type = FB_TYPE_PACKED_PIXELS;
++ hdlcd->fb.fix.type_aux = 0;
++ hdlcd->fb.fix.xpanstep = 0;
++ hdlcd->fb.fix.ypanstep = 1;
++ hdlcd->fb.fix.ywrapstep = 0;
++ hdlcd->fb.fix.accel = FB_ACCEL_NONE;
++
++ hdlcd->fb.var.nonstd = 0;
++ hdlcd->fb.var.activate = FB_ACTIVATE_NOW;
++ hdlcd->fb.var.height = -1;
++ hdlcd->fb.var.width = -1;
++ hdlcd->fb.var.accel_flags = 0;
++
++ init_completion(&hdlcd->vsync_completion);
++
++ if (hdlcd->edid) {
++ /* build modedb from EDID */
++ fb_edid_to_monspecs(hdlcd->edid, &hdlcd->fb.monspecs);
++ fb_videomode_to_modelist(hdlcd->fb.monspecs.modedb,
++ hdlcd->fb.monspecs.modedb_len,
++ &hdlcd->fb.modelist);
++ fb_find_mode(&hdlcd->fb.var, &hdlcd->fb, fb_mode,
++ hdlcd->fb.monspecs.modedb,
++ hdlcd->fb.monspecs.modedb_len,
++ &hdlcd_default_mode, 32);
++ } else {
++ hdlcd->fb.monspecs.hfmin = 0;
++ hdlcd->fb.monspecs.hfmax = 100000;
++ hdlcd->fb.monspecs.vfmin = 0;
++ hdlcd->fb.monspecs.vfmax = 400;
++ hdlcd->fb.monspecs.dclkmin = 1000000;
++ hdlcd->fb.monspecs.dclkmax = 100000000;
++ fb_find_mode(&hdlcd->fb.var, &hdlcd->fb, fb_mode, NULL, 0, &hdlcd_default_mode, 32);
++ }
++
++ dev_info(hdlcd->dev, "using %dx%d-%d@%d mode\n", hdlcd->fb.var.xres,
++ hdlcd->fb.var.yres, hdlcd->fb.var.bits_per_pixel,
++ hdlcd->fb.mode ? hdlcd->fb.mode->refresh : 60);
++ hdlcd->fb.var.xres_virtual = hdlcd->fb.var.xres;
++#ifdef HDLCD_NO_VIRTUAL_SCREEN
++ hdlcd->fb.var.yres_virtual = hdlcd->fb.var.yres;
++#else
++ hdlcd->fb.var.yres_virtual = hdlcd->fb.var.yres * 2;
++#endif
++
++ /* initialise and set the palette */
++ if (fb_alloc_cmap(&hdlcd->fb.cmap, NR_PALETTE, 0)) {
++ dev_err(hdlcd->dev, "failed to allocate cmap memory\n");
++ err = -ENOMEM;
++ goto setup_err;
++ }
++ fb_set_cmap(&hdlcd->fb.cmap, &hdlcd->fb);
++
++ /* Allow max number of outstanding requests with the largest beat burst */
++ WRITE_HDLCD_REG(HDLCD_REG_BUS_OPTIONS, HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
++ /* Set the framebuffer base to start of allocated memory */
++ WRITE_HDLCD_REG(HDLCD_REG_FB_BASE, hdlcd->fb.fix.smem_start);
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++ /* turn on underrun interrupt for counting */
++ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, HDLCD_INTERRUPT_UNDERRUN);
++#else
++ /* Ensure interrupts are disabled */
++ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, 0);
++#endif
++ fb_set_var(&hdlcd->fb, &hdlcd->fb.var);
++
++ if (!register_framebuffer(&hdlcd->fb)) {
++ return 0;
++ }
++
++ dev_err(hdlcd->dev, "HDLCD: cannot register framebuffer\n");
++
++ fb_dealloc_cmap(&hdlcd->fb.cmap);
++setup_err:
++ iounmap(hdlcd->base);
++kmalloc_err:
++ kfree(hdlcd->fb.pseudo_palette);
++remap_err:
++ clk_unprepare(hdlcd->clk);
++clk_prepare_err:
++ clk_put(hdlcd->clk);
++ return err;
++}
++
++static inline unsigned char atohex(u8 data)
++{
++ if (!isxdigit(data))
++ return 0;
++ /* truncate the upper nibble and add 9 to non-digit values */
++ return (data > 0x39) ? ((data & 0xf) + 9) : (data & 0xf);
++}
++
++/* EDID data is passed from devicetree in a literal string that can contain spaces and
++ the hexadecimal dump of the data */
++static int parse_edid_data(struct hdlcd_device *hdlcd, const u8 *edid_data, int data_len)
++{
++ int i, j;
++
++ if (!edid_data)
++ return -EINVAL;
++
++ hdlcd->edid = kzalloc(EDID_LENGTH, GFP_KERNEL);
++ if (!hdlcd->edid)
++ return -ENOMEM;
++
++ for (i = 0, j = 0; i < data_len; i++) {
++ if (isspace(edid_data[i]))
++ continue;
++ hdlcd->edid[j++] = atohex(edid_data[i]);
++ if (j >= EDID_LENGTH)
++ break;
++ }
++
++ if (j < EDID_LENGTH) {
++ kfree(hdlcd->edid);
++ hdlcd->edid = NULL;
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int hdlcd_probe(struct platform_device *pdev)
++{
++ int err = 0, i;
++ struct hdlcd_device *hdlcd;
++ struct resource *mem;
++#ifdef CONFIG_OF
++ struct device_node *of_node;
++#endif
++
++ memset(&cached_var_screeninfo, 0, sizeof(struct fb_var_screeninfo));
++
++ dev_dbg(&pdev->dev, "HDLCD: probing\n");
++
++ hdlcd = kzalloc(sizeof(*hdlcd), GFP_KERNEL);
++ if (!hdlcd)
++ return -ENOMEM;
++
++#ifdef CONFIG_OF
++ of_node = pdev->dev.of_node;
++ if (of_node) {
++ int len;
++ const u8 *edid;
++ const __be32 *prop = of_get_property(of_node, "mode", &len);
++ if (prop)
++ strncpy(fb_mode, (char *)prop, len);
++ prop = of_get_property(of_node, "framebuffer", &len);
++ if (prop) {
++ hdlcd->fb.fix.smem_start = of_read_ulong(prop,
++ of_n_addr_cells(of_node));
++ prop += of_n_addr_cells(of_node);
++ framebuffer_size = of_read_ulong(prop,
++ of_n_size_cells(of_node));
++ if (framebuffer_size > HDLCD_MAX_FRAMEBUFFER_SIZE)
++ framebuffer_size = HDLCD_MAX_FRAMEBUFFER_SIZE;
++ dev_dbg(&pdev->dev, "HDLCD: phys_addr = 0x%lx, size = 0x%lx\n",
++ hdlcd->fb.fix.smem_start, framebuffer_size);
++ }
++ edid = of_get_property(of_node, "edid", &len);
++ if (edid) {
++ err = parse_edid_data(hdlcd, edid, len);
++#ifdef CONFIG_SERIAL_AMBA_PCU_UART
++ } else {
++ /* ask the firmware to fetch the EDID */
++ dev_dbg(&pdev->dev, "HDLCD: Requesting EDID data\n");
++ hdlcd->edid = kzalloc(EDID_LENGTH, GFP_KERNEL);
++ if (!hdlcd->edid)
++ return -ENOMEM;
++ err = get_edid(hdlcd->edid);
++#endif /* CONFIG_SERIAL_AMBA_PCU_UART */
++ }
++ if (err)
++ dev_info(&pdev->dev, "HDLCD: Failed to parse EDID data\n");
++ }
++#endif /* CONFIG_OF */
++
++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!mem) {
++ dev_err(&pdev->dev, "HDLCD: cannot get platform resources\n");
++ err = -EINVAL;
++ goto resource_err;
++ }
++
++ i = platform_get_irq(pdev, 0);
++ if (i < 0) {
++ dev_err(&pdev->dev, "HDLCD: no irq defined for vsync\n");
++ err = -ENOENT;
++ goto resource_err;
++ } else {
++ err = request_irq(i, hdlcd_irq, 0, dev_name(&pdev->dev), hdlcd);
++ if (err) {
++ dev_err(&pdev->dev, "HDLCD: unable to request irq\n");
++ goto resource_err;
++ }
++ hdlcd->irq = i;
++ }
++
++ if (!request_mem_region(mem->start, resource_size(mem), dev_name(&pdev->dev))) {
++ err = -ENXIO;
++ goto request_err;
++ }
++
++ if (!hdlcd->fb.fix.smem_start) {
++ dev_err(&pdev->dev, "platform did not allocate frame buffer memory\n");
++ err = -ENOMEM;
++ goto memalloc_err;
++ }
++ hdlcd->fb.screen_base = ioremap_wc(hdlcd->fb.fix.smem_start, framebuffer_size);
++ if (!hdlcd->fb.screen_base) {
++ dev_err(&pdev->dev, "unable to ioremap framebuffer\n");
++ err = -ENOMEM;
++ goto probe_err;
++ }
++
++ hdlcd->fb.screen_size = framebuffer_size;
++ hdlcd->fb.fix.smem_len = framebuffer_size;
++ hdlcd->fb.fix.mmio_start = mem->start;
++ hdlcd->fb.fix.mmio_len = resource_size(mem);
++
++ /* Clear the framebuffer */
++ memset(hdlcd->fb.screen_base, 0, framebuffer_size);
++
++ hdlcd->dev = &pdev->dev;
++
++ dev_dbg(&pdev->dev, "HDLCD: framebuffer virt base %p, phys base 0x%lX\n",
++ hdlcd->fb.screen_base, (unsigned long)hdlcd->fb.fix.smem_start);
++
++ err = hdlcd_setup(hdlcd);
++
++ if (err)
++ goto probe_err;
++
++ platform_set_drvdata(pdev, hdlcd);
++ return 0;
++
++probe_err:
++ iounmap(hdlcd->fb.screen_base);
++ memblock_free(hdlcd->fb.fix.smem_start, hdlcd->fb.fix.smem_start);
++
++memalloc_err:
++ release_mem_region(mem->start, resource_size(mem));
++
++request_err:
++ free_irq(hdlcd->irq, hdlcd);
++
++resource_err:
++ kfree(hdlcd);
++
++ return err;
++}
++
++static int hdlcd_remove(struct platform_device *pdev)
++{
++ struct hdlcd_device *hdlcd = platform_get_drvdata(pdev);
++
++ clk_disable(hdlcd->clk);
++ clk_unprepare(hdlcd->clk);
++ clk_put(hdlcd->clk);
++
++ /* unmap memory */
++ iounmap(hdlcd->fb.screen_base);
++ iounmap(hdlcd->base);
++
++ /* deallocate fb memory */
++ fb_dealloc_cmap(&hdlcd->fb.cmap);
++ kfree(hdlcd->fb.pseudo_palette);
++ memblock_free(hdlcd->fb.fix.smem_start, hdlcd->fb.fix.smem_start);
++ release_mem_region(hdlcd->fb.fix.mmio_start, hdlcd->fb.fix.mmio_len);
++
++ free_irq(hdlcd->irq, NULL);
++ kfree(hdlcd);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int hdlcd_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ /* not implemented yet */
++ return 0;
++}
++
++static int hdlcd_resume(struct platform_device *pdev)
++{
++ /* not implemented yet */
++ return 0;
++}
++#else
++#define hdlcd_suspend NULL
++#define hdlcd_resume NULL
++#endif
++
++static struct platform_driver hdlcd_driver = {
++ .probe = hdlcd_probe,
++ .remove = hdlcd_remove,
++ .suspend = hdlcd_suspend,
++ .resume = hdlcd_resume,
++ .driver = {
++ .name = "hdlcd",
++ .owner = THIS_MODULE,
++ .of_match_table = hdlcd_of_matches,
++ },
++};
++
++static int __init hdlcd_init(void)
++{
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++ int err = platform_driver_register(&hdlcd_driver);
++ if (!err)
++ hdlcd_underrun_init();
++ return err;
++#else
++ return platform_driver_register(&hdlcd_driver);
++#endif
++}
++
++void __exit hdlcd_exit(void)
++{
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++ hdlcd_underrun_close();
++#endif
++ platform_driver_unregister(&hdlcd_driver);
++}
++
++module_init(hdlcd_init);
++module_exit(hdlcd_exit);
++
++MODULE_AUTHOR("Liviu Dudau");
++MODULE_DESCRIPTION("ARM HDLCD core driver");
++MODULE_LICENSE("GPL v2");
+diff -Nur linux-3.14.36/drivers/video/backlight/backlight.c linux-openelec/drivers/video/backlight/backlight.c
+--- linux-3.14.36/drivers/video/backlight/backlight.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/video/backlight/backlight.c 2015-05-06 12:05:42.000000000 -0500
+@@ -41,6 +41,8 @@
+ {
+ struct backlight_device *bd;
+ struct fb_event *evdata = data;
++ int node = evdata->info->node;
++ int fb_blank = 0;
+
+ /* If we aren't interested in this event, skip it immediately ... */
+ if (event != FB_EVENT_BLANK && event != FB_EVENT_CONBLANK)
+@@ -51,12 +53,24 @@
+ if (bd->ops)
+ if (!bd->ops->check_fb ||
+ bd->ops->check_fb(bd, evdata->info)) {
+- bd->props.fb_blank = *(int *)evdata->data;
+- if (bd->props.fb_blank == FB_BLANK_UNBLANK)
+- bd->props.state &= ~BL_CORE_FBBLANK;
+- else
+- bd->props.state |= BL_CORE_FBBLANK;
+- backlight_update_status(bd);
++ fb_blank = *(int *)evdata->data;
++ if (fb_blank == FB_BLANK_UNBLANK &&
++ !bd->fb_bl_on[node]) {
++ bd->fb_bl_on[node] = true;
++ if (!bd->use_count++) {
++ bd->props.state &= ~BL_CORE_FBBLANK;
++ bd->props.fb_blank = FB_BLANK_UNBLANK;
++ backlight_update_status(bd);
++ }
++ } else if (fb_blank != FB_BLANK_UNBLANK &&
++ bd->fb_bl_on[node]) {
++ bd->fb_bl_on[node] = false;
++ if (!(--bd->use_count)) {
++ bd->props.state |= BL_CORE_FBBLANK;
++ bd->props.fb_blank = FB_BLANK_POWERDOWN;
++ backlight_update_status(bd);
++ }
++ }
+ }
+ mutex_unlock(&bd->ops_lock);
+ return 0;
+diff -Nur linux-3.14.36/drivers/video/Kconfig linux-openelec/drivers/video/Kconfig
+--- linux-3.14.36/drivers/video/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/video/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -39,6 +39,11 @@
+ config HDMI
+ bool
+
++config VEXPRESS_DVI_CONTROL
++ bool "Versatile Express DVI control"
++ depends on FB && VEXPRESS_CONFIG
++ default y
++
+ menuconfig FB
+ tristate "Support for frame buffer devices"
+ ---help---
+@@ -327,6 +332,21 @@
+ here and read <file:Documentation/kbuild/modules.txt>. The module
+ will be called amba-clcd.
+
++config FB_ARMHDLCD
++ tristate "ARM High Definition LCD support"
++ depends on FB && ARM
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ help
++ This framebuffer device driver is for the ARM High Definition
++ Colour LCD controller.
++
++ If you want to compile this as a module (=code which can be
++ inserted into and removed from the running kernel), say M
++ here and read <file:Documentation/kbuild/modules.txt>. The module
++ will be called arm-hdlcd.
++
+ config FB_ACORN
+ bool "Acorn VIDC support"
+ depends on (FB = y) && ARM && ARCH_ACORN
+@@ -2491,6 +2511,10 @@
+ source "drivers/video/mmp/Kconfig"
+ source "drivers/video/backlight/Kconfig"
+
++if ARCH_MXC
++source "drivers/video/mxc/Kconfig"
++endif
++
+ if VT
+ source "drivers/video/console/Kconfig"
+ endif
+diff -Nur linux-3.14.36/drivers/video/Makefile linux-openelec/drivers/video/Makefile
+--- linux-3.14.36/drivers/video/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/video/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -53,6 +53,7 @@
+ obj-$(CONFIG_FB_SAVAGE) += savage/
+ obj-$(CONFIG_FB_GEODE) += geode/
+ obj-$(CONFIG_FB_MBX) += mbx/
++obj-$(CONFIG_FB_MXC) += mxc/
+ obj-$(CONFIG_FB_NEOMAGIC) += neofb.o
+ obj-$(CONFIG_FB_3DFX) += tdfxfb.o
+ obj-$(CONFIG_FB_CONTROL) += controlfb.o
+@@ -99,6 +100,7 @@
+ obj-$(CONFIG_FB_PVR2) += pvr2fb.o
+ obj-$(CONFIG_FB_VOODOO1) += sstfb.o
+ obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
++obj-$(CONFIG_FB_ARMHDLCD) += arm-hdlcd.o
+ obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
+ obj-$(CONFIG_FB_68328) += 68328fb.o
+ obj-$(CONFIG_FB_GBE) += gbefb.o
+@@ -178,3 +180,6 @@
+ ifeq ($(CONFIG_OF),y)
+ obj-$(CONFIG_VIDEOMODE_HELPERS) += of_display_timing.o of_videomode.o
+ endif
++
++# platform specific output drivers
++obj-$(CONFIG_VEXPRESS_DVI_CONTROL) += vexpress-dvi.o
+diff -Nur linux-3.14.36/drivers/video/mxc/Kconfig linux-openelec/drivers/video/mxc/Kconfig
+--- linux-3.14.36/drivers/video/mxc/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/Kconfig 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,48 @@
++config FB_MXC
++ tristate "MXC Framebuffer support"
++ depends on FB
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ select FB_MODE_HELPERS
++ default y
++ help
++ This is a framebuffer device for the MXC LCD Controller.
++ See <http://www.linux-fbdev.org/> for information on framebuffer
++ devices.
++
++ If you plan to use the LCD display with your MXC system, say
++ Y here.
++
++config FB_MXC_SYNC_PANEL
++ depends on FB_MXC
++ tristate "Synchronous Panel Framebuffer"
++
++config FB_MXC_LDB
++ tristate "MXC LDB"
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3
++
++config FB_MXC_MIPI_DSI
++ tristate "MXC MIPI_DSI"
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3
++
++config FB_MXC_TRULY_WVGA_SYNC_PANEL
++ tristate "TRULY WVGA Panel"
++ depends on FB_MXC_SYNC_PANEL
++ depends on FB_MXC_MIPI_DSI
++
++config FB_MXC_HDMI
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3
++ depends on I2C
++ tristate "MXC HDMI driver support"
++ select MFD_MXC_HDMI
++ help
++ Driver for the on-chip MXC HDMI controller.
++
++config FB_MXC_EDID
++ depends on FB_MXC && I2C
++ tristate "MXC EDID support"
++ default y
+diff -Nur linux-3.14.36/drivers/video/mxc/ldb.c linux-openelec/drivers/video/mxc/ldb.c
+--- linux-3.14.36/drivers/video/mxc/ldb.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/ldb.c 2015-07-24 18:03:30.280842002 -0500
+@@ -0,0 +1,1052 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file mxc_ldb.c
++ *
++ * @brief This file contains the LDB driver device interface and fops
++ * functions.
++ */
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/ipu.h>
++#include <linux/mxcfb.h>
++#include <linux/regulator/consumer.h>
++#include <linux/spinlock.h>
++#include <linux/of_device.h>
++#include <linux/mod_devicetable.h>
++#include "mxc_dispdrv.h"
++
++#define DISPDRV_LDB "ldb"
++
++#define LDB_BGREF_RMODE_MASK 0x00008000
++#define LDB_BGREF_RMODE_INT 0x00008000
++#define LDB_BGREF_RMODE_EXT 0x0
++
++#define LDB_DI1_VS_POL_MASK 0x00000400
++#define LDB_DI1_VS_POL_ACT_LOW 0x00000400
++#define LDB_DI1_VS_POL_ACT_HIGH 0x0
++#define LDB_DI0_VS_POL_MASK 0x00000200
++#define LDB_DI0_VS_POL_ACT_LOW 0x00000200
++#define LDB_DI0_VS_POL_ACT_HIGH 0x0
++
++#define LDB_BIT_MAP_CH1_MASK 0x00000100
++#define LDB_BIT_MAP_CH1_JEIDA 0x00000100
++#define LDB_BIT_MAP_CH1_SPWG 0x0
++#define LDB_BIT_MAP_CH0_MASK 0x00000040
++#define LDB_BIT_MAP_CH0_JEIDA 0x00000040
++#define LDB_BIT_MAP_CH0_SPWG 0x0
++
++#define LDB_DATA_WIDTH_CH1_MASK 0x00000080
++#define LDB_DATA_WIDTH_CH1_24 0x00000080
++#define LDB_DATA_WIDTH_CH1_18 0x0
++#define LDB_DATA_WIDTH_CH0_MASK 0x00000020
++#define LDB_DATA_WIDTH_CH0_24 0x00000020
++#define LDB_DATA_WIDTH_CH0_18 0x0
++
++#define LDB_CH1_MODE_MASK 0x0000000C
++#define LDB_CH1_MODE_EN_TO_DI1 0x0000000C
++#define LDB_CH1_MODE_EN_TO_DI0 0x00000004
++#define LDB_CH1_MODE_DISABLE 0x0
++#define LDB_CH0_MODE_MASK 0x00000003
++#define LDB_CH0_MODE_EN_TO_DI1 0x00000003
++#define LDB_CH0_MODE_EN_TO_DI0 0x00000001
++#define LDB_CH0_MODE_DISABLE 0x0
++
++#define LDB_SPLIT_MODE_EN 0x00000010
++
++enum {
++ IMX6_LDB,
++};
++
++enum {
++ LDB_IMX6 = 1,
++};
++
++struct fsl_mxc_ldb_platform_data {
++ int devtype;
++ u32 ext_ref;
++#define LDB_SPL_DI0 1
++#define LDB_SPL_DI1 2
++#define LDB_DUL_DI0 3
++#define LDB_DUL_DI1 4
++#define LDB_SIN0 5
++#define LDB_SIN1 6
++#define LDB_SEP0 7
++#define LDB_SEP1 8
++ int mode;
++ int ipu_id;
++ int disp_id;
++
++ /*only work for separate mode*/
++ int sec_ipu_id;
++ int sec_disp_id;
++};
++
++struct ldb_data {
++ struct platform_device *pdev;
++ struct mxc_dispdrv_handle *disp_ldb;
++ uint32_t *reg;
++ uint32_t *control_reg;
++ uint32_t *gpr3_reg;
++ uint32_t control_reg_data;
++ struct regulator *lvds_bg_reg;
++ int mode;
++ bool inited;
++ struct ldb_setting {
++ struct clk *di_clk;
++ struct clk *ldb_di_clk;
++ struct clk *div_3_5_clk;
++ struct clk *div_7_clk;
++ struct clk *div_sel_clk;
++ bool active;
++ bool clk_en;
++ int ipu;
++ int di;
++ uint32_t ch_mask;
++ uint32_t ch_val;
++ } setting[2];
++ struct notifier_block nb;
++};
++
++static int g_ldb_mode;
++
++static struct fb_videomode ldb_modedb[] = {
++ {
++ "LDB-WXGA", 60, 1280, 800, 14065,
++ 40, 40,
++ 10, 3,
++ 80, 10,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++ {
++ "LDB-XGA", 60, 1024, 768, 15385,
++ 220, 40,
++ 21, 7,
++ 60, 10,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++ {
++ "LDB-1080P60", 60, 1920, 1080, 7692,
++ 100, 40,
++ 30, 3,
++ 10, 2,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++ {
++ "LDB-WVGA-UDOO", 57, 800, 480, 30060, // Rif. 800x480 Panel DATAVISION dtfs070d0shlx
++ 56, 50,
++ 23, 20,
++ 150, 2,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++ {
++ "LDB-WXGA-UDOO", 60, 1368, 768, 12960, // Rif. 1366x768 Panel G156XW01V0
++ 9, 3,
++ 2, 7,
++ 200, 38,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++};
++static int ldb_modedb_sz = ARRAY_SIZE(ldb_modedb);
++
++static inline int is_imx6_ldb(struct fsl_mxc_ldb_platform_data *plat_data)
++{
++ return (plat_data->devtype == LDB_IMX6);
++}
++
++static int bits_per_pixel(int pixel_fmt)
++{
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ return 24;
++ break;
++ case IPU_PIX_FMT_BGR666:
++ case IPU_PIX_FMT_RGB666:
++ case IPU_PIX_FMT_LVDS666:
++ return 18;
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++static int valid_mode(int pixel_fmt)
++{
++ return ((pixel_fmt == IPU_PIX_FMT_RGB24) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR24) ||
++ (pixel_fmt == IPU_PIX_FMT_LVDS666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB666) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR666));
++}
++
++static int parse_ldb_mode(char *mode)
++{
++ int ldb_mode;
++
++ if (!strcmp(mode, "spl0"))
++ ldb_mode = LDB_SPL_DI0;
++ else if (!strcmp(mode, "spl1"))
++ ldb_mode = LDB_SPL_DI1;
++ else if (!strcmp(mode, "dul0"))
++ ldb_mode = LDB_DUL_DI0;
++ else if (!strcmp(mode, "dul1"))
++ ldb_mode = LDB_DUL_DI1;
++ else if (!strcmp(mode, "sin0"))
++ ldb_mode = LDB_SIN0;
++ else if (!strcmp(mode, "sin1"))
++ ldb_mode = LDB_SIN1;
++ else if (!strcmp(mode, "sep0"))
++ ldb_mode = LDB_SEP0;
++ else if (!strcmp(mode, "sep1"))
++ ldb_mode = LDB_SEP1;
++ else
++ ldb_mode = -EINVAL;
++
++ return ldb_mode;
++}
++
++#ifndef MODULE
++/*
++ * "ldb=spl0/1" -- split mode on DI0/1
++ * "ldb=dul0/1" -- dual mode on DI0/1
++ * "ldb=sin0/1" -- single mode on LVDS0/1
++ * "ldb=sep0/1" -- separate mode begin from LVDS0/1
++ *
++ * there are two LVDS channels(LVDS0 and LVDS1) which can transfer video
++ * datas, there two channels can be used as split/dual/single/separate mode.
++ *
++ * split mode means display data from DI0 or DI1 will send to both channels
++ * LVDS0+LVDS1.
++ * dual mode means display data from DI0 or DI1 will be duplicated on LVDS0
++ * and LVDS1, it said, LVDS0 and LVDS1 has the same content.
++ * single mode means only work for DI0/DI1->LVDS0 or DI0/DI1->LVDS1.
++ * separate mode means you can make DI0/DI1->LVDS0 and DI0/DI1->LVDS1 work
++ * at the same time.
++ */
++static int __init ldb_setup(char *options)
++{
++ g_ldb_mode = parse_ldb_mode(options);
++ return (g_ldb_mode < 0) ? 0 : 1;
++}
++__setup("ldb=", ldb_setup);
++#endif
++
++static int ldb_get_of_property(struct platform_device *pdev,
++ struct fsl_mxc_ldb_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ u32 ipu_id, disp_id;
++ u32 sec_ipu_id, sec_disp_id;
++ char *mode;
++ u32 ext_ref;
++
++ err = of_property_read_string(np, "mode", (const char **)&mode);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property mode fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ext_ref", &ext_ref);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ext_ref fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "sec_ipu_id", &sec_ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property sec_ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "sec_disp_id", &sec_disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property sec_disp_id fail\n");
++ return err;
++ }
++
++ plat_data->mode = parse_ldb_mode(mode);
++ plat_data->ext_ref = ext_ref;
++ plat_data->ipu_id = ipu_id;
++ plat_data->disp_id = disp_id;
++ plat_data->sec_ipu_id = sec_ipu_id;
++ plat_data->sec_disp_id = sec_disp_id;
++
++ return err;
++}
++
++static int find_ldb_setting(struct ldb_data *ldb, struct fb_info *fbi)
++{
++ char *id_di[] = {
++ "DISP3 BG",
++ "DISP3 BG - DI1",
++ };
++ char id[16];
++ int i;
++
++ for (i = 0; i < 2; i++) {
++ if (ldb->setting[i].active) {
++ memset(id, 0, 16);
++ memcpy(id, id_di[ldb->setting[i].di],
++ strlen(id_di[ldb->setting[i].di]));
++ id[4] += ldb->setting[i].ipu;
++ if (!strcmp(id, fbi->fix.id))
++ return i;
++ }
++ }
++ return -EINVAL;
++}
++
++static int ldb_disp_setup(struct mxc_dispdrv_handle *disp, struct fb_info *fbi)
++{
++ uint32_t reg, val;
++ uint32_t pixel_clk, rounded_pixel_clk;
++ struct clk *ldb_clk_parent;
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int setting_idx, di;
++ int ret;
++
++ setting_idx = find_ldb_setting(ldb, fbi);
++ if (setting_idx < 0)
++ return setting_idx;
++
++ di = ldb->setting[setting_idx].di;
++
++ /* restore channel mode setting */
++ val = readl(ldb->control_reg);
++ val |= ldb->setting[setting_idx].ch_val;
++ writel(val, ldb->control_reg);
++ dev_dbg(&ldb->pdev->dev, "LDB setup, control reg:0x%x\n",
++ readl(ldb->control_reg));
++
++ /* vsync setup */
++ reg = readl(ldb->control_reg);
++ if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT) {
++ if (di == 0)
++ reg = (reg & ~LDB_DI0_VS_POL_MASK)
++ | LDB_DI0_VS_POL_ACT_HIGH;
++ else
++ reg = (reg & ~LDB_DI1_VS_POL_MASK)
++ | LDB_DI1_VS_POL_ACT_HIGH;
++ } else {
++ if (di == 0)
++ reg = (reg & ~LDB_DI0_VS_POL_MASK)
++ | LDB_DI0_VS_POL_ACT_LOW;
++ else
++ reg = (reg & ~LDB_DI1_VS_POL_MASK)
++ | LDB_DI1_VS_POL_ACT_LOW;
++ }
++ writel(reg, ldb->control_reg);
++
++ /* clk setup */
++ if (ldb->setting[setting_idx].clk_en)
++ clk_disable_unprepare(ldb->setting[setting_idx].ldb_di_clk);
++ pixel_clk = (PICOS2KHZ(fbi->var.pixclock)) * 1000UL;
++ ldb_clk_parent = clk_get_parent(ldb->setting[setting_idx].ldb_di_clk);
++ if (IS_ERR(ldb_clk_parent)) {
++ dev_err(&ldb->pdev->dev, "get ldb di parent clk fail\n");
++ return PTR_ERR(ldb_clk_parent);
++ }
++ if ((ldb->mode == LDB_SPL_DI0) || (ldb->mode == LDB_SPL_DI1))
++ ret = clk_set_rate(ldb_clk_parent, pixel_clk * 7 / 2);
++ else
++ ret = clk_set_rate(ldb_clk_parent, pixel_clk * 7);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "set ldb parent clk fail:%d\n", ret);
++ return ret;
++ }
++ rounded_pixel_clk = clk_round_rate(ldb->setting[setting_idx].ldb_di_clk,
++ pixel_clk);
++ dev_dbg(&ldb->pdev->dev, "pixel_clk:%d, rounded_pixel_clk:%d\n",
++ pixel_clk, rounded_pixel_clk);
++ ret = clk_set_rate(ldb->setting[setting_idx].ldb_di_clk,
++ rounded_pixel_clk);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "set ldb di clk fail:%d\n", ret);
++ return ret;
++ }
++ ret = clk_prepare_enable(ldb->setting[setting_idx].ldb_di_clk);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "enable ldb di clk fail:%d\n", ret);
++ return ret;
++ }
++
++ if (!ldb->setting[setting_idx].clk_en)
++ ldb->setting[setting_idx].clk_en = true;
++
++ return 0;
++}
++
++int ldb_fb_event(struct notifier_block *nb, unsigned long val, void *v)
++{
++ struct ldb_data *ldb = container_of(nb, struct ldb_data, nb);
++ struct fb_event *event = v;
++ struct fb_info *fbi = event->info;
++ int index;
++ uint32_t data;
++
++ index = find_ldb_setting(ldb, fbi);
++ if (index < 0)
++ return 0;
++
++ fbi->mode = (struct fb_videomode *)fb_match_mode(&fbi->var,
++ &fbi->modelist);
++
++ if (!fbi->mode) {
++ dev_warn(&ldb->pdev->dev,
++ "LDB: can not find mode for xres=%d, yres=%d\n",
++ fbi->var.xres, fbi->var.yres);
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ data = readl(ldb->control_reg);
++ data &= ~ldb->setting[index].ch_mask;
++ writel(data, ldb->control_reg);
++ }
++ return 0;
++ }
++
++ switch (val) {
++ case FB_EVENT_BLANK:
++ {
++ if (*((int *)event->data) == FB_BLANK_UNBLANK) {
++ if (!ldb->setting[index].clk_en) {
++ clk_enable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = true;
++ }
++ } else {
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ data = readl(ldb->control_reg);
++ data &= ~ldb->setting[index].ch_mask;
++ writel(data, ldb->control_reg);
++ dev_dbg(&ldb->pdev->dev,
++ "LDB blank, control reg:0x%x\n",
++ readl(ldb->control_reg));
++ }
++ }
++ break;
++ }
++ case FB_EVENT_SUSPEND:
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ }
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++#define LVDS_MUX_CTL_WIDTH 2
++#define LVDS_MUX_CTL_MASK 3
++#define LVDS0_MUX_CTL_OFFS 6
++#define LVDS1_MUX_CTL_OFFS 8
++#define LVDS0_MUX_CTL_MASK (LVDS_MUX_CTL_MASK << 6)
++#define LVDS1_MUX_CTL_MASK (LVDS_MUX_CTL_MASK << 8)
++#define ROUTE_IPU_DI(ipu, di) (((ipu << 1) | di) & LVDS_MUX_CTL_MASK)
++static int ldb_ipu_ldb_route(int ipu, int di, struct ldb_data *ldb)
++{
++ uint32_t reg;
++ int channel;
++ int shift;
++ int mode = ldb->mode;
++
++ reg = readl(ldb->gpr3_reg);
++ if (mode < LDB_SIN0) {
++ reg &= ~(LVDS0_MUX_CTL_MASK | LVDS1_MUX_CTL_MASK);
++ reg |= (ROUTE_IPU_DI(ipu, di) << LVDS0_MUX_CTL_OFFS) |
++ (ROUTE_IPU_DI(ipu, di) << LVDS1_MUX_CTL_OFFS);
++ dev_dbg(&ldb->pdev->dev,
++ "Dual/Split mode both channels route to IPU%d-DI%d\n",
++ ipu, di);
++ } else if ((mode == LDB_SIN0) || (mode == LDB_SIN1)) {
++ reg &= ~(LVDS0_MUX_CTL_MASK | LVDS1_MUX_CTL_MASK);
++ channel = mode - LDB_SIN0;
++ shift = LVDS0_MUX_CTL_OFFS + channel * LVDS_MUX_CTL_WIDTH;
++ reg |= ROUTE_IPU_DI(ipu, di) << shift;
++ dev_dbg(&ldb->pdev->dev,
++ "Single mode channel %d route to IPU%d-DI%d\n",
++ channel, ipu, di);
++ } else {
++ static bool first = true;
++
++ if (first) {
++ if (mode == LDB_SEP0) {
++ reg &= ~LVDS0_MUX_CTL_MASK;
++ channel = 0;
++ } else {
++ reg &= ~LVDS1_MUX_CTL_MASK;
++ channel = 1;
++ }
++ first = false;
++ } else {
++ if (mode == LDB_SEP0) {
++ reg &= ~LVDS1_MUX_CTL_MASK;
++ channel = 1;
++ } else {
++ reg &= ~LVDS0_MUX_CTL_MASK;
++ channel = 0;
++ }
++ }
++
++ shift = LVDS0_MUX_CTL_OFFS + channel * LVDS_MUX_CTL_WIDTH;
++ reg |= ROUTE_IPU_DI(ipu, di) << shift;
++
++ dev_dbg(&ldb->pdev->dev,
++ "Separate mode channel %d route to IPU%d-DI%d\n",
++ channel, ipu, di);
++ }
++ writel(reg, ldb->gpr3_reg);
++
++ return 0;
++}
++
++static int ldb_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret = 0, i, lvds_channel = 0;
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ struct fsl_mxc_ldb_platform_data *plat_data = ldb->pdev->dev.platform_data;
++ struct resource *res;
++ uint32_t reg, setting_idx;
++ uint32_t ch_mask = 0, ch_val = 0;
++ uint32_t ipu_id, disp_id;
++ char di_clk[] = "ipu1_di0_sel";
++ char ldb_clk[] = "ldb_di0";
++ char div_3_5_clk[] = "di0_div_3_5";
++ char div_7_clk[] = "di0_div_7";
++ char div_sel_clk[] = "di0_div_sel";
++
++ /* if input format not valid, make RGB666 as default*/
++ if (!valid_mode(setting->if_fmt)) {
++ dev_warn(&ldb->pdev->dev, "Input pixel format not valid"
++ " use default RGB666\n");
++ setting->if_fmt = IPU_PIX_FMT_RGB666;
++ }
++
++ if (!ldb->inited) {
++ setting_idx = 0;
++ res = platform_get_resource(ldb->pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&ldb->pdev->dev, "get iomem fail.\n");
++ return -ENOMEM;
++ }
++
++ ldb->reg = devm_ioremap(&ldb->pdev->dev, res->start,
++ resource_size(res));
++ ldb->control_reg = ldb->reg + 2;
++ ldb->gpr3_reg = ldb->reg + 3;
++
++ /* ipu selected by platform data setting */
++ setting->dev_id = plat_data->ipu_id;
++
++ reg = readl(ldb->control_reg);
++
++ /* refrence resistor select */
++ reg &= ~LDB_BGREF_RMODE_MASK;
++ if (plat_data->ext_ref)
++ reg |= LDB_BGREF_RMODE_EXT;
++ else
++ reg |= LDB_BGREF_RMODE_INT;
++
++ /* TODO: now only use SPWG data mapping for both channel */
++ reg &= ~(LDB_BIT_MAP_CH0_MASK | LDB_BIT_MAP_CH1_MASK);
++ reg |= LDB_BIT_MAP_CH0_SPWG | LDB_BIT_MAP_CH1_SPWG;
++
++ /* channel mode setting */
++ reg &= ~(LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ reg &= ~(LDB_DATA_WIDTH_CH0_MASK | LDB_DATA_WIDTH_CH1_MASK);
++
++ if (bits_per_pixel(setting->if_fmt) == 24)
++ reg |= LDB_DATA_WIDTH_CH0_24 | LDB_DATA_WIDTH_CH1_24;
++ else
++ reg |= LDB_DATA_WIDTH_CH0_18 | LDB_DATA_WIDTH_CH1_18;
++
++ if (g_ldb_mode >= LDB_SPL_DI0)
++ ldb->mode = g_ldb_mode;
++ else
++ ldb->mode = plat_data->mode;
++
++ if ((ldb->mode == LDB_SIN0) || (ldb->mode == LDB_SIN1)) {
++ ret = ldb->mode - LDB_SIN0;
++ if (plat_data->disp_id != ret) {
++ dev_warn(&ldb->pdev->dev,
++ "change IPU DI%d to IPU DI%d for LDB "
++ "channel%d.\n",
++ plat_data->disp_id, ret, ret);
++ plat_data->disp_id = ret;
++ }
++ } else if (((ldb->mode == LDB_SEP0) || (ldb->mode == LDB_SEP1))
++ && is_imx6_ldb(plat_data)) {
++ if (plat_data->disp_id == plat_data->sec_disp_id) {
++ dev_err(&ldb->pdev->dev,
++ "For LVDS separate mode,"
++ "two DIs should be different!\n");
++ return -EINVAL;
++ }
++
++ if (((!plat_data->disp_id) && (ldb->mode == LDB_SEP1))
++ || ((plat_data->disp_id) &&
++ (ldb->mode == LDB_SEP0))) {
++ dev_dbg(&ldb->pdev->dev,
++ "LVDS separate mode:"
++ "swap DI configuration!\n");
++ ipu_id = plat_data->ipu_id;
++ disp_id = plat_data->disp_id;
++ plat_data->ipu_id = plat_data->sec_ipu_id;
++ plat_data->disp_id = plat_data->sec_disp_id;
++ plat_data->sec_ipu_id = ipu_id;
++ plat_data->sec_disp_id = disp_id;
++ }
++ }
++
++ if (ldb->mode == LDB_SPL_DI0) {
++ reg |= LDB_SPLIT_MODE_EN | LDB_CH0_MODE_EN_TO_DI0
++ | LDB_CH1_MODE_EN_TO_DI0;
++ setting->disp_id = 0;
++ } else if (ldb->mode == LDB_SPL_DI1) {
++ reg |= LDB_SPLIT_MODE_EN | LDB_CH0_MODE_EN_TO_DI1
++ | LDB_CH1_MODE_EN_TO_DI1;
++ setting->disp_id = 1;
++ } else if (ldb->mode == LDB_DUL_DI0) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ reg |= LDB_CH0_MODE_EN_TO_DI0 | LDB_CH1_MODE_EN_TO_DI0;
++ setting->disp_id = 0;
++ } else if (ldb->mode == LDB_DUL_DI1) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ reg |= LDB_CH0_MODE_EN_TO_DI1 | LDB_CH1_MODE_EN_TO_DI1;
++ setting->disp_id = 1;
++ } else if (ldb->mode == LDB_SIN0) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ setting->disp_id = plat_data->disp_id;
++ if (setting->disp_id == 0)
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ ch_mask = LDB_CH0_MODE_MASK;
++ ch_val = reg & LDB_CH0_MODE_MASK;
++ } else if (ldb->mode == LDB_SIN1) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ setting->disp_id = plat_data->disp_id;
++ if (setting->disp_id == 0)
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = LDB_CH1_MODE_MASK;
++ ch_val = reg & LDB_CH1_MODE_MASK;
++ } else { /* separate mode*/
++ setting->disp_id = plat_data->disp_id;
++
++ /* first output is LVDS0 or LVDS1 */
++ if (ldb->mode == LDB_SEP0)
++ lvds_channel = 0;
++ else
++ lvds_channel = 1;
++
++ reg &= ~LDB_SPLIT_MODE_EN;
++
++ if ((lvds_channel == 0) && (setting->disp_id == 0))
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else if ((lvds_channel == 0) && (setting->disp_id == 1))
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ else if ((lvds_channel == 1) && (setting->disp_id == 0))
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = lvds_channel ? LDB_CH1_MODE_MASK :
++ LDB_CH0_MODE_MASK;
++ ch_val = reg & ch_mask;
++
++ if (bits_per_pixel(setting->if_fmt) == 24) {
++ if (lvds_channel == 0)
++ reg &= ~LDB_DATA_WIDTH_CH1_24;
++ else
++ reg &= ~LDB_DATA_WIDTH_CH0_24;
++ } else {
++ if (lvds_channel == 0)
++ reg &= ~LDB_DATA_WIDTH_CH1_18;
++ else
++ reg &= ~LDB_DATA_WIDTH_CH0_18;
++ }
++ }
++
++ writel(reg, ldb->control_reg);
++ if (ldb->mode < LDB_SIN0) {
++ ch_mask = LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK;
++ ch_val = reg & (LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ }
++ } else { /* second time for separate mode */
++ if ((ldb->mode == LDB_SPL_DI0) ||
++ (ldb->mode == LDB_SPL_DI1) ||
++ (ldb->mode == LDB_DUL_DI0) ||
++ (ldb->mode == LDB_DUL_DI1) ||
++ (ldb->mode == LDB_SIN0) ||
++ (ldb->mode == LDB_SIN1)) {
++ dev_err(&ldb->pdev->dev, "for second ldb disp"
++ "ldb mode should in separate mode\n");
++ return -EINVAL;
++ }
++
++ setting_idx = 1;
++ if (is_imx6_ldb(plat_data)) {
++ setting->dev_id = plat_data->sec_ipu_id;
++ setting->disp_id = plat_data->sec_disp_id;
++ } else {
++ setting->dev_id = plat_data->ipu_id;
++ setting->disp_id = !plat_data->disp_id;
++ }
++ if (setting->disp_id == ldb->setting[0].di) {
++ dev_err(&ldb->pdev->dev, "Err: for second ldb disp in"
++ "separate mode, DI should be different!\n");
++ return -EINVAL;
++ }
++
++ /* second output is LVDS0 or LVDS1 */
++ if (ldb->mode == LDB_SEP0)
++ lvds_channel = 1;
++ else
++ lvds_channel = 0;
++
++ reg = readl(ldb->control_reg);
++ if ((lvds_channel == 0) && (setting->disp_id == 0))
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else if ((lvds_channel == 0) && (setting->disp_id == 1))
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ else if ((lvds_channel == 1) && (setting->disp_id == 0))
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = lvds_channel ? LDB_CH1_MODE_MASK :
++ LDB_CH0_MODE_MASK;
++ ch_val = reg & ch_mask;
++
++ if (bits_per_pixel(setting->if_fmt) == 24) {
++ if (lvds_channel == 0)
++ reg |= LDB_DATA_WIDTH_CH0_24;
++ else
++ reg |= LDB_DATA_WIDTH_CH1_24;
++ } else {
++ if (lvds_channel == 0)
++ reg |= LDB_DATA_WIDTH_CH0_18;
++ else
++ reg |= LDB_DATA_WIDTH_CH1_18;
++ }
++ writel(reg, ldb->control_reg);
++ }
++
++ /* get clocks */
++ if (is_imx6_ldb(plat_data) &&
++ ((ldb->mode == LDB_SEP0) || (ldb->mode == LDB_SEP1))) {
++ ldb_clk[6] += lvds_channel;
++ div_3_5_clk[2] += lvds_channel;
++ div_7_clk[2] += lvds_channel;
++ div_sel_clk[2] += lvds_channel;
++ } else {
++ ldb_clk[6] += setting->disp_id;
++ div_3_5_clk[2] += setting->disp_id;
++ div_7_clk[2] += setting->disp_id;
++ div_sel_clk[2] += setting->disp_id;
++ }
++ ldb->setting[setting_idx].ldb_di_clk = clk_get(&ldb->pdev->dev,
++ ldb_clk);
++ if (IS_ERR(ldb->setting[setting_idx].ldb_di_clk)) {
++ dev_err(&ldb->pdev->dev, "get ldb clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].ldb_di_clk);
++ }
++
++ ldb->setting[setting_idx].div_3_5_clk = clk_get(&ldb->pdev->dev,
++ div_3_5_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_3_5_clk)) {
++ dev_err(&ldb->pdev->dev, "get div 3.5 clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_3_5_clk);
++ }
++ ldb->setting[setting_idx].div_7_clk = clk_get(&ldb->pdev->dev,
++ div_7_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_7_clk)) {
++ dev_err(&ldb->pdev->dev, "get div 7 clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_7_clk);
++ }
++
++ ldb->setting[setting_idx].div_sel_clk = clk_get(&ldb->pdev->dev,
++ div_sel_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_sel_clk)) {
++ dev_err(&ldb->pdev->dev, "get div sel clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_sel_clk);
++ }
++
++ di_clk[3] += setting->dev_id;
++ di_clk[7] += setting->disp_id;
++ ldb->setting[setting_idx].di_clk = clk_get(&ldb->pdev->dev,
++ di_clk);
++ if (IS_ERR(ldb->setting[setting_idx].di_clk)) {
++ dev_err(&ldb->pdev->dev, "get di clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].di_clk);
++ }
++
++ ldb->setting[setting_idx].ch_mask = ch_mask;
++ ldb->setting[setting_idx].ch_val = ch_val;
++
++ if (is_imx6_ldb(plat_data))
++ ldb_ipu_ldb_route(setting->dev_id, setting->disp_id, ldb);
++
++ /* must use spec video mode defined by driver */
++ ret = fb_find_mode(&setting->fbi->var, setting->fbi, setting->dft_mode_str,
++ ldb_modedb, ldb_modedb_sz, NULL, setting->default_bpp);
++ if (ret != 1)
++ fb_videomode_to_var(&setting->fbi->var, &ldb_modedb[0]);
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < ldb_modedb_sz; i++) {
++ struct fb_videomode m;
++ fb_var_to_videomode(&m, &setting->fbi->var);
++ if (fb_mode_is_equal(&m, &ldb_modedb[i])) {
++ fb_add_videomode(&ldb_modedb[i],
++ &setting->fbi->modelist);
++ break;
++ }
++ }
++
++ ldb->setting[setting_idx].ipu = setting->dev_id;
++ ldb->setting[setting_idx].di = setting->disp_id;
++
++ return ret;
++}
++
++static int ldb_post_disp_init(struct mxc_dispdrv_handle *disp,
++ int ipu_id, int disp_id)
++{
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int setting_idx = ldb->inited ? 1 : 0;
++ int ret = 0;
++
++ if (!ldb->inited) {
++ ldb->nb.notifier_call = ldb_fb_event;
++ fb_register_client(&ldb->nb);
++ }
++
++ ret = clk_set_parent(ldb->setting[setting_idx].di_clk,
++ ldb->setting[setting_idx].ldb_di_clk);
++ if (ret) {
++ dev_err(&ldb->pdev->dev, "fail to set ldb_di clk as"
++ "the parent of ipu_di clk\n");
++ return ret;
++ }
++
++ if ((ldb->mode == LDB_SPL_DI0) || (ldb->mode == LDB_SPL_DI1)) {
++ ret = clk_set_parent(ldb->setting[setting_idx].div_sel_clk,
++ ldb->setting[setting_idx].div_3_5_clk);
++ if (ret) {
++ dev_err(&ldb->pdev->dev, "fail to set div 3.5 clk as"
++ "the parent of div sel clk\n");
++ return ret;
++ }
++ } else {
++ ret = clk_set_parent(ldb->setting[setting_idx].div_sel_clk,
++ ldb->setting[setting_idx].div_7_clk);
++ if (ret) {
++ dev_err(&ldb->pdev->dev, "fail to set div 7 clk as"
++ "the parent of div sel clk\n");
++ return ret;
++ }
++ }
++
++ /* save active ldb setting for fb notifier */
++ ldb->setting[setting_idx].active = true;
++
++ ldb->inited = true;
++ return ret;
++}
++
++static void ldb_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int i;
++
++ writel(0, ldb->control_reg);
++
++ for (i = 0; i < 2; i++) {
++ clk_disable(ldb->setting[i].ldb_di_clk);
++ clk_put(ldb->setting[i].ldb_di_clk);
++ clk_put(ldb->setting[i].div_3_5_clk);
++ clk_put(ldb->setting[i].div_7_clk);
++ clk_put(ldb->setting[i].div_sel_clk);
++ }
++
++ fb_unregister_client(&ldb->nb);
++}
++
++static struct mxc_dispdrv_driver ldb_drv = {
++ .name = DISPDRV_LDB,
++ .init = ldb_disp_init,
++ .post_init = ldb_post_disp_init,
++ .deinit = ldb_disp_deinit,
++ .setup = ldb_disp_setup,
++};
++
++static int ldb_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++ uint32_t data;
++
++ if (!ldb->inited)
++ return 0;
++ data = readl(ldb->control_reg);
++ ldb->control_reg_data = data;
++ data &= ~(LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ writel(data, ldb->control_reg);
++
++ return 0;
++}
++
++static int ldb_resume(struct platform_device *pdev)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++
++ if (!ldb->inited)
++ return 0;
++ writel(ldb->control_reg_data, ldb->control_reg);
++
++ return 0;
++}
++
++static struct platform_device_id imx_ldb_devtype[] = {
++ {
++ .name = "ldb-imx6",
++ .driver_data = LDB_IMX6,
++ }, {
++ /* sentinel */
++ }
++};
++
++static const struct of_device_id imx_ldb_dt_ids[] = {
++ { .compatible = "fsl,imx6q-ldb", .data = &imx_ldb_devtype[IMX6_LDB],},
++ { /* sentinel */ }
++};
++
++/*!
++ * This function is called by the driver framework to initialize the LDB
++ * device.
++ *
++ * @param dev The device structure for the LDB passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int ldb_probe(struct platform_device *pdev)
++{
++ int ret = 0;
++ struct ldb_data *ldb;
++ struct fsl_mxc_ldb_platform_data *plat_data;
++ const struct of_device_id *of_id =
++ of_match_device(imx_ldb_dt_ids, &pdev->dev);
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ ldb = devm_kzalloc(&pdev->dev, sizeof(struct ldb_data), GFP_KERNEL);
++ if (!ldb)
++ return -ENOMEM;
++
++ plat_data = devm_kzalloc(&pdev->dev,
++ sizeof(struct fsl_mxc_ldb_platform_data),
++ GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++ if (of_id)
++ pdev->id_entry = of_id->data;
++ plat_data->devtype = pdev->id_entry->driver_data;
++
++ ret = ldb_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get ldb of property fail\n");
++ return ret;
++ }
++
++ ldb->pdev = pdev;
++ ldb->disp_ldb = mxc_dispdrv_register(&ldb_drv);
++ mxc_dispdrv_setdata(ldb->disp_ldb, ldb);
++
++ dev_set_drvdata(&pdev->dev, ldb);
++
++ dev_dbg(&pdev->dev, "%s exit\n", __func__);
++ return ret;
++}
++
++static int ldb_remove(struct platform_device *pdev)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++
++ if (!ldb->inited)
++ return 0;
++ mxc_dispdrv_puthandle(ldb->disp_ldb);
++ mxc_dispdrv_unregister(ldb->disp_ldb);
++ return 0;
++}
++
++static struct platform_driver mxcldb_driver = {
++ .driver = {
++ .name = "mxc_ldb",
++ .of_match_table = imx_ldb_dt_ids,
++ },
++ .probe = ldb_probe,
++ .remove = ldb_remove,
++ .suspend = ldb_suspend,
++ .resume = ldb_resume,
++};
++
++static int __init ldb_init(void)
++{
++ return platform_driver_register(&mxcldb_driver);
++}
++
++static void __exit ldb_uninit(void)
++{
++ platform_driver_unregister(&mxcldb_driver);
++}
++
++module_init(ldb_init);
++module_exit(ldb_uninit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MXC LDB driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/video/mxc/Makefile linux-openelec/drivers/video/mxc/Makefile
+--- linux-3.14.36/drivers/video/mxc/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/Makefile 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,6 @@
++obj-$(CONFIG_FB_MXC_LDB) += ldb.o
++obj-$(CONFIG_FB_MXC_MIPI_DSI) += mipi_dsi.o
++obj-$(CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL) += mxcfb_hx8369_wvga.o
++obj-$(CONFIG_FB_MXC_HDMI) += mxc_hdmi.o
++obj-$(CONFIG_FB_MXC_EDID) += mxc_edid.o
++obj-$(CONFIG_FB_MXC_SYNC_PANEL) += mxc_dispdrv.o mxc_lcdif.o mxc_ipuv3_fb.o
+diff -Nur linux-3.14.36/drivers/video/mxc/mipi_dsi.c linux-openelec/drivers/video/mxc/mipi_dsi.c
+--- linux-3.14.36/drivers/video/mxc/mipi_dsi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mipi_dsi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,953 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/ipu.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/mipi_dsi.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/backlight.h>
++#include <linux/of_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/reset.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <video/mipi_display.h>
++
++#include "mxc_dispdrv.h"
++#include "mipi_dsi.h"
++
++#define DISPDRV_MIPI "mipi_dsi"
++#define ROUND_UP(x) ((x)+1)
++#define NS2PS_RATIO (1000)
++#define NUMBER_OF_CHUNKS (0x8)
++#define NULL_PKT_SIZE (0x8)
++#define PHY_BTA_MAXTIME (0xd00)
++#define PHY_LP2HS_MAXTIME (0x40)
++#define PHY_HS2LP_MAXTIME (0x40)
++#define PHY_STOP_WAIT_TIME (0x20)
++#define DSI_CLKMGR_CFG_CLK_DIV (0x107)
++#define DSI_GEN_PLD_DATA_BUF_ENTRY (0x10)
++#define MIPI_MUX_CTRL(v) (((v) & 0x3) << 4)
++#define MIPI_LCD_SLEEP_MODE_DELAY (120)
++#define MIPI_DSI_REG_RW_TIMEOUT (20)
++#define MIPI_DSI_PHY_TIMEOUT (10)
++
++static struct mipi_dsi_match_lcd mipi_dsi_lcd_db[] = {
++#ifdef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++ {
++ "TRULY-WVGA",
++ {mipid_hx8369_get_lcd_videomode, mipid_hx8369_lcd_setup}
++ },
++#endif
++ {
++ "", {NULL, NULL}
++ }
++};
++
++struct _mipi_dsi_phy_pll_clk {
++ u32 max_phy_clk;
++ u32 config;
++};
++
++/* configure data for DPHY PLL 27M reference clk out */
++static const struct _mipi_dsi_phy_pll_clk mipi_dsi_phy_pll_clk_table[] = {
++ {1000, 0x74}, /* 950-1000MHz */
++ {950, 0x54}, /* 900-950Mhz */
++ {900, 0x34}, /* 850-900Mhz */
++ {850, 0x14}, /* 800-850MHz */
++ {800, 0x32}, /* 750-800MHz */
++ {750, 0x12}, /* 700-750Mhz */
++ {700, 0x30}, /* 650-700Mhz */
++ {650, 0x10}, /* 600-650MHz */
++ {600, 0x2e}, /* 550-600MHz */
++ {550, 0x0e}, /* 500-550Mhz */
++ {500, 0x2c}, /* 450-500Mhz */
++ {450, 0x0c}, /* 400-450MHz */
++ {400, 0x4a}, /* 360-400MHz */
++ {360, 0x2a}, /* 330-360Mhz */
++ {330, 0x48}, /* 300-330Mhz */
++ {300, 0x28}, /* 270-300MHz */
++ {270, 0x08}, /* 250-270MHz */
++ {250, 0x46}, /* 240-250Mhz */
++ {240, 0x26}, /* 210-240Mhz */
++ {210, 0x06}, /* 200-210MHz */
++ {200, 0x44}, /* 180-200MHz */
++ {180, 0x24}, /* 160-180MHz */
++ {160, 0x04}, /* 150-160MHz */
++};
++
++static int valid_mode(int pixel_fmt)
++{
++ return ((pixel_fmt == IPU_PIX_FMT_RGB24) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR24) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB565) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB332));
++}
++
++static inline void mipi_dsi_read_register(struct mipi_dsi_info *mipi_dsi,
++ u32 reg, u32 *val)
++{
++ *val = ioread32(mipi_dsi->mmio_base + reg);
++ dev_dbg(&mipi_dsi->pdev->dev, "read_reg:0x%02x, val:0x%08x.\n",
++ reg, *val);
++}
++
++static inline void mipi_dsi_write_register(struct mipi_dsi_info *mipi_dsi,
++ u32 reg, u32 val)
++{
++ iowrite32(val, mipi_dsi->mmio_base + reg);
++ dev_dbg(&mipi_dsi->pdev->dev, "\t\twrite_reg:0x%02x, val:0x%08x.\n",
++ reg, val);
++}
++
++int mipi_dsi_pkt_write(struct mipi_dsi_info *mipi_dsi,
++ u8 data_type, const u32 *buf, int len)
++{
++ u32 val;
++ u32 status = 0;
++ int write_len = len;
++ uint32_t timeout = 0;
++
++ if (len) {
++ /* generic long write command */
++ while (len / DSI_GEN_PLD_DATA_BUF_SIZE) {
++ mipi_dsi_write_register(mipi_dsi,
++ MIPI_DSI_GEN_PLD_DATA, *buf);
++ buf++;
++ len -= DSI_GEN_PLD_DATA_BUF_SIZE;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ while ((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ }
++ }
++ /* write the remainder bytes */
++ if (len > 0) {
++ while ((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ }
++ mipi_dsi_write_register(mipi_dsi,
++ MIPI_DSI_GEN_PLD_DATA, *buf);
++ }
++
++ val = data_type | ((write_len & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ } else {
++ /* generic short write command */
++ val = data_type | ((*buf & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ }
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &status);
++ while ((status & DSI_CMD_PKT_STATUS_GEN_CMD_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_CMD_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &status);
++ }
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_GEN_HDR, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &status);
++ while (!((status & DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY) ==
++ DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY) ||
++ !((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY)) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &status);
++ }
++
++ return 0;
++}
++
++int mipi_dsi_pkt_read(struct mipi_dsi_info *mipi_dsi,
++ u8 data_type, u32 *buf, int len)
++{
++ u32 val;
++ int read_len = 0;
++ uint32_t timeout = 0;
++
++ if (!len) {
++ mipi_dbg("%s, len = 0 invalid error!\n", __func__);
++ return -EINVAL;
++ }
++
++ val = data_type | ((*buf & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ memset(buf, 0, len);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_GEN_HDR, val);
++
++ /* wait for the cmd to be sent out */
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &val);
++ while ((val & DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) !=
++ DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ }
++ /* wait for entire response stored in FIFO */
++ while ((val & DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) ==
++ DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ }
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &val);
++ while (!(val & DSI_CMD_PKT_STATUS_GEN_PLD_R_EMPTY)) {
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_GEN_PLD_DATA, buf);
++ read_len += DSI_GEN_PLD_DATA_BUF_SIZE;
++ buf++;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ if (read_len == (DSI_GEN_PLD_DATA_BUF_ENTRY *
++ DSI_GEN_PLD_DATA_BUF_SIZE))
++ break;
++ }
++
++ if ((len <= read_len) &&
++ ((len + DSI_GEN_PLD_DATA_BUF_SIZE) >= read_len))
++ return 0;
++ else {
++ dev_err(&mipi_dsi->pdev->dev,
++ "actually read_len:%d != len:%d.\n", read_len, len);
++ return -ERANGE;
++ }
++}
++
++int mipi_dsi_dcs_cmd(struct mipi_dsi_info *mipi_dsi,
++ u8 cmd, const u32 *param, int num)
++{
++ int err = 0;
++ u32 buf[DSI_CMD_BUF_MAXSIZE];
++
++ switch (cmd) {
++ case MIPI_DCS_EXIT_SLEEP_MODE:
++ case MIPI_DCS_ENTER_SLEEP_MODE:
++ case MIPI_DCS_SET_DISPLAY_ON:
++ case MIPI_DCS_SET_DISPLAY_OFF:
++ buf[0] = cmd;
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_DCS_SHORT_WRITE, buf, 0);
++ break;
++
++ default:
++ dev_err(&mipi_dsi->pdev->dev,
++ "MIPI DSI DCS Command:0x%x Not supported!\n", cmd);
++ break;
++ }
++
++ return err;
++}
++
++static void mipi_dsi_dphy_init(struct mipi_dsi_info *mipi_dsi,
++ u32 cmd, u32 data)
++{
++ u32 val;
++ u32 timeout = 0;
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP, DSI_PWRUP_POWERUP);
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL1,
++ (0x10000 | cmd));
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 2);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL1, (0 | data));
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 2);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ val = DSI_PHY_RSTZ_EN_CLK | DSI_PHY_RSTZ_DISABLE_RST |
++ DSI_PHY_RSTZ_DISABLE_SHUTDOWN;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ while ((val & DSI_PHY_STATUS_LOCK) != DSI_PHY_STATUS_LOCK) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_PHY_TIMEOUT) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "Error: phy lock timeout!\n");
++ break;
++ }
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ }
++ timeout = 0;
++ while ((val & DSI_PHY_STATUS_STOPSTATE_CLK_LANE) !=
++ DSI_PHY_STATUS_STOPSTATE_CLK_LANE) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_PHY_TIMEOUT) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "Error: phy lock lane timeout!\n");
++ break;
++ }
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ }
++}
++
++static void mipi_dsi_enable_controller(struct mipi_dsi_info *mipi_dsi,
++ bool init)
++{
++ u32 val;
++ u32 lane_byte_clk_period;
++ struct fb_videomode *mode = mipi_dsi->mode;
++ struct mipi_lcd_config *lcd_config = mipi_dsi->lcd_config;
++
++ if (init) {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ,
++ DSI_PHY_RSTZ_RST);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CLKMGR_CFG,
++ DSI_CLKMGR_CFG_CLK_DIV);
++
++ if (!(mode->sync & FB_SYNC_VERT_HIGH_ACT))
++ val = DSI_DPI_CFG_VSYNC_ACT_LOW;
++ if (!(mode->sync & FB_SYNC_HOR_HIGH_ACT))
++ val |= DSI_DPI_CFG_HSYNC_ACT_LOW;
++ if ((mode->sync & FB_SYNC_OE_LOW_ACT))
++ val |= DSI_DPI_CFG_DATAEN_ACT_LOW;
++ if (MIPI_RGB666_LOOSELY == lcd_config->dpi_fmt)
++ val |= DSI_DPI_CFG_EN18LOOSELY;
++ val |= (lcd_config->dpi_fmt & DSI_DPI_CFG_COLORCODE_MASK)
++ << DSI_DPI_CFG_COLORCODE_SHIFT;
++ val |= (lcd_config->virtual_ch & DSI_DPI_CFG_VID_MASK)
++ << DSI_DPI_CFG_VID_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_DPI_CFG, val);
++
++ val = DSI_PCKHDL_CFG_EN_BTA |
++ DSI_PCKHDL_CFG_EN_ECC_RX |
++ DSI_PCKHDL_CFG_EN_CRC_RX;
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PCKHDL_CFG, val);
++
++ val = (mode->xres & DSI_VID_PKT_CFG_VID_PKT_SZ_MASK)
++ << DSI_VID_PKT_CFG_VID_PKT_SZ_SHIFT;
++ val |= (NUMBER_OF_CHUNKS & DSI_VID_PKT_CFG_NUM_CHUNKS_MASK)
++ << DSI_VID_PKT_CFG_NUM_CHUNKS_SHIFT;
++ val |= (NULL_PKT_SIZE & DSI_VID_PKT_CFG_NULL_PKT_SZ_MASK)
++ << DSI_VID_PKT_CFG_NULL_PKT_SZ_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_PKT_CFG, val);
++
++ /* enable LP mode when TX DCS cmd and enable DSI command mode */
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG,
++ MIPI_DSI_CMD_MODE_CFG_EN_LOWPOWER);
++
++ /* mipi lane byte clk period in ns unit */
++ lane_byte_clk_period = NS2PS_RATIO /
++ (lcd_config->max_phy_clk / BITS_PER_BYTE);
++ val = ROUND_UP(mode->hsync_len * mode->pixclock /
++ NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HSA_TIME_SHIFT;
++ val |= ROUND_UP(mode->left_margin * mode->pixclock /
++ NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HBP_TIME_SHIFT;
++ val |= ROUND_UP((mode->left_margin + mode->right_margin +
++ mode->hsync_len + mode->xres) * mode->pixclock
++ / NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HLINE_TIME_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_TMR_LINE_CFG, val);
++
++ val = ((mode->vsync_len & DSI_VTIMING_CFG_VSA_LINES_MASK)
++ << DSI_VTIMING_CFG_VSA_LINES_SHIFT);
++ val |= ((mode->upper_margin & DSI_VTIMING_CFG_VBP_LINES_MASK)
++ << DSI_VTIMING_CFG_VBP_LINES_SHIFT);
++ val |= ((mode->lower_margin & DSI_VTIMING_CFG_VFP_LINES_MASK)
++ << DSI_VTIMING_CFG_VFP_LINES_SHIFT);
++ val |= ((mode->yres & DSI_VTIMING_CFG_V_ACT_LINES_MASK)
++ << DSI_VTIMING_CFG_V_ACT_LINES_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VTIMING_CFG, val);
++
++ val = ((PHY_BTA_MAXTIME & DSI_PHY_TMR_CFG_BTA_TIME_MASK)
++ << DSI_PHY_TMR_CFG_BTA_TIME_SHIFT);
++ val |= ((PHY_LP2HS_MAXTIME & DSI_PHY_TMR_CFG_LP2HS_TIME_MASK)
++ << DSI_PHY_TMR_CFG_LP2HS_TIME_SHIFT);
++ val |= ((PHY_HS2LP_MAXTIME & DSI_PHY_TMR_CFG_HS2LP_TIME_MASK)
++ << DSI_PHY_TMR_CFG_HS2LP_TIME_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TMR_CFG, val);
++
++ val = (((lcd_config->data_lane_num - 1) &
++ DSI_PHY_IF_CFG_N_LANES_MASK)
++ << DSI_PHY_IF_CFG_N_LANES_SHIFT);
++ val |= ((PHY_STOP_WAIT_TIME & DSI_PHY_IF_CFG_WAIT_TIME_MASK)
++ << DSI_PHY_IF_CFG_WAIT_TIME_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CFG, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST0, &val);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST1, &val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_ERROR_MSK0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_ERROR_MSK1, 0);
++
++ mipi_dsi_dphy_init(mipi_dsi, DSI_PHY_CLK_INIT_COMMAND,
++ mipi_dsi->dphy_pll_config);
++ } else {
++ mipi_dsi_dphy_init(mipi_dsi, DSI_PHY_CLK_INIT_COMMAND,
++ mipi_dsi->dphy_pll_config);
++ }
++}
++
++static void mipi_dsi_disable_controller(struct mipi_dsi_info *mipi_dsi)
++{
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP, DSI_PWRUP_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ, DSI_PHY_RSTZ_RST);
++}
++
++static irqreturn_t mipi_dsi_irq_handler(int irq, void *data)
++{
++ u32 mask0;
++ u32 mask1;
++ u32 status0;
++ u32 status1;
++ struct mipi_dsi_info *mipi_dsi;
++
++ mipi_dsi = (struct mipi_dsi_info *)data;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST0, &status0);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST1, &status1);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_MSK0, &mask0);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_MSK1, &mask1);
++
++ if ((status0 & (~mask0)) || (status1 & (~mask1))) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "mipi_dsi IRQ status0:0x%x, status1:0x%x!\n",
++ status0, status1);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static inline void mipi_dsi_set_mode(struct mipi_dsi_info *mipi_dsi,
++ bool cmd_mode)
++{
++ u32 val;
++
++ if (cmd_mode) {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, &val);
++ val |= MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_MODE_CFG, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_POWERUP);
++ } else {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++ /* Disable Command mode when transferring video data */
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, &val);
++ val &= ~MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, val);
++ val = DSI_VID_MODE_CFG_EN | DSI_VID_MODE_CFG_EN_BURSTMODE |
++ DSI_VID_MODE_CFG_EN_LP_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_MODE_CFG, val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_POWERUP);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_TX_REQ_CLK_HS);
++ }
++}
++
++static int mipi_dsi_power_on(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (!mipi_dsi->dsi_power_on) {
++ clk_prepare_enable(mipi_dsi->dphy_clk);
++ clk_prepare_enable(mipi_dsi->cfg_clk);
++ mipi_dsi_enable_controller(mipi_dsi, false);
++ mipi_dsi_set_mode(mipi_dsi, false);
++ /* host send pclk/hsync/vsync for two frames before sleep-out */
++ msleep((1000/mipi_dsi->mode->refresh + 1) << 1);
++ mipi_dsi_set_mode(mipi_dsi, true);
++ err = mipi_dsi_dcs_cmd(mipi_dsi, MIPI_DCS_EXIT_SLEEP_MODE,
++ NULL, 0);
++ if (err) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "MIPI DSI DCS Command sleep-in error!\n");
++ }
++ msleep(MIPI_LCD_SLEEP_MODE_DELAY);
++ mipi_dsi_set_mode(mipi_dsi, false);
++ mipi_dsi->dsi_power_on = 1;
++ }
++
++ return 0;
++}
++
++void mipi_dsi_power_off(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (mipi_dsi->dsi_power_on) {
++ mipi_dsi_set_mode(mipi_dsi, true);
++ err = mipi_dsi_dcs_cmd(mipi_dsi, MIPI_DCS_ENTER_SLEEP_MODE,
++ NULL, 0);
++ if (err) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "MIPI DSI DCS Command display on error!\n");
++ }
++ /* To allow time for the supply voltages
++ * and clock circuits to stabilize.
++ */
++ msleep(5);
++ /* video stream timing on */
++ mipi_dsi_set_mode(mipi_dsi, false);
++ msleep(MIPI_LCD_SLEEP_MODE_DELAY);
++
++ mipi_dsi_set_mode(mipi_dsi, true);
++ mipi_dsi_disable_controller(mipi_dsi);
++ mipi_dsi->dsi_power_on = 0;
++ clk_disable_unprepare(mipi_dsi->dphy_clk);
++ clk_disable_unprepare(mipi_dsi->cfg_clk);
++ }
++}
++
++static int mipi_dsi_lcd_init(struct mipi_dsi_info *mipi_dsi,
++ struct mxc_dispdrv_setting *setting)
++{
++ int err;
++ int size;
++ int i;
++ struct fb_videomode *mipi_lcd_modedb;
++ struct fb_videomode mode;
++ struct device *dev = &mipi_dsi->pdev->dev;
++
++ for (i = 0; i < ARRAY_SIZE(mipi_dsi_lcd_db); i++) {
++ if (!strcmp(mipi_dsi->lcd_panel,
++ mipi_dsi_lcd_db[i].lcd_panel)) {
++ mipi_dsi->lcd_callback =
++ &mipi_dsi_lcd_db[i].lcd_callback;
++ break;
++ }
++ }
++ if (i == ARRAY_SIZE(mipi_dsi_lcd_db)) {
++ dev_err(dev, "failed to find supported lcd panel.\n");
++ return -EINVAL;
++ }
++ /* get the videomode in the order: cmdline->platform data->driver */
++ mipi_dsi->lcd_callback->get_mipi_lcd_videomode(&mipi_lcd_modedb, &size,
++ &mipi_dsi->lcd_config);
++ err = fb_find_mode(&setting->fbi->var, setting->fbi,
++ setting->dft_mode_str,
++ mipi_lcd_modedb, size, NULL,
++ setting->default_bpp);
++ if (err != 1)
++ fb_videomode_to_var(&setting->fbi->var, mipi_lcd_modedb);
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < size; i++) {
++ fb_var_to_videomode(&mode, &setting->fbi->var);
++ if (fb_mode_is_equal(&mode, mipi_lcd_modedb + i)) {
++ err = fb_add_videomode(mipi_lcd_modedb + i,
++ &setting->fbi->modelist);
++ /* Note: only support fb mode from driver */
++ mipi_dsi->mode = mipi_lcd_modedb + i;
++ break;
++ }
++ }
++ if ((err < 0) || (size == i)) {
++ dev_err(dev, "failed to add videomode.\n");
++ return err;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(mipi_dsi_phy_pll_clk_table); i++) {
++ if (mipi_dsi_phy_pll_clk_table[i].max_phy_clk <
++ mipi_dsi->lcd_config->max_phy_clk)
++ break;
++ }
++ if ((i == ARRAY_SIZE(mipi_dsi_phy_pll_clk_table)) ||
++ (mipi_dsi->lcd_config->max_phy_clk >
++ mipi_dsi_phy_pll_clk_table[0].max_phy_clk)) {
++ dev_err(dev, "failed to find data in"
++ "mipi_dsi_phy_pll_clk_table.\n");
++ return -EINVAL;
++ }
++ mipi_dsi->dphy_pll_config = mipi_dsi_phy_pll_clk_table[--i].config;
++ dev_dbg(dev, "dphy_pll_config:0x%x.\n", mipi_dsi->dphy_pll_config);
++
++ return 0;
++}
++
++int mipi_dsi_enable(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (!mipi_dsi->lcd_inited) {
++ err = clk_prepare_enable(mipi_dsi->dphy_clk);
++ err |= clk_prepare_enable(mipi_dsi->cfg_clk);
++ if (err)
++ dev_err(&mipi_dsi->pdev->dev,
++ "clk enable error:%d!\n", err);
++ mipi_dsi_enable_controller(mipi_dsi, true);
++ err = mipi_dsi->lcd_callback->mipi_lcd_setup(
++ mipi_dsi);
++ if (err < 0) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "failed to init mipi lcd.");
++ clk_disable_unprepare(mipi_dsi->dphy_clk);
++ clk_disable_unprepare(mipi_dsi->cfg_clk);
++ return err;
++ }
++ mipi_dsi_set_mode(mipi_dsi, false);
++ mipi_dsi->dsi_power_on = 1;
++ mipi_dsi->lcd_inited = 1;
++ }
++ mipi_dsi_power_on(mipi_dsi->disp_mipi);
++
++ return 0;
++}
++
++static int mipi_dsi_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++ struct device *dev = &mipi_dsi->pdev->dev;
++ int ret = 0;
++
++ if (!valid_mode(setting->if_fmt)) {
++ dev_warn(dev, "Input pixel format not valid"
++ "use default RGB24\n");
++ setting->if_fmt = IPU_PIX_FMT_RGB24;
++ }
++
++ setting->dev_id = mipi_dsi->dev_id;
++ setting->disp_id = mipi_dsi->disp_id;
++
++ ret = mipi_dsi_lcd_init(mipi_dsi, setting);
++ if (ret) {
++ dev_err(dev, "failed to init mipi dsi lcd\n");
++ return ret;
++ }
++
++ dev_dbg(dev, "MIPI DSI dispdrv inited!\n");
++ return ret;
++}
++
++static void mipi_dsi_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct mipi_dsi_info *mipi_dsi;
++
++ mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ mipi_dsi_power_off(mipi_dsi->disp_mipi);
++ if (mipi_dsi->bl)
++ backlight_device_unregister(mipi_dsi->bl);
++}
++
++static struct mxc_dispdrv_driver mipi_dsi_drv = {
++ .name = DISPDRV_MIPI,
++ .init = mipi_dsi_disp_init,
++ .deinit = mipi_dsi_disp_deinit,
++ .enable = mipi_dsi_enable,
++ .disable = mipi_dsi_power_off,
++};
++
++static int imx6q_mipi_dsi_get_mux(int dev_id, int disp_id)
++{
++ if (dev_id > 1 || disp_id > 1)
++ return -EINVAL;
++
++ return (dev_id << 5) | (disp_id << 4);
++}
++
++static struct mipi_dsi_bus_mux imx6q_mipi_dsi_mux[] = {
++ {
++ .reg = IOMUXC_GPR3,
++ .mask = IMX6Q_GPR3_MIPI_MUX_CTL_MASK,
++ .get_mux = imx6q_mipi_dsi_get_mux,
++ },
++};
++
++static int imx6dl_mipi_dsi_get_mux(int dev_id, int disp_id)
++{
++ if (dev_id > 1 || disp_id > 1)
++ return -EINVAL;
++
++ /* MIPI DSI source is LCDIF */
++ if (dev_id)
++ disp_id = 0;
++
++ return (dev_id << 5) | (disp_id << 4);
++}
++
++static struct mipi_dsi_bus_mux imx6dl_mipi_dsi_mux[] = {
++ {
++ .reg = IOMUXC_GPR3,
++ .mask = IMX6Q_GPR3_MIPI_MUX_CTL_MASK,
++ .get_mux = imx6dl_mipi_dsi_get_mux,
++ },
++};
++
++static const struct of_device_id imx_mipi_dsi_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mipi-dsi", .data = imx6q_mipi_dsi_mux, },
++ { .compatible = "fsl,imx6dl-mipi-dsi", .data = imx6dl_mipi_dsi_mux, },
++ { }
++};
++MODULE_DEVICE_TABLE(of, imx_mipi_dsi_dt_ids);
++
++/**
++ * This function is called by the driver framework to initialize the MIPI DSI
++ * device.
++ *
++ * @param pdev The device structure for the MIPI DSI passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int mipi_dsi_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id =
++ of_match_device(of_match_ptr(imx_mipi_dsi_dt_ids),
++ &pdev->dev);
++ struct mipi_dsi_info *mipi_dsi;
++ struct resource *res;
++ u32 dev_id, disp_id;
++ const char *lcd_panel;
++ unsigned int mux;
++ int ret = 0;
++
++ mipi_dsi = devm_kzalloc(&pdev->dev, sizeof(*mipi_dsi), GFP_KERNEL);
++ if (!mipi_dsi)
++ return -ENOMEM;
++
++ ret = of_property_read_string(np, "lcd_panel", &lcd_panel);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property lcd_panel\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(np, "dev_id", &dev_id);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property dev_id\n");
++ return ret;
++ }
++ ret = of_property_read_u32(np, "disp_id", &disp_id);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property disp_id\n");
++ return ret;
++ }
++ mipi_dsi->dev_id = dev_id;
++ mipi_dsi->disp_id = disp_id;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "failed to get platform resource 0\n");
++ return -ENODEV;
++ }
++
++ if (!devm_request_mem_region(&pdev->dev, res->start,
++ resource_size(res), pdev->name))
++ return -EBUSY;
++
++ mipi_dsi->mmio_base = devm_ioremap(&pdev->dev, res->start,
++ resource_size(res));
++ if (!mipi_dsi->mmio_base)
++ return -EBUSY;
++
++ mipi_dsi->irq = platform_get_irq(pdev, 0);
++ if (mipi_dsi->irq < 0) {
++ dev_err(&pdev->dev, "failed get device irq\n");
++ return -ENODEV;
++ }
++
++ ret = devm_request_irq(&pdev->dev, mipi_dsi->irq,
++ mipi_dsi_irq_handler,
++ 0, "mipi_dsi", mipi_dsi);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++
++ mipi_dsi->dphy_clk = devm_clk_get(&pdev->dev, "mipi_pllref_clk");
++ if (IS_ERR(mipi_dsi->dphy_clk)) {
++ dev_err(&pdev->dev, "failed to get dphy pll_ref_clk\n");
++ return PTR_ERR(mipi_dsi->dphy_clk);
++ }
++
++ mipi_dsi->cfg_clk = devm_clk_get(&pdev->dev, "mipi_cfg_clk");
++ if (IS_ERR(mipi_dsi->cfg_clk)) {
++ dev_err(&pdev->dev, "failed to get cfg_clk\n");
++ return PTR_ERR(mipi_dsi->cfg_clk);
++ }
++
++ mipi_dsi->disp_power_on = devm_regulator_get(&pdev->dev,
++ "disp-power-on");
++ if (!IS_ERR(mipi_dsi->disp_power_on)) {
++ ret = regulator_enable(mipi_dsi->disp_power_on);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to enable display "
++ "power regulator, err=%d\n", ret);
++ return ret;
++ }
++ } else {
++ mipi_dsi->disp_power_on = NULL;
++ }
++
++ ret = device_reset(&pdev->dev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
++ goto dev_reset_fail;
++ }
++
++ if (of_id)
++ mipi_dsi->bus_mux = of_id->data;
++
++ mipi_dsi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
++ if (IS_ERR(mipi_dsi->regmap)) {
++ dev_err(&pdev->dev, "failed to get parent regmap\n");
++ ret = PTR_ERR(mipi_dsi->regmap);
++ goto get_parent_regmap_fail;
++ }
++
++ mux = mipi_dsi->bus_mux->get_mux(dev_id, disp_id);
++ if (mux >= 0)
++ regmap_update_bits(mipi_dsi->regmap, mipi_dsi->bus_mux->reg,
++ mipi_dsi->bus_mux->mask, mux);
++ else
++ dev_warn(&pdev->dev, "invalid dev_id or disp_id muxing\n");
++
++ mipi_dsi->lcd_panel = kstrdup(lcd_panel, GFP_KERNEL);
++ if (!mipi_dsi->lcd_panel) {
++ dev_err(&pdev->dev, "failed to allocate lcd panel name\n");
++ ret = -ENOMEM;
++ goto kstrdup_fail;
++ }
++
++ mipi_dsi->pdev = pdev;
++ mipi_dsi->disp_mipi = mxc_dispdrv_register(&mipi_dsi_drv);
++ if (IS_ERR(mipi_dsi->disp_mipi)) {
++ dev_err(&pdev->dev, "mxc_dispdrv_register error\n");
++ ret = PTR_ERR(mipi_dsi->disp_mipi);
++ goto dispdrv_reg_fail;
++ }
++
++ mxc_dispdrv_setdata(mipi_dsi->disp_mipi, mipi_dsi);
++ dev_set_drvdata(&pdev->dev, mipi_dsi);
++
++ dev_info(&pdev->dev, "i.MX MIPI DSI driver probed\n");
++ return ret;
++
++dispdrv_reg_fail:
++ kfree(mipi_dsi->lcd_panel);
++kstrdup_fail:
++get_parent_regmap_fail:
++dev_reset_fail:
++ if (mipi_dsi->disp_power_on)
++ regulator_disable(mipi_dsi->disp_power_on);
++ return ret;
++}
++
++static void mipi_dsi_shutdown(struct platform_device *pdev)
++{
++ struct mipi_dsi_info *mipi_dsi = dev_get_drvdata(&pdev->dev);
++
++ mipi_dsi_power_off(mipi_dsi->disp_mipi);
++}
++
++static int mipi_dsi_remove(struct platform_device *pdev)
++{
++ struct mipi_dsi_info *mipi_dsi = dev_get_drvdata(&pdev->dev);
++
++ mxc_dispdrv_puthandle(mipi_dsi->disp_mipi);
++ mxc_dispdrv_unregister(mipi_dsi->disp_mipi);
++
++ if (mipi_dsi->disp_power_on)
++ regulator_disable(mipi_dsi->disp_power_on);
++
++ kfree(mipi_dsi->lcd_panel);
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static struct platform_driver mipi_dsi_driver = {
++ .driver = {
++ .of_match_table = imx_mipi_dsi_dt_ids,
++ .name = "mxc_mipi_dsi",
++ },
++ .probe = mipi_dsi_probe,
++ .remove = mipi_dsi_remove,
++ .shutdown = mipi_dsi_shutdown,
++};
++
++static int __init mipi_dsi_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&mipi_dsi_driver);
++ if (err) {
++ pr_err("mipi_dsi_driver register failed\n");
++ return -ENODEV;
++ }
++ pr_info("MIPI DSI driver module loaded\n");
++ return 0;
++}
++
++static void __exit mipi_dsi_cleanup(void)
++{
++ platform_driver_unregister(&mipi_dsi_driver);
++}
++
++module_init(mipi_dsi_init);
++module_exit(mipi_dsi_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX MIPI DSI driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/video/mxc/mipi_dsi.h linux-openelec/drivers/video/mxc/mipi_dsi.h
+--- linux-3.14.36/drivers/video/mxc/mipi_dsi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mipi_dsi.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,112 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MIPI_DSI_H__
++#define __MIPI_DSI_H__
++
++#include <linux/regmap.h>
++
++#ifdef DEBUG
++#define mipi_dbg(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
++#else
++#define mipi_dbg(fmt, ...)
++#endif
++
++#define DSI_CMD_BUF_MAXSIZE (32)
++
++/* DPI interface pixel color coding map */
++enum mipi_dsi_dpi_fmt {
++ MIPI_RGB565_PACKED = 0,
++ MIPI_RGB565_LOOSELY,
++ MIPI_RGB565_CONFIG3,
++ MIPI_RGB666_PACKED,
++ MIPI_RGB666_LOOSELY,
++ MIPI_RGB888,
++};
++
++struct mipi_lcd_config {
++ u32 virtual_ch;
++ u32 data_lane_num;
++ /* device max DPHY clock in MHz unit */
++ u32 max_phy_clk;
++ enum mipi_dsi_dpi_fmt dpi_fmt;
++};
++
++struct mipi_dsi_info;
++struct mipi_dsi_lcd_callback {
++ /* callback for lcd panel operation */
++ void (*get_mipi_lcd_videomode)(struct fb_videomode **, int *,
++ struct mipi_lcd_config **);
++ int (*mipi_lcd_setup)(struct mipi_dsi_info *);
++
++};
++
++struct mipi_dsi_match_lcd {
++ char *lcd_panel;
++ struct mipi_dsi_lcd_callback lcd_callback;
++};
++
++struct mipi_dsi_bus_mux {
++ int reg;
++ int mask;
++ int (*get_mux) (int dev_id, int disp_id);
++};
++
++/* driver private data */
++struct mipi_dsi_info {
++ struct platform_device *pdev;
++ void __iomem *mmio_base;
++ struct regmap *regmap;
++ const struct mipi_dsi_bus_mux *bus_mux;
++ int dsi_power_on;
++ int lcd_inited;
++ u32 dphy_pll_config;
++ int dev_id;
++ int disp_id;
++ char *lcd_panel;
++ int irq;
++ struct clk *dphy_clk;
++ struct clk *cfg_clk;
++ struct mxc_dispdrv_handle *disp_mipi;
++ struct fb_videomode *mode;
++ struct regulator *disp_power_on;
++ struct mipi_lcd_config *lcd_config;
++ /* board related power control */
++ struct backlight_device *bl;
++ /* callback for lcd panel operation */
++ struct mipi_dsi_lcd_callback *lcd_callback;
++};
++
++int mipi_dsi_pkt_write(struct mipi_dsi_info *mipi,
++ u8 data_type, const u32 *buf, int len);
++int mipi_dsi_pkt_read(struct mipi_dsi_info *mipi,
++ u8 data_type, u32 *buf, int len);
++int mipi_dsi_dcs_cmd(struct mipi_dsi_info *mipi,
++ u8 cmd, const u32 *param, int num);
++
++#ifdef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++void mipid_hx8369_get_lcd_videomode(struct fb_videomode **mode, int *size,
++ struct mipi_lcd_config **data);
++int mipid_hx8369_lcd_setup(struct mipi_dsi_info *);
++#endif
++
++#ifndef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++#error "Please configure MIPI LCD panel, we cannot find one!"
++#endif
++
++#endif
+diff -Nur linux-3.14.36/drivers/video/mxc/mxc_dispdrv.c linux-openelec/drivers/video/mxc/mxc_dispdrv.c
+--- linux-3.14.36/drivers/video/mxc/mxc_dispdrv.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mxc_dispdrv.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,150 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_dispdrv.c
++ * @brief mxc display driver framework.
++ *
++ * A display device driver could call mxc_dispdrv_register(drv) in its dev_probe() function.
++ * Move all dev_probe() things into mxc_dispdrv_driver->init(), init() function should init
++ * and feedback setting;
++ * Necessary deferred operations can be done in mxc_dispdrv_driver->post_init(),
++ * after dev_id and disp_id pass usage check;
++ * Move all dev_remove() things into mxc_dispdrv_driver->deinit();
++ * Move all dev_suspend() things into fb_notifier for SUSPEND, if there is;
++ * Move all dev_resume() things into fb_notifier for RESUME, if there is;
++ *
++ * ipuv3 fb driver could call mxc_dispdrv_gethandle(name, setting) before a fb
++ * need be added, with fbi param passing by setting, after
++ * mxc_dispdrv_gethandle() return, FB driver should get the basic setting
++ * about fbi info and ipuv3-hw (ipu_id and disp_id).
++ *
++ * @ingroup Framebuffer
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include "mxc_dispdrv.h"
++
++static LIST_HEAD(dispdrv_list);
++static DEFINE_MUTEX(dispdrv_lock);
++
++struct mxc_dispdrv_entry {
++ /* Note: drv always the first element */
++ struct mxc_dispdrv_driver *drv;
++ bool active;
++ void *priv;
++ struct list_head list;
++};
++
++struct mxc_dispdrv_handle *mxc_dispdrv_register(struct mxc_dispdrv_driver *drv)
++{
++ struct mxc_dispdrv_entry *new;
++
++ mutex_lock(&dispdrv_lock);
++
++ new = kzalloc(sizeof(struct mxc_dispdrv_entry), GFP_KERNEL);
++ if (!new) {
++ mutex_unlock(&dispdrv_lock);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ new->drv = drv;
++ list_add_tail(&new->list, &dispdrv_list);
++
++ mutex_unlock(&dispdrv_lock);
++
++ return (struct mxc_dispdrv_handle *)new;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_register);
++
++int mxc_dispdrv_unregister(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ mutex_lock(&dispdrv_lock);
++ list_del(&entry->list);
++ mutex_unlock(&dispdrv_lock);
++ kfree(entry);
++ return 0;
++ } else
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_unregister);
++
++struct mxc_dispdrv_handle *mxc_dispdrv_gethandle(char *name,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret, found = 0;
++ struct mxc_dispdrv_entry *entry;
++
++ mutex_lock(&dispdrv_lock);
++ list_for_each_entry(entry, &dispdrv_list, list) {
++ if (!strcmp(entry->drv->name, name) && (entry->drv->init)) {
++ ret = entry->drv->init((struct mxc_dispdrv_handle *)
++ entry, setting);
++ if (ret >= 0) {
++ entry->active = true;
++ found = 1;
++ break;
++ }
++ }
++ }
++ mutex_unlock(&dispdrv_lock);
++
++ return found ? (struct mxc_dispdrv_handle *)entry:ERR_PTR(-ENODEV);
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_gethandle);
++
++void mxc_dispdrv_puthandle(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ mutex_lock(&dispdrv_lock);
++ if (entry && entry->active && entry->drv->deinit) {
++ entry->drv->deinit(handle);
++ entry->active = false;
++ }
++ mutex_unlock(&dispdrv_lock);
++
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_puthandle);
++
++int mxc_dispdrv_setdata(struct mxc_dispdrv_handle *handle, void *data)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ entry->priv = data;
++ return 0;
++ } else
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_setdata);
++
++void *mxc_dispdrv_getdata(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ return entry->priv;
++ } else
++ return ERR_PTR(-EINVAL);
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_getdata);
+diff -Nur linux-3.14.36/drivers/video/mxc/mxc_dispdrv.h linux-openelec/drivers/video/mxc/mxc_dispdrv.h
+--- linux-3.14.36/drivers/video/mxc/mxc_dispdrv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mxc_dispdrv.h 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,54 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __MXC_DISPDRV_H__
++#define __MXC_DISPDRV_H__
++#include <linux/fb.h>
++
++struct mxc_dispdrv_handle {
++ struct mxc_dispdrv_driver *drv;
++};
++
++struct mxc_dispdrv_setting {
++ /*input-feedback parameter*/
++ struct fb_info *fbi;
++ int if_fmt;
++ int default_bpp;
++ char *dft_mode_str;
++
++ /*feedback parameter*/
++ int dev_id;
++ int disp_id;
++};
++
++struct mxc_dispdrv_driver {
++ const char *name;
++ int (*init) (struct mxc_dispdrv_handle *, struct mxc_dispdrv_setting *);
++ /* deferred operations after dev_id and disp_id pass usage check */
++ int (*post_init) (struct mxc_dispdrv_handle *, int dev_id, int disp_id);
++ void (*deinit) (struct mxc_dispdrv_handle *);
++ /* display driver enable function for extension */
++ int (*enable) (struct mxc_dispdrv_handle *);
++ /* display driver disable function, called at early part of fb_blank */
++ void (*disable) (struct mxc_dispdrv_handle *);
++ /* display driver setup function, called at early part of fb_set_par */
++ int (*setup) (struct mxc_dispdrv_handle *, struct fb_info *fbi);
++};
++
++struct mxc_dispdrv_handle *mxc_dispdrv_register(struct mxc_dispdrv_driver *drv);
++int mxc_dispdrv_unregister(struct mxc_dispdrv_handle *handle);
++struct mxc_dispdrv_handle *mxc_dispdrv_gethandle(char *name,
++ struct mxc_dispdrv_setting *setting);
++void mxc_dispdrv_puthandle(struct mxc_dispdrv_handle *handle);
++int mxc_dispdrv_setdata(struct mxc_dispdrv_handle *handle, void *data);
++void *mxc_dispdrv_getdata(struct mxc_dispdrv_handle *handle);
++#endif
+diff -Nur linux-3.14.36/drivers/video/mxc/mxc_edid.c linux-openelec/drivers/video/mxc/mxc_edid.c
+--- linux-3.14.36/drivers/video/mxc/mxc_edid.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mxc_edid.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,762 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxc_edid.c
++ *
++ * @brief MXC EDID driver
++ *
++ * @ingroup Framebuffer
++ */
++
++/*!
++ * Include files
++ */
++#include <linux/i2c.h>
++#include <linux/fb.h>
++#include <video/mxc_edid.h>
++#include "../edid.h"
++
++#undef DEBUG /* define this for verbose EDID parsing output */
++#ifdef DEBUG
++#define DPRINTK(fmt, args...) printk(fmt, ## args)
++#else
++#define DPRINTK(fmt, args...)
++#endif
++
++const struct fb_videomode mxc_cea_mode[64] = {
++ /* #1: 640x480p@59.94/60Hz 4:3 */
++ [1] = {
++ NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #2: 720x480p@59.94/60Hz 4:3 */
++ [2] = {
++ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #3: 720x480p@59.94/60Hz 16:9 */
++ [3] = {
++ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #4: 1280x720p@59.94/60Hz 16:9 */
++ [4] = {
++ NULL, 60, 1280, 720, 13468, 220, 110, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++ /* #5: 1920x1080i@59.94/60Hz 16:9 */
++ [5] = {
++ NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #6: 720(1440)x480iH@59.94/60Hz 4:3 */
++ [6] = {
++ NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #7: 720(1440)x480iH@59.94/60Hz 16:9 */
++ [7] = {
++ NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #8: 720(1440)x240pH@59.94/60Hz 4:3 */
++ [8] = {
++ NULL, 60, 1440, 240, 37108, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #9: 720(1440)x240pH@59.94/60Hz 16:9 */
++ [9] = {
++ NULL, 60, 1440, 240, 37108, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #14: 1440x480p@59.94/60Hz 4:3 */
++ [14] = {
++ NULL, 60, 1440, 480, 18500, 120, 32, 30, 9, 124, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #15: 1440x480p@59.94/60Hz 16:9 */
++ [15] = {
++ NULL, 60, 1440, 480, 18500, 120, 32, 30, 9, 124, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #16: 1920x1080p@60Hz 16:9 */
++ [16] = {
++ NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #17: 720x576pH@50Hz 4:3 */
++ [17] = {
++ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #18: 720x576pH@50Hz 16:9 */
++ [18] = {
++ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #19: 1280x720p@50Hz */
++ [19] = {
++ NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #20: 1920x1080i@50Hz */
++ [20] = {
++ NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #23: 720(1440)x288pH@50Hz 4:3 */
++ [23] = {
++ NULL, 50, 1440, 288, 37037, 138, 24, 19, 2, 126, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #24: 720(1440)x288pH@50Hz 16:9 */
++ [24] = {
++ NULL, 50, 1440, 288, 37037, 138, 24, 19, 2, 126, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #29: 720(1440)x576pH@50Hz 4:3 */
++ [29] = {
++ NULL, 50, 1440, 576, 18518, 136, 24, 39, 5, 128, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #30: 720(1440)x576pH@50Hz 16:9 */
++ [30] = {
++ NULL, 50, 1440, 576, 18518, 136, 24, 39, 5, 128, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #31: 1920x1080p@50Hz */
++ [31] = {
++ NULL, 50, 1920, 1080, 6734, 148, 528, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #32: 1920x1080p@23.98/24Hz */
++ [32] = {
++ NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #33: 1920x1080p@25Hz */
++ [33] = {
++ NULL, 25, 1920, 1080, 13468, 148, 528, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #34: 1920x1080p@30Hz */
++ [34] = {
++ NULL, 30, 1920, 1080, 13468, 148, 88, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #41: 1280x720p@100Hz 16:9 */
++ [41] = {
++ NULL, 100, 1280, 720, 6734, 220, 440, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++ /* #47: 1280x720p@119.88/120Hz 16:9 */
++ [47] = {
++ NULL, 120, 1280, 720, 6734, 220, 110, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++};
++
++/*
++ * We have a special version of fb_mode_is_equal that ignores
++ * pixclock, since for many CEA modes, 2 frequencies are supported
++ * e.g. 640x480 @ 60Hz or 59.94Hz
++ */
++int mxc_edid_fb_mode_is_equal(bool use_aspect,
++ const struct fb_videomode *mode1,
++ const struct fb_videomode *mode2)
++{
++ u32 mask;
++
++ if (use_aspect)
++ mask = ~0;
++ else
++ mask = ~FB_VMODE_ASPECT_MASK;
++
++ return (mode1->xres == mode2->xres &&
++ mode1->yres == mode2->yres &&
++ mode1->hsync_len == mode2->hsync_len &&
++ mode1->vsync_len == mode2->vsync_len &&
++ mode1->left_margin == mode2->left_margin &&
++ mode1->right_margin == mode2->right_margin &&
++ mode1->upper_margin == mode2->upper_margin &&
++ mode1->lower_margin == mode2->lower_margin &&
++ mode1->sync == mode2->sync &&
++ /* refresh check, 59.94Hz and 60Hz have the same parameter
++ * in struct of mxc_cea_mode */
++ abs(mode1->refresh - mode2->refresh) <= 1 &&
++ (mode1->vmode & mask) == (mode2->vmode & mask));
++}
++
++static void get_detailed_timing(unsigned char *block,
++ struct fb_videomode *mode)
++{
++ mode->xres = H_ACTIVE;
++ mode->yres = V_ACTIVE;
++ mode->pixclock = PIXEL_CLOCK;
++ mode->pixclock /= 1000;
++ mode->pixclock = KHZ2PICOS(mode->pixclock);
++ mode->right_margin = H_SYNC_OFFSET;
++ mode->left_margin = (H_ACTIVE + H_BLANKING) -
++ (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
++ mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
++ V_SYNC_WIDTH;
++ mode->lower_margin = V_SYNC_OFFSET;
++ mode->hsync_len = H_SYNC_WIDTH;
++ mode->vsync_len = V_SYNC_WIDTH;
++ if (HSYNC_POSITIVE)
++ mode->sync |= FB_SYNC_HOR_HIGH_ACT;
++ if (VSYNC_POSITIVE)
++ mode->sync |= FB_SYNC_VERT_HIGH_ACT;
++ mode->refresh = PIXEL_CLOCK/((H_ACTIVE + H_BLANKING) *
++ (V_ACTIVE + V_BLANKING));
++ if (INTERLACED) {
++ mode->yres *= 2;
++ mode->upper_margin *= 2;
++ mode->lower_margin *= 2;
++ mode->vsync_len *= 2;
++ mode->vmode |= FB_VMODE_INTERLACED;
++ }
++ mode->flag = FB_MODE_IS_DETAILED;
++
++ if ((H_SIZE / 16) == (V_SIZE / 9))
++ mode->vmode |= FB_VMODE_ASPECT_16_9;
++ else if ((H_SIZE / 4) == (V_SIZE / 3))
++ mode->vmode |= FB_VMODE_ASPECT_4_3;
++ else if ((mode->xres / 16) == (mode->yres / 9))
++ mode->vmode |= FB_VMODE_ASPECT_16_9;
++ else if ((mode->xres / 4) == (mode->yres / 3))
++ mode->vmode |= FB_VMODE_ASPECT_4_3;
++
++ if (mode->vmode & FB_VMODE_ASPECT_16_9)
++ DPRINTK("Aspect ratio: 16:9\n");
++ if (mode->vmode & FB_VMODE_ASPECT_4_3)
++ DPRINTK("Aspect ratio: 4:3\n");
++ DPRINTK(" %d MHz ", PIXEL_CLOCK/1000000);
++ DPRINTK("%d %d %d %d ", H_ACTIVE, H_ACTIVE + H_SYNC_OFFSET,
++ H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH, H_ACTIVE + H_BLANKING);
++ DPRINTK("%d %d %d %d ", V_ACTIVE, V_ACTIVE + V_SYNC_OFFSET,
++ V_ACTIVE + V_SYNC_OFFSET + V_SYNC_WIDTH, V_ACTIVE + V_BLANKING);
++ DPRINTK("%sHSync %sVSync\n\n", (HSYNC_POSITIVE) ? "+" : "-",
++ (VSYNC_POSITIVE) ? "+" : "-");
++}
++
++int mxc_edid_parse_ext_blk(unsigned char *edid,
++ struct mxc_edid_cfg *cfg,
++ struct fb_monspecs *specs)
++{
++ char detail_timing_desc_offset;
++ struct fb_videomode *mode, *m;
++ unsigned char index = 0x0;
++ unsigned char *block;
++ int i, num = 0, revision;
++
++ if (edid[index++] != 0x2) /* only support cea ext block now */
++ return -1;
++ revision = edid[index++];
++ DPRINTK("cea extent revision %d\n", revision);
++ mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
++ if (mode == NULL)
++ return -1;
++
++ detail_timing_desc_offset = edid[index++];
++
++ if (revision >= 2) {
++ cfg->cea_underscan = (edid[index] >> 7) & 0x1;
++ cfg->cea_basicaudio = (edid[index] >> 6) & 0x1;
++ cfg->cea_ycbcr444 = (edid[index] >> 5) & 0x1;
++ cfg->cea_ycbcr422 = (edid[index] >> 4) & 0x1;
++
++ DPRINTK("CEA underscan %d\n", cfg->cea_underscan);
++ DPRINTK("CEA basicaudio %d\n", cfg->cea_basicaudio);
++ DPRINTK("CEA ycbcr444 %d\n", cfg->cea_ycbcr444);
++ DPRINTK("CEA ycbcr422 %d\n", cfg->cea_ycbcr422);
++ }
++
++ if (revision >= 3) {
++ /* short desc */
++ DPRINTK("CEA Short desc timmings\n");
++ index++;
++ while (index < detail_timing_desc_offset) {
++ unsigned char tagcode, blklen;
++
++ tagcode = (edid[index] >> 5) & 0x7;
++ blklen = (edid[index]) & 0x1f;
++
++ DPRINTK("Tagcode %x Len %d\n", tagcode, blklen);
++
++ switch (tagcode) {
++ case 0x2: /*Video data block*/
++ {
++ int cea_idx;
++ i = 0;
++ while (i < blklen) {
++ index++;
++ cea_idx = edid[index] & 0x7f;
++ if (cea_idx < ARRAY_SIZE(mxc_cea_mode) &&
++ (mxc_cea_mode[cea_idx].xres)) {
++ DPRINTK("Support CEA Format #%d\n", cea_idx);
++ mode[num] = mxc_cea_mode[cea_idx];
++ mode[num].flag |= FB_MODE_IS_STANDARD;
++ num++;
++ }
++ i++;
++ }
++ break;
++ }
++ case 0x3: /*Vendor specific data*/
++ {
++ unsigned char IEEE_reg_iden[3];
++ unsigned char deep_color;
++ unsigned char latency_present;
++ unsigned char I_latency_present;
++ unsigned char hdmi_video_present;
++ unsigned char hdmi_3d_present;
++ unsigned char hdmi_3d_multi_present;
++ unsigned char hdmi_vic_len;
++ unsigned char hdmi_3d_len;
++ unsigned char index_inc = 0;
++ unsigned char vsd_end;
++
++ vsd_end = index + blklen;
++
++ IEEE_reg_iden[0] = edid[index+1];
++ IEEE_reg_iden[1] = edid[index+2];
++ IEEE_reg_iden[2] = edid[index+3];
++ cfg->physical_address[0] = (edid[index+4] & 0xf0) >> 4;
++ cfg->physical_address[1] = (edid[index+4] & 0x0f);
++ cfg->physical_address[2] = (edid[index+5] & 0xf0) >> 4;
++ cfg->physical_address[3] = (edid[index+5] & 0x0f);
++
++ if ((IEEE_reg_iden[0] == 0x03) &&
++ (IEEE_reg_iden[1] == 0x0c) &&
++ (IEEE_reg_iden[2] == 0x00))
++ cfg->hdmi_cap = 1;
++
++ if (blklen > 5) {
++ deep_color = edid[index+6];
++ if (deep_color & 0x80)
++ cfg->vsd_support_ai = true;
++ if (deep_color & 0x40)
++ cfg->vsd_dc_48bit = true;
++ if (deep_color & 0x20)
++ cfg->vsd_dc_36bit = true;
++ if (deep_color & 0x10)
++ cfg->vsd_dc_30bit = true;
++ if (deep_color & 0x08)
++ cfg->vsd_dc_y444 = true;
++ if (deep_color & 0x01)
++ cfg->vsd_dvi_dual = true;
++ }
++
++ DPRINTK("VSD hdmi capability %d\n", cfg->hdmi_cap);
++ DPRINTK("VSD support ai %d\n", cfg->vsd_support_ai);
++ DPRINTK("VSD support deep color 48bit %d\n", cfg->vsd_dc_48bit);
++ DPRINTK("VSD support deep color 36bit %d\n", cfg->vsd_dc_36bit);
++ DPRINTK("VSD support deep color 30bit %d\n", cfg->vsd_dc_30bit);
++ DPRINTK("VSD support deep color y444 %d\n", cfg->vsd_dc_y444);
++ DPRINTK("VSD support dvi dual %d\n", cfg->vsd_dvi_dual);
++
++ if (blklen > 6)
++ cfg->vsd_max_tmdsclk_rate = edid[index+7] * 5;
++ DPRINTK("VSD MAX TMDS CLOCK RATE %d\n", cfg->vsd_max_tmdsclk_rate);
++
++ if (blklen > 7) {
++ latency_present = edid[index+8] >> 7;
++ I_latency_present = (edid[index+8] & 0x40) >> 6;
++ hdmi_video_present = (edid[index+8] & 0x20) >> 5;
++ cfg->vsd_cnc3 = (edid[index+8] & 0x8) >> 3;
++ cfg->vsd_cnc2 = (edid[index+8] & 0x4) >> 2;
++ cfg->vsd_cnc1 = (edid[index+8] & 0x2) >> 1;
++ cfg->vsd_cnc0 = edid[index+8] & 0x1;
++
++ DPRINTK("VSD cnc0 %d\n", cfg->vsd_cnc0);
++ DPRINTK("VSD cnc1 %d\n", cfg->vsd_cnc1);
++ DPRINTK("VSD cnc2 %d\n", cfg->vsd_cnc2);
++ DPRINTK("VSD cnc3 %d\n", cfg->vsd_cnc3);
++ DPRINTK("latency_present %d\n", latency_present);
++ DPRINTK("I_latency_present %d\n", I_latency_present);
++ DPRINTK("hdmi_video_present %d\n", hdmi_video_present);
++
++ } else {
++ index += blklen;
++ break;
++ }
++
++ index += 9;
++
++ /*latency present */
++ if (latency_present) {
++ cfg->vsd_video_latency = edid[index++];
++ cfg->vsd_audio_latency = edid[index++];
++
++ if (I_latency_present) {
++ cfg->vsd_I_video_latency = edid[index++];
++ cfg->vsd_I_audio_latency = edid[index++];
++ } else {
++ cfg->vsd_I_video_latency = cfg->vsd_video_latency;
++ cfg->vsd_I_audio_latency = cfg->vsd_audio_latency;
++ }
++
++ DPRINTK("VSD latency video_latency %d\n", cfg->vsd_video_latency);
++ DPRINTK("VSD latency audio_latency %d\n", cfg->vsd_audio_latency);
++ DPRINTK("VSD latency I_video_latency %d\n", cfg->vsd_I_video_latency);
++ DPRINTK("VSD latency I_audio_latency %d\n", cfg->vsd_I_audio_latency);
++ }
++
++ if (hdmi_video_present) {
++ hdmi_3d_present = edid[index] >> 7;
++ hdmi_3d_multi_present = (edid[index] & 0x60) >> 5;
++ index++;
++ hdmi_vic_len = (edid[index] & 0xe0) >> 5;
++ hdmi_3d_len = edid[index] & 0x1f;
++ index++;
++
++ DPRINTK("hdmi_3d_present %d\n", hdmi_3d_present);
++ DPRINTK("hdmi_3d_multi_present %d\n", hdmi_3d_multi_present);
++ DPRINTK("hdmi_vic_len %d\n", hdmi_vic_len);
++ DPRINTK("hdmi_3d_len %d\n", hdmi_3d_len);
++
++ if (hdmi_vic_len > 0) {
++ for (i = 0; i < hdmi_vic_len; i++) {
++ cfg->hdmi_vic[i] = edid[index++];
++ DPRINTK("HDMI_vic=%d\n", cfg->hdmi_vic[i]);
++ }
++ }
++
++ if (hdmi_3d_len > 0) {
++ if (hdmi_3d_present) {
++ if (hdmi_3d_multi_present == 0x1) {
++ cfg->hdmi_3d_struct_all = (edid[index] << 8) | edid[index+1];
++ index_inc = 2;
++ } else if (hdmi_3d_multi_present == 0x2) {
++ cfg->hdmi_3d_struct_all = (edid[index] << 8) | edid[index+1];
++ cfg->hdmi_3d_mask_all = (edid[index+2] << 8) | edid[index+3];
++ index_inc = 4;
++ } else
++ index_inc = 0;
++ }
++
++ DPRINTK("HDMI 3d struct all =0x%x\n", cfg->hdmi_3d_struct_all);
++ DPRINTK("HDMI 3d mask all =0x%x\n", cfg->hdmi_3d_mask_all);
++
++ /* Read 2D vic 3D_struct */
++ if ((hdmi_3d_len - index_inc) > 0) {
++ DPRINTK("Support 3D video format\n");
++ i = 0;
++ while ((hdmi_3d_len - index_inc) > 0) {
++
++ cfg->hdmi_3d_format[i].vic_order_2d = edid[index+index_inc] >> 4;
++ cfg->hdmi_3d_format[i].struct_3d = edid[index+index_inc] & 0x0f;
++ index_inc++;
++
++ if (cfg->hdmi_3d_format[i].struct_3d == 8) {
++ cfg->hdmi_3d_format[i].detail_3d = edid[index+index_inc] >> 4;
++ index_inc++;
++ } else if (cfg->hdmi_3d_format[i].struct_3d > 8) {
++ cfg->hdmi_3d_format[i].detail_3d = 0;
++ index_inc++;
++ }
++
++ DPRINTK("vic_order_2d=%d, 3d_struct=%d, 3d_detail=0x%x\n",
++ cfg->hdmi_3d_format[i].vic_order_2d,
++ cfg->hdmi_3d_format[i].struct_3d,
++ cfg->hdmi_3d_format[i].detail_3d);
++ i++;
++ }
++ }
++ index += index_inc;
++ }
++ }
++
++ index = vsd_end;
++
++ break;
++ }
++ case 0x1: /*Audio data block*/
++ {
++ u8 audio_format, max_ch, byte1, byte2, byte3;
++
++ i = 0;
++ cfg->max_channels = 0;
++ cfg->sample_rates = 0;
++ cfg->sample_sizes = 0;
++
++ while (i < blklen) {
++ byte1 = edid[index + 1];
++ byte2 = edid[index + 2];
++ byte3 = edid[index + 3];
++ index += 3;
++ i += 3;
++
++ audio_format = byte1 >> 3;
++ max_ch = (byte1 & 0x07) + 1;
++
++ DPRINTK("Audio Format Descriptor : %2d\n", audio_format);
++ DPRINTK("Max Number of Channels : %2d\n", max_ch);
++ DPRINTK("Sample Rates : %02x\n", byte2);
++
++ /* ALSA can't specify specific compressed
++ * formats, so only care about PCM for now. */
++ if (audio_format == AUDIO_CODING_TYPE_LPCM) {
++ if (max_ch > cfg->max_channels)
++ cfg->max_channels = max_ch;
++
++ cfg->sample_rates |= byte2;
++ cfg->sample_sizes |= byte3 & 0x7;
++ DPRINTK("Sample Sizes : %02x\n",
++ byte3 & 0x7);
++ }
++ }
++ break;
++ }
++ case 0x4: /*Speaker allocation block*/
++ {
++ i = 0;
++ while (i < blklen) {
++ cfg->speaker_alloc = edid[index + 1];
++ index += 3;
++ i += 3;
++ DPRINTK("Speaker Alloc : %02x\n", cfg->speaker_alloc);
++ }
++ break;
++ }
++ case 0x7: /*User extended block*/
++ default:
++ /* skip */
++ DPRINTK("Not handle block, tagcode = 0x%x\n", tagcode);
++ index += blklen;
++ break;
++ }
++
++ index++;
++ }
++ }
++
++ /* long desc */
++ DPRINTK("CEA long desc timmings\n");
++ index = detail_timing_desc_offset;
++ block = edid + index;
++ while (index < (EDID_LENGTH - DETAILED_TIMING_DESCRIPTION_SIZE)) {
++ if (!(block[0] == 0x00 && block[1] == 0x00)) {
++ get_detailed_timing(block, &mode[num]);
++ num++;
++ }
++ block += DETAILED_TIMING_DESCRIPTION_SIZE;
++ index += DETAILED_TIMING_DESCRIPTION_SIZE;
++ }
++
++ if (!num) {
++ kfree(mode);
++ return 0;
++ }
++
++ m = kmalloc((num + specs->modedb_len) *
++ sizeof(struct fb_videomode), GFP_KERNEL);
++ if (!m)
++ return 0;
++
++ if (specs->modedb_len) {
++ memmove(m, specs->modedb,
++ specs->modedb_len * sizeof(struct fb_videomode));
++ kfree(specs->modedb);
++ }
++ memmove(m+specs->modedb_len, mode,
++ num * sizeof(struct fb_videomode));
++ kfree(mode);
++
++ specs->modedb_len += num;
++ specs->modedb = m;
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_edid_parse_ext_blk);
++
++static int mxc_edid_readblk(struct i2c_adapter *adp,
++ unsigned short addr, unsigned char *edid)
++{
++ int ret = 0, extblknum = 0;
++ unsigned char regaddr = 0x0;
++ struct i2c_msg msg[2] = {
++ {
++ .addr = addr,
++ .flags = 0,
++ .len = 1,
++ .buf = &regaddr,
++ }, {
++ .addr = addr,
++ .flags = I2C_M_RD,
++ .len = EDID_LENGTH,
++ .buf = edid,
++ },
++ };
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++
++ if (edid[1] == 0x00)
++ return -ENOENT;
++
++ extblknum = edid[0x7E];
++
++ if (extblknum) {
++ regaddr = 128;
++ msg[1].buf = edid + EDID_LENGTH;
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID ext block\n");
++ return -EIO;
++ }
++ }
++
++ return extblknum;
++}
++
++static int mxc_edid_readsegblk(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, int seg_num)
++{
++ int ret = 0;
++ unsigned char segment = 0x1, regaddr = 0;
++ struct i2c_msg msg[3] = {
++ {
++ .addr = 0x30,
++ .flags = 0,
++ .len = 1,
++ .buf = &segment,
++ }, {
++ .addr = addr,
++ .flags = 0,
++ .len = 1,
++ .buf = &regaddr,
++ }, {
++ .addr = addr,
++ .flags = I2C_M_RD,
++ .len = EDID_LENGTH,
++ .buf = edid,
++ },
++ };
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++
++ if (seg_num == 2) {
++ regaddr = 128;
++ msg[2].buf = edid + EDID_LENGTH;
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++ }
++
++ return ret;
++}
++
++int mxc_edid_var_to_vic(struct fb_var_screeninfo *var)
++{
++ int i;
++ struct fb_videomode m;
++
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ fb_var_to_videomode(&m, var);
++ if (mxc_edid_fb_mode_is_equal(false, &m, &mxc_cea_mode[i]))
++ break;
++ }
++
++ if (i == ARRAY_SIZE(mxc_cea_mode))
++ return 0;
++
++ return i;
++}
++EXPORT_SYMBOL(mxc_edid_var_to_vic);
++
++int mxc_edid_mode_to_vic(const struct fb_videomode *mode)
++{
++ int i;
++ bool use_aspect = (mode->vmode & FB_VMODE_ASPECT_MASK);
++
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ if (mxc_edid_fb_mode_is_equal(use_aspect, mode, &mxc_cea_mode[i]))
++ break;
++ }
++
++ if (i == ARRAY_SIZE(mxc_cea_mode))
++ return 0;
++
++ return i;
++}
++EXPORT_SYMBOL(mxc_edid_mode_to_vic);
++
++/* make sure edid has 512 bytes*/
++int mxc_edid_read(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, struct mxc_edid_cfg *cfg, struct fb_info *fbi)
++{
++ int ret = 0, extblknum;
++ if (!adp || !edid || !cfg || !fbi)
++ return -EINVAL;
++
++ memset(edid, 0, EDID_LENGTH*4);
++ memset(cfg, 0, sizeof(struct mxc_edid_cfg));
++
++ extblknum = mxc_edid_readblk(adp, addr, edid);
++ if (extblknum < 0)
++ return extblknum;
++
++ /* edid first block parsing */
++ memset(&fbi->monspecs, 0, sizeof(fbi->monspecs));
++ fb_edid_to_monspecs(edid, &fbi->monspecs);
++
++ if (extblknum) {
++ int i;
++
++ /* need read segment block? */
++ if (extblknum > 1) {
++ ret = mxc_edid_readsegblk(adp, addr,
++ edid + EDID_LENGTH*2, extblknum - 1);
++ if (ret < 0)
++ return ret;
++ }
++
++ for (i = 1; i <= extblknum; i++)
++ /* edid ext block parsing */
++ mxc_edid_parse_ext_blk(edid + i*EDID_LENGTH,
++ cfg, &fbi->monspecs);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_edid_read);
++
+diff -Nur linux-3.14.36/drivers/video/mxc/mxcfb_hx8369_wvga.c linux-openelec/drivers/video/mxc/mxcfb_hx8369_wvga.c
+--- linux-3.14.36/drivers/video/mxc/mxcfb_hx8369_wvga.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mxcfb_hx8369_wvga.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,449 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/mipi_dsi.h>
++#include <linux/mxcfb.h>
++#include <linux/backlight.h>
++#include <video/mipi_display.h>
++
++#include "mipi_dsi.h"
++
++#define MIPI_DSI_MAX_RET_PACK_SIZE (0x4)
++
++#define HX8369BL_MAX_BRIGHT (255)
++#define HX8369BL_DEF_BRIGHT (255)
++
++#define HX8369_MAX_DPHY_CLK (800)
++#define HX8369_ONE_DATA_LANE (0x1)
++#define HX8369_TWO_DATA_LANE (0x2)
++
++#define HX8369_CMD_SETEXTC (0xB9)
++#define HX8369_CMD_SETEXTC_LEN (0x4)
++#define HX8369_CMD_SETEXTC_PARAM_1 (0x6983ff)
++
++#define HX8369_CMD_GETHXID (0xF4)
++#define HX8369_CMD_GETHXID_LEN (0x4)
++#define HX8369_ID (0x69)
++#define HX8369_ID_MASK (0xFF)
++
++#define HX8369_CMD_SETDISP (0xB2)
++#define HX8369_CMD_SETDISP_LEN (16)
++#define HX8369_CMD_SETDISP_1_HALT (0x00)
++#define HX8369_CMD_SETDISP_2_RES_MODE (0x23)
++#define HX8369_CMD_SETDISP_3_BP (0x03)
++#define HX8369_CMD_SETDISP_4_FP (0x03)
++#define HX8369_CMD_SETDISP_5_SAP (0x70)
++#define HX8369_CMD_SETDISP_6_GENON (0x00)
++#define HX8369_CMD_SETDISP_7_GENOFF (0xff)
++#define HX8369_CMD_SETDISP_8_RTN (0x00)
++#define HX8369_CMD_SETDISP_9_TEI (0x00)
++#define HX8369_CMD_SETDISP_10_TEP_UP (0x00)
++#define HX8369_CMD_SETDISP_11_TEP_LOW (0x00)
++#define HX8369_CMD_SETDISP_12_BP_PE (0x03)
++#define HX8369_CMD_SETDISP_13_FP_PE (0x03)
++#define HX8369_CMD_SETDISP_14_RTN_PE (0x00)
++#define HX8369_CMD_SETDISP_15_GON (0x01)
++
++#define HX8369_CMD_SETCYC (0xB4)
++#define HX8369_CMD_SETCYC_LEN (6)
++#define HX8369_CMD_SETCYC_PARAM_1 (0x5f1d00)
++#define HX8369_CMD_SETCYC_PARAM_2 (0x060e)
++
++#define HX8369_CMD_SETGIP (0xD5)
++#define HX8369_CMD_SETGIP_LEN (27)
++#define HX8369_CMD_SETGIP_PARAM_1 (0x030400)
++#define HX8369_CMD_SETGIP_PARAM_2 (0x1c050100)
++#define HX8369_CMD_SETGIP_PARAM_3 (0x00030170)
++#define HX8369_CMD_SETGIP_PARAM_4 (0x51064000)
++#define HX8369_CMD_SETGIP_PARAM_5 (0x41000007)
++#define HX8369_CMD_SETGIP_PARAM_6 (0x07075006)
++#define HX8369_CMD_SETGIP_PARAM_7 (0x040f)
++
++#define HX8369_CMD_SETPOWER (0xB1)
++#define HX8369_CMD_SETPOWER_LEN (20)
++#define HX8369_CMD_SETPOWER_PARAM_1 (0x340001)
++#define HX8369_CMD_SETPOWER_PARAM_2 (0x0f0f0006)
++#define HX8369_CMD_SETPOWER_PARAM_3 (0x3f3f322a)
++#define HX8369_CMD_SETPOWER_PARAM_4 (0xe6013a07)
++#define HX8369_CMD_SETPOWER_PARAM_5 (0xe6e6e6e6)
++
++#define HX8369_CMD_SETVCOM (0xB6)
++#define HX8369_CMD_SETVCOM_LEN (3)
++#define HX8369_CMD_SETVCOM_PARAM_1 (0x5656)
++
++#define HX8369_CMD_SETPANEL (0xCC)
++#define HX8369_CMD_SETPANEL_PARAM_1 (0x02)
++
++#define HX8369_CMD_SETGAMMA (0xE0)
++#define HX8369_CMD_SETGAMMA_LEN (35)
++#define HX8369_CMD_SETGAMMA_PARAM_1 (0x221d00)
++#define HX8369_CMD_SETGAMMA_PARAM_2 (0x2e3f3d38)
++#define HX8369_CMD_SETGAMMA_PARAM_3 (0x0f0d064a)
++#define HX8369_CMD_SETGAMMA_PARAM_4 (0x16131513)
++#define HX8369_CMD_SETGAMMA_PARAM_5 (0x1d001910)
++#define HX8369_CMD_SETGAMMA_PARAM_6 (0x3f3d3822)
++#define HX8369_CMD_SETGAMMA_PARAM_7 (0x0d064a2e)
++#define HX8369_CMD_SETGAMMA_PARAM_8 (0x1315130f)
++#define HX8369_CMD_SETGAMMA_PARAM_9 (0x191016)
++
++#define HX8369_CMD_SETMIPI (0xBA)
++#define HX8369_CMD_SETMIPI_LEN (14)
++#define HX8369_CMD_SETMIPI_PARAM_1 (0xc6a000)
++#define HX8369_CMD_SETMIPI_PARAM_2 (0x10000a00)
++#define HX8369_CMD_SETMIPI_ONELANE (0x10 << 24)
++#define HX8369_CMD_SETMIPI_TWOLANE (0x11 << 24)
++#define HX8369_CMD_SETMIPI_PARAM_3 (0x00026f30)
++#define HX8369_CMD_SETMIPI_PARAM_4 (0x4018)
++
++#define HX8369_CMD_SETPIXEL_FMT (0x3A)
++#define HX8369_CMD_SETPIXEL_FMT_24BPP (0x77)
++#define HX8369_CMD_SETPIXEL_FMT_18BPP (0x66)
++#define HX8369_CMD_SETPIXEL_FMT_16BPP (0x55)
++
++#define HX8369_CMD_SETCLUMN_ADDR (0x2A)
++#define HX8369_CMD_SETCLUMN_ADDR_LEN (5)
++#define HX8369_CMD_SETCLUMN_ADDR_PARAM_1 (0xdf0000)
++#define HX8369_CMD_SETCLUMN_ADDR_PARAM_2 (0x01)
++
++#define HX8369_CMD_SETPAGE_ADDR (0x2B)
++#define HX8369_CMD_SETPAGE_ADDR_LEN (5)
++#define HX8369_CMD_SETPAGE_ADDR_PARAM_1 (0x1f0000)
++#define HX8369_CMD_SETPAGE_ADDR_PARAM_2 (0x03)
++
++#define HX8369_CMD_WRT_DISP_BRIGHT (0x51)
++#define HX8369_CMD_WRT_DISP_BRIGHT_PARAM_1 (0xFF)
++
++#define HX8369_CMD_WRT_CABC_MIN_BRIGHT (0x5E)
++#define HX8369_CMD_WRT_CABC_MIN_BRIGHT_PARAM_1 (0x20)
++
++#define HX8369_CMD_WRT_CABC_CTRL (0x55)
++#define HX8369_CMD_WRT_CABC_CTRL_PARAM_1 (0x1)
++
++#define HX8369_CMD_WRT_CTRL_DISP (0x53)
++#define HX8369_CMD_WRT_CTRL_DISP_PARAM_1 (0x24)
++
++#define CHECK_RETCODE(ret) \
++do { \
++ if (ret < 0) { \
++ dev_err(&mipi_dsi->pdev->dev, \
++ "%s ERR: ret:%d, line:%d.\n", \
++ __func__, ret, __LINE__); \
++ return ret; \
++ } \
++} while (0)
++
++static int hx8369bl_brightness;
++static int mipid_init_backlight(struct mipi_dsi_info *mipi_dsi);
++
++static struct fb_videomode truly_lcd_modedb[] = {
++ {
++ "TRULY-WVGA", 64, 480, 800, 37880,
++ 8, 8,
++ 6, 6,
++ 8, 6,
++ FB_SYNC_OE_LOW_ACT,
++ FB_VMODE_NONINTERLACED,
++ 0,
++ },
++};
++
++static struct mipi_lcd_config lcd_config = {
++ .virtual_ch = 0x0,
++ .data_lane_num = HX8369_TWO_DATA_LANE,
++ .max_phy_clk = HX8369_MAX_DPHY_CLK,
++ .dpi_fmt = MIPI_RGB888,
++};
++void mipid_hx8369_get_lcd_videomode(struct fb_videomode **mode, int *size,
++ struct mipi_lcd_config **data)
++{
++ *mode = &truly_lcd_modedb[0];
++ *size = ARRAY_SIZE(truly_lcd_modedb);
++ *data = &lcd_config;
++}
++
++int mipid_hx8369_lcd_setup(struct mipi_dsi_info *mipi_dsi)
++{
++ u32 buf[DSI_CMD_BUF_MAXSIZE];
++ int err;
++
++ dev_dbg(&mipi_dsi->pdev->dev, "MIPI DSI LCD setup.\n");
++ buf[0] = HX8369_CMD_SETEXTC | (HX8369_CMD_SETEXTC_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETEXTC_LEN);
++ CHECK_RETCODE(err);
++ buf[0] = MIPI_DSI_MAX_RET_PACK_SIZE;
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
++ buf, 0);
++ CHECK_RETCODE(err);
++ buf[0] = HX8369_CMD_GETHXID;
++ err = mipi_dsi_pkt_read(mipi_dsi,
++ MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM,
++ buf, HX8369_CMD_GETHXID_LEN);
++ if (!err && ((buf[0] & HX8369_ID_MASK) == HX8369_ID)) {
++ dev_info(&mipi_dsi->pdev->dev,
++ "MIPI DSI LCD ID:0x%x.\n", buf[0]);
++ } else {
++ dev_err(&mipi_dsi->pdev->dev,
++ "mipi_dsi_pkt_read err:%d, data:0x%x.\n",
++ err, buf[0]);
++ dev_info(&mipi_dsi->pdev->dev,
++ "MIPI DSI LCD not detected!\n");
++ return err;
++ }
++
++ /* set LCD resolution as 480RGBx800, DPI interface,
++ * display operation mode: RGB data bypass GRAM mode.
++ */
++ buf[0] = HX8369_CMD_SETDISP | (HX8369_CMD_SETDISP_1_HALT << 8) |
++ (HX8369_CMD_SETDISP_2_RES_MODE << 16) |
++ (HX8369_CMD_SETDISP_3_BP << 24);
++ buf[1] = HX8369_CMD_SETDISP_4_FP | (HX8369_CMD_SETDISP_5_SAP << 8) |
++ (HX8369_CMD_SETDISP_6_GENON << 16) |
++ (HX8369_CMD_SETDISP_7_GENOFF << 24);
++ buf[2] = HX8369_CMD_SETDISP_8_RTN | (HX8369_CMD_SETDISP_9_TEI << 8) |
++ (HX8369_CMD_SETDISP_10_TEP_UP << 16) |
++ (HX8369_CMD_SETDISP_11_TEP_LOW << 24);
++ buf[3] = HX8369_CMD_SETDISP_12_BP_PE |
++ (HX8369_CMD_SETDISP_13_FP_PE << 8) |
++ (HX8369_CMD_SETDISP_14_RTN_PE << 16) |
++ (HX8369_CMD_SETDISP_15_GON << 24);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETDISP_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set display waveform cycle */
++ buf[0] = HX8369_CMD_SETCYC | (HX8369_CMD_SETCYC_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETCYC_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETCYC_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set GIP timing output control */
++ buf[0] = HX8369_CMD_SETGIP | (HX8369_CMD_SETGIP_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETGIP_PARAM_2;
++ buf[2] = HX8369_CMD_SETGIP_PARAM_3;
++ buf[3] = HX8369_CMD_SETGIP_PARAM_4;
++ buf[4] = HX8369_CMD_SETGIP_PARAM_5;
++ buf[5] = HX8369_CMD_SETGIP_PARAM_6;
++ buf[6] = HX8369_CMD_SETGIP_PARAM_7;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETGIP_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set power: standby, DC etc. */
++ buf[0] = HX8369_CMD_SETPOWER | (HX8369_CMD_SETPOWER_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETPOWER_PARAM_2;
++ buf[2] = HX8369_CMD_SETPOWER_PARAM_3;
++ buf[3] = HX8369_CMD_SETPOWER_PARAM_4;
++ buf[4] = HX8369_CMD_SETPOWER_PARAM_5;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETPOWER_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set VCOM voltage. */
++ buf[0] = HX8369_CMD_SETVCOM | (HX8369_CMD_SETVCOM_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETVCOM_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set Panel: BGR/RGB or Inversion. */
++ buf[0] = HX8369_CMD_SETPANEL | (HX8369_CMD_SETPANEL_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM, buf, 0);
++ CHECK_RETCODE(err);
++
++ /* Set gamma curve related setting */
++ buf[0] = HX8369_CMD_SETGAMMA | (HX8369_CMD_SETGAMMA_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETGAMMA_PARAM_2;
++ buf[2] = HX8369_CMD_SETGAMMA_PARAM_3;
++ buf[3] = HX8369_CMD_SETGAMMA_PARAM_4;
++ buf[4] = HX8369_CMD_SETGAMMA_PARAM_5;
++ buf[5] = HX8369_CMD_SETGAMMA_PARAM_6;
++	buf[6] = HX8369_CMD_SETGAMMA_PARAM_7;
++	buf[7] = HX8369_CMD_SETGAMMA_PARAM_8;
++ buf[8] = HX8369_CMD_SETGAMMA_PARAM_9;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETGAMMA_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set MIPI: DPHYCMD & DSICMD, data lane number */
++ buf[0] = HX8369_CMD_SETMIPI | (HX8369_CMD_SETMIPI_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETMIPI_PARAM_2;
++ buf[2] = HX8369_CMD_SETMIPI_PARAM_3;
++ if (lcd_config.data_lane_num == HX8369_ONE_DATA_LANE)
++ buf[2] |= HX8369_CMD_SETMIPI_ONELANE;
++ else
++ buf[2] |= HX8369_CMD_SETMIPI_TWOLANE;
++ buf[3] = HX8369_CMD_SETMIPI_PARAM_4;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETMIPI_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set pixel format:24bpp */
++ buf[0] = HX8369_CMD_SETPIXEL_FMT;
++ switch (lcd_config.dpi_fmt) {
++ case MIPI_RGB565_PACKED:
++ case MIPI_RGB565_LOOSELY:
++ case MIPI_RGB565_CONFIG3:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_16BPP << 8);
++ break;
++
++ case MIPI_RGB666_LOOSELY:
++ case MIPI_RGB666_PACKED:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_18BPP << 8);
++ break;
++
++ case MIPI_RGB888:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_24BPP << 8);
++ break;
++
++ default:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_24BPP << 8);
++ break;
++ }
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ /* Set column address: 0~479 */
++ buf[0] = HX8369_CMD_SETCLUMN_ADDR |
++ (HX8369_CMD_SETCLUMN_ADDR_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETCLUMN_ADDR_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETCLUMN_ADDR_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set page address: 0~799 */
++ buf[0] = HX8369_CMD_SETPAGE_ADDR |
++ (HX8369_CMD_SETPAGE_ADDR_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETPAGE_ADDR_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETPAGE_ADDR_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set display brightness related */
++ buf[0] = HX8369_CMD_WRT_DISP_BRIGHT |
++ (HX8369_CMD_WRT_DISP_BRIGHT_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ buf[0] = HX8369_CMD_WRT_CABC_CTRL |
++ (HX8369_CMD_WRT_CABC_CTRL_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ buf[0] = HX8369_CMD_WRT_CTRL_DISP |
++ (HX8369_CMD_WRT_CTRL_DISP_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ /* exit sleep mode and set display on */
++ buf[0] = MIPI_DCS_EXIT_SLEEP_MODE;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++ /* To allow time for the supply voltages
++ * and clock circuits to stabilize.
++ */
++ msleep(5);
++ buf[0] = MIPI_DCS_SET_DISPLAY_ON;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ err = mipid_init_backlight(mipi_dsi);
++ return err;
++}
++
++static int mipid_bl_update_status(struct backlight_device *bl)
++{
++ u32 buf;
++ int brightness = bl->props.brightness;
++ struct mipi_dsi_info *mipi_dsi = bl_get_data(bl);
++
++ if (bl->props.power != FB_BLANK_UNBLANK ||
++ bl->props.fb_blank != FB_BLANK_UNBLANK)
++ brightness = 0;
++
++ buf = HX8369_CMD_WRT_DISP_BRIGHT |
++ ((brightness & HX8369BL_MAX_BRIGHT) << 8);
++ mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ &buf, 0);
++
++ hx8369bl_brightness = brightness & HX8369BL_MAX_BRIGHT;
++
++	dev_dbg(&bl->dev, "mipid backlight brightness:%d.\n", brightness);
++ return 0;
++}
++
++static int mipid_bl_get_brightness(struct backlight_device *bl)
++{
++ return hx8369bl_brightness;
++}
++
++static int mipi_bl_check_fb(struct backlight_device *bl, struct fb_info *fbi)
++{
++ return 0;
++}
++
++static const struct backlight_ops mipid_lcd_bl_ops = {
++ .update_status = mipid_bl_update_status,
++ .get_brightness = mipid_bl_get_brightness,
++ .check_fb = mipi_bl_check_fb,
++};
++
++static int mipid_init_backlight(struct mipi_dsi_info *mipi_dsi)
++{
++ struct backlight_properties props;
++ struct backlight_device *bl;
++
++ if (mipi_dsi->bl) {
++ pr_debug("mipid backlight already init!\n");
++ return 0;
++ }
++ memset(&props, 0, sizeof(struct backlight_properties));
++ props.max_brightness = HX8369BL_MAX_BRIGHT;
++ props.type = BACKLIGHT_RAW;
++ bl = backlight_device_register("mipid-bl", &mipi_dsi->pdev->dev,
++ mipi_dsi, &mipid_lcd_bl_ops, &props);
++ if (IS_ERR(bl)) {
++ pr_err("error %ld on backlight register\n", PTR_ERR(bl));
++ return PTR_ERR(bl);
++ }
++ mipi_dsi->bl = bl;
++ bl->props.power = FB_BLANK_UNBLANK;
++ bl->props.fb_blank = FB_BLANK_UNBLANK;
++ bl->props.brightness = HX8369BL_DEF_BRIGHT;
++
++ mipid_bl_update_status(bl);
++ return 0;
++}
+diff -Nur linux-3.14.36/drivers/video/mxc/mxc_hdmi.c linux-openelec/drivers/video/mxc/mxc_hdmi.c
+--- linux-3.14.36/drivers/video/mxc/mxc_hdmi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mxc_hdmi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,3042 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++/*
++ * SH-Mobile High-Definition Multimedia Interface (HDMI) driver
++ * for SLISHDMI13T and SLIPHDMIT IP cores
++ *
++ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/io.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/uaccess.h>
++#include <linux/cpufreq.h>
++#include <linux/firmware.h>
++#include <linux/kthread.h>
++#include <linux/regulator/driver.h>
++#include <linux/fsl_devices.h>
++#include <linux/ipu.h>
++#include <linux/regmap.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/of_device.h>
++
++#include <linux/console.h>
++#include <linux/types.h>
++
++#include "../edid.h"
++#include <video/mxc_edid.h>
++#include <video/mxc_hdmi.h>
++#include "mxc_dispdrv.h"
++
++#include <linux/mfd/mxc-hdmi-core.h>
++
++#define DISPDRV_HDMI "hdmi"
++#define HDMI_EDID_LEN 512
++
++/* status codes for reading edid */
++#define HDMI_EDID_SUCCESS 0
++#define HDMI_EDID_FAIL -1
++#define HDMI_EDID_SAME -2
++#define HDMI_EDID_NO_MODES -3
++
++#define NUM_CEA_VIDEO_MODES 64
++#define DEFAULT_VIDEO_MODE 16 /* 1080P */
++
++#define RGB 0
++#define YCBCR444 1
++#define YCBCR422_16BITS 2
++#define YCBCR422_8BITS 3
++#define XVYCC444 4
++
++/*
++ * We follow a flowchart which is in the "Synopsys DesignWare Courses
++ * HDMI Transmitter Controller User Guide, 1.30a", section 3.1
++ * (dwc_hdmi_tx_user.pdf)
++ *
++ * Below are notes that say "HDMI Initialization Step X"
++ * These correspond to the flowchart.
++ */
++
++/*
++ * We are required to configure VGA mode before reading edid
++ * in HDMI Initialization Step B
++ */
++static const struct fb_videomode vga_mode = {
++ /* 640x480 @ 60 Hz, 31.5 kHz hsync */
++ NULL, 60, 640, 480, 39721, 48, 16, 33, 10, 96, 2, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, FB_MODE_IS_VESA,
++};
++
++enum hdmi_datamap {
++ RGB444_8B = 0x01,
++ RGB444_10B = 0x03,
++ RGB444_12B = 0x05,
++ RGB444_16B = 0x07,
++ YCbCr444_8B = 0x09,
++ YCbCr444_10B = 0x0B,
++ YCbCr444_12B = 0x0D,
++ YCbCr444_16B = 0x0F,
++ YCbCr422_8B = 0x16,
++ YCbCr422_10B = 0x14,
++ YCbCr422_12B = 0x12,
++};
++
++enum hdmi_colorimetry {
++ eITU601,
++ eITU709,
++};
++
++struct hdmi_vmode {
++ bool mDVI;
++ bool mHSyncPolarity;
++ bool mVSyncPolarity;
++ bool mInterlaced;
++ bool mDataEnablePolarity;
++
++ unsigned int mPixelClock;
++ unsigned int mPixelRepetitionInput;
++ unsigned int mPixelRepetitionOutput;
++};
++
++struct hdmi_data_info {
++ unsigned int enc_in_format;
++ unsigned int enc_out_format;
++ unsigned int enc_color_depth;
++ unsigned int colorimetry;
++ unsigned int pix_repet_factor;
++ unsigned int hdcp_enable;
++ unsigned int rgb_out_enable;
++ unsigned int rgb_quant_range;
++ struct hdmi_vmode video_mode;
++};
++
++struct hdmi_phy_reg_config {
++ /* HDMI PHY register config for pass HCT */
++ u16 reg_vlev;
++ u16 reg_cksymtx;
++};
++
++struct mxc_hdmi {
++ struct platform_device *pdev;
++ struct platform_device *core_pdev;
++ struct mxc_dispdrv_handle *disp_mxc_hdmi;
++ struct fb_info *fbi;
++ struct clk *hdmi_isfr_clk;
++ struct clk *hdmi_iahb_clk;
++ struct timer_list jitter_timer;
++ struct work_struct hotplug_work;
++ struct delayed_work hdcp_hdp_work;
++
++ struct notifier_block nb;
++
++ struct hdmi_data_info hdmi_data;
++ int vic;
++ int edid_status;
++ struct mxc_edid_cfg edid_cfg;
++ u8 edid[HDMI_EDID_LEN];
++ bool fb_reg;
++ bool cable_plugin;
++ u8 blank;
++ bool dft_mode_set;
++ char *dft_mode_str;
++ int default_bpp;
++ u8 latest_intr_stat;
++ u8 plug_event;
++ u8 plug_mask;
++ bool irq_enabled;
++ spinlock_t irq_lock;
++ bool phy_enabled;
++ struct fb_videomode default_mode;
++ struct fb_videomode previous_non_vga_mode;
++ bool requesting_vga_for_initialization;
++
++ int *gpr_base;
++ int *gpr_hdmi_base;
++ int *gpr_sdma_base;
++ int cpu_type;
++ int cpu_version;
++ struct hdmi_phy_reg_config phy_config;
++
++ struct pinctrl *pinctrl;
++};
++
++static int hdmi_major;
++static struct class *hdmi_class;
++
++struct i2c_client *hdmi_i2c;
++struct mxc_hdmi *g_hdmi;
++
++static bool hdmi_inited;
++static bool hdcp_init;
++
++extern const struct fb_videomode mxc_cea_mode[64];
++extern void mxc_hdmi_cec_handle(u16 cec_stat);
++
++static void mxc_hdmi_setup(struct mxc_hdmi *hdmi, unsigned long event);
++static void hdmi_enable_overflow_interrupts(void);
++static void hdmi_disable_overflow_interrupts(void);
++
++static char *rgb_quant_range = "default";
++module_param(rgb_quant_range, charp, S_IRUGO);
++MODULE_PARM_DESC(rgb_quant_range, "RGB Quant Range (default, limited, full)");
++
++static struct platform_device_id imx_hdmi_devtype[] = {
++ {
++ .name = "hdmi-imx6DL",
++ .driver_data = IMX6DL_HDMI,
++ }, {
++ .name = "hdmi-imx6Q",
++ .driver_data = IMX6Q_HDMI,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype);
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx6dl-hdmi-video", .data = &imx_hdmi_devtype[IMX6DL_HDMI], },
++ { .compatible = "fsl,imx6q-hdmi-video", .data = &imx_hdmi_devtype[IMX6Q_HDMI], },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
++
++static inline int cpu_is_imx6dl(struct mxc_hdmi *hdmi)
++{
++ return hdmi->cpu_type == IMX6DL_HDMI;
++}
++#ifdef DEBUG
++static void dump_fb_videomode(struct fb_videomode *m)
++{
++ pr_debug("fb_videomode = %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
++ m->refresh, m->xres, m->yres, m->pixclock, m->left_margin,
++ m->right_margin, m->upper_margin, m->lower_margin,
++ m->hsync_len, m->vsync_len, m->sync, m->vmode, m->flag);
++}
++#else
++static void dump_fb_videomode(struct fb_videomode *m)
++{}
++#endif
++
++static ssize_t mxc_hdmi_show_name(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ strcpy(buf, hdmi->fbi->fix.id);
++ sprintf(buf+strlen(buf), "\n");
++
++ return strlen(buf);
++}
++
++static DEVICE_ATTR(fb_name, S_IRUGO, mxc_hdmi_show_name, NULL);
++
++static ssize_t mxc_hdmi_show_state(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->cable_plugin == false)
++ strcpy(buf, "plugout\n");
++ else
++ strcpy(buf, "plugin\n");
++
++ return strlen(buf);
++}
++
++static DEVICE_ATTR(cable_state, S_IRUGO, mxc_hdmi_show_state, NULL);
++
++static ssize_t mxc_hdmi_show_edid(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ int i, j, len = 0;
++
++ for (j = 0; j < HDMI_EDID_LEN/16; j++) {
++ for (i = 0; i < 16; i++)
++ len += sprintf(buf+len, "0x%02X ",
++ hdmi->edid[j*16 + i]);
++ len += sprintf(buf+len, "\n");
++ }
++
++ return len;
++}
++
++static DEVICE_ATTR(edid, S_IRUGO, mxc_hdmi_show_edid, NULL);
++
++static ssize_t mxc_hdmi_show_rgb_out_enable(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->hdmi_data.rgb_out_enable == true)
++ strcpy(buf, "RGB out\n");
++ else
++ strcpy(buf, "YCbCr out\n");
++
++ return strlen(buf);
++}
++
++static ssize_t mxc_hdmi_store_rgb_out_enable(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ unsigned long value;
++ int ret;
++
++ ret = strict_strtoul(buf, 10, &value);
++ if (ret)
++ return ret;
++
++ hdmi->hdmi_data.rgb_out_enable = value;
++
++ /* Reconfig HDMI for output color space change */
++ mxc_hdmi_setup(hdmi, 0);
++
++ return count;
++}
++
++static DEVICE_ATTR(rgb_out_enable, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_rgb_out_enable,
++ mxc_hdmi_store_rgb_out_enable);
++
++static ssize_t mxc_hdmi_show_rgb_quant_range(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ switch (hdmi->hdmi_data.rgb_quant_range) {
++ case HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE:
++ strcpy(buf, "limited\n");
++ break;
++ case HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE:
++ strcpy(buf, "full\n");
++ break;
++ case HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT:
++ default:
++ strcpy(buf, "default\n");
++ break;
++ };
++
++ return strlen(buf);
++}
++
++static ssize_t mxc_hdmi_store_rgb_quant_range(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ int ret = count;
++
++ if (sysfs_streq("limited", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE;
++ } else if (sysfs_streq("full", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE;
++ } else if (sysfs_streq("default", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT;
++ } else {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* Reconfig HDMI for output RGB Quant Range change if using RGB out */
++ if(hdmi->hdmi_data.rgb_out_enable)
++ mxc_hdmi_setup(hdmi, 0);
++out:
++ return ret;
++}
++
++static DEVICE_ATTR(rgb_quant_range, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_rgb_quant_range,
++ mxc_hdmi_store_rgb_quant_range);
++
++static ssize_t mxc_hdmi_show_hdcp_enable(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->hdmi_data.hdcp_enable == false)
++ strcpy(buf, "hdcp disable\n");
++ else
++ strcpy(buf, "hdcp enable\n");
++
++ return strlen(buf);
++
++}
++
++static ssize_t mxc_hdmi_store_hdcp_enable(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++ unsigned long value;
++ int ret;
++
++ ret = strict_strtoul(buf, 10, &value);
++ if (ret)
++ return ret;
++
++ hdmi->hdmi_data.hdcp_enable = value;
++
++ /* Reconfig HDMI for HDCP */
++ mxc_hdmi_setup(hdmi, 0);
++
++ if (hdmi->hdmi_data.hdcp_enable == false) {
++ sprintf(event_string, "EVENT=hdcpdisable");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++ } else {
++ sprintf(event_string, "EVENT=hdcpenable");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++ }
++
++ return count;
++
++}
++
++static DEVICE_ATTR(hdcp_enable, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_hdcp_enable, mxc_hdmi_store_hdcp_enable);
++
++/*!
++ * this submodule is responsible for the video data synchronization.
++ * for example, for RGB 4:4:4 input, the data map is defined as
++ * pin{47~40} <==> R[7:0]
++ * pin{31~24} <==> G[7:0]
++ * pin{15~8} <==> B[7:0]
++ */
++static void hdmi_video_sample(struct mxc_hdmi *hdmi)
++{
++ int color_format = 0;
++ u8 val;
++
++ if (hdmi->hdmi_data.enc_in_format == RGB) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x01;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x03;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x05;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_format = 0x07;
++ else
++ return;
++ } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x09;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x0B;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x0D;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_format = 0x0F;
++ else
++ return;
++ } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x16;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x14;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x12;
++ else
++ return;
++ }
++
++ val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE |
++ ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) &
++ HDMI_TX_INVID0_VIDEO_MAPPING_MASK);
++ hdmi_writeb(val, HDMI_TX_INVID0);
++
++ /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */
++ val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE |
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE |
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE;
++ hdmi_writeb(val, HDMI_TX_INSTUFFING);
++ hdmi_writeb(0x0, HDMI_TX_GYDATA0);
++ hdmi_writeb(0x0, HDMI_TX_GYDATA1);
++ hdmi_writeb(0x0, HDMI_TX_RCRDATA0);
++ hdmi_writeb(0x0, HDMI_TX_RCRDATA1);
++ hdmi_writeb(0x0, HDMI_TX_BCBDATA0);
++ hdmi_writeb(0x0, HDMI_TX_BCBDATA1);
++}
++
++static int isColorSpaceConversion(struct mxc_hdmi *hdmi)
++{
++ return (hdmi->hdmi_data.enc_in_format != hdmi->hdmi_data.enc_out_format) ||
++ (hdmi->hdmi_data.enc_out_format == RGB &&
++ ((hdmi->hdmi_data.rgb_quant_range == HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE) ||
++ (hdmi->hdmi_data.rgb_quant_range == HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT && hdmi->vic > 1)));
++}
++
++static int isColorSpaceDecimation(struct mxc_hdmi *hdmi)
++{
++ return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) &&
++ (hdmi->hdmi_data.enc_in_format == RGB ||
++ hdmi->hdmi_data.enc_in_format == YCBCR444));
++}
++
++static int isColorSpaceInterpolation(struct mxc_hdmi *hdmi)
++{
++ return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) &&
++ (hdmi->hdmi_data.enc_out_format == RGB
++ || hdmi->hdmi_data.enc_out_format == YCBCR444));
++}
++
++/*!
++ * update the color space conversion coefficients.
++ */
++static void update_csc_coeffs(struct mxc_hdmi *hdmi)
++{
++ unsigned short csc_coeff[3][4];
++ unsigned int csc_scale = 1;
++ u8 val;
++ bool coeff_selected = false;
++
++ if (isColorSpaceConversion(hdmi)) { /* csc needed */
++ if (hdmi->hdmi_data.enc_out_format == RGB) {
++ if (hdmi->hdmi_data.enc_in_format == RGB) {
++ csc_coeff[0][0] = 0x1b80;
++ csc_coeff[0][1] = 0x0000;
++ csc_coeff[0][2] = 0x0000;
++ csc_coeff[0][3] = 0x0020;
++
++ csc_coeff[1][0] = 0x0000;
++ csc_coeff[1][1] = 0x1b80;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x0020;
++
++ csc_coeff[2][0] = 0x0000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x1b80;
++ csc_coeff[2][3] = 0x0020;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU601) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x6926;
++ csc_coeff[0][2] = 0x74fd;
++ csc_coeff[0][3] = 0x010e;
++
++ csc_coeff[1][0] = 0x2000;
++ csc_coeff[1][1] = 0x2cdd;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x7e9a;
++
++ csc_coeff[2][0] = 0x2000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x38b4;
++ csc_coeff[2][3] = 0x7e3b;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU709) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x7106;
++ csc_coeff[0][2] = 0x7a02;
++ csc_coeff[0][3] = 0x00a7;
++
++ csc_coeff[1][0] = 0x2000;
++ csc_coeff[1][1] = 0x3264;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x7e6d;
++
++ csc_coeff[2][0] = 0x2000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x3b61;
++ csc_coeff[2][3] = 0x7e25;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ }
++ } else if (hdmi->hdmi_data.enc_in_format == RGB) {
++ if (hdmi->hdmi_data.colorimetry == eITU601) {
++ csc_coeff[0][0] = 0x2591;
++ csc_coeff[0][1] = 0x1322;
++ csc_coeff[0][2] = 0x074b;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x6535;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x7acc;
++ csc_coeff[1][3] = 0x0200;
++
++ csc_coeff[2][0] = 0x6acd;
++ csc_coeff[2][1] = 0x7534;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0200;
++
++ csc_scale = 0;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU709) {
++ csc_coeff[0][0] = 0x2dc5;
++ csc_coeff[0][1] = 0x0d9b;
++ csc_coeff[0][2] = 0x049e;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x62f0;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x7d11;
++ csc_coeff[1][3] = 0x0200;
++
++ csc_coeff[2][0] = 0x6756;
++ csc_coeff[2][1] = 0x78ab;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0200;
++
++ csc_scale = 0;
++ coeff_selected = true;
++ }
++ }
++ }
++
++ if (!coeff_selected) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x0000;
++ csc_coeff[0][2] = 0x0000;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x0000;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x0000;
++
++ csc_coeff[2][0] = 0x0000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0000;
++
++ csc_scale = 1;
++ }
++
++ /* Update CSC parameters in HDMI CSC registers */
++ hdmi_writeb((unsigned char)(csc_coeff[0][0] & 0xFF),
++ HDMI_CSC_COEF_A1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][0] >> 8),
++ HDMI_CSC_COEF_A1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][1] & 0xFF),
++ HDMI_CSC_COEF_A2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][1] >> 8),
++ HDMI_CSC_COEF_A2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][2] & 0xFF),
++ HDMI_CSC_COEF_A3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][2] >> 8),
++ HDMI_CSC_COEF_A3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][3] & 0xFF),
++ HDMI_CSC_COEF_A4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][3] >> 8),
++ HDMI_CSC_COEF_A4_MSB);
++
++ hdmi_writeb((unsigned char)(csc_coeff[1][0] & 0xFF),
++ HDMI_CSC_COEF_B1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][0] >> 8),
++ HDMI_CSC_COEF_B1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][1] & 0xFF),
++ HDMI_CSC_COEF_B2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][1] >> 8),
++ HDMI_CSC_COEF_B2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][2] & 0xFF),
++ HDMI_CSC_COEF_B3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][2] >> 8),
++ HDMI_CSC_COEF_B3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][3] & 0xFF),
++ HDMI_CSC_COEF_B4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][3] >> 8),
++ HDMI_CSC_COEF_B4_MSB);
++
++ hdmi_writeb((unsigned char)(csc_coeff[2][0] & 0xFF),
++ HDMI_CSC_COEF_C1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][0] >> 8),
++ HDMI_CSC_COEF_C1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][1] & 0xFF),
++ HDMI_CSC_COEF_C2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][1] >> 8),
++ HDMI_CSC_COEF_C2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][2] & 0xFF),
++ HDMI_CSC_COEF_C3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][2] >> 8),
++ HDMI_CSC_COEF_C3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][3] & 0xFF),
++ HDMI_CSC_COEF_C4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][3] >> 8),
++ HDMI_CSC_COEF_C4_MSB);
++
++ val = hdmi_readb(HDMI_CSC_SCALE);
++ val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK;
++ val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK;
++ hdmi_writeb(val, HDMI_CSC_SCALE);
++}
++
++static void hdmi_video_csc(struct mxc_hdmi *hdmi)
++{
++ int color_depth = 0;
++ int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
++ int decimation = HDMI_CSC_CFG_DECMODE_DISABLE;
++ u8 val;
++
++ /* YCC422 interpolation to 444 mode */
++ if (isColorSpaceInterpolation(hdmi))
++ interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1;
++ else if (isColorSpaceDecimation(hdmi))
++ decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
++
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP;
++ else
++ return;
++
++ /*configure the CSC registers */
++ hdmi_writeb(interpolation | decimation, HDMI_CSC_CFG);
++ val = hdmi_readb(HDMI_CSC_SCALE);
++ val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK;
++ val |= color_depth;
++ hdmi_writeb(val, HDMI_CSC_SCALE);
++
++ update_csc_coeffs(hdmi);
++}
++
++/*!
++ * HDMI video packetizer is used to packetize the data.
++ * for example, if input is YCC422 mode or repeater is used,
++ * data should be repacked this module can be bypassed.
++ */
++static void hdmi_video_packetize(struct mxc_hdmi *hdmi)
++{
++ unsigned int color_depth = 0;
++ unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
++ unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
++ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
++ u8 val;
++
++ if (hdmi_data->enc_out_format == RGB
++ || hdmi_data->enc_out_format == YCBCR444) {
++ if (hdmi_data->enc_color_depth == 0)
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
++ else if (hdmi_data->enc_color_depth == 8) {
++ color_depth = 4;
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
++ } else if (hdmi_data->enc_color_depth == 10)
++ color_depth = 5;
++ else if (hdmi_data->enc_color_depth == 12)
++ color_depth = 6;
++ else if (hdmi_data->enc_color_depth == 16)
++ color_depth = 7;
++ else
++ return;
++ } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) {
++ if (hdmi_data->enc_color_depth == 0 ||
++ hdmi_data->enc_color_depth == 8)
++ remap_size = HDMI_VP_REMAP_YCC422_16bit;
++ else if (hdmi_data->enc_color_depth == 10)
++ remap_size = HDMI_VP_REMAP_YCC422_20bit;
++ else if (hdmi_data->enc_color_depth == 12)
++ remap_size = HDMI_VP_REMAP_YCC422_24bit;
++ else
++ return;
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
++ } else
++ return;
++
++ /* HDMI not support deep color,
++ * because IPU MAX support color depth is 24bit */
++ color_depth = 0;
++
++ /* set the packetizer registers */
++ val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
++ HDMI_VP_PR_CD_COLOR_DEPTH_MASK) |
++ ((hdmi_data->pix_repet_factor <<
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) &
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
++ hdmi_writeb(val, HDMI_VP_PR_CD);
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK;
++ val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ /* Data from pixel repeater block */
++ if (hdmi_data->pix_repet_factor > 1) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
++ HDMI_VP_CONF_BYPASS_SELECT_MASK);
++ val |= HDMI_VP_CONF_PR_EN_ENABLE |
++ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else { /* data from packetizer block */
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
++ HDMI_VP_CONF_BYPASS_SELECT_MASK);
++ val |= HDMI_VP_CONF_PR_EN_DISABLE |
++ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ }
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK;
++ val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ hdmi_writeb(remap_size, HDMI_VP_REMAP);
++
++ if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
++ HDMI_VP_CONF_PP_EN_ENABLE |
++ HDMI_VP_CONF_YCC422_EN_DISABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
++ HDMI_VP_CONF_PP_EN_DISABLE |
++ HDMI_VP_CONF_YCC422_EN_ENABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_ENABLE |
++ HDMI_VP_CONF_PP_EN_DISABLE |
++ HDMI_VP_CONF_YCC422_EN_DISABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else {
++ return;
++ }
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK |
++ HDMI_VP_STUFF_YCC422_STUFFING_MASK);
++ val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
++ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK;
++ val |= output_select;
++ hdmi_writeb(val, HDMI_VP_CONF);
++}
++
++#if 0
++/* Force a fixed color screen */
++static void hdmi_video_force_output(struct mxc_hdmi *hdmi, unsigned char force)
++{
++ u8 val;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (force) {
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS2); /* R */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS1); /* G */
++ hdmi_writeb(0xFF, HDMI_FC_DBGTMDS0); /* B */
++ val = hdmi_readb(HDMI_FC_DBGFORCE);
++ val |= HDMI_FC_DBGFORCE_FORCEVIDEO;
++ hdmi_writeb(val, HDMI_FC_DBGFORCE);
++ } else {
++ val = hdmi_readb(HDMI_FC_DBGFORCE);
++ val &= ~HDMI_FC_DBGFORCE_FORCEVIDEO;
++ hdmi_writeb(val, HDMI_FC_DBGFORCE);
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS2); /* R */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS1); /* G */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS0); /* B */
++ }
++}
++#endif
++
++static inline void hdmi_phy_test_clear(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTCLR_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) &
++ HDMI_PHY_TST0_TSTCLR_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_enable(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTEN_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) &
++ HDMI_PHY_TST0_TSTEN_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_clock(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTCLK_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) &
++ HDMI_PHY_TST0_TSTCLK_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_din(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ hdmi_writeb(bit, HDMI_PHY_TST1);
++}
++
++static inline void hdmi_phy_test_dout(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ hdmi_writeb(bit, HDMI_PHY_TST2);
++}
++
++static bool hdmi_phy_wait_i2c_done(struct mxc_hdmi *hdmi, int msec)
++{
++ unsigned char val = 0;
++ val = hdmi_readb(HDMI_IH_I2CMPHY_STAT0) & 0x3;
++ while (val == 0) {
++ udelay(1000);
++ if (msec-- == 0)
++ return false;
++ val = hdmi_readb(HDMI_IH_I2CMPHY_STAT0) & 0x3;
++ }
++ return true;
++}
++
++static void hdmi_phy_i2c_write(struct mxc_hdmi *hdmi, unsigned short data,
++ unsigned char addr)
++{
++ hdmi_writeb(0xFF, HDMI_IH_I2CMPHY_STAT0);
++ hdmi_writeb(addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
++ hdmi_writeb((unsigned char)(data >> 8),
++ HDMI_PHY_I2CM_DATAO_1_ADDR);
++ hdmi_writeb((unsigned char)(data >> 0),
++ HDMI_PHY_I2CM_DATAO_0_ADDR);
++ hdmi_writeb(HDMI_PHY_I2CM_OPERATION_ADDR_WRITE,
++ HDMI_PHY_I2CM_OPERATION_ADDR);
++ hdmi_phy_wait_i2c_done(hdmi, 1000);
++}
++
++#if 0
++static unsigned short hdmi_phy_i2c_read(struct mxc_hdmi *hdmi,
++ unsigned char addr)
++{
++ unsigned short data;
++ unsigned char msb = 0, lsb = 0;
++ hdmi_writeb(0xFF, HDMI_IH_I2CMPHY_STAT0);
++ hdmi_writeb(addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
++ hdmi_writeb(HDMI_PHY_I2CM_OPERATION_ADDR_READ,
++ HDMI_PHY_I2CM_OPERATION_ADDR);
++ hdmi_phy_wait_i2c_done(hdmi, 1000);
++ msb = hdmi_readb(HDMI_PHY_I2CM_DATAI_1_ADDR);
++ lsb = hdmi_readb(HDMI_PHY_I2CM_DATAI_0_ADDR);
++ data = (msb << 8) | lsb;
++ return data;
++}
++
++static int hdmi_phy_i2c_write_verify(struct mxc_hdmi *hdmi, unsigned short data,
++ unsigned char addr)
++{
++ unsigned short val = 0;
++ hdmi_phy_i2c_write(hdmi, data, addr);
++ val = hdmi_phy_i2c_read(hdmi, addr);
++ return (val == data);
++}
++#endif
++
++static bool hdmi_edid_wait_i2c_done(struct mxc_hdmi *hdmi, int msec)
++{
++ unsigned char val = 0;
++ val = hdmi_readb(HDMI_IH_I2CM_STAT0) & 0x2;
++ while (val == 0) {
++
++ udelay(1000);
++ if (msec-- == 0) {
++ dev_dbg(&hdmi->pdev->dev,
++ "HDMI EDID i2c operation time out!!\n");
++ return false;
++ }
++ val = hdmi_readb(HDMI_IH_I2CM_STAT0) & 0x2;
++ }
++ return true;
++}
++
++static u8 hdmi_edid_i2c_read(struct mxc_hdmi *hdmi,
++ u8 addr, u8 blockno)
++{
++ u8 spointer = blockno / 2;
++ u8 edidaddress = ((blockno % 2) * 0x80) + addr;
++ u8 data;
++
++ hdmi_writeb(0xFF, HDMI_IH_I2CM_STAT0);
++ hdmi_writeb(edidaddress, HDMI_I2CM_ADDRESS);
++ hdmi_writeb(spointer, HDMI_I2CM_SEGADDR);
++ if (spointer == 0)
++ hdmi_writeb(HDMI_I2CM_OPERATION_READ,
++ HDMI_I2CM_OPERATION);
++ else
++ hdmi_writeb(HDMI_I2CM_OPERATION_READ_EXT,
++ HDMI_I2CM_OPERATION);
++
++ hdmi_edid_wait_i2c_done(hdmi, 1000);
++ data = hdmi_readb(HDMI_I2CM_DATAI);
++ hdmi_writeb(0xFF, HDMI_IH_I2CM_STAT0);
++ return data;
++}
++
++
++/* "Power-down enable (active low)"
++ * That mean that power up == 1! */
++static void mxc_hdmi_phy_enable_power(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_PDZ_OFFSET,
++ HDMI_PHY_CONF0_PDZ_MASK);
++}
++
++static void mxc_hdmi_phy_enable_tmds(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_ENTMDS_OFFSET,
++ HDMI_PHY_CONF0_ENTMDS_MASK);
++}
++
++static void mxc_hdmi_phy_gen2_pddq(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET,
++ HDMI_PHY_CONF0_GEN2_PDDQ_MASK);
++}
++
++static void mxc_hdmi_phy_gen2_txpwron(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK);
++}
++
++#if 0
++static void mxc_hdmi_phy_gen2_enhpdrxsense(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK);
++}
++#endif
++
++static void mxc_hdmi_phy_sel_data_en_pol(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET,
++ HDMI_PHY_CONF0_SELDATAENPOL_MASK);
++}
++
++static void mxc_hdmi_phy_sel_interface_control(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_SELDIPIF_OFFSET,
++ HDMI_PHY_CONF0_SELDIPIF_MASK);
++}
++
++static int hdmi_phy_configure(struct mxc_hdmi *hdmi, unsigned char pRep,
++ unsigned char cRes, int cscOn)
++{
++ u8 val;
++ u8 msec;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* color resolution 0 is 8 bit colour depth */
++ if (cRes == 0)
++ cRes = 8;
++
++ if (pRep != 0)
++ return false;
++ else if (cRes != 8 && cRes != 12)
++ return false;
++
++ /* Enable csc path */
++ if (cscOn)
++ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
++ else
++ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS;
++
++ hdmi_writeb(val, HDMI_MC_FLOWCTRL);
++
++ /* gen2 tx power off */
++ mxc_hdmi_phy_gen2_txpwron(0);
++
++ /* gen2 pddq */
++ mxc_hdmi_phy_gen2_pddq(1);
++
++ /* PHY reset */
++ hdmi_writeb(HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
++ hdmi_writeb(HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ);
++
++ hdmi_writeb(HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
++
++ hdmi_phy_test_clear(hdmi, 1);
++ hdmi_writeb(HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2,
++ HDMI_PHY_I2CM_SLAVE_ADDR);
++ hdmi_phy_test_clear(hdmi, 0);
++
++ if (hdmi->hdmi_data.video_mode.mPixelClock < 0) {
++ dev_dbg(&hdmi->pdev->dev, "Pixel clock (%d) must be positive\n",
++ hdmi->hdmi_data.video_mode.mPixelClock);
++ return false;
++ }
++
++ if (hdmi->hdmi_data.video_mode.mPixelClock <= 45250000) {
++ switch (cRes) {
++ case 8:
++ /* PLL/MPLL Cfg */
++ hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 92500000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x0140, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x2141, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x4142, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 148500000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ default:
++ return false;
++ }
++ } else {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x2001, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x4002, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
++ default:
++ return false;
++ }
++ }
++
++ if (hdmi->hdmi_data.video_mode.mPixelClock <= 54000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 58400000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 72000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 74250000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 118800000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 216000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else {
++ dev_err(&hdmi->pdev->dev,
++ "Pixel clock %d - unsupported by HDMI\n",
++ hdmi->hdmi_data.video_mode.mPixelClock);
++ return false;
++ }
++
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
++ hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
++ /* RESISTANCE TERM 133Ohm Cfg */
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x19); /* TXTERM */
++ /* PREEMP Cgf 0.00 */
++ hdmi_phy_i2c_write(hdmi, 0x800d, 0x09); /* CKSYMTXCTRL */
++ /* TX/CK LVL 10 */
++ hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
++
++ /* Board specific setting for PHY register 0x09, 0x0e to pass HCT */
++ if (hdmi->phy_config.reg_cksymtx != 0)
++ hdmi_phy_i2c_write(hdmi, hdmi->phy_config.reg_cksymtx, 0x09);
++
++ if (hdmi->phy_config.reg_vlev != 0)
++ hdmi_phy_i2c_write(hdmi, hdmi->phy_config.reg_vlev, 0x0E);
++
++ /* REMOVE CLK TERM */
++ hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
++
++ if (hdmi->hdmi_data.video_mode.mPixelClock > 148500000) {
++ hdmi_phy_i2c_write(hdmi, 0x800b, 0x09);
++ hdmi_phy_i2c_write(hdmi, 0x0129, 0x0E);
++ }
++
++ mxc_hdmi_phy_enable_power(1);
++
++ /* toggle TMDS enable */
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_tmds(1);
++
++ /* gen2 tx power on */
++ mxc_hdmi_phy_gen2_txpwron(1);
++ mxc_hdmi_phy_gen2_pddq(0);
++
++ /*Wait for PHY PLL lock */
++ msec = 4;
++ val = hdmi_readb(HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
++ while (val == 0) {
++ udelay(1000);
++ if (msec-- == 0) {
++ dev_dbg(&hdmi->pdev->dev, "PHY PLL not locked\n");
++ return false;
++ }
++ val = hdmi_readb(HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
++ }
++
++ return true;
++}
++
++static void mxc_hdmi_phy_init(struct mxc_hdmi *hdmi)
++{
++ int i;
++ bool cscon = false;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Never do phy init if pixel clock is gated.
++ * Otherwise HDMI PHY will get messed up and generate an overflow
++ * interrupt that can't be cleared or detected by accessing the
++ * status register. */
++ if (!hdmi->fb_reg || !hdmi->cable_plugin
++ || (hdmi->blank != FB_BLANK_UNBLANK))
++ return;
++
++ /*check csc whether needed activated in HDMI mode */
++ cscon = (isColorSpaceConversion(hdmi) &&
++ !hdmi->hdmi_data.video_mode.mDVI);
++
++ /* HDMI Phy spec says to do the phy initialization sequence twice */
++ for (i = 0 ; i < 2 ; i++) {
++ mxc_hdmi_phy_sel_data_en_pol(1);
++ mxc_hdmi_phy_sel_interface_control(0);
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_power(0);
++
++ /* Enable CSC */
++ hdmi_phy_configure(hdmi, 0, 8, cscon);
++ }
++
++ hdmi->phy_enabled = true;
++ if (!hdmi->hdmi_data.video_mode.mDVI)
++ hdmi_enable_overflow_interrupts();
++}
++
++static void hdmi_config_AVI(struct mxc_hdmi *hdmi)
++{
++ u8 val;
++ u8 pix_fmt;
++ u8 under_scan;
++ u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
++ struct fb_videomode mode;
++ const struct fb_videomode *edid_mode;
++ bool aspect_16_9;
++
++ dev_dbg(&hdmi->pdev->dev, "set up AVI frame\n");
++
++ fb_var_to_videomode(&mode, &hdmi->fbi->var);
++ /* Use mode from list extracted from EDID to get aspect ratio */
++ if (!list_empty(&hdmi->fbi->modelist)) {
++ edid_mode = fb_find_nearest_mode(&mode, &hdmi->fbi->modelist);
++ if (edid_mode->vmode & FB_VMODE_ASPECT_16_9)
++ aspect_16_9 = true;
++ else
++ aspect_16_9 = false;
++ } else
++ aspect_16_9 = false;
++
++ /********************************************
++ * AVI Data Byte 1
++ ********************************************/
++ if (hdmi->hdmi_data.enc_out_format == YCBCR444)
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
++ else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
++ else
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
++
++ if (hdmi->edid_cfg.cea_underscan)
++ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN;
++ else
++ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
++
++ /*
++ * Active format identification data is present in the AVI InfoFrame.
++ * Under scan info, no bar data
++ */
++ val = pix_fmt | under_scan |
++ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
++ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
++
++ hdmi_writeb(val, HDMI_FC_AVICONF0);
++
++ /********************************************
++ * AVI Data Byte 2
++ ********************************************/
++
++ /* Set the Aspect Ratio */
++ if (aspect_16_9) {
++ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
++ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
++ } else {
++ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
++ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
++ }
++
++ /* Set up colorimetry */
++ if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
++ if (hdmi->hdmi_data.colorimetry == eITU601)
++ ext_colorimetry =
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ else /* hdmi->hdmi_data.colorimetry == eITU709 */
++ ext_colorimetry =
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
++ } else if (hdmi->hdmi_data.enc_out_format != RGB) {
++ if (hdmi->hdmi_data.colorimetry == eITU601)
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
++ else /* hdmi->hdmi_data.colorimetry == eITU709 */
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
++ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ } else { /* Carries no data */
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
++ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ }
++
++ val = colorimetry | coded_ratio | act_ratio;
++ hdmi_writeb(val, HDMI_FC_AVICONF1);
++
++ /********************************************
++ * AVI Data Byte 3
++ ********************************************/
++
++ val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
++ hdmi->hdmi_data.rgb_quant_range |
++ HDMI_FC_AVICONF2_SCALING_NONE;
++ hdmi_writeb(val, HDMI_FC_AVICONF2);
++
++ /********************************************
++ * AVI Data Byte 4
++ ********************************************/
++ hdmi_writeb(hdmi->vic, HDMI_FC_AVIVID);
++
++ /********************************************
++ * AVI Data Byte 5
++ ********************************************/
++
++ /* Set up input and output pixel repetition */
++ val = (((hdmi->hdmi_data.video_mode.mPixelRepetitionInput + 1) <<
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET) &
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK) |
++ ((hdmi->hdmi_data.video_mode.mPixelRepetitionOutput <<
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET) &
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
++ hdmi_writeb(val, HDMI_FC_PRCONF);
++
++ /* IT Content and quantization range = don't care */
++ val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
++ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
++ hdmi_writeb(val, HDMI_FC_AVICONF3);
++
++ /********************************************
++ * AVI Data Bytes 6-13
++ ********************************************/
++ hdmi_writeb(0, HDMI_FC_AVIETB0);
++ hdmi_writeb(0, HDMI_FC_AVIETB1);
++ hdmi_writeb(0, HDMI_FC_AVISBB0);
++ hdmi_writeb(0, HDMI_FC_AVISBB1);
++ hdmi_writeb(0, HDMI_FC_AVIELB0);
++ hdmi_writeb(0, HDMI_FC_AVIELB1);
++ hdmi_writeb(0, HDMI_FC_AVISRB0);
++ hdmi_writeb(0, HDMI_FC_AVISRB1);
++}
++
++/*!
++ * this submodule is responsible for the video/audio data composition.
++ */
++static void hdmi_av_composer(struct mxc_hdmi *hdmi)
++{
++ u8 inv_val;
++ struct fb_info *fbi = hdmi->fbi;
++ struct fb_videomode fb_mode;
++ struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
++ int hblank, vblank;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_var_to_videomode(&fb_mode, &fbi->var);
++
++ vmode->mHSyncPolarity = ((fb_mode.sync & FB_SYNC_HOR_HIGH_ACT) != 0);
++ vmode->mVSyncPolarity = ((fb_mode.sync & FB_SYNC_VERT_HIGH_ACT) != 0);
++ vmode->mInterlaced = ((fb_mode.vmode & FB_VMODE_INTERLACED) != 0);
++ vmode->mPixelClock = (fb_mode.xres + fb_mode.left_margin +
++ fb_mode.right_margin + fb_mode.hsync_len) * (fb_mode.yres +
++ fb_mode.upper_margin + fb_mode.lower_margin +
++ fb_mode.vsync_len) * fb_mode.refresh;
++
++ dev_dbg(&hdmi->pdev->dev, "final pixclk = %d\n", vmode->mPixelClock);
++
++ /* Set up HDMI_FC_INVIDCONF */
++ inv_val = (vmode->mVSyncPolarity ?
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
++
++ inv_val |= (vmode->mHSyncPolarity ?
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
++
++ inv_val |= (vmode->mDataEnablePolarity ?
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW);
++
++ if (hdmi->vic == 39)
++ inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
++ else
++ inv_val |= (vmode->mInterlaced ?
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
++
++ inv_val |= (vmode->mInterlaced ?
++ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
++ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
++
++ inv_val |= (vmode->mDVI ?
++ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
++ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
++
++ hdmi_writeb(inv_val, HDMI_FC_INVIDCONF);
++
++ /* Set up horizontal active pixel region width */
++ hdmi_writeb(fb_mode.xres >> 8, HDMI_FC_INHACTV1);
++ hdmi_writeb(fb_mode.xres, HDMI_FC_INHACTV0);
++
++ /* Set up vertical blanking pixel region width */
++ hdmi_writeb(fb_mode.yres >> 8, HDMI_FC_INVACTV1);
++ hdmi_writeb(fb_mode.yres, HDMI_FC_INVACTV0);
++
++ /* Set up horizontal blanking pixel region width */
++ hblank = fb_mode.left_margin + fb_mode.right_margin +
++ fb_mode.hsync_len;
++ hdmi_writeb(hblank >> 8, HDMI_FC_INHBLANK1);
++ hdmi_writeb(hblank, HDMI_FC_INHBLANK0);
++
++ /* Set up vertical blanking pixel region width */
++ vblank = fb_mode.upper_margin + fb_mode.lower_margin +
++ fb_mode.vsync_len;
++ hdmi_writeb(vblank, HDMI_FC_INVBLANK);
++
++ /* Set up HSYNC active edge delay width (in pixel clks) */
++ hdmi_writeb(fb_mode.right_margin >> 8, HDMI_FC_HSYNCINDELAY1);
++ hdmi_writeb(fb_mode.right_margin, HDMI_FC_HSYNCINDELAY0);
++
++ /* Set up VSYNC active edge delay (in pixel clks) */
++ hdmi_writeb(fb_mode.lower_margin, HDMI_FC_VSYNCINDELAY);
++
++ /* Set up HSYNC active pulse width (in pixel clks) */
++ hdmi_writeb(fb_mode.hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1);
++ hdmi_writeb(fb_mode.hsync_len, HDMI_FC_HSYNCINWIDTH0);
++
++ /* Set up VSYNC active edge delay (in pixel clks) */
++ hdmi_writeb(fb_mode.vsync_len, HDMI_FC_VSYNCINWIDTH);
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
++static int mxc_edid_read_internal(struct mxc_hdmi *hdmi, unsigned char *edid,
++ struct mxc_edid_cfg *cfg, struct fb_info *fbi)
++{
++ int extblknum;
++ int i, j, ret;
++ unsigned char *ediddata = edid;
++ unsigned char tmpedid[EDID_LENGTH];
++
++ dev_info(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (!edid || !cfg || !fbi)
++ return -EINVAL;
++
++ /* init HDMI I2CM for read edid*/
++ hdmi_writeb(0x0, HDMI_I2CM_DIV);
++ hdmi_writeb(0x00, HDMI_I2CM_SS_SCL_HCNT_1_ADDR);
++ hdmi_writeb(0x79, HDMI_I2CM_SS_SCL_HCNT_0_ADDR);
++ hdmi_writeb(0x00, HDMI_I2CM_SS_SCL_LCNT_1_ADDR);
++ hdmi_writeb(0x91, HDMI_I2CM_SS_SCL_LCNT_0_ADDR);
++
++ hdmi_writeb(0x00, HDMI_I2CM_FS_SCL_HCNT_1_ADDR);
++ hdmi_writeb(0x0F, HDMI_I2CM_FS_SCL_HCNT_0_ADDR);
++ hdmi_writeb(0x00, HDMI_I2CM_FS_SCL_LCNT_1_ADDR);
++ hdmi_writeb(0x21, HDMI_I2CM_FS_SCL_LCNT_0_ADDR);
++
++ hdmi_writeb(0x50, HDMI_I2CM_SLAVE);
++ hdmi_writeb(0x30, HDMI_I2CM_SEGADDR);
++
++ /* Umask edid interrupt */
++ hdmi_writeb(HDMI_I2CM_INT_DONE_POL,
++ HDMI_I2CM_INT);
++
++ hdmi_writeb(HDMI_I2CM_CTLINT_NAC_POL |
++ HDMI_I2CM_CTLINT_ARBITRATION_POL,
++ HDMI_I2CM_CTLINT);
++
++ /* reset edid data zero */
++ memset(edid, 0, EDID_LENGTH*4);
++ memset(cfg, 0, sizeof(struct mxc_edid_cfg));
++
++ /* Check first three byte of EDID head */
++ if (!(hdmi_edid_i2c_read(hdmi, 0, 0) == 0x00) ||
++ !(hdmi_edid_i2c_read(hdmi, 1, 0) == 0xFF) ||
++ !(hdmi_edid_i2c_read(hdmi, 2, 0) == 0xFF)) {
++ dev_info(&hdmi->pdev->dev, "EDID head check failed!");
++ return -ENOENT;
++ }
++
++ for (i = 0; i < 128; i++) {
++ *ediddata = hdmi_edid_i2c_read(hdmi, i, 0);
++ ediddata++;
++ }
++
++ extblknum = edid[0x7E];
++ if (extblknum < 0)
++ return extblknum;
++
++ if (extblknum) {
++ ediddata = edid + EDID_LENGTH;
++ for (i = 0; i < 128; i++) {
++ *ediddata = hdmi_edid_i2c_read(hdmi, i, 1);
++ ediddata++;
++ }
++ }
++
++ /* edid first block parsing */
++ memset(&fbi->monspecs, 0, sizeof(fbi->monspecs));
++ fb_edid_to_monspecs(edid, &fbi->monspecs);
++
++ ret = mxc_edid_parse_ext_blk(edid + EDID_LENGTH,
++ cfg, &fbi->monspecs);
++ if (ret < 0) {
++ fb_edid_add_monspecs(edid + EDID_LENGTH, &fbi->monspecs);
++ if (fbi->monspecs.modedb_len > 0)
++ hdmi->edid_cfg.hdmi_cap = false;
++ else
++ return -ENOENT;
++ }
++
++ /* need read segment block? */
++ if (extblknum > 1) {
++ for (j = 1; j <= extblknum; j++) {
++ for (i = 0; i < 128; i++)
++ *(tmpedid + 1) = hdmi_edid_i2c_read(hdmi, i, j);
++
++ /* edid ext block parsing */
++ ret = mxc_edid_parse_ext_blk(tmpedid + EDID_LENGTH,
++ cfg, &fbi->monspecs);
++ if (ret < 0)
++ return -ENOENT;
++ }
++ }
++
++ return 0;
++}
++
++static int mxc_hdmi_read_edid(struct mxc_hdmi *hdmi)
++{
++ int ret;
++ u8 edid_old[HDMI_EDID_LEN];
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* save old edid */
++ memcpy(edid_old, hdmi->edid, HDMI_EDID_LEN);
++
++ /* Read EDID via HDMI DDC when HDCP Enable */
++ if (!hdcp_init)
++ ret = mxc_edid_read(hdmi_i2c->adapter, hdmi_i2c->addr,
++ hdmi->edid, &hdmi->edid_cfg, hdmi->fbi);
++ else {
++
++ /* Disable HDCP clk */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++
++ ret = mxc_edid_read_internal(hdmi, hdmi->edid,
++ &hdmi->edid_cfg, hdmi->fbi);
++
++ /* Enable HDCP clk */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis &= ~HDMI_MC_CLKDIS_HDCPCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++
++ }
++
++ if (ret < 0)
++ return HDMI_EDID_FAIL;
++
++ dev_info(&hdmi->pdev->dev, "%s HDMI in %s mode\n", __func__, hdmi->edid_cfg.hdmi_cap?"HDMI":"DVI");
++ hdmi->plug_event = hdmi->edid_cfg.hdmi_cap?HDMI_IH_PHY_STAT0_HPD:HDMI_DVI_IH_STAT;
++ hdmi->plug_mask = hdmi->edid_cfg.hdmi_cap?HDMI_PHY_HPD:HDMI_DVI_STAT;
++
++ if (!memcmp(edid_old, hdmi->edid, HDMI_EDID_LEN)) {
++ dev_info(&hdmi->pdev->dev, "same edid\n");
++ return HDMI_EDID_SAME;
++ }
++
++ if (hdmi->fbi->monspecs.modedb_len == 0) {
++ dev_info(&hdmi->pdev->dev, "No modes read from edid\n");
++ return HDMI_EDID_NO_MODES;
++ }
++
++ return HDMI_EDID_SUCCESS;
++}
++
++static void mxc_hdmi_phy_disable(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (!hdmi->phy_enabled)
++ return;
++
++ hdmi_disable_overflow_interrupts();
++
++ /* Setting PHY to reset status */
++ hdmi_writeb(HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
++
++ /* Power down PHY */
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_power(0);
++ mxc_hdmi_phy_gen2_txpwron(0);
++ mxc_hdmi_phy_gen2_pddq(1);
++
++ hdmi->phy_enabled = false;
++ dev_dbg(&hdmi->pdev->dev, "%s - exit\n", __func__);
++}
++
++/* HDMI Initialization Step B.4 */
++static void mxc_hdmi_enable_video_path(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* control period minimum duration */
++ hdmi_writeb(12, HDMI_FC_CTRLDUR);
++ hdmi_writeb(32, HDMI_FC_EXCTRLDUR);
++ hdmi_writeb(1, HDMI_FC_EXCTRLSPAC);
++
++ /* Set to fill TMDS data channels */
++ hdmi_writeb(0x0B, HDMI_FC_CH0PREAM);
++ hdmi_writeb(0x16, HDMI_FC_CH1PREAM);
++ hdmi_writeb(0x21, HDMI_FC_CH2PREAM);
++
++ /* Save CEC clock */
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS) & HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ clkdis |= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++
++ /* Enable pixel clock and tmds data path */
++ clkdis = 0x7F & clkdis;
++ clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++
++ clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++
++ /* Enable csc path */
++ if (isColorSpaceConversion(hdmi) && !hdmi->hdmi_data.video_mode.mDVI) {
++ clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++}
++
++static void hdmi_enable_audio_clk(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++}
++
++/* Workaround to clear the overflow condition */
++static void mxc_hdmi_clear_overflow(struct mxc_hdmi *hdmi)
++{
++ int count;
++ u8 val;
++
++ /* TMDS software reset */
++ hdmi_writeb((u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ);
++
++ val = hdmi_readb(HDMI_FC_INVIDCONF);
++
++ if (cpu_is_imx6dl(hdmi)) {
++ hdmi_writeb(val, HDMI_FC_INVIDCONF);
++ return;
++ }
++
++ for (count = 0 ; count < 5 ; count++)
++ hdmi_writeb(val, HDMI_FC_INVIDCONF);
++}
++
++static void hdmi_enable_overflow_interrupts(void)
++{
++ pr_debug("%s\n", __func__);
++ hdmi_writeb(0, HDMI_FC_MASK2);
++ hdmi_writeb(0, HDMI_IH_MUTE_FC_STAT2);
++}
++
++static void hdmi_disable_overflow_interrupts(void)
++{
++ pr_debug("%s\n", __func__);
++ hdmi_writeb(HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
++ HDMI_IH_MUTE_FC_STAT2);
++ hdmi_writeb(0xff, HDMI_FC_MASK2);
++}
++
++static void mxc_hdmi_notify_fb(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Don't notify if we aren't registered yet */
++ WARN_ON(!hdmi->fb_reg);
++
++ /* disable the phy before ipu changes mode */
++ mxc_hdmi_phy_disable(hdmi);
++
++ /*
++ * Note that fb_set_var will block. During this time,
++ * FB_EVENT_MODE_CHANGE callback will happen.
++ * So by the end of this function, mxc_hdmi_setup()
++ * will be done.
++ */
++ hdmi->fbi->var.activate |= FB_ACTIVATE_FORCE;
++ console_lock();
++ hdmi->fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_set_var(hdmi->fbi, &hdmi->fbi->var);
++ hdmi->fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
++static void mxc_hdmi_edid_rebuild_modelist(struct mxc_hdmi *hdmi)
++{
++ int i;
++ struct fb_videomode *mode;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++ fb_add_videomode(&vga_mode, &hdmi->fbi->modelist);
++
++ for (i = 0; i < hdmi->fbi->monspecs.modedb_len; i++) {
++ /*
++ * We might check here if mode is supported by HDMI.
++ * We do not currently support interlaced modes.
++ * And add CEA modes in the modelist.
++ */
++ mode = &hdmi->fbi->monspecs.modedb[i];
++
++ if ((mode->vmode & FB_VMODE_INTERLACED) ||
++ (hdmi->edid_cfg.hdmi_cap &&
++ (mxc_edid_mode_to_vic(mode) == 0)))
++ continue;
++
++ dev_dbg(&hdmi->pdev->dev, "Added mode %d:", i);
++ dev_dbg(&hdmi->pdev->dev,
++ "xres = %d, yres = %d, freq = %d, vmode = %d, flag = %d\n",
++ hdmi->fbi->monspecs.modedb[i].xres,
++ hdmi->fbi->monspecs.modedb[i].yres,
++ hdmi->fbi->monspecs.modedb[i].refresh,
++ hdmi->fbi->monspecs.modedb[i].vmode,
++ hdmi->fbi->monspecs.modedb[i].flag);
++
++ fb_add_videomode(mode, &hdmi->fbi->modelist);
++ }
++
++ console_unlock();
++}
++
++static void mxc_hdmi_default_edid_cfg(struct mxc_hdmi *hdmi)
++{
++ /* Default setting HDMI working in HDMI mode */
++ hdmi->edid_cfg.hdmi_cap = true;
++}
++
++static void mxc_hdmi_default_modelist(struct mxc_hdmi *hdmi)
++{
++ u32 i;
++ const struct fb_videomode *mode;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* If not EDID data read, set up default modelist */
++ dev_info(&hdmi->pdev->dev, "No modes read from edid\n");
++ dev_info(&hdmi->pdev->dev, "create default modelist\n");
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++
++ /*Add all no interlaced CEA mode to default modelist */
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ mode = &mxc_cea_mode[i];
++ if (!(mode->vmode & FB_VMODE_INTERLACED) && (mode->xres != 0))
++ fb_add_videomode(mode, &hdmi->fbi->modelist);
++ }
++
++ console_unlock();
++}
++
++static void mxc_hdmi_set_mode_to_vga_dvi(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ hdmi_disable_overflow_interrupts();
++
++ fb_videomode_to_var(&hdmi->fbi->var, &vga_mode);
++
++ hdmi->requesting_vga_for_initialization = true;
++ mxc_hdmi_notify_fb(hdmi);
++ hdmi->requesting_vga_for_initialization = false;
++}
++
++static void mxc_hdmi_set_mode(struct mxc_hdmi *hdmi)
++{
++ const struct fb_videomode *mode;
++ struct fb_videomode m;
++ struct fb_var_screeninfo var;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Set the default mode only once. */
++ if (!hdmi->dft_mode_set) {
++ fb_videomode_to_var(&var, &hdmi->default_mode);
++ hdmi->dft_mode_set = true;
++ } else
++ fb_videomode_to_var(&var, &hdmi->previous_non_vga_mode);
++
++ fb_var_to_videomode(&m, &var);
++ dump_fb_videomode(&m);
++
++ mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ if (!mode) {
++ pr_err("%s: could not find mode in modelist\n", __func__);
++ return;
++ }
++
++ /* If video mode same as previous, init HDMI again */
++ if (fb_mode_is_equal(&hdmi->previous_non_vga_mode, mode)) {
++ dev_dbg(&hdmi->pdev->dev,
++ "%s: Video mode same as previous\n", __func__);
++ /* update fbi mode in case modelist is updated */
++ hdmi->fbi->mode = (struct fb_videomode *)mode;
++ /* update hdmi setting in case EDID data updated */
++ mxc_hdmi_setup(hdmi, 0);
++ } else {
++ dev_dbg(&hdmi->pdev->dev, "%s: New video mode\n", __func__);
++ mxc_hdmi_set_mode_to_vga_dvi(hdmi);
++ fb_videomode_to_var(&hdmi->fbi->var, mode);
++ dump_fb_videomode((struct fb_videomode *)mode);
++ mxc_hdmi_notify_fb(hdmi);
++ }
++
++}
++
++static void mxc_hdmi_cable_connected(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ hdmi->cable_plugin = true;
++
++ /* HDMI Initialization Step C */
++ hdmi->edid_status = mxc_hdmi_read_edid(hdmi);
++
++ /* Read EDID again if first EDID read failed */
++ if (hdmi->edid_status == HDMI_EDID_NO_MODES ||
++ hdmi->edid_status == HDMI_EDID_FAIL) {
++ dev_info(&hdmi->pdev->dev, "Read EDID again\n");
++ hdmi->edid_status = mxc_hdmi_read_edid(hdmi);
++ }
++
++ /* HDMI Initialization Steps D, E, F */
++ switch (hdmi->edid_status) {
++ case HDMI_EDID_SUCCESS:
++ mxc_hdmi_edid_rebuild_modelist(hdmi);
++ break;
++
++ /* Nothing to do if EDID same */
++ case HDMI_EDID_SAME:
++ break;
++
++ case HDMI_EDID_FAIL:
++ mxc_hdmi_default_edid_cfg(hdmi);
++ /* No break here */
++ case HDMI_EDID_NO_MODES:
++ default:
++ mxc_hdmi_default_modelist(hdmi);
++ break;
++ }
++
++ /* Save edid cfg for audio driver */
++ hdmi_set_edid_cfg(hdmi->edid_status, &hdmi->edid_cfg);
++
++ /* Setting video mode */
++ mxc_hdmi_set_mode(hdmi);
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
++static int mxc_hdmi_power_on(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++ mxc_hdmi_phy_init(hdmi);
++ return 0;
++}
++
++static void mxc_hdmi_power_off(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++ mxc_hdmi_phy_disable(hdmi);
++}
++
++static void mxc_hdmi_cable_disconnected(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Save CEC clock */
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS) & HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ clkdis |= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++
++ /* Disable All HDMI clock */
++ hdmi_writeb(0xff & clkdis, HDMI_MC_CLKDIS);
++
++ mxc_hdmi_phy_disable(hdmi);
++
++ hdmi_disable_overflow_interrupts();
++
++ hdmi->cable_plugin = false;
++}
++
++static void hotplug_worker(struct work_struct *work)
++{
++ struct mxc_hdmi *hdmi =
++ container_of(work, struct mxc_hdmi, hotplug_work);
++ u32 hdmi_phy_stat0, hdmi_phy_pol0, hdmi_phy_mask0;
++ unsigned long flags;
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++
++ hdmi_phy_stat0 = hdmi_readb(HDMI_PHY_STAT0);
++ hdmi_phy_pol0 = hdmi_readb(HDMI_PHY_POL0);
++
++ if (hdmi->latest_intr_stat & hdmi->plug_event) {
++ /* Make HPD intr active low to capture unplug event or
++ * active high to capture plugin event */
++ hdmi_writeb((hdmi->plug_mask & ~hdmi_phy_pol0), HDMI_PHY_POL0);
++
++ /* check cable status */
++ if (hdmi_phy_stat0 & hdmi->plug_mask) {
++ /* Plugin event */
++ dev_dbg(&hdmi->pdev->dev, "EVENT=plugin\n");
++ mxc_hdmi_cable_connected(hdmi);
++
++ sprintf(event_string, "EVENT=plugin");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++#ifdef CONFIG_MXC_HDMI_CEC
++ mxc_hdmi_cec_handle(0x80);
++#endif
++ hdmi_set_cable_state(1);
++ } else {
++ /* Plugout event */
++ dev_dbg(&hdmi->pdev->dev, "EVENT=plugout\n");
++ hdmi_set_cable_state(0);
++ mxc_hdmi_abort_stream();
++ mxc_hdmi_cable_disconnected(hdmi);
++
++ sprintf(event_string, "EVENT=plugout");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++#ifdef CONFIG_MXC_HDMI_CEC
++ mxc_hdmi_cec_handle(0x100);
++#endif
++ }
++ }
++
++ /* Lock here to ensure full powerdown sequence
++ * completed before next interrupt processed */
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ /* Re-enable HPD interrupts */
++ hdmi_phy_mask0 = hdmi_readb(HDMI_PHY_MASK0);
++ hdmi_phy_mask0 &= ~hdmi->plug_mask;
++ hdmi_writeb(hdmi_phy_mask0, HDMI_PHY_MASK0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ if (hdmi_readb(HDMI_IH_FC_STAT2) & HDMI_IH_FC_STAT2_OVERFLOW_MASK)
++ mxc_hdmi_clear_overflow(hdmi);
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++}
++
++static void hotplug_work_launch(unsigned long data)
++{
++ struct mxc_hdmi *hdmi = (struct mxc_hdmi *)data;
++ pr_debug("%s\n", __func__);
++ schedule_work(&hdmi->hotplug_work);
++}
++
++static void hdcp_hdp_worker(struct work_struct *work)
++{
++ struct delayed_work *delay_work = to_delayed_work(work);
++ struct mxc_hdmi *hdmi =
++ container_of(delay_work, struct mxc_hdmi, hdcp_hdp_work);
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++
++ /* HDCP interrupt */
++ sprintf(event_string, "EVENT=hdcpint");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++
++ /* Unmute interrupts in HDCP application*/
++}
++
++static irqreturn_t mxc_hdmi_hotplug(int irq, void *data)
++{
++ struct mxc_hdmi *hdmi = data;
++ u8 val, intr_stat;
++ unsigned long flags;
++
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ /* Check and clean packet overflow interrupt.*/
++ if (hdmi_readb(HDMI_IH_FC_STAT2) &
++ HDMI_IH_FC_STAT2_OVERFLOW_MASK) {
++ mxc_hdmi_clear_overflow(hdmi);
++
++ dev_dbg(&hdmi->pdev->dev, "Overflow interrupt received\n");
++ /* clear irq status */
++ hdmi_writeb(HDMI_IH_FC_STAT2_OVERFLOW_MASK,
++ HDMI_IH_FC_STAT2);
++ }
++
++ /*
++ * We could not disable the irq. Probably the audio driver
++ * has enabled it. Masking off the HDMI interrupts using
++ * HDMI registers.
++ */
++ /* Capture status - used in hotplug_worker ISR */
++ intr_stat = hdmi_readb(HDMI_IH_PHY_STAT0);
++ if (intr_stat & hdmi->plug_event) {
++
++ dev_dbg(&hdmi->pdev->dev, "Hotplug interrupt received\n");
++ dev_dbg(&hdmi->pdev->dev, "intr_stat %u plug_event %u\n", intr_stat, hdmi->plug_event);
++ hdmi->latest_intr_stat = intr_stat;
++
++ /* Mute interrupts until handled */
++
++ val = hdmi_readb(HDMI_IH_MUTE_PHY_STAT0);
++ val |= hdmi->plug_event;
++ hdmi_writeb(val, HDMI_IH_MUTE_PHY_STAT0);
++
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val |= hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ if(hdmi_inited) {
++ mod_timer(&hdmi->jitter_timer, jiffies + HZ);
++ }
++ }
++
++ /* Check HDCP interrupt state */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ val = hdmi_readb(HDMI_A_APIINTSTAT);
++ if (val != 0) {
++ /* Mute interrupts until interrupt handled */
++ val = 0xFF;
++ hdmi_writeb(val, HDMI_A_APIINTMSK);
++ schedule_delayed_work(&(hdmi->hdcp_hdp_work), msecs_to_jiffies(50));
++ }
++ }
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++ return IRQ_HANDLED;
++}
++
++static void mxc_hdmi_setup(struct mxc_hdmi *hdmi, unsigned long event)
++{
++ struct fb_videomode m;
++ const struct fb_videomode *edid_mode;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_var_to_videomode(&m, &hdmi->fbi->var);
++ dump_fb_videomode(&m);
++
++ dev_dbg(&hdmi->pdev->dev, "%s - video mode changed\n", __func__);
++
++ hdmi->vic = 0;
++ if (!hdmi->requesting_vga_for_initialization) {
++ /* Save mode if this isn't the result of requesting
++ * vga default. */
++ memcpy(&hdmi->previous_non_vga_mode, &m,
++ sizeof(struct fb_videomode));
++ if (!list_empty(&hdmi->fbi->modelist)) {
++ edid_mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ pr_debug("edid mode ");
++ dump_fb_videomode((struct fb_videomode *)edid_mode);
++ /* update fbi mode */
++ hdmi->fbi->mode = (struct fb_videomode *)edid_mode;
++ hdmi->vic = mxc_edid_mode_to_vic(edid_mode);
++ }
++ }
++
++ hdmi_disable_overflow_interrupts();
++
++ dev_dbg(&hdmi->pdev->dev, "CEA mode used vic=%d\n", hdmi->vic);
++ if (hdmi->edid_cfg.hdmi_cap || !hdmi->edid_status) {
++ hdmi_set_dvi_mode(0);
++ hdmi->hdmi_data.video_mode.mDVI = false;
++ } else {
++ hdmi_set_dvi_mode(1);
++ dev_dbg(&hdmi->pdev->dev, "CEA mode vic=%d work in DVI\n", hdmi->vic);
++ hdmi->hdmi_data.video_mode.mDVI = true;
++ }
++
++ if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
++ (hdmi->vic == 21) || (hdmi->vic == 22) ||
++ (hdmi->vic == 2) || (hdmi->vic == 3) ||
++ (hdmi->vic == 17) || (hdmi->vic == 18))
++ hdmi->hdmi_data.colorimetry = eITU601;
++ else
++ hdmi->hdmi_data.colorimetry = eITU709;
++
++ if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
++ (hdmi->vic == 12) || (hdmi->vic == 13) ||
++ (hdmi->vic == 14) || (hdmi->vic == 15) ||
++ (hdmi->vic == 25) || (hdmi->vic == 26) ||
++ (hdmi->vic == 27) || (hdmi->vic == 28) ||
++ (hdmi->vic == 29) || (hdmi->vic == 30) ||
++ (hdmi->vic == 35) || (hdmi->vic == 36) ||
++ (hdmi->vic == 37) || (hdmi->vic == 38))
++ hdmi->hdmi_data.video_mode.mPixelRepetitionOutput = 1;
++ else
++ hdmi->hdmi_data.video_mode.mPixelRepetitionOutput = 0;
++
++ hdmi->hdmi_data.video_mode.mPixelRepetitionInput = 0;
++
++ /* TODO: Get input format from IPU (via FB driver iface) */
++ hdmi->hdmi_data.enc_in_format = RGB;
++
++ hdmi->hdmi_data.enc_out_format = RGB;
++
++ /* YCbCr only enabled in HDMI mode */
++ if (!hdmi->hdmi_data.video_mode.mDVI &&
++ !hdmi->hdmi_data.rgb_out_enable) {
++ if (hdmi->edid_cfg.cea_ycbcr444)
++ hdmi->hdmi_data.enc_out_format = YCBCR444;
++ else if (hdmi->edid_cfg.cea_ycbcr422)
++ hdmi->hdmi_data.enc_out_format = YCBCR422_8BITS;
++ }
++
++ /* IPU not support depth color output */
++ hdmi->hdmi_data.enc_color_depth = 8;
++ hdmi->hdmi_data.pix_repet_factor = 0;
++ hdmi->hdmi_data.video_mode.mDataEnablePolarity = true;
++
++ /* HDMI Initialization Step B.1 */
++ hdmi_av_composer(hdmi);
++
++ /* HDMI Initializateion Step B.2 */
++ mxc_hdmi_phy_init(hdmi);
++
++ /* HDMI Initialization Step B.3 */
++ mxc_hdmi_enable_video_path(hdmi);
++
++ /* not for DVI mode */
++ if (hdmi->hdmi_data.video_mode.mDVI)
++ dev_dbg(&hdmi->pdev->dev, "%s DVI mode\n", __func__);
++ else {
++ dev_dbg(&hdmi->pdev->dev, "%s CEA mode\n", __func__);
++
++ /* HDMI Initialization Step E - Configure audio */
++ hdmi_clk_regenerator_update_pixel_clock(hdmi->fbi->var.pixclock);
++ hdmi_enable_audio_clk(hdmi);
++
++ /* HDMI Initialization Step F - Configure AVI InfoFrame */
++ hdmi_config_AVI(hdmi);
++ }
++
++ hdmi_video_packetize(hdmi);
++ hdmi_video_csc(hdmi);
++ hdmi_video_sample(hdmi);
++
++ mxc_hdmi_clear_overflow(hdmi);
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n\n", __func__);
++
++}
++
++/* Wait until we are registered to enable interrupts */
++static void mxc_hdmi_fb_registered(struct mxc_hdmi *hdmi)
++{
++ unsigned long flags;
++
++ if (hdmi->fb_reg)
++ return;
++
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ hdmi_writeb(HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
++ HDMI_PHY_I2CM_INT_ADDR);
++
++ hdmi_writeb(HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
++ HDMI_PHY_I2CM_CTLINT_ADDR);
++
++ /* enable cable hot plug irq */
++ hdmi_writeb(~hdmi->plug_mask, HDMI_PHY_MASK0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ hdmi->fb_reg = true;
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++
++}
++
++static int mxc_hdmi_fb_event(struct notifier_block *nb,
++ unsigned long val, void *v)
++{
++ struct fb_event *event = v;
++ struct mxc_hdmi *hdmi = container_of(nb, struct mxc_hdmi, nb);
++
++ if (strcmp(event->info->fix.id, hdmi->fbi->fix.id))
++ return 0;
++
++ switch (val) {
++ case FB_EVENT_FB_REGISTERED:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_FB_REGISTERED\n");
++ mxc_hdmi_fb_registered(hdmi);
++ hdmi_set_registered(1);
++ break;
++
++ case FB_EVENT_FB_UNREGISTERED:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_FB_UNREGISTERED\n");
++ hdmi->fb_reg = false;
++ hdmi_set_registered(0);
++ break;
++
++ case FB_EVENT_MODE_CHANGE:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_MODE_CHANGE\n");
++ if (hdmi->fb_reg)
++ mxc_hdmi_setup(hdmi, val);
++ break;
++
++ case FB_EVENT_BLANK:
++ if ((*((int *)event->data) == FB_BLANK_UNBLANK) &&
++ (*((int *)event->data) != hdmi->blank)) {
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_BLANK - UNBLANK\n");
++
++ hdmi->blank = *((int *)event->data);
++
++ /* Re-enable HPD interrupts */
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val &= ~hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ if (hdmi->fb_reg && hdmi->cable_plugin)
++ mxc_hdmi_setup(hdmi, val);
++ hdmi_set_blank_state(1);
++ } else if (*((int *)event->data) != hdmi->blank) {
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_BLANK - BLANK\n");
++ hdmi_set_blank_state(0);
++ mxc_hdmi_abort_stream();
++
++ mxc_hdmi_phy_disable(hdmi);
++
++ if(hdmi->plug_mask == HDMI_DVI_STAT) {
++ u8 val;
++ pr_info("In DVI Mode disable interrupts\n");
++ val = hdmi_readb(HDMI_IH_MUTE_PHY_STAT0);
++ val |= hdmi->plug_event;
++ hdmi_writeb(val, HDMI_IH_MUTE_PHY_STAT0);
++
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val |= hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ hdmi_set_dvi_mode(1);
++ }
++
++ hdmi->blank = *((int *)event->data);
++ } else
++ dev_dbg(&hdmi->pdev->dev,
++ "FB BLANK state no changed!\n");
++
++ break;
++
++ case FB_EVENT_SUSPEND:
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_SUSPEND\n");
++
++ if (hdmi->blank == FB_BLANK_UNBLANK) {
++ mxc_hdmi_phy_disable(hdmi);
++ clk_disable(hdmi->hdmi_iahb_clk);
++ clk_disable(hdmi->hdmi_isfr_clk);
++ }
++ break;
++
++ case FB_EVENT_RESUME:
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_RESUME\n");
++
++ if (hdmi->blank == FB_BLANK_UNBLANK) {
++ clk_enable(hdmi->hdmi_iahb_clk);
++ clk_enable(hdmi->hdmi_isfr_clk);
++ mxc_hdmi_phy_init(hdmi);
++ }
++ break;
++
++ }
++ return 0;
++}
++
++static void hdmi_init_route(struct mxc_hdmi *hdmi)
++{
++ uint32_t hdmi_mux_setting, reg;
++ int ipu_id, disp_id;
++
++ ipu_id = mxc_hdmi_ipu_id;
++ disp_id = mxc_hdmi_disp_id;
++
++ if ((ipu_id > 1) || (ipu_id < 0)) {
++ pr_err("Invalid IPU select for HDMI: %d. Set to 0\n", ipu_id);
++ ipu_id = 0;
++ }
++
++ if ((disp_id > 1) || (disp_id < 0)) {
++ pr_err("Invalid DI select for HDMI: %d. Set to 0\n", disp_id);
++ disp_id = 0;
++ }
++
++ reg = readl(hdmi->gpr_hdmi_base);
++
++ /* Configure the connection between IPU1/2 and HDMI */
++ hdmi_mux_setting = 2*ipu_id + disp_id;
++
++ /* GPR3, bits 2-3 = HDMI_MUX_CTL */
++ reg &= ~0xd;
++ reg |= hdmi_mux_setting << 2;
++
++ writel(reg, hdmi->gpr_hdmi_base);
++
++ /* Set HDMI event as SDMA event2 for HDMI audio */
++ reg = readl(hdmi->gpr_sdma_base);
++ reg |= 0x1;
++ writel(reg, hdmi->gpr_sdma_base);
++}
++
++static void hdmi_hdcp_get_property(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++
++ /* Check hdcp enable by dts.*/
++ hdcp_init = of_property_read_bool(np, "fsl,hdcp");
++ if (hdcp_init)
++ dev_dbg(&pdev->dev, "hdcp enable\n");
++ else
++ dev_dbg(&pdev->dev, "hdcp disable\n");
++}
++
++static void hdmi_get_of_property(struct mxc_hdmi *hdmi)
++{
++ struct platform_device *pdev = hdmi->pdev;
++ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id =
++ of_match_device(imx_hdmi_dt_ids, &pdev->dev);
++ int ret;
++ u32 phy_reg_vlev = 0, phy_reg_cksymtx = 0;
++
++ if (of_id) {
++ pdev->id_entry = of_id->data;
++ hdmi->cpu_type = pdev->id_entry->driver_data;
++ }
++
++ /* HDMI PHY register vlev and cksymtx preperty is optional.
++ * It is for specific board to pass HCT electrical part.
++ * Default value will been setting in HDMI PHY config function
++ * if it is not define in device tree.
++ */
++ ret = of_property_read_u32(np, "fsl,phy_reg_vlev", &phy_reg_vlev);
++ if (ret)
++ dev_dbg(&pdev->dev, "No board specific HDMI PHY vlev\n");
++
++ ret = of_property_read_u32(np, "fsl,phy_reg_cksymtx", &phy_reg_cksymtx);
++ if (ret)
++ dev_dbg(&pdev->dev, "No board specific HDMI PHY cksymtx\n");
++
++ /* Specific phy config */
++ hdmi->phy_config.reg_cksymtx = phy_reg_cksymtx;
++ hdmi->phy_config.reg_vlev = phy_reg_vlev;
++
++}
++
++/* HDMI Initialization Step A */
++static int mxc_hdmi_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret = 0;
++ u32 i;
++ const struct fb_videomode *mode;
++ struct fb_videomode m;
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++ int irq = platform_get_irq(hdmi->pdev, 0);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Check hdmi disp init once */
++ if (hdmi_inited) {
++ dev_err(&hdmi->pdev->dev,
++ "Error only one HDMI output support now!\n");
++ return -1;
++ }
++
++ hdmi_get_of_property(hdmi);
++
++ if (irq < 0)
++ return -ENODEV;
++
++ /* Setting HDMI default to blank state */
++ hdmi->blank = FB_BLANK_POWERDOWN;
++
++ setting->dev_id = mxc_hdmi_ipu_id;
++ setting->disp_id = mxc_hdmi_disp_id;
++ setting->if_fmt = IPU_PIX_FMT_RGB24;
++
++ hdmi->dft_mode_str = setting->dft_mode_str;
++ hdmi->default_bpp = setting->default_bpp;
++ dev_dbg(&hdmi->pdev->dev, "%s - default mode %s bpp=%d\n",
++ __func__, hdmi->dft_mode_str, hdmi->default_bpp);
++
++ hdmi->fbi = setting->fbi;
++
++ hdmi_init_route(hdmi);
++
++ hdmi->hdmi_isfr_clk = clk_get(&hdmi->pdev->dev, "hdmi_isfr");
++ if (IS_ERR(hdmi->hdmi_isfr_clk)) {
++ ret = PTR_ERR(hdmi->hdmi_isfr_clk);
++ dev_err(&hdmi->pdev->dev,
++ "Unable to get HDMI clk: %d\n", ret);
++ goto egetclk1;
++ }
++
++ ret = clk_prepare_enable(hdmi->hdmi_isfr_clk);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Cannot enable HDMI isfr clock: %d\n", ret);
++ goto erate1;
++ }
++
++ hdmi->hdmi_iahb_clk = clk_get(&hdmi->pdev->dev, "hdmi_iahb");
++ if (IS_ERR(hdmi->hdmi_iahb_clk)) {
++ ret = PTR_ERR(hdmi->hdmi_iahb_clk);
++ dev_err(&hdmi->pdev->dev,
++ "Unable to get HDMI clk: %d\n", ret);
++ goto egetclk2;
++ }
++
++ ret = clk_prepare_enable(hdmi->hdmi_iahb_clk);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Cannot enable HDMI iahb clock: %d\n", ret);
++ goto erate2;
++ }
++
++ dev_dbg(&hdmi->pdev->dev, "Enabled HDMI clocks\n");
++
++ /* Init DDC pins for HDCP */
++ if (hdcp_init) {
++ hdmi->pinctrl = devm_pinctrl_get_select_default(&hdmi->pdev->dev);
++ if (IS_ERR(hdmi->pinctrl)) {
++ dev_err(&hdmi->pdev->dev, "can't get/select DDC pinctrl\n");
++ goto erate2;
++ }
++ }
++
++ /* Product and revision IDs */
++ dev_info(&hdmi->pdev->dev,
++ "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
++ hdmi_readb(HDMI_DESIGN_ID),
++ hdmi_readb(HDMI_REVISION_ID),
++ hdmi_readb(HDMI_PRODUCT_ID0),
++ hdmi_readb(HDMI_PRODUCT_ID1));
++
++ /* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
++ * N and cts values before enabling phy */
++ hdmi_init_clk_regenerator();
++
++ INIT_LIST_HEAD(&hdmi->fbi->modelist);
++
++ spin_lock_init(&hdmi->irq_lock);
++
++ /* Set the default mode and modelist when disp init. */
++ fb_find_mode(&hdmi->fbi->var, hdmi->fbi,
++ hdmi->dft_mode_str, NULL, 0, NULL,
++ hdmi->default_bpp);
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++
++ /*Add all no interlaced CEA mode to default modelist */
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ mode = &mxc_cea_mode[i];
++ if (!(mode->vmode & FB_VMODE_INTERLACED) && (mode->xres != 0))
++ fb_add_videomode(mode, &hdmi->fbi->modelist);
++ }
++
++ console_unlock();
++
++ /* Find a nearest mode in default modelist */
++ fb_var_to_videomode(&m, &hdmi->fbi->var);
++ dump_fb_videomode(&m);
++
++ hdmi->dft_mode_set = false;
++ /* Save default video mode */
++ memcpy(&hdmi->default_mode, &m, sizeof(struct fb_videomode));
++
++ mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ if (!mode) {
++ pr_err("%s: could not find mode in modelist\n", __func__);
++ return -1;
++ }
++
++ fb_videomode_to_var(&hdmi->fbi->var, mode);
++
++ /* update fbi mode */
++ hdmi->fbi->mode = (struct fb_videomode *)mode;
++
++ /* Default setting HDMI working in HDMI mode*/
++ hdmi->edid_cfg.hdmi_cap = true;
++
++ hdmi->plug_event = HDMI_DVI_IH_STAT;
++ hdmi->plug_mask = HDMI_DVI_STAT;
++
++ setup_timer(&hdmi->jitter_timer, hotplug_work_launch, (unsigned long)hdmi);
++ INIT_WORK(&hdmi->hotplug_work, hotplug_worker);
++ INIT_DELAYED_WORK(&hdmi->hdcp_hdp_work, hdcp_hdp_worker);
++
++ /* Configure registers related to HDMI interrupt
++ * generation before registering IRQ. */
++ hdmi_writeb(hdmi->plug_mask, HDMI_PHY_POL0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ hdmi->nb.notifier_call = mxc_hdmi_fb_event;
++ ret = fb_register_client(&hdmi->nb);
++ if (ret < 0)
++ goto efbclient;
++
++ memset(&hdmi->hdmi_data, 0, sizeof(struct hdmi_data_info));
++
++ /* Default HDMI working in RGB mode */
++ hdmi->hdmi_data.rgb_out_enable = true;
++
++ if (!strcasecmp(rgb_quant_range, "limited")) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE;
++ } else if (!strcasecmp(rgb_quant_range, "full")) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE;
++ } else {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT;
++ }
++
++ ret = devm_request_irq(&hdmi->pdev->dev, irq, mxc_hdmi_hotplug, IRQF_SHARED,
++ dev_name(&hdmi->pdev->dev), hdmi);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Unable to request irq: %d\n", ret);
++ goto ereqirq;
++ }
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_fb_name);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for fb name\n");
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_cable_state);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for cable state\n");
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_edid);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for edid\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_rgb_out_enable);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for rgb out enable\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_rgb_quant_range);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for rgb quant range\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_hdcp_enable);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for hdcp enable\n");
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++
++ hdmi_inited = true;
++
++ return ret;
++
++efbclient:
++ free_irq(irq, hdmi);
++ereqirq:
++ clk_disable_unprepare(hdmi->hdmi_iahb_clk);
++erate2:
++ clk_put(hdmi->hdmi_iahb_clk);
++egetclk2:
++ clk_disable_unprepare(hdmi->hdmi_isfr_clk);
++erate1:
++ clk_put(hdmi->hdmi_isfr_clk);
++egetclk1:
++ dev_dbg(&hdmi->pdev->dev, "%s error exit\n", __func__);
++
++ return ret;
++}
++
++static void mxc_hdmi_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_unregister_client(&hdmi->nb);
++
++ clk_disable_unprepare(hdmi->hdmi_isfr_clk);
++ clk_put(hdmi->hdmi_isfr_clk);
++ clk_disable_unprepare(hdmi->hdmi_iahb_clk);
++ clk_put(hdmi->hdmi_iahb_clk);
++
++ platform_device_unregister(hdmi->pdev);
++
++ hdmi_inited = false;
++}
++
++static struct mxc_dispdrv_driver mxc_hdmi_drv = {
++ .name = DISPDRV_HDMI,
++ .init = mxc_hdmi_disp_init,
++ .deinit = mxc_hdmi_disp_deinit,
++ .enable = mxc_hdmi_power_on,
++ .disable = mxc_hdmi_power_off,
++};
++
++
++static int mxc_hdmi_open(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static long mxc_hdmi_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int __user *argp = (void __user *)arg;
++ int ret = 0;
++
++ switch (cmd) {
++ case HDMI_IOC_GET_RESOURCE:
++ ret = copy_to_user(argp, &g_hdmi->hdmi_data,
++ sizeof(g_hdmi->hdmi_data)) ? -EFAULT : 0;
++ break;
++ case HDMI_IOC_GET_CPU_TYPE:
++ *argp = g_hdmi->cpu_type;
++ break;
++ default:
++ pr_debug("Unsupport cmd %d\n", cmd);
++ break;
++ }
++ return ret;
++}
++
++static int mxc_hdmi_release(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static const struct file_operations mxc_hdmi_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_hdmi_open,
++ .release = mxc_hdmi_release,
++ .unlocked_ioctl = mxc_hdmi_ioctl,
++};
++
++
++static int mxc_hdmi_probe(struct platform_device *pdev)
++{
++ struct mxc_hdmi *hdmi;
++ struct device *temp_class;
++ struct resource *res;
++ int ret = 0;
++
++ /* Check I2C driver is loaded and available
++ * check hdcp function is enable by dts */
++ hdmi_hdcp_get_property(pdev);
++ if (!hdmi_i2c && !hdcp_init)
++ return -ENODEV;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -ENOENT;
++
++ hdmi = devm_kzalloc(&pdev->dev,
++ sizeof(struct mxc_hdmi),
++ GFP_KERNEL);
++ if (!hdmi) {
++ dev_err(&pdev->dev, "Cannot allocate device data\n");
++ ret = -ENOMEM;
++ goto ealloc;
++ }
++ g_hdmi = hdmi;
++
++ hdmi_major = register_chrdev(hdmi_major, "mxc_hdmi", &mxc_hdmi_fops);
++ if (hdmi_major < 0) {
++ printk(KERN_ERR "HDMI: unable to get a major for HDMI\n");
++ ret = -EBUSY;
++ goto ealloc;
++ }
++
++ hdmi_class = class_create(THIS_MODULE, "mxc_hdmi");
++ if (IS_ERR(hdmi_class)) {
++ ret = PTR_ERR(hdmi_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(hdmi_class, NULL, MKDEV(hdmi_major, 0),
++ NULL, "mxc_hdmi");
++ if (IS_ERR(temp_class)) {
++ ret = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ hdmi->pdev = pdev;
++
++ hdmi->core_pdev = platform_device_alloc("mxc_hdmi_core", -1);
++ if (!hdmi->core_pdev) {
++ pr_err("%s failed platform_device_alloc for hdmi core\n",
++ __func__);
++ ret = -ENOMEM;
++ goto ecore;
++ }
++
++ hdmi->gpr_base = ioremap(res->start, resource_size(res));
++ if (!hdmi->gpr_base) {
++ dev_err(&pdev->dev, "ioremap failed\n");
++ ret = -ENOMEM;
++ goto eiomap;
++ }
++
++ hdmi->gpr_hdmi_base = hdmi->gpr_base + 3;
++ hdmi->gpr_sdma_base = hdmi->gpr_base;
++
++ hdmi_inited = false;
++
++ hdmi->disp_mxc_hdmi = mxc_dispdrv_register(&mxc_hdmi_drv);
++ if (IS_ERR(hdmi->disp_mxc_hdmi)) {
++ dev_err(&pdev->dev, "Failed to register dispdrv - 0x%x\n",
++ (int)hdmi->disp_mxc_hdmi);
++ ret = (int)hdmi->disp_mxc_hdmi;
++ goto edispdrv;
++ }
++ mxc_dispdrv_setdata(hdmi->disp_mxc_hdmi, hdmi);
++
++ platform_set_drvdata(pdev, hdmi);
++
++ return 0;
++edispdrv:
++ iounmap(hdmi->gpr_base);
++eiomap:
++ platform_device_put(hdmi->core_pdev);
++ecore:
++ kfree(hdmi);
++err_out_class:
++ device_destroy(hdmi_class, MKDEV(hdmi_major, 0));
++ class_destroy(hdmi_class);
++err_out_chrdev:
++ unregister_chrdev(hdmi_major, "mxc_hdmi");
++ealloc:
++ return ret;
++}
++
++static int mxc_hdmi_remove(struct platform_device *pdev)
++{
++ struct mxc_hdmi *hdmi = platform_get_drvdata(pdev);
++ int irq = platform_get_irq(pdev, 0);
++
++ fb_unregister_client(&hdmi->nb);
++
++ mxc_dispdrv_puthandle(hdmi->disp_mxc_hdmi);
++ mxc_dispdrv_unregister(hdmi->disp_mxc_hdmi);
++ iounmap(hdmi->gpr_base);
++ /* No new work will be scheduled, wait for running ISR */
++ free_irq(irq, hdmi);
++ kfree(hdmi);
++ g_hdmi = NULL;
++
++ return 0;
++}
++
++static struct platform_driver mxc_hdmi_driver = {
++ .probe = mxc_hdmi_probe,
++ .remove = mxc_hdmi_remove,
++ .driver = {
++ .name = "mxc_hdmi",
++ .of_match_table = imx_hdmi_dt_ids,
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init mxc_hdmi_init(void)
++{
++ return platform_driver_register(&mxc_hdmi_driver);
++}
++module_init(mxc_hdmi_init);
++
++static void __exit mxc_hdmi_exit(void)
++{
++ if (hdmi_major > 0) {
++ device_destroy(hdmi_class, MKDEV(hdmi_major, 0));
++ class_destroy(hdmi_class);
++ unregister_chrdev(hdmi_major, "mxc_hdmi");
++ hdmi_major = 0;
++ }
++
++ platform_driver_unregister(&mxc_hdmi_driver);
++}
++module_exit(mxc_hdmi_exit);
++
++static int mxc_hdmi_i2c_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ if (!i2c_check_functionality(client->adapter,
++ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
++ return -ENODEV;
++
++ hdmi_i2c = client;
++
++ return 0;
++}
++
++static int mxc_hdmi_i2c_remove(struct i2c_client *client)
++{
++ hdmi_i2c = NULL;
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_i2c_match[] = {
++ { .compatible = "fsl,imx6-hdmi-i2c", },
++ { /* sentinel */ }
++};
++
++static const struct i2c_device_id mxc_hdmi_i2c_id[] = {
++ { "mxc_hdmi_i2c", 0 },
++ {},
++};
++MODULE_DEVICE_TABLE(i2c, mxc_hdmi_i2c_id);
++
++static struct i2c_driver mxc_hdmi_i2c_driver = {
++ .driver = {
++ .name = "mxc_hdmi_i2c",
++ .of_match_table = imx_hdmi_i2c_match,
++ },
++ .probe = mxc_hdmi_i2c_probe,
++ .remove = mxc_hdmi_i2c_remove,
++ .id_table = mxc_hdmi_i2c_id,
++};
++
++static int __init mxc_hdmi_i2c_init(void)
++{
++ return i2c_add_driver(&mxc_hdmi_i2c_driver);
++}
++
++static void __exit mxc_hdmi_i2c_exit(void)
++{
++ i2c_del_driver(&mxc_hdmi_i2c_driver);
++}
++
++module_init(mxc_hdmi_i2c_init);
++module_exit(mxc_hdmi_i2c_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+diff -Nur linux-3.14.36/drivers/video/mxc/mxc_ipuv3_fb.c linux-openelec/drivers/video/mxc/mxc_ipuv3_fb.c
+--- linux-3.14.36/drivers/video/mxc/mxc_ipuv3_fb.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mxc_ipuv3_fb.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,2578 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxcfb.c
++ *
++ * @brief MXC Frame buffer driver for SDC
++ *
++ * @ingroup Framebuffer
++ */
++
++/*!
++ * Include files
++ */
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/errno.h>
++#include <linux/fb.h>
++#include <linux/fsl_devices.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/ipu.h>
++#include <linux/ipu-v3.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++
++#include "mxc_dispdrv.h"
++
++/*
++ * Driver name
++ */
++#define MXCFB_NAME "mxc_sdc_fb"
++
++/* Display port number */
++#define MXCFB_PORT_NUM 2
++/*!
++ * Structure containing the MXC specific framebuffer information.
++ */
++struct mxcfb_info {
++ int default_bpp;
++ int cur_blank;
++ int next_blank;
++ ipu_channel_t ipu_ch;
++ int ipu_id;
++ int ipu_di;
++ u32 ipu_di_pix_fmt;
++ bool ipu_int_clk;
++ bool overlay;
++ bool alpha_chan_en;
++ bool late_init;
++ bool first_set_par;
++ dma_addr_t alpha_phy_addr0;
++ dma_addr_t alpha_phy_addr1;
++ void *alpha_virt_addr0;
++ void *alpha_virt_addr1;
++ uint32_t alpha_mem_len;
++ uint32_t ipu_ch_irq;
++ uint32_t ipu_ch_nf_irq;
++ uint32_t ipu_alp_ch_irq;
++ uint32_t cur_ipu_buf;
++ uint32_t cur_ipu_alpha_buf;
++
++ u32 pseudo_palette[16];
++
++ bool mode_found;
++ struct completion flip_complete;
++ struct completion alpha_flip_complete;
++ struct completion vsync_complete;
++
++ void *ipu;
++ struct fb_info *ovfbi;
++
++ struct mxc_dispdrv_handle *dispdrv;
++
++ struct fb_var_screeninfo cur_var;
++};
++
++struct mxcfb_pfmt {
++ u32 fb_pix_fmt;
++ int bpp;
++ struct fb_bitfield red;
++ struct fb_bitfield green;
++ struct fb_bitfield blue;
++ struct fb_bitfield transp;
++};
++
++static const struct mxcfb_pfmt mxcfb_pfmts[] = {
++ /* pixel bpp red green blue transp */
++ {IPU_PIX_FMT_RGB565, 16, {11, 5, 0}, { 5, 6, 0}, { 0, 5, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_RGB24, 24, { 0, 8, 0}, { 8, 8, 0}, {16, 8, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_BGR24, 24, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_RGB32, 32, { 0, 8, 0}, { 8, 8, 0}, {16, 8, 0}, {24, 8, 0} },
++ {IPU_PIX_FMT_BGR32, 32, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0}, {24, 8, 0} },
++ {IPU_PIX_FMT_ABGR32, 32, {24, 8, 0}, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0} },
++};
++
++struct mxcfb_alloc_list {
++ struct list_head list;
++ dma_addr_t phy_addr;
++ void *cpu_addr;
++ u32 size;
++};
++
++enum {
++ BOTH_ON,
++ SRC_ON,
++ TGT_ON,
++ BOTH_OFF
++};
++
++static bool g_dp_in_use[2];
++LIST_HEAD(fb_alloc_list);
++
++/* Return default standard(RGB) pixel format */
++static uint32_t bpp_to_pixfmt(int bpp)
++{
++ uint32_t pixfmt = 0;
++
++ switch (bpp) {
++ case 24:
++ pixfmt = IPU_PIX_FMT_BGR24;
++ break;
++ case 32:
++ pixfmt = IPU_PIX_FMT_BGR32;
++ break;
++ case 16:
++ pixfmt = IPU_PIX_FMT_RGB565;
++ break;
++ }
++ return pixfmt;
++}
++
++static inline int bitfield_is_equal(struct fb_bitfield f1,
++ struct fb_bitfield f2)
++{
++ return !memcmp(&f1, &f2, sizeof(f1));
++}
++
++static int pixfmt_to_var(uint32_t pixfmt, struct fb_var_screeninfo *var)
++{
++ int i, ret = -1;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (pixfmt == mxcfb_pfmts[i].fb_pix_fmt) {
++ var->red = mxcfb_pfmts[i].red;
++ var->green = mxcfb_pfmts[i].green;
++ var->blue = mxcfb_pfmts[i].blue;
++ var->transp = mxcfb_pfmts[i].transp;
++ var->bits_per_pixel = mxcfb_pfmts[i].bpp;
++ ret = 0;
++ break;
++ }
++ }
++ return ret;
++}
++
++static int bpp_to_var(int bpp, struct fb_var_screeninfo *var)
++{
++ uint32_t pixfmt = 0;
++
++ pixfmt = bpp_to_pixfmt(bpp);
++ if (pixfmt)
++ return pixfmt_to_var(pixfmt, var);
++ else
++ return -1;
++}
++
++static int check_var_pixfmt(struct fb_var_screeninfo *var)
++{
++ int i, ret = -1;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (bitfield_is_equal(var->red, mxcfb_pfmts[i].red) &&
++ bitfield_is_equal(var->green, mxcfb_pfmts[i].green) &&
++ bitfield_is_equal(var->blue, mxcfb_pfmts[i].blue) &&
++ bitfield_is_equal(var->transp, mxcfb_pfmts[i].transp) &&
++ var->bits_per_pixel == mxcfb_pfmts[i].bpp) {
++ ret = 0;
++ break;
++ }
++ }
++ return ret;
++}
++
++static uint32_t fbi_to_pixfmt(struct fb_info *fbi)
++{
++ int i;
++ uint32_t pixfmt = 0;
++
++ if (fbi->var.nonstd)
++ return fbi->var.nonstd;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (bitfield_is_equal(fbi->var.red, mxcfb_pfmts[i].red) &&
++ bitfield_is_equal(fbi->var.green, mxcfb_pfmts[i].green) &&
++ bitfield_is_equal(fbi->var.blue, mxcfb_pfmts[i].blue) &&
++ bitfield_is_equal(fbi->var.transp, mxcfb_pfmts[i].transp)) {
++ pixfmt = mxcfb_pfmts[i].fb_pix_fmt;
++ break;
++ }
++ }
++
++ if (pixfmt == 0)
++ dev_err(fbi->device, "cannot get pixel format\n");
++
++ return pixfmt;
++}
++
++static struct fb_info *found_registered_fb(ipu_channel_t ipu_ch, int ipu_id)
++{
++ int i;
++ struct mxcfb_info *mxc_fbi;
++ struct fb_info *fbi = NULL;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ mxc_fbi =
++ ((struct mxcfb_info *)(registered_fb[i]->par));
++
++ if ((mxc_fbi->ipu_ch == ipu_ch) &&
++ (mxc_fbi->ipu_id == ipu_id)) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++ return fbi;
++}
++
++static irqreturn_t mxcfb_irq_handler(int irq, void *dev_id);
++static irqreturn_t mxcfb_nf_irq_handler(int irq, void *dev_id);
++static int mxcfb_blank(int blank, struct fb_info *info);
++static int mxcfb_map_video_memory(struct fb_info *fbi);
++static int mxcfb_unmap_video_memory(struct fb_info *fbi);
++
++/*
++ * Set fixed framebuffer parameters based on variable settings.
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_set_fix(struct fb_info *info)
++{
++ struct fb_fix_screeninfo *fix = &info->fix;
++ struct fb_var_screeninfo *var = &info->var;
++
++ fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
++
++ fix->type = FB_TYPE_PACKED_PIXELS;
++ fix->accel = FB_ACCEL_NONE;
++ fix->visual = FB_VISUAL_TRUECOLOR;
++ fix->xpanstep = 1;
++ fix->ywrapstep = 1;
++ fix->ypanstep = 1;
++
++ return 0;
++}
++
++static int _setup_disp_channel1(struct fb_info *fbi)
++{
++ ipu_channel_params_t params;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ memset(&params, 0, sizeof(params));
++
++ if (mxc_fbi->ipu_ch == MEM_DC_SYNC) {
++ params.mem_dc_sync.di = mxc_fbi->ipu_di;
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ params.mem_dc_sync.interlaced = true;
++ params.mem_dc_sync.out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ params.mem_dc_sync.in_pixel_fmt = fbi_to_pixfmt(fbi);
++ } else {
++ params.mem_dp_bg_sync.di = mxc_fbi->ipu_di;
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ params.mem_dp_bg_sync.interlaced = true;
++ params.mem_dp_bg_sync.out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ params.mem_dp_bg_sync.in_pixel_fmt = fbi_to_pixfmt(fbi);
++ if (mxc_fbi->alpha_chan_en)
++ params.mem_dp_bg_sync.alpha_chan_en = true;
++ }
++ ipu_init_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, &params);
++
++ return 0;
++}
++
++static int _setup_disp_channel2(struct fb_info *fbi)
++{
++ int retval = 0;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++ int fb_stride;
++ unsigned long base;
++ unsigned int fr_xoff, fr_yoff, fr_w, fr_h;
++
++ switch (fbi_to_pixfmt(fbi)) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YUV444P:
++ fb_stride = fbi->var.xres_virtual;
++ break;
++ default:
++ fb_stride = fbi->fix.line_length;
++ }
++
++ base = fbi->fix.smem_start;
++ fr_xoff = fbi->var.xoffset;
++ fr_w = fbi->var.xres_virtual;
++ if (!(fbi->var.vmode & FB_VMODE_YWRAP)) {
++ dev_dbg(fbi->device, "Y wrap disabled\n");
++ fr_yoff = fbi->var.yoffset % fbi->var.yres;
++ fr_h = fbi->var.yres;
++ base += fbi->fix.line_length * fbi->var.yres *
++ (fbi->var.yoffset / fbi->var.yres);
++ } else {
++ dev_dbg(fbi->device, "Y wrap enabled\n");
++ fr_yoff = fbi->var.yoffset;
++ fr_h = fbi->var.yres_virtual;
++ }
++ base += fr_yoff * fb_stride + fr_xoff;
++
++ mxc_fbi->cur_ipu_buf = 2;
++ init_completion(&mxc_fbi->flip_complete);
++ /*
++ * We don't need to wait for vsync at the first time
++ * we do pan display after fb is initialized, as IPU will
++ * switch to the newly selected buffer automatically,
++ * so we call complete() for both mxc_fbi->flip_complete
++ * and mxc_fbi->alpha_flip_complete.
++ */
++ complete(&mxc_fbi->flip_complete);
++ if (mxc_fbi->alpha_chan_en) {
++ mxc_fbi->cur_ipu_alpha_buf = 1;
++ init_completion(&mxc_fbi->alpha_flip_complete);
++ complete(&mxc_fbi->alpha_flip_complete);
++ }
++
++ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(fbi),
++ fbi->var.xres, fbi->var.yres,
++ fb_stride,
++ fbi->var.rotate,
++ base,
++ base,
++ fbi->var.accel_flags &
++ FB_ACCEL_DOUBLE_FLAG ? 0 : base,
++ 0, 0);
++ if (retval) {
++ dev_err(fbi->device,
++ "ipu_init_channel_buffer error %d\n", retval);
++ return retval;
++ }
++
++ /* update u/v offset */
++ ipu_update_channel_offset(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(fbi),
++ fr_w,
++ fr_h,
++ fr_w,
++ 0, 0,
++ fr_yoff,
++ fr_xoff);
++
++ if (mxc_fbi->alpha_chan_en) {
++ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ IPU_PIX_FMT_GENERIC,
++ fbi->var.xres, fbi->var.yres,
++ fbi->var.xres,
++ fbi->var.rotate,
++ mxc_fbi->alpha_phy_addr1,
++ mxc_fbi->alpha_phy_addr0,
++ 0,
++ 0, 0);
++ if (retval) {
++ dev_err(fbi->device,
++ "ipu_init_channel_buffer error %d\n", retval);
++ return retval;
++ }
++ }
++
++ return retval;
++}
++
++static bool mxcfb_need_to_set_par(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ if ((fbi->var.activate & FB_ACTIVATE_FORCE) &&
++ (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW)
++ return true;
++
++ /*
++ * Ignore xoffset and yoffset update,
++ * because pan display handles this case.
++ */
++ mxc_fbi->cur_var.xoffset = fbi->var.xoffset;
++ mxc_fbi->cur_var.yoffset = fbi->var.yoffset;
++
++ return !!memcmp(&mxc_fbi->cur_var, &fbi->var,
++ sizeof(struct fb_var_screeninfo));
++}
++
++/*
++ * Set framebuffer parameters and change the operating mode.
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_set_par(struct fb_info *fbi)
++{
++ int retval = 0;
++ u32 mem_len, alpha_mem_len;
++ ipu_di_signal_cfg_t sig_cfg;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ int16_t ov_pos_x = 0, ov_pos_y = 0;
++ int ov_pos_ret = 0;
++ struct mxcfb_info *mxc_fbi_fg = NULL;
++ bool ovfbi_enable = false;
++
++ if (ipu_ch_param_bad_alpha_pos(fbi_to_pixfmt(fbi)) &&
++ mxc_fbi->alpha_chan_en) {
++ dev_err(fbi->device, "Bad pixel format for "
++ "graphics plane fb\n");
++ return -EINVAL;
++ }
++
++ if (mxc_fbi->ovfbi)
++ mxc_fbi_fg = (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++
++ if (mxc_fbi->ovfbi && mxc_fbi_fg)
++ if (mxc_fbi_fg->next_blank == FB_BLANK_UNBLANK)
++ ovfbi_enable = true;
++
++ if (!mxcfb_need_to_set_par(fbi))
++ return 0;
++
++ dev_dbg(fbi->device, "Reconfiguring framebuffer\n");
++
++ if (fbi->var.xres == 0 || fbi->var.yres == 0)
++ return 0;
++
++ if (ovfbi_enable) {
++ ov_pos_ret = ipu_disp_get_window_pos(
++ mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch,
++ &ov_pos_x, &ov_pos_y);
++ if (ov_pos_ret < 0)
++ dev_err(fbi->device, "Get overlay pos failed, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++
++ ipu_clear_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_irq);
++ ipu_disable_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_nf_irq);
++ ipu_disable_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_nf_irq);
++ ipu_disable_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++ }
++
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_disable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_disable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_disable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++
++ /*
++ * Disable IPU hsp clock if it is enabled for an
++ * additional time in ipu common driver.
++ */
++ if (mxc_fbi->first_set_par && mxc_fbi->late_init)
++ ipu_disable_hsp_clk(mxc_fbi->ipu);
++
++ mxcfb_set_fix(fbi);
++
++ mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
++ if (!fbi->fix.smem_start || (mem_len > fbi->fix.smem_len)) {
++ if (fbi->fix.smem_start)
++ mxcfb_unmap_video_memory(fbi);
++
++ if (mxcfb_map_video_memory(fbi) < 0)
++ return -ENOMEM;
++ }
++
++ if (mxc_fbi->first_set_par) {
++ /*
++ * Clear the screen in case uboot fb pixel format is not
++ * the same to kernel fb pixel format.
++ */
++ if (mxc_fbi->late_init)
++ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
++
++ mxc_fbi->first_set_par = false;
++ }
++
++ if (mxc_fbi->alpha_chan_en) {
++ alpha_mem_len = fbi->var.xres * fbi->var.yres;
++ if ((!mxc_fbi->alpha_phy_addr0 && !mxc_fbi->alpha_phy_addr1) ||
++ (alpha_mem_len > mxc_fbi->alpha_mem_len)) {
++ if (mxc_fbi->alpha_phy_addr0)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr0,
++ mxc_fbi->alpha_phy_addr0);
++ if (mxc_fbi->alpha_phy_addr1)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr1,
++ mxc_fbi->alpha_phy_addr1);
++
++ mxc_fbi->alpha_virt_addr0 =
++ dma_alloc_coherent(fbi->device,
++ alpha_mem_len,
++ &mxc_fbi->alpha_phy_addr0,
++ GFP_DMA | GFP_KERNEL);
++
++ mxc_fbi->alpha_virt_addr1 =
++ dma_alloc_coherent(fbi->device,
++ alpha_mem_len,
++ &mxc_fbi->alpha_phy_addr1,
++ GFP_DMA | GFP_KERNEL);
++ if (mxc_fbi->alpha_virt_addr0 == NULL ||
++ mxc_fbi->alpha_virt_addr1 == NULL) {
++ dev_err(fbi->device, "mxcfb: dma alloc for"
++ " alpha buffer failed.\n");
++ if (mxc_fbi->alpha_virt_addr0)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr0,
++ mxc_fbi->alpha_phy_addr0);
++ if (mxc_fbi->alpha_virt_addr1)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr1,
++ mxc_fbi->alpha_phy_addr1);
++ return -ENOMEM;
++ }
++ mxc_fbi->alpha_mem_len = alpha_mem_len;
++ }
++ }
++
++ if (mxc_fbi->next_blank != FB_BLANK_UNBLANK)
++ return retval;
++
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->setup) {
++ retval = mxc_fbi->dispdrv->drv->setup(mxc_fbi->dispdrv, fbi);
++ if (retval < 0) {
++ dev_err(fbi->device, "setup error, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++ return -EINVAL;
++ }
++ }
++
++ _setup_disp_channel1(fbi);
++ if (ovfbi_enable)
++ _setup_disp_channel1(mxc_fbi->ovfbi);
++
++ if (!mxc_fbi->overlay) {
++ uint32_t out_pixel_fmt;
++
++ memset(&sig_cfg, 0, sizeof(sig_cfg));
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ sig_cfg.interlaced = true;
++ out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ if (fbi->var.vmode & FB_VMODE_ODD_FLD_FIRST) /* PAL */
++ sig_cfg.odd_field_first = true;
++ if (mxc_fbi->ipu_int_clk)
++ sig_cfg.int_clk = true;
++ if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
++ sig_cfg.Hsync_pol = true;
++ if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
++ sig_cfg.Vsync_pol = true;
++ if (!(fbi->var.sync & FB_SYNC_CLK_LAT_FALL))
++ sig_cfg.clk_pol = true;
++ if (fbi->var.sync & FB_SYNC_DATA_INVERT)
++ sig_cfg.data_pol = true;
++ if (!(fbi->var.sync & FB_SYNC_OE_LOW_ACT))
++ sig_cfg.enable_pol = true;
++ if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN)
++ sig_cfg.clkidle_en = true;
++
++ dev_dbg(fbi->device, "pixclock = %ul Hz\n",
++ (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
++
++ if (ipu_init_sync_panel(mxc_fbi->ipu, mxc_fbi->ipu_di,
++ (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
++ fbi->var.xres, fbi->var.yres,
++ out_pixel_fmt,
++ fbi->var.left_margin,
++ fbi->var.hsync_len,
++ fbi->var.right_margin,
++ fbi->var.upper_margin,
++ fbi->var.vsync_len,
++ fbi->var.lower_margin,
++ 0, sig_cfg) != 0) {
++ dev_err(fbi->device,
++ "mxcfb: Error initializing panel.\n");
++ return -EINVAL;
++ }
++
++ fbi->mode =
++ (struct fb_videomode *)fb_match_mode(&fbi->var,
++ &fbi->modelist);
++
++ ipu_disp_set_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch, 0, 0);
++ }
++
++ retval = _setup_disp_channel2(fbi);
++ if (retval) {
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ return retval;
++ }
++
++ if (ovfbi_enable) {
++ if (ov_pos_ret >= 0)
++ ipu_disp_set_window_pos(
++ mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch,
++ ov_pos_x, ov_pos_y);
++ retval = _setup_disp_channel2(mxc_fbi->ovfbi);
++ if (retval) {
++ ipu_uninit_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ return retval;
++ }
++ }
++
++ ipu_enable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ if (ovfbi_enable)
++ ipu_enable_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->enable) {
++ retval = mxc_fbi->dispdrv->drv->enable(mxc_fbi->dispdrv);
++ if (retval < 0) {
++ dev_err(fbi->device, "enable error, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++ return -EINVAL;
++ }
++ }
++
++ mxc_fbi->cur_var = fbi->var;
++
++ return retval;
++}
++
++static int _swap_channels(struct fb_info *fbi_from,
++ struct fb_info *fbi_to, bool both_on)
++{
++ int retval, tmp;
++ ipu_channel_t old_ch;
++ struct fb_info *ovfbi;
++ struct mxcfb_info *mxc_fbi_from = (struct mxcfb_info *)fbi_from->par;
++ struct mxcfb_info *mxc_fbi_to = (struct mxcfb_info *)fbi_to->par;
++
++ if (both_on) {
++ ipu_disable_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch);
++ }
++
++ /* switch the mxc fbi parameters */
++ old_ch = mxc_fbi_from->ipu_ch;
++ mxc_fbi_from->ipu_ch = mxc_fbi_to->ipu_ch;
++ mxc_fbi_to->ipu_ch = old_ch;
++ tmp = mxc_fbi_from->ipu_ch_irq;
++ mxc_fbi_from->ipu_ch_irq = mxc_fbi_to->ipu_ch_irq;
++ mxc_fbi_to->ipu_ch_irq = tmp;
++ tmp = mxc_fbi_from->ipu_ch_nf_irq;
++ mxc_fbi_from->ipu_ch_nf_irq = mxc_fbi_to->ipu_ch_nf_irq;
++ mxc_fbi_to->ipu_ch_nf_irq = tmp;
++ ovfbi = mxc_fbi_from->ovfbi;
++ mxc_fbi_from->ovfbi = mxc_fbi_to->ovfbi;
++ mxc_fbi_to->ovfbi = ovfbi;
++
++ _setup_disp_channel1(fbi_from);
++ retval = _setup_disp_channel2(fbi_from);
++ if (retval)
++ return retval;
++
++ /* switch between dp and dc, disable old idmac, enable new idmac */
++ retval = ipu_swap_channel(mxc_fbi_from->ipu, old_ch, mxc_fbi_from->ipu_ch);
++ ipu_uninit_channel(mxc_fbi_from->ipu, old_ch);
++
++ if (both_on) {
++ _setup_disp_channel1(fbi_to);
++ retval = _setup_disp_channel2(fbi_to);
++ if (retval)
++ return retval;
++ ipu_enable_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch);
++ }
++
++ return retval;
++}
++
++static int swap_channels(struct fb_info *fbi_from)
++{
++ int i;
++ int swap_mode;
++ ipu_channel_t ch_to;
++ struct mxcfb_info *mxc_fbi_from = (struct mxcfb_info *)fbi_from->par;
++ struct fb_info *fbi_to = NULL;
++ struct mxcfb_info *mxc_fbi_to;
++
++ /* what's the target channel? */
++ if (mxc_fbi_from->ipu_ch == MEM_BG_SYNC)
++ ch_to = MEM_DC_SYNC;
++ else
++ ch_to = MEM_BG_SYNC;
++
++ fbi_to = found_registered_fb(ch_to, mxc_fbi_from->ipu_id);
++ if (!fbi_to)
++ return -1;
++ mxc_fbi_to = (struct mxcfb_info *)fbi_to->par;
++
++ ipu_clear_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq);
++ ipu_free_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq, fbi_from);
++ ipu_free_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq, fbi_to);
++ ipu_clear_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq);
++ ipu_clear_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq);
++ ipu_free_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq, fbi_from);
++ ipu_free_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq, fbi_to);
++
++ if (mxc_fbi_from->cur_blank == FB_BLANK_UNBLANK) {
++ if (mxc_fbi_to->cur_blank == FB_BLANK_UNBLANK)
++ swap_mode = BOTH_ON;
++ else
++ swap_mode = SRC_ON;
++ } else {
++ if (mxc_fbi_to->cur_blank == FB_BLANK_UNBLANK)
++ swap_mode = TGT_ON;
++ else
++ swap_mode = BOTH_OFF;
++ }
++
++ switch (swap_mode) {
++ case BOTH_ON:
++ /* disable target->switch src->enable target */
++ _swap_channels(fbi_from, fbi_to, true);
++ break;
++ case SRC_ON:
++ /* just switch src */
++ _swap_channels(fbi_from, fbi_to, false);
++ break;
++ case TGT_ON:
++ /* just switch target */
++ _swap_channels(fbi_to, fbi_from, false);
++ break;
++ case BOTH_OFF:
++ /* switch directly, no more need to do */
++ mxc_fbi_to->ipu_ch = mxc_fbi_from->ipu_ch;
++ mxc_fbi_from->ipu_ch = ch_to;
++ i = mxc_fbi_from->ipu_ch_irq;
++ mxc_fbi_from->ipu_ch_irq = mxc_fbi_to->ipu_ch_irq;
++ mxc_fbi_to->ipu_ch_irq = i;
++ i = mxc_fbi_from->ipu_ch_nf_irq;
++ mxc_fbi_from->ipu_ch_nf_irq = mxc_fbi_to->ipu_ch_nf_irq;
++ mxc_fbi_to->ipu_ch_nf_irq = i;
++ break;
++ default:
++ break;
++ }
++
++ if (ipu_request_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_from) != 0) {
++ dev_err(fbi_from->device, "Error registering irq %d\n",
++ mxc_fbi_from->ipu_ch_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq);
++ if (ipu_request_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_to) != 0) {
++ dev_err(fbi_to->device, "Error registering irq %d\n",
++ mxc_fbi_to->ipu_ch_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq);
++ if (ipu_request_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_from) != 0) {
++ dev_err(fbi_from->device, "Error registering irq %d\n",
++ mxc_fbi_from->ipu_ch_nf_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq);
++ if (ipu_request_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_to) != 0) {
++ dev_err(fbi_to->device, "Error registering irq %d\n",
++ mxc_fbi_to->ipu_ch_nf_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq);
++
++ return 0;
++}
++
++/*
++ * Check framebuffer variable parameters and adjust to valid values.
++ *
++ * @param var framebuffer variable parameters
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ u32 vtotal;
++ u32 htotal;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par;
++
++
++ if (var->xres == 0 || var->yres == 0)
++ return 0;
++
++ /* fg should not bigger than bg */
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ struct fb_info *fbi_tmp;
++ int bg_xres = 0, bg_yres = 0;
++ int16_t pos_x, pos_y;
++
++ bg_xres = var->xres;
++ bg_yres = var->yres;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp) {
++ bg_xres = fbi_tmp->var.xres;
++ bg_yres = fbi_tmp->var.yres;
++ }
++
++ ipu_disp_get_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch, &pos_x, &pos_y);
++
++ if ((var->xres + pos_x) > bg_xres)
++ var->xres = bg_xres - pos_x;
++ if ((var->yres + pos_y) > bg_yres)
++ var->yres = bg_yres - pos_y;
++ }
++
++ if (var->rotate > IPU_ROTATE_VERT_FLIP)
++ var->rotate = IPU_ROTATE_NONE;
++
++ if (var->xres_virtual < var->xres)
++ var->xres_virtual = var->xres;
++
++ if (var->yres_virtual < var->yres)
++ var->yres_virtual = var->yres * 3;
++
++ if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
++ (var->bits_per_pixel != 16) && (var->bits_per_pixel != 12) &&
++ (var->bits_per_pixel != 8))
++ var->bits_per_pixel = 16;
++
++ if (check_var_pixfmt(var))
++ /* Fall back to default */
++ bpp_to_var(var->bits_per_pixel, var);
++
++ if (var->pixclock < 1000) {
++ htotal = var->xres + var->right_margin + var->hsync_len +
++ var->left_margin;
++ vtotal = var->yres + var->lower_margin + var->vsync_len +
++ var->upper_margin;
++ var->pixclock = (vtotal * htotal * 6UL) / 100UL;
++ var->pixclock = KHZ2PICOS(var->pixclock);
++ dev_dbg(info->device,
++ "pixclock set for 60Hz refresh = %u ps\n",
++ var->pixclock);
++ }
++
++ var->height = -1;
++ var->width = -1;
++ var->grayscale = 0;
++
++ return 0;
++}
++
++static inline u_int _chan_to_field(u_int chan, struct fb_bitfield *bf)
++{
++ chan &= 0xffff;
++ chan >>= 16 - bf->length;
++ return chan << bf->offset;
++}
++
++static int mxcfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
++ u_int trans, struct fb_info *fbi)
++{
++ unsigned int val;
++ int ret = 1;
++
++ /*
++ * If greyscale is true, then we convert the RGB value
++ * to greyscale no matter what visual we are using.
++ */
++ if (fbi->var.grayscale)
++ red = green = blue = (19595 * red + 38470 * green +
++ 7471 * blue) >> 16;
++ switch (fbi->fix.visual) {
++ case FB_VISUAL_TRUECOLOR:
++ /*
++ * 16-bit True Colour. We encode the RGB value
++ * according to the RGB bitfield information.
++ */
++ if (regno < 16) {
++ u32 *pal = fbi->pseudo_palette;
++
++ val = _chan_to_field(red, &fbi->var.red);
++ val |= _chan_to_field(green, &fbi->var.green);
++ val |= _chan_to_field(blue, &fbi->var.blue);
++
++ pal[regno] = val;
++ ret = 0;
++ }
++ break;
++
++ case FB_VISUAL_STATIC_PSEUDOCOLOR:
++ case FB_VISUAL_PSEUDOCOLOR:
++ break;
++ }
++
++ return ret;
++}
++
++/*
++ * Function to handle custom ioctls for MXC framebuffer.
++ *
++ * @param inode inode struct
++ *
++ * @param file file struct
++ *
++ * @param cmd Ioctl command to handle
++ *
++ * @param arg User pointer to command arguments
++ *
++ * @param fbi framebuffer information pointer
++ */
++static int mxcfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
++{
++ int retval = 0;
++ int __user *argp = (void __user *)arg;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ switch (cmd) {
++ case MXCFB_SET_GBL_ALPHA:
++ {
++ struct mxcfb_gbl_alpha ga;
++
++ if (copy_from_user(&ga, (void *)arg, sizeof(ga))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (ipu_disp_set_global_alpha(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ (bool)ga.enable,
++ ga.alpha)) {
++ retval = -EINVAL;
++ break;
++ }
++
++ if (ga.enable)
++ mxc_fbi->alpha_chan_en = false;
++
++ if (ga.enable)
++ dev_dbg(fbi->device,
++ "Set global alpha of %s to %d\n",
++ fbi->fix.id, ga.alpha);
++ break;
++ }
++ case MXCFB_SET_LOC_ALPHA:
++ {
++ struct mxcfb_loc_alpha la;
++ bool bad_pixfmt =
++ ipu_ch_param_bad_alpha_pos(fbi_to_pixfmt(fbi));
++
++ if (copy_from_user(&la, (void *)arg, sizeof(la))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (la.enable && !la.alpha_in_pixel) {
++ struct fb_info *fbi_tmp;
++ ipu_channel_t ipu_ch;
++
++ if (bad_pixfmt) {
++ dev_err(fbi->device, "Bad pixel format "
++ "for graphics plane fb\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ mxc_fbi->alpha_chan_en = true;
++
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC)
++ ipu_ch = MEM_BG_SYNC;
++ else if (mxc_fbi->ipu_ch == MEM_BG_SYNC)
++ ipu_ch = MEM_FG_SYNC;
++ else {
++ retval = -EINVAL;
++ break;
++ }
++
++ fbi_tmp = found_registered_fb(ipu_ch, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ ((struct mxcfb_info *)(fbi_tmp->par))->alpha_chan_en = false;
++ } else
++ mxc_fbi->alpha_chan_en = false;
++
++ if (ipu_disp_set_global_alpha(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ !(bool)la.enable, 0)) {
++ retval = -EINVAL;
++ break;
++ }
++
++ fbi->var.activate = (fbi->var.activate & ~FB_ACTIVATE_MASK) |
++ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
++ mxcfb_set_par(fbi);
++
++ la.alpha_phy_addr0 = mxc_fbi->alpha_phy_addr0;
++ la.alpha_phy_addr1 = mxc_fbi->alpha_phy_addr1;
++ if (copy_to_user((void *)arg, &la, sizeof(la))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (la.enable)
++ dev_dbg(fbi->device,
++ "Enable DP local alpha for %s\n",
++ fbi->fix.id);
++ break;
++ }
++ case MXCFB_SET_LOC_ALP_BUF:
++ {
++ unsigned long base;
++ uint32_t ipu_alp_ch_irq;
++
++ if (!(((mxc_fbi->ipu_ch == MEM_FG_SYNC) ||
++ (mxc_fbi->ipu_ch == MEM_BG_SYNC)) &&
++ (mxc_fbi->alpha_chan_en))) {
++ dev_err(fbi->device,
++ "Should use background or overlay "
++ "framebuffer to set the alpha buffer "
++ "number\n");
++ return -EINVAL;
++ }
++
++ if (get_user(base, argp))
++ return -EFAULT;
++
++ if (base != mxc_fbi->alpha_phy_addr0 &&
++ base != mxc_fbi->alpha_phy_addr1) {
++ dev_err(fbi->device,
++ "Wrong alpha buffer physical address "
++ "%lu\n", base);
++ return -EINVAL;
++ }
++
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC)
++ ipu_alp_ch_irq = IPU_IRQ_FG_ALPHA_SYNC_EOF;
++ else
++ ipu_alp_ch_irq = IPU_IRQ_BG_ALPHA_SYNC_EOF;
++
++ retval = wait_for_completion_timeout(
++ &mxc_fbi->alpha_flip_complete, HZ/2);
++ if (retval == 0) {
++ dev_err(fbi->device, "timeout when waiting for alpha flip irq\n");
++ retval = -ETIMEDOUT;
++ break;
++ }
++
++ mxc_fbi->cur_ipu_alpha_buf =
++ !mxc_fbi->cur_ipu_alpha_buf;
++ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->
++ cur_ipu_alpha_buf,
++ base) == 0) {
++ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf);
++ ipu_clear_irq(mxc_fbi->ipu, ipu_alp_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, ipu_alp_ch_irq);
++ } else {
++ dev_err(fbi->device,
++ "Error updating %s SDC alpha buf %d "
++ "to address=0x%08lX\n",
++ fbi->fix.id,
++ mxc_fbi->cur_ipu_alpha_buf, base);
++ }
++ break;
++ }
++ case MXCFB_SET_CLR_KEY:
++ {
++ struct mxcfb_color_key key;
++ if (copy_from_user(&key, (void *)arg, sizeof(key))) {
++ retval = -EFAULT;
++ break;
++ }
++ retval = ipu_disp_set_color_key(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ key.enable,
++ key.color_key);
++ dev_dbg(fbi->device, "Set color key to 0x%08X\n",
++ key.color_key);
++ break;
++ }
++ case MXCFB_SET_GAMMA:
++ {
++ struct mxcfb_gamma gamma;
++ if (copy_from_user(&gamma, (void *)arg, sizeof(gamma))) {
++ retval = -EFAULT;
++ break;
++ }
++ retval = ipu_disp_set_gamma_correction(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ gamma.enable,
++ gamma.constk,
++ gamma.slopek);
++ break;
++ }
++ case MXCFB_WAIT_FOR_VSYNC:
++ {
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ /* BG should poweron */
++ struct mxcfb_info *bg_mxcfbi = NULL;
++ struct fb_info *fbi_tmp;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ bg_mxcfbi = ((struct mxcfb_info *)(fbi_tmp->par));
++
++ if (!bg_mxcfbi) {
++ retval = -EINVAL;
++ break;
++ }
++ if (bg_mxcfbi->cur_blank != FB_BLANK_UNBLANK) {
++ retval = -EINVAL;
++ break;
++ }
++ }
++ if (mxc_fbi->cur_blank != FB_BLANK_UNBLANK) {
++ retval = -EINVAL;
++ break;
++ }
++
++ init_completion(&mxc_fbi->vsync_complete);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ retval = wait_for_completion_interruptible_timeout(
++ &mxc_fbi->vsync_complete, 1 * HZ);
++ if (retval == 0) {
++ dev_err(fbi->device,
++ "MXCFB_WAIT_FOR_VSYNC: timeout %d\n",
++ retval);
++ retval = -ETIME;
++ } else if (retval > 0) {
++ retval = 0;
++ }
++ break;
++ }
++ case FBIO_ALLOC:
++ {
++ int size;
++ struct mxcfb_alloc_list *mem;
++
++ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
++ if (mem == NULL)
++ return -ENOMEM;
++
++ if (get_user(size, argp))
++ return -EFAULT;
++
++ mem->size = PAGE_ALIGN(size);
++
++ mem->cpu_addr = dma_alloc_coherent(fbi->device, size,
++ &mem->phy_addr,
++ GFP_KERNEL);
++ if (mem->cpu_addr == NULL) {
++ kfree(mem);
++ return -ENOMEM;
++ }
++
++ list_add(&mem->list, &fb_alloc_list);
++
++ dev_dbg(fbi->device, "allocated %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++
++ if (put_user(mem->phy_addr, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case FBIO_FREE:
++ {
++ unsigned long offset;
++ struct mxcfb_alloc_list *mem;
++
++ if (get_user(offset, argp))
++ return -EFAULT;
++
++ retval = -EINVAL;
++ list_for_each_entry(mem, &fb_alloc_list, list) {
++ if (mem->phy_addr == offset) {
++ list_del(&mem->list);
++ dma_free_coherent(fbi->device,
++ mem->size,
++ mem->cpu_addr,
++ mem->phy_addr);
++ kfree(mem);
++ retval = 0;
++ break;
++ }
++ }
++
++ break;
++ }
++ case MXCFB_SET_OVERLAY_POS:
++ {
++ struct mxcfb_pos pos;
++ struct fb_info *bg_fbi = NULL;
++ struct mxcfb_info *bg_mxcfbi = NULL;
++
++ if (mxc_fbi->ipu_ch != MEM_FG_SYNC) {
++ dev_err(fbi->device, "Should use the overlay "
++ "framebuffer to set the position of "
++ "the overlay window\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (copy_from_user(&pos, (void *)arg, sizeof(pos))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ bg_fbi = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (bg_fbi)
++ bg_mxcfbi = ((struct mxcfb_info *)(bg_fbi->par));
++
++ if (bg_fbi == NULL) {
++ dev_err(fbi->device, "Cannot find the "
++ "background framebuffer\n");
++ retval = -ENOENT;
++ break;
++ }
++
++ /* if fb is unblank, check if the pos fit the display */
++ if (mxc_fbi->cur_blank == FB_BLANK_UNBLANK) {
++ if (fbi->var.xres + pos.x > bg_fbi->var.xres) {
++ if (bg_fbi->var.xres < fbi->var.xres)
++ pos.x = 0;
++ else
++ pos.x = bg_fbi->var.xres - fbi->var.xres;
++ }
++ if (fbi->var.yres + pos.y > bg_fbi->var.yres) {
++ if (bg_fbi->var.yres < fbi->var.yres)
++ pos.y = 0;
++ else
++ pos.y = bg_fbi->var.yres - fbi->var.yres;
++ }
++ }
++
++ retval = ipu_disp_set_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ pos.x, pos.y);
++
++ if (copy_to_user((void *)arg, &pos, sizeof(pos))) {
++ retval = -EFAULT;
++ break;
++ }
++ break;
++ }
++ case MXCFB_GET_FB_IPU_CHAN:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_ch, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_DIFMT:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_di_pix_fmt, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_FB_IPU_DI:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_di, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_FB_BLANK:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->cur_blank, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_SET_DIFMT:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (get_user(mxc_fbi->ipu_di_pix_fmt, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case MXCFB_CSC_UPDATE:
++ {
++ struct mxcfb_csc_matrix csc;
++
++ if (copy_from_user(&csc, (void *) arg, sizeof(csc)))
++ return -EFAULT;
++
++ if ((mxc_fbi->ipu_ch != MEM_FG_SYNC) &&
++ (mxc_fbi->ipu_ch != MEM_BG_SYNC) &&
++ (mxc_fbi->ipu_ch != MEM_BG_ASYNC0))
++ return -EFAULT;
++ ipu_set_csc_coefficients(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ csc.param);
++ }
++ default:
++ retval = -EINVAL;
++ }
++ return retval;
++}
++
++/*
++ * mxcfb_blank():
++ * Blank the display.
++ */
++static int mxcfb_blank(int blank, struct fb_info *info)
++{
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par;
++ int ret = 0;
++
++ dev_dbg(info->device, "blank = %d\n", blank);
++
++ if (mxc_fbi->cur_blank == blank)
++ return 0;
++
++ mxc_fbi->next_blank = blank;
++
++ switch (blank) {
++ case FB_BLANK_POWERDOWN:
++ case FB_BLANK_VSYNC_SUSPEND:
++ case FB_BLANK_HSYNC_SUSPEND:
++ case FB_BLANK_NORMAL:
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->disable)
++ mxc_fbi->dispdrv->drv->disable(mxc_fbi->dispdrv);
++ ipu_disable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, true);
++ if (mxc_fbi->ipu_di >= 0)
++ ipu_uninit_sync_panel(mxc_fbi->ipu, mxc_fbi->ipu_di);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ break;
++ case FB_BLANK_UNBLANK:
++ info->var.activate = (info->var.activate & ~FB_ACTIVATE_MASK) |
++ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
++ ret = mxcfb_set_par(info);
++ break;
++ }
++ if (!ret)
++ mxc_fbi->cur_blank = blank;
++ return ret;
++}
++
++/*
++ * Pan or Wrap the Display
++ *
++ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
++ *
++ * @param var Variable screen buffer information
++ * @param info Framebuffer information pointer
++ */
++static int
++mxcfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par,
++ *mxc_graphic_fbi = NULL;
++ u_int y_bottom;
++ unsigned int fr_xoff, fr_yoff, fr_w, fr_h;
++ unsigned long base, active_alpha_phy_addr = 0;
++ bool loc_alpha_en = false;
++ int fb_stride;
++ int i;
++ int ret;
++
++ /* no pan display during fb blank */
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ struct mxcfb_info *bg_mxcfbi = NULL;
++ struct fb_info *fbi_tmp;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ bg_mxcfbi = ((struct mxcfb_info *)(fbi_tmp->par));
++ if (!bg_mxcfbi)
++ return -EINVAL;
++ if (bg_mxcfbi->cur_blank != FB_BLANK_UNBLANK)
++ return -EINVAL;
++ }
++ if (mxc_fbi->cur_blank != FB_BLANK_UNBLANK)
++ return -EINVAL;
++
++ y_bottom = var->yoffset;
++
++ if (y_bottom > info->var.yres_virtual)
++ return -EINVAL;
++
++ switch (fbi_to_pixfmt(info)) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YUV444P:
++ fb_stride = info->var.xres_virtual;
++ break;
++ default:
++ fb_stride = info->fix.line_length;
++ }
++
++ base = info->fix.smem_start;
++ fr_xoff = var->xoffset;
++ fr_w = info->var.xres_virtual;
++ if (!(var->vmode & FB_VMODE_YWRAP)) {
++ dev_dbg(info->device, "Y wrap disabled\n");
++ fr_yoff = var->yoffset % info->var.yres;
++ fr_h = info->var.yres;
++ base += info->fix.line_length * info->var.yres *
++ (var->yoffset / info->var.yres);
++ } else {
++ dev_dbg(info->device, "Y wrap enabled\n");
++ fr_yoff = var->yoffset;
++ fr_h = info->var.yres_virtual;
++ }
++ base += fr_yoff * fb_stride + fr_xoff;
++
++ /* Check if DP local alpha is enabled and find the graphic fb */
++ if (mxc_fbi->ipu_ch == MEM_BG_SYNC || mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ for (i = 0; i < num_registered_fb; i++) {
++ char bg_id[] = "DISP3 BG";
++ char fg_id[] = "DISP3 FG";
++ char *idstr = registered_fb[i]->fix.id;
++ bg_id[4] += mxc_fbi->ipu_id;
++ fg_id[4] += mxc_fbi->ipu_id;
++ if ((strcmp(idstr, bg_id) == 0 ||
++ strcmp(idstr, fg_id) == 0) &&
++ ((struct mxcfb_info *)
++ (registered_fb[i]->par))->alpha_chan_en) {
++ loc_alpha_en = true;
++ mxc_graphic_fbi = (struct mxcfb_info *)
++ (registered_fb[i]->par);
++ active_alpha_phy_addr =
++ mxc_fbi->cur_ipu_alpha_buf ?
++ mxc_graphic_fbi->alpha_phy_addr1 :
++ mxc_graphic_fbi->alpha_phy_addr0;
++ dev_dbg(info->device, "Updating SDC alpha "
++ "buf %d address=0x%08lX\n",
++ !mxc_fbi->cur_ipu_alpha_buf,
++ active_alpha_phy_addr);
++ break;
++ }
++ }
++ }
++
++ ret = wait_for_completion_timeout(&mxc_fbi->flip_complete, HZ/2);
++ if (ret == 0) {
++ dev_err(info->device, "timeout when waiting for flip irq\n");
++ return -ETIMEDOUT;
++ }
++
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ mxc_fbi->cur_ipu_alpha_buf = !mxc_fbi->cur_ipu_alpha_buf;
++
++ dev_dbg(info->device, "Updating SDC %s buf %d address=0x%08lX\n",
++ info->fix.id, mxc_fbi->cur_ipu_buf, base);
++
++ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ mxc_fbi->cur_ipu_buf, base) == 0) {
++ /* Update the DP local alpha buffer only for graphic plane */
++ if (loc_alpha_en && mxc_graphic_fbi == mxc_fbi &&
++ ipu_update_channel_buffer(mxc_graphic_fbi->ipu, mxc_graphic_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf,
++ active_alpha_phy_addr) == 0) {
++ ipu_select_buffer(mxc_graphic_fbi->ipu, mxc_graphic_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf);
++ }
++
++ /* update u/v offset */
++ ipu_update_channel_offset(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(info),
++ fr_w,
++ fr_h,
++ fr_w,
++ 0, 0,
++ fr_yoff,
++ fr_xoff);
++
++ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ mxc_fbi->cur_ipu_buf);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ } else {
++ dev_err(info->device,
++ "Error updating SDC buf %d to address=0x%08lX, "
++ "current buf %d, buf0 ready %d, buf1 ready %d, "
++ "buf2 ready %d\n", mxc_fbi->cur_ipu_buf, base,
++ ipu_get_cur_buffer_idx(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 0),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 1),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 2));
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ mxc_fbi->cur_ipu_alpha_buf = !mxc_fbi->cur_ipu_alpha_buf;
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ return -EBUSY;
++ }
++
++ dev_dbg(info->device, "Update complete\n");
++
++ info->var.yoffset = var->yoffset;
++
++ return 0;
++}
++
++/*
++ * Function to handle custom mmap for MXC framebuffer.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @param vma Pointer to vm_area_struct
++ */
++static int mxcfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
++{
++ bool found = false;
++ u32 len;
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ struct mxcfb_alloc_list *mem;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ if (offset < fbi->fix.smem_len) {
++ /* mapping framebuffer memory */
++ len = fbi->fix.smem_len - offset;
++ vma->vm_pgoff = (fbi->fix.smem_start + offset) >> PAGE_SHIFT;
++ } else if ((vma->vm_pgoff ==
++ (mxc_fbi->alpha_phy_addr0 >> PAGE_SHIFT)) ||
++ (vma->vm_pgoff ==
++ (mxc_fbi->alpha_phy_addr1 >> PAGE_SHIFT))) {
++ len = mxc_fbi->alpha_mem_len;
++ } else {
++ list_for_each_entry(mem, &fb_alloc_list, list) {
++ if (offset == mem->phy_addr) {
++ found = true;
++ len = mem->size;
++ break;
++ }
++ }
++ if (!found)
++ return -EINVAL;
++ }
++
++ len = PAGE_ALIGN(len);
++ if (vma->vm_end - vma->vm_start > len)
++ return -EINVAL;
++
++ /* make buffers bufferable */
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ vma->vm_flags |= VM_IO;
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
++ dev_dbg(fbi->device, "mmap remap_pfn_range failed\n");
++ return -ENOBUFS;
++ }
++
++ return 0;
++}
++
++/*!
++ * This structure contains the pointers to the control functions that are
++ * invoked by the core framebuffer driver to perform operations like
++ * blitting, rectangle filling, copy regions and cursor definition.
++ */
++static struct fb_ops mxcfb_ops = {
++ .owner = THIS_MODULE,
++ .fb_set_par = mxcfb_set_par,
++ .fb_check_var = mxcfb_check_var,
++ .fb_setcolreg = mxcfb_setcolreg,
++ .fb_pan_display = mxcfb_pan_display,
++ .fb_ioctl = mxcfb_ioctl,
++ .fb_mmap = mxcfb_mmap,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_blank = mxcfb_blank,
++};
++
++static irqreturn_t mxcfb_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->flip_complete);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mxcfb_nf_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->vsync_complete);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mxcfb_alpha_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->alpha_flip_complete);
++ return IRQ_HANDLED;
++}
++
++/*
++ * Suspends the framebuffer and blanks the screen. Power management support
++ */
++static int mxcfb_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++ int saved_blank;
++#ifdef CONFIG_FB_MXC_LOW_PWR_DISPLAY
++ void *fbmem;
++#endif
++
++ if (mxc_fbi->ovfbi) {
++ struct mxcfb_info *mxc_fbi_fg =
++ (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++
++ console_lock();
++ fb_set_suspend(mxc_fbi->ovfbi, 1);
++ saved_blank = mxc_fbi_fg->cur_blank;
++ mxcfb_blank(FB_BLANK_POWERDOWN, mxc_fbi->ovfbi);
++ mxc_fbi_fg->next_blank = saved_blank;
++ console_unlock();
++ }
++
++ console_lock();
++ fb_set_suspend(fbi, 1);
++ saved_blank = mxc_fbi->cur_blank;
++ mxcfb_blank(FB_BLANK_POWERDOWN, fbi);
++ mxc_fbi->next_blank = saved_blank;
++ console_unlock();
++
++ return 0;
++}
++
++/*
++ * Resumes the framebuffer and unblanks the screen. Power management support
++ */
++static int mxcfb_resume(struct platform_device *pdev)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ console_lock();
++ mxcfb_blank(mxc_fbi->next_blank, fbi);
++ fb_set_suspend(fbi, 0);
++ console_unlock();
++
++ if (mxc_fbi->ovfbi) {
++ struct mxcfb_info *mxc_fbi_fg =
++ (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++ console_lock();
++ mxcfb_blank(mxc_fbi_fg->next_blank, mxc_fbi->ovfbi);
++ fb_set_suspend(mxc_fbi->ovfbi, 0);
++ console_unlock();
++ }
++
++ return 0;
++}
++
++/*
++ * Main framebuffer functions
++ */
++
++/*!
++ * Allocates the DRAM memory for the frame buffer. This buffer is remapped
++ * into a non-cached, non-buffered, memory region to allow palette and pixel
++ * writes to occur without flushing the cache. Once this area is remapped,
++ * all virtual memory access to the video memory should occur at the new region.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxcfb_map_video_memory(struct fb_info *fbi)
++{
++ if (fbi->fix.smem_len < fbi->var.yres_virtual * fbi->fix.line_length)
++ fbi->fix.smem_len = fbi->var.yres_virtual *
++ fbi->fix.line_length;
++
++ fbi->screen_base = dma_alloc_writecombine(fbi->device,
++ fbi->fix.smem_len,
++ (dma_addr_t *)&fbi->fix.smem_start,
++ GFP_DMA | GFP_KERNEL);
++ if (fbi->screen_base == 0) {
++ dev_err(fbi->device, "Unable to allocate framebuffer memory\n");
++ fbi->fix.smem_len = 0;
++ fbi->fix.smem_start = 0;
++ return -EBUSY;
++ }
++
++ dev_dbg(fbi->device, "allocated fb @ paddr=0x%08X, size=%d.\n",
++ (uint32_t) fbi->fix.smem_start, fbi->fix.smem_len);
++
++ fbi->screen_size = fbi->fix.smem_len;
++
++ /* Clear the screen */
++ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
++
++ return 0;
++}
++
++/*!
++ * De-allocates the DRAM memory for the frame buffer.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxcfb_unmap_video_memory(struct fb_info *fbi)
++{
++ dma_free_writecombine(fbi->device, fbi->fix.smem_len,
++ fbi->screen_base, fbi->fix.smem_start);
++ fbi->screen_base = 0;
++ fbi->fix.smem_start = 0;
++ fbi->fix.smem_len = 0;
++ return 0;
++}
++
++/*!
++ * Initializes the framebuffer information pointer. After allocating
++ * sufficient memory for the framebuffer structure, the fields are
++ * filled with custom information passed in from the configurable
++ * structures. This includes information such as bits per pixel,
++ * color maps, screen width/height and RGBA offsets.
++ *
++ * @return Framebuffer structure initialized with our information
++ */
++static struct fb_info *mxcfb_init_fbinfo(struct device *dev, struct fb_ops *ops)
++{
++ struct fb_info *fbi;
++ struct mxcfb_info *mxcfbi;
++
++ /*
++ * Allocate sufficient memory for the fb structure
++ */
++ fbi = framebuffer_alloc(sizeof(struct mxcfb_info), dev);
++ if (!fbi)
++ return NULL;
++
++ mxcfbi = (struct mxcfb_info *)fbi->par;
++
++ fbi->var.activate = FB_ACTIVATE_NOW;
++
++ fbi->fbops = ops;
++ fbi->flags = FBINFO_FLAG_DEFAULT;
++ fbi->pseudo_palette = mxcfbi->pseudo_palette;
++
++ /*
++ * Allocate colormap
++ */
++ fb_alloc_cmap(&fbi->cmap, 16, 0);
++
++ return fbi;
++}
++
++static ssize_t show_disp_chan(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++
++ if (mxcfbi->ipu_ch == MEM_BG_SYNC)
++ return sprintf(buf, "2-layer-fb-bg\n");
++ else if (mxcfbi->ipu_ch == MEM_FG_SYNC)
++ return sprintf(buf, "2-layer-fb-fg\n");
++ else if (mxcfbi->ipu_ch == MEM_DC_SYNC)
++ return sprintf(buf, "1-layer-fb\n");
++ else
++ return sprintf(buf, "err: no display chan\n");
++}
++
++static ssize_t swap_disp_chan(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++ struct mxcfb_info *fg_mxcfbi = NULL;
++
++ console_lock();
++ /* swap only happen between DP-BG and DC, while DP-FG disable */
++ if (((mxcfbi->ipu_ch == MEM_BG_SYNC) &&
++ (strstr(buf, "1-layer-fb") != NULL)) ||
++ ((mxcfbi->ipu_ch == MEM_DC_SYNC) &&
++ (strstr(buf, "2-layer-fb-bg") != NULL))) {
++ struct fb_info *fbi_fg;
++
++ fbi_fg = found_registered_fb(MEM_FG_SYNC, mxcfbi->ipu_id);
++ if (fbi_fg)
++ fg_mxcfbi = (struct mxcfb_info *)fbi_fg->par;
++
++ if (!fg_mxcfbi ||
++ fg_mxcfbi->cur_blank == FB_BLANK_UNBLANK) {
++ dev_err(dev,
++ "Can not switch while fb2(fb-fg) is on.\n");
++ console_unlock();
++ return count;
++ }
++
++ if (swap_channels(info) < 0)
++ dev_err(dev, "Swap display channel failed.\n");
++ }
++
++ console_unlock();
++ return count;
++}
++static DEVICE_ATTR(fsl_disp_property, S_IWUSR | S_IRUGO,
++ show_disp_chan, swap_disp_chan);
++
++static ssize_t show_disp_dev(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++
++ if (mxcfbi->ipu_ch == MEM_FG_SYNC)
++ return sprintf(buf, "overlay\n");
++ else
++ return sprintf(buf, "%s\n", mxcfbi->dispdrv->drv->name);
++}
++static DEVICE_ATTR(fsl_disp_dev_property, S_IRUGO, show_disp_dev, NULL);
++
++static int mxcfb_dispdrv_init(struct platform_device *pdev,
++ struct fb_info *fbi)
++{
++ struct ipuv3_fb_platform_data *plat_data = pdev->dev.platform_data;
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++ struct mxc_dispdrv_setting setting;
++ char disp_dev[32], *default_dev = "lcd";
++ int ret = 0;
++
++ setting.if_fmt = plat_data->interface_pix_fmt;
++ setting.dft_mode_str = plat_data->mode_str;
++ setting.default_bpp = plat_data->default_bpp;
++ if (!setting.default_bpp)
++ setting.default_bpp = 16;
++ setting.fbi = fbi;
++ if (!strlen(plat_data->disp_dev)) {
++ memcpy(disp_dev, default_dev, strlen(default_dev));
++ disp_dev[strlen(default_dev)] = '\0';
++ } else {
++ memcpy(disp_dev, plat_data->disp_dev,
++ strlen(plat_data->disp_dev));
++ disp_dev[strlen(plat_data->disp_dev)] = '\0';
++ }
++
++ dev_info(&pdev->dev, "register mxc display driver %s\n", disp_dev);
++
++ mxcfbi->dispdrv = mxc_dispdrv_gethandle(disp_dev, &setting);
++ if (IS_ERR(mxcfbi->dispdrv)) {
++ ret = PTR_ERR(mxcfbi->dispdrv);
++ dev_err(&pdev->dev, "NO mxc display driver found!\n");
++ return ret;
++ } else {
++ /* fix-up */
++ mxcfbi->ipu_di_pix_fmt = setting.if_fmt;
++ mxcfbi->default_bpp = setting.default_bpp;
++
++ /* setting */
++ mxcfbi->ipu_id = setting.dev_id;
++ mxcfbi->ipu_di = setting.disp_id;
++ dev_dbg(&pdev->dev, "di_pixfmt:0x%x, bpp:0x%x, di:%d, ipu:%d\n",
++ setting.if_fmt, setting.default_bpp,
++ setting.disp_id, setting.dev_id);
++ }
++
++ return ret;
++}
++
++/*
++ * Parse user specified options (`video=trident:')
++ * example:
++ * video=mxcfb0:dev=lcd,800x480M-16@55,if=RGB565,bpp=16,noaccel
++ * video=mxcfb0:dev=lcd,800x480M-16@55,if=RGB565,fbpix=RGB565
++ */
++static int mxcfb_option_setup(struct platform_device *pdev, struct fb_info *fbi)
++{
++ struct ipuv3_fb_platform_data *pdata = pdev->dev.platform_data;
++ char *options, *opt, *fb_mode_str = NULL;
++ char name[] = "mxcfb0";
++ uint32_t fb_pix_fmt = 0;
++
++ name[5] += pdev->id;
++ if (fb_get_options(name, &options)) {
++ dev_err(&pdev->dev, "Can't get fb option for %s!\n", name);
++ return -ENODEV;
++ }
++
++ if (!options || !*options)
++ return 0;
++
++ while ((opt = strsep(&options, ",")) != NULL) {
++ if (!*opt)
++ continue;
++
++ if (!strncmp(opt, "dev=", 4)) {
++ memcpy(pdata->disp_dev, opt + 4, strlen(opt) - 4);
++ pdata->disp_dev[strlen(opt) - 4] = '\0';
++ } else if (!strncmp(opt, "if=", 3)) {
++ if (!strncmp(opt+3, "RGB24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(opt+3, "BGR24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(opt+3, "GBR24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(opt+3, "RGB565", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(opt+3, "RGB666", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(opt+3, "YUV444", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(opt+3, "LVDS666", 7))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(opt+3, "YUYV16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(opt+3, "UYVY16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(opt+3, "YVYU16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(opt+3, "VYUY16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_VYUY;
++ } else if (!strncmp(opt, "fbpix=", 6)) {
++ if (!strncmp(opt+6, "RGB24", 5))
++ fb_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(opt+6, "BGR24", 5))
++ fb_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(opt+6, "RGB32", 5))
++ fb_pix_fmt = IPU_PIX_FMT_RGB32;
++ else if (!strncmp(opt+6, "BGR32", 5))
++ fb_pix_fmt = IPU_PIX_FMT_BGR32;
++ else if (!strncmp(opt+6, "ABGR32", 6))
++ fb_pix_fmt = IPU_PIX_FMT_ABGR32;
++ else if (!strncmp(opt+6, "RGB565", 6))
++ fb_pix_fmt = IPU_PIX_FMT_RGB565;
++
++ if (fb_pix_fmt) {
++ pixfmt_to_var(fb_pix_fmt, &fbi->var);
++ pdata->default_bpp =
++ fbi->var.bits_per_pixel;
++ }
++ } else if (!strncmp(opt, "int_clk", 7)) {
++ pdata->int_clk = true;
++ continue;
++ } else if (!strncmp(opt, "bpp=", 4)) {
++ /* bpp setting cannot overwirte fbpix setting */
++ if (fb_pix_fmt)
++ continue;
++
++ pdata->default_bpp =
++ simple_strtoul(opt + 4, NULL, 0);
++
++ fb_pix_fmt = bpp_to_pixfmt(pdata->default_bpp);
++ if (fb_pix_fmt)
++ pixfmt_to_var(fb_pix_fmt, &fbi->var);
++ } else
++ fb_mode_str = opt;
++ }
++
++ if (fb_mode_str)
++ pdata->mode_str = fb_mode_str;
++
++ return 0;
++}
++
++static int mxcfb_register(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++ struct fb_videomode m;
++ int ret = 0;
++ char bg0_id[] = "DISP3 BG";
++ char bg1_id[] = "DISP3 BG - DI1";
++ char fg_id[] = "DISP3 FG";
++
++ if (mxcfbi->ipu_di == 0) {
++ bg0_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, bg0_id);
++ } else if (mxcfbi->ipu_di == 1) {
++ bg1_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, bg1_id);
++ } else { /* Overlay */
++ fg_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, fg_id);
++ }
++
++ mxcfb_check_var(&fbi->var, fbi);
++
++ mxcfb_set_fix(fbi);
++
++ /* Added first mode to fbi modelist. */
++ if (!fbi->modelist.next || !fbi->modelist.prev)
++ INIT_LIST_HEAD(&fbi->modelist);
++ fb_var_to_videomode(&m, &fbi->var);
++ fb_add_videomode(&m, &fbi->modelist);
++
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT, MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering EOF irq handler.\n");
++ ret = -EBUSY;
++ goto err0;
++ }
++ ipu_disable_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq);
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT, MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering NFACK irq handler.\n");
++ ret = -EBUSY;
++ goto err1;
++ }
++ ipu_disable_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq);
++
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq,
++ mxcfb_alpha_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering alpha irq "
++ "handler.\n");
++ ret = -EBUSY;
++ goto err2;
++ }
++
++ if (!mxcfbi->late_init) {
++ fbi->var.activate |= FB_ACTIVATE_FORCE;
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ ret = fb_set_var(fbi, &fbi->var);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++ if (ret < 0) {
++ dev_err(fbi->device, "Error fb_set_var ret:%d\n", ret);
++ goto err3;
++ }
++
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ ret = fb_blank(fbi, FB_BLANK_UNBLANK);
++ console_unlock();
++ if (ret < 0) {
++ dev_err(fbi->device,
++ "Error fb_blank ret:%d\n", ret);
++ goto err4;
++ }
++ }
++ } else {
++ /*
++ * Setup the channel again though bootloader
++ * has done this, then set_par() can stop the
++ * channel neatly and re-initialize it .
++ */
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ _setup_disp_channel1(fbi);
++ ipu_enable_channel(mxcfbi->ipu, mxcfbi->ipu_ch);
++ console_unlock();
++ }
++ }
++
++
++ ret = register_framebuffer(fbi);
++ if (ret < 0)
++ goto err5;
++
++ return ret;
++err5:
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ if (!mxcfbi->late_init)
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ else {
++ ipu_disable_channel(mxcfbi->ipu, mxcfbi->ipu_ch,
++ true);
++ ipu_uninit_channel(mxcfbi->ipu, mxcfbi->ipu_ch);
++ }
++ console_unlock();
++ }
++err4:
++err3:
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq, fbi);
++err2:
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq, fbi);
++err1:
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq, fbi);
++err0:
++ return ret;
++}
++
++static void mxcfb_unregister(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq, fbi);
++ if (mxcfbi->ipu_ch_irq)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq, fbi);
++ if (mxcfbi->ipu_ch_nf_irq)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq, fbi);
++
++ unregister_framebuffer(fbi);
++}
++
++static int mxcfb_setup_overlay(struct platform_device *pdev,
++ struct fb_info *fbi_bg, struct resource *res)
++{
++ struct fb_info *ovfbi;
++ struct mxcfb_info *mxcfbi_bg = (struct mxcfb_info *)fbi_bg->par;
++ struct mxcfb_info *mxcfbi_fg;
++ int ret = 0;
++
++ ovfbi = mxcfb_init_fbinfo(&pdev->dev, &mxcfb_ops);
++ if (!ovfbi) {
++ ret = -ENOMEM;
++ goto init_ovfbinfo_failed;
++ }
++ mxcfbi_fg = (struct mxcfb_info *)ovfbi->par;
++
++ mxcfbi_fg->ipu = ipu_get_soc(mxcfbi_bg->ipu_id);
++ if (IS_ERR(mxcfbi_fg->ipu)) {
++ ret = -ENODEV;
++ goto get_ipu_failed;
++ }
++ mxcfbi_fg->ipu_id = mxcfbi_bg->ipu_id;
++ mxcfbi_fg->ipu_ch_irq = IPU_IRQ_FG_SYNC_EOF;
++ mxcfbi_fg->ipu_ch_nf_irq = IPU_IRQ_FG_SYNC_NFACK;
++ mxcfbi_fg->ipu_alp_ch_irq = IPU_IRQ_FG_ALPHA_SYNC_EOF;
++ mxcfbi_fg->ipu_ch = MEM_FG_SYNC;
++ mxcfbi_fg->ipu_di = -1;
++ mxcfbi_fg->ipu_di_pix_fmt = mxcfbi_bg->ipu_di_pix_fmt;
++ mxcfbi_fg->overlay = true;
++ mxcfbi_fg->cur_blank = mxcfbi_fg->next_blank = FB_BLANK_POWERDOWN;
++
++ /* Need dummy values until real panel is configured */
++ ovfbi->var.xres = 240;
++ ovfbi->var.yres = 320;
++
++ if (res && res->start && res->end) {
++ ovfbi->fix.smem_len = res->end - res->start + 1;
++ ovfbi->fix.smem_start = res->start;
++ ovfbi->screen_base = ioremap(
++ ovfbi->fix.smem_start,
++ ovfbi->fix.smem_len);
++ }
++
++ ret = mxcfb_register(ovfbi);
++ if (ret < 0)
++ goto register_ov_failed;
++
++ mxcfbi_bg->ovfbi = ovfbi;
++
++ return ret;
++
++register_ov_failed:
++get_ipu_failed:
++ fb_dealloc_cmap(&ovfbi->cmap);
++ framebuffer_release(ovfbi);
++init_ovfbinfo_failed:
++ return ret;
++}
++
++static void mxcfb_unsetup_overlay(struct fb_info *fbi_bg)
++{
++ struct mxcfb_info *mxcfbi_bg = (struct mxcfb_info *)fbi_bg->par;
++ struct fb_info *ovfbi = mxcfbi_bg->ovfbi;
++
++ mxcfb_unregister(ovfbi);
++
++ if (&ovfbi->cmap)
++ fb_dealloc_cmap(&ovfbi->cmap);
++ framebuffer_release(ovfbi);
++}
++
++static bool ipu_usage[2][2];
++static int ipu_test_set_usage(int ipu, int di)
++{
++ if (ipu_usage[ipu][di])
++ return -EBUSY;
++ else
++ ipu_usage[ipu][di] = true;
++ return 0;
++}
++
++static void ipu_clear_usage(int ipu, int di)
++{
++ ipu_usage[ipu][di] = false;
++}
++
++static int mxcfb_get_of_property(struct platform_device *pdev,
++ struct ipuv3_fb_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ const char *disp_dev;
++ const char *mode_str;
++ const char *pixfmt;
++ int err;
++ int len;
++ u32 bpp, int_clk;
++ u32 late_init;
++
++ err = of_property_read_string(np, "disp_dev", &disp_dev);
++ if (err < 0) {
++ dev_dbg(&pdev->dev, "get of property disp_dev fail\n");
++ return err;
++ }
++ err = of_property_read_string(np, "mode_str", &mode_str);
++ if (err < 0) {
++ dev_dbg(&pdev->dev, "get of property mode_str fail\n");
++ return err;
++ }
++ err = of_property_read_string(np, "interface_pix_fmt", &pixfmt);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property pix fmt fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "default_bpp", &bpp);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property bpp fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "int_clk", &int_clk);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property int_clk fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "late_init", &late_init);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property late_init fail\n");
++ return err;
++ }
++
++ if (!strncmp(pixfmt, "RGB24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(pixfmt, "BGR24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(pixfmt, "GBR24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(pixfmt, "RGB565", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(pixfmt, "RGB666", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(pixfmt, "YUV444", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(pixfmt, "LVDS666", 7))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(pixfmt, "YUYV16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(pixfmt, "UYVY16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(pixfmt, "YVYU16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(pixfmt, "VYUY16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_VYUY;
++ else {
++ dev_err(&pdev->dev, "err interface_pix_fmt!\n");
++ return -ENOENT;
++ }
++
++ len = min(sizeof(plat_data->disp_dev) - 1, strlen(disp_dev));
++ memcpy(plat_data->disp_dev, disp_dev, len);
++ plat_data->disp_dev[len] = '\0';
++ plat_data->mode_str = (char *)mode_str;
++ plat_data->default_bpp = bpp;
++ plat_data->int_clk = (bool)int_clk;
++ plat_data->late_init = (bool)late_init;
++ return err;
++}
++
++/*!
++ * Probe routine for the framebuffer driver. It is called during the
++ * driver binding process. The following functions are performed in
++ * this routine: Framebuffer initialization, Memory allocation and
++ * mapping, Framebuffer registration, IPU initialization.
++ *
++ * @return Appropriate error code to the kernel common code
++ */
++static int mxcfb_probe(struct platform_device *pdev)
++{
++ struct ipuv3_fb_platform_data *plat_data;
++ struct fb_info *fbi;
++ struct mxcfb_info *mxcfbi;
++ struct resource *res;
++ int ret = 0;
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ pdev->id = of_alias_get_id(pdev->dev.of_node, "mxcfb");
++ if (pdev->id < 0) {
++ dev_err(&pdev->dev, "can not get alias id\n");
++ return pdev->id;
++ }
++
++ plat_data = devm_kzalloc(&pdev->dev, sizeof(struct
++ ipuv3_fb_platform_data), GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++
++ ret = mxcfb_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get mxcfb of property fail\n");
++ return ret;
++ }
++
++ /* Initialize FB structures */
++ fbi = mxcfb_init_fbinfo(&pdev->dev, &mxcfb_ops);
++ if (!fbi) {
++ ret = -ENOMEM;
++ goto init_fbinfo_failed;
++ }
++
++ ret = mxcfb_option_setup(pdev, fbi);
++ if (ret)
++ goto get_fb_option_failed;
++
++ mxcfbi = (struct mxcfb_info *)fbi->par;
++ mxcfbi->ipu_int_clk = plat_data->int_clk;
++ mxcfbi->late_init = plat_data->late_init;
++ mxcfbi->first_set_par = true;
++ ret = mxcfb_dispdrv_init(pdev, fbi);
++ if (ret < 0)
++ goto init_dispdrv_failed;
++
++ ret = ipu_test_set_usage(mxcfbi->ipu_id, mxcfbi->ipu_di);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "ipu%d-di%d already in use\n",
++ mxcfbi->ipu_id, mxcfbi->ipu_di);
++ goto ipu_in_busy;
++ }
++
++ if (mxcfbi->dispdrv->drv->post_init) {
++ ret = mxcfbi->dispdrv->drv->post_init(mxcfbi->dispdrv,
++ mxcfbi->ipu_id,
++ mxcfbi->ipu_di);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "post init failed\n");
++ goto post_init_failed;
++ }
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (res && res->start && res->end) {
++ fbi->fix.smem_len = res->end - res->start + 1;
++ fbi->fix.smem_start = res->start;
++ fbi->screen_base = ioremap(fbi->fix.smem_start, fbi->fix.smem_len);
++ /* Do not clear the fb content drawn in bootloader. */
++ if (!mxcfbi->late_init)
++ memset(fbi->screen_base, 0, fbi->fix.smem_len);
++ }
++
++ mxcfbi->ipu = ipu_get_soc(mxcfbi->ipu_id);
++ if (IS_ERR(mxcfbi->ipu)) {
++ ret = -ENODEV;
++ goto get_ipu_failed;
++ }
++
++ /* first user uses DP with alpha feature */
++ if (!g_dp_in_use[mxcfbi->ipu_id]) {
++ mxcfbi->ipu_ch_irq = IPU_IRQ_BG_SYNC_EOF;
++ mxcfbi->ipu_ch_nf_irq = IPU_IRQ_BG_SYNC_NFACK;
++ mxcfbi->ipu_alp_ch_irq = IPU_IRQ_BG_ALPHA_SYNC_EOF;
++ mxcfbi->ipu_ch = MEM_BG_SYNC;
++ /* Unblank the primary fb only by default */
++ if (pdev->id == 0)
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_UNBLANK;
++ else
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_POWERDOWN;
++
++ ret = mxcfb_register(fbi);
++ if (ret < 0)
++ goto mxcfb_register_failed;
++
++ ipu_disp_set_global_alpha(mxcfbi->ipu, mxcfbi->ipu_ch,
++ true, 0x80);
++ ipu_disp_set_color_key(mxcfbi->ipu, mxcfbi->ipu_ch, false, 0);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ ret = mxcfb_setup_overlay(pdev, fbi, res);
++
++ if (ret < 0) {
++ mxcfb_unregister(fbi);
++ goto mxcfb_setupoverlay_failed;
++ }
++
++ g_dp_in_use[mxcfbi->ipu_id] = true;
++
++ ret = device_create_file(mxcfbi->ovfbi->dev,
++ &dev_attr_fsl_disp_property);
++ if (ret)
++ dev_err(mxcfbi->ovfbi->dev, "Error %d on creating "
++ "file for disp property\n",
++ ret);
++
++ ret = device_create_file(mxcfbi->ovfbi->dev,
++ &dev_attr_fsl_disp_dev_property);
++ if (ret)
++ dev_err(mxcfbi->ovfbi->dev, "Error %d on creating "
++ "file for disp device "
++ "propety\n", ret);
++ } else {
++ mxcfbi->ipu_ch_irq = IPU_IRQ_DC_SYNC_EOF;
++ mxcfbi->ipu_ch_nf_irq = IPU_IRQ_DC_SYNC_NFACK;
++ mxcfbi->ipu_alp_ch_irq = -1;
++ mxcfbi->ipu_ch = MEM_DC_SYNC;
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_POWERDOWN;
++
++ ret = mxcfb_register(fbi);
++ if (ret < 0)
++ goto mxcfb_register_failed;
++ }
++
++ platform_set_drvdata(pdev, fbi);
++
++ ret = device_create_file(fbi->dev, &dev_attr_fsl_disp_property);
++ if (ret)
++ dev_err(&pdev->dev, "Error %d on creating file for disp "
++ "property\n", ret);
++
++ ret = device_create_file(fbi->dev, &dev_attr_fsl_disp_dev_property);
++ if (ret)
++ dev_err(&pdev->dev, "Error %d on creating file for disp "
++ " device propety\n", ret);
++
++ return 0;
++
++mxcfb_setupoverlay_failed:
++mxcfb_register_failed:
++get_ipu_failed:
++post_init_failed:
++ ipu_clear_usage(mxcfbi->ipu_id, mxcfbi->ipu_di);
++ipu_in_busy:
++init_dispdrv_failed:
++ fb_dealloc_cmap(&fbi->cmap);
++ framebuffer_release(fbi);
++get_fb_option_failed:
++init_fbinfo_failed:
++ return ret;
++}
++
++static int mxcfb_remove(struct platform_device *pdev)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ if (!fbi)
++ return 0;
++
++ device_remove_file(fbi->dev, &dev_attr_fsl_disp_dev_property);
++ device_remove_file(fbi->dev, &dev_attr_fsl_disp_property);
++ mxcfb_blank(FB_BLANK_POWERDOWN, fbi);
++ mxcfb_unregister(fbi);
++ mxcfb_unmap_video_memory(fbi);
++
++ if (mxc_fbi->ovfbi) {
++ device_remove_file(mxc_fbi->ovfbi->dev,
++ &dev_attr_fsl_disp_dev_property);
++ device_remove_file(mxc_fbi->ovfbi->dev,
++ &dev_attr_fsl_disp_property);
++ mxcfb_blank(FB_BLANK_POWERDOWN, mxc_fbi->ovfbi);
++ mxcfb_unsetup_overlay(fbi);
++ mxcfb_unmap_video_memory(mxc_fbi->ovfbi);
++ }
++
++ ipu_clear_usage(mxc_fbi->ipu_id, mxc_fbi->ipu_di);
++ if (&fbi->cmap)
++ fb_dealloc_cmap(&fbi->cmap);
++ framebuffer_release(fbi);
++ return 0;
++}
++
++static const struct of_device_id imx_mxcfb_dt_ids[] = {
++ { .compatible = "fsl,mxc_sdc_fb"},
++ { /* sentinel */ }
++};
++
++/*!
++ * This structure contains pointers to the power management callback functions.
++ */
++static struct platform_driver mxcfb_driver = {
++ .driver = {
++ .name = MXCFB_NAME,
++ .of_match_table = imx_mxcfb_dt_ids,
++ },
++ .probe = mxcfb_probe,
++ .remove = mxcfb_remove,
++ .suspend = mxcfb_suspend,
++ .resume = mxcfb_resume,
++};
++
++/*!
++ * Main entry function for the framebuffer. The function registers the power
++ * management callback functions with the kernel and also registers the MXCFB
++ * callback functions with the core Linux framebuffer driver \b fbmem.c
++ *
++ * @return Error code indicating success or failure
++ */
++int __init mxcfb_init(void)
++{
++ return platform_driver_register(&mxcfb_driver);
++}
++
++void mxcfb_exit(void)
++{
++ platform_driver_unregister(&mxcfb_driver);
++}
++
++module_init(mxcfb_init);
++module_exit(mxcfb_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MXC framebuffer driver");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("fb");
+diff -Nur linux-3.14.36/drivers/video/mxc/mxc_lcdif.c linux-openelec/drivers/video/mxc/mxc_lcdif.c
+--- linux-3.14.36/drivers/video/mxc/mxc_lcdif.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/mxc/mxc_lcdif.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,235 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/init.h>
++#include <linux/ipu.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/of_device.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/platform_device.h>
++
++#include "mxc_dispdrv.h"
++
++struct mxc_lcd_platform_data {
++ u32 default_ifmt;
++ u32 ipu_id;
++ u32 disp_id;
++};
++
++struct mxc_lcdif_data {
++ struct platform_device *pdev;
++ struct mxc_dispdrv_handle *disp_lcdif;
++};
++
++#define DISPDRV_LCD "lcd"
++
++static struct fb_videomode lcdif_modedb[] = {
++ {
++ /* 800x480 @ 57 Hz , pixel clk @ 27MHz */
++ "CLAA-WVGA", 57, 800, 480, 37037, 40, 60, 10, 10, 20, 10,
++ FB_SYNC_CLK_LAT_FALL,
++ FB_VMODE_NONINTERLACED,
++ 0,},
++ {
++ /* 800x480 @ 60 Hz , pixel clk @ 32MHz */
++ "SEIKO-WVGA", 60, 800, 480, 29850, 89, 164, 23, 10, 10, 10,
++ FB_SYNC_CLK_LAT_FALL,
++ FB_VMODE_NONINTERLACED,
++ 0,},
++};
++static int lcdif_modedb_sz = ARRAY_SIZE(lcdif_modedb);
++
++static int lcdif_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret, i;
++ struct mxc_lcdif_data *lcdif = mxc_dispdrv_getdata(disp);
++ struct mxc_lcd_platform_data *plat_data
++ = lcdif->pdev->dev.platform_data;
++ struct fb_videomode *modedb = lcdif_modedb;
++ int modedb_sz = lcdif_modedb_sz;
++
++ /* use platform defined ipu/di */
++ setting->dev_id = plat_data->ipu_id;
++ setting->disp_id = plat_data->disp_id;
++
++ ret = fb_find_mode(&setting->fbi->var, setting->fbi, setting->dft_mode_str,
++ modedb, modedb_sz, NULL, setting->default_bpp);
++ if (!ret) {
++ fb_videomode_to_var(&setting->fbi->var, &modedb[0]);
++ setting->if_fmt = plat_data->default_ifmt;
++ }
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < modedb_sz; i++) {
++ struct fb_videomode m;
++ fb_var_to_videomode(&m, &setting->fbi->var);
++ if (fb_mode_is_equal(&m, &modedb[i])) {
++ fb_add_videomode(&modedb[i],
++ &setting->fbi->modelist);
++ break;
++ }
++ }
++
++ return ret;
++}
++
++void lcdif_deinit(struct mxc_dispdrv_handle *disp)
++{
++ /*TODO*/
++}
++
++static struct mxc_dispdrv_driver lcdif_drv = {
++ .name = DISPDRV_LCD,
++ .init = lcdif_init,
++ .deinit = lcdif_deinit,
++};
++
++static int lcd_get_of_property(struct platform_device *pdev,
++ struct mxc_lcd_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ u32 ipu_id, disp_id;
++ const char *default_ifmt;
++
++ err = of_property_read_string(np, "default_ifmt", &default_ifmt);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property default_ifmt fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++
++ plat_data->ipu_id = ipu_id;
++ plat_data->disp_id = disp_id;
++ if (!strncmp(default_ifmt, "RGB24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(default_ifmt, "BGR24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(default_ifmt, "GBR24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(default_ifmt, "RGB565", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(default_ifmt, "RGB666", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(default_ifmt, "YUV444", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(default_ifmt, "LVDS666", 7))
++ plat_data->default_ifmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(default_ifmt, "YUYV16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(default_ifmt, "UYVY16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(default_ifmt, "YVYU16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(default_ifmt, "VYUY16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_VYUY;
++ else {
++ dev_err(&pdev->dev, "err default_ifmt!\n");
++ return -ENOENT;
++ }
++
++ return err;
++}
++
++static int mxc_lcdif_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct pinctrl *pinctrl;
++ struct mxc_lcdif_data *lcdif;
++ struct mxc_lcd_platform_data *plat_data;
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ lcdif = devm_kzalloc(&pdev->dev, sizeof(struct mxc_lcdif_data),
++ GFP_KERNEL);
++ if (!lcdif)
++ return -ENOMEM;
++ plat_data = devm_kzalloc(&pdev->dev,
++ sizeof(struct mxc_lcd_platform_data),
++ GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++
++ ret = lcd_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get lcd of property fail\n");
++ return ret;
++ }
++
++ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(&pdev->dev, "can't get/select pinctrl\n");
++ return PTR_ERR(pinctrl);
++ }
++
++ lcdif->pdev = pdev;
++ lcdif->disp_lcdif = mxc_dispdrv_register(&lcdif_drv);
++ mxc_dispdrv_setdata(lcdif->disp_lcdif, lcdif);
++
++ dev_set_drvdata(&pdev->dev, lcdif);
++ dev_dbg(&pdev->dev, "%s exit\n", __func__);
++
++ return ret;
++}
++
++static int mxc_lcdif_remove(struct platform_device *pdev)
++{
++ struct mxc_lcdif_data *lcdif = dev_get_drvdata(&pdev->dev);
++
++ mxc_dispdrv_puthandle(lcdif->disp_lcdif);
++ mxc_dispdrv_unregister(lcdif->disp_lcdif);
++ kfree(lcdif);
++ return 0;
++}
++
++static const struct of_device_id imx_lcd_dt_ids[] = {
++ { .compatible = "fsl,lcd"},
++ { /* sentinel */ }
++};
++static struct platform_driver mxc_lcdif_driver = {
++ .driver = {
++ .name = "mxc_lcdif",
++ .of_match_table = imx_lcd_dt_ids,
++ },
++ .probe = mxc_lcdif_probe,
++ .remove = mxc_lcdif_remove,
++};
++
++static int __init mxc_lcdif_init(void)
++{
++ return platform_driver_register(&mxc_lcdif_driver);
++}
++
++static void __exit mxc_lcdif_exit(void)
++{
++ platform_driver_unregister(&mxc_lcdif_driver);
++}
++
++module_init(mxc_lcdif_init);
++module_exit(mxc_lcdif_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX ipuv3 LCD extern port driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/drivers/video/mxsfb.c linux-openelec/drivers/video/mxsfb.c
+--- linux-3.14.36/drivers/video/mxsfb.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/drivers/video/mxsfb.c 2015-05-06 12:05:42.000000000 -0500
+@@ -96,9 +96,10 @@
+ #define CTRL_DF24 (1 << 1)
+ #define CTRL_RUN (1 << 0)
+
+-#define CTRL1_FIFO_CLEAR (1 << 21)
+-#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
+-#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
++#define CTRL1_RECOVERY_ON_UNDERFLOW (1 << 24)
++#define CTRL1_FIFO_CLEAR (1 << 21)
++#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
++#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
+
+ #define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
+ #define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
+@@ -149,8 +150,8 @@
+ #define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
+ #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
+
+-#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
+-#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negtive edge sampling */
++#define FB_SYNC_OE_LOW_ACT 0x80000000
++#define FB_SYNC_CLK_LAT_FALL 0x40000000
+
+ enum mxsfb_devtype {
+ MXSFB_V3,
+@@ -178,7 +179,6 @@
+ unsigned ld_intf_width;
+ unsigned dotclk_delay;
+ const struct mxsfb_devdata *devdata;
+- u32 sync;
+ struct regulator *reg_lcd;
+ };
+
+@@ -275,9 +275,15 @@
+ if (var->yres < MIN_YRES)
+ var->yres = MIN_YRES;
+
+- var->xres_virtual = var->xres;
++ if (var->xres_virtual > var->xres) {
++ dev_dbg(fb_info->device, "stride not supported\n");
++ return -EINVAL;
++ }
+
+- var->yres_virtual = var->yres;
++ if (var->xres_virtual < var->xres)
++ var->xres_virtual = var->xres;
++ if (var->yres_virtual < var->yres)
++ var->yres_virtual = var->yres;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+@@ -344,6 +350,9 @@
+
+ writel(CTRL_RUN, host->base + LCDC_CTRL + REG_SET);
+
++ /* Recovery on underflow */
++ writel(CTRL1_RECOVERY_ON_UNDERFLOW, host->base + LCDC_CTRL1 + REG_SET);
++
+ host->enabled = 1;
+ }
+
+@@ -392,14 +401,6 @@
+ int line_size, fb_size;
+ int reenable = 0;
+
+- line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
+- fb_size = fb_info->var.yres_virtual * line_size;
+-
+- if (fb_size > fb_info->fix.smem_len)
+- return -ENOMEM;
+-
+- fb_info->fix.line_length = line_size;
+-
+ /*
+ * It seems, you can't re-program the controller if it is still running.
+ * This may lead into shifted pictures (FIFO issue?).
+@@ -413,6 +414,19 @@
+ /* clear the FIFOs */
+ writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
+
++ line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
++ fb_info->fix.line_length = line_size;
++ fb_size = fb_info->var.yres_virtual * line_size;
++
++ /* Reallocate memory */
++ if (!fb_info->fix.smem_start || (fb_size > fb_info->fix.smem_len)) {
++ if (fb_info->fix.smem_start)
++ mxsfb_unmap_videomem(fb_info);
++
++ if (mxsfb_map_videomem(fb_info) < 0)
++ return -ENOMEM;
++ }
++
+ ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER |
+ CTRL_SET_BUS_WIDTH(host->ld_intf_width);
+
+@@ -459,9 +473,9 @@
+ vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+- if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
++ if (!(fb_info->var.sync & FB_SYNC_OE_LOW_ACT))
+ vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
+- if (host->sync & MXSFB_SYNC_DOTCLK_FALLING_ACT)
++ if (fb_info->var.sync & FB_SYNC_CLK_LAT_FALL)
+ vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
+
+ writel(vdctrl0, host->base + LCDC_VDCTRL0);
+@@ -578,6 +592,34 @@
+ return 0;
+ }
+
++static int mxsfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++ u32 len;
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++
++ if (offset < info->fix.smem_len) {
++ /* mapping framebuffer memory */
++ len = info->fix.smem_len - offset;
++ vma->vm_pgoff = (info->fix.smem_start + offset) >> PAGE_SHIFT;
++ } else
++ return -EINVAL;
++
++ len = PAGE_ALIGN(len);
++ if (vma->vm_end - vma->vm_start > len)
++ return -EINVAL;
++
++ /* make buffers bufferable */
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
++ dev_dbg(info->device, "mmap remap_pfn_range failed\n");
++ return -ENOBUFS;
++ }
++
++ return 0;
++}
++
+ static struct fb_ops mxsfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = mxsfb_check_var,
+@@ -585,6 +627,7 @@
+ .fb_setcolreg = mxsfb_setcolreg,
+ .fb_blank = mxsfb_blank,
+ .fb_pan_display = mxsfb_pan_display,
++ .fb_mmap = mxsfb_mmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+@@ -800,7 +843,62 @@
+ {
+ struct fb_info *fb_info = &host->fb_info;
+
+- free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
++ mxsfb_unmap_videomem(fb_info);
++}
++
++/*!
++ * Allocates the DRAM memory for the frame buffer. This buffer is remapped
++ * into a non-cached, non-buffered, memory region to allow palette and pixel
++ * writes to occur without flushing the cache. Once this area is remapped,
++ * all virtual memory access to the video memory should occur at the new region.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxsfb_map_videomem(struct fb_info *fbi)
++{
++ if (fbi->fix.smem_len < fbi->var.yres_virtual * fbi->fix.line_length)
++ fbi->fix.smem_len = fbi->var.yres_virtual *
++ fbi->fix.line_length;
++
++ fbi->screen_base = dma_alloc_writecombine(fbi->device,
++ fbi->fix.smem_len,
++ (dma_addr_t *)&fbi->fix.smem_start,
++ GFP_DMA | GFP_KERNEL);
++ if (fbi->screen_base == 0) {
++ dev_err(fbi->device, "Unable to allocate framebuffer memory\n");
++ fbi->fix.smem_len = 0;
++ fbi->fix.smem_start = 0;
++ return -EBUSY;
++ }
++
++ dev_dbg(fbi->device, "allocated fb @ paddr=0x%08X, size=%d.\n",
++ (uint32_t) fbi->fix.smem_start, fbi->fix.smem_len);
++
++ fbi->screen_size = fbi->fix.smem_len;
++
++ /* Clear the screen */
++ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
++
++ return 0;
++}
++
++/*!
++ * De-allocates the DRAM memory for the frame buffer.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxsfb_unmap_videomem(struct fb_info *fbi)
++{
++ dma_free_writecombine(fbi->device, fbi->fix.smem_len,
++ fbi->screen_base, fbi->fix.smem_start);
++ fbi->screen_base = 0;
++ fbi->fix.smem_start = 0;
++ fbi->fix.smem_len = 0;
++ return 0;
+ }
+
+ static struct platform_device_id mxsfb_devtype[] = {
+diff -Nur linux-3.14.36/drivers/video/vexpress-dvi.c linux-openelec/drivers/video/vexpress-dvi.c
+--- linux-3.14.36/drivers/video/vexpress-dvi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/drivers/video/vexpress-dvi.c 2015-05-06 12:05:42.000000000 -0500
+@@ -0,0 +1,220 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Copyright (C) 2012 ARM Limited
++ */
++
++#define pr_fmt(fmt) "vexpress-dvi: " fmt
++
++#include <linux/fb.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/vexpress.h>
++
++
++static struct vexpress_config_func *vexpress_dvimode_func;
++
++static struct {
++ u32 xres, yres, mode;
++} vexpress_dvi_dvimodes[] = {
++ { 640, 480, 0 }, /* VGA */
++ { 800, 600, 1 }, /* SVGA */
++ { 1024, 768, 2 }, /* XGA */
++ { 1280, 1024, 3 }, /* SXGA */
++ { 1600, 1200, 4 }, /* UXGA */
++ { 1920, 1080, 5 }, /* HD1080 */
++};
++
++static void vexpress_dvi_mode_set(struct fb_info *info, u32 xres, u32 yres)
++{
++ int err = -ENOENT;
++ int i;
++
++ if (!vexpress_dvimode_func)
++ return;
++
++ for (i = 0; i < ARRAY_SIZE(vexpress_dvi_dvimodes); i++) {
++ if (vexpress_dvi_dvimodes[i].xres == xres &&
++ vexpress_dvi_dvimodes[i].yres == yres) {
++ pr_debug("mode: %ux%u = %d\n", xres, yres,
++ vexpress_dvi_dvimodes[i].mode);
++ err = vexpress_config_write(vexpress_dvimode_func, 0,
++ vexpress_dvi_dvimodes[i].mode);
++ break;
++ }
++ }
++
++ if (err)
++ pr_warn("Failed to set %ux%u mode! (%d)\n", xres, yres, err);
++}
++
++
++static struct vexpress_config_func *vexpress_muxfpga_func;
++static int vexpress_dvi_fb = -1;
++
++static int vexpress_dvi_mux_set(struct fb_info *info)
++{
++ int err;
++ u32 site = vexpress_get_site_by_dev(info->device);
++
++ if (!vexpress_muxfpga_func)
++ return -ENXIO;
++
++ err = vexpress_config_write(vexpress_muxfpga_func, 0, site);
++ if (!err) {
++ pr_debug("Selected MUXFPGA input %d (fb%d)\n", site,
++ info->node);
++ vexpress_dvi_fb = info->node;
++ vexpress_dvi_mode_set(info, info->var.xres,
++ info->var.yres);
++ } else {
++ pr_warn("Failed to select MUXFPGA input %d (fb%d)! (%d)\n",
++ site, info->node, err);
++ }
++
++ return err;
++}
++
++static int vexpress_dvi_fb_select(int fb)
++{
++ int err;
++ struct fb_info *info;
++
++ /* fb0 is the default */
++ if (fb < 0)
++ fb = 0;
++
++ info = registered_fb[fb];
++ if (!info || !lock_fb_info(info))
++ return -ENODEV;
++
++ err = vexpress_dvi_mux_set(info);
++
++ unlock_fb_info(info);
++
++ return err;
++}
++
++static ssize_t vexpress_dvi_fb_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", vexpress_dvi_fb);
++}
++
++static ssize_t vexpress_dvi_fb_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ long value;
++ int err = kstrtol(buf, 0, &value);
++
++ if (!err)
++ err = vexpress_dvi_fb_select(value);
++
++ return err ? err : count;
++}
++
++DEVICE_ATTR(fb, S_IRUGO | S_IWUSR, vexpress_dvi_fb_show,
++ vexpress_dvi_fb_store);
++
++
++static int vexpress_dvi_fb_event_notify(struct notifier_block *self,
++ unsigned long action, void *data)
++{
++ struct fb_event *event = data;
++ struct fb_info *info = event->info;
++ struct fb_videomode *mode = event->data;
++
++ switch (action) {
++ case FB_EVENT_FB_REGISTERED:
++ if (vexpress_dvi_fb < 0)
++ vexpress_dvi_mux_set(info);
++ break;
++ case FB_EVENT_MODE_CHANGE:
++ case FB_EVENT_MODE_CHANGE_ALL:
++ if (info->node == vexpress_dvi_fb)
++ vexpress_dvi_mode_set(info, mode->xres, mode->yres);
++ break;
++ }
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block vexpress_dvi_fb_notifier = {
++ .notifier_call = vexpress_dvi_fb_event_notify,
++};
++static bool vexpress_dvi_fb_notifier_registered;
++
++
++enum vexpress_dvi_func { FUNC_MUXFPGA, FUNC_DVIMODE };
++
++static struct of_device_id vexpress_dvi_of_match[] = {
++ {
++ .compatible = "arm,vexpress-muxfpga",
++ .data = (void *)FUNC_MUXFPGA,
++ }, {
++ .compatible = "arm,vexpress-dvimode",
++ .data = (void *)FUNC_DVIMODE,
++ },
++ {}
++};
++
++static int vexpress_dvi_probe(struct platform_device *pdev)
++{
++ enum vexpress_dvi_func func;
++ const struct of_device_id *match =
++ of_match_device(vexpress_dvi_of_match, &pdev->dev);
++
++ if (match)
++ func = (enum vexpress_dvi_func)match->data;
++ else
++ func = pdev->id_entry->driver_data;
++
++ switch (func) {
++ case FUNC_MUXFPGA:
++ vexpress_muxfpga_func =
++ vexpress_config_func_get_by_dev(&pdev->dev);
++ device_create_file(&pdev->dev, &dev_attr_fb);
++ break;
++ case FUNC_DVIMODE:
++ vexpress_dvimode_func =
++ vexpress_config_func_get_by_dev(&pdev->dev);
++ break;
++ }
++
++ if (!vexpress_dvi_fb_notifier_registered) {
++ fb_register_client(&vexpress_dvi_fb_notifier);
++ vexpress_dvi_fb_notifier_registered = true;
++ }
++
++ vexpress_dvi_fb_select(vexpress_dvi_fb);
++
++ return 0;
++}
++
++static const struct platform_device_id vexpress_dvi_id_table[] = {
++ { .name = "vexpress-muxfpga", .driver_data = FUNC_MUXFPGA, },
++ { .name = "vexpress-dvimode", .driver_data = FUNC_DVIMODE, },
++ {}
++};
++
++static struct platform_driver vexpress_dvi_driver = {
++ .probe = vexpress_dvi_probe,
++ .driver = {
++ .name = "vexpress-dvi",
++ .of_match_table = vexpress_dvi_of_match,
++ },
++ .id_table = vexpress_dvi_id_table,
++};
++
++static int __init vexpress_dvi_init(void)
++{
++ return platform_driver_register(&vexpress_dvi_driver);
++}
++device_initcall(vexpress_dvi_init);
+diff -Nur linux-3.14.36/firmware/imx/sdma/sdma-imx6q.bin.ihex linux-openelec/firmware/imx/sdma/sdma-imx6q.bin.ihex
+--- linux-3.14.36/firmware/imx/sdma/sdma-imx6q.bin.ihex 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/firmware/imx/sdma/sdma-imx6q.bin.ihex 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,116 @@
++:1000000053444D4101000000010000001C000000AD
++:1000100026000000B40000007A0600008202000002
++:10002000FFFFFFFF00000000FFFFFFFFFFFFFFFFDC
++:10003000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD0
++:10004000FFFFFFFFFFFFFFFF6A1A0000FFFFFFFF38
++:10005000EB020000BB180000FFFFFFFF08040000D8
++:10006000FFFFFFFFC0030000FFFFFFFFFFFFFFFFD9
++:10007000FFFFFFFFAB020000FFFFFFFF7B0300005D
++:10008000FFFFFFFFFFFFFFFF4C0400006E040000B6
++:10009000FFFFFFFF00180000FFFFFFFFFFFFFFFF54
++:1000A000000000000018000062180000161A00008E
++:1000B000061B0000E3C1DB57E35FE357F352016A1D
++:1000C0008F00D500017D8D00A005EB5D7804037DD8
++:1000D00079042C7D367C79041F7CEE56000F600677
++:1000E000057D0965437E0A62417E20980A623E7E54
++:1000F00009653C7E12051205AD026007037DFB55C4
++:10010000D36D2B98FB55041DD36DC86A2F7F011F3B
++:1001100003200048E47C5398FB55D76D1500057803
++:100120000962C86A0962C86AD76D5298FB55D76DD3
++:100130001500150005780A62C86A0A62C86AD76D98
++:100140005298FB55D76D15001500150005780B6208
++:10015000C86A0B62C86AD76D097CDF6D077F000033
++:10016000EB55004D077DFAC1E35706980700CC68B0
++:100170000C6813C20AC20398D9C1E3C1DB57E35F1D
++:10018000E357F352216A8F00D500017D8D00A00551
++:10019000EB5DFB567804037D79042A7D317C79047C
++:1001A000207C700B1103EB53000F6003057D096584
++:1001B000377E0A62357E86980A62327E0965307E15
++:1001C00012051205AD026007027C065A8E98265A67
++:1001D000277F011F03200048E87C700B1103135395
++:1001E000AF98150004780962065A0962265AAE983B
++:1001F0001500150004780A62065A0A62265AAE985B
++:1002000015001500150004780B62065A0B62265A79
++:10021000077C0000EB55004D067DFAC1E357699855
++:1002200007000C6813C20AC26698700B11031353BF
++:100230006C07017CD9C1FB5E8A066B07017CD9C1C2
++:10024000F35EDB59D3588F0110010F398B003CC18D
++:100250002B7DC05AC85B4EC1277C88038906E35CAE
++:10026000FF0D1105FF1DBC053E07004D187D7008F0
++:1002700011007E07097D7D07027D2852E698F8521D
++:10028000DB54BC02CC02097C7C07027D2852EF982B
++:10029000F852D354BC02CC02097D0004DD988B00D7
++:1002A000C052C85359C1D67D0002CD98FF08BF0087
++:1002B0007F07157D8804D500017D8D00A005EB5DCD
++:1002C0008F0212021202FF3ADA05027C3E071899E9
++:1002D000A402DD02027D3E0718995E071899EB55CE
++:1002E0009805EB5DF352FB546A07267D6C07017D90
++:1002F00055996B07577C6907047D6807027D010EDD
++:100300002F999358D600017D8E009355A005935DDB
++:10031000A00602780255045D1D7C004E087C69072A
++:10032000037D0255177E3C99045D147F8906935026
++:100330000048017D2799A099150006780255045DB3
++:100340004F070255245D2F07017CA09917006F0706
++:10035000017C012093559D000700A7D9F598D36C27
++:100360006907047D6807027D010E64999358D600E1
++:10037000017D8E009355A005935DA006027802557D
++:10038000C86D0F7C004E087C6907037D0255097E0D
++:100390007199C86D067F890693500048017D5C996C
++:1003A000A0999A99C36A6907047D6807027D010EC6
++:1003B00087999358D600017D8E009355A005935DD3
++:1003C000A0060278C865045D0F7C004E087C6907B2
++:1003D000037DC865097E9499045D067F8906935064
++:1003E0000048017D7F99A09993559D000700FF6CFF
++:1003F000A7D9F5980000E354EB55004D017CF59822
++:10040000DD98E354EB55FF0A1102FF1A7F07027CC7
++:10041000A005B4999D008C05BA05A0051002BA0488
++:10042000AD0454040600E3C1DB57FB52C36AF35228
++:10043000056A8F00D500017D8D00A005EB5D780475
++:10044000037D79042B7D1E7C7904337CEE56000FEE
++:10045000FB556007027DC36DD599041DC36DC8624D
++:100460003B7E6006027D10021202096A357F12028D
++:10047000096A327F1202096A2F7F011F0320004898
++:10048000E77C099AFB55C76D150015001500057826
++:10049000C8620B6AC8620B6AC76D089AFB55C76DC4
++:1004A000150015000578C8620A6AC8620A6AC76D35
++:1004B000089AFB55C76D15000578C862096AC862BD
++:1004C000096AC76D097C286A077F0000EB55004D5B
++:1004D000057DFAC1DB57BF9977C254040AC2BA99A5
++:1004E000D9C1E3C1DB57F352056A8F00D500017D06
++:1004F0008D00A005FB567804037D7904297D1F7CBF
++:1005000079042E7CE35D700D1105ED55000F600739
++:10051000027D0652329A2652337E6005027D100219
++:100520001202096A2D7F1202096A2A7F1202096AE1
++:10053000277F011F03200048EA7CE3555D9A1500E0
++:1005400015001500047806520B6A26520B6A5C9A55
++:1005500015001500047806520A6A26520A6A5C9A47
++:10056000150004780652096A2652096A097C286A2D
++:10057000077F0000DB57004D057DFAC1DB571B9A52
++:1005800077C254040AC2189AE3C1DB57F352056AD2
++:10059000FB568E02941AC36AC8626902247D941EB7
++:1005A000C36ED36EC8624802C86A9426981EC36E92
++:1005B000D36EC8624C02C86A9826C36E981EC36E7A
++:1005C000C8629826C36E6002097CC8626E02247DF0
++:1005D000096A1E7F0125004D257D849A286A187FAF
++:1005E00004627AC2B89AE36E8F00D805017D8D004F
++:1005F000A005C8626E02107D096A0A7F0120F97C9D
++:10060000286A067F0000004D0D7DFAC1DB576E9A07
++:10061000070004620C6AB59A286AFA7F04627AC2FB
++:1006200058045404286AF47F0AC26B9AD9C1E3C102
++:10063000DB57F352056AFB568E02941A0252690286
++:100640001D7D941E06524802065A9426981E065294
++:100650004C02065A9826981E065260020A7C98267A
++:1006600006526E02237D096A1D7F0125004D247DFF
++:10067000D19A286A177F04627AC2029B8F00D8053C
++:10068000017D8D00A00506526E02107D096A0A7F69
++:100690000120F97C286A067F0000004D0D7DFAC11B
++:1006A000DB57C19A070004620C6AFF9A286AFA7F36
++:1006B00004627AC258045404286AF47F0AC2BE9ABB
++:1006C000016E0B612F7E0B622D7E0B632B7E0C0D5A
++:1006D0001704170417049D04081DCC05017C0C0D9C
++:1006E000D16A000F4207C86FDD6F1C7F8E009D002E
++:1006F00001680B67177ED56B04080278C86F120774
++:10070000117C0B670F7E04080278C86F12070A7C01
++:10071000DD6F087FD169010FC86FDD6F037F0101B5
++:0E0720000004129B0700FF680C680002129B89
++:00000001FF
+diff -Nur linux-3.14.36/firmware/Makefile linux-openelec/firmware/Makefile
+--- linux-3.14.36/firmware/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/firmware/Makefile 2015-05-06 12:05:45.000000000 -0500
+@@ -61,6 +61,7 @@
+ radeon/RV770_pfp.bin radeon/RV770_me.bin \
+ radeon/RV730_pfp.bin radeon/RV730_me.bin \
+ radeon/RV710_pfp.bin radeon/RV710_me.bin
++fw-shipped-$(CONFIG_IMX_SDMA) += imx/sdma/sdma-imx6q.bin
+ fw-shipped-$(CONFIG_DVB_AV7110) += av7110/bootcode.bin
+ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
+ fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
+@@ -210,6 +211,8 @@
+ $(obj)/%: $(obj)/%.ihex | $(objtree)/$(obj)/$$(dir %)
+ $(call cmd,ihex)
+
++.NOTPARALLEL: $(obj)/%
++
+ # Don't depend on ihex2fw if we're installing and it already exists.
+ # Putting it after | in the dependencies doesn't seem sufficient when
+ # we're installing after a cross-compile, because ihex2fw has dependencies
+diff -Nur linux-3.14.36/fs/btrfs/Kconfig linux-openelec/fs/btrfs/Kconfig
+--- linux-3.14.36/fs/btrfs/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/fs/btrfs/Kconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -1,5 +1,6 @@
+ config BTRFS_FS
+ tristate "Btrfs filesystem support"
++ select LIBCRC32C
+ select CRYPTO
+ select CRYPTO_CRC32C
+ select ZLIB_INFLATE
+diff -Nur linux-3.14.36/fs/buffer.c linux-openelec/fs/buffer.c
+--- linux-3.14.36/fs/buffer.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/fs/buffer.c 2015-07-24 18:03:29.528842002 -0500
+@@ -3110,7 +3110,7 @@
+ * until the buffer gets unlocked).
+ *
+ * ll_rw_block sets b_end_io to simple completion handler that marks
+- * the buffer up-to-date (if approriate), unlocks the buffer and wakes
++ * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
+ * any waiters.
+ *
+ * All of the buffers must be for the same device, and must also be a
+diff -Nur linux-3.14.36/fs/compat_binfmt_elf.c linux-openelec/fs/compat_binfmt_elf.c
+--- linux-3.14.36/fs/compat_binfmt_elf.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/fs/compat_binfmt_elf.c 2015-05-06 12:05:43.000000000 -0500
+@@ -88,6 +88,11 @@
+ #define ELF_HWCAP COMPAT_ELF_HWCAP
+ #endif
+
++#ifdef COMPAT_ELF_HWCAP2
++#undef ELF_HWCAP2
++#define ELF_HWCAP2 COMPAT_ELF_HWCAP2
++#endif
++
+ #ifdef COMPAT_ARCH_DLINFO
+ #undef ARCH_DLINFO
+ #define ARCH_DLINFO COMPAT_ARCH_DLINFO
+diff -Nur linux-3.14.36/fs/debugfs/inode.c linux-openelec/fs/debugfs/inode.c
+--- linux-3.14.36/fs/debugfs/inode.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/fs/debugfs/inode.c 2015-07-24 18:03:29.800842002 -0500
+@@ -367,7 +367,7 @@
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+- * directory dentry if set. If this paramater is NULL, then the
++ * directory dentry if set. If this parameter is NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+@@ -409,7 +409,7 @@
+ * @name: a pointer to a string containing the name of the directory to
+ * create.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+- * directory dentry if set. If this paramater is NULL, then the
++ * directory dentry if set. If this parameter is NULL, then the
+ * directory will be created in the root of the debugfs filesystem.
+ *
+ * This function creates a directory in debugfs with the given name.
+@@ -434,7 +434,7 @@
+ * @name: a pointer to a string containing the name of the symbolic link to
+ * create.
+ * @parent: a pointer to the parent dentry for this symbolic link. This
+- * should be a directory dentry if set. If this paramater is NULL,
++ * should be a directory dentry if set. If this parameter is NULL,
+ * then the symbolic link will be created in the root of the debugfs
+ * filesystem.
+ * @target: a pointer to a string containing the path to the target of the
+diff -Nur linux-3.14.36/include/asm-generic/word-at-a-time.h linux-openelec/include/asm-generic/word-at-a-time.h
+--- linux-3.14.36/include/asm-generic/word-at-a-time.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/asm-generic/word-at-a-time.h 2015-05-06 12:05:45.000000000 -0500
+@@ -50,7 +50,7 @@
+ }
+
+ #ifndef zero_bytemask
+-#define zero_bytemask(mask) (~0ul << __fls(mask) << 1)
++#define zero_bytemask(mask) (~1ul << __fls(mask))
+ #endif
+
+ #endif /* _ASM_WORD_AT_A_TIME_H */
+diff -Nur linux-3.14.36/include/crypto/algapi.h linux-openelec/include/crypto/algapi.h
+--- linux-3.14.36/include/crypto/algapi.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/crypto/algapi.h 2015-05-06 12:05:44.000000000 -0500
+@@ -100,9 +100,12 @@
+ void *page;
+ u8 *buffer;
+ u8 *iv;
++ unsigned int ivsize;
+
+ int flags;
+- unsigned int blocksize;
++ unsigned int walk_blocksize;
++ unsigned int cipher_blocksize;
++ unsigned int alignmask;
+ };
+
+ struct ablkcipher_walk {
+@@ -192,6 +195,10 @@
+ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ unsigned int blocksize);
++int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
++ struct blkcipher_walk *walk,
++ struct crypto_aead *tfm,
++ unsigned int blocksize);
+
+ int ablkcipher_walk_done(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk, int err);
+diff -Nur linux-3.14.36/include/drm/drm_fb_helper.h linux-openelec/include/drm/drm_fb_helper.h
+--- linux-3.14.36/include/drm/drm_fb_helper.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/drm/drm_fb_helper.h 2015-05-06 12:05:44.000000000 -0500
+@@ -55,7 +55,7 @@
+ * save the current lut when force-restoring the fbdev for e.g.
+ * kdbg.
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
+- * structure. Futhermore it also needs to allocate the drm
++ * structure. Furthermore it also needs to allocate the drm
+ * framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
+ *
+diff -Nur linux-3.14.36/include/dt-bindings/clock/imx6sl-clock.h linux-openelec/include/dt-bindings/clock/imx6sl-clock.h
+--- linux-3.14.36/include/dt-bindings/clock/imx6sl-clock.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/dt-bindings/clock/imx6sl-clock.h 2015-05-06 12:05:44.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2013 Freescale Semiconductor, Inc.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -71,8 +71,8 @@
+ #define IMX6SL_CLK_PERIPH 58
+ #define IMX6SL_CLK_PERIPH2 59
+ #define IMX6SL_CLK_OCRAM_PODF 60
+-#define IMX6SL_CLK_PERIPH_CLK2_PODF 61
+-#define IMX6SL_CLK_PERIPH2_CLK2_PODF 62
++#define IMX6SL_CLK_PERIPH_CLK2 61
++#define IMX6SL_CLK_PERIPH2_CLK2 62
+ #define IMX6SL_CLK_IPG 63
+ #define IMX6SL_CLK_CSI_PODF 64
+ #define IMX6SL_CLK_LCDIF_AXI_PODF 65
+@@ -145,6 +145,7 @@
+ #define IMX6SL_CLK_USDHC4 132
+ #define IMX6SL_CLK_PLL4_AUDIO_DIV 133
+ #define IMX6SL_CLK_SPBA 134
+-#define IMX6SL_CLK_END 135
++#define IMX6SL_CLK_UART_OSC_4M 135
++#define IMX6SL_CLK_END 136
+
+ #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
+diff -Nur linux-3.14.36/include/linux/ahci_platform.h linux-openelec/include/linux/ahci_platform.h
+--- linux-3.14.36/include/linux/ahci_platform.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/ahci_platform.h 2015-05-06 12:05:44.000000000 -0500
+@@ -19,15 +19,38 @@
+
+ struct device;
+ struct ata_port_info;
++struct ahci_host_priv;
++struct platform_device;
+
++/*
++ * Note ahci_platform_data is deprecated, it is only kept around for use
++ * by the old da850 and spear13xx ahci code.
++ * New drivers should instead declare their own platform_driver struct, and
++ * use ahci_platform* functions in their own probe, suspend and resume methods.
++ */
+ struct ahci_platform_data {
+ int (*init)(struct device *dev, void __iomem *addr);
+ void (*exit)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
+- const struct ata_port_info *ata_port_info;
+- unsigned int force_port_map;
+- unsigned int mask_port_map;
+ };
+
++int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
++void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
++int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
++void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
++struct ahci_host_priv *ahci_platform_get_resources(
++ struct platform_device *pdev);
++int ahci_platform_init_host(struct platform_device *pdev,
++ struct ahci_host_priv *hpriv,
++ const struct ata_port_info *pi_template,
++ unsigned long host_flags,
++ unsigned int force_port_map,
++ unsigned int mask_port_map);
++
++int ahci_platform_suspend_host(struct device *dev);
++int ahci_platform_resume_host(struct device *dev);
++int ahci_platform_suspend(struct device *dev);
++int ahci_platform_resume(struct device *dev);
++
+ #endif /* _AHCI_PLATFORM_H */
+diff -Nur linux-3.14.36/include/linux/amba/clcd.h linux-openelec/include/linux/amba/clcd.h
+--- linux-3.14.36/include/linux/amba/clcd.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/amba/clcd.h 2015-05-06 12:05:44.000000000 -0500
+@@ -243,6 +243,9 @@
+ val |= CNTL_BGR;
+ }
+
++ /* Reset the current colour depth */
++ val &= ~CNTL_LCDBPP16_444;
++
+ switch (var->bits_per_pixel) {
+ case 1:
+ val |= CNTL_LCDBPP1;
+@@ -264,14 +267,15 @@
+ */
+ if (amba_part(fb->dev) == 0x110 ||
+ var->green.length == 5)
+- val |= CNTL_LCDBPP16;
++ val |= CNTL_LCDBPP16 | CNTL_BGR;
+ else if (var->green.length == 6)
+- val |= CNTL_LCDBPP16_565;
++ val |= CNTL_LCDBPP16_565 | CNTL_BGR;
+ else
+- val |= CNTL_LCDBPP16_444;
++ val |= CNTL_LCDBPP16_444 | CNTL_BGR;
+ break;
+ case 32:
+ val |= CNTL_LCDBPP24;
++ val &= ~CNTL_BGR;
+ break;
+ }
+
+diff -Nur linux-3.14.36/include/linux/arm-hdlcd.h linux-openelec/include/linux/arm-hdlcd.h
+--- linux-3.14.36/include/linux/arm-hdlcd.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/arm-hdlcd.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,122 @@
++/*
++ * include/linux/arm-hdlcd.h
++ *
++ * Copyright (C) 2011 ARM Limited
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
++ * ARM HDLCD Controller register definition
++ */
++
++#include <linux/fb.h>
++#include <linux/completion.h>
++
++/* register offsets */
++#define HDLCD_REG_VERSION 0x0000 /* ro */
++#define HDLCD_REG_INT_RAWSTAT 0x0010 /* rw */
++#define HDLCD_REG_INT_CLEAR 0x0014 /* wo */
++#define HDLCD_REG_INT_MASK 0x0018 /* rw */
++#define HDLCD_REG_INT_STATUS 0x001c /* ro */
++#define HDLCD_REG_USER_OUT 0x0020 /* rw */
++#define HDLCD_REG_FB_BASE 0x0100 /* rw */
++#define HDLCD_REG_FB_LINE_LENGTH 0x0104 /* rw */
++#define HDLCD_REG_FB_LINE_COUNT 0x0108 /* rw */
++#define HDLCD_REG_FB_LINE_PITCH 0x010c /* rw */
++#define HDLCD_REG_BUS_OPTIONS 0x0110 /* rw */
++#define HDLCD_REG_V_SYNC 0x0200 /* rw */
++#define HDLCD_REG_V_BACK_PORCH 0x0204 /* rw */
++#define HDLCD_REG_V_DATA 0x0208 /* rw */
++#define HDLCD_REG_V_FRONT_PORCH 0x020c /* rw */
++#define HDLCD_REG_H_SYNC 0x0210 /* rw */
++#define HDLCD_REG_H_BACK_PORCH 0x0214 /* rw */
++#define HDLCD_REG_H_DATA 0x0218 /* rw */
++#define HDLCD_REG_H_FRONT_PORCH 0x021c /* rw */
++#define HDLCD_REG_POLARITIES 0x0220 /* rw */
++#define HDLCD_REG_COMMAND 0x0230 /* rw */
++#define HDLCD_REG_PIXEL_FORMAT 0x0240 /* rw */
++#define HDLCD_REG_BLUE_SELECT 0x0244 /* rw */
++#define HDLCD_REG_GREEN_SELECT 0x0248 /* rw */
++#define HDLCD_REG_RED_SELECT 0x024c /* rw */
++
++/* version */
++#define HDLCD_PRODUCT_ID 0x1CDC0000
++#define HDLCD_PRODUCT_MASK 0xFFFF0000
++#define HDLCD_VERSION_MAJOR_MASK 0x0000FF00
++#define HDLCD_VERSION_MINOR_MASK 0x000000FF
++
++/* interrupts */
++#define HDLCD_INTERRUPT_DMA_END (1 << 0)
++#define HDLCD_INTERRUPT_BUS_ERROR (1 << 1)
++#define HDLCD_INTERRUPT_VSYNC (1 << 2)
++#define HDLCD_INTERRUPT_UNDERRUN (1 << 3)
++
++/* polarity */
++#define HDLCD_POLARITY_VSYNC (1 << 0)
++#define HDLCD_POLARITY_HSYNC (1 << 1)
++#define HDLCD_POLARITY_DATAEN (1 << 2)
++#define HDLCD_POLARITY_DATA (1 << 3)
++#define HDLCD_POLARITY_PIXELCLK (1 << 4)
++
++/* commands */
++#define HDLCD_COMMAND_DISABLE (0 << 0)
++#define HDLCD_COMMAND_ENABLE (1 << 0)
++
++/* pixel format */
++#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN (0 << 31)
++#define HDLCD_PIXEL_FMT_BIG_ENDIAN (1 << 31)
++#define HDLCD_BYTES_PER_PIXEL_MASK (3 << 3)
++
++/* bus options */
++#define HDLCD_BUS_BURST_MASK 0x01f
++#define HDLCD_BUS_MAX_OUTSTAND 0xf00
++#define HDLCD_BUS_BURST_NONE (0 << 0)
++#define HDLCD_BUS_BURST_1 (1 << 0)
++#define HDLCD_BUS_BURST_2 (1 << 1)
++#define HDLCD_BUS_BURST_4 (1 << 2)
++#define HDLCD_BUS_BURST_8 (1 << 3)
++#define HDLCD_BUS_BURST_16 (1 << 4)
++
++/* Max resolution supported is 4096x4096, 8 bit per color component,
++ 8 bit alpha, but we are going to choose the usual hardware default
++ (2048x2048, 32 bpp) and enable double buffering */
++#define HDLCD_MAX_XRES 2048
++#define HDLCD_MAX_YRES 2048
++#define HDLCD_MAX_FRAMEBUFFER_SIZE (HDLCD_MAX_XRES * HDLCD_MAX_YRES << 2)
++
++#define HDLCD_MEM_BASE (CONFIG_PAGE_OFFSET - 0x1000000)
++
++#define NR_PALETTE 256
++
++/* OEMs using HDLCD may wish to enable these settings if
++ * display disruption is apparent and you suspect HDLCD
++ * access to RAM may be starved.
++ */
++/* Turn HDLCD default color red instead of black so
++ * that it's easy to see pixel clock data underruns
++ * (compared to other visual disruption)
++ */
++//#define HDLCD_RED_DEFAULT_COLOUR
++/* Add a counter in the IRQ handler to count buffer underruns
++ * and /proc/hdlcd_underrun to read the counter
++ */
++//#define HDLCD_COUNT_BUFFERUNDERRUNS
++/* Restrict height to 1x screen size
++ *
++ */
++//#define HDLCD_NO_VIRTUAL_SCREEN
++
++#ifdef CONFIG_ANDROID
++#define HDLCD_NO_VIRTUAL_SCREEN
++#endif
++
++struct hdlcd_device {
++ struct fb_info fb;
++ struct device *dev;
++ struct clk *clk;
++ void __iomem *base;
++ int irq;
++ struct completion vsync_completion;
++ unsigned char *edid;
++};
+diff -Nur linux-3.14.36/include/linux/backlight.h linux-openelec/include/linux/backlight.h
+--- linux-3.14.36/include/linux/backlight.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/backlight.h 2015-05-06 12:05:44.000000000 -0500
+@@ -9,6 +9,7 @@
+ #define _LINUX_BACKLIGHT_H
+
+ #include <linux/device.h>
++#include <linux/fb.h>
+ #include <linux/mutex.h>
+ #include <linux/notifier.h>
+
+@@ -104,6 +105,11 @@
+ struct list_head entry;
+
+ struct device dev;
++
++ /* Multiple framebuffers may share one backlight device */
++ bool fb_bl_on[FB_MAX];
++
++ int use_count;
+ };
+
+ static inline void backlight_update_status(struct backlight_device *bd)
+diff -Nur linux-3.14.36/include/linux/busfreq-imx6.h linux-openelec/include/linux/busfreq-imx6.h
+--- linux-3.14.36/include/linux/busfreq-imx6.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/busfreq-imx6.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_ARCH_MXC_BUSFREQ_H__
++#define __ASM_ARCH_MXC_BUSFREQ_H__
++
++/*
++ * This enumerates busfreq mode.
++ */
++enum bus_freq_mode {
++ BUS_FREQ_HIGH,
++ BUS_FREQ_MED,
++ BUS_FREQ_AUDIO,
++ BUS_FREQ_LOW,
++};
++void request_bus_freq(enum bus_freq_mode mode);
++void release_bus_freq(enum bus_freq_mode mode);
++#endif
+diff -Nur linux-3.14.36/include/linux/cgroup_subsys.h linux-openelec/include/linux/cgroup_subsys.h
+--- linux-3.14.36/include/linux/cgroup_subsys.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/cgroup_subsys.h 2015-05-06 12:05:44.000000000 -0500
+@@ -39,6 +39,10 @@
+ SUBSYS(blkio)
+ #endif
+
++#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_BFQIO)
++SUBSYS(bfqio)
++#endif
++
+ #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
+ SUBSYS(perf)
+ #endif
+diff -Nur linux-3.14.36/include/linux/clk-provider.h linux-openelec/include/linux/clk-provider.h
+--- linux-3.14.36/include/linux/clk-provider.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/clk-provider.h 2015-05-06 12:05:44.000000000 -0500
+@@ -30,6 +30,13 @@
+ #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
+ #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
+ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
++/*
++ * Basic mux clk, can't switch parent while there is another basic mux clk
++ * being its child. Otherwise, a glitch might be propagated to downstream
++ * clocks through this child mux.
++ */
++#define CLK_IS_BASIC_MUX BIT(9)
++
+
+ struct clk_hw;
+
+diff -Nur linux-3.14.36/include/linux/cma.h linux-openelec/include/linux/cma.h
+--- linux-3.14.36/include/linux/cma.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/cma.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,27 @@
++#ifndef __CMA_H__
++#define __CMA_H__
++
++/*
++ * There is always at least global CMA area and a few optional
++ * areas configured in kernel .config.
++ */
++#ifdef CONFIG_CMA_AREAS
++#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
++
++#else
++#define MAX_CMA_AREAS (0)
++
++#endif
++
++struct cma;
++
++extern phys_addr_t cma_get_base(struct cma *cma);
++extern unsigned long cma_get_size(struct cma *cma);
++
++extern int __init cma_declare_contiguous(phys_addr_t size,
++ phys_addr_t base, phys_addr_t limit,
++ phys_addr_t alignment, unsigned int order_per_bit,
++ bool fixed, struct cma **res_cma);
++extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
++extern bool cma_release(struct cma *cma, struct page *pages, int count);
++#endif
+diff -Nur linux-3.14.36/include/linux/cpufeature.h linux-openelec/include/linux/cpufeature.h
+--- linux-3.14.36/include/linux/cpufeature.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/cpufeature.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,60 @@
++/*
++ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __LINUX_CPUFEATURE_H
++#define __LINUX_CPUFEATURE_H
++
++#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
++
++#include <linux/mod_devicetable.h>
++#include <asm/cpufeature.h>
++
++/*
++ * Macros imported from <asm/cpufeature.h>:
++ * - cpu_feature(x) ordinal value of feature called 'x'
++ * - cpu_have_feature(u32 n) whether feature #n is available
++ * - MAX_CPU_FEATURES upper bound for feature ordinal values
++ * Optional:
++ * - CPU_FEATURE_TYPEFMT format string fragment for printing the cpu type
++ * - CPU_FEATURE_TYPEVAL set of values matching the format string above
++ */
++
++#ifndef CPU_FEATURE_TYPEFMT
++#define CPU_FEATURE_TYPEFMT "%s"
++#endif
++
++#ifndef CPU_FEATURE_TYPEVAL
++#define CPU_FEATURE_TYPEVAL ELF_PLATFORM
++#endif
++
++/*
++ * Use module_cpu_feature_match(feature, module_init_function) to
++ * declare that
++ * a) the module shall be probed upon discovery of CPU feature 'feature'
++ * (typically at boot time using udev)
++ * b) the module must not be loaded if CPU feature 'feature' is not present
++ * (not even by manual insmod).
++ *
++ * For a list of legal values for 'feature', please consult the file
++ * 'asm/cpufeature.h' of your favorite architecture.
++ */
++#define module_cpu_feature_match(x, __init) \
++static struct cpu_feature const cpu_feature_match_ ## x[] = \
++ { { .feature = cpu_feature(x) }, { } }; \
++MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
++ \
++static int cpu_feature_match_ ## x ## _init(void) \
++{ \
++ if (!cpu_have_feature(cpu_feature(x))) \
++ return -ENODEV; \
++ return __init(); \
++} \
++module_init(cpu_feature_match_ ## x ## _init)
++
++#endif
++#endif
+diff -Nur linux-3.14.36/include/linux/cpufreq.h linux-openelec/include/linux/cpufreq.h
+--- linux-3.14.36/include/linux/cpufreq.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/cpufreq.h 2015-05-06 12:05:44.000000000 -0500
+@@ -429,6 +429,9 @@
+ #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
+ extern struct cpufreq_governor cpufreq_gov_ondemand;
+ #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand)
++#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
++extern struct cpufreq_governor cpufreq_gov_interactive;
++#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive)
+ #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
+ extern struct cpufreq_governor cpufreq_gov_conservative;
+ #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
+diff -Nur linux-3.14.36/include/linux/cpu.h linux-openelec/include/linux/cpu.h
+--- linux-3.14.36/include/linux/cpu.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/cpu.h 2015-05-06 12:05:44.000000000 -0500
+@@ -226,4 +226,11 @@
+ void arch_cpu_idle_exit(void);
+ void arch_cpu_idle_dead(void);
+
++#define IDLE_START 1
++#define IDLE_END 2
++
++void idle_notifier_register(struct notifier_block *n);
++void idle_notifier_unregister(struct notifier_block *n);
++void idle_notifier_call_chain(unsigned long val);
++
+ #endif /* _LINUX_CPU_H_ */
+diff -Nur linux-3.14.36/include/linux/device_cooling.h linux-openelec/include/linux/device_cooling.h
+--- linux-3.14.36/include/linux/device_cooling.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/device_cooling.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,45 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#ifndef __DEVICE_THERMAL_H__
++#define __DEVICE_THERMAL_H__
++
++#include <linux/thermal.h>
++
++#ifdef CONFIG_DEVICE_THERMAL
++int register_devfreq_cooling_notifier(struct notifier_block *nb);
++int unregister_devfreq_cooling_notifier(struct notifier_block *nb);
++struct thermal_cooling_device *devfreq_cooling_register(unsigned long max_state);
++void devfreq_cooling_unregister(struct thermal_cooling_device *cdev);
++#else
++static inline
++int register_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline
++int unregister_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline
++struct thermal_cooling_device *devfreq_cooling_register(unsigned long max_state)
++{
++ return NULL;
++}
++
++static inline
++void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
++{
++ return;
++}
++#endif
++#endif /* __DEVICE_THERMAL_H__ */
+diff -Nur linux-3.14.36/include/linux/dma-contiguous.h linux-openelec/include/linux/dma-contiguous.h
+--- linux-3.14.36/include/linux/dma-contiguous.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/dma-contiguous.h 2015-05-06 12:05:44.000000000 -0500
+@@ -53,18 +53,13 @@
+
+ #ifdef __KERNEL__
+
++#include <linux/device.h>
++
+ struct cma;
+ struct page;
+-struct device;
+
+ #ifdef CONFIG_DMA_CMA
+
+-/*
+- * There is always at least global CMA area and a few optional device
+- * private areas configured in kernel .config.
+- */
+-#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+-
+ extern struct cma *dma_contiguous_default_area;
+
+ static inline struct cma *dev_get_cma_area(struct device *dev)
+@@ -88,7 +83,8 @@
+ void dma_contiguous_reserve(phys_addr_t addr_limit);
+
+ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+- phys_addr_t limit, struct cma **res_cma);
++ phys_addr_t limit, struct cma **res_cma,
++ bool fixed);
+
+ /**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+@@ -108,7 +104,7 @@
+ {
+ struct cma *cma;
+ int ret;
+- ret = dma_contiguous_reserve_area(size, base, limit, &cma);
++ ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
+ if (ret == 0)
+ dev_set_cma_area(dev, cma);
+
+@@ -122,8 +118,6 @@
+
+ #else
+
+-#define MAX_CMA_AREAS (0)
+-
+ static inline struct cma *dev_get_cma_area(struct device *dev)
+ {
+ return NULL;
+@@ -136,7 +130,9 @@
+ static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+ static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+- phys_addr_t limit, struct cma **res_cma) {
++ phys_addr_t limit, struct cma **res_cma,
++ bool fixed)
++{
+ return -ENOSYS;
+ }
+
+diff -Nur linux-3.14.36/include/linux/dmaengine.h linux-openelec/include/linux/dmaengine.h
+--- linux-3.14.36/include/linux/dmaengine.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/dmaengine.h 2015-05-06 12:05:44.000000000 -0500
+@@ -333,6 +333,8 @@
+ * @slave_id: Slave requester id. Only valid for slave channels. The dma
+ * slave peripheral will have unique id as dma requester which need to be
+ * pass as slave config.
++ * @dma_request0: this is the first dma request of this dma channel.
++ * @dma_request1: this is the second dma request of this dma channel.
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+@@ -361,6 +363,8 @@
+ u32 dst_maxburst;
+ bool device_fc;
+ unsigned int slave_id;
++ int dma_request0;
++ int dma_request1;
+ };
+
+ /**
+diff -Nur linux-3.14.36/include/linux/fsl_otp.h linux-openelec/include/linux/fsl_otp.h
+--- linux-3.14.36/include/linux/fsl_otp.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/fsl_otp.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,6 @@
++#ifndef _LINUX_FSL_OTP_H
++#define _LINUX_FSL_OTP_H
++
++int fsl_otp_readl(unsigned long offset, u32 *value);
++
++#endif
+diff -Nur linux-3.14.36/include/linux/ftrace.h linux-openelec/include/linux/ftrace.h
+--- linux-3.14.36/include/linux/ftrace.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/ftrace.h 2015-05-06 12:05:44.000000000 -0500
+@@ -605,25 +605,27 @@
+ #endif
+ }
+
+-#ifndef HAVE_ARCH_CALLER_ADDR
++/* All archs should have this, but we define it for consistency */
++#ifndef ftrace_return_address0
++# define ftrace_return_address0 __builtin_return_address(0)
++#endif
++
++/* Archs may use other ways for ADDR1 and beyond */
++#ifndef ftrace_return_address
+ # ifdef CONFIG_FRAME_POINTER
+-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
+-# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
+-# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
+-# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
+-# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
+-# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
++# define ftrace_return_address(n) __builtin_return_address(n)
+ # else
+-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-# define CALLER_ADDR1 0UL
+-# define CALLER_ADDR2 0UL
+-# define CALLER_ADDR3 0UL
+-# define CALLER_ADDR4 0UL
+-# define CALLER_ADDR5 0UL
+-# define CALLER_ADDR6 0UL
++# define ftrace_return_address(n) 0UL
+ # endif
+-#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
++#endif
++
++#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
++#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
++#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
++#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
++#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
++#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
++#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+
+ #ifdef CONFIG_IRQSOFF_TRACER
+ extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
+diff -Nur linux-3.14.36/include/linux/hardirq.h linux-openelec/include/linux/hardirq.h
+--- linux-3.14.36/include/linux/hardirq.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/hardirq.h 2015-05-06 12:05:44.000000000 -0500
+@@ -9,6 +9,7 @@
+
+
+ extern void synchronize_irq(unsigned int irq);
++extern void synchronize_hardirq(unsigned int irq);
+
+ #if defined(CONFIG_TINY_RCU)
+
+diff -Nur linux-3.14.36/include/linux/hsi/hsi.h linux-openelec/include/linux/hsi/hsi.h
+--- linux-3.14.36/include/linux/hsi/hsi.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/hsi/hsi.h 2015-05-06 12:05:44.000000000 -0500
+@@ -178,7 +178,7 @@
+ * @complete: Transfer completion callback
+ * @destructor: Destructor to free resources when flushing
+ * @status: Status of the transfer when completed
+- * @actual_len: Actual length of data transfered on completion
++ * @actual_len: Actual length of data transferred on completion
+ * @channel: Channel were to TX/RX the message
+ * @ttype: Transfer type (TX if set, RX otherwise)
+ * @break_frame: if true HSI will send/receive a break frame. Data buffers are
+diff -Nur linux-3.14.36/include/linux/ipu.h linux-openelec/include/linux/ipu.h
+--- linux-3.14.36/include/linux/ipu.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/ipu.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,38 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*!
++ * @defgroup IPU MXC Image Processing Unit (IPU) Driver
++ */
++/*!
++ * @file linux/ipu.h
++ *
++ * @brief This file contains the IPU driver API declarations.
++ *
++ * @ingroup IPU
++ */
++
++#ifndef __LINUX_IPU_H__
++#define __LINUX_IPU_H__
++
++#include <linux/interrupt.h>
++#include <uapi/linux/ipu.h>
++
++unsigned int fmt_to_bpp(unsigned int pixelformat);
++cs_t colorspaceofpixel(int fmt);
++int need_csc(int ifmt, int ofmt);
++
++int ipu_queue_task(struct ipu_task *task);
++int ipu_check_task(struct ipu_task *task);
++
++#endif
+diff -Nur linux-3.14.36/include/linux/ipu-v3.h linux-openelec/include/linux/ipu-v3.h
+--- linux-3.14.36/include/linux/ipu-v3.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/ipu-v3.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,752 @@
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ */
++
++#ifndef __LINUX_IPU_V3_H_
++#define __LINUX_IPU_V3_H_
++
++#include <linux/ipu.h>
++
++/* IPU Driver channels definitions. */
++/* Note these are different from IDMA channels */
++#define IPU_MAX_CH 32
++#define _MAKE_CHAN(num, v_in, g_in, a_in, out) \
++ ((num << 24) | (v_in << 18) | (g_in << 12) | (a_in << 6) | out)
++#define _MAKE_ALT_CHAN(ch) (ch | (IPU_MAX_CH << 24))
++#define IPU_CHAN_ID(ch) (ch >> 24)
++#define IPU_CHAN_ALT(ch) (ch & 0x02000000)
++#define IPU_CHAN_ALPHA_IN_DMA(ch) ((uint32_t) (ch >> 6) & 0x3F)
++#define IPU_CHAN_GRAPH_IN_DMA(ch) ((uint32_t) (ch >> 12) & 0x3F)
++#define IPU_CHAN_VIDEO_IN_DMA(ch) ((uint32_t) (ch >> 18) & 0x3F)
++#define IPU_CHAN_OUT_DMA(ch) ((uint32_t) (ch & 0x3F))
++#define NO_DMA 0x3F
++#define ALT 1
++/*!
++ * Enumeration of IPU logical channels. An IPU logical channel is defined as a
++ * combination of an input (memory to IPU), output (IPU to memory), and/or
++ * secondary input IDMA channels and in some cases an Image Converter task.
++ * Some channels consist of only an input or output.
++ */
++typedef enum {
++ CHAN_NONE = -1,
++ MEM_ROT_ENC_MEM = _MAKE_CHAN(1, 45, NO_DMA, NO_DMA, 48),
++ MEM_ROT_VF_MEM = _MAKE_CHAN(2, 46, NO_DMA, NO_DMA, 49),
++ MEM_ROT_PP_MEM = _MAKE_CHAN(3, 47, NO_DMA, NO_DMA, 50),
++
++ MEM_PRP_ENC_MEM = _MAKE_CHAN(4, 12, 14, 17, 20),
++ MEM_PRP_VF_MEM = _MAKE_CHAN(5, 12, 14, 17, 21),
++ MEM_PP_MEM = _MAKE_CHAN(6, 11, 15, 18, 22),
++
++ MEM_DC_SYNC = _MAKE_CHAN(7, 28, NO_DMA, NO_DMA, NO_DMA),
++ MEM_DC_ASYNC = _MAKE_CHAN(8, 41, NO_DMA, NO_DMA, NO_DMA),
++ MEM_BG_SYNC = _MAKE_CHAN(9, 23, NO_DMA, 51, NO_DMA),
++ MEM_FG_SYNC = _MAKE_CHAN(10, 27, NO_DMA, 31, NO_DMA),
++
++ MEM_BG_ASYNC0 = _MAKE_CHAN(11, 24, NO_DMA, 52, NO_DMA),
++ MEM_FG_ASYNC0 = _MAKE_CHAN(12, 29, NO_DMA, 33, NO_DMA),
++ MEM_BG_ASYNC1 = _MAKE_ALT_CHAN(MEM_BG_ASYNC0),
++ MEM_FG_ASYNC1 = _MAKE_ALT_CHAN(MEM_FG_ASYNC0),
++
++ DIRECT_ASYNC0 = _MAKE_CHAN(13, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++ DIRECT_ASYNC1 = _MAKE_CHAN(14, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++
++ CSI_MEM0 = _MAKE_CHAN(15, NO_DMA, NO_DMA, NO_DMA, 0),
++ CSI_MEM1 = _MAKE_CHAN(16, NO_DMA, NO_DMA, NO_DMA, 1),
++ CSI_MEM2 = _MAKE_CHAN(17, NO_DMA, NO_DMA, NO_DMA, 2),
++ CSI_MEM3 = _MAKE_CHAN(18, NO_DMA, NO_DMA, NO_DMA, 3),
++
++ CSI_MEM = CSI_MEM0,
++
++ CSI_PRP_ENC_MEM = _MAKE_CHAN(19, NO_DMA, NO_DMA, NO_DMA, 20),
++ CSI_PRP_VF_MEM = _MAKE_CHAN(20, NO_DMA, NO_DMA, NO_DMA, 21),
++
++ /* for vdi mem->vdi->ic->mem , add graphics plane and alpha*/
++ MEM_VDI_PRP_VF_MEM_P = _MAKE_CHAN(21, 8, 14, 17, 21),
++ MEM_VDI_PRP_VF_MEM = _MAKE_CHAN(22, 9, 14, 17, 21),
++ MEM_VDI_PRP_VF_MEM_N = _MAKE_CHAN(23, 10, 14, 17, 21),
++
++ /* for vdi mem->vdi->mem */
++ MEM_VDI_MEM_P = _MAKE_CHAN(24, 8, NO_DMA, NO_DMA, 5),
++ MEM_VDI_MEM = _MAKE_CHAN(25, 9, NO_DMA, NO_DMA, 5),
++ MEM_VDI_MEM_N = _MAKE_CHAN(26, 10, NO_DMA, NO_DMA, 5),
++
++ /* fake channel for vdoa to link with IPU */
++ MEM_VDOA_MEM = _MAKE_CHAN(27, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++
++ MEM_PP_ADC = CHAN_NONE,
++ ADC_SYS2 = CHAN_NONE,
++
++} ipu_channel_t;
++
++/*!
++ * Enumeration of types of buffers for a logical channel.
++ */
++typedef enum {
++ IPU_OUTPUT_BUFFER = 0, /*!< Buffer for output from IPU */
++ IPU_ALPHA_IN_BUFFER = 1, /*!< Buffer for input to IPU */
++ IPU_GRAPH_IN_BUFFER = 2, /*!< Buffer for input to IPU */
++ IPU_VIDEO_IN_BUFFER = 3, /*!< Buffer for input to IPU */
++ IPU_INPUT_BUFFER = IPU_VIDEO_IN_BUFFER,
++ IPU_SEC_INPUT_BUFFER = IPU_GRAPH_IN_BUFFER,
++} ipu_buffer_t;
++
++#define IPU_PANEL_SERIAL 1
++#define IPU_PANEL_PARALLEL 2
++
++/*!
++ * Enumeration of ADC channel operation mode.
++ */
++typedef enum {
++ Disable,
++ WriteTemplateNonSeq,
++ ReadTemplateNonSeq,
++ WriteTemplateUnCon,
++ ReadTemplateUnCon,
++ WriteDataWithRS,
++ WriteDataWoRS,
++ WriteCmd
++} mcu_mode_t;
++
++/*!
++ * Enumeration of ADC channel addressing mode.
++ */
++typedef enum {
++ FullWoBE,
++ FullWithBE,
++ XY
++} display_addressing_t;
++
++/*!
++ * Union of initialization parameters for a logical channel.
++ */
++typedef union {
++ struct {
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ bool interlaced;
++ } csi_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ } csi_prp_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ } mem_prp_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ } mem_rot_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ ipu_motion_sel motion_sel;
++ enum v4l2_field field_fmt;
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ } csi_prp_vf_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ display_port_t disp;
++ uint32_t out_left;
++ uint32_t out_top;
++ } csi_prp_vf_adc;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ ipu_motion_sel motion_sel;
++ enum v4l2_field field_fmt;
++ } mem_prp_vf_mem;
++ struct {
++ uint32_t temp;
++ } mem_prp_vf_adc;
++ struct {
++ uint32_t temp;
++ } mem_rot_vf_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ } mem_pp_mem;
++ struct {
++ uint32_t temp;
++ } mem_rot_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ display_port_t disp;
++ uint32_t out_left;
++ uint32_t out_top;
++ } mem_pp_adc;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ } mem_dc_sync;
++ struct {
++ uint32_t temp;
++ } mem_sdc_fg;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ bool alpha_chan_en;
++ } mem_dp_bg_sync;
++ struct {
++ uint32_t temp;
++ } mem_sdc_bg;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ bool alpha_chan_en;
++ } mem_dp_fg_sync;
++ struct {
++ uint32_t di;
++ } direct_async;
++ struct {
++ display_port_t disp;
++ mcu_mode_t ch_mode;
++ uint32_t out_left;
++ uint32_t out_top;
++ } adc_sys1;
++ struct {
++ display_port_t disp;
++ mcu_mode_t ch_mode;
++ uint32_t out_left;
++ uint32_t out_top;
++ } adc_sys2;
++} ipu_channel_params_t;
++
++/*
++ * IPU_IRQF_ONESHOT - Interrupt is not reenabled after the irq handler finished.
++ */
++#define IPU_IRQF_NONE 0x00000000
++#define IPU_IRQF_ONESHOT 0x00000001
++
++/*!
++ * Enumeration of IPU interrupt sources.
++ */
++enum ipu_irq_line {
++ IPU_IRQ_CSI0_OUT_EOF = 0,
++ IPU_IRQ_CSI1_OUT_EOF = 1,
++ IPU_IRQ_CSI2_OUT_EOF = 2,
++ IPU_IRQ_CSI3_OUT_EOF = 3,
++ IPU_IRQ_VDIC_OUT_EOF = 5,
++ IPU_IRQ_VDI_P_IN_EOF = 8,
++ IPU_IRQ_VDI_C_IN_EOF = 9,
++ IPU_IRQ_VDI_N_IN_EOF = 10,
++ IPU_IRQ_PP_IN_EOF = 11,
++ IPU_IRQ_PRP_IN_EOF = 12,
++ IPU_IRQ_PRP_GRAPH_IN_EOF = 14,
++ IPU_IRQ_PP_GRAPH_IN_EOF = 15,
++ IPU_IRQ_PRP_ALPHA_IN_EOF = 17,
++ IPU_IRQ_PP_ALPHA_IN_EOF = 18,
++ IPU_IRQ_PRP_ENC_OUT_EOF = 20,
++ IPU_IRQ_PRP_VF_OUT_EOF = 21,
++ IPU_IRQ_PP_OUT_EOF = 22,
++ IPU_IRQ_BG_SYNC_EOF = 23,
++ IPU_IRQ_BG_ASYNC_EOF = 24,
++ IPU_IRQ_FG_SYNC_EOF = 27,
++ IPU_IRQ_DC_SYNC_EOF = 28,
++ IPU_IRQ_FG_ASYNC_EOF = 29,
++ IPU_IRQ_FG_ALPHA_SYNC_EOF = 31,
++
++ IPU_IRQ_FG_ALPHA_ASYNC_EOF = 33,
++ IPU_IRQ_DC_READ_EOF = 40,
++ IPU_IRQ_DC_ASYNC_EOF = 41,
++ IPU_IRQ_DC_CMD1_EOF = 42,
++ IPU_IRQ_DC_CMD2_EOF = 43,
++ IPU_IRQ_DC_MASK_EOF = 44,
++ IPU_IRQ_PRP_ENC_ROT_IN_EOF = 45,
++ IPU_IRQ_PRP_VF_ROT_IN_EOF = 46,
++ IPU_IRQ_PP_ROT_IN_EOF = 47,
++ IPU_IRQ_PRP_ENC_ROT_OUT_EOF = 48,
++ IPU_IRQ_PRP_VF_ROT_OUT_EOF = 49,
++ IPU_IRQ_PP_ROT_OUT_EOF = 50,
++ IPU_IRQ_BG_ALPHA_SYNC_EOF = 51,
++ IPU_IRQ_BG_ALPHA_ASYNC_EOF = 52,
++
++ IPU_IRQ_BG_SYNC_NFACK = 64 + 23,
++ IPU_IRQ_FG_SYNC_NFACK = 64 + 27,
++ IPU_IRQ_DC_SYNC_NFACK = 64 + 28,
++
++ IPU_IRQ_DP_SF_START = 448 + 2,
++ IPU_IRQ_DP_SF_END = 448 + 3,
++ IPU_IRQ_BG_SF_END = IPU_IRQ_DP_SF_END,
++ IPU_IRQ_DC_FC_0 = 448 + 8,
++ IPU_IRQ_DC_FC_1 = 448 + 9,
++ IPU_IRQ_DC_FC_2 = 448 + 10,
++ IPU_IRQ_DC_FC_3 = 448 + 11,
++ IPU_IRQ_DC_FC_4 = 448 + 12,
++ IPU_IRQ_DC_FC_6 = 448 + 13,
++ IPU_IRQ_VSYNC_PRE_0 = 448 + 14,
++ IPU_IRQ_VSYNC_PRE_1 = 448 + 15,
++
++ IPU_IRQ_COUNT
++};
++
++/*!
++ * Bitfield of Display Interface signal polarities.
++ */
++typedef struct {
++ unsigned datamask_en:1;
++ unsigned int_clk:1;
++ unsigned interlaced:1;
++ unsigned odd_field_first:1;
++ unsigned clksel_en:1;
++ unsigned clkidle_en:1;
++ unsigned data_pol:1; /* true = inverted */
++ unsigned clk_pol:1; /* true = rising edge */
++ unsigned enable_pol:1;
++ unsigned Hsync_pol:1; /* true = active high */
++ unsigned Vsync_pol:1;
++} ipu_di_signal_cfg_t;
++
++/*!
++ * Bitfield of CSI signal polarities and modes.
++ */
++
++typedef struct {
++ unsigned data_width:4;
++ unsigned clk_mode:3;
++ unsigned ext_vsync:1;
++ unsigned Vsync_pol:1;
++ unsigned Hsync_pol:1;
++ unsigned pixclk_pol:1;
++ unsigned data_pol:1;
++ unsigned sens_clksrc:1;
++ unsigned pack_tight:1;
++ unsigned force_eof:1;
++ unsigned data_en_pol:1;
++ unsigned data_fmt;
++ unsigned csi;
++ unsigned mclk;
++} ipu_csi_signal_cfg_t;
++
++/*!
++ * Enumeration of CSI data bus widths.
++ */
++enum {
++ IPU_CSI_DATA_WIDTH_4 = 0,
++ IPU_CSI_DATA_WIDTH_8 = 1,
++ IPU_CSI_DATA_WIDTH_10 = 3,
++ IPU_CSI_DATA_WIDTH_16 = 9,
++};
++
++/*!
++ * Enumeration of CSI clock modes.
++ */
++enum {
++ IPU_CSI_CLK_MODE_GATED_CLK,
++ IPU_CSI_CLK_MODE_NONGATED_CLK,
++ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
++ IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
++};
++
++enum {
++ IPU_CSI_MIPI_DI0,
++ IPU_CSI_MIPI_DI1,
++ IPU_CSI_MIPI_DI2,
++ IPU_CSI_MIPI_DI3,
++};
++
++typedef enum {
++ RGB,
++ YCbCr,
++ YUV
++} ipu_color_space_t;
++
++/*!
++ * Enumeration of ADC vertical sync mode.
++ */
++typedef enum {
++ VsyncNone,
++ VsyncInternal,
++ VsyncCSI,
++ VsyncExternal
++} vsync_t;
++
++typedef enum {
++ DAT,
++ CMD
++} cmddata_t;
++
++/*!
++ * Enumeration of ADC display update mode.
++ */
++typedef enum {
++ IPU_ADC_REFRESH_NONE,
++ IPU_ADC_AUTO_REFRESH,
++ IPU_ADC_AUTO_REFRESH_SNOOP,
++ IPU_ADC_SNOOPING,
++} ipu_adc_update_mode_t;
++
++/*!
++ * Enumeration of ADC display interface types (serial or parallel).
++ */
++enum {
++ IPU_ADC_IFC_MODE_SYS80_TYPE1,
++ IPU_ADC_IFC_MODE_SYS80_TYPE2,
++ IPU_ADC_IFC_MODE_SYS68K_TYPE1,
++ IPU_ADC_IFC_MODE_SYS68K_TYPE2,
++ IPU_ADC_IFC_MODE_3WIRE_SERIAL,
++ IPU_ADC_IFC_MODE_4WIRE_SERIAL,
++ IPU_ADC_IFC_MODE_5WIRE_SERIAL_CLK,
++ IPU_ADC_IFC_MODE_5WIRE_SERIAL_CS,
++};
++
++enum {
++ IPU_ADC_IFC_WIDTH_8,
++ IPU_ADC_IFC_WIDTH_16,
++};
++
++/*!
++ * Enumeration of ADC display interface burst mode.
++ */
++enum {
++ IPU_ADC_BURST_WCS,
++ IPU_ADC_BURST_WBLCK,
++ IPU_ADC_BURST_NONE,
++ IPU_ADC_BURST_SERIAL,
++};
++
++/*!
++ * Enumeration of ADC display interface RW signal timing modes.
++ */
++enum {
++ IPU_ADC_SER_NO_RW,
++ IPU_ADC_SER_RW_BEFORE_RS,
++ IPU_ADC_SER_RW_AFTER_RS,
++};
++
++/*!
++ * Bitfield of ADC signal polarities and modes.
++ */
++typedef struct {
++ unsigned data_pol:1;
++ unsigned clk_pol:1;
++ unsigned cs_pol:1;
++ unsigned rs_pol:1;
++ unsigned addr_pol:1;
++ unsigned read_pol:1;
++ unsigned write_pol:1;
++ unsigned Vsync_pol:1;
++ unsigned burst_pol:1;
++ unsigned burst_mode:2;
++ unsigned ifc_mode:3;
++ unsigned ifc_width:5;
++ unsigned ser_preamble_len:4;
++ unsigned ser_preamble:8;
++ unsigned ser_rw_mode:2;
++} ipu_adc_sig_cfg_t;
++
++/*!
++ * Enumeration of ADC template commands.
++ */
++enum {
++ RD_DATA,
++ RD_ACK,
++ RD_WAIT,
++ WR_XADDR,
++ WR_YADDR,
++ WR_ADDR,
++ WR_CMND,
++ WR_DATA,
++};
++
++/*!
++ * Enumeration of ADC template command flow control.
++ */
++enum {
++ SINGLE_STEP,
++ PAUSE,
++ STOP,
++};
++
++
++/*Define template constants*/
++#define ATM_ADDR_RANGE 0x20 /*offset address of DISP */
++#define TEMPLATE_BUF_SIZE 0x20 /*size of template */
++
++/*!
++ * Define to create ADC template command entry.
++ */
++#define ipu_adc_template_gen(oc, rs, fc, dat) (((rs) << 29) | ((fc) << 27) | \
++ ((oc) << 24) | (dat))
++
++typedef struct {
++ u32 reg;
++ u32 value;
++} ipu_lpmc_reg_t;
++
++#define IPU_LPMC_REG_READ 0x80000000L
++
++#define CSI_MCLK_VF 1
++#define CSI_MCLK_ENC 2
++#define CSI_MCLK_RAW 4
++#define CSI_MCLK_I2C 8
++
++struct ipu_soc;
++/* Common IPU API */
++struct ipu_soc *ipu_get_soc(int id);
++int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params);
++void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel);
++void ipu_disable_hsp_clk(struct ipu_soc *ipu);
++
++static inline bool ipu_can_rotate_in_place(ipu_rotate_mode_t rot)
++{
++#ifdef CONFIG_MXC_IPU_V3D
++ return (rot < IPU_ROTATE_HORIZ_FLIP);
++#else
++ return (rot < IPU_ROTATE_90_RIGHT);
++#endif
++}
++
++int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ ipu_rotate_mode_t rot_mode,
++ dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
++ dma_addr_t phyaddr_2,
++ uint32_t u_offset, uint32_t v_offset);
++
++int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum, dma_addr_t phyaddr);
++
++int32_t ipu_update_channel_offset(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t vertical_offset, uint32_t horizontal_offset);
++
++int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum);
++int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum);
++
++int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch);
++int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch);
++
++int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel);
++int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum);
++void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum);
++uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type);
++int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel);
++int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop);
++int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch);
++uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel);
++
++int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi);
++int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi);
++
++int ipu_lowpwr_display_enable(void);
++int ipu_lowpwr_display_disable(void);
++
++int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq);
++void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq);
++void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq);
++int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
++ irqreturn_t(*handler) (int, void *),
++ uint32_t irq_flags, const char *devname, void *dev_id);
++void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id);
++bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq);
++void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3]);
++int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t band_height);
++
++/* two stripe calculations */
++struct stripe_param{
++ unsigned int input_width; /* width of the input stripe */
++ unsigned int output_width; /* width of the output stripe */
++ unsigned int input_column; /* the first column on the input stripe */
++ unsigned int output_column; /* the first column on the output stripe */
++ unsigned int idr;
++ /* inverse downisizing ratio parameter; expressed as a power of 2 */
++ unsigned int irr;
++ /* inverse resizing ratio parameter; expressed as a multiple of 2^-13 */
++};
++int ipu_calc_stripes_sizes(const unsigned int input_frame_width,
++ unsigned int output_frame_width,
++ const unsigned int maximal_stripe_width,
++ const unsigned long long cirr,
++ const unsigned int equal_stripes,
++ u32 input_pixelformat,
++ u32 output_pixelformat,
++ struct stripe_param *left,
++ struct stripe_param *right);
++
++/* SDC API */
++int32_t ipu_init_sync_panel(struct ipu_soc *ipu, int disp,
++ uint32_t pixel_clk,
++ uint16_t width, uint16_t height,
++ uint32_t pixel_fmt,
++ uint16_t h_start_width, uint16_t h_sync_width,
++ uint16_t h_end_width, uint16_t v_start_width,
++ uint16_t v_sync_width, uint16_t v_end_width,
++ uint32_t v_to_h_sync, ipu_di_signal_cfg_t sig);
++
++void ipu_uninit_sync_panel(struct ipu_soc *ipu, int disp);
++
++int32_t ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel, int16_t x_pos,
++ int16_t y_pos);
++int32_t ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel, int16_t *x_pos,
++ int16_t *y_pos);
++int32_t ipu_disp_set_global_alpha(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ uint8_t alpha);
++int32_t ipu_disp_set_color_key(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ uint32_t colorKey);
++int32_t ipu_disp_set_gamma_correction(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ int constk[], int slopek[]);
++
++int ipu_init_async_panel(struct ipu_soc *ipu, int disp, int type, uint32_t cycle_time,
++ uint32_t pixel_fmt, ipu_adc_sig_cfg_t sig);
++void ipu_disp_direct_write(struct ipu_soc *ipu, ipu_channel_t channel, u32 value, u32 offset);
++void ipu_reset_disp_panel(struct ipu_soc *ipu);
++
++/* CMOS Sensor Interface API */
++int32_t ipu_csi_init_interface(struct ipu_soc *ipu, uint16_t width, uint16_t height,
++ uint32_t pixel_fmt, ipu_csi_signal_cfg_t sig);
++
++int32_t ipu_csi_get_sensor_protocol(struct ipu_soc *ipu, uint32_t csi);
++
++int32_t ipu_csi_enable_mclk(struct ipu_soc *ipu, int src, bool flag, bool wait);
++
++static inline int32_t ipu_csi_enable_mclk_if(struct ipu_soc *ipu, int src, uint32_t csi,
++ bool flag, bool wait)
++{
++ return ipu_csi_enable_mclk(ipu, csi, flag, wait);
++}
++
++int ipu_csi_read_mclk_flag(void);
++
++void ipu_csi_flash_strobe(bool flag);
++
++void ipu_csi_get_window_size(struct ipu_soc *ipu, uint32_t *width, uint32_t *height, uint32_t csi);
++
++void ipu_csi_set_window_size(struct ipu_soc *ipu, uint32_t width, uint32_t height, uint32_t csi);
++
++void ipu_csi_set_window_pos(struct ipu_soc *ipu, uint32_t left, uint32_t top, uint32_t csi);
++
++uint32_t bytes_per_pixel(uint32_t fmt);
++
++bool ipu_ch_param_bad_alpha_pos(uint32_t fmt);
++
++struct ipuv3_fb_platform_data {
++ char disp_dev[32];
++ u32 interface_pix_fmt;
++ char *mode_str;
++ int default_bpp;
++ bool int_clk;
++
++ /* reserved mem */
++ resource_size_t res_base[2];
++ resource_size_t res_size[2];
++
++ /*
++ * Late init to avoid display channel being
++ * re-initialized as we've probably setup the
++ * channel in bootloader.
++ */
++ bool late_init;
++};
++
++#endif /* __LINUX_IPU_V3_H_ */
+diff -Nur linux-3.14.36/include/linux/isl29023.h linux-openelec/include/linux/isl29023.h
+--- linux-3.14.36/include/linux/isl29023.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/isl29023.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,47 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __ISL29023_H__
++#define __ISL29023_H__
++
++#include <linux/types.h>
++
++#define ISL29023_PD_MODE 0x0
++#define ISL29023_ALS_ONCE_MODE 0x1
++#define ISL29023_IR_ONCE_MODE 0x2
++#define ISL29023_ALS_CONT_MODE 0x5
++#define ISL29023_IR_CONT_MODE 0x6
++
++#define ISL29023_INT_PERSISTS_1 0x0
++#define ISL29023_INT_PERSISTS_4 0x1
++#define ISL29023_INT_PERSISTS_8 0x2
++#define ISL29023_INT_PERSISTS_16 0x3
++
++#define ISL29023_RES_16 0x0
++#define ISL29023_RES_12 0x1
++#define ISL29023_RES_8 0x2
++#define ISL29023_RES_4 0x3
++
++#define ISL29023_RANGE_1K 0x0
++#define ISL29023_RANGE_4K 0x1
++#define ISL29023_RANGE_16K 0x2
++#define ISL29023_RANGE_64K 0x3
++
++#endif
+diff -Nur linux-3.14.36/include/linux/kfifo.h linux-openelec/include/linux/kfifo.h
+--- linux-3.14.36/include/linux/kfifo.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/kfifo.h 2015-05-06 12:05:44.000000000 -0500
+@@ -722,7 +722,7 @@
+ /**
+ * kfifo_dma_out_finish - finish a DMA OUT operation
+ * @fifo: address of the fifo to be used
+- * @len: number of bytes transferd
++ * @len: number of bytes transferrd
+ *
+ * This macro finish a DMA OUT operation. The out counter will be updated by
+ * the len parameter. No error checking will be done.
+diff -Nur linux-3.14.36/include/linux/mailbox_client.h linux-openelec/include/linux/mailbox_client.h
+--- linux-3.14.36/include/linux/mailbox_client.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mailbox_client.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,46 @@
++/*
++ * Copyright (C) 2014 Linaro Ltd.
++ * Author: Jassi Brar <jassisinghbrar@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __MAILBOX_CLIENT_H
++#define __MAILBOX_CLIENT_H
++
++#include <linux/of.h>
++
++struct mbox_chan;
++
++/**
++ * struct mbox_client - User of a mailbox
++ * @dev: The client device
++ * @chan_name: The "controller:channel" this client wants
++ * @rx_callback: Atomic callback to provide client the data received
++ * @tx_done: Atomic callback to tell client of data transmission
++ * @tx_block: If the mbox_send_message should block until data is
++ * transmitted.
++ * @tx_tout: Max block period in ms before TX is assumed failure
++ * @knows_txdone: if the client could run the TX state machine. Usually
++ * if the client receives some ACK packet for transmission.
++ * Unused if the controller already has TX_Done/RTR IRQ.
++ */
++struct mbox_client {
++ struct device *dev;
++ const char *chan_name;
++ void (*rx_callback)(struct mbox_client *cl, void *mssg);
++ void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
++ bool tx_block;
++ unsigned long tx_tout;
++ bool knows_txdone;
++};
++
++struct mbox_chan *mbox_request_channel(struct mbox_client *cl);
++int mbox_send_message(struct mbox_chan *chan, void *mssg);
++void mbox_client_txdone(struct mbox_chan *chan, int r);
++bool mbox_client_peek_data(struct mbox_chan *chan);
++void mbox_free_channel(struct mbox_chan *chan);
++
++#endif /* __MAILBOX_CLIENT_H */
+diff -Nur linux-3.14.36/include/linux/mailbox_controller.h linux-openelec/include/linux/mailbox_controller.h
+--- linux-3.14.36/include/linux/mailbox_controller.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mailbox_controller.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,121 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __MAILBOX_CONTROLLER_H
++#define __MAILBOX_CONTROLLER_H
++
++#include <linux/of.h>
++
++struct mbox_chan;
++
++/**
++ * struct mbox_chan_ops - s/w representation of a communication chan
++ * @send_data: The API asks the MBOX controller driver, in atomic
++ * context try to transmit a message on the bus. Returns 0 if
++ * data is accepted for transmission, -EBUSY while rejecting
++ * if the remote hasn't yet read the last data sent. Actual
++ * transmission of data is reported by the controller via
++ * mbox_chan_txdone (if it has some TX ACK irq). It must not
++ * block.
++ * @startup: Called when a client requests the chan. The controller
++ * could ask clients for additional parameters of communication
++ * to be provided via client's chan_data. This call may
++ * block. After this call the Controller must forward any
++ * data received on the chan by calling mbox_chan_received_data.
++ * @shutdown: Called when a client relinquishes control of a chan.
++ * This call may block too. The controller must not forward
++ * any received data anymore.
++ * @last_tx_done: If the controller sets 'txdone_poll', the API calls
++ * this to poll status of last TX. The controller must
++ * give priority to IRQ method over polling and never
++ * set both txdone_poll and txdone_irq. Only in polling
++ * mode 'send_data' is expected to return -EBUSY.
++ * Used only if txdone_poll:=true && txdone_irq:=false
++ * @peek_data: Atomic check for any received data. Return true if controller
++ * has some data to push to the client. False otherwise.
++ */
++struct mbox_chan_ops {
++ int (*send_data)(struct mbox_chan *chan, void *data);
++ int (*startup)(struct mbox_chan *chan);
++ void (*shutdown)(struct mbox_chan *chan);
++ bool (*last_tx_done)(struct mbox_chan *chan);
++ bool (*peek_data)(struct mbox_chan *chan);
++};
++
++/**
++ * struct mbox_controller - Controller of a class of communication chans
++ * @dev: Device backing this controller
++ * @controller_name: Literal name of the controller.
++ * @ops: Operators that work on each communication chan
++ * @chans: Null terminated array of chans.
++ * @txdone_irq: Indicates if the controller can report to API when
++ * the last transmitted data was read by the remote.
++ * Eg, if it has some TX ACK irq.
++ * @txdone_poll: If the controller can read but not report the TX
++ * done. Ex, some register shows the TX status but
++ * no interrupt rises. Ignored if 'txdone_irq' is set.
++ * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
++ * last TX's status after these many millisecs
++ */
++struct mbox_controller {
++ struct device *dev;
++ struct mbox_chan_ops *ops;
++ struct mbox_chan *chans;
++ int num_chans;
++ bool txdone_irq;
++ bool txdone_poll;
++ unsigned txpoll_period;
++ struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
++ const struct of_phandle_args *sp);
++ /*
++ * If the controller supports only TXDONE_BY_POLL,
++ * this timer polls all the links for txdone.
++ */
++ struct timer_list poll;
++ unsigned period;
++ /* Hook to add to the global controller list */
++ struct list_head node;
++};
++
++/*
++ * The length of circular buffer for queuing messages from a client.
++ * 'msg_count' tracks the number of buffered messages while 'msg_free'
++ * is the index where the next message would be buffered.
++ * We shouldn't need it too big because every transfer is interrupt
++ * triggered and if we have lots of data to transfer, the interrupt
++ * latencies are going to be the bottleneck, not the buffer length.
++ * Besides, mbox_send_message could be called from atomic context and
++ * the client could also queue another message from the notifier 'tx_done'
++ * of the last transfer done.
++ * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
++ * print, it needs to be taken from config option or somesuch.
++ */
++#define MBOX_TX_QUEUE_LEN 20
++
++struct mbox_chan {
++ struct mbox_controller *mbox; /* Parent Controller */
++ unsigned txdone_method;
++
++ /* client */
++ struct mbox_client *cl;
++ struct completion tx_complete;
++
++ void *active_req;
++ unsigned msg_count, msg_free;
++ void *msg_data[MBOX_TX_QUEUE_LEN];
++ /* Access to the channel */
++ spinlock_t lock;
++
++ /* Private data for controller */
++ void *con_priv;
++};
++
++int mbox_controller_register(struct mbox_controller *mbox);
++void mbox_chan_received_data(struct mbox_chan *chan, void *data);
++void mbox_chan_txdone(struct mbox_chan *chan, int r);
++void mbox_controller_unregister(struct mbox_controller *mbox);
++
++#endif /* __MAILBOX_CONTROLLER_H */
+diff -Nur linux-3.14.36/include/linux/mailbox.h linux-openelec/include/linux/mailbox.h
+--- linux-3.14.36/include/linux/mailbox.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mailbox.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,17 +0,0 @@
+-/*
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program. If not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-int pl320_ipc_transmit(u32 *data);
+-int pl320_ipc_register_notifier(struct notifier_block *nb);
+-int pl320_ipc_unregister_notifier(struct notifier_block *nb);
+diff -Nur linux-3.14.36/include/linux/memblock.h linux-openelec/include/linux/memblock.h
+--- linux-3.14.36/include/linux/memblock.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/memblock.h 2015-05-06 12:05:44.000000000 -0500
+@@ -221,6 +221,8 @@
+ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
+ #define MEMBLOCK_ALLOC_ACCESSIBLE 0
+
++phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
++ phys_addr_t start, phys_addr_t end);
+ phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+ phys_addr_t max_addr);
+ phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+diff -Nur linux-3.14.36/include/linux/mfd/abx500/ab8500.h linux-openelec/include/linux/mfd/abx500/ab8500.h
+--- linux-3.14.36/include/linux/mfd/abx500/ab8500.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mfd/abx500/ab8500.h 2015-05-06 12:05:44.000000000 -0500
+@@ -347,7 +347,6 @@
+ struct mutex lock;
+ struct mutex irq_lock;
+ atomic_t transfer_ongoing;
+- int irq_base;
+ int irq;
+ struct irq_domain *domain;
+ enum ab8500_version version;
+@@ -378,7 +377,6 @@
+ * @regulator: machine-specific constraints for regulators
+ */
+ struct ab8500_platform_data {
+- int irq_base;
+ void (*init) (struct ab8500 *);
+ struct ab8500_regulator_platform_data *regulator;
+ struct ab8500_codec_platform_data *codec;
+diff -Nur linux-3.14.36/include/linux/mfd/dbx500-prcmu.h linux-openelec/include/linux/mfd/dbx500-prcmu.h
+--- linux-3.14.36/include/linux/mfd/dbx500-prcmu.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mfd/dbx500-prcmu.h 2015-05-06 12:05:44.000000000 -0500
+@@ -183,8 +183,6 @@
+ bool enable_set_ddr_opp;
+ bool enable_ape_opp_100_voltage;
+ struct ab8500_platform_data *ab_platdata;
+- int ab_irq;
+- int irq_base;
+ u32 version_offset;
+ u32 legacy_offset;
+ u32 adt_offset;
+diff -Nur linux-3.14.36/include/linux/mfd/mxc-hdmi-core.h linux-openelec/include/linux/mfd/mxc-hdmi-core.h
+--- linux-3.14.36/include/linux/mfd/mxc-hdmi-core.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mfd/mxc-hdmi-core.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef __LINUX_MXC_HDMI_CORE_H_
++#define __LINUX_MXC_HDMI_CORE_H_
++
++#include <video/mxc_edid.h>
++
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++
++#define IRQ_DISABLE_SUCCEED 0
++#define IRQ_DISABLE_FAIL 1
++
++bool hdmi_check_overflow(void);
++
++u8 hdmi_readb(unsigned int reg);
++void hdmi_writeb(u8 value, unsigned int reg);
++void hdmi_mask_writeb(u8 data, unsigned int addr, u8 shift, u8 mask);
++unsigned int hdmi_read4(unsigned int reg);
++void hdmi_write4(unsigned int value, unsigned int reg);
++
++void hdmi_irq_init(void);
++void hdmi_irq_enable(int irq);
++unsigned int hdmi_irq_disable(int irq);
++
++void hdmi_set_sample_rate(unsigned int rate);
++void hdmi_set_dma_mode(unsigned int dma_running);
++void hdmi_init_clk_regenerator(void);
++void hdmi_clk_regenerator_update_pixel_clock(u32 pixclock);
++
++void hdmi_set_edid_cfg(int edid_status, struct mxc_edid_cfg *cfg);
++int hdmi_get_edid_cfg(struct mxc_edid_cfg *cfg);
++
++extern int mxc_hdmi_ipu_id;
++extern int mxc_hdmi_disp_id;
++
++void hdmi_set_registered(int registered);
++int hdmi_get_registered(void);
++int mxc_hdmi_abort_stream(void);
++int mxc_hdmi_register_audio(struct snd_pcm_substream *substream);
++void mxc_hdmi_unregister_audio(struct snd_pcm_substream *substream);
++void hdmi_set_dvi_mode(unsigned int state);
++unsigned int hdmi_set_cable_state(unsigned int state);
++unsigned int hdmi_set_blank_state(unsigned int state);
++int check_hdmi_state(void);
++
++void hdmi_cec_start_device(void);
++void hdmi_cec_stop_device(void);
++
++#endif
+diff -Nur linux-3.14.36/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h linux-openelec/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+--- linux-3.14.36/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h 2015-05-06 12:05:44.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -122,7 +122,9 @@
+ #define IMX6Q_GPR1_USB_OTG_ID_SEL_MASK BIT(13)
+ #define IMX6Q_GPR1_USB_OTG_ID_SEL_ENET_RX_ER 0x0
+ #define IMX6Q_GPR1_USB_OTG_ID_SEL_GPIO_1 BIT(13)
+-#define IMX6Q_GPR1_GINT BIT(12)
++#define IMX6Q_GPR1_GINT_MASK BIT(12)
++#define IMX6Q_GPR1_GINT_CLEAR 0x0
++#define IMX6Q_GPR1_GINT_ASSERT BIT(12)
+ #define IMX6Q_GPR1_ADDRS3_MASK (0x3 << 10)
+ #define IMX6Q_GPR1_ADDRS3_32MB (0x0 << 10)
+ #define IMX6Q_GPR1_ADDRS3_64MB (0x1 << 10)
+diff -Nur linux-3.14.36/include/linux/mipi_csi2.h linux-openelec/include/linux/mipi_csi2.h
+--- linux-3.14.36/include/linux/mipi_csi2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mipi_csi2.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,93 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __INCLUDE_MIPI_CSI2_H
++#define __INCLUDE_MIPI_CSI2_H
++
++/* MIPI CSI2 registers */
++#define MIPI_CSI2_REG(offset) (offset)
++
++#define MIPI_CSI2_VERSION MIPI_CSI2_REG(0x000)
++#define MIPI_CSI2_N_LANES MIPI_CSI2_REG(0x004)
++#define MIPI_CSI2_PHY_SHUTDOWNZ MIPI_CSI2_REG(0x008)
++#define MIPI_CSI2_DPHY_RSTZ MIPI_CSI2_REG(0x00c)
++#define MIPI_CSI2_CSI2_RESETN MIPI_CSI2_REG(0x010)
++#define MIPI_CSI2_PHY_STATE MIPI_CSI2_REG(0x014)
++#define MIPI_CSI2_DATA_IDS_1 MIPI_CSI2_REG(0x018)
++#define MIPI_CSI2_DATA_IDS_2 MIPI_CSI2_REG(0x01c)
++#define MIPI_CSI2_ERR1 MIPI_CSI2_REG(0x020)
++#define MIPI_CSI2_ERR2 MIPI_CSI2_REG(0x024)
++#define MIPI_CSI2_MASK1 MIPI_CSI2_REG(0x028)
++#define MIPI_CSI2_MASK2 MIPI_CSI2_REG(0x02c)
++#define MIPI_CSI2_PHY_TST_CTRL0 MIPI_CSI2_REG(0x030)
++#define MIPI_CSI2_PHY_TST_CTRL1 MIPI_CSI2_REG(0x034)
++#define MIPI_CSI2_SFT_RESET MIPI_CSI2_REG(0xf00)
++
++/* mipi data type */
++#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */
++#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */
++#define MIPI_DT_YUV422 0x1e /* UYVY... */
++#define MIPI_DT_RGB444 0x20
++#define MIPI_DT_RGB555 0x21
++#define MIPI_DT_RGB565 0x22
++#define MIPI_DT_RGB666 0x23
++#define MIPI_DT_RGB888 0x24
++#define MIPI_DT_RAW6 0x28
++#define MIPI_DT_RAW7 0x29
++#define MIPI_DT_RAW8 0x2a
++#define MIPI_DT_RAW10 0x2b
++#define MIPI_DT_RAW12 0x2c
++#define MIPI_DT_RAW14 0x2d
++
++
++struct mipi_csi2_info;
++/* mipi csi2 API */
++struct mipi_csi2_info *mipi_csi2_get_info(void);
++
++bool mipi_csi2_enable(struct mipi_csi2_info *info);
++
++bool mipi_csi2_disable(struct mipi_csi2_info *info);
++
++bool mipi_csi2_get_status(struct mipi_csi2_info *info);
++
++int mipi_csi2_get_bind_ipu(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_bind_csi(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_virtual_channel(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_set_lanes(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_set_datatype(struct mipi_csi2_info *info,
++ unsigned int datatype);
++
++unsigned int mipi_csi2_get_datatype(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_dphy_status(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_error1(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_error2(struct mipi_csi2_info *info);
++
++int mipi_csi2_pixelclk_enable(struct mipi_csi2_info *info);
++
++void mipi_csi2_pixelclk_disable(struct mipi_csi2_info *info);
++
++int mipi_csi2_reset(struct mipi_csi2_info *info);
++
++#endif
+diff -Nur linux-3.14.36/include/linux/mipi_dsi.h linux-openelec/include/linux/mipi_dsi.h
+--- linux-3.14.36/include/linux/mipi_dsi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mipi_dsi.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,171 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __INCLUDE_MIPI_DSI_H
++#define __INCLUDE_MIPI_DSI_H
++
++#define MIPI_DSI_VERSION (0x000)
++#define MIPI_DSI_PWR_UP (0x004)
++#define MIPI_DSI_CLKMGR_CFG (0x008)
++#define MIPI_DSI_DPI_CFG (0x00c)
++#define MIPI_DSI_DBI_CFG (0x010)
++#define MIPI_DSI_DBIS_CMDSIZE (0x014)
++#define MIPI_DSI_PCKHDL_CFG (0x018)
++#define MIPI_DSI_VID_MODE_CFG (0x01c)
++#define MIPI_DSI_VID_PKT_CFG (0x020)
++#define MIPI_DSI_CMD_MODE_CFG (0x024)
++#define MIPI_DSI_TMR_LINE_CFG (0x028)
++#define MIPI_DSI_VTIMING_CFG (0x02c)
++#define MIPI_DSI_PHY_TMR_CFG (0x030)
++#define MIPI_DSI_GEN_HDR (0x034)
++#define MIPI_DSI_GEN_PLD_DATA (0x038)
++#define MIPI_DSI_CMD_PKT_STATUS (0x03c)
++#define MIPI_DSI_TO_CNT_CFG (0x040)
++#define MIPI_DSI_ERROR_ST0 (0x044)
++#define MIPI_DSI_ERROR_ST1 (0x048)
++#define MIPI_DSI_ERROR_MSK0 (0x04c)
++#define MIPI_DSI_ERROR_MSK1 (0x050)
++#define MIPI_DSI_PHY_RSTZ (0x054)
++#define MIPI_DSI_PHY_IF_CFG (0x058)
++#define MIPI_DSI_PHY_IF_CTRL (0x05c)
++#define MIPI_DSI_PHY_STATUS (0x060)
++#define MIPI_DSI_PHY_TST_CTRL0 (0x064)
++#define MIPI_DSI_PHY_TST_CTRL1 (0x068)
++
++#define DSI_PWRUP_RESET (0x0 << 0)
++#define DSI_PWRUP_POWERUP (0x1 << 0)
++
++#define DSI_DPI_CFG_VID_SHIFT (0)
++#define DSI_DPI_CFG_VID_MASK (0x3)
++#define DSI_DPI_CFG_COLORCODE_SHIFT (2)
++#define DSI_DPI_CFG_COLORCODE_MASK (0x7)
++#define DSI_DPI_CFG_DATAEN_ACT_LOW (0x1 << 5)
++#define DSI_DPI_CFG_DATAEN_ACT_HIGH (0x0 << 5)
++#define DSI_DPI_CFG_VSYNC_ACT_LOW (0x1 << 6)
++#define DSI_DPI_CFG_VSYNC_ACT_HIGH (0x0 << 6)
++#define DSI_DPI_CFG_HSYNC_ACT_LOW (0x1 << 7)
++#define DSI_DPI_CFG_HSYNC_ACT_HIGH (0x0 << 7)
++#define DSI_DPI_CFG_SHUTD_ACT_LOW (0x1 << 8)
++#define DSI_DPI_CFG_SHUTD_ACT_HIGH (0x0 << 8)
++#define DSI_DPI_CFG_COLORMODE_ACT_LOW (0x1 << 9)
++#define DSI_DPI_CFG_COLORMODE_ACT_HIGH (0x0 << 9)
++#define DSI_DPI_CFG_EN18LOOSELY (0x1 << 10)
++
++#define DSI_PCKHDL_CFG_EN_EOTP_TX (0x1 << 0)
++#define DSI_PCKHDL_CFG_EN_EOTP_RX (0x1 << 1)
++#define DSI_PCKHDL_CFG_EN_BTA (0x1 << 2)
++#define DSI_PCKHDL_CFG_EN_ECC_RX (0x1 << 3)
++#define DSI_PCKHDL_CFG_EN_CRC_RX (0x1 << 4)
++#define DSI_PCKHDL_CFG_GEN_VID_RX_MASK (0x3)
++#define DSI_PCKHDL_CFG_GEN_VID_RX_SHIFT (5)
++
++#define DSI_VID_MODE_CFG_EN (0x1 << 0)
++#define DSI_VID_MODE_CFG_EN_BURSTMODE (0x3 << 1)
++#define DSI_VID_MODE_CFG_TYPE_MASK (0x3)
++#define DSI_VID_MODE_CFG_TYPE_SHIFT (1)
++#define DSI_VID_MODE_CFG_EN_LP_VSA (0x1 << 3)
++#define DSI_VID_MODE_CFG_EN_LP_VBP (0x1 << 4)
++#define DSI_VID_MODE_CFG_EN_LP_VFP (0x1 << 5)
++#define DSI_VID_MODE_CFG_EN_LP_VACT (0x1 << 6)
++#define DSI_VID_MODE_CFG_EN_LP_HBP (0x1 << 7)
++#define DSI_VID_MODE_CFG_EN_LP_HFP (0x1 << 8)
++#define DSI_VID_MODE_CFG_EN_MULTI_PKT (0x1 << 9)
++#define DSI_VID_MODE_CFG_EN_NULL_PKT (0x1 << 10)
++#define DSI_VID_MODE_CFG_EN_FRAME_ACK (0x1 << 11)
++#define DSI_VID_MODE_CFG_EN_LP_MODE (DSI_VID_MODE_CFG_EN_LP_VSA | \
++ DSI_VID_MODE_CFG_EN_LP_VBP | \
++ DSI_VID_MODE_CFG_EN_LP_VFP | \
++ DSI_VID_MODE_CFG_EN_LP_HFP | \
++ DSI_VID_MODE_CFG_EN_LP_HBP | \
++ DSI_VID_MODE_CFG_EN_LP_VACT)
++
++
++
++#define DSI_VID_PKT_CFG_VID_PKT_SZ_MASK (0x7ff)
++#define DSI_VID_PKT_CFG_VID_PKT_SZ_SHIFT (0)
++#define DSI_VID_PKT_CFG_NUM_CHUNKS_MASK (0x3ff)
++#define DSI_VID_PKT_CFG_NUM_CHUNKS_SHIFT (11)
++#define DSI_VID_PKT_CFG_NULL_PKT_SZ_MASK (0x3ff)
++#define DSI_VID_PKT_CFG_NULL_PKT_SZ_SHIFT (21)
++
++#define MIPI_DSI_CMD_MODE_CFG_EN_LOWPOWER (0x1FFF)
++#define MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE (0x1 << 0)
++
++#define DSI_TME_LINE_CFG_HSA_TIME_MASK (0x1ff)
++#define DSI_TME_LINE_CFG_HSA_TIME_SHIFT (0)
++#define DSI_TME_LINE_CFG_HBP_TIME_MASK (0x1ff)
++#define DSI_TME_LINE_CFG_HBP_TIME_SHIFT (9)
++#define DSI_TME_LINE_CFG_HLINE_TIME_MASK (0x3fff)
++#define DSI_TME_LINE_CFG_HLINE_TIME_SHIFT (18)
++
++#define DSI_VTIMING_CFG_VSA_LINES_MASK (0xf)
++#define DSI_VTIMING_CFG_VSA_LINES_SHIFT (0)
++#define DSI_VTIMING_CFG_VBP_LINES_MASK (0x3f)
++#define DSI_VTIMING_CFG_VBP_LINES_SHIFT (4)
++#define DSI_VTIMING_CFG_VFP_LINES_MASK (0x3f)
++#define DSI_VTIMING_CFG_VFP_LINES_SHIFT (10)
++#define DSI_VTIMING_CFG_V_ACT_LINES_MASK (0x7ff)
++#define DSI_VTIMING_CFG_V_ACT_LINES_SHIFT (16)
++
++#define DSI_PHY_TMR_CFG_BTA_TIME_MASK (0xfff)
++#define DSI_PHY_TMR_CFG_BTA_TIME_SHIFT (0)
++#define DSI_PHY_TMR_CFG_LP2HS_TIME_MASK (0xff)
++#define DSI_PHY_TMR_CFG_LP2HS_TIME_SHIFT (12)
++#define DSI_PHY_TMR_CFG_HS2LP_TIME_MASK (0xff)
++#define DSI_PHY_TMR_CFG_HS2LP_TIME_SHIFT (20)
++
++#define DSI_PHY_IF_CFG_N_LANES_MASK (0x3)
++#define DSI_PHY_IF_CFG_N_LANES_SHIFT (0)
++#define DSI_PHY_IF_CFG_WAIT_TIME_MASK (0xff)
++#define DSI_PHY_IF_CFG_WAIT_TIME_SHIFT (2)
++
++#define DSI_PHY_RSTZ_EN_CLK (0x1 << 2)
++#define DSI_PHY_RSTZ_DISABLE_RST (0x1 << 1)
++#define DSI_PHY_RSTZ_DISABLE_SHUTDOWN (0x1 << 0)
++#define DSI_PHY_RSTZ_RST (0x0)
++
++#define DSI_PHY_STATUS_LOCK (0x1 << 0)
++#define DSI_PHY_STATUS_STOPSTATE_CLK_LANE (0x1 << 2)
++
++#define DSI_GEN_HDR_TYPE_MASK (0xff)
++#define DSI_GEN_HDR_TYPE_SHIFT (0)
++#define DSI_GEN_HDR_DATA_MASK (0xffff)
++#define DSI_GEN_HDR_DATA_SHIFT (8)
++
++#define DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY (0x1 << 0)
++#define DSI_CMD_PKT_STATUS_GEN_CMD_FULL (0x1 << 1)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY (0x1 << 2)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL (0x1 << 3)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_R_EMPTY (0x1 << 4)
++#define DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY (0x1 << 6)
++
++#define DSI_ERROR_MSK0_ALL_MASK (0x1fffff)
++#define DSI_ERROR_MSK1_ALL_MASK (0x3ffff)
++
++#define DSI_PHY_IF_CTRL_RESET (0x0)
++#define DSI_PHY_IF_CTRL_TX_REQ_CLK_HS (0x1 << 0)
++#define DSI_PHY_IF_CTRL_TX_REQ_CLK_ULPS (0x1 << 1)
++#define DSI_PHY_IF_CTRL_TX_EXIT_CLK_ULPS (0x1 << 2)
++#define DSI_PHY_IF_CTRL_TX_REQ_DATA_ULPS (0x1 << 3)
++#define DSI_PHY_IF_CTRL_TX_EXIT_DATA_ULPS (0x1 << 4)
++#define DSI_PHY_IF_CTRL_TX_TRIG_MASK (0xF)
++#define DSI_PHY_IF_CTRL_TX_TRIG_SHIFT (5)
++
++#define DSI_PHY_CLK_INIT_COMMAND (0x44)
++#define DSI_GEN_PLD_DATA_BUF_SIZE (0x4)
++#endif
+diff -Nur linux-3.14.36/include/linux/mmc/card.h linux-openelec/include/linux/mmc/card.h
+--- linux-3.14.36/include/linux/mmc/card.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mmc/card.h 2015-05-06 12:05:44.000000000 -0500
+@@ -86,10 +86,13 @@
+ unsigned int data_sector_size; /* 512 bytes or 4KB */
+ unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
+ unsigned int boot_ro_lock; /* ro lock support */
++ unsigned int boot_size;
+ bool boot_ro_lockable;
+ u8 raw_exception_status; /* 54 */
+ u8 raw_partition_support; /* 160 */
+ u8 raw_rpmb_size_mult; /* 168 */
++ u8 boot_bus_width; /* 177 */
++ u8 boot_config; /* 179 */
+ u8 raw_erased_mem_count; /* 181 */
+ u8 raw_ext_csd_structure; /* 194 */
+ u8 raw_card_type; /* 196 */
+@@ -102,6 +105,7 @@
+ u8 raw_hc_erase_gap_size; /* 221 */
+ u8 raw_erase_timeout_mult; /* 223 */
+ u8 raw_hc_erase_grp_size; /* 224 */
++ u8 boot_info; /* 228 */
+ u8 raw_sec_trim_mult; /* 229 */
+ u8 raw_sec_erase_mult; /* 230 */
+ u8 raw_sec_feature_support;/* 231 */
+diff -Nur linux-3.14.36/include/linux/mmc/host.h linux-openelec/include/linux/mmc/host.h
+--- linux-3.14.36/include/linux/mmc/host.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mmc/host.h 2015-05-06 12:05:44.000000000 -0500
+@@ -282,6 +282,7 @@
+ MMC_CAP2_PACKED_WR)
+ #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
+ #define MMC_CAP2_SANITIZE (1 << 15) /* Support Sanitize */
++#define MMC_CAP2_SDIO_NOTHREAD (1 << 16)
+
+ mmc_pm_flag_t pm_caps; /* supported pm features */
+
+@@ -297,6 +298,11 @@
+ unsigned long clkgate_delay;
+ #endif
+
++ /* card specific properties to deal with power and reset */
++ struct regulator *card_regulator; /* External VCC needed by the card */
++ struct gpio_desc *card_reset_gpios[2]; /* External resets, active low */
++ struct clk *card_clk; /* External clock needed by the card */
++
+ /* host specific block data */
+ unsigned int max_seg_size; /* see blk_queue_max_segment_size */
+ unsigned short max_segs; /* see blk_queue_max_segments */
+@@ -397,6 +403,8 @@
+ wake_up_process(host->sdio_irq_thread);
+ }
+
++void sdio_run_irqs(struct mmc_host *host);
++
+ #ifdef CONFIG_REGULATOR
+ int mmc_regulator_get_ocrmask(struct regulator *supply);
+ int mmc_regulator_set_ocr(struct mmc_host *mmc,
+diff -Nur linux-3.14.36/include/linux/mmc/mmc.h linux-openelec/include/linux/mmc/mmc.h
+--- linux-3.14.36/include/linux/mmc/mmc.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mmc/mmc.h 2015-05-06 12:05:44.000000000 -0500
+@@ -292,6 +292,7 @@
+ #define EXT_CSD_RPMB_MULT 168 /* RO */
+ #define EXT_CSD_BOOT_WP 173 /* R/W */
+ #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
++#define EXT_CSD_BOOT_BUS_WIDTH 177 /* R/W */
+ #define EXT_CSD_PART_CONFIG 179 /* R/W */
+ #define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
+ #define EXT_CSD_BUS_WIDTH 183 /* R/W */
+@@ -313,6 +314,7 @@
+ #define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */
+ #define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */
+ #define EXT_CSD_BOOT_MULT 226 /* RO */
++#define EXT_CSD_BOOT_INFO 228 /* RO, 1 bytes */
+ #define EXT_CSD_SEC_TRIM_MULT 229 /* RO */
+ #define EXT_CSD_SEC_ERASE_MULT 230 /* RO */
+ #define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */
+@@ -378,6 +380,29 @@
+ #define EXT_CSD_SEC_GB_CL_EN BIT(4)
+ #define EXT_CSD_SEC_SANITIZE BIT(6) /* v4.5 only */
+
++#define EXT_CSD_BOOT_BUS_WIDTH_MASK (0x1F)
++#define EXT_CSD_BOOT_BUS_WIDTH_MODE_MASK (0x3 << 3)
++#define EXT_CSD_BOOT_BUS_WIDTH_MODE_SDR_NORMAL (0x0)
++#define EXT_CSD_BOOT_BUS_WIDTH_MODE_SDR_HIGH (0x1)
++#define EXT_CSD_BOOT_BUS_WIDTH_MODE_DDR (0x2)
++#define EXT_CSD_BOOT_BUS_WIDTH_RST_WIDTH (1 << 2)
++#define EXT_CSD_BOOT_BUS_WIDTH_WIDTH_MASK (0x3)
++#define EXT_CSD_BOOT_BUS_WIDTH_1_SDR_4_DDR (0x0)
++#define EXT_CSD_BOOT_BUS_WIDTH_4_SDR_4_DDR (0x1)
++#define EXT_CSD_BOOT_BUS_WIDTH_8_SDR_8_DDR (0x2)
++
++#define EXT_CSD_BOOT_ACK_ENABLE (0x1 << 6)
++#define EXT_CSD_BOOT_PARTITION_ENABLE_MASK (0x7 << 3)
++#define EXT_CSD_BOOT_PARTITION_DISABLE (0x0)
++#define EXT_CSD_BOOT_PARTITION_PART1 (0x1 << 3)
++#define EXT_CSD_BOOT_PARTITION_PART2 (0x2 << 3)
++#define EXT_CSD_BOOT_PARTITION_USER (0x7 << 3)
++
++#define EXT_CSD_BOOT_PARTITION_ACCESS_MASK (0x7)
++#define EXT_CSD_BOOT_PARTITION_ACCESS_DISABLE (0x0)
++#define EXT_CSD_BOOT_PARTITION_ACCESS_PART1 (0x1)
++#define EXT_CSD_BOOT_PARTITION_ACCESS_PART2 (0x2)
++
+ #define EXT_CSD_RST_N_EN_MASK 0x3
+ #define EXT_CSD_RST_N_ENABLED 1 /* RST_n is enabled on card */
+
+diff -Nur linux-3.14.36/include/linux/mmc/sdhci.h linux-openelec/include/linux/mmc/sdhci.h
+--- linux-3.14.36/include/linux/mmc/sdhci.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mmc/sdhci.h 2015-05-06 12:05:44.000000000 -0500
+@@ -57,12 +57,8 @@
+ #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
+ /* Controller reports inverted write-protect state */
+ #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
+-/* Controller has nonstandard clock management */
+-#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
+ /* Controller does not like fast PIO transfers */
+ #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
+-/* Controller losing signal/interrupt enable states after reset */
+-#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
+ /* Controller has to be forced to use block size of 2048 bytes */
+ #define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
+ /* Controller cannot do multi-block transfers */
+@@ -100,6 +96,7 @@
+ #define SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1<<5)
+ /* Controller does not support HS200 */
+ #define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
++#define SDHCI_QUIRK2_NOSTD_TIMEOUT_COUNTER (1<<7)
+
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+@@ -145,6 +142,7 @@
+
+ bool runtime_suspended; /* Host is runtime suspended */
+ bool bus_on; /* Bus power prevents runtime suspend */
++ bool preset_enabled; /* Preset is enabled */
+
+ struct mmc_request *mrq; /* Current request */
+ struct mmc_command *cmd; /* Current command */
+@@ -162,8 +160,7 @@
+ dma_addr_t adma_addr; /* Mapped ADMA descr. table */
+ dma_addr_t align_addr; /* Mapped bounce buffer */
+
+- struct tasklet_struct card_tasklet; /* Tasklet structures */
+- struct tasklet_struct finish_tasklet;
++ struct tasklet_struct finish_tasklet; /* Tasklet structures */
+
+ struct timer_list timer; /* Timer for timeouts */
+
+@@ -175,6 +172,13 @@
+ unsigned int ocr_avail_mmc;
+ u32 ocr_mask; /* available voltages */
+
++ unsigned timing; /* Current timing */
++
++ u32 thread_isr;
++
++ /* cached registers */
++ u32 ier;
++
+ wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
+ unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
+
+diff -Nur linux-3.14.36/include/linux/mmc/sdio_ids.h linux-openelec/include/linux/mmc/sdio_ids.h
+--- linux-3.14.36/include/linux/mmc/sdio_ids.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mmc/sdio_ids.h 2015-05-06 12:05:44.000000000 -0500
+@@ -31,6 +31,7 @@
+ #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+ #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+ #define SDIO_DEVICE_ID_BROADCOM_43362 43362
++#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
+
+ #define SDIO_VENDOR_ID_INTEL 0x0089
+ #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
+diff -Nur linux-3.14.36/include/linux/mod_devicetable.h linux-openelec/include/linux/mod_devicetable.h
+--- linux-3.14.36/include/linux/mod_devicetable.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mod_devicetable.h 2015-05-06 12:05:44.000000000 -0500
+@@ -564,6 +564,15 @@
+ #define X86_MODEL_ANY 0
+ #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
+
++/*
++ * Generic table type for matching CPU features.
++ * @feature: the bit number of the feature (0 - 65535)
++ */
++
++struct cpu_feature {
++ __u16 feature;
++};
++
+ #define IPACK_ANY_FORMAT 0xff
+ #define IPACK_ANY_ID (~0)
+ struct ipack_device_id {
+diff -Nur linux-3.14.36/include/linux/mtd/map.h linux-openelec/include/linux/mtd/map.h
+--- linux-3.14.36/include/linux/mtd/map.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/mtd/map.h 2015-05-06 12:05:44.000000000 -0500
+@@ -438,7 +438,7 @@
+ if (map->cached)
+ memcpy(to, (char *)map->cached + from, len);
+ else
+- memcpy_fromio(to, map->virt + from, len);
++ memcpy(to, map->virt + from, len);
+ }
+
+ static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+diff -Nur linux-3.14.36/include/linux/mxc_asrc.h linux-openelec/include/linux/mxc_asrc.h
+--- linux-3.14.36/include/linux/mxc_asrc.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mxc_asrc.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,386 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ *
++ * @file mxc_asrc.h
++ *
++ * @brief i.MX Asynchronous Sample Rate Converter
++ *
++ * @ingroup Audio
++ */
++
++#ifndef __MXC_ASRC_H__
++#define __MXC_ASRC_H__
++
++#include <uapi/linux/mxc_asrc.h>
++#include <linux/scatterlist.h>
++
++#define ASRC_DMA_BUFFER_NUM 2
++#define ASRC_INPUTFIFO_THRESHOLD 32
++#define ASRC_OUTPUTFIFO_THRESHOLD 32
++#define ASRC_FIFO_THRESHOLD_MIN 0
++#define ASRC_FIFO_THRESHOLD_MAX 63
++#define ASRC_DMA_BUFFER_SIZE (1024 * 48 * 4)
++#define ASRC_MAX_BUFFER_SIZE (1024 * 48)
++#define ASRC_OUTPUT_LAST_SAMPLE_DEFAULT 8
++
++
++/* Ideal Ratio mode doesn't care the outclk frequency, so be fixed */
++#define ASRC_PRESCALER_IDEAL_RATIO 5
++/* SPDIF rxclk pulse rate is 128 * samplerate, so 2 ^ 7 */
++#define ASRC_PRESCALER_SPDIF_RX 7
++/* SPDIF txclk pulse rate is 64 * samplerate, so 2 ^ 6 */
++#define ASRC_PRESCALER_SPDIF_TX 6
++/* I2S bclk is 16 * 2 = 32, so 2 ^ 5 */
++#define ASRC_PRESCALER_I2S_16BIT 5
++/* I2S bclk is 24 * 2 = 48 -> 64, so 2 ^ 6 */
++#define ASRC_PRESCALER_I2S_24BIT 6
++
++
++#define REG_ASRCTR 0x00
++#define REG_ASRIER 0x04
++#define REG_ASRCNCR 0x0C
++#define REG_ASRCFG 0x10
++#define REG_ASRCSR 0x14
++
++#define REG_ASRCDR1 0x18
++#define REG_ASRCDR2 0x1C
++#define REG_ASRCDR(x) ((x < 2) ? REG_ASRCDR1 : REG_ASRCDR2)
++
++#define REG_ASRSTR 0x20
++#define REG_ASRRA 0x24
++#define REG_ASRRB 0x28
++#define REG_ASRRC 0x2C
++#define REG_ASRPM1 0x40
++#define REG_ASRPM2 0x44
++#define REG_ASRPM3 0x48
++#define REG_ASRPM4 0x4C
++#define REG_ASRPM5 0x50
++#define REG_ASRTFR1 0x54
++#define REG_ASRCCR 0x5C
++
++#define REG_ASRDIA 0x60
++#define REG_ASRDOA 0x64
++#define REG_ASRDIB 0x68
++#define REG_ASRDOB 0x6C
++#define REG_ASRDIC 0x70
++#define REG_ASRDOC 0x74
++#define REG_ASRDI(x) (REG_ASRDIA + (x << 3))
++#define REG_ASRDO(x) (REG_ASRDOA + (x << 3))
++
++#define REG_ASRIDRHA 0x80
++#define REG_ASRIDRLA 0x84
++#define REG_ASRIDRHB 0x88
++#define REG_ASRIDRLB 0x8C
++#define REG_ASRIDRHC 0x90
++#define REG_ASRIDRLC 0x94
++#define REG_ASRIDRH(x) (REG_ASRIDRHA + (x << 3))
++#define REG_ASRIDRL(x) (REG_ASRIDRLA + (x << 3))
++
++#define REG_ASR76K 0x98
++#define REG_ASR56K 0x9C
++
++#define REG_ASRMCRA 0xA0
++#define REG_ASRFSTA 0xA4
++#define REG_ASRMCRB 0xA8
++#define REG_ASRFSTB 0xAC
++#define REG_ASRMCRC 0xB0
++#define REG_ASRFSTC 0xB4
++#define REG_ASRMCR(x) (REG_ASRMCRA + (x << 3))
++#define REG_ASRFST(x) (REG_ASRFSTA + (x << 3))
++
++#define REG_ASRMCR1A 0xC0
++#define REG_ASRMCR1B 0xC4
++#define REG_ASRMCR1C 0xC8
++#define REG_ASRMCR1(x) (REG_ASRMCR1A + (x << 2))
++
++
++/* REG0 0x00 REG_ASRCTR */
++#define ASRCTR_ATSx_SHIFT(x) (20 + x)
++#define ASRCTR_ATSx_MASK(x) (1 << ASRCTR_ATSx_SHIFT(x))
++#define ASRCTR_ATS(x) (1 << ASRCTR_ATSx_SHIFT(x))
++#define ASRCTR_USRx_SHIFT(x) (14 + (x << 1))
++#define ASRCTR_USRx_MASK(x) (1 << ASRCTR_USRx_SHIFT(x))
++#define ASRCTR_USR(x) (1 << ASRCTR_USRx_SHIFT(x))
++#define ASRCTR_IDRx_SHIFT(x) (13 + (x << 1))
++#define ASRCTR_IDRx_MASK(x) (1 << ASRCTR_IDRx_SHIFT(x))
++#define ASRCTR_IDR(x) (1 << ASRCTR_IDRx_SHIFT(x))
++#define ASRCTR_SRST_SHIFT 4
++#define ASRCTR_SRST_MASK (1 << ASRCTR_SRST_SHIFT)
++#define ASRCTR_SRST (1 << ASRCTR_SRST_SHIFT)
++#define ASRCTR_ASRCEx_SHIFT(x) (1 + x)
++#define ASRCTR_ASRCEx_MASK(x) (1 << ASRCTR_ASRCEx_SHIFT(x))
++#define ASRCTR_ASRCE(x) (1 << ASRCTR_ASRCEx_SHIFT(x))
++#define ASRCTR_ASRCEN_SHIFT 0
++#define ASRCTR_ASRCEN_MASK (1 << ASRCTR_ASRCEN_SHIFT)
++#define ASRCTR_ASRCEN (1 << ASRCTR_ASRCEN_SHIFT)
++
++/* REG1 0x04 REG_ASRIER */
++#define ASRIER_AFPWE_SHIFT 7
++#define ASRIER_AFPWE_MASK (1 << ASRIER_AFPWE_SHIFT)
++#define ASRIER_AFPWE (1 << ASRIER_AFPWE_SHIFT)
++#define ASRIER_AOLIE_SHIFT 6
++#define ASRIER_AOLIE_MASK (1 << ASRIER_AOLIE_SHIFT)
++#define ASRIER_AOLIE (1 << ASRIER_AOLIE_SHIFT)
++#define ASRIER_ADOEx_SHIFT(x) (3 + x)
++#define ASRIER_ADOEx_MASK(x) (1 << ASRIER_ADOEx_SHIFT(x))
++#define ASRIER_ADOE(x) (1 << ASRIER_ADOEx_SHIFT(x))
++#define ASRIER_ADIEx_SHIFT(x) (0 + x)
++#define ASRIER_ADIEx_MASK(x) (1 << ASRIER_ADIEx_SHIFT(x))
++#define ASRIER_ADIE(x) (1 << ASRIER_ADIEx_SHIFT(x))
++
++/* REG2 0x0C REG_ASRCNCR */
++#define ASRCNCR_ANCx_SHIFT(x, b) (b * x)
++#define ASRCNCR_ANCx_MASK(x, b) (((1 << b) - 1) << ASRCNCR_ANCx_SHIFT(x, b))
++#define ASRCNCR_ANCx_get(x, v, b) ((v & ASRCNCR_ANCx_MASK(x, b)) >> ASRCNCR_ANCx_SHIFT(x, b))
++#define ASRCNCR_ANCx_set(x, v, b) ((v << ASRCNCR_ANCx_SHIFT(x, b)) & ASRCNCR_ANCx_MASK(x, b))
++
++/* REG3 0x10 REG_ASRCFG */
++#define ASRCFG_INIRQx_SHIFT(x) (21 + x)
++#define ASRCFG_INIRQx_MASK(x) (1 << ASRCFG_INIRQx_SHIFT(x))
++#define ASRCFG_INIRQx (1 << ASRCFG_INIRQx_SHIFT(x))
++#define ASRCFG_NDPRx_SHIFT(x) (18 + x)
++#define ASRCFG_NDPRx_MASK(x) (1 << ASRCFG_NDPRx_SHIFT(x))
++#define ASRCFG_NDPRx (1 << ASRCFG_NDPRx_SHIFT(x))
++#define ASRCFG_POSTMODx_SHIFT(x) (8 + (x << 2))
++#define ASRCFG_POSTMODx_WIDTH 2
++#define ASRCFG_POSTMODx_MASK(x) (((1 << ASRCFG_POSTMODx_WIDTH) - 1) << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMOD(x, v) ((v) << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_UP(x) (0 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_DCON(x) (1 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_DOWN(x) (2 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_PREMODx_SHIFT(x) (6 + (x << 2))
++#define ASRCFG_PREMODx_WIDTH 2
++#define ASRCFG_PREMODx_MASK(x) (((1 << ASRCFG_PREMODx_WIDTH) - 1) << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMOD(x, v) ((v) << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_UP(x) (0 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_DCON(x) (1 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_DOWN(x) (2 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_BYPASS(x) (3 << ASRCFG_PREMODx_SHIFT(x))
++
++/* REG4 0x14 REG_ASRCSR */
++#define ASRCSR_AxCSx_WIDTH 4
++#define ASRCSR_AxCSx_MASK ((1 << ASRCSR_AxCSx_WIDTH) - 1)
++#define ASRCSR_AOCSx_SHIFT(x) (12 + (x << 2))
++#define ASRCSR_AOCSx_MASK(x) (((1 << ASRCSR_AxCSx_WIDTH) - 1) << ASRCSR_AOCSx_SHIFT(x))
++#define ASRCSR_AOCS(x, v) ((v) << ASRCSR_AOCSx_SHIFT(x))
++#define ASRCSR_AICSx_SHIFT(x) (x << 2)
++#define ASRCSR_AICSx_MASK(x) (((1 << ASRCSR_AxCSx_WIDTH) - 1) << ASRCSR_AICSx_SHIFT(x))
++#define ASRCSR_AICS(x, v) ((v) << ASRCSR_AICSx_SHIFT(x))
++
++/* REG5&6 0x18 & 0x1C REG_ASRCDR1 & ASRCDR2 */
++#define ASRCDRx_AxCPx_WIDTH 3
++#define ASRCDRx_AICPx_SHIFT(x) (0 + (x % 2) * 6)
++#define ASRCDRx_AICPx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AICPx_SHIFT(x))
++#define ASRCDRx_AICP(x, v) ((v) << ASRCDRx_AICPx_SHIFT(x))
++#define ASRCDRx_AICDx_SHIFT(x) (3 + (x % 2) * 6)
++#define ASRCDRx_AICDx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AICDx_SHIFT(x))
++#define ASRCDRx_AICD(x, v) ((v) << ASRCDRx_AICDx_SHIFT(x))
++#define ASRCDRx_AOCPx_SHIFT(x) ((x < 2) ? 12 + x * 6 : 6)
++#define ASRCDRx_AOCPx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AOCPx_SHIFT(x))
++#define ASRCDRx_AOCP(x, v) ((v) << ASRCDRx_AOCPx_SHIFT(x))
++#define ASRCDRx_AOCDx_SHIFT(x) ((x < 2) ? 15 + x * 6 : 9)
++#define ASRCDRx_AOCDx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AOCDx_SHIFT(x))
++#define ASRCDRx_AOCD(x, v) ((v) << ASRCDRx_AOCDx_SHIFT(x))
++
++/* REG7 0x20 REG_ASRSTR */
++#define ASRSTR_DSLCNT_SHIFT 21
++#define ASRSTR_DSLCNT_MASK (1 << ASRSTR_DSLCNT_SHIFT)
++#define ASRSTR_DSLCNT (1 << ASRSTR_DSLCNT_SHIFT)
++#define ASRSTR_ATQOL_SHIFT 20
++#define ASRSTR_ATQOL_MASK (1 << ASRSTR_ATQOL_SHIFT)
++#define ASRSTR_ATQOL (1 << ASRSTR_ATQOL_SHIFT)
++#define ASRSTR_AOOLx_SHIFT(x) (17 + x)
++#define ASRSTR_AOOLx_MASK(x) (1 << ASRSTR_AOOLx_SHIFT(x))
++#define ASRSTR_AOOL(x) (1 << ASRSTR_AOOLx_SHIFT(x))
++#define ASRSTR_AIOLx_SHIFT(x) (14 + x)
++#define ASRSTR_AIOLx_MASK(x) (1 << ASRSTR_AIOLx_SHIFT(x))
++#define ASRSTR_AIOL(x) (1 << ASRSTR_AIOLx_SHIFT(x))
++#define ASRSTR_AODOx_SHIFT(x) (11 + x)
++#define ASRSTR_AODOx_MASK(x) (1 << ASRSTR_AODOx_SHIFT(x))
++#define ASRSTR_AODO(x) (1 << ASRSTR_AODOx_SHIFT(x))
++#define ASRSTR_AIDUx_SHIFT(x) (8 + x)
++#define ASRSTR_AIDUx_MASK(x) (1 << ASRSTR_AIDUx_SHIFT(x))
++#define ASRSTR_AIDU(x) (1 << ASRSTR_AIDUx_SHIFT(x))
++#define ASRSTR_FPWT_SHIFT 7
++#define ASRSTR_FPWT_MASK (1 << ASRSTR_FPWT_SHIFT)
++#define ASRSTR_FPWT (1 << ASRSTR_FPWT_SHIFT)
++#define ASRSTR_AOLE_SHIFT 6
++#define ASRSTR_AOLE_MASK (1 << ASRSTR_AOLE_SHIFT)
++#define ASRSTR_AOLE (1 << ASRSTR_AOLE_SHIFT)
++#define ASRSTR_AODEx_SHIFT(x) (3 + x)
++#define ASRSTR_AODFx_MASK(x) (1 << ASRSTR_AODEx_SHIFT(x))
++#define ASRSTR_AODF(x) (1 << ASRSTR_AODEx_SHIFT(x))
++#define ASRSTR_AIDEx_SHIFT(x) (0 + x)
++#define ASRSTR_AIDEx_MASK(x) (1 << ASRSTR_AIDEx_SHIFT(x))
++#define ASRSTR_AIDE(x) (1 << ASRSTR_AIDEx_SHIFT(x))
++
++/* REG10 0x54 REG_ASRTFR1 */
++#define ASRTFR1_TF_BASE_WIDTH 7
++#define ASRTFR1_TF_BASE_SHIFT 6
++#define ASRTFR1_TF_BASE_MASK (((1 << ASRTFR1_TF_BASE_WIDTH) - 1) << ASRTFR1_TF_BASE_SHIFT)
++#define ASRTFR1_TF_BASE(x) ((x) << ASRTFR1_TF_BASE_SHIFT)
++
++/*
++ * REG22 0xA0 REG_ASRMCRA
++ * REG24 0xA8 REG_ASRMCRB
++ * REG26 0xB0 REG_ASRMCRC
++ */
++#define ASRMCRx_ZEROBUFx_SHIFT 23
++#define ASRMCRx_ZEROBUFxCLR_MASK (1 << ASRMCRx_ZEROBUFx_SHIFT)
++#define ASRMCRx_ZEROBUFxCLR (1 << ASRMCRx_ZEROBUFx_SHIFT)
++#define ASRMCRx_EXTTHRSHx_SHIFT 22
++#define ASRMCRx_EXTTHRSHx_MASK (1 << ASRMCRx_EXTTHRSHx_SHIFT)
++#define ASRMCRx_EXTTHRSHx (1 << ASRMCRx_EXTTHRSHx_SHIFT)
++#define ASRMCRx_BUFSTALLx_SHIFT 21
++#define ASRMCRx_BUFSTALLx_MASK (1 << ASRMCRx_BUFSTALLx_SHIFT)
++#define ASRMCRx_BUFSTALLx (1 << ASRMCRx_BUFSTALLx_SHIFT)
++#define ASRMCRx_BYPASSPOLYx_SHIFT 20
++#define ASRMCRx_BYPASSPOLYx_MASK (1 << ASRMCRx_BYPASSPOLYx_SHIFT)
++#define ASRMCRx_BYPASSPOLYx (1 << ASRMCRx_BYPASSPOLYx_SHIFT)
++#define ASRMCRx_OUTFIFO_THRESHOLD_WIDTH 6
++#define ASRMCRx_OUTFIFO_THRESHOLD_SHIFT 12
++#define ASRMCRx_OUTFIFO_THRESHOLD_MASK (((1 << ASRMCRx_OUTFIFO_THRESHOLD_WIDTH) - 1) << ASRMCRx_OUTFIFO_THRESHOLD_SHIFT)
++#define ASRMCRx_OUTFIFO_THRESHOLD(v) (((v) << ASRMCRx_OUTFIFO_THRESHOLD_SHIFT) & ASRMCRx_OUTFIFO_THRESHOLD_MASK)
++#define ASRMCRx_RSYNIFx_SHIFT 11
++#define ASRMCRx_RSYNIFx_MASK (1 << ASRMCRx_RSYNIFx_SHIFT)
++#define ASRMCRx_RSYNIFx (1 << ASRMCRx_RSYNIFx_SHIFT)
++#define ASRMCRx_RSYNOFx_SHIFT 10
++#define ASRMCRx_RSYNOFx_MASK (1 << ASRMCRx_RSYNOFx_SHIFT)
++#define ASRMCRx_RSYNOFx (1 << ASRMCRx_RSYNOFx_SHIFT)
++#define ASRMCRx_INFIFO_THRESHOLD_WIDTH 6
++#define ASRMCRx_INFIFO_THRESHOLD_SHIFT 0
++#define ASRMCRx_INFIFO_THRESHOLD_MASK (((1 << ASRMCRx_INFIFO_THRESHOLD_WIDTH) - 1) << ASRMCRx_INFIFO_THRESHOLD_SHIFT)
++#define ASRMCRx_INFIFO_THRESHOLD(v) (((v) << ASRMCRx_INFIFO_THRESHOLD_SHIFT) & ASRMCRx_INFIFO_THRESHOLD_MASK)
++
++/*
++ * REG23 0xA4 REG_ASRFSTA
++ * REG25 0xAC REG_ASRFSTB
++ * REG27 0xB4 REG_ASRFSTC
++ */
++#define ASRFSTx_OAFx_SHIFT 23
++#define ASRFSTx_OAFx_MASK (1 << ASRFSTx_OAFx_SHIFT)
++#define ASRFSTx_OAFx (1 << ASRFSTx_OAFx_SHIFT)
++#define ASRFSTx_OUTPUT_FIFO_WIDTH 7
++#define ASRFSTx_OUTPUT_FIFO_SHIFT 12
++#define ASRFSTx_OUTPUT_FIFO_MASK (((1 << ASRFSTx_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTx_OUTPUT_FIFO_SHIFT)
++#define ASRFSTx_IAEx_SHIFT 11
++#define ASRFSTx_IAEx_MASK (1 << ASRFSTx_OAFx_SHIFT)
++#define ASRFSTx_IAEx (1 << ASRFSTx_OAFx_SHIFT)
++#define ASRFSTx_INPUT_FIFO_WIDTH 7
++#define ASRFSTx_INPUT_FIFO_SHIFT 0
++#define ASRFSTx_INPUT_FIFO_MASK ((1 << ASRFSTx_INPUT_FIFO_WIDTH) - 1)
++
++/* REG28 0xC0 & 0xC4 & 0xC8 REG_ASRMCR1x */
++#define ASRMCR1x_IWD_WIDTH 3
++#define ASRMCR1x_IWD_SHIFT 9
++#define ASRMCR1x_IWD_MASK (((1 << ASRMCR1x_IWD_WIDTH) - 1) << ASRMCR1x_IWD_SHIFT)
++#define ASRMCR1x_IWD(v) ((v) << ASRMCR1x_IWD_SHIFT)
++#define ASRMCR1x_IMSB_SHIFT 8
++#define ASRMCR1x_IMSB_MASK (1 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_IMSB_MSB (1 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_IMSB_LSB (0 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_OMSB_SHIFT 2
++#define ASRMCR1x_OMSB_MASK (1 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OMSB_MSB (1 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OMSB_LSB (0 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OSGN_SHIFT 1
++#define ASRMCR1x_OSGN_MASK (1 << ASRMCR1x_OSGN_SHIFT)
++#define ASRMCR1x_OSGN (1 << ASRMCR1x_OSGN_SHIFT)
++#define ASRMCR1x_OW16_SHIFT 0
++#define ASRMCR1x_OW16_MASK (1 << ASRMCR1x_OW16_SHIFT)
++#define ASRMCR1x_OW16(v) ((v) << ASRMCR1x_OW16_SHIFT)
++
++
++struct dma_block {
++ unsigned int index;
++ unsigned int length;
++ void *dma_vaddr;
++ dma_addr_t dma_paddr;
++ struct list_head queue;
++};
++
++struct asrc_p2p_params {
++ u32 p2p_rate; /* ASRC output rate for p2p */
++ enum asrc_word_width p2p_width; /* ASRC output wordwidth for p2p */
++};
++
++struct asrc_pair_params {
++ enum asrc_pair_index index;
++ struct completion input_complete;
++ struct completion output_complete;
++ struct completion lastperiod_complete;
++ struct dma_chan *input_dma_channel;
++ struct dma_chan *output_dma_channel;
++ unsigned int input_buffer_size;
++ unsigned int output_buffer_size;
++ unsigned int buffer_num;
++ unsigned int pair_hold;
++ unsigned int asrc_active;
++ unsigned int channel_nums;
++ struct dma_block input_dma_total;
++ struct dma_block input_dma[ASRC_DMA_BUFFER_NUM];
++ struct dma_block output_dma_total;
++ struct dma_block output_dma[ASRC_DMA_BUFFER_NUM];
++ struct dma_block output_last_period;
++ struct dma_async_tx_descriptor *desc_in;
++ struct dma_async_tx_descriptor *desc_out;
++ struct work_struct task_output_work;
++ unsigned int input_sg_nodes;
++ unsigned int output_sg_nodes;
++ struct scatterlist input_sg[4], output_sg[4];
++ enum asrc_word_width input_word_width;
++ enum asrc_word_width output_word_width;
++ u32 input_sample_rate;
++ u32 output_sample_rate;
++ u32 input_wm;
++ u32 output_wm;
++ unsigned int last_period_sample;
++};
++
++struct asrc_data {
++ struct asrc_pair asrc_pair[ASRC_PAIR_MAX_NUM];
++ struct proc_dir_entry *proc_asrc;
++ struct class *asrc_class;
++ struct regmap *regmap;
++ struct clk *asrc_clk;
++ struct clk *dma_clk;
++ unsigned long paddr;
++ unsigned int channel_bits;
++ int asrc_major;
++ int irq;
++ struct device *dev;
++};
++
++struct asrc_p2p_ops {
++ void (*asrc_p2p_start_conv)(enum asrc_pair_index);
++ void (*asrc_p2p_stop_conv)(enum asrc_pair_index);
++ int (*asrc_p2p_get_dma_request)(enum asrc_pair_index, bool);
++ u32 (*asrc_p2p_per_addr)(enum asrc_pair_index, bool);
++ int (*asrc_p2p_req_pair)(int, enum asrc_pair_index *index);
++ int (*asrc_p2p_config_pair)(struct asrc_config *config);
++ void (*asrc_p2p_release_pair)(enum asrc_pair_index);
++ void (*asrc_p2p_finish_conv)(enum asrc_pair_index);
++};
++
++extern void asrc_p2p_hook(struct asrc_p2p_ops *asrc_p2p_ct);
++
++extern int asrc_req_pair(int chn_num, enum asrc_pair_index *index);
++extern void asrc_release_pair(enum asrc_pair_index index);
++extern int asrc_config_pair(struct asrc_config *config);
++extern void asrc_get_status(struct asrc_status_flags *flags);
++extern void asrc_start_conv(enum asrc_pair_index index);
++extern void asrc_stop_conv(enum asrc_pair_index index);
++extern u32 asrc_get_per_addr(enum asrc_pair_index index, bool i);
++extern int asrc_get_dma_request(enum asrc_pair_index index, bool i);
++extern void asrc_finish_conv(enum asrc_pair_index index);
++extern int asrc_set_watermark(enum asrc_pair_index index,
++ u32 in_wm, u32 out_wm);
++
++#endif/* __MXC_ASRC_H__ */
+diff -Nur linux-3.14.36/include/linux/mxcfb.h linux-openelec/include/linux/mxcfb.h
+--- linux-3.14.36/include/linux/mxcfb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mxcfb.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,46 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*
++ * @file linux/mxcfb.h
++ *
++ * @brief Global header file for the MXC Frame buffer
++ *
++ * @ingroup Framebuffer
++ */
++#ifndef __LINUX_MXCFB_H__
++#define __LINUX_MXCFB_H__
++
++#include <uapi/linux/mxcfb.h>
++
++extern struct fb_videomode mxcfb_modedb[];
++extern int mxcfb_modedb_sz;
++
++enum {
++ MXC_DISP_SPEC_DEV = 0,
++ MXC_DISP_DDC_DEV = 1,
++};
++
++enum {
++ MXCFB_REFRESH_OFF,
++ MXCFB_REFRESH_AUTO,
++ MXCFB_REFRESH_PARTIAL,
++};
++
++int mxcfb_set_refresh_mode(struct fb_info *fbi, int mode,
++ struct mxcfb_rect *update_region);
++int mxc_elcdif_frame_addr_setup(dma_addr_t phys);
++void mxcfb_elcdif_register_mode(const struct fb_videomode *modedb,
++ int num_modes, int dev_mode);
++
++#endif
+diff -Nur linux-3.14.36/include/linux/mxc_mlb.h linux-openelec/include/linux/mxc_mlb.h
+--- linux-3.14.36/include/linux/mxc_mlb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mxc_mlb.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,55 @@
++/*
++ * mxc_mlb.h
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef _MXC_MLB_H
++#define _MXC_MLB_H
++
++/* define IOCTL command */
++#define MLB_DBG_RUNTIME _IO('S', 0x09)
++#define MLB_SET_FPS _IOW('S', 0x10, unsigned int)
++#define MLB_GET_VER _IOR('S', 0x11, unsigned long)
++#define MLB_SET_DEVADDR _IOR('S', 0x12, unsigned char)
++
++/*!
++ * set channel address for each logical channel
++ * the MSB 16bits is for tx channel, the left LSB is for rx channel
++ */
++#define MLB_CHAN_SETADDR _IOW('S', 0x13, unsigned int)
++#define MLB_CHAN_STARTUP _IO('S', 0x14)
++#define MLB_CHAN_SHUTDOWN _IO('S', 0x15)
++#define MLB_CHAN_GETEVENT _IOR('S', 0x16, unsigned long)
++
++#define MLB_SET_ISOC_BLKSIZE_188 _IO('S', 0x17)
++#define MLB_SET_ISOC_BLKSIZE_196 _IO('S', 0x18)
++#define MLB_SET_SYNC_QUAD _IOW('S', 0x19, unsigned int)
++#define MLB_IRQ_ENABLE _IO('S', 0x20)
++#define MLB_IRQ_DISABLE _IO('S', 0x21)
++
++/*!
++ * MLB event define
++ */
++enum {
++ MLB_EVT_TX_PROTO_ERR_CUR = 1 << 0,
++ MLB_EVT_TX_BRK_DETECT_CUR = 1 << 1,
++ MLB_EVT_TX_PROTO_ERR_PREV = 1 << 8,
++ MLB_EVT_TX_BRK_DETECT_PREV = 1 << 9,
++ MLB_EVT_RX_PROTO_ERR_CUR = 1 << 16,
++ MLB_EVT_RX_BRK_DETECT_CUR = 1 << 17,
++ MLB_EVT_RX_PROTO_ERR_PREV = 1 << 24,
++ MLB_EVT_RX_BRK_DETECT_PREV = 1 << 25,
++};
++
++
++#endif /* _MXC_MLB_H */
+diff -Nur linux-3.14.36/include/linux/mxc_v4l2.h linux-openelec/include/linux/mxc_v4l2.h
+--- linux-3.14.36/include/linux/mxc_v4l2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mxc_v4l2.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,27 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*!
++ * @file linux/mxc_v4l2.h
++ *
++ * @brief MXC V4L2 private header file
++ *
++ * @ingroup MXC V4L2
++ */
++
++#ifndef __LINUX_MXC_V4L2_H__
++#define __LINUX_MXC_V4L2_H__
++
++#include <uapi/linux/mxc_v4l2.h>
++
++#endif
+diff -Nur linux-3.14.36/include/linux/mxc_vpu.h linux-openelec/include/linux/mxc_vpu.h
+--- linux-3.14.36/include/linux/mxc_vpu.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/mxc_vpu.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,118 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*!
++ * @defgroup VPU Video Processor Unit Driver
++ */
++
++/*!
++ * @file linux/mxc_vpu.h
++ *
++ * @brief VPU system initialization and file operation definition
++ *
++ * @ingroup VPU
++ */
++
++#ifndef __LINUX_MXC_VPU_H__
++#define __LINUX_MXC_VPU_H__
++
++#include <linux/fs.h>
++
++struct mxc_vpu_platform_data {
++ bool iram_enable;
++ int iram_size;
++ void (*reset) (void);
++ void (*pg) (int);
++};
++
++struct vpu_mem_desc {
++ u32 size;
++ dma_addr_t phy_addr;
++ u32 cpu_addr; /* cpu address to free the dma mem */
++ u32 virt_uaddr; /* virtual user space address */
++};
++
++#define VPU_IOC_MAGIC 'V'
++
++#define VPU_IOC_PHYMEM_ALLOC _IO(VPU_IOC_MAGIC, 0)
++#define VPU_IOC_PHYMEM_FREE _IO(VPU_IOC_MAGIC, 1)
++#define VPU_IOC_WAIT4INT _IO(VPU_IOC_MAGIC, 2)
++#define VPU_IOC_PHYMEM_DUMP _IO(VPU_IOC_MAGIC, 3)
++#define VPU_IOC_REG_DUMP _IO(VPU_IOC_MAGIC, 4)
++#define VPU_IOC_IRAM_SETTING _IO(VPU_IOC_MAGIC, 6)
++#define VPU_IOC_CLKGATE_SETTING _IO(VPU_IOC_MAGIC, 7)
++#define VPU_IOC_GET_WORK_ADDR _IO(VPU_IOC_MAGIC, 8)
++#define VPU_IOC_REQ_VSHARE_MEM _IO(VPU_IOC_MAGIC, 9)
++#define VPU_IOC_SYS_SW_RESET _IO(VPU_IOC_MAGIC, 11)
++#define VPU_IOC_GET_SHARE_MEM _IO(VPU_IOC_MAGIC, 12)
++#define VPU_IOC_QUERY_BITWORK_MEM _IO(VPU_IOC_MAGIC, 13)
++#define VPU_IOC_SET_BITWORK_MEM _IO(VPU_IOC_MAGIC, 14)
++#define VPU_IOC_PHYMEM_CHECK _IO(VPU_IOC_MAGIC, 15)
++#define VPU_IOC_LOCK_DEV _IO(VPU_IOC_MAGIC, 16)
++
++#define BIT_CODE_RUN 0x000
++#define BIT_CODE_DOWN 0x004
++#define BIT_INT_CLEAR 0x00C
++#define BIT_INT_STATUS 0x010
++#define BIT_CUR_PC 0x018
++#define BIT_INT_REASON 0x174
++
++#define MJPEG_PIC_STATUS_REG 0x3004
++#define MBC_SET_SUBBLK_EN 0x4A0
++
++#define BIT_WORK_CTRL_BUF_BASE 0x100
++#define BIT_WORK_CTRL_BUF_REG(i) (BIT_WORK_CTRL_BUF_BASE + i * 4)
++#define BIT_CODE_BUF_ADDR BIT_WORK_CTRL_BUF_REG(0)
++#define BIT_WORK_BUF_ADDR BIT_WORK_CTRL_BUF_REG(1)
++#define BIT_PARA_BUF_ADDR BIT_WORK_CTRL_BUF_REG(2)
++#define BIT_BIT_STREAM_CTRL BIT_WORK_CTRL_BUF_REG(3)
++#define BIT_FRAME_MEM_CTRL BIT_WORK_CTRL_BUF_REG(4)
++#define BIT_BIT_STREAM_PARAM BIT_WORK_CTRL_BUF_REG(5)
++
++#ifndef CONFIG_SOC_IMX6Q
++#define BIT_RESET_CTRL 0x11C
++#else
++#define BIT_RESET_CTRL 0x128
++#endif
++
++/* i could be 0, 1, 2, 3 */
++#define BIT_RD_PTR_BASE 0x120
++#define BIT_RD_PTR_REG(i) (BIT_RD_PTR_BASE + i * 8)
++#define BIT_WR_PTR_REG(i) (BIT_RD_PTR_BASE + i * 8 + 4)
++
++/* i could be 0, 1, 2, 3 */
++#define BIT_FRM_DIS_FLG_BASE (cpu_is_mx51() ? 0x150 : 0x140)
++#define BIT_FRM_DIS_FLG_REG(i) (BIT_FRM_DIS_FLG_BASE + i * 4)
++
++#define BIT_BUSY_FLAG 0x160
++#define BIT_RUN_COMMAND 0x164
++#define BIT_INT_ENABLE 0x170
++
++#define BITVAL_PIC_RUN 8
++
++#define VPU_SLEEP_REG_VALUE 10
++#define VPU_WAKE_REG_VALUE 11
++
++int vl2cc_init(u32 vl2cc_hw_base);
++void vl2cc_enable(void);
++void vl2cc_flush(void);
++void vl2cc_disable(void);
++void vl2cc_cleanup(void);
++
++int vl2cc_init(u32 vl2cc_hw_base);
++void vl2cc_enable(void);
++void vl2cc_flush(void);
++void vl2cc_disable(void);
++void vl2cc_cleanup(void);
++
++#endif
+diff -Nur linux-3.14.36/include/linux/phy.h linux-openelec/include/linux/phy.h
+--- linux-3.14.36/include/linux/phy.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/phy.h 2015-05-06 12:05:44.000000000 -0500
+@@ -609,6 +609,7 @@
+ return phydev->drv->read_status(phydev);
+ }
+
++int genphy_config_init(struct phy_device *phydev);
+ int genphy_setup_forced(struct phy_device *phydev);
+ int genphy_restart_aneg(struct phy_device *phydev);
+ int genphy_config_aneg(struct phy_device *phydev);
+diff -Nur linux-3.14.36/include/linux/pipe_fs_i.h linux-openelec/include/linux/pipe_fs_i.h
+--- linux-3.14.36/include/linux/pipe_fs_i.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/pipe_fs_i.h 2015-05-06 12:05:44.000000000 -0500
+@@ -35,7 +35,7 @@
+ * @tmp_page: cached released page
+ * @readers: number of current readers of this pipe
+ * @writers: number of current writers of this pipe
+- * @files: number of struct file refering this pipe (protected by ->i_lock)
++ * @files: number of struct file referring this pipe (protected by ->i_lock)
+ * @waiting_writers: number of writers blocked waiting for room
+ * @r_counter: reader counter
+ * @w_counter: writer counter
+diff -Nur linux-3.14.36/include/linux/pl320-ipc.h linux-openelec/include/linux/pl320-ipc.h
+--- linux-3.14.36/include/linux/pl320-ipc.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/pl320-ipc.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,17 @@
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++int pl320_ipc_transmit(u32 *data);
++int pl320_ipc_register_notifier(struct notifier_block *nb);
++int pl320_ipc_unregister_notifier(struct notifier_block *nb);
+diff -Nur linux-3.14.36/include/linux/platform_data/dma-imx.h linux-openelec/include/linux/platform_data/dma-imx.h
+--- linux-3.14.36/include/linux/platform_data/dma-imx.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/platform_data/dma-imx.h 2015-05-06 12:05:44.000000000 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -40,6 +40,7 @@
+ IMX_DMATYPE_ASRC, /* ASRC */
+ IMX_DMATYPE_ESAI, /* ESAI */
+ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
++ IMX_DMATYPE_HDMI, /* HDMI Audio */
+ };
+
+ enum imx_dma_prio {
+@@ -49,9 +50,11 @@
+ };
+
+ struct imx_dma_data {
+- int dma_request; /* DMA request line */
++ int dma_request0; /* DMA request line */
++ int dma_request1;
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
++ void *data_addr1, *data_addr2;
+ };
+
+ static inline int imx_dma_is_ipu(struct dma_chan *chan)
+@@ -59,6 +62,11 @@
+ return !strcmp(dev_name(chan->device->dev), "ipu-core");
+ }
+
++static inline int imx_dma_is_pxp(struct dma_chan *chan)
++{
++ return strstr(dev_name(chan->device->dev), "pxp") != NULL;
++}
++
+ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+ {
+ return !strcmp(chan->device->dev->driver->name, "imx-sdma") ||
+diff -Nur linux-3.14.36/include/linux/power/imx6_usb_charger.h linux-openelec/include/linux/power/imx6_usb_charger.h
+--- linux-3.14.36/include/linux/power/imx6_usb_charger.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/power/imx6_usb_charger.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,80 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef __IMXUSB6_CHARGER_H
++#define __IMXUSB6_CHARGER_H
++
++#include <linux/power_supply.h>
++enum battery_charging_spec {
++ BATTERY_CHARGING_SPEC_NONE = 0,
++ BATTERY_CHARGING_SPEC_UNKNOWN,
++ BATTERY_CHARGING_SPEC_1_0,
++ BATTERY_CHARGING_SPEC_1_1,
++ BATTERY_CHARGING_SPEC_1_2,
++};
++
++struct usb_charger {
++ /* The anatop regmap */
++ struct regmap *anatop;
++ /* USB controller */
++ struct device *dev;
++ struct power_supply psy;
++ struct mutex lock;
++
++ /* Compliant with Battery Charging Specification version (if any) */
++ enum battery_charging_spec bc;
++
++ /* properties */
++ unsigned present:1;
++ unsigned online:1;
++ unsigned max_current;
++ int (*connect)(struct usb_charger *charger);
++ int (*disconnect)(struct usb_charger *charger);
++ int (*set_power)(struct usb_charger *charger, unsigned mA);
++
++ int (*detect)(struct usb_charger *charger);
++};
++
++#ifdef CONFIG_IMX6_USB_CHARGER
++extern void imx6_usb_remove_charger(struct usb_charger *charger);
++extern int imx6_usb_create_charger(struct usb_charger *charger,
++ const char *name);
++extern int imx6_usb_vbus_disconnect(struct usb_charger *charger);
++extern int imx6_usb_vbus_connect(struct usb_charger *charger);
++extern int imx6_usb_charger_detect_post(struct usb_charger *charger);
++#else
++void imx6_usb_remove_charger(struct usb_charger *charger)
++{
++
++}
++
++int imx6_usb_create_charger(struct usb_charger *charger,
++ const char *name)
++{
++ return -ENODEV;
++}
++
++int imx6_usb_vbus_disconnect(struct usb_charger *charger)
++{
++ return -ENODEV;
++}
++
++int imx6_usb_vbus_connect(struct usb_charger *charger)
++{
++ return -ENODEV;
++}
++int imx6_usb_charger_detect_post(struct usb_charger *charger)
++{
++ return -ENODEV;
++}
++#endif
++
++#endif /* __IMXUSB6_CHARGER_H */
+diff -Nur linux-3.14.36/include/linux/ptp_clock_kernel.h linux-openelec/include/linux/ptp_clock_kernel.h
+--- linux-3.14.36/include/linux/ptp_clock_kernel.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/ptp_clock_kernel.h 2015-05-06 12:05:44.000000000 -0500
+@@ -49,7 +49,11 @@
+ * @n_alarm: The number of programmable alarms.
+ * @n_ext_ts: The number of external time stamp channels.
+ * @n_per_out: The number of programmable periodic signals.
++ * @n_pins: The number of programmable pins.
+ * @pps: Indicates whether the clock supports a PPS callback.
++ * @pin_config: Array of length 'n_pins'. If the number of
++ * programmable pins is nonzero, then drivers must
++ * allocate and initialize this array.
+ *
+ * clock operations
+ *
+@@ -70,6 +74,18 @@
+ * parameter request: Desired resource to enable or disable.
+ * parameter on: Caller passes one to enable or zero to disable.
+ *
++ * @verify: Confirm that a pin can perform a given function. The PTP
++ * Hardware Clock subsystem maintains the 'pin_config'
++ * array on behalf of the drivers, but the PHC subsystem
++ * assumes that every pin can perform every function. This
++ * hook gives drivers a way of telling the core about
++ * limitations on specific pins. This function must return
++ * zero if the function can be assigned to this pin, and
++ * nonzero otherwise.
++ * parameter pin: index of the pin in question.
++ * parameter func: the desired function to use.
++ * parameter chan: the function channel index to use.
++ *
+ * Drivers should embed their ptp_clock_info within a private
+ * structure, obtaining a reference to it using container_of().
+ *
+@@ -83,13 +99,17 @@
+ int n_alarm;
+ int n_ext_ts;
+ int n_per_out;
++ int n_pins;
+ int pps;
++ struct ptp_pin_desc *pin_config;
+ int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
+ int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
+ int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
+ int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
+ int (*enable)(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on);
++ int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
++ enum ptp_pin_function func, unsigned int chan);
+ };
+
+ struct ptp_clock;
+@@ -156,4 +176,17 @@
+
+ extern int ptp_clock_index(struct ptp_clock *ptp);
+
++/**
++ * ptp_find_pin() - obtain the pin index of a given auxiliary function
++ *
++ * @ptp: The clock obtained from ptp_clock_register().
++ * @func: One of the ptp_pin_function enumerated values.
++ * @chan: The particular functional channel to find.
++ * Return: Pin index in the range of zero to ptp_clock_caps.n_pins - 1,
++ * or -1 if the auxiliary function cannot be found.
++ */
++
++int ptp_find_pin(struct ptp_clock *ptp,
++ enum ptp_pin_function func, unsigned int chan);
++
+ #endif
+diff -Nur linux-3.14.36/include/linux/pxp_device.h linux-openelec/include/linux/pxp_device.h
+--- linux-3.14.36/include/linux/pxp_device.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/pxp_device.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _PXP_DEVICE
++#define _PXP_DEVICE
++
++#include <linux/idr.h>
++#include <linux/hash.h>
++#include <uapi/linux/pxp_device.h>
++
++struct pxp_irq_info {
++ wait_queue_head_t waitq;
++ atomic_t irq_pending;
++ int hist_status;
++};
++
++struct pxp_buffer_hash {
++ struct hlist_head *hash_table;
++ u32 order;
++ spinlock_t hash_lock;
++};
++
++struct pxp_buf_obj {
++ uint32_t handle;
++
++ uint32_t size;
++ uint32_t mem_type;
++
++ unsigned long offset;
++ void *virtual;
++
++ struct hlist_node item;
++};
++
++struct pxp_chan_obj {
++ uint32_t handle;
++ struct dma_chan *chan;
++};
++
++/* File private data */
++struct pxp_file {
++ struct file *filp;
++
++ /* record allocated dma buffer */
++ struct idr buffer_idr;
++ spinlock_t buffer_lock;
++
++ /* record allocated dma channel */
++ struct idr channel_idr;
++ spinlock_t channel_lock;
++};
++
++#endif
+diff -Nur linux-3.14.36/include/linux/pxp_dma.h linux-openelec/include/linux/pxp_dma.h
+--- linux-3.14.36/include/linux/pxp_dma.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/linux/pxp_dma.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,72 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _PXP_DMA
++#define _PXP_DMA
++
++#include <uapi/linux/pxp_dma.h>
++
++struct pxp_tx_desc {
++ struct dma_async_tx_descriptor txd;
++ struct list_head tx_list;
++ struct list_head list;
++ int len;
++ union {
++ struct pxp_layer_param s0_param;
++ struct pxp_layer_param out_param;
++ struct pxp_layer_param ol_param;
++ } layer_param;
++ struct pxp_proc_data proc_data;
++
++ u32 hist_status; /* Histogram output status */
++
++ struct pxp_tx_desc *next;
++};
++
++struct pxp_channel {
++ struct dma_chan dma_chan;
++ dma_cookie_t completed; /* last completed cookie */
++ enum pxp_channel_status status;
++ void *client; /* Only one client per channel */
++ unsigned int n_tx_desc;
++ struct pxp_tx_desc *desc; /* allocated tx-descriptors */
++ struct list_head queue; /* queued tx-descriptors */
++ struct list_head list; /* track queued channel number */
++ spinlock_t lock; /* protects sg[0,1], queue,
++ * status, cookie, free_list
++ */
++ int active_buffer;
++ unsigned int eof_irq;
++ char eof_name[16]; /* EOF IRQ name for request_irq() */
++};
++
++#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)
++#define to_pxp_channel(d) container_of(d, struct pxp_channel, dma_chan)
++
++void pxp_txd_ack(struct dma_async_tx_descriptor *txd,
++ struct pxp_channel *pxp_chan);
++
++#ifdef CONFIG_MXC_PXP_CLIENT_DEVICE
++int register_pxp_device(void);
++void unregister_pxp_device(void);
++#else
++int register_pxp_device(void) { return 0; }
++void unregister_pxp_device(void) {}
++#endif
++
++#endif
+diff -Nur linux-3.14.36/include/linux/regmap.h linux-openelec/include/linux/regmap.h
+--- linux-3.14.36/include/linux/regmap.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/regmap.h 2015-07-24 18:03:30.308842002 -0500
+@@ -27,6 +27,7 @@
+ struct regmap;
+ struct regmap_range_cfg;
+ struct regmap_field;
++struct snd_ac97;
+
+ /* An enum of all the supported cache types */
+ enum regcache_type {
+@@ -272,6 +273,12 @@
+ typedef int (*regmap_hw_read)(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
++
++typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
++ unsigned int *val);
++typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
++ unsigned int val);
++
+ typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
+ typedef void (*regmap_hw_free_context)(void *context);
+
+@@ -305,7 +312,9 @@
+ regmap_hw_write write;
+ regmap_hw_gather_write gather_write;
+ regmap_hw_async_write async_write;
++ regmap_hw_reg_write reg_write;
+ regmap_hw_read read;
++ regmap_hw_reg_read reg_read;
+ regmap_hw_free_context free_context;
+ regmap_hw_async_alloc async_alloc;
+ u8 read_flag_mask;
+@@ -326,6 +335,8 @@
+ struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config);
++struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
++ const struct regmap_config *config);
+
+ struct regmap *devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+@@ -340,6 +351,10 @@
+ struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config);
++struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
++ const struct regmap_config *config);
++
++bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+
+ /**
+ * regmap_init_mmio(): Initialise register map
+diff -Nur linux-3.14.36/include/linux/regulator/consumer.h linux-openelec/include/linux/regulator/consumer.h
+--- linux-3.14.36/include/linux/regulator/consumer.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/regulator/consumer.h 2015-05-06 12:05:44.000000000 -0500
+@@ -2,6 +2,7 @@
+ * consumer.h -- SoC Regulator consumer support.
+ *
+ * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+@@ -105,6 +106,8 @@
+ #define REGULATOR_EVENT_FORCE_DISABLE 0x20
+ #define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+ #define REGULATOR_EVENT_DISABLE 0x80
++#define REGULATOR_EVENT_PRE_DISABLE 0x100
++#define REGULATOR_EVENT_ENABLE 0x200
+
+ struct regulator;
+
+diff -Nur linux-3.14.36/include/linux/reset.h linux-openelec/include/linux/reset.h
+--- linux-3.14.36/include/linux/reset.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/reset.h 2015-05-06 12:05:44.000000000 -0500
+@@ -12,6 +12,13 @@
+ void reset_control_put(struct reset_control *rstc);
+ struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
+
++#ifdef CONFIG_RESET_CONTROLLER
+ int device_reset(struct device *dev);
++#else
++static inline int device_reset(struct device *dev)
++{
++ return 0;
++}
++#endif /* CONFIG_RESET_CONTROLLER */
+
+ #endif
+diff -Nur linux-3.14.36/include/linux/serial_core.h linux-openelec/include/linux/serial_core.h
+--- linux-3.14.36/include/linux/serial_core.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/serial_core.h 2015-05-06 12:05:44.000000000 -0500
+@@ -285,6 +285,22 @@
+ /*
+ * Console helpers.
+ */
++struct earlycon_device {
++ struct console *con;
++ struct uart_port port;
++ char options[16]; /* e.g., 115200n8 */
++ unsigned int baud;
++};
++int setup_earlycon(char *buf, const char *match,
++ int (*setup)(struct earlycon_device *, const char *));
++
++#define EARLYCON_DECLARE(name, func) \
++static int __init name ## _setup_earlycon(char *buf) \
++{ \
++ return setup_earlycon(buf, __stringify(name), func); \
++} \
++early_param("earlycon", name ## _setup_earlycon);
++
+ struct uart_port *uart_get_console(struct uart_port *ports, int nr,
+ struct console *c);
+ void uart_parse_options(char *options, int *baud, int *parity, int *bits,
+diff -Nur linux-3.14.36/include/linux/skbuff.h linux-openelec/include/linux/skbuff.h
+--- linux-3.14.36/include/linux/skbuff.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/skbuff.h 2015-07-24 18:03:28.620842002 -0500
+@@ -2038,7 +2038,7 @@
+ }
+
+ /**
+- * skb_frag_page - retrieve the page refered to by a paged fragment
++ * skb_frag_page - retrieve the page referred to by a paged fragment
+ * @frag: the paged fragment
+ *
+ * Returns the &struct page associated with @frag.
+diff -Nur linux-3.14.36/include/linux/spi/spi.h linux-openelec/include/linux/spi/spi.h
+--- linux-3.14.36/include/linux/spi/spi.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/spi/spi.h 2015-05-06 12:05:44.000000000 -0500
+@@ -234,7 +234,7 @@
+ * @mode_bits: flags understood by this controller driver
+ * @bits_per_word_mask: A mask indicating which values of bits_per_word are
+ * supported by the driver. Bit n indicates that a bits_per_word n+1 is
+- * suported. If set, the SPI core will reject any transfer with an
++ * supported. If set, the SPI core will reject any transfer with an
+ * unsupported bits_per_word. If not set, this value is simply ignored,
+ * and it's up to the individual driver to perform any validation.
+ * @min_speed_hz: Lowest supported transfer speed
+@@ -259,7 +259,7 @@
+ * @cur_msg: the currently in-flight message
+ * @cur_msg_prepared: spi_prepare_message was called for the currently
+ * in-flight message
+- * @xfer_completion: used by core tranfer_one_message()
++ * @xfer_completion: used by core transfer_one_message()
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+@@ -498,7 +498,7 @@
+ * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
+ * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+- * @tx_nbits: number of bits used for writting. If 0 the default
++ * @tx_nbits: number of bits used for writing. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+ * @rx_nbits: number of bits used for reading. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+@@ -556,7 +556,7 @@
+ * by the results of previous messages and where the whole transaction
+ * ends when the chipselect goes intactive.
+ *
+- * When SPI can transfer in 1x,2x or 4x. It can get this tranfer information
++ * When SPI can transfer in 1x,2x or 4x. It can get this transfer information
+ * from device through @tx_nbits and @rx_nbits. In Bi-direction, these
+ * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x)
+ * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer.
+diff -Nur linux-3.14.36/include/linux/syscalls.h linux-openelec/include/linux/syscalls.h
+--- linux-3.14.36/include/linux/syscalls.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/syscalls.h 2015-05-06 12:05:44.000000000 -0500
+@@ -744,6 +744,9 @@
+ int newdfd, const char __user *newname, int flags);
+ asmlinkage long sys_renameat(int olddfd, const char __user * oldname,
+ int newdfd, const char __user * newname);
++asmlinkage long sys_renameat2(int olddfd, const char __user *oldname,
++ int newdfd, const char __user *newname,
++ unsigned int flags);
+ asmlinkage long sys_futimesat(int dfd, const char __user *filename,
+ struct timeval __user *utimes);
+ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode);
+diff -Nur linux-3.14.36/include/linux/usb/chipidea.h linux-openelec/include/linux/usb/chipidea.h
+--- linux-3.14.36/include/linux/usb/chipidea.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/usb/chipidea.h 2015-05-06 12:05:44.000000000 -0500
+@@ -18,6 +18,7 @@
+ unsigned long flags;
+ #define CI_HDRC_REGS_SHARED BIT(0)
+ #define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1)
++#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
+ #define CI_HDRC_DISABLE_STREAMING BIT(3)
+ /*
+ * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
+@@ -25,6 +26,7 @@
+ */
+ #define CI_HDRC_DUAL_ROLE_NOT_OTG BIT(4)
+ #define CI_HDRC_IMX28_WRITE_FIX BIT(5)
++#define CI_HDRC_IMX_EHCI_QUIRK BIT(6)
+ enum usb_dr_mode dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT 0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+@@ -42,4 +44,6 @@
+ /* Remove ci hdrc device */
+ void ci_hdrc_remove_device(struct platform_device *pdev);
+
++/* Get current available role */
++enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev);
+ #endif
+diff -Nur linux-3.14.36/include/linux/usb/composite.h linux-openelec/include/linux/usb/composite.h
+--- linux-3.14.36/include/linux/usb/composite.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/usb/composite.h 2015-05-06 12:05:44.000000000 -0500
+@@ -92,7 +92,7 @@
+ * @suspend: Notifies functions when the host stops sending USB traffic.
+ * @resume: Notifies functions when the host restarts USB traffic.
+ * @get_status: Returns function status as a reply to
+- * GetStatus() request when the recepient is Interface.
++ * GetStatus() request when the recipient is Interface.
+ * @func_suspend: callback to be called when
+ * SetFeature(FUNCTION_SUSPEND) is reseived
+ *
+diff -Nur linux-3.14.36/include/linux/usb/phy.h linux-openelec/include/linux/usb/phy.h
+--- linux-3.14.36/include/linux/usb/phy.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/linux/usb/phy.h 2015-05-06 12:05:44.000000000 -0500
+@@ -111,11 +111,23 @@
+ int (*set_suspend)(struct usb_phy *x,
+ int suspend);
+
++ /*
++ * Set wakeup enable for PHY, in that case, the PHY can be
++ * waken up from suspend status due to external events,
++ * like vbus change, dp/dm change and id.
++ */
++ int (*set_wakeup)(struct usb_phy *x, bool enabled);
++
+ /* notify phy connect status change */
+ int (*notify_connect)(struct usb_phy *x,
+ enum usb_device_speed speed);
+ int (*notify_disconnect)(struct usb_phy *x,
+ enum usb_device_speed speed);
++ int (*notify_suspend)(struct usb_phy *x,
++ enum usb_device_speed speed);
++ int (*notify_resume)(struct usb_phy *x,
++ enum usb_device_speed speed);
++
+ };
+
+ /**
+@@ -265,6 +277,15 @@
+ }
+
+ static inline int
++usb_phy_set_wakeup(struct usb_phy *x, bool enabled)
++{
++ if (x && x->set_wakeup)
++ return x->set_wakeup(x, enabled);
++ else
++ return 0;
++}
++
++static inline int
+ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
+ {
+ if (x && x->notify_connect)
+@@ -281,6 +302,24 @@
+ else
+ return 0;
+ }
++
++static inline int usb_phy_notify_suspend
++ (struct usb_phy *x, enum usb_device_speed speed)
++{
++ if (x && x->notify_suspend)
++ return x->notify_suspend(x, speed);
++ else
++ return 0;
++}
++
++static inline int usb_phy_notify_resume
++ (struct usb_phy *x, enum usb_device_speed speed)
++{
++ if (x && x->notify_resume)
++ return x->notify_resume(x, speed);
++ else
++ return 0;
++}
+
+ /* notifiers */
+ static inline int
+diff -Nur linux-3.14.36/include/media/rc-map.h linux-openelec/include/media/rc-map.h
+--- linux-3.14.36/include/media/rc-map.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/media/rc-map.h 2015-07-24 18:03:30.128842002 -0500
+@@ -119,6 +119,7 @@
+ #define RC_MAP_DM1105_NEC "rc-dm1105-nec"
+ #define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
+ #define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
++#define RC_MAP_DVBSKY "rc-dvbsky"
+ #define RC_MAP_EMPTY "rc-empty"
+ #define RC_MAP_EM_TERRATEC "rc-em-terratec"
+ #define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
+diff -Nur linux-3.14.36/include/net/cfg80211.h linux-openelec/include/net/cfg80211.h
+--- linux-3.14.36/include/net/cfg80211.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/net/cfg80211.h 2015-05-06 12:05:44.000000000 -0500
+@@ -1729,7 +1729,7 @@
+ u8 *ssid;
+ size_t ssid_len;
+ enum nl80211_auth_type auth_type;
+- u8 *ie;
++ const u8 *ie;
+ size_t ie_len;
+ bool privacy;
+ enum nl80211_mfp mfp;
+@@ -3888,6 +3888,7 @@
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the IBSS joined
++ * @channel: the channel of the IBSS joined
+ * @gfp: allocation flags
+ *
+ * This function notifies cfg80211 that the device joined an IBSS or
+@@ -3897,7 +3898,8 @@
+ * with the locally generated beacon -- this guarantees that there is
+ * always a scan result for this IBSS. cfg80211 will handle the rest.
+ */
+-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp);
++void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
++ struct ieee80211_channel *channel, gfp_t gfp);
+
+ /**
+ * cfg80211_notify_new_candidate - notify cfg80211 of a new mesh peer candidate
+diff -Nur linux-3.14.36/include/net/mac80211.h linux-openelec/include/net/mac80211.h
+--- linux-3.14.36/include/net/mac80211.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/net/mac80211.h 2015-05-06 12:05:44.000000000 -0500
+@@ -1895,7 +1895,7 @@
+ *
+ * Driver informs U-APSD client support by enabling
+ * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
+- * uapsd paramater in conf_tx() operation. Hardware needs to send the QoS
++ * uapsd parameter in conf_tx() operation. Hardware needs to send the QoS
+ * Nullfunc frames and stay awake until the service period has ended. To
+ * utilize U-APSD, dynamic powersave is disabled for voip AC and all frames
+ * from that AC are transmitted with powersave enabled.
+@@ -2101,7 +2101,7 @@
+ * with the number of frames to be released and which TIDs they are
+ * to come from. In this case, the driver is responsible for setting
+ * the EOSP (for uAPSD) and MORE_DATA bits in the released frames,
+- * to help the @more_data paramter is passed to tell the driver if
++ * to help the @more_data parameter is passed to tell the driver if
+ * there is more data on other TIDs -- the TIDs to release frames
+ * from are ignored since mac80211 doesn't know how many frames the
+ * buffers for those TIDs contain.
+@@ -2616,6 +2616,7 @@
+ * of queues to flush, which is useful if different virtual interfaces
+ * use different hardware queues; it may also indicate all queues.
+ * If the parameter @drop is set to %true, pending frames may be dropped.
++ * Note that vif can be NULL.
+ * The callback can sleep.
+ *
+ * @channel_switch: Drivers that need (or want) to offload the channel
+@@ -2662,7 +2663,7 @@
+ * parameters. In the case where the driver buffers some frames for
+ * sleeping stations mac80211 will use this callback to tell the driver
+ * to release some frames, either for PS-poll or uAPSD.
+- * Note that if the @more_data paramter is %false the driver must check
++ * Note that if the @more_data parameter is %false the driver must check
+ * if there are more frames on the given TIDs, and if there are more than
+ * the frames being released then it must still set the more-data bit in
+ * the frame. If the @more_data parameter is %true, then of course the
+@@ -2878,7 +2879,8 @@
+ struct netlink_callback *cb,
+ void *data, int len);
+ #endif
+- void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
++ void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop);
+ void (*channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+ int (*napi_poll)(struct ieee80211_hw *hw, int budget);
+diff -Nur linux-3.14.36/include/net/rtnetlink.h linux-openelec/include/net/rtnetlink.h
+--- linux-3.14.36/include/net/rtnetlink.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/net/rtnetlink.h 2015-05-06 12:05:44.000000000 -0500
+@@ -140,7 +140,7 @@
+ struct nlattr *tb[]);
+ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+
+-extern const struct nla_policy ifla_policy[IFLA_MAX+1];
++int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
+
+ #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+
+diff -Nur linux-3.14.36/include/net/tso.h linux-openelec/include/net/tso.h
+--- linux-3.14.36/include/net/tso.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/net/tso.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,20 @@
++#ifndef _TSO_H
++#define _TSO_H
++
++#include <net/ip.h>
++
++struct tso_t {
++ int next_frag_idx;
++ void *data;
++ size_t size;
++ u16 ip_id;
++ u32 tcp_seq;
++};
++
++int tso_count_descs(struct sk_buff *skb);
++void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
++ int size, bool is_last);
++void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
++void tso_start(struct sk_buff *skb, struct tso_t *tso);
++
++#endif /* _TSO_H */
+diff -Nur linux-3.14.36/include/sound/soc.h linux-openelec/include/sound/soc.h
+--- linux-3.14.36/include/sound/soc.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/sound/soc.h 2015-07-24 18:03:30.320842002 -0500
+@@ -412,6 +412,7 @@
+ const char *dai_link, int stream);
+ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
+ const char *dai_link);
++int soc_ac97_dev_register(struct snd_soc_codec *codec);
+
+ /* Utility functions to get clock rates from various things */
+ int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
+diff -Nur linux-3.14.36/include/sound/wm8962.h linux-openelec/include/sound/wm8962.h
+--- linux-3.14.36/include/sound/wm8962.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/sound/wm8962.h 2015-05-06 12:05:44.000000000 -0500
+@@ -55,6 +55,9 @@
+ * in a DC measurement configuration.
+ */
+ bool in4_dc_measure;
++
++ /* MCLK for wm8962 */
++ struct clk *codec_mclk;
+ };
+
+ #endif
+diff -Nur linux-3.14.36/include/trace/events/cpufreq_interactive.h linux-openelec/include/trace/events/cpufreq_interactive.h
+--- linux-3.14.36/include/trace/events/cpufreq_interactive.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/trace/events/cpufreq_interactive.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,112 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM cpufreq_interactive
++
++#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_CPUFREQ_INTERACTIVE_H
++
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(set,
++ TP_PROTO(u32 cpu_id, unsigned long targfreq,
++ unsigned long actualfreq),
++ TP_ARGS(cpu_id, targfreq, actualfreq),
++
++ TP_STRUCT__entry(
++ __field( u32, cpu_id )
++ __field(unsigned long, targfreq )
++ __field(unsigned long, actualfreq )
++ ),
++
++ TP_fast_assign(
++ __entry->cpu_id = (u32) cpu_id;
++ __entry->targfreq = targfreq;
++ __entry->actualfreq = actualfreq;
++ ),
++
++ TP_printk("cpu=%u targ=%lu actual=%lu",
++ __entry->cpu_id, __entry->targfreq,
++ __entry->actualfreq)
++);
++
++DEFINE_EVENT(set, cpufreq_interactive_setspeed,
++ TP_PROTO(u32 cpu_id, unsigned long targfreq,
++ unsigned long actualfreq),
++ TP_ARGS(cpu_id, targfreq, actualfreq)
++);
++
++DECLARE_EVENT_CLASS(loadeval,
++ TP_PROTO(unsigned long cpu_id, unsigned long load,
++ unsigned long curtarg, unsigned long curactual,
++ unsigned long newtarg),
++ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, cpu_id )
++ __field(unsigned long, load )
++ __field(unsigned long, curtarg )
++ __field(unsigned long, curactual )
++ __field(unsigned long, newtarg )
++ ),
++
++ TP_fast_assign(
++ __entry->cpu_id = cpu_id;
++ __entry->load = load;
++ __entry->curtarg = curtarg;
++ __entry->curactual = curactual;
++ __entry->newtarg = newtarg;
++ ),
++
++ TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
++ __entry->cpu_id, __entry->load, __entry->curtarg,
++ __entry->curactual, __entry->newtarg)
++);
++
++DEFINE_EVENT(loadeval, cpufreq_interactive_target,
++ TP_PROTO(unsigned long cpu_id, unsigned long load,
++ unsigned long curtarg, unsigned long curactual,
++ unsigned long newtarg),
++ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
++);
++
++DEFINE_EVENT(loadeval, cpufreq_interactive_already,
++ TP_PROTO(unsigned long cpu_id, unsigned long load,
++ unsigned long curtarg, unsigned long curactual,
++ unsigned long newtarg),
++ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
++);
++
++DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
++ TP_PROTO(unsigned long cpu_id, unsigned long load,
++ unsigned long curtarg, unsigned long curactual,
++ unsigned long newtarg),
++ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
++);
++
++TRACE_EVENT(cpufreq_interactive_boost,
++ TP_PROTO(const char *s),
++ TP_ARGS(s),
++ TP_STRUCT__entry(
++ __string(s, s)
++ ),
++ TP_fast_assign(
++ __assign_str(s, s);
++ ),
++ TP_printk("%s", __get_str(s))
++);
++
++TRACE_EVENT(cpufreq_interactive_unboost,
++ TP_PROTO(const char *s),
++ TP_ARGS(s),
++ TP_STRUCT__entry(
++ __string(s, s)
++ ),
++ TP_fast_assign(
++ __assign_str(s, s);
++ ),
++ TP_printk("%s", __get_str(s))
++);
++
++#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff -Nur linux-3.14.36/include/trace/events/thermal.h linux-openelec/include/trace/events/thermal.h
+--- linux-3.14.36/include/trace/events/thermal.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/trace/events/thermal.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,83 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM thermal
++
++#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_THERMAL_H
++
++#include <linux/thermal.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(thermal_temperature,
++
++ TP_PROTO(struct thermal_zone_device *tz),
++
++ TP_ARGS(tz),
++
++ TP_STRUCT__entry(
++ __string(thermal_zone, tz->type)
++ __field(int, id)
++ __field(int, temp_prev)
++ __field(int, temp)
++ ),
++
++ TP_fast_assign(
++ __assign_str(thermal_zone, tz->type);
++ __entry->id = tz->id;
++ __entry->temp_prev = tz->last_temperature;
++ __entry->temp = tz->temperature;
++ ),
++
++ TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d",
++ __get_str(thermal_zone), __entry->id, __entry->temp_prev,
++ __entry->temp)
++);
++
++TRACE_EVENT(cdev_update,
++
++ TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
++
++ TP_ARGS(cdev, target),
++
++ TP_STRUCT__entry(
++ __string(type, cdev->type)
++ __field(unsigned long, target)
++ ),
++
++ TP_fast_assign(
++ __assign_str(type, cdev->type);
++ __entry->target = target;
++ ),
++
++ TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
++);
++
++TRACE_EVENT(thermal_zone_trip,
++
++ TP_PROTO(struct thermal_zone_device *tz, int trip,
++ enum thermal_trip_type trip_type),
++
++ TP_ARGS(tz, trip, trip_type),
++
++ TP_STRUCT__entry(
++ __string(thermal_zone, tz->type)
++ __field(int, id)
++ __field(int, trip)
++ __field(enum thermal_trip_type, trip_type)
++ ),
++
++ TP_fast_assign(
++ __assign_str(thermal_zone, tz->type);
++ __entry->id = tz->id;
++ __entry->trip = trip;
++ __entry->trip_type = trip_type;
++ ),
++
++ TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d",
++ __get_str(thermal_zone), __entry->id, __entry->trip,
++ __entry->trip_type)
++);
++
++#endif /* _TRACE_THERMAL_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff -Nur linux-3.14.36/include/uapi/linux/ipu.h linux-openelec/include/uapi/linux/ipu.h
+--- linux-3.14.36/include/uapi/linux/ipu.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/ipu.h 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,282 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @defgroup IPU MXC Image Processing Unit (IPU) Driver
++ */
++/*!
++ * @file uapi/linux/ipu.h
++ *
++ * @brief This file contains the IPU driver API declarations.
++ *
++ * @ingroup IPU
++ */
++
++#ifndef __ASM_ARCH_IPU_H__
++#define __ASM_ARCH_IPU_H__
++
++#include <linux/types.h>
++#include <linux/videodev2.h>
++
++#ifndef __KERNEL__
++#ifndef __cplusplus
++typedef unsigned char bool;
++#endif
++#define irqreturn_t int
++#define dma_addr_t int
++#define uint32_t unsigned int
++#define uint16_t unsigned short
++#define uint8_t unsigned char
++#define u32 unsigned int
++#define u8 unsigned char
++#define __u32 u32
++#endif
++
++/*!
++ * Enumeration of IPU rotation modes
++ */
++typedef enum {
++ /* Note the enum values correspond to BAM value */
++ IPU_ROTATE_NONE = 0,
++ IPU_ROTATE_VERT_FLIP = 1,
++ IPU_ROTATE_HORIZ_FLIP = 2,
++ IPU_ROTATE_180 = 3,
++ IPU_ROTATE_90_RIGHT = 4,
++ IPU_ROTATE_90_RIGHT_VFLIP = 5,
++ IPU_ROTATE_90_RIGHT_HFLIP = 6,
++ IPU_ROTATE_90_LEFT = 7,
++} ipu_rotate_mode_t;
++
++/*!
++ * Enumeration of VDI MOTION select
++ */
++typedef enum {
++ MED_MOTION = 0,
++ LOW_MOTION = 1,
++ HIGH_MOTION = 2,
++} ipu_motion_sel;
++
++/*!
++ * Enumeration of DI ports for ADC.
++ */
++typedef enum {
++ DISP0,
++ DISP1,
++ DISP2,
++ DISP3
++} display_port_t;
++
++/* IPU Pixel format definitions */
++/* Four-character-code (FOURCC) */
++#define fourcc(a, b, c, d)\
++ (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
++
++/*!
++ * @name IPU Pixel Formats
++ *
++ * Pixel formats are defined with ASCII FOURCC code. The pixel format codes are
++ * the same used by V4L2 API.
++ */
++
++/*! @{ */
++/*! @name Generic or Raw Data Formats */
++/*! @{ */
++#define IPU_PIX_FMT_GENERIC fourcc('I', 'P', 'U', '0') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_GENERIC_32 fourcc('I', 'P', 'U', '1') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_GENERIC_16 fourcc('I', 'P', 'U', '2') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_LVDS666 fourcc('L', 'V', 'D', '6') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_LVDS888 fourcc('L', 'V', 'D', '8') /*!< IPU Generic Data */
++/*! @} */
++/*! @name RGB Formats */
++/*! @{ */
++#define IPU_PIX_FMT_RGB332 fourcc('R', 'G', 'B', '1') /*!< 8 RGB-3-3-2 */
++#define IPU_PIX_FMT_RGB555 fourcc('R', 'G', 'B', 'O') /*!< 16 RGB-5-5-5 */
++#define IPU_PIX_FMT_RGB565 fourcc('R', 'G', 'B', 'P') /*!< 1 6 RGB-5-6-5 */
++#define IPU_PIX_FMT_RGB666 fourcc('R', 'G', 'B', '6') /*!< 18 RGB-6-6-6 */
++#define IPU_PIX_FMT_BGR666 fourcc('B', 'G', 'R', '6') /*!< 18 BGR-6-6-6 */
++#define IPU_PIX_FMT_BGR24 fourcc('B', 'G', 'R', '3') /*!< 24 BGR-8-8-8 */
++#define IPU_PIX_FMT_RGB24 fourcc('R', 'G', 'B', '3') /*!< 24 RGB-8-8-8 */
++#define IPU_PIX_FMT_GBR24 fourcc('G', 'B', 'R', '3') /*!< 24 GBR-8-8-8 */
++#define IPU_PIX_FMT_BGR32 fourcc('B', 'G', 'R', '4') /*!< 32 BGR-8-8-8-8 */
++#define IPU_PIX_FMT_BGRA32 fourcc('B', 'G', 'R', 'A') /*!< 32 BGR-8-8-8-8 */
++#define IPU_PIX_FMT_RGB32 fourcc('R', 'G', 'B', '4') /*!< 32 RGB-8-8-8-8 */
++#define IPU_PIX_FMT_RGBA32 fourcc('R', 'G', 'B', 'A') /*!< 32 RGB-8-8-8-8 */
++#define IPU_PIX_FMT_ABGR32 fourcc('A', 'B', 'G', 'R') /*!< 32 ABGR-8-8-8-8 */
++/*! @} */
++/*! @name YUV Interleaved Formats */
++/*! @{ */
++#define IPU_PIX_FMT_YUYV fourcc('Y', 'U', 'Y', 'V') /*!< 16 YUV 4:2:2 */
++#define IPU_PIX_FMT_UYVY fourcc('U', 'Y', 'V', 'Y') /*!< 16 YUV 4:2:2 */
++#define IPU_PIX_FMT_YVYU fourcc('Y', 'V', 'Y', 'U') /*!< 16 YVYU 4:2:2 */
++#define IPU_PIX_FMT_VYUY fourcc('V', 'Y', 'U', 'Y') /*!< 16 VYYU 4:2:2 */
++#define IPU_PIX_FMT_Y41P fourcc('Y', '4', '1', 'P') /*!< 12 YUV 4:1:1 */
++#define IPU_PIX_FMT_YUV444 fourcc('Y', '4', '4', '4') /*!< 24 YUV 4:4:4 */
++#define IPU_PIX_FMT_VYU444 fourcc('V', '4', '4', '4') /*!< 24 VYU 4:4:4 */
++/* two planes -- one Y, one Cb + Cr interleaved */
++#define IPU_PIX_FMT_NV12 fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
++/* two planes -- 12 tiled Y/CbCr 4:2:0 */
++#define IPU_PIX_FMT_TILED_NV12 fourcc('T', 'N', 'V', 'P')
++#define IPU_PIX_FMT_TILED_NV12F fourcc('T', 'N', 'V', 'F')
++
++/*! @} */
++/*! @name YUV Planar Formats */
++/*! @{ */
++#define IPU_PIX_FMT_GREY fourcc('G', 'R', 'E', 'Y') /*!< 8 Greyscale */
++#define IPU_PIX_FMT_YVU410P fourcc('Y', 'V', 'U', '9') /*!< 9 YVU 4:1:0 */
++#define IPU_PIX_FMT_YUV410P fourcc('Y', 'U', 'V', '9') /*!< 9 YUV 4:1:0 */
++#define IPU_PIX_FMT_YVU420P fourcc('Y', 'V', '1', '2') /*!< 12 YVU 4:2:0 */
++#define IPU_PIX_FMT_YUV420P fourcc('I', '4', '2', '0') /*!< 12 YUV 4:2:0 */
++#define IPU_PIX_FMT_YUV420P2 fourcc('Y', 'U', '1', '2') /*!< 12 YUV 4:2:0 */
++#define IPU_PIX_FMT_YVU422P fourcc('Y', 'V', '1', '6') /*!< 16 YVU 4:2:2 */
++#define IPU_PIX_FMT_YUV422P fourcc('4', '2', '2', 'P') /*!< 16 YUV 4:2:2 */
++/* non-interleaved 4:4:4 */
++#define IPU_PIX_FMT_YUV444P fourcc('4', '4', '4', 'P') /*!< 24 YUV 4:4:4 */
++/*! @} */
++#define IPU_PIX_FMT_TILED_NV12_MBALIGN (16)
++#define TILED_NV12_FRAME_SIZE(w, h) \
++ (ALIGN((w) * (h), SZ_4K) + ALIGN((w) * (h) / 2, SZ_4K))
++/* IPU device */
++typedef enum {
++ RGB_CS,
++ YUV_CS,
++ NULL_CS
++} cs_t;
++
++struct ipu_pos {
++ u32 x;
++ u32 y;
++};
++
++struct ipu_crop {
++ struct ipu_pos pos;
++ u32 w;
++ u32 h;
++};
++
++struct ipu_deinterlace {
++ bool enable;
++ u8 motion; /*see ipu_motion_sel*/
++#define IPU_DEINTERLACE_FIELD_TOP 0
++#define IPU_DEINTERLACE_FIELD_BOTTOM 1
++#define IPU_DEINTERLACE_FIELD_MASK \
++ (IPU_DEINTERLACE_FIELD_TOP | IPU_DEINTERLACE_FIELD_BOTTOM)
++ /* deinterlace frame rate double flags */
++#define IPU_DEINTERLACE_RATE_EN 0x80
++#define IPU_DEINTERLACE_RATE_FRAME1 0x40
++#define IPU_DEINTERLACE_RATE_MASK \
++ (IPU_DEINTERLACE_RATE_EN | IPU_DEINTERLACE_RATE_FRAME1)
++#define IPU_DEINTERLACE_MAX_FRAME 2
++ u8 field_fmt;
++};
++
++struct ipu_input {
++ u32 width;
++ u32 height;
++ u32 format;
++ struct ipu_crop crop;
++ dma_addr_t paddr;
++
++ struct ipu_deinterlace deinterlace;
++ dma_addr_t paddr_n; /*valid when deinterlace enable*/
++};
++
++struct ipu_alpha {
++#define IPU_ALPHA_MODE_GLOBAL 0
++#define IPU_ALPHA_MODE_LOCAL 1
++ u8 mode;
++ u8 gvalue; /* 0~255 */
++ dma_addr_t loc_alp_paddr;
++};
++
++struct ipu_colorkey {
++ bool enable;
++ u32 value; /* RGB 24bit */
++};
++
++struct ipu_overlay {
++ u32 width;
++ u32 height;
++ u32 format;
++ struct ipu_crop crop;
++ struct ipu_alpha alpha;
++ struct ipu_colorkey colorkey;
++ dma_addr_t paddr;
++};
++
++struct ipu_output {
++ u32 width;
++ u32 height;
++ u32 format;
++ u8 rotate;
++ struct ipu_crop crop;
++ dma_addr_t paddr;
++};
++
++struct ipu_task {
++ struct ipu_input input;
++ struct ipu_output output;
++
++ bool overlay_en;
++ struct ipu_overlay overlay;
++
++#define IPU_TASK_PRIORITY_NORMAL 0
++#define IPU_TASK_PRIORITY_HIGH 1
++ u8 priority;
++
++#define IPU_TASK_ID_ANY 0
++#define IPU_TASK_ID_VF 1
++#define IPU_TASK_ID_PP 2
++#define IPU_TASK_ID_MAX 3
++ u8 task_id;
++
++ int timeout;
++};
++
++enum {
++ IPU_CHECK_OK = 0,
++ IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN = 0x1,
++ IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN = 0x2,
++ IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN = 0x4,
++ IPU_CHECK_ERR_MIN,
++ IPU_CHECK_ERR_INPUT_CROP,
++ IPU_CHECK_ERR_OUTPUT_CROP,
++ IPU_CHECK_ERR_OVERLAY_CROP,
++ IPU_CHECK_ERR_INPUT_OVER_LIMIT,
++ IPU_CHECK_ERR_OV_OUT_NO_FIT,
++ IPU_CHECK_ERR_OVERLAY_WITH_VDI,
++ IPU_CHECK_ERR_PROC_NO_NEED,
++ IPU_CHECK_ERR_SPLIT_INPUTW_OVER,
++ IPU_CHECK_ERR_SPLIT_INPUTH_OVER,
++ IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER,
++ IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER,
++ IPU_CHECK_ERR_SPLIT_WITH_ROT,
++ IPU_CHECK_ERR_NOT_SUPPORT,
++ IPU_CHECK_ERR_NOT16ALIGN,
++ IPU_CHECK_ERR_W_DOWNSIZE_OVER,
++ IPU_CHECK_ERR_H_DOWNSIZE_OVER,
++};
++
++/* IOCTL commands */
++#define IPU_CHECK_TASK _IOWR('I', 0x1, struct ipu_task)
++#define IPU_QUEUE_TASK _IOW('I', 0x2, struct ipu_task)
++#define IPU_ALLOC _IOWR('I', 0x3, int)
++#define IPU_FREE _IOW('I', 0x4, int)
++
++#endif
+diff -Nur linux-3.14.36/include/uapi/linux/isl29023.h linux-openelec/include/uapi/linux/isl29023.h
+--- linux-3.14.36/include/uapi/linux/isl29023.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/isl29023.h 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,47 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __UAPI_LINUX_ISL29023_H__
++#define __UAPI_LINUX_ISL29023_H__
++
++#include <linux/types.h>
++
++#define ISL29023_PD_MODE 0x0
++#define ISL29023_ALS_ONCE_MODE 0x1
++#define ISL29023_IR_ONCE_MODE 0x2
++#define ISL29023_ALS_CONT_MODE 0x5
++#define ISL29023_IR_CONT_MODE 0x6
++
++#define ISL29023_INT_PERSISTS_1 0x0
++#define ISL29023_INT_PERSISTS_4 0x1
++#define ISL29023_INT_PERSISTS_8 0x2
++#define ISL29023_INT_PERSISTS_16 0x3
++
++#define ISL29023_RES_16 0x0
++#define ISL29023_RES_12 0x1
++#define ISL29023_RES_8 0x2
++#define ISL29023_RES_4 0x3
++
++#define ISL29023_RANGE_1K 0x0
++#define ISL29023_RANGE_4K 0x1
++#define ISL29023_RANGE_16K 0x2
++#define ISL29023_RANGE_64K 0x3
++
++#endif
+diff -Nur linux-3.14.36/include/uapi/linux/Kbuild linux-openelec/include/uapi/linux/Kbuild
+--- linux-3.14.36/include/uapi/linux/Kbuild 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/uapi/linux/Kbuild 2015-05-06 12:05:45.000000000 -0500
+@@ -226,6 +226,7 @@
+ header-y += kvm_para.h
+ endif
+
++header-y += ipu.h
+ header-y += l2tp.h
+ header-y += libc-compat.h
+ header-y += limits.h
+@@ -253,6 +254,9 @@
+ header-y += msdos_fs.h
+ header-y += msg.h
+ header-y += mtio.h
++header-y += mxcfb.h
++header-y += mxc_asrc.h
++header-y += mxc_v4l2.h
+ header-y += n_r3964.h
+ header-y += nbd.h
+ header-y += ncp.h
+@@ -318,6 +322,8 @@
+ header-y += prctl.h
+ header-y += ptp_clock.h
+ header-y += ptrace.h
++header-y += pxp_dma.h
++header-y += pxp_device.h
+ header-y += qnx4_fs.h
+ header-y += qnxtypes.h
+ header-y += quota.h
+diff -Nur linux-3.14.36/include/uapi/linux/mxc_asrc.h linux-openelec/include/uapi/linux/mxc_asrc.h
+--- linux-3.14.36/include/uapi/linux/mxc_asrc.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/mxc_asrc.h 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,143 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ *
++ * @file mxc_asrc.h
++ *
++ * @brief i.MX Asynchronous Sample Rate Converter
++ *
++ * @ingroup Audio
++ */
++
++#ifndef __MXC_ASRC_UAPI_H__
++#define __MXC_ASRC_UAPI_H__
++
++#define ASRC_IOC_MAGIC 'C'
++
++#define ASRC_REQ_PAIR _IOWR(ASRC_IOC_MAGIC, 0, struct asrc_req)
++#define ASRC_CONFIG_PAIR _IOWR(ASRC_IOC_MAGIC, 1, struct asrc_config)
++#define ASRC_RELEASE_PAIR _IOW(ASRC_IOC_MAGIC, 2, enum asrc_pair_index)
++#define ASRC_CONVERT _IOW(ASRC_IOC_MAGIC, 3, struct asrc_convert_buffer)
++#define ASRC_START_CONV _IOW(ASRC_IOC_MAGIC, 4, enum asrc_pair_index)
++#define ASRC_STOP_CONV _IOW(ASRC_IOC_MAGIC, 5, enum asrc_pair_index)
++#define ASRC_STATUS _IOW(ASRC_IOC_MAGIC, 6, struct asrc_status_flags)
++#define ASRC_FLUSH _IOW(ASRC_IOC_MAGIC, 7, enum asrc_pair_index)
++
++enum asrc_pair_index {
++ ASRC_UNVALID_PAIR = -1,
++ ASRC_PAIR_A = 0,
++ ASRC_PAIR_B = 1,
++ ASRC_PAIR_C = 2,
++};
++
++#define ASRC_PAIR_MAX_NUM (ASRC_PAIR_C + 1)
++
++enum asrc_inclk {
++ INCLK_NONE = 0x03,
++ INCLK_ESAI_RX = 0x00,
++ INCLK_SSI1_RX = 0x01,
++ INCLK_SSI2_RX = 0x02,
++ INCLK_SSI3_RX = 0x07,
++ INCLK_SPDIF_RX = 0x04,
++ INCLK_MLB_CLK = 0x05,
++ INCLK_PAD = 0x06,
++ INCLK_ESAI_TX = 0x08,
++ INCLK_SSI1_TX = 0x09,
++ INCLK_SSI2_TX = 0x0a,
++ INCLK_SSI3_TX = 0x0b,
++ INCLK_SPDIF_TX = 0x0c,
++ INCLK_ASRCK1_CLK = 0x0f,
++};
++
++enum asrc_outclk {
++ OUTCLK_NONE = 0x03,
++ OUTCLK_ESAI_TX = 0x00,
++ OUTCLK_SSI1_TX = 0x01,
++ OUTCLK_SSI2_TX = 0x02,
++ OUTCLK_SSI3_TX = 0x07,
++ OUTCLK_SPDIF_TX = 0x04,
++ OUTCLK_MLB_CLK = 0x05,
++ OUTCLK_PAD = 0x06,
++ OUTCLK_ESAI_RX = 0x08,
++ OUTCLK_SSI1_RX = 0x09,
++ OUTCLK_SSI2_RX = 0x0a,
++ OUTCLK_SSI3_RX = 0x0b,
++ OUTCLK_SPDIF_RX = 0x0c,
++ OUTCLK_ASRCK1_CLK = 0x0f,
++};
++
++enum asrc_word_width {
++ ASRC_WIDTH_24_BIT = 0,
++ ASRC_WIDTH_16_BIT = 1,
++ ASRC_WIDTH_8_BIT = 2,
++};
++
++struct asrc_config {
++ enum asrc_pair_index pair;
++ unsigned int channel_num;
++ unsigned int buffer_num;
++ unsigned int dma_buffer_size;
++ unsigned int input_sample_rate;
++ unsigned int output_sample_rate;
++ enum asrc_word_width input_word_width;
++ enum asrc_word_width output_word_width;
++ enum asrc_inclk inclk;
++ enum asrc_outclk outclk;
++};
++
++struct asrc_pair {
++ unsigned int start_channel;
++ unsigned int chn_num;
++ unsigned int chn_max;
++ unsigned int active;
++ unsigned int overload_error;
++};
++
++struct asrc_req {
++ unsigned int chn_num;
++ enum asrc_pair_index index;
++};
++
++struct asrc_querybuf {
++ unsigned int buffer_index;
++ unsigned int input_length;
++ unsigned int output_length;
++ unsigned long input_offset;
++ unsigned long output_offset;
++};
++
++struct asrc_convert_buffer {
++ void *input_buffer_vaddr;
++ void *output_buffer_vaddr;
++ unsigned int input_buffer_length;
++ unsigned int output_buffer_length;
++};
++
++struct asrc_buffer {
++ unsigned int index;
++ unsigned int length;
++ unsigned int output_last_length;
++ int buf_valid;
++};
++
++struct asrc_status_flags {
++ enum asrc_pair_index index;
++ unsigned int overload_error;
++};
++
++#define ASRC_BUF_NA -35 /* ASRC DQ's buffer is NOT available */
++#define ASRC_BUF_AV 35 /* ASRC DQ's buffer is available */
++enum asrc_error_status {
++ ASRC_TASK_Q_OVERLOAD = 0x01,
++ ASRC_OUTPUT_TASK_OVERLOAD = 0x02,
++ ASRC_INPUT_TASK_OVERLOAD = 0x04,
++ ASRC_OUTPUT_BUFFER_OVERFLOW = 0x08,
++ ASRC_INPUT_BUFFER_UNDERRUN = 0x10,
++};
++#endif/* __MXC_ASRC_UAPI_H__ */
+diff -Nur linux-3.14.36/include/uapi/linux/mxcfb.h linux-openelec/include/uapi/linux/mxcfb.h
+--- linux-3.14.36/include/uapi/linux/mxcfb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/mxcfb.h 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,174 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*
++ * @file uapi/linux/mxcfb.h
++ *
++ * @brief Global header file for the MXC frame buffer
++ *
++ * @ingroup Framebuffer
++ */
++#ifndef __ASM_ARCH_MXCFB_H__
++#define __ASM_ARCH_MXCFB_H__
++
++#include <linux/fb.h>
++
++#define FB_SYNC_OE_LOW_ACT 0x80000000
++#define FB_SYNC_CLK_LAT_FALL 0x40000000
++#define FB_SYNC_DATA_INVERT 0x20000000
++#define FB_SYNC_CLK_IDLE_EN 0x10000000
++#define FB_SYNC_SHARP_MODE 0x08000000
++#define FB_SYNC_SWAP_RGB 0x04000000
++#define FB_ACCEL_TRIPLE_FLAG 0x00000000
++#define FB_ACCEL_DOUBLE_FLAG 0x00000001
++
++struct mxcfb_gbl_alpha {
++ int enable;
++ int alpha;
++};
++
++struct mxcfb_loc_alpha {
++ int enable;
++ int alpha_in_pixel;
++ unsigned long alpha_phy_addr0;
++ unsigned long alpha_phy_addr1;
++};
++
++struct mxcfb_color_key {
++ int enable;
++ __u32 color_key;
++};
++
++struct mxcfb_pos {
++ __u16 x;
++ __u16 y;
++};
++
++struct mxcfb_gamma {
++ int enable;
++ int constk[16];
++ int slopek[16];
++};
++
++struct mxcfb_rect {
++ __u32 top;
++ __u32 left;
++ __u32 width;
++ __u32 height;
++};
++
++#define GRAYSCALE_8BIT 0x1
++#define GRAYSCALE_8BIT_INVERTED 0x2
++#define GRAYSCALE_4BIT 0x3
++#define GRAYSCALE_4BIT_INVERTED 0x4
++
++#define AUTO_UPDATE_MODE_REGION_MODE 0
++#define AUTO_UPDATE_MODE_AUTOMATIC_MODE 1
++
++#define UPDATE_SCHEME_SNAPSHOT 0
++#define UPDATE_SCHEME_QUEUE 1
++#define UPDATE_SCHEME_QUEUE_AND_MERGE 2
++
++#define UPDATE_MODE_PARTIAL 0x0
++#define UPDATE_MODE_FULL 0x1
++
++#define WAVEFORM_MODE_AUTO 257
++
++#define TEMP_USE_AMBIENT 0x1000
++
++#define EPDC_FLAG_ENABLE_INVERSION 0x01
++#define EPDC_FLAG_FORCE_MONOCHROME 0x02
++#define EPDC_FLAG_USE_CMAP 0x04
++#define EPDC_FLAG_USE_ALT_BUFFER 0x100
++#define EPDC_FLAG_TEST_COLLISION 0x200
++#define EPDC_FLAG_GROUP_UPDATE 0x400
++#define EPDC_FLAG_USE_DITHERING_Y1 0x2000
++#define EPDC_FLAG_USE_DITHERING_Y4 0x4000
++
++#define FB_POWERDOWN_DISABLE -1
++
++struct mxcfb_alt_buffer_data {
++ __u32 phys_addr;
++ __u32 width; /* width of entire buffer */
++ __u32 height; /* height of entire buffer */
++ struct mxcfb_rect alt_update_region; /* region within buffer to update */
++};
++
++struct mxcfb_update_data {
++ struct mxcfb_rect update_region;
++ __u32 waveform_mode;
++ __u32 update_mode;
++ __u32 update_marker;
++ int temp;
++ unsigned int flags;
++ struct mxcfb_alt_buffer_data alt_buffer_data;
++};
++
++struct mxcfb_update_marker_data {
++ __u32 update_marker;
++ __u32 collision_test;
++};
++
++/*
++ * Structure used to define waveform modes for driver
++ * Needed for driver to perform auto-waveform selection
++ */
++struct mxcfb_waveform_modes {
++ int mode_init;
++ int mode_du;
++ int mode_gc4;
++ int mode_gc8;
++ int mode_gc16;
++ int mode_gc32;
++};
++
++/*
++ * Structure used to define a 5*3 matrix of parameters for
++ * setting IPU DP CSC module related to this framebuffer.
++ */
++struct mxcfb_csc_matrix {
++ int param[5][3];
++};
++
++#define MXCFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t)
++#define MXCFB_SET_GBL_ALPHA _IOW('F', 0x21, struct mxcfb_gbl_alpha)
++#define MXCFB_SET_CLR_KEY _IOW('F', 0x22, struct mxcfb_color_key)
++#define MXCFB_SET_OVERLAY_POS _IOWR('F', 0x24, struct mxcfb_pos)
++#define MXCFB_GET_FB_IPU_CHAN _IOR('F', 0x25, u_int32_t)
++#define MXCFB_SET_LOC_ALPHA _IOWR('F', 0x26, struct mxcfb_loc_alpha)
++#define MXCFB_SET_LOC_ALP_BUF _IOW('F', 0x27, unsigned long)
++#define MXCFB_SET_GAMMA _IOW('F', 0x28, struct mxcfb_gamma)
++#define MXCFB_GET_FB_IPU_DI _IOR('F', 0x29, u_int32_t)
++#define MXCFB_GET_DIFMT _IOR('F', 0x2A, u_int32_t)
++#define MXCFB_GET_FB_BLANK _IOR('F', 0x2B, u_int32_t)
++#define MXCFB_SET_DIFMT _IOW('F', 0x2C, u_int32_t)
++#define MXCFB_CSC_UPDATE _IOW('F', 0x2D, struct mxcfb_csc_matrix)
++
++/* IOCTLs for E-ink panel updates */
++#define MXCFB_SET_WAVEFORM_MODES _IOW('F', 0x2B, struct mxcfb_waveform_modes)
++#define MXCFB_SET_TEMPERATURE _IOW('F', 0x2C, int32_t)
++#define MXCFB_SET_AUTO_UPDATE_MODE _IOW('F', 0x2D, __u32)
++#define MXCFB_SEND_UPDATE _IOW('F', 0x2E, struct mxcfb_update_data)
++#define MXCFB_WAIT_FOR_UPDATE_COMPLETE _IOWR('F', 0x2F, struct mxcfb_update_marker_data)
++#define MXCFB_SET_PWRDOWN_DELAY _IOW('F', 0x30, int32_t)
++#define MXCFB_GET_PWRDOWN_DELAY _IOR('F', 0x31, int32_t)
++#define MXCFB_SET_UPDATE_SCHEME _IOW('F', 0x32, __u32)
++#define MXCFB_GET_WORK_BUFFER _IOWR('F', 0x34, unsigned long)
++#endif
+diff -Nur linux-3.14.36/include/uapi/linux/mxc_mlb.h linux-openelec/include/uapi/linux/mxc_mlb.h
+--- linux-3.14.36/include/uapi/linux/mxc_mlb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/mxc_mlb.h 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,55 @@
++/*
++ * mxc_mlb.h
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef _MXC_MLB_UAPI_H
++#define _MXC_MLB_UAPI_H
++
++/* define IOCTL command */
++#define MLB_DBG_RUNTIME _IO('S', 0x09)
++#define MLB_SET_FPS _IOW('S', 0x10, unsigned int)
++#define MLB_GET_VER _IOR('S', 0x11, unsigned long)
++#define MLB_SET_DEVADDR _IOR('S', 0x12, unsigned char)
++
++/*!
++ * set channel address for each logical channel
++ * the MSB 16bits is for tx channel, the left LSB is for rx channel
++ */
++#define MLB_CHAN_SETADDR _IOW('S', 0x13, unsigned int)
++#define MLB_CHAN_STARTUP _IO('S', 0x14)
++#define MLB_CHAN_SHUTDOWN _IO('S', 0x15)
++#define MLB_CHAN_GETEVENT _IOR('S', 0x16, unsigned long)
++
++#define MLB_SET_ISOC_BLKSIZE_188 _IO('S', 0x17)
++#define MLB_SET_ISOC_BLKSIZE_196 _IO('S', 0x18)
++#define MLB_SET_SYNC_QUAD _IOW('S', 0x19, unsigned int)
++#define MLB_IRQ_ENABLE _IO('S', 0x20)
++#define MLB_IRQ_DISABLE _IO('S', 0x21)
++
++/*!
++ * MLB event define
++ */
++enum {
++ MLB_EVT_TX_PROTO_ERR_CUR = 1 << 0,
++ MLB_EVT_TX_BRK_DETECT_CUR = 1 << 1,
++ MLB_EVT_TX_PROTO_ERR_PREV = 1 << 8,
++ MLB_EVT_TX_BRK_DETECT_PREV = 1 << 9,
++ MLB_EVT_RX_PROTO_ERR_CUR = 1 << 16,
++ MLB_EVT_RX_BRK_DETECT_CUR = 1 << 17,
++ MLB_EVT_RX_PROTO_ERR_PREV = 1 << 24,
++ MLB_EVT_RX_BRK_DETECT_PREV = 1 << 25,
++};
++
++
++#endif /* _MXC_MLB_H */
+diff -Nur linux-3.14.36/include/uapi/linux/mxc_v4l2.h linux-openelec/include/uapi/linux/mxc_v4l2.h
+--- linux-3.14.36/include/uapi/linux/mxc_v4l2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/mxc_v4l2.h 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,56 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file uapi/linux/mxc_v4l2.h
++ *
++ * @brief MXC V4L2 private header file
++ *
++ * @ingroup MXC V4L2
++ */
++
++#ifndef __ASM_ARCH_MXC_V4L2_H__
++#define __ASM_ARCH_MXC_V4L2_H__
++
++/*
++ * For IPUv1 and IPUv3, V4L2_CID_MXC_ROT means encoder ioctl ID.
++ * And V4L2_CID_MXC_VF_ROT is viewfinder ioctl ID only for IPUv1 and IPUv3.
++ */
++#define V4L2_CID_MXC_ROT (V4L2_CID_PRIVATE_BASE + 0)
++#define V4L2_CID_MXC_FLASH (V4L2_CID_PRIVATE_BASE + 1)
++#define V4L2_CID_MXC_VF_ROT (V4L2_CID_PRIVATE_BASE + 2)
++#define V4L2_CID_MXC_MOTION (V4L2_CID_PRIVATE_BASE + 3)
++#define V4L2_CID_MXC_SWITCH_CAM (V4L2_CID_PRIVATE_BASE + 6)
++
++#define V4L2_MXC_ROTATE_NONE 0
++#define V4L2_MXC_ROTATE_VERT_FLIP 1
++#define V4L2_MXC_ROTATE_HORIZ_FLIP 2
++#define V4L2_MXC_ROTATE_180 3
++#define V4L2_MXC_ROTATE_90_RIGHT 4
++#define V4L2_MXC_ROTATE_90_RIGHT_VFLIP 5
++#define V4L2_MXC_ROTATE_90_RIGHT_HFLIP 6
++#define V4L2_MXC_ROTATE_90_LEFT 7
++
++struct v4l2_mxc_offset {
++ uint32_t u_offset;
++ uint32_t v_offset;
++};
++
++#endif
+diff -Nur linux-3.14.36/include/uapi/linux/ptp_clock.h linux-openelec/include/uapi/linux/ptp_clock.h
+--- linux-3.14.36/include/uapi/linux/ptp_clock.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/uapi/linux/ptp_clock.h 2015-05-06 12:05:45.000000000 -0500
+@@ -50,7 +50,8 @@
+ int n_ext_ts; /* Number of external time stamp channels. */
+ int n_per_out; /* Number of programmable periodic signals. */
+ int pps; /* Whether the clock supports a PPS callback. */
+- int rsv[15]; /* Reserved for future use. */
++ int n_pins; /* Number of input/output pins. */
++ int rsv[14]; /* Reserved for future use. */
+ };
+
+ struct ptp_extts_request {
+@@ -80,6 +81,40 @@
+ struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
+ };
+
++enum ptp_pin_function {
++ PTP_PF_NONE,
++ PTP_PF_EXTTS,
++ PTP_PF_PEROUT,
++ PTP_PF_PHYSYNC,
++};
++
++struct ptp_pin_desc {
++ /*
++ * Hardware specific human readable pin name. This field is
++ * set by the kernel during the PTP_PIN_GETFUNC ioctl and is
++ * ignored for the PTP_PIN_SETFUNC ioctl.
++ */
++ char name[64];
++ /*
++ * Pin index in the range of zero to ptp_clock_caps.n_pins - 1.
++ */
++ unsigned int index;
++ /*
++ * Which of the PTP_PF_xxx functions to use on this pin.
++ */
++ unsigned int func;
++ /*
++ * The specific channel to use for this function.
++ * This corresponds to the 'index' field of the
++ * PTP_EXTTS_REQUEST and PTP_PEROUT_REQUEST ioctls.
++ */
++ unsigned int chan;
++ /*
++ * Reserved for future use.
++ */
++ unsigned int rsv[5];
++};
++
+ #define PTP_CLK_MAGIC '='
+
+ #define PTP_CLOCK_GETCAPS _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps)
+@@ -87,6 +122,8 @@
+ #define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request)
+ #define PTP_ENABLE_PPS _IOW(PTP_CLK_MAGIC, 4, int)
+ #define PTP_SYS_OFFSET _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
++#define PTP_PIN_GETFUNC _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc)
++#define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
+
+ struct ptp_extts_event {
+ struct ptp_clock_time t; /* Time event occured. */
+diff -Nur linux-3.14.36/include/uapi/linux/ptrace.h linux-openelec/include/uapi/linux/ptrace.h
+--- linux-3.14.36/include/uapi/linux/ptrace.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/include/uapi/linux/ptrace.h 2015-07-24 18:03:30.348842002 -0500
+@@ -55,11 +55,13 @@
+
+ #define PTRACE_PEEKSIGINFO 0x4209
+
++#ifdef __KERNEL__
+ struct ptrace_peeksiginfo_args {
+ __u64 off; /* from which siginfo to start */
+ __u32 flags;
+ __s32 nr; /* how may siginfos to take */
+ };
++#endif /* __KERNEL__ */
+
+ #define PTRACE_GETSIGMASK 0x420a
+ #define PTRACE_SETSIGMASK 0x420b
+diff -Nur linux-3.14.36/include/uapi/linux/ptrace.h.orig linux-openelec/include/uapi/linux/ptrace.h.orig
+--- linux-3.14.36/include/uapi/linux/ptrace.h.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/ptrace.h.orig 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,99 @@
++#ifndef _UAPI_LINUX_PTRACE_H
++#define _UAPI_LINUX_PTRACE_H
++/* ptrace.h */
++/* structs and defines to help the user use the ptrace system call. */
++
++/* has the defines to get at the registers. */
++
++#include <linux/types.h>
++
++#define PTRACE_TRACEME 0
++#define PTRACE_PEEKTEXT 1
++#define PTRACE_PEEKDATA 2
++#define PTRACE_PEEKUSR 3
++#define PTRACE_POKETEXT 4
++#define PTRACE_POKEDATA 5
++#define PTRACE_POKEUSR 6
++#define PTRACE_CONT 7
++#define PTRACE_KILL 8
++#define PTRACE_SINGLESTEP 9
++
++#define PTRACE_ATTACH 16
++#define PTRACE_DETACH 17
++
++#define PTRACE_SYSCALL 24
++
++/* 0x4200-0x4300 are reserved for architecture-independent additions. */
++#define PTRACE_SETOPTIONS 0x4200
++#define PTRACE_GETEVENTMSG 0x4201
++#define PTRACE_GETSIGINFO 0x4202
++#define PTRACE_SETSIGINFO 0x4203
++
++/*
++ * Generic ptrace interface that exports the architecture specific regsets
++ * using the corresponding NT_* types (which are also used in the core dump).
++ * Please note that the NT_PRSTATUS note type in a core dump contains a full
++ * 'struct elf_prstatus'. But the user_regset for NT_PRSTATUS contains just the
++ * elf_gregset_t that is the pr_reg field of 'struct elf_prstatus'. For all the
++ * other user_regset flavors, the user_regset layout and the ELF core dump note
++ * payload are exactly the same layout.
++ *
++ * This interface usage is as follows:
++ * struct iovec iov = { buf, len};
++ *
++ * ret = ptrace(PTRACE_GETREGSET/PTRACE_SETREGSET, pid, NT_XXX_TYPE, &iov);
++ *
++ * On the successful completion, iov.len will be updated by the kernel,
++ * specifying how much the kernel has written/read to/from the user's iov.buf.
++ */
++#define PTRACE_GETREGSET 0x4204
++#define PTRACE_SETREGSET 0x4205
++
++#define PTRACE_SEIZE 0x4206
++#define PTRACE_INTERRUPT 0x4207
++#define PTRACE_LISTEN 0x4208
++
++#define PTRACE_PEEKSIGINFO 0x4209
++
++struct ptrace_peeksiginfo_args {
++ __u64 off; /* from which siginfo to start */
++ __u32 flags;
++ __s32 nr; /* how may siginfos to take */
++};
++
++#define PTRACE_GETSIGMASK 0x420a
++#define PTRACE_SETSIGMASK 0x420b
++
++/* Read signals from a shared (process wide) queue */
++#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
++
++/* Wait extended result codes for the above trace options. */
++#define PTRACE_EVENT_FORK 1
++#define PTRACE_EVENT_VFORK 2
++#define PTRACE_EVENT_CLONE 3
++#define PTRACE_EVENT_EXEC 4
++#define PTRACE_EVENT_VFORK_DONE 5
++#define PTRACE_EVENT_EXIT 6
++#define PTRACE_EVENT_SECCOMP 7
++/* Extended result codes which enabled by means other than options. */
++#define PTRACE_EVENT_STOP 128
++
++/* Options set using PTRACE_SETOPTIONS or using PTRACE_SEIZE @data param */
++#define PTRACE_O_TRACESYSGOOD 1
++#define PTRACE_O_TRACEFORK (1 << PTRACE_EVENT_FORK)
++#define PTRACE_O_TRACEVFORK (1 << PTRACE_EVENT_VFORK)
++#define PTRACE_O_TRACECLONE (1 << PTRACE_EVENT_CLONE)
++#define PTRACE_O_TRACEEXEC (1 << PTRACE_EVENT_EXEC)
++#define PTRACE_O_TRACEVFORKDONE (1 << PTRACE_EVENT_VFORK_DONE)
++#define PTRACE_O_TRACEEXIT (1 << PTRACE_EVENT_EXIT)
++#define PTRACE_O_TRACESECCOMP (1 << PTRACE_EVENT_SECCOMP)
++
++/* eventless options */
++#define PTRACE_O_EXITKILL (1 << 20)
++
++#define PTRACE_O_MASK (0x000000ff | PTRACE_O_EXITKILL)
++
++#include <asm/ptrace.h>
++
++
++#endif /* _UAPI_LINUX_PTRACE_H */
+diff -Nur linux-3.14.36/include/uapi/linux/pxp_device.h linux-openelec/include/uapi/linux/pxp_device.h
+--- linux-3.14.36/include/uapi/linux/pxp_device.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/pxp_device.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,63 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _UAPI_PXP_DEVICE
++#define _UAPI_PXP_DEVICE
++
++#include <linux/pxp_dma.h>
++
++struct pxp_chan_handle {
++ unsigned int handle;
++ int hist_status;
++};
++
++struct pxp_mem_desc {
++ unsigned int handle;
++ unsigned int size;
++ dma_addr_t phys_addr;
++ unsigned int virt_uaddr; /* virtual user space address */
++ unsigned int mtype;
++};
++
++struct pxp_mem_flush {
++ unsigned int handle;
++ unsigned int type;
++};
++
++#define PXP_IOC_MAGIC 'P'
++
++#define PXP_IOC_GET_CHAN _IOR(PXP_IOC_MAGIC, 0, struct pxp_mem_desc)
++#define PXP_IOC_PUT_CHAN _IOW(PXP_IOC_MAGIC, 1, struct pxp_mem_desc)
++#define PXP_IOC_CONFIG_CHAN _IOW(PXP_IOC_MAGIC, 2, struct pxp_mem_desc)
++#define PXP_IOC_START_CHAN _IOW(PXP_IOC_MAGIC, 3, struct pxp_mem_desc)
++#define PXP_IOC_GET_PHYMEM _IOWR(PXP_IOC_MAGIC, 4, struct pxp_mem_desc)
++#define PXP_IOC_PUT_PHYMEM _IOW(PXP_IOC_MAGIC, 5, struct pxp_mem_desc)
++#define PXP_IOC_WAIT4CMPLT _IOWR(PXP_IOC_MAGIC, 6, struct pxp_mem_desc)
++#define PXP_IOC_FLUSH_PHYMEM _IOR(PXP_IOC_MAGIC, 7, struct pxp_mem_flush)
++
++/* Memory types supported*/
++#define MEMORY_TYPE_UNCACHED 0x0
++#define MEMORY_TYPE_WC 0x1
++#define MEMORY_TYPE_CACHED 0x2
++
++/* Cache flush operations */
++#define CACHE_CLEAN 0x1
++#define CACHE_INVALIDATE 0x2
++#define CACHE_FLUSH 0x4
++
++#endif
+diff -Nur linux-3.14.36/include/uapi/linux/pxp_dma.h linux-openelec/include/uapi/linux/pxp_dma.h
+--- linux-3.14.36/include/uapi/linux/pxp_dma.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/uapi/linux/pxp_dma.h 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _UAPI_PXP_DMA
++#define _UAPI_PXP_DMA
++
++#include <linux/posix_types.h>
++#include <linux/types.h>
++
++#ifndef __KERNEL__
++typedef unsigned long dma_addr_t;
++typedef unsigned char bool;
++#endif
++
++/* PXP Pixel format definitions */
++/* Four-character-code (FOURCC) */
++#define fourcc(a, b, c, d)\
++ (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
++
++/*!
++ * @name PXP Pixel Formats
++ *
++ * Pixel formats are defined with ASCII FOURCC code. The pixel format codes are
++ * the same used by V4L2 API.
++ */
++
++/*! @} */
++/*! @name RGB Formats */
++/*! @{ */
++#define PXP_PIX_FMT_RGB332 fourcc('R', 'G', 'B', '1') /*!< 8 RGB-3-3-2 */
++#define PXP_PIX_FMT_RGB555 fourcc('R', 'G', 'B', 'O') /*!< 16 RGB-5-5-5 */
++#define PXP_PIX_FMT_RGB565 fourcc('R', 'G', 'B', 'P') /*!< 1 6 RGB-5-6-5 */
++#define PXP_PIX_FMT_RGB666 fourcc('R', 'G', 'B', '6') /*!< 18 RGB-6-6-6 */
++#define PXP_PIX_FMT_BGR666 fourcc('B', 'G', 'R', '6') /*!< 18 BGR-6-6-6 */
++#define PXP_PIX_FMT_BGR24 fourcc('B', 'G', 'R', '3') /*!< 24 BGR-8-8-8 */
++#define PXP_PIX_FMT_RGB24 fourcc('R', 'G', 'B', '3') /*!< 24 RGB-8-8-8 */
++#define PXP_PIX_FMT_BGR32 fourcc('B', 'G', 'R', '4') /*!< 32 BGR-8-8-8-8 */
++#define PXP_PIX_FMT_BGRA32 fourcc('B', 'G', 'R', 'A') /*!< 32 BGR-8-8-8-8 */
++#define PXP_PIX_FMT_RGB32 fourcc('R', 'G', 'B', '4') /*!< 32 RGB-8-8-8-8 */
++#define PXP_PIX_FMT_RGBA32 fourcc('R', 'G', 'B', 'A') /*!< 32 RGB-8-8-8-8 */
++#define PXP_PIX_FMT_ABGR32 fourcc('A', 'B', 'G', 'R') /*!< 32 ABGR-8-8-8-8 */
++/*! @} */
++/*! @name YUV Interleaved Formats */
++/*! @{ */
++#define PXP_PIX_FMT_YUYV fourcc('Y', 'U', 'Y', 'V') /*!< 16 YUV 4:2:2 */
++#define PXP_PIX_FMT_UYVY fourcc('U', 'Y', 'V', 'Y') /*!< 16 YUV 4:2:2 */
++#define PXP_PIX_FMT_VYUY fourcc('V', 'Y', 'U', 'Y') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_YVYU fourcc('Y', 'V', 'Y', 'U') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_Y41P fourcc('Y', '4', '1', 'P') /*!< 12 YUV 4:1:1 */
++#define PXP_PIX_FMT_YUV444 fourcc('Y', '4', '4', '4') /*!< 24 YUV 4:4:4 */
++/* two planes -- one Y, one Cb + Cr interleaved */
++#define PXP_PIX_FMT_NV12 fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
++#define PXP_PIX_FMT_NV21 fourcc('N', 'V', '2', '1') /* 12 Y/CbCr 4:2:0 */
++#define PXP_PIX_FMT_NV16 fourcc('N', 'V', '1', '6') /* 12 Y/CbCr 4:2:2 */
++#define PXP_PIX_FMT_NV61 fourcc('N', 'V', '6', '1') /* 12 Y/CbCr 4:2:2 */
++/*! @} */
++/*! @name YUV Planar Formats */
++/*! @{ */
++#define PXP_PIX_FMT_GREY fourcc('G', 'R', 'E', 'Y') /*!< 8 Greyscale */
++#define PXP_PIX_FMT_GY04 fourcc('G', 'Y', '0', '4') /*!< 4 Greyscale */
++#define PXP_PIX_FMT_YVU410P fourcc('Y', 'V', 'U', '9') /*!< 9 YVU 4:1:0 */
++#define PXP_PIX_FMT_YUV410P fourcc('Y', 'U', 'V', '9') /*!< 9 YUV 4:1:0 */
++#define PXP_PIX_FMT_YVU420P fourcc('Y', 'V', '1', '2') /*!< 12 YVU 4:2:0 */
++#define PXP_PIX_FMT_YUV420P fourcc('I', '4', '2', '0') /*!< 12 YUV 4:2:0 */
++#define PXP_PIX_FMT_YUV420P2 fourcc('Y', 'U', '1', '2') /*!< 12 YUV 4:2:0 */
++#define PXP_PIX_FMT_YVU422P fourcc('Y', 'V', '1', '6') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_YUV422P fourcc('4', '2', '2', 'P') /*!< 16 YUV 4:2:2 */
++/*! @} */
++
++#define PXP_LUT_NONE 0x0
++#define PXP_LUT_INVERT 0x1
++#define PXP_LUT_BLACK_WHITE 0x2
++#define PXP_LUT_USE_CMAP 0x4
++
++#define NR_PXP_VIRT_CHANNEL 16
++
++/* Order significant! */
++enum pxp_channel_status {
++ PXP_CHANNEL_FREE,
++ PXP_CHANNEL_INITIALIZED,
++ PXP_CHANNEL_READY,
++};
++
++struct rect {
++ int top; /* Upper left coordinate of rectangle */
++ int left;
++ int width;
++ int height;
++};
++
++struct pxp_layer_param {
++ unsigned short width;
++ unsigned short height;
++ unsigned short stride; /* aka pitch */
++ unsigned int pixel_fmt;
++
++ /* layers combining parameters
++ * (these are ignored for S0 and output
++ * layers, and only apply for OL layer)
++ */
++ bool combine_enable;
++ unsigned int color_key_enable;
++ unsigned int color_key;
++ bool global_alpha_enable;
++ /* global alpha is either override or multiply */
++ bool global_override;
++ unsigned char global_alpha;
++ bool alpha_invert;
++ bool local_alpha_enable;
++
++ dma_addr_t paddr;
++};
++
++struct pxp_proc_data {
++ /* S0 Transformation Info */
++ int scaling;
++ int hflip;
++ int vflip;
++ int rotate;
++ int rot_pos;
++ int yuv;
++
++ /* Source rectangle (srect) defines the sub-rectangle
++ * within S0 to undergo processing.
++ */
++ struct rect srect;
++ /* Dest rect (drect) defines how to position the processed
++ * source rectangle (after resizing) within the output frame,
++ * whose dimensions are defined in pxp->pxp_conf_state.out_param
++ */
++ struct rect drect;
++
++ /* Current S0 configuration */
++ unsigned int bgcolor;
++
++ /* Output overlay support */
++ int overlay_state;
++
++ /* LUT transformation on Y data */
++ int lut_transform;
++ unsigned char *lut_map; /* 256 entries */
++ bool lut_map_updated; /* Map recently changed */
++ bool combine_enable;
++};
++
++struct pxp_config_data {
++ struct pxp_layer_param s0_param;
++ struct pxp_layer_param ol_param[8];
++ struct pxp_layer_param out_param;
++ struct pxp_proc_data proc_data;
++ int layer_nr;
++
++ /* Users don't touch */
++ int handle;
++};
++
++
++#endif
+diff -Nur linux-3.14.36/include/video/mxc_edid.h linux-openelec/include/video/mxc_edid.h
+--- linux-3.14.36/include/video/mxc_edid.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/video/mxc_edid.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,105 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxc_edid.h
++ *
++ * @brief MXC EDID tools
++ *
++ * @ingroup Framebuffer
++ */
++
++#ifndef MXC_EDID_H
++#define MXC_EDID_H
++
++#include <linux/fb.h>
++
++#define FB_VMODE_ASPECT_4_3 0x10
++#define FB_VMODE_ASPECT_16_9 0x20
++#define FB_VMODE_ASPECT_MASK (FB_VMODE_ASPECT_4_3 | FB_VMODE_ASPECT_16_9)
++
++enum cea_audio_coding_types {
++ AUDIO_CODING_TYPE_REF_STREAM_HEADER = 0,
++ AUDIO_CODING_TYPE_LPCM = 1,
++ AUDIO_CODING_TYPE_AC3 = 2,
++ AUDIO_CODING_TYPE_MPEG1 = 3,
++ AUDIO_CODING_TYPE_MP3 = 4,
++ AUDIO_CODING_TYPE_MPEG2 = 5,
++ AUDIO_CODING_TYPE_AACLC = 6,
++ AUDIO_CODING_TYPE_DTS = 7,
++ AUDIO_CODING_TYPE_ATRAC = 8,
++ AUDIO_CODING_TYPE_SACD = 9,
++ AUDIO_CODING_TYPE_EAC3 = 10,
++ AUDIO_CODING_TYPE_DTS_HD = 11,
++ AUDIO_CODING_TYPE_MLP = 12,
++ AUDIO_CODING_TYPE_DST = 13,
++ AUDIO_CODING_TYPE_WMAPRO = 14,
++ AUDIO_CODING_TYPE_RESERVED = 15,
++};
++
++struct mxc_hdmi_3d_format {
++ unsigned char vic_order_2d;
++ unsigned char struct_3d;
++ unsigned char detail_3d;
++ unsigned char reserved;
++};
++
++struct mxc_edid_cfg {
++ bool cea_underscan;
++ bool cea_basicaudio;
++ bool cea_ycbcr444;
++ bool cea_ycbcr422;
++ bool hdmi_cap;
++
++ /*VSD*/
++ bool vsd_support_ai;
++ bool vsd_dc_48bit;
++ bool vsd_dc_36bit;
++ bool vsd_dc_30bit;
++ bool vsd_dc_y444;
++ bool vsd_dvi_dual;
++
++ bool vsd_cnc0;
++ bool vsd_cnc1;
++ bool vsd_cnc2;
++ bool vsd_cnc3;
++
++ u8 vsd_video_latency;
++ u8 vsd_audio_latency;
++ u8 vsd_I_video_latency;
++ u8 vsd_I_audio_latency;
++
++ u8 physical_address[4];
++ u8 hdmi_vic[64];
++ struct mxc_hdmi_3d_format hdmi_3d_format[64];
++ u16 hdmi_3d_mask_all;
++ u16 hdmi_3d_struct_all;
++ u32 vsd_max_tmdsclk_rate;
++
++ u8 max_channels;
++ u8 sample_sizes;
++ u8 sample_rates;
++ u8 speaker_alloc;
++};
++
++int mxc_edid_var_to_vic(struct fb_var_screeninfo *var);
++int mxc_edid_mode_to_vic(const struct fb_videomode *mode);
++int mxc_edid_read(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, struct mxc_edid_cfg *cfg, struct fb_info *fbi);
++int mxc_edid_parse_ext_blk(unsigned char *edid, struct mxc_edid_cfg *cfg,
++ struct fb_monspecs *specs);
++#endif
+diff -Nur linux-3.14.36/include/video/mxc_hdmi.h linux-openelec/include/video/mxc_hdmi.h
+--- linux-3.14.36/include/video/mxc_hdmi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/include/video/mxc_hdmi.h 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,1027 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MXC_HDMI_H__
++#define __MXC_HDMI_H__
++
++/*
++ * Hdmi controller registers
++ */
++
++/* Identification Registers */
++#define HDMI_DESIGN_ID 0x0000
++#define HDMI_REVISION_ID 0x0001
++#define HDMI_PRODUCT_ID0 0x0002
++#define HDMI_PRODUCT_ID1 0x0003
++#define HDMI_CONFIG0_ID 0x0004
++#define HDMI_CONFIG1_ID 0x0005
++#define HDMI_CONFIG2_ID 0x0006
++#define HDMI_CONFIG3_ID 0x0007
++
++/* Interrupt Registers */
++#define HDMI_IH_FC_STAT0 0x0100
++#define HDMI_IH_FC_STAT1 0x0101
++#define HDMI_IH_FC_STAT2 0x0102
++#define HDMI_IH_AS_STAT0 0x0103
++#define HDMI_IH_PHY_STAT0 0x0104
++#define HDMI_IH_I2CM_STAT0 0x0105
++#define HDMI_IH_CEC_STAT0 0x0106
++#define HDMI_IH_VP_STAT0 0x0107
++#define HDMI_IH_I2CMPHY_STAT0 0x0108
++#define HDMI_IH_AHBDMAAUD_STAT0 0x0109
++
++#define HDMI_IH_MUTE_FC_STAT0 0x0180
++#define HDMI_IH_MUTE_FC_STAT1 0x0181
++#define HDMI_IH_MUTE_FC_STAT2 0x0182
++#define HDMI_IH_MUTE_AS_STAT0 0x0183
++#define HDMI_IH_MUTE_PHY_STAT0 0x0184
++#define HDMI_IH_MUTE_I2CM_STAT0 0x0185
++#define HDMI_IH_MUTE_CEC_STAT0 0x0186
++#define HDMI_IH_MUTE_VP_STAT0 0x0187
++#define HDMI_IH_MUTE_I2CMPHY_STAT0 0x0188
++#define HDMI_IH_MUTE_AHBDMAAUD_STAT0 0x0189
++#define HDMI_IH_MUTE 0x01FF
++
++/* Video Sample Registers */
++#define HDMI_TX_INVID0 0x0200
++#define HDMI_TX_INSTUFFING 0x0201
++#define HDMI_TX_GYDATA0 0x0202
++#define HDMI_TX_GYDATA1 0x0203
++#define HDMI_TX_RCRDATA0 0x0204
++#define HDMI_TX_RCRDATA1 0x0205
++#define HDMI_TX_BCBDATA0 0x0206
++#define HDMI_TX_BCBDATA1 0x0207
++
++/* Video Packetizer Registers */
++#define HDMI_VP_STATUS 0x0800
++#define HDMI_VP_PR_CD 0x0801
++#define HDMI_VP_STUFF 0x0802
++#define HDMI_VP_REMAP 0x0803
++#define HDMI_VP_CONF 0x0804
++#define HDMI_VP_STAT 0x0805
++#define HDMI_VP_INT 0x0806
++#define HDMI_VP_MASK 0x0807
++#define HDMI_VP_POL 0x0808
++
++/* Frame Composer Registers */
++#define HDMI_FC_INVIDCONF 0x1000
++#define HDMI_FC_INHACTV0 0x1001
++#define HDMI_FC_INHACTV1 0x1002
++#define HDMI_FC_INHBLANK0 0x1003
++#define HDMI_FC_INHBLANK1 0x1004
++#define HDMI_FC_INVACTV0 0x1005
++#define HDMI_FC_INVACTV1 0x1006
++#define HDMI_FC_INVBLANK 0x1007
++#define HDMI_FC_HSYNCINDELAY0 0x1008
++#define HDMI_FC_HSYNCINDELAY1 0x1009
++#define HDMI_FC_HSYNCINWIDTH0 0x100A
++#define HDMI_FC_HSYNCINWIDTH1 0x100B
++#define HDMI_FC_VSYNCINDELAY 0x100C
++#define HDMI_FC_VSYNCINWIDTH 0x100D
++#define HDMI_FC_INFREQ0 0x100E
++#define HDMI_FC_INFREQ1 0x100F
++#define HDMI_FC_INFREQ2 0x1010
++#define HDMI_FC_CTRLDUR 0x1011
++#define HDMI_FC_EXCTRLDUR 0x1012
++#define HDMI_FC_EXCTRLSPAC 0x1013
++#define HDMI_FC_CH0PREAM 0x1014
++#define HDMI_FC_CH1PREAM 0x1015
++#define HDMI_FC_CH2PREAM 0x1016
++#define HDMI_FC_AVICONF3 0x1017
++#define HDMI_FC_GCP 0x1018
++#define HDMI_FC_AVICONF0 0x1019
++#define HDMI_FC_AVICONF1 0x101A
++#define HDMI_FC_AVICONF2 0x101B
++#define HDMI_FC_AVIVID 0x101C
++#define HDMI_FC_AVIETB0 0x101D
++#define HDMI_FC_AVIETB1 0x101E
++#define HDMI_FC_AVISBB0 0x101F
++#define HDMI_FC_AVISBB1 0x1020
++#define HDMI_FC_AVIELB0 0x1021
++#define HDMI_FC_AVIELB1 0x1022
++#define HDMI_FC_AVISRB0 0x1023
++#define HDMI_FC_AVISRB1 0x1024
++#define HDMI_FC_AUDICONF0 0x1025
++#define HDMI_FC_AUDICONF1 0x1026
++#define HDMI_FC_AUDICONF2 0x1027
++#define HDMI_FC_AUDICONF3 0x1028
++#define HDMI_FC_VSDIEEEID0 0x1029
++#define HDMI_FC_VSDSIZE 0x102A
++#define HDMI_FC_VSDIEEEID1 0x1030
++#define HDMI_FC_VSDIEEEID2 0x1031
++#define HDMI_FC_VSDPAYLOAD0 0x1032
++#define HDMI_FC_VSDPAYLOAD1 0x1033
++#define HDMI_FC_VSDPAYLOAD2 0x1034
++#define HDMI_FC_VSDPAYLOAD3 0x1035
++#define HDMI_FC_VSDPAYLOAD4 0x1036
++#define HDMI_FC_VSDPAYLOAD5 0x1037
++#define HDMI_FC_VSDPAYLOAD6 0x1038
++#define HDMI_FC_VSDPAYLOAD7 0x1039
++#define HDMI_FC_VSDPAYLOAD8 0x103A
++#define HDMI_FC_VSDPAYLOAD9 0x103B
++#define HDMI_FC_VSDPAYLOAD10 0x103C
++#define HDMI_FC_VSDPAYLOAD11 0x103D
++#define HDMI_FC_VSDPAYLOAD12 0x103E
++#define HDMI_FC_VSDPAYLOAD13 0x103F
++#define HDMI_FC_VSDPAYLOAD14 0x1040
++#define HDMI_FC_VSDPAYLOAD15 0x1041
++#define HDMI_FC_VSDPAYLOAD16 0x1042
++#define HDMI_FC_VSDPAYLOAD17 0x1043
++#define HDMI_FC_VSDPAYLOAD18 0x1044
++#define HDMI_FC_VSDPAYLOAD19 0x1045
++#define HDMI_FC_VSDPAYLOAD20 0x1046
++#define HDMI_FC_VSDPAYLOAD21 0x1047
++#define HDMI_FC_VSDPAYLOAD22 0x1048
++#define HDMI_FC_VSDPAYLOAD23 0x1049
++#define HDMI_FC_SPDVENDORNAME0 0x104A
++#define HDMI_FC_SPDVENDORNAME1 0x104B
++#define HDMI_FC_SPDVENDORNAME2 0x104C
++#define HDMI_FC_SPDVENDORNAME3 0x104D
++#define HDMI_FC_SPDVENDORNAME4 0x104E
++#define HDMI_FC_SPDVENDORNAME5 0x104F
++#define HDMI_FC_SPDVENDORNAME6 0x1050
++#define HDMI_FC_SPDVENDORNAME7 0x1051
++#define HDMI_FC_SDPPRODUCTNAME0 0x1052
++#define HDMI_FC_SDPPRODUCTNAME1 0x1053
++#define HDMI_FC_SDPPRODUCTNAME2 0x1054
++#define HDMI_FC_SDPPRODUCTNAME3 0x1055
++#define HDMI_FC_SDPPRODUCTNAME4 0x1056
++#define HDMI_FC_SDPPRODUCTNAME5 0x1057
++#define HDMI_FC_SDPPRODUCTNAME6 0x1058
++#define HDMI_FC_SDPPRODUCTNAME7 0x1059
++#define HDMI_FC_SDPPRODUCTNAME8 0x105A
++#define HDMI_FC_SDPPRODUCTNAME9 0x105B
++#define HDMI_FC_SDPPRODUCTNAME10 0x105C
++#define HDMI_FC_SDPPRODUCTNAME11 0x105D
++#define HDMI_FC_SDPPRODUCTNAME12 0x105E
++#define HDMI_FC_SDPPRODUCTNAME13 0x105F
++#define HDMI_FC_SDPPRODUCTNAME14 0x1060
++#define HDMI_FC_SPDPRODUCTNAME15 0x1061
++#define HDMI_FC_SPDDEVICEINF 0x1062
++#define HDMI_FC_AUDSCONF 0x1063
++#define HDMI_FC_AUDSSTAT 0x1064
++#define HDMI_FC_DATACH0FILL 0x1070
++#define HDMI_FC_DATACH1FILL 0x1071
++#define HDMI_FC_DATACH2FILL 0x1072
++#define HDMI_FC_CTRLQHIGH 0x1073
++#define HDMI_FC_CTRLQLOW 0x1074
++#define HDMI_FC_ACP0 0x1075
++#define HDMI_FC_ACP28 0x1076
++#define HDMI_FC_ACP27 0x1077
++#define HDMI_FC_ACP26 0x1078
++#define HDMI_FC_ACP25 0x1079
++#define HDMI_FC_ACP24 0x107A
++#define HDMI_FC_ACP23 0x107B
++#define HDMI_FC_ACP22 0x107C
++#define HDMI_FC_ACP21 0x107D
++#define HDMI_FC_ACP20 0x107E
++#define HDMI_FC_ACP19 0x107F
++#define HDMI_FC_ACP18 0x1080
++#define HDMI_FC_ACP17 0x1081
++#define HDMI_FC_ACP16 0x1082
++#define HDMI_FC_ACP15 0x1083
++#define HDMI_FC_ACP14 0x1084
++#define HDMI_FC_ACP13 0x1085
++#define HDMI_FC_ACP12 0x1086
++#define HDMI_FC_ACP11 0x1087
++#define HDMI_FC_ACP10 0x1088
++#define HDMI_FC_ACP9 0x1089
++#define HDMI_FC_ACP8 0x108A
++#define HDMI_FC_ACP7 0x108B
++#define HDMI_FC_ACP6 0x108C
++#define HDMI_FC_ACP5 0x108D
++#define HDMI_FC_ACP4 0x108E
++#define HDMI_FC_ACP3 0x108F
++#define HDMI_FC_ACP2 0x1090
++#define HDMI_FC_ACP1 0x1091
++#define HDMI_FC_ISCR1_0 0x1092
++#define HDMI_FC_ISCR1_16 0x1093
++#define HDMI_FC_ISCR1_15 0x1094
++#define HDMI_FC_ISCR1_14 0x1095
++#define HDMI_FC_ISCR1_13 0x1096
++#define HDMI_FC_ISCR1_12 0x1097
++#define HDMI_FC_ISCR1_11 0x1098
++#define HDMI_FC_ISCR1_10 0x1099
++#define HDMI_FC_ISCR1_9 0x109A
++#define HDMI_FC_ISCR1_8 0x109B
++#define HDMI_FC_ISCR1_7 0x109C
++#define HDMI_FC_ISCR1_6 0x109D
++#define HDMI_FC_ISCR1_5 0x109E
++#define HDMI_FC_ISCR1_4 0x109F
++#define HDMI_FC_ISCR1_3 0x10A0
++#define HDMI_FC_ISCR1_2 0x10A1
++#define HDMI_FC_ISCR1_1 0x10A2
++#define HDMI_FC_ISCR2_15 0x10A3
++#define HDMI_FC_ISCR2_14 0x10A4
++#define HDMI_FC_ISCR2_13 0x10A5
++#define HDMI_FC_ISCR2_12 0x10A6
++#define HDMI_FC_ISCR2_11 0x10A7
++#define HDMI_FC_ISCR2_10 0x10A8
++#define HDMI_FC_ISCR2_9 0x10A9
++#define HDMI_FC_ISCR2_8 0x10AA
++#define HDMI_FC_ISCR2_7 0x10AB
++#define HDMI_FC_ISCR2_6 0x10AC
++#define HDMI_FC_ISCR2_5 0x10AD
++#define HDMI_FC_ISCR2_4 0x10AE
++#define HDMI_FC_ISCR2_3 0x10AF
++#define HDMI_FC_ISCR2_2 0x10B0
++#define HDMI_FC_ISCR2_1 0x10B1
++#define HDMI_FC_ISCR2_0 0x10B2
++#define HDMI_FC_DATAUTO0 0x10B3
++#define HDMI_FC_DATAUTO1 0x10B4
++#define HDMI_FC_DATAUTO2 0x10B5
++#define HDMI_FC_DATMAN 0x10B6
++#define HDMI_FC_DATAUTO3 0x10B7
++#define HDMI_FC_RDRB0 0x10B8
++#define HDMI_FC_RDRB1 0x10B9
++#define HDMI_FC_RDRB2 0x10BA
++#define HDMI_FC_RDRB3 0x10BB
++#define HDMI_FC_RDRB4 0x10BC
++#define HDMI_FC_RDRB5 0x10BD
++#define HDMI_FC_RDRB6 0x10BE
++#define HDMI_FC_RDRB7 0x10BF
++#define HDMI_FC_STAT0 0x10D0
++#define HDMI_FC_INT0 0x10D1
++#define HDMI_FC_MASK0 0x10D2
++#define HDMI_FC_POL0 0x10D3
++#define HDMI_FC_STAT1 0x10D4
++#define HDMI_FC_INT1 0x10D5
++#define HDMI_FC_MASK1 0x10D6
++#define HDMI_FC_POL1 0x10D7
++#define HDMI_FC_STAT2 0x10D8
++#define HDMI_FC_INT2 0x10D9
++#define HDMI_FC_MASK2 0x10DA
++#define HDMI_FC_POL2 0x10DB
++#define HDMI_FC_PRCONF 0x10E0
++
++#define HDMI_FC_GMD_STAT 0x1100
++#define HDMI_FC_GMD_EN 0x1101
++#define HDMI_FC_GMD_UP 0x1102
++#define HDMI_FC_GMD_CONF 0x1103
++#define HDMI_FC_GMD_HB 0x1104
++#define HDMI_FC_GMD_PB0 0x1105
++#define HDMI_FC_GMD_PB1 0x1106
++#define HDMI_FC_GMD_PB2 0x1107
++#define HDMI_FC_GMD_PB3 0x1108
++#define HDMI_FC_GMD_PB4 0x1109
++#define HDMI_FC_GMD_PB5 0x110A
++#define HDMI_FC_GMD_PB6 0x110B
++#define HDMI_FC_GMD_PB7 0x110C
++#define HDMI_FC_GMD_PB8 0x110D
++#define HDMI_FC_GMD_PB9 0x110E
++#define HDMI_FC_GMD_PB10 0x110F
++#define HDMI_FC_GMD_PB11 0x1110
++#define HDMI_FC_GMD_PB12 0x1111
++#define HDMI_FC_GMD_PB13 0x1112
++#define HDMI_FC_GMD_PB14 0x1113
++#define HDMI_FC_GMD_PB15 0x1114
++#define HDMI_FC_GMD_PB16 0x1115
++#define HDMI_FC_GMD_PB17 0x1116
++#define HDMI_FC_GMD_PB18 0x1117
++#define HDMI_FC_GMD_PB19 0x1118
++#define HDMI_FC_GMD_PB20 0x1119
++#define HDMI_FC_GMD_PB21 0x111A
++#define HDMI_FC_GMD_PB22 0x111B
++#define HDMI_FC_GMD_PB23 0x111C
++#define HDMI_FC_GMD_PB24 0x111D
++#define HDMI_FC_GMD_PB25 0x111E
++#define HDMI_FC_GMD_PB26 0x111F
++#define HDMI_FC_GMD_PB27 0x1120
++
++#define HDMI_FC_DBGFORCE 0x1200
++#define HDMI_FC_DBGAUD0CH0 0x1201
++#define HDMI_FC_DBGAUD1CH0 0x1202
++#define HDMI_FC_DBGAUD2CH0 0x1203
++#define HDMI_FC_DBGAUD0CH1 0x1204
++#define HDMI_FC_DBGAUD1CH1 0x1205
++#define HDMI_FC_DBGAUD2CH1 0x1206
++#define HDMI_FC_DBGAUD0CH2 0x1207
++#define HDMI_FC_DBGAUD1CH2 0x1208
++#define HDMI_FC_DBGAUD2CH2 0x1209
++#define HDMI_FC_DBGAUD0CH3 0x120A
++#define HDMI_FC_DBGAUD1CH3 0x120B
++#define HDMI_FC_DBGAUD2CH3 0x120C
++#define HDMI_FC_DBGAUD0CH4 0x120D
++#define HDMI_FC_DBGAUD1CH4 0x120E
++#define HDMI_FC_DBGAUD2CH4 0x120F
++#define HDMI_FC_DBGAUD0CH5 0x1210
++#define HDMI_FC_DBGAUD1CH5 0x1211
++#define HDMI_FC_DBGAUD2CH5 0x1212
++#define HDMI_FC_DBGAUD0CH6 0x1213
++#define HDMI_FC_DBGAUD1CH6 0x1214
++#define HDMI_FC_DBGAUD2CH6 0x1215
++#define HDMI_FC_DBGAUD0CH7 0x1216
++#define HDMI_FC_DBGAUD1CH7 0x1217
++#define HDMI_FC_DBGAUD2CH7 0x1218
++#define HDMI_FC_DBGTMDS0 0x1219
++#define HDMI_FC_DBGTMDS1 0x121A
++#define HDMI_FC_DBGTMDS2 0x121B
++
++/* HDMI Source PHY Registers */
++#define HDMI_PHY_CONF0 0x3000
++#define HDMI_PHY_TST0 0x3001
++#define HDMI_PHY_TST1 0x3002
++#define HDMI_PHY_TST2 0x3003
++#define HDMI_PHY_STAT0 0x3004
++#define HDMI_PHY_INT0 0x3005
++#define HDMI_PHY_MASK0 0x3006
++#define HDMI_PHY_POL0 0x3007
++
++/* HDMI Master PHY Registers */
++#define HDMI_PHY_I2CM_SLAVE_ADDR 0x3020
++#define HDMI_PHY_I2CM_ADDRESS_ADDR 0x3021
++#define HDMI_PHY_I2CM_DATAO_1_ADDR 0x3022
++#define HDMI_PHY_I2CM_DATAO_0_ADDR 0x3023
++#define HDMI_PHY_I2CM_DATAI_1_ADDR 0x3024
++#define HDMI_PHY_I2CM_DATAI_0_ADDR 0x3025
++#define HDMI_PHY_I2CM_OPERATION_ADDR 0x3026
++#define HDMI_PHY_I2CM_INT_ADDR 0x3027
++#define HDMI_PHY_I2CM_CTLINT_ADDR 0x3028
++#define HDMI_PHY_I2CM_DIV_ADDR 0x3029
++#define HDMI_PHY_I2CM_SOFTRSTZ_ADDR 0x302a
++#define HDMI_PHY_I2CM_SS_SCL_HCNT_1_ADDR 0x302b
++#define HDMI_PHY_I2CM_SS_SCL_HCNT_0_ADDR 0x302c
++#define HDMI_PHY_I2CM_SS_SCL_LCNT_1_ADDR 0x302d
++#define HDMI_PHY_I2CM_SS_SCL_LCNT_0_ADDR 0x302e
++#define HDMI_PHY_I2CM_FS_SCL_HCNT_1_ADDR 0x302f
++#define HDMI_PHY_I2CM_FS_SCL_HCNT_0_ADDR 0x3030
++#define HDMI_PHY_I2CM_FS_SCL_LCNT_1_ADDR 0x3031
++#define HDMI_PHY_I2CM_FS_SCL_LCNT_0_ADDR 0x3032
++
++/* Audio Sampler Registers */
++#define HDMI_AUD_CONF0 0x3100
++#define HDMI_AUD_CONF1 0x3101
++#define HDMI_AUD_INT 0x3102
++#define HDMI_AUD_CONF2 0x3103
++#define HDMI_AUD_N1 0x3200
++#define HDMI_AUD_N2 0x3201
++#define HDMI_AUD_N3 0x3202
++#define HDMI_AUD_CTS1 0x3203
++#define HDMI_AUD_CTS2 0x3204
++#define HDMI_AUD_CTS3 0x3205
++#define HDMI_AUD_INPUTCLKFS 0x3206
++#define HDMI_AUD_SPDIFINT 0x3302
++#define HDMI_AUD_CONF0_HBR 0x3400
++#define HDMI_AUD_HBR_STATUS 0x3401
++#define HDMI_AUD_HBR_INT 0x3402
++#define HDMI_AUD_HBR_POL 0x3403
++#define HDMI_AUD_HBR_MASK 0x3404
++
++/* Generic Parallel Audio Interface Registers */
++/* Not used as GPAUD interface is not enabled in hw */
++#define HDMI_GP_CONF0 0x3500
++#define HDMI_GP_CONF1 0x3501
++#define HDMI_GP_CONF2 0x3502
++#define HDMI_GP_STAT 0x3503
++#define HDMI_GP_INT 0x3504
++#define HDMI_GP_MASK 0x3505
++#define HDMI_GP_POL 0x3506
++
++/* Audio DMA Registers */
++#define HDMI_AHB_DMA_CONF0 0x3600
++#define HDMI_AHB_DMA_START 0x3601
++#define HDMI_AHB_DMA_STOP 0x3602
++#define HDMI_AHB_DMA_THRSLD 0x3603
++#define HDMI_AHB_DMA_STRADDR0 0x3604
++#define HDMI_AHB_DMA_STRADDR1 0x3605
++#define HDMI_AHB_DMA_STRADDR2 0x3606
++#define HDMI_AHB_DMA_STRADDR3 0x3607
++#define HDMI_AHB_DMA_STPADDR0 0x3608
++#define HDMI_AHB_DMA_STPADDR1 0x3609
++#define HDMI_AHB_DMA_STPADDR2 0x360a
++#define HDMI_AHB_DMA_STPADDR3 0x360b
++#define HDMI_AHB_DMA_BSTADDR0 0x360c
++#define HDMI_AHB_DMA_BSTADDR1 0x360d
++#define HDMI_AHB_DMA_BSTADDR2 0x360e
++#define HDMI_AHB_DMA_BSTADDR3 0x360f
++#define HDMI_AHB_DMA_MBLENGTH0 0x3610
++#define HDMI_AHB_DMA_MBLENGTH1 0x3611
++#define HDMI_AHB_DMA_STAT 0x3612
++#define HDMI_AHB_DMA_INT 0x3613
++#define HDMI_AHB_DMA_MASK 0x3614
++#define HDMI_AHB_DMA_POL 0x3615
++#define HDMI_AHB_DMA_CONF1 0x3616
++#define HDMI_AHB_DMA_BUFFSTAT 0x3617
++#define HDMI_AHB_DMA_BUFFINT 0x3618
++#define HDMI_AHB_DMA_BUFFMASK 0x3619
++#define HDMI_AHB_DMA_BUFFPOL 0x361a
++
++/* Main Controller Registers */
++#define HDMI_MC_SFRDIV 0x4000
++#define HDMI_MC_CLKDIS 0x4001
++#define HDMI_MC_SWRSTZ 0x4002
++#define HDMI_MC_OPCTRL 0x4003
++#define HDMI_MC_FLOWCTRL 0x4004
++#define HDMI_MC_PHYRSTZ 0x4005
++#define HDMI_MC_LOCKONCLOCK 0x4006
++#define HDMI_MC_HEACPHY_RST 0x4007
++
++/* Color Space Converter Registers */
++#define HDMI_CSC_CFG 0x4100
++#define HDMI_CSC_SCALE 0x4101
++#define HDMI_CSC_COEF_A1_MSB 0x4102
++#define HDMI_CSC_COEF_A1_LSB 0x4103
++#define HDMI_CSC_COEF_A2_MSB 0x4104
++#define HDMI_CSC_COEF_A2_LSB 0x4105
++#define HDMI_CSC_COEF_A3_MSB 0x4106
++#define HDMI_CSC_COEF_A3_LSB 0x4107
++#define HDMI_CSC_COEF_A4_MSB 0x4108
++#define HDMI_CSC_COEF_A4_LSB 0x4109
++#define HDMI_CSC_COEF_B1_MSB 0x410A
++#define HDMI_CSC_COEF_B1_LSB 0x410B
++#define HDMI_CSC_COEF_B2_MSB 0x410C
++#define HDMI_CSC_COEF_B2_LSB 0x410D
++#define HDMI_CSC_COEF_B3_MSB 0x410E
++#define HDMI_CSC_COEF_B3_LSB 0x410F
++#define HDMI_CSC_COEF_B4_MSB 0x4110
++#define HDMI_CSC_COEF_B4_LSB 0x4111
++#define HDMI_CSC_COEF_C1_MSB 0x4112
++#define HDMI_CSC_COEF_C1_LSB 0x4113
++#define HDMI_CSC_COEF_C2_MSB 0x4114
++#define HDMI_CSC_COEF_C2_LSB 0x4115
++#define HDMI_CSC_COEF_C3_MSB 0x4116
++#define HDMI_CSC_COEF_C3_LSB 0x4117
++#define HDMI_CSC_COEF_C4_MSB 0x4118
++#define HDMI_CSC_COEF_C4_LSB 0x4119
++
++/* HDCP Interrupt Registers */
++#define HDMI_A_APIINTCLR 0x5006
++#define HDMI_A_APIINTSTAT 0x5007
++#define HDMI_A_APIINTMSK 0x5008
++
++/* CEC Engine Registers */
++#define HDMI_CEC_CTRL 0x7D00
++#define HDMI_CEC_STAT 0x7D01
++#define HDMI_CEC_MASK 0x7D02
++#define HDMI_CEC_POLARITY 0x7D03
++#define HDMI_CEC_INT 0x7D04
++#define HDMI_CEC_ADDR_L 0x7D05
++#define HDMI_CEC_ADDR_H 0x7D06
++#define HDMI_CEC_TX_CNT 0x7D07
++#define HDMI_CEC_RX_CNT 0x7D08
++#define HDMI_CEC_TX_DATA0 0x7D10
++#define HDMI_CEC_TX_DATA1 0x7D11
++#define HDMI_CEC_TX_DATA2 0x7D12
++#define HDMI_CEC_TX_DATA3 0x7D13
++#define HDMI_CEC_TX_DATA4 0x7D14
++#define HDMI_CEC_TX_DATA5 0x7D15
++#define HDMI_CEC_TX_DATA6 0x7D16
++#define HDMI_CEC_TX_DATA7 0x7D17
++#define HDMI_CEC_TX_DATA8 0x7D18
++#define HDMI_CEC_TX_DATA9 0x7D19
++#define HDMI_CEC_TX_DATA10 0x7D1a
++#define HDMI_CEC_TX_DATA11 0x7D1b
++#define HDMI_CEC_TX_DATA12 0x7D1c
++#define HDMI_CEC_TX_DATA13 0x7D1d
++#define HDMI_CEC_TX_DATA14 0x7D1e
++#define HDMI_CEC_TX_DATA15 0x7D1f
++#define HDMI_CEC_RX_DATA0 0x7D20
++#define HDMI_CEC_RX_DATA1 0x7D21
++#define HDMI_CEC_RX_DATA2 0x7D22
++#define HDMI_CEC_RX_DATA3 0x7D23
++#define HDMI_CEC_RX_DATA4 0x7D24
++#define HDMI_CEC_RX_DATA5 0x7D25
++#define HDMI_CEC_RX_DATA6 0x7D26
++#define HDMI_CEC_RX_DATA7 0x7D27
++#define HDMI_CEC_RX_DATA8 0x7D28
++#define HDMI_CEC_RX_DATA9 0x7D29
++#define HDMI_CEC_RX_DATA10 0x7D2a
++#define HDMI_CEC_RX_DATA11 0x7D2b
++#define HDMI_CEC_RX_DATA12 0x7D2c
++#define HDMI_CEC_RX_DATA13 0x7D2d
++#define HDMI_CEC_RX_DATA14 0x7D2e
++#define HDMI_CEC_RX_DATA15 0x7D2f
++#define HDMI_CEC_LOCK 0x7D30
++#define HDMI_CEC_WKUPCTRL 0x7D31
++
++/* I2C Master Registers (E-DDC) */
++#define HDMI_I2CM_SLAVE 0x7E00
++#define HDMI_I2CM_ADDRESS 0x7E01
++#define HDMI_I2CM_DATAO 0x7E02
++#define HDMI_I2CM_DATAI 0x7E03
++#define HDMI_I2CM_OPERATION 0x7E04
++#define HDMI_I2CM_INT 0x7E05
++#define HDMI_I2CM_CTLINT 0x7E06
++#define HDMI_I2CM_DIV 0x7E07
++#define HDMI_I2CM_SEGADDR 0x7E08
++#define HDMI_I2CM_SOFTRSTZ 0x7E09
++#define HDMI_I2CM_SEGPTR 0x7E0A
++#define HDMI_I2CM_SS_SCL_HCNT_1_ADDR 0x7E0B
++#define HDMI_I2CM_SS_SCL_HCNT_0_ADDR 0x7E0C
++#define HDMI_I2CM_SS_SCL_LCNT_1_ADDR 0x7E0D
++#define HDMI_I2CM_SS_SCL_LCNT_0_ADDR 0x7E0E
++#define HDMI_I2CM_FS_SCL_HCNT_1_ADDR 0x7E0F
++#define HDMI_I2CM_FS_SCL_HCNT_0_ADDR 0x7E10
++#define HDMI_I2CM_FS_SCL_LCNT_1_ADDR 0x7E11
++#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
++
++/* Random Number Generator Registers (RNG) */
++#define HDMI_RNG_BASE 0x8000
++
++
++/*
++ * Register field definitions
++ */
++enum {
++/* IH_FC_INT2 field values */
++ HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_FC_STAT2 field values */
++ HDMI_IH_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_PHY_STAT0 field values */
++ HDMI_IH_PHY_STAT0_RX_SENSE3 = 0x20,
++ HDMI_IH_PHY_STAT0_RX_SENSE2 = 0x10,
++ HDMI_IH_PHY_STAT0_RX_SENSE1 = 0x8,
++ HDMI_IH_PHY_STAT0_RX_SENSE0 = 0x4,
++ HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
++ HDMI_IH_PHY_STAT0_HPD = 0x1,
++
++/* IH_CEC_STAT0 field values */
++ HDMI_IH_CEC_STAT0_WAKEUP = 0x40,
++ HDMI_IH_CEC_STAT0_ERROR_FOLL = 0x20,
++ HDMI_IH_CEC_STAT0_ERROR_INIT = 0x10,
++ HDMI_IH_CEC_STAT0_ARB_LOST = 0x8,
++ HDMI_IH_CEC_STAT0_NACK = 0x4,
++ HDMI_IH_CEC_STAT0_EOM = 0x2,
++ HDMI_IH_CEC_STAT0_DONE = 0x1,
++
++
++/* IH_MUTE_I2CMPHY_STAT0 field values */
++ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
++ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
++
++/* IH_PHY_STAT0 field values */
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE3 = 0x20,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE2 = 0x10,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE1 = 0x8,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE0 = 0x4,
++ HDMI_IH_MUTE_PHY_STAT0_TX_PHY_LOCK = 0x2,
++ HDMI_IH_MUTE_PHY_STAT0_HPD = 0x1,
++
++/* IH and IH_MUTE convenience macro RX_SENSE | HPD*/
++ HDMI_DVI_IH_STAT = 0x3D,
++
++
++/* IH_AHBDMAAUD_STAT0 field values */
++ HDMI_IH_AHBDMAAUD_STAT0_ERROR = 0x20,
++ HDMI_IH_AHBDMAAUD_STAT0_LOST = 0x10,
++ HDMI_IH_AHBDMAAUD_STAT0_RETRY = 0x08,
++ HDMI_IH_AHBDMAAUD_STAT0_DONE = 0x04,
++ HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
++ HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
++
++/* IH_MUTE_FC_STAT2 field values */
++ HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_MUTE_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_MUTE_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_MUTE_AHBDMAAUD_STAT0 field values */
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = 0x20,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = 0x10,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = 0x08,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = 0x04,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
++
++/* IH_MUTE field values */
++ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT = 0x2,
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT = 0x1,
++
++/* TX_INVID0 field values */
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_MASK = 0x80,
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_ENABLE = 0x80,
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE = 0x00,
++ HDMI_TX_INVID0_VIDEO_MAPPING_MASK = 0x1F,
++ HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET = 0,
++
++/* TX_INSTUFFING field values */
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_MASK = 0x4,
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE = 0x4,
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_DISABLE = 0x0,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_MASK = 0x2,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE = 0x2,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_DISABLE = 0x0,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_MASK = 0x1,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE = 0x1,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_DISABLE = 0x0,
++
++/* VP_PR_CD field values */
++ HDMI_VP_PR_CD_COLOR_DEPTH_MASK = 0xF0,
++ HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET = 4,
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK = 0x0F,
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET = 0,
++
++/* VP_STUFF field values */
++ HDMI_VP_STUFF_IDEFAULT_PHASE_MASK = 0x20,
++ HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET = 5,
++ HDMI_VP_STUFF_IFIX_PP_TO_LAST_MASK = 0x10,
++ HDMI_VP_STUFF_IFIX_PP_TO_LAST_OFFSET = 4,
++ HDMI_VP_STUFF_ICX_GOTO_P0_ST_MASK = 0x8,
++ HDMI_VP_STUFF_ICX_GOTO_P0_ST_OFFSET = 3,
++ HDMI_VP_STUFF_YCC422_STUFFING_MASK = 0x4,
++ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE = 0x4,
++ HDMI_VP_STUFF_YCC422_STUFFING_DIRECT_MODE = 0x0,
++ HDMI_VP_STUFF_PP_STUFFING_MASK = 0x2,
++ HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE = 0x2,
++ HDMI_VP_STUFF_PP_STUFFING_DIRECT_MODE = 0x0,
++ HDMI_VP_STUFF_PR_STUFFING_MASK = 0x1,
++ HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE = 0x1,
++ HDMI_VP_STUFF_PR_STUFFING_DIRECT_MODE = 0x0,
++
++/* VP_CONF field values */
++ HDMI_VP_CONF_BYPASS_EN_MASK = 0x40,
++ HDMI_VP_CONF_BYPASS_EN_ENABLE = 0x40,
++ HDMI_VP_CONF_BYPASS_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_PP_EN_ENMASK = 0x20,
++ HDMI_VP_CONF_PP_EN_ENABLE = 0x20,
++ HDMI_VP_CONF_PP_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_PR_EN_MASK = 0x10,
++ HDMI_VP_CONF_PR_EN_ENABLE = 0x10,
++ HDMI_VP_CONF_PR_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_YCC422_EN_MASK = 0x8,
++ HDMI_VP_CONF_YCC422_EN_ENABLE = 0x8,
++ HDMI_VP_CONF_YCC422_EN_DISABLE = 0x0,
++ HDMI_VP_CONF_BYPASS_SELECT_MASK = 0x4,
++ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER = 0x4,
++ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER = 0x0,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_MASK = 0x3,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS = 0x3,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422 = 0x1,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_PP = 0x0,
++
++/* VP_REMAP field values */
++ HDMI_VP_REMAP_MASK = 0x3,
++ HDMI_VP_REMAP_YCC422_24bit = 0x2,
++ HDMI_VP_REMAP_YCC422_20bit = 0x1,
++ HDMI_VP_REMAP_YCC422_16bit = 0x0,
++
++/* FC_INVIDCONF field values */
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_MASK = 0x40,
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH = 0x40,
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_MASK = 0x20,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH = 0x20,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_MASK = 0x10,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH = 0x10,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_MASK = 0x8,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE = 0x8,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE = 0x0,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_MASK = 0x2,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH = 0x2,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW = 0x0,
++ HDMI_FC_INVIDCONF_IN_I_P_MASK = 0x1,
++ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED = 0x1,
++ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE = 0x0,
++
++/* FC_AUDICONF0 field values */
++ HDMI_FC_AUDICONF0_CC_OFFSET = 4,
++ HDMI_FC_AUDICONF0_CC_MASK = 0x70,
++ HDMI_FC_AUDICONF0_CT_OFFSET = 0,
++ HDMI_FC_AUDICONF0_CT_MASK = 0xF,
++
++/* FC_AUDICONF1 field values */
++ HDMI_FC_AUDICONF1_SS_OFFSET = 3,
++ HDMI_FC_AUDICONF1_SS_MASK = 0x18,
++ HDMI_FC_AUDICONF1_SF_OFFSET = 0,
++ HDMI_FC_AUDICONF1_SF_MASK = 0x7,
++
++/* FC_AUDICONF3 field values */
++ HDMI_FC_AUDICONF3_LFEPBL_OFFSET = 5,
++ HDMI_FC_AUDICONF3_LFEPBL_MASK = 0x60,
++ HDMI_FC_AUDICONF3_DM_INH_OFFSET = 4,
++ HDMI_FC_AUDICONF3_DM_INH_MASK = 0x10,
++ HDMI_FC_AUDICONF3_LSV_OFFSET = 0,
++ HDMI_FC_AUDICONF3_LSV_MASK = 0xF,
++
++/* FC_AUDSCHNLS0 field values */
++ HDMI_FC_AUDSCHNLS0_CGMSA_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS0_CGMSA_MASK = 0x30,
++ HDMI_FC_AUDSCHNLS0_COPYRIGHT_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS0_COPYRIGHT_MASK = 0x01,
++
++/* FC_AUDSCHNLS3-6 field values */
++ HDMI_FC_AUDSCHNLS3_OIEC_CH0_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH0_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH1_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH1_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH2_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH2_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH3_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH3_MASK = 0xf0,
++
++ HDMI_FC_AUDSCHNLS5_OIEC_CH0_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH0_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH1_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH1_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH2_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH2_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH3_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH3_MASK = 0xf0,
++
++/* HDMI_FC_AUDSCHNLS7 field values */
++ HDMI_FC_AUDSCHNLS7_ACCURACY_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS7_ACCURACY_MASK = 0x30,
++
++/* HDMI_FC_AUDSCHNLS8 field values */
++ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_OFFSET = 0,
++
++/* FC_AUDSCONF field values */
++ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_MASK = 0xF0,
++ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_OFFSET = 4,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK = 0x1,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_OFFSET = 0,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1 = 0x1,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0 = 0x0,
++
++/* FC_STAT2 field values */
++ HDMI_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_INT2 field values */
++ HDMI_FC_INT2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_MASK2 field values */
++ HDMI_FC_MASK2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_MASK2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_MASK2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_PRCONF field values */
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK = 0xF0,
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET = 4,
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
++
++/* FC_AVICONF0-FC_AVICONF3 field values */
++ HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
++ HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
++ HDMI_FC_AVICONF0_PIX_FMT_YCBCR422 = 0x01,
++ HDMI_FC_AVICONF0_PIX_FMT_YCBCR444 = 0x02,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_MASK = 0x40,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT = 0x40,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_NO_INFO = 0x00,
++ HDMI_FC_AVICONF0_BAR_DATA_MASK = 0x0C,
++ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA = 0x00,
++ HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR = 0x04,
++ HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR = 0x08,
++ HDMI_FC_AVICONF0_BAR_DATA_VERT_HORIZ_BAR = 0x0C,
++ HDMI_FC_AVICONF0_SCAN_INFO_MASK = 0x30,
++ HDMI_FC_AVICONF0_SCAN_INFO_OVERSCAN = 0x10,
++ HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN = 0x20,
++ HDMI_FC_AVICONF0_SCAN_INFO_NODATA = 0x00,
++
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_MASK = 0x0F,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_USE_CODED = 0x08,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3 = 0x09,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9 = 0x0A,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_14_9 = 0x0B,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_MASK = 0x30,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_NO_DATA = 0x00,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3 = 0x10,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9 = 0x20,
++ HDMI_FC_AVICONF1_COLORIMETRY_MASK = 0xC0,
++ HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA = 0x00,
++ HDMI_FC_AVICONF1_COLORIMETRY_SMPTE = 0x40,
++ HDMI_FC_AVICONF1_COLORIMETRY_ITUR = 0x80,
++ HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO = 0xC0,
++
++ HDMI_FC_AVICONF2_SCALING_MASK = 0x03,
++ HDMI_FC_AVICONF2_SCALING_NONE = 0x00,
++ HDMI_FC_AVICONF2_SCALING_HORIZ = 0x01,
++ HDMI_FC_AVICONF2_SCALING_VERT = 0x02,
++ HDMI_FC_AVICONF2_SCALING_HORIZ_VERT = 0x03,
++ HDMI_FC_AVICONF2_RGB_QUANT_MASK = 0x0C,
++ HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT = 0x00,
++ HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE = 0x04,
++ HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE = 0x08,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_MASK = 0x70,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601 = 0x00,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709 = 0x10,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_SYCC601 = 0x20,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_YCC601 = 0x30,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_RGB = 0x40,
++ HDMI_FC_AVICONF2_IT_CONTENT_MASK = 0x80,
++ HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA = 0x00,
++ HDMI_FC_AVICONF2_IT_CONTENT_VALID = 0x80,
++
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_MASK = 0x03,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS = 0x00,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_PHOTO = 0x01,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_CINEMA = 0x02,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GAME = 0x03,
++ HDMI_FC_AVICONF3_QUANT_RANGE_MASK = 0x0C,
++ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED = 0x00,
++ HDMI_FC_AVICONF3_QUANT_RANGE_FULL = 0x04,
++
++/* FC_DBGFORCE field values */
++ HDMI_FC_DBGFORCE_FORCEAUDIO = 0x10,
++ HDMI_FC_DBGFORCE_FORCEVIDEO = 0x1,
++
++/* PHY_CONF0 field values */
++ HDMI_PHY_CONF0_PDZ_MASK = 0x80,
++ HDMI_PHY_CONF0_PDZ_OFFSET = 7,
++ HDMI_PHY_CONF0_ENTMDS_MASK = 0x40,
++ HDMI_PHY_CONF0_ENTMDS_OFFSET = 6,
++ HDMI_PHY_CONF0_SPARECTRL = 0x20,
++ HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10,
++ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET = 3,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK = 0x4,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET = 2,
++ HDMI_PHY_CONF0_SELDATAENPOL_MASK = 0x2,
++ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET = 1,
++ HDMI_PHY_CONF0_SELDIPIF_MASK = 0x1,
++ HDMI_PHY_CONF0_SELDIPIF_OFFSET = 0,
++
++/* PHY_TST0 field values */
++ HDMI_PHY_TST0_TSTCLR_MASK = 0x20,
++ HDMI_PHY_TST0_TSTCLR_OFFSET = 5,
++ HDMI_PHY_TST0_TSTEN_MASK = 0x10,
++ HDMI_PHY_TST0_TSTEN_OFFSET = 4,
++ HDMI_PHY_TST0_TSTCLK_MASK = 0x1,
++ HDMI_PHY_TST0_TSTCLK_OFFSET = 0,
++
++/* PHY_STAT0 field values */
++ HDMI_PHY_RX_SENSE3 = 0x80,
++ HDMI_PHY_RX_SENSE2 = 0x40,
++ HDMI_PHY_RX_SENSE1 = 0x20,
++ HDMI_PHY_RX_SENSE0 = 0x10,
++ HDMI_PHY_HPD = 0x02,
++ HDMI_PHY_TX_PHY_LOCK = 0x01,
++
++/* HDMI STAT convenience RX_SENSE | HPD */
++ HDMI_DVI_STAT = 0xF2,
++
++/* PHY_I2CM_SLAVE_ADDR field values */
++ HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2 = 0x69,
++ HDMI_PHY_I2CM_SLAVE_ADDR_HEAC_PHY = 0x49,
++
++/* PHY_I2CM_OPERATION_ADDR field values */
++ HDMI_PHY_I2CM_OPERATION_ADDR_WRITE = 0x10,
++ HDMI_PHY_I2CM_OPERATION_ADDR_READ = 0x1,
++
++/* HDMI_PHY_I2CM_INT_ADDR */
++ HDMI_PHY_I2CM_INT_ADDR_DONE_POL = 0x08,
++ HDMI_PHY_I2CM_INT_ADDR_DONE_MASK = 0x04,
++
++/* HDMI_PHY_I2CM_CTLINT_ADDR */
++ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL = 0x80,
++ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_MASK = 0x40,
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08,
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04,
++
++/* AUD_CTS3 field values */
++ HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5,
++ HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0,
++ HDMI_AUD_CTS3_N_SHIFT_1 = 0,
++ HDMI_AUD_CTS3_N_SHIFT_16 = 0x20,
++ HDMI_AUD_CTS3_N_SHIFT_32 = 0x40,
++ HDMI_AUD_CTS3_N_SHIFT_64 = 0x60,
++ HDMI_AUD_CTS3_N_SHIFT_128 = 0x80,
++ HDMI_AUD_CTS3_N_SHIFT_256 = 0xa0,
++ /* note that the CTS3 MANUAL bit has been removed
++ from our part. Can't set it, will read as 0. */
++ HDMI_AUD_CTS3_CTS_MANUAL = 0x10,
++ HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f,
++
++/* AHB_DMA_CONF0 field values */
++ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7,
++ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80,
++ HDMI_AHB_DMA_CONF0_HBR_OFFSET = 4,
++ HDMI_AHB_DMA_CONF0_HBR_MASK = 0x10,
++ HDMI_AHB_DMA_CONF0_EN_HLOCK_OFFSET = 3,
++ HDMI_AHB_DMA_CONF0_EN_HLOCK_MASK = 0x08,
++ HDMI_AHB_DMA_CONF0_INCR_TYPE_OFFSET = 1,
++ HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK = 0x06,
++ HDMI_AHB_DMA_CONF0_INCR4 = 0x0,
++ HDMI_AHB_DMA_CONF0_INCR8 = 0x2,
++ HDMI_AHB_DMA_CONF0_INCR16 = 0x4,
++ HDMI_AHB_DMA_CONF0_BURST_MODE = 0x1,
++
++/* HDMI_AHB_DMA_START field values */
++ HDMI_AHB_DMA_START_START_OFFSET = 0,
++ HDMI_AHB_DMA_START_START_MASK = 0x01,
++
++/* HDMI_AHB_DMA_STOP field values */
++ HDMI_AHB_DMA_STOP_STOP_OFFSET = 0,
++ HDMI_AHB_DMA_STOP_STOP_MASK = 0x01,
++
++/* AHB_DMA_STAT, AHB_DMA_INT, AHB_DMA_MASK, AHB_DMA_POL field values */
++ HDMI_AHB_DMA_DONE = 0x80,
++ HDMI_AHB_DMA_RETRY_SPLIT = 0x40,
++ HDMI_AHB_DMA_LOSTOWNERSHIP = 0x20,
++ HDMI_AHB_DMA_ERROR = 0x10,
++ HDMI_AHB_DMA_FIFO_THREMPTY = 0x04,
++ HDMI_AHB_DMA_FIFO_FULL = 0x02,
++ HDMI_AHB_DMA_FIFO_EMPTY = 0x01,
++
++/* AHB_DMA_BUFFSTAT, AHB_DMA_BUFFINT, AHB_DMA_BUFFMASK, AHB_DMA_BUFFPOL field values */
++ HDMI_AHB_DMA_BUFFSTAT_FULL = 0x02,
++ HDMI_AHB_DMA_BUFFSTAT_EMPTY = 0x01,
++
++/* MC_CLKDIS field values */
++ HDMI_MC_CLKDIS_HDCPCLK_DISABLE = 0x40,
++ HDMI_MC_CLKDIS_CECCLK_DISABLE = 0x20,
++ HDMI_MC_CLKDIS_CSCCLK_DISABLE = 0x10,
++ HDMI_MC_CLKDIS_AUDCLK_DISABLE = 0x8,
++ HDMI_MC_CLKDIS_PREPCLK_DISABLE = 0x4,
++ HDMI_MC_CLKDIS_TMDSCLK_DISABLE = 0x2,
++ HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1,
++
++/* MC_SWRSTZ field values */
++ HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02,
++
++/* MC_FLOWCTRL field values */
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_MASK = 0x1,
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH = 0x1,
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS = 0x0,
++
++/* MC_PHYRSTZ field values */
++ HDMI_MC_PHYRSTZ_ASSERT = 0x0,
++ HDMI_MC_PHYRSTZ_DEASSERT = 0x1,
++
++/* MC_HEACPHY_RST field values */
++ HDMI_MC_HEACPHY_RST_ASSERT = 0x1,
++ HDMI_MC_HEACPHY_RST_DEASSERT = 0x0,
++
++/* CSC_CFG field values */
++ HDMI_CSC_CFG_INTMODE_MASK = 0x30,
++ HDMI_CSC_CFG_INTMODE_OFFSET = 4,
++ HDMI_CSC_CFG_INTMODE_DISABLE = 0x00,
++ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1 = 0x10,
++ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA2 = 0x20,
++ HDMI_CSC_CFG_DECMODE_MASK = 0x3,
++ HDMI_CSC_CFG_DECMODE_OFFSET = 0,
++ HDMI_CSC_CFG_DECMODE_DISABLE = 0x0,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA1 = 0x1,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA2 = 0x2,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3 = 0x3,
++
++/* CSC_SCALE field values */
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK = 0xF0,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP = 0x00,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP = 0x50,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP = 0x60,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP = 0x70,
++ HDMI_CSC_SCALE_CSCSCALE_MASK = 0x03,
++
++/* I2CM_OPERATION field values */
++ HDMI_I2CM_OPERATION_WRITE = 0x10,
++ HDMI_I2CM_OPERATION_READ_EXT = 0x2,
++ HDMI_I2CM_OPERATION_READ = 0x1,
++
++/* HDMI_I2CM_INT */
++ HDMI_I2CM_INT_DONE_POL = 0x08,
++ HDMI_I2CM_INT_DONE_MASK = 0x04,
++
++/* HDMI_I2CM_CTLINT */
++ HDMI_I2CM_CTLINT_NAC_POL = 0x80,
++ HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
++ HDMI_I2CM_CTLINT_ARBITRATION_POL = 0x08,
++ HDMI_I2CM_CTLINT_ARBITRATION_MASK = 0x04,
++
++};
++
++enum imx_hdmi_type {
++ IMX6DL_HDMI,
++ IMX6Q_HDMI,
++};
++
++/* IOCTL commands */
++#define HDMI_IOC_MAGIC 'H'
++
++#define HDMI_IOC_GET_RESOURCE _IO(HDMI_IOC_MAGIC, 0)
++#define HDMI_IOC_GET_CPU_TYPE _IO(HDMI_IOC_MAGIC, 1)
++
++
++#endif /* __MXC_HDMI_H__ */
+diff -Nur linux-3.14.36/init/main.c linux-openelec/init/main.c
+--- linux-3.14.36/init/main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/init/main.c 2015-07-24 18:03:29.948842002 -0500
+@@ -914,8 +914,14 @@
+ do_basic_setup();
+
+ /* Open the /dev/console on the rootfs, this should never fail */
+- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+- pr_err("Warning: unable to open an initial console.\n");
++ char *console = "/dev_console";
++
++ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) {
++ sys_mknod(console, S_IFCHR|0600, (TTYAUX_MAJOR<<8)|1);
++ if (sys_open(console, O_RDWR, 0) < 0)
++ printk(KERN_WARNING "Warning: unable to open an initial console.\n");
++ sys_unlink(console);
++ }
+
+ (void) sys_dup(0);
+ (void) sys_dup(0);
+diff -Nur linux-3.14.36/init/main.c.orig linux-openelec/init/main.c.orig
+--- linux-3.14.36/init/main.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/init/main.c.orig 2015-07-24 18:03:28.272842002 -0500
+@@ -0,0 +1,943 @@
++/*
++ * linux/init/main.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * GK 2/5/95 - Changed to support mounting root fs via NFS
++ * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
++ * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
++ * Simplified starting of init: Michael A. Griffith <grif@acm.org>
++ */
++
++#define DEBUG /* Enable initcall_debug */
++
++#include <linux/types.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/kernel.h>
++#include <linux/syscalls.h>
++#include <linux/stackprotector.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/initrd.h>
++#include <linux/bootmem.h>
++#include <linux/acpi.h>
++#include <linux/tty.h>
++#include <linux/percpu.h>
++#include <linux/kmod.h>
++#include <linux/vmalloc.h>
++#include <linux/kernel_stat.h>
++#include <linux/start_kernel.h>
++#include <linux/security.h>
++#include <linux/smp.h>
++#include <linux/profile.h>
++#include <linux/rcupdate.h>
++#include <linux/moduleparam.h>
++#include <linux/kallsyms.h>
++#include <linux/writeback.h>
++#include <linux/cpu.h>
++#include <linux/cpuset.h>
++#include <linux/cgroup.h>
++#include <linux/efi.h>
++#include <linux/tick.h>
++#include <linux/interrupt.h>
++#include <linux/taskstats_kern.h>
++#include <linux/delayacct.h>
++#include <linux/unistd.h>
++#include <linux/rmap.h>
++#include <linux/mempolicy.h>
++#include <linux/key.h>
++#include <linux/buffer_head.h>
++#include <linux/page_cgroup.h>
++#include <linux/debug_locks.h>
++#include <linux/debugobjects.h>
++#include <linux/lockdep.h>
++#include <linux/kmemleak.h>
++#include <linux/pid_namespace.h>
++#include <linux/device.h>
++#include <linux/kthread.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#include <linux/idr.h>
++#include <linux/kgdb.h>
++#include <linux/ftrace.h>
++#include <linux/async.h>
++#include <linux/kmemcheck.h>
++#include <linux/sfi.h>
++#include <linux/shmem_fs.h>
++#include <linux/slab.h>
++#include <linux/perf_event.h>
++#include <linux/file.h>
++#include <linux/ptrace.h>
++#include <linux/blkdev.h>
++#include <linux/elevator.h>
++#include <linux/sched_clock.h>
++#include <linux/context_tracking.h>
++#include <linux/random.h>
++
++#include <asm/io.h>
++#include <asm/bugs.h>
++#include <asm/setup.h>
++#include <asm/sections.h>
++#include <asm/cacheflush.h>
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <asm/smp.h>
++#endif
++
++static int kernel_init(void *);
++
++extern void init_IRQ(void);
++extern void fork_init(unsigned long);
++extern void radix_tree_init(void);
++#ifndef CONFIG_DEBUG_RODATA
++static inline void mark_rodata_ro(void) { }
++#endif
++
++/*
++ * Debug helper: via this flag we know that we are in 'early bootup code'
++ * where only the boot processor is running with IRQ disabled. This means
++ * two things - IRQ must not be enabled before the flag is cleared and some
++ * operations which are not allowed with IRQ disabled are allowed while the
++ * flag is set.
++ */
++bool early_boot_irqs_disabled __read_mostly;
++
++enum system_states system_state __read_mostly;
++EXPORT_SYMBOL(system_state);
++
++/*
++ * Boot command-line arguments
++ */
++#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
++#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
++
++extern void time_init(void);
++/* Default late time init is NULL. archs can override this later. */
++void (*__initdata late_time_init)(void);
++
++/* Untouched command line saved by arch-specific code. */
++char __initdata boot_command_line[COMMAND_LINE_SIZE];
++/* Untouched saved command line (eg. for /proc) */
++char *saved_command_line;
++/* Command line for parameter parsing */
++static char *static_command_line;
++/* Command line for per-initcall parameter parsing */
++static char *initcall_command_line;
++
++static char *execute_command;
++static char *ramdisk_execute_command;
++
++/*
++ * Used to generate warnings if static_key manipulation functions are used
++ * before jump_label_init is called.
++ */
++bool static_key_initialized __read_mostly = false;
++EXPORT_SYMBOL_GPL(static_key_initialized);
++
++/*
++ * If set, this is an indication to the drivers that reset the underlying
++ * device before going ahead with the initialization otherwise driver might
++ * rely on the BIOS and skip the reset operation.
++ *
++ * This is useful if kernel is booting in an unreliable environment.
++ * For ex. kdump situaiton where previous kernel has crashed, BIOS has been
++ * skipped and devices will be in unknown state.
++ */
++unsigned int reset_devices;
++EXPORT_SYMBOL(reset_devices);
++
++static int __init set_reset_devices(char *str)
++{
++ reset_devices = 1;
++ return 1;
++}
++
++__setup("reset_devices", set_reset_devices);
++
++static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
++const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
++static const char *panic_later, *panic_param;
++
++extern const struct obs_kernel_param __setup_start[], __setup_end[];
++
++static int __init obsolete_checksetup(char *line)
++{
++ const struct obs_kernel_param *p;
++ int had_early_param = 0;
++
++ p = __setup_start;
++ do {
++ int n = strlen(p->str);
++ if (parameqn(line, p->str, n)) {
++ if (p->early) {
++ /* Already done in parse_early_param?
++ * (Needs exact match on param part).
++ * Keep iterating, as we can have early
++ * params and __setups of same names 8( */
++ if (line[n] == '\0' || line[n] == '=')
++ had_early_param = 1;
++ } else if (!p->setup_func) {
++ pr_warn("Parameter %s is obsolete, ignored\n",
++ p->str);
++ return 1;
++ } else if (p->setup_func(line + n))
++ return 1;
++ }
++ p++;
++ } while (p < __setup_end);
++
++ return had_early_param;
++}
++
++/*
++ * This should be approx 2 Bo*oMips to start (note initial shift), and will
++ * still work even if initially too large, it will just take slightly longer
++ */
++unsigned long loops_per_jiffy = (1<<12);
++
++EXPORT_SYMBOL(loops_per_jiffy);
++
++static int __init debug_kernel(char *str)
++{
++ console_loglevel = 10;
++ return 0;
++}
++
++static int __init quiet_kernel(char *str)
++{
++ console_loglevel = 4;
++ return 0;
++}
++
++early_param("debug", debug_kernel);
++early_param("quiet", quiet_kernel);
++
++static int __init loglevel(char *str)
++{
++ int newlevel;
++
++ /*
++ * Only update loglevel value when a correct setting was passed,
++ * to prevent blind crashes (when loglevel being set to 0) that
++ * are quite hard to debug
++ */
++ if (get_option(&str, &newlevel)) {
++ console_loglevel = newlevel;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++early_param("loglevel", loglevel);
++
++/* Change NUL term back to "=", to make "param" the whole string. */
++static int __init repair_env_string(char *param, char *val, const char *unused)
++{
++ if (val) {
++ /* param=val or param="val"? */
++ if (val == param+strlen(param)+1)
++ val[-1] = '=';
++ else if (val == param+strlen(param)+2) {
++ val[-2] = '=';
++ memmove(val-1, val, strlen(val)+1);
++ val--;
++ } else
++ BUG();
++ }
++ return 0;
++}
++
++/*
++ * Unknown boot options get handed to init, unless they look like
++ * unused parameters (modprobe will find them in /proc/cmdline).
++ */
++static int __init unknown_bootoption(char *param, char *val, const char *unused)
++{
++ repair_env_string(param, val, unused);
++
++ /* Handle obsolete-style parameters */
++ if (obsolete_checksetup(param))
++ return 0;
++
++ /* Unused module parameter. */
++ if (strchr(param, '.') && (!val || strchr(param, '.') < val))
++ return 0;
++
++ if (panic_later)
++ return 0;
++
++ if (val) {
++ /* Environment option */
++ unsigned int i;
++ for (i = 0; envp_init[i]; i++) {
++ if (i == MAX_INIT_ENVS) {
++ panic_later = "env";
++ panic_param = param;
++ }
++ if (!strncmp(param, envp_init[i], val - param))
++ break;
++ }
++ envp_init[i] = param;
++ } else {
++ /* Command line option */
++ unsigned int i;
++ for (i = 0; argv_init[i]; i++) {
++ if (i == MAX_INIT_ARGS) {
++ panic_later = "init";
++ panic_param = param;
++ }
++ }
++ argv_init[i] = param;
++ }
++ return 0;
++}
++
++static int __init init_setup(char *str)
++{
++ unsigned int i;
++
++ execute_command = str;
++ /*
++ * In case LILO is going to boot us with default command line,
++ * it prepends "auto" before the whole cmdline which makes
++ * the shell think it should execute a script with such name.
++ * So we ignore all arguments entered _before_ init=... [MJ]
++ */
++ for (i = 1; i < MAX_INIT_ARGS; i++)
++ argv_init[i] = NULL;
++ return 1;
++}
++__setup("init=", init_setup);
++
++static int __init rdinit_setup(char *str)
++{
++ unsigned int i;
++
++ ramdisk_execute_command = str;
++ /* See "auto" comment in init_setup */
++ for (i = 1; i < MAX_INIT_ARGS; i++)
++ argv_init[i] = NULL;
++ return 1;
++}
++__setup("rdinit=", rdinit_setup);
++
++#ifndef CONFIG_SMP
++static const unsigned int setup_max_cpus = NR_CPUS;
++#ifdef CONFIG_X86_LOCAL_APIC
++static void __init smp_init(void)
++{
++ APIC_init_uniprocessor();
++}
++#else
++#define smp_init() do { } while (0)
++#endif
++
++static inline void setup_nr_cpu_ids(void) { }
++static inline void smp_prepare_cpus(unsigned int maxcpus) { }
++#endif
++
++/*
++ * We need to store the untouched command line for future reference.
++ * We also need to store the touched command line since the parameter
++ * parsing is performed in place, and we should allow a component to
++ * store reference of name/value for future reference.
++ */
++static void __init setup_command_line(char *command_line)
++{
++ saved_command_line =
++ memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
++ initcall_command_line =
++ memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
++ static_command_line = memblock_virt_alloc(strlen(command_line) + 1, 0);
++ strcpy (saved_command_line, boot_command_line);
++ strcpy (static_command_line, command_line);
++}
++
++/*
++ * We need to finalize in a non-__init function or else race conditions
++ * between the root thread and the init thread may cause start_kernel to
++ * be reaped by free_initmem before the root thread has proceeded to
++ * cpu_idle.
++ *
++ * gcc-3.4 accidentally inlines this function, so use noinline.
++ */
++
++static __initdata DECLARE_COMPLETION(kthreadd_done);
++
++static noinline void __init_refok rest_init(void)
++{
++ int pid;
++
++ rcu_scheduler_starting();
++ /*
++ * We need to spawn init first so that it obtains pid 1, however
++ * the init task will end up wanting to create kthreads, which, if
++ * we schedule it before we create kthreadd, will OOPS.
++ */
++ kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
++ numa_default_policy();
++ pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
++ rcu_read_lock();
++ kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
++ rcu_read_unlock();
++ complete(&kthreadd_done);
++
++ /*
++ * The boot idle thread must execute schedule()
++ * at least once to get things moving:
++ */
++ init_idle_bootup_task(current);
++ schedule_preempt_disabled();
++ /* Call into cpu_idle with preempt disabled */
++ cpu_startup_entry(CPUHP_ONLINE);
++}
++
++/* Check for early params. */
++static int __init do_early_param(char *param, char *val, const char *unused)
++{
++ const struct obs_kernel_param *p;
++
++ for (p = __setup_start; p < __setup_end; p++) {
++ if ((p->early && parameq(param, p->str)) ||
++ (strcmp(param, "console") == 0 &&
++ strcmp(p->str, "earlycon") == 0)
++ ) {
++ if (p->setup_func(val) != 0)
++ pr_warn("Malformed early option '%s'\n", param);
++ }
++ }
++ /* We accept everything at this stage. */
++ return 0;
++}
++
++void __init parse_early_options(char *cmdline)
++{
++ parse_args("early options", cmdline, NULL, 0, 0, 0, do_early_param);
++}
++
++/* Arch code calls this early on, or if not, just before other parsing. */
++void __init parse_early_param(void)
++{
++ static __initdata int done = 0;
++ static __initdata char tmp_cmdline[COMMAND_LINE_SIZE];
++
++ if (done)
++ return;
++
++ /* All fall through to do_early_param. */
++ strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
++ parse_early_options(tmp_cmdline);
++ done = 1;
++}
++
++/*
++ * Activate the first processor.
++ */
++
++static void __init boot_cpu_init(void)
++{
++ int cpu = smp_processor_id();
++ /* Mark the boot cpu "present", "online" etc for SMP and UP case */
++ set_cpu_online(cpu, true);
++ set_cpu_active(cpu, true);
++ set_cpu_present(cpu, true);
++ set_cpu_possible(cpu, true);
++}
++
++void __init __weak smp_setup_processor_id(void)
++{
++}
++
++# if THREAD_SIZE >= PAGE_SIZE
++void __init __weak thread_info_cache_init(void)
++{
++}
++#endif
++
++/*
++ * Set up kernel memory allocators
++ */
++static void __init mm_init(void)
++{
++ /*
++ * page_cgroup requires contiguous pages,
++ * bigger than MAX_ORDER unless SPARSEMEM.
++ */
++ page_cgroup_init_flatmem();
++ mem_init();
++ kmem_cache_init();
++ percpu_init_late();
++ pgtable_init();
++ vmalloc_init();
++}
++
++asmlinkage void __init start_kernel(void)
++{
++ char * command_line;
++ extern const struct kernel_param __start___param[], __stop___param[];
++
++ /*
++ * Need to run as early as possible, to initialize the
++ * lockdep hash:
++ */
++ lockdep_init();
++ smp_setup_processor_id();
++ debug_objects_early_init();
++
++ /*
++ * Set up the the initial canary ASAP:
++ */
++ boot_init_stack_canary();
++
++ cgroup_init_early();
++
++ local_irq_disable();
++ early_boot_irqs_disabled = true;
++
++/*
++ * Interrupts are still disabled. Do necessary setups, then
++ * enable them
++ */
++ boot_cpu_init();
++ page_address_init();
++ pr_notice("%s", linux_banner);
++ setup_arch(&command_line);
++ mm_init_owner(&init_mm, &init_task);
++ mm_init_cpumask(&init_mm);
++ setup_command_line(command_line);
++ setup_nr_cpu_ids();
++ setup_per_cpu_areas();
++ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
++
++ build_all_zonelists(NULL, NULL);
++ page_alloc_init();
++
++ pr_notice("Kernel command line: %s\n", boot_command_line);
++ parse_early_param();
++ parse_args("Booting kernel", static_command_line, __start___param,
++ __stop___param - __start___param,
++ -1, -1, &unknown_bootoption);
++
++ jump_label_init();
++
++ /*
++ * These use large bootmem allocations and must precede
++ * kmem_cache_init()
++ */
++ setup_log_buf(0);
++ pidhash_init();
++ vfs_caches_init_early();
++ sort_main_extable();
++ trap_init();
++ mm_init();
++
++ /*
++ * Set up the scheduler prior starting any interrupts (such as the
++ * timer interrupt). Full topology setup happens at smp_init()
++ * time - but meanwhile we still have a functioning scheduler.
++ */
++ sched_init();
++ /*
++ * Disable preemption - early bootup scheduling is extremely
++ * fragile until we cpu_idle() for the first time.
++ */
++ preempt_disable();
++ if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n"))
++ local_irq_disable();
++ idr_init_cache();
++ rcu_init();
++ tick_nohz_init();
++ context_tracking_init();
++ radix_tree_init();
++ /* init some links before init_ISA_irqs() */
++ early_irq_init();
++ init_IRQ();
++ tick_init();
++ init_timers();
++ hrtimers_init();
++ softirq_init();
++ timekeeping_init();
++ time_init();
++ sched_clock_postinit();
++ perf_event_init();
++ profile_init();
++ call_function_init();
++ WARN(!irqs_disabled(), "Interrupts were enabled early\n");
++ early_boot_irqs_disabled = false;
++ local_irq_enable();
++
++ kmem_cache_init_late();
++
++ /*
++ * HACK ALERT! This is early. We're enabling the console before
++ * we've done PCI setups etc, and console_init() must be aware of
++ * this. But we do want output early, in case something goes wrong.
++ */
++ console_init();
++ if (panic_later)
++ panic("Too many boot %s vars at `%s'", panic_later,
++ panic_param);
++
++ lockdep_info();
++
++ /*
++ * Need to run this when irqs are enabled, because it wants
++ * to self-test [hard/soft]-irqs on/off lock inversion bugs
++ * too:
++ */
++ locking_selftest();
++
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (initrd_start && !initrd_below_start_ok &&
++ page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
++ pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
++ page_to_pfn(virt_to_page((void *)initrd_start)),
++ min_low_pfn);
++ initrd_start = 0;
++ }
++#endif
++ page_cgroup_init();
++ debug_objects_mem_init();
++ kmemleak_init();
++ setup_per_cpu_pageset();
++ numa_policy_init();
++ if (late_time_init)
++ late_time_init();
++ sched_clock_init();
++ calibrate_delay();
++ pidmap_init();
++ anon_vma_init();
++ acpi_early_init();
++#ifdef CONFIG_X86
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
++ efi_enter_virtual_mode();
++#endif
++#ifdef CONFIG_X86_ESPFIX64
++ /* Should be run before the first non-init thread is created */
++ init_espfix_bsp();
++#endif
++ thread_info_cache_init();
++ cred_init();
++ fork_init(totalram_pages);
++ proc_caches_init();
++ buffer_init();
++ key_init();
++ security_init();
++ dbg_late_init();
++ vfs_caches_init(totalram_pages);
++ signals_init();
++ /* rootfs populating might need page-writeback */
++ page_writeback_init();
++#ifdef CONFIG_PROC_FS
++ proc_root_init();
++#endif
++ cgroup_init();
++ cpuset_init();
++ taskstats_init_early();
++ delayacct_init();
++
++ check_bugs();
++
++ sfi_init_late();
++
++ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
++ efi_late_init();
++ efi_free_boot_services();
++ }
++
++ ftrace_init();
++
++ /* Do the rest non-__init'ed, we're now alive */
++ rest_init();
++}
++
++/* Call all constructor functions linked into the kernel. */
++static void __init do_ctors(void)
++{
++#ifdef CONFIG_CONSTRUCTORS
++ ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
++
++ for (; fn < (ctor_fn_t *) __ctors_end; fn++)
++ (*fn)();
++#endif
++}
++
++bool initcall_debug;
++core_param(initcall_debug, initcall_debug, bool, 0644);
++
++static int __init_or_module do_one_initcall_debug(initcall_t fn)
++{
++ ktime_t calltime, delta, rettime;
++ unsigned long long duration;
++ int ret;
++
++ pr_debug("calling %pF @ %i\n", fn, task_pid_nr(current));
++ calltime = ktime_get();
++ ret = fn();
++ rettime = ktime_get();
++ delta = ktime_sub(rettime, calltime);
++ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
++ pr_debug("initcall %pF returned %d after %lld usecs\n",
++ fn, ret, duration);
++
++ return ret;
++}
++
++int __init_or_module do_one_initcall(initcall_t fn)
++{
++ int count = preempt_count();
++ int ret;
++ char msgbuf[64];
++
++ if (initcall_debug)
++ ret = do_one_initcall_debug(fn);
++ else
++ ret = fn();
++
++ msgbuf[0] = 0;
++
++ if (preempt_count() != count) {
++ sprintf(msgbuf, "preemption imbalance ");
++ preempt_count_set(count);
++ }
++ if (irqs_disabled()) {
++ strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
++ local_irq_enable();
++ }
++ WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
++
++ return ret;
++}
++
++
++extern initcall_t __initcall_start[];
++extern initcall_t __initcall0_start[];
++extern initcall_t __initcall1_start[];
++extern initcall_t __initcall2_start[];
++extern initcall_t __initcall3_start[];
++extern initcall_t __initcall4_start[];
++extern initcall_t __initcall5_start[];
++extern initcall_t __initcall6_start[];
++extern initcall_t __initcall7_start[];
++extern initcall_t __initcall_end[];
++
++static initcall_t *initcall_levels[] __initdata = {
++ __initcall0_start,
++ __initcall1_start,
++ __initcall2_start,
++ __initcall3_start,
++ __initcall4_start,
++ __initcall5_start,
++ __initcall6_start,
++ __initcall7_start,
++ __initcall_end,
++};
++
++/* Keep these in sync with initcalls in include/linux/init.h */
++static char *initcall_level_names[] __initdata = {
++ "early",
++ "core",
++ "postcore",
++ "arch",
++ "subsys",
++ "fs",
++ "device",
++ "late",
++};
++
++static void __init do_initcall_level(int level)
++{
++ extern const struct kernel_param __start___param[], __stop___param[];
++ initcall_t *fn;
++
++ strcpy(initcall_command_line, saved_command_line);
++ parse_args(initcall_level_names[level],
++ initcall_command_line, __start___param,
++ __stop___param - __start___param,
++ level, level,
++ &repair_env_string);
++
++ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
++ do_one_initcall(*fn);
++}
++
++static void __init do_initcalls(void)
++{
++ int level;
++
++ for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
++ do_initcall_level(level);
++}
++
++/*
++ * Ok, the machine is now initialized. None of the devices
++ * have been touched yet, but the CPU subsystem is up and
++ * running, and memory and process management works.
++ *
++ * Now we can finally start doing some real work..
++ */
++static void __init do_basic_setup(void)
++{
++ cpuset_init_smp();
++ usermodehelper_init();
++ shmem_init();
++ driver_init();
++ init_irq_proc();
++ do_ctors();
++ usermodehelper_enable();
++ do_initcalls();
++ random_int_secret_init();
++}
++
++static void __init do_pre_smp_initcalls(void)
++{
++ initcall_t *fn;
++
++ for (fn = __initcall_start; fn < __initcall0_start; fn++)
++ do_one_initcall(*fn);
++}
++
++/*
++ * This function requests modules which should be loaded by default and is
++ * called twice right after initrd is mounted and right before init is
++ * exec'd. If such modules are on either initrd or rootfs, they will be
++ * loaded before control is passed to userland.
++ */
++void __init load_default_modules(void)
++{
++ load_default_elevator_module();
++}
++
++static int run_init_process(const char *init_filename)
++{
++ argv_init[0] = init_filename;
++ return do_execve(getname_kernel(init_filename),
++ (const char __user *const __user *)argv_init,
++ (const char __user *const __user *)envp_init);
++}
++
++static int try_to_run_init_process(const char *init_filename)
++{
++ int ret;
++
++ ret = run_init_process(init_filename);
++
++ if (ret && ret != -ENOENT) {
++ pr_err("Starting init: %s exists but couldn't execute it (error %d)\n",
++ init_filename, ret);
++ }
++
++ return ret;
++}
++
++static noinline void __init kernel_init_freeable(void);
++
++static int __ref kernel_init(void *unused)
++{
++ int ret;
++
++ kernel_init_freeable();
++ /* need to finish all async __init code before freeing the memory */
++ async_synchronize_full();
++ free_initmem();
++ mark_rodata_ro();
++ system_state = SYSTEM_RUNNING;
++ numa_default_policy();
++
++ flush_delayed_fput();
++
++ if (ramdisk_execute_command) {
++ ret = run_init_process(ramdisk_execute_command);
++ if (!ret)
++ return 0;
++ pr_err("Failed to execute %s (error %d)\n",
++ ramdisk_execute_command, ret);
++ }
++
++ /*
++ * We try each of these until one succeeds.
++ *
++ * The Bourne shell can be used instead of init if we are
++ * trying to recover a really broken machine.
++ */
++ if (execute_command) {
++ ret = run_init_process(execute_command);
++ if (!ret)
++ return 0;
++ pr_err("Failed to execute %s (error %d). Attempting defaults...\n",
++ execute_command, ret);
++ }
++ if (!try_to_run_init_process("/sbin/init") ||
++ !try_to_run_init_process("/etc/init") ||
++ !try_to_run_init_process("/bin/init") ||
++ !try_to_run_init_process("/bin/sh"))
++ return 0;
++
++ panic("No working init found. Try passing init= option to kernel. "
++ "See Linux Documentation/init.txt for guidance.");
++}
++
++static noinline void __init kernel_init_freeable(void)
++{
++ /*
++ * Wait until kthreadd is all set-up.
++ */
++ wait_for_completion(&kthreadd_done);
++
++ /* Now the scheduler is fully set up and can do blocking allocations */
++ gfp_allowed_mask = __GFP_BITS_MASK;
++
++ /*
++ * init can allocate pages on any node
++ */
++ set_mems_allowed(node_states[N_MEMORY]);
++ /*
++ * init can run on any cpu.
++ */
++ set_cpus_allowed_ptr(current, cpu_all_mask);
++
++ cad_pid = task_pid(current);
++
++ smp_prepare_cpus(setup_max_cpus);
++
++ do_pre_smp_initcalls();
++ lockup_detector_init();
++
++ smp_init();
++ sched_init_smp();
++
++ do_basic_setup();
++
++ /* Open the /dev/console on the rootfs, this should never fail */
++ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
++ pr_err("Warning: unable to open an initial console.\n");
++
++ (void) sys_dup(0);
++ (void) sys_dup(0);
++ /*
++ * check if there is an early userspace init. If yes, let it do all
++ * the work
++ */
++
++ if (!ramdisk_execute_command)
++ ramdisk_execute_command = "/init";
++
++ if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
++ ramdisk_execute_command = NULL;
++ prepare_namespace();
++ }
++
++ /*
++ * Ok, we have completed the initial bootup, and
++ * we're essentially up and running. Get rid of the
++ * initmem segments and start the user-mode stuff..
++ */
++
++ /* rootfs is available now, try loading default modules */
++ load_default_modules();
++}
+diff -Nur linux-3.14.36/kernel/cpu.c linux-openelec/kernel/cpu.c
+--- linux-3.14.36/kernel/cpu.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/kernel/cpu.c 2015-05-06 12:05:44.000000000 -0500
+@@ -722,3 +722,22 @@
+ {
+ cpumask_copy(to_cpumask(cpu_online_bits), src);
+ }
++
++static ATOMIC_NOTIFIER_HEAD(idle_notifier);
++void idle_notifier_register(struct notifier_block *n)
++{
++ atomic_notifier_chain_register(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_register);
++
++void idle_notifier_unregister(struct notifier_block *n)
++{
++ atomic_notifier_chain_unregister(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_unregister);
++
++void idle_notifier_call_chain(unsigned long val)
++{
++ atomic_notifier_call_chain(&idle_notifier, val, NULL);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
+diff -Nur linux-3.14.36/kernel/irq/manage.c linux-openelec/kernel/irq/manage.c
+--- linux-3.14.36/kernel/irq/manage.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/kernel/irq/manage.c 2015-05-06 12:05:44.000000000 -0500
+@@ -32,24 +32,10 @@
+ early_param("threadirqs", setup_forced_irqthreads);
+ #endif
+
+-/**
+- * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+- * @irq: interrupt number to wait for
+- *
+- * This function waits for any pending IRQ handlers for this interrupt
+- * to complete before returning. If you use this function while
+- * holding a resource the IRQ handler may need you will deadlock.
+- *
+- * This function may be called - with care - from IRQ context.
+- */
+-void synchronize_irq(unsigned int irq)
++static void __synchronize_hardirq(struct irq_desc *desc)
+ {
+- struct irq_desc *desc = irq_to_desc(irq);
+ bool inprogress;
+
+- if (!desc)
+- return;
+-
+ do {
+ unsigned long flags;
+
+@@ -67,12 +53,56 @@
+
+ /* Oops, that failed? */
+ } while (inprogress);
++}
++
++/**
++ * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
++ * @irq: interrupt number to wait for
++ *
++ * This function waits for any pending hard IRQ handlers for this
++ * interrupt to complete before returning. If you use this
++ * function while holding a resource the IRQ handler may need you
++ * will deadlock. It does not take associated threaded handlers
++ * into account.
++ *
++ * Do not use this for shutdown scenarios where you must be sure
++ * that all parts (hardirq and threaded handler) have completed.
++ *
++ * This function may be called - with care - from IRQ context.
++ */
++void synchronize_hardirq(unsigned int irq)
++{
++ struct irq_desc *desc = irq_to_desc(irq);
+
+- /*
+- * We made sure that no hardirq handler is running. Now verify
+- * that no threaded handlers are active.
+- */
+- wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
++ if (desc)
++ __synchronize_hardirq(desc);
++}
++EXPORT_SYMBOL(synchronize_hardirq);
++
++/**
++ * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
++ * @irq: interrupt number to wait for
++ *
++ * This function waits for any pending IRQ handlers for this interrupt
++ * to complete before returning. If you use this function while
++ * holding a resource the IRQ handler may need you will deadlock.
++ *
++ * This function may be called - with care - from IRQ context.
++ */
++void synchronize_irq(unsigned int irq)
++{
++ struct irq_desc *desc = irq_to_desc(irq);
++
++ if (desc) {
++ __synchronize_hardirq(desc);
++ /*
++ * We made sure that no hardirq handler is
++ * running. Now verify that no threaded handlers are
++ * active.
++ */
++ wait_event(desc->wait_for_threads,
++ !atomic_read(&desc->threads_active));
++ }
+ }
+ EXPORT_SYMBOL(synchronize_irq);
+
+diff -Nur linux-3.14.36/kernel/power/main.c linux-openelec/kernel/power/main.c
+--- linux-3.14.36/kernel/power/main.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/kernel/power/main.c 2015-07-24 18:03:30.368842002 -0500
+@@ -46,7 +46,7 @@
+ }
+
+ /* If set, devices may be suspended and resumed asynchronously. */
+-int pm_async_enabled = 1;
++int pm_async_enabled = 0;
+
+ static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+diff -Nur linux-3.14.36/kernel/relay.c linux-openelec/kernel/relay.c
+--- linux-3.14.36/kernel/relay.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/kernel/relay.c 2015-05-06 12:05:44.000000000 -0500
+@@ -227,7 +227,7 @@
+ * relay_remove_buf - remove a channel buffer
+ * @kref: target kernel reference that contains the relay buffer
+ *
+- * Removes the file from the fileystem, which also frees the
++ * Removes the file from the filesystem, which also frees the
+ * rchan_buf_struct and the channel buffer. Should only be called from
+ * kref_put().
+ */
+diff -Nur linux-3.14.36/kernel/signal.c linux-openelec/kernel/signal.c
+--- linux-3.14.36/kernel/signal.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/kernel/signal.c 2015-05-06 12:05:44.000000000 -0500
+@@ -2382,7 +2382,7 @@
+ * @regs: user register state
+ * @stepping: nonzero if debugger single-step or block-step in use
+ *
+- * This function should be called when a signal has succesfully been
++ * This function should be called when a signal has successfully been
+ * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
+ * is always blocked, and the signal itself is blocked unless %SA_NODEFER
+ * is set in @ka->sa.sa_flags. Tracing is notified.
+diff -Nur linux-3.14.36/linaro/configs/android.conf linux-openelec/linaro/configs/android.conf
+--- linux-3.14.36/linaro/configs/android.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/android.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,42 @@
++CONFIG_IPV6=y
++# CONFIG_IPV6_SIT is not set
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_HAS_WAKELOCK=y
++CONFIG_WAKELOCK=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_DM_CRYPT=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_ANDROID_PARANOID_NETWORK=y
++CONFIG_NET_ACTIVITY_STATS=y
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_UINPUT=y
++CONFIG_INPUT_GPIO=y
++CONFIG_USB_G_ANDROID=y
++CONFIG_SWITCH=y
++CONFIG_STAGING=y
++CONFIG_ANDROID=y
++CONFIG_ANDROID_BINDER_IPC=y
++CONFIG_ASHMEM=y
++CONFIG_ANDROID_LOGGER=y
++CONFIG_ANDROID_TIMED_OUTPUT=y
++CONFIG_ANDROID_TIMED_GPIO=y
++CONFIG_ANDROID_LOW_MEMORY_KILLER=y
++CONFIG_ANDROID_INTF_ALARM_DEV=y
++CONFIG_CRYPTO_TWOFISH=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_FUSE_FS=y
++CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_ION=y
++CONFIG_SYNC=y
++CONFIG_SW_SYNC=y
++CONFIG_SW_SYNC_USER=y
++CONFIG_ION_TEST=y
++CONFIG_ION_DUMMY=y
++CONFIG_ADF=y
++CONFIG_ADF_FBDEV=y
++CONFIG_ADF_MEMBLOCK=y
++CONFIG_DMA_SHARED_BUFFER=y
++CONFIG_TUN=y
+diff -Nur linux-3.14.36/linaro/configs/arndale.conf linux-openelec/linaro/configs/arndale.conf
+--- linux-3.14.36/linaro/configs/arndale.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/arndale.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,66 @@
++CONFIG_KALLSYMS_ALL=y
++CONFIG_PARTITION_ADVANCED=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_ARCH_EXYNOS=y
++CONFIG_S3C_LOWLEVEL_UART_PORT=2
++CONFIG_ARCH_EXYNOS5=y
++# CONFIG_EXYNOS_ATAGS is not set
++CONFIG_MACH_EXYNOS4_DT=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_NR_CPUS=2
++CONFIG_HIGHMEM=y
++# CONFIG_COMPACTION is not set
++CONFIG_ARM_APPENDED_DTB=y
++CONFIG_ARM_ATAG_DTB_COMPAT=y
++CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init= mem=256M"
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_PM_RUNTIME=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_SG=y
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_SATA_EXYNOS=y
++CONFIG_AX88796=y
++CONFIG_AX88796_93CX6=y
++CONFIG_INPUT_EVDEV=y
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_SAMSUNG=y
++CONFIG_SERIAL_SAMSUNG_CONSOLE=y
++CONFIG_HW_RANDOM=y
++CONFIG_I2C=y
++CONFIG_I2C_S3C2410=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_EXYNOS_THERMAL=y
++CONFIG_MFD_SEC_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_S5M8767=y
++CONFIG_DRM=y
++CONFIG_DRM_LOAD_EDID_FIRMWARE=y
++CONFIG_DRM_EXYNOS=y
++CONFIG_DRM_EXYNOS_DMABUF=y
++CONFIG_DRM_EXYNOS_HDMI=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_LOGO=y
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_DW=y
++CONFIG_MMC_DW_IDMAC=y
++CONFIG_MMC_DW_EXYNOS=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_S3C=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEBUG_RT_MUTEXES=y
++CONFIG_DEBUG_SPINLOCK=y
++CONFIG_DEBUG_INFO=y
++CONFIG_RCU_CPU_STALL_TIMEOUT=60
++CONFIG_DEBUG_USER=y
++CONFIG_TUN=y
+diff -Nur linux-3.14.36/linaro/configs/bigendian.conf linux-openelec/linaro/configs/bigendian.conf
+--- linux-3.14.36/linaro/configs/bigendian.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/bigendian.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,4 @@
++CONFIG_CPU_BIG_ENDIAN=y
++CONFIG_CPU_ENDIAN_BE8=y
++# CONFIG_VIRTUALIZATION is not set
++# CONFIG_MMC_DW_IDMAC is not set
+diff -Nur linux-3.14.36/linaro/configs/big-LITTLE-IKS.conf linux-openelec/linaro/configs/big-LITTLE-IKS.conf
+--- linux-3.14.36/linaro/configs/big-LITTLE-IKS.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/big-LITTLE-IKS.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,5 @@
++CONFIG_BIG_LITTLE=y
++CONFIG_BL_SWITCHER=y
++CONFIG_ARM_DT_BL_CPUFREQ=y
++CONFIG_ARM_VEXPRESS_BL_CPUFREQ=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
+diff -Nur linux-3.14.36/linaro/configs/debug.conf linux-openelec/linaro/configs/debug.conf
+--- linux-3.14.36/linaro/configs/debug.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/debug.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1 @@
++CONFIG_PROVE_LOCKING=y
+diff -Nur linux-3.14.36/linaro/configs/distribution.conf linux-openelec/linaro/configs/distribution.conf
+--- linux-3.14.36/linaro/configs/distribution.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/distribution.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,49 @@
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_CGROUPS=y
++# CONFIG_COMPAT_BRK is not set
++CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
++CONFIG_SECCOMP=y
++CONFIG_CC_STACKPROTECTOR=y
++CONFIG_SYN_COOKIES=y
++CONFIG_IPV6=y
++CONFIG_NETLABEL=y
++CONFIG_BRIDGE_NETFILTER=y
++CONFIG_NF_CONNTRACK=m
++CONFIG_NETFILTER_XT_CONNMARK=m
++CONFIG_NETFILTER_XT_MARK=m
++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
++CONFIG_NF_CONNTRACK_IPV4=m
++CONFIG_NF_NAT_IPV4=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_NF_CONNTRACK_IPV6=m
++CONFIG_NF_NAT_IPV6=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE=m
++CONFIG_TUN=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_UINPUT=y
++# CONFIG_DEVKMEM is not set
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_AUTOFS4_FS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_STRICT_DEVMEM=y
++CONFIG_SECURITY=y
++CONFIG_LSM_MMAP_MIN_ADDR=0
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SMACK=y
++CONFIG_SECURITY_APPARMOR=y
++CONFIG_DEFAULT_SECURITY_APPARMOR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+diff -Nur linux-3.14.36/linaro/configs/highbank.conf linux-openelec/linaro/configs/highbank.conf
+--- linux-3.14.36/linaro/configs/highbank.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/highbank.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,40 @@
++CONFIG_EXPERIMENTAL=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_ARCH_HIGHBANK=y
++CONFIG_ARM_ERRATA_754322=y
++CONFIG_SMP=y
++CONFIG_SCHED_MC=y
++CONFIG_AEABI=y
++CONFIG_CMDLINE="console=ttyAMA0"
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_NET=y
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_SATA_HIGHBANK=y
++CONFIG_NETDEVICES=y
++CONFIG_NET_CALXEDA_XGMAC=y
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++CONFIG_IPMI_HANDLER=y
++CONFIG_IPMI_SI=y
++CONFIG_I2C=y
++CONFIG_I2C_DESIGNWARE_PLATFORM=y
++CONFIG_SPI=y
++CONFIG_SPI_PL022=y
++CONFIG_GPIO_PL061=y
++CONFIG_MMC=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_EDAC=y
++CONFIG_EDAC_MM_EDAC=y
++CONFIG_EDAC_HIGHBANK_MC=y
++CONFIG_EDAC_HIGHBANK_L2=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_PL031=y
++CONFIG_DMADEVICES=y
++CONFIG_PL330_DMA=y
+diff -Nur linux-3.14.36/linaro/configs/kvm-guest.conf linux-openelec/linaro/configs/kvm-guest.conf
+--- linux-3.14.36/linaro/configs/kvm-guest.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/kvm-guest.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,11 @@
++CONFIG_BALLOON_COMPACTION=y
++CONFIG_VIRTIO_BLK=y
++CONFIG_VIRTIO_NET=y
++CONFIG_HVC_DRIVER=y
++CONFIG_VIRTIO_CONSOLE=y
++CONFIG_VIRTIO=y
++CONFIG_VIRTIO_BALLOON=y
++CONFIG_VIRTIO_MMIO=y
++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
++CONFIG_VIRTUALIZATION=y
++# CONFIG_THUMB2_KERNEL is not set
+diff -Nur linux-3.14.36/linaro/configs/kvm-host.conf linux-openelec/linaro/configs/kvm-host.conf
+--- linux-3.14.36/linaro/configs/kvm-host.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/kvm-host.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,11 @@
++CONFIG_VIRTUALIZATION=y
++CONFIG_ARM_LPAE=y
++CONFIG_ARM_VIRT_EXT=y
++CONFIG_HAVE_KVM_IRQCHIP=y
++CONFIG_KVM_ARM_HOST=y
++CONFIG_KVM_ARM_MAX_VCPUS=4
++CONFIG_KVM_ARM_TIMER=y
++CONFIG_KVM_ARM_VGIC=y
++CONFIG_KVM_MMIO=y
++CONFIG_KVM=y
++CONFIG_BLK_DEV_NBD=m
+diff -Nur linux-3.14.36/linaro/configs/linaro-base.conf linux-openelec/linaro/configs/linaro-base.conf
+--- linux-3.14.36/linaro/configs/linaro-base.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/linaro-base.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,115 @@
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=16
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EMBEDDED=y
++CONFIG_HOTPLUG=y
++CONFIG_PERF_EVENTS=y
++CONFIG_SLAB=y
++CONFIG_PROFILING=y
++CONFIG_OPROFILE=y
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_SMP=y
++CONFIG_SCHED_MC=y
++CONFIG_SCHED_SMT=y
++CONFIG_THUMB2_KERNEL=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
++CONFIG_CPU_IDLE=y
++CONFIG_BINFMT_MISC=y
++CONFIG_MD=y
++CONFIG_BLK_DEV_DM=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_XFRM_USER=y
++CONFIG_NET_KEY=y
++CONFIG_NET_KEY_MIGRATE=y
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++# CONFIG_INET_LRO is not set
++CONFIG_NETFILTER=y
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_CONNECTOR=y
++CONFIG_MTD=y
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_OOPS=y
++CONFIG_MTD_CFI=y
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_NAND=y
++CONFIG_NETDEVICES=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT3_FS=y
++CONFIG_EXT4_FS=y
++CONFIG_BTRFS_FS=y
++CONFIG_QUOTA=y
++CONFIG_QFMT_V2=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_ECRYPT_FS=y
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_SUMMARY=y
++CONFIG_JFFS2_FS_XATTR=y
++CONFIG_JFFS2_COMPRESSION_OPTIONS=y
++CONFIG_JFFS2_LZO=y
++CONFIG_JFFS2_RUBIN=y
++CONFIG_CRAMFS=y
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++# CONFIG_NFS_V2 is not set
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_PRINTK_TIME=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_DEBUG_FS=y
++CONFIG_SCHEDSTATS=y
++CONFIG_TIMER_STATS=y
++CONFIG_KEYS=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++CONFIG_CRC_CCITT=y
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC_ITU_T=y
++CONFIG_CRC7=y
++CONFIG_HW_PERF_EVENTS=y
++CONFIG_FUNCTION_TRACER=y
++CONFIG_ENABLE_DEFAULT_TRACERS=y
++CONFIG_PROC_DEVICETREE=y
++CONFIG_JUMP_LABEL=y
++CONFIG_STRICT_DEVMEM=y
++CONFIG_KGDB=y
++CONFIG_KGDB_TESTS=y
++CONFIG_OF_IDLE_STATES=y
++CONFIG_FTRACE=y
++CONFIG_FUNCTION_TRACER=y
++CONFIG_FTRACE_SYSCALLS=y
++CONFIG_STACK_TRACER=y
++CONFIG_FUNCTION_PROFILER=y
++CONFIG_MAILBOX=y
++CONFIG_AUDIT=y
++CONFIG_NF_CONNTRACK_SECMARK=y
++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
++CONFIG_NETFILTER_XT_TARGET_SECMARK=y
++CONFIG_IP_NF_SECURITY=y
++CONFIG_SECURITY=y
++CONFIG_SECURITY_NETWORK=y
++CONFIG_LSM_MMAP_MIN_ADDR=4096
++CONFIG_SECURITY_SELINUX=y
++CONFIG_EXT4_FS_SECURITY=y
+diff -Nur linux-3.14.36/linaro/configs/omap4.conf linux-openelec/linaro/configs/omap4.conf
+--- linux-3.14.36/linaro/configs/omap4.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/omap4.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,196 @@
++CONFIG_EXPERT=y
++CONFIG_KPROBES=y
++CONFIG_MODULE_FORCE_LOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_PARTITION_ADVANCED=y
++CONFIG_GPIO_PCA953X=y
++CONFIG_OMAP_RESET_CLOCKS=y
++CONFIG_OMAP_MUX_DEBUG=y
++CONFIG_ARCH_OMAP3=y
++CONFIG_ARCH_OMAP4=y
++CONFIG_ARCH_OMAP2PLUS=y
++CONFIG_SOC_OMAP5=y
++# CONFIG_ARCH_OMAP2 is not set
++CONFIG_ARCH_VEXPRESS_CA9X4=y
++CONFIG_ARM_THUMBEE=y
++CONFIG_ARM_ERRATA_411920=y
++CONFIG_NR_CPUS=2
++CONFIG_ZBOOT_ROM_TEXT=0x0
++CONFIG_ZBOOT_ROM_BSS=0x0
++CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
++CONFIG_KEXEC=y
++CONFIG_PM_DEBUG=y
++CONFIG_CAN=m
++CONFIG_CAN_C_CAN=m
++CONFIG_CAN_C_CAN_PLATFORM=m
++CONFIG_BT=m
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIUART_LL=y
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_CFG80211=m
++CONFIG_MAC80211=m
++CONFIG_MAC80211_RC_PID=y
++CONFIG_MAC80211_RC_DEFAULT_PID=y
++CONFIG_CMA=y
++CONFIG_MTD_NAND_OMAP2=y
++CONFIG_MTD_ONENAND=y
++CONFIG_MTD_ONENAND_VERIFY_WRITE=y
++CONFIG_MTD_ONENAND_OMAP2=y
++CONFIG_MTD_UBI=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_SENSORS_TSL2550=m
++CONFIG_SENSORS_LIS3_I2C=m
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_SCAN_ASYNC=y
++CONFIG_KS8851=y
++CONFIG_KS8851_MLL=y
++CONFIG_SMC91X=y
++CONFIG_SMSC911X=y
++CONFIG_TI_CPSW=y
++CONFIG_SMSC_PHY=y
++CONFIG_USB_USBNET=y
++CONFIG_USB_NET_SMSC95XX=y
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_KC2190=y
++CONFIG_LIBERTAS=m
++CONFIG_LIBERTAS_USB=m
++CONFIG_LIBERTAS_SDIO=m
++CONFIG_LIBERTAS_DEBUG=y
++CONFIG_INPUT_JOYDEV=y
++CONFIG_INPUT_EVDEV=y
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_MATRIX=m
++CONFIG_KEYBOARD_TWL4030=y
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_ADS7846=y
++CONFIG_INPUT_TWL4030_PWRBUTTON=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=32
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++CONFIG_SERIAL_8250_DETECT_IRQ=y
++CONFIG_SERIAL_8250_RSA=y
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++CONFIG_SERIAL_OMAP=y
++CONFIG_SERIAL_OMAP_CONSOLE=y
++CONFIG_HW_RANDOM=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_SPI=y
++CONFIG_SPI_OMAP24XX=y
++CONFIG_PINCTRL_SINGLE=y
++CONFIG_DEBUG_GPIO=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_TWL4030=y
++CONFIG_W1=y
++CONFIG_SENSORS_LM75=m
++CONFIG_WATCHDOG=y
++CONFIG_OMAP_WATCHDOG=y
++CONFIG_TWL4030_WATCHDOG=y
++CONFIG_MFD_TPS65217=y
++CONFIG_MFD_TPS65910=y
++CONFIG_TWL6040_CORE=y
++CONFIG_REGULATOR_TPS65023=y
++CONFIG_REGULATOR_TPS6507X=y
++CONFIG_REGULATOR_TPS65217=y
++CONFIG_REGULATOR_TPS65910=y
++CONFIG_REGULATOR_TWL4030=y
++CONFIG_FB=y
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_OMAP2_DSS=m
++CONFIG_OMAP2_DSS_RFBI=y
++CONFIG_OMAP2_DSS_SDI=y
++CONFIG_OMAP2_DSS_DSI=y
++CONFIG_FB_OMAP2=m
++CONFIG_PANEL_GENERIC_DPI=m
++CONFIG_PANEL_TFP410=m
++CONFIG_PANEL_SHARP_LS037V7DW01=m
++CONFIG_PANEL_NEC_NL8048HL11_01B=m
++CONFIG_PANEL_TAAL=m
++CONFIG_PANEL_TPO_TD043MTEA1=m
++CONFIG_PANEL_ACX565AKM=m
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=y
++CONFIG_LCD_PLATFORM=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=m
++CONFIG_SND=m
++CONFIG_SND_VERBOSE_PRINTK=y
++CONFIG_SND_DEBUG=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=m
++CONFIG_SND_OMAP_SOC=m
++CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
++CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m
++CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
++CONFIG_USB=y
++CONFIG_USB_DEBUG=y
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++CONFIG_USB_MON=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_WDM=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_TEST=y
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_GADGET_DEBUG=y
++CONFIG_USB_GADGET_DEBUG_FILES=y
++CONFIG_USB_GADGET_DEBUG_FS=y
++CONFIG_USB_ZERO=m
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_SDIO_UART=y
++CONFIG_MMC_ARMMMCI=y
++CONFIG_MMC_OMAP=y
++CONFIG_MMC_OMAP_HS=y
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=y
++CONFIG_LEDS_TRIGGER_ONESHOT=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=y
++CONFIG_LEDS_TRIGGER_BACKLIGHT=y
++CONFIG_LEDS_TRIGGER_CPU=y
++CONFIG_LEDS_TRIGGER_GPIO=y
++CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_TWL92330=y
++CONFIG_RTC_DRV_TWL4030=y
++CONFIG_RTC_DRV_OMAP=y
++CONFIG_DMADEVICES=y
++CONFIG_DMA_OMAP=y
++# CONFIG_EXT3_FS_XATTR is not set
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++# CONFIG_DEBUG_BUGVERBOSE is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_LIBCRC32C=y
++# CONFIG_CPU_FREQ is not set
+diff -Nur linux-3.14.36/linaro/configs/preempt-rt.conf linux-openelec/linaro/configs/preempt-rt.conf
+--- linux-3.14.36/linaro/configs/preempt-rt.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/preempt-rt.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,4 @@
++CONFIG_PREEMPT=y
++CONFIG_PREEMPT_RT_FULL=y
++CONFIG_SLUB=y
++# CONFIG_CPU_FREQ is not set
+diff -Nur linux-3.14.36/linaro/configs/vexpress64.conf linux-openelec/linaro/configs/vexpress64.conf
+--- linux-3.14.36/linaro/configs/vexpress64.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/vexpress64.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,56 @@
++CONFIG_ARCH_VEXPRESS=y
++CONFIG_SMP=y
++CONFIG_NR_CPUS=8
++CONFIG_CMDLINE="console=ttyAMA0"
++CONFIG_COMPAT=y
++CONFIG_SMC91X=y
++CONFIG_INPUT_EVDEV=y
++CONFIG_SERIO_AMBAKMI=y
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++# CONFIG_SERIO_I8042 is not set
++CONFIG_FB=y
++CONFIG_FB_ARMCLCD=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_VGA_CONSOLE is not set
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_MMC=y
++CONFIG_MMC_ARMMMCI=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_PL031=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_VIRTIO=y
++CONFIG_VIRTIO_BLK=y
++CONFIG_VIRTIO_MMIO=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_CMA=y
++CONFIG_DMA_CMA=y
++CONFIG_COMMON_CLK_SCPI=y
++CONFIG_SMSC911X=y
++CONFIG_I2C=y
++CONFIG_ARM_MHU_MBOX=y
++CONFIG_ARM_SCPI_PROTOCOL=y
++CONFIG_USB_HIDDEV=y
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB=y
++CONFIG_USB_ULPI=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_HCD_SYNOPSYS=y
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_PHY=y
++CONFIG_USB_ISP1301=y
++CONFIG_PM_OPP=y
++CONFIG_GENERIC_CPUFREQ_CPU0=y
++CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
++CONFIG_ARM_DT_BL_CPUFREQ=y
++CONFIG_ARM64_CPUIDLE=y
++CONFIG_ARM64_CRYPTO=y
+diff -Nur linux-3.14.36/linaro/configs/vexpress.conf linux-openelec/linaro/configs/vexpress.conf
+--- linux-3.14.36/linaro/configs/vexpress.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/vexpress.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,64 @@
++CONFIG_ARCH_VEXPRESS=y
++CONFIG_ARCH_VEXPRESS_CA9X4=y
++CONFIG_HAVE_ARM_ARCH_TIMER=y
++CONFIG_NR_CPUS=8
++CONFIG_HIGHMEM=y
++CONFIG_HIGHPTE=y
++CONFIG_ARM_PSCI=y
++CONFIG_MCPM=y
++CONFIG_ARCH_VEXPRESS_DCSCB=y
++CONFIG_ARCH_VEXPRESS_TC2_PM=y
++CONFIG_ARM_BIG_LITTLE_CPUIDLE=y
++CONFIG_BIG_LITTLE=y
++CONFIG_ARM_VEXPRESS_SPC_CPUFREQ=y
++CONFIG_PM_OPP=y
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
++CONFIG_CMDLINE="console=ttyAMA0,38400n8 root=/dev/mmcblk0p2 rootwait mmci.fmax=4000000"
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_SMSC911X=y
++CONFIG_SMC91X=y
++CONFIG_INPUT_EVDEV=y
++CONFIG_SERIO_AMBAKMI=y
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++CONFIG_FB=y
++CONFIG_FB_ARMCLCD=y
++CONFIG_FB_ARMHDLCD=y
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_ARMAACI=y
++CONFIG_USB=y
++CONFIG_USB_ISP1760_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_MMC=y
++CONFIG_MMC_ARMMMCI=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_PL031=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_VEXPRESS_CONFIG=y
++CONFIG_SENSORS_VEXPRESS=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_VEXPRESS=y
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=y
++CONFIG_LEDS_TRIGGER_CPU=y
++CONFIG_VIRTIO=y
++CONFIG_VIRTIO_BLK=y
++CONFIG_VIRTIO_MMIO=y
++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+diff -Nur linux-3.14.36/linaro/configs/vexpress-tuning.conf linux-openelec/linaro/configs/vexpress-tuning.conf
+--- linux-3.14.36/linaro/configs/vexpress-tuning.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/vexpress-tuning.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1 @@
++# CONFIG_PROVE_LOCKING is not set
+diff -Nur linux-3.14.36/linaro/configs/xen.conf linux-openelec/linaro/configs/xen.conf
+--- linux-3.14.36/linaro/configs/xen.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/linaro/configs/xen.conf 2015-05-06 12:05:45.000000000 -0500
+@@ -0,0 +1,7 @@
++CONFIG_XEN=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XENFS=y
++CONFIG_XEN_COMPAT_XENFS=y
+diff -Nur linux-3.14.36/MAINTAINERS linux-openelec/MAINTAINERS
+--- linux-3.14.36/MAINTAINERS 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/MAINTAINERS 2015-05-06 12:05:44.000000000 -0500
+@@ -5511,6 +5511,14 @@
+ F: drivers/net/macvlan.c
+ F: include/linux/if_macvlan.h
+
++MAILBOX API
++M: Jassi Brar <jassisinghbrar@gmail.com>
++L: linux-kernel@vger.kernel.org
++S: Maintained
++F: drivers/mailbox/
++F: include/linux/mailbox_client.h
++F: include/linux/mailbox_controller.h
++
+ MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
+ M: Michael Kerrisk <mtk.manpages@gmail.com>
+ W: http://www.kernel.org/doc/man-pages
+diff -Nur linux-3.14.36/mm/cma.c linux-openelec/mm/cma.c
+--- linux-3.14.36/mm/cma.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/mm/cma.c 2015-05-06 12:05:44.000000000 -0500
+@@ -0,0 +1,356 @@
++/*
++ * Contiguous Memory Allocator
++ *
++ * Copyright (c) 2010-2011 by Samsung Electronics.
++ * Copyright IBM Corporation, 2013
++ * Copyright LG Electronics Inc., 2014
++ * Written by:
++ * Marek Szyprowski <m.szyprowski@samsung.com>
++ * Michal Nazarewicz <mina86@mina86.com>
++ * Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
++ * Joonsoo Kim <iamjoonsoo.kim@lge.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License or (at your optional) any later version of the license.
++ */
++
++#define pr_fmt(fmt) "cma: " fmt
++
++#ifdef CONFIG_CMA_DEBUG
++#ifndef DEBUG
++# define DEBUG
++#endif
++#endif
++
++#include <linux/memblock.h>
++#include <linux/err.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/log2.h>
++#include <linux/cma.h>
++#include <linux/highmem.h>
++
++struct cma {
++ unsigned long base_pfn;
++ unsigned long count;
++ unsigned long *bitmap;
++ unsigned int order_per_bit; /* Order of pages represented by one bit */
++ struct mutex lock;
++};
++
++static struct cma cma_areas[MAX_CMA_AREAS];
++static unsigned cma_area_count;
++static DEFINE_MUTEX(cma_mutex);
++
++phys_addr_t cma_get_base(struct cma *cma)
++{
++ return PFN_PHYS(cma->base_pfn);
++}
++
++unsigned long cma_get_size(struct cma *cma)
++{
++ return cma->count << PAGE_SHIFT;
++}
++
++static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
++{
++ return (1UL << (align_order >> cma->order_per_bit)) - 1;
++}
++
++static unsigned long cma_bitmap_maxno(struct cma *cma)
++{
++ return cma->count >> cma->order_per_bit;
++}
++
++static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
++ unsigned long pages)
++{
++ return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
++}
++
++static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
++{
++ unsigned long bitmap_no, bitmap_count;
++
++ bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
++ bitmap_count = cma_bitmap_pages_to_bits(cma, count);
++
++ mutex_lock(&cma->lock);
++ bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
++ mutex_unlock(&cma->lock);
++}
++
++static int __init cma_activate_area(struct cma *cma)
++{
++ int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
++ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
++ unsigned i = cma->count >> pageblock_order;
++ struct zone *zone;
++
++ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++
++ if (!cma->bitmap)
++ return -ENOMEM;
++
++ WARN_ON_ONCE(!pfn_valid(pfn));
++ zone = page_zone(pfn_to_page(pfn));
++
++ do {
++ unsigned j;
++
++ base_pfn = pfn;
++ for (j = pageblock_nr_pages; j; --j, pfn++) {
++ WARN_ON_ONCE(!pfn_valid(pfn));
++ /*
++ * alloc_contig_range requires the pfn range
++ * specified to be in the same zone. Make this
++ * simple by forcing the entire CMA resv range
++ * to be in the same zone.
++ */
++ if (page_zone(pfn_to_page(pfn)) != zone)
++ goto err;
++ }
++ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
++ } while (--i);
++
++ mutex_init(&cma->lock);
++ return 0;
++
++err:
++ kfree(cma->bitmap);
++ return -EINVAL;
++}
++
++static int __init cma_init_reserved_areas(void)
++{
++ int i;
++
++ for (i = 0; i < cma_area_count; i++) {
++ int ret = cma_activate_area(&cma_areas[i]);
++
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++core_initcall(cma_init_reserved_areas);
++
++/**
++ * cma_declare_contiguous() - reserve custom contiguous area
++ * @base: Base address of the reserved area optional, use 0 for any
++ * @size: Size of the reserved area (in bytes),
++ * @limit: End address of the reserved memory (optional, 0 for any).
++ * @alignment: Alignment for the CMA area, should be power of 2 or zero
++ * @order_per_bit: Order of pages represented by one bit on bitmap.
++ * @fixed: hint about where to place the reserved area
++ * @res_cma: Pointer to store the created cma region.
++ *
++ * This function reserves memory from early allocator. It should be
++ * called by arch specific code once the early allocator (memblock or bootmem)
++ * has been activated and all other subsystems have already allocated/reserved
++ * memory. This function allows to create custom reserved areas.
++ *
++ * If @fixed is true, reserve contiguous area at exactly @base. If false,
++ * reserve in range from @base to @limit.
++ */
++int __init cma_declare_contiguous(phys_addr_t base,
++ phys_addr_t size, phys_addr_t limit,
++ phys_addr_t alignment, unsigned int order_per_bit,
++ bool fixed, struct cma **res_cma)
++{
++ struct cma *cma;
++ phys_addr_t memblock_end = memblock_end_of_DRAM();
++ phys_addr_t highmem_start = __pa(high_memory);
++ int ret = 0;
++
++ pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
++ __func__, (unsigned long)size, (unsigned long)base,
++ (unsigned long)limit, (unsigned long)alignment);
++
++ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
++ pr_err("Not enough slots for CMA reserved regions!\n");
++ return -ENOSPC;
++ }
++
++ if (!size)
++ return -EINVAL;
++
++ if (alignment && !is_power_of_2(alignment))
++ return -EINVAL;
++
++ /*
++ * Sanitise input arguments.
++ * Pages both ends in CMA area could be merged into adjacent unmovable
++ * migratetype page by page allocator's buddy algorithm. In the case,
++ * you couldn't get a contiguous memory, which is not what we want.
++ */
++ alignment = max(alignment,
++ (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
++ base = ALIGN(base, alignment);
++ size = ALIGN(size, alignment);
++ limit &= ~(alignment - 1);
++
++ /* size should be aligned with order_per_bit */
++ if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
++ return -EINVAL;
++
++ /*
++ * adjust limit to avoid crossing low/high memory boundary for
++ * automatically allocated regions
++ */
++ if (((limit == 0 || limit > memblock_end) &&
++ (memblock_end - size < highmem_start &&
++ memblock_end > highmem_start)) ||
++ (!fixed && limit > highmem_start && limit - size < highmem_start)) {
++ limit = highmem_start;
++ }
++
++ if (fixed && base < highmem_start && base+size > highmem_start) {
++ ret = -EINVAL;
++ pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
++ (unsigned long)base, (unsigned long)highmem_start);
++ goto err;
++ }
++
++ /* Reserve memory */
++ if (base && fixed) {
++ if (memblock_is_region_reserved(base, size) ||
++ memblock_reserve(base, size) < 0) {
++ ret = -EBUSY;
++ goto err;
++ }
++ } else {
++ phys_addr_t addr = memblock_alloc_range(size, alignment, base,
++ limit);
++ if (!addr) {
++ ret = -ENOMEM;
++ goto err;
++ } else {
++ base = addr;
++ }
++ }
++
++ /*
++ * Each reserved area must be initialised later, when more kernel
++ * subsystems (like slab allocator) are available.
++ */
++ cma = &cma_areas[cma_area_count];
++ cma->base_pfn = PFN_DOWN(base);
++ cma->count = size >> PAGE_SHIFT;
++ cma->order_per_bit = order_per_bit;
++ *res_cma = cma;
++ cma_area_count++;
++
++ pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
++ (unsigned long)base);
++ return 0;
++
++err:
++ pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
++ return ret;
++}
++
++/**
++ * cma_alloc() - allocate pages from contiguous area
++ * @cma: Contiguous memory region for which the allocation is performed.
++ * @count: Requested number of pages.
++ * @align: Requested alignment of pages (in PAGE_SIZE order).
++ *
++ * This function allocates part of contiguous memory on specific
++ * contiguous memory area.
++ */
++struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
++{
++ unsigned long mask, pfn, start = 0;
++ unsigned long bitmap_maxno, bitmap_no, bitmap_count;
++ struct page *page = NULL;
++ int ret;
++
++ if (!cma || !cma->count)
++ return NULL;
++
++ pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
++ count, align);
++
++ if (!count)
++ return NULL;
++
++ mask = cma_bitmap_aligned_mask(cma, align);
++ bitmap_maxno = cma_bitmap_maxno(cma);
++ bitmap_count = cma_bitmap_pages_to_bits(cma, count);
++
++ for (;;) {
++ mutex_lock(&cma->lock);
++ bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
++ bitmap_maxno, start, bitmap_count, mask);
++ if (bitmap_no >= bitmap_maxno) {
++ mutex_unlock(&cma->lock);
++ break;
++ }
++ bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
++ /*
++ * It's safe to drop the lock here. We've marked this region for
++ * our exclusive use. If the migration fails we will take the
++ * lock again and unmark it.
++ */
++ mutex_unlock(&cma->lock);
++
++ pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
++ mutex_lock(&cma_mutex);
++ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
++ mutex_unlock(&cma_mutex);
++ if (ret == 0) {
++ page = pfn_to_page(pfn);
++ break;
++ }
++
++ cma_clear_bitmap(cma, pfn, count);
++ if (ret != -EBUSY)
++ break;
++
++ pr_debug("%s(): memory range at %p is busy, retrying\n",
++ __func__, pfn_to_page(pfn));
++ /* try again with a bit different memory target */
++ start = bitmap_no + mask + 1;
++ }
++
++ pr_debug("%s(): returned %p\n", __func__, page);
++ return page;
++}
++
++/**
++ * cma_release() - release allocated pages
++ * @cma: Contiguous memory region for which the allocation is performed.
++ * @pages: Allocated pages.
++ * @count: Number of allocated pages.
++ *
++ * This function releases memory allocated by alloc_cma().
++ * It returns false when provided pages do not belong to contiguous area and
++ * true otherwise.
++ */
++bool cma_release(struct cma *cma, struct page *pages, int count)
++{
++ unsigned long pfn;
++
++ if (!cma || !pages)
++ return false;
++
++ pr_debug("%s(page %p)\n", __func__, (void *)pages);
++
++ pfn = page_to_pfn(pages);
++
++ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
++ return false;
++
++ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
++
++ free_contig_range(pfn, count);
++ cma_clear_bitmap(cma, pfn, count);
++
++ return true;
++}
+diff -Nur linux-3.14.36/mm/Kconfig linux-openelec/mm/Kconfig
+--- linux-3.14.36/mm/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/mm/Kconfig 2015-05-06 12:05:44.000000000 -0500
+@@ -514,6 +514,17 @@
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
++config CMA_AREAS
++ int "Maximum count of the CMA areas"
++ depends on CMA
++ default 7
++ help
++ CMA allows to create CMA areas for particular purpose, mainly,
++ used as device private area. This parameter sets the maximum
++ number of CMA area in the system.
++
++ If unsure, leave the default value "7".
++
+ config ZBUD
+ tristate
+ default n
+diff -Nur linux-3.14.36/mm/Makefile linux-openelec/mm/Makefile
+--- linux-3.14.36/mm/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/mm/Makefile 2015-07-24 18:03:28.532842002 -0500
+@@ -61,3 +61,4 @@
+ obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
+ obj-$(CONFIG_ZBUD) += zbud.o
+ obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
++obj-$(CONFIG_CMA) += cma.o
+diff -Nur linux-3.14.36/mm/memblock.c linux-openelec/mm/memblock.c
+--- linux-3.14.36/mm/memblock.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/mm/memblock.c 2015-07-24 18:03:28.508842002 -0500
+@@ -974,22 +974,35 @@
+ }
+ #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+-static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
+- phys_addr_t align, phys_addr_t max_addr,
+- int nid)
++static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
++ phys_addr_t align, phys_addr_t start,
++ phys_addr_t end, int nid)
+ {
+ phys_addr_t found;
+
+ if (!align)
+ align = SMP_CACHE_BYTES;
+
+- found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
++ found = memblock_find_in_range_node(size, align, start, end, nid);
+ if (found && !memblock_reserve(found, size))
+ return found;
+
+ return 0;
+ }
+
++phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
++ phys_addr_t start, phys_addr_t end)
++{
++ return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
++}
++
++static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
++ phys_addr_t align, phys_addr_t max_addr,
++ int nid)
++{
++ return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
++}
++
+ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
+ {
+ return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+diff -Nur linux-3.14.36/net/atm/svc.c linux-openelec/net/atm/svc.c
+--- linux-3.14.36/net/atm/svc.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/atm/svc.c 2015-05-06 12:05:43.000000000 -0500
+@@ -263,17 +263,11 @@
+ goto out;
+ }
+ }
+-/*
+- * Not supported yet
+- *
+- * #ifndef CONFIG_SINGLE_SIGITF
+- */
++
+ vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
+ vcc->qos.txtp.pcr = 0;
+ vcc->qos.txtp.min_pcr = 0;
+-/*
+- * #endif
+- */
++
+ error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
+ if (!error)
+ sock->state = SS_CONNECTED;
+diff -Nur linux-3.14.36/net/core/dev.c linux-openelec/net/core/dev.c
+--- linux-3.14.36/net/core/dev.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/core/dev.c 2015-07-24 18:03:29.808842002 -0500
+@@ -3457,7 +3457,7 @@
+ * @rx_handler: receive handler to register
+ * @rx_handler_data: data pointer that is used by rx handler
+ *
+- * Register a receive hander for a device. This handler will then be
++ * Register a receive handler for a device. This handler will then be
+ * called from __netif_receive_skb. A negative errno code is returned
+ * on a failure.
+ *
+diff -Nur linux-3.14.36/net/core/Makefile linux-openelec/net/core/Makefile
+--- linux-3.14.36/net/core/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/core/Makefile 2015-05-06 12:05:43.000000000 -0500
+@@ -9,7 +9,7 @@
+
+ obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
+ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
+- sock_diag.o dev_ioctl.o
++ sock_diag.o dev_ioctl.o tso.o
+
+ obj-$(CONFIG_XFRM) += flow.o
+ obj-y += net-sysfs.o
+diff -Nur linux-3.14.36/net/core/rtnetlink.c linux-openelec/net/core/rtnetlink.c
+--- linux-3.14.36/net/core/rtnetlink.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/core/rtnetlink.c 2015-07-24 18:03:29.808842002 -0500
+@@ -1157,73 +1157,7 @@
+ return -EMSGSIZE;
+ }
+
+-static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+-{
+- struct net *net = sock_net(skb->sk);
+- int h, s_h;
+- int idx = 0, s_idx;
+- struct net_device *dev;
+- struct hlist_head *head;
+- struct nlattr *tb[IFLA_MAX+1];
+- u32 ext_filter_mask = 0;
+- int err;
+- int hdrlen;
+-
+- s_h = cb->args[0];
+- s_idx = cb->args[1];
+-
+- rcu_read_lock();
+- cb->seq = net->dev_base_seq;
+-
+- /* A hack to preserve kernel<->userspace interface.
+- * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
+- * However, before Linux v3.9 the code here assumed rtgenmsg and that's
+- * what iproute2 < v3.9.0 used.
+- * We can detect the old iproute2. Even including the IFLA_EXT_MASK
+- * attribute, its netlink message is shorter than struct ifinfomsg.
+- */
+- hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
+- sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
+-
+- if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
+-
+- if (tb[IFLA_EXT_MASK])
+- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+- }
+-
+- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+- idx = 0;
+- head = &net->dev_index_head[h];
+- hlist_for_each_entry_rcu(dev, head, index_hlist) {
+- if (idx < s_idx)
+- goto cont;
+- err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+- NETLINK_CB(cb->skb).portid,
+- cb->nlh->nlmsg_seq, 0,
+- NLM_F_MULTI,
+- ext_filter_mask);
+- /* If we ran out of room on the first message,
+- * we're in trouble
+- */
+- WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+-
+- if (err <= 0)
+- goto out;
+-
+- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+-cont:
+- idx++;
+- }
+- }
+-out:
+- rcu_read_unlock();
+- cb->args[1] = idx;
+- cb->args[0] = h;
+-
+- return skb->len;
+-}
+-
+-const struct nla_policy ifla_policy[IFLA_MAX+1] = {
++static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+ [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
+ [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+@@ -1250,7 +1184,6 @@
+ [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
+ };
+-EXPORT_SYMBOL(ifla_policy);
+
+ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ [IFLA_INFO_KIND] = { .type = NLA_STRING },
+@@ -1284,6 +1217,61 @@
+ [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
+ };
+
++static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
++{
++ struct net *net = sock_net(skb->sk);
++ int h, s_h;
++ int idx = 0, s_idx;
++ struct net_device *dev;
++ struct hlist_head *head;
++ struct nlattr *tb[IFLA_MAX+1];
++ u32 ext_filter_mask = 0;
++
++ s_h = cb->args[0];
++ s_idx = cb->args[1];
++
++ rcu_read_lock();
++ cb->seq = net->dev_base_seq;
++
++ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
++ ifla_policy) >= 0) {
++
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++ }
++
++ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
++ idx = 0;
++ head = &net->dev_index_head[h];
++ hlist_for_each_entry_rcu(dev, head, index_hlist) {
++ if (idx < s_idx)
++ goto cont;
++ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
++ NETLINK_CB(cb->skb).portid,
++ cb->nlh->nlmsg_seq, 0,
++ NLM_F_MULTI,
++ ext_filter_mask) <= 0)
++ goto out;
++
++ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
++cont:
++ idx++;
++ }
++ }
++out:
++ rcu_read_unlock();
++ cb->args[1] = idx;
++ cb->args[0] = h;
++
++ return skb->len;
++}
++
++int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
++{
++ return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);
++}
++EXPORT_SYMBOL(rtnl_nla_parse_ifla);
++
+ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+ {
+ struct net *net;
+diff -Nur linux-3.14.36/net/core/rtnetlink.c.orig linux-openelec/net/core/rtnetlink.c.orig
+--- linux-3.14.36/net/core/rtnetlink.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/net/core/rtnetlink.c.orig 2015-07-24 18:03:29.668842002 -0500
+@@ -0,0 +1,2943 @@
++/*
++ * INET An implementation of the TCP/IP protocol suite for the LINUX
++ * operating system. INET is implemented using the BSD Socket
++ * interface as the means of communication with the user level.
++ *
++ * Routing netlink socket interface: protocol independent part.
++ *
++ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Fixes:
++ * Vitaly E. Lavrov RTA_OK arithmetics was wrong.
++ */
++
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/socket.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/string.h>
++#include <linux/sockios.h>
++#include <linux/net.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/capability.h>
++#include <linux/skbuff.h>
++#include <linux/init.h>
++#include <linux/security.h>
++#include <linux/mutex.h>
++#include <linux/if_addr.h>
++#include <linux/if_bridge.h>
++#include <linux/pci.h>
++#include <linux/etherdevice.h>
++
++#include <asm/uaccess.h>
++
++#include <linux/inet.h>
++#include <linux/netdevice.h>
++#include <net/ip.h>
++#include <net/protocol.h>
++#include <net/arp.h>
++#include <net/route.h>
++#include <net/udp.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <net/fib_rules.h>
++#include <net/rtnetlink.h>
++#include <net/net_namespace.h>
++
++struct rtnl_link {
++ rtnl_doit_func doit;
++ rtnl_dumpit_func dumpit;
++ rtnl_calcit_func calcit;
++};
++
++static DEFINE_MUTEX(rtnl_mutex);
++
++void rtnl_lock(void)
++{
++ mutex_lock(&rtnl_mutex);
++}
++EXPORT_SYMBOL(rtnl_lock);
++
++void __rtnl_unlock(void)
++{
++ mutex_unlock(&rtnl_mutex);
++}
++
++void rtnl_unlock(void)
++{
++ /* This fellow will unlock it for us. */
++ netdev_run_todo();
++}
++EXPORT_SYMBOL(rtnl_unlock);
++
++int rtnl_trylock(void)
++{
++ return mutex_trylock(&rtnl_mutex);
++}
++EXPORT_SYMBOL(rtnl_trylock);
++
++int rtnl_is_locked(void)
++{
++ return mutex_is_locked(&rtnl_mutex);
++}
++EXPORT_SYMBOL(rtnl_is_locked);
++
++#ifdef CONFIG_PROVE_LOCKING
++int lockdep_rtnl_is_held(void)
++{
++ return lockdep_is_held(&rtnl_mutex);
++}
++EXPORT_SYMBOL(lockdep_rtnl_is_held);
++#endif /* #ifdef CONFIG_PROVE_LOCKING */
++
++static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
++
++static inline int rtm_msgindex(int msgtype)
++{
++ int msgindex = msgtype - RTM_BASE;
++
++ /*
++ * msgindex < 0 implies someone tried to register a netlink
++ * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
++ * the message type has not been added to linux/rtnetlink.h
++ */
++ BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
++
++ return msgindex;
++}
++
++static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
++{
++ struct rtnl_link *tab;
++
++ if (protocol <= RTNL_FAMILY_MAX)
++ tab = rtnl_msg_handlers[protocol];
++ else
++ tab = NULL;
++
++ if (tab == NULL || tab[msgindex].doit == NULL)
++ tab = rtnl_msg_handlers[PF_UNSPEC];
++
++ return tab[msgindex].doit;
++}
++
++static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
++{
++ struct rtnl_link *tab;
++
++ if (protocol <= RTNL_FAMILY_MAX)
++ tab = rtnl_msg_handlers[protocol];
++ else
++ tab = NULL;
++
++ if (tab == NULL || tab[msgindex].dumpit == NULL)
++ tab = rtnl_msg_handlers[PF_UNSPEC];
++
++ return tab[msgindex].dumpit;
++}
++
++static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
++{
++ struct rtnl_link *tab;
++
++ if (protocol <= RTNL_FAMILY_MAX)
++ tab = rtnl_msg_handlers[protocol];
++ else
++ tab = NULL;
++
++ if (tab == NULL || tab[msgindex].calcit == NULL)
++ tab = rtnl_msg_handlers[PF_UNSPEC];
++
++ return tab[msgindex].calcit;
++}
++
++/**
++ * __rtnl_register - Register a rtnetlink message type
++ * @protocol: Protocol family or PF_UNSPEC
++ * @msgtype: rtnetlink message type
++ * @doit: Function pointer called for each request message
++ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
++ * @calcit: Function pointer to calc size of dump message
++ *
++ * Registers the specified function pointers (at least one of them has
++ * to be non-NULL) to be called whenever a request message for the
++ * specified protocol family and message type is received.
++ *
++ * The special protocol family PF_UNSPEC may be used to define fallback
++ * function pointers for the case when no entry for the specific protocol
++ * family exists.
++ *
++ * Returns 0 on success or a negative error code.
++ */
++int __rtnl_register(int protocol, int msgtype,
++ rtnl_doit_func doit, rtnl_dumpit_func dumpit,
++ rtnl_calcit_func calcit)
++{
++ struct rtnl_link *tab;
++ int msgindex;
++
++ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
++ msgindex = rtm_msgindex(msgtype);
++
++ tab = rtnl_msg_handlers[protocol];
++ if (tab == NULL) {
++ tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
++ if (tab == NULL)
++ return -ENOBUFS;
++
++ rtnl_msg_handlers[protocol] = tab;
++ }
++
++ if (doit)
++ tab[msgindex].doit = doit;
++
++ if (dumpit)
++ tab[msgindex].dumpit = dumpit;
++
++ if (calcit)
++ tab[msgindex].calcit = calcit;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(__rtnl_register);
++
++/**
++ * rtnl_register - Register a rtnetlink message type
++ *
++ * Identical to __rtnl_register() but panics on failure. This is useful
++ * as failure of this function is very unlikely, it can only happen due
++ * to lack of memory when allocating the chain to store all message
++ * handlers for a protocol. Meant for use in init functions where lack
++ * of memory implies no sense in continuing.
++ */
++void rtnl_register(int protocol, int msgtype,
++ rtnl_doit_func doit, rtnl_dumpit_func dumpit,
++ rtnl_calcit_func calcit)
++{
++ if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
++ panic("Unable to register rtnetlink message handler, "
++ "protocol = %d, message type = %d\n",
++ protocol, msgtype);
++}
++EXPORT_SYMBOL_GPL(rtnl_register);
++
++/**
++ * rtnl_unregister - Unregister a rtnetlink message type
++ * @protocol: Protocol family or PF_UNSPEC
++ * @msgtype: rtnetlink message type
++ *
++ * Returns 0 on success or a negative error code.
++ */
++int rtnl_unregister(int protocol, int msgtype)
++{
++ int msgindex;
++
++ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
++ msgindex = rtm_msgindex(msgtype);
++
++ if (rtnl_msg_handlers[protocol] == NULL)
++ return -ENOENT;
++
++ rtnl_msg_handlers[protocol][msgindex].doit = NULL;
++ rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(rtnl_unregister);
++
++/**
++ * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
++ * @protocol : Protocol family or PF_UNSPEC
++ *
++ * Identical to calling rtnl_unregster() for all registered message types
++ * of a certain protocol family.
++ */
++void rtnl_unregister_all(int protocol)
++{
++ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
++
++ kfree(rtnl_msg_handlers[protocol]);
++ rtnl_msg_handlers[protocol] = NULL;
++}
++EXPORT_SYMBOL_GPL(rtnl_unregister_all);
++
++static LIST_HEAD(link_ops);
++
++static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
++{
++ const struct rtnl_link_ops *ops;
++
++ list_for_each_entry(ops, &link_ops, list) {
++ if (!strcmp(ops->kind, kind))
++ return ops;
++ }
++ return NULL;
++}
++
++/**
++ * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
++ * @ops: struct rtnl_link_ops * to register
++ *
++ * The caller must hold the rtnl_mutex. This function should be used
++ * by drivers that create devices during module initialization. It
++ * must be called before registering the devices.
++ *
++ * Returns 0 on success or a negative error code.
++ */
++int __rtnl_link_register(struct rtnl_link_ops *ops)
++{
++ if (rtnl_link_ops_get(ops->kind))
++ return -EEXIST;
++
++ if (!ops->dellink)
++ ops->dellink = unregister_netdevice_queue;
++
++ list_add_tail(&ops->list, &link_ops);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(__rtnl_link_register);
++
++/**
++ * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
++ * @ops: struct rtnl_link_ops * to register
++ *
++ * Returns 0 on success or a negative error code.
++ */
++int rtnl_link_register(struct rtnl_link_ops *ops)
++{
++ int err;
++
++ rtnl_lock();
++ err = __rtnl_link_register(ops);
++ rtnl_unlock();
++ return err;
++}
++EXPORT_SYMBOL_GPL(rtnl_link_register);
++
++static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
++{
++ struct net_device *dev;
++ LIST_HEAD(list_kill);
++
++ for_each_netdev(net, dev) {
++ if (dev->rtnl_link_ops == ops)
++ ops->dellink(dev, &list_kill);
++ }
++ unregister_netdevice_many(&list_kill);
++}
++
++/**
++ * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
++ * @ops: struct rtnl_link_ops * to unregister
++ *
++ * The caller must hold the rtnl_mutex.
++ */
++void __rtnl_link_unregister(struct rtnl_link_ops *ops)
++{
++ struct net *net;
++
++ for_each_net(net) {
++ __rtnl_kill_links(net, ops);
++ }
++ list_del(&ops->list);
++}
++EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
++
++/* Return with the rtnl_lock held when there are no network
++ * devices unregistering in any network namespace.
++ */
++static void rtnl_lock_unregistering_all(void)
++{
++ struct net *net;
++ bool unregistering;
++ DEFINE_WAIT(wait);
++
++ for (;;) {
++ prepare_to_wait(&netdev_unregistering_wq, &wait,
++ TASK_UNINTERRUPTIBLE);
++ unregistering = false;
++ rtnl_lock();
++ for_each_net(net) {
++ if (net->dev_unreg_count > 0) {
++ unregistering = true;
++ break;
++ }
++ }
++ if (!unregistering)
++ break;
++ __rtnl_unlock();
++ schedule();
++ }
++ finish_wait(&netdev_unregistering_wq, &wait);
++}
++
++/**
++ * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
++ * @ops: struct rtnl_link_ops * to unregister
++ */
++void rtnl_link_unregister(struct rtnl_link_ops *ops)
++{
++ /* Close the race with cleanup_net() */
++ mutex_lock(&net_mutex);
++ rtnl_lock_unregistering_all();
++ __rtnl_link_unregister(ops);
++ rtnl_unlock();
++ mutex_unlock(&net_mutex);
++}
++EXPORT_SYMBOL_GPL(rtnl_link_unregister);
++
++static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
++{
++ struct net_device *master_dev;
++ const struct rtnl_link_ops *ops;
++
++ master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
++ if (!master_dev)
++ return 0;
++ ops = master_dev->rtnl_link_ops;
++ if (!ops || !ops->get_slave_size)
++ return 0;
++ /* IFLA_INFO_SLAVE_DATA + nested data */
++ return nla_total_size(sizeof(struct nlattr)) +
++ ops->get_slave_size(master_dev, dev);
++}
++
++static size_t rtnl_link_get_size(const struct net_device *dev)
++{
++ const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
++ size_t size;
++
++ if (!ops)
++ return 0;
++
++ size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
++ nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
++
++ if (ops->get_size)
++ /* IFLA_INFO_DATA + nested data */
++ size += nla_total_size(sizeof(struct nlattr)) +
++ ops->get_size(dev);
++
++ if (ops->get_xstats_size)
++ /* IFLA_INFO_XSTATS */
++ size += nla_total_size(ops->get_xstats_size(dev));
++
++ size += rtnl_link_get_slave_info_data_size(dev);
++
++ return size;
++}
++
++static LIST_HEAD(rtnl_af_ops);
++
++static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
++{
++ const struct rtnl_af_ops *ops;
++
++ list_for_each_entry(ops, &rtnl_af_ops, list) {
++ if (ops->family == family)
++ return ops;
++ }
++
++ return NULL;
++}
++
++/**
++ * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
++ * @ops: struct rtnl_af_ops * to register
++ *
++ * Returns 0 on success or a negative error code.
++ */
++void rtnl_af_register(struct rtnl_af_ops *ops)
++{
++ rtnl_lock();
++ list_add_tail(&ops->list, &rtnl_af_ops);
++ rtnl_unlock();
++}
++EXPORT_SYMBOL_GPL(rtnl_af_register);
++
++/**
++ * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
++ * @ops: struct rtnl_af_ops * to unregister
++ *
++ * The caller must hold the rtnl_mutex.
++ */
++void __rtnl_af_unregister(struct rtnl_af_ops *ops)
++{
++ list_del(&ops->list);
++}
++EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
++
++/**
++ * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
++ * @ops: struct rtnl_af_ops * to unregister
++ */
++void rtnl_af_unregister(struct rtnl_af_ops *ops)
++{
++ rtnl_lock();
++ __rtnl_af_unregister(ops);
++ rtnl_unlock();
++}
++EXPORT_SYMBOL_GPL(rtnl_af_unregister);
++
++static size_t rtnl_link_get_af_size(const struct net_device *dev)
++{
++ struct rtnl_af_ops *af_ops;
++ size_t size;
++
++ /* IFLA_AF_SPEC */
++ size = nla_total_size(sizeof(struct nlattr));
++
++ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
++ if (af_ops->get_link_af_size) {
++ /* AF_* + nested data */
++ size += nla_total_size(sizeof(struct nlattr)) +
++ af_ops->get_link_af_size(dev);
++ }
++ }
++
++ return size;
++}
++
++static bool rtnl_have_link_slave_info(const struct net_device *dev)
++{
++ struct net_device *master_dev;
++
++ master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
++ if (master_dev && master_dev->rtnl_link_ops)
++ return true;
++ return false;
++}
++
++static int rtnl_link_slave_info_fill(struct sk_buff *skb,
++ const struct net_device *dev)
++{
++ struct net_device *master_dev;
++ const struct rtnl_link_ops *ops;
++ struct nlattr *slave_data;
++ int err;
++
++ master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
++ if (!master_dev)
++ return 0;
++ ops = master_dev->rtnl_link_ops;
++ if (!ops)
++ return 0;
++ if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
++ return -EMSGSIZE;
++ if (ops->fill_slave_info) {
++ slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
++ if (!slave_data)
++ return -EMSGSIZE;
++ err = ops->fill_slave_info(skb, master_dev, dev);
++ if (err < 0)
++ goto err_cancel_slave_data;
++ nla_nest_end(skb, slave_data);
++ }
++ return 0;
++
++err_cancel_slave_data:
++ nla_nest_cancel(skb, slave_data);
++ return err;
++}
++
++static int rtnl_link_info_fill(struct sk_buff *skb,
++ const struct net_device *dev)
++{
++ const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
++ struct nlattr *data;
++ int err;
++
++ if (!ops)
++ return 0;
++ if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
++ return -EMSGSIZE;
++ if (ops->fill_xstats) {
++ err = ops->fill_xstats(skb, dev);
++ if (err < 0)
++ return err;
++ }
++ if (ops->fill_info) {
++ data = nla_nest_start(skb, IFLA_INFO_DATA);
++ if (data == NULL)
++ return -EMSGSIZE;
++ err = ops->fill_info(skb, dev);
++ if (err < 0)
++ goto err_cancel_data;
++ nla_nest_end(skb, data);
++ }
++ return 0;
++
++err_cancel_data:
++ nla_nest_cancel(skb, data);
++ return err;
++}
++
++static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
++{
++ struct nlattr *linkinfo;
++ int err = -EMSGSIZE;
++
++ linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
++ if (linkinfo == NULL)
++ goto out;
++
++ err = rtnl_link_info_fill(skb, dev);
++ if (err < 0)
++ goto err_cancel_link;
++
++ err = rtnl_link_slave_info_fill(skb, dev);
++ if (err < 0)
++ goto err_cancel_link;
++
++ nla_nest_end(skb, linkinfo);
++ return 0;
++
++err_cancel_link:
++ nla_nest_cancel(skb, linkinfo);
++out:
++ return err;
++}
++
++int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
++{
++ struct sock *rtnl = net->rtnl;
++ int err = 0;
++
++ NETLINK_CB(skb).dst_group = group;
++ if (echo)
++ atomic_inc(&skb->users);
++ netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
++ if (echo)
++ err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
++ return err;
++}
++
++int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
++{
++ struct sock *rtnl = net->rtnl;
++
++ return nlmsg_unicast(rtnl, skb, pid);
++}
++EXPORT_SYMBOL(rtnl_unicast);
++
++void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
++ struct nlmsghdr *nlh, gfp_t flags)
++{
++ struct sock *rtnl = net->rtnl;
++ int report = 0;
++
++ if (nlh)
++ report = nlmsg_report(nlh);
++
++ nlmsg_notify(rtnl, skb, pid, group, report, flags);
++}
++EXPORT_SYMBOL(rtnl_notify);
++
++void rtnl_set_sk_err(struct net *net, u32 group, int error)
++{
++ struct sock *rtnl = net->rtnl;
++
++ netlink_set_err(rtnl, 0, group, error);
++}
++EXPORT_SYMBOL(rtnl_set_sk_err);
++
++int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
++{
++ struct nlattr *mx;
++ int i, valid = 0;
++
++ mx = nla_nest_start(skb, RTA_METRICS);
++ if (mx == NULL)
++ return -ENOBUFS;
++
++ for (i = 0; i < RTAX_MAX; i++) {
++ if (metrics[i]) {
++ valid++;
++ if (nla_put_u32(skb, i+1, metrics[i]))
++ goto nla_put_failure;
++ }
++ }
++
++ if (!valid) {
++ nla_nest_cancel(skb, mx);
++ return 0;
++ }
++
++ return nla_nest_end(skb, mx);
++
++nla_put_failure:
++ nla_nest_cancel(skb, mx);
++ return -EMSGSIZE;
++}
++EXPORT_SYMBOL(rtnetlink_put_metrics);
++
++int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
++ long expires, u32 error)
++{
++ struct rta_cacheinfo ci = {
++ .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
++ .rta_used = dst->__use,
++ .rta_clntref = atomic_read(&(dst->__refcnt)),
++ .rta_error = error,
++ .rta_id = id,
++ };
++
++ if (expires) {
++ unsigned long clock;
++
++ clock = jiffies_to_clock_t(abs(expires));
++ clock = min_t(unsigned long, clock, INT_MAX);
++ ci.rta_expires = (expires > 0) ? clock : -clock;
++ }
++ return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
++}
++EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
++
++static void set_operstate(struct net_device *dev, unsigned char transition)
++{
++ unsigned char operstate = dev->operstate;
++
++ switch (transition) {
++ case IF_OPER_UP:
++ if ((operstate == IF_OPER_DORMANT ||
++ operstate == IF_OPER_UNKNOWN) &&
++ !netif_dormant(dev))
++ operstate = IF_OPER_UP;
++ break;
++
++ case IF_OPER_DORMANT:
++ if (operstate == IF_OPER_UP ||
++ operstate == IF_OPER_UNKNOWN)
++ operstate = IF_OPER_DORMANT;
++ break;
++ }
++
++ if (dev->operstate != operstate) {
++ write_lock_bh(&dev_base_lock);
++ dev->operstate = operstate;
++ write_unlock_bh(&dev_base_lock);
++ netdev_state_change(dev);
++ }
++}
++
++static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
++{
++ return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
++ (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
++}
++
++static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
++ const struct ifinfomsg *ifm)
++{
++ unsigned int flags = ifm->ifi_flags;
++
++ /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
++ if (ifm->ifi_change)
++ flags = (flags & ifm->ifi_change) |
++ (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
++
++ return flags;
++}
++
++static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
++ const struct rtnl_link_stats64 *b)
++{
++ a->rx_packets = b->rx_packets;
++ a->tx_packets = b->tx_packets;
++ a->rx_bytes = b->rx_bytes;
++ a->tx_bytes = b->tx_bytes;
++ a->rx_errors = b->rx_errors;
++ a->tx_errors = b->tx_errors;
++ a->rx_dropped = b->rx_dropped;
++ a->tx_dropped = b->tx_dropped;
++
++ a->multicast = b->multicast;
++ a->collisions = b->collisions;
++
++ a->rx_length_errors = b->rx_length_errors;
++ a->rx_over_errors = b->rx_over_errors;
++ a->rx_crc_errors = b->rx_crc_errors;
++ a->rx_frame_errors = b->rx_frame_errors;
++ a->rx_fifo_errors = b->rx_fifo_errors;
++ a->rx_missed_errors = b->rx_missed_errors;
++
++ a->tx_aborted_errors = b->tx_aborted_errors;
++ a->tx_carrier_errors = b->tx_carrier_errors;
++ a->tx_fifo_errors = b->tx_fifo_errors;
++ a->tx_heartbeat_errors = b->tx_heartbeat_errors;
++ a->tx_window_errors = b->tx_window_errors;
++
++ a->rx_compressed = b->rx_compressed;
++ a->tx_compressed = b->tx_compressed;
++}
++
++static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
++{
++ memcpy(v, b, sizeof(*b));
++}
++
++/* All VF info */
++static inline int rtnl_vfinfo_size(const struct net_device *dev,
++ u32 ext_filter_mask)
++{
++ if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
++ (ext_filter_mask & RTEXT_FILTER_VF)) {
++ int num_vfs = dev_num_vf(dev->dev.parent);
++ size_t size = nla_total_size(sizeof(struct nlattr));
++ size += nla_total_size(num_vfs * sizeof(struct nlattr));
++ size += num_vfs *
++ (nla_total_size(sizeof(struct ifla_vf_mac)) +
++ nla_total_size(sizeof(struct ifla_vf_vlan)) +
++ nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
++ nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
++ nla_total_size(sizeof(struct ifla_vf_link_state)));
++ return size;
++ } else
++ return 0;
++}
++
++static size_t rtnl_port_size(const struct net_device *dev,
++ u32 ext_filter_mask)
++{
++ size_t port_size = nla_total_size(4) /* PORT_VF */
++ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
++ + nla_total_size(sizeof(struct ifla_port_vsi))
++ /* PORT_VSI_TYPE */
++ + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
++ + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
++ + nla_total_size(1) /* PROT_VDP_REQUEST */
++ + nla_total_size(2); /* PORT_VDP_RESPONSE */
++ size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
++ size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
++ + port_size;
++ size_t port_self_size = nla_total_size(sizeof(struct nlattr))
++ + port_size;
++
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
++ !(ext_filter_mask & RTEXT_FILTER_VF))
++ return 0;
++ if (dev_num_vf(dev->dev.parent))
++ return port_self_size + vf_ports_size +
++ vf_port_size * dev_num_vf(dev->dev.parent);
++ else
++ return port_self_size;
++}
++
++static noinline size_t if_nlmsg_size(const struct net_device *dev,
++ u32 ext_filter_mask)
++{
++ return NLMSG_ALIGN(sizeof(struct ifinfomsg))
++ + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
++ + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
++ + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
++ + nla_total_size(sizeof(struct rtnl_link_ifmap))
++ + nla_total_size(sizeof(struct rtnl_link_stats))
++ + nla_total_size(sizeof(struct rtnl_link_stats64))
++ + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
++ + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
++ + nla_total_size(4) /* IFLA_TXQLEN */
++ + nla_total_size(4) /* IFLA_WEIGHT */
++ + nla_total_size(4) /* IFLA_MTU */
++ + nla_total_size(4) /* IFLA_LINK */
++ + nla_total_size(4) /* IFLA_MASTER */
++ + nla_total_size(1) /* IFLA_CARRIER */
++ + nla_total_size(4) /* IFLA_PROMISCUITY */
++ + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
++ + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
++ + nla_total_size(1) /* IFLA_OPERSTATE */
++ + nla_total_size(1) /* IFLA_LINKMODE */
++ + nla_total_size(ext_filter_mask
++ & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
++ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
++ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
++ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
++ + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
++ + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
++}
++
++static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
++{
++ struct nlattr *vf_ports;
++ struct nlattr *vf_port;
++ int vf;
++ int err;
++
++ vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
++ if (!vf_ports)
++ return -EMSGSIZE;
++
++ for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
++ vf_port = nla_nest_start(skb, IFLA_VF_PORT);
++ if (!vf_port)
++ goto nla_put_failure;
++ if (nla_put_u32(skb, IFLA_PORT_VF, vf))
++ goto nla_put_failure;
++ err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
++ if (err == -EMSGSIZE)
++ goto nla_put_failure;
++ if (err) {
++ nla_nest_cancel(skb, vf_port);
++ continue;
++ }
++ nla_nest_end(skb, vf_port);
++ }
++
++ nla_nest_end(skb, vf_ports);
++
++ return 0;
++
++nla_put_failure:
++ nla_nest_cancel(skb, vf_ports);
++ return -EMSGSIZE;
++}
++
++static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
++{
++ struct nlattr *port_self;
++ int err;
++
++ port_self = nla_nest_start(skb, IFLA_PORT_SELF);
++ if (!port_self)
++ return -EMSGSIZE;
++
++ err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
++ if (err) {
++ nla_nest_cancel(skb, port_self);
++ return (err == -EMSGSIZE) ? err : 0;
++ }
++
++ nla_nest_end(skb, port_self);
++
++ return 0;
++}
++
++static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
++ u32 ext_filter_mask)
++{
++ int err;
++
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
++ !(ext_filter_mask & RTEXT_FILTER_VF))
++ return 0;
++
++ err = rtnl_port_self_fill(skb, dev);
++ if (err)
++ return err;
++
++ if (dev_num_vf(dev->dev.parent)) {
++ err = rtnl_vf_ports_fill(skb, dev);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
++{
++ int err;
++ struct netdev_phys_port_id ppid;
++
++ err = dev_get_phys_port_id(dev, &ppid);
++ if (err) {
++ if (err == -EOPNOTSUPP)
++ return 0;
++ return err;
++ }
++
++ if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
++ return -EMSGSIZE;
++
++ return 0;
++}
++
++static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
++ int type, u32 pid, u32 seq, u32 change,
++ unsigned int flags, u32 ext_filter_mask)
++{
++ struct ifinfomsg *ifm;
++ struct nlmsghdr *nlh;
++ struct rtnl_link_stats64 temp;
++ const struct rtnl_link_stats64 *stats;
++ struct nlattr *attr, *af_spec;
++ struct rtnl_af_ops *af_ops;
++ struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
++
++ ASSERT_RTNL();
++ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
++ if (nlh == NULL)
++ return -EMSGSIZE;
++
++ ifm = nlmsg_data(nlh);
++ ifm->ifi_family = AF_UNSPEC;
++ ifm->__ifi_pad = 0;
++ ifm->ifi_type = dev->type;
++ ifm->ifi_index = dev->ifindex;
++ ifm->ifi_flags = dev_get_flags(dev);
++ ifm->ifi_change = change;
++
++ if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
++ nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
++ nla_put_u8(skb, IFLA_OPERSTATE,
++ netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
++ nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
++ nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
++ nla_put_u32(skb, IFLA_GROUP, dev->group) ||
++ nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
++ nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
++#ifdef CONFIG_RPS
++ nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
++#endif
++ (dev->ifindex != dev->iflink &&
++ nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
++ (upper_dev &&
++ nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
++ nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
++ (dev->qdisc &&
++ nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
++ (dev->ifalias &&
++ nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
++ goto nla_put_failure;
++
++ if (1) {
++ struct rtnl_link_ifmap map = {
++ .mem_start = dev->mem_start,
++ .mem_end = dev->mem_end,
++ .base_addr = dev->base_addr,
++ .irq = dev->irq,
++ .dma = dev->dma,
++ .port = dev->if_port,
++ };
++ if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
++ goto nla_put_failure;
++ }
++
++ if (dev->addr_len) {
++ if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
++ nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
++ goto nla_put_failure;
++ }
++
++ if (rtnl_phys_port_id_fill(skb, dev))
++ goto nla_put_failure;
++
++ attr = nla_reserve(skb, IFLA_STATS,
++ sizeof(struct rtnl_link_stats));
++ if (attr == NULL)
++ goto nla_put_failure;
++
++ stats = dev_get_stats(dev, &temp);
++ copy_rtnl_link_stats(nla_data(attr), stats);
++
++ attr = nla_reserve(skb, IFLA_STATS64,
++ sizeof(struct rtnl_link_stats64));
++ if (attr == NULL)
++ goto nla_put_failure;
++ copy_rtnl_link_stats64(nla_data(attr), stats);
++
++ if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
++ nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
++ goto nla_put_failure;
++
++ if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
++ && (ext_filter_mask & RTEXT_FILTER_VF)) {
++ int i;
++
++ struct nlattr *vfinfo, *vf;
++ int num_vfs = dev_num_vf(dev->dev.parent);
++
++ vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
++ if (!vfinfo)
++ goto nla_put_failure;
++ for (i = 0; i < num_vfs; i++) {
++ struct ifla_vf_info ivi;
++ struct ifla_vf_mac vf_mac;
++ struct ifla_vf_vlan vf_vlan;
++ struct ifla_vf_tx_rate vf_tx_rate;
++ struct ifla_vf_spoofchk vf_spoofchk;
++ struct ifla_vf_link_state vf_linkstate;
++
++ /*
++ * Not all SR-IOV capable drivers support the
++ * spoofcheck query. Preset to -1 so the user
++ * space tool can detect that the driver didn't
++ * report anything.
++ */
++ ivi.spoofchk = -1;
++ memset(ivi.mac, 0, sizeof(ivi.mac));
++ /* The default value for VF link state is "auto"
++ * IFLA_VF_LINK_STATE_AUTO which equals zero
++ */
++ ivi.linkstate = 0;
++ if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
++ break;
++ vf_mac.vf =
++ vf_vlan.vf =
++ vf_tx_rate.vf =
++ vf_spoofchk.vf =
++ vf_linkstate.vf = ivi.vf;
++
++ memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
++ vf_vlan.vlan = ivi.vlan;
++ vf_vlan.qos = ivi.qos;
++ vf_tx_rate.rate = ivi.tx_rate;
++ vf_spoofchk.setting = ivi.spoofchk;
++ vf_linkstate.link_state = ivi.linkstate;
++ vf = nla_nest_start(skb, IFLA_VF_INFO);
++ if (!vf) {
++ nla_nest_cancel(skb, vfinfo);
++ goto nla_put_failure;
++ }
++ if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
++ nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
++ nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
++ &vf_tx_rate) ||
++ nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
++ &vf_spoofchk) ||
++ nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
++ &vf_linkstate))
++ goto nla_put_failure;
++ nla_nest_end(skb, vf);
++ }
++ nla_nest_end(skb, vfinfo);
++ }
++
++ if (rtnl_port_fill(skb, dev, ext_filter_mask))
++ goto nla_put_failure;
++
++ if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
++ if (rtnl_link_fill(skb, dev) < 0)
++ goto nla_put_failure;
++ }
++
++ if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
++ goto nla_put_failure;
++
++ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
++ if (af_ops->fill_link_af) {
++ struct nlattr *af;
++ int err;
++
++ if (!(af = nla_nest_start(skb, af_ops->family)))
++ goto nla_put_failure;
++
++ err = af_ops->fill_link_af(skb, dev);
++
++ /*
++ * Caller may return ENODATA to indicate that there
++ * was no data to be dumped. This is not an error, it
++ * means we should trim the attribute header and
++ * continue.
++ */
++ if (err == -ENODATA)
++ nla_nest_cancel(skb, af);
++ else if (err < 0)
++ goto nla_put_failure;
++
++ nla_nest_end(skb, af);
++ }
++ }
++
++ nla_nest_end(skb, af_spec);
++
++ return nlmsg_end(skb, nlh);
++
++nla_put_failure:
++ nlmsg_cancel(skb, nlh);
++ return -EMSGSIZE;
++}
++
++static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
++ [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
++ [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
++ [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
++ [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
++ [IFLA_MTU] = { .type = NLA_U32 },
++ [IFLA_LINK] = { .type = NLA_U32 },
++ [IFLA_MASTER] = { .type = NLA_U32 },
++ [IFLA_CARRIER] = { .type = NLA_U8 },
++ [IFLA_TXQLEN] = { .type = NLA_U32 },
++ [IFLA_WEIGHT] = { .type = NLA_U32 },
++ [IFLA_OPERSTATE] = { .type = NLA_U8 },
++ [IFLA_LINKMODE] = { .type = NLA_U8 },
++ [IFLA_LINKINFO] = { .type = NLA_NESTED },
++ [IFLA_NET_NS_PID] = { .type = NLA_U32 },
++ [IFLA_NET_NS_FD] = { .type = NLA_U32 },
++ [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
++ [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
++ [IFLA_VF_PORTS] = { .type = NLA_NESTED },
++ [IFLA_PORT_SELF] = { .type = NLA_NESTED },
++ [IFLA_AF_SPEC] = { .type = NLA_NESTED },
++ [IFLA_EXT_MASK] = { .type = NLA_U32 },
++ [IFLA_PROMISCUITY] = { .type = NLA_U32 },
++ [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
++ [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
++ [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
++};
++
++static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
++ [IFLA_INFO_KIND] = { .type = NLA_STRING },
++ [IFLA_INFO_DATA] = { .type = NLA_NESTED },
++ [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
++ [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
++};
++
++static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
++ [IFLA_VF_INFO] = { .type = NLA_NESTED },
++};
++
++static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
++ [IFLA_VF_MAC] = { .type = NLA_BINARY,
++ .len = sizeof(struct ifla_vf_mac) },
++ [IFLA_VF_VLAN] = { .type = NLA_BINARY,
++ .len = sizeof(struct ifla_vf_vlan) },
++ [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
++ .len = sizeof(struct ifla_vf_tx_rate) },
++ [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
++ .len = sizeof(struct ifla_vf_spoofchk) },
++};
++
++static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
++ [IFLA_PORT_VF] = { .type = NLA_U32 },
++ [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
++ .len = PORT_PROFILE_MAX },
++ [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
++ .len = sizeof(struct ifla_port_vsi)},
++ [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
++ .len = PORT_UUID_MAX },
++ [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
++ .len = PORT_UUID_MAX },
++ [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
++ [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
++};
++
++static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
++{
++ struct net *net = sock_net(skb->sk);
++ int h, s_h;
++ int idx = 0, s_idx;
++ struct net_device *dev;
++ struct hlist_head *head;
++ struct nlattr *tb[IFLA_MAX+1];
++ u32 ext_filter_mask = 0;
++
++ s_h = cb->args[0];
++ s_idx = cb->args[1];
++
++ rcu_read_lock();
++ cb->seq = net->dev_base_seq;
++
++ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
++ ifla_policy) >= 0) {
++
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++ }
++
++ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
++ idx = 0;
++ head = &net->dev_index_head[h];
++ hlist_for_each_entry_rcu(dev, head, index_hlist) {
++ if (idx < s_idx)
++ goto cont;
++ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
++ NETLINK_CB(cb->skb).portid,
++ cb->nlh->nlmsg_seq, 0,
++ NLM_F_MULTI,
++ ext_filter_mask) <= 0)
++ goto out;
++
++ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
++cont:
++ idx++;
++ }
++ }
++out:
++ rcu_read_unlock();
++ cb->args[1] = idx;
++ cb->args[0] = h;
++
++ return skb->len;
++}
++
++int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
++{
++ return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);
++}
++EXPORT_SYMBOL(rtnl_nla_parse_ifla);
++
++struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
++{
++ struct net *net;
++ /* Examine the link attributes and figure out which
++ * network namespace we are talking about.
++ */
++ if (tb[IFLA_NET_NS_PID])
++ net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
++ else if (tb[IFLA_NET_NS_FD])
++ net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
++ else
++ net = get_net(src_net);
++ return net;
++}
++EXPORT_SYMBOL(rtnl_link_get_net);
++
++static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
++{
++ if (dev) {
++ if (tb[IFLA_ADDRESS] &&
++ nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
++ return -EINVAL;
++
++ if (tb[IFLA_BROADCAST] &&
++ nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
++ return -EINVAL;
++ }
++
++ if (tb[IFLA_AF_SPEC]) {
++ struct nlattr *af;
++ int rem, err;
++
++ nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
++ const struct rtnl_af_ops *af_ops;
++
++ if (!(af_ops = rtnl_af_lookup(nla_type(af))))
++ return -EAFNOSUPPORT;
++
++ if (!af_ops->set_link_af)
++ return -EOPNOTSUPP;
++
++ if (af_ops->validate_link_af) {
++ err = af_ops->validate_link_af(dev, af);
++ if (err < 0)
++ return err;
++ }
++ }
++ }
++
++ return 0;
++}
++
++static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
++{
++ int rem, err = -EINVAL;
++ struct nlattr *vf;
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ nla_for_each_nested(vf, attr, rem) {
++ switch (nla_type(vf)) {
++ case IFLA_VF_MAC: {
++ struct ifla_vf_mac *ivm;
++ ivm = nla_data(vf);
++ err = -EOPNOTSUPP;
++ if (ops->ndo_set_vf_mac)
++ err = ops->ndo_set_vf_mac(dev, ivm->vf,
++ ivm->mac);
++ break;
++ }
++ case IFLA_VF_VLAN: {
++ struct ifla_vf_vlan *ivv;
++ ivv = nla_data(vf);
++ err = -EOPNOTSUPP;
++ if (ops->ndo_set_vf_vlan)
++ err = ops->ndo_set_vf_vlan(dev, ivv->vf,
++ ivv->vlan,
++ ivv->qos);
++ break;
++ }
++ case IFLA_VF_TX_RATE: {
++ struct ifla_vf_tx_rate *ivt;
++ ivt = nla_data(vf);
++ err = -EOPNOTSUPP;
++ if (ops->ndo_set_vf_tx_rate)
++ err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
++ ivt->rate);
++ break;
++ }
++ case IFLA_VF_SPOOFCHK: {
++ struct ifla_vf_spoofchk *ivs;
++ ivs = nla_data(vf);
++ err = -EOPNOTSUPP;
++ if (ops->ndo_set_vf_spoofchk)
++ err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
++ ivs->setting);
++ break;
++ }
++ case IFLA_VF_LINK_STATE: {
++ struct ifla_vf_link_state *ivl;
++ ivl = nla_data(vf);
++ err = -EOPNOTSUPP;
++ if (ops->ndo_set_vf_link_state)
++ err = ops->ndo_set_vf_link_state(dev, ivl->vf,
++ ivl->link_state);
++ break;
++ }
++ default:
++ err = -EINVAL;
++ break;
++ }
++ if (err)
++ break;
++ }
++ return err;
++}
++
++static int do_set_master(struct net_device *dev, int ifindex)
++{
++ struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
++ const struct net_device_ops *ops;
++ int err;
++
++ if (upper_dev) {
++ if (upper_dev->ifindex == ifindex)
++ return 0;
++ ops = upper_dev->netdev_ops;
++ if (ops->ndo_del_slave) {
++ err = ops->ndo_del_slave(upper_dev, dev);
++ if (err)
++ return err;
++ } else {
++ return -EOPNOTSUPP;
++ }
++ }
++
++ if (ifindex) {
++ upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
++ if (!upper_dev)
++ return -EINVAL;
++ ops = upper_dev->netdev_ops;
++ if (ops->ndo_add_slave) {
++ err = ops->ndo_add_slave(upper_dev, dev);
++ if (err)
++ return err;
++ } else {
++ return -EOPNOTSUPP;
++ }
++ }
++ return 0;
++}
++
++static int do_setlink(const struct sk_buff *skb,
++ struct net_device *dev, struct ifinfomsg *ifm,
++ struct nlattr **tb, char *ifname, int modified)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++ int err;
++
++ if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
++ struct net *net = rtnl_link_get_net(dev_net(dev), tb);
++ if (IS_ERR(net)) {
++ err = PTR_ERR(net);
++ goto errout;
++ }
++ if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
++ put_net(net);
++ err = -EPERM;
++ goto errout;
++ }
++ err = dev_change_net_namespace(dev, net, ifname);
++ put_net(net);
++ if (err)
++ goto errout;
++ modified = 1;
++ }
++
++ if (tb[IFLA_MAP]) {
++ struct rtnl_link_ifmap *u_map;
++ struct ifmap k_map;
++
++ if (!ops->ndo_set_config) {
++ err = -EOPNOTSUPP;
++ goto errout;
++ }
++
++ if (!netif_device_present(dev)) {
++ err = -ENODEV;
++ goto errout;
++ }
++
++ u_map = nla_data(tb[IFLA_MAP]);
++ k_map.mem_start = (unsigned long) u_map->mem_start;
++ k_map.mem_end = (unsigned long) u_map->mem_end;
++ k_map.base_addr = (unsigned short) u_map->base_addr;
++ k_map.irq = (unsigned char) u_map->irq;
++ k_map.dma = (unsigned char) u_map->dma;
++ k_map.port = (unsigned char) u_map->port;
++
++ err = ops->ndo_set_config(dev, &k_map);
++ if (err < 0)
++ goto errout;
++
++ modified = 1;
++ }
++
++ if (tb[IFLA_ADDRESS]) {
++ struct sockaddr *sa;
++ int len;
++
++ len = sizeof(sa_family_t) + dev->addr_len;
++ sa = kmalloc(len, GFP_KERNEL);
++ if (!sa) {
++ err = -ENOMEM;
++ goto errout;
++ }
++ sa->sa_family = dev->type;
++ memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
++ dev->addr_len);
++ err = dev_set_mac_address(dev, sa);
++ kfree(sa);
++ if (err)
++ goto errout;
++ modified = 1;
++ }
++
++ if (tb[IFLA_MTU]) {
++ err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
++ if (err < 0)
++ goto errout;
++ modified = 1;
++ }
++
++ if (tb[IFLA_GROUP]) {
++ dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
++ modified = 1;
++ }
++
++ /*
++ * Interface selected by interface index but interface
++ * name provided implies that a name change has been
++ * requested.
++ */
++ if (ifm->ifi_index > 0 && ifname[0]) {
++ err = dev_change_name(dev, ifname);
++ if (err < 0)
++ goto errout;
++ modified = 1;
++ }
++
++ if (tb[IFLA_IFALIAS]) {
++ err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
++ nla_len(tb[IFLA_IFALIAS]));
++ if (err < 0)
++ goto errout;
++ modified = 1;
++ }
++
++ if (tb[IFLA_BROADCAST]) {
++ nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
++ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++ }
++
++ if (ifm->ifi_flags || ifm->ifi_change) {
++ err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
++ if (err < 0)
++ goto errout;
++ }
++
++ if (tb[IFLA_MASTER]) {
++ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
++ if (err)
++ goto errout;
++ modified = 1;
++ }
++
++ if (tb[IFLA_CARRIER]) {
++ err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
++ if (err)
++ goto errout;
++ modified = 1;
++ }
++
++ if (tb[IFLA_TXQLEN])
++ dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
++
++ if (tb[IFLA_OPERSTATE])
++ set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
++
++ if (tb[IFLA_LINKMODE]) {
++ write_lock_bh(&dev_base_lock);
++ dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
++ write_unlock_bh(&dev_base_lock);
++ }
++
++ if (tb[IFLA_VFINFO_LIST]) {
++ struct nlattr *attr;
++ int rem;
++ nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
++ if (nla_type(attr) != IFLA_VF_INFO) {
++ err = -EINVAL;
++ goto errout;
++ }
++ err = do_setvfinfo(dev, attr);
++ if (err < 0)
++ goto errout;
++ modified = 1;
++ }
++ }
++ err = 0;
++
++ if (tb[IFLA_VF_PORTS]) {
++ struct nlattr *port[IFLA_PORT_MAX+1];
++ struct nlattr *attr;
++ int vf;
++ int rem;
++
++ err = -EOPNOTSUPP;
++ if (!ops->ndo_set_vf_port)
++ goto errout;
++
++ nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
++ if (nla_type(attr) != IFLA_VF_PORT)
++ continue;
++ err = nla_parse_nested(port, IFLA_PORT_MAX,
++ attr, ifla_port_policy);
++ if (err < 0)
++ goto errout;
++ if (!port[IFLA_PORT_VF]) {
++ err = -EOPNOTSUPP;
++ goto errout;
++ }
++ vf = nla_get_u32(port[IFLA_PORT_VF]);
++ err = ops->ndo_set_vf_port(dev, vf, port);
++ if (err < 0)
++ goto errout;
++ modified = 1;
++ }
++ }
++ err = 0;
++
++ if (tb[IFLA_PORT_SELF]) {
++ struct nlattr *port[IFLA_PORT_MAX+1];
++
++ err = nla_parse_nested(port, IFLA_PORT_MAX,
++ tb[IFLA_PORT_SELF], ifla_port_policy);
++ if (err < 0)
++ goto errout;
++
++ err = -EOPNOTSUPP;
++ if (ops->ndo_set_vf_port)
++ err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
++ if (err < 0)
++ goto errout;
++ modified = 1;
++ }
++
++ if (tb[IFLA_AF_SPEC]) {
++ struct nlattr *af;
++ int rem;
++
++ nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
++ const struct rtnl_af_ops *af_ops;
++
++ if (!(af_ops = rtnl_af_lookup(nla_type(af))))
++ BUG();
++
++ err = af_ops->set_link_af(dev, af);
++ if (err < 0)
++ goto errout;
++
++ modified = 1;
++ }
++ }
++ err = 0;
++
++errout:
++ if (err < 0 && modified)
++ net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
++ dev->name);
++
++ return err;
++}
++
++static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ struct ifinfomsg *ifm;
++ struct net_device *dev;
++ int err;
++ struct nlattr *tb[IFLA_MAX+1];
++ char ifname[IFNAMSIZ];
++
++ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
++ if (err < 0)
++ goto errout;
++
++ if (tb[IFLA_IFNAME])
++ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
++ else
++ ifname[0] = '\0';
++
++ err = -EINVAL;
++ ifm = nlmsg_data(nlh);
++ if (ifm->ifi_index > 0)
++ dev = __dev_get_by_index(net, ifm->ifi_index);
++ else if (tb[IFLA_IFNAME])
++ dev = __dev_get_by_name(net, ifname);
++ else
++ goto errout;
++
++ if (dev == NULL) {
++ err = -ENODEV;
++ goto errout;
++ }
++
++ err = validate_linkmsg(dev, tb);
++ if (err < 0)
++ goto errout;
++
++ err = do_setlink(skb, dev, ifm, tb, ifname, 0);
++errout:
++ return err;
++}
++
++static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ const struct rtnl_link_ops *ops;
++ struct net_device *dev;
++ struct ifinfomsg *ifm;
++ char ifname[IFNAMSIZ];
++ struct nlattr *tb[IFLA_MAX+1];
++ int err;
++ LIST_HEAD(list_kill);
++
++ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
++ if (err < 0)
++ return err;
++
++ if (tb[IFLA_IFNAME])
++ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
++
++ ifm = nlmsg_data(nlh);
++ if (ifm->ifi_index > 0)
++ dev = __dev_get_by_index(net, ifm->ifi_index);
++ else if (tb[IFLA_IFNAME])
++ dev = __dev_get_by_name(net, ifname);
++ else
++ return -EINVAL;
++
++ if (!dev)
++ return -ENODEV;
++
++ ops = dev->rtnl_link_ops;
++ if (!ops)
++ return -EOPNOTSUPP;
++
++ ops->dellink(dev, &list_kill);
++ unregister_netdevice_many(&list_kill);
++ return 0;
++}
++
++int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
++{
++ unsigned int old_flags;
++ int err;
++
++ old_flags = dev->flags;
++ if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
++ err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
++ if (err < 0)
++ return err;
++ }
++
++ dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
++
++ __dev_notify_flags(dev, old_flags, ~0U);
++ return 0;
++}
++EXPORT_SYMBOL(rtnl_configure_link);
++
++struct net_device *rtnl_create_link(struct net *net,
++ char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
++{
++ int err;
++ struct net_device *dev;
++ unsigned int num_tx_queues = 1;
++ unsigned int num_rx_queues = 1;
++
++ if (tb[IFLA_NUM_TX_QUEUES])
++ num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
++ else if (ops->get_num_tx_queues)
++ num_tx_queues = ops->get_num_tx_queues();
++
++ if (tb[IFLA_NUM_RX_QUEUES])
++ num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
++ else if (ops->get_num_rx_queues)
++ num_rx_queues = ops->get_num_rx_queues();
++
++ err = -ENOMEM;
++ dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
++ num_tx_queues, num_rx_queues);
++ if (!dev)
++ goto err;
++
++ dev_net_set(dev, net);
++ dev->rtnl_link_ops = ops;
++ dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
++
++ if (tb[IFLA_MTU])
++ dev->mtu = nla_get_u32(tb[IFLA_MTU]);
++ if (tb[IFLA_ADDRESS]) {
++ memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
++ nla_len(tb[IFLA_ADDRESS]));
++ dev->addr_assign_type = NET_ADDR_SET;
++ }
++ if (tb[IFLA_BROADCAST])
++ memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
++ nla_len(tb[IFLA_BROADCAST]));
++ if (tb[IFLA_TXQLEN])
++ dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
++ if (tb[IFLA_OPERSTATE])
++ set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
++ if (tb[IFLA_LINKMODE])
++ dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
++ if (tb[IFLA_GROUP])
++ dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
++
++ return dev;
++
++err:
++ return ERR_PTR(err);
++}
++EXPORT_SYMBOL(rtnl_create_link);
++
++static int rtnl_group_changelink(const struct sk_buff *skb,
++ struct net *net, int group,
++ struct ifinfomsg *ifm,
++ struct nlattr **tb)
++{
++ struct net_device *dev;
++ int err;
++
++ for_each_netdev(net, dev) {
++ if (dev->group == group) {
++ err = do_setlink(skb, dev, ifm, tb, NULL, 0);
++ if (err < 0)
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ const struct rtnl_link_ops *ops;
++ const struct rtnl_link_ops *m_ops = NULL;
++ struct net_device *dev;
++ struct net_device *master_dev = NULL;
++ struct ifinfomsg *ifm;
++ char kind[MODULE_NAME_LEN];
++ char ifname[IFNAMSIZ];
++ struct nlattr *tb[IFLA_MAX+1];
++ struct nlattr *linkinfo[IFLA_INFO_MAX+1];
++ int err;
++
++#ifdef CONFIG_MODULES
++replay:
++#endif
++ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
++ if (err < 0)
++ return err;
++
++ if (tb[IFLA_IFNAME])
++ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
++ else
++ ifname[0] = '\0';
++
++ ifm = nlmsg_data(nlh);
++ if (ifm->ifi_index > 0)
++ dev = __dev_get_by_index(net, ifm->ifi_index);
++ else {
++ if (ifname[0])
++ dev = __dev_get_by_name(net, ifname);
++ else
++ dev = NULL;
++ }
++
++ if (dev) {
++ master_dev = netdev_master_upper_dev_get(dev);
++ if (master_dev)
++ m_ops = master_dev->rtnl_link_ops;
++ }
++
++ err = validate_linkmsg(dev, tb);
++ if (err < 0)
++ return err;
++
++ if (tb[IFLA_LINKINFO]) {
++ err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
++ tb[IFLA_LINKINFO], ifla_info_policy);
++ if (err < 0)
++ return err;
++ } else
++ memset(linkinfo, 0, sizeof(linkinfo));
++
++ if (linkinfo[IFLA_INFO_KIND]) {
++ nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
++ ops = rtnl_link_ops_get(kind);
++ } else {
++ kind[0] = '\0';
++ ops = NULL;
++ }
++
++ if (1) {
++ struct nlattr *attr[ops ? ops->maxtype + 1 : 0];
++ struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0];
++ struct nlattr **data = NULL;
++ struct nlattr **slave_data = NULL;
++ struct net *dest_net;
++
++ if (ops) {
++ if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
++ err = nla_parse_nested(attr, ops->maxtype,
++ linkinfo[IFLA_INFO_DATA],
++ ops->policy);
++ if (err < 0)
++ return err;
++ data = attr;
++ }
++ if (ops->validate) {
++ err = ops->validate(tb, data);
++ if (err < 0)
++ return err;
++ }
++ }
++
++ if (m_ops) {
++ if (m_ops->slave_maxtype &&
++ linkinfo[IFLA_INFO_SLAVE_DATA]) {
++ err = nla_parse_nested(slave_attr,
++ m_ops->slave_maxtype,
++ linkinfo[IFLA_INFO_SLAVE_DATA],
++ m_ops->slave_policy);
++ if (err < 0)
++ return err;
++ slave_data = slave_attr;
++ }
++ if (m_ops->slave_validate) {
++ err = m_ops->slave_validate(tb, slave_data);
++ if (err < 0)
++ return err;
++ }
++ }
++
++ if (dev) {
++ int modified = 0;
++
++ if (nlh->nlmsg_flags & NLM_F_EXCL)
++ return -EEXIST;
++ if (nlh->nlmsg_flags & NLM_F_REPLACE)
++ return -EOPNOTSUPP;
++
++ if (linkinfo[IFLA_INFO_DATA]) {
++ if (!ops || ops != dev->rtnl_link_ops ||
++ !ops->changelink)
++ return -EOPNOTSUPP;
++
++ err = ops->changelink(dev, tb, data);
++ if (err < 0)
++ return err;
++ modified = 1;
++ }
++
++ if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
++ if (!m_ops || !m_ops->slave_changelink)
++ return -EOPNOTSUPP;
++
++ err = m_ops->slave_changelink(master_dev, dev,
++ tb, slave_data);
++ if (err < 0)
++ return err;
++ modified = 1;
++ }
++
++ return do_setlink(skb, dev, ifm, tb, ifname, modified);
++ }
++
++ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
++ if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
++ return rtnl_group_changelink(skb, net,
++ nla_get_u32(tb[IFLA_GROUP]),
++ ifm, tb);
++ return -ENODEV;
++ }
++
++ if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
++ return -EOPNOTSUPP;
++
++ if (!ops) {
++#ifdef CONFIG_MODULES
++ if (kind[0]) {
++ __rtnl_unlock();
++ request_module("rtnl-link-%s", kind);
++ rtnl_lock();
++ ops = rtnl_link_ops_get(kind);
++ if (ops)
++ goto replay;
++ }
++#endif
++ return -EOPNOTSUPP;
++ }
++
++ if (!ifname[0])
++ snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
++
++ dest_net = rtnl_link_get_net(net, tb);
++ if (IS_ERR(dest_net))
++ return PTR_ERR(dest_net);
++
++ dev = rtnl_create_link(dest_net, ifname, ops, tb);
++ if (IS_ERR(dev)) {
++ err = PTR_ERR(dev);
++ goto out;
++ }
++
++ dev->ifindex = ifm->ifi_index;
++
++ if (ops->newlink) {
++ err = ops->newlink(net, dev, tb, data);
++ /* Drivers should call free_netdev() in ->destructor
++ * and unregister it on failure so that device could be
++ * finally freed in rtnl_unlock.
++ */
++ if (err < 0)
++ goto out;
++ } else {
++ err = register_netdevice(dev);
++ if (err < 0) {
++ free_netdev(dev);
++ goto out;
++ }
++ }
++ err = rtnl_configure_link(dev, ifm);
++ if (err < 0)
++ unregister_netdevice(dev);
++out:
++ put_net(dest_net);
++ return err;
++ }
++}
++
++static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ struct ifinfomsg *ifm;
++ char ifname[IFNAMSIZ];
++ struct nlattr *tb[IFLA_MAX+1];
++ struct net_device *dev = NULL;
++ struct sk_buff *nskb;
++ int err;
++ u32 ext_filter_mask = 0;
++
++ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
++ if (err < 0)
++ return err;
++
++ if (tb[IFLA_IFNAME])
++ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
++
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++
++ ifm = nlmsg_data(nlh);
++ if (ifm->ifi_index > 0)
++ dev = __dev_get_by_index(net, ifm->ifi_index);
++ else if (tb[IFLA_IFNAME])
++ dev = __dev_get_by_name(net, ifname);
++ else
++ return -EINVAL;
++
++ if (dev == NULL)
++ return -ENODEV;
++
++ nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
++ if (nskb == NULL)
++ return -ENOBUFS;
++
++ err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
++ nlh->nlmsg_seq, 0, 0, ext_filter_mask);
++ if (err < 0) {
++ /* -EMSGSIZE implies BUG in if_nlmsg_size */
++ WARN_ON(err == -EMSGSIZE);
++ kfree_skb(nskb);
++ } else
++ err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
++
++ return err;
++}
++
++static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ struct net_device *dev;
++ struct nlattr *tb[IFLA_MAX+1];
++ u32 ext_filter_mask = 0;
++ u16 min_ifinfo_dump_size = 0;
++ int hdrlen;
++
++ /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
++ hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
++ sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
++
++ if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++ }
++
++ if (!ext_filter_mask)
++ return NLMSG_GOODSIZE;
++ /*
++ * traverse the list of net devices and compute the minimum
++ * buffer size based upon the filter mask.
++ */
++ list_for_each_entry(dev, &net->dev_base_head, dev_list) {
++ min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
++ if_nlmsg_size(dev,
++ ext_filter_mask));
++ }
++
++ return min_ifinfo_dump_size;
++}
++
++static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
++{
++ int idx;
++ int s_idx = cb->family;
++
++ if (s_idx == 0)
++ s_idx = 1;
++ for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
++ int type = cb->nlh->nlmsg_type-RTM_BASE;
++ if (idx < s_idx || idx == PF_PACKET)
++ continue;
++ if (rtnl_msg_handlers[idx] == NULL ||
++ rtnl_msg_handlers[idx][type].dumpit == NULL)
++ continue;
++ if (idx > s_idx) {
++ memset(&cb->args[0], 0, sizeof(cb->args));
++ cb->prev_seq = 0;
++ cb->seq = 0;
++ }
++ if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
++ break;
++ }
++ cb->family = idx;
++
++ return skb->len;
++}
++
++void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
++ gfp_t flags)
++{
++ struct net *net = dev_net(dev);
++ struct sk_buff *skb;
++ int err = -ENOBUFS;
++ size_t if_info_size;
++
++ skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
++ if (skb == NULL)
++ goto errout;
++
++ err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
++ if (err < 0) {
++ /* -EMSGSIZE implies BUG in if_nlmsg_size() */
++ WARN_ON(err == -EMSGSIZE);
++ kfree_skb(skb);
++ goto errout;
++ }
++ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
++ return;
++errout:
++ if (err < 0)
++ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
++}
++EXPORT_SYMBOL(rtmsg_ifinfo);
++
++static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
++ struct net_device *dev,
++ u8 *addr, u32 pid, u32 seq,
++ int type, unsigned int flags,
++ int nlflags)
++{
++ struct nlmsghdr *nlh;
++ struct ndmsg *ndm;
++
++ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
++ if (!nlh)
++ return -EMSGSIZE;
++
++ ndm = nlmsg_data(nlh);
++ ndm->ndm_family = AF_BRIDGE;
++ ndm->ndm_pad1 = 0;
++ ndm->ndm_pad2 = 0;
++ ndm->ndm_flags = flags;
++ ndm->ndm_type = 0;
++ ndm->ndm_ifindex = dev->ifindex;
++ ndm->ndm_state = NUD_PERMANENT;
++
++ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
++ goto nla_put_failure;
++
++ return nlmsg_end(skb, nlh);
++
++nla_put_failure:
++ nlmsg_cancel(skb, nlh);
++ return -EMSGSIZE;
++}
++
++static inline size_t rtnl_fdb_nlmsg_size(void)
++{
++ return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
++}
++
++static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
++{
++ struct net *net = dev_net(dev);
++ struct sk_buff *skb;
++ int err = -ENOBUFS;
++
++ skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
++ if (!skb)
++ goto errout;
++
++ err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
++ if (err < 0) {
++ kfree_skb(skb);
++ goto errout;
++ }
++
++ rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
++ return;
++errout:
++ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
++}
++
++/**
++ * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
++ */
++int ndo_dflt_fdb_add(struct ndmsg *ndm,
++ struct nlattr *tb[],
++ struct net_device *dev,
++ const unsigned char *addr,
++ u16 flags)
++{
++ int err = -EINVAL;
++
++ /* If aging addresses are supported device will need to
++ * implement its own handler for this.
++ */
++ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
++ pr_info("%s: FDB only supports static addresses\n", dev->name);
++ return err;
++ }
++
++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
++ err = dev_uc_add_excl(dev, addr);
++ else if (is_multicast_ether_addr(addr))
++ err = dev_mc_add_excl(dev, addr);
++
++ /* Only return duplicate errors if NLM_F_EXCL is set */
++ if (err == -EEXIST && !(flags & NLM_F_EXCL))
++ err = 0;
++
++ return err;
++}
++EXPORT_SYMBOL(ndo_dflt_fdb_add);
++
++static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ struct ndmsg *ndm;
++ struct nlattr *tb[NDA_MAX+1];
++ struct net_device *dev;
++ u8 *addr;
++ int err;
++
++ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
++ if (err < 0)
++ return err;
++
++ ndm = nlmsg_data(nlh);
++ if (ndm->ndm_ifindex == 0) {
++ pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
++ return -EINVAL;
++ }
++
++ dev = __dev_get_by_index(net, ndm->ndm_ifindex);
++ if (dev == NULL) {
++ pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
++ return -ENODEV;
++ }
++
++ if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
++ pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
++ return -EINVAL;
++ }
++
++ addr = nla_data(tb[NDA_LLADDR]);
++
++ err = -EOPNOTSUPP;
++
++ /* Support fdb on master device the net/bridge default case */
++ if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
++ (dev->priv_flags & IFF_BRIDGE_PORT)) {
++ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
++ const struct net_device_ops *ops = br_dev->netdev_ops;
++
++ err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags);
++ if (err)
++ goto out;
++ else
++ ndm->ndm_flags &= ~NTF_MASTER;
++ }
++
++ /* Embedded bridge, macvlan, and any other device support */
++ if ((ndm->ndm_flags & NTF_SELF)) {
++ if (dev->netdev_ops->ndo_fdb_add)
++ err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
++ nlh->nlmsg_flags);
++ else
++ err = ndo_dflt_fdb_add(ndm, tb, dev, addr,
++ nlh->nlmsg_flags);
++
++ if (!err) {
++ rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
++ ndm->ndm_flags &= ~NTF_SELF;
++ }
++ }
++out:
++ return err;
++}
++
++/**
++ * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
++ */
++int ndo_dflt_fdb_del(struct ndmsg *ndm,
++ struct nlattr *tb[],
++ struct net_device *dev,
++ const unsigned char *addr)
++{
++ int err = -EOPNOTSUPP;
++
++ /* If aging addresses are supported device will need to
++ * implement its own handler for this.
++ */
++ if (!(ndm->ndm_state & NUD_PERMANENT)) {
++ pr_info("%s: FDB only supports static addresses\n", dev->name);
++ return -EINVAL;
++ }
++
++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
++ err = dev_uc_del(dev, addr);
++ else if (is_multicast_ether_addr(addr))
++ err = dev_mc_del(dev, addr);
++ else
++ err = -EINVAL;
++
++ return err;
++}
++EXPORT_SYMBOL(ndo_dflt_fdb_del);
++
++static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ struct ndmsg *ndm;
++ struct nlattr *tb[NDA_MAX+1];
++ struct net_device *dev;
++ int err = -EINVAL;
++ __u8 *addr;
++
++ if (!netlink_capable(skb, CAP_NET_ADMIN))
++ return -EPERM;
++
++ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
++ if (err < 0)
++ return err;
++
++ ndm = nlmsg_data(nlh);
++ if (ndm->ndm_ifindex == 0) {
++ pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
++ return -EINVAL;
++ }
++
++ dev = __dev_get_by_index(net, ndm->ndm_ifindex);
++ if (dev == NULL) {
++ pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
++ return -ENODEV;
++ }
++
++ if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
++ pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
++ return -EINVAL;
++ }
++
++ addr = nla_data(tb[NDA_LLADDR]);
++
++ err = -EOPNOTSUPP;
++
++ /* Support fdb on master device the net/bridge default case */
++ if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
++ (dev->priv_flags & IFF_BRIDGE_PORT)) {
++ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
++ const struct net_device_ops *ops = br_dev->netdev_ops;
++
++ if (ops->ndo_fdb_del)
++ err = ops->ndo_fdb_del(ndm, tb, dev, addr);
++
++ if (err)
++ goto out;
++ else
++ ndm->ndm_flags &= ~NTF_MASTER;
++ }
++
++ /* Embedded bridge, macvlan, and any other device support */
++ if (ndm->ndm_flags & NTF_SELF) {
++ if (dev->netdev_ops->ndo_fdb_del)
++ err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
++ else
++ err = ndo_dflt_fdb_del(ndm, tb, dev, addr);
++
++ if (!err) {
++ rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
++ ndm->ndm_flags &= ~NTF_SELF;
++ }
++ }
++out:
++ return err;
++}
++
++static int nlmsg_populate_fdb(struct sk_buff *skb,
++ struct netlink_callback *cb,
++ struct net_device *dev,
++ int *idx,
++ struct netdev_hw_addr_list *list)
++{
++ struct netdev_hw_addr *ha;
++ int err;
++ u32 portid, seq;
++
++ portid = NETLINK_CB(cb->skb).portid;
++ seq = cb->nlh->nlmsg_seq;
++
++ list_for_each_entry(ha, &list->list, list) {
++ if (*idx < cb->args[0])
++ goto skip;
++
++ err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
++ portid, seq,
++ RTM_NEWNEIGH, NTF_SELF,
++ NLM_F_MULTI);
++ if (err < 0)
++ return err;
++skip:
++ *idx += 1;
++ }
++ return 0;
++}
++
++/**
++ * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
++ * @nlh: netlink message header
++ * @dev: netdevice
++ *
++ * Default netdevice operation to dump the existing unicast address list.
++ * Returns number of addresses from list put in skb.
++ */
++int ndo_dflt_fdb_dump(struct sk_buff *skb,
++ struct netlink_callback *cb,
++ struct net_device *dev,
++ int idx)
++{
++ int err;
++
++ netif_addr_lock_bh(dev);
++ err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
++ if (err)
++ goto out;
++ nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
++out:
++ netif_addr_unlock_bh(dev);
++ return idx;
++}
++EXPORT_SYMBOL(ndo_dflt_fdb_dump);
++
++static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
++{
++ int idx = 0;
++ struct net *net = sock_net(skb->sk);
++ struct net_device *dev;
++
++ rcu_read_lock();
++ for_each_netdev_rcu(net, dev) {
++ if (dev->priv_flags & IFF_BRIDGE_PORT) {
++ struct net_device *br_dev;
++ const struct net_device_ops *ops;
++
++ br_dev = netdev_master_upper_dev_get(dev);
++ ops = br_dev->netdev_ops;
++ if (ops->ndo_fdb_dump)
++ idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
++ }
++
++ if (dev->netdev_ops->ndo_fdb_dump)
++ idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
++ else
++ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
++ }
++ rcu_read_unlock();
++
++ cb->args[0] = idx;
++ return skb->len;
++}
++
++int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
++ struct net_device *dev, u16 mode)
++{
++ struct nlmsghdr *nlh;
++ struct ifinfomsg *ifm;
++ struct nlattr *br_afspec;
++ u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
++ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
++
++ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
++ if (nlh == NULL)
++ return -EMSGSIZE;
++
++ ifm = nlmsg_data(nlh);
++ ifm->ifi_family = AF_BRIDGE;
++ ifm->__ifi_pad = 0;
++ ifm->ifi_type = dev->type;
++ ifm->ifi_index = dev->ifindex;
++ ifm->ifi_flags = dev_get_flags(dev);
++ ifm->ifi_change = 0;
++
++
++ if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
++ nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
++ nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
++ (br_dev &&
++ nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
++ (dev->addr_len &&
++ nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
++ (dev->ifindex != dev->iflink &&
++ nla_put_u32(skb, IFLA_LINK, dev->iflink)))
++ goto nla_put_failure;
++
++ br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
++ if (!br_afspec)
++ goto nla_put_failure;
++
++ if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) ||
++ nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
++ nla_nest_cancel(skb, br_afspec);
++ goto nla_put_failure;
++ }
++ nla_nest_end(skb, br_afspec);
++
++ return nlmsg_end(skb, nlh);
++nla_put_failure:
++ nlmsg_cancel(skb, nlh);
++ return -EMSGSIZE;
++}
++EXPORT_SYMBOL(ndo_dflt_bridge_getlink);
++
++static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
++{
++ struct net *net = sock_net(skb->sk);
++ struct net_device *dev;
++ int idx = 0;
++ u32 portid = NETLINK_CB(cb->skb).portid;
++ u32 seq = cb->nlh->nlmsg_seq;
++ struct nlattr *extfilt;
++ u32 filter_mask = 0;
++
++ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
++ IFLA_EXT_MASK);
++ if (extfilt)
++ filter_mask = nla_get_u32(extfilt);
++
++ rcu_read_lock();
++ for_each_netdev_rcu(net, dev) {
++ const struct net_device_ops *ops = dev->netdev_ops;
++ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
++
++ if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
++ if (idx >= cb->args[0] &&
++ br_dev->netdev_ops->ndo_bridge_getlink(
++ skb, portid, seq, dev, filter_mask) < 0)
++ break;
++ idx++;
++ }
++
++ if (ops->ndo_bridge_getlink) {
++ if (idx >= cb->args[0] &&
++ ops->ndo_bridge_getlink(skb, portid, seq, dev,
++ filter_mask) < 0)
++ break;
++ idx++;
++ }
++ }
++ rcu_read_unlock();
++ cb->args[0] = idx;
++
++ return skb->len;
++}
++
++static inline size_t bridge_nlmsg_size(void)
++{
++ return NLMSG_ALIGN(sizeof(struct ifinfomsg))
++ + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
++ + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
++ + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
++ + nla_total_size(sizeof(u32)) /* IFLA_MTU */
++ + nla_total_size(sizeof(u32)) /* IFLA_LINK */
++ + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
++ + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
++ + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
++ + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
++ + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
++}
++
++static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
++{
++ struct net *net = dev_net(dev);
++ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
++ struct sk_buff *skb;
++ int err = -EOPNOTSUPP;
++
++ skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
++ if (!skb) {
++ err = -ENOMEM;
++ goto errout;
++ }
++
++ if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
++ br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
++ err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
++ if (err < 0)
++ goto errout;
++ }
++
++ if ((flags & BRIDGE_FLAGS_SELF) &&
++ dev->netdev_ops->ndo_bridge_getlink) {
++ err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
++ if (err < 0)
++ goto errout;
++ }
++
++ if (!skb->len)
++ goto errout;
++
++ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
++ return 0;
++errout:
++ WARN_ON(err == -EMSGSIZE);
++ kfree_skb(skb);
++ if (err)
++ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
++ return err;
++}
++
++static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ struct ifinfomsg *ifm;
++ struct net_device *dev;
++ struct nlattr *br_spec, *attr = NULL;
++ int rem, err = -EOPNOTSUPP;
++ u16 oflags, flags = 0;
++ bool have_flags = false;
++
++ if (nlmsg_len(nlh) < sizeof(*ifm))
++ return -EINVAL;
++
++ ifm = nlmsg_data(nlh);
++ if (ifm->ifi_family != AF_BRIDGE)
++ return -EPFNOSUPPORT;
++
++ dev = __dev_get_by_index(net, ifm->ifi_index);
++ if (!dev) {
++ pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
++ return -ENODEV;
++ }
++
++ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (br_spec) {
++ nla_for_each_nested(attr, br_spec, rem) {
++ if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
++ have_flags = true;
++ flags = nla_get_u16(attr);
++ break;
++ }
++ }
++ }
++
++ oflags = flags;
++
++ if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
++ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
++
++ if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
++
++ err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
++ if (err)
++ goto out;
++
++ flags &= ~BRIDGE_FLAGS_MASTER;
++ }
++
++ if ((flags & BRIDGE_FLAGS_SELF)) {
++ if (!dev->netdev_ops->ndo_bridge_setlink)
++ err = -EOPNOTSUPP;
++ else
++ err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
++
++ if (!err)
++ flags &= ~BRIDGE_FLAGS_SELF;
++ }
++
++ if (have_flags)
++ memcpy(nla_data(attr), &flags, sizeof(flags));
++ /* Generate event to notify upper layer of bridge change */
++ if (!err)
++ err = rtnl_bridge_notify(dev, oflags);
++out:
++ return err;
++}
++
++static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ struct ifinfomsg *ifm;
++ struct net_device *dev;
++ struct nlattr *br_spec, *attr = NULL;
++ int rem, err = -EOPNOTSUPP;
++ u16 oflags, flags = 0;
++ bool have_flags = false;
++
++ if (nlmsg_len(nlh) < sizeof(*ifm))
++ return -EINVAL;
++
++ ifm = nlmsg_data(nlh);
++ if (ifm->ifi_family != AF_BRIDGE)
++ return -EPFNOSUPPORT;
++
++ dev = __dev_get_by_index(net, ifm->ifi_index);
++ if (!dev) {
++ pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
++ return -ENODEV;
++ }
++
++ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (br_spec) {
++ nla_for_each_nested(attr, br_spec, rem) {
++ if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
++ have_flags = true;
++ flags = nla_get_u16(attr);
++ break;
++ }
++ }
++ }
++
++ oflags = flags;
++
++ if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
++ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
++
++ if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
++
++ err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
++ if (err)
++ goto out;
++
++ flags &= ~BRIDGE_FLAGS_MASTER;
++ }
++
++ if ((flags & BRIDGE_FLAGS_SELF)) {
++ if (!dev->netdev_ops->ndo_bridge_dellink)
++ err = -EOPNOTSUPP;
++ else
++ err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
++
++ if (!err)
++ flags &= ~BRIDGE_FLAGS_SELF;
++ }
++
++ if (have_flags)
++ memcpy(nla_data(attr), &flags, sizeof(flags));
++ /* Generate event to notify upper layer of bridge change */
++ if (!err)
++ err = rtnl_bridge_notify(dev, oflags);
++out:
++ return err;
++}
++
++/* Process one rtnetlink message. */
++
++static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ struct net *net = sock_net(skb->sk);
++ rtnl_doit_func doit;
++ int sz_idx, kind;
++ int family;
++ int type;
++ int err;
++
++ type = nlh->nlmsg_type;
++ if (type > RTM_MAX)
++ return -EOPNOTSUPP;
++
++ type -= RTM_BASE;
++
++ /* All the messages must have at least 1 byte length */
++ if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
++ return 0;
++
++ family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
++ sz_idx = type>>2;
++ kind = type&3;
++
++ if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
++ return -EPERM;
++
++ if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
++ struct sock *rtnl;
++ rtnl_dumpit_func dumpit;
++ rtnl_calcit_func calcit;
++ u16 min_dump_alloc = 0;
++
++ dumpit = rtnl_get_dumpit(family, type);
++ if (dumpit == NULL)
++ return -EOPNOTSUPP;
++ calcit = rtnl_get_calcit(family, type);
++ if (calcit)
++ min_dump_alloc = calcit(skb, nlh);
++
++ __rtnl_unlock();
++ rtnl = net->rtnl;
++ {
++ struct netlink_dump_control c = {
++ .dump = dumpit,
++ .min_dump_alloc = min_dump_alloc,
++ };
++ err = netlink_dump_start(rtnl, skb, nlh, &c);
++ }
++ rtnl_lock();
++ return err;
++ }
++
++ doit = rtnl_get_doit(family, type);
++ if (doit == NULL)
++ return -EOPNOTSUPP;
++
++ return doit(skb, nlh);
++}
++
++static void rtnetlink_rcv(struct sk_buff *skb)
++{
++ rtnl_lock();
++ netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
++ rtnl_unlock();
++}
++
++static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++
++ switch (event) {
++ case NETDEV_UP:
++ case NETDEV_DOWN:
++ case NETDEV_PRE_UP:
++ case NETDEV_POST_INIT:
++ case NETDEV_REGISTER:
++ case NETDEV_CHANGE:
++ case NETDEV_PRE_TYPE_CHANGE:
++ case NETDEV_GOING_DOWN:
++ case NETDEV_UNREGISTER:
++ case NETDEV_UNREGISTER_FINAL:
++ case NETDEV_RELEASE:
++ case NETDEV_JOIN:
++ break;
++ default:
++ rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
++ break;
++ }
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block rtnetlink_dev_notifier = {
++ .notifier_call = rtnetlink_event,
++};
++
++
++static int __net_init rtnetlink_net_init(struct net *net)
++{
++ struct sock *sk;
++ struct netlink_kernel_cfg cfg = {
++ .groups = RTNLGRP_MAX,
++ .input = rtnetlink_rcv,
++ .cb_mutex = &rtnl_mutex,
++ .flags = NL_CFG_F_NONROOT_RECV,
++ };
++
++ sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
++ if (!sk)
++ return -ENOMEM;
++ net->rtnl = sk;
++ return 0;
++}
++
++static void __net_exit rtnetlink_net_exit(struct net *net)
++{
++ netlink_kernel_release(net->rtnl);
++ net->rtnl = NULL;
++}
++
++static struct pernet_operations rtnetlink_net_ops = {
++ .init = rtnetlink_net_init,
++ .exit = rtnetlink_net_exit,
++};
++
++void __init rtnetlink_init(void)
++{
++ if (register_pernet_subsys(&rtnetlink_net_ops))
++ panic("rtnetlink_init: cannot initialize rtnetlink\n");
++
++ register_netdevice_notifier(&rtnetlink_dev_notifier);
++
++ rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
++ rtnl_dump_ifinfo, rtnl_calcit);
++ rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
++ rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
++ rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
++
++ rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
++ rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
++
++ rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
++ rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
++ rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
++
++ rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
++ rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
++ rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
++}
++
+diff -Nur linux-3.14.36/net/core/tso.c linux-openelec/net/core/tso.c
+--- linux-3.14.36/net/core/tso.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/net/core/tso.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,72 @@
++#include <net/ip.h>
++#include <net/tso.h>
++
++/* Calculate expected number of TX descriptors */
++int tso_count_descs(struct sk_buff *skb)
++{
++ /* The Marvell Way */
++ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
++}
++
++void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
++ int size, bool is_last)
++{
++ struct iphdr *iph;
++ struct tcphdr *tcph;
++ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++ int mac_hdr_len = skb_network_offset(skb);
++
++ memcpy(hdr, skb->data, hdr_len);
++ iph = (struct iphdr *)(hdr + mac_hdr_len);
++ iph->id = htons(tso->ip_id);
++ iph->tot_len = htons(size + hdr_len - mac_hdr_len);
++ tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
++ tcph->seq = htonl(tso->tcp_seq);
++ tso->ip_id++;
++
++ if (!is_last) {
++ /* Clear all special flags for not last packet */
++ tcph->psh = 0;
++ tcph->fin = 0;
++ tcph->rst = 0;
++ }
++}
++
++void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
++{
++ tso->tcp_seq += size;
++ tso->size -= size;
++ tso->data += size;
++
++ if ((tso->size == 0) &&
++ (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
++
++ /* Move to next segment */
++ tso->size = frag->size;
++ tso->data = page_address(frag->page.p) + frag->page_offset;
++ tso->next_frag_idx++;
++ }
++}
++
++void tso_start(struct sk_buff *skb, struct tso_t *tso)
++{
++ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++
++ tso->ip_id = ntohs(ip_hdr(skb)->id);
++ tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
++ tso->next_frag_idx = 0;
++
++ /* Build first data */
++ tso->size = skb_headlen(skb) - hdr_len;
++ tso->data = skb->data + hdr_len;
++ if ((tso->size == 0) &&
++ (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
++
++ /* Move to next segment */
++ tso->size = frag->size;
++ tso->data = page_address(frag->page.p) + frag->page_offset;
++ tso->next_frag_idx++;
++ }
++}
+diff -Nur linux-3.14.36/net/ieee802154/Kconfig linux-openelec/net/ieee802154/Kconfig
+--- linux-3.14.36/net/ieee802154/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/ieee802154/Kconfig 2015-05-06 12:05:43.000000000 -0500
+@@ -15,7 +15,7 @@
+ depends on IEEE802154 && IPV6
+ select 6LOWPAN_IPHC
+ ---help---
+- IPv6 compression over IEEE 802.15.4.
++ IPv6 compression over IEEE 802.15.4.
+
+ config 6LOWPAN_IPHC
+ tristate
+diff -Nur linux-3.14.36/net/mac80211/driver-ops.h linux-openelec/net/mac80211/driver-ops.h
+--- linux-3.14.36/net/mac80211/driver-ops.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/mac80211/driver-ops.h 2015-05-06 12:05:43.000000000 -0500
+@@ -722,13 +722,19 @@
+ }
+
+ static inline void drv_flush(struct ieee80211_local *local,
++ struct ieee80211_sub_if_data *sdata,
+ u32 queues, bool drop)
+ {
++ struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
++
+ might_sleep();
+
++ if (sdata)
++ check_sdata_in_driver(sdata);
++
+ trace_drv_flush(local, queues, drop);
+ if (local->ops->flush)
+- local->ops->flush(&local->hw, queues, drop);
++ local->ops->flush(&local->hw, vif, queues, drop);
+ trace_drv_return_void(local);
+ }
+
+diff -Nur linux-3.14.36/net/mac80211/ibss.c linux-openelec/net/mac80211/ibss.c
+--- linux-3.14.36/net/mac80211/ibss.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/mac80211/ibss.c 2015-07-24 18:03:29.064842002 -0500
+@@ -386,7 +386,7 @@
+ presp->head_len, 0, GFP_KERNEL);
+ cfg80211_put_bss(local->hw.wiphy, bss);
+ netif_carrier_on(sdata->dev);
+- cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL);
+ }
+
+ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
+diff -Nur linux-3.14.36/net/mac80211/util.c linux-openelec/net/mac80211/util.c
+--- linux-3.14.36/net/mac80211/util.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/mac80211/util.c 2015-05-06 12:05:43.000000000 -0500
+@@ -554,7 +554,7 @@
+ ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_FLUSH);
+
+- drv_flush(local, queues, false);
++ drv_flush(local, sdata, queues, false);
+
+ ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_FLUSH);
+diff -Nur linux-3.14.36/net/wireless/core.h linux-openelec/net/wireless/core.h
+--- linux-3.14.36/net/wireless/core.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/wireless/core.h 2015-05-06 12:05:43.000000000 -0500
+@@ -211,6 +211,7 @@
+ } dc;
+ struct {
+ u8 bssid[ETH_ALEN];
++ struct ieee80211_channel *channel;
+ } ij;
+ };
+ };
+@@ -258,7 +259,8 @@
+ struct net_device *dev, bool nowext);
+ int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, bool nowext);
+-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
++void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
++ struct ieee80211_channel *channel);
+ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev);
+
+diff -Nur linux-3.14.36/net/wireless/ibss.c linux-openelec/net/wireless/ibss.c
+--- linux-3.14.36/net/wireless/ibss.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/wireless/ibss.c 2015-05-06 12:05:43.000000000 -0500
+@@ -14,7 +14,8 @@
+ #include "rdev-ops.h"
+
+
+-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
++void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
++ struct ieee80211_channel *channel)
+ {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_bss *bss;
+@@ -28,8 +29,7 @@
+ if (!wdev->ssid_len)
+ return;
+
+- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+- wdev->ssid, wdev->ssid_len,
++ bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0,
+ WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+
+ if (WARN_ON(!bss))
+@@ -54,21 +54,26 @@
+ #endif
+ }
+
+-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
++void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
++ struct ieee80211_channel *channel, gfp_t gfp)
+ {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_event *ev;
+ unsigned long flags;
+
+- trace_cfg80211_ibss_joined(dev, bssid);
++ trace_cfg80211_ibss_joined(dev, bssid, channel);
++
++ if (WARN_ON(!channel))
++ return;
+
+ ev = kzalloc(sizeof(*ev), gfp);
+ if (!ev)
+ return;
+
+ ev->type = EVENT_IBSS_JOINED;
+- memcpy(ev->cr.bssid, bssid, ETH_ALEN);
++ memcpy(ev->ij.bssid, bssid, ETH_ALEN);
++ ev->ij.channel = channel;
+
+ spin_lock_irqsave(&wdev->event_lock, flags);
+ list_add_tail(&ev->list, &wdev->event_list);
+diff -Nur linux-3.14.36/net/wireless/trace.h linux-openelec/net/wireless/trace.h
+--- linux-3.14.36/net/wireless/trace.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/wireless/trace.h 2015-07-24 18:03:28.276842002 -0500
+@@ -2279,11 +2279,6 @@
+ TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr))
+ );
+
+-DEFINE_EVENT(cfg80211_rx_evt, cfg80211_ibss_joined,
+- TP_PROTO(struct net_device *netdev, const u8 *addr),
+- TP_ARGS(netdev, addr)
+-);
+-
+ DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
+ TP_PROTO(struct net_device *netdev, const u8 *addr),
+ TP_ARGS(netdev, addr)
+@@ -2294,6 +2289,24 @@
+ TP_ARGS(netdev, addr)
+ );
+
++TRACE_EVENT(cfg80211_ibss_joined,
++ TP_PROTO(struct net_device *netdev, const u8 *bssid,
++ struct ieee80211_channel *channel),
++ TP_ARGS(netdev, bssid, channel),
++ TP_STRUCT__entry(
++ NETDEV_ENTRY
++ MAC_ENTRY(bssid)
++ CHAN_ENTRY
++ ),
++ TP_fast_assign(
++ NETDEV_ASSIGN;
++ MAC_ASSIGN(bssid, bssid);
++ CHAN_ASSIGN(channel);
++ ),
++ TP_printk(NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", " CHAN_PR_FMT,
++ NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
++);
++
+ TRACE_EVENT(cfg80211_probe_status,
+ TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie,
+ bool acked),
+diff -Nur linux-3.14.36/net/wireless/util.c linux-openelec/net/wireless/util.c
+--- linux-3.14.36/net/wireless/util.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/net/wireless/util.c 2015-05-06 12:05:43.000000000 -0500
+@@ -820,7 +820,8 @@
+ ev->dc.reason, true);
+ break;
+ case EVENT_IBSS_JOINED:
+- __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid);
++ __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
++ ev->ij.channel);
+ break;
+ }
+ wdev_unlock(wdev);
+diff -Nur linux-3.14.36/scripts/Makefile.lib linux-openelec/scripts/Makefile.lib
+--- linux-3.14.36/scripts/Makefile.lib 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/scripts/Makefile.lib 2015-05-06 12:05:43.000000000 -0500
+@@ -153,6 +153,7 @@
+ -I$(srctree)/arch/$(SRCARCH)/boot/dts \
+ -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \
+ -I$(srctree)/drivers/of/testcase-data \
++ -I$(srctree)/include \
+ -undef -D__DTS__
+
+ # Finds the multi-part object the current object will be linked into
+diff -Nur linux-3.14.36/scripts/mod/devicetable-offsets.c linux-openelec/scripts/mod/devicetable-offsets.c
+--- linux-3.14.36/scripts/mod/devicetable-offsets.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/scripts/mod/devicetable-offsets.c 2015-05-06 12:05:43.000000000 -0500
+@@ -174,6 +174,9 @@
+ DEVID_FIELD(x86_cpu_id, model);
+ DEVID_FIELD(x86_cpu_id, vendor);
+
++ DEVID(cpu_feature);
++ DEVID_FIELD(cpu_feature, feature);
++
+ DEVID(mei_cl_device_id);
+ DEVID_FIELD(mei_cl_device_id, name);
+
+diff -Nur linux-3.14.36/scripts/mod/file2alias.c linux-openelec/scripts/mod/file2alias.c
+--- linux-3.14.36/scripts/mod/file2alias.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/scripts/mod/file2alias.c 2015-05-06 12:05:43.000000000 -0500
+@@ -1135,6 +1135,16 @@
+ }
+ ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
+
++/* LOOKS like cpu:type:*:feature:*FEAT* */
++static int do_cpu_entry(const char *filename, void *symval, char *alias)
++{
++ DEF_FIELD(symval, cpu_feature, feature);
++
++ sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
++ return 1;
++}
++ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
++
+ /* Looks like: mei:S */
+ static int do_mei_entry(const char *filename, void *symval,
+ char *alias)
+diff -Nur linux-3.14.36/scripts/recordmcount.c linux-openelec/scripts/recordmcount.c
+--- linux-3.14.36/scripts/recordmcount.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/scripts/recordmcount.c 2015-05-06 12:05:43.000000000 -0500
+@@ -40,6 +40,11 @@
+ #define R_METAG_NONE 3
+ #endif
+
++#ifndef EM_AARCH64
++#define EM_AARCH64 183
++#define R_AARCH64_ABS64 257
++#endif
++
+ static int fd_map; /* File descriptor for file being modified. */
+ static int mmap_failed; /* Boolean flag. */
+ static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
+@@ -347,6 +352,8 @@
+ case EM_ARM: reltype = R_ARM_ABS32;
+ altmcount = "__gnu_mcount_nc";
+ break;
++ case EM_AARCH64:
++ reltype = R_AARCH64_ABS64; gpfx = '_'; break;
+ case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break;
+ case EM_METAG: reltype = R_METAG_ADDR32;
+ altmcount = "_mcount_wrapper";
+diff -Nur linux-3.14.36/scripts/recordmcount.pl linux-openelec/scripts/recordmcount.pl
+--- linux-3.14.36/scripts/recordmcount.pl 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/scripts/recordmcount.pl 2015-07-24 18:03:29.540842002 -0500
+@@ -278,6 +278,11 @@
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_ARM_(CALL|PC24|THM_CALL)" .
+ "\\s+(__gnu_mcount_nc|mcount)\$";
+
++} elsif ($arch eq "arm64") {
++ $alignment = 3;
++ $section_type = '%progbits';
++ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_AARCH64_CALL26\\s+_mcount\$";
++ $type = ".quad";
+ } elsif ($arch eq "ia64") {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+ $type = "data8";
+diff -Nur linux-3.14.36/sound/soc/codecs/cs42888.c linux-openelec/sound/soc/codecs/cs42888.c
+--- linux-3.14.36/sound/soc/codecs/cs42888.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/codecs/cs42888.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,934 @@
++/*
++ * cs42888.c -- CS42888 ALSA SoC Audio Driver
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/pm.h>
++#include <linux/i2c.h>
++#include <linux/spi/spi.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++#include <sound/soc-dapm.h>
++#include <sound/tlv.h>
++#include <sound/initval.h>
++#include <asm/div64.h>
++#include "cs42888.h"
++
++#define CS42888_NUM_SUPPLIES 4
++static const char *cs42888_supply_names[CS42888_NUM_SUPPLIES] = {
++ "VA",
++ "VD",
++ "VLS",
++ "VLC",
++};
++
++#define CS42888_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
++ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
++
++/* Private data for the CS42888 */
++struct cs42888_private {
++ struct clk *clk;
++ struct snd_soc_codec *codec;
++ u8 reg_cache[CS42888_NUMREGS + 1];
++ unsigned int mclk; /* Input frequency of the MCLK pin */
++ unsigned int slave_mode;
++ struct regulator_bulk_data supplies[CS42888_NUM_SUPPLIES];
++};
++
++/**
++ * cs42888_fill_cache - pre-fill the CS42888 register cache.
++ * @codec: the codec for this CS42888
++ *
++ * This function fills in the CS42888 register cache by reading the register
++ * values from the hardware.
++ *
++ * This CS42888 registers are cached to avoid excessive I2C I/O operations.
++ * After the initial read to pre-fill the cache, the CS42888 never updates
++ * the register values, so we won't have a cache coherency problem.
++ *
++ * We use the auto-increment feature of the CS42888 to read all registers in
++ * one shot.
++ */
++static int cs42888_fill_cache(struct snd_soc_codec *codec)
++{
++ u8 *cache = codec->reg_cache;
++ struct i2c_client *i2c_client = to_i2c_client(codec->dev);
++ s32 length;
++
++ length = i2c_smbus_read_i2c_block_data(i2c_client,
++ CS42888_FIRSTREG | CS42888_I2C_INCR, CS42888_NUMREGS, \
++ cache + 1);
++
++ if (length != CS42888_NUMREGS) {
++ dev_err(codec->dev, "i2c read failure, addr=0x%x\n",
++ i2c_client->addr);
++ return -EIO;
++ }
++ return 0;
++}
++
++#ifdef DEBUG
++static void dump_reg(struct snd_soc_codec *codec)
++{
++ int i, reg;
++ int ret;
++ u8 *cache = codec->reg_cache + 1;
++
++ dev_dbg(codec->dev, "dump begin\n");
++ dev_dbg(codec->dev, "reg value in cache\n");
++ for (i = 0; i < CS42888_NUMREGS; i++)
++ dev_dbg(codec->dev, "reg[%d] = 0x%x\n", i, cache[i]);
++
++ dev_dbg(codec->dev, "real reg value\n");
++ ret = cs42888_fill_cache(codec);
++ if (ret < 0) {
++ dev_err(codec->dev, "failed to fill register cache\n");
++ return ret;
++ }
++ for (i = 0; i < CS42888_NUMREGS; i++)
++ dev_dbg(codec->dev, "reg[%d] = 0x%x\n", i, cache[i]);
++
++ dev_dbg(codec->dev, "dump end\n");
++}
++#else
++static void dump_reg(struct snd_soc_codec *codec)
++{
++}
++#endif
++
++/* -127.5dB to 0dB with step of 0.5dB */
++static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
++/* -64dB to 24dB with step of 0.5dB */
++static const DECLARE_TLV_DB_SCALE(adc_tlv, -6400, 50, 1);
++
++static int cs42888_out_vu(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ return snd_soc_put_volsw_2r(kcontrol, ucontrol);
++}
++
++static int cs42888_info_volsw_s8(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ struct soc_mixer_control *mc =
++ (struct soc_mixer_control *)kcontrol->private_value;
++
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++ uinfo->count = 2;
++ uinfo->value.integer.min = 0;
++ uinfo->value.integer.max = mc->max - mc->min;
++ return 0;
++}
++
++static int cs42888_get_volsw_s8(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct soc_mixer_control *mc =
++ (struct soc_mixer_control *)kcontrol->private_value;
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++ s8 val = snd_soc_read(codec, mc->reg);
++ ucontrol->value.integer.value[0] = val - mc->min;
++
++ val = snd_soc_read(codec, mc->rreg);
++ ucontrol->value.integer.value[1] = val - mc->min;
++ return 0;
++}
++
++int cs42888_put_volsw_s8(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct soc_mixer_control *mc =
++ (struct soc_mixer_control *)kcontrol->private_value;
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++ unsigned short val;
++ int ret;
++
++ val = ucontrol->value.integer.value[0] + mc->min;
++ ret = snd_soc_write(codec, mc->reg, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ val = ucontrol->value.integer.value[1] + mc->min;
++ ret = snd_soc_write(codec, mc->rreg, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++ return 0;
++}
++
++#define SOC_CS42888_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, \
++ xinvert, tlv_array) \
++{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
++ .name = (xname), \
++ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
++ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
++ .tlv.p = (tlv_array), \
++ .info = snd_soc_info_volsw, \
++ .get = snd_soc_get_volsw, \
++ .put = cs42888_out_vu, \
++ .private_value = (unsigned long)&(struct soc_mixer_control) \
++ {.reg = reg_left, \
++ .rreg = reg_right, \
++ .shift = xshift, \
++ .max = xmax, \
++ .invert = xinvert} \
++}
++
++#define SOC_CS42888_DOUBLE_R_S8_TLV(xname, reg_left, reg_right, xmin, xmax, \
++ tlv_array) \
++{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
++ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
++ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
++ .tlv.p = (tlv_array), \
++ .info = cs42888_info_volsw_s8, \
++ .get = cs42888_get_volsw_s8, \
++ .put = cs42888_put_volsw_s8, \
++ .private_value = (unsigned long)&(struct soc_mixer_control) \
++ {.reg = reg_left, \
++ .rreg = reg_right, \
++ .min = xmin, \
++ .max = xmax} \
++}
++
++static const char *cs42888_adcfilter[] = { "None", "High Pass" };
++static const char *cs42888_dacinvert[] = { "Disabled", "Enabled" };
++static const char *cs42888_adcinvert[] = { "Disabled", "Enabled" };
++static const char *cs42888_dacamute[] = { "Disabled", "AutoMute" };
++static const char *cs42888_dac_sngvol[] = { "Disabled", "Enabled" };
++static const char *cs42888_dac_szc[] = { "Immediate Change", "Zero Cross",
++ "Soft Ramp", "Soft Ramp on Zero Cross" };
++static const char *cs42888_mute_adc[] = { "UnMute", "Mute" };
++static const char *cs42888_adc_sngvol[] = { "Disabled", "Enabled" };
++static const char *cs42888_adc_szc[] = { "Immediate Change", "Zero Cross",
++ "Soft Ramp", "Soft Ramp on Zero Cross" };
++static const char *cs42888_dac_dem[] = { "No-De-Emphasis", "De-Emphasis" };
++static const char *cs42888_adc_single[] = { "Differential", "Single-Ended" };
++
++static const struct soc_enum cs42888_enum[] = {
++ SOC_ENUM_SINGLE(CS42888_ADCCTL, 7, 2, cs42888_adcfilter),
++ SOC_ENUM_DOUBLE(CS42888_DACINV, 0, 1, 2, cs42888_dacinvert),
++ SOC_ENUM_DOUBLE(CS42888_DACINV, 2, 3, 2, cs42888_dacinvert),
++ SOC_ENUM_DOUBLE(CS42888_DACINV, 4, 5, 2, cs42888_dacinvert),
++ SOC_ENUM_DOUBLE(CS42888_DACINV, 6, 7, 2, cs42888_dacinvert),
++ SOC_ENUM_DOUBLE(CS42888_ADCINV, 0, 1, 2, cs42888_adcinvert),
++ SOC_ENUM_DOUBLE(CS42888_ADCINV, 2, 3, 2, cs42888_adcinvert),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 4, 2, cs42888_dacamute),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 7, 2, cs42888_dac_sngvol),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 5, 4, cs42888_dac_szc),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 3, 2, cs42888_mute_adc),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 2, 2, cs42888_adc_sngvol),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 0, 4, cs42888_adc_szc),
++ SOC_ENUM_SINGLE(CS42888_ADCCTL, 5, 2, cs42888_dac_dem),
++ SOC_ENUM_SINGLE(CS42888_ADCCTL, 4, 2, cs42888_adc_single),
++ SOC_ENUM_SINGLE(CS42888_ADCCTL, 3, 2, cs42888_adc_single),
++};
++
++static const struct snd_kcontrol_new cs42888_snd_controls[] = {
++ SOC_CS42888_DOUBLE_R_TLV("DAC1 Playback Volume", CS42888_VOLAOUT1,
++ CS42888_VOLAOUT2, 0, 0xff, 1, dac_tlv),
++ SOC_CS42888_DOUBLE_R_TLV("DAC2 Playback Volume", CS42888_VOLAOUT3,
++ CS42888_VOLAOUT4, 0, 0xff, 1, dac_tlv),
++ SOC_CS42888_DOUBLE_R_TLV("DAC3 Playback Volume", CS42888_VOLAOUT5,
++ CS42888_VOLAOUT6, 0, 0xff, 1, dac_tlv),
++ SOC_CS42888_DOUBLE_R_TLV("DAC4 Playback Volume", CS42888_VOLAOUT7,
++ CS42888_VOLAOUT8, 0, 0xff, 1, dac_tlv),
++ SOC_CS42888_DOUBLE_R_S8_TLV("ADC1 Capture Volume", CS42888_VOLAIN1,
++ CS42888_VOLAIN2, -128, 48, adc_tlv),
++ SOC_CS42888_DOUBLE_R_S8_TLV("ADC2 Capture Volume", CS42888_VOLAIN3,
++ CS42888_VOLAIN4, -128, 48, adc_tlv),
++ SOC_ENUM("ADC High-Pass Filter Switch", cs42888_enum[0]),
++ SOC_ENUM("DAC1 Invert Switch", cs42888_enum[1]),
++ SOC_ENUM("DAC2 Invert Switch", cs42888_enum[2]),
++ SOC_ENUM("DAC3 Invert Switch", cs42888_enum[3]),
++ SOC_ENUM("DAC4 Invert Switch", cs42888_enum[4]),
++ SOC_ENUM("ADC1 Invert Switch", cs42888_enum[5]),
++ SOC_ENUM("ADC2 Invert Switch", cs42888_enum[6]),
++ SOC_ENUM("DAC Auto Mute Switch", cs42888_enum[7]),
++ SOC_ENUM("DAC Single Volume Control Switch", cs42888_enum[8]),
++ SOC_ENUM("DAC Soft Ramp and Zero Cross Control Switch", cs42888_enum[9]),
++ SOC_ENUM("Mute ADC Serial Port Switch", cs42888_enum[10]),
++ SOC_ENUM("ADC Single Volume Control Switch", cs42888_enum[11]),
++ SOC_ENUM("ADC Soft Ramp and Zero Cross Control Switch", cs42888_enum[12]),
++ SOC_ENUM("DAC Deemphasis Switch", cs42888_enum[13]),
++ SOC_ENUM("ADC1 Single Ended Mode Switch", cs42888_enum[14]),
++ SOC_ENUM("ADC2 Single Ended Mode Switch", cs42888_enum[15]),
++};
++
++
++static const struct snd_soc_dapm_widget cs42888_dapm_widgets[] = {
++ SND_SOC_DAPM_DAC("DAC1", "codec-Playback", CS42888_PWRCTL, 1, 1),
++ SND_SOC_DAPM_DAC("DAC2", "codec-Playback", CS42888_PWRCTL, 2, 1),
++ SND_SOC_DAPM_DAC("DAC3", "codec-Playback", CS42888_PWRCTL, 3, 1),
++ SND_SOC_DAPM_DAC("DAC4", "codec-Playback", CS42888_PWRCTL, 4, 1),
++
++ SND_SOC_DAPM_OUTPUT("AOUT1L"),
++ SND_SOC_DAPM_OUTPUT("AOUT1R"),
++ SND_SOC_DAPM_OUTPUT("AOUT2L"),
++ SND_SOC_DAPM_OUTPUT("AOUT2R"),
++ SND_SOC_DAPM_OUTPUT("AOUT3L"),
++ SND_SOC_DAPM_OUTPUT("AOUT3R"),
++ SND_SOC_DAPM_OUTPUT("AOUT4L"),
++ SND_SOC_DAPM_OUTPUT("AOUT4R"),
++
++ SND_SOC_DAPM_ADC("ADC1", "codec-Capture", CS42888_PWRCTL, 5, 1),
++ SND_SOC_DAPM_ADC("ADC2", "codec-Capture", CS42888_PWRCTL, 6, 1),
++
++ SND_SOC_DAPM_INPUT("AIN1L"),
++ SND_SOC_DAPM_INPUT("AIN1R"),
++ SND_SOC_DAPM_INPUT("AIN2L"),
++ SND_SOC_DAPM_INPUT("AIN2R"),
++
++ SND_SOC_DAPM_PGA_E("PWR", CS42888_PWRCTL, 0, 1, NULL, 0,
++ NULL, 0),
++};
++
++static const struct snd_soc_dapm_route audio_map[] = {
++ /* Playback */
++ { "PWR", NULL, "DAC1" },
++ { "PWR", NULL, "DAC1" },
++
++ { "PWR", NULL, "DAC2" },
++ { "PWR", NULL, "DAC2" },
++
++ { "PWR", NULL, "DAC3" },
++ { "PWR", NULL, "DAC3" },
++
++ { "PWR", NULL, "DAC4" },
++ { "PWR", NULL, "DAC4" },
++
++ { "AOUT1L", NULL, "PWR" },
++ { "AOUT1R", NULL, "PWR" },
++
++ { "AOUT2L", NULL, "PWR" },
++ { "AOUT2R", NULL, "PWR" },
++
++ { "AOUT3L", NULL, "PWR" },
++ { "AOUT3R", NULL, "PWR" },
++
++ { "AOUT4L", NULL, "PWR" },
++ { "AOUT4R", NULL, "PWR" },
++
++ /* Capture */
++ { "PWR", NULL, "AIN1L" },
++ { "PWR", NULL, "AIN1R" },
++
++ { "PWR", NULL, "AIN2L" },
++ { "PWR", NULL, "AIN2R" },
++
++ { "ADC1", NULL, "PWR" },
++ { "ADC1", NULL, "PWR" },
++
++ { "ADC2", NULL, "PWR" },
++ { "ADC2", NULL, "PWR" },
++};
++
++
++static int cs42888_add_widgets(struct snd_soc_codec *codec)
++{
++ snd_soc_dapm_new_controls(&codec->dapm, cs42888_dapm_widgets,
++ ARRAY_SIZE(cs42888_dapm_widgets));
++
++ snd_soc_dapm_add_routes(&codec->dapm, audio_map, ARRAY_SIZE(audio_map));
++
++ snd_soc_dapm_new_widgets(&codec->dapm);
++ return 0;
++}
++
++/**
++ * struct cs42888_mode_ratios - clock ratio tables
++ * @ratio: the ratio of MCLK to the sample rate
++ * @speed_mode: the Speed Mode bits to set in the Mode Control register for
++ * this ratio
++ * @mclk: the Ratio Select bits to set in the Mode Control register for this
++ * ratio
++ *
++ * The data for this chart is taken from Table 10 of the CS42888 reference
++ * manual.
++ *
++ * This table is used to determine how to program the Functional Mode register.
++ * It is also used by cs42888_set_dai_sysclk() to tell ALSA which sampling
++ * rates the CS42888 currently supports.
++ *
++ * @speed_mode is the corresponding bit pattern to be written to the
++ * MODE bits of the Mode Control Register
++ *
++ * @mclk is the corresponding bit pattern to be wirten to the MCLK bits of
++ * the Mode Control Register.
++ *
++ */
++struct cs42888_mode_ratios {
++ unsigned int ratio;
++ u8 speed_mode;
++ u8 mclk;
++};
++
++static struct cs42888_mode_ratios cs42888_mode_ratios[] = {
++ {64, CS42888_MODE_4X, CS42888_MODE_DIV1},
++ {96, CS42888_MODE_4X, CS42888_MODE_DIV2},
++ {128, CS42888_MODE_2X, CS42888_MODE_DIV1},
++ {192, CS42888_MODE_2X, CS42888_MODE_DIV2},
++ {256, CS42888_MODE_1X, CS42888_MODE_DIV1},
++ {384, CS42888_MODE_2X, CS42888_MODE_DIV4},
++ {512, CS42888_MODE_1X, CS42888_MODE_DIV3},
++ {768, CS42888_MODE_1X, CS42888_MODE_DIV4},
++ {1024, CS42888_MODE_1X, CS42888_MODE_DIV5}
++};
++
++/* The number of MCLK/LRCK ratios supported by the CS42888 */
++#define NUM_MCLK_RATIOS ARRAY_SIZE(cs42888_mode_ratios)
++
++/**
++ * cs42888_set_dai_sysclk - determine the CS42888 samples rates.
++ * @codec_dai: the codec DAI
++ * @clk_id: the clock ID (ignored)
++ * @freq: the MCLK input frequency
++ * @dir: the clock direction (ignored)
++ *
++ * This function is used to tell the codec driver what the input MCLK
++ * frequency is.
++ *
++ */
++static int cs42888_set_dai_sysclk(struct snd_soc_dai *codec_dai,
++ int clk_id, unsigned int freq, int dir)
++{
++ struct snd_soc_codec *codec = codec_dai->codec;
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++
++ cs42888->mclk = freq;
++ return 0;
++}
++
++/**
++ * cs42888_set_dai_fmt - configure the codec for the selected audio format
++ * @codec_dai: the codec DAI
++ * @format: a SND_SOC_DAIFMT_x value indicating the data format
++ *
++ * This function takes a bitmask of SND_SOC_DAIFMT_x bits and programs the
++ * codec accordingly.
++ *
++ * Currently, this function only supports SND_SOC_DAIFMT_I2S and
++ * SND_SOC_DAIFMT_LEFT_J. The CS42888 codec also supports right-justified
++ * data for playback only, but ASoC currently does not support different
++ * formats for playback vs. record.
++ */
++static int cs42888_set_dai_fmt(struct snd_soc_dai *codec_dai,
++ unsigned int format)
++{
++ struct snd_soc_codec *codec = codec_dai->codec;
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++ int ret = 0;
++ u8 val;
++
++ val = snd_soc_read(codec, CS42888_FORMAT);
++ val &= ~CS42888_FORMAT_DAC_DIF_MASK;
++ val &= ~CS42888_FORMAT_ADC_DIF_MASK;
++ /* set DAI format */
++ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
++ case SND_SOC_DAIFMT_LEFT_J:
++ val |= DIF_LEFT_J << CS42888_FORMAT_DAC_DIF_OFFSET;
++ val |= DIF_LEFT_J << CS42888_FORMAT_ADC_DIF_OFFSET;
++ break;
++ case SND_SOC_DAIFMT_I2S:
++ val |= DIF_I2S << CS42888_FORMAT_DAC_DIF_OFFSET;
++ val |= DIF_I2S << CS42888_FORMAT_ADC_DIF_OFFSET;
++ break;
++ case SND_SOC_DAIFMT_RIGHT_J:
++ val |= DIF_RIGHT_J << CS42888_FORMAT_DAC_DIF_OFFSET;
++ val |= DIF_RIGHT_J << CS42888_FORMAT_ADC_DIF_OFFSET;
++ break;
++ default:
++ dev_err(codec->dev, "invalid dai format\n");
++ return -EINVAL;
++ }
++
++ ret = snd_soc_write(codec, CS42888_FORMAT, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ val = snd_soc_read(codec, CS42888_MODE);
++ /* set master/slave audio interface */
++ switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
++ case SND_SOC_DAIFMT_CBS_CFS:
++ cs42888->slave_mode = 1;
++ val &= ~CS42888_MODE_SPEED_MASK;
++ val |= CS42888_MODE_SLAVE;
++ break;
++ case SND_SOC_DAIFMT_CBM_CFM:
++ cs42888->slave_mode = 0;
++ break;
++ default:
++ /* all other modes are unsupported by the hardware */
++ return -EINVAL;
++ }
++
++ ret = snd_soc_write(codec, CS42888_MODE, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ dump_reg(codec);
++ return ret;
++}
++
++/**
++ * cs42888_hw_params - program the CS42888 with the given hardware parameters.
++ * @substream: the audio stream
++ * @params: the hardware parameters to set
++
++ * @dai: the SOC DAI (ignored)
++ *
++ * This function programs the hardware with the values provided.
++ * Specifically, the sample rate and the data format.
++ *
++ * The .ops functions are used to provide board-specific data, like input
++ * frequencies, to this driver. This function takes that information,
++ * combines it with the hardware parameters provided, and programs the
++ * hardware accordingly.
++ */
++static int cs42888_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_codec *codec = rtd->codec;
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++ int ret;
++ u32 i, rate, ratio, val;
++
++ rate = params_rate(params); /* Sampling rate, in Hz */
++ ratio = cs42888->mclk / rate; /* MCLK/LRCK ratio */
++ for (i = 0; i < NUM_MCLK_RATIOS; i++) {
++ if (cs42888_mode_ratios[i].ratio == ratio)
++ break;
++ }
++
++ if (i == NUM_MCLK_RATIOS) {
++ /* We did not find a matching ratio */
++ dev_err(codec->dev, "could not find matching ratio\n");
++ return -EINVAL;
++ }
++
++ if (!cs42888->slave_mode) {
++ val = snd_soc_read(codec, CS42888_MODE);
++ val &= ~CS42888_MODE_SPEED_MASK;
++ val |= cs42888_mode_ratios[i].speed_mode;
++ val &= ~CS42888_MODE_DIV_MASK;
++ val |= cs42888_mode_ratios[i].mclk;
++ } else {
++ val = snd_soc_read(codec, CS42888_MODE);
++ val &= ~CS42888_MODE_SPEED_MASK;
++ val |= CS42888_MODE_SLAVE;
++ val &= ~CS42888_MODE_DIV_MASK;
++ val |= cs42888_mode_ratios[i].mclk;
++ }
++ ret = snd_soc_write(codec, CS42888_MODE, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ /* Unmute all the channels */
++ val = snd_soc_read(codec, CS42888_MUTE);
++ val &= ~CS42888_MUTE_ALL;
++ ret = snd_soc_write(codec, CS42888_MUTE, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ ret = cs42888_fill_cache(codec);
++ if (ret < 0) {
++ dev_err(codec->dev, "failed to fill register cache\n");
++ return ret;
++ }
++
++ dump_reg(codec);
++ return ret;
++}
++
++/**
++ * cs42888_shutdown - cs42888 enters into low power mode again.
++ * @substream: the audio stream
++ * @dai: the SOC DAI (ignored)
++ *
++ * The .ops functions are used to provide board-specific data, like input
++ * frequencies, to this driver. This function takes that information,
++ * combines it with the hardware parameters provided, and programs the
++ * hardware accordingly.
++ */
++static void cs42888_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_codec *codec = rtd->codec;
++ int ret;
++ u8 val;
++
++ /* Mute all the channels */
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ val = snd_soc_read(codec, CS42888_MUTE);
++ val |= CS42888_MUTE_ALL;
++ ret = snd_soc_write(codec, CS42888_MUTE, val);
++ if (ret < 0)
++ dev_err(codec->dev, "i2c write failed\n");
++ }
++}
++
++static int cs42888_prepare(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_card *card = rtd->card;
++ struct snd_soc_dai *tmp_codec_dai;
++ struct snd_soc_pcm_runtime *tmp_rtd;
++ u32 i;
++
++ for (i = 0; i < card->num_rtd; i++) {
++ tmp_codec_dai = card->rtd[i].codec_dai;
++ tmp_rtd = (struct snd_soc_pcm_runtime *)(card->rtd + i);
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ cancel_delayed_work(&tmp_rtd->delayed_work);
++ }
++ return 0;
++}
++
++static struct snd_soc_dai_ops cs42888_dai_ops = {
++ .set_fmt = cs42888_set_dai_fmt,
++ .set_sysclk = cs42888_set_dai_sysclk,
++ .hw_params = cs42888_hw_params,
++ .shutdown = cs42888_shutdown,
++ .prepare = cs42888_prepare,
++};
++
++
++static struct snd_soc_dai_driver cs42888_dai = {
++ .name = "CS42888",
++ .playback = {
++ .stream_name = "codec-Playback",
++ .channels_min = 2,
++ .channels_max = 8,
++ .rates = SNDRV_PCM_RATE_8000_192000,
++ .formats = CS42888_FORMATS,
++ },
++ .capture = {
++ .stream_name = "codec-Capture",
++ .channels_min = 2,
++ .channels_max = 4,
++ .rates = SNDRV_PCM_RATE_8000_192000,
++ .formats = CS42888_FORMATS,
++ },
++ .ops = &cs42888_dai_ops,
++};
++
++/**
++ * cs42888_probe - ASoC probe function
++ * @pdev: platform device
++ *
++ * This function is called when ASoC has all the pieces it needs to
++ * instantiate a sound driver.
++ */
++static int cs42888_probe(struct snd_soc_codec *codec)
++{
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++ int ret, i, val;
++
++ cs42888->codec = codec;
++ /* setup i2c data ops */
++ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
++ if (ret < 0) {
++ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
++ return ret;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(cs42888->supplies); i++)
++ cs42888->supplies[i].supply = cs42888_supply_names[i];
++
++ ret = devm_regulator_bulk_get(codec->dev,
++ ARRAY_SIZE(cs42888->supplies), cs42888->supplies);
++ if (ret) {
++ dev_err(codec->dev, "Failed to request supplies: %d\n",
++ ret);
++ return ret;
++ }
++
++ ret = regulator_bulk_enable(ARRAY_SIZE(cs42888->supplies),
++ cs42888->supplies);
++ if (ret) {
++ dev_err(codec->dev, "Failed to enable supplies: %d\n",
++ ret);
++ goto err;
++ }
++ msleep(1);
++
++ /* The I2C interface is set up, so pre-fill our register cache */
++ ret = cs42888_fill_cache(codec);
++ if (ret < 0) {
++ dev_err(codec->dev, "failed to fill register cache\n");
++ goto err;
++ }
++
++ /* Enter low power state */
++ val = snd_soc_read(codec, CS42888_PWRCTL);
++ val |= CS42888_PWRCTL_PDN_MASK;
++ ret = snd_soc_write(codec, CS42888_PWRCTL, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ goto err;
++ }
++
++ /* Disable auto-mute */
++ val = snd_soc_read(codec, CS42888_TRANS);
++ val &= ~CS42888_TRANS_AMUTE_MASK;
++ val &= ~CS42888_TRANS_DAC_SZC_MASK;
++ val |= CS42888_TRANS_DAC_SZC_SR;
++ ret = snd_soc_write(codec, CS42888_TRANS, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ goto err;
++ }
++ /* Add the non-DAPM controls */
++ snd_soc_add_codec_controls(codec, cs42888_snd_controls,
++ ARRAY_SIZE(cs42888_snd_controls));
++
++ /* Add DAPM controls */
++ cs42888_add_widgets(codec);
++ return 0;
++err:
++ regulator_bulk_disable(ARRAY_SIZE(cs42888->supplies),
++ cs42888->supplies);
++ return ret;
++}
++
++/**
++ * cs42888_remove - ASoC remove function
++ * @pdev: platform device
++ *
++ * This function is the counterpart to cs42888_probe().
++ */
++static int cs42888_remove(struct snd_soc_codec *codec)
++{
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++
++ regulator_bulk_disable(ARRAY_SIZE(cs42888->supplies),
++ cs42888->supplies);
++
++ return 0;
++};
++
++/*
++ * ASoC codec device structure
++ *
++ * Assign this variable to the codec_dev field of the machine driver's
++ * snd_soc_device structure.
++ */
++static struct snd_soc_codec_driver cs42888_driver = {
++ .probe = cs42888_probe,
++ .remove = cs42888_remove,
++ .reg_cache_size = CS42888_NUMREGS + 1,
++ .reg_word_size = sizeof(u8),
++ .reg_cache_step = 1,
++};
++
++/**
++ * cs42888_i2c_probe - initialize the I2C interface of the CS42888
++ * @i2c_client: the I2C client object
++ * @id: the I2C device ID (ignored)
++ *
++ * This function is called whenever the I2C subsystem finds a device that
++ * matches the device ID given via a prior call to i2c_add_driver().
++ */
++static int cs42888_i2c_probe(struct i2c_client *i2c_client,
++ const struct i2c_device_id *id)
++{
++ struct cs42888_private *cs42888;
++ int ret, val;
++
++ /* Verify that we have a CS42888 */
++ val = i2c_smbus_read_byte_data(i2c_client, CS42888_CHIPID);
++ if (val < 0) {
++ dev_err(&i2c_client->dev, "Device with ID register %x is not a CS42888", val);
++ return -ENODEV;
++ }
++ /* The top four bits of the chip ID should be 0000. */
++ if ((val & CS42888_CHIPID_ID_MASK) != 0x00) {
++ dev_err(&i2c_client->dev, "device is not a CS42888\n");
++ return -ENODEV;
++ }
++
++ dev_info(&i2c_client->dev, "found device at i2c address %X\n",
++ i2c_client->addr);
++ dev_info(&i2c_client->dev, "hardware revision %X\n", val & 0xF);
++
++ /* Allocate enough space for the snd_soc_codec structure
++ and our private data together. */
++ cs42888 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42888_private), GFP_KERNEL);
++ if (!cs42888) {
++ dev_err(&i2c_client->dev, "could not allocate codec\n");
++ return -ENOMEM;
++ }
++
++ i2c_set_clientdata(i2c_client, cs42888);
++
++ cs42888->clk = devm_clk_get(&i2c_client->dev, NULL);
++ if (IS_ERR(cs42888->clk)) {
++ ret = PTR_ERR(cs42888->clk);
++ dev_err(&i2c_client->dev, "Cannot get the clock: %d\n", ret);
++ return ret;
++ }
++
++ cs42888->mclk = clk_get_rate(cs42888->clk);
++ switch (cs42888->mclk) {
++ case 24576000:
++ cs42888_dai.playback.rates = SNDRV_PCM_RATE_48000 |
++ SNDRV_PCM_RATE_96000 |
++ SNDRV_PCM_RATE_192000;
++ cs42888_dai.capture.rates = SNDRV_PCM_RATE_48000 |
++ SNDRV_PCM_RATE_96000 |
++ SNDRV_PCM_RATE_192000;
++ break;
++ case 16934400:
++ cs42888_dai.playback.rates = SNDRV_PCM_RATE_44100 |
++ SNDRV_PCM_RATE_88200 |
++ SNDRV_PCM_RATE_176400;
++ cs42888_dai.capture.rates = SNDRV_PCM_RATE_44100 |
++ SNDRV_PCM_RATE_88200 |
++ SNDRV_PCM_RATE_176400;
++ break;
++ default:
++ dev_err(&i2c_client->dev, "codec mclk is not supported %d\n", cs42888->mclk);
++ break;
++ }
++
++ ret = snd_soc_register_codec(&i2c_client->dev,
++ &cs42888_driver, &cs42888_dai, 1);
++ if (ret) {
++ dev_err(&i2c_client->dev, "Failed to register codec:%d\n", ret);
++ return ret;
++ }
++ return 0;
++}
++
++/**
++ * cs42888_i2c_remove - remove an I2C device
++ * @i2c_client: the I2C client object
++ *
++ * This function is the counterpart to cs42888_i2c_probe().
++ */
++static int cs42888_i2c_remove(struct i2c_client *i2c_client)
++{
++ snd_soc_unregister_codec(&i2c_client->dev);
++ return 0;
++}
++
++/*
++ * cs42888_i2c_id - I2C device IDs supported by this driver
++ */
++static struct i2c_device_id cs42888_i2c_id[] = {
++ {"cs42888", 0},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, cs42888_i2c_id);
++
++#ifdef CONFIG_PM
++/* This suspend/resume implementation can handle both - a simple standby
++ * where the codec remains powered, and a full suspend, where the voltage
++ * domain the codec is connected to is teared down and/or any other hardware
++ * reset condition is asserted.
++ *
++ * The codec's own power saving features are enabled in the suspend callback,
++ * and all registers are written back to the hardware when resuming.
++ */
++
++static int cs42888_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct cs42888_private *cs42888 = i2c_get_clientdata(client);
++ struct snd_soc_codec *codec = cs42888->codec;
++ int reg = snd_soc_read(codec, CS42888_PWRCTL) | CS42888_PWRCTL_PDN_MASK;
++ return snd_soc_write(codec, CS42888_PWRCTL, reg);
++}
++
++static int cs42888_i2c_resume(struct i2c_client *client)
++{
++ struct cs42888_private *cs42888 = i2c_get_clientdata(client);
++ struct snd_soc_codec *codec = cs42888->codec;
++ int reg;
++
++ /* In case the device was put to hard reset during sleep, we need to
++ * wait 500ns here before any I2C communication. */
++ ndelay(500);
++
++ /* first restore the entire register cache ... */
++ for (reg = CS42888_FIRSTREG; reg <= CS42888_LASTREG; reg++) {
++ u8 val = snd_soc_read(codec, reg);
++
++ if (i2c_smbus_write_byte_data(client, reg, val)) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return -EIO;
++ }
++ }
++
++ /* ... then disable the power-down bits */
++ reg = snd_soc_read(codec, CS42888_PWRCTL);
++ reg &= ~CS42888_PWRCTL_PDN_MASK;
++ return snd_soc_write(codec, CS42888_PWRCTL, reg);
++}
++#else
++#define cs42888_i2c_suspend NULL
++#define cs42888_i2c_resume NULL
++#endif /* CONFIG_PM */
++
++/*
++ * cs42888_i2c_driver - I2C device identification
++ *
++ * This structure tells the I2C subsystem how to identify and support a
++ * given I2C device type.
++ */
++
++static const struct of_device_id cs42888_dt_ids[] = {
++ { .compatible = "cirrus,cs42888", },
++ { /* sentinel */ }
++};
++
++static struct i2c_driver cs42888_i2c_driver = {
++ .driver = {
++ .name = "cs42888",
++ .owner = THIS_MODULE,
++ .of_match_table = cs42888_dt_ids,
++ },
++ .probe = cs42888_i2c_probe,
++ .remove = cs42888_i2c_remove,
++ .suspend = cs42888_i2c_suspend,
++ .resume = cs42888_i2c_resume,
++ .id_table = cs42888_i2c_id,
++};
++
++module_i2c_driver(cs42888_i2c_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Cirrus Logic CS42888 ALSA SoC Codec Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/sound/soc/codecs/cs42888.h linux-openelec/sound/soc/codecs/cs42888.h
+--- linux-3.14.36/sound/soc/codecs/cs42888.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/codecs/cs42888.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,123 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef _CS42888_H
++#define _CS42888_H
++
++/* CS42888 registers addresses */
++#define CS42888_CHIPID 0x01 /* Chip ID */
++#define CS42888_PWRCTL 0x02 /* Power Control */
++#define CS42888_MODE 0x03 /* Functional Mode */
++#define CS42888_FORMAT 0x04 /* Interface Formats */
++#define CS42888_ADCCTL 0x05 /* ADC Control */
++#define CS42888_TRANS 0x06 /* Transition Control */
++#define CS42888_MUTE 0x07 /* Mute Control */
++#define CS42888_VOLAOUT1 0x08 /* Volume Control AOUT1*/
++#define CS42888_VOLAOUT2 0x09 /* Volume Control AOUT2*/
++#define CS42888_VOLAOUT3 0x0A /* Volume Control AOUT3*/
++#define CS42888_VOLAOUT4 0x0B /* Volume Control AOUT4*/
++#define CS42888_VOLAOUT5 0x0C /* Volume Control AOUT5*/
++#define CS42888_VOLAOUT6 0x0D /* Volume Control AOUT6*/
++#define CS42888_VOLAOUT7 0x0E /* Volume Control AOUT7*/
++#define CS42888_VOLAOUT8 0x0F /* Volume Control AOUT8*/
++#define CS42888_DACINV 0x10 /* DAC Channel Invert */
++#define CS42888_VOLAIN1 0x11 /* Volume Control AIN1 */
++#define CS42888_VOLAIN2 0x12 /* Volume Control AIN2 */
++#define CS42888_VOLAIN3 0x13 /* Volume Control AIN3 */
++#define CS42888_VOLAIN4 0x14 /* Volume Control AIN4 */
++#define CS42888_ADCINV 0x17 /* ADC Channel Invert */
++#define CS42888_STATUSCTL 0x18 /* Status Control */
++#define CS42888_STATUS 0x19 /* Status */
++#define CS42888_STATUSMASK 0x1A /* Status Mask */
++
++#define CS42888_FIRSTREG 0x01
++#define CS42888_LASTREG 0x1A
++#define CS42888_NUMREGS (CS42888_LASTREG - CS42888_FIRSTREG + 1)
++#define CS42888_I2C_INCR 0x80
++
++/* Bit masks for the CS42888 registers */
++#define CS42888_CHIPID_ID_MASK 0xF0
++#define CS42888_CHIPID_REV 0x0F
++#define CS42888_PWRCTL_PDN_ADC2_OFFSET 6
++#define CS42888_PWRCTL_PDN_ADC1_OFFSET 5
++#define CS42888_PWRCTL_PDN_DAC4_OFFSET 4
++#define CS42888_PWRCTL_PDN_DAC3_OFFSET 3
++#define CS42888_PWRCTL_PDN_DAC2_OFFSET 2
++#define CS42888_PWRCTL_PDN_DAC1_OFFSET 1
++#define CS42888_PWRCTL_PDN_OFFSET 0
++#define CS42888_PWRCTL_PDN_ADC2_MASK (1 << CS42888_PWRCTL_PDN_ADC2_OFFSET)
++#define CS42888_PWRCTL_PDN_ADC1_MASK (1 << CS42888_PWRCTL_PDN_ADC1_OFFSET)
++#define CS42888_PWRCTL_PDN_DAC4_MASK (1 << CS42888_PWRCTL_PDN_DAC4_OFFSET)
++#define CS42888_PWRCTL_PDN_DAC3_MASK (1 << CS42888_PWRCTL_PDN_DAC3_OFFSET)
++#define CS42888_PWRCTL_PDN_DAC2_MASK (1 << CS42888_PWRCTL_PDN_DAC2_OFFSET)
++#define CS42888_PWRCTL_PDN_DAC1_MASK (1 << CS42888_PWRCTL_PDN_DAC1_OFFSET)
++#define CS42888_PWRCTL_PDN_MASK (1 << CS42888_PWRCTL_PDN_OFFSET)
++
++#define CS42888_MODE_SPEED_MASK 0xF0
++#define CS42888_MODE_1X 0x00
++#define CS42888_MODE_2X 0x50
++#define CS42888_MODE_4X 0xA0
++#define CS42888_MODE_SLAVE 0xF0
++#define CS42888_MODE_DIV_MASK 0x0E
++#define CS42888_MODE_DIV1 0x00
++#define CS42888_MODE_DIV2 0x02
++#define CS42888_MODE_DIV3 0x04
++#define CS42888_MODE_DIV4 0x06
++#define CS42888_MODE_DIV5 0x08
++
++#define CS42888_FORMAT_FREEZE_OFFSET 7
++#define CS42888_FORMAT_AUX_DIF_OFFSET 6
++#define CS42888_FORMAT_DAC_DIF_OFFSET 3
++#define CS42888_FORMAT_ADC_DIF_OFFSET 0
++#define CS42888_FORMAT_FREEZE_MASK (1 << CS42888_FORMAT_FREEZE_OFFSET)
++#define CS42888_FORMAT_AUX_DIF_MASK (1 << CS42888_FORMAT_AUX_DIF_OFFSET)
++#define CS42888_FORMAT_DAC_DIF_MASK (7 << CS42888_FORMAT_DAC_DIF_OFFSET)
++#define CS42888_FORMAT_ADC_DIF_MASK (7 << CS42888_FORMAT_ADC_DIF_OFFSET)
++
++#define CS42888_TRANS_DAC_SNGVOL_OFFSET 7
++#define CS42888_TRANS_DAC_SZC_OFFSET 5
++#define CS42888_TRANS_AMUTE_OFFSET 4
++#define CS42888_TRANS_MUTE_ADC_SP_OFFSET 3
++#define CS42888_TRANS_ADC_SNGVOL_OFFSET 2
++#define CS42888_TRANS_ADC_SZC_OFFSET 0
++#define CS42888_TRANS_DAC_SNGVOL_MASK (1 << CS42888_TRANS_DAC_SNGVOL_OFFSET)
++#define CS42888_TRANS_DAC_SZC_MASK (3 << CS42888_TRANS_DAC_SZC_OFFSET)
++#define CS42888_TRANS_AMUTE_MASK (1 << CS42888_TRANS_AMUTE_OFFSET)
++#define CS42888_TRANS_MUTE_ADC_SP_MASK (1 << CS42888_TRANS_MUTE_ADC_SP_OFFSET)
++#define CS42888_TRANS_ADC_SNGVOL_MASK (1 << CS42888_TRANS_ADC_SNGVOL_OFFSET)
++#define CS42888_TRANS_ADC_SZC_MASK (3 << CS42888_TRANS_ADC_SZC_OFFSET)
++#define CS42888_TRANS_DAC_SZC_IC (0 << CS42888_TRANS_DAC_SZC_OFFSET)
++#define CS42888_TRANS_DAC_SZC_ZC (1 << CS42888_TRANS_DAC_SZC_OFFSET)
++#define CS42888_TRANS_DAC_SZC_SR (2 << CS42888_TRANS_DAC_SZC_OFFSET)
++#define CS42888_TRANS_DAC_SZC_SRZC (3 << CS42888_TRANS_DAC_SZC_OFFSET)
++
++#define CS42888_MUTE_AOUT8 (0x1 << 7)
++#define CS42888_MUTE_AOUT7 (0x1 << 6)
++#define CS42888_MUTE_AOUT6 (0x1 << 5)
++#define CS42888_MUTE_AOUT5 (0x1 << 4)
++#define CS42888_MUTE_AOUT4 (0x1 << 3)
++#define CS42888_MUTE_AOUT3 (0x1 << 2)
++#define CS42888_MUTE_AOUT2 (0x1 << 1)
++#define CS42888_MUTE_AOUT1 (0x1 << 0)
++#define CS42888_MUTE_ALL (CS42888_MUTE_AOUT1 | CS42888_MUTE_AOUT2 | \
++ CS42888_MUTE_AOUT3 | CS42888_MUTE_AOUT4 | \
++ CS42888_MUTE_AOUT5 | CS42888_MUTE_AOUT6 | \
++ CS42888_MUTE_AOUT7 | CS42888_MUTE_AOUT8)
++
++#define DIF_LEFT_J 0
++#define DIF_I2S 1
++#define DIF_RIGHT_J 2
++#define DIF_TDM 6
++
++
++#endif
+diff -Nur linux-3.14.36/sound/soc/codecs/Kconfig linux-openelec/sound/soc/codecs/Kconfig
+--- linux-3.14.36/sound/soc/codecs/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/codecs/Kconfig 2015-07-24 18:03:30.316842002 -0500
+@@ -37,6 +37,7 @@
+ select SND_SOC_CS42L73 if I2C
+ select SND_SOC_CS4270 if I2C
+ select SND_SOC_CS4271 if SND_SOC_I2C_AND_SPI
++ select SND_SOC_CS42888 if I2C
+ select SND_SOC_CX20442 if TTY
+ select SND_SOC_DA7210 if I2C
+ select SND_SOC_DA7213 if I2C
+@@ -81,6 +82,7 @@
+ select SND_SOC_TWL6040 if TWL6040_CORE
+ select SND_SOC_UDA134X
+ select SND_SOC_UDA1380 if I2C
++ select SND_SOC_VT1613 if SND_SOC_AC97_BUS
+ select SND_SOC_WL1273 if MFD_WL1273_CORE
+ select SND_SOC_WM0010 if SPI_MASTER
+ select SND_SOC_WM1250_EV1 if I2C
+@@ -254,6 +256,9 @@
+ config SND_SOC_CS4271
+ tristate
+
++config SND_SOC_CS42888
++ tristate
++
+ config SND_SOC_CX20442
+ tristate
+ depends on TTY
+@@ -382,7 +387,11 @@
+
+ config SND_SOC_UDA1380
+ tristate
+-
++
++config SND_SOC_VT1613
++ select REGMAP_AC97
++ tristate
++
+ config SND_SOC_WL1273
+ tristate
+
+diff -Nur linux-3.14.36/sound/soc/codecs/Makefile linux-openelec/sound/soc/codecs/Makefile
+--- linux-3.14.36/sound/soc/codecs/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/codecs/Makefile 2015-07-24 18:03:30.304842002 -0500
+@@ -23,6 +23,7 @@
+ snd-soc-cs42l73-objs := cs42l73.o
+ snd-soc-cs4270-objs := cs4270.o
+ snd-soc-cs4271-objs := cs4271.o
++snd-soc-cs42888-objs := cs42888.o
+ snd-soc-cx20442-objs := cx20442.o
+ snd-soc-da7210-objs := da7210.o
+ snd-soc-da7213-objs := da7213.o
+@@ -71,6 +72,7 @@
+ snd-soc-twl6040-objs := twl6040.o
+ snd-soc-uda134x-objs := uda134x.o
+ snd-soc-uda1380-objs := uda1380.o
++snd-soc-vt1613-objs := vt1613.o
+ snd-soc-wl1273-objs := wl1273.o
+ snd-soc-wm-adsp-objs := wm_adsp.o
+ snd-soc-wm0010-objs := wm0010.o
+@@ -156,6 +158,7 @@
+ obj-$(CONFIG_SND_SOC_CS42L73) += snd-soc-cs42l73.o
+ obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
+ obj-$(CONFIG_SND_SOC_CS4271) += snd-soc-cs4271.o
++obj-$(CONFIG_SND_SOC_CS42888) += snd-soc-cs42888.o
+ obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
+ obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
+ obj-$(CONFIG_SND_SOC_DA7213) += snd-soc-da7213.o
+@@ -201,6 +204,7 @@
+ obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
+ obj-$(CONFIG_SND_SOC_UDA134X) += snd-soc-uda134x.o
+ obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
++obj-$(CONFIG_SND_SOC_VT1613) += snd-soc-vt1613.o
+ obj-$(CONFIG_SND_SOC_WL1273) += snd-soc-wl1273.o
+ obj-$(CONFIG_SND_SOC_WM0010) += snd-soc-wm0010.o
+ obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
+diff -Nur linux-3.14.36/sound/soc/codecs/sgtl5000.c linux-openelec/sound/soc/codecs/sgtl5000.c
+--- linux-3.14.36/sound/soc/codecs/sgtl5000.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/codecs/sgtl5000.c 2015-07-24 18:03:29.660842002 -0500
+@@ -756,7 +756,7 @@
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
+ int reg;
+-
++dev_info(codec->dev, "%s(): enabled %u\n", __func__, ldo->enabled);
+ if (ldo_regulator_is_enabled(dev))
+ return 0;
+
+@@ -788,10 +788,16 @@
+ {
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
++dev_info(codec->dev, "%s(): enabled %u\n", __func__, ldo->enabled);
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINREG_SIMPLE_POWERUP,
++ SGTL5000_LINREG_SIMPLE_POWERUP);
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINEREG_D_POWERUP,
+ 0);
++dev_info(codec->dev, "%s: ANA_POWER = 0x%04x\n", __func__, snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER));
+
+ /* clear voltage info */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
+@@ -849,6 +855,7 @@
+ config.dev = codec->dev;
+ config.driver_data = ldo;
+ config.init_data = init_data;
++ config.ena_gpio = -EINVAL;
+
+ ldo->dev = regulator_register(&ldo->desc, &config);
+ if (IS_ERR(ldo->dev)) {
+@@ -1202,8 +1209,11 @@
+ * if vddio and vddd > 3.1v,
+ * charge pump should be clean before set ana_pwr
+ */
+- snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+- SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
++// FIXME: this is total crap - we have read this register above into
++// ana_pwr, which we then modify (above), and then write back to the
++// register below. This modification just gets completely overwritten.
++// snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++// SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
+
+ /* VDDC use VDDIO rail */
+ lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+@@ -1320,7 +1330,7 @@
+ return ret;
+ }
+
+- ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
++ ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+ goto err_ldo_remove;
+@@ -1328,16 +1338,13 @@
+ ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+- goto err_regulator_free;
++ goto err_ldo_remove;
+
+ /* wait for all power rails bring up */
+ udelay(10);
+
+ return 0;
+
+-err_regulator_free:
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
+ err_ldo_remove:
+ if (!external_vddd)
+ ldo_regulator_remove(codec);
+@@ -1358,6 +1365,9 @@
+ return ret;
+ }
+
++ if (!devres_open_group(codec->dev, NULL, GFP_KERNEL))
++ return -ENOMEM;
++
+ ret = sgtl5000_enable_regulators(codec);
+ if (ret)
+ return ret;
+@@ -1414,8 +1424,9 @@
+ err:
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
++
++ devres_release_group(codec->dev, NULL);
++
+ ldo_regulator_remove(codec);
+
+ return ret;
+@@ -1429,8 +1440,9 @@
+
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
++
++ devres_release_group(codec->dev, NULL);
++
+ ldo_regulator_remove(codec);
+
+ return 0;
+diff -Nur linux-3.14.36/sound/soc/codecs/sgtl5000.c.orig linux-openelec/sound/soc/codecs/sgtl5000.c.orig
+--- linux-3.14.36/sound/soc/codecs/sgtl5000.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/codecs/sgtl5000.c.orig 2015-07-24 18:03:29.096842002 -0500
+@@ -0,0 +1,1609 @@
++/*
++ * sgtl5000.c -- SGTL5000 ALSA SoC Audio driver
++ *
++ * Copyright 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/pm.h>
++#include <linux/i2c.h>
++#include <linux/clk.h>
++#include <linux/regmap.h>
++#include <linux/regulator/driver.h>
++#include <linux/regulator/machine.h>
++#include <linux/regulator/consumer.h>
++#include <linux/of_device.h>
++#include <sound/core.h>
++#include <sound/tlv.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++#include <sound/soc-dapm.h>
++#include <sound/initval.h>
++
++#include "sgtl5000.h"
++
++#define SGTL5000_DAP_REG_OFFSET 0x0100
++#define SGTL5000_MAX_REG_OFFSET 0x013A
++
++/* default value of sgtl5000 registers */
++static const struct reg_default sgtl5000_reg_defaults[] = {
++ { SGTL5000_CHIP_CLK_CTRL, 0x0008 },
++ { SGTL5000_CHIP_I2S_CTRL, 0x0010 },
++ { SGTL5000_CHIP_SSS_CTRL, 0x0010 },
++ { SGTL5000_CHIP_DAC_VOL, 0x3c3c },
++ { SGTL5000_CHIP_PAD_STRENGTH, 0x015f },
++ { SGTL5000_CHIP_ANA_HP_CTRL, 0x1818 },
++ { SGTL5000_CHIP_ANA_CTRL, 0x0111 },
++ { SGTL5000_CHIP_LINE_OUT_VOL, 0x0404 },
++ { SGTL5000_CHIP_ANA_POWER, 0x7060 },
++ { SGTL5000_CHIP_PLL_CTRL, 0x5000 },
++ { SGTL5000_DAP_BASS_ENHANCE, 0x0040 },
++ { SGTL5000_DAP_BASS_ENHANCE_CTRL, 0x051f },
++ { SGTL5000_DAP_SURROUND, 0x0040 },
++ { SGTL5000_DAP_EQ_BASS_BAND0, 0x002f },
++ { SGTL5000_DAP_EQ_BASS_BAND1, 0x002f },
++ { SGTL5000_DAP_EQ_BASS_BAND2, 0x002f },
++ { SGTL5000_DAP_EQ_BASS_BAND3, 0x002f },
++ { SGTL5000_DAP_EQ_BASS_BAND4, 0x002f },
++ { SGTL5000_DAP_MAIN_CHAN, 0x8000 },
++ { SGTL5000_DAP_AVC_CTRL, 0x0510 },
++ { SGTL5000_DAP_AVC_THRESHOLD, 0x1473 },
++ { SGTL5000_DAP_AVC_ATTACK, 0x0028 },
++ { SGTL5000_DAP_AVC_DECAY, 0x0050 },
++};
++
++/* regulator supplies for sgtl5000, VDDD is an optional external supply */
++enum sgtl5000_regulator_supplies {
++ VDDA,
++ VDDIO,
++ VDDD,
++ SGTL5000_SUPPLY_NUM
++};
++
++/* vddd is optional supply */
++static const char *supply_names[SGTL5000_SUPPLY_NUM] = {
++ "VDDA",
++ "VDDIO",
++ "VDDD"
++};
++
++#define LDO_CONSUMER_NAME "VDDD_LDO"
++#define LDO_VOLTAGE 1200000
++
++static struct regulator_consumer_supply ldo_consumer[] = {
++ REGULATOR_SUPPLY(LDO_CONSUMER_NAME, NULL),
++};
++
++static struct regulator_init_data ldo_init_data = {
++ .constraints = {
++ .min_uV = 1200000,
++ .max_uV = 1200000,
++ .valid_modes_mask = REGULATOR_MODE_NORMAL,
++ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
++ },
++ .num_consumer_supplies = 1,
++ .consumer_supplies = &ldo_consumer[0],
++};
++
++/*
++ * sgtl5000 internal ldo regulator,
++ * enabled when VDDD not provided
++ */
++struct ldo_regulator {
++ struct regulator_desc desc;
++ struct regulator_dev *dev;
++ int voltage;
++ void *codec_data;
++ bool enabled;
++};
++
++/* sgtl5000 private structure in codec */
++struct sgtl5000_priv {
++ int sysclk; /* sysclk rate */
++ int master; /* i2s master or not */
++ int fmt; /* i2s data format */
++ struct regulator_bulk_data supplies[SGTL5000_SUPPLY_NUM];
++ struct ldo_regulator *ldo;
++ struct regmap *regmap;
++ struct clk *mclk;
++ int revision;
++};
++
++/*
++ * mic_bias power on/off share the same register bits with
++ * output impedance of mic bias, when power on mic bias, we
++ * need reclaim it to impedance value.
++ * 0x0 = Powered off
++ * 0x1 = 2Kohm
++ * 0x2 = 4Kohm
++ * 0x3 = 8Kohm
++ */
++static int mic_bias_event(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ switch (event) {
++ case SND_SOC_DAPM_POST_PMU:
++ /* change mic bias resistor to 4Kohm */
++ snd_soc_update_bits(w->codec, SGTL5000_CHIP_MIC_CTRL,
++ SGTL5000_BIAS_R_MASK,
++ SGTL5000_BIAS_R_4k << SGTL5000_BIAS_R_SHIFT);
++ break;
++
++ case SND_SOC_DAPM_PRE_PMD:
++ snd_soc_update_bits(w->codec, SGTL5000_CHIP_MIC_CTRL,
++ SGTL5000_BIAS_R_MASK, 0);
++ break;
++ }
++ return 0;
++}
++
++/*
++ * As manual described, ADC/DAC only works when VAG powerup,
++ * So enabled VAG before ADC/DAC up.
++ * In power down case, we need wait 400ms when vag fully ramped down.
++ */
++static int power_vag_event(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
++
++ switch (event) {
++ case SND_SOC_DAPM_POST_PMU:
++ snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
++ break;
++
++ case SND_SOC_DAPM_PRE_PMD:
++ /*
++ * Don't clear VAG_POWERUP, when both DAC and ADC are
++ * operational to prevent inadvertently starving the
++ * other one of them.
++ */
++ if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
++ mask) != mask) {
++ snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_VAG_POWERUP, 0);
++ msleep(400);
++ }
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++/* input sources for ADC */
++static const char *adc_mux_text[] = {
++ "MIC_IN", "LINE_IN"
++};
++
++static const struct soc_enum adc_enum =
++SOC_ENUM_SINGLE(SGTL5000_CHIP_ANA_CTRL, 2, 2, adc_mux_text);
++
++static const struct snd_kcontrol_new adc_mux =
++SOC_DAPM_ENUM("Capture Mux", adc_enum);
++
++/* input sources for DAC */
++static const char *dac_mux_text[] = {
++ "DAC", "LINE_IN"
++};
++
++static const struct soc_enum dac_enum =
++SOC_ENUM_SINGLE(SGTL5000_CHIP_ANA_CTRL, 6, 2, dac_mux_text);
++
++static const struct snd_kcontrol_new dac_mux =
++SOC_DAPM_ENUM("Headphone Mux", dac_enum);
++
++static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
++ SND_SOC_DAPM_INPUT("LINE_IN"),
++ SND_SOC_DAPM_INPUT("MIC_IN"),
++
++ SND_SOC_DAPM_OUTPUT("HP_OUT"),
++ SND_SOC_DAPM_OUTPUT("LINE_OUT"),
++
++ SND_SOC_DAPM_SUPPLY("Mic Bias", SGTL5000_CHIP_MIC_CTRL, 8, 0,
++ mic_bias_event,
++ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
++
++ SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
++ SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
++
++ SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
++ SND_SOC_DAPM_MUX("Headphone Mux", SND_SOC_NOPM, 0, 0, &dac_mux),
++
++ /* aif for i2s input */
++ SND_SOC_DAPM_AIF_IN("AIFIN", "Playback",
++ 0, SGTL5000_CHIP_DIG_POWER,
++ 0, 0),
++
++ /* aif for i2s output */
++ SND_SOC_DAPM_AIF_OUT("AIFOUT", "Capture",
++ 0, SGTL5000_CHIP_DIG_POWER,
++ 1, 0),
++
++ SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
++ SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
++
++ SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
++ SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
++};
++
++/* routes for sgtl5000 */
++static const struct snd_soc_dapm_route sgtl5000_dapm_routes[] = {
++ {"Capture Mux", "LINE_IN", "LINE_IN"}, /* line_in --> adc_mux */
++ {"Capture Mux", "MIC_IN", "MIC_IN"}, /* mic_in --> adc_mux */
++
++ {"ADC", NULL, "Capture Mux"}, /* adc_mux --> adc */
++ {"AIFOUT", NULL, "ADC"}, /* adc --> i2s_out */
++
++ {"DAC", NULL, "AIFIN"}, /* i2s-->dac,skip audio mux */
++ {"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */
++ {"LO", NULL, "DAC"}, /* dac --> line_out */
++
++ {"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */
++ {"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */
++
++ {"LINE_OUT", NULL, "LO"},
++ {"HP_OUT", NULL, "HP"},
++};
++
++/* custom function to fetch info of PCM playback volume */
++static int dac_info_volsw(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++ uinfo->count = 2;
++ uinfo->value.integer.min = 0;
++ uinfo->value.integer.max = 0xfc - 0x3c;
++ return 0;
++}
++
++/*
++ * custom function to get of PCM playback volume
++ *
++ * dac volume register
++ * 15-------------8-7--------------0
++ * | R channel vol | L channel vol |
++ * -------------------------------
++ *
++ * PCM volume with 0.5017 dB steps from 0 to -90 dB
++ *
++ * register values map to dB
++ * 0x3B and less = Reserved
++ * 0x3C = 0 dB
++ * 0x3D = -0.5 dB
++ * 0xF0 = -90 dB
++ * 0xFC and greater = Muted
++ *
++ * register value map to userspace value
++ *
++ * register value 0x3c(0dB) 0xf0(-90dB)0xfc
++ * ------------------------------
++ * userspace value 0xc0 0
++ */
++static int dac_get_volsw(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++ int reg;
++ int l;
++ int r;
++
++ reg = snd_soc_read(codec, SGTL5000_CHIP_DAC_VOL);
++
++ /* get left channel volume */
++ l = (reg & SGTL5000_DAC_VOL_LEFT_MASK) >> SGTL5000_DAC_VOL_LEFT_SHIFT;
++
++ /* get right channel volume */
++ r = (reg & SGTL5000_DAC_VOL_RIGHT_MASK) >> SGTL5000_DAC_VOL_RIGHT_SHIFT;
++
++ /* make sure value fall in (0x3c,0xfc) */
++ l = clamp(l, 0x3c, 0xfc);
++ r = clamp(r, 0x3c, 0xfc);
++
++ /* invert it and map to userspace value */
++ l = 0xfc - l;
++ r = 0xfc - r;
++
++ ucontrol->value.integer.value[0] = l;
++ ucontrol->value.integer.value[1] = r;
++
++ return 0;
++}
++
++/*
++ * custom function to put of PCM playback volume
++ *
++ * dac volume register
++ * 15-------------8-7--------------0
++ * | R channel vol | L channel vol |
++ * -------------------------------
++ *
++ * PCM volume with 0.5017 dB steps from 0 to -90 dB
++ *
++ * register values map to dB
++ * 0x3B and less = Reserved
++ * 0x3C = 0 dB
++ * 0x3D = -0.5 dB
++ * 0xF0 = -90 dB
++ * 0xFC and greater = Muted
++ *
++ * userspace value map to register value
++ *
++ * userspace value 0xc0 0
++ * ------------------------------
++ * register value 0x3c(0dB) 0xf0(-90dB)0xfc
++ */
++static int dac_put_volsw(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++ int reg;
++ int l;
++ int r;
++
++ l = ucontrol->value.integer.value[0];
++ r = ucontrol->value.integer.value[1];
++
++ /* make sure userspace volume fall in (0, 0xfc-0x3c) */
++ l = clamp(l, 0, 0xfc - 0x3c);
++ r = clamp(r, 0, 0xfc - 0x3c);
++
++ /* invert it, get the value can be set to register */
++ l = 0xfc - l;
++ r = 0xfc - r;
++
++ /* shift to get the register value */
++ reg = l << SGTL5000_DAC_VOL_LEFT_SHIFT |
++ r << SGTL5000_DAC_VOL_RIGHT_SHIFT;
++
++ snd_soc_write(codec, SGTL5000_CHIP_DAC_VOL, reg);
++
++ return 0;
++}
++
++static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
++
++/* tlv for mic gain, 0db 20db 30db 40db */
++static const unsigned int mic_gain_tlv[] = {
++ TLV_DB_RANGE_HEAD(2),
++ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
++ 1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
++};
++
++/* tlv for hp volume, -51.5db to 12.0db, step .5db */
++static const DECLARE_TLV_DB_SCALE(headphone_volume, -5150, 50, 0);
++
++static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
++ /* SOC_DOUBLE_S8_TLV with invert */
++ {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Playback Volume",
++ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
++ SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = dac_info_volsw,
++ .get = dac_get_volsw,
++ .put = dac_put_volsw,
++ },
++
++ SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
++ SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
++ SGTL5000_CHIP_ANA_ADC_CTRL,
++ 8, 1, 0, capture_6db_attenuate),
++ SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
++
++ SOC_DOUBLE_TLV("Headphone Playback Volume",
++ SGTL5000_CHIP_ANA_HP_CTRL,
++ 0, 8,
++ 0x7f, 1,
++ headphone_volume),
++ SOC_SINGLE("Headphone Playback ZC Switch", SGTL5000_CHIP_ANA_CTRL,
++ 5, 1, 0),
++
++ SOC_SINGLE_TLV("Mic Volume", SGTL5000_CHIP_MIC_CTRL,
++ 0, 3, 0, mic_gain_tlv),
++};
++
++/* mute the codec used by alsa core */
++static int sgtl5000_digital_mute(struct snd_soc_dai *codec_dai, int mute)
++{
++ struct snd_soc_codec *codec = codec_dai->codec;
++ u16 adcdac_ctrl = SGTL5000_DAC_MUTE_LEFT | SGTL5000_DAC_MUTE_RIGHT;
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ADCDAC_CTRL,
++ adcdac_ctrl, mute ? adcdac_ctrl : 0);
++
++ return 0;
++}
++
++/* set codec format */
++static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
++{
++ struct snd_soc_codec *codec = codec_dai->codec;
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++ u16 i2sctl = 0;
++
++ sgtl5000->master = 0;
++ /*
++ * i2s clock and frame master setting.
++ * ONLY support:
++ * - clock and frame slave,
++ * - clock and frame master
++ */
++ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
++ case SND_SOC_DAIFMT_CBS_CFS:
++ break;
++ case SND_SOC_DAIFMT_CBM_CFM:
++ i2sctl |= SGTL5000_I2S_MASTER;
++ sgtl5000->master = 1;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* setting i2s data format */
++ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
++ case SND_SOC_DAIFMT_DSP_A:
++ i2sctl |= SGTL5000_I2S_MODE_PCM;
++ break;
++ case SND_SOC_DAIFMT_DSP_B:
++ i2sctl |= SGTL5000_I2S_MODE_PCM;
++ i2sctl |= SGTL5000_I2S_LRALIGN;
++ break;
++ case SND_SOC_DAIFMT_I2S:
++ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
++ break;
++ case SND_SOC_DAIFMT_RIGHT_J:
++ i2sctl |= SGTL5000_I2S_MODE_RJ;
++ i2sctl |= SGTL5000_I2S_LRPOL;
++ break;
++ case SND_SOC_DAIFMT_LEFT_J:
++ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
++ i2sctl |= SGTL5000_I2S_LRALIGN;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ sgtl5000->fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
++
++ /* Clock inversion */
++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++ case SND_SOC_DAIFMT_NB_NF:
++ break;
++ case SND_SOC_DAIFMT_IB_NF:
++ i2sctl |= SGTL5000_I2S_SCLK_INV;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ snd_soc_write(codec, SGTL5000_CHIP_I2S_CTRL, i2sctl);
++
++ return 0;
++}
++
++/* set codec sysclk */
++static int sgtl5000_set_dai_sysclk(struct snd_soc_dai *codec_dai,
++ int clk_id, unsigned int freq, int dir)
++{
++ struct snd_soc_codec *codec = codec_dai->codec;
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++
++ switch (clk_id) {
++ case SGTL5000_SYSCLK:
++ sgtl5000->sysclk = freq;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/*
++ * set clock according to i2s frame clock,
++ * sgtl5000 provide 2 clock sources.
++ * 1. sys_mclk. sample freq can only configure to
++ * 1/256, 1/384, 1/512 of sys_mclk.
++ * 2. pll. can derive any audio clocks.
++ *
++ * clock setting rules:
++ * 1. in slave mode, only sys_mclk can be used.
++ * 2. as constraint by sys_mclk, sample freq should
++ * set to 32k, 44.1k and above.
++ * 3. prefer sys_mclk over the pll to save power.
++ */
++static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
++{
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++ int clk_ctl = 0;
++ int sys_fs; /* sample freq */
++
++ /*
++ * sample freq should be divided by frame clock,
++ * if the frame clock is lower than 44.1khz, the sample freq should be set to
++ * 32khz or 44.1khz.
++ */
++ switch (frame_rate) {
++ case 8000:
++ case 16000:
++ sys_fs = 32000;
++ break;
++ case 11025:
++ case 22050:
++ sys_fs = 44100;
++ break;
++ default:
++ sys_fs = frame_rate;
++ break;
++ }
++
++ /* set divided factor of frame clock */
++ switch (sys_fs / frame_rate) {
++ case 4:
++ clk_ctl |= SGTL5000_RATE_MODE_DIV_4 << SGTL5000_RATE_MODE_SHIFT;
++ break;
++ case 2:
++ clk_ctl |= SGTL5000_RATE_MODE_DIV_2 << SGTL5000_RATE_MODE_SHIFT;
++ break;
++ case 1:
++ clk_ctl |= SGTL5000_RATE_MODE_DIV_1 << SGTL5000_RATE_MODE_SHIFT;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* set the sys_fs according to frame rate */
++ switch (sys_fs) {
++ case 32000:
++ clk_ctl |= SGTL5000_SYS_FS_32k << SGTL5000_SYS_FS_SHIFT;
++ break;
++ case 44100:
++ clk_ctl |= SGTL5000_SYS_FS_44_1k << SGTL5000_SYS_FS_SHIFT;
++ break;
++ case 48000:
++ clk_ctl |= SGTL5000_SYS_FS_48k << SGTL5000_SYS_FS_SHIFT;
++ break;
++ case 96000:
++ clk_ctl |= SGTL5000_SYS_FS_96k << SGTL5000_SYS_FS_SHIFT;
++ break;
++ default:
++ dev_err(codec->dev, "frame rate %d not supported\n",
++ frame_rate);
++ return -EINVAL;
++ }
++
++ /*
++ * calculate the divider of mclk/sample_freq,
++ * factor of freq =96k can only be 256, since mclk in range (12m,27m)
++ */
++ switch (sgtl5000->sysclk / sys_fs) {
++ case 256:
++ clk_ctl |= SGTL5000_MCLK_FREQ_256FS <<
++ SGTL5000_MCLK_FREQ_SHIFT;
++ break;
++ case 384:
++ clk_ctl |= SGTL5000_MCLK_FREQ_384FS <<
++ SGTL5000_MCLK_FREQ_SHIFT;
++ break;
++ case 512:
++ clk_ctl |= SGTL5000_MCLK_FREQ_512FS <<
++ SGTL5000_MCLK_FREQ_SHIFT;
++ break;
++ default:
++ /* if mclk does not satisfy the divider, use pll */
++ if (sgtl5000->master) {
++ clk_ctl |= SGTL5000_MCLK_FREQ_PLL <<
++ SGTL5000_MCLK_FREQ_SHIFT;
++ } else {
++ dev_err(codec->dev,
++ "PLL not supported in slave mode\n");
++ return -EINVAL;
++ }
++ }
++
++ /* if using pll, please check manual 6.4.2 for detail */
++ if ((clk_ctl & SGTL5000_MCLK_FREQ_MASK) == SGTL5000_MCLK_FREQ_PLL) {
++ u64 out, t;
++ int div2;
++ int pll_ctl;
++ unsigned int in, int_div, frac_div;
++
++ if (sgtl5000->sysclk > 17000000) {
++ div2 = 1;
++ in = sgtl5000->sysclk / 2;
++ } else {
++ div2 = 0;
++ in = sgtl5000->sysclk;
++ }
++ if (sys_fs == 44100)
++ out = 180633600;
++ else
++ out = 196608000;
++ t = do_div(out, in);
++ int_div = out;
++ t *= 2048;
++ do_div(t, in);
++ frac_div = t;
++ pll_ctl = int_div << SGTL5000_PLL_INT_DIV_SHIFT |
++ frac_div << SGTL5000_PLL_FRAC_DIV_SHIFT;
++
++ snd_soc_write(codec, SGTL5000_CHIP_PLL_CTRL, pll_ctl);
++ if (div2)
++ snd_soc_update_bits(codec,
++ SGTL5000_CHIP_CLK_TOP_CTRL,
++ SGTL5000_INPUT_FREQ_DIV2,
++ SGTL5000_INPUT_FREQ_DIV2);
++ else
++ snd_soc_update_bits(codec,
++ SGTL5000_CHIP_CLK_TOP_CTRL,
++ SGTL5000_INPUT_FREQ_DIV2,
++ 0);
++
++ /* power up pll */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP,
++ SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP);
++
++ /* if using pll, clk_ctrl must be set after pll power up */
++ snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
++ } else {
++ /* otherwise, clk_ctrl must be set before pll power down */
++ snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
++
++ /* power down pll */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP,
++ 0);
++ }
++
++ return 0;
++}
++
++/*
++ * Set PCM DAI bit size and sample rate.
++ * input: params_rate, params_fmt
++ */
++static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_codec *codec = dai->codec;
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++ int channels = params_channels(params);
++ int i2s_ctl = 0;
++ int stereo;
++ int ret;
++
++ /* sysclk should already set */
++ if (!sgtl5000->sysclk) {
++ dev_err(codec->dev, "%s: set sysclk first!\n", __func__);
++ return -EFAULT;
++ }
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ stereo = SGTL5000_DAC_STEREO;
++ else
++ stereo = SGTL5000_ADC_STEREO;
++
++ /* set mono to save power */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, stereo,
++ channels == 1 ? 0 : stereo);
++
++ /* set codec clock base on lrclk */
++ ret = sgtl5000_set_clock(codec, params_rate(params));
++ if (ret)
++ return ret;
++
++ /* set i2s data format */
++ switch (params_format(params)) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ if (sgtl5000->fmt == SND_SOC_DAIFMT_RIGHT_J)
++ return -EINVAL;
++ i2s_ctl |= SGTL5000_I2S_DLEN_16 << SGTL5000_I2S_DLEN_SHIFT;
++ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_32FS <<
++ SGTL5000_I2S_SCLKFREQ_SHIFT;
++ break;
++ case SNDRV_PCM_FORMAT_S20_3LE:
++ i2s_ctl |= SGTL5000_I2S_DLEN_20 << SGTL5000_I2S_DLEN_SHIFT;
++ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_64FS <<
++ SGTL5000_I2S_SCLKFREQ_SHIFT;
++ break;
++ case SNDRV_PCM_FORMAT_S24_LE:
++ i2s_ctl |= SGTL5000_I2S_DLEN_24 << SGTL5000_I2S_DLEN_SHIFT;
++ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_64FS <<
++ SGTL5000_I2S_SCLKFREQ_SHIFT;
++ break;
++ case SNDRV_PCM_FORMAT_S32_LE:
++ if (sgtl5000->fmt == SND_SOC_DAIFMT_RIGHT_J)
++ return -EINVAL;
++ i2s_ctl |= SGTL5000_I2S_DLEN_32 << SGTL5000_I2S_DLEN_SHIFT;
++ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_64FS <<
++ SGTL5000_I2S_SCLKFREQ_SHIFT;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_I2S_CTRL,
++ SGTL5000_I2S_DLEN_MASK | SGTL5000_I2S_SCLKFREQ_MASK,
++ i2s_ctl);
++
++ return 0;
++}
++
++#ifdef CONFIG_REGULATOR
++static int ldo_regulator_is_enabled(struct regulator_dev *dev)
++{
++ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
++
++ return ldo->enabled;
++}
++
++static int ldo_regulator_enable(struct regulator_dev *dev)
++{
++ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
++ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
++ int reg;
++dev_info(codec->dev, "%s(): enabled %u\n", __func__, ldo->enabled);
++ if (ldo_regulator_is_enabled(dev))
++ return 0;
++
++ /* set regulator value firstly */
++ reg = (1600 - ldo->voltage / 1000) / 50;
++ reg = clamp(reg, 0x0, 0xf);
++
++ /* amend the voltage value, unit: uV */
++ ldo->voltage = (1600 - reg * 50) * 1000;
++
++ /* set voltage to register */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
++ SGTL5000_LINREG_VDDD_MASK, reg);
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINEREG_D_POWERUP,
++ SGTL5000_LINEREG_D_POWERUP);
++
++ /* when internal ldo enabled, simple digital power can be disabled */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINREG_SIMPLE_POWERUP,
++ 0);
++
++ ldo->enabled = 1;
++ return 0;
++}
++
++static int ldo_regulator_disable(struct regulator_dev *dev)
++{
++ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
++ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
++dev_info(codec->dev, "%s(): enabled %u\n", __func__, ldo->enabled);
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINREG_SIMPLE_POWERUP,
++ SGTL5000_LINREG_SIMPLE_POWERUP);
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINEREG_D_POWERUP,
++ 0);
++dev_info(codec->dev, "%s: ANA_POWER = 0x%04x\n", __func__, snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER));
++
++ /* clear voltage info */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
++ SGTL5000_LINREG_VDDD_MASK, 0);
++
++ ldo->enabled = 0;
++
++ return 0;
++}
++
++static int ldo_regulator_get_voltage(struct regulator_dev *dev)
++{
++ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
++
++ return ldo->voltage;
++}
++
++static struct regulator_ops ldo_regulator_ops = {
++ .is_enabled = ldo_regulator_is_enabled,
++ .enable = ldo_regulator_enable,
++ .disable = ldo_regulator_disable,
++ .get_voltage = ldo_regulator_get_voltage,
++};
++
++static int ldo_regulator_register(struct snd_soc_codec *codec,
++ struct regulator_init_data *init_data,
++ int voltage)
++{
++ struct ldo_regulator *ldo;
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++ struct regulator_config config = { };
++
++ ldo = kzalloc(sizeof(struct ldo_regulator), GFP_KERNEL);
++
++ if (!ldo) {
++ dev_err(codec->dev, "failed to allocate ldo_regulator\n");
++ return -ENOMEM;
++ }
++
++ ldo->desc.name = kstrdup(dev_name(codec->dev), GFP_KERNEL);
++ if (!ldo->desc.name) {
++ kfree(ldo);
++ dev_err(codec->dev, "failed to allocate decs name memory\n");
++ return -ENOMEM;
++ }
++
++ ldo->desc.type = REGULATOR_VOLTAGE;
++ ldo->desc.owner = THIS_MODULE;
++ ldo->desc.ops = &ldo_regulator_ops;
++ ldo->desc.n_voltages = 1;
++
++ ldo->codec_data = codec;
++ ldo->voltage = voltage;
++
++ config.dev = codec->dev;
++ config.driver_data = ldo;
++ config.init_data = init_data;
++ config.ena_gpio = -EINVAL;
++
++ ldo->dev = regulator_register(&ldo->desc, &config);
++ if (IS_ERR(ldo->dev)) {
++ int ret = PTR_ERR(ldo->dev);
++
++ dev_err(codec->dev, "failed to register regulator\n");
++ kfree(ldo->desc.name);
++ kfree(ldo);
++
++ return ret;
++ }
++ sgtl5000->ldo = ldo;
++
++ return 0;
++}
++
++static int ldo_regulator_remove(struct snd_soc_codec *codec)
++{
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++ struct ldo_regulator *ldo = sgtl5000->ldo;
++
++ if (!ldo)
++ return 0;
++
++ regulator_unregister(ldo->dev);
++ kfree(ldo->desc.name);
++ kfree(ldo);
++
++ return 0;
++}
++#else
++static int ldo_regulator_register(struct snd_soc_codec *codec,
++ struct regulator_init_data *init_data,
++ int voltage)
++{
++ dev_err(codec->dev, "this setup needs regulator support in the kernel\n");
++ return -EINVAL;
++}
++
++static int ldo_regulator_remove(struct snd_soc_codec *codec)
++{
++ return 0;
++}
++#endif
++
++/*
++ * set dac bias
++ * common state changes:
++ * startup:
++ * off --> standby --> prepare --> on
++ * standby --> prepare --> on
++ *
++ * stop:
++ * on --> prepare --> standby
++ */
++static int sgtl5000_set_bias_level(struct snd_soc_codec *codec,
++ enum snd_soc_bias_level level)
++{
++ int ret;
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++
++ switch (level) {
++ case SND_SOC_BIAS_ON:
++ case SND_SOC_BIAS_PREPARE:
++ break;
++ case SND_SOC_BIAS_STANDBY:
++ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
++ ret = regulator_bulk_enable(
++ ARRAY_SIZE(sgtl5000->supplies),
++ sgtl5000->supplies);
++ if (ret)
++ return ret;
++ udelay(10);
++
++ regcache_cache_only(sgtl5000->regmap, false);
++
++ ret = regcache_sync(sgtl5000->regmap);
++ if (ret != 0) {
++ dev_err(codec->dev,
++ "Failed to restore cache: %d\n", ret);
++
++ regcache_cache_only(sgtl5000->regmap, true);
++ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
++ sgtl5000->supplies);
++
++ return ret;
++ }
++ }
++
++ break;
++ case SND_SOC_BIAS_OFF:
++ regcache_cache_only(sgtl5000->regmap, true);
++ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
++ sgtl5000->supplies);
++ break;
++ }
++
++ codec->dapm.bias_level = level;
++ return 0;
++}
++
++#define SGTL5000_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
++ SNDRV_PCM_FMTBIT_S20_3LE |\
++ SNDRV_PCM_FMTBIT_S24_LE |\
++ SNDRV_PCM_FMTBIT_S32_LE)
++
++static const struct snd_soc_dai_ops sgtl5000_ops = {
++ .hw_params = sgtl5000_pcm_hw_params,
++ .digital_mute = sgtl5000_digital_mute,
++ .set_fmt = sgtl5000_set_dai_fmt,
++ .set_sysclk = sgtl5000_set_dai_sysclk,
++};
++
++static struct snd_soc_dai_driver sgtl5000_dai = {
++ .name = "sgtl5000",
++ .playback = {
++ .stream_name = "Playback",
++ .channels_min = 1,
++ .channels_max = 2,
++ /*
++ * only support 8~48K + 96K,
++ * TODO modify hw_param to support more
++ */
++ .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_96000,
++ .formats = SGTL5000_FORMATS,
++ },
++ .capture = {
++ .stream_name = "Capture",
++ .channels_min = 1,
++ .channels_max = 2,
++ .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_96000,
++ .formats = SGTL5000_FORMATS,
++ },
++ .ops = &sgtl5000_ops,
++ .symmetric_rates = 1,
++};
++
++static bool sgtl5000_volatile(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case SGTL5000_CHIP_ID:
++ case SGTL5000_CHIP_ADCDAC_CTRL:
++ case SGTL5000_CHIP_ANA_STATUS:
++ return true;
++ }
++
++ return false;
++}
++
++static bool sgtl5000_readable(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case SGTL5000_CHIP_ID:
++ case SGTL5000_CHIP_DIG_POWER:
++ case SGTL5000_CHIP_CLK_CTRL:
++ case SGTL5000_CHIP_I2S_CTRL:
++ case SGTL5000_CHIP_SSS_CTRL:
++ case SGTL5000_CHIP_ADCDAC_CTRL:
++ case SGTL5000_CHIP_DAC_VOL:
++ case SGTL5000_CHIP_PAD_STRENGTH:
++ case SGTL5000_CHIP_ANA_ADC_CTRL:
++ case SGTL5000_CHIP_ANA_HP_CTRL:
++ case SGTL5000_CHIP_ANA_CTRL:
++ case SGTL5000_CHIP_LINREG_CTRL:
++ case SGTL5000_CHIP_REF_CTRL:
++ case SGTL5000_CHIP_MIC_CTRL:
++ case SGTL5000_CHIP_LINE_OUT_CTRL:
++ case SGTL5000_CHIP_LINE_OUT_VOL:
++ case SGTL5000_CHIP_ANA_POWER:
++ case SGTL5000_CHIP_PLL_CTRL:
++ case SGTL5000_CHIP_CLK_TOP_CTRL:
++ case SGTL5000_CHIP_ANA_STATUS:
++ case SGTL5000_CHIP_SHORT_CTRL:
++ case SGTL5000_CHIP_ANA_TEST2:
++ case SGTL5000_DAP_CTRL:
++ case SGTL5000_DAP_PEQ:
++ case SGTL5000_DAP_BASS_ENHANCE:
++ case SGTL5000_DAP_BASS_ENHANCE_CTRL:
++ case SGTL5000_DAP_AUDIO_EQ:
++ case SGTL5000_DAP_SURROUND:
++ case SGTL5000_DAP_FLT_COEF_ACCESS:
++ case SGTL5000_DAP_COEF_WR_B0_MSB:
++ case SGTL5000_DAP_COEF_WR_B0_LSB:
++ case SGTL5000_DAP_EQ_BASS_BAND0:
++ case SGTL5000_DAP_EQ_BASS_BAND1:
++ case SGTL5000_DAP_EQ_BASS_BAND2:
++ case SGTL5000_DAP_EQ_BASS_BAND3:
++ case SGTL5000_DAP_EQ_BASS_BAND4:
++ case SGTL5000_DAP_MAIN_CHAN:
++ case SGTL5000_DAP_MIX_CHAN:
++ case SGTL5000_DAP_AVC_CTRL:
++ case SGTL5000_DAP_AVC_THRESHOLD:
++ case SGTL5000_DAP_AVC_ATTACK:
++ case SGTL5000_DAP_AVC_DECAY:
++ case SGTL5000_DAP_COEF_WR_B1_MSB:
++ case SGTL5000_DAP_COEF_WR_B1_LSB:
++ case SGTL5000_DAP_COEF_WR_B2_MSB:
++ case SGTL5000_DAP_COEF_WR_B2_LSB:
++ case SGTL5000_DAP_COEF_WR_A1_MSB:
++ case SGTL5000_DAP_COEF_WR_A1_LSB:
++ case SGTL5000_DAP_COEF_WR_A2_MSB:
++ case SGTL5000_DAP_COEF_WR_A2_LSB:
++ return true;
++
++ default:
++ return false;
++ }
++}
++
++#ifdef CONFIG_SUSPEND
++static int sgtl5000_suspend(struct snd_soc_codec *codec)
++{
++ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_OFF);
++
++ return 0;
++}
++
++/*
++ * restore all sgtl5000 registers,
++ * since a big hole between dap and regular registers,
++ * we will restore them respectively.
++ */
++static int sgtl5000_restore_regs(struct snd_soc_codec *codec)
++{
++ u16 *cache = codec->reg_cache;
++ u16 reg;
++
++ /* restore regular registers */
++ for (reg = 0; reg <= SGTL5000_CHIP_SHORT_CTRL; reg += 2) {
++
++ /* These regs should restore in particular order */
++ if (reg == SGTL5000_CHIP_ANA_POWER ||
++ reg == SGTL5000_CHIP_CLK_CTRL ||
++ reg == SGTL5000_CHIP_LINREG_CTRL ||
++ reg == SGTL5000_CHIP_LINE_OUT_CTRL ||
++ reg == SGTL5000_CHIP_REF_CTRL)
++ continue;
++
++ snd_soc_write(codec, reg, cache[reg]);
++ }
++
++ /* restore dap registers */
++ for (reg = SGTL5000_DAP_REG_OFFSET; reg < SGTL5000_MAX_REG_OFFSET; reg += 2)
++ snd_soc_write(codec, reg, cache[reg]);
++
++ /*
++ * restore these regs according to the power setting sequence in
++ * sgtl5000_set_power_regs() and clock setting sequence in
++ * sgtl5000_set_clock().
++ *
++ * The order of restore is:
++ * 1. SGTL5000_CHIP_CLK_CTRL MCLK_FREQ bits (1:0) should be restore after
++ * SGTL5000_CHIP_ANA_POWER PLL bits set
++ * 2. SGTL5000_CHIP_LINREG_CTRL should be set before
++ * SGTL5000_CHIP_ANA_POWER LINREG_D restored
++ * 3. SGTL5000_CHIP_REF_CTRL controls Analog Ground Voltage,
++ * prefer to restore it after SGTL5000_CHIP_ANA_POWER is restored
++ */
++ snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL,
++ cache[SGTL5000_CHIP_LINREG_CTRL]);
++
++ snd_soc_write(codec, SGTL5000_CHIP_ANA_POWER,
++ cache[SGTL5000_CHIP_ANA_POWER]);
++
++ snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL,
++ cache[SGTL5000_CHIP_CLK_CTRL]);
++
++ snd_soc_write(codec, SGTL5000_CHIP_REF_CTRL,
++ cache[SGTL5000_CHIP_REF_CTRL]);
++
++ snd_soc_write(codec, SGTL5000_CHIP_LINE_OUT_CTRL,
++ cache[SGTL5000_CHIP_LINE_OUT_CTRL]);
++ return 0;
++}
++
++static int sgtl5000_resume(struct snd_soc_codec *codec)
++{
++ /* Bring the codec back up to standby to enable regulators */
++ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
++
++ /* Restore registers by cached in memory */
++ sgtl5000_restore_regs(codec);
++ return 0;
++}
++#else
++#define sgtl5000_suspend NULL
++#define sgtl5000_resume NULL
++#endif /* CONFIG_SUSPEND */
++
++/*
++ * sgtl5000 has 3 internal power supplies:
++ * 1. VAG, normally set to vdda/2
++ * 2. chargepump, set to different value
++ * according to voltage of vdda and vddio
++ * 3. line out VAG, normally set to vddio/2
++ *
++ * and should be set according to:
++ * 1. vddd provided by external or not
++ * 2. vdda and vddio voltage value. > 3.1v or not
++ * 3. chip revision >=0x11 or not. If >=0x11, not use external vddd.
++ */
++static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
++{
++ int vddd;
++ int vdda;
++ int vddio;
++ u16 ana_pwr;
++ u16 lreg_ctrl;
++ int vag;
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++
++ vdda = regulator_get_voltage(sgtl5000->supplies[VDDA].consumer);
++ vddio = regulator_get_voltage(sgtl5000->supplies[VDDIO].consumer);
++ vddd = regulator_get_voltage(sgtl5000->supplies[VDDD].consumer);
++
++ vdda = vdda / 1000;
++ vddio = vddio / 1000;
++ vddd = vddd / 1000;
++
++ if (vdda <= 0 || vddio <= 0 || vddd < 0) {
++ dev_err(codec->dev, "regulator voltage not set correctly\n");
++
++ return -EINVAL;
++ }
++
++ /* according to datasheet, maximum voltage of supplies */
++ if (vdda > 3600 || vddio > 3600 || vddd > 1980) {
++ dev_err(codec->dev,
++ "exceed max voltage vdda %dmV vddio %dmV vddd %dmV\n",
++ vdda, vddio, vddd);
++
++ return -EINVAL;
++ }
++
++ /* reset value */
++ ana_pwr = snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER);
++ ana_pwr |= SGTL5000_DAC_STEREO |
++ SGTL5000_ADC_STEREO |
++ SGTL5000_REFTOP_POWERUP;
++ lreg_ctrl = snd_soc_read(codec, SGTL5000_CHIP_LINREG_CTRL);
++
++ if (vddio < 3100 && vdda < 3100) {
++ /* enable internal oscillator used for charge pump */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_CLK_TOP_CTRL,
++ SGTL5000_INT_OSC_EN,
++ SGTL5000_INT_OSC_EN);
++ /* Enable VDDC charge pump */
++ ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
++ } else if (vddio >= 3100 && vdda >= 3100) {
++ /*
++ * if vddio and vddd > 3.1v,
++ * charge pump should be clean before set ana_pwr
++ */
++// FIXME: this is total crap - we have read this register above into
++// ana_pwr, which we then modify (above), and then write back to the
++// register below. This modification just gets completely overwritten.
++// snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++// SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
++
++ /* VDDC use VDDIO rail */
++ lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
++ lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
++ SGTL5000_VDDC_MAN_ASSN_SHIFT;
++ }
++
++ snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
++
++ snd_soc_write(codec, SGTL5000_CHIP_ANA_POWER, ana_pwr);
++
++ /* set voltage to register */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
++ SGTL5000_LINREG_VDDD_MASK, 0x8);
++
++ /*
++ * if vddd linear reg has been enabled,
++ * simple digital supply should be clear to get
++ * proper VDDD voltage.
++ */
++ if (ana_pwr & SGTL5000_LINEREG_D_POWERUP)
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINREG_SIMPLE_POWERUP,
++ 0);
++ else
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINREG_SIMPLE_POWERUP |
++ SGTL5000_STARTUP_POWERUP,
++ 0);
++
++ /*
++ * set ADC/DAC VAG to vdda / 2,
++ * should stay in range (0.8v, 1.575v)
++ */
++ vag = vdda / 2;
++ if (vag <= SGTL5000_ANA_GND_BASE)
++ vag = 0;
++ else if (vag >= SGTL5000_ANA_GND_BASE + SGTL5000_ANA_GND_STP *
++ (SGTL5000_ANA_GND_MASK >> SGTL5000_ANA_GND_SHIFT))
++ vag = SGTL5000_ANA_GND_MASK >> SGTL5000_ANA_GND_SHIFT;
++ else
++ vag = (vag - SGTL5000_ANA_GND_BASE) / SGTL5000_ANA_GND_STP;
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
++ SGTL5000_ANA_GND_MASK, vag << SGTL5000_ANA_GND_SHIFT);
++
++ /* set line out VAG to vddio / 2, in range (0.8v, 1.675v) */
++ vag = vddio / 2;
++ if (vag <= SGTL5000_LINE_OUT_GND_BASE)
++ vag = 0;
++ else if (vag >= SGTL5000_LINE_OUT_GND_BASE +
++ SGTL5000_LINE_OUT_GND_STP * SGTL5000_LINE_OUT_GND_MAX)
++ vag = SGTL5000_LINE_OUT_GND_MAX;
++ else
++ vag = (vag - SGTL5000_LINE_OUT_GND_BASE) /
++ SGTL5000_LINE_OUT_GND_STP;
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_LINE_OUT_CTRL,
++ SGTL5000_LINE_OUT_CURRENT_MASK |
++ SGTL5000_LINE_OUT_GND_MASK,
++ vag << SGTL5000_LINE_OUT_GND_SHIFT |
++ SGTL5000_LINE_OUT_CURRENT_360u <<
++ SGTL5000_LINE_OUT_CURRENT_SHIFT);
++
++ return 0;
++}
++
++static int sgtl5000_replace_vddd_with_ldo(struct snd_soc_codec *codec)
++{
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++ int ret;
++
++ /* set internal ldo to 1.2v */
++ ret = ldo_regulator_register(codec, &ldo_init_data, LDO_VOLTAGE);
++ if (ret) {
++ dev_err(codec->dev,
++ "Failed to register vddd internal supplies: %d\n", ret);
++ return ret;
++ }
++
++ sgtl5000->supplies[VDDD].supply = LDO_CONSUMER_NAME;
++
++ dev_info(codec->dev, "Using internal LDO instead of VDDD\n");
++ return 0;
++}
++
++static int sgtl5000_enable_regulators(struct snd_soc_codec *codec)
++{
++ int ret;
++ int i;
++ int external_vddd = 0;
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++ struct regulator *vddd;
++
++ for (i = 0; i < ARRAY_SIZE(sgtl5000->supplies); i++)
++ sgtl5000->supplies[i].supply = supply_names[i];
++
++ /* External VDDD only works before revision 0x11 */
++ if (sgtl5000->revision < 0x11) {
++ vddd = regulator_get_optional(codec->dev, "VDDD");
++ if (IS_ERR(vddd)) {
++ /* See if it's just not registered yet */
++ if (PTR_ERR(vddd) == -EPROBE_DEFER)
++ return -EPROBE_DEFER;
++ } else {
++ external_vddd = 1;
++ regulator_put(vddd);
++ }
++ }
++
++ if (!external_vddd) {
++ ret = sgtl5000_replace_vddd_with_ldo(codec);
++ if (ret)
++ return ret;
++ }
++
++ ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
++ sgtl5000->supplies);
++ if (ret)
++ goto err_ldo_remove;
++
++ ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
++ sgtl5000->supplies);
++ if (ret)
++ goto err_ldo_remove;
++
++ /* wait for all power rails bring up */
++ udelay(10);
++
++ return 0;
++
++err_ldo_remove:
++ if (!external_vddd)
++ ldo_regulator_remove(codec);
++ return ret;
++
++}
++
++static int sgtl5000_probe(struct snd_soc_codec *codec)
++{
++ int ret;
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++
++ /* setup i2c data ops */
++ codec->control_data = sgtl5000->regmap;
++ ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP);
++ if (ret < 0) {
++ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
++ return ret;
++ }
++
++ if (!devres_open_group(codec->dev, NULL, GFP_KERNEL))
++ return -ENOMEM;
++
++ ret = sgtl5000_enable_regulators(codec);
++ if (ret)
++ return ret;
++
++ /* power up sgtl5000 */
++ ret = sgtl5000_set_power_regs(codec);
++ if (ret)
++ goto err;
++
++ /* enable small pop, introduce 400ms delay in turning off */
++ snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
++ SGTL5000_SMALL_POP, 1);
++
++ /* disable short cut detector */
++ snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
++
++ /*
++ * set i2s as default input of sound switch
++ * TODO: add sound switch to control and dapm widget.
++ */
++ snd_soc_write(codec, SGTL5000_CHIP_SSS_CTRL,
++ SGTL5000_DAC_SEL_I2S_IN << SGTL5000_DAC_SEL_SHIFT);
++ snd_soc_write(codec, SGTL5000_CHIP_DIG_POWER,
++ SGTL5000_ADC_EN | SGTL5000_DAC_EN);
++
++ /* enable dac volume ramp by default */
++ snd_soc_write(codec, SGTL5000_CHIP_ADCDAC_CTRL,
++ SGTL5000_DAC_VOL_RAMP_EN |
++ SGTL5000_DAC_MUTE_RIGHT |
++ SGTL5000_DAC_MUTE_LEFT);
++
++ snd_soc_write(codec, SGTL5000_CHIP_PAD_STRENGTH, 0x015f);
++
++ snd_soc_write(codec, SGTL5000_CHIP_ANA_CTRL,
++ SGTL5000_HP_ZCD_EN |
++ SGTL5000_ADC_ZCD_EN);
++
++ snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 2);
++
++ /*
++ * disable DAP
++ * TODO:
++ * Enable DAP in kcontrol and dapm.
++ */
++ snd_soc_write(codec, SGTL5000_DAP_CTRL, 0);
++
++ /* leading to standby state */
++ ret = sgtl5000_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
++ if (ret)
++ goto err;
++
++ return 0;
++
++err:
++ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
++ sgtl5000->supplies);
++
++ devres_release_group(codec->dev, NULL);
++
++ ldo_regulator_remove(codec);
++
++ return ret;
++}
++
++static int sgtl5000_remove(struct snd_soc_codec *codec)
++{
++ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
++
++ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_OFF);
++
++ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
++ sgtl5000->supplies);
++
++ devres_release_group(codec->dev, NULL);
++
++ ldo_regulator_remove(codec);
++
++ return 0;
++}
++
++static struct snd_soc_codec_driver sgtl5000_driver = {
++ .probe = sgtl5000_probe,
++ .remove = sgtl5000_remove,
++ .suspend = sgtl5000_suspend,
++ .resume = sgtl5000_resume,
++ .set_bias_level = sgtl5000_set_bias_level,
++ .controls = sgtl5000_snd_controls,
++ .num_controls = ARRAY_SIZE(sgtl5000_snd_controls),
++ .dapm_widgets = sgtl5000_dapm_widgets,
++ .num_dapm_widgets = ARRAY_SIZE(sgtl5000_dapm_widgets),
++ .dapm_routes = sgtl5000_dapm_routes,
++ .num_dapm_routes = ARRAY_SIZE(sgtl5000_dapm_routes),
++};
++
++static const struct regmap_config sgtl5000_regmap = {
++ .reg_bits = 16,
++ .val_bits = 16,
++ .reg_stride = 2,
++
++ .max_register = SGTL5000_MAX_REG_OFFSET,
++ .volatile_reg = sgtl5000_volatile,
++ .readable_reg = sgtl5000_readable,
++
++ .cache_type = REGCACHE_RBTREE,
++ .reg_defaults = sgtl5000_reg_defaults,
++ .num_reg_defaults = ARRAY_SIZE(sgtl5000_reg_defaults),
++};
++
++/*
++ * Write all the default values from sgtl5000_reg_defaults[] array into the
++ * sgtl5000 registers, to make sure we always start with the sane registers
++ * values as stated in the datasheet.
++ *
++ * Since sgtl5000 does not have a reset line, nor a reset command in software,
++ * we follow this approach to guarantee we always start from the default values
++ * and avoid problems like, not being able to probe after an audio playback
++ * followed by a system reset or a 'reboot' command in Linux
++ */
++static int sgtl5000_fill_defaults(struct sgtl5000_priv *sgtl5000)
++{
++ int i, ret, val, index;
++
++ for (i = 0; i < ARRAY_SIZE(sgtl5000_reg_defaults); i++) {
++ val = sgtl5000_reg_defaults[i].def;
++ index = sgtl5000_reg_defaults[i].reg;
++ ret = regmap_write(sgtl5000->regmap, index, val);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static int sgtl5000_i2c_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct sgtl5000_priv *sgtl5000;
++ int ret, reg, rev;
++
++ sgtl5000 = devm_kzalloc(&client->dev, sizeof(struct sgtl5000_priv),
++ GFP_KERNEL);
++ if (!sgtl5000)
++ return -ENOMEM;
++
++ sgtl5000->regmap = devm_regmap_init_i2c(client, &sgtl5000_regmap);
++ if (IS_ERR(sgtl5000->regmap)) {
++ ret = PTR_ERR(sgtl5000->regmap);
++ dev_err(&client->dev, "Failed to allocate regmap: %d\n", ret);
++ return ret;
++ }
++
++ sgtl5000->mclk = devm_clk_get(&client->dev, NULL);
++ if (IS_ERR(sgtl5000->mclk)) {
++ ret = PTR_ERR(sgtl5000->mclk);
++ dev_err(&client->dev, "Failed to get mclock: %d\n", ret);
++ /* Defer the probe to see if the clk will be provided later */
++ if (ret == -ENOENT)
++ return -EPROBE_DEFER;
++ return ret;
++ }
++
++ ret = clk_prepare_enable(sgtl5000->mclk);
++ if (ret)
++ return ret;
++
++ /* read chip information */
++ ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
++ if (ret)
++ goto disable_clk;
++
++ if (((reg & SGTL5000_PARTID_MASK) >> SGTL5000_PARTID_SHIFT) !=
++ SGTL5000_PARTID_PART_ID) {
++ dev_err(&client->dev,
++ "Device with ID register %x is not a sgtl5000\n", reg);
++ ret = -ENODEV;
++ goto disable_clk;
++ }
++
++ rev = (reg & SGTL5000_REVID_MASK) >> SGTL5000_REVID_SHIFT;
++ dev_info(&client->dev, "sgtl5000 revision 0x%x\n", rev);
++ sgtl5000->revision = rev;
++
++ i2c_set_clientdata(client, sgtl5000);
++
++ /* Ensure sgtl5000 will start with sane register values */
++ ret = sgtl5000_fill_defaults(sgtl5000);
++ if (ret)
++ goto disable_clk;
++
++ ret = snd_soc_register_codec(&client->dev,
++ &sgtl5000_driver, &sgtl5000_dai, 1);
++ if (ret)
++ goto disable_clk;
++
++ return 0;
++
++disable_clk:
++ clk_disable_unprepare(sgtl5000->mclk);
++ return ret;
++}
++
++static int sgtl5000_i2c_remove(struct i2c_client *client)
++{
++ struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
++
++ snd_soc_unregister_codec(&client->dev);
++ clk_disable_unprepare(sgtl5000->mclk);
++ return 0;
++}
++
++static const struct i2c_device_id sgtl5000_id[] = {
++ {"sgtl5000", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, sgtl5000_id);
++
++static const struct of_device_id sgtl5000_dt_ids[] = {
++ { .compatible = "fsl,sgtl5000", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, sgtl5000_dt_ids);
++
++static struct i2c_driver sgtl5000_i2c_driver = {
++ .driver = {
++ .name = "sgtl5000",
++ .owner = THIS_MODULE,
++ .of_match_table = sgtl5000_dt_ids,
++ },
++ .probe = sgtl5000_i2c_probe,
++ .remove = sgtl5000_i2c_remove,
++ .id_table = sgtl5000_id,
++};
++
++module_i2c_driver(sgtl5000_i2c_driver);
++
++MODULE_DESCRIPTION("Freescale SGTL5000 ALSA SoC Codec Driver");
++MODULE_AUTHOR("Zeng Zhaoming <zengzm.kernel@gmail.com>");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/sound/soc/codecs/spdif_transmitter.c linux-openelec/sound/soc/codecs/spdif_transmitter.c
+--- linux-3.14.36/sound/soc/codecs/spdif_transmitter.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/codecs/spdif_transmitter.c 2015-05-06 12:05:43.000000000 -0500
+@@ -24,7 +24,7 @@
+
+ #define DRV_NAME "spdif-dit"
+
+-#define STUB_RATES SNDRV_PCM_RATE_8000_96000
++#define STUB_RATES SNDRV_PCM_RATE_8000_192000
+ #define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+diff -Nur linux-3.14.36/sound/soc/codecs/vt1613.c linux-openelec/sound/soc/codecs/vt1613.c
+--- linux-3.14.36/sound/soc/codecs/vt1613.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/codecs/vt1613.c 2015-07-24 18:03:30.316842002 -0500
+@@ -0,0 +1,497 @@
++/*
++* vt1613.c -- ALSA SoC VT1613 AC'97 codec support
++*
++* Copyright 2010-2015 Seco s.r.l.
++*
++* This program is free software; you can redistribute it and/or
++* modify it under the terms of the GNU General Public License
++* version 2 as published by the Free Software Foundation.
++*
++* This program is distributed in the hope that it will be useful, but
++* WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++* General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++* 02110-1301 USA
++*
++*/
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/regmap.h>
++#include <linux/slab.h>
++#include <linux/of_device.h>
++#include <sound/soc.h>
++#include <sound/soc-dapm.h>
++#include <sound/initval.h>
++#include <sound/tlv.h>
++#include <sound/ac97_codec.h>
++
++#include "vt1613.h"
++#define DRV_NAME "vt1613-codec"
++
++/* TODO: S/PDIF implementation. As this driver was developed for UDOO board's needs,
++* we skipped S/PDIF features developing*/
++
++bool vt1613_modules_dep_ok = true;
++EXPORT_SYMBOL_GPL(vt1613_modules_dep_ok);
++
++static const struct reg_default vt1613_reg_defaults[] = {
++ { 0x00, 0x0140 }, /* Reset */
++ { 0x02, 0x8000 }, /* Stereo Output Volume */
++ { 0x04, 0x8000 }, /* HP Stereo Output Volume */
++ { 0x0A, 0x0000 }, /* PC Beep Volume */
++ { 0x0C, 0x8008 }, /* Phone Volume */
++ { 0x0E, 0x8008 }, /* MIC Volume */
++ { 0x10, 0x8808 }, /* Line In Volume */
++ { 0x12, 0x8808 }, /* CD Volume */
++ { 0x16, 0x8808 }, /* AUX Volume */
++ { 0x18, 0x8808 }, /* PCM out Volume */
++ { 0x1A, 0x0000 }, /* Record Select */
++ { 0x1C, 0x8000 }, /* Record Gain */
++ { 0x20, 0x0000 }, /* General Purpose */
++ { 0x24, 0x0001 }, /* Audio Interrupt & Paging (AC'97 2.3) */
++ { 0x26, 0x0000 }, /* Powerdown control / status */
++ { 0x28, 0x097C }, /* Extended Audio ID */
++ { 0x2A, 0x3830 }, /* Extended Audio Status and Control */
++ { 0x2C, 0xBB80 }, /* PCM Front DAC Rate */
++ { 0x32, 0xBB80 }, /* PCM LR ADC Rate */
++ { 0x3A, 0x2000 }, /* S/PDIF Control */
++ { 0x5A, 0x0000 }, /* Vendor Reserved Register */
++ { 0x5C, 0x00A9 }, /* Vendor Reserved Register */
++ { 0x62, 0xFFFF }, /* PCI SVID Page ID = 01h */
++ { 0x64, 0xFFFF }, /* PCI SID Page ID = 01h */
++ { 0x66, 0x0000 }, /* S/PDIF RX Status Page ID = 00h */
++ { 0x68, 0x0000 }, /* S/PDIF RX Status Page ID = 00h */
++ { 0x6A, 0x0000 }, /* S/PDIF RX Control Page ID = 00h */
++ { 0x6C, 0x376A }, /* DAC Slot Mapping Page ID = 01h */
++ { 0x6E, 0x0000 }, /* ADC Slot Mapping Page ID = 01h */
++ { 0x70, 0x0000 }, /* ADC / SPDIF RX Left Peak */
++ { 0x74, 0x0000 }, /* PLL Setting /Debugging */
++ { 0x76, 0x1182 }, /* Miscellaneous */
++ { 0x78, 0x0070 }, /* GPIO Control */
++ { 0x7A, 0x0070 }, /* GPIO Status */
++ { 0x7C, 0x5649 }, /* Vendor ID1 */
++ { 0x7E, 0x4120 }, /* Vendor ID2 */
++};
++
++static bool vt1613_readable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case AC97_RESET ... AC97_HEADPHONE:
++ case AC97_PC_BEEP ... AC97_CD:
++ case AC97_AUX ... AC97_GENERAL_PURPOSE:
++ case AC97_INT_PAGING ... AC97_PCM_LR_ADC_RATE:
++ case AC97_SPDIF:
++ case AC97_AD_TEST:
++ case AC97_VT1613_STEREO_MIC:
++ case AC97_PCI_SVID ... AC97_SENSE_INFO:
++ case AC97_VT1613_DAC_SLOT_MAP:
++ case AC97_VT1613_ADC_SLOT_MAP:
++ case AC97_AD_CODEC_CFG:
++ case AC97_AD_SERIAL_CFG:
++ case AC97_AD_MISC:
++ case AC97_VT1613_GPIO_CTRL:
++ case AC97_VT1613_GPIO_STATUS:
++ case AC97_VENDOR_ID1:
++ case AC97_VENDOR_ID2:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool vt1613_writeable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case AC97_RESET:
++ case AC97_EXTENDED_ID:
++ case AC97_PCM_SURR_DAC_RATE:
++ case AC97_PCM_LFE_DAC_RATE:
++ case AC97_FUNC_SELECT:
++ case AC97_FUNC_INFO:
++ case AC97_VT1613_ADC_SLOT_MAP:
++ case AC97_VENDOR_ID1:
++ case AC97_VENDOR_ID2:
++ return false;
++ default:
++ return vt1613_readable_reg(dev, reg);
++ }
++}
++
++static const struct regmap_config vt1613_regmap_config = {
++ .name = "vt1613_regmap",
++ .reg_bits = 16,
++ .reg_stride = 2,
++ .val_bits = 16,
++ .max_register = 0x7E,
++ .cache_type = REGCACHE_RBTREE,
++
++ .volatile_reg = regmap_ac97_default_volatile,
++ .readable_reg = vt1613_readable_reg,
++ .writeable_reg = vt1613_writeable_reg,
++
++ .reg_defaults = vt1613_reg_defaults,
++ .num_reg_defaults = ARRAY_SIZE(vt1613_reg_defaults),
++};
++
++static const char *vt1613_record_mux[] = {"Mic", "CD", "--", "AUX",
++ "Line", "Stereo Mix", "Mono Mix", "Phone"};
++static SOC_ENUM_DOUBLE_DECL(vt1613_record_enum,
++ AC97_REC_SEL, 8, 0, vt1613_record_mux);
++
++static const char *vt1613_mic_mux[] = {"Mic1", "Mic2"};
++static SOC_ENUM_SINGLE_DECL(vt1613_mic_enum,
++ AC97_GENERAL_PURPOSE, 8, vt1613_mic_mux);
++
++static const char *vt1613_boost[] = {"0dB", "20dB"};
++static SOC_ENUM_SINGLE_DECL(vt1613_boost_enum,
++ AC97_MIC, 6, vt1613_boost);
++
++static const char *vt1613_mic_sel[] = {"MonoMic", "StereoMic"};
++static SOC_ENUM_SINGLE_DECL(vt1613_mic_sel_enum,
++ AC97_VT1613_STEREO_MIC, 2, vt1613_mic_sel);
++
++static const DECLARE_TLV_DB_LINEAR(master_tlv, -4650, 0);
++static const DECLARE_TLV_DB_LINEAR(record_tlv, 0, 2250);
++static const DECLARE_TLV_DB_LINEAR(beep_tlv, -4500, 0);
++static const DECLARE_TLV_DB_LINEAR(mix_tlv, -3450, 1200);
++
++static const struct snd_kcontrol_new vt1613_snd_ac97_controls[] = {
++ SOC_DOUBLE_TLV("Speaker Playback Volume", AC97_MASTER, 8, 0, 31, 1, master_tlv),
++ SOC_SINGLE("Speaker Playback Switch", AC97_MASTER, 15, 1, 1),
++
++ SOC_DOUBLE_TLV("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1, master_tlv),
++ SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE, 15, 1, 1),
++
++ SOC_DOUBLE_TLV("PCM Playback Volume", AC97_PCM, 8, 0, 31, 1, mix_tlv),
++ SOC_SINGLE("PCM Playback Switch", AC97_PCM, 15, 1, 1),
++
++ SOC_DOUBLE_TLV("Record Capture Volume", AC97_REC_GAIN, 8, 0, 15, 0, record_tlv),
++ SOC_SINGLE("Record Capture Switch", AC97_REC_GAIN, 15, 1, 1),
++
++ SOC_SINGLE_TLV("Beep Volume", AC97_PC_BEEP, 1, 15, 1, beep_tlv),
++ SOC_SINGLE("Beep Switch", AC97_PC_BEEP, 15, 1, 1),
++ SOC_SINGLE_TLV("Phone Volume", AC97_PHONE, 0, 31, 0, mix_tlv),
++ SOC_SINGLE("Phone Switch", AC97_PHONE, 15, 1, 1),
++
++ /* Mono Mic and Stereo Mic's right channel controls */
++ SOC_SINGLE_TLV("Mic/StereoMic_R Volume", AC97_MIC, 0, 31, 0, mix_tlv),
++ SOC_SINGLE("Mic/StereoMic_R Switch", AC97_MIC, 15, 1, 1),
++
++ /* Stereo Mic's left channel controls */
++ SOC_SINGLE("StereoMic_L Switch", AC97_MIC, 7, 1, 1),
++ SOC_SINGLE_TLV("StereoMic_L Volume", AC97_MIC, 8, 31, 0, mix_tlv),
++
++ SOC_DOUBLE_TLV("Line Volume", AC97_LINE, 8, 0, 31, 0, mix_tlv),
++ SOC_SINGLE("Line Switch", AC97_LINE, 15, 1, 1),
++ SOC_DOUBLE_TLV("CD Volume", AC97_CD, 8, 0, 31, 0, mix_tlv),
++ SOC_SINGLE("CD Switch", AC97_CD, 15, 1, 1),
++ SOC_DOUBLE_TLV("AUX Volume", AC97_AUX, 8, 0, 31, 0, mix_tlv),
++ SOC_SINGLE("AUX Switch", AC97_AUX, 15, 1, 1),
++
++ SOC_SINGLE("Analog Loopback", AC97_GENERAL_PURPOSE, 7, 1, 0),
++
++ SOC_ENUM("Mic Boost", vt1613_boost_enum),
++ SOC_ENUM("Mic1/2 Mux", vt1613_mic_enum),
++ SOC_ENUM("Mic Select", vt1613_mic_sel_enum),
++ SOC_ENUM("Record Mux", vt1613_record_enum),
++};
++
++static const unsigned int vt1613_rates[] = {
++ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
++};
++
++static const struct snd_pcm_hw_constraint_list vt1613_rate_constraints = {
++ .count = ARRAY_SIZE(vt1613_rates),
++ .list = vt1613_rates,
++};
++
++static int vt1613_startup(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK){
++ snd_pcm_hw_constraint_list(substream->runtime, 0,
++ SNDRV_PCM_HW_PARAM_RATE, &vt1613_rate_constraints);
++ } else {
++ snd_pcm_hw_constraint_list(substream->runtime, 0,
++ SNDRV_PCM_HW_PARAM_RATE, &vt1613_rate_constraints);
++ }
++
++ return 0;
++}
++
++static int ac97_analog_prepare(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_codec *codec = dai->codec;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned short reg;
++
++ /* enable variable rate audio (VRA) and disable S/PDIF output */
++ snd_soc_write(codec, AC97_EXTENDED_STATUS,
++ (snd_soc_read(codec, AC97_EXTENDED_STATUS) | 0x1) & ~0x4);
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK){
++ reg = AC97_PCM_FRONT_DAC_RATE;
++ } else {
++ reg = AC97_PCM_LR_ADC_RATE;
++ }
++
++ return snd_soc_write(codec, reg, runtime->rate);
++}
++
++static int ac97_digital_prepare(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_codec *codec = dai->codec;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++
++ snd_soc_write(codec, AC97_SPDIF, 0x2002);
++
++ /* enable VRA and S/PDIF output */
++ snd_soc_write(codec, AC97_EXTENDED_STATUS, snd_soc_read(codec, AC97_EXTENDED_STATUS) | 0x5);
++
++ return snd_soc_write(codec, AC97_PCM_FRONT_DAC_RATE, runtime->rate);
++}
++
++static int vt1613_set_bias_level(struct snd_soc_codec *codec,
++ enum snd_soc_bias_level level)
++{
++ switch (level) {
++ case SND_SOC_BIAS_ON: /* full On */
++ case SND_SOC_BIAS_PREPARE: /* partial On */
++ case SND_SOC_BIAS_STANDBY: /* Off, with power */
++ snd_soc_write(codec, AC97_POWERDOWN, 0x0000);
++ break;
++ case SND_SOC_BIAS_OFF: /* Off, without power */
++ /* disable everything including AC link */
++ snd_soc_write(codec, AC97_POWERDOWN, 0xffff);
++ break;
++ }
++ codec->dapm.bias_level = level;
++ return 0;
++}
++
++static int vt1613_reset(struct snd_soc_codec *codec, int try_warm)
++{
++
++ if (try_warm && soc_ac97_ops->warm_reset) {
++ soc_ac97_ops->warm_reset(codec->ac97);
++ if (snd_soc_read(codec, AC97_RESET) == 0x0140)
++ return 1;
++ }
++ soc_ac97_ops->reset(codec->ac97);
++ if (soc_ac97_ops->warm_reset)
++ soc_ac97_ops->warm_reset(codec->ac97);
++ if (snd_soc_read(codec, AC97_RESET) == 0x0140)
++ return 0;
++
++ return -EIO;
++}
++
++static struct snd_soc_dai_ops vt1613_dai_ops_analog = {
++ .startup = vt1613_startup,
++ .prepare = ac97_analog_prepare,
++};
++
++static struct snd_soc_dai_ops vt1613_dai_ops_digital = {
++ .prepare = ac97_digital_prepare,
++};
++
++struct snd_soc_dai_driver vt1613_dai[] = {
++{
++ .name = "vt1613-hifi-analog",
++ .ac97_control = 1,
++
++ .playback = {
++ .stream_name = "Playback",
++ .channels_min = 1,
++ .channels_max = 2,
++ .rates = SNDRV_PCM_RATE_KNOT,
++ .formats = SND_SOC_STD_AC97_FMTS,
++ },
++ .capture = {
++ .stream_name = "Capture",
++ .channels_min = 1,
++ .channels_max = 2,
++ .rates = SNDRV_PCM_RATE_KNOT,
++ .formats = SND_SOC_STD_AC97_FMTS,
++ },
++
++ .ops = &vt1613_dai_ops_analog,
++},
++{
++ .name = "vt1613-hifi-IEC958",
++ .ac97_control = 1,
++
++ .playback = {
++ .stream_name = "vt1613 IEC958",
++ .channels_min = 1,
++ .channels_max = 2,
++ .rates = SNDRV_PCM_RATE_32000 |
++ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
++ .formats = SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE,
++ },
++
++ .ops = &vt1613_dai_ops_digital,
++}
++};
++
++static int vt1613_codec_suspend(struct snd_soc_codec *codec)
++{
++ vt1613_set_bias_level(codec, SND_SOC_BIAS_OFF);
++
++ return 0;
++}
++
++static int vt1613_codec_resume(struct snd_soc_codec *codec)
++{
++ u16 id, reset;
++
++ reset = 0;
++ /* give the codec an AC97 warm reset to start the link */
++reset:
++ if (reset > 5) {
++ printk(KERN_ERR "vt1613 failed to resume");
++ return -EIO;
++ }
++ codec->ac97->bus->ops->warm_reset(codec->ac97);
++ id = soc_ac97_ops->read(codec->ac97, AC97_VENDOR_ID2);
++ if (id != 0x4123) {
++ vt1613_reset(codec, 0);
++ reset++;
++ goto reset;
++ }
++ vt1613_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
++
++ return 0;
++}
++
++static int vt1613_codec_probe(struct snd_soc_codec *codec)
++{
++ int ret = 0;
++ struct regmap *regmap;
++
++ ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0);
++ if (ret){
++ dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
++ return ret;
++ }
++
++ soc_ac97_dev_register(codec);
++ if (ret){
++ dev_err(codec->dev, "Failed to register AC97 codec to bus: %d\n", ret);
++ goto free_ac97;
++ }
++ codec->ac97_registered = 1;
++
++ regmap = regmap_init_ac97(codec->ac97, &vt1613_regmap_config);
++ if (IS_ERR(regmap)) {
++ ret = PTR_ERR(regmap);
++ dev_err(codec->dev, "Failed to init register map: %d\n", ret);
++ goto free_ac97;
++ }
++
++ codec->control_data = regmap;
++ ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP);
++ if (ret < 0) {
++ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
++ goto free_regmap;
++ }
++ snd_soc_codec_set_drvdata(codec, codec->ac97);
++
++ /* do a cold reset for the controller and then try
++ * a warm reset followed by an optional cold reset for codec */
++
++ vt1613_reset(codec, 0);
++ ret = vt1613_reset(codec, 1);
++ if (ret < 0) {
++ printk(KERN_ERR "Failed to reset VT1613: AC97 link error\n");
++ goto free_regmap;
++ }
++
++ /* Read out vendor IDs */
++ printk(KERN_INFO "VT1613 SoC Audio Codec [ID = %04x - %04x]\n",
++ snd_soc_read(codec, AC97_VENDOR_ID1), snd_soc_read(codec, AC97_VENDOR_ID2));
++
++ vt1613_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
++
++ /* unmute captures and playbacks volume */
++ snd_soc_write(codec, AC97_MASTER, 0x0000);
++ snd_soc_write(codec, AC97_PCM, 0x0000);
++ snd_soc_write(codec, AC97_REC_GAIN, 0x0000);
++
++ /* At 3.3V analog supply, for the bits 3:2 should be set 10b for the lowest power instead of default 00b */
++ snd_soc_write(codec, AC97_AD_TEST, snd_soc_read(codec, AC97_AD_TEST) | 0x0008);
++
++ /* To maximize recording quality by removing white noise */
++ snd_soc_write(codec, AC97_AD_TEST, snd_soc_read(codec, AC97_AD_TEST) | 0x0400);
++
++ snd_soc_add_codec_controls(codec, vt1613_snd_ac97_controls,
++ ARRAY_SIZE(vt1613_snd_ac97_controls));
++
++ return 0;
++
++free_regmap:
++ regmap_exit(regmap);
++free_ac97:
++ snd_soc_free_ac97_codec(codec);
++
++ return ret;
++}
++
++static int vt1613_codec_remove(struct snd_soc_codec *codec)
++{
++ regmap_exit(codec->control_data);
++ snd_soc_free_ac97_codec(codec);
++ return 0;
++}
++
++struct snd_soc_codec_driver vt1613_codec = {
++ .probe = vt1613_codec_probe,
++ .remove = vt1613_codec_remove,
++ .suspend = vt1613_codec_suspend,
++ .resume = vt1613_codec_resume,
++ .set_bias_level = vt1613_set_bias_level,
++};
++
++static int vt1613_probe(struct platform_device *pdev)
++{
++ return snd_soc_register_codec(&pdev->dev,
++ &vt1613_codec, vt1613_dai, ARRAY_SIZE(vt1613_dai));
++}
++
++static int vt1613_remove(struct platform_device *pdev)
++{
++ snd_soc_unregister_codec(&pdev->dev);
++ return 0;
++}
++
++static const struct of_device_id vt1613_of_match[] = {
++ { .compatible = "via,vt1613", },
++ { }
++};
++MODULE_DEVICE_TABLE(of, vt1613_of_match);
++
++static struct platform_driver vt1613_codec_driver = {
++ .driver = {
++ .name = DRV_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = vt1613_of_match,
++ },
++
++ .probe = vt1613_probe,
++ .remove = vt1613_remove,
++};
++
++module_platform_driver(vt1613_codec_driver);
++
++MODULE_DESCRIPTION("ASoC VT1613 codec driver");
++MODULE_AUTHOR("Seco s.r.l.");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:" DRV_NAME);
+diff -Nur linux-3.14.36/sound/soc/codecs/vt1613.h linux-openelec/sound/soc/codecs/vt1613.h
+--- linux-3.14.36/sound/soc/codecs/vt1613.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/codecs/vt1613.h 2015-07-24 18:03:30.316842002 -0500
+@@ -0,0 +1,29 @@
++/*
++ * vt1613.h - VT1613 audio codec interface
++ *
++ * Copyright 2010-2015 Seco s.r.l.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#ifndef _VT1613_H
++#define _VT1613_H
++
++#define AC97_VT1613_DAC_SLOT_MAP 0x6C
++#define AC97_VT1613_ADC_SLOT_MAP 0x6E
++
++#define AC97_VT1613_GPIO_CTRL 0x78
++#define AC97_VT1613_GPIO_STATUS 0x7A
++
++#define AC97_VT1613_STEREO_MIC 0x5C
++
++/* VT1613 DAI ID's */
++#define VT1613_DAI_AC97_ANALOG 0
++#define VT1613_DAI_AC97_DIGITAL 1
++
++extern bool vt1613_modules_dep_ok;
++
++#endif
+diff -Nur linux-3.14.36/sound/soc/codecs/wm8962.c linux-openelec/sound/soc/codecs/wm8962.c
+--- linux-3.14.36/sound/soc/codecs/wm8962.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/codecs/wm8962.c 2015-05-06 12:05:43.000000000 -0500
+@@ -16,6 +16,7 @@
+ #include <linux/init.h>
+ #include <linux/delay.h>
+ #include <linux/pm.h>
++#include <linux/clk.h>
+ #include <linux/gcd.h>
+ #include <linux/gpio.h>
+ #include <linux/i2c.h>
+@@ -2942,7 +2943,8 @@
+ WM8962_DAC_MUTE, val);
+ }
+
+-#define WM8962_RATES SNDRV_PCM_RATE_8000_96000
++#define WM8962_RATES (SNDRV_PCM_RATE_8000_48000 |\
++ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+ #define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+@@ -3536,6 +3538,15 @@
+ pdata->gpio_init[i] = 0x0;
+ }
+
++ pdata->codec_mclk = devm_clk_get(&i2c->dev, NULL);
++
++ /*
++ * If clk_get() failed, we assume that clock's enabled by default.
++ * Otherwise, we let driver prepare and control the clock source.
++ */
++ if (IS_ERR(pdata->codec_mclk))
++ pdata->codec_mclk = NULL;
++
+ return 0;
+ }
+
+@@ -3567,6 +3578,9 @@
+ return ret;
+ }
+
++ if (wm8962->pdata.codec_mclk)
++ clk_prepare(wm8962->pdata.codec_mclk);
++
+ for (i = 0; i < ARRAY_SIZE(wm8962->supplies); i++)
+ wm8962->supplies[i].supply = wm8962_supply_names[i];
+
+@@ -3669,6 +3683,27 @@
+ WM8962_MICBIAS_LVL,
+ wm8962->pdata.mic_cfg);
+
++ /* set the default volume for playback and record*/
++ snd_soc_update_bits(codec, WM8962_HPOUTL_VOLUME,
++ WM8962_HPOUTL_VOL_MASK, 0x5d);
++ snd_soc_update_bits(codec, WM8962_HPOUTR_VOLUME,
++ WM8962_HPOUTR_VOL_MASK, 0x5d);
++ snd_soc_update_bits(codec, WM8962_SPKOUTL_VOLUME,
++ WM8962_SPKOUTL_VOL_MASK, 0x72);
++ snd_soc_update_bits(codec, WM8962_SPKOUTR_VOLUME,
++ WM8962_SPKOUTR_VOL_MASK, 0x72);
++
++ snd_soc_update_bits(codec, WM8962_LEFT_INPUT_VOLUME,
++ WM8962_INL_VOL_MASK, 0x3f);
++ snd_soc_update_bits(codec, WM8962_RIGHT_INPUT_VOLUME,
++ WM8962_INR_VOL_MASK, 0x3f);
++ snd_soc_update_bits(codec, WM8962_LEFT_ADC_VOLUME,
++ WM8962_ADCL_VOL_MASK, 0xd8);
++ snd_soc_update_bits(codec, WM8962_RIGHT_ADC_VOLUME,
++ WM8962_ADCR_VOL_MASK, 0xd8);
++ snd_soc_update_bits(codec, WM8962_RIGHT_INPUT_MIXER_VOLUME,
++ WM8962_IN3R_MIXINR_VOL_MASK, 0x7);
++
+ /* Latch volume update bits */
+ regmap_update_bits(wm8962->regmap, WM8962_LEFT_INPUT_VOLUME,
+ WM8962_IN_VU, WM8962_IN_VU);
+@@ -3752,6 +3787,9 @@
+
+ regcache_cache_only(wm8962->regmap, true);
+
++ /* The cache-only should be turned on before we power down the codec */
++ regcache_cache_only(wm8962->regmap, true);
++
+ /* The drivers should power up as needed */
+ regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
+
+@@ -3760,11 +3798,19 @@
+ err_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
+ err:
++ if (wm8962->pdata.codec_mclk)
++ clk_unprepare(wm8962->pdata.codec_mclk);
++
+ return ret;
+ }
+
+ static int wm8962_i2c_remove(struct i2c_client *client)
+ {
++ struct wm8962_priv *wm8962 = dev_get_drvdata(&client->dev);
++
++ if (wm8962->pdata.codec_mclk)
++ clk_unprepare(wm8962->pdata.codec_mclk);
++
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+ }
+@@ -3775,6 +3821,9 @@
+ struct wm8962_priv *wm8962 = dev_get_drvdata(dev);
+ int ret;
+
++ if (wm8962->pdata.codec_mclk)
++ clk_enable(wm8962->pdata.codec_mclk);
++
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies),
+ wm8962->supplies);
+ if (ret != 0) {
+@@ -3834,6 +3883,10 @@
+ regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies),
+ wm8962->supplies);
+
++ if (wm8962->pdata.codec_mclk)
++ clk_disable(wm8962->pdata.codec_mclk);
++
++
+ return 0;
+ }
+ #endif
+diff -Nur linux-3.14.36/sound/soc/fsl/fsl_asrc.c linux-openelec/sound/soc/fsl/fsl_asrc.c
+--- linux-3.14.36/sound/soc/fsl/fsl_asrc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/fsl_asrc.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,498 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/mxc_asrc.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++#include <sound/initval.h>
++#include <sound/dmaengine_pcm.h>
++
++#include "fsl_asrc.h"
++#include "imx-pcm.h"
++
++static bool filter(struct dma_chan *chan, void *param)
++{
++ if (!imx_dma_is_general_purpose(chan))
++ return false;
++
++ chan->private = param;
++
++ return true;
++}
++
++static int asrc_p2p_request_channel(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(cpu_dai);
++ enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
++ struct snd_dmaengine_dai_dma_data *dma_params_fe = NULL;
++ struct imx_dma_data *fe_filter_data = NULL;
++ struct imx_dma_data *be_filter_data = NULL;
++
++ struct dma_slave_config slave_config;
++ dma_cap_mask_t mask;
++ struct dma_chan *chan;
++ int ret;
++ struct snd_soc_dpcm *dpcm;
++
++ /* find the be for this fe stream */
++ list_for_each_entry(dpcm, &rtd->dpcm[substream->stream].be_clients, list_be) {
++ if (dpcm->fe == rtd) {
++ struct snd_soc_pcm_runtime *be = dpcm->be;
++ struct snd_soc_dai *dai = be->cpu_dai;
++ struct snd_pcm_substream *be_substream;
++ be_substream = snd_soc_dpcm_get_substream(be, substream->stream);
++ dma_params_be = snd_soc_dai_get_dma_data(dai, be_substream);
++ break;
++ }
++ }
++
++ if (!dma_params_be) {
++ dev_err(rtd->card->dev, "can not get be substream\n");
++ return -EINVAL;
++ }
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ dma_params_fe = &asrc_p2p->dma_params_tx;
++ else
++ dma_params_fe = &asrc_p2p->dma_params_rx;
++
++ fe_filter_data = dma_params_fe->filter_data;
++ be_filter_data = dma_params_be->filter_data;
++
++ if (asrc_p2p->output_width == 16)
++ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ else
++ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
++
++ /* reconfig memory to FIFO dma request */
++ dma_params_fe->addr = asrc_p2p->asrc_ops.asrc_p2p_per_addr(
++ asrc_p2p->asrc_index, 1);
++ fe_filter_data->dma_request0 = asrc_p2p->dmarx[asrc_p2p->asrc_index];
++ dma_params_fe->maxburst = dma_params_be->maxburst;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_CYCLIC, mask);
++
++ /* config p2p dma channel */
++ asrc_p2p->asrc_p2p_dma_data.peripheral_type = IMX_DMATYPE_ASRC;
++ asrc_p2p->asrc_p2p_dma_data.priority = DMA_PRIO_HIGH;
++ asrc_p2p->asrc_p2p_dma_data.dma_request1 = asrc_p2p->dmatx[asrc_p2p->asrc_index];
++ /* need to get target device's dma dma_addr, burstsize */
++ asrc_p2p->asrc_p2p_dma_data.dma_request0 = be_filter_data->dma_request0;
++
++ /* Request channel */
++ asrc_p2p->asrc_p2p_dma_chan =
++ dma_request_channel(mask, filter, &asrc_p2p->asrc_p2p_dma_data);
++
++ if (!asrc_p2p->asrc_p2p_dma_chan) {
++ dev_err(rtd->card->dev, "can not request dma channel\n");
++ goto error;
++ }
++ chan = asrc_p2p->asrc_p2p_dma_chan;
++
++ /*
++ * Buswidth is not used in the sdma for p2p. Here we set the maxburst fix to
++ * twice of dma_params's burstsize.
++ */
++ slave_config.direction = DMA_DEV_TO_DEV;
++ slave_config.src_addr = asrc_p2p->asrc_ops.asrc_p2p_per_addr(asrc_p2p->asrc_index, 0);
++ slave_config.src_addr_width = buswidth;
++ slave_config.src_maxburst = dma_params_be->maxburst * 2;
++ slave_config.dst_addr = dma_params_be->addr;
++ slave_config.dst_addr_width = buswidth;
++ slave_config.dst_maxburst = dma_params_be->maxburst * 2;
++ slave_config.dma_request0 = be_filter_data->dma_request0;
++ slave_config.dma_request1 = asrc_p2p->dmatx[asrc_p2p->asrc_index];
++
++ ret = dmaengine_slave_config(asrc_p2p->asrc_p2p_dma_chan,
++ &slave_config);
++ if (ret) {
++ dev_err(rtd->card->dev, "can not config dma channel\n");
++ goto error;
++ }
++
++ return 0;
++error:
++ if (asrc_p2p->asrc_p2p_dma_chan) {
++ dma_release_channel(asrc_p2p->asrc_p2p_dma_chan);
++ asrc_p2p->asrc_p2p_dma_chan = NULL;
++ }
++
++ return -EINVAL;
++}
++
++static int config_asrc(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(cpu_dai);
++ unsigned int rate = params_rate(params);
++ unsigned int channel = params_channels(params);
++ struct asrc_config config = {0};
++ int output_word_width = 0;
++ int input_word_width = 0;
++ int ret = 0;
++ if ((channel != 2) && (channel != 4) && (channel != 6)) {
++ dev_err(cpu_dai->dev, "param channel is not correct\n");
++ return -EINVAL;
++ }
++
++ ret = asrc_p2p->asrc_ops.asrc_p2p_req_pair(channel, &asrc_p2p->asrc_index);
++ if (ret < 0) {
++ dev_err(cpu_dai->dev, "Fail to request asrc pair\n");
++ return -EINVAL;
++ }
++
++ if (asrc_p2p->output_width == 16)
++ output_word_width = ASRC_WIDTH_16_BIT;
++ else
++ output_word_width = ASRC_WIDTH_24_BIT;
++
++ switch (params_format(params)) {
++ case SNDRV_PCM_FORMAT_U16:
++ case SNDRV_PCM_FORMAT_S16_LE:
++ case SNDRV_PCM_FORMAT_S16_BE:
++ input_word_width = ASRC_WIDTH_16_BIT;
++ break;
++ case SNDRV_PCM_FORMAT_S20_3LE:
++ case SNDRV_PCM_FORMAT_S20_3BE:
++ case SNDRV_PCM_FORMAT_S24_3LE:
++ case SNDRV_PCM_FORMAT_S24_3BE:
++ case SNDRV_PCM_FORMAT_S24_BE:
++ case SNDRV_PCM_FORMAT_S24_LE:
++ case SNDRV_PCM_FORMAT_U24_BE:
++ case SNDRV_PCM_FORMAT_U24_LE:
++ case SNDRV_PCM_FORMAT_U24_3BE:
++ case SNDRV_PCM_FORMAT_U24_3LE:
++ input_word_width = ASRC_WIDTH_24_BIT;
++ break;
++ case SNDRV_PCM_FORMAT_S8:
++ case SNDRV_PCM_FORMAT_U8:
++ case SNDRV_PCM_FORMAT_S32:
++ case SNDRV_PCM_FORMAT_U32:
++ default:
++ dev_err(cpu_dai->dev, "Format is not support!\n");
++ return -EINVAL;
++ }
++
++ config.input_word_width = input_word_width;
++ config.output_word_width = output_word_width;
++ config.pair = asrc_p2p->asrc_index;
++ config.channel_num = channel;
++ config.input_sample_rate = rate;
++ config.output_sample_rate = asrc_p2p->output_rate;
++ config.inclk = INCLK_NONE;
++
++ switch (asrc_p2p->per_dev) {
++ case SSI1:
++ config.outclk = OUTCLK_SSI1_TX;
++ break;
++ case SSI2:
++ config.outclk = OUTCLK_SSI2_TX;
++ break;
++ case SSI3:
++ config.outclk = OUTCLK_SSI3_TX;
++ break;
++ case ESAI:
++ config.outclk = OUTCLK_ESAI_TX;
++ break;
++ default:
++ dev_err(cpu_dai->dev, "peripheral device is not correct\n");
++ return -EINVAL;
++ }
++
++ ret = asrc_p2p->asrc_ops.asrc_p2p_config_pair(&config);
++ if (ret < 0) {
++ dev_err(cpu_dai->dev, "Fail to config asrc\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int fsl_asrc_p2p_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params,
++ struct snd_soc_dai *cpu_dai)
++{
++ int ret = 0;
++
++ ret = config_asrc(substream, params);
++ if (ret < 0)
++ return ret;
++
++ return asrc_p2p_request_channel(substream);
++}
++
++static int fsl_asrc_p2p_hw_free(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *cpu_dai)
++{
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(cpu_dai);
++
++ if (asrc_p2p->asrc_p2p_dma_chan) {
++ /* Release p2p dma resource */
++ dma_release_channel(asrc_p2p->asrc_p2p_dma_chan);
++ asrc_p2p->asrc_p2p_dma_chan = NULL;
++ }
++
++ if (asrc_p2p->asrc_index != -1) {
++ asrc_p2p->asrc_ops.asrc_p2p_release_pair(asrc_p2p->asrc_index);
++ asrc_p2p->asrc_ops.asrc_p2p_finish_conv(asrc_p2p->asrc_index);
++ }
++ asrc_p2p->asrc_index = -1;
++
++ return 0;
++}
++
++static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream,
++ struct fsl_asrc_p2p *asrc_p2p)
++{
++ struct dma_async_tx_descriptor *desc = asrc_p2p->asrc_p2p_desc;
++ struct dma_chan *chan = asrc_p2p->asrc_p2p_dma_chan;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++
++ desc = dmaengine_prep_dma_cyclic(chan, 0xffff, 64, 64, DMA_DEV_TO_DEV, 0);
++ if (!desc) {
++ dev_err(dev, "failed to prepare slave dma\n");
++ return -EINVAL;
++ }
++
++ dmaengine_submit(desc);
++
++ return 0;
++}
++
++static int fsl_asrc_p2p_trigger(struct snd_pcm_substream *substream, int cmd,
++ struct snd_soc_dai *cpu_dai)
++{
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(cpu_dai);
++ int ret;
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ ret = fsl_asrc_dma_prepare_and_submit(substream, asrc_p2p);
++ if (ret)
++ return ret;
++ dma_async_issue_pending(asrc_p2p->asrc_p2p_dma_chan);
++ asrc_p2p->asrc_ops.asrc_p2p_start_conv(asrc_p2p->asrc_index);
++ break;
++ case SNDRV_PCM_TRIGGER_SUSPEND:
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ case SNDRV_PCM_TRIGGER_STOP:
++ dmaengine_terminate_all(asrc_p2p->asrc_p2p_dma_chan);
++ asrc_p2p->asrc_ops.asrc_p2p_stop_conv(asrc_p2p->asrc_index);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++#define IMX_ASRC_RATES SNDRV_PCM_RATE_8000_192000
++
++#define IMX_ASRC_FORMATS \
++ (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE | \
++ SNDRV_PCM_FORMAT_S20_3LE)
++
++static struct snd_soc_dai_ops fsl_asrc_p2p_dai_ops = {
++ .trigger = fsl_asrc_p2p_trigger,
++ .hw_params = fsl_asrc_p2p_hw_params,
++ .hw_free = fsl_asrc_p2p_hw_free,
++};
++
++static int fsl_asrc_p2p_dai_probe(struct snd_soc_dai *dai)
++{
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(dai);
++
++ dai->playback_dma_data = &asrc_p2p->dma_params_tx;
++ dai->capture_dma_data = &asrc_p2p->dma_params_rx;
++
++ return 0;
++}
++
++static struct snd_soc_dai_driver fsl_asrc_p2p_dai = {
++ .probe = fsl_asrc_p2p_dai_probe,
++ .playback = {
++ .stream_name = "asrc-Playback",
++ .channels_min = 1,
++ .channels_max = 10,
++ .rates = IMX_ASRC_RATES,
++ .formats = IMX_ASRC_FORMATS,
++ },
++ .capture = {
++ .stream_name = "asrc-Capture",
++ .channels_min = 1,
++ .channels_max = 4,
++ .rates = IMX_ASRC_RATES,
++ .formats = IMX_ASRC_FORMATS,
++ },
++ .ops = &fsl_asrc_p2p_dai_ops,
++};
++
++static const struct snd_soc_component_driver fsl_asrc_p2p_component = {
++ .name = "fsl-asrc-p2p",
++};
++
++/*
++ * This function will register the snd_soc_pcm_link drivers.
++ */
++static int fsl_asrc_p2p_probe(struct platform_device *pdev)
++{
++ struct fsl_asrc_p2p *asrc_p2p;
++ struct device_node *np = pdev->dev.of_node;
++ const char *p;
++ const uint32_t *iprop_rate, *iprop_width;
++ int ret = 0;
++
++ if (!of_device_is_available(np)) {
++ dev_err(&pdev->dev, "There is no device node\n");
++ return -ENODEV;
++ }
++
++ asrc_p2p = devm_kzalloc(&pdev->dev, sizeof(struct fsl_asrc_p2p), GFP_KERNEL);
++ if (!asrc_p2p) {
++ dev_err(&pdev->dev, "can not alloc memory\n");
++ return -ENOMEM;
++ }
++ asrc_p2p->asrc_ops.asrc_p2p_start_conv = asrc_start_conv;
++ asrc_p2p->asrc_ops.asrc_p2p_stop_conv = asrc_stop_conv;
++ asrc_p2p->asrc_ops.asrc_p2p_per_addr = asrc_get_per_addr;
++ asrc_p2p->asrc_ops.asrc_p2p_req_pair = asrc_req_pair;
++ asrc_p2p->asrc_ops.asrc_p2p_config_pair = asrc_config_pair;
++ asrc_p2p->asrc_ops.asrc_p2p_release_pair = asrc_release_pair;
++ asrc_p2p->asrc_ops.asrc_p2p_finish_conv = asrc_finish_conv;
++
++ asrc_p2p->asrc_index = -1;
++
++ iprop_rate = of_get_property(np, "fsl,output-rate", NULL);
++ if (iprop_rate)
++ asrc_p2p->output_rate = be32_to_cpup(iprop_rate);
++ else {
++ dev_err(&pdev->dev, "There is no output-rate in dts\n");
++ return -EINVAL;
++ }
++ iprop_width = of_get_property(np, "fsl,output-width", NULL);
++ if (iprop_width)
++ asrc_p2p->output_width = be32_to_cpup(iprop_width);
++
++ if (asrc_p2p->output_width != 16 && asrc_p2p->output_width != 24) {
++ dev_err(&pdev->dev, "output_width is not acceptable\n");
++ return -EINVAL;
++ }
++
++ ret = of_property_read_u32_array(np,
++ "fsl,asrc-dma-tx-events", asrc_p2p->dmatx, 3);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to get fsl,asrc-dma-tx-events.\n");
++ return -EINVAL;
++ }
++
++ ret = of_property_read_u32_array(np,
++ "fsl,asrc-dma-rx-events", asrc_p2p->dmarx, 3);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to get fsl,asrc-dma-rx-events.\n");
++ return -EINVAL;
++ }
++
++ asrc_p2p->filter_data_tx.peripheral_type = IMX_DMATYPE_ASRC;
++ asrc_p2p->filter_data_rx.peripheral_type = IMX_DMATYPE_ASRC;
++
++ asrc_p2p->dma_params_tx.filter_data = &asrc_p2p->filter_data_tx;
++ asrc_p2p->dma_params_rx.filter_data = &asrc_p2p->filter_data_rx;
++
++ platform_set_drvdata(pdev, asrc_p2p);
++
++ p = strrchr(np->full_name, '/') + 1;
++ strcpy(asrc_p2p->name, p);
++ fsl_asrc_p2p_dai.name = asrc_p2p->name;
++
++ ret = snd_soc_register_component(&pdev->dev, &fsl_asrc_p2p_component,
++ &fsl_asrc_p2p_dai, 1);
++ if (ret) {
++ dev_err(&pdev->dev, "register DAI failed\n");
++ goto failed_register;
++ }
++
++ asrc_p2p->soc_platform_pdev = platform_device_register_simple(
++ "imx-pcm-asrc", -1, NULL, 0);
++ if (IS_ERR(asrc_p2p->soc_platform_pdev)) {
++ ret = PTR_ERR(asrc_p2p->soc_platform_pdev);
++ goto failed_pdev_alloc;
++ }
++
++ ret = imx_pcm_dma_init(asrc_p2p->soc_platform_pdev, SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
++ SND_DMAENGINE_PCM_FLAG_NO_DT |
++ SND_DMAENGINE_PCM_FLAG_COMPAT,
++ IMX_ASRC_DMABUF_SIZE);
++ if (ret) {
++ dev_err(&pdev->dev, "init pcm dma failed\n");
++ goto failed_pcm_init;
++ }
++
++ return 0;
++
++failed_pcm_init:
++ platform_device_unregister(asrc_p2p->soc_platform_pdev);
++failed_pdev_alloc:
++ snd_soc_unregister_component(&pdev->dev);
++failed_register:
++
++ return ret;
++}
++
++static int fsl_asrc_p2p_remove(struct platform_device *pdev)
++{
++ struct fsl_asrc_p2p *asrc_p2p = platform_get_drvdata(pdev);
++
++ platform_device_unregister(asrc_p2p->soc_platform_pdev);
++ snd_soc_unregister_component(&pdev->dev);
++
++ return 0;
++}
++
++static const struct of_device_id fsl_asrc_p2p_dt_ids[] = {
++ { .compatible = "fsl,imx6q-asrc-p2p", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver fsl_asrc_p2p_driver = {
++ .probe = fsl_asrc_p2p_probe,
++ .remove = fsl_asrc_p2p_remove,
++ .driver = {
++ .name = "fsl-asrc-p2p",
++ .owner = THIS_MODULE,
++ .of_match_table = fsl_asrc_p2p_dt_ids,
++ },
++};
++module_platform_driver(fsl_asrc_p2p_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX ASoC ASRC P2P driver");
++MODULE_ALIAS("platform:fsl-asrc-p2p");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/sound/soc/fsl/fsl_asrc.h linux-openelec/sound/soc/fsl/fsl_asrc.h
+--- linux-3.14.36/sound/soc/fsl/fsl_asrc.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/fsl_asrc.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,48 @@
++/*
++ * fsl_asrc.h - ALSA ASRC interface
++ *
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. This file is licensed
++ * under the terms of the GNU General Public License version 2. This
++ * program is licensed "as is" without any warranty of any kind, whether
++ * express or implied.
++ */
++
++#ifndef _FSL_ASRC_P2P_H
++#define _FSL_ASRC_P2P_H
++
++#include <linux/mxc_asrc.h>
++#include <sound/dmaengine_pcm.h>
++#include <linux/platform_data/dma-imx.h>
++
++enum peripheral_device_type {
++ UNKNOWN,
++ SSI1,
++ SSI2,
++ SSI3,
++ ESAI,
++};
++
++struct fsl_asrc_p2p {
++ int output_rate;
++ int output_width;
++ enum asrc_pair_index asrc_index;
++ enum peripheral_device_type per_dev;
++ struct asrc_p2p_ops asrc_ops;
++
++ struct snd_dmaengine_dai_dma_data dma_params_rx;
++ struct snd_dmaengine_dai_dma_data dma_params_tx;
++ struct imx_dma_data filter_data_tx;
++ struct imx_dma_data filter_data_rx;
++
++ struct dma_async_tx_descriptor *asrc_p2p_desc;
++ struct dma_chan *asrc_p2p_dma_chan;
++ struct imx_dma_data asrc_p2p_dma_data;
++ struct platform_device *soc_platform_pdev;
++
++ int dmarx[3];
++ int dmatx[3];
++
++ char name[32];
++};
++
++#endif
+diff -Nur linux-3.14.36/sound/soc/fsl/fsl_asrc_pcm.c linux-openelec/sound/soc/fsl/fsl_asrc_pcm.c
+--- linux-3.14.36/sound/soc/fsl/fsl_asrc_pcm.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/fsl_asrc_pcm.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,41 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++
++
++/*
++ * Here add one platform module "imx-pcm-asrc" as pcm platform module.
++ * If we use the asrc_p2p node as the pcm platform, there will be one issue.
++ * snd_soc_dapm_new_dai_widgets will be called twice, one in probe link_dais,
++ * one in probe platform. so there will be two dai_widgets added to widget list.
++ * but only the seconed one will be recorded in dai->playback_widget.
++ * Machine driver will add the audio route, but when it go through the
++ * widget list, it will found the cpu_dai widget is the first one in the list.
++ * add use the first one to link the audio route.
++ * when use the fe/be architecture for asrc p2p, it need to go through from
++ * the fe->cpu_dai->playback_widget. but this is the second widget, so the
++ * result is that it can't find a availble audio route for p2p case. So here
++ * use another pcm platform to avoid this issue.
++ */
++static struct platform_driver imx_pcm_driver = {
++ .driver = {
++ .name = "imx-pcm-asrc",
++ .owner = THIS_MODULE,
++ },
++};
++
++module_platform_driver(imx_pcm_driver);
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX ASoC PCM driver");
++MODULE_ALIAS("platform:imx-pcm-asrc");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/sound/soc/fsl/fsl_esai.c linux-openelec/sound/soc/fsl/fsl_esai.c
+--- linux-3.14.36/sound/soc/fsl/fsl_esai.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/fsl_esai.c 2015-05-06 12:05:43.000000000 -0500
+@@ -785,7 +785,7 @@
+ return ret;
+ }
+
+- ret = imx_pcm_dma_init(pdev);
++ ret = imx_pcm_dma_init(pdev, NULL, IMX_ESAI_DMABUF_SIZE);
+ if (ret)
+ dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
+
+diff -Nur linux-3.14.36/sound/soc/fsl/fsl_hdmi.c linux-openelec/sound/soc/fsl/fsl_hdmi.c
+--- linux-3.14.36/sound/soc/fsl/fsl_hdmi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/fsl_hdmi.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,614 @@
++/*
++ * ALSA SoC HDMI Audio Layer for Freescale i.MX
++ *
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * Some code from patch_hdmi.c
++ * Copyright (c) 2008-2010 Intel Corporation. All rights reserved.
++ * Copyright (c) 2006 ATI Technologies Inc.
++ * Copyright (c) 2008 NVIDIA Corp. All rights reserved.
++ * Copyright (c) 2008 Wei Ni <wni@nvidia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <sound/pcm.h>
++#include <sound/soc.h>
++#include <sound/asoundef.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "imx-hdmi.h"
++
++
++static struct mxc_edid_cfg edid_cfg;
++
++static u32 playback_rates[HDMI_MAX_RATES];
++static u32 playback_sample_size[HDMI_MAX_SAMPLE_SIZE];
++static u32 playback_channels[HDMI_MAX_CHANNEL_CONSTRAINTS];
++
++static struct snd_pcm_hw_constraint_list playback_constraint_rates;
++static struct snd_pcm_hw_constraint_list playback_constraint_bits;
++static struct snd_pcm_hw_constraint_list playback_constraint_channels;
++
++#ifdef DEBUG
++static void dumpregs(struct snd_soc_dai *dai)
++{
++ u32 n, cts;
++
++ cts = (hdmi_readb(HDMI_AUD_CTS3) << 16) |
++ (hdmi_readb(HDMI_AUD_CTS2) << 8) |
++ hdmi_readb(HDMI_AUD_CTS1);
++
++ n = (hdmi_readb(HDMI_AUD_N3) << 16) |
++ (hdmi_readb(HDMI_AUD_N2) << 8) |
++ hdmi_readb(HDMI_AUD_N1);
++
++ dev_dbg(dai->dev, "HDMI_PHY_CONF0 0x%02x\n",
++ hdmi_readb(HDMI_PHY_CONF0));
++ dev_dbg(dai->dev, "HDMI_MC_CLKDIS 0x%02x\n",
++ hdmi_readb(HDMI_MC_CLKDIS));
++ dev_dbg(dai->dev, "HDMI_AUD_N[1-3] 0x%06x (%d)\n",
++ n, n);
++ dev_dbg(dai->dev, "HDMI_AUD_CTS[1-3] 0x%06x (%d)\n",
++ cts, cts);
++ dev_dbg(dai->dev, "HDMI_FC_AUDSCONF 0x%02x\n",
++ hdmi_readb(HDMI_FC_AUDSCONF));
++}
++#else
++static void dumpregs(struct snd_soc_dai *dai) {}
++#endif
++
++enum cea_speaker_placement {
++ FL = (1 << 0), /* Front Left */
++ FC = (1 << 1), /* Front Center */
++ FR = (1 << 2), /* Front Right */
++ FLC = (1 << 3), /* Front Left Center */
++ FRC = (1 << 4), /* Front Right Center */
++ RL = (1 << 5), /* Rear Left */
++ RC = (1 << 6), /* Rear Center */
++ RR = (1 << 7), /* Rear Right */
++ RLC = (1 << 8), /* Rear Left Center */
++ RRC = (1 << 9), /* Rear Right Center */
++ LFE = (1 << 10), /* Low Frequency Effect */
++ FLW = (1 << 11), /* Front Left Wide */
++ FRW = (1 << 12), /* Front Right Wide */
++ FLH = (1 << 13), /* Front Left High */
++ FCH = (1 << 14), /* Front Center High */
++ FRH = (1 << 15), /* Front Right High */
++ TC = (1 << 16), /* Top Center */
++};
++
++/*
++ * EDID SA bits in the CEA Speaker Allocation data block
++ */
++static int edid_speaker_allocation_bits[] = {
++ [0] = FL | FR,
++ [1] = LFE,
++ [2] = FC,
++ [3] = RL | RR,
++ [4] = RC,
++ [5] = FLC | FRC,
++ [6] = RLC | RRC,
++ [7] = FLW | FRW,
++ [8] = FLH | FRH,
++ [9] = TC,
++ [10] = FCH,
++};
++
++struct cea_channel_speaker_allocation {
++ int ca_index;
++ int speakers[8];
++
++ /* Derived values, just for convenience */
++ int channels;
++ int spk_mask;
++};
++
++/*
++ * This is an ordered list!
++ *
++ * The preceding ones have better chances to be selected by
++ * hdmi_channel_allocation().
++ */
++static struct cea_channel_speaker_allocation channel_allocations[] = {
++ /* channel: 7 6 5 4 3 2 1 0 */
++ { .ca_index = 0x00, .speakers = { 0, 0, 0, 0, 0, 0, FR, FL },},
++ /* 2.1 */
++ { .ca_index = 0x01, .speakers = { 0, 0, 0, 0, 0, LFE, FR, FL },},
++ /* Dolby Surround */
++ { .ca_index = 0x08, .speakers = { 0, 0, RR, RL, 0, 0, FR, FL },}, /* Prefer FL/FR/RL/RR over FL/FR/LFE/FC */
++ { .ca_index = 0x02, .speakers = { 0, 0, 0, 0, FC, 0, FR, FL },},
++ { .ca_index = 0x03, .speakers = { 0, 0, 0, 0, FC, LFE, FR, FL },},
++ { .ca_index = 0x04, .speakers = { 0, 0, 0, RC, 0, 0, FR, FL },},
++ { .ca_index = 0x05, .speakers = { 0, 0, 0, RC, 0, LFE, FR, FL },},
++ { .ca_index = 0x06, .speakers = { 0, 0, 0, RC, FC, 0, FR, FL },},
++ { .ca_index = 0x07, .speakers = { 0, 0, 0, RC, FC, LFE, FR, FL },},
++ { .ca_index = 0x09, .speakers = { 0, 0, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x0a, .speakers = { 0, 0, RR, RL, FC, 0, FR, FL },},
++ /* surround51 */
++ { .ca_index = 0x0b, .speakers = { 0, 0, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x0c, .speakers = { 0, RC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x0d, .speakers = { 0, RC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x0e, .speakers = { 0, RC, RR, RL, FC, 0, FR, FL },},
++ /* 6.1 */
++ { .ca_index = 0x0f, .speakers = { 0, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x10, .speakers = { RRC, RLC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x11, .speakers = { RRC, RLC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x12, .speakers = { RRC, RLC, RR, RL, FC, 0, FR, FL },},
++ /* surround71 */
++ { .ca_index = 0x13, .speakers = { RRC, RLC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x14, .speakers = { FRC, FLC, 0, 0, 0, 0, FR, FL },},
++ { .ca_index = 0x15, .speakers = { FRC, FLC, 0, 0, 0, LFE, FR, FL },},
++ { .ca_index = 0x16, .speakers = { FRC, FLC, 0, 0, FC, 0, FR, FL },},
++ { .ca_index = 0x17, .speakers = { FRC, FLC, 0, 0, FC, LFE, FR, FL },},
++ { .ca_index = 0x18, .speakers = { FRC, FLC, 0, RC, 0, 0, FR, FL },},
++ { .ca_index = 0x19, .speakers = { FRC, FLC, 0, RC, 0, LFE, FR, FL },},
++ { .ca_index = 0x1a, .speakers = { FRC, FLC, 0, RC, FC, 0, FR, FL },},
++ { .ca_index = 0x1b, .speakers = { FRC, FLC, 0, RC, FC, LFE, FR, FL },},
++ { .ca_index = 0x1c, .speakers = { FRC, FLC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x1d, .speakers = { FRC, FLC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x1e, .speakers = { FRC, FLC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x1f, .speakers = { FRC, FLC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x20, .speakers = { 0, FCH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x21, .speakers = { 0, FCH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x22, .speakers = { TC, 0, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x23, .speakers = { TC, 0, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x24, .speakers = { FRH, FLH, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x25, .speakers = { FRH, FLH, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x26, .speakers = { FRW, FLW, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x27, .speakers = { FRW, FLW, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x28, .speakers = { TC, RC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x29, .speakers = { TC, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2a, .speakers = { FCH, RC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2b, .speakers = { FCH, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2c, .speakers = { TC, FCH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2d, .speakers = { TC, FCH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2e, .speakers = { FRH, FLH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2f, .speakers = { FRH, FLH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x30, .speakers = { FRW, FLW, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x31, .speakers = { FRW, FLW, RR, RL, FC, LFE, FR, FL },},
++};
++
++/* Compute derived values in channel_allocations[] */
++static void init_channel_allocations(void)
++{
++ struct cea_channel_speaker_allocation *p;
++ int i, j;
++
++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
++ p = channel_allocations + i;
++ p->channels = 0;
++ p->spk_mask = 0;
++ for (j = 0; j < ARRAY_SIZE(p->speakers); j++)
++ if (p->speakers[j]) {
++ p->channels++;
++ p->spk_mask |= p->speakers[j];
++ }
++ }
++}
++
++/*
++ * The transformation takes two steps:
++ *
++ * speaker_alloc => (edid_speaker_allocation_bits[]) => spk_mask
++ * spk_mask => (channel_allocations[]) => CA
++ *
++ * TODO: it could select the wrong CA from multiple candidates.
++*/
++static int hdmi_channel_allocation(int channels)
++{
++ int spk_mask = 0, ca = 0, i, tmpchn, tmpspk;
++
++ /* CA defaults to 0 for basic stereo audio */
++ if (channels <= 2)
++ return 0;
++
++ /*
++ * Expand EDID's speaker allocation mask
++ *
++ * EDID tells the speaker mask in a compact(paired) form,
++ * expand EDID's notions to match the ones used by Audio InfoFrame.
++ */
++ for (i = 0; i < ARRAY_SIZE(edid_speaker_allocation_bits); i++) {
++ if (edid_cfg.speaker_alloc & (1 << i))
++ spk_mask |= edid_speaker_allocation_bits[i];
++ }
++
++ /* Search for the first working match in the CA table */
++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
++ tmpchn = channel_allocations[i].channels;
++ tmpspk = channel_allocations[i].spk_mask;
++
++ if (channels == tmpchn && (spk_mask & tmpspk) == tmpspk) {
++ ca = channel_allocations[i].ca_index;
++ break;
++ }
++ }
++
++ return ca;
++}
++
++static void hdmi_set_audio_infoframe(unsigned int channels)
++{
++ u8 audiconf0, audiconf2;
++
++ /*
++ * From CEA-861-D spec:
++ * HDMI requires the CT, SS and SF fields to be set to 0 ("Refer
++ * to Stream Header") as these items are carried in the audio stream.
++ *
++ * So we only set the CC and CA fields.
++ */
++ audiconf0 = ((channels - 1) << HDMI_FC_AUDICONF0_CC_OFFSET) &
++ HDMI_FC_AUDICONF0_CC_MASK;
++
++ audiconf2 = hdmi_channel_allocation(channels);
++
++ hdmi_writeb(audiconf0, HDMI_FC_AUDICONF0);
++ hdmi_writeb(0, HDMI_FC_AUDICONF1);
++ hdmi_writeb(audiconf2, HDMI_FC_AUDICONF2);
++ hdmi_writeb(0, HDMI_FC_AUDICONF3);
++}
++
++static int cea_audio_rates[HDMI_MAX_RATES] = {
++ 32000, 44100, 48000, 88200, 96000, 176400, 192000,
++};
++
++static void fsl_hdmi_get_playback_rates(void)
++{
++ int i, count = 0;
++ u8 rates;
++
++ /* Always assume basic audio support */
++ rates = edid_cfg.sample_rates | 0x7;
++
++ for (i = 0 ; i < HDMI_MAX_RATES ; i++)
++ if ((rates & (1 << i)) != 0)
++ playback_rates[count++] = cea_audio_rates[i];
++
++ playback_constraint_rates.list = playback_rates;
++ playback_constraint_rates.count = count;
++
++ for (i = 0 ; i < playback_constraint_rates.count ; i++)
++ pr_debug("%s: constraint = %d Hz\n", __func__, playback_rates[i]);
++}
++
++static void fsl_hdmi_get_playback_sample_size(void)
++{
++ int i = 0;
++
++ /* Always assume basic audio support */
++ playback_sample_size[i++] = 16;
++
++ if (edid_cfg.sample_sizes & 0x4)
++ playback_sample_size[i++] = 32;
++
++ playback_constraint_bits.list = playback_sample_size;
++ playback_constraint_bits.count = i;
++
++ for (i = 0 ; i < playback_constraint_bits.count ; i++)
++ pr_debug("%s: constraint = %d bits\n", __func__, playback_sample_size[i]);
++}
++
++static void fsl_hdmi_get_playback_channels(void)
++{
++ int channels = 2, i = 0;
++
++ /* Always assume basic audio support */
++ playback_channels[i++] = channels;
++ channels += 2;
++
++ while ((i < HDMI_MAX_CHANNEL_CONSTRAINTS) &&
++ (channels <= edid_cfg.max_channels)) {
++ playback_channels[i++] = channels;
++ channels += 2;
++ }
++
++ playback_constraint_channels.list = playback_channels;
++ playback_constraint_channels.count = i;
++
++ for (i = 0 ; i < playback_constraint_channels.count ; i++)
++ pr_debug("%s: constraint = %d channels\n", __func__, playback_channels[i]);
++}
++
++static int fsl_hdmi_update_constraints(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ int edid_status, ret;
++
++ edid_status = hdmi_get_edid_cfg(&edid_cfg);
++
++ if (edid_status && !edid_cfg.hdmi_cap)
++ return -1;
++
++ fsl_hdmi_get_playback_rates();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
++ &playback_constraint_rates);
++ if (ret)
++ return ret;
++
++ fsl_hdmi_get_playback_sample_size();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
++ &playback_constraint_bits);
++ if (ret)
++ return ret;
++
++ fsl_hdmi_get_playback_channels();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
++ &playback_constraint_channels);
++ if (ret)
++ return ret;
++
++ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int fsl_hdmi_soc_startup(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct imx_hdmi *hdmi_data = snd_soc_dai_get_drvdata(dai);
++ int ret;
++
++ ret = fsl_hdmi_update_constraints(substream);
++ if (ret < 0)
++ return ret;
++
++ clk_prepare_enable(hdmi_data->isfr_clk);
++ clk_prepare_enable(hdmi_data->iahb_clk);
++
++ dev_dbg(dai->dev, "%s hdmi clks: isfr:%d iahb:%d\n", __func__,
++ (int)clk_get_rate(hdmi_data->isfr_clk),
++ (int)clk_get_rate(hdmi_data->iahb_clk));
++
++ /* Indicates the subpacket represents a flatline sample */
++ hdmi_audio_writeb(FC_AUDSCONF, AUD_PACKET_SAMPFIT, 0x0);
++
++ return 0;
++}
++
++static void fsl_hdmi_soc_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct imx_hdmi *hdmi_data = snd_soc_dai_get_drvdata(dai);
++
++ clk_disable_unprepare(hdmi_data->iahb_clk);
++ clk_disable_unprepare(hdmi_data->isfr_clk);
++}
++
++static int fsl_hdmi_soc_prepare(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++
++ hdmi_set_audio_infoframe(runtime->channels);
++ hdmi_audio_writeb(FC_AUDSCONF, AUD_PACKET_LAYOUT,
++ (runtime->channels > 2) ? 0x1 : 0x0);
++ hdmi_set_sample_rate(runtime->rate);
++ dumpregs(dai);
++
++ return 0;
++}
++
++static struct snd_soc_dai_ops fsl_hdmi_soc_dai_ops = {
++ .startup = fsl_hdmi_soc_startup,
++ .shutdown = fsl_hdmi_soc_shutdown,
++ .prepare = fsl_hdmi_soc_prepare,
++};
++
++/* IEC60958 status functions */
++static int fsl_hdmi_iec_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
++ uinfo->count = 1;
++
++ return 0;
++}
++
++
++static int fsl_hdmi_iec_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uvalue)
++{
++ int i;
++
++ for (i = 0 ; i < 6 ; i++)
++ uvalue->value.iec958.status[i] = iec_header.status[i];
++
++ return 0;
++}
++
++static int fsl_hdmi_iec_put(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uvalue)
++{
++ int i;
++
++ /* Do not allow professional mode */
++ if (uvalue->value.iec958.status[0] & IEC958_AES0_PROFESSIONAL)
++ return -EPERM;
++
++ for (i = 0 ; i < 6 ; i++) {
++ iec_header.status[i] = uvalue->value.iec958.status[i];
++ pr_debug("%s status[%d]=0x%02x\n", __func__, i, iec_header.status[i]);
++ }
++
++ return 0;
++}
++
++static struct snd_kcontrol_new fsl_hdmi_ctrls[] = {
++ /* Status cchanel controller */
++ {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
++ .access = SNDRV_CTL_ELEM_ACCESS_READ |
++ SNDRV_CTL_ELEM_ACCESS_WRITE |
++ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
++ .info = fsl_hdmi_iec_info,
++ .get = fsl_hdmi_iec_get,
++ .put = fsl_hdmi_iec_put,
++ },
++};
++
++static int fsl_hdmi_soc_dai_probe(struct snd_soc_dai *dai)
++{
++ int ret;
++
++ init_channel_allocations();
++
++ ret = snd_soc_add_dai_controls(dai, fsl_hdmi_ctrls,
++ ARRAY_SIZE(fsl_hdmi_ctrls));
++ if (ret)
++ dev_warn(dai->dev, "failed to add dai controls\n");
++
++ return 0;
++}
++
++static struct snd_soc_dai_driver fsl_hdmi_dai = {
++ .probe = &fsl_hdmi_soc_dai_probe,
++ .playback = {
++ .channels_min = 2,
++ .channels_max = 8,
++ .rates = MXC_HDMI_RATES_PLAYBACK,
++ .formats = MXC_HDMI_FORMATS_PLAYBACK,
++ },
++ .ops = &fsl_hdmi_soc_dai_ops,
++};
++
++static const struct snd_soc_component_driver fsl_hdmi_component = {
++ .name = "fsl-hdmi",
++};
++
++static int fsl_hdmi_dai_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct imx_hdmi *hdmi_data;
++ int ret = 0;
++
++ if (!np)
++ return -ENODEV;
++
++ if (!hdmi_get_registered()) {
++ dev_err(&pdev->dev, "failed to probe. Load HDMI-video first.\n");
++ return -ENOMEM;
++ }
++
++ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
++ if (!hdmi_data) {
++ dev_err(&pdev->dev, "failed to alloc hdmi_data\n");
++ return -ENOMEM;
++ }
++
++ hdmi_data->pdev = pdev;
++
++ memcpy(&hdmi_data->cpu_dai_drv, &fsl_hdmi_dai, sizeof(fsl_hdmi_dai));
++ hdmi_data->cpu_dai_drv.name = np->name;
++
++ hdmi_data->isfr_clk = devm_clk_get(&pdev->dev, "hdmi_isfr");
++ if (IS_ERR(hdmi_data->isfr_clk)) {
++ ret = PTR_ERR(hdmi_data->isfr_clk);
++ dev_err(&pdev->dev, "failed to get HDMI isfr clk: %d\n", ret);
++ return -EINVAL;
++ }
++
++ hdmi_data->iahb_clk = devm_clk_get(&pdev->dev, "hdmi_iahb");
++ if (IS_ERR(hdmi_data->iahb_clk)) {
++ ret = PTR_ERR(hdmi_data->iahb_clk);
++ dev_err(&pdev->dev, "failed to get HDMI ahb clk: %d\n", ret);
++ return -EINVAL;
++ }
++
++ dev_set_drvdata(&pdev->dev, hdmi_data);
++ ret = snd_soc_register_component(&pdev->dev, &fsl_hdmi_component,
++ &hdmi_data->cpu_dai_drv, 1);
++ if (ret) {
++ dev_err(&pdev->dev, "register DAI failed\n");
++ return ret;
++ }
++
++ hdmi_data->codec_dev = platform_device_register_simple(
++ "hdmi-audio-codec", -1, NULL, 0);
++ if (IS_ERR(hdmi_data->codec_dev)) {
++ dev_err(&pdev->dev, "failed to register HDMI audio codec\n");
++ ret = PTR_ERR(hdmi_data->codec_dev);
++ goto fail;
++ }
++
++ hdmi_data->dma_dev = platform_device_alloc("imx-hdmi-audio", -1);
++ if (IS_ERR(hdmi_data->dma_dev)) {
++ ret = PTR_ERR(hdmi_data->dma_dev);
++ goto fail_dma;
++ }
++
++ platform_set_drvdata(hdmi_data->dma_dev, hdmi_data);
++
++ ret = platform_device_add(hdmi_data->dma_dev);
++ if (ret) {
++ platform_device_put(hdmi_data->dma_dev);
++ goto fail_dma;
++ }
++
++ return 0;
++
++fail_dma:
++ platform_device_unregister(hdmi_data->codec_dev);
++fail:
++ snd_soc_unregister_component(&pdev->dev);
++
++ return ret;
++}
++
++static int fsl_hdmi_dai_remove(struct platform_device *pdev)
++{
++ struct imx_hdmi *hdmi_data = platform_get_drvdata(pdev);
++
++ platform_device_unregister(hdmi_data->dma_dev);
++ platform_device_unregister(hdmi_data->codec_dev);
++ snd_soc_unregister_component(&pdev->dev);
++
++ return 0;
++}
++
++static const struct of_device_id fsl_hdmi_dai_dt_ids[] = {
++ { .compatible = "fsl,imx6dl-hdmi-audio", },
++ { .compatible = "fsl,imx6q-hdmi-audio", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, fsl_hdmi_dai_dt_ids);
++
++static struct platform_driver fsl_hdmi_driver = {
++ .probe = fsl_hdmi_dai_probe,
++ .remove = fsl_hdmi_dai_remove,
++ .driver = {
++ .name = "fsl-hdmi-dai",
++ .owner = THIS_MODULE,
++ .of_match_table = fsl_hdmi_dai_dt_ids,
++ },
++};
++module_platform_driver(fsl_hdmi_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IMX HDMI TX DAI");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:fsl-hdmi-dai");
+diff -Nur linux-3.14.36/sound/soc/fsl/fsl_spdif.c linux-openelec/sound/soc/fsl/fsl_spdif.c
+--- linux-3.14.36/sound/soc/fsl/fsl_spdif.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/fsl_spdif.c 2015-05-06 12:05:43.000000000 -0500
+@@ -21,6 +21,8 @@
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
+ #include <linux/of_irq.h>
++#include <linux/pm_runtime.h>
++#include <linux/busfreq-imx6.h>
+
+ #include <sound/asoundef.h>
+ #include <sound/soc.h>
+@@ -53,7 +55,7 @@
+ spinlock_t ctl_lock;
+
+ /* IEC958 channel tx status bit */
+- unsigned char ch_status[4];
++ unsigned char ch_status[6];
+
+ /* User bits */
+ unsigned char subcode[2 * SPDIF_UBITS_SIZE];
+@@ -80,6 +82,7 @@
+ u8 rxclk_src;
+ struct clk *txclk[SPDIF_TXRATE_MAX];
+ struct clk *rxclk;
++ struct clk *sysclk;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+
+@@ -295,11 +298,11 @@
+ return -EBUSY;
+ }
+
+-static void spdif_set_cstatus(struct spdif_mixer_control *ctrl,
+- u8 mask, u8 cstatus)
++static inline void spdif_set_cstatus(struct spdif_mixer_control *ctrl,
++ u8 byteno, u8 mask, u8 cstatus)
+ {
+- ctrl->ch_status[3] &= ~mask;
+- ctrl->ch_status[3] |= cstatus & mask;
++ ctrl->ch_status[byteno] &= ~mask;
++ ctrl->ch_status[byteno] |= cstatus & mask;
+ }
+
+ static void spdif_write_channel_status(struct fsl_spdif_priv *spdif_priv)
+@@ -316,10 +319,16 @@
+
+ dev_dbg(&pdev->dev, "STCSCH: 0x%06x\n", ch_status);
+
+- ch_status = bitrev8(ctrl->ch_status[3]) << 16;
++ ch_status = bitrev8(ctrl->ch_status[3]) << 16 |
++ (bitrev8(ctrl->ch_status[4]) << 8) |
++ bitrev8(ctrl->ch_status[5]);
+ regmap_write(regmap, REG_SPDIF_STCSCL, ch_status);
+
+ dev_dbg(&pdev->dev, "STCSCL: 0x%06x\n", ch_status);
++
++ /* Set outgoing validity (0: pcm, 1: non-audio) */
++ regmap_update_bits(regmap, REG_SPDIF_SCR, SCR_VAL_MASK,
++ (ctrl->ch_status[0] & IEC958_AES0_NONAUDIO) ? 0 : SCR_VAL_CLEAR);
+ }
+
+ /* Set SPDIF PhaseConfig register for rx clock */
+@@ -347,23 +356,45 @@
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+- unsigned long csfs = 0;
+ u32 stc, mask, rate;
+- u8 clk, div;
++ u8 clk, div, csfs, csofs;
+ int ret;
+
+ switch (sample_rate) {
+ case 32000:
+ rate = SPDIF_TXRATE_32000;
+ csfs = IEC958_AES3_CON_FS_32000;
++ csofs = IEC958_AES4_CON_ORIGFS_32000;
+ break;
+ case 44100:
+ rate = SPDIF_TXRATE_44100;
+ csfs = IEC958_AES3_CON_FS_44100;
++ csofs = IEC958_AES4_CON_ORIGFS_44100;
+ break;
+ case 48000:
+ rate = SPDIF_TXRATE_48000;
+ csfs = IEC958_AES3_CON_FS_48000;
++ csofs = IEC958_AES4_CON_ORIGFS_48000;
++ break;
++ case 88200:
++ rate = SPDIF_TXRATE_88200;
++ csfs = IEC958_AES3_CON_FS_88200;
++ csofs = IEC958_AES4_CON_ORIGFS_88200;
++ break;
++ case 96000:
++ rate = SPDIF_TXRATE_96000;
++ csfs = IEC958_AES3_CON_FS_96000;
++ csofs = IEC958_AES4_CON_ORIGFS_96000;
++ break;
++ case 176400:
++ rate = SPDIF_TXRATE_176400;
++ csfs = IEC958_AES3_CON_FS_176400;
++ csofs = IEC958_AES4_CON_ORIGFS_176400;
++ break;
++ case 192000:
++ rate = SPDIF_TXRATE_192000;
++ csfs = IEC958_AES3_CON_FS_192000;
++ csofs = IEC958_AES4_CON_ORIGFS_192000;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported sample rate %d\n", sample_rate);
+@@ -399,7 +430,8 @@
+ clk_get_rate(spdif_priv->txclk[rate]));
+
+ /* set fs field in consumer channel status */
+- spdif_set_cstatus(ctrl, IEC958_AES3_CON_FS, csfs);
++ spdif_set_cstatus(ctrl, 3, IEC958_AES3_CON_FS, csfs);
++ spdif_set_cstatus(ctrl, 4, IEC958_AES4_CON_ORIGFS, csofs);
+
+ /* select clock source and divisor */
+ stc = STC_TXCLK_ALL_EN | STC_TXCLK_SRC_SET(clk) | STC_TXCLK_DIV(div);
+@@ -421,6 +453,8 @@
+ u32 scr, mask, i;
+ int ret;
+
++ pm_runtime_get_sync(cpu_dai->dev);
++
+ /* Reset module and interrupts only for first initialization */
+ if (!cpu_dai->active) {
+ ret = spdif_softreset(spdif_priv);
+@@ -485,6 +519,8 @@
+ regmap_update_bits(regmap, REG_SPDIF_SCR,
+ SCR_LOW_POWER, SCR_LOW_POWER);
+ }
++
++ pm_runtime_put_sync(cpu_dai->dev);
+ }
+
+ static int fsl_spdif_hw_params(struct snd_pcm_substream *substream,
+@@ -505,8 +541,8 @@
+ __func__, sample_rate);
+ return ret;
+ }
+- spdif_set_cstatus(ctrl, IEC958_AES3_CON_CLOCK,
+- IEC958_AES3_CON_CLOCK_1000PPM);
++ spdif_set_cstatus(ctrl, 3, IEC958_AES3_CON_CLOCK,
++ IEC958_AES3_CON_CLOCK_1000PPM);
+ spdif_write_channel_status(spdif_priv);
+ } else {
+ /* Setup rx clock source */
+@@ -576,14 +612,13 @@
+ static int fsl_spdif_pb_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uvalue)
+ {
++ int i;
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+
+- uvalue->value.iec958.status[0] = ctrl->ch_status[0];
+- uvalue->value.iec958.status[1] = ctrl->ch_status[1];
+- uvalue->value.iec958.status[2] = ctrl->ch_status[2];
+- uvalue->value.iec958.status[3] = ctrl->ch_status[3];
++ for (i = 0; i < ARRAY_SIZE(ctrl->ch_status); i++)
++ uvalue->value.iec958.status[i] = ctrl->ch_status[i];
+
+ return 0;
+ }
+@@ -591,14 +626,13 @@
+ static int fsl_spdif_pb_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uvalue)
+ {
++ int i;
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+
+- ctrl->ch_status[0] = uvalue->value.iec958.status[0];
+- ctrl->ch_status[1] = uvalue->value.iec958.status[1];
+- ctrl->ch_status[2] = uvalue->value.iec958.status[2];
+- ctrl->ch_status[3] = uvalue->value.iec958.status[3];
++ for (i = 0; i < ARRAY_SIZE(ctrl->ch_status); i++)
++ ctrl->ch_status[i] = uvalue->value.iec958.status[i];
+
+ spdif_write_channel_status(spdif_priv);
+
+@@ -754,7 +788,7 @@
+ clksrc = (phaseconf >> SRPC_CLKSRC_SEL_OFFSET) & 0xf;
+ if (srpc_dpll_locked[clksrc] && (phaseconf & SRPC_DPLL_LOCKED)) {
+ /* Get bus clock from system */
+- busclk_freq = clk_get_rate(spdif_priv->rxclk);
++ busclk_freq = clk_get_rate(spdif_priv->sysclk);
+ }
+
+ /* FreqMeas_CLK = (BUS_CLK * FreqMeas) / 2 ^ 10 / GAINSEL / 128 */
+@@ -999,7 +1033,7 @@
+ struct clk *clk, u64 savesub,
+ enum spdif_txrate index)
+ {
+- const u32 rate[] = { 32000, 44100, 48000 };
++ const u32 rate[] = { 32000, 44100, 48000, 88200, 96000, 176400, 192000 };
+ u64 rate_ideal, rate_actual, sub;
+ u32 div, arate;
+
+@@ -1017,7 +1051,7 @@
+ break;
+ } else if (arate / rate[index] == 1) {
+ /* A little bigger than expect */
+- sub = (arate - rate[index]) * 100000;
++ sub = (u64)(arate - rate[index]) * 100000;
+ do_div(sub, rate[index]);
+ if (sub < savesub) {
+ savesub = sub;
+@@ -1025,7 +1059,7 @@
+ }
+ } else if (rate[index] / arate == 1) {
+ /* A little smaller than expect */
+- sub = (rate[index] - arate) * 100000;
++ sub = (u64)(rate[index] - arate) * 100000;
+ do_div(sub, rate[index]);
+ if (sub < savesub) {
+ savesub = sub;
+@@ -1040,7 +1074,7 @@
+ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv,
+ enum spdif_txrate index)
+ {
+- const u32 rate[] = { 32000, 44100, 48000 };
++ const u32 rate[] = { 32000, 44100, 48000, 88200, 96000, 176400, 192000 };
+ struct platform_device *pdev = spdif_priv->pdev;
+ struct device *dev = &pdev->dev;
+ u64 savesub = 100000, ret;
+@@ -1058,6 +1092,13 @@
+ if (!clk_get_rate(clk))
+ continue;
+
++ /* TODO: We here ignore sysclk source due to imperfect clock
++ * selecting mechanism: sysclk is a bit different which we can
++ * not change its clock rate but use another inner divider to
++ * derive a proper clock rate. */
++ if (i == SPDIF_CLK_SRC_SYSCLK)
++ continue;
++
+ ret = fsl_spdif_txclk_caldiv(spdif_priv, clk, savesub, index);
+ if (savesub == ret)
+ continue;
+@@ -1131,6 +1172,13 @@
+ return ret;
+ }
+
++ /* Get system clock for rx clock rate calculation */
++ spdif_priv->sysclk = devm_clk_get(&pdev->dev, "rxtx5");
++ if (IS_ERR(spdif_priv->sysclk)) {
++ dev_err(&pdev->dev, "no system clock(rxtx5) in devicetree\n");
++ return PTR_ERR(spdif_priv->sysclk);
++ }
++
+ /* Select clock source for rx/tx clock */
+ spdif_priv->rxclk = devm_clk_get(&pdev->dev, "rxtx1");
+ if (IS_ERR(spdif_priv->rxclk)) {
+@@ -1150,12 +1198,13 @@
+ spin_lock_init(&ctrl->ctl_lock);
+
+ /* Init tx channel status default value */
+- ctrl->ch_status[0] =
+- IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_5015;
++ ctrl->ch_status[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
+ ctrl->ch_status[1] = IEC958_AES1_CON_DIGDIGCONV_ID;
+ ctrl->ch_status[2] = 0x00;
+ ctrl->ch_status[3] =
+ IEC958_AES3_CON_FS_44100 | IEC958_AES3_CON_CLOCK_1000PPM;
++ ctrl->ch_status[4] = IEC958_AES4_CON_ORIGFS_44100;
++ ctrl->ch_status[5] = IEC958_AES5_CON_CGMSA_COPYFREELY;
+
+ spdif_priv->dpll_locked = false;
+
+@@ -1164,6 +1213,8 @@
+ spdif_priv->dma_params_tx.addr = res->start + REG_SPDIF_STL;
+ spdif_priv->dma_params_rx.addr = res->start + REG_SPDIF_SRL;
+
++ pm_runtime_enable(&pdev->dev);
++
+ /* Register with ASoC */
+ dev_set_drvdata(&pdev->dev, spdif_priv);
+
+@@ -1174,13 +1225,34 @@
+ return ret;
+ }
+
+- ret = imx_pcm_dma_init(pdev);
++ ret = imx_pcm_dma_init(pdev, SND_DMAENGINE_PCM_FLAG_COMPAT,
++ IMX_SPDIF_DMABUF_SIZE);
+ if (ret)
+ dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
+
+ return ret;
+ }
+
++#ifdef CONFIG_PM_RUNTIME
++static int fsl_spdif_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int fsl_spdif_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++#endif
++
++static const struct dev_pm_ops fsl_spdif_pm = {
++ SET_RUNTIME_PM_OPS(fsl_spdif_runtime_suspend,
++ fsl_spdif_runtime_resume,
++ NULL)
++};
++
+ static const struct of_device_id fsl_spdif_dt_ids[] = {
+ { .compatible = "fsl,imx35-spdif", },
+ {}
+@@ -1192,6 +1264,7 @@
+ .name = "fsl-spdif-dai",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_spdif_dt_ids,
++ .pm = &fsl_spdif_pm,
+ },
+ .probe = fsl_spdif_probe,
+ };
+diff -Nur linux-3.14.36/sound/soc/fsl/fsl_spdif.h linux-openelec/sound/soc/fsl/fsl_spdif.h
+--- linux-3.14.36/sound/soc/fsl/fsl_spdif.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/fsl_spdif.h 2015-05-06 12:05:43.000000000 -0500
+@@ -157,13 +157,19 @@
+ #define STC_TXCLK_DIV(x) ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_TXCLK_DIV_MASK)
+ #define STC_TXCLK_SRC_MAX 8
+
++#define SPDIF_CLK_SRC_SYSCLK 5
++
+ /* SPDIF tx rate */
+ enum spdif_txrate {
+ SPDIF_TXRATE_32000 = 0,
+ SPDIF_TXRATE_44100,
+ SPDIF_TXRATE_48000,
++ SPDIF_TXRATE_88200,
++ SPDIF_TXRATE_96000,
++ SPDIF_TXRATE_176400,
++ SPDIF_TXRATE_192000,
+ };
+-#define SPDIF_TXRATE_MAX (SPDIF_TXRATE_48000 + 1)
++#define SPDIF_TXRATE_MAX (SPDIF_TXRATE_192000 + 1)
+
+
+ #define SPDIF_CSTATUS_BYTE 6
+@@ -173,7 +179,11 @@
+
+ #define FSL_SPDIF_RATES_PLAYBACK (SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | \
+- SNDRV_PCM_RATE_48000)
++ SNDRV_PCM_RATE_48000 | \
++ SNDRV_PCM_RATE_88200 | \
++ SNDRV_PCM_RATE_96000 | \
++ SNDRV_PCM_RATE_176400| \
++ SNDRV_PCM_RATE_192000)
+
+ #define FSL_SPDIF_RATES_CAPTURE (SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_32000 | \
+diff -Nur linux-3.14.36/sound/soc/fsl/fsl_ssi.c linux-openelec/sound/soc/fsl/fsl_ssi.c
+--- linux-3.14.36/sound/soc/fsl/fsl_ssi.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/fsl_ssi.c 2015-07-24 18:03:30.316842002 -0500
+@@ -3,7 +3,7 @@
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+- * Copyright 2007-2010 Freescale Semiconductor, Inc.
++ * Copyright (C) 2007-2013 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+@@ -30,6 +30,7 @@
+ * around this by not polling these bits but only wait a fixed delay.
+ */
+
++#include <linux/busfreq-imx6.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
+@@ -43,6 +44,7 @@
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
++#include <linux/pm_runtime.h>
+
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+@@ -73,6 +75,24 @@
+ }
+ #endif
+
++#ifdef DEBUG
++#define NUM_OF_SSI_REG (sizeof(struct ccsr_ssi) / sizeof(__be32))
++
++void dump_reg(struct ccsr_ssi __iomem *ssi)
++{
++ u32 val, i;
++
++ for (i = 0; i < NUM_OF_SSI_REG; i++) {
++ if (&ssi->stx0 + i == NULL)
++ continue;
++ val = read_ssi(&ssi->stx0 + i);
++ pr_debug("REG %x = %x\n", (u32)(&ssi->stx0 + i) & 0xff, val);
++ }
++}
++#else
++void dump_reg(struct ccsr_ssi __iomem *ssi) {}
++#endif
++
+ /**
+ * FSLSSI_I2S_RATES: sample rates supported by the I2S
+ *
+@@ -171,8 +191,6 @@
+ struct clk *clk;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+- struct imx_dma_data filter_data_tx;
+- struct imx_dma_data filter_data_rx;
+ struct imx_pcm_fiq_params fiq_params;
+ /* Register values for rx/tx configuration */
+ struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
+@@ -206,6 +224,26 @@
+ char name[1];
+ };
+
++#ifdef CONFIG_PM_RUNTIME
++static int fsl_ssi_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_AUDIO);
++ return 0;
++}
++
++static int fsl_ssi_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_AUDIO);
++ return 0;
++}
++#endif
++
++static const struct dev_pm_ops fsl_ssi_pm = {
++ SET_RUNTIME_PM_OPS(fsl_ssi_runtime_suspend,
++ fsl_ssi_runtime_resume,
++ NULL)
++};
++
+ static const struct of_device_id fsl_ssi_ids[] = {
+ { .compatible = "fsl,mpc8610-ssi", .data = (void *) FSL_SSI_MCP8610},
+ { .compatible = "fsl,imx51-ssi", .data = (void *) FSL_SSI_MX51},
+@@ -489,6 +527,23 @@
+ }
+ }
+
++static void fsl_ssi_clk_ctrl(struct fsl_ssi_private *ssi_private, bool enable)
++{
++ if (enable) {
++ if (ssi_private->ssi_on_imx) {
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_enable(ssi_private->baudclk);
++ clk_enable(ssi_private->clk);
++ }
++ } else {
++ if (ssi_private->ssi_on_imx) {
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_disable(ssi_private->baudclk);
++ clk_disable(ssi_private->clk);
++ }
++ }
++}
++
+ /*
+ * Enable/Disable a ssi configuration. You have to pass either
+ * ssi_private->rxtx_reg_val.rx or tx as vals parameter.
+@@ -509,6 +564,8 @@
+ else
+ avals = &ssi_private->rxtx_reg_val.rx;
+
++ fsl_ssi_clk_ctrl(ssi_private, enable);
++
+ /* If vals should be disabled, start with disabling the unit */
+ if (!enable) {
+ u32 scr = vals->scr & (vals->scr ^ avals->scr);
+@@ -748,6 +805,8 @@
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ unsigned long flags;
+
++ pm_runtime_get_sync(dai->dev);
++
+ /* First, we only do fsl_ssi_setup() when SSI is going to be active.
+ * Second, fsl_ssi_setup was already called by ac97_init earlier if
+ * the driver is in ac97 mode.
+@@ -877,6 +936,9 @@
+ strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
+ CCSR_SSI_STCR_TXBIT0;
+ break;
++ case SND_SOC_DAIFMT_AC97:
++ scr |= CCSR_SSI_SCR_I2S_MODE_NORMAL;
++ break;
+ default:
+ return -EINVAL;
+ }
+@@ -912,6 +974,11 @@
+ case SND_SOC_DAIFMT_CBM_CFM:
+ scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+ break;
++ case SND_SOC_DAIFMT_CBM_CFS:
++ strcr &= ~CCSR_SSI_STCR_TXDIR; /* transmit clock is external */
++ strcr |= CCSR_SSI_STCR_TFDIR; /* frame sync generated internally */
++ scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
++ break;
+ default:
+ return -EINVAL;
+ }
+@@ -929,6 +996,9 @@
+ write_ssi(srcr, &ssi->srcr);
+ write_ssi(scr, &ssi->scr);
+
++ if (fmt & SND_SOC_DAIFMT_AC97)
++ fsl_ssi_setup_ac97(ssi_private);
++
+ return 0;
+ }
+
+@@ -1083,14 +1153,17 @@
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fsl_ssi_tx_config(ssi_private, true);
+ else
+ fsl_ssi_rx_config(ssi_private, true);
++ dump_reg(ssi);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
++ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fsl_ssi_tx_config(ssi_private, false);
+@@ -1119,6 +1192,12 @@
+ return 0;
+ }
+
++static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ pm_runtime_put_sync(dai->dev);
++}
++
+ static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
+ {
+ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
+@@ -1138,6 +1217,7 @@
+ .set_sysclk = fsl_ssi_set_dai_sysclk,
+ .set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
+ .trigger = fsl_ssi_trigger,
++ .shutdown = fsl_ssi_shutdown,
+ };
+
+ /* Template for the CPU dai driver structure */
+@@ -1163,6 +1243,7 @@
+ };
+
+ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
++ .probe = fsl_ssi_dai_probe,
+ .ac97_control = 1,
+ .playback = {
+ .stream_name = "AC97 Playback",
+@@ -1257,13 +1338,13 @@
+ int ret = 0;
+ struct device_attribute *dev_attr = NULL;
+ struct device_node *np = pdev->dev.of_node;
++ u32 dmas[4];
+ const struct of_device_id *of_id;
+ enum fsl_ssi_type hw_type;
+ const char *p, *sprop;
+ const uint32_t *iprop;
+ struct resource res;
+ char name[64];
+- bool shared;
+ bool ac97 = false;
+
+ /* SSIs that are not connected on the board should have a
+@@ -1381,7 +1462,6 @@
+
+ if (hw_type == FSL_SSI_MX21 || hw_type == FSL_SSI_MX51 ||
+ hw_type == FSL_SSI_MX35) {
+- u32 dma_events[2], dmas[4];
+ ssi_private->ssi_on_imx = true;
+
+ ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
+@@ -1390,9 +1470,9 @@
+ dev_err(&pdev->dev, "could not get clock: %d\n", ret);
+ goto error_irqmap;
+ }
+- ret = clk_prepare_enable(ssi_private->clk);
++ ret = clk_prepare(ssi_private->clk);
+ if (ret) {
+- dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n",
++ dev_err(&pdev->dev, "clk_prepare failed: %d\n",
+ ret);
+ goto error_irqmap;
+ }
+@@ -1405,41 +1485,21 @@
+ dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
+ PTR_ERR(ssi_private->baudclk));
+ else
+- clk_prepare_enable(ssi_private->baudclk);
++ clk_prepare(ssi_private->baudclk);
+
+ /*
+ * We have burstsize be "fifo_depth - 2" to match the SSI
+ * watermark setting in fsl_ssi_startup().
+ */
+- ssi_private->dma_params_tx.maxburst =
+- ssi_private->fifo_depth - 2;
+- ssi_private->dma_params_rx.maxburst =
+- ssi_private->fifo_depth - 2;
++ ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
++ ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
+ ssi_private->dma_params_tx.addr =
+ ssi_private->ssi_phys + offsetof(struct ccsr_ssi, stx0);
+ ssi_private->dma_params_rx.addr =
+ ssi_private->ssi_phys + offsetof(struct ccsr_ssi, srx0);
+- ssi_private->dma_params_tx.filter_data =
+- &ssi_private->filter_data_tx;
+- ssi_private->dma_params_rx.filter_data =
+- &ssi_private->filter_data_rx;
+- if (!of_property_read_bool(pdev->dev.of_node, "dmas") &&
+- ssi_private->use_dma) {
+- /*
+- * FIXME: This is a temporary solution until all
+- * necessary dma drivers support the generic dma
+- * bindings.
+- */
+- ret = of_property_read_u32_array(pdev->dev.of_node,
+- "fsl,ssi-dma-events", dma_events, 2);
+- if (ret && ssi_private->use_dma) {
+- dev_err(&pdev->dev, "could not get dma events but fsl-ssi is configured to use DMA\n");
+- goto error_clk;
+- }
+- }
+- /* Should this be merge with the above? */
+- if (!of_property_read_u32_array(pdev->dev.of_node, "dmas", dmas, 4)
+- && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
++
++ ret = !of_property_read_u32_array(np, "dmas", dmas, 4);
++ if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+ ssi_private->use_dual_fifo = true;
+ /* When using dual fifo mode, we need to keep watermark
+ * as even numbers due to dma script limitation.
+@@ -1447,14 +1507,10 @@
+ ssi_private->dma_params_tx.maxburst &= ~0x1;
+ ssi_private->dma_params_rx.maxburst &= ~0x1;
+ }
++ }
+
+- shared = of_device_is_compatible(of_get_parent(np),
+- "fsl,spba-bus");
+-
+- imx_pcm_dma_params_init_data(&ssi_private->filter_data_tx,
+- dma_events[0], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI);
+- imx_pcm_dma_params_init_data(&ssi_private->filter_data_rx,
+- dma_events[1], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI);
++ if (ac97) {
++ fsl_ssi_clk_ctrl(ssi_private, true);
+ }
+
+ /*
+@@ -1474,6 +1530,8 @@
+ }
+ }
+
++ pm_runtime_enable(&pdev->dev);
++
+ /* Register with ASoC */
+ dev_set_drvdata(&pdev->dev, ssi_private);
+
+@@ -1509,7 +1567,8 @@
+ if (ret)
+ goto error_pcm;
+ } else {
+- ret = imx_pcm_dma_init(pdev);
++ ret = imx_pcm_dma_init(pdev, NULL,
++ IMX_SSI_DMABUF_SIZE);
+ if (ret)
+ goto error_pcm;
+ }
+@@ -1565,12 +1624,16 @@
+ error_dev:
+ device_remove_file(&pdev->dev, dev_attr);
+
+-error_clk:
+ if (ssi_private->ssi_on_imx) {
+ if (!IS_ERR(ssi_private->baudclk))
+- clk_disable_unprepare(ssi_private->baudclk);
+- clk_disable_unprepare(ssi_private->clk);
++ clk_unprepare(ssi_private->baudclk);
++ clk_unprepare(ssi_private->clk);
+ }
++error_clk:
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_unprepare(ssi_private->baudclk);
++ if (!IS_ERR(ssi_private->clk))
++ clk_unprepare(ssi_private->clk);
+
+ error_irqmap:
+ if (ssi_private->irq_stats)
+@@ -1590,8 +1653,8 @@
+ snd_soc_unregister_component(&pdev->dev);
+ if (ssi_private->ssi_on_imx) {
+ if (!IS_ERR(ssi_private->baudclk))
+- clk_disable_unprepare(ssi_private->baudclk);
+- clk_disable_unprepare(ssi_private->clk);
++ clk_unprepare(ssi_private->baudclk);
++ clk_unprepare(ssi_private->clk);
+ }
+ if (ssi_private->irq_stats)
+ irq_dispose_mapping(ssi_private->irq);
+@@ -1604,6 +1667,7 @@
+ .name = "fsl-ssi-dai",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_ssi_ids,
++ .pm = &fsl_ssi_pm,
+ },
+ .probe = fsl_ssi_probe,
+ .remove = fsl_ssi_remove,
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-ac97-vt1613.c linux-openelec/sound/soc/fsl/imx-ac97-vt1613.c
+--- linux-3.14.36/sound/soc/fsl/imx-ac97-vt1613.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/imx-ac97-vt1613.c 2015-07-24 18:03:30.316842002 -0500
+@@ -0,0 +1,208 @@
++/*
++ * imx-ac97-vt1613.c -- SoC audio for i.MX Seco UDOO board with
++ * VT1613 AC'97 codec
++ * Copyright: Seco s.r.l.
++
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <sound/soc.h>
++#include <sound/soc-dapm.h>
++
++#include "../codecs/vt1613.h"
++#include "imx-audmux.h"
++#include "fsl_ssi.h"
++
++#define DRV_NAME "imx-ac97-vt1613"
++
++static int imx_vt1613_audio_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ int ret;
++
++ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_AC97
++ | SND_SOC_DAIFMT_NB_NF
++ | SND_SOC_DAIFMT_CBM_CFS);
++ if (ret < 0) {
++ dev_err(cpu_dai->dev,
++ "Failed to set cpu dai format: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++static struct snd_soc_ops imx_vt1613_audio_ops = {
++ .hw_params = imx_vt1613_audio_params,
++};
++
++
++static struct snd_soc_dai_link imx_vt1613_dai = {
++ .name = "vt1613-AC97",
++ .stream_name = "AC97-analog",
++ .codec_dai_name = "vt1613-hifi-analog",
++};
++
++static struct snd_soc_card imx_vt1613_card = {
++ .name = "imx-vt1613-audio",
++ .owner = THIS_MODULE,
++ .dai_link = &imx_vt1613_dai,
++ .num_links = 1,
++};
++
++static int imx_audmux_ac97_config(struct platform_device *pdev, int intPort, int extPort)
++{
++ int ret;
++ unsigned int ptcr, pdcr;
++
++ intPort = intPort - 1;
++ extPort = extPort - 1;
++
++ ptcr = IMX_AUDMUX_V2_PTCR_SYN |
++ IMX_AUDMUX_V2_PTCR_TCLKDIR | IMX_AUDMUX_V2_PTCR_TCSEL(extPort);
++ pdcr = IMX_AUDMUX_V2_PDCR_RXDSEL(extPort);
++
++ ret = imx_audmux_v2_configure_port(intPort, ptcr, pdcr);
++ if (ret) {
++ dev_err(&pdev->dev, "Audmux internal port setup failed\n");
++ return ret;
++ }
++
++ ptcr = IMX_AUDMUX_V2_PTCR_SYN |
++ IMX_AUDMUX_V2_PTCR_TFSDIR | IMX_AUDMUX_V2_PTCR_TFSEL(intPort);
++
++ pdcr = IMX_AUDMUX_V2_PDCR_RXDSEL(intPort);
++
++ ret = imx_audmux_v2_configure_port(extPort, ptcr, pdcr);
++ if (ret) {
++ dev_err(&pdev->dev, "Audmux external port setup failed\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int imx_vt1613_probe(struct platform_device *pdev)
++{
++ struct device_node *ssi_np, *codec_np, *np = pdev->dev.of_node;
++
++ struct platform_device *codec_pdev;
++ struct platform_device *ssi_pdev;
++ int int_port, ext_port;
++ int ret;
++
++ if (!of_machine_is_compatible("udoo,imx6q-udoo"))
++ return -ENODEV;
++
++ if (vt1613_modules_dep_ok) {
++ dev_dbg(&pdev->dev, "module dependency (codec module) ok\n");
++ } else {
++ dev_err(&pdev->dev, "module dependency (codec module) not ok\n");
++ }
++
++ ret = of_property_read_u32(np, "mux-int-port", &int_port);
++ if (ret) {
++ dev_err(&pdev->dev, "mux-int-port property missing or invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(np, "mux-ext-port", &ext_port);
++ if (ret) {
++ dev_err(&pdev->dev, "mux-ext-port property missing or invalid\n");
++ return ret;
++ }
++
++ ret = imx_audmux_ac97_config(pdev, int_port, ext_port);
++ if (ret) {
++ dev_err(&pdev->dev, "Audmux port setup failed\n");
++ return ret;
++ }
++
++ ssi_np = of_parse_phandle(np, "ssi-controller", 0);
++ if (!ssi_np) {
++ dev_err(&pdev->dev, "ssi-controller phandle missing or invalid\n");
++ return -EINVAL;
++ }
++ ssi_pdev = of_find_device_by_node(ssi_np);
++ if (!ssi_pdev) {
++ dev_err(&pdev->dev, "Failed to find SSI platform device\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
++ codec_np = of_parse_phandle(np, "audio-codec", 0);
++ if (!codec_np) {
++ dev_err(&pdev->dev, "audio-codec phandle missing or invalid\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++ codec_pdev = of_find_device_by_node(codec_np);
++ if (!codec_pdev) {
++ dev_err(&pdev->dev, "Failed to find codec device\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
++ imx_vt1613_dai.codec_name = dev_name(&codec_pdev->dev);
++ imx_vt1613_dai.cpu_of_node = ssi_np;
++ imx_vt1613_dai.cpu_dai_name = dev_name(&ssi_pdev->dev);
++ imx_vt1613_dai.platform_of_node = ssi_np;
++ imx_vt1613_dai.ops = &imx_vt1613_audio_ops;
++ imx_vt1613_dai.dai_fmt = SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFS;
++
++ imx_vt1613_card.dev = &pdev->dev;
++
++ platform_set_drvdata(pdev, &imx_vt1613_card);
++
++ ret = snd_soc_register_card(&imx_vt1613_card);
++ if (ret)
++ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
++
++fail:
++ if (ssi_np)
++ of_node_put(ssi_np);
++ if (codec_np)
++ of_node_put(codec_np);
++
++ return ret;
++
++}
++
++static int imx_vt1613_remove(struct platform_device *pdev)
++{
++ int ret;
++ struct snd_soc_card *card = platform_get_drvdata(pdev);
++
++ ret = snd_soc_unregister_card(card);
++
++ return ret;
++}
++
++static const struct of_device_id imx_vt1613_audio_match[] = {
++ { .compatible = "udoo,imx-vt1613-audio", },
++ {}
++};
++MODULE_DEVICE_TABLE(of, imx_vt1613_audio_match);
++
++static struct platform_driver imx_vt1613_driver = {
++ .driver = {
++ .name = DRV_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = imx_vt1613_audio_match,
++ },
++ .probe = imx_vt1613_probe,
++ .remove = imx_vt1613_remove,
++};
++module_platform_driver(imx_vt1613_driver);
++
++MODULE_AUTHOR("Seco <info@seco.it>");
++MODULE_DESCRIPTION(DRV_NAME ":Freescale i.MX VT1613 AC97 ASoC machine driver");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:imx-vt1613");
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-cs42888.c linux-openelec/sound/soc/fsl/imx-cs42888.c
+--- linux-3.14.36/sound/soc/fsl/imx-cs42888.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/imx-cs42888.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,369 @@
++/*
++ * Copyright (C) 2010-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/soc.h>
++#include <sound/initval.h>
++#include <sound/pcm_params.h>
++
++#include "fsl_esai.h"
++#include "fsl_asrc.h"
++
++#define CODEC_CLK_EXTER_OSC 1
++#define CODEC_CLK_ESAI_HCKT 2
++
++struct imx_priv {
++ int hw;
++ int fe_output_rate;
++ int fe_output_width;
++ unsigned int mclk_freq;
++ unsigned int codec_mclk;
++ struct platform_device *pdev;
++};
++
++static struct imx_priv card_priv;
++
++static int imx_cs42888_startup(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct imx_priv *priv = &card_priv;
++
++ if (!cpu_dai->active)
++ priv->hw = 0;
++ return 0;
++}
++
++static void imx_cs42888_shutdown(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct imx_priv *priv = &card_priv;
++
++ if (!cpu_dai->active)
++ priv->hw = 0;
++}
++
++static const struct {
++ int rate;
++ int ratio1;
++ int ratio2;
++} sr_vals[] = {
++ { 32000, 5, 3 },
++ { 48000, 5, 3 },
++ { 64000, 2, 1 },
++ { 96000, 2, 1 },
++ { 128000, 2, 1 },
++ { 44100, 5, 3 },
++ { 88200, 2, 1 },
++ { 176400, 0, 0 },
++ { 192000, 0, 0 },
++};
++
++static int imx_cs42888_surround_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct snd_soc_dai *codec_dai = rtd->codec_dai;
++ struct imx_priv *priv = &card_priv;
++ unsigned int rate = params_rate(params);
++ unsigned int lrclk_ratio = 0, i;
++ u32 dai_format = 0;
++
++ if (priv->hw)
++ return 0;
++
++ priv->hw = 1;
++
++ if (priv->codec_mclk & CODEC_CLK_ESAI_HCKT) {
++ for (i = 0; i < ARRAY_SIZE(sr_vals); i++) {
++ if (sr_vals[i].rate == rate) {
++ lrclk_ratio = sr_vals[i].ratio1;
++ break;
++ }
++ }
++ if (i == ARRAY_SIZE(sr_vals)) {
++ dev_err(&priv->pdev->dev, "Unsupported rate %dHz\n", rate);
++ return -EINVAL;
++ }
++
++ dai_format = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_CBS_CFS;
++
++ /* set the ESAI system clock as output */
++ snd_soc_dai_set_sysclk(cpu_dai, ESAI_CLK_EXTAL_DIV,
++ priv->mclk_freq, SND_SOC_CLOCK_OUT);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_TX_DIV_PM, 2);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_RX_DIV_PM, 2);
++ /* set codec Master clock */
++ snd_soc_dai_set_sysclk(codec_dai, 0, priv->mclk_freq,\
++ SND_SOC_CLOCK_IN);
++ } else if (priv->codec_mclk & CODEC_CLK_EXTER_OSC) {
++ for (i = 0; i < ARRAY_SIZE(sr_vals); i++) {
++ if (sr_vals[i].rate == rate) {
++ lrclk_ratio = sr_vals[i].ratio2;
++ break;
++ }
++ }
++ if (i == ARRAY_SIZE(sr_vals)) {
++ dev_err(&priv->pdev->dev, "Unsupported rate %dHz\n", rate);
++ return -EINVAL;
++ }
++
++ dai_format = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_CBS_CFS;
++
++ snd_soc_dai_set_sysclk(cpu_dai, ESAI_CLK_EXTAL,
++ priv->mclk_freq, SND_SOC_CLOCK_OUT);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_TX_DIV_PM, 0);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_RX_DIV_PM, 0);
++ snd_soc_dai_set_sysclk(codec_dai, 0, priv->mclk_freq,\
++ SND_SOC_CLOCK_IN);
++ }
++
++ /* set cpu DAI configuration */
++ snd_soc_dai_set_fmt(cpu_dai, dai_format);
++ /* set i.MX active slot mask */
++ snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32);
++ /* set the ratio */
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_TX_DIV_PSR, 1);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_TX_DIV_FP, lrclk_ratio);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_RX_DIV_PSR, 1);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_RX_DIV_FP, lrclk_ratio);
++
++ /* set codec DAI configuration */
++ snd_soc_dai_set_fmt(codec_dai, dai_format);
++ return 0;
++}
++
++static struct snd_soc_ops imx_cs42888_surround_ops = {
++ .startup = imx_cs42888_startup,
++ .shutdown = imx_cs42888_shutdown,
++ .hw_params = imx_cs42888_surround_hw_params,
++};
++
++static const struct snd_soc_dapm_widget imx_cs42888_dapm_widgets[] = {
++ SND_SOC_DAPM_LINE("Line Out Jack", NULL),
++ SND_SOC_DAPM_LINE("Line In Jack", NULL),
++};
++
++static const struct snd_soc_dapm_route audio_map[] = {
++ /* Line out jack */
++ {"Line Out Jack", NULL, "AOUT1L"},
++ {"Line Out Jack", NULL, "AOUT1R"},
++ {"Line Out Jack", NULL, "AOUT2L"},
++ {"Line Out Jack", NULL, "AOUT2R"},
++ {"Line Out Jack", NULL, "AOUT3L"},
++ {"Line Out Jack", NULL, "AOUT3R"},
++ {"Line Out Jack", NULL, "AOUT4L"},
++ {"Line Out Jack", NULL, "AOUT4R"},
++ {"AIN1L", NULL, "Line In Jack"},
++ {"AIN1R", NULL, "Line In Jack"},
++ {"AIN2L", NULL, "Line In Jack"},
++ {"AIN2R", NULL, "Line In Jack"},
++ {"esai-Playback", NULL, "asrc-Playback"},
++ {"codec-Playback", NULL, "esai-Playback"},/*Playback is the codec dai*/
++};
++
++static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
++ struct snd_pcm_hw_params *params) {
++
++ struct imx_priv *priv = &card_priv;
++
++ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min = priv->fe_output_rate;
++ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->max = priv->fe_output_rate;
++ snd_mask_none(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT));
++ if (priv->fe_output_width == 16)
++ snd_mask_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
++ SNDRV_PCM_FORMAT_S16_LE);
++ else
++ snd_mask_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
++ SNDRV_PCM_FORMAT_S24_LE);
++ return 0;
++}
++
++static struct snd_soc_dai_link imx_cs42888_dai[] = {
++ {
++ .name = "HiFi",
++ .stream_name = "HiFi",
++ .codec_dai_name = "CS42888",
++ .ops = &imx_cs42888_surround_ops,
++ },
++ {
++ .name = "HiFi-ASRC-FE",
++ .stream_name = "HiFi-ASRC-FE",
++ .codec_name = "snd-soc-dummy",
++ .codec_dai_name = "snd-soc-dummy-dai",
++ .dynamic = 1,
++ },
++ {
++ .name = "HiFi-ASRC-BE",
++ .stream_name = "HiFi-ASRC-BE",
++ .codec_dai_name = "CS42888",
++ .platform_name = "snd-soc-dummy",
++ .no_pcm = 1,
++ .ops = &imx_cs42888_surround_ops,
++ .be_hw_params_fixup = be_hw_params_fixup,
++ },
++};
++
++static struct snd_soc_card snd_soc_card_imx_cs42888 = {
++ .name = "cs42888-audio",
++ .dai_link = imx_cs42888_dai,
++ .dapm_widgets = imx_cs42888_dapm_widgets,
++ .num_dapm_widgets = ARRAY_SIZE(imx_cs42888_dapm_widgets),
++ .dapm_routes = audio_map,
++ .num_dapm_routes = ARRAY_SIZE(audio_map),
++};
++
++/*
++ * This function will register the snd_soc_pcm_link drivers.
++ */
++static int imx_cs42888_probe(struct platform_device *pdev)
++{
++ struct device_node *esai_np, *codec_np;
++ struct device_node *asrc_np;
++ struct platform_device *esai_pdev;
++ struct platform_device *asrc_pdev = NULL;
++ struct i2c_client *codec_dev;
++ struct imx_priv *priv = &card_priv;
++ struct clk *codec_clk = NULL;
++ const char *mclk_name;
++ int ret;
++
++ priv->pdev = pdev;
++
++ esai_np = of_parse_phandle(pdev->dev.of_node, "esai-controller", 0);
++ codec_np = of_parse_phandle(pdev->dev.of_node, "audio-codec", 0);
++ if (!esai_np || !codec_np) {
++ dev_err(&pdev->dev, "phandle missing or invalid\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
++ asrc_np = of_parse_phandle(pdev->dev.of_node, "asrc-controller", 0);
++ if (asrc_np) {
++ asrc_pdev = of_find_device_by_node(asrc_np);
++ if (asrc_pdev) {
++ struct fsl_asrc_p2p *asrc_p2p;
++ asrc_p2p = platform_get_drvdata(asrc_pdev);
++ asrc_p2p->per_dev = ESAI;
++ priv->fe_output_rate = asrc_p2p->output_rate;
++ priv->fe_output_width = asrc_p2p->output_width;
++ }
++ }
++
++ esai_pdev = of_find_device_by_node(esai_np);
++ if (!esai_pdev) {
++ dev_err(&pdev->dev, "failed to find ESAI platform device\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++ codec_dev = of_find_i2c_device_by_node(codec_np);
++ if (!codec_dev) {
++ dev_err(&pdev->dev, "failed to find codec platform device\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
++ /*if there is no asrc controller, we only enable one device*/
++ if (!asrc_pdev) {
++ imx_cs42888_dai[0].codec_of_node = codec_np;
++ imx_cs42888_dai[0].cpu_dai_name = dev_name(&esai_pdev->dev);
++ imx_cs42888_dai[0].platform_of_node = esai_np;
++ snd_soc_card_imx_cs42888.num_links = 1;
++ } else {
++ imx_cs42888_dai[0].codec_of_node = codec_np;
++ imx_cs42888_dai[0].cpu_dai_name = dev_name(&esai_pdev->dev);
++ imx_cs42888_dai[0].platform_of_node = esai_np;
++ imx_cs42888_dai[1].cpu_dai_name = dev_name(&asrc_pdev->dev);
++ imx_cs42888_dai[1].platform_name = "imx-pcm-asrc";
++ imx_cs42888_dai[2].codec_of_node = codec_np;
++ imx_cs42888_dai[2].cpu_dai_name = dev_name(&esai_pdev->dev);
++ snd_soc_card_imx_cs42888.num_links = 3;
++ }
++
++ codec_clk = devm_clk_get(&codec_dev->dev, NULL);
++ if (IS_ERR(codec_clk)) {
++ ret = PTR_ERR(codec_clk);
++ dev_err(&codec_dev->dev, "failed to get codec clk: %d\n", ret);
++ goto fail;
++ }
++ priv->mclk_freq = clk_get_rate(codec_clk);
++
++ ret = of_property_read_string(codec_np, "clock-names", &mclk_name);
++ if (ret) {
++ dev_err(&pdev->dev, "%s: failed to get mclk source\n", __func__);
++ goto fail;
++ }
++ if (!strcmp(mclk_name, "codec_osc"))
++ priv->codec_mclk = CODEC_CLK_EXTER_OSC;
++ else if (!strcmp(mclk_name, "esai"))
++ priv->codec_mclk = CODEC_CLK_ESAI_HCKT;
++ else {
++ dev_err(&pdev->dev, "mclk source is not correct %s\n", mclk_name);
++ goto fail;
++ }
++
++ snd_soc_card_imx_cs42888.dev = &pdev->dev;
++
++ platform_set_drvdata(pdev, &snd_soc_card_imx_cs42888);
++
++ ret = snd_soc_register_card(&snd_soc_card_imx_cs42888);
++ if (ret)
++ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
++fail:
++ if (esai_np)
++ of_node_put(esai_np);
++ if (codec_np)
++ of_node_put(codec_np);
++ return ret;
++}
++
++static int imx_cs42888_remove(struct platform_device *pdev)
++{
++ snd_soc_unregister_card(&snd_soc_card_imx_cs42888);
++ return 0;
++}
++
++static const struct of_device_id imx_cs42888_dt_ids[] = {
++ { .compatible = "fsl,imx-audio-cs42888", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver imx_cs42888_driver = {
++ .probe = imx_cs42888_probe,
++ .remove = imx_cs42888_remove,
++ .driver = {
++ .name = "imx-cs42888",
++ .owner = THIS_MODULE,
++ .pm = &snd_soc_pm_ops,
++ .of_match_table = imx_cs42888_dt_ids,
++ },
++};
++module_platform_driver(imx_cs42888_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("ALSA SoC cs42888 Machine Layer Driver");
++MODULE_ALIAS("platform:imx-cs42888");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-hdmi.c linux-openelec/sound/soc/fsl/imx-hdmi.c
+--- linux-3.14.36/sound/soc/fsl/imx-hdmi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/imx-hdmi.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,113 @@
++/*
++ * ASoC HDMI Transmitter driver for IMX development boards
++ *
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * based on stmp3780_devb_hdmi.c
++ *
++ * Vladimir Barinov <vbarinov@embeddedalley.com>
++ *
++ * Copyright 2008 SigmaTel, Inc
++ * Copyright 2008 Embedded Alley Solutions, Inc
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <sound/soc.h>
++
++#include "imx-hdmi.h"
++
++/* imx digital audio interface glue - connects codec <--> CPU */
++static struct snd_soc_dai_link imx_hdmi_dai_link = {
++ .name = "i.MX HDMI Audio Tx",
++ .stream_name = "i.MX HDMI Audio Tx",
++ .codec_dai_name = "hdmi-hifi",
++ .codec_name = "hdmi-audio-codec",
++ .platform_name = "imx-hdmi-audio",
++};
++
++static struct snd_soc_card snd_soc_card_imx_hdmi = {
++ .name = "imx-hdmi-soc",
++ .dai_link = &imx_hdmi_dai_link,
++ .num_links = 1,
++};
++
++static int imx_hdmi_audio_probe(struct platform_device *pdev)
++{
++ struct device_node *hdmi_np, *np = pdev->dev.of_node;
++ struct snd_soc_card *card = &snd_soc_card_imx_hdmi;
++ struct platform_device *hdmi_pdev;
++ int ret = 0;
++
++ if (!hdmi_get_registered()) {
++ dev_err(&pdev->dev, "initialize HDMI-audio failed. load HDMI-video first!\n");
++ return -ENODEV;
++ }
++
++ hdmi_np = of_parse_phandle(np, "hdmi-controller", 0);
++ if (!hdmi_np) {
++ dev_err(&pdev->dev, "failed to find hdmi-audio cpudai\n");
++ ret = -EINVAL;
++ goto end;
++ }
++
++ hdmi_pdev = of_find_device_by_node(hdmi_np);
++ if (!hdmi_pdev) {
++ dev_err(&pdev->dev, "failed to find SSI platform device\n");
++ ret = -EINVAL;
++ goto end;
++ }
++
++ card->dev = &pdev->dev;
++ card->dai_link->cpu_dai_name = dev_name(&hdmi_pdev->dev);
++
++ platform_set_drvdata(pdev, card);
++
++ ret = snd_soc_register_card(card);
++ if (ret)
++ dev_err(&pdev->dev, "failed to register card: %d\n", ret);
++
++end:
++ if (hdmi_np)
++ of_node_put(hdmi_np);
++
++ return ret;
++}
++
++static int imx_hdmi_audio_remove(struct platform_device *pdev)
++{
++ struct snd_soc_card *card = platform_get_drvdata(pdev);
++
++ snd_soc_unregister_card(card);
++
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx-audio-hdmi", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
++
++static struct platform_driver imx_hdmi_audio_driver = {
++ .probe = imx_hdmi_audio_probe,
++ .remove = imx_hdmi_audio_remove,
++ .driver = {
++ .of_match_table = imx_hdmi_dt_ids,
++ .name = "imx-audio-hdmi",
++ .owner = THIS_MODULE,
++ .pm = &snd_soc_pm_ops,
++ },
++};
++
++module_platform_driver(imx_hdmi_audio_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IMX HDMI TX ASoC driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:imx-audio-hdmi");
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-hdmi-dma.c linux-openelec/sound/soc/fsl/imx-hdmi-dma.c
+--- linux-3.14.36/sound/soc/fsl/imx-hdmi-dma.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/imx-hdmi-dma.c 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,1240 @@
++/*
++ * imx-hdmi-dma.c -- HDMI DMA driver for ALSA Soc Audio Layer
++ *
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * based on imx-pcm-dma-mx2.c
++ * Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
++ *
++ * This code is based on code copyrighted by Freescale,
++ * Liam Girdwood, Javier Martin and probably others.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/platform_data/dma-imx.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "imx-hdmi.h"
++
++#define HDMI_DMA_BURST_UNSPECIFIED_LEGNTH 0
++#define HDMI_DMA_BURST_INCR4 1
++#define HDMI_DMA_BURST_INCR8 2
++#define HDMI_DMA_BURST_INCR16 3
++
++#define HDMI_BASE_ADDR 0x00120000
++
++struct hdmi_sdma_script {
++ int control_reg_addr;
++ int status_reg_addr;
++ int dma_start_addr;
++ u32 buffer[20];
++};
++
++struct hdmi_dma_priv {
++ struct snd_pcm_substream *substream;
++ struct platform_device *pdev;
++
++ struct snd_dma_buffer hw_buffer;
++ unsigned long buffer_bytes;
++ unsigned long appl_bytes;
++
++ int periods;
++ int period_time;
++ int period_bytes;
++ int dma_period_bytes;
++ int buffer_ratio;
++
++ unsigned long offset;
++
++ snd_pcm_format_t format;
++ int sample_align;
++ int sample_bits;
++ int channels;
++ int rate;
++
++ int frame_idx;
++
++ bool tx_active;
++ spinlock_t irq_lock;
++
++ /* SDMA part */
++ dma_addr_t phy_hdmi_sdma_t;
++ struct hdmi_sdma_script *hdmi_sdma_t;
++ struct dma_chan *dma_channel;
++ struct imx_dma_data dma_data;
++ struct dma_async_tx_descriptor *desc;
++ struct imx_hdmi_sdma_params sdma_params;
++};
++
++/* bit 0:0:0:b:p(0):c:(u)0:(v)0 */
++/* max 8 channels supported; channels are interleaved */
++static u8 g_packet_head_table[48 * 8];
++
++/* channel remapping for hdmi_dma_copy_xxxx() */
++static u8 g_channel_remap_table[24];
++
++/* default mapping tables */
++static const u8 channel_maps_alsa_cea[5][8] = {
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 0CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 2CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 4CH: no remapping */
++ { 0, 1, 4, 5, 3, 2, 6, 7 }, /* 6CH: ALSA5.1 to CEA */
++ { 0, 1, 6, 7, 3, 2, 4, 5 } /* 8CH: ALSA7.1 to CEA */
++};
++
++static const u8 channel_maps_cea_alsa[5][8] = {
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 0CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 2CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 4CH: no remapping */
++ { 0, 1, 5, 4, 2, 3, 6, 7 }, /* 6CH: CEA to ALSA5.1 */
++ { 0, 1, 5, 4, 6, 7, 2, 3 } /* 8CH: CEA to ALSA7.1 */
++};
++
++union hdmi_audio_header_t iec_header;
++EXPORT_SYMBOL(iec_header);
++
++/*
++ * Note that the period size for DMA != period size for ALSA because the
++ * driver adds iec frame info to the audio samples (in hdmi_dma_copy).
++ *
++ * Each 4 byte subframe = 1 byte of iec data + 3 byte audio sample.
++ *
++ * A 16 bit audio sample becomes 32 bits including the frame info. Ratio=2
++ * A 24 bit audio sample becomes 32 bits including the frame info. Ratio=3:4
++ * If the 24 bit raw audio is in 32 bit words, the
++ *
++ * Original Packed into subframe Ratio of size Format
++ * sample how many size of DMA buffer
++ * (bits) bits to ALSA buffer
++ * -------- ----------- -------- -------------- ------------------------
++ * 16 16 32 2 SNDRV_PCM_FORMAT_S16_LE
++ * 24 24 32 1.33 SNDRV_PCM_FORMAT_S24_3LE*
++ * 24 32 32 1 SNDRV_PCM_FORMAT_S24_LE
++ *
++ * *so SNDRV_PCM_FORMAT_S24_3LE is not supported.
++ */
++
++/*
++ * The minimum dma period is one IEC audio frame (192 * 4 * channels).
++ * The maximum dma period for the HDMI DMA is 8K.
++ *
++ * channels minimum maximum
++ * dma period dma period
++ * -------- ------------------ ----------
++ * 2 192 * 4 * 2 = 1536 * 4 = 6144
++ * 4 192 * 4 * 4 = 3072 * 2 = 6144
++ * 6 192 * 4 * 6 = 4608 * 1 = 4608
++ * 8 192 * 4 * 8 = 6144 * 1 = 6144
++ *
++ * Bottom line:
++ * 1. Must keep the ratio of DMA buffer to ALSA buffer consistent.
++ * 2. frame_idx is saved in the private data, so even if a frame cannot be
++ * transmitted in a period, it can be continued in the next period. This
++ * is necessary for 6 ch.
++ */
++#define HDMI_DMA_PERIOD_BYTES (12288)
++#define HDMI_DMA_BUF_SIZE (1280 * 1024)
++#define HDMI_PCM_BUF_SIZE (1280 * 1024)
++
++#define hdmi_audio_debug(dev, reg) \
++ dev_dbg(dev, #reg ": 0x%02x\n", hdmi_readb(reg))
++
++#ifdef DEBUG
++static void dumpregs(struct device *dev)
++{
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_CONF0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_START);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STOP);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_THRSLD);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STRADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STPADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BSTADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MBLENGTH0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MBLENGTH1);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STAT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_INT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MASK);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_POL);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_CONF1);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFSTAT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFINT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFMASK);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFPOL);
++ hdmi_audio_debug(dev, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_audio_debug(dev, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_audio_debug(dev, HDMI_IH_MUTE);
++}
++
++static void dumppriv(struct device *dev, struct hdmi_dma_priv *priv)
++{
++ dev_dbg(dev, "channels = %d\n", priv->channels);
++ dev_dbg(dev, "periods = %d\n", priv->periods);
++ dev_dbg(dev, "period_bytes = %d\n", priv->period_bytes);
++ dev_dbg(dev, "dma period_bytes = %d\n", priv->dma_period_bytes);
++ dev_dbg(dev, "buffer_ratio = %d\n", priv->buffer_ratio);
++ dev_dbg(dev, "hw dma buffer = 0x%08x\n", (int)priv->hw_buffer.addr);
++ dev_dbg(dev, "dma buf size = %d\n", (int)priv->buffer_bytes);
++ dev_dbg(dev, "sample_rate = %d\n", (int)priv->rate);
++}
++#else
++static void dumpregs(struct device *dev) {}
++static void dumppriv(struct device *dev, struct hdmi_dma_priv *priv) {}
++#endif
++
++/*
++ * Conditions for DMA to work:
++ * ((final_addr - initial_addr)>>2)+1) < 2k. So max period is 8k.
++ * (inital_addr & 0x3) == 0
++ * (final_addr & 0x3) == 0x3
++ *
++ * The DMA Period should be an integer multiple of the IEC 60958 audio
++ * frame size, which is 768 bytes (192 * 4).
++ */
++static void hdmi_dma_set_addr(int start_addr, int dma_period_bytes)
++{
++ int final_addr = start_addr + dma_period_bytes - 1;
++
++ hdmi_write4(start_addr, HDMI_AHB_DMA_STRADDR0);
++ hdmi_write4(final_addr, HDMI_AHB_DMA_STPADDR0);
++}
++
++static void hdmi_dma_irq_set(bool set)
++{
++ u8 val = hdmi_readb(HDMI_AHB_DMA_MASK);
++
++ if (set)
++ val |= HDMI_AHB_DMA_DONE;
++ else
++ val &= (u8)~HDMI_AHB_DMA_DONE;
++
++ hdmi_writeb(val, HDMI_AHB_DMA_MASK);
++}
++
++static void hdmi_mask(int mask)
++{
++ u8 regval = hdmi_readb(HDMI_AHB_DMA_MASK);
++
++ if (mask)
++ regval |= HDMI_AHB_DMA_ERROR | HDMI_AHB_DMA_FIFO_EMPTY;
++ else
++ regval &= (u8)~(HDMI_AHB_DMA_ERROR | HDMI_AHB_DMA_FIFO_EMPTY);
++
++ hdmi_writeb(regval, HDMI_AHB_DMA_MASK);
++}
++
++static inline int odd_ones(unsigned a)
++{
++ a ^= a >> 16;
++ a ^= a >> 8;
++ a ^= a >> 4;
++ a ^= a >> 2;
++ a ^= a >> 1;
++
++ return a & 1;
++}
++
++/* Add frame information for one pcm subframe */
++static u32 hdmi_dma_add_frame_info(struct hdmi_dma_priv *priv,
++ u32 pcm_data, int subframe_idx)
++{
++ union hdmi_audio_dma_data_t subframe;
++ union hdmi_audio_header_t tmp_header;
++
++ subframe.U = 0;
++
++ if (priv->frame_idx < 42) {
++ tmp_header = iec_header;
++
++ /* fill v (validity) */
++ subframe.B.v = tmp_header.B.linear_pcm;
++
++ /* fill c (channel status) */
++ if (tmp_header.B.linear_pcm == 0)
++ tmp_header.B.channel = subframe_idx + 1;
++ subframe.B.c = tmp_header.U >> priv->frame_idx;
++ } else {
++ /* fill v (validity), c is always zero */
++ subframe.B.v = iec_header.B.linear_pcm;
++ }
++
++ /* fill data */
++ if (priv->sample_bits == 16)
++ pcm_data <<= 8;
++ subframe.B.data = pcm_data;
++
++ /* fill p (parity) Note: Do not include b ! */
++ subframe.B.p = odd_ones(subframe.U);
++
++ /* fill b (start-of-block) */
++ if (priv->frame_idx == 0)
++ subframe.B.b = 1;
++
++ return subframe.U;
++}
++
++static void init_table(int channels)
++{
++ int i, map_sel, ch;
++ unsigned char *p = g_packet_head_table;
++ union hdmi_audio_header_t tmp_header = iec_header;
++
++ for (i = 0; i < 48; i++) {
++ int b = 0;
++ if (i == 0)
++ b = 1;
++
++ for (ch = 0; ch < channels; ch++) {
++ int c = 0;
++ if (i < 42) {
++ tmp_header.B.channel = ch + 1;
++ c = (tmp_header.U >> i) & 0x1;
++ }
++ /* preset bit p as c */
++ *p++ = (b << 4) | (c << 2) | (c << 3);
++ }
++ }
++
++ map_sel = channels / 2;
++ for (i = 0; i < 24; i++) {
++ g_channel_remap_table[i] = (i / channels) * channels +
++ channel_maps_cea_alsa[map_sel][i % channels];
++ }
++}
++
++/* Optimization for IEC head */
++static void hdmi_dma_copy_16_c_lut(u16 *src, u32 *dst, int samples,
++ u8 *lookup_table)
++{
++ u32 sample, head;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]];
++
++ /* get packet header and p-bit */
++ head = *lookup_table++ ^ (odd_ones(sample) << 3);
++
++ /* store sample and header */
++ *dst++ = (head << 24) | (sample << 8);
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_16_c_fast(u16 *src, u32 *dst, int samples)
++{
++ u32 sample;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]];
++
++ /* store sample and p-bit */
++ *dst++ = (odd_ones(sample) << (3+24)) | (sample << 8);
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_24_c_lut(u32 *src, u32 *dst, int samples,
++ u8 *lookup_table)
++{
++ u32 sample, head;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]] & 0x00ffffff;
++
++ /* get packet header and p-bit */
++ head = *lookup_table++ ^ (odd_ones(sample) << 3);
++
++ /* store sample and header */
++ *dst++ = (head << 24) | sample;
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_24_c_fast(u32 *src, u32 *dst, int samples)
++{
++ u32 sample;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]] & 0x00ffffff;
++
++ /* store sample and p-bit */
++ *dst++ = (odd_ones(sample) << (3+24)) | sample;
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_mmap_copy(u8 *src, int samplesize, u32 *dst, int framecnt, int channelcnt)
++{
++ /* split input frames into 192-frame each */
++ int count_in_192 = (framecnt + 191) / 192;
++ int i;
++
++ typedef void (*fn_copy_lut)(u8 *src, u32 *dst, int samples, u8 *lookup_table);
++ typedef void (*fn_copy_fast)(u8 *src, u32 *dst, int samples);
++ fn_copy_lut copy_lut;
++ fn_copy_fast copy_fast;
++
++ if (samplesize == 4) {
++ copy_lut = (fn_copy_lut)hdmi_dma_copy_24_c_lut;
++ copy_fast = (fn_copy_fast)hdmi_dma_copy_24_c_fast;
++ } else {
++ copy_lut = (fn_copy_lut)hdmi_dma_copy_16_c_lut;
++ copy_fast = (fn_copy_fast)hdmi_dma_copy_16_c_fast;
++ }
++
++ for (i = 0; i < count_in_192; i++) {
++ int count, samples;
++
++ /* handles frame index [0, 48) */
++ count = (framecnt < 48) ? framecnt : 48;
++ samples = count * channelcnt;
++ copy_lut(src, dst, samples, g_packet_head_table);
++ framecnt -= count;
++ if (framecnt == 0)
++ break;
++
++ src += samples * samplesize;
++ dst += samples;
++
++ /* handles frame index [48, 192) */
++ count = (framecnt < 192 - 48) ? framecnt : 192 - 48;
++ samples = count * channelcnt;
++ copy_fast(src, dst, samples);
++ framecnt -= count;
++ src += samples * samplesize;
++ dst += samples;
++ }
++}
++
++static void hdmi_dma_mmap_copy(struct snd_pcm_substream *substream,
++ int offset, int count)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++ u32 framecount, *dst;
++
++ framecount = count / (priv->sample_align * priv->channels);
++
++ /* hw_buffer is the destination for pcm data plus frame info. */
++ dst = (u32 *)(priv->hw_buffer.area + (offset * priv->buffer_ratio));
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ case SNDRV_PCM_FORMAT_S24_LE:
++ /* dma_buffer is the mmapped buffer we are copying pcm from. */
++ hdmi_mmap_copy(runtime->dma_area + offset,
++ priv->sample_align, dst, framecount, priv->channels);
++ break;
++ default:
++ dev_err(dev, "unsupported sample format %s\n",
++ snd_pcm_format_name(priv->format));
++ return;
++ }
++}
++
++static void hdmi_dma_data_copy(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv, char type)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned long offset, count, appl_bytes, space_to_end;
++
++ if (runtime->access != SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
++ return;
++
++ appl_bytes = frames_to_bytes(runtime, runtime->status->hw_ptr);
++
++ switch (type) {
++ case 'p':
++ offset = (appl_bytes + 2 * priv->period_bytes) % priv->buffer_bytes;
++ count = priv->period_bytes;
++ space_to_end = priv->period_bytes;
++ break;
++ case 'b':
++ offset = appl_bytes % priv->buffer_bytes;
++ count = priv->buffer_bytes;
++ space_to_end = priv->buffer_bytes - offset;
++ break;
++ default:
++ return;
++ }
++
++ if (count <= space_to_end) {
++ hdmi_dma_mmap_copy(substream, offset, count);
++ } else {
++ hdmi_dma_mmap_copy(substream, offset, space_to_end);
++ hdmi_dma_mmap_copy(substream, 0, count - space_to_end);
++ }
++}
++
++static void hdmi_sdma_callback(void *data)
++{
++ struct hdmi_dma_priv *priv = (struct hdmi_dma_priv *)data;
++ struct snd_pcm_substream *substream = priv->substream;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ if (runtime && runtime->dma_area && priv->tx_active) {
++ priv->offset += priv->period_bytes;
++ priv->offset %= priv->period_bytes * priv->periods;
++
++ /* Copy data by period_bytes */
++ hdmi_dma_data_copy(substream, priv, 'p');
++
++ snd_pcm_period_elapsed(substream);
++ }
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++
++ return;
++}
++
++static int hdmi_dma_set_thrsld_incrtype(struct device *dev, int channels)
++{
++ u8 mask = HDMI_AHB_DMA_CONF0_BURST_MODE | HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK;
++ u8 val = hdmi_readb(HDMI_AHB_DMA_CONF0) & ~mask;
++ int incr_type, threshold;
++
++ switch (hdmi_readb(HDMI_REVISION_ID)) {
++ case 0x0a:
++ incr_type = HDMI_DMA_BURST_INCR4;
++ if (channels == 2)
++ threshold = 126;
++ else
++ threshold = 124;
++ break;
++ case 0x1a:
++ incr_type = HDMI_DMA_BURST_INCR8;
++ threshold = 128;
++ break;
++ default:
++ dev_err(dev, "unknown hdmi controller!\n");
++ return -ENODEV;
++ }
++
++ hdmi_writeb(threshold, HDMI_AHB_DMA_THRSLD);
++
++ switch (incr_type) {
++ case HDMI_DMA_BURST_UNSPECIFIED_LEGNTH:
++ break;
++ case HDMI_DMA_BURST_INCR4:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE;
++ break;
++ case HDMI_DMA_BURST_INCR8:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE |
++ HDMI_AHB_DMA_CONF0_INCR8;
++ break;
++ case HDMI_DMA_BURST_INCR16:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE |
++ HDMI_AHB_DMA_CONF0_INCR16;
++ break;
++ default:
++ dev_err(dev, "invalid increment type: %d!", incr_type);
++ return -EINVAL;
++ }
++
++ hdmi_writeb(val, HDMI_AHB_DMA_CONF0);
++
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_THRSLD);
++
++ return 0;
++}
++
++static int hdmi_dma_configure_dma(struct device *dev, int channels)
++{
++ int ret;
++ static u8 chan_enable[] = { 0x00, 0x03, 0x33, 0x3f, 0xff };
++
++ if (channels <= 0 || channels > 8 || channels % 2 != 0) {
++ dev_err(dev, "unsupported channel number: %d\n", channels);
++ return -EINVAL;
++ }
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, EN_HLOCK, 0x1);
++
++ ret = hdmi_dma_set_thrsld_incrtype(dev, channels);
++ if (ret)
++ return ret;
++
++ hdmi_writeb(chan_enable[channels / 2], HDMI_AHB_DMA_CONF1);
++
++ return 0;
++}
++
++static void hdmi_dma_init_iec_header(void)
++{
++ iec_header.U = 0;
++
++ iec_header.B.consumer = 0; /* Consumer use */
++ iec_header.B.linear_pcm = 0; /* linear pcm audio */
++ iec_header.B.copyright = 1; /* no copyright */
++ iec_header.B.pre_emphasis = 0; /* 2 channels without pre-emphasis */
++ iec_header.B.mode = 0; /* Mode 0 */
++
++ iec_header.B.category_code = 0;
++
++ iec_header.B.source = 2; /* stereo */
++ iec_header.B.channel = 0;
++
++ iec_header.B.sample_freq = 0x02; /* 48 KHz */
++ iec_header.B.clock_acc = 0; /* Level II */
++
++ iec_header.B.word_length = 0x02; /* 16 bits */
++ iec_header.B.org_sample_freq = 0x0D; /* 48 KHz */
++
++ iec_header.B.cgms_a = 0; /* Copying is permitted without restriction */
++}
++
++static int hdmi_dma_update_iec_header(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++
++ iec_header.B.source = priv->channels;
++
++ switch (priv->rate) {
++ case 32000:
++ iec_header.B.sample_freq = 0x03;
++ iec_header.B.org_sample_freq = 0x0C;
++ break;
++ case 44100:
++ iec_header.B.sample_freq = 0x00;
++ iec_header.B.org_sample_freq = 0x0F;
++ break;
++ case 48000:
++ iec_header.B.sample_freq = 0x02;
++ iec_header.B.org_sample_freq = 0x0D;
++ break;
++ case 88200:
++ iec_header.B.sample_freq = 0x08;
++ iec_header.B.org_sample_freq = 0x07;
++ break;
++ case 96000:
++ iec_header.B.sample_freq = 0x0A;
++ iec_header.B.org_sample_freq = 0x05;
++ break;
++ case 176400:
++ iec_header.B.sample_freq = 0x0C;
++ iec_header.B.org_sample_freq = 0x03;
++ break;
++ case 192000:
++ iec_header.B.sample_freq = 0x0E;
++ iec_header.B.org_sample_freq = 0x01;
++ break;
++ default:
++ dev_err(dev, "unsupported sample rate\n");
++ return -EFAULT;
++ }
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ iec_header.B.word_length = 0x02;
++ break;
++ case SNDRV_PCM_FORMAT_S24_LE:
++ iec_header.B.word_length = 0x0b;
++ break;
++ default:
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++/*
++ * The HDMI block transmits the audio data without adding any of the audio
++ * frame bits. So we have to copy the raw dma data from the ALSA buffer
++ * to the DMA buffer, adding the frame information.
++ */
++static int hdmi_dma_copy(struct snd_pcm_substream *substream, int channel,
++ snd_pcm_uframes_t pos, void __user *buf,
++ snd_pcm_uframes_t frames)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ unsigned int count = frames_to_bytes(runtime, frames);
++ unsigned int pos_bytes = frames_to_bytes(runtime, pos);
++ int channel_no, pcm_idx, subframe_idx, bits_left, sample_bits, map_sel;
++ u32 pcm_data[8], pcm_temp, *hw_buf, sample_block, inc_mask;
++
++ /* Adding frame info to pcm data from userspace and copy to hw_buffer */
++ hw_buf = (u32 *)(priv->hw_buffer.area + (pos_bytes * priv->buffer_ratio));
++
++ sample_bits = priv->sample_align * 8;
++ sample_block = priv->sample_align * priv->channels;
++
++ if (iec_header.B.linear_pcm == 0) {
++ map_sel = priv->channels / 2;
++ inc_mask = 1 << (priv->channels - 1);
++ } else {
++ map_sel = 0;
++ inc_mask = 0xaa;
++ }
++
++ while (count > 0) {
++ if (copy_from_user(pcm_data, buf, sample_block))
++ return -EFAULT;
++
++ buf += sample_block;
++ count -= sample_block;
++
++ channel_no = pcm_idx = 0;
++ do {
++ pcm_temp = pcm_data[pcm_idx++];
++ bits_left = 32;
++ for (;;) {
++ /* re-map channels */
++ subframe_idx = channel_maps_alsa_cea[map_sel][channel_no];
++
++ /* Save the header info to the audio dma buffer */
++ hw_buf[subframe_idx] = hdmi_dma_add_frame_info(
++ priv, pcm_temp, subframe_idx);
++
++ if (inc_mask & (1 << channel_no)) {
++ if (++priv->frame_idx == 192)
++ priv->frame_idx = 0;
++ }
++
++ channel_no++;
++
++ if (bits_left <= sample_bits)
++ break;
++
++ bits_left -= sample_bits;
++ pcm_temp >>= sample_bits;
++ }
++ } while (channel_no < priv->channels);
++
++ hw_buf += priv->channels;
++ }
++
++ return 0;
++}
++
++static int hdmi_sdma_initbuf(struct device *dev, struct hdmi_dma_priv *priv)
++{
++ struct hdmi_sdma_script *hdmi_sdma_t = priv->hdmi_sdma_t;
++ u32 *head, *tail, i;
++
++ if (!hdmi_sdma_t) {
++ dev_err(dev, "hdmi private addr invalid!!!\n");
++ return -EINVAL;
++ }
++
++ hdmi_sdma_t->control_reg_addr = HDMI_BASE_ADDR + HDMI_AHB_DMA_START;
++ hdmi_sdma_t->status_reg_addr = HDMI_BASE_ADDR + HDMI_IH_AHBDMAAUD_STAT0;
++ hdmi_sdma_t->dma_start_addr = HDMI_BASE_ADDR + HDMI_AHB_DMA_STRADDR0;
++
++ head = &hdmi_sdma_t->buffer[0];
++ tail = &hdmi_sdma_t->buffer[1];
++
++ for (i = 0; i < priv->sdma_params.buffer_num; i++) {
++ *head = priv->hw_buffer.addr + i * priv->period_bytes * priv->buffer_ratio;
++ *tail = *head + priv->dma_period_bytes - 1;
++ head += 2;
++ tail += 2;
++ }
++
++ return 0;
++}
++
++static int hdmi_sdma_config(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dai_dev = &priv->pdev->dev;
++ struct device *dev = rtd->platform->dev;
++ struct dma_slave_config slave_config;
++ int ret;
++
++ priv->dma_channel = dma_request_slave_channel(dai_dev, "tx");
++ if (priv->dma_channel == NULL) {
++ dev_err(dev, "failed to alloc dma channel\n");
++ return -EBUSY;
++ }
++
++ priv->dma_data.data_addr1 = &priv->sdma_params.buffer_num;
++ priv->dma_data.data_addr2 = &priv->sdma_params.phyaddr;
++ priv->dma_channel->private = &priv->dma_data;
++
++ slave_config.direction = DMA_TRANS_NONE;
++ slave_config.dma_request0 = 0;
++ slave_config.dma_request1 = 0;
++
++ ret = dmaengine_slave_config(priv->dma_channel, &slave_config);
++ if (ret) {
++ dev_err(dev, "failed to config slave dma\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int hdmi_dma_hw_free(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ if (priv->dma_channel) {
++ dma_release_channel(priv->dma_channel);
++ priv->dma_channel = NULL;
++ }
++
++ return 0;
++}
++
++static int hdmi_dma_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++ int ret;
++
++ priv->buffer_bytes = params_buffer_bytes(params);
++ priv->periods = params_periods(params);
++ priv->period_bytes = params_period_bytes(params);
++ priv->channels = params_channels(params);
++ priv->format = params_format(params);
++ priv->rate = params_rate(params);
++
++ priv->offset = 0;
++ priv->period_time = HZ / (priv->rate / params_period_size(params));
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ priv->buffer_ratio = 2;
++ priv->sample_align = 2;
++ priv->sample_bits = 16;
++ break;
++ case SNDRV_PCM_FORMAT_S24_LE:
++ /* 24 bit audio in 32 bit word */
++ priv->buffer_ratio = 1;
++ priv->sample_align = 4;
++ priv->sample_bits = 24;
++ break;
++ default:
++ dev_err(dev, "unsupported sample format: %d\n", priv->format);
++ return -EINVAL;
++ }
++
++ priv->dma_period_bytes = priv->period_bytes * priv->buffer_ratio;
++ priv->sdma_params.buffer_num = priv->periods;
++ priv->sdma_params.phyaddr = priv->phy_hdmi_sdma_t;
++
++ ret = hdmi_sdma_initbuf(dev, priv);
++ if (ret)
++ return ret;
++
++ ret = hdmi_sdma_config(substream, priv);
++ if (ret)
++ return ret;
++
++ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
++
++ ret = hdmi_dma_configure_dma(dev, priv->channels);
++ if (ret)
++ return ret;
++
++ hdmi_dma_set_addr(priv->hw_buffer.addr, priv->dma_period_bytes);
++
++ dumppriv(dev, priv);
++
++ hdmi_dma_update_iec_header(substream);
++
++ /* Init par for mmap optimizate */
++ init_table(priv->channels);
++
++ priv->appl_bytes = 0;
++ priv->frame_idx = 0;
++
++ return 0;
++}
++
++static void hdmi_dma_trigger_init(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ unsigned long status;
++ bool hbr;
++
++ /*
++ * Set HBR mode (>192kHz IEC-61937 HD audio bitstreaming).
++ * This is done this late because userspace may alter the AESx
++ * parameters until the stream is finally prepared.
++ */
++ hbr = (iec_header.B.linear_pcm != 0 && priv->channels == 8);
++ hdmi_audio_writeb(AHB_DMA_CONF0, HBR, !!hbr);
++
++ /*
++ * Override AES3 - parameter: This is a temporary hack for
++ * callers that provide incorrect information when opening
++ * the device. 0x09 (i.e. 768K) is the only acceptable value.
++ */
++ if (hbr) {
++ iec_header.B.sample_freq = 0x09;
++ iec_header.B.org_sample_freq = 0x00;
++ }
++
++ priv->offset = 0;
++
++ /* Copy data by buffer_bytes */
++ hdmi_dma_data_copy(substream, priv, 'b');
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, SW_FIFO_RST, 0x1);
++
++ /* Delay after reset */
++ udelay(1);
++
++ status = hdmi_readb(HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_writeb(status, HDMI_IH_AHBDMAAUD_STAT0);
++}
++
++static int hdmi_dma_prepare_and_submit(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++
++ priv->desc = dmaengine_prep_dma_cyclic(priv->dma_channel, 0, 0, 0,
++ DMA_TRANS_NONE, 0);
++ if (!priv->desc) {
++ dev_err(dev, "failed to prepare slave dma\n");
++ return -EINVAL;
++ }
++
++ priv->desc->callback = hdmi_sdma_callback;
++ priv->desc->callback_param = (void *)priv;
++ dmaengine_submit(priv->desc);
++
++ return 0;
++}
++
++static int hdmi_dma_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++ int ret;
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
++ if (!check_hdmi_state())
++ return 0;
++ hdmi_dma_trigger_init(substream, priv);
++
++ dumpregs(dev);
++
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ priv->tx_active = true;
++ hdmi_audio_writeb(AHB_DMA_START, START, 0x1);
++ hdmi_dma_irq_set(false);
++ hdmi_set_dma_mode(1);
++ ret = hdmi_dma_prepare_and_submit(substream, priv);
++ if (ret)
++ return ret;
++ dma_async_issue_pending(priv->desc->chan);
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ case SNDRV_PCM_TRIGGER_SUSPEND:
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ dmaengine_terminate_all(priv->dma_channel);
++ hdmi_set_dma_mode(0);
++ hdmi_dma_irq_set(true);
++ hdmi_audio_writeb(AHB_DMA_STOP, STOP, 0x1);
++ priv->tx_active = false;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static snd_pcm_uframes_t hdmi_dma_pointer(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ return bytes_to_frames(runtime, priv->offset);
++}
++
++static struct snd_pcm_hardware snd_imx_hardware = {
++ .info = SNDRV_PCM_INFO_INTERLEAVED |
++ SNDRV_PCM_INFO_BLOCK_TRANSFER |
++ SNDRV_PCM_INFO_MMAP |
++ SNDRV_PCM_INFO_MMAP_VALID |
++ SNDRV_PCM_INFO_PAUSE |
++ SNDRV_PCM_INFO_RESUME,
++ .formats = MXC_HDMI_FORMATS_PLAYBACK,
++ .rate_min = 32000,
++ .channels_min = 2,
++ .channels_max = 8,
++ .buffer_bytes_max = HDMI_PCM_BUF_SIZE,
++ .period_bytes_min = HDMI_DMA_PERIOD_BYTES / 2,
++ .period_bytes_max = HDMI_DMA_PERIOD_BYTES / 2,
++ .periods_min = 8,
++ .periods_max = HDMI_DMA_BUF_SIZE / HDMI_DMA_PERIOD_BYTES,
++ .fifo_size = 0,
++};
++
++static void hdmi_dma_irq_enable(struct hdmi_dma_priv *priv)
++{
++ unsigned long flags;
++
++ hdmi_writeb(0xff, HDMI_AHB_DMA_POL);
++ hdmi_writeb(0xff, HDMI_AHB_DMA_BUFFPOL);
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ hdmi_writeb(0xff, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_dma_irq_set(false);
++ hdmi_mask(0);
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++}
++
++static void hdmi_dma_irq_disable(struct hdmi_dma_priv *priv)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ hdmi_dma_irq_set(true);
++ hdmi_writeb(0x0, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_mask(1);
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++}
++
++static int hdmi_dma_open(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++ struct hdmi_dma_priv *priv = dev_get_drvdata(dev);
++ int ret;
++
++ runtime->private_data = priv;
++
++ ret = mxc_hdmi_register_audio(substream);
++ if (ret < 0) {
++ dev_err(dev, "HDMI Video is not ready!\n");
++ return ret;
++ }
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, SW_FIFO_RST, 0x1);
++
++ ret = snd_pcm_hw_constraint_integer(substream->runtime,
++ SNDRV_PCM_HW_PARAM_PERIODS);
++ if (ret < 0)
++ return ret;
++
++ snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
++
++ hdmi_dma_irq_enable(priv);
++
++ return 0;
++}
++
++static int hdmi_dma_close(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ hdmi_dma_irq_disable(priv);
++ mxc_hdmi_unregister_audio(substream);
++
++ return 0;
++}
++
++static struct snd_pcm_ops imx_hdmi_dma_pcm_ops = {
++ .open = hdmi_dma_open,
++ .close = hdmi_dma_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = hdmi_dma_hw_params,
++ .hw_free = hdmi_dma_hw_free,
++ .trigger = hdmi_dma_trigger,
++ .pointer = hdmi_dma_pointer,
++ .copy = hdmi_dma_copy,
++};
++
++static int imx_hdmi_dma_pcm_new(struct snd_soc_pcm_runtime *rtd)
++{
++ struct hdmi_dma_priv *priv = dev_get_drvdata(rtd->platform->dev);
++ struct snd_card *card = rtd->card->snd_card;
++ struct snd_pcm_substream *substream;
++ struct snd_pcm *pcm = rtd->pcm;
++ u64 dma_mask = DMA_BIT_MASK(32);
++ int ret = 0;
++
++ if (!card->dev->dma_mask)
++ card->dev->dma_mask = &dma_mask;
++ if (!card->dev->coherent_dma_mask)
++ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
++
++ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
++ HDMI_PCM_BUF_SIZE, &substream->dma_buffer);
++ if (ret) {
++ dev_err(card->dev, "failed to alloc playback dma buffer\n");
++ return ret;
++ }
++
++ priv->substream = substream;
++
++ /* Alloc the hw_buffer */
++ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
++ HDMI_DMA_BUF_SIZE, &priv->hw_buffer);
++ if (ret) {
++ dev_err(card->dev, "failed to alloc hw dma buffer\n");
++ return ret;
++ }
++
++ return ret;
++}
++
++static void imx_hdmi_dma_pcm_free(struct snd_pcm *pcm)
++{
++ int stream = SNDRV_PCM_STREAM_PLAYBACK;
++ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
++ struct snd_soc_pcm_runtime *rtd = pcm->private_data;
++ struct hdmi_dma_priv *priv = dev_get_drvdata(rtd->platform->dev);
++
++ if (substream) {
++ snd_dma_free_pages(&substream->dma_buffer);
++ substream->dma_buffer.area = NULL;
++ substream->dma_buffer.addr = 0;
++ }
++
++ /* Free the hw_buffer */
++ snd_dma_free_pages(&priv->hw_buffer);
++ priv->hw_buffer.area = NULL;
++ priv->hw_buffer.addr = 0;
++}
++
++static struct snd_soc_platform_driver imx_hdmi_platform = {
++ .ops = &imx_hdmi_dma_pcm_ops,
++ .pcm_new = imx_hdmi_dma_pcm_new,
++ .pcm_free = imx_hdmi_dma_pcm_free,
++};
++
++static int imx_soc_platform_probe(struct platform_device *pdev)
++{
++ struct imx_hdmi *hdmi_drvdata = platform_get_drvdata(pdev);
++ struct hdmi_dma_priv *priv;
++ int ret = 0;
++
++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ dev_err(&pdev->dev, "Failed to alloc hdmi_dma\n");
++ return -ENOMEM;
++ }
++
++ priv->hdmi_sdma_t = dma_alloc_coherent(NULL,
++ sizeof(struct hdmi_sdma_script),
++ &priv->phy_hdmi_sdma_t, GFP_KERNEL);
++ if (!priv->hdmi_sdma_t) {
++ dev_err(&pdev->dev, "Failed to alloc hdmi_sdma_t\n");
++ return -ENOMEM;
++ }
++
++ priv->tx_active = false;
++ spin_lock_init(&priv->irq_lock);
++
++ priv->pdev = hdmi_drvdata->pdev;
++
++ hdmi_dma_init_iec_header();
++
++ dev_set_drvdata(&pdev->dev, priv);
++
++ switch (hdmi_readb(HDMI_REVISION_ID)) {
++ case 0x0a:
++ snd_imx_hardware.period_bytes_max = HDMI_DMA_PERIOD_BYTES / 4;
++ snd_imx_hardware.period_bytes_min = HDMI_DMA_PERIOD_BYTES / 4;
++ snd_imx_hardware.periods_max = HDMI_DMA_BUF_SIZE / (HDMI_DMA_PERIOD_BYTES / 2);
++ break;
++ default:
++ break;
++ }
++
++ ret = snd_soc_register_platform(&pdev->dev, &imx_hdmi_platform);
++ if (ret)
++ goto err_plat;
++
++ return 0;
++
++err_plat:
++ dma_free_coherent(NULL, sizeof(struct hdmi_sdma_script),
++ priv->hdmi_sdma_t, priv->phy_hdmi_sdma_t);
++
++ return ret;
++}
++
++static int imx_soc_platform_remove(struct platform_device *pdev)
++{
++ struct hdmi_dma_priv *priv = dev_get_drvdata(&pdev->dev);
++
++ dma_free_coherent(NULL, sizeof(struct hdmi_sdma_script),
++ priv->hdmi_sdma_t, priv->phy_hdmi_sdma_t);
++
++ snd_soc_unregister_platform(&pdev->dev);
++
++ return 0;
++}
++
++static struct platform_driver imx_hdmi_dma_driver = {
++ .driver = {
++ .name = "imx-hdmi-audio",
++ .owner = THIS_MODULE,
++ },
++ .probe = imx_soc_platform_probe,
++ .remove = imx_soc_platform_remove,
++};
++
++module_platform_driver(imx_hdmi_dma_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX HDMI audio DMA");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-hdmi.h linux-openelec/sound/soc/fsl/imx-hdmi.h
+--- linux-3.14.36/sound/soc/fsl/imx-hdmi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-openelec/sound/soc/fsl/imx-hdmi.h 2015-05-06 12:05:43.000000000 -0500
+@@ -0,0 +1,105 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __IMX_HDMI_H
++#define __IMX_HDMI_H
++
++struct imx_hdmi_sdma_params {
++ dma_addr_t phyaddr;
++ u32 buffer_num;
++ int dma;
++};
++
++struct imx_hdmi {
++ struct snd_soc_dai_driver cpu_dai_drv;
++ struct platform_device *codec_dev;
++ struct platform_device *dma_dev;
++ struct platform_device *pdev;
++ struct clk *isfr_clk;
++ struct clk *iahb_clk;
++};
++
++#define HDMI_MAX_RATES 7
++#define HDMI_MAX_SAMPLE_SIZE 3
++#define HDMI_MAX_CHANNEL_CONSTRAINTS 4
++
++#define MXC_HDMI_RATES_PLAYBACK \
++ (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
++ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
++ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
++
++#define MXC_HDMI_FORMATS_PLAYBACK \
++ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
++
++union hdmi_audio_header_t {
++ uint64_t U;
++ struct {
++ unsigned consumer:1;
++ unsigned linear_pcm:1;
++ unsigned copyright:1;
++ unsigned pre_emphasis:3;
++ unsigned mode:2;
++
++ unsigned category_code:8;
++
++ unsigned source:4;
++ unsigned channel:4;
++
++ unsigned sample_freq:4;
++ unsigned clock_acc:2;
++ unsigned reserved0:2;
++
++ unsigned word_length:4;
++ unsigned org_sample_freq:4;
++
++ unsigned cgms_a:2;
++ unsigned reserved1:6;
++
++ unsigned reserved2:8;
++
++ unsigned reserved3:8;
++ } B;
++ unsigned char status[8];
++};
++
++union hdmi_audio_dma_data_t {
++ uint32_t U;
++ struct {
++ unsigned data:24;
++ unsigned v:1;
++ unsigned u:1;
++ unsigned c:1;
++ unsigned p:1;
++ unsigned b:1;
++ unsigned reserved:3;
++ } B;
++};
++
++extern union hdmi_audio_header_t iec_header;
++
++#define hdmi_audio_writeb(reg, bit, val) \
++ do { \
++ hdmi_mask_writeb(val, HDMI_ ## reg, \
++ HDMI_ ## reg ## _ ## bit ## _OFFSET, \
++ HDMI_ ## reg ## _ ## bit ## _MASK); \
++ pr_debug("Set reg: HDMI_" #reg " (0x%x) "\
++ "bit: HDMI_" #reg "_" #bit " (%d) to val: %x\n", \
++ HDMI_ ## reg, HDMI_ ## reg ## _ ## bit ## _OFFSET, val); \
++ } while (0)
++
++#endif /* __IMX_HDMI_H */
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-pcm-dma.c linux-openelec/sound/soc/fsl/imx-pcm-dma.c
+--- linux-3.14.36/sound/soc/fsl/imx-pcm-dma.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/imx-pcm-dma.c 2015-05-06 12:05:43.000000000 -0500
+@@ -11,6 +11,10 @@
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/device.h>
+ #include <linux/platform_device.h>
+ #include <linux/dmaengine.h>
+ #include <linux/types.h>
+@@ -20,6 +24,7 @@
+ #include <sound/pcm.h>
+ #include <sound/soc.h>
+ #include <sound/dmaengine_pcm.h>
++#include <linux/platform_data/dma-imx.h>
+
+ #include "imx-pcm.h"
+
+@@ -40,28 +45,97 @@
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+- .buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
++ .formats = SNDRV_PCM_FMTBIT_S16_LE |
++ SNDRV_PCM_FMTBIT_S24_LE |
++ SNDRV_PCM_FMTBIT_S20_3LE,
++ .buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
+ .period_bytes_min = 128,
+ .period_bytes_max = 65535, /* Limited by SDMA engine */
+- .periods_min = 2,
++ .periods_min = 4,
+ .periods_max = 255,
+ .fifo_size = 0,
+ };
+
++static void imx_pcm_dma_set_config_from_dai_data(
++ const struct snd_pcm_substream *substream,
++ const struct snd_dmaengine_dai_dma_data *dma_data,
++ struct dma_slave_config *slave_config)
++{
++ struct imx_dma_data *filter_data = dma_data->filter_data;
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ slave_config->dst_addr = dma_data->addr;
++ slave_config->dst_maxburst = dma_data->maxburst;
++ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
++ slave_config->dst_addr_width = dma_data->addr_width;
++ } else {
++ slave_config->src_addr = dma_data->addr;
++ slave_config->src_maxburst = dma_data->maxburst;
++ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
++ slave_config->src_addr_width = dma_data->addr_width;
++ }
++
++ slave_config->slave_id = dma_data->slave_id;
++
++ /*
++ * In dma binding mode, there is no filter_data, so dma_request need to be
++ * set to zero.
++ */
++ if (filter_data) {
++ slave_config->dma_request0 = filter_data->dma_request0;
++ slave_config->dma_request1 = filter_data->dma_request1;
++ } else {
++ slave_config->dma_request0 = 0;
++ slave_config->dma_request1 = 0;
++ }
++}
++
++static int imx_pcm_dma_prepare_slave_config(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_dmaengine_dai_dma_data *dma_data;
++ int ret;
++
++ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
++
++ ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
++ if (ret)
++ return ret;
++
++ imx_pcm_dma_set_config_from_dai_data(substream, dma_data,
++ slave_config);
++
++ return 0;
++}
++
+ static const struct snd_dmaengine_pcm_config imx_dmaengine_pcm_config = {
+ .pcm_hardware = &imx_pcm_hardware,
+- .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
++ .prepare_slave_config = imx_pcm_dma_prepare_slave_config,
+ .compat_filter_fn = filter,
+- .prealloc_buffer_size = IMX_SSI_DMABUF_SIZE,
++ .prealloc_buffer_size = IMX_DEFAULT_DMABUF_SIZE,
+ };
+
+-int imx_pcm_dma_init(struct platform_device *pdev)
++int imx_pcm_dma_init(struct platform_device *pdev, unsigned int flags, size_t size)
+ {
+- return devm_snd_dmaengine_pcm_register(&pdev->dev,
+- &imx_dmaengine_pcm_config,
+- SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
+- SND_DMAENGINE_PCM_FLAG_COMPAT);
++ struct snd_dmaengine_pcm_config *config;
++ struct snd_pcm_hardware *pcm_hardware;
++
++ config = devm_kzalloc(&pdev->dev,
++ sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL);
++ *config = imx_dmaengine_pcm_config;
++ if (size)
++ config->prealloc_buffer_size = size;
++
++ pcm_hardware = devm_kzalloc(&pdev->dev,
++ sizeof(struct snd_pcm_hardware), GFP_KERNEL);
++ *pcm_hardware = imx_pcm_hardware;
++ if (size)
++ pcm_hardware->buffer_bytes_max = size;
++
++ config->pcm_hardware = pcm_hardware;
++
++ return devm_snd_dmaengine_pcm_register(&pdev->dev, config, flags);
+ }
+ EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
+
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-pcm.h linux-openelec/sound/soc/fsl/imx-pcm.h
+--- linux-3.14.36/sound/soc/fsl/imx-pcm.h 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/imx-pcm.h 2015-05-06 12:05:43.000000000 -0500
+@@ -18,13 +18,17 @@
+ /*
+ * Do not change this as the FIQ handler depends on this size
+ */
++#define IMX_DEFAULT_DMABUF_SIZE (256 * 1024)
+ #define IMX_SSI_DMABUF_SIZE (64 * 1024)
++#define IMX_SPDIF_DMABUF_SIZE (64 * 1024)
++#define IMX_ESAI_DMABUF_SIZE (256 * 1024)
++#define IMX_ASRC_DMABUF_SIZE (256 * 1024)
+
+ static inline void
+ imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
+ int dma, enum sdma_peripheral_type peripheral_type)
+ {
+- dma_data->dma_request = dma;
++ dma_data->dma_request0 = dma;
+ dma_data->priority = DMA_PRIO_HIGH;
+ dma_data->peripheral_type = peripheral_type;
+ }
+@@ -39,9 +43,10 @@
+ };
+
+ #if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
+-int imx_pcm_dma_init(struct platform_device *pdev);
++int imx_pcm_dma_init(struct platform_device *pdev, unsigned int flags, size_t size);
+ #else
+-static inline int imx_pcm_dma_init(struct platform_device *pdev)
++static inline int imx_pcm_dma_init(struct platform_device *pdev,
++ unsigned int flags, size_t size)
+ {
+ return -ENODEV;
+ }
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-spdif.c linux-openelec/sound/soc/fsl/imx-spdif.c
+--- linux-3.14.36/sound/soc/fsl/imx-spdif.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/imx-spdif.c 2015-05-06 12:05:43.000000000 -0500
+@@ -65,14 +65,15 @@
+ if (ret)
+ goto end;
+
++ platform_set_drvdata(pdev, &data->card);
++ snd_soc_card_set_drvdata(&data->card, data);
++
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed: %d\n", ret);
+ goto end;
+ }
+
+- platform_set_drvdata(pdev, data);
+-
+ end:
+ if (spdif_np)
+ of_node_put(spdif_np);
+@@ -90,6 +91,7 @@
+ .driver = {
+ .name = "imx-spdif",
+ .owner = THIS_MODULE,
++ .pm = &snd_soc_pm_ops,
+ .of_match_table = imx_spdif_dt_ids,
+ },
+ .probe = imx_spdif_audio_probe,
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-ssi.c linux-openelec/sound/soc/fsl/imx-ssi.c
+--- linux-3.14.36/sound/soc/fsl/imx-ssi.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/imx-ssi.c 2015-05-06 12:05:43.000000000 -0500
+@@ -602,7 +602,8 @@
+ ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
+
+ ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
+- ssi->dma_init = imx_pcm_dma_init(pdev);
++ ssi->dma_init = imx_pcm_dma_init(pdev, SND_DMAENGINE_PCM_FLAG_NO_RESIDUE,
++ IMX_SSI_DMABUF_SIZE);
+
+ if (ssi->fiq_init && ssi->dma_init) {
+ ret = ssi->fiq_init;
+diff -Nur linux-3.14.36/sound/soc/fsl/imx-wm8962.c linux-openelec/sound/soc/fsl/imx-wm8962.c
+--- linux-3.14.36/sound/soc/fsl/imx-wm8962.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/imx-wm8962.c 2015-05-06 12:05:43.000000000 -0500
+@@ -1,9 +1,9 @@
+ /*
+- * Copyright 2013 Freescale Semiconductor, Inc.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Based on imx-sgtl5000.c
+- * Copyright 2012 Freescale Semiconductor, Inc.
+- * Copyright 2012 Linaro Ltd.
++ * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+@@ -16,9 +16,12 @@
+ #include <linux/module.h>
+ #include <linux/of_platform.h>
+ #include <linux/i2c.h>
++#include <linux/of_gpio.h>
+ #include <linux/slab.h>
++#include <linux/gpio.h>
+ #include <linux/clk.h>
+ #include <sound/soc.h>
++#include <sound/jack.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc-dapm.h>
+ #include <linux/pinctrl/consumer.h>
+@@ -33,15 +36,134 @@
+ struct snd_soc_card card;
+ char codec_dai_name[DAI_NAME_SIZE];
+ char platform_name[DAI_NAME_SIZE];
+- struct clk *codec_clk;
+ unsigned int clk_frequency;
+ };
+
+ struct imx_priv {
++ int hp_gpio;
++ int hp_active_low;
++ int mic_gpio;
++ int mic_active_low;
++ bool amic_mono;
++ bool dmic_mono;
++ struct snd_soc_codec *codec;
+ struct platform_device *pdev;
++ struct snd_pcm_substream *first_stream;
++ struct snd_pcm_substream *second_stream;
+ };
+ static struct imx_priv card_priv;
+
++static struct snd_soc_jack imx_hp_jack;
++static struct snd_soc_jack_pin imx_hp_jack_pins[] = {
++ {
++ .pin = "Headphone Jack",
++ .mask = SND_JACK_HEADPHONE,
++ },
++};
++static struct snd_soc_jack_gpio imx_hp_jack_gpio = {
++ .name = "headphone detect",
++ .report = SND_JACK_HEADPHONE,
++ .debounce_time = 250,
++ .invert = 0,
++};
++
++static struct snd_soc_jack imx_mic_jack;
++static struct snd_soc_jack_pin imx_mic_jack_pins[] = {
++ {
++ .pin = "AMIC",
++ .mask = SND_JACK_MICROPHONE,
++ },
++};
++static struct snd_soc_jack_gpio imx_mic_jack_gpio = {
++ .name = "microphone detect",
++ .report = SND_JACK_MICROPHONE,
++ .debounce_time = 250,
++ .invert = 0,
++};
++
++static int hpjack_status_check(void)
++{
++ struct imx_priv *priv = &card_priv;
++ struct platform_device *pdev = priv->pdev;
++ char *envp[3], *buf;
++ int hp_status, ret;
++
++ if (!gpio_is_valid(priv->hp_gpio))
++ return 0;
++
++ hp_status = gpio_get_value(priv->hp_gpio) ? 1 : 0;
++
++ buf = kmalloc(32, GFP_ATOMIC);
++ if (!buf) {
++ dev_err(&pdev->dev, "%s kmalloc failed\n", __func__);
++ return -ENOMEM;
++ }
++
++ if (hp_status != priv->hp_active_low) {
++ snprintf(buf, 32, "STATE=%d", 2);
++ snd_soc_dapm_disable_pin(&priv->codec->dapm, "Ext Spk");
++ ret = imx_hp_jack_gpio.report;
++ } else {
++ snprintf(buf, 32, "STATE=%d", 0);
++ snd_soc_dapm_enable_pin(&priv->codec->dapm, "Ext Spk");
++ ret = 0;
++ }
++
++ envp[0] = "NAME=headphone";
++ envp[1] = buf;
++ envp[2] = NULL;
++ kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
++ kfree(buf);
++
++ return ret;
++}
++
++static int micjack_status_check(void)
++{
++ struct imx_priv *priv = &card_priv;
++ struct platform_device *pdev = priv->pdev;
++ char *envp[3], *buf;
++ int mic_status, ret;
++
++ if (!gpio_is_valid(priv->mic_gpio))
++ return 0;
++
++ mic_status = gpio_get_value(priv->mic_gpio) ? 1 : 0;
++
++ if ((mic_status != priv->mic_active_low && priv->amic_mono)
++ || (mic_status == priv->mic_active_low && priv->dmic_mono))
++ snd_soc_update_bits(priv->codec, WM8962_THREED1,
++ WM8962_ADC_MONOMIX_MASK, WM8962_ADC_MONOMIX);
++ else
++ snd_soc_update_bits(priv->codec, WM8962_THREED1,
++ WM8962_ADC_MONOMIX_MASK, 0);
++
++ buf = kmalloc(32, GFP_ATOMIC);
++ if (!buf) {
++ dev_err(&pdev->dev, "%s kmalloc failed\n", __func__);
++ return -ENOMEM;
++ }
++
++ if (mic_status != priv->mic_active_low) {
++ snprintf(buf, 32, "STATE=%d", 2);
++ snd_soc_dapm_disable_pin(&priv->codec->dapm, "DMIC");
++ ret = imx_mic_jack_gpio.report;
++ } else {
++ snprintf(buf, 32, "STATE=%d", 0);
++ snd_soc_dapm_enable_pin(&priv->codec->dapm, "DMIC");
++ ret = 0;
++ }
++
++ envp[0] = "NAME=microphone";
++ envp[1] = buf;
++ envp[2] = NULL;
++ kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
++ kfree(buf);
++
++ return ret;
++}
++
++
+ static const struct snd_soc_dapm_widget imx_wm8962_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+@@ -49,14 +171,57 @@
+ SND_SOC_DAPM_MIC("DMIC", NULL),
+ };
+
+-static int sample_rate = 44100;
+-static snd_pcm_format_t sample_format = SNDRV_PCM_FORMAT_S16_LE;
+-
+ static int imx_hifi_hw_params(struct snd_pcm_substream *substream,
+- struct snd_pcm_hw_params *params)
++ struct snd_pcm_hw_params *params)
+ {
+- sample_rate = params_rate(params);
+- sample_format = params_format(params);
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *codec_dai = rtd->codec_dai;
++ struct imx_priv *priv = &card_priv;
++ struct device *dev = &priv->pdev->dev;
++ struct snd_soc_card *card = codec_dai->codec->card;
++ struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
++ unsigned int sample_rate = params_rate(params);
++ snd_pcm_format_t sample_format = params_format(params);
++ u32 dai_format, pll_out;
++ int ret = 0;
++
++ if (!priv->first_stream) {
++ priv->first_stream = substream;
++ } else {
++ priv->second_stream = substream;
++
++ /* We suppose the two substream are using same params */
++ return 0;
++ }
++
++ dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_CBM_CFM;
++
++ /* set codec DAI configuration */
++ ret = snd_soc_dai_set_fmt(codec_dai, dai_format);
++ if (ret) {
++ dev_err(dev, "failed to set codec dai fmt: %d\n", ret);
++ return ret;
++ }
++
++ if (sample_format == SNDRV_PCM_FORMAT_S24_LE)
++ pll_out = sample_rate * 384;
++ else
++ pll_out = sample_rate * 256;
++
++ ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL, WM8962_FLL_MCLK,
++ data->clk_frequency, pll_out);
++ if (ret) {
++ dev_err(dev, "failed to start FLL: %d\n", ret);
++ return ret;
++ }
++
++ ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_FLL,
++ pll_out, SND_SOC_CLOCK_IN);
++ if (ret) {
++ dev_err(dev, "failed to set SYSCLK: %d\n", ret);
++ return ret;
++ }
+
+ return 0;
+ }
+@@ -133,6 +298,89 @@
+ return 0;
+ }
+
++static int imx_wm8962_gpio_init(struct snd_soc_pcm_runtime *rtd)
++{
++ struct snd_soc_codec *codec = rtd->codec;
++ struct imx_priv *priv = &card_priv;
++
++ priv->codec = codec;
++
++ if (gpio_is_valid(priv->hp_gpio)) {
++ imx_hp_jack_gpio.gpio = priv->hp_gpio;
++ imx_hp_jack_gpio.jack_status_check = hpjack_status_check;
++
++ snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE, &imx_hp_jack);
++ snd_soc_jack_add_pins(&imx_hp_jack,
++ ARRAY_SIZE(imx_hp_jack_pins), imx_hp_jack_pins);
++ snd_soc_jack_add_gpios(&imx_hp_jack, 1, &imx_hp_jack_gpio);
++ }
++
++ if (gpio_is_valid(priv->mic_gpio)) {
++ imx_mic_jack_gpio.gpio = priv->mic_gpio;
++ imx_mic_jack_gpio.jack_status_check = micjack_status_check;
++
++ snd_soc_jack_new(codec, "AMIC", SND_JACK_MICROPHONE, &imx_mic_jack);
++ snd_soc_jack_add_pins(&imx_mic_jack,
++ ARRAY_SIZE(imx_mic_jack_pins), imx_mic_jack_pins);
++ snd_soc_jack_add_gpios(&imx_mic_jack, 1, &imx_mic_jack_gpio);
++ } else if (priv->amic_mono || priv->dmic_mono) {
++ /*
++ * Permanent set monomix bit if only one microphone
++ * is present on the board while it needs monomix.
++ */
++ snd_soc_update_bits(priv->codec, WM8962_THREED1,
++ WM8962_ADC_MONOMIX_MASK, WM8962_ADC_MONOMIX);
++ }
++
++ return 0;
++}
++
++static ssize_t show_headphone(struct device_driver *dev, char *buf)
++{
++ struct imx_priv *priv = &card_priv;
++ int hp_status;
++
++ if (!gpio_is_valid(priv->hp_gpio)) {
++ strcpy(buf, "no detect gpio connected\n");
++ return strlen(buf);
++ }
++
++ /* Check if headphone is plugged in */
++ hp_status = gpio_get_value(priv->hp_gpio) ? 1 : 0;
++
++ if (hp_status != priv->hp_active_low)
++ strcpy(buf, "headphone\n");
++ else
++ strcpy(buf, "speaker\n");
++
++ return strlen(buf);
++}
++
++static DRIVER_ATTR(headphone, S_IRUGO | S_IWUSR, show_headphone, NULL);
++
++static ssize_t show_mic(struct device_driver *dev, char *buf)
++{
++ struct imx_priv *priv = &card_priv;
++ int mic_status;
++
++ if (!gpio_is_valid(priv->mic_gpio)) {
++ strcpy(buf, "no detect gpio connected\n");
++ return strlen(buf);
++ }
++
++ /* Check if analog microphone is plugged in */
++ mic_status = gpio_get_value(priv->mic_gpio) ? 1 : 0;
++
++ if (mic_status != priv->mic_active_low)
++ strcpy(buf, "amic\n");
++ else
++ strcpy(buf, "dmic\n");
++
++ return strlen(buf);
++}
++
++static DRIVER_ATTR(microphone, S_IRUGO | S_IWUSR, show_mic, NULL);
++
+ static int imx_wm8962_late_probe(struct snd_soc_card *card)
+ {
+ struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
+@@ -157,6 +405,7 @@
+ struct imx_priv *priv = &card_priv;
+ struct i2c_client *codec_dev;
+ struct imx_wm8962_data *data;
++ struct clk *codec_clk = NULL;
+ int int_port, ext_port;
+ int ret;
+
+@@ -219,25 +468,31 @@
+ goto fail;
+ }
+
++ priv->first_stream = NULL;
++ priv->second_stream = NULL;
++
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+- data->codec_clk = devm_clk_get(&codec_dev->dev, NULL);
+- if (IS_ERR(data->codec_clk)) {
+- ret = PTR_ERR(data->codec_clk);
++ codec_clk = devm_clk_get(&codec_dev->dev, NULL);
++ if (IS_ERR(codec_clk)) {
++ ret = PTR_ERR(codec_clk);
+ dev_err(&codec_dev->dev, "failed to get codec clk: %d\n", ret);
+ goto fail;
+ }
+
+- data->clk_frequency = clk_get_rate(data->codec_clk);
+- ret = clk_prepare_enable(data->codec_clk);
+- if (ret) {
+- dev_err(&codec_dev->dev, "failed to enable codec clk: %d\n", ret);
+- goto fail;
+- }
++ data->clk_frequency = clk_get_rate(codec_clk);
++
++ priv->amic_mono = of_property_read_bool(codec_np, "amic-mono");
++ priv->dmic_mono = of_property_read_bool(codec_np, "dmic-mono");
++
++ priv->hp_gpio = of_get_named_gpio_flags(np, "hp-det-gpios", 0,
++ (enum of_gpio_flags *)&priv->hp_active_low);
++ priv->mic_gpio = of_get_named_gpio_flags(np, "mic-det-gpios", 0,
++ (enum of_gpio_flags *)&priv->mic_active_low);
+
+ data->dai.name = "HiFi";
+ data->dai.stream_name = "HiFi";
+@@ -246,23 +501,23 @@
+ data->dai.cpu_dai_name = dev_name(&ssi_pdev->dev);
+ data->dai.platform_of_node = ssi_np;
+ data->dai.ops = &imx_hifi_ops;
++ data->dai.init = &imx_wm8962_gpio_init;
+ data->dai.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM;
+
+ data->card.dev = &pdev->dev;
+ ret = snd_soc_of_parse_card_name(&data->card, "model");
+ if (ret)
+- goto clk_fail;
++ goto fail;
+ ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
+ if (ret)
+- goto clk_fail;
++ goto fail;
+ data->card.num_links = 1;
+ data->card.dai_link = &data->dai;
+ data->card.dapm_widgets = imx_wm8962_dapm_widgets;
+ data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
+
+ data->card.late_probe = imx_wm8962_late_probe;
+- data->card.set_bias_level = imx_wm8962_set_bias_level;
+
+ platform_set_drvdata(pdev, &data->card);
+ snd_soc_card_set_drvdata(&data->card, data);
+@@ -270,16 +525,31 @@
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+- goto clk_fail;
++ goto fail;
+ }
+
+- of_node_put(ssi_np);
+- of_node_put(codec_np);
++ if (gpio_is_valid(priv->hp_gpio)) {
++ ret = driver_create_file(pdev->dev.driver, &driver_attr_headphone);
++ if (ret) {
++ dev_err(&pdev->dev, "create hp attr failed (%d)\n", ret);
++ goto fail_hp;
++ }
++ }
+
+- return 0;
++ if (gpio_is_valid(priv->mic_gpio)) {
++ ret = driver_create_file(pdev->dev.driver, &driver_attr_microphone);
++ if (ret) {
++ dev_err(&pdev->dev, "create mic attr failed (%d)\n", ret);
++ goto fail_mic;
++ }
++ }
++
++ goto fail;
+
+-clk_fail:
+- clk_disable_unprepare(data->codec_clk);
++fail_mic:
++ driver_remove_file(pdev->dev.driver, &driver_attr_headphone);
++fail_hp:
++ snd_soc_unregister_card(&data->card);
+ fail:
+ if (ssi_np)
+ of_node_put(ssi_np);
+@@ -291,11 +561,8 @@
+
+ static int imx_wm8962_remove(struct platform_device *pdev)
+ {
+- struct snd_soc_card *card = platform_get_drvdata(pdev);
+- struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
+-
+- if (!IS_ERR(data->codec_clk))
+- clk_disable_unprepare(data->codec_clk);
++ driver_remove_file(pdev->dev.driver, &driver_attr_microphone);
++ driver_remove_file(pdev->dev.driver, &driver_attr_headphone);
+
+ return 0;
+ }
+diff -Nur linux-3.14.36/sound/soc/fsl/Kconfig linux-openelec/sound/soc/fsl/Kconfig
+--- linux-3.14.36/sound/soc/fsl/Kconfig 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/Kconfig 2015-07-24 18:03:30.304842002 -0500
+@@ -11,6 +11,12 @@
+ config SND_SOC_FSL_ESAI
+ tristate
+
++config SND_SOC_FSL_ASRC
++ tristate
++
++config SND_SOC_FSL_HDMI
++ tristate
++
+ config SND_SOC_FSL_UTILS
+ tristate
+
+@@ -126,6 +132,11 @@
+ tristate
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+
++config SND_SOC_IMX_HDMI_DMA
++ bool
++ select SND_SOC_GENERIC_DMAENGINE_PCM
++ select SND_SOC_IMX_PCM_DMA
++
+ config SND_SOC_IMX_AUDMUX
+ tristate
+
+@@ -178,6 +189,18 @@
+ Enable I2S based access to the TLV320AIC23B codec attached
+ to the SSI interface
+
++config SND_SOC_IMX_CS42888
++ tristate "SoC Audio support for i.MX boards with cs42888"
++ depends on OF && I2C
++ select SND_SOC_CS42888
++ select SND_SOC_IMX_PCM_DMA
++ select SND_SOC_FSL_ESAI
++ select SND_SOC_FSL_UTILS
++ help
++ SoC Audio support for i.MX boards with cs42888
++ Say Y if you want to add support for SoC audio on an i.MX board with
++ a cs42888 codec.
++
+ config SND_SOC_IMX_WM8962
+ tristate "SoC Audio support for i.MX boards with wm8962"
+ depends on OF && I2C
+@@ -200,6 +223,18 @@
+ Say Y if you want to add support for SoC audio on an i.MX board with
+ a sgtl5000 codec.
+
++config SND_SOC_IMX_AC97_VT1613
++ tristate "SoC Audio support for i.MX boards with VT1613 AC'97"
++ depends on OF
++ select SND_SOC_AC97_BUS
++ select SND_SOC_VT1613
++ select SND_SOC_IMX_PCM_DMA
++ select SND_SOC_IMX_AUDMUX
++ select SND_SOC_FSL_SSI
++ help
++ Say Y if you want to add support for SoC audio on an i.MX board with
++ a VT1613 codec in AC97 mode.
++
+ config SND_SOC_IMX_SPDIF
+ tristate "SoC Audio support for i.MX boards with S/PDIF"
+ select SND_SOC_IMX_PCM_DMA
+@@ -210,6 +245,17 @@
+ Say Y if you want to add support for SoC audio on an i.MX board with
+ a S/DPDIF.
+
++config SND_SOC_IMX_HDMI
++ tristate "SoC Audio support for i.MX boards with HDMI port"
++ depends on MFD_MXC_HDMI
++ select SND_SOC_IMX_HDMI_DMA
++ select SND_SOC_FSL_HDMI
++ select SND_SOC_HDMI_CODEC
++ help
++ SoC Audio support for i.MX boards with HDMI audio
++ Say Y if you want to add support for SoC audio on an i.MX board with
++ IMX HDMI.
++
+ config SND_SOC_IMX_MC13783
+ tristate "SoC Audio support for I.MX boards with mc13783"
+ depends on MFD_MC13XXX && ARM
+diff -Nur linux-3.14.36/sound/soc/fsl/Makefile linux-openelec/sound/soc/fsl/Makefile
+--- linux-3.14.36/sound/soc/fsl/Makefile 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/fsl/Makefile 2015-07-24 18:03:30.304842002 -0500
+@@ -14,13 +14,19 @@
+ snd-soc-fsl-sai-objs := fsl_sai.o
+ snd-soc-fsl-ssi-objs := fsl_ssi.o
+ snd-soc-fsl-spdif-objs := fsl_spdif.o
++snd-soc-fsl-hdmi-objs := fsl_hdmi.o
+ snd-soc-fsl-esai-objs := fsl_esai.o
++snd-soc-fsl-asrc-pcm-objs := fsl_asrc_pcm.o
++snd-soc-fsl-asrc-objs := fsl_asrc.o
+ snd-soc-fsl-utils-objs := fsl_utils.o
+ snd-soc-fsl-dma-objs := fsl_dma.o
+ obj-$(CONFIG_SND_SOC_FSL_SAI) += snd-soc-fsl-sai.o
+ obj-$(CONFIG_SND_SOC_FSL_SSI) += snd-soc-fsl-ssi.o
+ obj-$(CONFIG_SND_SOC_FSL_SPDIF) += snd-soc-fsl-spdif.o
++obj-$(CONFIG_SND_SOC_FSL_HDMI) += snd-soc-fsl-hdmi.o
+ obj-$(CONFIG_SND_SOC_FSL_ESAI) += snd-soc-fsl-esai.o
++obj-$(CONFIG_SND_SOC_FSL_ASRC) += snd-soc-fsl-asrc-pcm.o
++obj-$(CONFIG_SND_SOC_FSL_ASRC) += snd-soc-fsl-asrc.o
+ obj-$(CONFIG_SND_SOC_FSL_UTILS) += snd-soc-fsl-utils.o
+ obj-$(CONFIG_SND_SOC_POWERPC_DMA) += snd-soc-fsl-dma.o
+
+@@ -41,22 +47,29 @@
+
+ obj-$(CONFIG_SND_SOC_IMX_PCM_FIQ) += imx-pcm-fiq.o
+ obj-$(CONFIG_SND_SOC_IMX_PCM_DMA) += imx-pcm-dma.o
++obj-$(CONFIG_SND_SOC_IMX_HDMI_DMA) += imx-hdmi-dma.o
+
+ # i.MX Machine Support
+ snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o
+ snd-soc-phycore-ac97-objs := phycore-ac97.o
+ snd-soc-mx27vis-aic32x4-objs := mx27vis-aic32x4.o
+ snd-soc-wm1133-ev1-objs := wm1133-ev1.o
++snd-soc-imx-cs42888-objs := imx-cs42888.o
+ snd-soc-imx-sgtl5000-objs := imx-sgtl5000.o
++snd-soc-imx-ac97-vt1613-objs := imx-ac97-vt1613.o
+ snd-soc-imx-wm8962-objs := imx-wm8962.o
+ snd-soc-imx-spdif-objs := imx-spdif.o
++snd-soc-imx-hdmi-objs := imx-hdmi.o
+ snd-soc-imx-mc13783-objs := imx-mc13783.o
+
+ obj-$(CONFIG_SND_SOC_EUKREA_TLV320) += snd-soc-eukrea-tlv320.o
+ obj-$(CONFIG_SND_SOC_PHYCORE_AC97) += snd-soc-phycore-ac97.o
+ obj-$(CONFIG_SND_SOC_MX27VIS_AIC32X4) += snd-soc-mx27vis-aic32x4.o
+ obj-$(CONFIG_SND_MXC_SOC_WM1133_EV1) += snd-soc-wm1133-ev1.o
++obj-$(CONFIG_SND_SOC_IMX_CS42888) += snd-soc-imx-cs42888.o
+ obj-$(CONFIG_SND_SOC_IMX_SGTL5000) += snd-soc-imx-sgtl5000.o
++obj-$(CONFIG_SND_SOC_IMX_AC97_VT1613) += snd-soc-imx-ac97-vt1613.o
+ obj-$(CONFIG_SND_SOC_IMX_WM8962) += snd-soc-imx-wm8962.o
+ obj-$(CONFIG_SND_SOC_IMX_SPDIF) += snd-soc-imx-spdif.o
++obj-$(CONFIG_SND_SOC_IMX_HDMI) += snd-soc-imx-hdmi.o
+ obj-$(CONFIG_SND_SOC_IMX_MC13783) += snd-soc-imx-mc13783.o
+diff -Nur linux-3.14.36/sound/soc/soc-core.c linux-openelec/sound/soc/soc-core.c
+--- linux-3.14.36/sound/soc/soc-core.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/soc-core.c 2015-07-24 18:03:30.320842002 -0500
+@@ -523,7 +523,7 @@
+ static void soc_ac97_device_release(struct device *dev){}
+
+ /* register ac97 codec to bus */
+-static int soc_ac97_dev_register(struct snd_soc_codec *codec)
++int soc_ac97_dev_register(struct snd_soc_codec *codec)
+ {
+ int err;
+
+@@ -541,6 +541,7 @@
+ }
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(soc_ac97_dev_register);
+ #endif
+
+ static void codec2codec_close_delayed_work(struct work_struct *work)
+@@ -882,6 +883,8 @@
+ dai_link->cpu_dai_name);
+ return -EPROBE_DEFER;
+ }
++
++ dev_err(card->dev, "ASoC: CPU DAI %s registered\n", dai_link->cpu_dai_name);
+
+ /* Find CODEC from registered CODECs */
+ list_for_each_entry(codec, &codec_list, list) {
+@@ -920,6 +923,8 @@
+ dai_link->codec_name);
+ return -EPROBE_DEFER;
+ }
++
++ dev_err(card->dev, "ASoC: CODEC %s registered\n", dai_link->codec_name);
+
+ /* if there's no platform we match on the empty platform */
+ platform_name = dai_link->platform_name;
+diff -Nur linux-3.14.36/sound/soc/soc-pcm.c linux-openelec/sound/soc/soc-pcm.c
+--- linux-3.14.36/sound/soc/soc-pcm.c 2015-03-18 07:31:43.000000000 -0500
++++ linux-openelec/sound/soc/soc-pcm.c 2015-07-24 18:03:29.096842002 -0500
+@@ -945,7 +945,7 @@
+ }
+ }
+
+- dev_err(card->dev, "ASoC: can't get %s BE for %s\n",
++ dev_dbg(card->dev, "ASoC: can't get %s BE for %s\n",
+ stream ? "capture" : "playback", widget->name);
+ return NULL;
+ }
+@@ -1062,7 +1062,7 @@
+ /* is there a valid BE rtd for this widget */
+ be = dpcm_get_be(card, list->widgets[i], stream);
+ if (!be) {
+- dev_err(fe->dev, "ASoC: no BE found for %s\n",
++ dev_dbg(fe->dev, "ASoC: no BE found for %s\n",
+ list->widgets[i]->name);
+ continue;
+ }
diff --git a/target/config/Config.in.kernelversion.choice b/target/config/Config.in.kernelversion.choice
index b99ab4ef5..22d095e36 100644
--- a/target/config/Config.in.kernelversion.choice
+++ b/target/config/Config.in.kernelversion.choice
@@ -44,6 +44,15 @@ config ADK_KERNEL_VERSION_3_14_45
depends on !ADK_TARGET_ARCH_H8300
select ADK_KERNEL_VERSION_3_14
+config ADK_KERNEL_VERSION_3_14_36
+ bool "3.14.36"
+ depends on !ADK_TARGET_ARCH_NIOS2
+ depends on !ADK_TARGET_SYSTEM_RASPBERRY_PI
+ depends on !ADK_TARGET_SYSTEM_RASPBERRY_PI2
+ depends on !ADK_TARGET_SYSTEM_QEMU_SPARC
+ depends on !ADK_TARGET_ARCH_H8300
+ select ADK_KERNEL_VERSION_3_14
+
config ADK_KERNEL_VERSION_3_12_44
bool "3.12.44"
depends on !ADK_TARGET_SYSTEM_SOLIDRUN_IMX6
diff --git a/target/config/Config.in.kernelversion.default b/target/config/Config.in.kernelversion.default
index 8f7302194..e350a73fc 100644
--- a/target/config/Config.in.kernelversion.default
+++ b/target/config/Config.in.kernelversion.default
@@ -34,6 +34,7 @@ config ADK_KERNEL_VERSION
default "4.0.6" if ADK_KERNEL_VERSION_4_0_6
default "3.18.16" if ADK_KERNEL_VERSION_3_18_16
default "3.14.45" if ADK_KERNEL_VERSION_3_14_45
+ default "3.14.36" if ADK_KERNEL_VERSION_3_14_36
default "3.12.44" if ADK_KERNEL_VERSION_3_12_44
default "3.10.81" if ADK_KERNEL_VERSION_3_10_81
default "3.4.108" if ADK_KERNEL_VERSION_3_4_108